sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
tinygrad/tinygrad:extra/thunder/tiny/tk/kernel.py | from contextlib import AbstractContextManager
from tinygrad.uop.ops import UOp, KernelInfo, AxisType, AddrSpace
from extra.thunder.tiny.tk import WARP_THREADS
from extra.thunder.tiny.tk.group import Group
from extra.thunder.tiny.tk.tiles import GL, ST_16X16, ST, RT_16X16, RT, RV, TileLayout, VecLayout
class _tk_range:
  """Single-shot iterator: yields exactly one UOp range covering [start, end) with the given step, then stops."""
  def __init__(self, start:int, end:int, step:int, axis_type:AxisType, rid:int):
    self.start, self.end, self.step = start, end, step
    self.axis_type, self.rid = axis_type, rid
    self.done = False
  def __iter__(self): return self
  def __next__(self):
    # produce the range once; every later next() raises StopIteration
    if self.done: raise StopIteration
    self.done = True
    trip_count = (self.end - self.start) // self.step
    base = UOp.range(trip_count, self.rid, axis_type=self.axis_type)
    self._rng = base * self.step + self.start
    return self._rng
class Kernel(AbstractContextManager):
def __init__(self, name:str, grid_size:tuple[int, int, int], block_size:int):
self.name = name
self.blockIdx_x = UOp.special(grid_size[0], "gidx0")
self.blockIdx_y = UOp.special(grid_size[1], "gidx1")
self.blockIdx_z = UOp.special(grid_size[2], "gidx2")
self.threadIdx_x = UOp.special(block_size, "lidx0")
self.range_stack: list[_tk_range] = []
self.store_stack: list[tuple[UOp, UOp]] = []
self.global_slot = 0
self.shared_slot = 0
self.register_slot = 0
self.range_id = 0
self.allocs: dict[tuple[str, tuple], UOp] = {}
@property
def warpid(self): return self.threadIdx_x // WARP_THREADS
@property
def laneid(self): return self.threadIdx_x % WARP_THREADS
def __enter__(self): return self
def __exit__(self, exc_type, exc_value, traceback): pass
def group(self, size:int): return Group(size, self)
@property
def warp(self): return self.group(1)
@property
def warpgroup(self): return self.group(4)
def range(self, start:int, end:int=0, step:int=1, axis_type:AxisType=AxisType.LOOP, track:bool=True):
if end == 0: start, end = 0, start
rng = _tk_range(start, end, step, axis_type, self.range_id)
self.range_id += 1
if track: self.range_stack.append(rng)
return rng
def raw_range(self, end:int=0, axis_type:AxisType=AxisType.LOOP):
rng = UOp.range(end, self.range_id, axis_type=axis_type)
self.range_id += 1
return rng
def alloc(self, shape, dtype, addrspace:AddrSpace, name:str|None=None):
match addrspace:
case AddrSpace.GLOBAL:
slot = self.global_slot
self.global_slot += 1
case AddrSpace.LOCAL:
slot = self.shared_slot
self.shared_slot += 1
case AddrSpace.REG:
slot = self.register_slot
self.register_slot += 1
uop = UOp.placeholder(shape, dtype, slot=slot, addrspace=addrspace)
if name:
if (name, shape) in self.allocs: return self.allocs[(name, shape)]
self.allocs[(name, shape)] = uop
return uop
def gl(self, shape, dtype): return GL.create(shape, dtype, self)
def st(self, shape, dtype, layout=TileLayout.ROW, base_shape=ST_16X16): return ST.create(shape, dtype, layout, base_shape, self)
def rt(self, shape, dtype, layout=TileLayout.ROW, base_shape=RT_16X16): return RT.create(shape, dtype, layout, base_shape, self)
def rv(self, length, dtype, layout=VecLayout.ORTHO, rt_base_shape=RT_16X16): return RV.create(length, dtype, layout, rt_base_shape, self)
def push_store(self, store:UOp, uop:UOp): self.store_stack.append((store, uop))
def finish(self, stores:int=1):
# end all ranges
rngs = []
while self.range_stack: rngs.append(self.range_stack.pop(0)._rng)
# end stores stores
store_uops = []
for _ in range(stores):
store = self.store_stack.pop()[0]
if hasattr(store, '_uop'): store_uops.append(store._uop)
else: store_uops.append(store)
uop = UOp.group(*store_uops)
return uop.end(*rngs).sink(arg=KernelInfo(name=self.name, opts_to_apply=())).simplify()
def endrange(self, ranges:int=1):
last_store = self.store_stack.pop()
rngs = []
for _ in range(ranges):
last_range = self.range_stack.pop()
rngs.append(last_range._rng)
return last_store[1].after(last_store[0].end(*rngs)).reshape(last_store[1].shape)
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "extra/thunder/tiny/tk/kernel.py",
"license": "MIT License",
"lines": 91,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
tinygrad/tinygrad:extra/thunder/tiny/tk/tiles.py | from enum import Enum, auto
import functools
from typing import Callable
from dataclasses import dataclass
from tinygrad.dtype import AddrSpace, DType
from tinygrad.mixin import ElementwiseMixin
from tinygrad.uop.ops import UOp, Ops
from extra.thunder.tiny.tk import WARP_THREADS
_MISSING = object()
def unwrap(x):
  """Recursively strip tile wrappers down to their underlying `_uop`; list/tuple/dict
  containers are rebuilt element-wise, anything else passes through unchanged."""
  inner = getattr(x, "_uop", _MISSING)
  if inner is not _MISSING: return inner
  if isinstance(x, dict): return {key: unwrap(val) for key, val in x.items()}
  if isinstance(x, (list, tuple)): return type(x)(map(unwrap, x))
  return x
def wrap(x, s):
  """Inverse of unwrap: rewrap raw UOps (including those nested in list/tuple
  containers) via `s.ruop`; any other value passes through untouched."""
  if isinstance(x, (list, tuple)): return type(x)(wrap(item, s) for item in x)
  return s.ruop(x) if isinstance(x, UOp) else x
def autowrap(source_cls, blacklist=None):
  """Class decorator: make a wrapper class transparently proxy to its wrapped `_uop`.

  Adds a `__getattr__` that forwards unknown attributes/methods to `self._uop`,
  and copies `source_cls`'s dunder methods onto the wrapper (unless already defined),
  re-wrapping any returned values through `self.ruop` via `wrap`/`unwrap`.
  """
  if blacklist is None:
    # dunders that must never be proxied (identity / lifecycle / pickling machinery)
    blacklist = {
      "__init__", "__new__", "__str__", "__del__", "__repr__", "__dict__", "__getattribute__",
      "__setattr__", "__delattr__", "__weakref__", "__slots__", "__class__",
      "__reduce__", "__reduce_ex__", "__getstate__", "__setstate__", "__hash__"
    }
  def decorator(cls):
    def __getattr__(self, name):
      # only reached when normal lookup fails: delegate to the wrapped uop
      uop = object.__getattribute__(self, "_uop")
      val = getattr(uop, name)
      if callable(val):
        @functools.wraps(val)
        def proxy(*args, **kwargs):
          # unwrap tile args to raw UOps, call through, then rewrap the result
          return wrap(val(*unwrap(args), **unwrap(kwargs)), self)
        return proxy
      if name in UOp.__slots__: return val # type: ignore
      return wrap(val, self)
    cls.__getattr__ = __getattr__
    # copy dunder methods (only) from source_cls, skipping any the wrapper
    # (or one of its bases below source_cls in the MRO) already defines
    for name in dir(source_cls):
      if name in blacklist or not name.startswith("__"): continue
      for base in cls.mro():
        if base is source_cls: break
        if name in base.__dict__: break
      else:
        original = getattr(source_cls, name)
        if callable(original):
          def make_proxy(_, func):
            # bind func now to avoid the late-binding closure pitfall in the loop
            def proxy(self, *args, **kwargs):
              return wrap(func(self._uop, *unwrap(args), **unwrap(kwargs)), self)
            return proxy
          setattr(cls, name, make_proxy(name, original))
    return cls
  return decorator
class TileMathMixin(ElementwiseMixin):
  """Elementwise math for register tiles (RT) and vectors (RV): maps ALU ops over
  the warp-distributed uop, broadcasting scalars and vectors as needed."""
  def alu(self, op, *src, inner_op=lambda x:x):
    # inner_op transforms the rhs operand, letting sub/div reuse ADD/MUL (negate / reciprocal)
    assert isinstance(self, (RT, RV))
    if len(src) == 0:
      # unary op
      if self._uop._shape is None: uop = UOp.alu(self._uop, op)
      else: uop = self.ker.warp.map(self._uop, lambda x: UOp.alu(x, op))
    elif len(src) == 1:
      if self._uop._shape is None: uop = UOp.alu(self._uop, op, inner_op(self._uop.ufix(src[0])))
      elif isinstance(src[0], (int,float,bool)): uop = self.ker.warp.map(self._uop, lambda x: UOp.alu(x, op, inner_op(x.ufix(src[0]))))
      elif src[0]._shape is None: uop = UOp.alu(self._uop, op, inner_op(self._uop.ufix(src[0])))
      else:
        if isinstance(self, RT) and isinstance(src[0], RV):
          # tile (+) vector: broadcast the vector along the row or column index depending on tile layout
          match self.layout:
            case TileLayout.ROW: uop = self.ker.warp.map(self._uop, lambda x, idx: UOp.alu(x, op, inner_op(src[0]._uop[idx[0], 0])))
            case TileLayout.COL: uop = self.ker.warp.map(self._uop, lambda x, idx: UOp.alu(x, op, inner_op(src[0]._uop[idx[1], 0])))
        else: uop = self.ker.warp.map(self._uop, lambda x, idx: UOp.alu(x, op, inner_op(src[0]._uop[*idx])))
    else: raise NotImplementedError
    return self.ruop(uop)
  def const_like(self, b): return b
  @property
  def dtype(self): return self._uop.dtype
  def cast(self, dtype:DType): return self.ruop(self._uop.cast(dtype))
  # override ops that do compute on the src uop
  def sub(self, x, reverse=False):
    return self.ufix(x).alu(Ops.ADD, self, inner_op=lambda y: -y) if reverse else self.alu(Ops.ADD, self.ufix(x), inner_op=lambda y: -y)
  def div(self, x, reverse=False):
    return self.ufix(x).alu(Ops.MUL, self, inner_op=lambda y: 1/y) if reverse else self.alu(Ops.MUL, self.ufix(x), inner_op=lambda y: 1/y)
@autowrap(UOp)
class GL:
  """Global-memory tile: a thin wrapper around a GLOBAL-addrspace placeholder UOp."""
  def __init__(self, uop:UOp, ker):
    self._uop = uop
    self.ker = ker
  def ruop(self, uop:UOp):
    # rewrap a derived uop, keeping the same kernel
    return GL(uop, self.ker)
  @classmethod
  def create(cls, shape, dtype:DType, ker):
    """Allocate a new global buffer of `shape`/`dtype` on kernel `ker`."""
    return cls(ker.alloc(shape, dtype, AddrSpace.GLOBAL), ker)
class TileLayout(Enum):
  # element ordering for tiles: row-major or column-major
  ROW = auto()
  COL = auto()
class VecLayout(Enum):
  # layouts for register vectors; ORTHO is the only variant currently defined
  ORTHO = auto()
@dataclass(frozen=True)
class BaseShape:
  """Dimensions of a base tile; derived sizes assume the tile is split evenly across a warp."""
  rows: int
  cols: int
  @property
  def num_elements(self): return self.rows * self.cols
  @property
  def elements_per_thread(self): return self.num_elements // WARP_THREADS
@dataclass(frozen=True)
class STBaseShape(BaseShape):
  """Shared-memory base tile: adds a byte-level swizzle function and a bytes-per-thread rule."""
  _swizzle: Callable[[UOp, DType], UOp]
  bytes_per_thread: Callable[[DType], int]
  def swizzle(self, row, col, dtype:DType):
    # the swizzle functions operate on byte offsets, so scale the element
    # offset up by itemsize, swizzle, then scale back down
    byte_offset = (row * self.cols + col) * dtype.itemsize
    return self._swizzle(byte_offset, dtype) // dtype.itemsize
def st_16x16_swizzle(offset:UOp, _):
  # 16x16 tiles are stored unswizzled
  return offset
def st_16x16_bpt(dtype:DType):
  """Bytes each thread moves for a 16x16 tile; only 2- and 4-byte dtypes are supported."""
  if dtype.itemsize not in (2, 4): raise NotImplementedError
  return 16
ST_16X16 = STBaseShape(16, 16, st_16x16_swizzle, st_16x16_bpt)
def st_16x16_swizzled_swizzle(offset:UOp, dtype:DType):
if dtype.itemsize == 2:
swizzle = ((offset % 512) >> 7) << 3
return offset ^ swizzle
elif dtype.itemsize == 4:
return offset
else: raise NotImplementedError
def st_16x16_swizzled_bpt(dtype:DType):
if dtype.itemsize == 2: return 4
elif dtype.itemsize == 4: return 16
else: raise NotImplementedError
ST_16X16_SWIZZLED = STBaseShape(16, 16, st_16x16_swizzled_swizzle, st_16x16_swizzled_bpt)
def st_32x32_swizzle(offset:UOp, dtype:DType):
if dtype.itemsize == 2:
first_swizzle = ((offset % 1024) >> 9) << 5
second_swizzle = ((offset % 2048) >> 10) << 4
return offset ^ first_swizzle ^ second_swizzle
elif dtype.itemsize == 4:
return offset
else: raise NotImplementedError
def st_32x32_bpt(dtype:DType):
if dtype.itemsize == 2 or dtype.itemsize == 4: return 16
else: raise NotImplementedError
ST_32X32 = STBaseShape(32, 32, st_32x32_swizzle, st_32x32_bpt)
def st_16x32_swizzle(offset:UOp, dtype:DType):
if dtype.itemsize == 2:
swizzle = ((offset % 1024) >> 9) << 5
return offset ^ swizzle
elif dtype.itemsize == 4:
return offset
else: raise NotImplementedError
def st_16x32_bpt(dtype:DType):
if dtype.itemsize == 2 or dtype.itemsize == 4: return 16
else: raise NotImplementedError
ST_16X32 = STBaseShape(16, 32, st_16x32_swizzle, st_16x32_bpt)
def st_32x16_swizzle(offset:UOp, dtype:DType):
if dtype.itemsize == 2:
swizzle = ((offset % 1024) >> 9) << 4
return offset ^ swizzle
elif dtype.itemsize == 4:
return offset
else: raise NotImplementedError
def st_32x16_bpt(dtype:DType):
if dtype.itemsize == 2 or dtype.itemsize == 4: return 16
else: raise NotImplementedError
ST_32X16 = STBaseShape(32, 16, st_32x16_swizzle, st_32x16_bpt)
@autowrap(UOp)
class ST:
  """Shared-memory tile: allocated as (..., height, width, base_rows, base_cols) in LOCAL memory."""
  def __init__(self, uop:UOp, rows:int, cols:int, layout:TileLayout, base_shape:STBaseShape, ker):
    self._uop, self.rows, self.cols, self.layout, self.base_shape, self.ker = uop, rows, cols, layout, base_shape, ker
  def ruop(self, uop:UOp):
    # rewrap a derived uop with the same tile geometry
    return ST(uop, self.rows, self.cols, self.layout, self.base_shape, self.ker)
  @classmethod
  def create(cls, shape, dtype:DType, layout:TileLayout, base_shape:STBaseShape, ker):
    """Allocate a shared tile; the last two dims of `shape` must tile evenly by `base_shape`."""
    rows = shape[-2]
    cols = shape[-1]
    assert rows % base_shape.rows == 0
    assert cols % base_shape.cols == 0
    assert cols % base_shape.elements_per_thread == 0
    height = rows // base_shape.rows
    width = cols // base_shape.cols
    uop = ker.alloc(shape[:-2] + (height, width, base_shape.rows, base_shape.cols), dtype, AddrSpace.LOCAL)
    return cls(uop, rows, cols, layout, base_shape, ker)
  def swizzle(self, row, col):
    """Map an element's (row, col) within a base tile to its swizzled (row, col)."""
    swizzled_offset = self.base_shape.swizzle(row, col, self._uop.dtype.base.scalar())
    row = swizzled_offset // self.base_shape.cols
    col = swizzled_offset % self.base_shape.cols
    return row, col
@dataclass(frozen=True)
class RTBaseShape(BaseShape):
  # stride: length of each contiguous run a thread holds (see num_strides)
  stride: int
  @property
  def num_strides(self):
    # number of stride-sized runs per thread
    return self.elements_per_thread // self.stride
# register base-tile presets: (rows, cols, stride) combinations used by the tile ops
RT_16X16 = RTBaseShape(rows=16, cols=16, stride=4)
RT_32X32 = RTBaseShape(rows=32, cols=32, stride=4)
RT_32X32_8 = RTBaseShape(rows=32, cols=32, stride=8)
RT_16X32 = RTBaseShape(rows=16, cols=32, stride=8)
RT_32X16 = RTBaseShape(rows=32, cols=16, stride=8)
RT_32X16_4 = RTBaseShape(rows=32, cols=16, stride=4)
RT_16X32_4 = RTBaseShape(rows=16, cols=32, stride=4)
@autowrap(UOp)
class RT(TileMathMixin):
  """Register tile: per-thread REG storage of shape (height, width, elements_per_thread)."""
  def __init__(self, uop:UOp, layout:TileLayout, base_shape:RTBaseShape, ker):
    self._uop, self.layout, self.base_shape, self.ker = uop, layout, base_shape, ker
  def ruop(self, uop:UOp):
    # rewrap a derived uop with the same layout/base shape
    return RT(uop, self.layout, self.base_shape, self.ker)
  @classmethod
  def create(cls, shape, dtype:DType, layout:TileLayout, base_shape:RTBaseShape, ker):
    """Allocate a register tile; `shape` must be 2D and tile evenly by `base_shape`."""
    assert len(shape) == 2
    rows, cols = shape
    assert rows % base_shape.rows == 0
    assert cols % base_shape.cols == 0
    grid = (rows // base_shape.rows, cols // base_shape.cols)
    uop = ker.alloc(grid + (base_shape.elements_per_thread,), dtype, AddrSpace.REG)
    return cls(uop, layout, base_shape, ker)
@autowrap(UOp)
class RV(TileMathMixin):
def __init__(self, uop:UOp, length:int, layout:VecLayout, base_shape:RTBaseShape, ker):
self._uop, self.ker = uop, ker
self.length, self.layout, self.base_shape = length, layout, base_shape
def ruop(self, uop:UOp):
return RV(uop, self.length, self.layout, self.base_shape, self.ker)
@classmethod
def create(cls, length, dtype:DType, layout:VecLayout, base_shape:RTBaseShape, ker):
tiles = length // base_shape.rows
match layout:
case VecLayout.ORTHO:
inner_dim = 1
outer_dim = tiles
uop = ker.alloc((outer_dim, inner_dim), dtype, AddrSpace.REG)
return RV(uop, length, layout, base_shape, ker)
# union of everything the tile ops accept: a raw UOp or any tile wrapper
ALL_TILES = UOp | GL | ST | RT | RV
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "extra/thunder/tiny/tk/tiles.py",
"license": "MIT License",
"lines": 229,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
tinygrad/tinygrad:test/external/external_test_hcq_open.py | import os
if "DEV" not in os.environ: os.environ["DEV"] = "AMD"
import unittest, time
from tinygrad import Device
class TestOpen(unittest.TestCase):
  # factory producing one test method; n only disambiguates the generated name
  def generate_test_open(n):
    def test(self):
      dev = Device[Device.DEFAULT]
      # allocate 10 x 10MiB buffers, then linger briefly
      for i in range(10):
        dev.allocator.alloc(10 << 20)
      time.sleep(0.5)
    test.__name__ = f'test_open_{n}'
    return test
  # stamp out 64 identical tests; assigning through locals() works here because
  # a class body's namespace is a real dict during class creation (CPython)
  for i in range(64): locals()[f'test_open_{i}'] = generate_test_open(i)
if __name__ == '__main__':
  unittest.main()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/external/external_test_hcq_open.py",
"license": "MIT License",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:test/external/external_fuzz_hcq_mp.py | import subprocess
import random
import time
from concurrent.futures import ProcessPoolExecutor, as_completed
from tinygrad.helpers import getenv
# checks that HCQ drivers can be killed during operation without causing issues
def run_test(i, full_run=False, force_ok=False):
  """Spawn one test_tiny run; unless full_run, kill it at a random moment to fuzz driver teardown.

  Asserts the run either passed or (when force_ok is False) merely lost the device lock.
  """
  print(f"\rRunning iteration {i}...", end=" ", flush=True)
  proc = subprocess.Popen(["python3", "test/test_tiny.py", "TestTiny.test_plus"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  if not full_run:
    # let it run for up to 1.2s, then kill it mid-flight
    time.sleep(random.uniform(0, 1200) / 1000.0)
    proc.kill()
  _, stderr = proc.communicate()
  stderr_text = stderr.decode()
  passed = "Ran 1 test in" in stderr_text and "OK" in stderr_text
  lock_contention = "Failed to take lock file" in stderr_text
  assert passed or (not force_ok and lock_contention), stderr_text
if __name__ == "__main__":
  max_workers = getenv("MAX_WORKERS", 4)
  with ProcessPoolExecutor(max_workers=max_workers) as executor:
    futures = []
    for i in range(1000000):
      if i % 100 == 0:
        # wait for everything we launched so far
        for f in as_completed(futures):
          try:
            f.result()
          except Exception as e:
            print(f"\nError in iteration: {e}")
        futures = []
        # do a full run in the main proc
        run_test(i, True, force_ok=True)
      else:
        # fuzz run in a worker; FULL_RUN=1 disables the random kill
        futures.append(executor.submit(run_test, i, bool(getenv("FULL_RUN", 0))))
        # keep list small
        if len(futures) > max_workers * 2:
          futures = [f for f in futures if not f.done()]
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/external/external_fuzz_hcq_mp.py",
"license": "MIT License",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:tinygrad/mixin/movement.py | # mixins add syntactic sugar to Tensor and UOp
from __future__ import annotations
from typing import TYPE_CHECKING, Self
from tinygrad.uop import Ops
from tinygrad.helpers import prod, argfix, argsort, flatten, dedup, make_tuple, ceildiv
from tinygrad.uop.ops import resolve, smax
if TYPE_CHECKING:
from tinygrad.uop.ops import sint
def _align_left(*shapes: tuple[sint, ...]) -> tuple[tuple[sint, ...], ...]:
# unsqueeze left to make every shape same length
max_dim = max(len(shape) for shape in shapes)
return tuple((1,) * (max_dim - len(shape)) + shape for shape in shapes)
class MovementMixin:
  """Shape-manipulation sugar shared by Tensor and UOp.

  Subclasses implement `_mop` (apply one movement Op) and `shape`; everything
  else here is derived from those two.
  """
  # required to implement
  def _mop(self, op: Ops, arg) -> Self:
    raise NotImplementedError
  @property
  def shape(self) -> tuple[sint, ...]:
    raise NotImplementedError
  # great functions you get!
  @property
  def ndim(self) -> int:
    """
    Returns the number of dimensions in the tensor.
    ```python exec="true" source="above" session="tensor" result="python"
    t = Tensor([[1, 2], [3, 4]])
    print(t.ndim)
    ```
    """
    return len(self.shape)
  def numel(self) -> sint:
    """
    Returns the total number of elements in the tensor.
    ```python exec="true" source="above" session="tensor" result="python"
    t = Tensor([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
    print(t.numel())
    ```
    """
    return prod(self.shape)
  def _resolve_dim(self, dim: int, *, extra: bool = False) -> int:
    # normalize a possibly-negative dim to [0, ndim); extra=True allows one past the end (for unsqueeze)
    total = self.ndim + int(extra)
    if not -max(1, total) <= dim <= max(1, total) - 1:
      raise IndexError(f"{dim=} out of range {[-max(1, total), max(1, total) - 1]}")
    return dim + total if dim < 0 else dim
  def _broadcast_to(self, new_shape: tuple[sint, ...]) -> Self:
    if self.shape == new_shape:
      return self
    if self.ndim > len(new_shape):
      raise ValueError(f"cannot broadcast tensor to fewer dimensions. shape={self.shape} to {new_shape=}")
    # first unsqueeze left with 1s https://data-apis.org/array-api/latest/API_specification/broadcasting.html
    shape, _ = _align_left(self.shape, new_shape)
    # for each dimension, check either dim is 1, or it does not change
    if not all(s == ns or s == 1 for s, ns in zip(shape, new_shape)):
      raise ValueError(f"cannot broadcast {self.shape} to {new_shape=}")
    reshaped = self.reshape(shape)
    ret = reshaped._mop(Ops.EXPAND, arg=new_shape)
    return reshaped if ret.shape == reshaped.shape else ret
  def expand(self, shape, *args) -> Self:
    """
    Returns a tensor that is expanded to the shape that is specified.
    Expand can also increase the number of dimensions that a tensor has.
    Passing a `-1` or `None` to a dimension means that its size will not be changed.
    ```python exec="true" source="above" session="tensor" result="python"
    t = Tensor([1, 2, 3])
    print(t.expand(4, -1).numpy())
    ```
    """
    new_shape = tuple(from_ if to == -1 or to is None else to for from_, to in zip(*(_align_left(self.shape, argfix(shape, *args)))))
    return self._broadcast_to(new_shape)
  def reshape(self, shape, *args) -> Self:
    """
    Returns a tensor with the same data as the original tensor but with a different shape.
    `shape` can be passed as a tuple or as separate arguments.
    ```python exec="true" source="above" session="tensor" result="python"
    t = Tensor.arange(6)
    print(t.reshape(2, 3).numpy())
    ```
    """
    # resolve None and args
    new_shape = tuple([s if s is not None else self.shape[i] for i, s in enumerate(argfix(shape, *args))])
    # resolve -1
    if (c := new_shape.count(-1)) > 1:
      raise RuntimeError(f"only one dimension can be inferred using -1, getting {new_shape}")
    if c:
      # prod(new_shape) is negative here (it contains the single -1), so this divides out the known dims
      new_shape = tuple([-prod(self.shape) // prod(new_shape) if s == -1 else s for s in new_shape])
    if prod(self.shape) != prod(new_shape):
      raise ValueError(f"size mismatch, can't reshape ({self.shape}) -> ({new_shape})")
    ret = self._mop(Ops.RESHAPE, arg=new_shape)
    return self if ret.shape == self.shape else ret
  def shrink(self, arg: tuple[tuple[sint, sint] | None, ...]) -> Self:
    """
    Returns a tensor that shrinks the each axis based on input arg.
    `arg` must have the same length as `self.ndim`.
    For each axis, it can be `None`, which means no shrink, or a tuple `(start, end)` that works the same as Python slice.
    ```python exec="true" source="above" session="tensor" result="python"
    t = Tensor.arange(9).reshape(3, 3)
    print(t.numpy())
    ```
    ```python exec="true" source="above" session="tensor" result="python"
    print(t.shrink(((None, (1, 3)))).numpy())
    ```
    ```python exec="true" source="above" session="tensor" result="python"
    print(t.shrink((((0, 2), (0, 2)))).numpy())
    ```
    """
    if self.ndim != len(arg):
      raise ValueError(f"{self.ndim=} != {len(arg)=}")
    ret = self._mop(Ops.SHRINK, arg=[x if x is not None else (0, s) for x, s in zip(arg, self.shape)])
    return self if ret.shape == self.shape else ret
  def permute(self, order, *args) -> Self:
    """
    Returns a tensor that is a permutation of the original tensor.
    The new tensor has the same data as the original tensor but with the dimensions permuted according to the order specified.
    `order` can be passed as a tuple or as separate arguments.
    ```python exec="true" source="above" session="tensor" result="python"
    t = Tensor.empty(2, 3, 5)
    print(t.shape)
    ```
    ```python exec="true" source="above" session="tensor" result="python"
    print(t.permute(2, 0, 1).shape)
    ```
    """
    order_arg = tuple(self._resolve_dim(x) for x in argfix(order, *args))
    if sorted(order_arg) != list(range(self.ndim)):
      raise RuntimeError(f"order is not a valid permutation, getting {order_arg}")
    return self._mop(Ops.PERMUTE, arg=order_arg) if order_arg != tuple(range(self.ndim)) else self
  def flip(self, axis, *args) -> Self:
    """
    Returns a tensor that reverses the order of the original tensor along given `axis`.
    `axis` can be passed as a tuple or as separate arguments.
    ```python exec="true" source="above" session="tensor" result="python"
    t = Tensor.arange(6).reshape(2, 3)
    print(t.numpy())
    ```
    ```python exec="true" source="above" session="tensor" result="python"
    print(t.flip(0).numpy())
    ```
    ```python exec="true" source="above" session="tensor" result="python"
    print(t.flip((0, 1)).numpy())
    ```
    """
    axis_arg = tuple(self._resolve_dim(x) for x in argfix(axis, *args))
    assert all(not isinstance(x, bool) and x >= 0 and x < self.ndim for x in axis_arg), f"flip args must be axis ints {axis_arg}"
    if len(axis_arg) != len(dedup(axis_arg)):
      raise RuntimeError(f"dim can appear at most once, getting {axis_arg}")
    flip_arg = tuple([i in axis_arg for i in range(len(self.shape))])
    return self._mop(Ops.FLIP, arg=flip_arg) if any(flip_arg) else self
  # **** high level ****
  def shrink_to(self, shape, *args) -> Self:
    # shrink keeping the start of every axis: (0, ns) per dim, None passes through
    return self.shrink(tuple([None if ns is None else (0, ns) for ns in argfix(shape, *args)]))
  def view(self, shape, *args) -> Self:
    """`.view` is an alias for `.reshape`."""
    return self.reshape(shape, *args)
  def squeeze(self, dim: int | None = None) -> Self:
    """
    Returns a tensor with specified dimensions of input of size 1 removed.
    If `dim` is not specified, all dimensions with size 1 are removed.
    ```python exec="true" source="above" session="tensor" result="python"
    t = Tensor.zeros(2, 1, 2, 1, 2)
    print(t.squeeze().shape)
    ```
    ```python exec="true" source="above" session="tensor" result="python"
    print(t.squeeze(0).shape)
    ```
    ```python exec="true" source="above" session="tensor" result="python"
    print(t.squeeze(1).shape)
    ```
    """
    if dim is None:
      return self.reshape(tuple(dim for dim in self.shape if dim != 1))
    dim = self._resolve_dim(dim)
    return self if not self.ndim or self.shape[dim] != 1 else self.reshape(self.shape[:dim] + self.shape[dim + 1 :])
  def unsqueeze(self, dim: int) -> Self:
    """
    Returns a tensor with a new dimension of size 1 inserted at the specified `dim`.
    ```python exec="true" source="above" session="tensor" result="python"
    t = Tensor([1, 2, 3, 4])
    print(t.unsqueeze(0).numpy())
    ```
    ```python exec="true" source="above" session="tensor" result="python"
    print(t.unsqueeze(1).numpy())
    ```
    """
    dim = self._resolve_dim(dim, extra=True)
    return self.reshape(self.shape[:dim] + (1,) + self.shape[dim:])
  @property
  def T(self) -> Self:
    """`.T` is an alias for `.transpose()`."""
    return self.transpose()
  def transpose(self, dim0=1, dim1=0) -> Self:
    """
    Returns a tensor that is a transposed version of the original tensor.
    The given dimensions `dim0` and `dim1` are swapped.
    ```python exec="true" source="above" session="tensor" result="python"
    t = Tensor.arange(6).reshape(2, 3)
    print(t.numpy())
    ```
    ```python exec="true" source="above" session="tensor" result="python"
    print(t.transpose(0, 1).numpy())
    ```
    """
    order = list(range(self.ndim))
    order[dim0], order[dim1] = order[dim1], order[dim0]
    return self.permute(order)
  def flatten(self, start_dim=0, end_dim=-1) -> Self:
    """
    Flattens the tensor by reshaping it into a one-dimensional tensor.
    If `start_dim` or `end_dim` are passed, only dimensions starting with `start_dim` and ending with `end_dim` are flattened.
    ```python exec="true" source="above" session="tensor" result="python"
    t = Tensor.arange(8).reshape(2, 2, 2)
    print(t.flatten().numpy())
    ```
    ```python exec="true" source="above" session="tensor" result="python"
    print(t.flatten(start_dim=1).numpy())
    ```
    """
    start_dim, end_dim = self._resolve_dim(start_dim), self._resolve_dim(end_dim)
    return self.reshape(self.shape[:start_dim] + (prod(self.shape[start_dim : end_dim + 1]),) + self.shape[end_dim + 1 :])
  def unflatten(self, dim: int, sizes: tuple[int, ...]) -> Self:
    """
    Unflattens dimension `dim` of the tensor into multiple dimensions specified by `sizes`. `Tensor.flatten()` is the inverse of this function.
    ```python exec="true" source="above" session="tensor" result="python"
    print(Tensor.ones(3, 4, 1).unflatten(1, (2, 2)).shape)
    ```
    ```python exec="true" source="above" session="tensor" result="python"
    print(Tensor.ones(3, 4, 1).unflatten(1, (-1, 2)).shape)
    ```
    ```python exec="true" source="above" session="tensor" result="python"
    print(Tensor.ones(5, 12, 3).unflatten(-2, (2, 2, 3, 1, 1)).shape)
    ```
    """
    dim = self._resolve_dim(dim)
    return self.reshape(self.shape[:dim] + sizes + self.shape[dim + 1 :])
  def rearrange(self, formula: str, **sizes) -> Self:
    """
    Rearranges input according to formula
    See: https://einops.rocks/api/rearrange/
    ```python exec="true" source="above" session="tensor" result="python"
    x = Tensor([[1, 2], [3, 4]])
    print(Tensor.rearrange(x, "batch channel -> (batch channel)").numpy())
    ```
    """
    def parse_side(s: str) -> tuple[list[str], list[tuple[int, int]]]:
      """Parse one side of formula into (axis_names, dims) where dims are (start, end) index pairs for parens."""
      tokens = f" {s} ".replace("β¦", "...").replace("(", " ( ").replace(")", " ) ").replace(" ", " ").replace(" 1 ", " ( ) ").split()
      lparens, rparens = [i for i, tok in enumerate(tokens) if tok == "("], [i for i, tok in enumerate(tokens) if tok == ")"]
      pairs = list(zip(lparens, rparens))
      assert len(lparens) == len(rparens) and sorted(flatten(pairs)) == flatten(pairs), "bracket mismatch"
      # (lp - 2*i, rp - 1 - 2*i) re-indexes the pairs into the paren-free token list
      return [tok for tok in tokens if tok not in ("(", ")")], [(lp - 2*i, rp - 1 - 2*i) for i, (lp, rp) in enumerate(pairs)]
    assert formula.count("->") == 1, 'need exactly one "->" in formula'
    (lhs, unflatten_dims), (rhs, flatten_dims) = map(parse_side, formula.split("->"))
    for name in sizes: assert name in lhs, f"axis {name} is not used in transform"
    assert sorted(lhs) == sorted(rhs) and len(lhs) == len(set(lhs)), f"name mismatch in {formula}"
    for name in lhs+rhs: assert name == "..." or (name.isidentifier() and "_" not in (name[0], name[-1])), f"invalid axis name {name}"
    assert "..." not in flatten([lhs[s:e] for s, e in unflatten_dims]), f"cannot have collapsed ellipsis (...) in lhs of {formula}"
    assert lhs.count("...") <= 1, f"too many ellipses in {formula}"
    # resolve ellipsis
    if "..." in lhs:
      ell_len = len(self.shape) - len(lhs) + 1 + sum(e - s - 1 for s, e in unflatten_dims)
      lhs, rhs = map(lambda l: l[:(i := l.index("..."))] + [f"...{j}" for j in range(ell_len)] + l[i + 1:] if "..." in l else l, (lhs, rhs))
      # shift paren index pairs that sit to the right of the expanded ellipsis
      def newdims(side, s, e): return (s + (ell_len - 1 if "...0" in side[:s] else 0), e + (ell_len - 1 if "...0" in side[:e] else 0))
      unflatten_dims, flatten_dims = [newdims(lhs, s, e) for s, e in unflatten_dims], [newdims(rhs, s, e) for s, e in flatten_dims]
    # unflatten -> permute -> flatten
    t = self
    for start, end in unflatten_dims: t = t.unflatten(start, tuple(sizes.get(lhs[i], -1) for i in range(start, end)))
    for i, name in enumerate(lhs):
      if name in sizes: assert sizes[name] == t.shape[i], f"size provided for dimension {name} incorrect"
    t = t.permute([lhs.index(name) for name in rhs])
    for start, end in reversed(flatten_dims): t = t.flatten(start, end - 1) if start < end else t.unsqueeze(start)
    return t
  # *** movement ops with expand ***
  def repeat_interleave(self, repeats: int, dim: int | None = None) -> Self:
    """
    Repeats elements of a tensor.
    ```python exec="true" source="above" session="tensor" result="python"
    t = Tensor([1, 2, 3])
    print(t.repeat_interleave(2).numpy())
    ```
    """
    x, dim = (self.flatten(), 0) if dim is None else (self, self._resolve_dim(dim))
    shp = x.shape
    # insert a size-1 axis after dim, expand it to `repeats`, then fold it back in
    x = x.reshape(*shp[: dim + 1], 1, *shp[dim + 1 :])
    x = x.expand(*shp[: dim + 1], repeats, *shp[dim + 1 :])
    x = x.reshape(*shp[:dim], shp[dim] * repeats, *shp[dim + 1 :])
    return x
  def repeat(self, repeats, *args) -> Self:
    """
    Repeats tensor number of times along each dimension specified by `repeats`.
    `repeats` can be passed as a tuple or as separate arguments.
    ```python exec="true" source="above" session="tensor" result="python"
    t = Tensor([1, 2, 3])
    print(t.repeat(4, 2).numpy())
    ```
    ```python exec="true" source="above" session="tensor" result="python"
    print(t.repeat(4, 2, 1).shape)
    ```
    """
    repeats = argfix(repeats, *args)
    base_shape = _align_left(self.shape, repeats)[0]
    # pair each repeated dim with a leading 1, expand the 1 to r, then collapse (r, s) -> r*s
    unsqueezed_shape = flatten([[s] if r == 1 else [1, s] for r, s in zip(repeats, base_shape)])
    expanded_shape = flatten([[s] if r == 1 else [r, s] for r, s in zip(repeats, base_shape)])
    final_shape = [r * s for r, s in zip(repeats, base_shape)]
    return self.reshape(unsqueezed_shape).expand(expanded_shape).reshape(final_shape)
  # **** pool level ****
  def _pool(self, k_: tuple[sint, ...], stride: int | tuple[int, ...] = 1, dilation: int | tuple[int, ...] = 1) -> Self:
    # build pooling windows over the trailing len(k_) axes via repeat/shrink/reshape only
    # (no padding); output axes are (..., *window_counts, *kernel_dims)
    assert len(self.shape) >= len(k_), f"can't pool {self.shape} with {k_}"
    s_, d_ = make_tuple(stride, len(k_)), make_tuple(dilation, len(k_))
    assert len(k_) == len(s_) == len(d_), f"stride/dilation mismatch kernel:{k_} stride:{s_} dilation:{d_}"
    noop, i_ = [None] * (self.ndim - len(k_)), self.shape[-len(k_) :]
    assert all(resolve(d * (k - 1) + 1 <= i) for k, d, i in zip(k_, d_, i_)), "kernel size cannot be greater than actual input size"
    o_ = [ceildiv(i - d * (k - 1), s) for i, d, k, s in zip(i_, d_, k_, s_)]
    # input size scaling factor to make sure shrink for stride is possible
    f_ = [smax(1, ceildiv(o * s - d, i)) for o, s, i, d in zip(o_, s_, i_, d_)]
    # repeats such that we don't need padding
    x = self.repeat([1] * len(noop) + [ceildiv(k * (i * f + d), i) for k, i, d, f in zip(k_, i_, d_, f_)])
    # handle dilation
    x = x.shrink_to(noop + [k * (i * f + d) for k, i, d, f in zip(k_, i_, d_, f_)])
    x = x.reshape(noop + flatten((k, (i * f + d)) for k, i, d, f in zip(k_, i_, d_, f_)))
    # handle stride
    x = x.shrink_to(noop + flatten((k, o * s) for k, o, s in zip(k_, o_, s_))).reshape(noop + flatten((k, o, s) for k, o, s in zip(k_, o_, s_)))
    x = x.shrink_to(noop + flatten((k, o, 1) for k, o in zip(k_, o_))).reshape(noop + flatten((k, o) for k, o in zip(k_, o_)))
    # permute to move reduce to the end
    return x.permute(*range(len(noop)), *[len(noop) + i * 2 + 1 for i in range(len(i_))], *[len(noop) + i * 2 for i in range(len(i_))])
  def unfold(self, dim:int, size, step:int) -> Self:
    """
    Unfolds the tensor along dimension `dim` into overlapping windows.
    Each window has length `size` and begins every `step` elements of `self`.
    Returns the input tensor with dimension `dim` replaced by dims `(n_windows, size)`
    where `n_windows = (self.shape[dim] - size) // step + 1`.
    ```python exec="true" source="above" session="tensor" result="python"
    unfolded = Tensor.arange(8).unfold(0,2,2)
    print("\\n".join([repr(x.numpy()) for x in unfolded]))
    ```
    ```python exec="true" source="above" session="tensor" result="python"
    unfolded = Tensor.arange(27).reshape(3,3,3).unfold(-1,2,3)
    print("\\n".join([repr(x.numpy()) for x in unfolded]))
    ```
    """
    if size < 0: raise RuntimeError(f'size must be >= 0 but got {size=}')
    if step <= 0: raise RuntimeError(f'step must be > 0 but got {step=}')
    if size > self.shape[dim]: raise RuntimeError(f'maximum size for tensor at dimension {dim} is {self.shape[dim]} but size is {size}')
    dim = self._resolve_dim(dim)
    # move dim last, pool 1D windows over it, then restore the original axis order
    perm_to_last = tuple(i for i in range(self.ndim) if i != dim) + (dim,)
    return self.permute(perm_to_last)._pool((size,), step).permute(argsort(perm_to_last) + (self.ndim,))
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "tinygrad/mixin/movement.py",
"license": "MIT License",
"lines": 344,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
tinygrad/tinygrad:extra/gemm/mi350x_uop_matmul.py | import os
import numpy as np
np.set_printoptions(linewidth=1000000)
os.environ["AMD_LLVM"] = "0"
from tinygrad import Tensor, Context, dtypes, UOp, GlobalCounters
from tinygrad.helpers import DEBUG, getenv
from tinygrad.dtype import AddrSpace
from tinygrad.uop.ops import AxisType, KernelInfo, Ops
WARP_SIZE = 64
# Reg tile sizes (tensor cores)
TC_M = 16
TC_N = 16
TC_K = 32
# 1024 matrix cores
# 16 cycle mfma
# 2.2 GHz
# 16x16x32x2 FLOPS/mma = 16384
# 2.2*1e9*16384*1024/16*1e-12 TFLOPS = 2306 TFLOPS
#N,M,K = 256,256,64
N,M,K = 4096,4096,4096
# Threadblock tile sizes (block-level tile of C that a block computes)
#BLOCK_M = 128 # rows of C (M-dim) per block
#BLOCK_N = 128 # columns of C (N-dim) per block
#BLOCK_K = 128 # K-slice per block iteration
BLOCK_M = 64
BLOCK_N = 64
BLOCK_K = 128
WARPGROUP_SIZE = 1
BLOCK_M = BLOCK_M * WARPGROUP_SIZE
# TODO: improve the syntax of this. better syntax, faster iteration
# -- DONE: add working slice a[gx, :, i] -> shape of the : (aka (16,16,32) becomes (16,))
# -- DONE(ish): add argfix to movement (traits shared with Tensor)
# -- fix WMMA to not require all the junk
# -- improve syntax for vectorized loads/stores (both with DEVECTORIZE and without)
# -- DONE: be able to use CONTRACT on a range
# -- fix upcasted RANGE on an already vectorized buffer
# -- improve "all ranges not ended error" / fix the bug with after on ended ranges (if you are after end of range, range is closed)
CUS_PER_GPU = 256
assert ((M//BLOCK_M) * (N//BLOCK_N)) >= CUS_PER_GPU, "not enough globals"
def custom_gemm(C:UOp, A:UOp, B:UOp) -> UOp:
# A = (M x K)
# B = (K x N)
# C = (M x N)
# check it's proper matmul
assert C.shape[0] == A.shape[0]
assert C.shape[1] == B.shape[1]
assert A.shape[1] == B.shape[0]
gx, gy = UOp.special(M//BLOCK_M, "gidx0"), UOp.special(N//BLOCK_N, "gidx1")
warp = UOp.special(WARP_SIZE, "lidx0")
warpgroup = UOp.special(WARPGROUP_SIZE, "lidx1")
# generic copy logic (not good)
def generic_copy(glbl, gargs, lcl, rng):
# Fully coalesced 128-bit loads/stores.
INNER_SIZE = 8
cp_i = UOp.range(lcl.size//(WARPGROUP_SIZE*WARP_SIZE*INNER_SIZE), rng)
cp_inner = UOp.range(INNER_SIZE, rng+1, AxisType.UPCAST)
idx_i = cp_i*WARPGROUP_SIZE*WARP_SIZE*INNER_SIZE + warpgroup*WARP_SIZE*INNER_SIZE + warp*INNER_SIZE + cp_inner
return lcl[idx_i].store(glbl[*gargs, idx_i]).end(cp_i, cp_inner)
# split out the globals into blocks
C = C.reshape((M//BLOCK_M, BLOCK_M, N//BLOCK_N, BLOCK_N))
A = A.reshape((M//BLOCK_M, BLOCK_M, K//BLOCK_K, BLOCK_K))
B = B.reshape((K//BLOCK_K, BLOCK_K, N//BLOCK_N, BLOCK_N))
# this is the big accumulator
acc = UOp.placeholder((BLOCK_N//TC_N, BLOCK_M//TC_M//WARPGROUP_SIZE), dtypes.float.vec(4), 0, AddrSpace.REG)
assert acc.size*WARP_SIZE*WARPGROUP_SIZE*4 == BLOCK_M*BLOCK_N
acc = acc[init_l:=UOp.range(acc.size, 500)].set(UOp.const(dtypes.float.vec(4), 0.0), end=init_l)
# create locals (note A is permuted, and the stride is changed to avoid bank conflicts)
def make_locals(slot) -> tuple[UOp, UOp]:
BM_As_stride = (BLOCK_M + 1)
BN_Bs_stride = (BLOCK_N + 0)
INNER_SLICE = 8
As = UOp.placeholder((BLOCK_K//INNER_SLICE, BM_As_stride, INNER_SLICE), dtypes.half, slot=slot, addrspace=AddrSpace.LOCAL)
INNER_SLICE = 1
Bs = UOp.placeholder((BLOCK_K//INNER_SLICE, BN_Bs_stride, INNER_SLICE), dtypes.half, slot=slot+1, addrspace=AddrSpace.LOCAL)
As = As.permute((0,2,1)).reshape((BLOCK_K, BM_As_stride)).shrink_to((BLOCK_K, BLOCK_M))
Bs = Bs.permute((0,2,1)).reshape((BLOCK_K, BN_Bs_stride)).shrink_to((BLOCK_K, BLOCK_N))
return As, Bs
# load from globals into locals (TODO: use the warpgroup)
def load_to_locals(l_K_outer_loop:UOp, Asl:UOp, Bsl:UOp, rng:int, barrier=True) -> tuple[UOp, UOp]:
if getenv("FAKE"):
return Asl[0].set(0), Bsl[0].set(0)
else:
pA = A.permute((0,2,1,3)).reshape((M//BLOCK_M, K//BLOCK_K, BLOCK_M*BLOCK_K))
pas = Asl.permute((1,0)).reshape((BLOCK_M*BLOCK_K,))
As_store = generic_copy(pA, (gx, l_K_outer_loop), pas, rng)
pB = B.permute((0,2,1,3)).reshape((K//BLOCK_K, N//BLOCK_N, BLOCK_K*BLOCK_N))
pbs = Bsl.reshape((BLOCK_K*BLOCK_N,))
Bs_store = generic_copy(pB, (l_K_outer_loop, gy), pbs, rng+2)
barrier = UOp.barrier(As_store, Bs_store) if barrier else UOp.group(As_store, Bs_store)
return Asl.after(barrier), Bsl.after(barrier)
def compute_on_locals(acc:UOp, Asl:UOp, Bsl:UOp, rng:int, afters:tuple[UOp, ...]=()) -> UOp:
K_inner_loop = UOp.range(BLOCK_K//TC_K, rng, AxisType.REDUCE)
# load from locals into registers
Ar = UOp.placeholder((BLOCK_M//TC_M//WARPGROUP_SIZE,), dtypes.half.vec(8), slot=1, addrspace=AddrSpace.REG)
Br = UOp.placeholder((BLOCK_N//TC_N,), dtypes.half.vec(8), slot=2, addrspace=AddrSpace.REG)
M_load_loop = UOp.range(BLOCK_M//TC_M//WARPGROUP_SIZE, rng+10)
Asl = Asl.reshape((BLOCK_K//TC_K, TC_K, BLOCK_M//TC_M//WARPGROUP_SIZE, WARPGROUP_SIZE, TC_M))
load_rng = UOp.range(8, rng+11, axis_type=AxisType.UPCAST)
A_in = Asl[K_inner_loop, (warp//16)*8+load_rng, M_load_loop, warpgroup, warp%16].contract(load_rng)
Ar = Ar[M_load_loop].set(A_in, end=M_load_loop)
N_load_loop = UOp.range(BLOCK_N//TC_N, rng+20)
Bsl = Bsl.reshape((BLOCK_K//TC_K, TC_K, BLOCK_N//TC_N, TC_N))
load_rng = UOp.range(8, rng+21, axis_type=AxisType.UPCAST)
B_in = Bsl[K_inner_loop, (warp//16)*8+load_rng, N_load_loop, warp%16].contract(load_rng)
Br = Br[N_load_loop].set(B_in, end=N_load_loop)
M_inner_loop = UOp.range(BLOCK_M//TC_M//WARPGROUP_SIZE, rng+30)
N_inner_loop = UOp.range(BLOCK_N//TC_N, rng+31)
# load values
acc_after = acc.after(*afters, M_inner_loop, N_inner_loop, K_inner_loop)
acc_load = acc_after[N_inner_loop, M_inner_loop]
# do WMMA
wmma_arg = ('WMMA_16_16_32_half_float', (16, 16, 32), dtypes.half, dtypes.float, 'AMD', 64, ((), (), ((3, 2), (2, 2))), ())
out = UOp(Ops.WMMA, dtypes.float.vec(4), (Ar[M_inner_loop], Br[N_inner_loop], acc_load), arg=wmma_arg)
# store back the acc
acc_store = acc[N_inner_loop, M_inner_loop].store(out)
return acc_store.end(M_inner_loop, N_inner_loop, K_inner_loop)
# **** START INNER LOOP *****
# inner loop -- locals -> regs
# no pipeline
if not getenv("PIPELINE"):
As, Bs = make_locals(slot=0)
K_outer_loop = UOp.range(K//BLOCK_K, 0, AxisType.REDUCE)
As, Bs = load_to_locals(K_outer_loop, As, Bs, 1000, barrier=True)
acc_store = compute_on_locals(acc, As, Bs, 1500, afters=(K_outer_loop,))
acc = acc.after(acc_store.barrier().end(K_outer_loop))
else:
# this doesn't work
As0, Bs0 = make_locals(slot=0)
As1, Bs1 = make_locals(slot=2)
As0, Bs0 = load_to_locals(0, As0, Bs0, 1000)
K_outer_loop = UOp.range((K//BLOCK_K-2)//2, 0, AxisType.REDUCE)
As1, Bs1 = load_to_locals(K_outer_loop+1, As1, Bs1, 2000, barrier=False)
acc_store = compute_on_locals(acc, As0, Bs0, 1500, afters=(K_outer_loop,))
As0, Bs0 = load_to_locals(K_outer_loop+2, As0, Bs0, 3000, barrier=False)
acc_store = compute_on_locals(acc, As1, Bs1, 2500, afters=(acc_store, As0, Bs0))
acc = acc.after(acc_store.barrier().end(K_outer_loop))
#acc_store = compute_on_locals(acc, As0, Bs0, 3500, afters=(acc_store.barrier().end(K_outer_loop)))
"""
As1, Bs1 = load_to_locals(K//BLOCK_K-1, As1, Bs1, 4000)
acc_store = compute_on_locals(acc, As1, Bs1, 4500, afters=(acc_store))
"""
#acc = acc.after(acc_store)
# **** END LOOPS *****
# store the acc into gmem
cp_i, cp_j = UOp.range(BLOCK_M//TC_M//WARPGROUP_SIZE, 10004), UOp.range(BLOCK_N//TC_N, 10005)
c_load = lambda i: C[gx, cp_i*TC_M*WARPGROUP_SIZE + warpgroup*TC_M + (warp//16)*4+i, gy, cp_j*TC_N + warp%16]
store = UOp.group(*[c_load(i).store(acc[cp_j, cp_i].gep(i)) for i in range(4)])
store = store.end(cp_i, cp_j)
return store.sink(arg=KernelInfo(name="custom_gemm", opts_to_apply=())).simplify()
# simplest WMMA
"""
# init the acc
acc = UOp.placeholder((4,), dtypes.float, 0, AddrSpace.REG)
acc = acc[init_l:=UOp.range(4, 1)].set(0.0, end=init_l)
# do the wmma
acc_load = UOp.vectorize(*[acc.after(K_loop)[i] for i in range(4)])
wmma_arg = ('WMMA_16_16_32_half_float', (16, 16, 32), dtypes.half, dtypes.float, 'AMD', 64, ((), (), ((3, 2), (2, 2))), ())
out = UOp(Ops.WMMA, dtypes.float.vec(4), (A_in, B_in, acc_load), arg=wmma_arg)
# store back the acc
acc = acc.after(UOp.group(*[acc[i].store(out.gep(i)) for i in range(4)]).end(K_loop))
# store the acc into gmem
store = UOp.group(*[C[gx, (warp//16)*4+i, gy, warp%16].store(acc[i]) for i in range(4)])
"""
if __name__ == "__main__":
a = Tensor.randn(M, K, dtype=dtypes.half)
b = Tensor.randn(K, N, dtype=dtypes.half)
#a = Tensor.zeros(M, K, dtype=dtypes.half).contiguous()
#a[0,16] = 1
#b = Tensor.ones(K, N, dtype=dtypes.half).contiguous()
c = Tensor.empty(M, N, dtype=dtypes.float)
with Context(DEBUG=0): Tensor.realize(a,b)
ref = a.dot(b, dtype=dtypes.float)
ref.realize()
GlobalCounters.reset()
with Context(DEBUG=max(2, DEBUG.value), DEVECTORIZE=2):
tst = Tensor.custom_kernel(c, a, b, fxn=custom_gemm)[0]
tst.realize()
print(f"{(N*M*K*2 / GlobalCounters.time_sum_s)*1e-12:.2f} REAL TFLOPS")
with Context(DEBUG=0):
#print(ref.numpy())
#print(tst.numpy())
assert Tensor.isclose(ref, tst, atol=1e-2).all().item(), "matrix not close"
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "extra/gemm/mi350x_uop_matmul.py",
"license": "MIT License",
"lines": 182,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
tinygrad/tinygrad:extra/gemm/metal_uop_matmul.py | from tinygrad import UOp, dtypes
from tinygrad.uop.ops import AxisType, Ops, KernelInfo, AddrSpace
from extra.gemm.amd_uop_matmul import test_matmul
N = 2048
# metal has an 8x8 tensor core. this is the indexing
def mat_idx(buf, g0, g1, warp, u):
l = [(warp//2**i)%2 for i in range(5)]
return buf[g0, l[4]*4 + l[2]*2 + l[1], g1, l[3]*4 + l[0]*2 + u]
def hand_spec_tc_cores():
gx = UOp.special(N // 8, "gidx0")
gy = UOp.special(N // 8, "gidx1")
warp = UOp.special(32, "lidx0")
c = UOp.placeholder((N, N), dtypes.float, slot=0).reshape((N//8, 8, N//8, 8))
a = UOp.placeholder((N, N), dtypes.float, slot=1).reshape((N//8, 8, N//8, 8))
b = UOp.placeholder((N, N), dtypes.float, slot=2).reshape((N//8, 8, N//8, 8))
gk = UOp.range(N // 8, 0, AxisType.REDUCE)
a_tc = UOp.vectorize(*[mat_idx(a, gx, gk, warp, i) for i in range(2)])
b_tc = UOp.vectorize(*[mat_idx(b, gk, gy, warp, i) for i in range(2)])
acc = UOp.placeholder((2,), dtypes.float, slot=0, addrspace=AddrSpace.REG)
acc = acc[0].set(0.0)
acc = acc[1].set(0.0)
# TODO: make this simple
wmma_arg = ('WMMA_8_8_8_float_float', (8, 8, 8), dtypes.float, dtypes.float, 'METAL', 32, (((3, 2),), ((3, 2),), ((3, 2),)), ())
acc_load = UOp.vectorize(acc.after(gk)[0], acc.after(gk)[1])
out = UOp(Ops.WMMA, dtypes.float.vec(2), (a_tc, b_tc, acc_load), arg=wmma_arg)
end_loop = UOp.group(*[acc[i].store(out.gep(i)) for i in range(2)]).end(gk)
sink = UOp.group(*[mat_idx(c.after(end_loop), gx, gy, warp, i).store(acc[i]) for i in range(2)])
return sink.sink(arg=KernelInfo(name="custom_metal_matmul", opts_to_apply=())).simplify()
if __name__ == "__main__":
test_matmul(hand_spec_tc_cores(), N=N)
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "extra/gemm/metal_uop_matmul.py",
"license": "MIT License",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
tinygrad/tinygrad:test/external/external_benchmark_op_conv.py | # ruff: noqa: E501 E712 F401
from dataclasses import replace
from tinygrad import dtypes, Device
from tinygrad.uop.ops import UOp, AxisType, Ops, KernelInfo
from tinygrad.codegen.opt import Opt, OptOps # pylint: disable=unused-import
from tinygrad.engine.realize import CompiledRunner, get_program
from tinygrad.helpers import dedup, getenv
from tinygrad.device import Buffer
from tinygrad.dtype import ImageDType, Invalid
# PYTHONPATH="." DEV=QCOM FLOAT16=1 IMAGE=2 NOLOCALS=1 taskset -c 4-7 python3 examples/openpilot/compile3.py https://github.com/commaai/openpilot/raw/720392c9a5b986981fdbed1bb8c47a6c5573a50e/selfdrive/modeld/models/driving_vision.onnx
def vision_conv_143():
c0 = UOp(Ops.PARAM, dtypes.imageh((16, 1024, 4)), (), 0)
c2 = UOp.range(32, 3, AxisType.LOOP)
c5 = UOp.range(128, 4, AxisType.LOOP)
c8 = UOp.range(16, 2, AxisType.LOOP)
c16 = UOp.range(7, 0, AxisType.REDUCE)
c17 = c8*2+c16
c24 = ((c17<3)!=True)&(c17<35)
c26 = UOp.range(7, 1, AxisType.REDUCE)
c27 = c2*2+c26
c32 = ((c27<3)!=True)&(c27<67)
c34 = UOp(Ops.PARAM, dtypes.imageh((32, 1024, 4)), (), 1)
c38 = c5//2
c45 = (c32&c24).where((c27*64+c38+c17*4096+-12480), UOp.const(dtypes.index, Invalid))
c48 = (c24&c32).where(c34.index(c45), UOp.const(dtypes.float, 0.0))
c49 = UOp(Ops.PARAM, dtypes.imageh((64, 49, 4)), (), 2)
c61 = c48*c49.index((c26*4+c5%2+c16*28+c38*196))
c63 = UOp(Ops.PARAM, dtypes.float.ptr(128), (), 3)
c65 = c61.reduce(c16, c26, arg=Ops.ADD)+c63.index(c5)
c67 = c0.index((c2*128+c5+c8*4096), ptr=True).store(c65).end(c8, c2, c5)
opts = None
# JITBEAM=2
# (Opt(op=OptOps.UPCAST, axis=2, arg=4), Opt(op=OptOps.NOLOCALS, axis=None, arg=None), Opt(op=OptOps.UPCAST, axis=2, arg=2), Opt(op=OptOps.UPCAST, axis=1, arg=4), Opt(op=OptOps.SWAP, axis=1, arg=2))
return c67.sink(arg=KernelInfo(name="conv", opts_to_apply=opts))
def vision_conv_153():
c0 = UOp(Ops.PARAM, dtypes.imageh((8, 1024, 4)), (), 0)
c2 = UOp.range(16, 3, AxisType.LOOP)
c5 = UOp.range(256, 4, AxisType.LOOP)
c8 = UOp.range(8, 2, AxisType.LOOP)
c16 = UOp.range(7, 0, AxisType.REDUCE)
c17 = c8*2+c16
c24 = ((c17<3)!=True)&(c17<19)
c26 = UOp.range(7, 1, AxisType.REDUCE)
c27 = c2*2+c26
c32 = ((c27<3)!=True)&(c27<35)
c34 = UOp(Ops.PARAM, dtypes.imageh((16, 1024, 4)), (), 1)
c38 = c5//2
c45 = (c32&c24).where((c27*128+c38+c17*4096+-12672), UOp.const(dtypes.index, Invalid))
c48 = (c24&c32).where(c34.index(c45), UOp.const(dtypes.float, 0.0))
c49 = UOp(Ops.PARAM, dtypes.imageh((128, 49, 4)), (), 2)
c61 = c48*c49.index((c26*4+c5%2+c16*28+c38*196))
c63 = UOp(Ops.PARAM, dtypes.float.ptr(256), (), 3)
c65 = c61.reduce(c16, c26, arg=Ops.ADD)+c63.index(c5)
c67 = c0.index((c2*256+c5+c8*4096), ptr=True).store(c65).end(c8, c2, c5)
opts = None
# JITBEAM=2
# (Opt(op=OptOps.UPCAST, axis=2, arg=4), Opt(op=OptOps.NOLOCALS, axis=None, arg=None), Opt(op=OptOps.UPCAST, axis=2, arg=2), Opt(op=OptOps.SWAP, axis=1, arg=2))
return c67.sink(arg=KernelInfo(name="conv", opts_to_apply=opts))
def dm_conv_172():
c0 = UOp(Ops.PARAM, dtypes.imageh((1, 240, 4)), (), 0)
c2 = UOp.range(960, 4, AxisType.LOOP)
c5 = UOp(Ops.PARAM, dtypes.imageh((8, 384, 4)), (), 1)
c7 = UOp.range(32, 0, AxisType.REDUCE)
c10 = UOp.range(4, 1, AxisType.REDUCE)
c13 = UOp.range(12, 3, AxisType.REDUCE)
c18 = UOp.range(8, 2, AxisType.REDUCE)
c23 = UOp(Ops.PARAM, dtypes.imageh((240, 128, 4)), (), 2)
c35 = c5.index((c7*4+c10+c13*128+c18*1536))*c23.index((c10*4+c2%4+c7*16+c2//4*512))
c37 = UOp(Ops.PARAM, dtypes.float.ptr(960), (), 3)
c39 = c35.reduce(c7, c10, arg=Ops.ADD)+c37.index(c2)
c50 = (1.0+((c39+0.044708251953125*(c39*(c39*c39)))*-2.3021129851685216).exp2()).reciprocal()*c39
c53 = c50.reduce(c18, c13, arg=Ops.ADD)*0.010416666666666666
c55 = c0.index(c2, ptr=True).store(c53).end(c2)
opts = None
# JITBEAM=2
# (Opt(op=OptOps.UPCAST, axis=0, arg=4), Opt(op=OptOps.GROUPTOP, axis=1, arg=32), Opt(op=OptOps.UNROLL, axis=1, arg=4), Opt(op=OptOps.LOCAL, axis=0, arg=8), Opt(op=OptOps.UNROLL, axis=0, arg=4), Opt(op=OptOps.GROUP, axis=1, arg=0))
return c55.sink(arg=KernelInfo(name="conv", opts_to_apply=opts))
ast = {143: vision_conv_143, 153: vision_conv_153, 172: dm_conv_172}[getenv("NUM", 143)]()
renderer = Device.default.renderer
allocator = Device.default.allocator
ps = get_program(ast, renderer)
cr = CompiledRunner(replace(ps, device=Device.DEFAULT))
gs = sorted(dedup([u for u in ast.toposort() if u.op is Ops.PARAM]), key=lambda u: u.arg)
# print(len(gs))
# print([g.dtype for g in gs])
bufs = [Buffer(ps.device, g.size, g.dtype if isinstance(g.dtype, ImageDType) else g.dtype._base).ensure_allocated() for g in gs]
t = cr(bufs, wait=True)
print(f"{t*1e6:.2f} us") | {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/external/external_benchmark_op_conv.py",
"license": "MIT License",
"lines": 88,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:extra/thunder/cuda/fa.py | import pathlib
from tinygrad import Device, Tensor
from tinygrad.helpers import Context
from tinygrad.runtime.support.compiler_cuda import pretty_ptx, NVCCCompiler
if __name__ == "__main__":
code = (pathlib.Path(__file__).parent / "fa.cu").read_text()
device = Device["CUDA"]
kitten_args = [f"-I{(pathlib.Path(__file__).parent / 'include').as_posix()}", "-std=c++20", "--expt-relaxed-constexpr", "-DKITTENS_4090"]
lib = NVCCCompiler(device.compiler.arch, kitten_args).compile(code)
kernel_name = lib.decode().split(".globl\t")[1].split("\n")[0]
print("kernel name", kernel_name)
print(pretty_ptx(lib.decode()))
prg = device.runtime(kernel_name, lib)
prg.smem = 16384 * 3
B, N, H, D = 16, 1024, 16, 64
q = Tensor.randn(B, N, H, D, device='CUDA', dtype="bfloat16")
k = Tensor.randn(B, N, H, D, device='CUDA', dtype="bfloat16")
v = Tensor.randn(B, N, H, D, device='CUDA', dtype="bfloat16")
out = Tensor.empty(B, N, H, D, device='CUDA', dtype="bfloat16")
Tensor.realize(q, k, v, out)
NUM_WORKERS = 4
ROWS = 16 * (64 // D)
gsz = (N // (ROWS*NUM_WORKERS), H, B)
for _ in range(5):
et = prg(out.uop.buffer.ensure_allocated()._buf, q.uop.buffer._buf, k.uop.buffer._buf, v.uop.buffer._buf,
global_size=gsz, local_size=(ROWS*NUM_WORKERS,1,1), wait=True)
attn_flops = 2 * B * H * N * N * D + \
4 * B * H * N * N + \
2 * B * H * N * N * D
print(f"{attn_flops/(et*1e9):2f} GFLOPS")
for _ in range(5):
with Context(DEBUG=2):
ref = q.scaled_dot_product_attention(k, v)
ref, out = ref.float(), out.float()
print((ref-out).mean().item(), (ref-out).max().item())
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "extra/thunder/cuda/fa.py",
"license": "MIT License",
"lines": 35,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
tinygrad/tinygrad:test/external/external_nhwc_conv.py | from tinygrad import Tensor, nn, Context, GlobalCounters
if __name__ == "__main__":
conv = nn.Conv2d(64, 128, 3)
img = Tensor.randn((1,64,128,128))
with Context(DEBUG=0, BEAM=0):
Tensor.realize(img, conv.weight, conv.bias)
tst = conv(img).permute(0,2,3,1).realize()
print(tst.shape)
print("NEW")
img_perm = img.permute(0,2,3,1).contiguous()
print(img_perm.shape)
pp = img_perm.permute(0,3,1,2)._pool((3,3)).permute(0,2,3,4,5,1)
def hwio(pp, conv):
pp = pp.unsqueeze(-1)
weight = conv.weight.permute(2,3,1,0).contiguous()
print(pp.shape, weight.shape, (pp*weight).shape)
return (pp * weight).sum([-4,-3, -2])
def ohwi(pp, conv):
pp = pp.unsqueeze(-4)
weight = conv.weight.permute(0,2,3,1).contiguous()
print(pp.shape, weight.shape, (pp*weight).shape)
return (pp * weight).sum([-3,-2,-1])
for f in [hwio, ohwi]:
GlobalCounters.reset()
print("\n**************", f.__name__, "**************")
out = f(pp, conv)
out.realize()
print(out.shape)
with Context(DEBUG=0, BEAM=0):
err = (tst-out).square()
print(err.mean().item(), err.max().item())
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/external/external_nhwc_conv.py",
"license": "MIT License",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:extra/thunder/cuda/matmul.py | import pathlib
from tinygrad import Device, Tensor
from tinygrad.helpers import Context, getenv
from tinygrad.runtime.support.compiler_cuda import pretty_ptx, NVCCCompiler
if __name__ == "__main__":
if getenv("MATMUL2"):
code = (pathlib.Path(__file__).parent / "matmul2.cu").read_text()
else:
code = (pathlib.Path(__file__).parent / "matmul.cu").read_text()
device = Device["CUDA"]
kitten_args = [f"-I{(pathlib.Path(__file__).parent / 'include').as_posix()}", "-std=c++20", "--expt-relaxed-constexpr"]
lib = NVCCCompiler(device.compiler.arch, kitten_args).compile(code)
kernel_name = lib.decode().split(".globl\t")[1].split("\n")[0]
print("kernel name", kernel_name)
print(pretty_ptx(lib.decode()))
prg = device.runtime(kernel_name, lib)
if getenv("MATMUL2"):
prg.smem = 16384 * 2
else:
prg.smem = 10000
N = 8192
a = Tensor.randn(N, N, device='CUDA', dtype="bfloat16")
b = Tensor.randn(N, N, device='CUDA', dtype="bfloat16")
c = Tensor.empty(N, N, device='CUDA', dtype="bfloat16")
Tensor.realize(a, b, c)
WARP_THREADS = 32
if getenv("MATMUL2"):
SUPER_N = 2
SUPER_M = 2
NUM_WORKERS = SUPER_N * SUPER_M
BLOCK_SIZE = 32
gsz = (N // (BLOCK_SIZE * SUPER_N), N // (BLOCK_SIZE * SUPER_M), 1)
else:
NUM_WORKERS = 1
BLOCK_SIZE = 32
gsz = (N // (BLOCK_SIZE), N // (BLOCK_SIZE), 1)
for _ in range(5):
et = prg(c.uop.buffer.ensure_allocated()._buf, a.uop.buffer._buf, b.uop.buffer._buf,
global_size=gsz, local_size=(NUM_WORKERS*WARP_THREADS,1,1), wait=True)
print(f"{N*N*N*2/(et*1e9):2f} GFLOPS")
# print(c.tolist())
for _ in range(5):
with Context(DEBUG=2):
ref = (a@b).realize()
ref, c = ref.float(), c.float()
print((ref-c).mean().item(), (ref-c).max().item())
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "extra/thunder/cuda/matmul.py",
"license": "MIT License",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
tinygrad/tinygrad:extra/tinyfs/fetch_file.py | from tinygrad.tensor import Tensor
import argparse, math, hashlib
def _python_hash_1mb(data:bytes|bytearray):
chunks = [data[i:i+4096] for i in range(0, len(data), 4096)]
chunk_hashes = [hashlib.shake_128(chunk).digest(16) for chunk in chunks]
return hashlib.shake_128(b''.join(chunk_hashes)).digest(16)
def hash_file(data: bytes|bytearray):
if len(data) % Tensor.CHUNK_SIZE != 0: data += bytes(Tensor.CHUNK_SIZE - len(data) % Tensor.CHUNK_SIZE)
base_chunks = math.ceil(len(data) / Tensor.CHUNK_SIZE)
tree_depth = math.ceil(math.log(base_chunks, Tensor.CHUNK_SIZE // 16))
for _ in range(tree_depth + 1):
data_chunks = [data[i:i+Tensor.CHUNK_SIZE] for i in range(0, len(data), Tensor.CHUNK_SIZE)]
data_chunk_hashes = [_python_hash_1mb(chunk) for chunk in data_chunks]
data = b''.join(data_chunk_hashes)
if len(data) % Tensor.CHUNK_SIZE != 0: data += bytes(Tensor.CHUNK_SIZE - len(data) % Tensor.CHUNK_SIZE)
return data[:16]
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--hash", type=str, required=True, help="file hash to fetch")
parser.add_argument("--len", type=int, required=True, help="file length to fetch")
parser.add_argument("--dest", type=str, required=True, help="destination path to save the file")
parser.add_argument("--check", action="store_true", help="verify the file hash after fetching")
args = parser.parse_args()
Tensor(bytes.fromhex(args.hash), device="CPU").fs_load(args.len).to(f"disk:{args.dest}").realize()
if args.check:
with open(args.dest, "rb") as f:
data = f.read()
assert hash_file(data) == bytes.fromhex(args.hash), "Hash mismatch after fetching file"
print("File hash verified successfully!")
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "extra/tinyfs/fetch_file.py",
"license": "MIT License",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
tinygrad/tinygrad:tinygrad/renderer/nir.py | from typing import Callable, cast, Any
from tinygrad.dtype import AddrSpace, DType, PtrDType, ImageDType, dtypes, truncate
from tinygrad.helpers import DEBUG, OSX, unwrap, fromimport
from tinygrad.renderer import Renderer
from tinygrad.renderer.cstyle import CUDARenderer
from tinygrad.uop.ops import GroupOp, Ops, UOp, PatternMatcher, UPat, range_str
from tinygrad.runtime.autogen import mesa
from tinygrad.runtime.support.c import POINTER
import base64, ctypes, ctypes.util, struct, functools, inspect, contextlib, itertools
def g(s:str): return getattr(mesa, s)
def nsrc(d:mesa.nir_def) -> mesa.nir_src: return mesa.nir_src(ssa=ctypes.pointer(d))
def glsl_type(t:DType): return mesa.glsl_array_type(glsl_type(t.base), t.size, 0).contents if isinstance(t, PtrDType) else {
**{getattr(dtypes,k):g(f"glsl_type_builtin_{v}") for k,v in [('double','double'),('float','float'),('float16','float16_t'),('bool','uint8_t')]},
**{d:g(f"glsl_type_builtin_{'u' * (d in dtypes.uints)}int{str(d.bitsize)+'_t' if d.itemsize != 4 else ''}") for d in dtypes.ints}}[t]
# alu ops, aop[<dtype>][<op>]
u_aop = { Ops.ADD: "iadd", Ops.MUL: "imul", Ops.IDIV: "udiv", Ops.MOD: "umod", Ops.CMPLT: "ult", Ops.CMPNE: "ine", Ops.CMPEQ: "ieq", Ops.OR: "ior",
Ops.AND: "iand", Ops.XOR: "ixor", Ops.WHERE: "bcsel", Ops.MAX: "umax"}
s_aop = {**u_aop, Ops.CMPLT: "ilt", Ops.IDIV: "idiv", Ops.MOD: "irem", Ops.MAX: "imax"}
f_aop = { Ops.ADD: "fadd", Ops.MUL: "fmul", Ops.CMPLT: "flt", Ops.CMPNE: "fneu", Ops.CMPEQ: "feq", Ops.FDIV: "fdiv", Ops.RECIPROCAL: "frcp",
Ops.MAX: "fmax", Ops.TRUNC: "ftrunc", Ops.SIN: "fsin", Ops.EXP2: "fexp2", Ops.LOG2: "flog2"}
aop = {**{x:u_aop for x in (dtypes.bool,)+dtypes.uints}, **{x:s_aop for x in dtypes.sints}, **{x:f_aop for x in dtypes.floats}}
def c(t:DType, u:bool=True) -> str: return "u" if t in dtypes.uints and u else ("i" if t in dtypes.ints else ("f" if t in dtypes.floats else "b"))
def ncast(b:mesa.nir_builder, src:mesa.nir_def, it:DType, ot:DType) -> mesa.nir_def:
if isinstance(it, PtrDType) and ot == dtypes.long: return src
return nalu(b, f"{c(it)}2{c(it) if it in dtypes.ints and ot in dtypes.ints else c(ot, ot == dtypes.bool)}{ot.bitsize}", src)
def nif(b:mesa.nir_builder, cond:mesa.nir_def, then_fn:Callable, else_fn:Callable):
nif = mesa.nir_push_if(b, cond)
t = then_fn()
mesa.nir_push_else(b, nif)
e = else_fn()
mesa.nir_pop_if(b, nif)
return t, e
def nalu(b:mesa.nir_builder, op:str, *srcs:mesa.nir_def) -> mesa.nir_def: return g(f"nir_build_alu{len(srcs)}")(b, g(f"nir_op_{op}"), *srcs).contents
def nir_instr(nc=1, bs=lambda: None, intrins=None, srcs=None, has_def=True, df=None, also=lambda: None, **contents):
def dec(f:Callable):
@functools.wraps(f)
def wrapper(*args, **kwargs) -> mesa.nir_def:
(ba:=inspect.signature(f).bind(*args, **kwargs)).apply_defaults()
def go(g): return g(**{nm: ba.arguments[nm] for nm in inspect.signature(g).parameters}) if callable(g) else g
instr = f(*args, **kwargs)
if has_def: mesa.nir_def_init(instr.contents.instr, instr.contents._def, go(nc), go(bs))
for k, v in go(intrins or {}).items():
idx = mesa.nir_intrinsic_infos[instr.contents.intrinsic].index_map[g(f"NIR_INTRINSIC_{k}")]
assert idx > 0, "invalid intrinsic. mesa version mismatch?"
instr.contents.const_index[idx - 1] = go(v)
for i, src in enumerate(go(srcs or [])): ctypes.cast(instr.contents.src, ctypes.POINTER(mesa.nir_src))[i] = go(src)
for k,v in {k:vcomp for k,v in contents.items() if (vcomp:=go(v)) is not None}.items(): setattr(instr.contents, k, go(v))
mesa.nir_builder_instr_insert(ba.arguments['b'], instr.contents.instr)
go(also)
return instr.contents._def if has_def else (mesa.nir_def() if df is None else go(df))
return wrapper
return dec
@nir_instr(nc=1, bs=lambda src: src.bit_size, exact=lambda b:b.exact, fp_fast_math=lambda b:b.fp_fast_math)
def nchannel(b:mesa.nir_builder, src:mesa.nir_def, c:int):
alu_src = mesa.nir_alu_src(src=nsrc(src))
alu_src.swizzle[0] = c
mov = mesa.nir_alu_instr_create(b.shader, mesa.nir_op_mov)
ctypes.cast(mov.contents.src, ctypes.POINTER(mesa.nir_alu_src))[0] = alu_src
return mov
def nimm_set(imm:mesa.nir_def, x, dtype:DType):
instr = ctypes.cast(imm.parent_instr, ctypes.POINTER(mesa.nir_load_const_instr))
struct.pack_into(unwrap(dtype.fmt), (ctypes.c_ubyte * dtype.itemsize).from_address(ctypes.addressof(instr.contents.value)), 0, truncate[dtype](x))
@nir_instr(nc=1, bs=lambda dtype: dtype.bitsize)
def nimm(b:mesa.nir_builder, x, dtype:DType) -> mesa.nir_def:
nimm_set((instr:=mesa.nir_load_const_instr_create(b.shader, 1, dtype.bitsize)).contents._def, x, dtype)
return instr
@nir_instr(nc=1, bs=lambda dtype: dtype.bitsize)
def nundef(b, dtype): return mesa.nir_undef_instr_create(b.shader, 1, dtype.bitsize)
deref_var = nir_instr(nc=1, bs=32, modes=lambda var:var.data.mode, type=lambda var:var.type, var=lambda var:ctypes.pointer(var))( # pylint: disable=W0108
lambda b, var: mesa.nir_deref_instr_create(b.shader, mesa.nir_deref_type_var))
def iointr(space): return {"ALIGN_MUL":lambda dtype:dtype.itemsize} if space != AddrSpace.REG else {}
def scope(space): return 'global' if space == AddrSpace.GLOBAL else ('shared' if space == AddrSpace.LOCAL else 'deref')
nstore = nir_instr(has_def=False, df=lambda addr:addr, intrins=lambda space,val: {"WRITE_MASK":(1<<val.num_components)-1, **iointr(space)},
num_components=lambda val:val.num_components, srcs=lambda space, addr, val: [nsrc(val), nsrc(addr)][::1 if space != AddrSpace.REG else -1])(
lambda b, space, addr, val, dtype: mesa.nir_intrinsic_instr_create(b.shader, g(f"nir_intrinsic_store_{scope(space)}")))
nload = nir_instr(nc=lambda dtype:dtype.count, bs=lambda dtype:dtype.bitsize//dtype.count, num_components=lambda dtype:dtype.count,
intrins=lambda space:{**({"ACCESS":mesa.ACCESS_CAN_REORDER} if space==AddrSpace.GLOBAL else {}), **iointr(space)}, srcs=lambda addr: [nsrc(addr)])(
lambda b, space, addr, dtype: mesa.nir_intrinsic_instr_create(b.shader, g(f"nir_intrinsic_load_{scope(space)}")))
ngid = nir_instr(nc=3, bs=32)(lambda b: mesa.nir_intrinsic_instr_create(b.shader, mesa.nir_intrinsic_load_workgroup_id))
nlid = nir_instr(nc=3, bs=32)(lambda b: mesa.nir_intrinsic_instr_create(b.shader, mesa.nir_intrinsic_load_local_invocation_id))
ngsz = nir_instr(nc=3, bs=32)(lambda b: mesa.nir_intrinsic_instr_create(b.shader, mesa.nir_intrinsic_load_workgroup_size))
def nid(b): return nalu(b, "iadd", nalu(b, "imul", ngid(b), ngsz(b)), nlid(b))
nbarrier = nir_instr(has_def=False, intrins={"EXECUTION_SCOPE":mesa.SCOPE_WORKGROUP})(
lambda b: mesa.nir_intrinsic_instr_create(b.shader, mesa.nir_intrinsic_barrier))
@nir_instr(has_def=False, target=lambda tgt:tgt and ctypes.pointer(tgt), condition=lambda cond:cond and nsrc(cond),
else_target=lambda else_tgt: else_tgt and ctypes.pointer(else_tgt))
def njump(b:mesa.nir_builder, typ, tgt=None, cond=None, else_tgt=None): return mesa.nir_jump_instr_create(b.shader, typ)
def if_phi(b:mesa.nir_builder, cond, then_fn, else_fn): return mesa.nir_if_phi(b, *nif(b, cond, then_fn, else_fn)).contents
def nidx(b:mesa.nir_builder, buf, off, dtype, gate=None) -> mesa.nir_def:
  # Address of element `off` within `buf`. REG buffers become an array deref of the local
  # variable; all other spaces use plain pointer arithmetic: buf + off * dtype.itemsize.
  # When `gate` is given, the address computation is wrapped in an if/phi so the offset is
  # only applied on the gated path (the else branch just yields `buf`).
  @nir_instr(nc=1, bs=32, modes=lambda buf: buf.data.mode, type=lambda buf: mesa.glsl_get_array_element(buf.type))
  def reg(b, buf):
    deref = mesa.nir_deref_instr_create(b.shader, mesa.nir_deref_type_array)
    deref.contents.parent, deref.contents.arr.index = nsrc(deref_var(b, buf)), nsrc(off)
    return deref
  f = (functools.partial(reg, b, buf) if dtype.addrspace == AddrSpace.REG else
    lambda: nalu(b, "iadd", buf, nalu(b, "imul", off, nimm(b, dtype.itemsize, dtypes.long))))
  return if_phi(b, gate, f, lambda: buf) if gate is not None else f()
class NIRRenderer(Renderer):
  """Render linearized tinygrad UOps into a Mesa NIR shader, returned base64-encoded after nir_serialize."""
  suffix = "NIR"
  nir_options: bytes  # raw nir_shader_compiler_options blob, supplied by the compiler subclass
  global_max, local_max, shared_max = CUDARenderer.global_max, CUDARenderer.local_max, CUDARenderer.shared_max
  # values are unused placeholders: ops are actually emitted through def_rewrite/nalu,
  # this dict only advertises which ops the backend supports
  code_for_op = {**{k:lambda:None for k in u_aop}, **{k:lambda:None for k in s_aop}, **{k:lambda:None for k in f_aop}}
  extra_matcher = PatternMatcher([
    # handle negative unsigned CONST
    (UPat.cvar("x", dtypes.uints), lambda x: UOp(Ops.CONST, dtype=x.dtype, arg=x.dtype.max+x.arg+1) if x.arg < 0 else None),
    # from ptx
    (UPat.var('x', dtype=dtypes.bool)<UPat.var('y'), lambda x,y: (x^True)&y),
    # load/store bool -> uint8
    (UPat(Ops.LOAD, dtypes.bool, name="x"),
     lambda x: x.replace(dtype=dtypes.uint8, src=x.src[0:1]+((x.src[1].cast(dtypes.uint8),) if len(x.src)>=2 else ())+x.src[2:]).cast(dtypes.bool)),
    (UPat(Ops.STORE, src=(UPat(), UPat(dtype=dtypes.bool)), name="x", allow_any_len=True),
     lambda x: x.replace(src=x.src[0:1] + (x.src[1].cast(dtypes.uint8),) + x.src[2:])),
    # OpConvertFToU is undefined if Result Type is not wide enough, cast through int32
    # ref: https://registry.khronos.org/SPIR-V/specs/unified1/SPIRV.html#OpConvertFToU
    (UPat(Ops.CAST, (dtypes.uchar, dtypes.ushort), src=(UPat.var("x", dtypes.floats),), name="c"), lambda x,c: x.cast(dtypes.int32).cast(c.dtype)),
    # load/store use pointer arithmetic, and the cast does nothing
    (UPat(Ops.INDEX, src=(UPat.var("buf"), UPat.var("off")), allow_any_len=True, name="x"), lambda x,buf,off: x.replace(
      src=(buf,off.cast(dtypes.long))+x.src[2:]) if buf.dtype.addrspace != AddrSpace.REG and off.op not in (Ops.CAST, Ops.VECTORIZE) else None),
    (UPat(Ops.CAST, name="x"), lambda x: x.src[0] if isinstance(x.dtype, PtrDType) or x.src[0].dtype == dtypes.void else None),
  ])
  # per-op emitters; each returns the nir_def the op renders to (looked up via ctx.r)
  def_rewrite = PatternMatcher([
    (UPat(Ops.CONST, name="x"), lambda ctx,x: nimm(ctx.b, x.arg, x.dtype)),
    (UPat(Ops.PARAM, name="x"), lambda ctx,x: ctx.param(ctx.b, x, 8)),
    (UPat(Ops.DEFINE_VAR, name="x"), lambda ctx,x: ctx.param(ctx.b, x, 4)),
    (UPat(Ops.SPECIAL, name="x"), lambda ctx,x: nchannel(ctx.b, {'g':ngid, 'l':nlid, 'i': nid}[x.arg[0]](ctx.b), int(x.arg[-1]))),
    (UPat(Ops.STORE, src=(UPat(Ops.INDEX, src=(UPat.var("buf"),UPat.var("off")), allow_any_len=True), UPat.var("val")), allow_any_len=True, name="x"),
     lambda ctx,x,buf,off,val: nstore(ctx.b, buf.ptrdtype.addrspace, nidx(ctx.b, ctx.r[buf], ctx.r[off], buf.dtype), ctx.r[val], val.dtype)),
    # gated load: only dereference when the gate is true, otherwise yield the alt value
    (UPat(Ops.LOAD, src=(UPat(Ops.INDEX, src=(UPat.var("buf"), UPat.var("off"), UPat.var("gate"))), UPat.var("alt")), allow_any_len=True, name="x"),
     lambda ctx,x,buf,off,alt,gate: if_phi(ctx.b, ctx.r[gate],
       lambda: nload(ctx.b, buf.ptrdtype.addrspace, nidx(ctx.b, ctx.r[buf], ctx.r[off], buf.dtype, ctx.r[gate]), x.dtype), lambda: ctx.r[alt])),
    (UPat(Ops.LOAD, src=(UPat(Ops.INDEX, src=(UPat.var("buf"), UPat.var("off"))),), allow_any_len=True, name="x"),
     lambda ctx,x,buf,off: nload(ctx.b, buf.ptrdtype.addrspace, nidx(ctx.b, ctx.r[buf], ctx.r[off], buf.dtype), x.dtype)),
    (UPat(Ops.VECTORIZE, name="x"), lambda ctx,x: nalu(ctx.b, f"vec{x.dtype.count}", *[ctx.r[src] for src in x.src])),
    (UPat(GroupOp.ALU, name="x"), lambda ctx,x: nalu(ctx.b, aop[x.src[0].dtype.scalar()][x.op], *[ctx.r[src] for src in x.src])),
    (UPat(Ops.CAST, name="x"), lambda ctx,x: ncast(ctx.b, ctx.r[x.src[0]], x.src[0].dtype, x.dtype)),
    (UPat(Ops.BITCAST, src=(UPat.var("a"),), allow_any_len=True), lambda ctx,a: ctx.r[a]),
    (UPat(Ops.GEP, src=(UPat.var("a"),), name="x"), lambda ctx,x,a: nchannel(ctx.b, ctx.r[a], x.arg[0])),
    (UPat(Ops.DEFINE_REG, name="x"), lambda ctx,x:mesa.nir_local_variable_create(ctx.b.impl, glsl_type(x.dtype), f"acc{x.arg}".encode()).contents),
    (UPat(Ops.BARRIER), lambda ctx: nbarrier(ctx.b)),
    (UPat(Ops.IF, name="x"), lambda ctx,x: mesa.nir_push_if(ctx.b, ctx.r[x.src[0]])),
    (UPat(Ops.ENDIF, name="x"), lambda ctx,x: (lambda _: mesa.nir_def())(mesa.nir_pop_if(ctx.b, ctx.r[x.src[0]])))
  ])
  # pickle support: reconstruct from the original constructor args
  def __reduce__(self): return self.__class__, self.args
  def __init__(self, *args):
    # the matching compiler class lives in compiler_mesa (e.g. NAKRenderer -> NAKCompiler)
    self.compiler = fromimport("tinygrad.runtime.support.compiler_mesa", self.__class__.__name__.replace("Renderer", "Compiler"))(*args)
    self.args = args
    if hasattr(self.compiler, "nir_options"): self.nir_options = self.compiler.nir_options
    mesa.glsl_type_singleton_init_or_ref()
  def __del__(self):
    # AttributeError is possible during interpreter shutdown when mesa globals are gone
    with contextlib.suppress(AttributeError): mesa.glsl_type_singleton_decref()
  def param(self, b:mesa.nir_builder, x, sz:int) -> mesa.nir_def: raise NotImplementedError("needs param")
  def prerender(self, uops:list[UOp]):
    """Create the nir_builder for a fresh compute shader; subclasses extend with per-backend state."""
    self.b = mesa.nir_builder_init_simple_shader(mesa.MESA_SHADER_COMPUTE, mesa.nir_shader_compiler_options.from_buffer_copy(self.nir_options), None)
    # 'i'-prefixed SPECIALs (global invocation id) mean the workgroup size isn't fixed at compile time
    self.b.shader.contents.info.workgroup_size_variable = any(u.op == Ops.SPECIAL and u.arg[0] == 'i' for u in uops)
  def postrender(self, uops:list[UOp]): pass
  def render(self, uops:list[UOp]):
    """Walk the uops, emit NIR for each, validate, and return the serialized shader base64-encoded."""
    self.prerender(uops)
    for u in [u for u in uops if u.op is Ops.SPECIAL and u.arg[0] == "l"]: self.b.shader.contents.info.workgroup_size[int(u.arg[-1])] = u.src[0].arg
    self.r: dict[UOp, Any] = {}  # UOp -> rendered value (nir_def / variable / immediate)
    self.param_idx, ranges = 0, []
    for u in uops:
      if u.op in {Ops.NOOP, Ops.GROUP, Ops.INDEX}: pass  # INDEX is folded into its LOAD/STORE consumer
      elif u.op is Ops.AFTER:
        self.r[u] = self.r[u.src[0]]
      elif u.op == Ops.SINK:
        if u.arg is not None:
          self.b.shader.contents.info.name = ctypes.cast(ctypes.create_string_buffer(u.arg.function_name.encode()), POINTER[ctypes.c_char])
      elif u.op == Ops.DEFINE_LOCAL:
        # shared memory is a flat arena: this buffer's value is its byte offset, then the arena grows
        self.r[u] = nimm(self.b, self.b.shader.contents.info.shared_size, dtypes.long)
        self.b.shader.contents.info.shared_size += u.dtype.nbytes()
      elif u.op == Ops.RANGE:
        # loop counter lives in a local variable; NIR loops don't terminate on their own,
        # so emit `if not (i < end): break` at the top
        ranges.append(i:=deref_var(self.b, mesa.nir_local_variable_create(self.b.impl, glsl_type(u.dtype), f"idx{range_str(u)}".encode()).contents))
        nstore(self.b, AddrSpace.REG, i, nimm(self.b, 0, u.dtype), u.dtype)
        mesa.nir_push_loop(self.b)
        self.r[u] = nload(self.b, AddrSpace.REG, i, u.dtype)
        nif(self.b, nalu(self.b, "ilt", self.r[u], self.r[u.src[0]]), lambda: None, lambda: njump(self.b, mesa.nir_jump_break))
      elif u.op == Ops.END:
        r = u.src[1]
        next_i = nalu(self.b, "iadd", self.r[r], nimm(self.b, 1, r.dtype))
        # TODO: this nif should be removable ... but TestMultiTensor.test_double_matmul_shard_W_0 segfaults with it gone
        nif(self.b, nalu(self.b, "ilt", next_i, self.r[r.src[0]]), lambda: None, lambda: njump(self.b, mesa.nir_jump_break))
        nstore(self.b, AddrSpace.REG, ranges.pop(), next_i, r.dtype)
        mesa.nir_pop_loop(self.b, None)
      else:
        if (d:=self.def_rewrite.rewrite(u, ctx=self)) is None: raise RuntimeError(f"failed to render {u.op} srcs {[x.dtype for x in u.src]}")
        self.r[u] = cast(mesa.nir_def, d)
    self.postrender(uops)
    mesa.nir_validate_shader(self.b.shader, b"after render")
    if DEBUG >= 4: mesa.nir_print_shader(self.b.shader, ctypes.POINTER(mesa.struct__IO_FILE).in_dll(ctypes.CDLL(ctypes.util.find_library('c')),
      "__stdoutp" if OSX else "stdout"))
    mesa.nir_serialize(blob:=mesa.struct_blob(), self.b.shader, False)
    ret = base64.b64encode(ctypes.string_at(blob.data, blob.size)).decode()
    mesa.ralloc_free(self.b.shader)
    ctypes.CDLL(None).free(blob.data)  # blob.data is malloc'd on the C side; free it there too
    del self.b, self.r
    return ret
class NAKRenderer(NIRRenderer):
  # NVIDIA (nouveau/NAK) backend
  device = "NV"
  # param(b, x, sz): read an sz-byte kernel argument via nir_intrinsic_ldc_nv from constant
  # bank 0 at the running self.param_idx offset; `also` advances param_idx by sz afterwards
  param = nir_instr(nc=1, num_components=1, bs=lambda sz:sz*8, also=lambda self,sz: setattr(self, "param_idx", self.param_idx + sz),
intrins={"ALIGN_MUL":lambda sz:sz}, srcs=lambda self,b: [nsrc(nimm(b, 0, dtypes.int)), nsrc(nimm(b, self.param_idx, dtypes.int))])(
lambda self, b, x, sz: mesa.nir_intrinsic_instr_create(b.shader, mesa.nir_intrinsic_ldc_nv))
class LVPRenderer(NIRRenderer):
  # llvmpipe (CPU) backend: no local/shared memory, single global dimension
  device = "CPU"
  has_local = False
  has_shared = False
  global_max = (1, 0, 0)
  nir_options = mesa.lvp_nir_options
  # gallivm's exp2/log2 have "undefined behavior with infs, 0s and nans", so exp2(log2(0)*y) returns 0 instead of inf
  # https://gitlab.freedesktop.org/mesa/mesa/-/blob/c200b18e876468b51fe80d9660f612dc03a5138e/src/gallium/auxiliary/gallivm/lp_bld_arit.c#L2972
  code_for_op = {k:v for k,v in NIRRenderer.code_for_op.items() if k != Ops.EXP2}
  # param(b, x, sz): kernel arguments are read from UBO 0 at the running self.param_idx offset;
  # the RANGE intrinsic needs the total argument size (self.param_sz, computed in prerender),
  # and `also` advances param_idx by sz after the load is built
  param = nir_instr(nc=1, bs=lambda sz: sz * 8, num_components=1, intrins={"ALIGN_MUL":lambda sz: sz, "RANGE":lambda self: self.param_sz},
srcs=lambda b, self: [nsrc(nimm(b, 0, dtypes.int)), nsrc(nimm(b, self.param_idx, dtypes.int))], also=lambda self, sz:
setattr(self, "param_idx", self.param_idx+sz))(lambda self,b,x,sz: mesa.nir_intrinsic_instr_create(b.shader, mesa.nir_intrinsic_load_ubo))
  def prerender(self, uops:list[UOp]):
    super().prerender(uops)
    # total argument bytes: 8 per buffer pointer (PARAM), itemsize per bound variable
    self.param_sz = sum([8 if u.op == Ops.PARAM else u.dtype.itemsize for u in uops if u.op in (Ops.PARAM, Ops.DEFINE_VAR)])
# FIXME: this should be a rewrite rule
def tovec(b, coord):
  # pad a 2d image coordinate out to the vec4 that image intrinsics expect: (x, y, undef, undef)
  components = [nchannel(b, coord, axis) for axis in (0, 1)]
  components += [nundef(b, dtypes.int), nundef(b, dtypes.int)]
  return nalu(b, "vec4", *components)
def nfloat(dtype): return mesa.nir_type_float16 if dtype == dtypes.half else mesa.nir_type_float32
# nstore_img(b, img, coord, val, dtype): 2D image store intrinsic; no SSA def. The coordinate
# is padded to vec4 via tovec, and SRC_TYPE is the NIR float type matching `dtype`.
nstore_img = nir_instr(has_def=False, df=lambda img:img, num_components=lambda val:val.num_components,
intrins=lambda dtype:{'IMAGE_DIM':mesa.GLSL_SAMPLER_DIM_2D, 'ACCESS':mesa.ACCESS_CAN_REORDER, 'SRC_TYPE':nfloat(dtype)},
srcs=lambda b,img,coord,val:[nsrc(x) for x in [img, tovec(b, coord), nundef(b, dtypes.int), val, nimm(b, 0, dtypes.int)]])(
lambda b,img,coord,val,dtype:mesa.nir_intrinsic_instr_create(b.shader,g("nir_intrinsic_image_store")))
# _nload_img(b, img, coord, dtype): 2D image load intrinsic returning a 4-component 32-bit def;
# DEST_TYPE is the NIR float type matching `dtype`, coordinate padded to vec4 via tovec
_nload_img = nir_instr(intrins=lambda dtype:{'IMAGE_DIM':mesa.GLSL_SAMPLER_DIM_2D, 'ACCESS':mesa.ACCESS_CAN_REORDER, 'DEST_TYPE':nfloat(dtype)},
nc=4, bs=32, num_components=4, srcs=lambda b,img,coord:[nsrc(x) for x in [img, tovec(b, coord), nundef(b, dtypes.int), nimm(b, 0, dtypes.int)]])(
lambda b,img,coord,dtype: mesa.nir_intrinsic_instr_create(b.shader, g("nir_intrinsic_image_load")))
class IR3Renderer(NIRRenderer):
  # Qualcomm Adreno (freedreno/ir3) backend: adds 2D image load/store rendering plus
  # texture/image slot bookkeeping on top of the generic NIR renderer
  device = "QCOM"
  has_aux = True
  def nload_img(ctx,img,coord):
    # record that `img` was read so postrender can assign it a tex slot instead of an img slot
    ctx.texs.add(img)
    return _nload_img(ctx.b, ctx.r[img], ctx.r[coord], img.dtype)
  # image patterns are tried first, then fall through to the generic NIRRenderer rules
  def_rewrite = PatternMatcher([
    (UPat(Ops.STORE, src=(UPat.var('img').index(UPat.var('coord', dtypes.int.vec(2)), allow_any_len=True), UPat.var("val")),
      allow_any_len=True), lambda ctx,img,coord,val: nstore_img(ctx.b, ctx.r[img], ctx.r[coord], ctx.r[val], val.dtype)),
    (UPat(Ops.LOAD, src=(UPat.var('img').index(UPat.var('coord', dtypes.int.vec(2)), UPat.var("gate")), UPat.var("alt"))),
      lambda ctx,img,coord,alt,gate: if_phi(ctx.b, ctx.r[gate], lambda: ctx.nload_img(img, coord), lambda: ctx.r[alt])),
    (UPat(Ops.LOAD, src=(UPat.var('img').index(UPat.var('coord', dtypes.int.vec(2))),)), nload_img),
  ]) + NIRRenderer.def_rewrite
  _param = LVPRenderer.param
  def _param_img(self, x):
    # image params don't load from the UBO; they just take the next image slot index as an immediate
    # (the immediate is patched to its final tex/img slot in postrender)
    self.img_idx += 1
    return nimm(self.b, self.img_idx - 1, dtypes.int)
  def param(self, b, x, sz): return self._param_img(x) if isinstance(x.dtype, ImageDType) else self._param(b, x, sz)
  def prerender(self, uops:list[UOp]):
    super().prerender(uops)
    self.texs:set[UOp] = set()  # image PARAMs that were read (sampled) during rendering
    self.uops, self.ibo_idx, self.img_idx = uops, 0, 0
    # total argument bytes for the inherited UBO param loader: 8 per buffer pointer, itemsize per var
    self.param_sz = sum([8 if u.op == Ops.PARAM else u.dtype.itemsize for u in uops if u.op in (Ops.PARAM, Ops.DEFINE_VAR)])
  def postrender(self, uops:list[UOp]):
    # patch each image param's immediate with its final slot: read images (in self.texs) are
    # numbered by the tex counter, write-only images by the img counter
    bufs, texs, imgs = [u for u in uops if u.op == Ops.PARAM], itertools.count().__next__, itertools.count().__next__
    for b in filter(lambda b: isinstance(b.dtype, ImageDType), bufs): nimm_set(self.r[b], texs() if b in self.texs else imgs(), dtypes.int)
    self.b.shader.contents.info.num_ubos = len([u for u in bufs if not isinstance(u.dtype, ImageDType)])
    # after the loop the counters' next values equal the counts, so this totals both kinds
    self.b.shader.contents.info.num_images = texs() + imgs()
  # aux info attached per-kernel: the dtypes of all PARAM uops
  def aux(self, uops:list[UOp]): return (tuple(u.dtype for u in uops if u.op == Ops.PARAM),)
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "tinygrad/renderer/nir.py",
"license": "MIT License",
"lines": 255,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
tinygrad/tinygrad:tinygrad/runtime/autogen/mesa.py | # mypy: disable-error-code="empty-body"
from __future__ import annotations
import ctypes
from typing import Annotated, Literal, TypeAlias
from tinygrad.runtime.support.c import _IO, _IOW, _IOR, _IOWR
from tinygrad.runtime.support import c
from tinygrad.helpers import CPU_CC, CPU_LVP
import gzip, base64
dll = c.DLL('mesa', ([] if CPU_CC.value == 'LVP' or bool(CPU_LVP) else ['tinymesa']) + ['tinymesa_cpu'])
class struct_u_printf_info(ctypes.Structure): pass
u_printf_info: TypeAlias = struct_u_printf_info
uint32_t: TypeAlias = Annotated[int, ctypes.c_uint32]
try: nir_debug = uint32_t.in_dll(dll, 'nir_debug') # type: ignore
except (ValueError,AttributeError): pass
try: nir_debug_print_shader = c.Array[Annotated[bool, ctypes.c_bool], Literal[15]].in_dll(dll, 'nir_debug_print_shader') # type: ignore
except (ValueError,AttributeError): pass
nir_component_mask_t: TypeAlias = Annotated[int, ctypes.c_uint16]
@dll.bind
def nir_process_debug_variable() -> None: ...
@dll.bind
def nir_component_mask_can_reinterpret(mask:nir_component_mask_t, old_bit_size:Annotated[int, ctypes.c_uint32], new_bit_size:Annotated[int, ctypes.c_uint32]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_component_mask_reinterpret(mask:nir_component_mask_t, old_bit_size:Annotated[int, ctypes.c_uint32], new_bit_size:Annotated[int, ctypes.c_uint32]) -> nir_component_mask_t: ...
@c.record
class struct_nir_state_slot(c.Struct):
SIZE = 8
tokens: Annotated[c.Array[gl_state_index16, Literal[4]], 0]
gl_state_index16: TypeAlias = Annotated[int, ctypes.c_int16]
nir_state_slot: TypeAlias = struct_nir_state_slot
class nir_rounding_mode(Annotated[int, ctypes.c_uint32], c.Enum): pass
nir_rounding_mode_undef = nir_rounding_mode.define('nir_rounding_mode_undef', 0)
nir_rounding_mode_rtne = nir_rounding_mode.define('nir_rounding_mode_rtne', 1)
nir_rounding_mode_ru = nir_rounding_mode.define('nir_rounding_mode_ru', 2)
nir_rounding_mode_rd = nir_rounding_mode.define('nir_rounding_mode_rd', 3)
nir_rounding_mode_rtz = nir_rounding_mode.define('nir_rounding_mode_rtz', 4)
class nir_ray_query_value(Annotated[int, ctypes.c_uint32], c.Enum): pass
nir_ray_query_value_intersection_type = nir_ray_query_value.define('nir_ray_query_value_intersection_type', 0)
nir_ray_query_value_intersection_t = nir_ray_query_value.define('nir_ray_query_value_intersection_t', 1)
nir_ray_query_value_intersection_instance_custom_index = nir_ray_query_value.define('nir_ray_query_value_intersection_instance_custom_index', 2)
nir_ray_query_value_intersection_instance_id = nir_ray_query_value.define('nir_ray_query_value_intersection_instance_id', 3)
nir_ray_query_value_intersection_instance_sbt_index = nir_ray_query_value.define('nir_ray_query_value_intersection_instance_sbt_index', 4)
nir_ray_query_value_intersection_geometry_index = nir_ray_query_value.define('nir_ray_query_value_intersection_geometry_index', 5)
nir_ray_query_value_intersection_primitive_index = nir_ray_query_value.define('nir_ray_query_value_intersection_primitive_index', 6)
nir_ray_query_value_intersection_barycentrics = nir_ray_query_value.define('nir_ray_query_value_intersection_barycentrics', 7)
nir_ray_query_value_intersection_front_face = nir_ray_query_value.define('nir_ray_query_value_intersection_front_face', 8)
nir_ray_query_value_intersection_object_ray_direction = nir_ray_query_value.define('nir_ray_query_value_intersection_object_ray_direction', 9)
nir_ray_query_value_intersection_object_ray_origin = nir_ray_query_value.define('nir_ray_query_value_intersection_object_ray_origin', 10)
nir_ray_query_value_intersection_object_to_world = nir_ray_query_value.define('nir_ray_query_value_intersection_object_to_world', 11)
nir_ray_query_value_intersection_world_to_object = nir_ray_query_value.define('nir_ray_query_value_intersection_world_to_object', 12)
nir_ray_query_value_intersection_candidate_aabb_opaque = nir_ray_query_value.define('nir_ray_query_value_intersection_candidate_aabb_opaque', 13)
nir_ray_query_value_tmin = nir_ray_query_value.define('nir_ray_query_value_tmin', 14)
nir_ray_query_value_flags = nir_ray_query_value.define('nir_ray_query_value_flags', 15)
nir_ray_query_value_world_ray_direction = nir_ray_query_value.define('nir_ray_query_value_world_ray_direction', 16)
nir_ray_query_value_world_ray_origin = nir_ray_query_value.define('nir_ray_query_value_world_ray_origin', 17)
nir_ray_query_value_intersection_triangle_vertex_positions = nir_ray_query_value.define('nir_ray_query_value_intersection_triangle_vertex_positions', 18)
class nir_resource_data_intel(Annotated[int, ctypes.c_uint32], c.Enum): pass
nir_resource_intel_bindless = nir_resource_data_intel.define('nir_resource_intel_bindless', 1)
nir_resource_intel_pushable = nir_resource_data_intel.define('nir_resource_intel_pushable', 2)
nir_resource_intel_sampler = nir_resource_data_intel.define('nir_resource_intel_sampler', 4)
nir_resource_intel_non_uniform = nir_resource_data_intel.define('nir_resource_intel_non_uniform', 8)
nir_resource_intel_sampler_embedded = nir_resource_data_intel.define('nir_resource_intel_sampler_embedded', 16)
class nir_preamble_class(Annotated[int, ctypes.c_uint32], c.Enum): pass
nir_preamble_class_general = nir_preamble_class.define('nir_preamble_class_general', 0)
nir_preamble_class_image = nir_preamble_class.define('nir_preamble_class_image', 1)
nir_preamble_num_classes = nir_preamble_class.define('nir_preamble_num_classes', 2)
class nir_cmat_signed(Annotated[int, ctypes.c_uint32], c.Enum): pass
NIR_CMAT_A_SIGNED = nir_cmat_signed.define('NIR_CMAT_A_SIGNED', 1)
NIR_CMAT_B_SIGNED = nir_cmat_signed.define('NIR_CMAT_B_SIGNED', 2)
NIR_CMAT_C_SIGNED = nir_cmat_signed.define('NIR_CMAT_C_SIGNED', 4)
NIR_CMAT_RESULT_SIGNED = nir_cmat_signed.define('NIR_CMAT_RESULT_SIGNED', 8)
@c.record
class nir_const_value(c.Struct):
SIZE = 8
b: Annotated[Annotated[bool, ctypes.c_bool], 0]
f32: Annotated[Annotated[float, ctypes.c_float], 0]
f64: Annotated[Annotated[float, ctypes.c_double], 0]
i8: Annotated[int8_t, 0]
u8: Annotated[uint8_t, 0]
i16: Annotated[int16_t, 0]
u16: Annotated[uint16_t, 0]
i32: Annotated[int32_t, 0]
u32: Annotated[uint32_t, 0]
i64: Annotated[int64_t, 0]
u64: Annotated[uint64_t, 0]
int8_t: TypeAlias = Annotated[int, ctypes.c_byte]
uint8_t: TypeAlias = Annotated[int, ctypes.c_ubyte]
int16_t: TypeAlias = Annotated[int, ctypes.c_int16]
uint16_t: TypeAlias = Annotated[int, ctypes.c_uint16]
int32_t: TypeAlias = Annotated[int, ctypes.c_int32]
int64_t: TypeAlias = Annotated[int, ctypes.c_int64]
uint64_t: TypeAlias = Annotated[int, ctypes.c_uint64]
@dll.bind
def nir_const_value_for_float(b:Annotated[float, ctypes.c_double], bit_size:Annotated[int, ctypes.c_uint32]) -> nir_const_value: ...
@dll.bind
def nir_const_value_as_float(value:nir_const_value, bit_size:Annotated[int, ctypes.c_uint32]) -> Annotated[float, ctypes.c_double]: ...
@c.record
class struct_nir_constant(c.Struct):
SIZE = 144
values: Annotated[c.Array[nir_const_value, Literal[16]], 0]
is_null_constant: Annotated[Annotated[bool, ctypes.c_bool], 128]
num_elements: Annotated[Annotated[int, ctypes.c_uint32], 132]
elements: Annotated[c.POINTER[c.POINTER[nir_constant]], 136]
nir_constant: TypeAlias = struct_nir_constant
class nir_depth_layout(Annotated[int, ctypes.c_uint32], c.Enum): pass
nir_depth_layout_none = nir_depth_layout.define('nir_depth_layout_none', 0)
nir_depth_layout_any = nir_depth_layout.define('nir_depth_layout_any', 1)
nir_depth_layout_greater = nir_depth_layout.define('nir_depth_layout_greater', 2)
nir_depth_layout_less = nir_depth_layout.define('nir_depth_layout_less', 3)
nir_depth_layout_unchanged = nir_depth_layout.define('nir_depth_layout_unchanged', 4)
class nir_var_declaration_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
nir_var_declared_normally = nir_var_declaration_type.define('nir_var_declared_normally', 0)
nir_var_declared_implicitly = nir_var_declaration_type.define('nir_var_declared_implicitly', 1)
nir_var_hidden = nir_var_declaration_type.define('nir_var_hidden', 2)
@c.record
class struct_nir_variable_data(c.Struct):
SIZE = 56
mode: Annotated[Annotated[int, ctypes.c_uint32], 0, 21, 0]
read_only: Annotated[Annotated[int, ctypes.c_uint32], 2, 1, 5]
centroid: Annotated[Annotated[int, ctypes.c_uint32], 2, 1, 6]
sample: Annotated[Annotated[int, ctypes.c_uint32], 2, 1, 7]
patch: Annotated[Annotated[int, ctypes.c_uint32], 3, 1, 0]
invariant: Annotated[Annotated[int, ctypes.c_uint32], 3, 1, 1]
explicit_invariant: Annotated[Annotated[int, ctypes.c_uint32], 3, 1, 2]
ray_query: Annotated[Annotated[int, ctypes.c_uint32], 3, 1, 3]
precision: Annotated[Annotated[int, ctypes.c_uint32], 3, 2, 4]
assigned: Annotated[Annotated[int, ctypes.c_uint32], 3, 1, 6]
cannot_coalesce: Annotated[Annotated[int, ctypes.c_uint32], 3, 1, 7]
always_active_io: Annotated[Annotated[int, ctypes.c_uint32], 4, 1, 0]
interpolation: Annotated[Annotated[int, ctypes.c_uint32], 4, 3, 1]
location_frac: Annotated[Annotated[int, ctypes.c_uint32], 4, 2, 4]
compact: Annotated[Annotated[int, ctypes.c_uint32], 4, 1, 6]
fb_fetch_output: Annotated[Annotated[int, ctypes.c_uint32], 4, 1, 7]
bindless: Annotated[Annotated[int, ctypes.c_uint32], 5, 1, 0]
explicit_binding: Annotated[Annotated[int, ctypes.c_uint32], 5, 1, 1]
explicit_location: Annotated[Annotated[int, ctypes.c_uint32], 5, 1, 2]
implicit_sized_array: Annotated[Annotated[int, ctypes.c_uint32], 5, 1, 3]
max_array_access: Annotated[Annotated[int, ctypes.c_int32], 8]
has_initializer: Annotated[Annotated[int, ctypes.c_uint32], 12, 1, 0]
is_implicit_initializer: Annotated[Annotated[int, ctypes.c_uint32], 12, 1, 1]
is_xfb: Annotated[Annotated[int, ctypes.c_uint32], 12, 1, 2]
is_xfb_only: Annotated[Annotated[int, ctypes.c_uint32], 12, 1, 3]
explicit_xfb_buffer: Annotated[Annotated[int, ctypes.c_uint32], 12, 1, 4]
explicit_xfb_stride: Annotated[Annotated[int, ctypes.c_uint32], 12, 1, 5]
explicit_offset: Annotated[Annotated[int, ctypes.c_uint32], 12, 1, 6]
matrix_layout: Annotated[Annotated[int, ctypes.c_uint32], 12, 2, 7]
from_named_ifc_block: Annotated[Annotated[int, ctypes.c_uint32], 13, 1, 1]
from_ssbo_unsized_array: Annotated[Annotated[int, ctypes.c_uint32], 13, 1, 2]
must_be_shader_input: Annotated[Annotated[int, ctypes.c_uint32], 13, 1, 3]
used: Annotated[Annotated[int, ctypes.c_uint32], 13, 1, 4]
how_declared: Annotated[Annotated[int, ctypes.c_uint32], 13, 2, 5]
per_view: Annotated[Annotated[int, ctypes.c_uint32], 13, 1, 7]
per_primitive: Annotated[Annotated[int, ctypes.c_uint32], 14, 1, 0]
per_vertex: Annotated[Annotated[int, ctypes.c_uint32], 14, 1, 1]
aliased_shared_memory: Annotated[Annotated[int, ctypes.c_uint32], 14, 1, 2]
depth_layout: Annotated[Annotated[int, ctypes.c_uint32], 14, 3, 3]
stream: Annotated[Annotated[int, ctypes.c_uint32], 14, 9, 6]
access: Annotated[Annotated[int, ctypes.c_uint32], 16, 9, 0]
descriptor_set: Annotated[Annotated[int, ctypes.c_uint32], 17, 5, 1]
index: Annotated[Annotated[int, ctypes.c_uint32], 20]
binding: Annotated[Annotated[int, ctypes.c_uint32], 24]
location: Annotated[Annotated[int, ctypes.c_int32], 28]
alignment: Annotated[Annotated[int, ctypes.c_uint32], 32]
driver_location: Annotated[Annotated[int, ctypes.c_uint32], 36]
offset: Annotated[Annotated[int, ctypes.c_uint32], 40]
image: Annotated[struct_nir_variable_data_image, 44]
sampler: Annotated[struct_nir_variable_data_sampler, 44]
xfb: Annotated[struct_nir_variable_data_xfb, 44]
node_name: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 48]
@c.record
class struct_nir_variable_data_image(c.Struct):
SIZE = 4
format: Annotated[enum_pipe_format, 0]
class enum_pipe_format(Annotated[int, ctypes.c_uint32], c.Enum): pass
# ---------------------------------------------------------------------------
# Generated enum_pipe_format members: Gallium PIPE_FORMAT_* pixel/vertex
# format codes, mirrored one-to-one from the upstream C enum. Names, values,
# and definition order are part of the binding contract — do not edit by hand.
# ---------------------------------------------------------------------------
PIPE_FORMAT_NONE = enum_pipe_format.define('PIPE_FORMAT_NONE', 0)
PIPE_FORMAT_R64_UINT = enum_pipe_format.define('PIPE_FORMAT_R64_UINT', 1)
PIPE_FORMAT_R64G64_UINT = enum_pipe_format.define('PIPE_FORMAT_R64G64_UINT', 2)
PIPE_FORMAT_R64G64B64_UINT = enum_pipe_format.define('PIPE_FORMAT_R64G64B64_UINT', 3)
PIPE_FORMAT_R64G64B64A64_UINT = enum_pipe_format.define('PIPE_FORMAT_R64G64B64A64_UINT', 4)
PIPE_FORMAT_R64_SINT = enum_pipe_format.define('PIPE_FORMAT_R64_SINT', 5)
PIPE_FORMAT_R64G64_SINT = enum_pipe_format.define('PIPE_FORMAT_R64G64_SINT', 6)
PIPE_FORMAT_R64G64B64_SINT = enum_pipe_format.define('PIPE_FORMAT_R64G64B64_SINT', 7)
PIPE_FORMAT_R64G64B64A64_SINT = enum_pipe_format.define('PIPE_FORMAT_R64G64B64A64_SINT', 8)
PIPE_FORMAT_R64_FLOAT = enum_pipe_format.define('PIPE_FORMAT_R64_FLOAT', 9)
PIPE_FORMAT_R64G64_FLOAT = enum_pipe_format.define('PIPE_FORMAT_R64G64_FLOAT', 10)
PIPE_FORMAT_R64G64B64_FLOAT = enum_pipe_format.define('PIPE_FORMAT_R64G64B64_FLOAT', 11)
PIPE_FORMAT_R64G64B64A64_FLOAT = enum_pipe_format.define('PIPE_FORMAT_R64G64B64A64_FLOAT', 12)
PIPE_FORMAT_R32_FLOAT = enum_pipe_format.define('PIPE_FORMAT_R32_FLOAT', 13)
PIPE_FORMAT_R32G32_FLOAT = enum_pipe_format.define('PIPE_FORMAT_R32G32_FLOAT', 14)
PIPE_FORMAT_R32G32B32_FLOAT = enum_pipe_format.define('PIPE_FORMAT_R32G32B32_FLOAT', 15)
PIPE_FORMAT_R32G32B32A32_FLOAT = enum_pipe_format.define('PIPE_FORMAT_R32G32B32A32_FLOAT', 16)
PIPE_FORMAT_R32_UNORM = enum_pipe_format.define('PIPE_FORMAT_R32_UNORM', 17)
PIPE_FORMAT_R32G32_UNORM = enum_pipe_format.define('PIPE_FORMAT_R32G32_UNORM', 18)
PIPE_FORMAT_R32G32B32_UNORM = enum_pipe_format.define('PIPE_FORMAT_R32G32B32_UNORM', 19)
PIPE_FORMAT_R32G32B32A32_UNORM = enum_pipe_format.define('PIPE_FORMAT_R32G32B32A32_UNORM', 20)
PIPE_FORMAT_R32_USCALED = enum_pipe_format.define('PIPE_FORMAT_R32_USCALED', 21)
PIPE_FORMAT_R32G32_USCALED = enum_pipe_format.define('PIPE_FORMAT_R32G32_USCALED', 22)
PIPE_FORMAT_R32G32B32_USCALED = enum_pipe_format.define('PIPE_FORMAT_R32G32B32_USCALED', 23)
PIPE_FORMAT_R32G32B32A32_USCALED = enum_pipe_format.define('PIPE_FORMAT_R32G32B32A32_USCALED', 24)
PIPE_FORMAT_R32_SNORM = enum_pipe_format.define('PIPE_FORMAT_R32_SNORM', 25)
PIPE_FORMAT_R32G32_SNORM = enum_pipe_format.define('PIPE_FORMAT_R32G32_SNORM', 26)
PIPE_FORMAT_R32G32B32_SNORM = enum_pipe_format.define('PIPE_FORMAT_R32G32B32_SNORM', 27)
PIPE_FORMAT_R32G32B32A32_SNORM = enum_pipe_format.define('PIPE_FORMAT_R32G32B32A32_SNORM', 28)
PIPE_FORMAT_R32_SSCALED = enum_pipe_format.define('PIPE_FORMAT_R32_SSCALED', 29)
PIPE_FORMAT_R32G32_SSCALED = enum_pipe_format.define('PIPE_FORMAT_R32G32_SSCALED', 30)
PIPE_FORMAT_R32G32B32_SSCALED = enum_pipe_format.define('PIPE_FORMAT_R32G32B32_SSCALED', 31)
PIPE_FORMAT_R32G32B32A32_SSCALED = enum_pipe_format.define('PIPE_FORMAT_R32G32B32A32_SSCALED', 32)
PIPE_FORMAT_R16_UNORM = enum_pipe_format.define('PIPE_FORMAT_R16_UNORM', 33)
PIPE_FORMAT_R16G16_UNORM = enum_pipe_format.define('PIPE_FORMAT_R16G16_UNORM', 34)
PIPE_FORMAT_R16G16B16_UNORM = enum_pipe_format.define('PIPE_FORMAT_R16G16B16_UNORM', 35)
PIPE_FORMAT_R16G16B16A16_UNORM = enum_pipe_format.define('PIPE_FORMAT_R16G16B16A16_UNORM', 36)
PIPE_FORMAT_R16_USCALED = enum_pipe_format.define('PIPE_FORMAT_R16_USCALED', 37)
PIPE_FORMAT_R16G16_USCALED = enum_pipe_format.define('PIPE_FORMAT_R16G16_USCALED', 38)
PIPE_FORMAT_R16G16B16_USCALED = enum_pipe_format.define('PIPE_FORMAT_R16G16B16_USCALED', 39)
PIPE_FORMAT_R16G16B16A16_USCALED = enum_pipe_format.define('PIPE_FORMAT_R16G16B16A16_USCALED', 40)
PIPE_FORMAT_R16_SNORM = enum_pipe_format.define('PIPE_FORMAT_R16_SNORM', 41)
PIPE_FORMAT_R16G16_SNORM = enum_pipe_format.define('PIPE_FORMAT_R16G16_SNORM', 42)
PIPE_FORMAT_R16G16B16_SNORM = enum_pipe_format.define('PIPE_FORMAT_R16G16B16_SNORM', 43)
PIPE_FORMAT_R16G16B16A16_SNORM = enum_pipe_format.define('PIPE_FORMAT_R16G16B16A16_SNORM', 44)
PIPE_FORMAT_R16_SSCALED = enum_pipe_format.define('PIPE_FORMAT_R16_SSCALED', 45)
PIPE_FORMAT_R16G16_SSCALED = enum_pipe_format.define('PIPE_FORMAT_R16G16_SSCALED', 46)
PIPE_FORMAT_R16G16B16_SSCALED = enum_pipe_format.define('PIPE_FORMAT_R16G16B16_SSCALED', 47)
PIPE_FORMAT_R16G16B16A16_SSCALED = enum_pipe_format.define('PIPE_FORMAT_R16G16B16A16_SSCALED', 48)
PIPE_FORMAT_R8_UNORM = enum_pipe_format.define('PIPE_FORMAT_R8_UNORM', 49)
PIPE_FORMAT_R8G8_UNORM = enum_pipe_format.define('PIPE_FORMAT_R8G8_UNORM', 50)
PIPE_FORMAT_R8G8B8_UNORM = enum_pipe_format.define('PIPE_FORMAT_R8G8B8_UNORM', 51)
PIPE_FORMAT_B8G8R8_UNORM = enum_pipe_format.define('PIPE_FORMAT_B8G8R8_UNORM', 52)
PIPE_FORMAT_R8G8B8A8_UNORM = enum_pipe_format.define('PIPE_FORMAT_R8G8B8A8_UNORM', 53)
PIPE_FORMAT_B8G8R8A8_UNORM = enum_pipe_format.define('PIPE_FORMAT_B8G8R8A8_UNORM', 54)
PIPE_FORMAT_R8_USCALED = enum_pipe_format.define('PIPE_FORMAT_R8_USCALED', 55)
PIPE_FORMAT_R8G8_USCALED = enum_pipe_format.define('PIPE_FORMAT_R8G8_USCALED', 56)
PIPE_FORMAT_R8G8B8_USCALED = enum_pipe_format.define('PIPE_FORMAT_R8G8B8_USCALED', 57)
PIPE_FORMAT_B8G8R8_USCALED = enum_pipe_format.define('PIPE_FORMAT_B8G8R8_USCALED', 58)
PIPE_FORMAT_R8G8B8A8_USCALED = enum_pipe_format.define('PIPE_FORMAT_R8G8B8A8_USCALED', 59)
PIPE_FORMAT_B8G8R8A8_USCALED = enum_pipe_format.define('PIPE_FORMAT_B8G8R8A8_USCALED', 60)
PIPE_FORMAT_A8B8G8R8_USCALED = enum_pipe_format.define('PIPE_FORMAT_A8B8G8R8_USCALED', 61)
PIPE_FORMAT_R8_SNORM = enum_pipe_format.define('PIPE_FORMAT_R8_SNORM', 62)
PIPE_FORMAT_R8G8_SNORM = enum_pipe_format.define('PIPE_FORMAT_R8G8_SNORM', 63)
PIPE_FORMAT_R8G8B8_SNORM = enum_pipe_format.define('PIPE_FORMAT_R8G8B8_SNORM', 64)
PIPE_FORMAT_B8G8R8_SNORM = enum_pipe_format.define('PIPE_FORMAT_B8G8R8_SNORM', 65)
PIPE_FORMAT_R8G8B8A8_SNORM = enum_pipe_format.define('PIPE_FORMAT_R8G8B8A8_SNORM', 66)
PIPE_FORMAT_B8G8R8A8_SNORM = enum_pipe_format.define('PIPE_FORMAT_B8G8R8A8_SNORM', 67)
PIPE_FORMAT_R8_SSCALED = enum_pipe_format.define('PIPE_FORMAT_R8_SSCALED', 68)
PIPE_FORMAT_R8G8_SSCALED = enum_pipe_format.define('PIPE_FORMAT_R8G8_SSCALED', 69)
PIPE_FORMAT_R8G8B8_SSCALED = enum_pipe_format.define('PIPE_FORMAT_R8G8B8_SSCALED', 70)
PIPE_FORMAT_B8G8R8_SSCALED = enum_pipe_format.define('PIPE_FORMAT_B8G8R8_SSCALED', 71)
PIPE_FORMAT_R8G8B8A8_SSCALED = enum_pipe_format.define('PIPE_FORMAT_R8G8B8A8_SSCALED', 72)
PIPE_FORMAT_B8G8R8A8_SSCALED = enum_pipe_format.define('PIPE_FORMAT_B8G8R8A8_SSCALED', 73)
PIPE_FORMAT_A8B8G8R8_SSCALED = enum_pipe_format.define('PIPE_FORMAT_A8B8G8R8_SSCALED', 74)
PIPE_FORMAT_A8R8G8B8_UNORM = enum_pipe_format.define('PIPE_FORMAT_A8R8G8B8_UNORM', 75)
PIPE_FORMAT_R32_FIXED = enum_pipe_format.define('PIPE_FORMAT_R32_FIXED', 76)
PIPE_FORMAT_R32G32_FIXED = enum_pipe_format.define('PIPE_FORMAT_R32G32_FIXED', 77)
PIPE_FORMAT_R32G32B32_FIXED = enum_pipe_format.define('PIPE_FORMAT_R32G32B32_FIXED', 78)
PIPE_FORMAT_R32G32B32A32_FIXED = enum_pipe_format.define('PIPE_FORMAT_R32G32B32A32_FIXED', 79)
PIPE_FORMAT_R16_FLOAT = enum_pipe_format.define('PIPE_FORMAT_R16_FLOAT', 80)
PIPE_FORMAT_R16G16_FLOAT = enum_pipe_format.define('PIPE_FORMAT_R16G16_FLOAT', 81)
PIPE_FORMAT_R16G16B16_FLOAT = enum_pipe_format.define('PIPE_FORMAT_R16G16B16_FLOAT', 82)
PIPE_FORMAT_R16G16B16A16_FLOAT = enum_pipe_format.define('PIPE_FORMAT_R16G16B16A16_FLOAT', 83)
PIPE_FORMAT_R8_UINT = enum_pipe_format.define('PIPE_FORMAT_R8_UINT', 84)
PIPE_FORMAT_R8G8_UINT = enum_pipe_format.define('PIPE_FORMAT_R8G8_UINT', 85)
PIPE_FORMAT_R8G8B8_UINT = enum_pipe_format.define('PIPE_FORMAT_R8G8B8_UINT', 86)
PIPE_FORMAT_B8G8R8_UINT = enum_pipe_format.define('PIPE_FORMAT_B8G8R8_UINT', 87)
PIPE_FORMAT_R8G8B8A8_UINT = enum_pipe_format.define('PIPE_FORMAT_R8G8B8A8_UINT', 88)
PIPE_FORMAT_B8G8R8A8_UINT = enum_pipe_format.define('PIPE_FORMAT_B8G8R8A8_UINT', 89)
PIPE_FORMAT_R8_SINT = enum_pipe_format.define('PIPE_FORMAT_R8_SINT', 90)
PIPE_FORMAT_R8G8_SINT = enum_pipe_format.define('PIPE_FORMAT_R8G8_SINT', 91)
PIPE_FORMAT_R8G8B8_SINT = enum_pipe_format.define('PIPE_FORMAT_R8G8B8_SINT', 92)
PIPE_FORMAT_B8G8R8_SINT = enum_pipe_format.define('PIPE_FORMAT_B8G8R8_SINT', 93)
PIPE_FORMAT_R8G8B8A8_SINT = enum_pipe_format.define('PIPE_FORMAT_R8G8B8A8_SINT', 94)
PIPE_FORMAT_B8G8R8A8_SINT = enum_pipe_format.define('PIPE_FORMAT_B8G8R8A8_SINT', 95)
PIPE_FORMAT_R16_UINT = enum_pipe_format.define('PIPE_FORMAT_R16_UINT', 96)
PIPE_FORMAT_R16G16_UINT = enum_pipe_format.define('PIPE_FORMAT_R16G16_UINT', 97)
PIPE_FORMAT_R16G16B16_UINT = enum_pipe_format.define('PIPE_FORMAT_R16G16B16_UINT', 98)
PIPE_FORMAT_R16G16B16A16_UINT = enum_pipe_format.define('PIPE_FORMAT_R16G16B16A16_UINT', 99)
PIPE_FORMAT_R16_SINT = enum_pipe_format.define('PIPE_FORMAT_R16_SINT', 100)
PIPE_FORMAT_R16G16_SINT = enum_pipe_format.define('PIPE_FORMAT_R16G16_SINT', 101)
PIPE_FORMAT_R16G16B16_SINT = enum_pipe_format.define('PIPE_FORMAT_R16G16B16_SINT', 102)
PIPE_FORMAT_R16G16B16A16_SINT = enum_pipe_format.define('PIPE_FORMAT_R16G16B16A16_SINT', 103)
PIPE_FORMAT_R32_UINT = enum_pipe_format.define('PIPE_FORMAT_R32_UINT', 104)
PIPE_FORMAT_R32G32_UINT = enum_pipe_format.define('PIPE_FORMAT_R32G32_UINT', 105)
PIPE_FORMAT_R32G32B32_UINT = enum_pipe_format.define('PIPE_FORMAT_R32G32B32_UINT', 106)
PIPE_FORMAT_R32G32B32A32_UINT = enum_pipe_format.define('PIPE_FORMAT_R32G32B32A32_UINT', 107)
PIPE_FORMAT_R32_SINT = enum_pipe_format.define('PIPE_FORMAT_R32_SINT', 108)
PIPE_FORMAT_R32G32_SINT = enum_pipe_format.define('PIPE_FORMAT_R32G32_SINT', 109)
PIPE_FORMAT_R32G32B32_SINT = enum_pipe_format.define('PIPE_FORMAT_R32G32B32_SINT', 110)
PIPE_FORMAT_R32G32B32A32_SINT = enum_pipe_format.define('PIPE_FORMAT_R32G32B32A32_SINT', 111)
PIPE_FORMAT_R10G10B10A2_UNORM = enum_pipe_format.define('PIPE_FORMAT_R10G10B10A2_UNORM', 112)
PIPE_FORMAT_R10G10B10A2_SNORM = enum_pipe_format.define('PIPE_FORMAT_R10G10B10A2_SNORM', 113)
PIPE_FORMAT_R10G10B10A2_USCALED = enum_pipe_format.define('PIPE_FORMAT_R10G10B10A2_USCALED', 114)
PIPE_FORMAT_R10G10B10A2_SSCALED = enum_pipe_format.define('PIPE_FORMAT_R10G10B10A2_SSCALED', 115)
PIPE_FORMAT_B10G10R10A2_UNORM = enum_pipe_format.define('PIPE_FORMAT_B10G10R10A2_UNORM', 116)
PIPE_FORMAT_B10G10R10A2_SNORM = enum_pipe_format.define('PIPE_FORMAT_B10G10R10A2_SNORM', 117)
PIPE_FORMAT_B10G10R10A2_USCALED = enum_pipe_format.define('PIPE_FORMAT_B10G10R10A2_USCALED', 118)
PIPE_FORMAT_B10G10R10A2_SSCALED = enum_pipe_format.define('PIPE_FORMAT_B10G10R10A2_SSCALED', 119)
PIPE_FORMAT_R11G11B10_FLOAT = enum_pipe_format.define('PIPE_FORMAT_R11G11B10_FLOAT', 120)
PIPE_FORMAT_R10G10B10A2_UINT = enum_pipe_format.define('PIPE_FORMAT_R10G10B10A2_UINT', 121)
PIPE_FORMAT_R10G10B10A2_SINT = enum_pipe_format.define('PIPE_FORMAT_R10G10B10A2_SINT', 122)
PIPE_FORMAT_B10G10R10A2_UINT = enum_pipe_format.define('PIPE_FORMAT_B10G10R10A2_UINT', 123)
PIPE_FORMAT_B10G10R10A2_SINT = enum_pipe_format.define('PIPE_FORMAT_B10G10R10A2_SINT', 124)
PIPE_FORMAT_B8G8R8X8_UNORM = enum_pipe_format.define('PIPE_FORMAT_B8G8R8X8_UNORM', 125)
PIPE_FORMAT_X8B8G8R8_UNORM = enum_pipe_format.define('PIPE_FORMAT_X8B8G8R8_UNORM', 126)
PIPE_FORMAT_X8R8G8B8_UNORM = enum_pipe_format.define('PIPE_FORMAT_X8R8G8B8_UNORM', 127)
PIPE_FORMAT_B5G5R5A1_UNORM = enum_pipe_format.define('PIPE_FORMAT_B5G5R5A1_UNORM', 128)
PIPE_FORMAT_R4G4B4A4_UNORM = enum_pipe_format.define('PIPE_FORMAT_R4G4B4A4_UNORM', 129)
PIPE_FORMAT_B4G4R4A4_UNORM = enum_pipe_format.define('PIPE_FORMAT_B4G4R4A4_UNORM', 130)
PIPE_FORMAT_R5G6B5_UNORM = enum_pipe_format.define('PIPE_FORMAT_R5G6B5_UNORM', 131)
PIPE_FORMAT_B5G6R5_UNORM = enum_pipe_format.define('PIPE_FORMAT_B5G6R5_UNORM', 132)
PIPE_FORMAT_L8_UNORM = enum_pipe_format.define('PIPE_FORMAT_L8_UNORM', 133)
PIPE_FORMAT_A8_UNORM = enum_pipe_format.define('PIPE_FORMAT_A8_UNORM', 134)
PIPE_FORMAT_I8_UNORM = enum_pipe_format.define('PIPE_FORMAT_I8_UNORM', 135)
PIPE_FORMAT_L8A8_UNORM = enum_pipe_format.define('PIPE_FORMAT_L8A8_UNORM', 136)
PIPE_FORMAT_L16_UNORM = enum_pipe_format.define('PIPE_FORMAT_L16_UNORM', 137)
PIPE_FORMAT_UYVY = enum_pipe_format.define('PIPE_FORMAT_UYVY', 138)
PIPE_FORMAT_VYUY = enum_pipe_format.define('PIPE_FORMAT_VYUY', 139)
PIPE_FORMAT_YUYV = enum_pipe_format.define('PIPE_FORMAT_YUYV', 140)
PIPE_FORMAT_YVYU = enum_pipe_format.define('PIPE_FORMAT_YVYU', 141)
PIPE_FORMAT_Z16_UNORM = enum_pipe_format.define('PIPE_FORMAT_Z16_UNORM', 142)
PIPE_FORMAT_Z16_UNORM_S8_UINT = enum_pipe_format.define('PIPE_FORMAT_Z16_UNORM_S8_UINT', 143)
PIPE_FORMAT_Z32_UNORM = enum_pipe_format.define('PIPE_FORMAT_Z32_UNORM', 144)
PIPE_FORMAT_Z32_FLOAT = enum_pipe_format.define('PIPE_FORMAT_Z32_FLOAT', 145)
PIPE_FORMAT_Z24_UNORM_S8_UINT = enum_pipe_format.define('PIPE_FORMAT_Z24_UNORM_S8_UINT', 146)
PIPE_FORMAT_S8_UINT_Z24_UNORM = enum_pipe_format.define('PIPE_FORMAT_S8_UINT_Z24_UNORM', 147)
PIPE_FORMAT_Z24X8_UNORM = enum_pipe_format.define('PIPE_FORMAT_Z24X8_UNORM', 148)
PIPE_FORMAT_X8Z24_UNORM = enum_pipe_format.define('PIPE_FORMAT_X8Z24_UNORM', 149)
PIPE_FORMAT_S8_UINT = enum_pipe_format.define('PIPE_FORMAT_S8_UINT', 150)
PIPE_FORMAT_L8_SRGB = enum_pipe_format.define('PIPE_FORMAT_L8_SRGB', 151)
PIPE_FORMAT_R8_SRGB = enum_pipe_format.define('PIPE_FORMAT_R8_SRGB', 152)
PIPE_FORMAT_L8A8_SRGB = enum_pipe_format.define('PIPE_FORMAT_L8A8_SRGB', 153)
PIPE_FORMAT_R8G8_SRGB = enum_pipe_format.define('PIPE_FORMAT_R8G8_SRGB', 154)
PIPE_FORMAT_R8G8B8_SRGB = enum_pipe_format.define('PIPE_FORMAT_R8G8B8_SRGB', 155)
PIPE_FORMAT_B8G8R8_SRGB = enum_pipe_format.define('PIPE_FORMAT_B8G8R8_SRGB', 156)
PIPE_FORMAT_A8B8G8R8_SRGB = enum_pipe_format.define('PIPE_FORMAT_A8B8G8R8_SRGB', 157)
PIPE_FORMAT_X8B8G8R8_SRGB = enum_pipe_format.define('PIPE_FORMAT_X8B8G8R8_SRGB', 158)
PIPE_FORMAT_B8G8R8A8_SRGB = enum_pipe_format.define('PIPE_FORMAT_B8G8R8A8_SRGB', 159)
PIPE_FORMAT_B8G8R8X8_SRGB = enum_pipe_format.define('PIPE_FORMAT_B8G8R8X8_SRGB', 160)
PIPE_FORMAT_A8R8G8B8_SRGB = enum_pipe_format.define('PIPE_FORMAT_A8R8G8B8_SRGB', 161)
PIPE_FORMAT_X8R8G8B8_SRGB = enum_pipe_format.define('PIPE_FORMAT_X8R8G8B8_SRGB', 162)
PIPE_FORMAT_R8G8B8A8_SRGB = enum_pipe_format.define('PIPE_FORMAT_R8G8B8A8_SRGB', 163)
PIPE_FORMAT_DXT1_RGB = enum_pipe_format.define('PIPE_FORMAT_DXT1_RGB', 164)
PIPE_FORMAT_DXT1_RGBA = enum_pipe_format.define('PIPE_FORMAT_DXT1_RGBA', 165)
PIPE_FORMAT_DXT3_RGBA = enum_pipe_format.define('PIPE_FORMAT_DXT3_RGBA', 166)
PIPE_FORMAT_DXT5_RGBA = enum_pipe_format.define('PIPE_FORMAT_DXT5_RGBA', 167)
PIPE_FORMAT_DXT1_SRGB = enum_pipe_format.define('PIPE_FORMAT_DXT1_SRGB', 168)
PIPE_FORMAT_DXT1_SRGBA = enum_pipe_format.define('PIPE_FORMAT_DXT1_SRGBA', 169)
PIPE_FORMAT_DXT3_SRGBA = enum_pipe_format.define('PIPE_FORMAT_DXT3_SRGBA', 170)
PIPE_FORMAT_DXT5_SRGBA = enum_pipe_format.define('PIPE_FORMAT_DXT5_SRGBA', 171)
PIPE_FORMAT_RGTC1_UNORM = enum_pipe_format.define('PIPE_FORMAT_RGTC1_UNORM', 172)
PIPE_FORMAT_RGTC1_SNORM = enum_pipe_format.define('PIPE_FORMAT_RGTC1_SNORM', 173)
PIPE_FORMAT_RGTC2_UNORM = enum_pipe_format.define('PIPE_FORMAT_RGTC2_UNORM', 174)
PIPE_FORMAT_RGTC2_SNORM = enum_pipe_format.define('PIPE_FORMAT_RGTC2_SNORM', 175)
PIPE_FORMAT_R8G8_B8G8_UNORM = enum_pipe_format.define('PIPE_FORMAT_R8G8_B8G8_UNORM', 176)
PIPE_FORMAT_G8R8_G8B8_UNORM = enum_pipe_format.define('PIPE_FORMAT_G8R8_G8B8_UNORM', 177)
PIPE_FORMAT_X6G10_X6B10X6R10_420_UNORM = enum_pipe_format.define('PIPE_FORMAT_X6G10_X6B10X6R10_420_UNORM', 178)
PIPE_FORMAT_X4G12_X4B12X4R12_420_UNORM = enum_pipe_format.define('PIPE_FORMAT_X4G12_X4B12X4R12_420_UNORM', 179)
PIPE_FORMAT_X6R10_UNORM = enum_pipe_format.define('PIPE_FORMAT_X6R10_UNORM', 180)
PIPE_FORMAT_X6R10X6G10_UNORM = enum_pipe_format.define('PIPE_FORMAT_X6R10X6G10_UNORM', 181)
PIPE_FORMAT_X4R12_UNORM = enum_pipe_format.define('PIPE_FORMAT_X4R12_UNORM', 182)
PIPE_FORMAT_X4R12X4G12_UNORM = enum_pipe_format.define('PIPE_FORMAT_X4R12X4G12_UNORM', 183)
PIPE_FORMAT_R8SG8SB8UX8U_NORM = enum_pipe_format.define('PIPE_FORMAT_R8SG8SB8UX8U_NORM', 184)
PIPE_FORMAT_R5SG5SB6U_NORM = enum_pipe_format.define('PIPE_FORMAT_R5SG5SB6U_NORM', 185)
PIPE_FORMAT_A8B8G8R8_UNORM = enum_pipe_format.define('PIPE_FORMAT_A8B8G8R8_UNORM', 186)
PIPE_FORMAT_B5G5R5X1_UNORM = enum_pipe_format.define('PIPE_FORMAT_B5G5R5X1_UNORM', 187)
PIPE_FORMAT_R9G9B9E5_FLOAT = enum_pipe_format.define('PIPE_FORMAT_R9G9B9E5_FLOAT', 188)
PIPE_FORMAT_Z32_FLOAT_S8X24_UINT = enum_pipe_format.define('PIPE_FORMAT_Z32_FLOAT_S8X24_UINT', 189)
PIPE_FORMAT_R1_UNORM = enum_pipe_format.define('PIPE_FORMAT_R1_UNORM', 190)
PIPE_FORMAT_R10G10B10X2_USCALED = enum_pipe_format.define('PIPE_FORMAT_R10G10B10X2_USCALED', 191)
PIPE_FORMAT_R10G10B10X2_SNORM = enum_pipe_format.define('PIPE_FORMAT_R10G10B10X2_SNORM', 192)
PIPE_FORMAT_L4A4_UNORM = enum_pipe_format.define('PIPE_FORMAT_L4A4_UNORM', 193)
PIPE_FORMAT_A2R10G10B10_UNORM = enum_pipe_format.define('PIPE_FORMAT_A2R10G10B10_UNORM', 194)
PIPE_FORMAT_A2B10G10R10_UNORM = enum_pipe_format.define('PIPE_FORMAT_A2B10G10R10_UNORM', 195)
PIPE_FORMAT_R10SG10SB10SA2U_NORM = enum_pipe_format.define('PIPE_FORMAT_R10SG10SB10SA2U_NORM', 196)
PIPE_FORMAT_R8G8Bx_SNORM = enum_pipe_format.define('PIPE_FORMAT_R8G8Bx_SNORM', 197)
PIPE_FORMAT_R8G8B8X8_UNORM = enum_pipe_format.define('PIPE_FORMAT_R8G8B8X8_UNORM', 198)
PIPE_FORMAT_B4G4R4X4_UNORM = enum_pipe_format.define('PIPE_FORMAT_B4G4R4X4_UNORM', 199)
PIPE_FORMAT_X24S8_UINT = enum_pipe_format.define('PIPE_FORMAT_X24S8_UINT', 200)
PIPE_FORMAT_S8X24_UINT = enum_pipe_format.define('PIPE_FORMAT_S8X24_UINT', 201)
PIPE_FORMAT_X32_S8X24_UINT = enum_pipe_format.define('PIPE_FORMAT_X32_S8X24_UINT', 202)
PIPE_FORMAT_R3G3B2_UNORM = enum_pipe_format.define('PIPE_FORMAT_R3G3B2_UNORM', 203)
PIPE_FORMAT_B2G3R3_UNORM = enum_pipe_format.define('PIPE_FORMAT_B2G3R3_UNORM', 204)
PIPE_FORMAT_L16A16_UNORM = enum_pipe_format.define('PIPE_FORMAT_L16A16_UNORM', 205)
PIPE_FORMAT_A16_UNORM = enum_pipe_format.define('PIPE_FORMAT_A16_UNORM', 206)
PIPE_FORMAT_I16_UNORM = enum_pipe_format.define('PIPE_FORMAT_I16_UNORM', 207)
PIPE_FORMAT_LATC1_UNORM = enum_pipe_format.define('PIPE_FORMAT_LATC1_UNORM', 208)
PIPE_FORMAT_LATC1_SNORM = enum_pipe_format.define('PIPE_FORMAT_LATC1_SNORM', 209)
PIPE_FORMAT_LATC2_UNORM = enum_pipe_format.define('PIPE_FORMAT_LATC2_UNORM', 210)
PIPE_FORMAT_LATC2_SNORM = enum_pipe_format.define('PIPE_FORMAT_LATC2_SNORM', 211)
PIPE_FORMAT_A8_SNORM = enum_pipe_format.define('PIPE_FORMAT_A8_SNORM', 212)
PIPE_FORMAT_L8_SNORM = enum_pipe_format.define('PIPE_FORMAT_L8_SNORM', 213)
PIPE_FORMAT_L8A8_SNORM = enum_pipe_format.define('PIPE_FORMAT_L8A8_SNORM', 214)
PIPE_FORMAT_I8_SNORM = enum_pipe_format.define('PIPE_FORMAT_I8_SNORM', 215)
PIPE_FORMAT_A16_SNORM = enum_pipe_format.define('PIPE_FORMAT_A16_SNORM', 216)
PIPE_FORMAT_L16_SNORM = enum_pipe_format.define('PIPE_FORMAT_L16_SNORM', 217)
PIPE_FORMAT_L16A16_SNORM = enum_pipe_format.define('PIPE_FORMAT_L16A16_SNORM', 218)
PIPE_FORMAT_I16_SNORM = enum_pipe_format.define('PIPE_FORMAT_I16_SNORM', 219)
PIPE_FORMAT_A16_FLOAT = enum_pipe_format.define('PIPE_FORMAT_A16_FLOAT', 220)
PIPE_FORMAT_L16_FLOAT = enum_pipe_format.define('PIPE_FORMAT_L16_FLOAT', 221)
PIPE_FORMAT_L16A16_FLOAT = enum_pipe_format.define('PIPE_FORMAT_L16A16_FLOAT', 222)
PIPE_FORMAT_I16_FLOAT = enum_pipe_format.define('PIPE_FORMAT_I16_FLOAT', 223)
PIPE_FORMAT_A32_FLOAT = enum_pipe_format.define('PIPE_FORMAT_A32_FLOAT', 224)
PIPE_FORMAT_L32_FLOAT = enum_pipe_format.define('PIPE_FORMAT_L32_FLOAT', 225)
PIPE_FORMAT_L32A32_FLOAT = enum_pipe_format.define('PIPE_FORMAT_L32A32_FLOAT', 226)
PIPE_FORMAT_I32_FLOAT = enum_pipe_format.define('PIPE_FORMAT_I32_FLOAT', 227)
PIPE_FORMAT_YV12 = enum_pipe_format.define('PIPE_FORMAT_YV12', 228)
PIPE_FORMAT_YV16 = enum_pipe_format.define('PIPE_FORMAT_YV16', 229)
PIPE_FORMAT_IYUV = enum_pipe_format.define('PIPE_FORMAT_IYUV', 230)
PIPE_FORMAT_NV12 = enum_pipe_format.define('PIPE_FORMAT_NV12', 231)
PIPE_FORMAT_NV21 = enum_pipe_format.define('PIPE_FORMAT_NV21', 232)
PIPE_FORMAT_NV16 = enum_pipe_format.define('PIPE_FORMAT_NV16', 233)
PIPE_FORMAT_NV15 = enum_pipe_format.define('PIPE_FORMAT_NV15', 234)
PIPE_FORMAT_NV20 = enum_pipe_format.define('PIPE_FORMAT_NV20', 235)
PIPE_FORMAT_Y8_400_UNORM = enum_pipe_format.define('PIPE_FORMAT_Y8_400_UNORM', 236)
PIPE_FORMAT_Y8_U8_V8_422_UNORM = enum_pipe_format.define('PIPE_FORMAT_Y8_U8_V8_422_UNORM', 237)
PIPE_FORMAT_Y8_U8_V8_444_UNORM = enum_pipe_format.define('PIPE_FORMAT_Y8_U8_V8_444_UNORM', 238)
PIPE_FORMAT_Y8_U8_V8_440_UNORM = enum_pipe_format.define('PIPE_FORMAT_Y8_U8_V8_440_UNORM', 239)
PIPE_FORMAT_Y10X6_U10X6_V10X6_420_UNORM = enum_pipe_format.define('PIPE_FORMAT_Y10X6_U10X6_V10X6_420_UNORM', 240)
PIPE_FORMAT_Y10X6_U10X6_V10X6_422_UNORM = enum_pipe_format.define('PIPE_FORMAT_Y10X6_U10X6_V10X6_422_UNORM', 241)
PIPE_FORMAT_Y10X6_U10X6_V10X6_444_UNORM = enum_pipe_format.define('PIPE_FORMAT_Y10X6_U10X6_V10X6_444_UNORM', 242)
PIPE_FORMAT_Y12X4_U12X4_V12X4_420_UNORM = enum_pipe_format.define('PIPE_FORMAT_Y12X4_U12X4_V12X4_420_UNORM', 243)
PIPE_FORMAT_Y12X4_U12X4_V12X4_422_UNORM = enum_pipe_format.define('PIPE_FORMAT_Y12X4_U12X4_V12X4_422_UNORM', 244)
PIPE_FORMAT_Y12X4_U12X4_V12X4_444_UNORM = enum_pipe_format.define('PIPE_FORMAT_Y12X4_U12X4_V12X4_444_UNORM', 245)
PIPE_FORMAT_Y16_U16_V16_420_UNORM = enum_pipe_format.define('PIPE_FORMAT_Y16_U16_V16_420_UNORM', 246)
PIPE_FORMAT_Y16_U16_V16_422_UNORM = enum_pipe_format.define('PIPE_FORMAT_Y16_U16_V16_422_UNORM', 247)
PIPE_FORMAT_Y16_U16V16_422_UNORM = enum_pipe_format.define('PIPE_FORMAT_Y16_U16V16_422_UNORM', 248)
PIPE_FORMAT_Y16_U16_V16_444_UNORM = enum_pipe_format.define('PIPE_FORMAT_Y16_U16_V16_444_UNORM', 249)
PIPE_FORMAT_Y8U8V8_420_UNORM_PACKED = enum_pipe_format.define('PIPE_FORMAT_Y8U8V8_420_UNORM_PACKED', 250)
PIPE_FORMAT_Y10U10V10_420_UNORM_PACKED = enum_pipe_format.define('PIPE_FORMAT_Y10U10V10_420_UNORM_PACKED', 251)
PIPE_FORMAT_A4R4_UNORM = enum_pipe_format.define('PIPE_FORMAT_A4R4_UNORM', 252)
PIPE_FORMAT_R4A4_UNORM = enum_pipe_format.define('PIPE_FORMAT_R4A4_UNORM', 253)
PIPE_FORMAT_R8A8_UNORM = enum_pipe_format.define('PIPE_FORMAT_R8A8_UNORM', 254)
PIPE_FORMAT_A8R8_UNORM = enum_pipe_format.define('PIPE_FORMAT_A8R8_UNORM', 255)
PIPE_FORMAT_A8_UINT = enum_pipe_format.define('PIPE_FORMAT_A8_UINT', 256)
PIPE_FORMAT_I8_UINT = enum_pipe_format.define('PIPE_FORMAT_I8_UINT', 257)
PIPE_FORMAT_L8_UINT = enum_pipe_format.define('PIPE_FORMAT_L8_UINT', 258)
PIPE_FORMAT_L8A8_UINT = enum_pipe_format.define('PIPE_FORMAT_L8A8_UINT', 259)
PIPE_FORMAT_A8_SINT = enum_pipe_format.define('PIPE_FORMAT_A8_SINT', 260)
PIPE_FORMAT_I8_SINT = enum_pipe_format.define('PIPE_FORMAT_I8_SINT', 261)
PIPE_FORMAT_L8_SINT = enum_pipe_format.define('PIPE_FORMAT_L8_SINT', 262)
PIPE_FORMAT_L8A8_SINT = enum_pipe_format.define('PIPE_FORMAT_L8A8_SINT', 263)
PIPE_FORMAT_A16_UINT = enum_pipe_format.define('PIPE_FORMAT_A16_UINT', 264)
PIPE_FORMAT_I16_UINT = enum_pipe_format.define('PIPE_FORMAT_I16_UINT', 265)
PIPE_FORMAT_L16_UINT = enum_pipe_format.define('PIPE_FORMAT_L16_UINT', 266)
PIPE_FORMAT_L16A16_UINT = enum_pipe_format.define('PIPE_FORMAT_L16A16_UINT', 267)
PIPE_FORMAT_A16_SINT = enum_pipe_format.define('PIPE_FORMAT_A16_SINT', 268)
PIPE_FORMAT_I16_SINT = enum_pipe_format.define('PIPE_FORMAT_I16_SINT', 269)
PIPE_FORMAT_L16_SINT = enum_pipe_format.define('PIPE_FORMAT_L16_SINT', 270)
PIPE_FORMAT_L16A16_SINT = enum_pipe_format.define('PIPE_FORMAT_L16A16_SINT', 271)
PIPE_FORMAT_A32_UINT = enum_pipe_format.define('PIPE_FORMAT_A32_UINT', 272)
PIPE_FORMAT_I32_UINT = enum_pipe_format.define('PIPE_FORMAT_I32_UINT', 273)
PIPE_FORMAT_L32_UINT = enum_pipe_format.define('PIPE_FORMAT_L32_UINT', 274)
PIPE_FORMAT_L32A32_UINT = enum_pipe_format.define('PIPE_FORMAT_L32A32_UINT', 275)
PIPE_FORMAT_A32_SINT = enum_pipe_format.define('PIPE_FORMAT_A32_SINT', 276)
PIPE_FORMAT_I32_SINT = enum_pipe_format.define('PIPE_FORMAT_I32_SINT', 277)
PIPE_FORMAT_L32_SINT = enum_pipe_format.define('PIPE_FORMAT_L32_SINT', 278)
PIPE_FORMAT_L32A32_SINT = enum_pipe_format.define('PIPE_FORMAT_L32A32_SINT', 279)
PIPE_FORMAT_A8R8G8B8_UINT = enum_pipe_format.define('PIPE_FORMAT_A8R8G8B8_UINT', 280)
PIPE_FORMAT_A8B8G8R8_UINT = enum_pipe_format.define('PIPE_FORMAT_A8B8G8R8_UINT', 281)
PIPE_FORMAT_A2R10G10B10_UINT = enum_pipe_format.define('PIPE_FORMAT_A2R10G10B10_UINT', 282)
PIPE_FORMAT_A2B10G10R10_UINT = enum_pipe_format.define('PIPE_FORMAT_A2B10G10R10_UINT', 283)
PIPE_FORMAT_R5G6B5_UINT = enum_pipe_format.define('PIPE_FORMAT_R5G6B5_UINT', 284)
PIPE_FORMAT_B5G6R5_UINT = enum_pipe_format.define('PIPE_FORMAT_B5G6R5_UINT', 285)
PIPE_FORMAT_R5G5B5A1_UINT = enum_pipe_format.define('PIPE_FORMAT_R5G5B5A1_UINT', 286)
PIPE_FORMAT_B5G5R5A1_UINT = enum_pipe_format.define('PIPE_FORMAT_B5G5R5A1_UINT', 287)
PIPE_FORMAT_A1R5G5B5_UINT = enum_pipe_format.define('PIPE_FORMAT_A1R5G5B5_UINT', 288)
PIPE_FORMAT_A1B5G5R5_UINT = enum_pipe_format.define('PIPE_FORMAT_A1B5G5R5_UINT', 289)
PIPE_FORMAT_R4G4B4A4_UINT = enum_pipe_format.define('PIPE_FORMAT_R4G4B4A4_UINT', 290)
PIPE_FORMAT_B4G4R4A4_UINT = enum_pipe_format.define('PIPE_FORMAT_B4G4R4A4_UINT', 291)
PIPE_FORMAT_A4R4G4B4_UINT = enum_pipe_format.define('PIPE_FORMAT_A4R4G4B4_UINT', 292)
PIPE_FORMAT_A4B4G4R4_UINT = enum_pipe_format.define('PIPE_FORMAT_A4B4G4R4_UINT', 293)
PIPE_FORMAT_R3G3B2_UINT = enum_pipe_format.define('PIPE_FORMAT_R3G3B2_UINT', 294)
PIPE_FORMAT_B2G3R3_UINT = enum_pipe_format.define('PIPE_FORMAT_B2G3R3_UINT', 295)
PIPE_FORMAT_ETC1_RGB8 = enum_pipe_format.define('PIPE_FORMAT_ETC1_RGB8', 296)
PIPE_FORMAT_R8G8_R8B8_UNORM = enum_pipe_format.define('PIPE_FORMAT_R8G8_R8B8_UNORM', 297)
PIPE_FORMAT_R8B8_R8G8_UNORM = enum_pipe_format.define('PIPE_FORMAT_R8B8_R8G8_UNORM', 298)
PIPE_FORMAT_G8R8_B8R8_UNORM = enum_pipe_format.define('PIPE_FORMAT_G8R8_B8R8_UNORM', 299)
PIPE_FORMAT_B8R8_G8R8_UNORM = enum_pipe_format.define('PIPE_FORMAT_B8R8_G8R8_UNORM', 300)
PIPE_FORMAT_G8B8_G8R8_UNORM = enum_pipe_format.define('PIPE_FORMAT_G8B8_G8R8_UNORM', 301)
PIPE_FORMAT_B8G8_R8G8_UNORM = enum_pipe_format.define('PIPE_FORMAT_B8G8_R8G8_UNORM', 302)
PIPE_FORMAT_R8G8B8X8_SNORM = enum_pipe_format.define('PIPE_FORMAT_R8G8B8X8_SNORM', 303)
PIPE_FORMAT_R8G8B8X8_SRGB = enum_pipe_format.define('PIPE_FORMAT_R8G8B8X8_SRGB', 304)
PIPE_FORMAT_R8G8B8X8_UINT = enum_pipe_format.define('PIPE_FORMAT_R8G8B8X8_UINT', 305)
PIPE_FORMAT_R8G8B8X8_SINT = enum_pipe_format.define('PIPE_FORMAT_R8G8B8X8_SINT', 306)
PIPE_FORMAT_B10G10R10X2_UNORM = enum_pipe_format.define('PIPE_FORMAT_B10G10R10X2_UNORM', 307)
PIPE_FORMAT_R16G16B16X16_UNORM = enum_pipe_format.define('PIPE_FORMAT_R16G16B16X16_UNORM', 308)
PIPE_FORMAT_R16G16B16X16_SNORM = enum_pipe_format.define('PIPE_FORMAT_R16G16B16X16_SNORM', 309)
PIPE_FORMAT_R16G16B16X16_FLOAT = enum_pipe_format.define('PIPE_FORMAT_R16G16B16X16_FLOAT', 310)
PIPE_FORMAT_R16G16B16X16_UINT = enum_pipe_format.define('PIPE_FORMAT_R16G16B16X16_UINT', 311)
PIPE_FORMAT_R16G16B16X16_SINT = enum_pipe_format.define('PIPE_FORMAT_R16G16B16X16_SINT', 312)
PIPE_FORMAT_R32G32B32X32_FLOAT = enum_pipe_format.define('PIPE_FORMAT_R32G32B32X32_FLOAT', 313)
PIPE_FORMAT_R32G32B32X32_UINT = enum_pipe_format.define('PIPE_FORMAT_R32G32B32X32_UINT', 314)
PIPE_FORMAT_R32G32B32X32_SINT = enum_pipe_format.define('PIPE_FORMAT_R32G32B32X32_SINT', 315)
PIPE_FORMAT_R8A8_SNORM = enum_pipe_format.define('PIPE_FORMAT_R8A8_SNORM', 316)
PIPE_FORMAT_R16A16_UNORM = enum_pipe_format.define('PIPE_FORMAT_R16A16_UNORM', 317)
PIPE_FORMAT_R16A16_SNORM = enum_pipe_format.define('PIPE_FORMAT_R16A16_SNORM', 318)
PIPE_FORMAT_R16A16_FLOAT = enum_pipe_format.define('PIPE_FORMAT_R16A16_FLOAT', 319)
PIPE_FORMAT_R32A32_FLOAT = enum_pipe_format.define('PIPE_FORMAT_R32A32_FLOAT', 320)
PIPE_FORMAT_R8A8_UINT = enum_pipe_format.define('PIPE_FORMAT_R8A8_UINT', 321)
PIPE_FORMAT_R8A8_SINT = enum_pipe_format.define('PIPE_FORMAT_R8A8_SINT', 322)
PIPE_FORMAT_R16A16_UINT = enum_pipe_format.define('PIPE_FORMAT_R16A16_UINT', 323)
PIPE_FORMAT_R16A16_SINT = enum_pipe_format.define('PIPE_FORMAT_R16A16_SINT', 324)
PIPE_FORMAT_R32A32_UINT = enum_pipe_format.define('PIPE_FORMAT_R32A32_UINT', 325)
PIPE_FORMAT_R32A32_SINT = enum_pipe_format.define('PIPE_FORMAT_R32A32_SINT', 326)
PIPE_FORMAT_B5G6R5_SRGB = enum_pipe_format.define('PIPE_FORMAT_B5G6R5_SRGB', 327)
PIPE_FORMAT_BPTC_RGBA_UNORM = enum_pipe_format.define('PIPE_FORMAT_BPTC_RGBA_UNORM', 328)
PIPE_FORMAT_BPTC_SRGBA = enum_pipe_format.define('PIPE_FORMAT_BPTC_SRGBA', 329)
PIPE_FORMAT_BPTC_RGB_FLOAT = enum_pipe_format.define('PIPE_FORMAT_BPTC_RGB_FLOAT', 330)
PIPE_FORMAT_BPTC_RGB_UFLOAT = enum_pipe_format.define('PIPE_FORMAT_BPTC_RGB_UFLOAT', 331)
PIPE_FORMAT_G8R8_UNORM = enum_pipe_format.define('PIPE_FORMAT_G8R8_UNORM', 332)
PIPE_FORMAT_G8R8_SNORM = enum_pipe_format.define('PIPE_FORMAT_G8R8_SNORM', 333)
PIPE_FORMAT_G16R16_UNORM = enum_pipe_format.define('PIPE_FORMAT_G16R16_UNORM', 334)
PIPE_FORMAT_G16R16_SNORM = enum_pipe_format.define('PIPE_FORMAT_G16R16_SNORM', 335)
PIPE_FORMAT_A8B8G8R8_SNORM = enum_pipe_format.define('PIPE_FORMAT_A8B8G8R8_SNORM', 336)
PIPE_FORMAT_X8B8G8R8_SNORM = enum_pipe_format.define('PIPE_FORMAT_X8B8G8R8_SNORM', 337)
PIPE_FORMAT_ETC2_RGB8 = enum_pipe_format.define('PIPE_FORMAT_ETC2_RGB8', 338)
PIPE_FORMAT_ETC2_SRGB8 = enum_pipe_format.define('PIPE_FORMAT_ETC2_SRGB8', 339)
PIPE_FORMAT_ETC2_RGB8A1 = enum_pipe_format.define('PIPE_FORMAT_ETC2_RGB8A1', 340)
PIPE_FORMAT_ETC2_SRGB8A1 = enum_pipe_format.define('PIPE_FORMAT_ETC2_SRGB8A1', 341)
PIPE_FORMAT_ETC2_RGBA8 = enum_pipe_format.define('PIPE_FORMAT_ETC2_RGBA8', 342)
PIPE_FORMAT_ETC2_SRGBA8 = enum_pipe_format.define('PIPE_FORMAT_ETC2_SRGBA8', 343)
PIPE_FORMAT_ETC2_R11_UNORM = enum_pipe_format.define('PIPE_FORMAT_ETC2_R11_UNORM', 344)
PIPE_FORMAT_ETC2_R11_SNORM = enum_pipe_format.define('PIPE_FORMAT_ETC2_R11_SNORM', 345)
PIPE_FORMAT_ETC2_RG11_UNORM = enum_pipe_format.define('PIPE_FORMAT_ETC2_RG11_UNORM', 346)
PIPE_FORMAT_ETC2_RG11_SNORM = enum_pipe_format.define('PIPE_FORMAT_ETC2_RG11_SNORM', 347)
PIPE_FORMAT_ASTC_4x4 = enum_pipe_format.define('PIPE_FORMAT_ASTC_4x4', 348)
PIPE_FORMAT_ASTC_5x4 = enum_pipe_format.define('PIPE_FORMAT_ASTC_5x4', 349)
PIPE_FORMAT_ASTC_5x5 = enum_pipe_format.define('PIPE_FORMAT_ASTC_5x5', 350)
PIPE_FORMAT_ASTC_6x5 = enum_pipe_format.define('PIPE_FORMAT_ASTC_6x5', 351)
PIPE_FORMAT_ASTC_6x6 = enum_pipe_format.define('PIPE_FORMAT_ASTC_6x6', 352)
PIPE_FORMAT_ASTC_8x5 = enum_pipe_format.define('PIPE_FORMAT_ASTC_8x5', 353)
PIPE_FORMAT_ASTC_8x6 = enum_pipe_format.define('PIPE_FORMAT_ASTC_8x6', 354)
PIPE_FORMAT_ASTC_8x8 = enum_pipe_format.define('PIPE_FORMAT_ASTC_8x8', 355)
PIPE_FORMAT_ASTC_10x5 = enum_pipe_format.define('PIPE_FORMAT_ASTC_10x5', 356)
PIPE_FORMAT_ASTC_10x6 = enum_pipe_format.define('PIPE_FORMAT_ASTC_10x6', 357)
PIPE_FORMAT_ASTC_10x8 = enum_pipe_format.define('PIPE_FORMAT_ASTC_10x8', 358)
PIPE_FORMAT_ASTC_10x10 = enum_pipe_format.define('PIPE_FORMAT_ASTC_10x10', 359)
PIPE_FORMAT_ASTC_12x10 = enum_pipe_format.define('PIPE_FORMAT_ASTC_12x10', 360)
PIPE_FORMAT_ASTC_12x12 = enum_pipe_format.define('PIPE_FORMAT_ASTC_12x12', 361)
PIPE_FORMAT_ASTC_4x4_SRGB = enum_pipe_format.define('PIPE_FORMAT_ASTC_4x4_SRGB', 362)
PIPE_FORMAT_ASTC_5x4_SRGB = enum_pipe_format.define('PIPE_FORMAT_ASTC_5x4_SRGB', 363)
PIPE_FORMAT_ASTC_5x5_SRGB = enum_pipe_format.define('PIPE_FORMAT_ASTC_5x5_SRGB', 364)
PIPE_FORMAT_ASTC_6x5_SRGB = enum_pipe_format.define('PIPE_FORMAT_ASTC_6x5_SRGB', 365)
PIPE_FORMAT_ASTC_6x6_SRGB = enum_pipe_format.define('PIPE_FORMAT_ASTC_6x6_SRGB', 366)
PIPE_FORMAT_ASTC_8x5_SRGB = enum_pipe_format.define('PIPE_FORMAT_ASTC_8x5_SRGB', 367)
PIPE_FORMAT_ASTC_8x6_SRGB = enum_pipe_format.define('PIPE_FORMAT_ASTC_8x6_SRGB', 368)
PIPE_FORMAT_ASTC_8x8_SRGB = enum_pipe_format.define('PIPE_FORMAT_ASTC_8x8_SRGB', 369)
PIPE_FORMAT_ASTC_10x5_SRGB = enum_pipe_format.define('PIPE_FORMAT_ASTC_10x5_SRGB', 370)
PIPE_FORMAT_ASTC_10x6_SRGB = enum_pipe_format.define('PIPE_FORMAT_ASTC_10x6_SRGB', 371)
PIPE_FORMAT_ASTC_10x8_SRGB = enum_pipe_format.define('PIPE_FORMAT_ASTC_10x8_SRGB', 372)
PIPE_FORMAT_ASTC_10x10_SRGB = enum_pipe_format.define('PIPE_FORMAT_ASTC_10x10_SRGB', 373)
PIPE_FORMAT_ASTC_12x10_SRGB = enum_pipe_format.define('PIPE_FORMAT_ASTC_12x10_SRGB', 374)
PIPE_FORMAT_ASTC_12x12_SRGB = enum_pipe_format.define('PIPE_FORMAT_ASTC_12x12_SRGB', 375)
PIPE_FORMAT_ASTC_3x3x3 = enum_pipe_format.define('PIPE_FORMAT_ASTC_3x3x3', 376)
PIPE_FORMAT_ASTC_4x3x3 = enum_pipe_format.define('PIPE_FORMAT_ASTC_4x3x3', 377)
PIPE_FORMAT_ASTC_4x4x3 = enum_pipe_format.define('PIPE_FORMAT_ASTC_4x4x3', 378)
PIPE_FORMAT_ASTC_4x4x4 = enum_pipe_format.define('PIPE_FORMAT_ASTC_4x4x4', 379)
PIPE_FORMAT_ASTC_5x4x4 = enum_pipe_format.define('PIPE_FORMAT_ASTC_5x4x4', 380)
PIPE_FORMAT_ASTC_5x5x4 = enum_pipe_format.define('PIPE_FORMAT_ASTC_5x5x4', 381)
PIPE_FORMAT_ASTC_5x5x5 = enum_pipe_format.define('PIPE_FORMAT_ASTC_5x5x5', 382)
PIPE_FORMAT_ASTC_6x5x5 = enum_pipe_format.define('PIPE_FORMAT_ASTC_6x5x5', 383)
PIPE_FORMAT_ASTC_6x6x5 = enum_pipe_format.define('PIPE_FORMAT_ASTC_6x6x5', 384)
PIPE_FORMAT_ASTC_6x6x6 = enum_pipe_format.define('PIPE_FORMAT_ASTC_6x6x6', 385)
PIPE_FORMAT_ASTC_3x3x3_SRGB = enum_pipe_format.define('PIPE_FORMAT_ASTC_3x3x3_SRGB', 386)
PIPE_FORMAT_ASTC_4x3x3_SRGB = enum_pipe_format.define('PIPE_FORMAT_ASTC_4x3x3_SRGB', 387)
PIPE_FORMAT_ASTC_4x4x3_SRGB = enum_pipe_format.define('PIPE_FORMAT_ASTC_4x4x3_SRGB', 388)
PIPE_FORMAT_ASTC_4x4x4_SRGB = enum_pipe_format.define('PIPE_FORMAT_ASTC_4x4x4_SRGB', 389)
PIPE_FORMAT_ASTC_5x4x4_SRGB = enum_pipe_format.define('PIPE_FORMAT_ASTC_5x4x4_SRGB', 390)
PIPE_FORMAT_ASTC_5x5x4_SRGB = enum_pipe_format.define('PIPE_FORMAT_ASTC_5x5x4_SRGB', 391)
PIPE_FORMAT_ASTC_5x5x5_SRGB = enum_pipe_format.define('PIPE_FORMAT_ASTC_5x5x5_SRGB', 392)
PIPE_FORMAT_ASTC_6x5x5_SRGB = enum_pipe_format.define('PIPE_FORMAT_ASTC_6x5x5_SRGB', 393)
PIPE_FORMAT_ASTC_6x6x5_SRGB = enum_pipe_format.define('PIPE_FORMAT_ASTC_6x6x5_SRGB', 394)
PIPE_FORMAT_ASTC_6x6x6_SRGB = enum_pipe_format.define('PIPE_FORMAT_ASTC_6x6x6_SRGB', 395)
PIPE_FORMAT_ASTC_4x4_FLOAT = enum_pipe_format.define('PIPE_FORMAT_ASTC_4x4_FLOAT', 396)
PIPE_FORMAT_ASTC_5x4_FLOAT = enum_pipe_format.define('PIPE_FORMAT_ASTC_5x4_FLOAT', 397)
PIPE_FORMAT_ASTC_5x5_FLOAT = enum_pipe_format.define('PIPE_FORMAT_ASTC_5x5_FLOAT', 398)
PIPE_FORMAT_ASTC_6x5_FLOAT = enum_pipe_format.define('PIPE_FORMAT_ASTC_6x5_FLOAT', 399)
PIPE_FORMAT_ASTC_6x6_FLOAT = enum_pipe_format.define('PIPE_FORMAT_ASTC_6x6_FLOAT', 400)
PIPE_FORMAT_ASTC_8x5_FLOAT = enum_pipe_format.define('PIPE_FORMAT_ASTC_8x5_FLOAT', 401)
PIPE_FORMAT_ASTC_8x6_FLOAT = enum_pipe_format.define('PIPE_FORMAT_ASTC_8x6_FLOAT', 402)
PIPE_FORMAT_ASTC_8x8_FLOAT = enum_pipe_format.define('PIPE_FORMAT_ASTC_8x8_FLOAT', 403)
PIPE_FORMAT_ASTC_10x5_FLOAT = enum_pipe_format.define('PIPE_FORMAT_ASTC_10x5_FLOAT', 404)
PIPE_FORMAT_ASTC_10x6_FLOAT = enum_pipe_format.define('PIPE_FORMAT_ASTC_10x6_FLOAT', 405)
PIPE_FORMAT_ASTC_10x8_FLOAT = enum_pipe_format.define('PIPE_FORMAT_ASTC_10x8_FLOAT', 406)
PIPE_FORMAT_ASTC_10x10_FLOAT = enum_pipe_format.define('PIPE_FORMAT_ASTC_10x10_FLOAT', 407)
PIPE_FORMAT_ASTC_12x10_FLOAT = enum_pipe_format.define('PIPE_FORMAT_ASTC_12x10_FLOAT', 408)
PIPE_FORMAT_ASTC_12x12_FLOAT = enum_pipe_format.define('PIPE_FORMAT_ASTC_12x12_FLOAT', 409)
PIPE_FORMAT_FXT1_RGB = enum_pipe_format.define('PIPE_FORMAT_FXT1_RGB', 410)
PIPE_FORMAT_FXT1_RGBA = enum_pipe_format.define('PIPE_FORMAT_FXT1_RGBA', 411)
PIPE_FORMAT_P010 = enum_pipe_format.define('PIPE_FORMAT_P010', 412)
PIPE_FORMAT_P012 = enum_pipe_format.define('PIPE_FORMAT_P012', 413)
PIPE_FORMAT_P016 = enum_pipe_format.define('PIPE_FORMAT_P016', 414)
PIPE_FORMAT_P030 = enum_pipe_format.define('PIPE_FORMAT_P030', 415)
PIPE_FORMAT_Y210 = enum_pipe_format.define('PIPE_FORMAT_Y210', 416)
PIPE_FORMAT_Y212 = enum_pipe_format.define('PIPE_FORMAT_Y212', 417)
PIPE_FORMAT_Y216 = enum_pipe_format.define('PIPE_FORMAT_Y216', 418)
PIPE_FORMAT_Y410 = enum_pipe_format.define('PIPE_FORMAT_Y410', 419)
PIPE_FORMAT_Y412 = enum_pipe_format.define('PIPE_FORMAT_Y412', 420)
PIPE_FORMAT_Y416 = enum_pipe_format.define('PIPE_FORMAT_Y416', 421)
PIPE_FORMAT_R10G10B10X2_UNORM = enum_pipe_format.define('PIPE_FORMAT_R10G10B10X2_UNORM', 422)
PIPE_FORMAT_A1R5G5B5_UNORM = enum_pipe_format.define('PIPE_FORMAT_A1R5G5B5_UNORM', 423)
PIPE_FORMAT_A1B5G5R5_UNORM = enum_pipe_format.define('PIPE_FORMAT_A1B5G5R5_UNORM', 424)
PIPE_FORMAT_X1B5G5R5_UNORM = enum_pipe_format.define('PIPE_FORMAT_X1B5G5R5_UNORM', 425)
PIPE_FORMAT_R5G5B5A1_UNORM = enum_pipe_format.define('PIPE_FORMAT_R5G5B5A1_UNORM', 426)
PIPE_FORMAT_A4R4G4B4_UNORM = enum_pipe_format.define('PIPE_FORMAT_A4R4G4B4_UNORM', 427)
PIPE_FORMAT_A4B4G4R4_UNORM = enum_pipe_format.define('PIPE_FORMAT_A4B4G4R4_UNORM', 428)
PIPE_FORMAT_G8R8_SINT = enum_pipe_format.define('PIPE_FORMAT_G8R8_SINT', 429)
PIPE_FORMAT_A8B8G8R8_SINT = enum_pipe_format.define('PIPE_FORMAT_A8B8G8R8_SINT', 430)
PIPE_FORMAT_X8B8G8R8_SINT = enum_pipe_format.define('PIPE_FORMAT_X8B8G8R8_SINT', 431)
PIPE_FORMAT_ATC_RGB = enum_pipe_format.define('PIPE_FORMAT_ATC_RGB', 432)
PIPE_FORMAT_ATC_RGBA_EXPLICIT = enum_pipe_format.define('PIPE_FORMAT_ATC_RGBA_EXPLICIT', 433)
PIPE_FORMAT_ATC_RGBA_INTERPOLATED = enum_pipe_format.define('PIPE_FORMAT_ATC_RGBA_INTERPOLATED', 434)
PIPE_FORMAT_Z24_UNORM_S8_UINT_AS_R8G8B8A8 = enum_pipe_format.define('PIPE_FORMAT_Z24_UNORM_S8_UINT_AS_R8G8B8A8', 435)
PIPE_FORMAT_AYUV = enum_pipe_format.define('PIPE_FORMAT_AYUV', 436)
PIPE_FORMAT_XYUV = enum_pipe_format.define('PIPE_FORMAT_XYUV', 437)
PIPE_FORMAT_R8G8B8_420_UNORM_PACKED = enum_pipe_format.define('PIPE_FORMAT_R8G8B8_420_UNORM_PACKED', 438)
PIPE_FORMAT_R8_G8B8_420_UNORM = enum_pipe_format.define('PIPE_FORMAT_R8_G8B8_420_UNORM', 439)
PIPE_FORMAT_R8_B8G8_420_UNORM = enum_pipe_format.define('PIPE_FORMAT_R8_B8G8_420_UNORM', 440)
PIPE_FORMAT_G8_B8R8_420_UNORM = enum_pipe_format.define('PIPE_FORMAT_G8_B8R8_420_UNORM', 441)
PIPE_FORMAT_R10G10B10_420_UNORM_PACKED = enum_pipe_format.define('PIPE_FORMAT_R10G10B10_420_UNORM_PACKED', 442)
PIPE_FORMAT_R10_G10B10_420_UNORM = enum_pipe_format.define('PIPE_FORMAT_R10_G10B10_420_UNORM', 443)
PIPE_FORMAT_R10_G10B10_422_UNORM = enum_pipe_format.define('PIPE_FORMAT_R10_G10B10_422_UNORM', 444)
PIPE_FORMAT_R8_G8_B8_420_UNORM = enum_pipe_format.define('PIPE_FORMAT_R8_G8_B8_420_UNORM', 445)
PIPE_FORMAT_R8_B8_G8_420_UNORM = enum_pipe_format.define('PIPE_FORMAT_R8_B8_G8_420_UNORM', 446)
PIPE_FORMAT_G8_B8_R8_420_UNORM = enum_pipe_format.define('PIPE_FORMAT_G8_B8_R8_420_UNORM', 447)
PIPE_FORMAT_R8_G8B8_422_UNORM = enum_pipe_format.define('PIPE_FORMAT_R8_G8B8_422_UNORM', 448)
PIPE_FORMAT_R8_B8G8_422_UNORM = enum_pipe_format.define('PIPE_FORMAT_R8_B8G8_422_UNORM', 449)
PIPE_FORMAT_G8_B8R8_422_UNORM = enum_pipe_format.define('PIPE_FORMAT_G8_B8R8_422_UNORM', 450)
PIPE_FORMAT_R8_G8_B8_UNORM = enum_pipe_format.define('PIPE_FORMAT_R8_G8_B8_UNORM', 451)
PIPE_FORMAT_Y8_UNORM = enum_pipe_format.define('PIPE_FORMAT_Y8_UNORM', 452)
PIPE_FORMAT_B8G8R8X8_SNORM = enum_pipe_format.define('PIPE_FORMAT_B8G8R8X8_SNORM', 453)
PIPE_FORMAT_B8G8R8X8_UINT = enum_pipe_format.define('PIPE_FORMAT_B8G8R8X8_UINT', 454)
PIPE_FORMAT_B8G8R8X8_SINT = enum_pipe_format.define('PIPE_FORMAT_B8G8R8X8_SINT', 455)
PIPE_FORMAT_A8R8G8B8_SNORM = enum_pipe_format.define('PIPE_FORMAT_A8R8G8B8_SNORM', 456)
PIPE_FORMAT_A8R8G8B8_SINT = enum_pipe_format.define('PIPE_FORMAT_A8R8G8B8_SINT', 457)
PIPE_FORMAT_X8R8G8B8_SNORM = enum_pipe_format.define('PIPE_FORMAT_X8R8G8B8_SNORM', 458)
PIPE_FORMAT_X8R8G8B8_SINT = enum_pipe_format.define('PIPE_FORMAT_X8R8G8B8_SINT', 459)
PIPE_FORMAT_R5G5B5X1_UNORM = enum_pipe_format.define('PIPE_FORMAT_R5G5B5X1_UNORM', 460)
PIPE_FORMAT_X1R5G5B5_UNORM = enum_pipe_format.define('PIPE_FORMAT_X1R5G5B5_UNORM', 461)
PIPE_FORMAT_R4G4B4X4_UNORM = enum_pipe_format.define('PIPE_FORMAT_R4G4B4X4_UNORM', 462)
PIPE_FORMAT_B10G10R10X2_SNORM = enum_pipe_format.define('PIPE_FORMAT_B10G10R10X2_SNORM', 463)
PIPE_FORMAT_R5G6B5_SRGB = enum_pipe_format.define('PIPE_FORMAT_R5G6B5_SRGB', 464)
PIPE_FORMAT_R10G10B10X2_SINT = enum_pipe_format.define('PIPE_FORMAT_R10G10B10X2_SINT', 465)
PIPE_FORMAT_B10G10R10X2_SINT = enum_pipe_format.define('PIPE_FORMAT_B10G10R10X2_SINT', 466)
PIPE_FORMAT_G16R16_SINT = enum_pipe_format.define('PIPE_FORMAT_G16R16_SINT', 467)
PIPE_FORMAT_COUNT = enum_pipe_format.define('PIPE_FORMAT_COUNT', 468)
# Anonymous sub-structs of nir_variable_data. Field annotations follow the
# generator's convention: Annotated[type, byte_offset] for plain fields and
# Annotated[type, byte_offset, bit_width, bit_shift] for bitfields (inferred
# from the consistent 4-argument pattern below -- confirm against the c.record
# helper if modifying).
@c.record
class struct_nir_variable_data_sampler(c.Struct):
  SIZE = 4
  # Inline-sampler state packed into a single 32-bit word.
  is_inline_sampler: Annotated[Annotated[int, ctypes.c_uint32], 0, 1, 0]
  addressing_mode: Annotated[Annotated[int, ctypes.c_uint32], 0, 3, 1]
  normalized_coordinates: Annotated[Annotated[int, ctypes.c_uint32], 0, 1, 4]
  filter_mode: Annotated[Annotated[int, ctypes.c_uint32], 0, 1, 5]
@c.record
class struct_nir_variable_data_xfb(c.Struct):
  SIZE = 4
  # Transform-feedback buffer index (2-bit bitfield) and stride.
  buffer: Annotated[uint16_t, 0, 2, 0]
  stride: Annotated[uint16_t, 2]
nir_variable_data: TypeAlias = struct_nir_variable_data
# NIR shader variable (nir_variable in nir.h): one declared input/output/
# uniform/temporary of a shader, linked into per-mode lists via `node`.
@c.record
class struct_nir_variable(c.Struct):
  SIZE = 152
  node: Annotated[struct_exec_node, 0]            # intrusive list link
  type: Annotated[c.POINTER[struct_glsl_type], 16]
  name: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 24]  # may be NULL
  data: Annotated[struct_nir_variable_data, 32]   # mode/location/etc. blob
  index: Annotated[Annotated[int, ctypes.c_uint32], 88]
  num_members: Annotated[uint16_t, 92]
  max_ifc_array_access: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 96]
  num_state_slots: Annotated[uint16_t, 104]
  state_slots: Annotated[c.POINTER[nir_state_slot], 112]
  constant_initializer: Annotated[c.POINTER[nir_constant], 120]
  pointer_initializer: Annotated[c.POINTER[nir_variable], 128]
  interface_type: Annotated[c.POINTER[struct_glsl_type], 136]
  members: Annotated[c.POINTER[nir_variable_data], 144]
# Doubly-linked intrusive list node (Mesa's exec_node): just next/prev.
@c.record
class struct_exec_node(c.Struct):
  SIZE = 16
  next: Annotated[c.POINTER[struct_exec_node], 0]
  prev: Annotated[c.POINTER[struct_exec_node], 8]
# GLSL type descriptor (glsl_type). Scalar/vector/matrix shape lives in
# vector_elements/matrix_columns; aggregate contents live in `fields`.
@c.record
class struct_glsl_type(c.Struct):
  SIZE = 48
  gl_type: Annotated[uint32_t, 0]                 # GLenum of this type
  base_type: Annotated[enum_glsl_base_type, 4, 8, 0]
  sampled_type: Annotated[enum_glsl_base_type, 5, 8, 0]  # for sampler/image types
  sampler_dimensionality: Annotated[Annotated[int, ctypes.c_uint32], 6, 4, 0]
  sampler_shadow: Annotated[Annotated[int, ctypes.c_uint32], 6, 1, 4]
  sampler_array: Annotated[Annotated[int, ctypes.c_uint32], 6, 1, 5]
  interface_packing: Annotated[Annotated[int, ctypes.c_uint32], 6, 2, 6]
  interface_row_major: Annotated[Annotated[int, ctypes.c_uint32], 7, 1, 0]
  cmat_desc: Annotated[struct_glsl_cmat_description, 8]  # cooperative-matrix info
  packed: Annotated[Annotated[int, ctypes.c_uint32], 12, 1, 0]
  has_builtin_name: Annotated[Annotated[int, ctypes.c_uint32], 12, 1, 1]
  vector_elements: Annotated[uint8_t, 13]
  matrix_columns: Annotated[uint8_t, 14]
  length: Annotated[Annotated[int, ctypes.c_uint32], 16]  # array/struct member count
  name_id: Annotated[uintptr_t, 24]
  explicit_stride: Annotated[Annotated[int, ctypes.c_uint32], 32]
  explicit_alignment: Annotated[Annotated[int, ctypes.c_uint32], 36]
  fields: Annotated[struct_glsl_type_fields, 40]  # union: array elem / struct members
# Base scalar categories of a glsl_type (glsl_base_type in Mesa).
class enum_glsl_base_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
GLSL_TYPE_UINT = enum_glsl_base_type.define('GLSL_TYPE_UINT', 0)
GLSL_TYPE_INT = enum_glsl_base_type.define('GLSL_TYPE_INT', 1)
GLSL_TYPE_FLOAT = enum_glsl_base_type.define('GLSL_TYPE_FLOAT', 2)
GLSL_TYPE_FLOAT16 = enum_glsl_base_type.define('GLSL_TYPE_FLOAT16', 3)
GLSL_TYPE_BFLOAT16 = enum_glsl_base_type.define('GLSL_TYPE_BFLOAT16', 4)
GLSL_TYPE_FLOAT_E4M3FN = enum_glsl_base_type.define('GLSL_TYPE_FLOAT_E4M3FN', 5)
GLSL_TYPE_FLOAT_E5M2 = enum_glsl_base_type.define('GLSL_TYPE_FLOAT_E5M2', 6)
GLSL_TYPE_DOUBLE = enum_glsl_base_type.define('GLSL_TYPE_DOUBLE', 7)
GLSL_TYPE_UINT8 = enum_glsl_base_type.define('GLSL_TYPE_UINT8', 8)
GLSL_TYPE_INT8 = enum_glsl_base_type.define('GLSL_TYPE_INT8', 9)
GLSL_TYPE_UINT16 = enum_glsl_base_type.define('GLSL_TYPE_UINT16', 10)
GLSL_TYPE_INT16 = enum_glsl_base_type.define('GLSL_TYPE_INT16', 11)
GLSL_TYPE_UINT64 = enum_glsl_base_type.define('GLSL_TYPE_UINT64', 12)
GLSL_TYPE_INT64 = enum_glsl_base_type.define('GLSL_TYPE_INT64', 13)
GLSL_TYPE_BOOL = enum_glsl_base_type.define('GLSL_TYPE_BOOL', 14)
GLSL_TYPE_COOPERATIVE_MATRIX = enum_glsl_base_type.define('GLSL_TYPE_COOPERATIVE_MATRIX', 15)
GLSL_TYPE_SAMPLER = enum_glsl_base_type.define('GLSL_TYPE_SAMPLER', 16)
GLSL_TYPE_TEXTURE = enum_glsl_base_type.define('GLSL_TYPE_TEXTURE', 17)
GLSL_TYPE_IMAGE = enum_glsl_base_type.define('GLSL_TYPE_IMAGE', 18)
GLSL_TYPE_ATOMIC_UINT = enum_glsl_base_type.define('GLSL_TYPE_ATOMIC_UINT', 19)
GLSL_TYPE_STRUCT = enum_glsl_base_type.define('GLSL_TYPE_STRUCT', 20)
GLSL_TYPE_INTERFACE = enum_glsl_base_type.define('GLSL_TYPE_INTERFACE', 21)
GLSL_TYPE_ARRAY = enum_glsl_base_type.define('GLSL_TYPE_ARRAY', 22)
GLSL_TYPE_VOID = enum_glsl_base_type.define('GLSL_TYPE_VOID', 23)
GLSL_TYPE_SUBROUTINE = enum_glsl_base_type.define('GLSL_TYPE_SUBROUTINE', 24)
GLSL_TYPE_ERROR = enum_glsl_base_type.define('GLSL_TYPE_ERROR', 25)
# Cooperative-matrix description packed into 4 bytes.
@c.record
class struct_glsl_cmat_description(c.Struct):
  SIZE = 4
  element_type: Annotated[uint8_t, 0, 5, 0]
  scope: Annotated[uint8_t, 0, 3, 5]
  rows: Annotated[uint8_t, 1]
  cols: Annotated[uint8_t, 2]
  use: Annotated[uint8_t, 3]
uintptr_t: TypeAlias = Annotated[int, ctypes.c_uint64]
# Union-like payload of glsl_type: both members sit at offset 0.
@c.record
class struct_glsl_type_fields(c.Struct):
  SIZE = 8
  array: Annotated[c.POINTER[glsl_type], 0]        # element type for arrays
  structure: Annotated[c.POINTER[glsl_struct_field], 0]  # members for structs/interfaces
glsl_type: TypeAlias = struct_glsl_type
# One member of a GLSL struct/interface block (glsl_struct_field), including
# layout qualifiers and memory qualifiers. The `flags` field at offset 40
# overlays the bitfields declared above it.
@c.record
class struct_glsl_struct_field(c.Struct):
  SIZE = 48
  type: Annotated[c.POINTER[glsl_type], 0]
  name: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 8]
  location: Annotated[Annotated[int, ctypes.c_int32], 16]
  component: Annotated[Annotated[int, ctypes.c_int32], 20]
  offset: Annotated[Annotated[int, ctypes.c_int32], 24]
  xfb_buffer: Annotated[Annotated[int, ctypes.c_int32], 28]
  xfb_stride: Annotated[Annotated[int, ctypes.c_int32], 32]
  image_format: Annotated[enum_pipe_format, 36]
  interpolation: Annotated[Annotated[int, ctypes.c_uint32], 40, 3, 0]
  centroid: Annotated[Annotated[int, ctypes.c_uint32], 40, 1, 3]
  sample: Annotated[Annotated[int, ctypes.c_uint32], 40, 1, 4]
  matrix_layout: Annotated[Annotated[int, ctypes.c_uint32], 40, 2, 5]
  patch: Annotated[Annotated[int, ctypes.c_uint32], 40, 1, 7]
  precision: Annotated[Annotated[int, ctypes.c_uint32], 41, 2, 0]
  memory_read_only: Annotated[Annotated[int, ctypes.c_uint32], 41, 1, 2]
  memory_write_only: Annotated[Annotated[int, ctypes.c_uint32], 41, 1, 3]
  memory_coherent: Annotated[Annotated[int, ctypes.c_uint32], 41, 1, 4]
  memory_volatile: Annotated[Annotated[int, ctypes.c_uint32], 41, 1, 5]
  memory_restrict: Annotated[Annotated[int, ctypes.c_uint32], 41, 1, 6]
  explicit_xfb_buffer: Annotated[Annotated[int, ctypes.c_uint32], 41, 1, 7]
  implicit_sized_array: Annotated[Annotated[int, ctypes.c_uint32], 42, 1, 0]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 40]  # whole-word view of the bitfields
glsl_struct_field: TypeAlias = struct_glsl_struct_field
nir_variable: TypeAlias = struct_nir_variable
# Discriminator for the concrete kind of a nir_instr.
class nir_instr_type(Annotated[int, ctypes.c_ubyte], c.Enum): pass
nir_instr_type_alu = nir_instr_type.define('nir_instr_type_alu', 0)
nir_instr_type_deref = nir_instr_type.define('nir_instr_type_deref', 1)
nir_instr_type_call = nir_instr_type.define('nir_instr_type_call', 2)
nir_instr_type_tex = nir_instr_type.define('nir_instr_type_tex', 3)
nir_instr_type_intrinsic = nir_instr_type.define('nir_instr_type_intrinsic', 4)
nir_instr_type_load_const = nir_instr_type.define('nir_instr_type_load_const', 5)
nir_instr_type_jump = nir_instr_type.define('nir_instr_type_jump', 6)
nir_instr_type_undef = nir_instr_type.define('nir_instr_type_undef', 7)
nir_instr_type_phi = nir_instr_type.define('nir_instr_type_phi', 8)
nir_instr_type_parallel_copy = nir_instr_type.define('nir_instr_type_parallel_copy', 9)
# Common header embedded at the start of every NIR instruction.
@c.record
class struct_nir_instr(c.Struct):
  SIZE = 32
  node: Annotated[struct_exec_node, 0]             # link in the block's instr_list
  block: Annotated[c.POINTER[nir_block], 16]       # containing basic block
  type: Annotated[nir_instr_type, 24]              # concrete instruction kind
  pass_flags: Annotated[uint8_t, 25]               # scratch byte for passes
  has_debug_info: Annotated[Annotated[bool, ctypes.c_bool], 26]
  index: Annotated[uint32_t, 28]
# Basic block in the NIR control-flow graph, with dominance and liveness info.
@c.record
class struct_nir_block(c.Struct):
  SIZE = 160
  cf_node: Annotated[nir_cf_node, 0]
  instr_list: Annotated[struct_exec_list, 32]
  index: Annotated[Annotated[int, ctypes.c_uint32], 64]
  divergent: Annotated[Annotated[bool, ctypes.c_bool], 68]
  successors: Annotated[c.Array[c.POINTER[nir_block], Literal[2]], 72]  # at most 2 CFG successors
  predecessors: Annotated[c.POINTER[struct_set], 88]
  imm_dom: Annotated[c.POINTER[nir_block], 96]     # immediate dominator
  num_dom_children: Annotated[Annotated[int, ctypes.c_uint32], 104]
  dom_children: Annotated[c.POINTER[c.POINTER[nir_block]], 112]
  dom_frontier: Annotated[c.POINTER[struct_set], 120]
  dom_pre_index: Annotated[uint32_t, 128]
  dom_post_index: Annotated[uint32_t, 132]
  start_ip: Annotated[uint32_t, 136]
  end_ip: Annotated[uint32_t, 140]
  live_in: Annotated[c.POINTER[Annotated[int, ctypes.c_uint32]], 144]   # liveness bitsets
  live_out: Annotated[c.POINTER[Annotated[int, ctypes.c_uint32]], 152]
nir_block: TypeAlias = struct_nir_block
# Header shared by all control-flow tree nodes (block/if/loop/function).
@c.record
class struct_nir_cf_node(c.Struct):
  SIZE = 32
  node: Annotated[struct_exec_node, 0]
  type: Annotated[nir_cf_node_type, 16]
  parent: Annotated[c.POINTER[nir_cf_node], 24]
nir_cf_node: TypeAlias = struct_nir_cf_node
class nir_cf_node_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
nir_cf_node_block = nir_cf_node_type.define('nir_cf_node_block', 0)
nir_cf_node_if = nir_cf_node_type.define('nir_cf_node_if', 1)
nir_cf_node_loop = nir_cf_node_type.define('nir_cf_node_loop', 2)
nir_cf_node_function = nir_cf_node_type.define('nir_cf_node_function', 3)
# Mesa exec_list: sentinel-based doubly-linked list of exec_nodes.
@c.record
class struct_exec_list(c.Struct):
  SIZE = 32
  head_sentinel: Annotated[struct_exec_node, 0]
  tail_sentinel: Annotated[struct_exec_node, 16]
# Mesa's open-addressing hash set (src/util/set.h).
@c.record
class struct_set(c.Struct):
  SIZE = 72
  mem_ctx: Annotated[ctypes.c_void_p, 0]           # ralloc parent context
  table: Annotated[c.POINTER[struct_set_entry], 8]
  key_hash_function: Annotated[c.CFUNCTYPE[uint32_t, [ctypes.c_void_p]], 16]
  key_equals_function: Annotated[c.CFUNCTYPE[Annotated[bool, ctypes.c_bool], [ctypes.c_void_p, ctypes.c_void_p]], 24]
  size: Annotated[uint32_t, 32]
  rehash: Annotated[uint32_t, 36]
  size_magic: Annotated[uint64_t, 40]              # fast-division magic constants
  rehash_magic: Annotated[uint64_t, 48]
  max_entries: Annotated[uint32_t, 56]
  size_index: Annotated[uint32_t, 60]
  entries: Annotated[uint32_t, 64]
  deleted_entries: Annotated[uint32_t, 68]
# One slot of the hash set's table.
@c.record
class struct_set_entry(c.Struct):
  SIZE = 16
  hash: Annotated[uint32_t, 0]
  key: Annotated[ctypes.c_void_p, 8]
nir_instr: TypeAlias = struct_nir_instr
# SSA definition: the value produced by an instruction, with its use list.
@c.record
class struct_nir_def(c.Struct):
  SIZE = 32
  parent_instr: Annotated[c.POINTER[nir_instr], 0]
  uses: Annotated[struct_list_head, 8]             # list of nir_src use_links
  index: Annotated[Annotated[int, ctypes.c_uint32], 24]
  num_components: Annotated[uint8_t, 28]
  bit_size: Annotated[uint8_t, 29]
  divergent: Annotated[Annotated[bool, ctypes.c_bool], 30]
  loop_invariant: Annotated[Annotated[bool, ctypes.c_bool], 31]
# Mesa list_head: circular doubly-linked list node.
@c.record
class struct_list_head(c.Struct):
  SIZE = 16
  prev: Annotated[c.POINTER[struct_list_head], 0]
  next: Annotated[c.POINTER[struct_list_head], 8]
nir_def: TypeAlias = struct_nir_def
# SSA source operand: points at the nir_def it consumes.
@c.record
class struct_nir_src(c.Struct):
  SIZE = 32
  _parent: Annotated[uintptr_t, 0]                 # tagged pointer to owning instr/if
  use_link: Annotated[struct_list_head, 8]         # membership in the def's use list
  ssa: Annotated[c.POINTER[nir_def], 24]
nir_src: TypeAlias = struct_nir_src
@dll.bind
def nir_src_is_divergent(src:c.POINTER[nir_src]) -> Annotated[bool, ctypes.c_bool]: ...
# ALU operand: a nir_src plus a per-component swizzle (up to 16 components).
@c.record
class struct_nir_alu_src(c.Struct):
  SIZE = 48
  src: Annotated[nir_src, 0]
  swizzle: Annotated[c.Array[uint8_t, Literal[16]], 32]
nir_alu_src: TypeAlias = struct_nir_alu_src
# ALU operand type. The numeric values encode base type | bit size: the
# unsized bases (int=2, uint=4, bool=6, float=128) are OR'd with the size
# (e.g. int32 = 2|32 = 34, float16 = 128|16 = 144), matching nir.h.
class nir_alu_type(Annotated[int, ctypes.c_ubyte], c.Enum): pass
nir_type_invalid = nir_alu_type.define('nir_type_invalid', 0)
nir_type_int = nir_alu_type.define('nir_type_int', 2)
nir_type_uint = nir_alu_type.define('nir_type_uint', 4)
nir_type_bool = nir_alu_type.define('nir_type_bool', 6)
nir_type_float = nir_alu_type.define('nir_type_float', 128)
nir_type_bool1 = nir_alu_type.define('nir_type_bool1', 7)
nir_type_bool8 = nir_alu_type.define('nir_type_bool8', 14)
nir_type_bool16 = nir_alu_type.define('nir_type_bool16', 22)
nir_type_bool32 = nir_alu_type.define('nir_type_bool32', 38)
nir_type_int1 = nir_alu_type.define('nir_type_int1', 3)
nir_type_int8 = nir_alu_type.define('nir_type_int8', 10)
nir_type_int16 = nir_alu_type.define('nir_type_int16', 18)
nir_type_int32 = nir_alu_type.define('nir_type_int32', 34)
nir_type_int64 = nir_alu_type.define('nir_type_int64', 66)
nir_type_uint1 = nir_alu_type.define('nir_type_uint1', 5)
nir_type_uint8 = nir_alu_type.define('nir_type_uint8', 12)
nir_type_uint16 = nir_alu_type.define('nir_type_uint16', 20)
nir_type_uint32 = nir_alu_type.define('nir_type_uint32', 36)
nir_type_uint64 = nir_alu_type.define('nir_type_uint64', 68)
nir_type_float16 = nir_alu_type.define('nir_type_float16', 144)
nir_type_float32 = nir_alu_type.define('nir_type_float32', 160)
nir_type_float64 = nir_alu_type.define('nir_type_float64', 192)
# FFI bindings: convert between glsl_base_type and nir_alu_type.
@dll.bind
def nir_get_nir_type_for_glsl_base_type(base_type:enum_glsl_base_type) -> nir_alu_type: ...
@dll.bind
def nir_get_glsl_base_type_for_nir_type(base_type:nir_alu_type) -> enum_glsl_base_type: ...
# NIR ALU opcode enum (alphabetically generated from nir_opcodes.py).
# This table continues beyond this chunk; values must stay in sync with the
# compiled Mesa library.
class nir_op(Annotated[int, ctypes.c_uint32], c.Enum): pass
nir_op_alignbyte_amd = nir_op.define('nir_op_alignbyte_amd', 0)
nir_op_amul = nir_op.define('nir_op_amul', 1)
nir_op_andg_ir3 = nir_op.define('nir_op_andg_ir3', 2)
# bNall_*equalM / bNany_*nequalM: vector compare reduced to an N-bit boolean.
nir_op_b16all_fequal16 = nir_op.define('nir_op_b16all_fequal16', 3)
nir_op_b16all_fequal2 = nir_op.define('nir_op_b16all_fequal2', 4)
nir_op_b16all_fequal3 = nir_op.define('nir_op_b16all_fequal3', 5)
nir_op_b16all_fequal4 = nir_op.define('nir_op_b16all_fequal4', 6)
nir_op_b16all_fequal5 = nir_op.define('nir_op_b16all_fequal5', 7)
nir_op_b16all_fequal8 = nir_op.define('nir_op_b16all_fequal8', 8)
nir_op_b16all_iequal16 = nir_op.define('nir_op_b16all_iequal16', 9)
nir_op_b16all_iequal2 = nir_op.define('nir_op_b16all_iequal2', 10)
nir_op_b16all_iequal3 = nir_op.define('nir_op_b16all_iequal3', 11)
nir_op_b16all_iequal4 = nir_op.define('nir_op_b16all_iequal4', 12)
nir_op_b16all_iequal5 = nir_op.define('nir_op_b16all_iequal5', 13)
nir_op_b16all_iequal8 = nir_op.define('nir_op_b16all_iequal8', 14)
nir_op_b16any_fnequal16 = nir_op.define('nir_op_b16any_fnequal16', 15)
nir_op_b16any_fnequal2 = nir_op.define('nir_op_b16any_fnequal2', 16)
nir_op_b16any_fnequal3 = nir_op.define('nir_op_b16any_fnequal3', 17)
nir_op_b16any_fnequal4 = nir_op.define('nir_op_b16any_fnequal4', 18)
nir_op_b16any_fnequal5 = nir_op.define('nir_op_b16any_fnequal5', 19)
nir_op_b16any_fnequal8 = nir_op.define('nir_op_b16any_fnequal8', 20)
nir_op_b16any_inequal16 = nir_op.define('nir_op_b16any_inequal16', 21)
nir_op_b16any_inequal2 = nir_op.define('nir_op_b16any_inequal2', 22)
nir_op_b16any_inequal3 = nir_op.define('nir_op_b16any_inequal3', 23)
nir_op_b16any_inequal4 = nir_op.define('nir_op_b16any_inequal4', 24)
nir_op_b16any_inequal5 = nir_op.define('nir_op_b16any_inequal5', 25)
nir_op_b16any_inequal8 = nir_op.define('nir_op_b16any_inequal8', 26)
nir_op_b16csel = nir_op.define('nir_op_b16csel', 27)
# b2*: boolean-to-{bool,float,int} conversions at various widths.
nir_op_b2b1 = nir_op.define('nir_op_b2b1', 28)
nir_op_b2b16 = nir_op.define('nir_op_b2b16', 29)
nir_op_b2b32 = nir_op.define('nir_op_b2b32', 30)
nir_op_b2b8 = nir_op.define('nir_op_b2b8', 31)
nir_op_b2f16 = nir_op.define('nir_op_b2f16', 32)
nir_op_b2f32 = nir_op.define('nir_op_b2f32', 33)
nir_op_b2f64 = nir_op.define('nir_op_b2f64', 34)
nir_op_b2i1 = nir_op.define('nir_op_b2i1', 35)
nir_op_b2i16 = nir_op.define('nir_op_b2i16', 36)
nir_op_b2i32 = nir_op.define('nir_op_b2i32', 37)
nir_op_b2i64 = nir_op.define('nir_op_b2i64', 38)
nir_op_b2i8 = nir_op.define('nir_op_b2i8', 39)
nir_op_b32all_fequal16 = nir_op.define('nir_op_b32all_fequal16', 40)
nir_op_b32all_fequal2 = nir_op.define('nir_op_b32all_fequal2', 41)
nir_op_b32all_fequal3 = nir_op.define('nir_op_b32all_fequal3', 42)
nir_op_b32all_fequal4 = nir_op.define('nir_op_b32all_fequal4', 43)
nir_op_b32all_fequal5 = nir_op.define('nir_op_b32all_fequal5', 44)
nir_op_b32all_fequal8 = nir_op.define('nir_op_b32all_fequal8', 45)
nir_op_b32all_iequal16 = nir_op.define('nir_op_b32all_iequal16', 46)
nir_op_b32all_iequal2 = nir_op.define('nir_op_b32all_iequal2', 47)
nir_op_b32all_iequal3 = nir_op.define('nir_op_b32all_iequal3', 48)
nir_op_b32all_iequal4 = nir_op.define('nir_op_b32all_iequal4', 49)
nir_op_b32all_iequal5 = nir_op.define('nir_op_b32all_iequal5', 50)
nir_op_b32all_iequal8 = nir_op.define('nir_op_b32all_iequal8', 51)
nir_op_b32any_fnequal16 = nir_op.define('nir_op_b32any_fnequal16', 52)
nir_op_b32any_fnequal2 = nir_op.define('nir_op_b32any_fnequal2', 53)
nir_op_b32any_fnequal3 = nir_op.define('nir_op_b32any_fnequal3', 54)
nir_op_b32any_fnequal4 = nir_op.define('nir_op_b32any_fnequal4', 55)
nir_op_b32any_fnequal5 = nir_op.define('nir_op_b32any_fnequal5', 56)
nir_op_b32any_fnequal8 = nir_op.define('nir_op_b32any_fnequal8', 57)
nir_op_b32any_inequal16 = nir_op.define('nir_op_b32any_inequal16', 58)
nir_op_b32any_inequal2 = nir_op.define('nir_op_b32any_inequal2', 59)
nir_op_b32any_inequal3 = nir_op.define('nir_op_b32any_inequal3', 60)
nir_op_b32any_inequal4 = nir_op.define('nir_op_b32any_inequal4', 61)
nir_op_b32any_inequal5 = nir_op.define('nir_op_b32any_inequal5', 62)
nir_op_b32any_inequal8 = nir_op.define('nir_op_b32any_inequal8', 63)
nir_op_b32csel = nir_op.define('nir_op_b32csel', 64)
nir_op_b32fcsel_mdg = nir_op.define('nir_op_b32fcsel_mdg', 65)
nir_op_b8all_fequal16 = nir_op.define('nir_op_b8all_fequal16', 66)
nir_op_b8all_fequal2 = nir_op.define('nir_op_b8all_fequal2', 67)
nir_op_b8all_fequal3 = nir_op.define('nir_op_b8all_fequal3', 68)
nir_op_b8all_fequal4 = nir_op.define('nir_op_b8all_fequal4', 69)
nir_op_b8all_fequal5 = nir_op.define('nir_op_b8all_fequal5', 70)
nir_op_b8all_fequal8 = nir_op.define('nir_op_b8all_fequal8', 71)
nir_op_b8all_iequal16 = nir_op.define('nir_op_b8all_iequal16', 72)
nir_op_b8all_iequal2 = nir_op.define('nir_op_b8all_iequal2', 73)
nir_op_b8all_iequal3 = nir_op.define('nir_op_b8all_iequal3', 74)
nir_op_b8all_iequal4 = nir_op.define('nir_op_b8all_iequal4', 75)
nir_op_b8all_iequal5 = nir_op.define('nir_op_b8all_iequal5', 76)
nir_op_b8all_iequal8 = nir_op.define('nir_op_b8all_iequal8', 77)
nir_op_b8any_fnequal16 = nir_op.define('nir_op_b8any_fnequal16', 78)
nir_op_b8any_fnequal2 = nir_op.define('nir_op_b8any_fnequal2', 79)
nir_op_b8any_fnequal3 = nir_op.define('nir_op_b8any_fnequal3', 80)
nir_op_b8any_fnequal4 = nir_op.define('nir_op_b8any_fnequal4', 81)
nir_op_b8any_fnequal5 = nir_op.define('nir_op_b8any_fnequal5', 82)
nir_op_b8any_fnequal8 = nir_op.define('nir_op_b8any_fnequal8', 83)
nir_op_b8any_inequal16 = nir_op.define('nir_op_b8any_inequal16', 84)
nir_op_b8any_inequal2 = nir_op.define('nir_op_b8any_inequal2', 85)
nir_op_b8any_inequal3 = nir_op.define('nir_op_b8any_inequal3', 86)
nir_op_b8any_inequal4 = nir_op.define('nir_op_b8any_inequal4', 87)
nir_op_b8any_inequal5 = nir_op.define('nir_op_b8any_inequal5', 88)
nir_op_b8any_inequal8 = nir_op.define('nir_op_b8any_inequal8', 89)
nir_op_b8csel = nir_op.define('nir_op_b8csel', 90)
nir_op_ball_fequal16 = nir_op.define('nir_op_ball_fequal16', 91)
nir_op_ball_fequal2 = nir_op.define('nir_op_ball_fequal2', 92)
nir_op_ball_fequal3 = nir_op.define('nir_op_ball_fequal3', 93)
nir_op_ball_fequal4 = nir_op.define('nir_op_ball_fequal4', 94)
nir_op_ball_fequal5 = nir_op.define('nir_op_ball_fequal5', 95)
nir_op_ball_fequal8 = nir_op.define('nir_op_ball_fequal8', 96)
nir_op_ball_iequal16 = nir_op.define('nir_op_ball_iequal16', 97)
nir_op_ball_iequal2 = nir_op.define('nir_op_ball_iequal2', 98)
nir_op_ball_iequal3 = nir_op.define('nir_op_ball_iequal3', 99)
nir_op_ball_iequal4 = nir_op.define('nir_op_ball_iequal4', 100)
nir_op_ball_iequal5 = nir_op.define('nir_op_ball_iequal5', 101)
nir_op_ball_iequal8 = nir_op.define('nir_op_ball_iequal8', 102)
nir_op_bany_fnequal16 = nir_op.define('nir_op_bany_fnequal16', 103)
nir_op_bany_fnequal2 = nir_op.define('nir_op_bany_fnequal2', 104)
nir_op_bany_fnequal3 = nir_op.define('nir_op_bany_fnequal3', 105)
nir_op_bany_fnequal4 = nir_op.define('nir_op_bany_fnequal4', 106)
nir_op_bany_fnequal5 = nir_op.define('nir_op_bany_fnequal5', 107)
nir_op_bany_fnequal8 = nir_op.define('nir_op_bany_fnequal8', 108)
nir_op_bany_inequal16 = nir_op.define('nir_op_bany_inequal16', 109)
nir_op_bany_inequal2 = nir_op.define('nir_op_bany_inequal2', 110)
nir_op_bany_inequal3 = nir_op.define('nir_op_bany_inequal3', 111)
nir_op_bany_inequal4 = nir_op.define('nir_op_bany_inequal4', 112)
nir_op_bany_inequal5 = nir_op.define('nir_op_bany_inequal5', 113)
nir_op_bany_inequal8 = nir_op.define('nir_op_bany_inequal8', 114)
nir_op_bcsel = nir_op.define('nir_op_bcsel', 115)
# bf* ops: bfloat16 arithmetic; bfi/bfm: bitfield insert/mask helpers.
nir_op_bf2f = nir_op.define('nir_op_bf2f', 116)
nir_op_bfdot16 = nir_op.define('nir_op_bfdot16', 117)
nir_op_bfdot2 = nir_op.define('nir_op_bfdot2', 118)
nir_op_bfdot2_bfadd = nir_op.define('nir_op_bfdot2_bfadd', 119)
nir_op_bfdot3 = nir_op.define('nir_op_bfdot3', 120)
nir_op_bfdot4 = nir_op.define('nir_op_bfdot4', 121)
nir_op_bfdot5 = nir_op.define('nir_op_bfdot5', 122)
nir_op_bfdot8 = nir_op.define('nir_op_bfdot8', 123)
nir_op_bffma = nir_op.define('nir_op_bffma', 124)
nir_op_bfi = nir_op.define('nir_op_bfi', 125)
nir_op_bfm = nir_op.define('nir_op_bfm', 126)
nir_op_bfmul = nir_op.define('nir_op_bfmul', 127)
nir_op_bit_count = nir_op.define('nir_op_bit_count', 128)
nir_op_bitfield_insert = nir_op.define('nir_op_bitfield_insert', 129)
nir_op_bitfield_reverse = nir_op.define('nir_op_bitfield_reverse', 130)
nir_op_bitfield_select = nir_op.define('nir_op_bitfield_select', 131)
nir_op_bitnz = nir_op.define('nir_op_bitnz', 132)
nir_op_bitnz16 = nir_op.define('nir_op_bitnz16', 133)
nir_op_bitnz32 = nir_op.define('nir_op_bitnz32', 134)
nir_op_bitnz8 = nir_op.define('nir_op_bitnz8', 135)
nir_op_bitz = nir_op.define('nir_op_bitz', 136)
nir_op_bitz16 = nir_op.define('nir_op_bitz16', 137)
nir_op_bitz32 = nir_op.define('nir_op_bitz32', 138)
nir_op_bitz8 = nir_op.define('nir_op_bitz8', 139)
nir_op_bounds_agx = nir_op.define('nir_op_bounds_agx', 140)
nir_op_byte_perm_amd = nir_op.define('nir_op_byte_perm_amd', 141)
nir_op_cube_amd = nir_op.define('nir_op_cube_amd', 142)
nir_op_e4m3fn2f = nir_op.define('nir_op_e4m3fn2f', 143)
nir_op_e5m22f = nir_op.define('nir_op_e5m22f', 144)
nir_op_extr_agx = nir_op.define('nir_op_extr_agx', 145)
nir_op_extract_i16 = nir_op.define('nir_op_extract_i16', 146)
nir_op_extract_i8 = nir_op.define('nir_op_extract_i8', 147)
nir_op_extract_u16 = nir_op.define('nir_op_extract_u16', 148)
nir_op_extract_u8 = nir_op.define('nir_op_extract_u8', 149)
# f2*: float-to-X conversions (rtne/rtz = rounding mode variants).
nir_op_f2bf = nir_op.define('nir_op_f2bf', 150)
nir_op_f2e4m3fn = nir_op.define('nir_op_f2e4m3fn', 151)
nir_op_f2e4m3fn_sat = nir_op.define('nir_op_f2e4m3fn_sat', 152)
nir_op_f2e4m3fn_satfn = nir_op.define('nir_op_f2e4m3fn_satfn', 153)
nir_op_f2e5m2 = nir_op.define('nir_op_f2e5m2', 154)
nir_op_f2e5m2_sat = nir_op.define('nir_op_f2e5m2_sat', 155)
nir_op_f2f16 = nir_op.define('nir_op_f2f16', 156)
nir_op_f2f16_rtne = nir_op.define('nir_op_f2f16_rtne', 157)
nir_op_f2f16_rtz = nir_op.define('nir_op_f2f16_rtz', 158)
nir_op_f2f32 = nir_op.define('nir_op_f2f32', 159)
nir_op_f2f64 = nir_op.define('nir_op_f2f64', 160)
nir_op_f2fmp = nir_op.define('nir_op_f2fmp', 161)
nir_op_f2i1 = nir_op.define('nir_op_f2i1', 162)
nir_op_f2i16 = nir_op.define('nir_op_f2i16', 163)
nir_op_f2i32 = nir_op.define('nir_op_f2i32', 164)
nir_op_f2i64 = nir_op.define('nir_op_f2i64', 165)
nir_op_f2i8 = nir_op.define('nir_op_f2i8', 166)
nir_op_f2imp = nir_op.define('nir_op_f2imp', 167)
nir_op_f2snorm_16_v3d = nir_op.define('nir_op_f2snorm_16_v3d', 168)
nir_op_f2u1 = nir_op.define('nir_op_f2u1', 169)
nir_op_f2u16 = nir_op.define('nir_op_f2u16', 170)
nir_op_f2u32 = nir_op.define('nir_op_f2u32', 171)
nir_op_f2u64 = nir_op.define('nir_op_f2u64', 172)
nir_op_f2u8 = nir_op.define('nir_op_f2u8', 173)
nir_op_f2ump = nir_op.define('nir_op_f2ump', 174)
nir_op_f2unorm_16_v3d = nir_op.define('nir_op_f2unorm_16_v3d', 175)
# Float ALU ops.
nir_op_fabs = nir_op.define('nir_op_fabs', 176)
nir_op_fadd = nir_op.define('nir_op_fadd', 177)
nir_op_fall_equal16 = nir_op.define('nir_op_fall_equal16', 178)
nir_op_fall_equal2 = nir_op.define('nir_op_fall_equal2', 179)
nir_op_fall_equal3 = nir_op.define('nir_op_fall_equal3', 180)
nir_op_fall_equal4 = nir_op.define('nir_op_fall_equal4', 181)
nir_op_fall_equal5 = nir_op.define('nir_op_fall_equal5', 182)
nir_op_fall_equal8 = nir_op.define('nir_op_fall_equal8', 183)
nir_op_fany_nequal16 = nir_op.define('nir_op_fany_nequal16', 184)
nir_op_fany_nequal2 = nir_op.define('nir_op_fany_nequal2', 185)
nir_op_fany_nequal3 = nir_op.define('nir_op_fany_nequal3', 186)
nir_op_fany_nequal4 = nir_op.define('nir_op_fany_nequal4', 187)
nir_op_fany_nequal5 = nir_op.define('nir_op_fany_nequal5', 188)
nir_op_fany_nequal8 = nir_op.define('nir_op_fany_nequal8', 189)
nir_op_fceil = nir_op.define('nir_op_fceil', 190)
nir_op_fclamp_pos = nir_op.define('nir_op_fclamp_pos', 191)
nir_op_fcos = nir_op.define('nir_op_fcos', 192)
nir_op_fcos_amd = nir_op.define('nir_op_fcos_amd', 193)
nir_op_fcos_mdg = nir_op.define('nir_op_fcos_mdg', 194)
nir_op_fcsel = nir_op.define('nir_op_fcsel', 195)
nir_op_fcsel_ge = nir_op.define('nir_op_fcsel_ge', 196)
nir_op_fcsel_gt = nir_op.define('nir_op_fcsel_gt', 197)
nir_op_fdiv = nir_op.define('nir_op_fdiv', 198)
nir_op_fdot16 = nir_op.define('nir_op_fdot16', 199)
nir_op_fdot16_replicated = nir_op.define('nir_op_fdot16_replicated', 200)
nir_op_fdot2 = nir_op.define('nir_op_fdot2', 201)
nir_op_fdot2_replicated = nir_op.define('nir_op_fdot2_replicated', 202)
nir_op_fdot3 = nir_op.define('nir_op_fdot3', 203)
nir_op_fdot3_replicated = nir_op.define('nir_op_fdot3_replicated', 204)
nir_op_fdot4 = nir_op.define('nir_op_fdot4', 205)
nir_op_fdot4_replicated = nir_op.define('nir_op_fdot4_replicated', 206)
nir_op_fdot5 = nir_op.define('nir_op_fdot5', 207)
nir_op_fdot5_replicated = nir_op.define('nir_op_fdot5_replicated', 208)
nir_op_fdot8 = nir_op.define('nir_op_fdot8', 209)
nir_op_fdot8_replicated = nir_op.define('nir_op_fdot8_replicated', 210)
nir_op_fdph = nir_op.define('nir_op_fdph', 211)
nir_op_fdph_replicated = nir_op.define('nir_op_fdph_replicated', 212)
nir_op_feq = nir_op.define('nir_op_feq', 213)
nir_op_feq16 = nir_op.define('nir_op_feq16', 214)
nir_op_feq32 = nir_op.define('nir_op_feq32', 215)
nir_op_feq8 = nir_op.define('nir_op_feq8', 216)
nir_op_fequ = nir_op.define('nir_op_fequ', 217)
nir_op_fequ16 = nir_op.define('nir_op_fequ16', 218)
nir_op_fequ32 = nir_op.define('nir_op_fequ32', 219)
nir_op_fequ8 = nir_op.define('nir_op_fequ8', 220)
nir_op_fexp2 = nir_op.define('nir_op_fexp2', 221)
nir_op_ffloor = nir_op.define('nir_op_ffloor', 222)
nir_op_ffma = nir_op.define('nir_op_ffma', 223)
nir_op_ffmaz = nir_op.define('nir_op_ffmaz', 224)
nir_op_ffract = nir_op.define('nir_op_ffract', 225)
nir_op_fge = nir_op.define('nir_op_fge', 226)
nir_op_fge16 = nir_op.define('nir_op_fge16', 227)
nir_op_fge32 = nir_op.define('nir_op_fge32', 228)
nir_op_fge8 = nir_op.define('nir_op_fge8', 229)
nir_op_fgeu = nir_op.define('nir_op_fgeu', 230)
nir_op_fgeu16 = nir_op.define('nir_op_fgeu16', 231)
nir_op_fgeu32 = nir_op.define('nir_op_fgeu32', 232)
nir_op_fgeu8 = nir_op.define('nir_op_fgeu8', 233)
nir_op_find_lsb = nir_op.define('nir_op_find_lsb', 234)
nir_op_fisfinite = nir_op.define('nir_op_fisfinite', 235)
nir_op_fisfinite32 = nir_op.define('nir_op_fisfinite32', 236)
nir_op_fisnormal = nir_op.define('nir_op_fisnormal', 237)
nir_op_flog2 = nir_op.define('nir_op_flog2', 238)
nir_op_flrp = nir_op.define('nir_op_flrp', 239)
nir_op_flt = nir_op.define('nir_op_flt', 240)
nir_op_flt16 = nir_op.define('nir_op_flt16', 241)
nir_op_flt32 = nir_op.define('nir_op_flt32', 242)
nir_op_flt8 = nir_op.define('nir_op_flt8', 243)
nir_op_fltu = nir_op.define('nir_op_fltu', 244)
nir_op_fltu16 = nir_op.define('nir_op_fltu16', 245)
nir_op_fltu32 = nir_op.define('nir_op_fltu32', 246)
nir_op_fltu8 = nir_op.define('nir_op_fltu8', 247)
nir_op_fmax = nir_op.define('nir_op_fmax', 248)
nir_op_fmax_agx = nir_op.define('nir_op_fmax_agx', 249)
nir_op_fmin = nir_op.define('nir_op_fmin', 250)
nir_op_fmin_agx = nir_op.define('nir_op_fmin_agx', 251)
nir_op_fmod = nir_op.define('nir_op_fmod', 252)
nir_op_fmul = nir_op.define('nir_op_fmul', 253)
nir_op_fmulz = nir_op.define('nir_op_fmulz', 254)
nir_op_fneg = nir_op.define('nir_op_fneg', 255)
nir_op_fneo = nir_op.define('nir_op_fneo', 256)
nir_op_fneo16 = nir_op.define('nir_op_fneo16', 257)
nir_op_fneo32 = nir_op.define('nir_op_fneo32', 258)
nir_op_fneo8 = nir_op.define('nir_op_fneo8', 259)
nir_op_fneu = nir_op.define('nir_op_fneu', 260)
nir_op_fneu16 = nir_op.define('nir_op_fneu16', 261)
nir_op_fneu32 = nir_op.define('nir_op_fneu32', 262)
nir_op_fneu8 = nir_op.define('nir_op_fneu8', 263)
nir_op_ford = nir_op.define('nir_op_ford', 264)
nir_op_ford16 = nir_op.define('nir_op_ford16', 265)
nir_op_ford32 = nir_op.define('nir_op_ford32', 266)
nir_op_ford8 = nir_op.define('nir_op_ford8', 267)
nir_op_fpow = nir_op.define('nir_op_fpow', 268)
nir_op_fquantize2f16 = nir_op.define('nir_op_fquantize2f16', 269)
nir_op_frcp = nir_op.define('nir_op_frcp', 270)
nir_op_frem = nir_op.define('nir_op_frem', 271)
nir_op_frexp_exp = nir_op.define('nir_op_frexp_exp', 272)
nir_op_frexp_sig = nir_op.define('nir_op_frexp_sig', 273)
nir_op_fround_even = nir_op.define('nir_op_fround_even', 274)
nir_op_frsq = nir_op.define('nir_op_frsq', 275)
nir_op_fsat = nir_op.define('nir_op_fsat', 276)
nir_op_fsat_signed = nir_op.define('nir_op_fsat_signed', 277)
nir_op_fsign = nir_op.define('nir_op_fsign', 278)
nir_op_fsin = nir_op.define('nir_op_fsin', 279)
nir_op_fsin_agx = nir_op.define('nir_op_fsin_agx', 280)
nir_op_fsin_amd = nir_op.define('nir_op_fsin_amd', 281)
nir_op_fsin_mdg = nir_op.define('nir_op_fsin_mdg', 282)
nir_op_fsqrt = nir_op.define('nir_op_fsqrt', 283)
nir_op_fsub = nir_op.define('nir_op_fsub', 284)
nir_op_fsum2 = nir_op.define('nir_op_fsum2', 285)
nir_op_fsum3 = nir_op.define('nir_op_fsum3', 286)
nir_op_fsum4 = nir_op.define('nir_op_fsum4', 287)
nir_op_ftrunc = nir_op.define('nir_op_ftrunc', 288)
nir_op_funord = nir_op.define('nir_op_funord', 289)
nir_op_funord16 = nir_op.define('nir_op_funord16', 290)
nir_op_funord32 = nir_op.define('nir_op_funord32', 291)
nir_op_funord8 = nir_op.define('nir_op_funord8', 292)
nir_op_i2f16 = nir_op.define('nir_op_i2f16', 293)
nir_op_i2f32 = nir_op.define('nir_op_i2f32', 294)
nir_op_i2f64 = nir_op.define('nir_op_i2f64', 295)
nir_op_i2fmp = nir_op.define('nir_op_i2fmp', 296)
nir_op_i2i1 = nir_op.define('nir_op_i2i1', 297)
nir_op_i2i16 = nir_op.define('nir_op_i2i16', 298)
nir_op_i2i32 = nir_op.define('nir_op_i2i32', 299)
nir_op_i2i64 = nir_op.define('nir_op_i2i64', 300)
nir_op_i2i8 = nir_op.define('nir_op_i2i8', 301)
nir_op_i2imp = nir_op.define('nir_op_i2imp', 302)
nir_op_i32csel_ge = nir_op.define('nir_op_i32csel_ge', 303)
nir_op_i32csel_gt = nir_op.define('nir_op_i32csel_gt', 304)
nir_op_iabs = nir_op.define('nir_op_iabs', 305)
nir_op_iadd = nir_op.define('nir_op_iadd', 306)
nir_op_iadd3 = nir_op.define('nir_op_iadd3', 307)
nir_op_iadd_sat = nir_op.define('nir_op_iadd_sat', 308)
nir_op_iand = nir_op.define('nir_op_iand', 309)
nir_op_ibfe = nir_op.define('nir_op_ibfe', 310)
nir_op_ibitfield_extract = nir_op.define('nir_op_ibitfield_extract', 311)
nir_op_icsel_eqz = nir_op.define('nir_op_icsel_eqz', 312)
nir_op_idiv = nir_op.define('nir_op_idiv', 313)
nir_op_ieq = nir_op.define('nir_op_ieq', 314)
nir_op_ieq16 = nir_op.define('nir_op_ieq16', 315)
nir_op_ieq32 = nir_op.define('nir_op_ieq32', 316)
nir_op_ieq8 = nir_op.define('nir_op_ieq8', 317)
nir_op_ifind_msb = nir_op.define('nir_op_ifind_msb', 318)
nir_op_ifind_msb_rev = nir_op.define('nir_op_ifind_msb_rev', 319)
nir_op_ige = nir_op.define('nir_op_ige', 320)
nir_op_ige16 = nir_op.define('nir_op_ige16', 321)
nir_op_ige32 = nir_op.define('nir_op_ige32', 322)
nir_op_ige8 = nir_op.define('nir_op_ige8', 323)
nir_op_ihadd = nir_op.define('nir_op_ihadd', 324)
nir_op_ilea_agx = nir_op.define('nir_op_ilea_agx', 325)
nir_op_ilt = nir_op.define('nir_op_ilt', 326)
nir_op_ilt16 = nir_op.define('nir_op_ilt16', 327)
nir_op_ilt32 = nir_op.define('nir_op_ilt32', 328)
nir_op_ilt8 = nir_op.define('nir_op_ilt8', 329)
nir_op_imad = nir_op.define('nir_op_imad', 330)
nir_op_imad24_ir3 = nir_op.define('nir_op_imad24_ir3', 331)
nir_op_imadsh_mix16 = nir_op.define('nir_op_imadsh_mix16', 332)
nir_op_imadshl_agx = nir_op.define('nir_op_imadshl_agx', 333)
nir_op_imax = nir_op.define('nir_op_imax', 334)
nir_op_imin = nir_op.define('nir_op_imin', 335)
nir_op_imod = nir_op.define('nir_op_imod', 336)
nir_op_imsubshl_agx = nir_op.define('nir_op_imsubshl_agx', 337)
nir_op_imul = nir_op.define('nir_op_imul', 338)
nir_op_imul24 = nir_op.define('nir_op_imul24', 339)
nir_op_imul24_relaxed = nir_op.define('nir_op_imul24_relaxed', 340)
nir_op_imul_2x32_64 = nir_op.define('nir_op_imul_2x32_64', 341)
nir_op_imul_32x16 = nir_op.define('nir_op_imul_32x16', 342)
nir_op_imul_high = nir_op.define('nir_op_imul_high', 343)
nir_op_ine = nir_op.define('nir_op_ine', 344)
nir_op_ine16 = nir_op.define('nir_op_ine16', 345)
nir_op_ine32 = nir_op.define('nir_op_ine32', 346)
nir_op_ine8 = nir_op.define('nir_op_ine8', 347)
nir_op_ineg = nir_op.define('nir_op_ineg', 348)
nir_op_inot = nir_op.define('nir_op_inot', 349)
nir_op_insert_u16 = nir_op.define('nir_op_insert_u16', 350)
nir_op_insert_u8 = nir_op.define('nir_op_insert_u8', 351)
nir_op_interleave_agx = nir_op.define('nir_op_interleave_agx', 352)
nir_op_ior = nir_op.define('nir_op_ior', 353)
nir_op_irem = nir_op.define('nir_op_irem', 354)
nir_op_irhadd = nir_op.define('nir_op_irhadd', 355)
nir_op_ishl = nir_op.define('nir_op_ishl', 356)
nir_op_ishr = nir_op.define('nir_op_ishr', 357)
nir_op_isign = nir_op.define('nir_op_isign', 358)
nir_op_isub = nir_op.define('nir_op_isub', 359)
nir_op_isub_sat = nir_op.define('nir_op_isub_sat', 360)
nir_op_ixor = nir_op.define('nir_op_ixor', 361)
nir_op_ldexp = nir_op.define('nir_op_ldexp', 362)
nir_op_ldexp16_pan = nir_op.define('nir_op_ldexp16_pan', 363)
nir_op_lea_nv = nir_op.define('nir_op_lea_nv', 364)
nir_op_mov = nir_op.define('nir_op_mov', 365)
nir_op_mqsad_4x8 = nir_op.define('nir_op_mqsad_4x8', 366)
nir_op_msad_4x8 = nir_op.define('nir_op_msad_4x8', 367)
nir_op_pack_2x16_to_snorm_2x8_v3d = nir_op.define('nir_op_pack_2x16_to_snorm_2x8_v3d', 368)
nir_op_pack_2x16_to_unorm_10_2_v3d = nir_op.define('nir_op_pack_2x16_to_unorm_10_2_v3d', 369)
nir_op_pack_2x16_to_unorm_2x10_v3d = nir_op.define('nir_op_pack_2x16_to_unorm_2x10_v3d', 370)
nir_op_pack_2x16_to_unorm_2x8_v3d = nir_op.define('nir_op_pack_2x16_to_unorm_2x8_v3d', 371)
nir_op_pack_2x32_to_2x16_v3d = nir_op.define('nir_op_pack_2x32_to_2x16_v3d', 372)
nir_op_pack_32_2x16 = nir_op.define('nir_op_pack_32_2x16', 373)
nir_op_pack_32_2x16_split = nir_op.define('nir_op_pack_32_2x16_split', 374)
nir_op_pack_32_4x8 = nir_op.define('nir_op_pack_32_4x8', 375)
nir_op_pack_32_4x8_split = nir_op.define('nir_op_pack_32_4x8_split', 376)
nir_op_pack_32_to_r11g11b10_v3d = nir_op.define('nir_op_pack_32_to_r11g11b10_v3d', 377)
nir_op_pack_4x16_to_4x8_v3d = nir_op.define('nir_op_pack_4x16_to_4x8_v3d', 378)
nir_op_pack_64_2x32 = nir_op.define('nir_op_pack_64_2x32', 379)
nir_op_pack_64_2x32_split = nir_op.define('nir_op_pack_64_2x32_split', 380)
nir_op_pack_64_4x16 = nir_op.define('nir_op_pack_64_4x16', 381)
nir_op_pack_double_2x32_dxil = nir_op.define('nir_op_pack_double_2x32_dxil', 382)
nir_op_pack_half_2x16 = nir_op.define('nir_op_pack_half_2x16', 383)
nir_op_pack_half_2x16_rtz_split = nir_op.define('nir_op_pack_half_2x16_rtz_split', 384)
nir_op_pack_half_2x16_split = nir_op.define('nir_op_pack_half_2x16_split', 385)
nir_op_pack_sint_2x16 = nir_op.define('nir_op_pack_sint_2x16', 386)
nir_op_pack_snorm_2x16 = nir_op.define('nir_op_pack_snorm_2x16', 387)
nir_op_pack_snorm_4x8 = nir_op.define('nir_op_pack_snorm_4x8', 388)
nir_op_pack_uint_2x16 = nir_op.define('nir_op_pack_uint_2x16', 389)
nir_op_pack_uint_32_to_r10g10b10a2_v3d = nir_op.define('nir_op_pack_uint_32_to_r10g10b10a2_v3d', 390)
nir_op_pack_unorm_2x16 = nir_op.define('nir_op_pack_unorm_2x16', 391)
nir_op_pack_unorm_4x8 = nir_op.define('nir_op_pack_unorm_4x8', 392)
nir_op_pack_uvec2_to_uint = nir_op.define('nir_op_pack_uvec2_to_uint', 393)
nir_op_pack_uvec4_to_uint = nir_op.define('nir_op_pack_uvec4_to_uint', 394)
nir_op_prmt_nv = nir_op.define('nir_op_prmt_nv', 395)
nir_op_sdot_2x16_iadd = nir_op.define('nir_op_sdot_2x16_iadd', 396)
nir_op_sdot_2x16_iadd_sat = nir_op.define('nir_op_sdot_2x16_iadd_sat', 397)
nir_op_sdot_4x8_iadd = nir_op.define('nir_op_sdot_4x8_iadd', 398)
nir_op_sdot_4x8_iadd_sat = nir_op.define('nir_op_sdot_4x8_iadd_sat', 399)
nir_op_seq = nir_op.define('nir_op_seq', 400)
nir_op_sge = nir_op.define('nir_op_sge', 401)
nir_op_shfr = nir_op.define('nir_op_shfr', 402)
nir_op_shlg_ir3 = nir_op.define('nir_op_shlg_ir3', 403)
nir_op_shlm_ir3 = nir_op.define('nir_op_shlm_ir3', 404)
nir_op_shrg_ir3 = nir_op.define('nir_op_shrg_ir3', 405)
nir_op_shrm_ir3 = nir_op.define('nir_op_shrm_ir3', 406)
nir_op_slt = nir_op.define('nir_op_slt', 407)
nir_op_sne = nir_op.define('nir_op_sne', 408)
nir_op_sudot_4x8_iadd = nir_op.define('nir_op_sudot_4x8_iadd', 409)
nir_op_sudot_4x8_iadd_sat = nir_op.define('nir_op_sudot_4x8_iadd_sat', 410)
nir_op_u2f16 = nir_op.define('nir_op_u2f16', 411)
nir_op_u2f32 = nir_op.define('nir_op_u2f32', 412)
nir_op_u2f64 = nir_op.define('nir_op_u2f64', 413)
nir_op_u2fmp = nir_op.define('nir_op_u2fmp', 414)
nir_op_u2u1 = nir_op.define('nir_op_u2u1', 415)
nir_op_u2u16 = nir_op.define('nir_op_u2u16', 416)
nir_op_u2u32 = nir_op.define('nir_op_u2u32', 417)
nir_op_u2u64 = nir_op.define('nir_op_u2u64', 418)
nir_op_u2u8 = nir_op.define('nir_op_u2u8', 419)
nir_op_uabs_isub = nir_op.define('nir_op_uabs_isub', 420)
nir_op_uabs_usub = nir_op.define('nir_op_uabs_usub', 421)
nir_op_uadd_carry = nir_op.define('nir_op_uadd_carry', 422)
nir_op_uadd_sat = nir_op.define('nir_op_uadd_sat', 423)
nir_op_ubfe = nir_op.define('nir_op_ubfe', 424)
nir_op_ubitfield_extract = nir_op.define('nir_op_ubitfield_extract', 425)
nir_op_uclz = nir_op.define('nir_op_uclz', 426)
nir_op_udiv = nir_op.define('nir_op_udiv', 427)
nir_op_udiv_aligned_4 = nir_op.define('nir_op_udiv_aligned_4', 428)
nir_op_udot_2x16_uadd = nir_op.define('nir_op_udot_2x16_uadd', 429)
nir_op_udot_2x16_uadd_sat = nir_op.define('nir_op_udot_2x16_uadd_sat', 430)
nir_op_udot_4x8_uadd = nir_op.define('nir_op_udot_4x8_uadd', 431)
nir_op_udot_4x8_uadd_sat = nir_op.define('nir_op_udot_4x8_uadd_sat', 432)
nir_op_ufind_msb = nir_op.define('nir_op_ufind_msb', 433)
nir_op_ufind_msb_rev = nir_op.define('nir_op_ufind_msb_rev', 434)
nir_op_uge = nir_op.define('nir_op_uge', 435)
nir_op_uge16 = nir_op.define('nir_op_uge16', 436)
nir_op_uge32 = nir_op.define('nir_op_uge32', 437)
nir_op_uge8 = nir_op.define('nir_op_uge8', 438)
nir_op_uhadd = nir_op.define('nir_op_uhadd', 439)
nir_op_ulea_agx = nir_op.define('nir_op_ulea_agx', 440)
nir_op_ult = nir_op.define('nir_op_ult', 441)
nir_op_ult16 = nir_op.define('nir_op_ult16', 442)
nir_op_ult32 = nir_op.define('nir_op_ult32', 443)
nir_op_ult8 = nir_op.define('nir_op_ult8', 444)
nir_op_umad24 = nir_op.define('nir_op_umad24', 445)
nir_op_umad24_relaxed = nir_op.define('nir_op_umad24_relaxed', 446)
nir_op_umax = nir_op.define('nir_op_umax', 447)
nir_op_umax_4x8_vc4 = nir_op.define('nir_op_umax_4x8_vc4', 448)
nir_op_umin = nir_op.define('nir_op_umin', 449)
nir_op_umin_4x8_vc4 = nir_op.define('nir_op_umin_4x8_vc4', 450)
nir_op_umod = nir_op.define('nir_op_umod', 451)
nir_op_umul24 = nir_op.define('nir_op_umul24', 452)
nir_op_umul24_relaxed = nir_op.define('nir_op_umul24_relaxed', 453)
nir_op_umul_2x32_64 = nir_op.define('nir_op_umul_2x32_64', 454)
nir_op_umul_32x16 = nir_op.define('nir_op_umul_32x16', 455)
nir_op_umul_high = nir_op.define('nir_op_umul_high', 456)
nir_op_umul_low = nir_op.define('nir_op_umul_low', 457)
nir_op_umul_unorm_4x8_vc4 = nir_op.define('nir_op_umul_unorm_4x8_vc4', 458)
nir_op_unpack_32_2x16 = nir_op.define('nir_op_unpack_32_2x16', 459)
nir_op_unpack_32_2x16_split_x = nir_op.define('nir_op_unpack_32_2x16_split_x', 460)
nir_op_unpack_32_2x16_split_y = nir_op.define('nir_op_unpack_32_2x16_split_y', 461)
nir_op_unpack_32_4x8 = nir_op.define('nir_op_unpack_32_4x8', 462)
nir_op_unpack_64_2x32 = nir_op.define('nir_op_unpack_64_2x32', 463)
nir_op_unpack_64_2x32_split_x = nir_op.define('nir_op_unpack_64_2x32_split_x', 464)
nir_op_unpack_64_2x32_split_y = nir_op.define('nir_op_unpack_64_2x32_split_y', 465)
nir_op_unpack_64_4x16 = nir_op.define('nir_op_unpack_64_4x16', 466)
nir_op_unpack_double_2x32_dxil = nir_op.define('nir_op_unpack_double_2x32_dxil', 467)
nir_op_unpack_half_2x16 = nir_op.define('nir_op_unpack_half_2x16', 468)
nir_op_unpack_half_2x16_split_x = nir_op.define('nir_op_unpack_half_2x16_split_x', 469)
nir_op_unpack_half_2x16_split_y = nir_op.define('nir_op_unpack_half_2x16_split_y', 470)
nir_op_unpack_snorm_2x16 = nir_op.define('nir_op_unpack_snorm_2x16', 471)
nir_op_unpack_snorm_4x8 = nir_op.define('nir_op_unpack_snorm_4x8', 472)
nir_op_unpack_unorm_2x16 = nir_op.define('nir_op_unpack_unorm_2x16', 473)
nir_op_unpack_unorm_4x8 = nir_op.define('nir_op_unpack_unorm_4x8', 474)
nir_op_urhadd = nir_op.define('nir_op_urhadd', 475)
nir_op_urol = nir_op.define('nir_op_urol', 476)
nir_op_uror = nir_op.define('nir_op_uror', 477)
nir_op_usadd_4x8_vc4 = nir_op.define('nir_op_usadd_4x8_vc4', 478)
nir_op_ushr = nir_op.define('nir_op_ushr', 479)
nir_op_ussub_4x8_vc4 = nir_op.define('nir_op_ussub_4x8_vc4', 480)
nir_op_usub_borrow = nir_op.define('nir_op_usub_borrow', 481)
nir_op_usub_sat = nir_op.define('nir_op_usub_sat', 482)
nir_op_vec16 = nir_op.define('nir_op_vec16', 483)
nir_op_vec2 = nir_op.define('nir_op_vec2', 484)
nir_op_vec3 = nir_op.define('nir_op_vec3', 485)
nir_op_vec4 = nir_op.define('nir_op_vec4', 486)
nir_op_vec5 = nir_op.define('nir_op_vec5', 487)
nir_op_vec8 = nir_op.define('nir_op_vec8', 488)
nir_last_opcode = nir_op.define('nir_last_opcode', 488)
nir_num_opcodes = nir_op.define('nir_num_opcodes', 489)
# FFI stub: returns the nir_op that converts from alu type `src` to `dst`
# using rounding mode `rnd`. The body is supplied by the bound shared library.
@dll.bind
def nir_type_conversion_op(src:nir_alu_type, dst:nir_alu_type, rnd:nir_rounding_mode) -> nir_op: ...
# nir_atomic_op: enum of atomic memory operations (integer/float min/max,
# add, bitwise ops, exchange/compare-exchange, wrap inc/dec). Values are ABI
# facts mirrored from the C enum; do not edit by hand.
class nir_atomic_op(Annotated[int, ctypes.c_uint32], c.Enum): pass
nir_atomic_op_iadd = nir_atomic_op.define('nir_atomic_op_iadd', 0)
nir_atomic_op_imin = nir_atomic_op.define('nir_atomic_op_imin', 1)
nir_atomic_op_umin = nir_atomic_op.define('nir_atomic_op_umin', 2)
nir_atomic_op_imax = nir_atomic_op.define('nir_atomic_op_imax', 3)
nir_atomic_op_umax = nir_atomic_op.define('nir_atomic_op_umax', 4)
nir_atomic_op_iand = nir_atomic_op.define('nir_atomic_op_iand', 5)
nir_atomic_op_ior = nir_atomic_op.define('nir_atomic_op_ior', 6)
nir_atomic_op_ixor = nir_atomic_op.define('nir_atomic_op_ixor', 7)
nir_atomic_op_xchg = nir_atomic_op.define('nir_atomic_op_xchg', 8)
nir_atomic_op_fadd = nir_atomic_op.define('nir_atomic_op_fadd', 9)
nir_atomic_op_fmin = nir_atomic_op.define('nir_atomic_op_fmin', 10)
nir_atomic_op_fmax = nir_atomic_op.define('nir_atomic_op_fmax', 11)
nir_atomic_op_cmpxchg = nir_atomic_op.define('nir_atomic_op_cmpxchg', 12)
nir_atomic_op_fcmpxchg = nir_atomic_op.define('nir_atomic_op_fcmpxchg', 13)
nir_atomic_op_inc_wrap = nir_atomic_op.define('nir_atomic_op_inc_wrap', 14)
nir_atomic_op_dec_wrap = nir_atomic_op.define('nir_atomic_op_dec_wrap', 15)
nir_atomic_op_ordered_add_gfx12_amd = nir_atomic_op.define('nir_atomic_op_ordered_add_gfx12_amd', 16)
# FFI stub: maps an atomic op to the corresponding ALU nir_op.
@dll.bind
def nir_atomic_op_to_alu(op:nir_atomic_op) -> nir_op: ...
# FFI stub: returns the nir_op_vecN opcode for the given component count.
@dll.bind
def nir_op_vec(num_components:Annotated[int, ctypes.c_uint32]) -> nir_op: ...
# FFI stub: true when `op` is one of the nir_op_vecN opcodes.
@dll.bind
def nir_op_is_vec(op:nir_op) -> Annotated[bool, ctypes.c_bool]: ...
# nir_op_algebraic_property: bit flags describing algebraic traits of an
# opcode (values are powers of two, combinable as a mask).
class nir_op_algebraic_property(Annotated[int, ctypes.c_uint32], c.Enum): pass
NIR_OP_IS_2SRC_COMMUTATIVE = nir_op_algebraic_property.define('NIR_OP_IS_2SRC_COMMUTATIVE', 1)
NIR_OP_IS_ASSOCIATIVE = nir_op_algebraic_property.define('NIR_OP_IS_ASSOCIATIVE', 2)
NIR_OP_IS_SELECTION = nir_op_algebraic_property.define('NIR_OP_IS_SELECTION', 4)
# Layout mirror of the C struct nir_op_info (per-opcode metadata: name,
# input/output counts, sizes and types, algebraic flags). The integer in each
# Annotated appears to be the field's byte offset — TODO confirm against the
# `c` record protocol. Offsets/SIZE are generated; do not edit by hand.
@c.record
class struct_nir_op_info(c.Struct):
  SIZE = 56
  name: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 0]
  num_inputs: Annotated[uint8_t, 8]
  output_size: Annotated[uint8_t, 9]
  output_type: Annotated[nir_alu_type, 10]
  input_sizes: Annotated[c.Array[uint8_t, Literal[16]], 11]
  input_types: Annotated[c.Array[nir_alu_type, Literal[16]], 27]
  algebraic_properties: Annotated[nir_op_algebraic_property, 44]
  is_conversion: Annotated[Annotated[bool, ctypes.c_bool], 48]
nir_op_info: TypeAlias = struct_nir_op_info
# Best-effort lookup of the per-opcode info table exported by the library.
# Array length 489 matches nir_num_opcodes. Silently skipped when the symbol
# is absent (deliberate: optional exported data, not an error).
try: nir_op_infos = c.Array[nir_op_info, Literal[489]].in_dll(dll, 'nir_op_infos') # type: ignore
except (ValueError,AttributeError): pass
# Layout mirror of the C struct nir_alu_instr (an ALU instruction: base
# instr header, opcode, flag bits, SSA def, trailing flexible src array).
# Fields with four Annotated ints look like bitfields — presumably
# (byte offset, bit width, bit offset); TODO confirm. Generated; do not edit.
@c.record
class struct_nir_alu_instr(c.Struct):
  SIZE = 72
  instr: Annotated[nir_instr, 0]
  op: Annotated[nir_op, 32]
  exact: Annotated[Annotated[bool, ctypes.c_bool], 36, 1, 0]
  no_signed_wrap: Annotated[Annotated[bool, ctypes.c_bool], 36, 1, 1]
  no_unsigned_wrap: Annotated[Annotated[bool, ctypes.c_bool], 36, 1, 2]
  fp_fast_math: Annotated[uint32_t, 36, 9, 3]
  _def: Annotated[nir_def, 40]
  # Literal[0] length: C flexible array member — actual count depends on the op.
  src: Annotated[c.Array[nir_alu_src, Literal[0]], 72]
nir_alu_instr: TypeAlias = struct_nir_alu_instr
# FFI stubs for ALU-source helpers; all bodies live in the bound library.
# Copies an alu src (dest <- src).
@dll.bind
def nir_alu_src_copy(dest:c.POINTER[nir_alu_src], src:c.POINTER[nir_alu_src]) -> None: ...
# Mask of components of source `src` that `instr` reads.
@dll.bind
def nir_alu_instr_src_read_mask(instr:c.POINTER[nir_alu_instr], src:Annotated[int, ctypes.c_uint32]) -> nir_component_mask_t: ...
# Number of components consumed from source `src`.
@dll.bind
def nir_ssa_alu_instr_src_components(instr:c.POINTER[nir_alu_instr], src:Annotated[int, ctypes.c_uint32]) -> Annotated[int, ctypes.c_uint32]: ...
# True when the instruction is a comparison op.
@dll.bind
def nir_alu_instr_is_comparison(instr:c.POINTER[nir_alu_instr]) -> Annotated[bool, ctypes.c_bool]: ...
# True when c1 == -c2 under the interpretation `full_type`.
@dll.bind
def nir_const_value_negative_equal(c1:nir_const_value, c2:nir_const_value, full_type:nir_alu_type) -> Annotated[bool, ctypes.c_bool]: ...
# Source-equality comparisons between two ALU instructions.
@dll.bind
def nir_alu_srcs_equal(alu1:c.POINTER[nir_alu_instr], alu2:c.POINTER[nir_alu_instr], src1:Annotated[int, ctypes.c_uint32], src2:Annotated[int, ctypes.c_uint32]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_alu_srcs_negative_equal_typed(alu1:c.POINTER[nir_alu_instr], alu2:c.POINTER[nir_alu_instr], src1:Annotated[int, ctypes.c_uint32], src2:Annotated[int, ctypes.c_uint32], base_type:nir_alu_type) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_alu_srcs_negative_equal(alu1:c.POINTER[nir_alu_instr], alu2:c.POINTER[nir_alu_instr], src1:Annotated[int, ctypes.c_uint32], src2:Annotated[int, ctypes.c_uint32]) -> Annotated[bool, ctypes.c_bool]: ...
# True when source `srcn` is a trivial (unmodified) SSA reference.
@dll.bind
def nir_alu_src_is_trivial_ssa(alu:c.POINTER[nir_alu_instr], srcn:Annotated[int, ctypes.c_uint32]) -> Annotated[bool, ctypes.c_bool]: ...
# nir_deref_type: kind of a dereference-chain instruction (variable root,
# array index, wildcard, pointer-as-array, struct member, cast).
class nir_deref_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
nir_deref_type_var = nir_deref_type.define('nir_deref_type_var', 0)
nir_deref_type_array = nir_deref_type.define('nir_deref_type_array', 1)
nir_deref_type_array_wildcard = nir_deref_type.define('nir_deref_type_array_wildcard', 2)
nir_deref_type_ptr_as_array = nir_deref_type.define('nir_deref_type_ptr_as_array', 3)
nir_deref_type_struct = nir_deref_type.define('nir_deref_type_struct', 4)
nir_deref_type_cast = nir_deref_type.define('nir_deref_type_cast', 5)
# Layout mirror of the C struct nir_deref_instr. Fields sharing a byte offset
# (var/parent at 48; arr/strct/cast at 80) presumably mirror C unions — which
# member is live depends on deref_type; TODO confirm against the C header.
# Generated; do not edit by hand.
@c.record
class struct_nir_deref_instr(c.Struct):
  SIZE = 152
  instr: Annotated[nir_instr, 0]
  deref_type: Annotated[nir_deref_type, 32]
  modes: Annotated[nir_variable_mode, 36]
  type: Annotated[c.POINTER[struct_glsl_type], 40]
  var: Annotated[c.POINTER[nir_variable], 48]
  parent: Annotated[nir_src, 48]
  arr: Annotated[struct_nir_deref_instr_arr, 80]
  strct: Annotated[struct_nir_deref_instr_strct, 80]
  cast: Annotated[struct_nir_deref_instr_cast, 80]
  _def: Annotated[nir_def, 120]
# nir_variable_mode: bit flags for a variable's storage class. Individual
# modes are single bits; the trailing entries are derived masks/counts
# (e.g. nir_var_all = all bits set). Values are ABI facts; do not edit.
class nir_variable_mode(Annotated[int, ctypes.c_uint32], c.Enum): pass
nir_var_system_value = nir_variable_mode.define('nir_var_system_value', 1)
nir_var_uniform = nir_variable_mode.define('nir_var_uniform', 2)
nir_var_shader_in = nir_variable_mode.define('nir_var_shader_in', 4)
nir_var_shader_out = nir_variable_mode.define('nir_var_shader_out', 8)
nir_var_image = nir_variable_mode.define('nir_var_image', 16)
nir_var_shader_call_data = nir_variable_mode.define('nir_var_shader_call_data', 32)
nir_var_ray_hit_attrib = nir_variable_mode.define('nir_var_ray_hit_attrib', 64)
nir_var_mem_ubo = nir_variable_mode.define('nir_var_mem_ubo', 128)
nir_var_mem_push_const = nir_variable_mode.define('nir_var_mem_push_const', 256)
nir_var_mem_ssbo = nir_variable_mode.define('nir_var_mem_ssbo', 512)
nir_var_mem_constant = nir_variable_mode.define('nir_var_mem_constant', 1024)
nir_var_mem_task_payload = nir_variable_mode.define('nir_var_mem_task_payload', 2048)
nir_var_mem_node_payload = nir_variable_mode.define('nir_var_mem_node_payload', 4096)
nir_var_mem_node_payload_in = nir_variable_mode.define('nir_var_mem_node_payload_in', 8192)
nir_var_function_in = nir_variable_mode.define('nir_var_function_in', 16384)
nir_var_function_out = nir_variable_mode.define('nir_var_function_out', 32768)
nir_var_function_inout = nir_variable_mode.define('nir_var_function_inout', 65536)
nir_var_shader_temp = nir_variable_mode.define('nir_var_shader_temp', 131072)
nir_var_function_temp = nir_variable_mode.define('nir_var_function_temp', 262144)
nir_var_mem_shared = nir_variable_mode.define('nir_var_mem_shared', 524288)
nir_var_mem_global = nir_variable_mode.define('nir_var_mem_global', 1048576)
# Composite masks / counts derived from the single-bit modes above.
nir_var_mem_generic = nir_variable_mode.define('nir_var_mem_generic', 1966080)
nir_var_read_only_modes = nir_variable_mode.define('nir_var_read_only_modes', 1159)
nir_var_vec_indexable_modes = nir_variable_mode.define('nir_var_vec_indexable_modes', 1969033)
nir_num_variable_modes = nir_variable_mode.define('nir_num_variable_modes', 21)
nir_var_all = nir_variable_mode.define('nir_var_all', 2097151)
# Payload structs for the deref_type-dependent union inside nir_deref_instr
# (see the arr/strct/cast members there). Annotated ints appear to be byte
# offsets — TODO confirm. Generated; do not edit by hand.
# Array-deref payload: index source plus an in-bounds hint.
@c.record
class struct_nir_deref_instr_arr(c.Struct):
  SIZE = 40
  index: Annotated[nir_src, 0]
  in_bounds: Annotated[Annotated[bool, ctypes.c_bool], 32]
# Struct-member-deref payload: member index.
@c.record
class struct_nir_deref_instr_strct(c.Struct):
  SIZE = 4
  index: Annotated[Annotated[int, ctypes.c_uint32], 0]
# Cast-deref payload: pointer stride and alignment info.
@c.record
class struct_nir_deref_instr_cast(c.Struct):
  SIZE = 12
  ptr_stride: Annotated[Annotated[int, ctypes.c_uint32], 0]
  align_mul: Annotated[Annotated[int, ctypes.c_uint32], 4]
  align_offset: Annotated[Annotated[int, ctypes.c_uint32], 8]
nir_deref_instr: TypeAlias = struct_nir_deref_instr
# FFI stubs for deref-instruction queries; bodies live in the bound library.
@dll.bind
def nir_deref_cast_is_trivial(cast:c.POINTER[nir_deref_instr]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_deref_instr_has_indirect(instr:c.POINTER[nir_deref_instr]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_deref_instr_is_known_out_of_bounds(instr:c.POINTER[nir_deref_instr]) -> Annotated[bool, ctypes.c_bool]: ...
# Option flags for nir_deref_instr_has_complex_use (bitmask, combinable).
class nir_deref_instr_has_complex_use_options(Annotated[int, ctypes.c_uint32], c.Enum): pass
nir_deref_instr_has_complex_use_allow_memcpy_src = nir_deref_instr_has_complex_use_options.define('nir_deref_instr_has_complex_use_allow_memcpy_src', 1)
nir_deref_instr_has_complex_use_allow_memcpy_dst = nir_deref_instr_has_complex_use_options.define('nir_deref_instr_has_complex_use_allow_memcpy_dst', 2)
nir_deref_instr_has_complex_use_allow_atomics = nir_deref_instr_has_complex_use_options.define('nir_deref_instr_has_complex_use_allow_atomics', 4)
@dll.bind
def nir_deref_instr_has_complex_use(instr:c.POINTER[nir_deref_instr], opts:nir_deref_instr_has_complex_use_options) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_deref_instr_remove_if_unused(instr:c.POINTER[nir_deref_instr]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_deref_instr_array_stride(instr:c.POINTER[nir_deref_instr]) -> Annotated[int, ctypes.c_uint32]: ...
# Layout mirror of the C struct nir_call_instr (a call: callee pointer,
# optional indirect callee src, parameter count, trailing flexible param
# array — Literal[0] marks a C flexible array member). Generated; do not edit.
@c.record
class struct_nir_call_instr(c.Struct):
  SIZE = 80
  instr: Annotated[nir_instr, 0]
  callee: Annotated[c.POINTER[nir_function], 32]
  indirect_callee: Annotated[nir_src, 40]
  num_params: Annotated[Annotated[int, ctypes.c_uint32], 72]
  params: Annotated[c.Array[nir_src, Literal[0]], 80]
# Layout mirror of the C struct nir_function: linked-list node, identity
# (name/shader), signature (num_params/params), implementation pointer, and
# assorted driver/inlining flags. Annotated ints appear to be byte offsets —
# TODO confirm. Generated; do not edit by hand.
@c.record
class struct_nir_function(c.Struct):
  SIZE = 104
  node: Annotated[struct_exec_node, 0]
  name: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 16]
  shader: Annotated[c.POINTER[nir_shader], 24]
  num_params: Annotated[Annotated[int, ctypes.c_uint32], 32]
  params: Annotated[c.POINTER[nir_parameter], 40]
  impl: Annotated[c.POINTER[nir_function_impl], 48]
  driver_attributes: Annotated[uint32_t, 56]
  is_entrypoint: Annotated[Annotated[bool, ctypes.c_bool], 60]
  is_exported: Annotated[Annotated[bool, ctypes.c_bool], 61]
  is_preamble: Annotated[Annotated[bool, ctypes.c_bool], 62]
  should_inline: Annotated[Annotated[bool, ctypes.c_bool], 63]
  dont_inline: Annotated[Annotated[bool, ctypes.c_bool], 64]
  workgroup_size: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[3]], 68]
  is_subroutine: Annotated[Annotated[bool, ctypes.c_bool], 80]
  is_tmp_globals_wrapper: Annotated[Annotated[bool, ctypes.c_bool], 81]
  num_subroutine_types: Annotated[Annotated[int, ctypes.c_int32], 84]
  subroutine_types: Annotated[c.POINTER[c.POINTER[struct_glsl_type]], 88]
  subroutine_index: Annotated[Annotated[int, ctypes.c_int32], 96]
  pass_flags: Annotated[uint32_t, 100]
nir_function: TypeAlias = struct_nir_function
# Layout mirror of the C struct nir_shader: GC context, variable and function
# lists, compiler options, shader_info, I/O counts, constant data, xfb and
# printf metadata. Annotated ints appear to be byte offsets — TODO confirm.
# Generated; do not edit by hand.
@c.record
class struct_nir_shader(c.Struct):
  SIZE = 520
  gctx: Annotated[c.POINTER[gc_ctx], 0]
  variables: Annotated[struct_exec_list, 8]
  options: Annotated[c.POINTER[nir_shader_compiler_options], 40]
  info: Annotated[struct_shader_info, 48]
  functions: Annotated[struct_exec_list, 416]
  num_inputs: Annotated[Annotated[int, ctypes.c_uint32], 448]
  num_uniforms: Annotated[Annotated[int, ctypes.c_uint32], 452]
  num_outputs: Annotated[Annotated[int, ctypes.c_uint32], 456]
  global_mem_size: Annotated[Annotated[int, ctypes.c_uint32], 460]
  scratch_size: Annotated[Annotated[int, ctypes.c_uint32], 464]
  constant_data: Annotated[ctypes.c_void_p, 472]
  constant_data_size: Annotated[Annotated[int, ctypes.c_uint32], 480]
  xfb_info: Annotated[c.POINTER[nir_xfb_info], 488]
  printf_info_count: Annotated[Annotated[int, ctypes.c_uint32], 496]
  printf_info: Annotated[c.POINTER[u_printf_info], 504]
  has_debug_info: Annotated[Annotated[bool, ctypes.c_bool], 512]
nir_shader: TypeAlias = struct_nir_shader
# Opaque handle: the gc_ctx layout is never accessed from Python, so a bare
# ctypes.Structure with no fields suffices (pointer-only use).
class struct_gc_ctx(ctypes.Structure): pass
gc_ctx: TypeAlias = struct_gc_ctx
# Binding for Mesa's `nir_shader_compiler_options` — the large per-backend
# capability/lowering table (`lower_*` = backend wants the op lowered away,
# `has_*` = backend natively supports the op).
# NOTE(review): machine-generated layout; each field is
# `Annotated[ctype, byte_offset]` and SIZE is the C sizeof — keep in sync with
# the generator, do not hand-edit the numbers.
@c.record
class struct_nir_shader_compiler_options(c.Struct):
  SIZE = 248
  lower_fdiv: Annotated[Annotated[bool, ctypes.c_bool], 0]
  lower_ffma16: Annotated[Annotated[bool, ctypes.c_bool], 1]
  lower_ffma32: Annotated[Annotated[bool, ctypes.c_bool], 2]
  lower_ffma64: Annotated[Annotated[bool, ctypes.c_bool], 3]
  fuse_ffma16: Annotated[Annotated[bool, ctypes.c_bool], 4]
  fuse_ffma32: Annotated[Annotated[bool, ctypes.c_bool], 5]
  fuse_ffma64: Annotated[Annotated[bool, ctypes.c_bool], 6]
  lower_flrp16: Annotated[Annotated[bool, ctypes.c_bool], 7]
  lower_flrp32: Annotated[Annotated[bool, ctypes.c_bool], 8]
  lower_flrp64: Annotated[Annotated[bool, ctypes.c_bool], 9]
  lower_fpow: Annotated[Annotated[bool, ctypes.c_bool], 10]
  lower_fsat: Annotated[Annotated[bool, ctypes.c_bool], 11]
  lower_fsqrt: Annotated[Annotated[bool, ctypes.c_bool], 12]
  lower_sincos: Annotated[Annotated[bool, ctypes.c_bool], 13]
  lower_fmod: Annotated[Annotated[bool, ctypes.c_bool], 14]
  lower_bitfield_extract8: Annotated[Annotated[bool, ctypes.c_bool], 15]
  lower_bitfield_extract16: Annotated[Annotated[bool, ctypes.c_bool], 16]
  lower_bitfield_extract: Annotated[Annotated[bool, ctypes.c_bool], 17]
  lower_bitfield_insert: Annotated[Annotated[bool, ctypes.c_bool], 18]
  lower_bitfield_reverse: Annotated[Annotated[bool, ctypes.c_bool], 19]
  lower_bit_count: Annotated[Annotated[bool, ctypes.c_bool], 20]
  lower_ifind_msb: Annotated[Annotated[bool, ctypes.c_bool], 21]
  lower_ufind_msb: Annotated[Annotated[bool, ctypes.c_bool], 22]
  lower_find_lsb: Annotated[Annotated[bool, ctypes.c_bool], 23]
  lower_uadd_carry: Annotated[Annotated[bool, ctypes.c_bool], 24]
  lower_usub_borrow: Annotated[Annotated[bool, ctypes.c_bool], 25]
  lower_mul_high: Annotated[Annotated[bool, ctypes.c_bool], 26]
  lower_mul_high16: Annotated[Annotated[bool, ctypes.c_bool], 27]
  lower_fneg: Annotated[Annotated[bool, ctypes.c_bool], 28]
  lower_ineg: Annotated[Annotated[bool, ctypes.c_bool], 29]
  lower_fisnormal: Annotated[Annotated[bool, ctypes.c_bool], 30]
  lower_scmp: Annotated[Annotated[bool, ctypes.c_bool], 31]
  lower_vector_cmp: Annotated[Annotated[bool, ctypes.c_bool], 32]
  lower_bitops: Annotated[Annotated[bool, ctypes.c_bool], 33]
  lower_isign: Annotated[Annotated[bool, ctypes.c_bool], 34]
  lower_fsign: Annotated[Annotated[bool, ctypes.c_bool], 35]
  lower_iabs: Annotated[Annotated[bool, ctypes.c_bool], 36]
  lower_umax: Annotated[Annotated[bool, ctypes.c_bool], 37]
  lower_umin: Annotated[Annotated[bool, ctypes.c_bool], 38]
  lower_fminmax_signed_zero: Annotated[Annotated[bool, ctypes.c_bool], 39]
  lower_fdph: Annotated[Annotated[bool, ctypes.c_bool], 40]
  fdot_replicates: Annotated[Annotated[bool, ctypes.c_bool], 41]
  lower_ffloor: Annotated[Annotated[bool, ctypes.c_bool], 42]
  lower_ffract: Annotated[Annotated[bool, ctypes.c_bool], 43]
  lower_fceil: Annotated[Annotated[bool, ctypes.c_bool], 44]
  lower_ftrunc: Annotated[Annotated[bool, ctypes.c_bool], 45]
  lower_fround_even: Annotated[Annotated[bool, ctypes.c_bool], 46]
  lower_ldexp: Annotated[Annotated[bool, ctypes.c_bool], 47]
  lower_pack_half_2x16: Annotated[Annotated[bool, ctypes.c_bool], 48]
  lower_pack_unorm_2x16: Annotated[Annotated[bool, ctypes.c_bool], 49]
  lower_pack_snorm_2x16: Annotated[Annotated[bool, ctypes.c_bool], 50]
  lower_pack_unorm_4x8: Annotated[Annotated[bool, ctypes.c_bool], 51]
  lower_pack_snorm_4x8: Annotated[Annotated[bool, ctypes.c_bool], 52]
  lower_pack_64_2x32: Annotated[Annotated[bool, ctypes.c_bool], 53]
  lower_pack_64_4x16: Annotated[Annotated[bool, ctypes.c_bool], 54]
  lower_pack_32_2x16: Annotated[Annotated[bool, ctypes.c_bool], 55]
  lower_pack_64_2x32_split: Annotated[Annotated[bool, ctypes.c_bool], 56]
  lower_pack_32_2x16_split: Annotated[Annotated[bool, ctypes.c_bool], 57]
  lower_unpack_half_2x16: Annotated[Annotated[bool, ctypes.c_bool], 58]
  lower_unpack_unorm_2x16: Annotated[Annotated[bool, ctypes.c_bool], 59]
  lower_unpack_snorm_2x16: Annotated[Annotated[bool, ctypes.c_bool], 60]
  lower_unpack_unorm_4x8: Annotated[Annotated[bool, ctypes.c_bool], 61]
  lower_unpack_snorm_4x8: Annotated[Annotated[bool, ctypes.c_bool], 62]
  lower_unpack_64_2x32_split: Annotated[Annotated[bool, ctypes.c_bool], 63]
  lower_unpack_32_2x16_split: Annotated[Annotated[bool, ctypes.c_bool], 64]
  lower_pack_split: Annotated[Annotated[bool, ctypes.c_bool], 65]
  lower_extract_byte: Annotated[Annotated[bool, ctypes.c_bool], 66]
  lower_extract_word: Annotated[Annotated[bool, ctypes.c_bool], 67]
  lower_insert_byte: Annotated[Annotated[bool, ctypes.c_bool], 68]
  lower_insert_word: Annotated[Annotated[bool, ctypes.c_bool], 69]
  vertex_id_zero_based: Annotated[Annotated[bool, ctypes.c_bool], 70]
  lower_base_vertex: Annotated[Annotated[bool, ctypes.c_bool], 71]
  instance_id_includes_base_index: Annotated[Annotated[bool, ctypes.c_bool], 72]
  lower_helper_invocation: Annotated[Annotated[bool, ctypes.c_bool], 73]
  optimize_sample_mask_in: Annotated[Annotated[bool, ctypes.c_bool], 74]
  optimize_load_front_face_fsign: Annotated[Annotated[bool, ctypes.c_bool], 75]
  optimize_quad_vote_to_reduce: Annotated[Annotated[bool, ctypes.c_bool], 76]
  lower_cs_local_index_to_id: Annotated[Annotated[bool, ctypes.c_bool], 77]
  lower_cs_local_id_to_index: Annotated[Annotated[bool, ctypes.c_bool], 78]
  has_cs_global_id: Annotated[Annotated[bool, ctypes.c_bool], 79]
  lower_device_index_to_zero: Annotated[Annotated[bool, ctypes.c_bool], 80]
  lower_wpos_pntc: Annotated[Annotated[bool, ctypes.c_bool], 81]
  lower_hadd: Annotated[Annotated[bool, ctypes.c_bool], 82]
  lower_hadd64: Annotated[Annotated[bool, ctypes.c_bool], 83]
  lower_uadd_sat: Annotated[Annotated[bool, ctypes.c_bool], 84]
  lower_usub_sat: Annotated[Annotated[bool, ctypes.c_bool], 85]
  lower_iadd_sat: Annotated[Annotated[bool, ctypes.c_bool], 86]
  lower_mul_32x16: Annotated[Annotated[bool, ctypes.c_bool], 87]
  lower_bfloat16_conversions: Annotated[Annotated[bool, ctypes.c_bool], 88]
  vectorize_tess_levels: Annotated[Annotated[bool, ctypes.c_bool], 89]
  lower_to_scalar: Annotated[Annotated[bool, ctypes.c_bool], 90]
  # Function-pointer fields below are 8-byte aligned, hence the offset jump.
  lower_to_scalar_filter: Annotated[nir_instr_filter_cb, 96]
  vectorize_vec2_16bit: Annotated[Annotated[bool, ctypes.c_bool], 104]
  unify_interfaces: Annotated[Annotated[bool, ctypes.c_bool], 105]
  lower_interpolate_at: Annotated[Annotated[bool, ctypes.c_bool], 106]
  lower_mul_2x32_64: Annotated[Annotated[bool, ctypes.c_bool], 107]
  has_rotate8: Annotated[Annotated[bool, ctypes.c_bool], 108]
  has_rotate16: Annotated[Annotated[bool, ctypes.c_bool], 109]
  has_rotate32: Annotated[Annotated[bool, ctypes.c_bool], 110]
  has_shfr32: Annotated[Annotated[bool, ctypes.c_bool], 111]
  has_iadd3: Annotated[Annotated[bool, ctypes.c_bool], 112]
  has_amul: Annotated[Annotated[bool, ctypes.c_bool], 113]
  has_imul24: Annotated[Annotated[bool, ctypes.c_bool], 114]
  has_umul24: Annotated[Annotated[bool, ctypes.c_bool], 115]
  has_mul24_relaxed: Annotated[Annotated[bool, ctypes.c_bool], 116]
  has_imad32: Annotated[Annotated[bool, ctypes.c_bool], 117]
  has_umad24: Annotated[Annotated[bool, ctypes.c_bool], 118]
  has_fused_comp_and_csel: Annotated[Annotated[bool, ctypes.c_bool], 119]
  has_icsel_eqz64: Annotated[Annotated[bool, ctypes.c_bool], 120]
  has_icsel_eqz32: Annotated[Annotated[bool, ctypes.c_bool], 121]
  has_icsel_eqz16: Annotated[Annotated[bool, ctypes.c_bool], 122]
  has_fneo_fcmpu: Annotated[Annotated[bool, ctypes.c_bool], 123]
  has_ford_funord: Annotated[Annotated[bool, ctypes.c_bool], 124]
  has_fsub: Annotated[Annotated[bool, ctypes.c_bool], 125]
  has_isub: Annotated[Annotated[bool, ctypes.c_bool], 126]
  has_pack_32_4x8: Annotated[Annotated[bool, ctypes.c_bool], 127]
  has_texture_scaling: Annotated[Annotated[bool, ctypes.c_bool], 128]
  has_sdot_4x8: Annotated[Annotated[bool, ctypes.c_bool], 129]
  has_udot_4x8: Annotated[Annotated[bool, ctypes.c_bool], 130]
  has_sudot_4x8: Annotated[Annotated[bool, ctypes.c_bool], 131]
  has_sdot_4x8_sat: Annotated[Annotated[bool, ctypes.c_bool], 132]
  has_udot_4x8_sat: Annotated[Annotated[bool, ctypes.c_bool], 133]
  has_sudot_4x8_sat: Annotated[Annotated[bool, ctypes.c_bool], 134]
  has_dot_2x16: Annotated[Annotated[bool, ctypes.c_bool], 135]
  has_bfdot2_bfadd: Annotated[Annotated[bool, ctypes.c_bool], 136]
  has_fmulz: Annotated[Annotated[bool, ctypes.c_bool], 137]
  has_fmulz_no_denorms: Annotated[Annotated[bool, ctypes.c_bool], 138]
  has_find_msb_rev: Annotated[Annotated[bool, ctypes.c_bool], 139]
  has_pack_half_2x16_rtz: Annotated[Annotated[bool, ctypes.c_bool], 140]
  has_bit_test: Annotated[Annotated[bool, ctypes.c_bool], 141]
  has_bfe: Annotated[Annotated[bool, ctypes.c_bool], 142]
  has_bfm: Annotated[Annotated[bool, ctypes.c_bool], 143]
  has_bfi: Annotated[Annotated[bool, ctypes.c_bool], 144]
  has_bitfield_select: Annotated[Annotated[bool, ctypes.c_bool], 145]
  has_uclz: Annotated[Annotated[bool, ctypes.c_bool], 146]
  has_msad: Annotated[Annotated[bool, ctypes.c_bool], 147]
  has_f2e4m3fn_satfn: Annotated[Annotated[bool, ctypes.c_bool], 148]
  has_load_global_bounded: Annotated[Annotated[bool, ctypes.c_bool], 149]
  intel_vec4: Annotated[Annotated[bool, ctypes.c_bool], 150]
  avoid_ternary_with_two_constants: Annotated[Annotated[bool, ctypes.c_bool], 151]
  support_8bit_alu: Annotated[Annotated[bool, ctypes.c_bool], 152]
  support_16bit_alu: Annotated[Annotated[bool, ctypes.c_bool], 153]
  max_unroll_iterations: Annotated[Annotated[int, ctypes.c_uint32], 156]
  max_unroll_iterations_aggressive: Annotated[Annotated[int, ctypes.c_uint32], 160]
  max_unroll_iterations_fp64: Annotated[Annotated[int, ctypes.c_uint32], 164]
  lower_uniforms_to_ubo: Annotated[Annotated[bool, ctypes.c_bool], 168]
  force_indirect_unrolling_sampler: Annotated[Annotated[bool, ctypes.c_bool], 169]
  no_integers: Annotated[Annotated[bool, ctypes.c_bool], 170]
  force_indirect_unrolling: Annotated[nir_variable_mode, 172]
  driver_functions: Annotated[Annotated[bool, ctypes.c_bool], 176]
  late_lower_int64: Annotated[Annotated[bool, ctypes.c_bool], 177]
  lower_int64_options: Annotated[nir_lower_int64_options, 180]
  lower_doubles_options: Annotated[nir_lower_doubles_options, 184]
  divergence_analysis_options: Annotated[nir_divergence_options, 188]
  support_indirect_inputs: Annotated[uint8_t, 192]
  support_indirect_outputs: Annotated[uint8_t, 193]
  lower_image_offset_to_range_base: Annotated[Annotated[bool, ctypes.c_bool], 194]
  lower_atomic_offset_to_range_base: Annotated[Annotated[bool, ctypes.c_bool], 195]
  preserve_mediump: Annotated[Annotated[bool, ctypes.c_bool], 196]
  lower_fquantize2f16: Annotated[Annotated[bool, ctypes.c_bool], 197]
  force_f2f16_rtz: Annotated[Annotated[bool, ctypes.c_bool], 198]
  lower_layer_fs_input_to_sysval: Annotated[Annotated[bool, ctypes.c_bool], 199]
  compact_arrays: Annotated[Annotated[bool, ctypes.c_bool], 200]
  discard_is_demote: Annotated[Annotated[bool, ctypes.c_bool], 201]
  has_ddx_intrinsics: Annotated[Annotated[bool, ctypes.c_bool], 202]
  scalarize_ddx: Annotated[Annotated[bool, ctypes.c_bool], 203]
  per_view_unique_driver_locations: Annotated[Annotated[bool, ctypes.c_bool], 204]
  compact_view_index: Annotated[Annotated[bool, ctypes.c_bool], 205]
  io_options: Annotated[nir_io_options, 208]
  skip_lower_packing_ops: Annotated[Annotated[int, ctypes.c_uint32], 212]
  lower_mediump_io: Annotated[c.CFUNCTYPE[None, [c.POINTER[struct_nir_shader]]], 216]
  varying_expression_max_cost: Annotated[c.CFUNCTYPE[Annotated[int, ctypes.c_uint32], [c.POINTER[struct_nir_shader], c.POINTER[struct_nir_shader]]], 224]
  varying_estimate_instr_cost: Annotated[c.CFUNCTYPE[Annotated[int, ctypes.c_uint32], [c.POINTER[struct_nir_instr]]], 232]
  max_varying_expression_cost: Annotated[Annotated[int, ctypes.c_uint32], 240]
# Alias matching the C `typedef ... nir_shader_compiler_options;`.
nir_shader_compiler_options: TypeAlias = struct_nir_shader_compiler_options
# Callback type used by `lower_to_scalar_filter`: takes (nir_instr*, void*
# user data) and returns bool (whether the instruction should be processed).
nir_instr_filter_cb: TypeAlias = c.CFUNCTYPE[Annotated[bool, ctypes.c_bool], [c.POINTER[struct_nir_instr], ctypes.c_void_p]]
# Bitmask enum: which 64-bit integer operations a backend asks NIR to lower.
# Each member is a distinct power of two so values can be OR'd together.
class nir_lower_int64_options(Annotated[int, ctypes.c_uint32], c.Enum): pass
nir_lower_imul64 = nir_lower_int64_options.define('nir_lower_imul64', 1)
nir_lower_isign64 = nir_lower_int64_options.define('nir_lower_isign64', 2)
nir_lower_divmod64 = nir_lower_int64_options.define('nir_lower_divmod64', 4)
nir_lower_imul_high64 = nir_lower_int64_options.define('nir_lower_imul_high64', 8)
nir_lower_bcsel64 = nir_lower_int64_options.define('nir_lower_bcsel64', 16)
nir_lower_icmp64 = nir_lower_int64_options.define('nir_lower_icmp64', 32)
nir_lower_iadd64 = nir_lower_int64_options.define('nir_lower_iadd64', 64)
nir_lower_iabs64 = nir_lower_int64_options.define('nir_lower_iabs64', 128)
nir_lower_ineg64 = nir_lower_int64_options.define('nir_lower_ineg64', 256)
nir_lower_logic64 = nir_lower_int64_options.define('nir_lower_logic64', 512)
nir_lower_minmax64 = nir_lower_int64_options.define('nir_lower_minmax64', 1024)
nir_lower_shift64 = nir_lower_int64_options.define('nir_lower_shift64', 2048)
nir_lower_imul_2x32_64 = nir_lower_int64_options.define('nir_lower_imul_2x32_64', 4096)
nir_lower_extract64 = nir_lower_int64_options.define('nir_lower_extract64', 8192)
nir_lower_ufind_msb64 = nir_lower_int64_options.define('nir_lower_ufind_msb64', 16384)
nir_lower_bit_count64 = nir_lower_int64_options.define('nir_lower_bit_count64', 32768)
nir_lower_subgroup_shuffle64 = nir_lower_int64_options.define('nir_lower_subgroup_shuffle64', 65536)
nir_lower_scan_reduce_bitwise64 = nir_lower_int64_options.define('nir_lower_scan_reduce_bitwise64', 131072)
nir_lower_scan_reduce_iadd64 = nir_lower_int64_options.define('nir_lower_scan_reduce_iadd64', 262144)
nir_lower_vote_ieq64 = nir_lower_int64_options.define('nir_lower_vote_ieq64', 524288)
nir_lower_usub_sat64 = nir_lower_int64_options.define('nir_lower_usub_sat64', 1048576)
nir_lower_iadd_sat64 = nir_lower_int64_options.define('nir_lower_iadd_sat64', 2097152)
nir_lower_find_lsb64 = nir_lower_int64_options.define('nir_lower_find_lsb64', 4194304)
nir_lower_conv64 = nir_lower_int64_options.define('nir_lower_conv64', 8388608)
nir_lower_uadd_sat64 = nir_lower_int64_options.define('nir_lower_uadd_sat64', 16777216)
nir_lower_iadd3_64 = nir_lower_int64_options.define('nir_lower_iadd3_64', 33554432)
nir_lower_bitfield_reverse64 = nir_lower_int64_options.define('nir_lower_bitfield_reverse64', 67108864)
nir_lower_bitfield_extract64 = nir_lower_int64_options.define('nir_lower_bitfield_extract64', 134217728)
# Bitmask enum: which double-precision (fp64) operations to lower; members are
# powers of two so they can be OR'd. `nir_lower_fp64_full_software` requests a
# full software fp64 implementation.
class nir_lower_doubles_options(Annotated[int, ctypes.c_uint32], c.Enum): pass
nir_lower_drcp = nir_lower_doubles_options.define('nir_lower_drcp', 1)
nir_lower_dsqrt = nir_lower_doubles_options.define('nir_lower_dsqrt', 2)
nir_lower_drsq = nir_lower_doubles_options.define('nir_lower_drsq', 4)
nir_lower_dtrunc = nir_lower_doubles_options.define('nir_lower_dtrunc', 8)
nir_lower_dfloor = nir_lower_doubles_options.define('nir_lower_dfloor', 16)
nir_lower_dceil = nir_lower_doubles_options.define('nir_lower_dceil', 32)
nir_lower_dfract = nir_lower_doubles_options.define('nir_lower_dfract', 64)
nir_lower_dround_even = nir_lower_doubles_options.define('nir_lower_dround_even', 128)
nir_lower_dmod = nir_lower_doubles_options.define('nir_lower_dmod', 256)
nir_lower_dsub = nir_lower_doubles_options.define('nir_lower_dsub', 512)
nir_lower_ddiv = nir_lower_doubles_options.define('nir_lower_ddiv', 1024)
nir_lower_dsign = nir_lower_doubles_options.define('nir_lower_dsign', 2048)
nir_lower_dminmax = nir_lower_doubles_options.define('nir_lower_dminmax', 4096)
nir_lower_dsat = nir_lower_doubles_options.define('nir_lower_dsat', 8192)
nir_lower_fp64_full_software = nir_lower_doubles_options.define('nir_lower_fp64_full_software', 16384)
# Bitmask enum: hardware/driver guarantees fed to NIR's divergence analysis
# (e.g. "one primitive per subgroup"); members are powers of two for OR'ing.
class nir_divergence_options(Annotated[int, ctypes.c_uint32], c.Enum): pass
nir_divergence_single_prim_per_subgroup = nir_divergence_options.define('nir_divergence_single_prim_per_subgroup', 1)
nir_divergence_single_patch_per_tcs_subgroup = nir_divergence_options.define('nir_divergence_single_patch_per_tcs_subgroup', 2)
nir_divergence_single_patch_per_tes_subgroup = nir_divergence_options.define('nir_divergence_single_patch_per_tes_subgroup', 4)
nir_divergence_view_index_uniform = nir_divergence_options.define('nir_divergence_view_index_uniform', 8)
nir_divergence_single_frag_shading_rate_per_subgroup = nir_divergence_options.define('nir_divergence_single_frag_shading_rate_per_subgroup', 16)
nir_divergence_multiple_workgroup_per_compute_subgroup = nir_divergence_options.define('nir_divergence_multiple_workgroup_per_compute_subgroup', 32)
nir_divergence_shader_record_ptr_uniform = nir_divergence_options.define('nir_divergence_shader_record_ptr_uniform', 64)
nir_divergence_uniform_load_tears = nir_divergence_options.define('nir_divergence_uniform_load_tears', 128)
nir_divergence_ignore_undef_if_phi_srcs = nir_divergence_options.define('nir_divergence_ignore_undef_if_phi_srcs', 256)
# Bitmask enum: backend preferences for NIR's lowered-IO passes. Note the gap
# between 1024 and 65536 — the values mirror the C enum and are not contiguous.
class nir_io_options(Annotated[int, ctypes.c_uint32], c.Enum): pass
nir_io_has_flexible_input_interpolation_except_flat = nir_io_options.define('nir_io_has_flexible_input_interpolation_except_flat', 1)
nir_io_dont_use_pos_for_non_fs_varyings = nir_io_options.define('nir_io_dont_use_pos_for_non_fs_varyings', 2)
nir_io_16bit_input_output_support = nir_io_options.define('nir_io_16bit_input_output_support', 4)
nir_io_mediump_is_32bit = nir_io_options.define('nir_io_mediump_is_32bit', 8)
nir_io_prefer_scalar_fs_inputs = nir_io_options.define('nir_io_prefer_scalar_fs_inputs', 16)
nir_io_mix_convergent_flat_with_interpolated = nir_io_options.define('nir_io_mix_convergent_flat_with_interpolated', 32)
nir_io_vectorizer_ignores_types = nir_io_options.define('nir_io_vectorizer_ignores_types', 64)
nir_io_always_interpolate_convergent_fs_inputs = nir_io_options.define('nir_io_always_interpolate_convergent_fs_inputs', 128)
nir_io_compaction_rotates_color_channels = nir_io_options.define('nir_io_compaction_rotates_color_channels', 256)
nir_io_compaction_groups_tes_inputs_into_pos_and_var_groups = nir_io_options.define('nir_io_compaction_groups_tes_inputs_into_pos_and_var_groups', 512)
nir_io_radv_intrinsic_component_workaround = nir_io_options.define('nir_io_radv_intrinsic_component_workaround', 1024)
nir_io_has_intrinsics = nir_io_options.define('nir_io_has_intrinsics', 65536)
nir_io_separate_clip_cull_distance_arrays = nir_io_options.define('nir_io_separate_clip_cull_distance_arrays', 131072)
# Binding for Mesa's `shader_info` — per-shader reflection/metadata (resource
# counts, read/written IO bitmasks, workgroup size, plus a stage-specific
# union at offset 312: vs/gs/fs/cs/tess/mesh all share the same storage).
# NOTE(review): machine-generated layout. Two-arg Annotated fields are
# (ctype, byte_offset); four-arg fields appear to be bitfields as
# (ctype, byte_offset, bit_width, bit_shift) — inferred from the c.record
# convention, confirm against the generator. Do not hand-edit the numbers.
@c.record
class struct_shader_info(c.Struct):
  SIZE = 368
  name: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 0]
  label: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 8]
  internal: Annotated[Annotated[bool, ctypes.c_bool], 16]
  source_blake3: Annotated[blake3_hash, 17]
  stage: Annotated[gl_shader_stage, 49, 8, 0]
  prev_stage: Annotated[gl_shader_stage, 50, 8, 0]
  next_stage: Annotated[gl_shader_stage, 51, 8, 0]
  prev_stage_has_xfb: Annotated[Annotated[bool, ctypes.c_bool], 52]
  num_textures: Annotated[uint8_t, 53]
  num_ubos: Annotated[uint8_t, 54]
  num_abos: Annotated[uint8_t, 55]
  num_ssbos: Annotated[uint8_t, 56]
  num_images: Annotated[uint8_t, 57]
  inputs_read: Annotated[uint64_t, 64]
  dual_slot_inputs: Annotated[uint64_t, 72]
  outputs_written: Annotated[uint64_t, 80]
  outputs_read: Annotated[uint64_t, 88]
  system_values_read: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[4]], 96]
  per_primitive_inputs: Annotated[uint64_t, 112]
  per_primitive_outputs: Annotated[uint64_t, 120]
  per_view_outputs: Annotated[uint64_t, 128]
  view_mask: Annotated[uint32_t, 136]
  inputs_read_16bit: Annotated[uint16_t, 140]
  outputs_written_16bit: Annotated[uint16_t, 142]
  outputs_read_16bit: Annotated[uint16_t, 144]
  inputs_read_indirectly_16bit: Annotated[uint16_t, 146]
  outputs_read_indirectly_16bit: Annotated[uint16_t, 148]
  outputs_written_indirectly_16bit: Annotated[uint16_t, 150]
  patch_inputs_read: Annotated[uint32_t, 152]
  patch_outputs_written: Annotated[uint32_t, 156]
  patch_outputs_read: Annotated[uint32_t, 160]
  inputs_read_indirectly: Annotated[uint64_t, 168]
  outputs_read_indirectly: Annotated[uint64_t, 176]
  outputs_written_indirectly: Annotated[uint64_t, 184]
  patch_inputs_read_indirectly: Annotated[uint32_t, 192]
  patch_outputs_read_indirectly: Annotated[uint32_t, 196]
  patch_outputs_written_indirectly: Annotated[uint32_t, 200]
  textures_used: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[4]], 204]
  textures_used_by_txf: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[4]], 220]
  samplers_used: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[1]], 236]
  images_used: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[2]], 240]
  image_buffers: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[2]], 248]
  msaa_images: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[2]], 256]
  float_controls_execution_mode: Annotated[uint32_t, 264]
  shared_size: Annotated[Annotated[int, ctypes.c_uint32], 268]
  task_payload_size: Annotated[Annotated[int, ctypes.c_uint32], 272]
  ray_queries: Annotated[Annotated[int, ctypes.c_uint32], 276]
  workgroup_size: Annotated[c.Array[uint16_t, Literal[3]], 280]
  subgroup_size: Annotated[enum_gl_subgroup_size, 286]
  num_subgroups: Annotated[uint8_t, 287]
  uses_wide_subgroup_intrinsics: Annotated[Annotated[bool, ctypes.c_bool], 288]
  xfb_stride: Annotated[c.Array[uint8_t, Literal[4]], 289]
  inlinable_uniform_dw_offsets: Annotated[c.Array[uint16_t, Literal[4]], 294]
  num_inlinable_uniforms: Annotated[uint8_t, 302, 4, 0]
  clip_distance_array_size: Annotated[uint8_t, 302, 4, 4]
  cull_distance_array_size: Annotated[uint8_t, 303, 4, 0]
  uses_texture_gather: Annotated[Annotated[bool, ctypes.c_bool], 303, 1, 4]
  uses_resource_info_query: Annotated[Annotated[bool, ctypes.c_bool], 303, 1, 5]
  bit_sizes_float: Annotated[uint8_t, 304]
  bit_sizes_int: Annotated[uint8_t, 305]
  first_ubo_is_default_ubo: Annotated[Annotated[bool, ctypes.c_bool], 306, 1, 0]
  separate_shader: Annotated[Annotated[bool, ctypes.c_bool], 306, 1, 1]
  has_transform_feedback_varyings: Annotated[Annotated[bool, ctypes.c_bool], 306, 1, 2]
  flrp_lowered: Annotated[Annotated[bool, ctypes.c_bool], 306, 1, 3]
  io_lowered: Annotated[Annotated[bool, ctypes.c_bool], 306, 1, 4]
  var_copies_lowered: Annotated[Annotated[bool, ctypes.c_bool], 306, 1, 5]
  writes_memory: Annotated[Annotated[bool, ctypes.c_bool], 306, 1, 6]
  layer_viewport_relative: Annotated[Annotated[bool, ctypes.c_bool], 306, 1, 7]
  uses_control_barrier: Annotated[Annotated[bool, ctypes.c_bool], 307, 1, 0]
  uses_memory_barrier: Annotated[Annotated[bool, ctypes.c_bool], 307, 1, 1]
  uses_bindless: Annotated[Annotated[bool, ctypes.c_bool], 307, 1, 2]
  shared_memory_explicit_layout: Annotated[Annotated[bool, ctypes.c_bool], 307, 1, 3]
  zero_initialize_shared_memory: Annotated[Annotated[bool, ctypes.c_bool], 307, 1, 4]
  workgroup_size_variable: Annotated[Annotated[bool, ctypes.c_bool], 307, 1, 5]
  uses_printf: Annotated[Annotated[bool, ctypes.c_bool], 307, 1, 6]
  maximally_reconverges: Annotated[Annotated[bool, ctypes.c_bool], 307, 1, 7]
  use_aco_amd: Annotated[Annotated[bool, ctypes.c_bool], 308, 1, 0]
  use_lowered_image_to_global: Annotated[Annotated[bool, ctypes.c_bool], 308, 1, 1]
  use_legacy_math_rules: Annotated[Annotated[bool, ctypes.c_bool], 309]
  derivative_group: Annotated[enum_gl_derivative_group, 310, 2, 0]
  # The six fields below all live at offset 312: they mirror the C union of
  # per-stage info; only the member matching `stage` is meaningful.
  vs: Annotated[struct_shader_info_vs, 312]
  gs: Annotated[struct_shader_info_gs, 312]
  fs: Annotated[struct_shader_info_fs, 312]
  cs: Annotated[struct_shader_info_cs, 312]
  tess: Annotated[struct_shader_info_tess, 312]
  mesh: Annotated[struct_shader_info_mesh, 312]
# 32-byte BLAKE3 digest of the shader source (see `source_blake3` above).
blake3_hash: TypeAlias = c.Array[Annotated[int, ctypes.c_ubyte], Literal[32]]
# Shader stage enum; the MESA_SHADER_* and PIPE_SHADER_* names are aliases
# sharing the same numeric values (0..5 graphics/compute, 6+ task/mesh/RT).
class enum_pipe_shader_type(Annotated[int, ctypes.c_int32], c.Enum): pass
MESA_SHADER_NONE = enum_pipe_shader_type.define('MESA_SHADER_NONE', -1)
MESA_SHADER_VERTEX = enum_pipe_shader_type.define('MESA_SHADER_VERTEX', 0)
PIPE_SHADER_VERTEX = enum_pipe_shader_type.define('PIPE_SHADER_VERTEX', 0)
MESA_SHADER_TESS_CTRL = enum_pipe_shader_type.define('MESA_SHADER_TESS_CTRL', 1)
PIPE_SHADER_TESS_CTRL = enum_pipe_shader_type.define('PIPE_SHADER_TESS_CTRL', 1)
MESA_SHADER_TESS_EVAL = enum_pipe_shader_type.define('MESA_SHADER_TESS_EVAL', 2)
PIPE_SHADER_TESS_EVAL = enum_pipe_shader_type.define('PIPE_SHADER_TESS_EVAL', 2)
MESA_SHADER_GEOMETRY = enum_pipe_shader_type.define('MESA_SHADER_GEOMETRY', 3)
PIPE_SHADER_GEOMETRY = enum_pipe_shader_type.define('PIPE_SHADER_GEOMETRY', 3)
MESA_SHADER_FRAGMENT = enum_pipe_shader_type.define('MESA_SHADER_FRAGMENT', 4)
PIPE_SHADER_FRAGMENT = enum_pipe_shader_type.define('PIPE_SHADER_FRAGMENT', 4)
MESA_SHADER_COMPUTE = enum_pipe_shader_type.define('MESA_SHADER_COMPUTE', 5)
PIPE_SHADER_COMPUTE = enum_pipe_shader_type.define('PIPE_SHADER_COMPUTE', 5)
PIPE_SHADER_TYPES = enum_pipe_shader_type.define('PIPE_SHADER_TYPES', 6)
MESA_SHADER_TASK = enum_pipe_shader_type.define('MESA_SHADER_TASK', 6)
PIPE_SHADER_TASK = enum_pipe_shader_type.define('PIPE_SHADER_TASK', 6)
MESA_SHADER_MESH = enum_pipe_shader_type.define('MESA_SHADER_MESH', 7)
PIPE_SHADER_MESH = enum_pipe_shader_type.define('PIPE_SHADER_MESH', 7)
PIPE_SHADER_MESH_TYPES = enum_pipe_shader_type.define('PIPE_SHADER_MESH_TYPES', 8)
MESA_SHADER_RAYGEN = enum_pipe_shader_type.define('MESA_SHADER_RAYGEN', 8)
MESA_SHADER_ANY_HIT = enum_pipe_shader_type.define('MESA_SHADER_ANY_HIT', 9)
MESA_SHADER_CLOSEST_HIT = enum_pipe_shader_type.define('MESA_SHADER_CLOSEST_HIT', 10)
MESA_SHADER_MISS = enum_pipe_shader_type.define('MESA_SHADER_MISS', 11)
MESA_SHADER_INTERSECTION = enum_pipe_shader_type.define('MESA_SHADER_INTERSECTION', 12)
MESA_SHADER_CALLABLE = enum_pipe_shader_type.define('MESA_SHADER_CALLABLE', 13)
MESA_SHADER_KERNEL = enum_pipe_shader_type.define('MESA_SHADER_KERNEL', 14)
# C-side `gl_shader_stage` is the same underlying enum.
gl_shader_stage: TypeAlias = enum_pipe_shader_type
# Subgroup-size selection: 0-3 are policy modes; the SUBGROUP_SIZE_REQUIRE_N
# members encode the required size N directly as the enum value.
class enum_gl_subgroup_size(Annotated[int, ctypes.c_ubyte], c.Enum): pass
SUBGROUP_SIZE_VARYING = enum_gl_subgroup_size.define('SUBGROUP_SIZE_VARYING', 0)
SUBGROUP_SIZE_UNIFORM = enum_gl_subgroup_size.define('SUBGROUP_SIZE_UNIFORM', 1)
SUBGROUP_SIZE_API_CONSTANT = enum_gl_subgroup_size.define('SUBGROUP_SIZE_API_CONSTANT', 2)
SUBGROUP_SIZE_FULL_SUBGROUPS = enum_gl_subgroup_size.define('SUBGROUP_SIZE_FULL_SUBGROUPS', 3)
SUBGROUP_SIZE_REQUIRE_4 = enum_gl_subgroup_size.define('SUBGROUP_SIZE_REQUIRE_4', 4)
SUBGROUP_SIZE_REQUIRE_8 = enum_gl_subgroup_size.define('SUBGROUP_SIZE_REQUIRE_8', 8)
SUBGROUP_SIZE_REQUIRE_16 = enum_gl_subgroup_size.define('SUBGROUP_SIZE_REQUIRE_16', 16)
SUBGROUP_SIZE_REQUIRE_32 = enum_gl_subgroup_size.define('SUBGROUP_SIZE_REQUIRE_32', 32)
SUBGROUP_SIZE_REQUIRE_64 = enum_gl_subgroup_size.define('SUBGROUP_SIZE_REQUIRE_64', 64)
SUBGROUP_SIZE_REQUIRE_128 = enum_gl_subgroup_size.define('SUBGROUP_SIZE_REQUIRE_128', 128)
# Compute-shader derivative grouping mode (none / 2x2 quads / linear).
class enum_gl_derivative_group(Annotated[int, ctypes.c_uint32], c.Enum): pass
DERIVATIVE_GROUP_NONE = enum_gl_derivative_group.define('DERIVATIVE_GROUP_NONE', 0)
DERIVATIVE_GROUP_QUADS = enum_gl_derivative_group.define('DERIVATIVE_GROUP_QUADS', 1)
DERIVATIVE_GROUP_LINEAR = enum_gl_derivative_group.define('DERIVATIVE_GROUP_LINEAR', 2)
# Vertex-shader member of the `shader_info` union at offset 312.
# NOTE(review): four-arg Annotated fields appear to be bitfields as
# (ctype, byte_offset, bit_width, bit_shift) — confirm against the generator.
@c.record
class struct_shader_info_vs(c.Struct):
  SIZE = 16
  double_inputs: Annotated[uint64_t, 0]
  blit_sgprs_amd: Annotated[uint8_t, 8, 4, 0]
  tes_agx: Annotated[Annotated[bool, ctypes.c_bool], 8, 1, 4]
  window_space_position: Annotated[Annotated[bool, ctypes.c_bool], 8, 1, 5]
  needs_edge_flag: Annotated[Annotated[bool, ctypes.c_bool], 8, 1, 6]
# Geometry-shader member of the `shader_info` union at offset 312.
# NOTE(review): four-arg Annotated fields appear to be bitfields as
# (ctype, byte_offset, bit_width, bit_shift) — confirm against the generator.
@c.record
class struct_shader_info_gs(c.Struct):
  SIZE = 6
  output_primitive: Annotated[enum_mesa_prim, 0]
  input_primitive: Annotated[enum_mesa_prim, 1]
  vertices_out: Annotated[uint16_t, 2]
  invocations: Annotated[uint8_t, 4]
  vertices_in: Annotated[uint8_t, 5, 3, 0]
  uses_end_primitive: Annotated[Annotated[bool, ctypes.c_bool], 5, 1, 3]
  active_stream_mask: Annotated[uint8_t, 5, 4, 4]
# Primitive topology enum. Note the deliberate value reuse/gaps mirrored from
# the C enum: MESA_PRIM_MAX == MESA_PRIM_PATCHES (14) and
# MESA_PRIM_UNKNOWN is 28, not COUNT.
class enum_mesa_prim(Annotated[int, ctypes.c_ubyte], c.Enum): pass
MESA_PRIM_POINTS = enum_mesa_prim.define('MESA_PRIM_POINTS', 0)
MESA_PRIM_LINES = enum_mesa_prim.define('MESA_PRIM_LINES', 1)
MESA_PRIM_LINE_LOOP = enum_mesa_prim.define('MESA_PRIM_LINE_LOOP', 2)
MESA_PRIM_LINE_STRIP = enum_mesa_prim.define('MESA_PRIM_LINE_STRIP', 3)
MESA_PRIM_TRIANGLES = enum_mesa_prim.define('MESA_PRIM_TRIANGLES', 4)
MESA_PRIM_TRIANGLE_STRIP = enum_mesa_prim.define('MESA_PRIM_TRIANGLE_STRIP', 5)
MESA_PRIM_TRIANGLE_FAN = enum_mesa_prim.define('MESA_PRIM_TRIANGLE_FAN', 6)
MESA_PRIM_QUADS = enum_mesa_prim.define('MESA_PRIM_QUADS', 7)
MESA_PRIM_QUAD_STRIP = enum_mesa_prim.define('MESA_PRIM_QUAD_STRIP', 8)
MESA_PRIM_POLYGON = enum_mesa_prim.define('MESA_PRIM_POLYGON', 9)
MESA_PRIM_LINES_ADJACENCY = enum_mesa_prim.define('MESA_PRIM_LINES_ADJACENCY', 10)
MESA_PRIM_LINE_STRIP_ADJACENCY = enum_mesa_prim.define('MESA_PRIM_LINE_STRIP_ADJACENCY', 11)
MESA_PRIM_TRIANGLES_ADJACENCY = enum_mesa_prim.define('MESA_PRIM_TRIANGLES_ADJACENCY', 12)
MESA_PRIM_TRIANGLE_STRIP_ADJACENCY = enum_mesa_prim.define('MESA_PRIM_TRIANGLE_STRIP_ADJACENCY', 13)
MESA_PRIM_PATCHES = enum_mesa_prim.define('MESA_PRIM_PATCHES', 14)
MESA_PRIM_MAX = enum_mesa_prim.define('MESA_PRIM_MAX', 14)
MESA_PRIM_COUNT = enum_mesa_prim.define('MESA_PRIM_COUNT', 15)
MESA_PRIM_UNKNOWN = enum_mesa_prim.define('MESA_PRIM_UNKNOWN', 28)
# Fragment-shader member of the `shader_info` union at offset 312 — mostly
# single-bit flags packed into the first bytes.
# NOTE(review): four-arg Annotated fields appear to be bitfields as
# (ctype, byte_offset, bit_width, bit_shift) — confirm against the generator.
@c.record
class struct_shader_info_fs(c.Struct):
  SIZE = 16
  uses_discard: Annotated[Annotated[bool, ctypes.c_bool], 0, 1, 0]
  uses_fbfetch_output: Annotated[Annotated[bool, ctypes.c_bool], 0, 1, 1]
  fbfetch_coherent: Annotated[Annotated[bool, ctypes.c_bool], 0, 1, 2]
  color_is_dual_source: Annotated[Annotated[bool, ctypes.c_bool], 0, 1, 3]
  require_full_quads: Annotated[Annotated[bool, ctypes.c_bool], 0, 1, 4]
  quad_derivatives: Annotated[Annotated[bool, ctypes.c_bool], 0, 1, 5]
  needs_coarse_quad_helper_invocations: Annotated[Annotated[bool, ctypes.c_bool], 0, 1, 6]
  needs_full_quad_helper_invocations: Annotated[Annotated[bool, ctypes.c_bool], 0, 1, 7]
  uses_sample_qualifier: Annotated[Annotated[bool, ctypes.c_bool], 1, 1, 0]
  uses_sample_shading: Annotated[Annotated[bool, ctypes.c_bool], 1, 1, 1]
  early_fragment_tests: Annotated[Annotated[bool, ctypes.c_bool], 1, 1, 2]
  inner_coverage: Annotated[Annotated[bool, ctypes.c_bool], 1, 1, 3]
  post_depth_coverage: Annotated[Annotated[bool, ctypes.c_bool], 1, 1, 4]
  pixel_center_integer: Annotated[Annotated[bool, ctypes.c_bool], 1, 1, 5]
  origin_upper_left: Annotated[Annotated[bool, ctypes.c_bool], 1, 1, 6]
  pixel_interlock_ordered: Annotated[Annotated[bool, ctypes.c_bool], 1, 1, 7]
  pixel_interlock_unordered: Annotated[Annotated[bool, ctypes.c_bool], 2, 1, 0]
  sample_interlock_ordered: Annotated[Annotated[bool, ctypes.c_bool], 2, 1, 1]
  sample_interlock_unordered: Annotated[Annotated[bool, ctypes.c_bool], 2, 1, 2]
  untyped_color_outputs: Annotated[Annotated[bool, ctypes.c_bool], 2, 1, 3]
  depth_layout: Annotated[enum_gl_frag_depth_layout, 2, 3, 4]
  color0_interp: Annotated[Annotated[int, ctypes.c_uint32], 2, 3, 7]
  color0_sample: Annotated[Annotated[bool, ctypes.c_bool], 3, 1, 2]
  color0_centroid: Annotated[Annotated[bool, ctypes.c_bool], 3, 1, 3]
  color1_interp: Annotated[Annotated[int, ctypes.c_uint32], 3, 3, 4]
  color1_sample: Annotated[Annotated[bool, ctypes.c_bool], 3, 1, 7]
  color1_centroid: Annotated[Annotated[bool, ctypes.c_bool], 4, 1, 0]
  advanced_blend_modes: Annotated[Annotated[int, ctypes.c_uint32], 8]
  early_and_late_fragment_tests: Annotated[Annotated[bool, ctypes.c_bool], 12, 1, 0]
  stencil_front_layout: Annotated[enum_gl_frag_stencil_layout, 12, 3, 1]
  stencil_back_layout: Annotated[enum_gl_frag_stencil_layout, 12, 3, 4]
# Declared fragment-depth write layout (used by `struct_shader_info_fs.depth_layout`).
class enum_gl_frag_depth_layout(Annotated[int, ctypes.c_uint32], c.Enum): pass
FRAG_DEPTH_LAYOUT_NONE = enum_gl_frag_depth_layout.define('FRAG_DEPTH_LAYOUT_NONE', 0)
FRAG_DEPTH_LAYOUT_ANY = enum_gl_frag_depth_layout.define('FRAG_DEPTH_LAYOUT_ANY', 1)
FRAG_DEPTH_LAYOUT_GREATER = enum_gl_frag_depth_layout.define('FRAG_DEPTH_LAYOUT_GREATER', 2)
FRAG_DEPTH_LAYOUT_LESS = enum_gl_frag_depth_layout.define('FRAG_DEPTH_LAYOUT_LESS', 3)
FRAG_DEPTH_LAYOUT_UNCHANGED = enum_gl_frag_depth_layout.define('FRAG_DEPTH_LAYOUT_UNCHANGED', 4)
# Declared fragment stencil-export layout; mirrors the depth-layout values
# (used by `stencil_front_layout` / `stencil_back_layout`).
class enum_gl_frag_stencil_layout(Annotated[int, ctypes.c_uint32], c.Enum): pass
FRAG_STENCIL_LAYOUT_NONE = enum_gl_frag_stencil_layout.define('FRAG_STENCIL_LAYOUT_NONE', 0)
FRAG_STENCIL_LAYOUT_ANY = enum_gl_frag_stencil_layout.define('FRAG_STENCIL_LAYOUT_ANY', 1)
FRAG_STENCIL_LAYOUT_GREATER = enum_gl_frag_stencil_layout.define('FRAG_STENCIL_LAYOUT_GREATER', 2)
FRAG_STENCIL_LAYOUT_LESS = enum_gl_frag_stencil_layout.define('FRAG_STENCIL_LAYOUT_LESS', 3)
FRAG_STENCIL_LAYOUT_UNCHANGED = enum_gl_frag_stencil_layout.define('FRAG_STENCIL_LAYOUT_UNCHANGED', 4)
# Generated record binding for the compute-shader section of shader_info
# (32 bytes total, per SIZE). The Annotated metadata appears to encode
# (byte offset) for whole fields and (byte offset, bit width, bit shift) for
# bitfields -- TODO confirm against the binding generator. Layout mirrors the
# C struct exactly; do not edit offsets by hand.
@c.record
class struct_shader_info_cs(c.Struct):
  SIZE = 32
  workgroup_size_hint: Annotated[c.Array[uint16_t, Literal[3]], 0]
  # Bitfields packed into byte 6 (4-bit value, then two 1-bit flags).
  user_data_components_amd: Annotated[uint8_t, 6, 4, 0]
  has_variable_shared_mem: Annotated[Annotated[bool, ctypes.c_bool], 6, 1, 4]
  has_cooperative_matrix: Annotated[Annotated[bool, ctypes.c_bool], 6, 1, 5]
  image_block_size_per_thread_agx: Annotated[uint8_t, 7]
  ptr_size: Annotated[Annotated[int, ctypes.c_uint32], 8]
  shader_index: Annotated[uint32_t, 12]
  node_payloads_size: Annotated[uint32_t, 16]
  workgroup_count: Annotated[c.Array[uint32_t, Literal[3]], 20]
# Generated record binding for the tessellation section of shader_info
# (56 bytes total, per SIZE). Annotated metadata appears to be
# (byte offset[, bit width, bit shift]) -- TODO confirm against the generator.
# Mirrors the C struct layout exactly; do not edit offsets by hand.
@c.record
class struct_shader_info_tess(c.Struct):
  SIZE = 56
  _primitive_mode: Annotated[enum_tess_primitive_mode, 0]
  tcs_vertices_out: Annotated[uint8_t, 4]
  # Bitfields packed into byte 5 (2-bit spacing, then two 1-bit flags).
  spacing: Annotated[Annotated[int, ctypes.c_uint32], 5, 2, 0]
  ccw: Annotated[Annotated[bool, ctypes.c_bool], 5, 1, 2]
  point_mode: Annotated[Annotated[bool, ctypes.c_bool], 5, 1, 3]
  # 64-bit access masks; presumably per-slot bitmasks of TCS inputs/outputs --
  # verify semantics against Mesa's shader_info definition.
  tcs_same_invocation_inputs_read: Annotated[uint64_t, 8]
  tcs_cross_invocation_inputs_read: Annotated[uint64_t, 16]
  tcs_cross_invocation_outputs_read: Annotated[uint64_t, 24]
  tcs_cross_invocation_outputs_written: Annotated[uint64_t, 32]
  tcs_outputs_read_by_tes: Annotated[uint64_t, 40]
  tcs_patch_outputs_read_by_tes: Annotated[uint32_t, 48]
  tcs_outputs_read_by_tes_16bit: Annotated[uint16_t, 52]
# Generated enum binding (u32-backed). Tessellation primitive mode; values
# must stay in sync with the C enum -- do not renumber by hand.
class enum_tess_primitive_mode(Annotated[int, ctypes.c_uint32], c.Enum): pass
TESS_PRIMITIVE_UNSPECIFIED = enum_tess_primitive_mode.define('TESS_PRIMITIVE_UNSPECIFIED', 0)
TESS_PRIMITIVE_TRIANGLES = enum_tess_primitive_mode.define('TESS_PRIMITIVE_TRIANGLES', 1)
TESS_PRIMITIVE_QUADS = enum_tess_primitive_mode.define('TESS_PRIMITIVE_QUADS', 2)
TESS_PRIMITIVE_ISOLINES = enum_tess_primitive_mode.define('TESS_PRIMITIVE_ISOLINES', 3)
# Generated record binding for the mesh/task-shader section of shader_info
# (32 bytes total, per SIZE). Annotated metadata appears to be a byte offset --
# TODO confirm against the generator. Mirrors the C struct; do not edit by hand.
@c.record
class struct_shader_info_mesh(c.Struct):
  SIZE = 32
  ms_cross_invocation_output_access: Annotated[uint64_t, 0]
  ts_mesh_dispatch_dimensions: Annotated[c.Array[uint32_t, Literal[3]], 8]
  max_vertices_out: Annotated[uint16_t, 20]
  max_primitives_out: Annotated[uint16_t, 22]
  primitive_type: Annotated[enum_mesa_prim, 24]
  nv: Annotated[Annotated[bool, ctypes.c_bool], 25]
# Opaque forward declaration: the transform-feedback info struct's fields are
# not exposed by this binding; it is only used behind pointers.
class struct_nir_xfb_info(ctypes.Structure): pass
nir_xfb_info: TypeAlias = struct_nir_xfb_info  # C typedef mirror
# Generated record binding for a NIR function parameter descriptor
# (32 bytes total, per SIZE). Annotated metadata appears to be a byte offset --
# TODO confirm against the generator. Mirrors the C struct; do not edit by hand.
@c.record
class struct_nir_parameter(c.Struct):
  SIZE = 32
  num_components: Annotated[uint8_t, 0]
  bit_size: Annotated[uint8_t, 1]
  is_return: Annotated[Annotated[bool, ctypes.c_bool], 2]
  implicit_conversion_prohibited: Annotated[Annotated[bool, ctypes.c_bool], 3]
  is_uniform: Annotated[Annotated[bool, ctypes.c_bool], 4]
  mode: Annotated[nir_variable_mode, 8]
  driver_attributes: Annotated[uint32_t, 12]
  type: Annotated[c.POINTER[struct_glsl_type], 16]
  name: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 24]
nir_parameter: TypeAlias = struct_nir_parameter  # C typedef mirror
# Generated record binding for nir_function_impl, the body of a NIR function
# (144 bytes total, per SIZE). Annotated metadata appears to be a byte offset --
# TODO confirm against the generator. Mirrors the C struct; do not edit by hand.
@c.record
class struct_nir_function_impl(c.Struct):
  SIZE = 144
  cf_node: Annotated[nir_cf_node, 0]
  function: Annotated[c.POINTER[nir_function], 32]
  preamble: Annotated[c.POINTER[nir_function], 40]
  body: Annotated[struct_exec_list, 48]
  end_block: Annotated[c.POINTER[nir_block], 80]
  locals: Annotated[struct_exec_list, 88]
  ssa_alloc: Annotated[Annotated[int, ctypes.c_uint32], 120]
  num_blocks: Annotated[Annotated[int, ctypes.c_uint32], 124]
  structured: Annotated[Annotated[bool, ctypes.c_bool], 128]
  valid_metadata: Annotated[nir_metadata, 132]
  loop_analysis_indirect_mask: Annotated[nir_variable_mode, 136]
  loop_analysis_force_unroll_sampler_indirect: Annotated[Annotated[bool, ctypes.c_bool], 140]
nir_function_impl: TypeAlias = struct_nir_function_impl  # C typedef mirror
# Generated enum binding (i32-backed). Values are single-bit flags that can be
# OR-ed together (note nir_metadata_control_flow == block_index | dominance,
# and nir_metadata_all == -9, i.e. every bit except not_properly_reset).
# Must stay in sync with the C enum -- do not renumber by hand.
class nir_metadata(Annotated[int, ctypes.c_int32], c.Enum): pass
nir_metadata_none = nir_metadata.define('nir_metadata_none', 0)
nir_metadata_block_index = nir_metadata.define('nir_metadata_block_index', 1)
nir_metadata_dominance = nir_metadata.define('nir_metadata_dominance', 2)
nir_metadata_live_defs = nir_metadata.define('nir_metadata_live_defs', 4)
nir_metadata_not_properly_reset = nir_metadata.define('nir_metadata_not_properly_reset', 8)
nir_metadata_loop_analysis = nir_metadata.define('nir_metadata_loop_analysis', 16)
nir_metadata_instr_index = nir_metadata.define('nir_metadata_instr_index', 32)
nir_metadata_divergence = nir_metadata.define('nir_metadata_divergence', 64)
nir_metadata_control_flow = nir_metadata.define('nir_metadata_control_flow', 3)
nir_metadata_all = nir_metadata.define('nir_metadata_all', -9)
# C typedef mirror; struct_nir_call_instr is defined elsewhere in this file.
nir_call_instr: TypeAlias = struct_nir_call_instr
# Generated record binding for nir_intrinsic_instr (120 bytes, per SIZE).
# Annotated metadata appears to be a byte offset -- TODO confirm against the
# generator. Note `src` is a zero-length trailing array at offset 120 (== SIZE):
# a C flexible-array member whose real length depends on the intrinsic.
# Mirrors the C struct; do not edit offsets by hand.
@c.record
class struct_nir_intrinsic_instr(c.Struct):
  SIZE = 120
  instr: Annotated[nir_instr, 0]
  intrinsic: Annotated[nir_intrinsic_op, 32]
  _def: Annotated[nir_def, 40]  # leading underscore avoids Python keyword clash with C field `def`
  num_components: Annotated[uint8_t, 72]
  const_index: Annotated[c.Array[Annotated[int, ctypes.c_int32], Literal[8]], 76]
  name: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 112]
  src: Annotated[c.Array[nir_src, Literal[0]], 120]
class nir_intrinsic_op(Annotated[int, ctypes.c_uint32], c.Enum): pass
nir_intrinsic_accept_ray_intersection = nir_intrinsic_op.define('nir_intrinsic_accept_ray_intersection', 0)
nir_intrinsic_addr_mode_is = nir_intrinsic_op.define('nir_intrinsic_addr_mode_is', 1)
nir_intrinsic_al2p_nv = nir_intrinsic_op.define('nir_intrinsic_al2p_nv', 2)
nir_intrinsic_ald_nv = nir_intrinsic_op.define('nir_intrinsic_ald_nv', 3)
nir_intrinsic_alpha_to_coverage = nir_intrinsic_op.define('nir_intrinsic_alpha_to_coverage', 4)
nir_intrinsic_as_uniform = nir_intrinsic_op.define('nir_intrinsic_as_uniform', 5)
nir_intrinsic_ast_nv = nir_intrinsic_op.define('nir_intrinsic_ast_nv', 6)
nir_intrinsic_atomic_add_gen_prim_count_amd = nir_intrinsic_op.define('nir_intrinsic_atomic_add_gen_prim_count_amd', 7)
nir_intrinsic_atomic_add_gs_emit_prim_count_amd = nir_intrinsic_op.define('nir_intrinsic_atomic_add_gs_emit_prim_count_amd', 8)
nir_intrinsic_atomic_add_shader_invocation_count_amd = nir_intrinsic_op.define('nir_intrinsic_atomic_add_shader_invocation_count_amd', 9)
nir_intrinsic_atomic_add_xfb_prim_count_amd = nir_intrinsic_op.define('nir_intrinsic_atomic_add_xfb_prim_count_amd', 10)
nir_intrinsic_atomic_counter_add = nir_intrinsic_op.define('nir_intrinsic_atomic_counter_add', 11)
nir_intrinsic_atomic_counter_add_deref = nir_intrinsic_op.define('nir_intrinsic_atomic_counter_add_deref', 12)
nir_intrinsic_atomic_counter_and = nir_intrinsic_op.define('nir_intrinsic_atomic_counter_and', 13)
nir_intrinsic_atomic_counter_and_deref = nir_intrinsic_op.define('nir_intrinsic_atomic_counter_and_deref', 14)
nir_intrinsic_atomic_counter_comp_swap = nir_intrinsic_op.define('nir_intrinsic_atomic_counter_comp_swap', 15)
nir_intrinsic_atomic_counter_comp_swap_deref = nir_intrinsic_op.define('nir_intrinsic_atomic_counter_comp_swap_deref', 16)
nir_intrinsic_atomic_counter_exchange = nir_intrinsic_op.define('nir_intrinsic_atomic_counter_exchange', 17)
nir_intrinsic_atomic_counter_exchange_deref = nir_intrinsic_op.define('nir_intrinsic_atomic_counter_exchange_deref', 18)
nir_intrinsic_atomic_counter_inc = nir_intrinsic_op.define('nir_intrinsic_atomic_counter_inc', 19)
nir_intrinsic_atomic_counter_inc_deref = nir_intrinsic_op.define('nir_intrinsic_atomic_counter_inc_deref', 20)
nir_intrinsic_atomic_counter_max = nir_intrinsic_op.define('nir_intrinsic_atomic_counter_max', 21)
nir_intrinsic_atomic_counter_max_deref = nir_intrinsic_op.define('nir_intrinsic_atomic_counter_max_deref', 22)
nir_intrinsic_atomic_counter_min = nir_intrinsic_op.define('nir_intrinsic_atomic_counter_min', 23)
nir_intrinsic_atomic_counter_min_deref = nir_intrinsic_op.define('nir_intrinsic_atomic_counter_min_deref', 24)
nir_intrinsic_atomic_counter_or = nir_intrinsic_op.define('nir_intrinsic_atomic_counter_or', 25)
nir_intrinsic_atomic_counter_or_deref = nir_intrinsic_op.define('nir_intrinsic_atomic_counter_or_deref', 26)
nir_intrinsic_atomic_counter_post_dec = nir_intrinsic_op.define('nir_intrinsic_atomic_counter_post_dec', 27)
nir_intrinsic_atomic_counter_post_dec_deref = nir_intrinsic_op.define('nir_intrinsic_atomic_counter_post_dec_deref', 28)
nir_intrinsic_atomic_counter_pre_dec = nir_intrinsic_op.define('nir_intrinsic_atomic_counter_pre_dec', 29)
nir_intrinsic_atomic_counter_pre_dec_deref = nir_intrinsic_op.define('nir_intrinsic_atomic_counter_pre_dec_deref', 30)
nir_intrinsic_atomic_counter_read = nir_intrinsic_op.define('nir_intrinsic_atomic_counter_read', 31)
nir_intrinsic_atomic_counter_read_deref = nir_intrinsic_op.define('nir_intrinsic_atomic_counter_read_deref', 32)
nir_intrinsic_atomic_counter_xor = nir_intrinsic_op.define('nir_intrinsic_atomic_counter_xor', 33)
nir_intrinsic_atomic_counter_xor_deref = nir_intrinsic_op.define('nir_intrinsic_atomic_counter_xor_deref', 34)
nir_intrinsic_ballot = nir_intrinsic_op.define('nir_intrinsic_ballot', 35)
nir_intrinsic_ballot_bit_count_exclusive = nir_intrinsic_op.define('nir_intrinsic_ballot_bit_count_exclusive', 36)
nir_intrinsic_ballot_bit_count_inclusive = nir_intrinsic_op.define('nir_intrinsic_ballot_bit_count_inclusive', 37)
nir_intrinsic_ballot_bit_count_reduce = nir_intrinsic_op.define('nir_intrinsic_ballot_bit_count_reduce', 38)
nir_intrinsic_ballot_bitfield_extract = nir_intrinsic_op.define('nir_intrinsic_ballot_bitfield_extract', 39)
nir_intrinsic_ballot_find_lsb = nir_intrinsic_op.define('nir_intrinsic_ballot_find_lsb', 40)
nir_intrinsic_ballot_find_msb = nir_intrinsic_op.define('nir_intrinsic_ballot_find_msb', 41)
nir_intrinsic_ballot_relaxed = nir_intrinsic_op.define('nir_intrinsic_ballot_relaxed', 42)
nir_intrinsic_bar_break_nv = nir_intrinsic_op.define('nir_intrinsic_bar_break_nv', 43)
nir_intrinsic_bar_set_nv = nir_intrinsic_op.define('nir_intrinsic_bar_set_nv', 44)
nir_intrinsic_bar_sync_nv = nir_intrinsic_op.define('nir_intrinsic_bar_sync_nv', 45)
nir_intrinsic_barrier = nir_intrinsic_op.define('nir_intrinsic_barrier', 46)
nir_intrinsic_begin_invocation_interlock = nir_intrinsic_op.define('nir_intrinsic_begin_invocation_interlock', 47)
nir_intrinsic_bindgen_return = nir_intrinsic_op.define('nir_intrinsic_bindgen_return', 48)
nir_intrinsic_bindless_image_agx = nir_intrinsic_op.define('nir_intrinsic_bindless_image_agx', 49)
nir_intrinsic_bindless_image_atomic = nir_intrinsic_op.define('nir_intrinsic_bindless_image_atomic', 50)
nir_intrinsic_bindless_image_atomic_swap = nir_intrinsic_op.define('nir_intrinsic_bindless_image_atomic_swap', 51)
nir_intrinsic_bindless_image_descriptor_amd = nir_intrinsic_op.define('nir_intrinsic_bindless_image_descriptor_amd', 52)
nir_intrinsic_bindless_image_format = nir_intrinsic_op.define('nir_intrinsic_bindless_image_format', 53)
nir_intrinsic_bindless_image_fragment_mask_load_amd = nir_intrinsic_op.define('nir_intrinsic_bindless_image_fragment_mask_load_amd', 54)
nir_intrinsic_bindless_image_levels = nir_intrinsic_op.define('nir_intrinsic_bindless_image_levels', 55)
nir_intrinsic_bindless_image_load = nir_intrinsic_op.define('nir_intrinsic_bindless_image_load', 56)
nir_intrinsic_bindless_image_load_raw_intel = nir_intrinsic_op.define('nir_intrinsic_bindless_image_load_raw_intel', 57)
nir_intrinsic_bindless_image_order = nir_intrinsic_op.define('nir_intrinsic_bindless_image_order', 58)
nir_intrinsic_bindless_image_samples = nir_intrinsic_op.define('nir_intrinsic_bindless_image_samples', 59)
nir_intrinsic_bindless_image_samples_identical = nir_intrinsic_op.define('nir_intrinsic_bindless_image_samples_identical', 60)
nir_intrinsic_bindless_image_size = nir_intrinsic_op.define('nir_intrinsic_bindless_image_size', 61)
nir_intrinsic_bindless_image_sparse_load = nir_intrinsic_op.define('nir_intrinsic_bindless_image_sparse_load', 62)
nir_intrinsic_bindless_image_store = nir_intrinsic_op.define('nir_intrinsic_bindless_image_store', 63)
nir_intrinsic_bindless_image_store_block_agx = nir_intrinsic_op.define('nir_intrinsic_bindless_image_store_block_agx', 64)
nir_intrinsic_bindless_image_store_raw_intel = nir_intrinsic_op.define('nir_intrinsic_bindless_image_store_raw_intel', 65)
nir_intrinsic_bindless_image_texel_address = nir_intrinsic_op.define('nir_intrinsic_bindless_image_texel_address', 66)
nir_intrinsic_bindless_resource_ir3 = nir_intrinsic_op.define('nir_intrinsic_bindless_resource_ir3', 67)
nir_intrinsic_brcst_active_ir3 = nir_intrinsic_op.define('nir_intrinsic_brcst_active_ir3', 68)
nir_intrinsic_btd_retire_intel = nir_intrinsic_op.define('nir_intrinsic_btd_retire_intel', 69)
nir_intrinsic_btd_spawn_intel = nir_intrinsic_op.define('nir_intrinsic_btd_spawn_intel', 70)
nir_intrinsic_btd_stack_push_intel = nir_intrinsic_op.define('nir_intrinsic_btd_stack_push_intel', 71)
nir_intrinsic_bvh64_intersect_ray_amd = nir_intrinsic_op.define('nir_intrinsic_bvh64_intersect_ray_amd', 72)
nir_intrinsic_bvh8_intersect_ray_amd = nir_intrinsic_op.define('nir_intrinsic_bvh8_intersect_ray_amd', 73)
nir_intrinsic_bvh_stack_rtn_amd = nir_intrinsic_op.define('nir_intrinsic_bvh_stack_rtn_amd', 74)
nir_intrinsic_cmat_binary_op = nir_intrinsic_op.define('nir_intrinsic_cmat_binary_op', 75)
nir_intrinsic_cmat_bitcast = nir_intrinsic_op.define('nir_intrinsic_cmat_bitcast', 76)
nir_intrinsic_cmat_construct = nir_intrinsic_op.define('nir_intrinsic_cmat_construct', 77)
nir_intrinsic_cmat_convert = nir_intrinsic_op.define('nir_intrinsic_cmat_convert', 78)
nir_intrinsic_cmat_copy = nir_intrinsic_op.define('nir_intrinsic_cmat_copy', 79)
nir_intrinsic_cmat_extract = nir_intrinsic_op.define('nir_intrinsic_cmat_extract', 80)
nir_intrinsic_cmat_insert = nir_intrinsic_op.define('nir_intrinsic_cmat_insert', 81)
nir_intrinsic_cmat_length = nir_intrinsic_op.define('nir_intrinsic_cmat_length', 82)
nir_intrinsic_cmat_load = nir_intrinsic_op.define('nir_intrinsic_cmat_load', 83)
nir_intrinsic_cmat_muladd = nir_intrinsic_op.define('nir_intrinsic_cmat_muladd', 84)
nir_intrinsic_cmat_muladd_amd = nir_intrinsic_op.define('nir_intrinsic_cmat_muladd_amd', 85)
nir_intrinsic_cmat_muladd_nv = nir_intrinsic_op.define('nir_intrinsic_cmat_muladd_nv', 86)
nir_intrinsic_cmat_scalar_op = nir_intrinsic_op.define('nir_intrinsic_cmat_scalar_op', 87)
nir_intrinsic_cmat_store = nir_intrinsic_op.define('nir_intrinsic_cmat_store', 88)
nir_intrinsic_cmat_transpose = nir_intrinsic_op.define('nir_intrinsic_cmat_transpose', 89)
nir_intrinsic_cmat_unary_op = nir_intrinsic_op.define('nir_intrinsic_cmat_unary_op', 90)
nir_intrinsic_convert_alu_types = nir_intrinsic_op.define('nir_intrinsic_convert_alu_types', 91)
nir_intrinsic_convert_cmat_intel = nir_intrinsic_op.define('nir_intrinsic_convert_cmat_intel', 92)
nir_intrinsic_copy_deref = nir_intrinsic_op.define('nir_intrinsic_copy_deref', 93)
nir_intrinsic_copy_fs_outputs_nv = nir_intrinsic_op.define('nir_intrinsic_copy_fs_outputs_nv', 94)
nir_intrinsic_copy_global_to_uniform_ir3 = nir_intrinsic_op.define('nir_intrinsic_copy_global_to_uniform_ir3', 95)
nir_intrinsic_copy_push_const_to_uniform_ir3 = nir_intrinsic_op.define('nir_intrinsic_copy_push_const_to_uniform_ir3', 96)
nir_intrinsic_copy_ubo_to_uniform_ir3 = nir_intrinsic_op.define('nir_intrinsic_copy_ubo_to_uniform_ir3', 97)
nir_intrinsic_ddx = nir_intrinsic_op.define('nir_intrinsic_ddx', 98)
nir_intrinsic_ddx_coarse = nir_intrinsic_op.define('nir_intrinsic_ddx_coarse', 99)
nir_intrinsic_ddx_fine = nir_intrinsic_op.define('nir_intrinsic_ddx_fine', 100)
nir_intrinsic_ddy = nir_intrinsic_op.define('nir_intrinsic_ddy', 101)
nir_intrinsic_ddy_coarse = nir_intrinsic_op.define('nir_intrinsic_ddy_coarse', 102)
nir_intrinsic_ddy_fine = nir_intrinsic_op.define('nir_intrinsic_ddy_fine', 103)
nir_intrinsic_debug_break = nir_intrinsic_op.define('nir_intrinsic_debug_break', 104)
nir_intrinsic_decl_reg = nir_intrinsic_op.define('nir_intrinsic_decl_reg', 105)
nir_intrinsic_demote = nir_intrinsic_op.define('nir_intrinsic_demote', 106)
nir_intrinsic_demote_if = nir_intrinsic_op.define('nir_intrinsic_demote_if', 107)
nir_intrinsic_demote_samples = nir_intrinsic_op.define('nir_intrinsic_demote_samples', 108)
nir_intrinsic_deref_atomic = nir_intrinsic_op.define('nir_intrinsic_deref_atomic', 109)
nir_intrinsic_deref_atomic_swap = nir_intrinsic_op.define('nir_intrinsic_deref_atomic_swap', 110)
nir_intrinsic_deref_buffer_array_length = nir_intrinsic_op.define('nir_intrinsic_deref_buffer_array_length', 111)
nir_intrinsic_deref_implicit_array_length = nir_intrinsic_op.define('nir_intrinsic_deref_implicit_array_length', 112)
nir_intrinsic_deref_mode_is = nir_intrinsic_op.define('nir_intrinsic_deref_mode_is', 113)
nir_intrinsic_deref_texture_src = nir_intrinsic_op.define('nir_intrinsic_deref_texture_src', 114)
nir_intrinsic_doorbell_agx = nir_intrinsic_op.define('nir_intrinsic_doorbell_agx', 115)
nir_intrinsic_dpas_intel = nir_intrinsic_op.define('nir_intrinsic_dpas_intel', 116)
nir_intrinsic_dpp16_shift_amd = nir_intrinsic_op.define('nir_intrinsic_dpp16_shift_amd', 117)
nir_intrinsic_elect = nir_intrinsic_op.define('nir_intrinsic_elect', 118)
nir_intrinsic_elect_any_ir3 = nir_intrinsic_op.define('nir_intrinsic_elect_any_ir3', 119)
nir_intrinsic_emit_primitive_poly = nir_intrinsic_op.define('nir_intrinsic_emit_primitive_poly', 120)
nir_intrinsic_emit_vertex = nir_intrinsic_op.define('nir_intrinsic_emit_vertex', 121)
nir_intrinsic_emit_vertex_nv = nir_intrinsic_op.define('nir_intrinsic_emit_vertex_nv', 122)
nir_intrinsic_emit_vertex_with_counter = nir_intrinsic_op.define('nir_intrinsic_emit_vertex_with_counter', 123)
nir_intrinsic_end_invocation_interlock = nir_intrinsic_op.define('nir_intrinsic_end_invocation_interlock', 124)
nir_intrinsic_end_primitive = nir_intrinsic_op.define('nir_intrinsic_end_primitive', 125)
nir_intrinsic_end_primitive_nv = nir_intrinsic_op.define('nir_intrinsic_end_primitive_nv', 126)
nir_intrinsic_end_primitive_with_counter = nir_intrinsic_op.define('nir_intrinsic_end_primitive_with_counter', 127)
nir_intrinsic_enqueue_node_payloads = nir_intrinsic_op.define('nir_intrinsic_enqueue_node_payloads', 128)
nir_intrinsic_exclusive_scan = nir_intrinsic_op.define('nir_intrinsic_exclusive_scan', 129)
nir_intrinsic_exclusive_scan_clusters_ir3 = nir_intrinsic_op.define('nir_intrinsic_exclusive_scan_clusters_ir3', 130)
nir_intrinsic_execute_callable = nir_intrinsic_op.define('nir_intrinsic_execute_callable', 131)
nir_intrinsic_execute_closest_hit_amd = nir_intrinsic_op.define('nir_intrinsic_execute_closest_hit_amd', 132)
nir_intrinsic_execute_miss_amd = nir_intrinsic_op.define('nir_intrinsic_execute_miss_amd', 133)
nir_intrinsic_export_agx = nir_intrinsic_op.define('nir_intrinsic_export_agx', 134)
nir_intrinsic_export_amd = nir_intrinsic_op.define('nir_intrinsic_export_amd', 135)
nir_intrinsic_export_dual_src_blend_amd = nir_intrinsic_op.define('nir_intrinsic_export_dual_src_blend_amd', 136)
nir_intrinsic_export_row_amd = nir_intrinsic_op.define('nir_intrinsic_export_row_amd', 137)
nir_intrinsic_fence_helper_exit_agx = nir_intrinsic_op.define('nir_intrinsic_fence_helper_exit_agx', 138)
nir_intrinsic_fence_mem_to_tex_agx = nir_intrinsic_op.define('nir_intrinsic_fence_mem_to_tex_agx', 139)
nir_intrinsic_fence_pbe_to_tex_agx = nir_intrinsic_op.define('nir_intrinsic_fence_pbe_to_tex_agx', 140)
nir_intrinsic_fence_pbe_to_tex_pixel_agx = nir_intrinsic_op.define('nir_intrinsic_fence_pbe_to_tex_pixel_agx', 141)
nir_intrinsic_final_primitive_nv = nir_intrinsic_op.define('nir_intrinsic_final_primitive_nv', 142)
nir_intrinsic_finalize_incoming_node_payload = nir_intrinsic_op.define('nir_intrinsic_finalize_incoming_node_payload', 143)
nir_intrinsic_first_invocation = nir_intrinsic_op.define('nir_intrinsic_first_invocation', 144)
nir_intrinsic_fs_out_nv = nir_intrinsic_op.define('nir_intrinsic_fs_out_nv', 145)
nir_intrinsic_gds_atomic_add_amd = nir_intrinsic_op.define('nir_intrinsic_gds_atomic_add_amd', 146)
nir_intrinsic_get_ssbo_size = nir_intrinsic_op.define('nir_intrinsic_get_ssbo_size', 147)
nir_intrinsic_get_ubo_size = nir_intrinsic_op.define('nir_intrinsic_get_ubo_size', 148)
nir_intrinsic_global_atomic = nir_intrinsic_op.define('nir_intrinsic_global_atomic', 149)
nir_intrinsic_global_atomic_2x32 = nir_intrinsic_op.define('nir_intrinsic_global_atomic_2x32', 150)
nir_intrinsic_global_atomic_agx = nir_intrinsic_op.define('nir_intrinsic_global_atomic_agx', 151)
nir_intrinsic_global_atomic_amd = nir_intrinsic_op.define('nir_intrinsic_global_atomic_amd', 152)
nir_intrinsic_global_atomic_swap = nir_intrinsic_op.define('nir_intrinsic_global_atomic_swap', 153)
nir_intrinsic_global_atomic_swap_2x32 = nir_intrinsic_op.define('nir_intrinsic_global_atomic_swap_2x32', 154)
nir_intrinsic_global_atomic_swap_agx = nir_intrinsic_op.define('nir_intrinsic_global_atomic_swap_agx', 155)
nir_intrinsic_global_atomic_swap_amd = nir_intrinsic_op.define('nir_intrinsic_global_atomic_swap_amd', 156)
nir_intrinsic_ignore_ray_intersection = nir_intrinsic_op.define('nir_intrinsic_ignore_ray_intersection', 157)
nir_intrinsic_imadsp_nv = nir_intrinsic_op.define('nir_intrinsic_imadsp_nv', 158)
nir_intrinsic_image_atomic = nir_intrinsic_op.define('nir_intrinsic_image_atomic', 159)
nir_intrinsic_image_atomic_swap = nir_intrinsic_op.define('nir_intrinsic_image_atomic_swap', 160)
nir_intrinsic_image_deref_atomic = nir_intrinsic_op.define('nir_intrinsic_image_deref_atomic', 161)
nir_intrinsic_image_deref_atomic_swap = nir_intrinsic_op.define('nir_intrinsic_image_deref_atomic_swap', 162)
nir_intrinsic_image_deref_descriptor_amd = nir_intrinsic_op.define('nir_intrinsic_image_deref_descriptor_amd', 163)
nir_intrinsic_image_deref_format = nir_intrinsic_op.define('nir_intrinsic_image_deref_format', 164)
nir_intrinsic_image_deref_fragment_mask_load_amd = nir_intrinsic_op.define('nir_intrinsic_image_deref_fragment_mask_load_amd', 165)
nir_intrinsic_image_deref_levels = nir_intrinsic_op.define('nir_intrinsic_image_deref_levels', 166)
nir_intrinsic_image_deref_load = nir_intrinsic_op.define('nir_intrinsic_image_deref_load', 167)
nir_intrinsic_image_deref_load_info_nv = nir_intrinsic_op.define('nir_intrinsic_image_deref_load_info_nv', 168)
nir_intrinsic_image_deref_load_param_intel = nir_intrinsic_op.define('nir_intrinsic_image_deref_load_param_intel', 169)
nir_intrinsic_image_deref_load_raw_intel = nir_intrinsic_op.define('nir_intrinsic_image_deref_load_raw_intel', 170)
nir_intrinsic_image_deref_order = nir_intrinsic_op.define('nir_intrinsic_image_deref_order', 171)
nir_intrinsic_image_deref_samples = nir_intrinsic_op.define('nir_intrinsic_image_deref_samples', 172)
nir_intrinsic_image_deref_samples_identical = nir_intrinsic_op.define('nir_intrinsic_image_deref_samples_identical', 173)
nir_intrinsic_image_deref_size = nir_intrinsic_op.define('nir_intrinsic_image_deref_size', 174)
nir_intrinsic_image_deref_sparse_load = nir_intrinsic_op.define('nir_intrinsic_image_deref_sparse_load', 175)
nir_intrinsic_image_deref_store = nir_intrinsic_op.define('nir_intrinsic_image_deref_store', 176)
nir_intrinsic_image_deref_store_block_agx = nir_intrinsic_op.define('nir_intrinsic_image_deref_store_block_agx', 177)
nir_intrinsic_image_deref_store_raw_intel = nir_intrinsic_op.define('nir_intrinsic_image_deref_store_raw_intel', 178)
nir_intrinsic_image_deref_texel_address = nir_intrinsic_op.define('nir_intrinsic_image_deref_texel_address', 179)
nir_intrinsic_image_descriptor_amd = nir_intrinsic_op.define('nir_intrinsic_image_descriptor_amd', 180)
nir_intrinsic_image_format = nir_intrinsic_op.define('nir_intrinsic_image_format', 181)
nir_intrinsic_image_fragment_mask_load_amd = nir_intrinsic_op.define('nir_intrinsic_image_fragment_mask_load_amd', 182)
nir_intrinsic_image_levels = nir_intrinsic_op.define('nir_intrinsic_image_levels', 183)
nir_intrinsic_image_load = nir_intrinsic_op.define('nir_intrinsic_image_load', 184)
nir_intrinsic_image_load_raw_intel = nir_intrinsic_op.define('nir_intrinsic_image_load_raw_intel', 185)
nir_intrinsic_image_order = nir_intrinsic_op.define('nir_intrinsic_image_order', 186)
nir_intrinsic_image_samples = nir_intrinsic_op.define('nir_intrinsic_image_samples', 187)
nir_intrinsic_image_samples_identical = nir_intrinsic_op.define('nir_intrinsic_image_samples_identical', 188)
nir_intrinsic_image_size = nir_intrinsic_op.define('nir_intrinsic_image_size', 189)
nir_intrinsic_image_sparse_load = nir_intrinsic_op.define('nir_intrinsic_image_sparse_load', 190)
nir_intrinsic_image_store = nir_intrinsic_op.define('nir_intrinsic_image_store', 191)
nir_intrinsic_image_store_block_agx = nir_intrinsic_op.define('nir_intrinsic_image_store_block_agx', 192)
nir_intrinsic_image_store_raw_intel = nir_intrinsic_op.define('nir_intrinsic_image_store_raw_intel', 193)
nir_intrinsic_image_texel_address = nir_intrinsic_op.define('nir_intrinsic_image_texel_address', 194)
nir_intrinsic_inclusive_scan = nir_intrinsic_op.define('nir_intrinsic_inclusive_scan', 195)
nir_intrinsic_inclusive_scan_clusters_ir3 = nir_intrinsic_op.define('nir_intrinsic_inclusive_scan_clusters_ir3', 196)
nir_intrinsic_initialize_node_payloads = nir_intrinsic_op.define('nir_intrinsic_initialize_node_payloads', 197)
nir_intrinsic_interp_deref_at_centroid = nir_intrinsic_op.define('nir_intrinsic_interp_deref_at_centroid', 198)
nir_intrinsic_interp_deref_at_offset = nir_intrinsic_op.define('nir_intrinsic_interp_deref_at_offset', 199)
nir_intrinsic_interp_deref_at_sample = nir_intrinsic_op.define('nir_intrinsic_interp_deref_at_sample', 200)
nir_intrinsic_interp_deref_at_vertex = nir_intrinsic_op.define('nir_intrinsic_interp_deref_at_vertex', 201)
nir_intrinsic_inverse_ballot = nir_intrinsic_op.define('nir_intrinsic_inverse_ballot', 202)
nir_intrinsic_ipa_nv = nir_intrinsic_op.define('nir_intrinsic_ipa_nv', 203)
nir_intrinsic_is_helper_invocation = nir_intrinsic_op.define('nir_intrinsic_is_helper_invocation', 204)
nir_intrinsic_is_sparse_resident_zink = nir_intrinsic_op.define('nir_intrinsic_is_sparse_resident_zink', 205)
nir_intrinsic_is_sparse_texels_resident = nir_intrinsic_op.define('nir_intrinsic_is_sparse_texels_resident', 206)
nir_intrinsic_is_subgroup_invocation_lt_amd = nir_intrinsic_op.define('nir_intrinsic_is_subgroup_invocation_lt_amd', 207)
nir_intrinsic_isberd_nv = nir_intrinsic_op.define('nir_intrinsic_isberd_nv', 208)
nir_intrinsic_lane_permute_16_amd = nir_intrinsic_op.define('nir_intrinsic_lane_permute_16_amd', 209)
nir_intrinsic_last_invocation = nir_intrinsic_op.define('nir_intrinsic_last_invocation', 210)
nir_intrinsic_launch_mesh_workgroups = nir_intrinsic_op.define('nir_intrinsic_launch_mesh_workgroups', 211)
nir_intrinsic_launch_mesh_workgroups_with_payload_deref = nir_intrinsic_op.define('nir_intrinsic_launch_mesh_workgroups_with_payload_deref', 212)
nir_intrinsic_ldc_nv = nir_intrinsic_op.define('nir_intrinsic_ldc_nv', 213)
nir_intrinsic_ldcx_nv = nir_intrinsic_op.define('nir_intrinsic_ldcx_nv', 214)
nir_intrinsic_ldtram_nv = nir_intrinsic_op.define('nir_intrinsic_ldtram_nv', 215)
nir_intrinsic_load_aa_line_width = nir_intrinsic_op.define('nir_intrinsic_load_aa_line_width', 216)
nir_intrinsic_load_accel_struct_amd = nir_intrinsic_op.define('nir_intrinsic_load_accel_struct_amd', 217)
nir_intrinsic_load_active_samples_agx = nir_intrinsic_op.define('nir_intrinsic_load_active_samples_agx', 218)
nir_intrinsic_load_active_subgroup_count_agx = nir_intrinsic_op.define('nir_intrinsic_load_active_subgroup_count_agx', 219)
nir_intrinsic_load_active_subgroup_invocation_agx = nir_intrinsic_op.define('nir_intrinsic_load_active_subgroup_invocation_agx', 220)
nir_intrinsic_load_agx = nir_intrinsic_op.define('nir_intrinsic_load_agx', 221)
nir_intrinsic_load_alpha_reference_amd = nir_intrinsic_op.define('nir_intrinsic_load_alpha_reference_amd', 222)
nir_intrinsic_load_api_sample_mask_agx = nir_intrinsic_op.define('nir_intrinsic_load_api_sample_mask_agx', 223)
nir_intrinsic_load_attrib_clamp_agx = nir_intrinsic_op.define('nir_intrinsic_load_attrib_clamp_agx', 224)
nir_intrinsic_load_attribute_pan = nir_intrinsic_op.define('nir_intrinsic_load_attribute_pan', 225)
nir_intrinsic_load_back_face_agx = nir_intrinsic_op.define('nir_intrinsic_load_back_face_agx', 226)
nir_intrinsic_load_barycentric_at_offset = nir_intrinsic_op.define('nir_intrinsic_load_barycentric_at_offset', 227)
nir_intrinsic_load_barycentric_at_offset_nv = nir_intrinsic_op.define('nir_intrinsic_load_barycentric_at_offset_nv', 228)
nir_intrinsic_load_barycentric_at_sample = nir_intrinsic_op.define('nir_intrinsic_load_barycentric_at_sample', 229)
nir_intrinsic_load_barycentric_centroid = nir_intrinsic_op.define('nir_intrinsic_load_barycentric_centroid', 230)
nir_intrinsic_load_barycentric_coord_at_offset = nir_intrinsic_op.define('nir_intrinsic_load_barycentric_coord_at_offset', 231)
nir_intrinsic_load_barycentric_coord_at_sample = nir_intrinsic_op.define('nir_intrinsic_load_barycentric_coord_at_sample', 232)
nir_intrinsic_load_barycentric_coord_centroid = nir_intrinsic_op.define('nir_intrinsic_load_barycentric_coord_centroid', 233)
nir_intrinsic_load_barycentric_coord_pixel = nir_intrinsic_op.define('nir_intrinsic_load_barycentric_coord_pixel', 234)
nir_intrinsic_load_barycentric_coord_sample = nir_intrinsic_op.define('nir_intrinsic_load_barycentric_coord_sample', 235)
nir_intrinsic_load_barycentric_model = nir_intrinsic_op.define('nir_intrinsic_load_barycentric_model', 236)
nir_intrinsic_load_barycentric_optimize_amd = nir_intrinsic_op.define('nir_intrinsic_load_barycentric_optimize_amd', 237)
nir_intrinsic_load_barycentric_pixel = nir_intrinsic_op.define('nir_intrinsic_load_barycentric_pixel', 238)
nir_intrinsic_load_barycentric_sample = nir_intrinsic_op.define('nir_intrinsic_load_barycentric_sample', 239)
nir_intrinsic_load_base_global_invocation_id = nir_intrinsic_op.define('nir_intrinsic_load_base_global_invocation_id', 240)
nir_intrinsic_load_base_instance = nir_intrinsic_op.define('nir_intrinsic_load_base_instance', 241)
nir_intrinsic_load_base_vertex = nir_intrinsic_op.define('nir_intrinsic_load_base_vertex', 242)
nir_intrinsic_load_base_workgroup_id = nir_intrinsic_op.define('nir_intrinsic_load_base_workgroup_id', 243)
nir_intrinsic_load_blend_const_color_a_float = nir_intrinsic_op.define('nir_intrinsic_load_blend_const_color_a_float', 244)
nir_intrinsic_load_blend_const_color_aaaa8888_unorm = nir_intrinsic_op.define('nir_intrinsic_load_blend_const_color_aaaa8888_unorm', 245)
# Module-level NIR intrinsic opcode constants (`load_*` family, values 246-575).
# Each line registers one opcode with `nir_intrinsic_op.define(name, value)`
# (declared earlier in this file, outside this chunk) and binds the result to a
# module-level name so callers can reference opcodes directly.
# NOTE(review): this section looks auto-generated from Mesa's NIR intrinsic
# tables — the values are strictly sequential with no gaps and every name
# matches its string literal exactly. Do not hand-edit or renumber entries;
# regenerate from the upstream source instead. TODO confirm the generator.
nir_intrinsic_load_blend_const_color_b_float = nir_intrinsic_op.define('nir_intrinsic_load_blend_const_color_b_float', 246)
nir_intrinsic_load_blend_const_color_g_float = nir_intrinsic_op.define('nir_intrinsic_load_blend_const_color_g_float', 247)
nir_intrinsic_load_blend_const_color_r_float = nir_intrinsic_op.define('nir_intrinsic_load_blend_const_color_r_float', 248)
nir_intrinsic_load_blend_const_color_rgba = nir_intrinsic_op.define('nir_intrinsic_load_blend_const_color_rgba', 249)
nir_intrinsic_load_blend_const_color_rgba8888_unorm = nir_intrinsic_op.define('nir_intrinsic_load_blend_const_color_rgba8888_unorm', 250)
nir_intrinsic_load_btd_global_arg_addr_intel = nir_intrinsic_op.define('nir_intrinsic_load_btd_global_arg_addr_intel', 251)
nir_intrinsic_load_btd_local_arg_addr_intel = nir_intrinsic_op.define('nir_intrinsic_load_btd_local_arg_addr_intel', 252)
nir_intrinsic_load_btd_resume_sbt_addr_intel = nir_intrinsic_op.define('nir_intrinsic_load_btd_resume_sbt_addr_intel', 253)
nir_intrinsic_load_btd_shader_type_intel = nir_intrinsic_op.define('nir_intrinsic_load_btd_shader_type_intel', 254)
nir_intrinsic_load_btd_stack_id_intel = nir_intrinsic_op.define('nir_intrinsic_load_btd_stack_id_intel', 255)
nir_intrinsic_load_buffer_amd = nir_intrinsic_op.define('nir_intrinsic_load_buffer_amd', 256)
nir_intrinsic_load_callable_sbt_addr_intel = nir_intrinsic_op.define('nir_intrinsic_load_callable_sbt_addr_intel', 257)
nir_intrinsic_load_callable_sbt_stride_intel = nir_intrinsic_op.define('nir_intrinsic_load_callable_sbt_stride_intel', 258)
nir_intrinsic_load_clamp_vertex_color_amd = nir_intrinsic_op.define('nir_intrinsic_load_clamp_vertex_color_amd', 259)
nir_intrinsic_load_clip_half_line_width_amd = nir_intrinsic_op.define('nir_intrinsic_load_clip_half_line_width_amd', 260)
nir_intrinsic_load_clip_z_coeff_agx = nir_intrinsic_op.define('nir_intrinsic_load_clip_z_coeff_agx', 261)
nir_intrinsic_load_coalesced_input_count = nir_intrinsic_op.define('nir_intrinsic_load_coalesced_input_count', 262)
nir_intrinsic_load_coefficients_agx = nir_intrinsic_op.define('nir_intrinsic_load_coefficients_agx', 263)
nir_intrinsic_load_color0 = nir_intrinsic_op.define('nir_intrinsic_load_color0', 264)
nir_intrinsic_load_color1 = nir_intrinsic_op.define('nir_intrinsic_load_color1', 265)
nir_intrinsic_load_const_buf_base_addr_lvp = nir_intrinsic_op.define('nir_intrinsic_load_const_buf_base_addr_lvp', 266)
nir_intrinsic_load_const_ir3 = nir_intrinsic_op.define('nir_intrinsic_load_const_ir3', 267)
nir_intrinsic_load_constant = nir_intrinsic_op.define('nir_intrinsic_load_constant', 268)
nir_intrinsic_load_constant_agx = nir_intrinsic_op.define('nir_intrinsic_load_constant_agx', 269)
nir_intrinsic_load_constant_base_ptr = nir_intrinsic_op.define('nir_intrinsic_load_constant_base_ptr', 270)
nir_intrinsic_load_converted_output_pan = nir_intrinsic_op.define('nir_intrinsic_load_converted_output_pan', 271)
nir_intrinsic_load_core_id_agx = nir_intrinsic_op.define('nir_intrinsic_load_core_id_agx', 272)
nir_intrinsic_load_cull_any_enabled_amd = nir_intrinsic_op.define('nir_intrinsic_load_cull_any_enabled_amd', 273)
nir_intrinsic_load_cull_back_face_enabled_amd = nir_intrinsic_op.define('nir_intrinsic_load_cull_back_face_enabled_amd', 274)
nir_intrinsic_load_cull_ccw_amd = nir_intrinsic_op.define('nir_intrinsic_load_cull_ccw_amd', 275)
nir_intrinsic_load_cull_front_face_enabled_amd = nir_intrinsic_op.define('nir_intrinsic_load_cull_front_face_enabled_amd', 276)
nir_intrinsic_load_cull_line_viewport_xy_scale_and_offset_amd = nir_intrinsic_op.define('nir_intrinsic_load_cull_line_viewport_xy_scale_and_offset_amd', 277)
nir_intrinsic_load_cull_mask = nir_intrinsic_op.define('nir_intrinsic_load_cull_mask', 278)
nir_intrinsic_load_cull_mask_and_flags_amd = nir_intrinsic_op.define('nir_intrinsic_load_cull_mask_and_flags_amd', 279)
nir_intrinsic_load_cull_small_line_precision_amd = nir_intrinsic_op.define('nir_intrinsic_load_cull_small_line_precision_amd', 280)
nir_intrinsic_load_cull_small_lines_enabled_amd = nir_intrinsic_op.define('nir_intrinsic_load_cull_small_lines_enabled_amd', 281)
nir_intrinsic_load_cull_small_triangle_precision_amd = nir_intrinsic_op.define('nir_intrinsic_load_cull_small_triangle_precision_amd', 282)
nir_intrinsic_load_cull_small_triangles_enabled_amd = nir_intrinsic_op.define('nir_intrinsic_load_cull_small_triangles_enabled_amd', 283)
nir_intrinsic_load_cull_triangle_viewport_xy_scale_and_offset_amd = nir_intrinsic_op.define('nir_intrinsic_load_cull_triangle_viewport_xy_scale_and_offset_amd', 284)
nir_intrinsic_load_debug_log_desc_amd = nir_intrinsic_op.define('nir_intrinsic_load_debug_log_desc_amd', 285)
nir_intrinsic_load_depth_never_agx = nir_intrinsic_op.define('nir_intrinsic_load_depth_never_agx', 286)
nir_intrinsic_load_deref = nir_intrinsic_op.define('nir_intrinsic_load_deref', 287)
nir_intrinsic_load_deref_block_intel = nir_intrinsic_op.define('nir_intrinsic_load_deref_block_intel', 288)
nir_intrinsic_load_draw_id = nir_intrinsic_op.define('nir_intrinsic_load_draw_id', 289)
nir_intrinsic_load_esgs_vertex_stride_amd = nir_intrinsic_op.define('nir_intrinsic_load_esgs_vertex_stride_amd', 290)
nir_intrinsic_load_exported_agx = nir_intrinsic_op.define('nir_intrinsic_load_exported_agx', 291)
nir_intrinsic_load_fb_layers_v3d = nir_intrinsic_op.define('nir_intrinsic_load_fb_layers_v3d', 292)
nir_intrinsic_load_fbfetch_image_desc_amd = nir_intrinsic_op.define('nir_intrinsic_load_fbfetch_image_desc_amd', 293)
nir_intrinsic_load_fbfetch_image_fmask_desc_amd = nir_intrinsic_op.define('nir_intrinsic_load_fbfetch_image_fmask_desc_amd', 294)
nir_intrinsic_load_fep_w_v3d = nir_intrinsic_op.define('nir_intrinsic_load_fep_w_v3d', 295)
nir_intrinsic_load_first_vertex = nir_intrinsic_op.define('nir_intrinsic_load_first_vertex', 296)
nir_intrinsic_load_fixed_point_size_agx = nir_intrinsic_op.define('nir_intrinsic_load_fixed_point_size_agx', 297)
nir_intrinsic_load_flat_mask = nir_intrinsic_op.define('nir_intrinsic_load_flat_mask', 298)
nir_intrinsic_load_force_vrs_rates_amd = nir_intrinsic_op.define('nir_intrinsic_load_force_vrs_rates_amd', 299)
nir_intrinsic_load_frag_coord = nir_intrinsic_op.define('nir_intrinsic_load_frag_coord', 300)
nir_intrinsic_load_frag_coord_unscaled_ir3 = nir_intrinsic_op.define('nir_intrinsic_load_frag_coord_unscaled_ir3', 301)
nir_intrinsic_load_frag_coord_w = nir_intrinsic_op.define('nir_intrinsic_load_frag_coord_w', 302)
nir_intrinsic_load_frag_coord_z = nir_intrinsic_op.define('nir_intrinsic_load_frag_coord_z', 303)
nir_intrinsic_load_frag_coord_zw_pan = nir_intrinsic_op.define('nir_intrinsic_load_frag_coord_zw_pan', 304)
nir_intrinsic_load_frag_invocation_count = nir_intrinsic_op.define('nir_intrinsic_load_frag_invocation_count', 305)
nir_intrinsic_load_frag_offset_ir3 = nir_intrinsic_op.define('nir_intrinsic_load_frag_offset_ir3', 306)
nir_intrinsic_load_frag_shading_rate = nir_intrinsic_op.define('nir_intrinsic_load_frag_shading_rate', 307)
nir_intrinsic_load_frag_size = nir_intrinsic_op.define('nir_intrinsic_load_frag_size', 308)
nir_intrinsic_load_frag_size_ir3 = nir_intrinsic_op.define('nir_intrinsic_load_frag_size_ir3', 309)
nir_intrinsic_load_from_texture_handle_agx = nir_intrinsic_op.define('nir_intrinsic_load_from_texture_handle_agx', 310)
nir_intrinsic_load_front_face = nir_intrinsic_op.define('nir_intrinsic_load_front_face', 311)
nir_intrinsic_load_front_face_fsign = nir_intrinsic_op.define('nir_intrinsic_load_front_face_fsign', 312)
nir_intrinsic_load_fs_input_interp_deltas = nir_intrinsic_op.define('nir_intrinsic_load_fs_input_interp_deltas', 313)
nir_intrinsic_load_fs_msaa_intel = nir_intrinsic_op.define('nir_intrinsic_load_fs_msaa_intel', 314)
nir_intrinsic_load_fully_covered = nir_intrinsic_op.define('nir_intrinsic_load_fully_covered', 315)
nir_intrinsic_load_geometry_param_buffer_poly = nir_intrinsic_op.define('nir_intrinsic_load_geometry_param_buffer_poly', 316)
nir_intrinsic_load_global = nir_intrinsic_op.define('nir_intrinsic_load_global', 317)
nir_intrinsic_load_global_2x32 = nir_intrinsic_op.define('nir_intrinsic_load_global_2x32', 318)
nir_intrinsic_load_global_amd = nir_intrinsic_op.define('nir_intrinsic_load_global_amd', 319)
nir_intrinsic_load_global_base_ptr = nir_intrinsic_op.define('nir_intrinsic_load_global_base_ptr', 320)
nir_intrinsic_load_global_block_intel = nir_intrinsic_op.define('nir_intrinsic_load_global_block_intel', 321)
nir_intrinsic_load_global_bounded = nir_intrinsic_op.define('nir_intrinsic_load_global_bounded', 322)
nir_intrinsic_load_global_constant = nir_intrinsic_op.define('nir_intrinsic_load_global_constant', 323)
nir_intrinsic_load_global_constant_bounded = nir_intrinsic_op.define('nir_intrinsic_load_global_constant_bounded', 324)
nir_intrinsic_load_global_constant_offset = nir_intrinsic_op.define('nir_intrinsic_load_global_constant_offset', 325)
nir_intrinsic_load_global_constant_uniform_block_intel = nir_intrinsic_op.define('nir_intrinsic_load_global_constant_uniform_block_intel', 326)
nir_intrinsic_load_global_etna = nir_intrinsic_op.define('nir_intrinsic_load_global_etna', 327)
nir_intrinsic_load_global_invocation_id = nir_intrinsic_op.define('nir_intrinsic_load_global_invocation_id', 328)
nir_intrinsic_load_global_invocation_index = nir_intrinsic_op.define('nir_intrinsic_load_global_invocation_index', 329)
nir_intrinsic_load_global_ir3 = nir_intrinsic_op.define('nir_intrinsic_load_global_ir3', 330)
nir_intrinsic_load_global_size = nir_intrinsic_op.define('nir_intrinsic_load_global_size', 331)
nir_intrinsic_load_gs_header_ir3 = nir_intrinsic_op.define('nir_intrinsic_load_gs_header_ir3', 332)
nir_intrinsic_load_gs_vertex_offset_amd = nir_intrinsic_op.define('nir_intrinsic_load_gs_vertex_offset_amd', 333)
nir_intrinsic_load_gs_wave_id_amd = nir_intrinsic_op.define('nir_intrinsic_load_gs_wave_id_amd', 334)
nir_intrinsic_load_helper_arg_hi_agx = nir_intrinsic_op.define('nir_intrinsic_load_helper_arg_hi_agx', 335)
nir_intrinsic_load_helper_arg_lo_agx = nir_intrinsic_op.define('nir_intrinsic_load_helper_arg_lo_agx', 336)
nir_intrinsic_load_helper_invocation = nir_intrinsic_op.define('nir_intrinsic_load_helper_invocation', 337)
nir_intrinsic_load_helper_op_id_agx = nir_intrinsic_op.define('nir_intrinsic_load_helper_op_id_agx', 338)
nir_intrinsic_load_hit_attrib_amd = nir_intrinsic_op.define('nir_intrinsic_load_hit_attrib_amd', 339)
nir_intrinsic_load_hs_out_patch_data_offset_amd = nir_intrinsic_op.define('nir_intrinsic_load_hs_out_patch_data_offset_amd', 340)
nir_intrinsic_load_hs_patch_stride_ir3 = nir_intrinsic_op.define('nir_intrinsic_load_hs_patch_stride_ir3', 341)
nir_intrinsic_load_initial_edgeflags_amd = nir_intrinsic_op.define('nir_intrinsic_load_initial_edgeflags_amd', 342)
nir_intrinsic_load_inline_data_intel = nir_intrinsic_op.define('nir_intrinsic_load_inline_data_intel', 343)
nir_intrinsic_load_input = nir_intrinsic_op.define('nir_intrinsic_load_input', 344)
nir_intrinsic_load_input_assembly_buffer_poly = nir_intrinsic_op.define('nir_intrinsic_load_input_assembly_buffer_poly', 345)
nir_intrinsic_load_input_attachment_conv_pan = nir_intrinsic_op.define('nir_intrinsic_load_input_attachment_conv_pan', 346)
nir_intrinsic_load_input_attachment_coord = nir_intrinsic_op.define('nir_intrinsic_load_input_attachment_coord', 347)
nir_intrinsic_load_input_attachment_target_pan = nir_intrinsic_op.define('nir_intrinsic_load_input_attachment_target_pan', 348)
nir_intrinsic_load_input_topology_poly = nir_intrinsic_op.define('nir_intrinsic_load_input_topology_poly', 349)
nir_intrinsic_load_input_vertex = nir_intrinsic_op.define('nir_intrinsic_load_input_vertex', 350)
nir_intrinsic_load_instance_id = nir_intrinsic_op.define('nir_intrinsic_load_instance_id', 351)
nir_intrinsic_load_interpolated_input = nir_intrinsic_op.define('nir_intrinsic_load_interpolated_input', 352)
nir_intrinsic_load_intersection_opaque_amd = nir_intrinsic_op.define('nir_intrinsic_load_intersection_opaque_amd', 353)
nir_intrinsic_load_invocation_id = nir_intrinsic_op.define('nir_intrinsic_load_invocation_id', 354)
nir_intrinsic_load_is_first_fan_agx = nir_intrinsic_op.define('nir_intrinsic_load_is_first_fan_agx', 355)
nir_intrinsic_load_is_indexed_draw = nir_intrinsic_op.define('nir_intrinsic_load_is_indexed_draw', 356)
nir_intrinsic_load_kernel_input = nir_intrinsic_op.define('nir_intrinsic_load_kernel_input', 357)
nir_intrinsic_load_layer_id = nir_intrinsic_op.define('nir_intrinsic_load_layer_id', 358)
nir_intrinsic_load_lds_ngg_gs_out_vertex_base_amd = nir_intrinsic_op.define('nir_intrinsic_load_lds_ngg_gs_out_vertex_base_amd', 359)
nir_intrinsic_load_leaf_opaque_intel = nir_intrinsic_op.define('nir_intrinsic_load_leaf_opaque_intel', 360)
nir_intrinsic_load_leaf_procedural_intel = nir_intrinsic_op.define('nir_intrinsic_load_leaf_procedural_intel', 361)
nir_intrinsic_load_line_coord = nir_intrinsic_op.define('nir_intrinsic_load_line_coord', 362)
nir_intrinsic_load_line_width = nir_intrinsic_op.define('nir_intrinsic_load_line_width', 363)
nir_intrinsic_load_local_invocation_id = nir_intrinsic_op.define('nir_intrinsic_load_local_invocation_id', 364)
nir_intrinsic_load_local_invocation_index = nir_intrinsic_op.define('nir_intrinsic_load_local_invocation_index', 365)
nir_intrinsic_load_local_pixel_agx = nir_intrinsic_op.define('nir_intrinsic_load_local_pixel_agx', 366)
nir_intrinsic_load_local_shared_r600 = nir_intrinsic_op.define('nir_intrinsic_load_local_shared_r600', 367)
nir_intrinsic_load_lshs_vertex_stride_amd = nir_intrinsic_op.define('nir_intrinsic_load_lshs_vertex_stride_amd', 368)
nir_intrinsic_load_max_polygon_intel = nir_intrinsic_op.define('nir_intrinsic_load_max_polygon_intel', 369)
nir_intrinsic_load_merged_wave_info_amd = nir_intrinsic_op.define('nir_intrinsic_load_merged_wave_info_amd', 370)
nir_intrinsic_load_mesh_view_count = nir_intrinsic_op.define('nir_intrinsic_load_mesh_view_count', 371)
nir_intrinsic_load_mesh_view_indices = nir_intrinsic_op.define('nir_intrinsic_load_mesh_view_indices', 372)
nir_intrinsic_load_multisampled_pan = nir_intrinsic_op.define('nir_intrinsic_load_multisampled_pan', 373)
nir_intrinsic_load_noperspective_varyings_pan = nir_intrinsic_op.define('nir_intrinsic_load_noperspective_varyings_pan', 374)
nir_intrinsic_load_num_subgroups = nir_intrinsic_op.define('nir_intrinsic_load_num_subgroups', 375)
nir_intrinsic_load_num_vertices = nir_intrinsic_op.define('nir_intrinsic_load_num_vertices', 376)
nir_intrinsic_load_num_vertices_per_primitive_amd = nir_intrinsic_op.define('nir_intrinsic_load_num_vertices_per_primitive_amd', 377)
nir_intrinsic_load_num_workgroups = nir_intrinsic_op.define('nir_intrinsic_load_num_workgroups', 378)
nir_intrinsic_load_ordered_id_amd = nir_intrinsic_op.define('nir_intrinsic_load_ordered_id_amd', 379)
nir_intrinsic_load_output = nir_intrinsic_op.define('nir_intrinsic_load_output', 380)
nir_intrinsic_load_packed_passthrough_primitive_amd = nir_intrinsic_op.define('nir_intrinsic_load_packed_passthrough_primitive_amd', 381)
nir_intrinsic_load_param = nir_intrinsic_op.define('nir_intrinsic_load_param', 382)
nir_intrinsic_load_patch_vertices_in = nir_intrinsic_op.define('nir_intrinsic_load_patch_vertices_in', 383)
nir_intrinsic_load_per_primitive_input = nir_intrinsic_op.define('nir_intrinsic_load_per_primitive_input', 384)
nir_intrinsic_load_per_primitive_output = nir_intrinsic_op.define('nir_intrinsic_load_per_primitive_output', 385)
nir_intrinsic_load_per_primitive_remap_intel = nir_intrinsic_op.define('nir_intrinsic_load_per_primitive_remap_intel', 386)
nir_intrinsic_load_per_vertex_input = nir_intrinsic_op.define('nir_intrinsic_load_per_vertex_input', 387)
nir_intrinsic_load_per_vertex_output = nir_intrinsic_op.define('nir_intrinsic_load_per_vertex_output', 388)
nir_intrinsic_load_per_view_output = nir_intrinsic_op.define('nir_intrinsic_load_per_view_output', 389)
nir_intrinsic_load_persp_center_rhw_ir3 = nir_intrinsic_op.define('nir_intrinsic_load_persp_center_rhw_ir3', 390)
nir_intrinsic_load_pipeline_stat_query_enabled_amd = nir_intrinsic_op.define('nir_intrinsic_load_pipeline_stat_query_enabled_amd', 391)
nir_intrinsic_load_pixel_coord = nir_intrinsic_op.define('nir_intrinsic_load_pixel_coord', 392)
nir_intrinsic_load_point_coord = nir_intrinsic_op.define('nir_intrinsic_load_point_coord', 393)
nir_intrinsic_load_point_coord_maybe_flipped = nir_intrinsic_op.define('nir_intrinsic_load_point_coord_maybe_flipped', 394)
nir_intrinsic_load_poly_line_smooth_enabled = nir_intrinsic_op.define('nir_intrinsic_load_poly_line_smooth_enabled', 395)
nir_intrinsic_load_polygon_stipple_agx = nir_intrinsic_op.define('nir_intrinsic_load_polygon_stipple_agx', 396)
nir_intrinsic_load_polygon_stipple_buffer_amd = nir_intrinsic_op.define('nir_intrinsic_load_polygon_stipple_buffer_amd', 397)
nir_intrinsic_load_preamble = nir_intrinsic_op.define('nir_intrinsic_load_preamble', 398)
nir_intrinsic_load_prim_gen_query_enabled_amd = nir_intrinsic_op.define('nir_intrinsic_load_prim_gen_query_enabled_amd', 399)
nir_intrinsic_load_prim_xfb_query_enabled_amd = nir_intrinsic_op.define('nir_intrinsic_load_prim_xfb_query_enabled_amd', 400)
nir_intrinsic_load_primitive_id = nir_intrinsic_op.define('nir_intrinsic_load_primitive_id', 401)
nir_intrinsic_load_primitive_location_ir3 = nir_intrinsic_op.define('nir_intrinsic_load_primitive_location_ir3', 402)
nir_intrinsic_load_printf_buffer_address = nir_intrinsic_op.define('nir_intrinsic_load_printf_buffer_address', 403)
nir_intrinsic_load_printf_buffer_size = nir_intrinsic_op.define('nir_intrinsic_load_printf_buffer_size', 404)
nir_intrinsic_load_provoking_last = nir_intrinsic_op.define('nir_intrinsic_load_provoking_last', 405)
nir_intrinsic_load_provoking_vtx_amd = nir_intrinsic_op.define('nir_intrinsic_load_provoking_vtx_amd', 406)
nir_intrinsic_load_provoking_vtx_in_prim_amd = nir_intrinsic_op.define('nir_intrinsic_load_provoking_vtx_in_prim_amd', 407)
nir_intrinsic_load_push_constant = nir_intrinsic_op.define('nir_intrinsic_load_push_constant', 408)
nir_intrinsic_load_push_constant_zink = nir_intrinsic_op.define('nir_intrinsic_load_push_constant_zink', 409)
nir_intrinsic_load_r600_indirect_per_vertex_input = nir_intrinsic_op.define('nir_intrinsic_load_r600_indirect_per_vertex_input', 410)
nir_intrinsic_load_rasterization_primitive_amd = nir_intrinsic_op.define('nir_intrinsic_load_rasterization_primitive_amd', 411)
nir_intrinsic_load_rasterization_samples_amd = nir_intrinsic_op.define('nir_intrinsic_load_rasterization_samples_amd', 412)
nir_intrinsic_load_rasterization_stream = nir_intrinsic_op.define('nir_intrinsic_load_rasterization_stream', 413)
nir_intrinsic_load_raw_output_pan = nir_intrinsic_op.define('nir_intrinsic_load_raw_output_pan', 414)
nir_intrinsic_load_raw_vertex_id_pan = nir_intrinsic_op.define('nir_intrinsic_load_raw_vertex_id_pan', 415)
nir_intrinsic_load_raw_vertex_offset_pan = nir_intrinsic_op.define('nir_intrinsic_load_raw_vertex_offset_pan', 416)
nir_intrinsic_load_ray_base_mem_addr_intel = nir_intrinsic_op.define('nir_intrinsic_load_ray_base_mem_addr_intel', 417)
nir_intrinsic_load_ray_flags = nir_intrinsic_op.define('nir_intrinsic_load_ray_flags', 418)
nir_intrinsic_load_ray_geometry_index = nir_intrinsic_op.define('nir_intrinsic_load_ray_geometry_index', 419)
nir_intrinsic_load_ray_hit_kind = nir_intrinsic_op.define('nir_intrinsic_load_ray_hit_kind', 420)
nir_intrinsic_load_ray_hit_sbt_addr_intel = nir_intrinsic_op.define('nir_intrinsic_load_ray_hit_sbt_addr_intel', 421)
nir_intrinsic_load_ray_hit_sbt_stride_intel = nir_intrinsic_op.define('nir_intrinsic_load_ray_hit_sbt_stride_intel', 422)
nir_intrinsic_load_ray_hw_stack_size_intel = nir_intrinsic_op.define('nir_intrinsic_load_ray_hw_stack_size_intel', 423)
nir_intrinsic_load_ray_instance_custom_index = nir_intrinsic_op.define('nir_intrinsic_load_ray_instance_custom_index', 424)
nir_intrinsic_load_ray_launch_id = nir_intrinsic_op.define('nir_intrinsic_load_ray_launch_id', 425)
nir_intrinsic_load_ray_launch_size = nir_intrinsic_op.define('nir_intrinsic_load_ray_launch_size', 426)
nir_intrinsic_load_ray_miss_sbt_addr_intel = nir_intrinsic_op.define('nir_intrinsic_load_ray_miss_sbt_addr_intel', 427)
nir_intrinsic_load_ray_miss_sbt_stride_intel = nir_intrinsic_op.define('nir_intrinsic_load_ray_miss_sbt_stride_intel', 428)
nir_intrinsic_load_ray_num_dss_rt_stacks_intel = nir_intrinsic_op.define('nir_intrinsic_load_ray_num_dss_rt_stacks_intel', 429)
nir_intrinsic_load_ray_object_direction = nir_intrinsic_op.define('nir_intrinsic_load_ray_object_direction', 430)
nir_intrinsic_load_ray_object_origin = nir_intrinsic_op.define('nir_intrinsic_load_ray_object_origin', 431)
nir_intrinsic_load_ray_object_to_world = nir_intrinsic_op.define('nir_intrinsic_load_ray_object_to_world', 432)
nir_intrinsic_load_ray_query_global_intel = nir_intrinsic_op.define('nir_intrinsic_load_ray_query_global_intel', 433)
nir_intrinsic_load_ray_sw_stack_size_intel = nir_intrinsic_op.define('nir_intrinsic_load_ray_sw_stack_size_intel', 434)
nir_intrinsic_load_ray_t_max = nir_intrinsic_op.define('nir_intrinsic_load_ray_t_max', 435)
nir_intrinsic_load_ray_t_min = nir_intrinsic_op.define('nir_intrinsic_load_ray_t_min', 436)
nir_intrinsic_load_ray_tracing_stack_base_lvp = nir_intrinsic_op.define('nir_intrinsic_load_ray_tracing_stack_base_lvp', 437)
nir_intrinsic_load_ray_triangle_vertex_positions = nir_intrinsic_op.define('nir_intrinsic_load_ray_triangle_vertex_positions', 438)
nir_intrinsic_load_ray_world_direction = nir_intrinsic_op.define('nir_intrinsic_load_ray_world_direction', 439)
nir_intrinsic_load_ray_world_origin = nir_intrinsic_op.define('nir_intrinsic_load_ray_world_origin', 440)
nir_intrinsic_load_ray_world_to_object = nir_intrinsic_op.define('nir_intrinsic_load_ray_world_to_object', 441)
nir_intrinsic_load_readonly_output_pan = nir_intrinsic_op.define('nir_intrinsic_load_readonly_output_pan', 442)
nir_intrinsic_load_reg = nir_intrinsic_op.define('nir_intrinsic_load_reg', 443)
nir_intrinsic_load_reg_indirect = nir_intrinsic_op.define('nir_intrinsic_load_reg_indirect', 444)
nir_intrinsic_load_rel_patch_id_ir3 = nir_intrinsic_op.define('nir_intrinsic_load_rel_patch_id_ir3', 445)
nir_intrinsic_load_reloc_const_intel = nir_intrinsic_op.define('nir_intrinsic_load_reloc_const_intel', 446)
nir_intrinsic_load_resume_shader_address_amd = nir_intrinsic_op.define('nir_intrinsic_load_resume_shader_address_amd', 447)
nir_intrinsic_load_ring_attr_amd = nir_intrinsic_op.define('nir_intrinsic_load_ring_attr_amd', 448)
nir_intrinsic_load_ring_attr_offset_amd = nir_intrinsic_op.define('nir_intrinsic_load_ring_attr_offset_amd', 449)
nir_intrinsic_load_ring_es2gs_offset_amd = nir_intrinsic_op.define('nir_intrinsic_load_ring_es2gs_offset_amd', 450)
nir_intrinsic_load_ring_esgs_amd = nir_intrinsic_op.define('nir_intrinsic_load_ring_esgs_amd', 451)
nir_intrinsic_load_ring_gs2vs_offset_amd = nir_intrinsic_op.define('nir_intrinsic_load_ring_gs2vs_offset_amd', 452)
nir_intrinsic_load_ring_gsvs_amd = nir_intrinsic_op.define('nir_intrinsic_load_ring_gsvs_amd', 453)
nir_intrinsic_load_ring_mesh_scratch_amd = nir_intrinsic_op.define('nir_intrinsic_load_ring_mesh_scratch_amd', 454)
nir_intrinsic_load_ring_mesh_scratch_offset_amd = nir_intrinsic_op.define('nir_intrinsic_load_ring_mesh_scratch_offset_amd', 455)
nir_intrinsic_load_ring_task_draw_amd = nir_intrinsic_op.define('nir_intrinsic_load_ring_task_draw_amd', 456)
nir_intrinsic_load_ring_task_payload_amd = nir_intrinsic_op.define('nir_intrinsic_load_ring_task_payload_amd', 457)
nir_intrinsic_load_ring_tess_factors_amd = nir_intrinsic_op.define('nir_intrinsic_load_ring_tess_factors_amd', 458)
nir_intrinsic_load_ring_tess_factors_offset_amd = nir_intrinsic_op.define('nir_intrinsic_load_ring_tess_factors_offset_amd', 459)
nir_intrinsic_load_ring_tess_offchip_amd = nir_intrinsic_op.define('nir_intrinsic_load_ring_tess_offchip_amd', 460)
nir_intrinsic_load_ring_tess_offchip_offset_amd = nir_intrinsic_op.define('nir_intrinsic_load_ring_tess_offchip_offset_amd', 461)
nir_intrinsic_load_root_agx = nir_intrinsic_op.define('nir_intrinsic_load_root_agx', 462)
nir_intrinsic_load_rt_arg_scratch_offset_amd = nir_intrinsic_op.define('nir_intrinsic_load_rt_arg_scratch_offset_amd', 463)
nir_intrinsic_load_rt_conversion_pan = nir_intrinsic_op.define('nir_intrinsic_load_rt_conversion_pan', 464)
nir_intrinsic_load_sample_id = nir_intrinsic_op.define('nir_intrinsic_load_sample_id', 465)
nir_intrinsic_load_sample_id_no_per_sample = nir_intrinsic_op.define('nir_intrinsic_load_sample_id_no_per_sample', 466)
nir_intrinsic_load_sample_mask = nir_intrinsic_op.define('nir_intrinsic_load_sample_mask', 467)
nir_intrinsic_load_sample_mask_in = nir_intrinsic_op.define('nir_intrinsic_load_sample_mask_in', 468)
nir_intrinsic_load_sample_pos = nir_intrinsic_op.define('nir_intrinsic_load_sample_pos', 469)
nir_intrinsic_load_sample_pos_from_id = nir_intrinsic_op.define('nir_intrinsic_load_sample_pos_from_id', 470)
nir_intrinsic_load_sample_pos_or_center = nir_intrinsic_op.define('nir_intrinsic_load_sample_pos_or_center', 471)
nir_intrinsic_load_sample_positions_agx = nir_intrinsic_op.define('nir_intrinsic_load_sample_positions_agx', 472)
nir_intrinsic_load_sample_positions_amd = nir_intrinsic_op.define('nir_intrinsic_load_sample_positions_amd', 473)
nir_intrinsic_load_sample_positions_pan = nir_intrinsic_op.define('nir_intrinsic_load_sample_positions_pan', 474)
nir_intrinsic_load_sampler_handle_agx = nir_intrinsic_op.define('nir_intrinsic_load_sampler_handle_agx', 475)
nir_intrinsic_load_sampler_lod_parameters = nir_intrinsic_op.define('nir_intrinsic_load_sampler_lod_parameters', 476)
nir_intrinsic_load_samples_log2_agx = nir_intrinsic_op.define('nir_intrinsic_load_samples_log2_agx', 477)
nir_intrinsic_load_sbt_base_amd = nir_intrinsic_op.define('nir_intrinsic_load_sbt_base_amd', 478)
nir_intrinsic_load_sbt_offset_amd = nir_intrinsic_op.define('nir_intrinsic_load_sbt_offset_amd', 479)
nir_intrinsic_load_sbt_stride_amd = nir_intrinsic_op.define('nir_intrinsic_load_sbt_stride_amd', 480)
nir_intrinsic_load_scalar_arg_amd = nir_intrinsic_op.define('nir_intrinsic_load_scalar_arg_amd', 481)
nir_intrinsic_load_scratch = nir_intrinsic_op.define('nir_intrinsic_load_scratch', 482)
nir_intrinsic_load_scratch_base_ptr = nir_intrinsic_op.define('nir_intrinsic_load_scratch_base_ptr', 483)
nir_intrinsic_load_shader_call_data_offset_lvp = nir_intrinsic_op.define('nir_intrinsic_load_shader_call_data_offset_lvp', 484)
nir_intrinsic_load_shader_index = nir_intrinsic_op.define('nir_intrinsic_load_shader_index', 485)
nir_intrinsic_load_shader_output_pan = nir_intrinsic_op.define('nir_intrinsic_load_shader_output_pan', 486)
nir_intrinsic_load_shader_part_tests_zs_agx = nir_intrinsic_op.define('nir_intrinsic_load_shader_part_tests_zs_agx', 487)
nir_intrinsic_load_shader_record_ptr = nir_intrinsic_op.define('nir_intrinsic_load_shader_record_ptr', 488)
nir_intrinsic_load_shared = nir_intrinsic_op.define('nir_intrinsic_load_shared', 489)
nir_intrinsic_load_shared2_amd = nir_intrinsic_op.define('nir_intrinsic_load_shared2_amd', 490)
nir_intrinsic_load_shared_base_ptr = nir_intrinsic_op.define('nir_intrinsic_load_shared_base_ptr', 491)
nir_intrinsic_load_shared_block_intel = nir_intrinsic_op.define('nir_intrinsic_load_shared_block_intel', 492)
nir_intrinsic_load_shared_ir3 = nir_intrinsic_op.define('nir_intrinsic_load_shared_ir3', 493)
nir_intrinsic_load_shared_lock_nv = nir_intrinsic_op.define('nir_intrinsic_load_shared_lock_nv', 494)
nir_intrinsic_load_shared_uniform_block_intel = nir_intrinsic_op.define('nir_intrinsic_load_shared_uniform_block_intel', 495)
nir_intrinsic_load_simd_width_intel = nir_intrinsic_op.define('nir_intrinsic_load_simd_width_intel', 496)
nir_intrinsic_load_sm_count_nv = nir_intrinsic_op.define('nir_intrinsic_load_sm_count_nv', 497)
nir_intrinsic_load_sm_id_nv = nir_intrinsic_op.define('nir_intrinsic_load_sm_id_nv', 498)
nir_intrinsic_load_smem_amd = nir_intrinsic_op.define('nir_intrinsic_load_smem_amd', 499)
nir_intrinsic_load_ssbo = nir_intrinsic_op.define('nir_intrinsic_load_ssbo', 500)
nir_intrinsic_load_ssbo_address = nir_intrinsic_op.define('nir_intrinsic_load_ssbo_address', 501)
nir_intrinsic_load_ssbo_block_intel = nir_intrinsic_op.define('nir_intrinsic_load_ssbo_block_intel', 502)
nir_intrinsic_load_ssbo_intel = nir_intrinsic_op.define('nir_intrinsic_load_ssbo_intel', 503)
nir_intrinsic_load_ssbo_ir3 = nir_intrinsic_op.define('nir_intrinsic_load_ssbo_ir3', 504)
nir_intrinsic_load_ssbo_uniform_block_intel = nir_intrinsic_op.define('nir_intrinsic_load_ssbo_uniform_block_intel', 505)
nir_intrinsic_load_stack = nir_intrinsic_op.define('nir_intrinsic_load_stack', 506)
nir_intrinsic_load_stat_query_address_agx = nir_intrinsic_op.define('nir_intrinsic_load_stat_query_address_agx', 507)
nir_intrinsic_load_streamout_buffer_amd = nir_intrinsic_op.define('nir_intrinsic_load_streamout_buffer_amd', 508)
nir_intrinsic_load_streamout_config_amd = nir_intrinsic_op.define('nir_intrinsic_load_streamout_config_amd', 509)
nir_intrinsic_load_streamout_offset_amd = nir_intrinsic_op.define('nir_intrinsic_load_streamout_offset_amd', 510)
nir_intrinsic_load_streamout_write_index_amd = nir_intrinsic_op.define('nir_intrinsic_load_streamout_write_index_amd', 511)
nir_intrinsic_load_subgroup_eq_mask = nir_intrinsic_op.define('nir_intrinsic_load_subgroup_eq_mask', 512)
nir_intrinsic_load_subgroup_ge_mask = nir_intrinsic_op.define('nir_intrinsic_load_subgroup_ge_mask', 513)
nir_intrinsic_load_subgroup_gt_mask = nir_intrinsic_op.define('nir_intrinsic_load_subgroup_gt_mask', 514)
nir_intrinsic_load_subgroup_id = nir_intrinsic_op.define('nir_intrinsic_load_subgroup_id', 515)
nir_intrinsic_load_subgroup_id_shift_ir3 = nir_intrinsic_op.define('nir_intrinsic_load_subgroup_id_shift_ir3', 516)
nir_intrinsic_load_subgroup_invocation = nir_intrinsic_op.define('nir_intrinsic_load_subgroup_invocation', 517)
nir_intrinsic_load_subgroup_le_mask = nir_intrinsic_op.define('nir_intrinsic_load_subgroup_le_mask', 518)
nir_intrinsic_load_subgroup_lt_mask = nir_intrinsic_op.define('nir_intrinsic_load_subgroup_lt_mask', 519)
nir_intrinsic_load_subgroup_size = nir_intrinsic_op.define('nir_intrinsic_load_subgroup_size', 520)
nir_intrinsic_load_sysval_agx = nir_intrinsic_op.define('nir_intrinsic_load_sysval_agx', 521)
nir_intrinsic_load_sysval_nv = nir_intrinsic_op.define('nir_intrinsic_load_sysval_nv', 522)
nir_intrinsic_load_task_payload = nir_intrinsic_op.define('nir_intrinsic_load_task_payload', 523)
nir_intrinsic_load_task_ring_entry_amd = nir_intrinsic_op.define('nir_intrinsic_load_task_ring_entry_amd', 524)
nir_intrinsic_load_tcs_header_ir3 = nir_intrinsic_op.define('nir_intrinsic_load_tcs_header_ir3', 525)
nir_intrinsic_load_tcs_in_param_base_r600 = nir_intrinsic_op.define('nir_intrinsic_load_tcs_in_param_base_r600', 526)
nir_intrinsic_load_tcs_mem_attrib_stride = nir_intrinsic_op.define('nir_intrinsic_load_tcs_mem_attrib_stride', 527)
nir_intrinsic_load_tcs_num_patches_amd = nir_intrinsic_op.define('nir_intrinsic_load_tcs_num_patches_amd', 528)
nir_intrinsic_load_tcs_out_param_base_r600 = nir_intrinsic_op.define('nir_intrinsic_load_tcs_out_param_base_r600', 529)
nir_intrinsic_load_tcs_primitive_mode_amd = nir_intrinsic_op.define('nir_intrinsic_load_tcs_primitive_mode_amd', 530)
nir_intrinsic_load_tcs_rel_patch_id_r600 = nir_intrinsic_op.define('nir_intrinsic_load_tcs_rel_patch_id_r600', 531)
nir_intrinsic_load_tcs_tess_factor_base_r600 = nir_intrinsic_op.define('nir_intrinsic_load_tcs_tess_factor_base_r600', 532)
nir_intrinsic_load_tcs_tess_levels_to_tes_amd = nir_intrinsic_op.define('nir_intrinsic_load_tcs_tess_levels_to_tes_amd', 533)
nir_intrinsic_load_tess_coord = nir_intrinsic_op.define('nir_intrinsic_load_tess_coord', 534)
nir_intrinsic_load_tess_coord_xy = nir_intrinsic_op.define('nir_intrinsic_load_tess_coord_xy', 535)
nir_intrinsic_load_tess_factor_base_ir3 = nir_intrinsic_op.define('nir_intrinsic_load_tess_factor_base_ir3', 536)
nir_intrinsic_load_tess_level_inner = nir_intrinsic_op.define('nir_intrinsic_load_tess_level_inner', 537)
nir_intrinsic_load_tess_level_inner_default = nir_intrinsic_op.define('nir_intrinsic_load_tess_level_inner_default', 538)
nir_intrinsic_load_tess_level_outer = nir_intrinsic_op.define('nir_intrinsic_load_tess_level_outer', 539)
nir_intrinsic_load_tess_level_outer_default = nir_intrinsic_op.define('nir_intrinsic_load_tess_level_outer_default', 540)
nir_intrinsic_load_tess_param_base_ir3 = nir_intrinsic_op.define('nir_intrinsic_load_tess_param_base_ir3', 541)
nir_intrinsic_load_tess_param_buffer_poly = nir_intrinsic_op.define('nir_intrinsic_load_tess_param_buffer_poly', 542)
nir_intrinsic_load_tess_rel_patch_id_amd = nir_intrinsic_op.define('nir_intrinsic_load_tess_rel_patch_id_amd', 543)
nir_intrinsic_load_tex_sprite_mask_agx = nir_intrinsic_op.define('nir_intrinsic_load_tex_sprite_mask_agx', 544)
nir_intrinsic_load_texture_handle_agx = nir_intrinsic_op.define('nir_intrinsic_load_texture_handle_agx', 545)
nir_intrinsic_load_texture_scale = nir_intrinsic_op.define('nir_intrinsic_load_texture_scale', 546)
nir_intrinsic_load_texture_size_etna = nir_intrinsic_op.define('nir_intrinsic_load_texture_size_etna', 547)
nir_intrinsic_load_tlb_color_brcm = nir_intrinsic_op.define('nir_intrinsic_load_tlb_color_brcm', 548)
nir_intrinsic_load_topology_id_intel = nir_intrinsic_op.define('nir_intrinsic_load_topology_id_intel', 549)
nir_intrinsic_load_typed_buffer_amd = nir_intrinsic_op.define('nir_intrinsic_load_typed_buffer_amd', 550)
nir_intrinsic_load_uav_ir3 = nir_intrinsic_op.define('nir_intrinsic_load_uav_ir3', 551)
nir_intrinsic_load_ubo = nir_intrinsic_op.define('nir_intrinsic_load_ubo', 552)
nir_intrinsic_load_ubo_uniform_block_intel = nir_intrinsic_op.define('nir_intrinsic_load_ubo_uniform_block_intel', 553)
nir_intrinsic_load_ubo_vec4 = nir_intrinsic_op.define('nir_intrinsic_load_ubo_vec4', 554)
nir_intrinsic_load_uniform = nir_intrinsic_op.define('nir_intrinsic_load_uniform', 555)
nir_intrinsic_load_user_clip_plane = nir_intrinsic_op.define('nir_intrinsic_load_user_clip_plane', 556)
nir_intrinsic_load_user_data_amd = nir_intrinsic_op.define('nir_intrinsic_load_user_data_amd', 557)
nir_intrinsic_load_uvs_index_agx = nir_intrinsic_op.define('nir_intrinsic_load_uvs_index_agx', 558)
nir_intrinsic_load_vbo_base_agx = nir_intrinsic_op.define('nir_intrinsic_load_vbo_base_agx', 559)
nir_intrinsic_load_vector_arg_amd = nir_intrinsic_op.define('nir_intrinsic_load_vector_arg_amd', 560)
nir_intrinsic_load_vertex_id = nir_intrinsic_op.define('nir_intrinsic_load_vertex_id', 561)
nir_intrinsic_load_vertex_id_zero_base = nir_intrinsic_op.define('nir_intrinsic_load_vertex_id_zero_base', 562)
nir_intrinsic_load_view_index = nir_intrinsic_op.define('nir_intrinsic_load_view_index', 563)
nir_intrinsic_load_viewport_offset = nir_intrinsic_op.define('nir_intrinsic_load_viewport_offset', 564)
nir_intrinsic_load_viewport_scale = nir_intrinsic_op.define('nir_intrinsic_load_viewport_scale', 565)
nir_intrinsic_load_viewport_x_offset = nir_intrinsic_op.define('nir_intrinsic_load_viewport_x_offset', 566)
nir_intrinsic_load_viewport_x_scale = nir_intrinsic_op.define('nir_intrinsic_load_viewport_x_scale', 567)
nir_intrinsic_load_viewport_y_offset = nir_intrinsic_op.define('nir_intrinsic_load_viewport_y_offset', 568)
nir_intrinsic_load_viewport_y_scale = nir_intrinsic_op.define('nir_intrinsic_load_viewport_y_scale', 569)
nir_intrinsic_load_viewport_z_offset = nir_intrinsic_op.define('nir_intrinsic_load_viewport_z_offset', 570)
nir_intrinsic_load_viewport_z_scale = nir_intrinsic_op.define('nir_intrinsic_load_viewport_z_scale', 571)
nir_intrinsic_load_vs_output_buffer_poly = nir_intrinsic_op.define('nir_intrinsic_load_vs_output_buffer_poly', 572)
nir_intrinsic_load_vs_outputs_poly = nir_intrinsic_op.define('nir_intrinsic_load_vs_outputs_poly', 573)
nir_intrinsic_load_vs_primitive_stride_ir3 = nir_intrinsic_op.define('nir_intrinsic_load_vs_primitive_stride_ir3', 574)
nir_intrinsic_load_vs_vertex_stride_ir3 = nir_intrinsic_op.define('nir_intrinsic_load_vs_vertex_stride_ir3', 575)
nir_intrinsic_load_vulkan_descriptor = nir_intrinsic_op.define('nir_intrinsic_load_vulkan_descriptor', 576)
nir_intrinsic_load_warp_id_nv = nir_intrinsic_op.define('nir_intrinsic_load_warp_id_nv', 577)
nir_intrinsic_load_warps_per_sm_nv = nir_intrinsic_op.define('nir_intrinsic_load_warps_per_sm_nv', 578)
nir_intrinsic_load_work_dim = nir_intrinsic_op.define('nir_intrinsic_load_work_dim', 579)
nir_intrinsic_load_workgroup_id = nir_intrinsic_op.define('nir_intrinsic_load_workgroup_id', 580)
nir_intrinsic_load_workgroup_index = nir_intrinsic_op.define('nir_intrinsic_load_workgroup_index', 581)
nir_intrinsic_load_workgroup_num_input_primitives_amd = nir_intrinsic_op.define('nir_intrinsic_load_workgroup_num_input_primitives_amd', 582)
nir_intrinsic_load_workgroup_num_input_vertices_amd = nir_intrinsic_op.define('nir_intrinsic_load_workgroup_num_input_vertices_amd', 583)
nir_intrinsic_load_workgroup_size = nir_intrinsic_op.define('nir_intrinsic_load_workgroup_size', 584)
nir_intrinsic_load_xfb_address = nir_intrinsic_op.define('nir_intrinsic_load_xfb_address', 585)
nir_intrinsic_load_xfb_index_buffer = nir_intrinsic_op.define('nir_intrinsic_load_xfb_index_buffer', 586)
nir_intrinsic_load_xfb_size = nir_intrinsic_op.define('nir_intrinsic_load_xfb_size', 587)
nir_intrinsic_load_xfb_state_address_gfx12_amd = nir_intrinsic_op.define('nir_intrinsic_load_xfb_state_address_gfx12_amd', 588)
nir_intrinsic_masked_swizzle_amd = nir_intrinsic_op.define('nir_intrinsic_masked_swizzle_amd', 589)
nir_intrinsic_mbcnt_amd = nir_intrinsic_op.define('nir_intrinsic_mbcnt_amd', 590)
nir_intrinsic_memcpy_deref = nir_intrinsic_op.define('nir_intrinsic_memcpy_deref', 591)
nir_intrinsic_nop = nir_intrinsic_op.define('nir_intrinsic_nop', 592)
nir_intrinsic_nop_amd = nir_intrinsic_op.define('nir_intrinsic_nop_amd', 593)
nir_intrinsic_optimization_barrier_sgpr_amd = nir_intrinsic_op.define('nir_intrinsic_optimization_barrier_sgpr_amd', 594)
nir_intrinsic_optimization_barrier_vgpr_amd = nir_intrinsic_op.define('nir_intrinsic_optimization_barrier_vgpr_amd', 595)
nir_intrinsic_ordered_add_loop_gfx12_amd = nir_intrinsic_op.define('nir_intrinsic_ordered_add_loop_gfx12_amd', 596)
nir_intrinsic_ordered_xfb_counter_add_gfx11_amd = nir_intrinsic_op.define('nir_intrinsic_ordered_xfb_counter_add_gfx11_amd', 597)
nir_intrinsic_overwrite_tes_arguments_amd = nir_intrinsic_op.define('nir_intrinsic_overwrite_tes_arguments_amd', 598)
nir_intrinsic_overwrite_vs_arguments_amd = nir_intrinsic_op.define('nir_intrinsic_overwrite_vs_arguments_amd', 599)
nir_intrinsic_pin_cx_handle_nv = nir_intrinsic_op.define('nir_intrinsic_pin_cx_handle_nv', 600)
nir_intrinsic_preamble_end_ir3 = nir_intrinsic_op.define('nir_intrinsic_preamble_end_ir3', 601)
nir_intrinsic_preamble_start_ir3 = nir_intrinsic_op.define('nir_intrinsic_preamble_start_ir3', 602)
nir_intrinsic_prefetch_sam_ir3 = nir_intrinsic_op.define('nir_intrinsic_prefetch_sam_ir3', 603)
nir_intrinsic_prefetch_tex_ir3 = nir_intrinsic_op.define('nir_intrinsic_prefetch_tex_ir3', 604)
nir_intrinsic_prefetch_ubo_ir3 = nir_intrinsic_op.define('nir_intrinsic_prefetch_ubo_ir3', 605)
nir_intrinsic_printf = nir_intrinsic_op.define('nir_intrinsic_printf', 606)
nir_intrinsic_printf_abort = nir_intrinsic_op.define('nir_intrinsic_printf_abort', 607)
nir_intrinsic_quad_ballot_agx = nir_intrinsic_op.define('nir_intrinsic_quad_ballot_agx', 608)
nir_intrinsic_quad_broadcast = nir_intrinsic_op.define('nir_intrinsic_quad_broadcast', 609)
nir_intrinsic_quad_swap_diagonal = nir_intrinsic_op.define('nir_intrinsic_quad_swap_diagonal', 610)
nir_intrinsic_quad_swap_horizontal = nir_intrinsic_op.define('nir_intrinsic_quad_swap_horizontal', 611)
nir_intrinsic_quad_swap_vertical = nir_intrinsic_op.define('nir_intrinsic_quad_swap_vertical', 612)
nir_intrinsic_quad_swizzle_amd = nir_intrinsic_op.define('nir_intrinsic_quad_swizzle_amd', 613)
nir_intrinsic_quad_vote_all = nir_intrinsic_op.define('nir_intrinsic_quad_vote_all', 614)
nir_intrinsic_quad_vote_any = nir_intrinsic_op.define('nir_intrinsic_quad_vote_any', 615)
nir_intrinsic_r600_indirect_vertex_at_index = nir_intrinsic_op.define('nir_intrinsic_r600_indirect_vertex_at_index', 616)
nir_intrinsic_ray_intersection_ir3 = nir_intrinsic_op.define('nir_intrinsic_ray_intersection_ir3', 617)
nir_intrinsic_read_attribute_payload_intel = nir_intrinsic_op.define('nir_intrinsic_read_attribute_payload_intel', 618)
nir_intrinsic_read_first_invocation = nir_intrinsic_op.define('nir_intrinsic_read_first_invocation', 619)
nir_intrinsic_read_getlast_ir3 = nir_intrinsic_op.define('nir_intrinsic_read_getlast_ir3', 620)
nir_intrinsic_read_invocation = nir_intrinsic_op.define('nir_intrinsic_read_invocation', 621)
nir_intrinsic_read_invocation_cond_ir3 = nir_intrinsic_op.define('nir_intrinsic_read_invocation_cond_ir3', 622)
nir_intrinsic_reduce = nir_intrinsic_op.define('nir_intrinsic_reduce', 623)
nir_intrinsic_reduce_clusters_ir3 = nir_intrinsic_op.define('nir_intrinsic_reduce_clusters_ir3', 624)
nir_intrinsic_report_ray_intersection = nir_intrinsic_op.define('nir_intrinsic_report_ray_intersection', 625)
nir_intrinsic_resource_intel = nir_intrinsic_op.define('nir_intrinsic_resource_intel', 626)
nir_intrinsic_rotate = nir_intrinsic_op.define('nir_intrinsic_rotate', 627)
nir_intrinsic_rq_confirm_intersection = nir_intrinsic_op.define('nir_intrinsic_rq_confirm_intersection', 628)
nir_intrinsic_rq_generate_intersection = nir_intrinsic_op.define('nir_intrinsic_rq_generate_intersection', 629)
nir_intrinsic_rq_initialize = nir_intrinsic_op.define('nir_intrinsic_rq_initialize', 630)
nir_intrinsic_rq_load = nir_intrinsic_op.define('nir_intrinsic_rq_load', 631)
nir_intrinsic_rq_proceed = nir_intrinsic_op.define('nir_intrinsic_rq_proceed', 632)
nir_intrinsic_rq_terminate = nir_intrinsic_op.define('nir_intrinsic_rq_terminate', 633)
nir_intrinsic_rt_execute_callable = nir_intrinsic_op.define('nir_intrinsic_rt_execute_callable', 634)
nir_intrinsic_rt_resume = nir_intrinsic_op.define('nir_intrinsic_rt_resume', 635)
nir_intrinsic_rt_return_amd = nir_intrinsic_op.define('nir_intrinsic_rt_return_amd', 636)
nir_intrinsic_rt_trace_ray = nir_intrinsic_op.define('nir_intrinsic_rt_trace_ray', 637)
nir_intrinsic_sample_mask_agx = nir_intrinsic_op.define('nir_intrinsic_sample_mask_agx', 638)
nir_intrinsic_select_vertex_poly = nir_intrinsic_op.define('nir_intrinsic_select_vertex_poly', 639)
nir_intrinsic_sendmsg_amd = nir_intrinsic_op.define('nir_intrinsic_sendmsg_amd', 640)
nir_intrinsic_set_vertex_and_primitive_count = nir_intrinsic_op.define('nir_intrinsic_set_vertex_and_primitive_count', 641)
nir_intrinsic_shader_clock = nir_intrinsic_op.define('nir_intrinsic_shader_clock', 642)
nir_intrinsic_shared_append_amd = nir_intrinsic_op.define('nir_intrinsic_shared_append_amd', 643)
nir_intrinsic_shared_atomic = nir_intrinsic_op.define('nir_intrinsic_shared_atomic', 644)
nir_intrinsic_shared_atomic_swap = nir_intrinsic_op.define('nir_intrinsic_shared_atomic_swap', 645)
nir_intrinsic_shared_consume_amd = nir_intrinsic_op.define('nir_intrinsic_shared_consume_amd', 646)
nir_intrinsic_shuffle = nir_intrinsic_op.define('nir_intrinsic_shuffle', 647)
nir_intrinsic_shuffle_down = nir_intrinsic_op.define('nir_intrinsic_shuffle_down', 648)
nir_intrinsic_shuffle_down_uniform_ir3 = nir_intrinsic_op.define('nir_intrinsic_shuffle_down_uniform_ir3', 649)
nir_intrinsic_shuffle_up = nir_intrinsic_op.define('nir_intrinsic_shuffle_up', 650)
nir_intrinsic_shuffle_up_uniform_ir3 = nir_intrinsic_op.define('nir_intrinsic_shuffle_up_uniform_ir3', 651)
nir_intrinsic_shuffle_xor = nir_intrinsic_op.define('nir_intrinsic_shuffle_xor', 652)
nir_intrinsic_shuffle_xor_uniform_ir3 = nir_intrinsic_op.define('nir_intrinsic_shuffle_xor_uniform_ir3', 653)
nir_intrinsic_sleep_amd = nir_intrinsic_op.define('nir_intrinsic_sleep_amd', 654)
nir_intrinsic_sparse_residency_code_and = nir_intrinsic_op.define('nir_intrinsic_sparse_residency_code_and', 655)
nir_intrinsic_ssa_bar_nv = nir_intrinsic_op.define('nir_intrinsic_ssa_bar_nv', 656)
nir_intrinsic_ssbo_atomic = nir_intrinsic_op.define('nir_intrinsic_ssbo_atomic', 657)
nir_intrinsic_ssbo_atomic_ir3 = nir_intrinsic_op.define('nir_intrinsic_ssbo_atomic_ir3', 658)
nir_intrinsic_ssbo_atomic_swap = nir_intrinsic_op.define('nir_intrinsic_ssbo_atomic_swap', 659)
nir_intrinsic_ssbo_atomic_swap_ir3 = nir_intrinsic_op.define('nir_intrinsic_ssbo_atomic_swap_ir3', 660)
nir_intrinsic_stack_map_agx = nir_intrinsic_op.define('nir_intrinsic_stack_map_agx', 661)
nir_intrinsic_stack_unmap_agx = nir_intrinsic_op.define('nir_intrinsic_stack_unmap_agx', 662)
nir_intrinsic_store_agx = nir_intrinsic_op.define('nir_intrinsic_store_agx', 663)
nir_intrinsic_store_buffer_amd = nir_intrinsic_op.define('nir_intrinsic_store_buffer_amd', 664)
nir_intrinsic_store_combined_output_pan = nir_intrinsic_op.define('nir_intrinsic_store_combined_output_pan', 665)
nir_intrinsic_store_const_ir3 = nir_intrinsic_op.define('nir_intrinsic_store_const_ir3', 666)
nir_intrinsic_store_deref = nir_intrinsic_op.define('nir_intrinsic_store_deref', 667)
nir_intrinsic_store_deref_block_intel = nir_intrinsic_op.define('nir_intrinsic_store_deref_block_intel', 668)
nir_intrinsic_store_global = nir_intrinsic_op.define('nir_intrinsic_store_global', 669)
nir_intrinsic_store_global_2x32 = nir_intrinsic_op.define('nir_intrinsic_store_global_2x32', 670)
nir_intrinsic_store_global_amd = nir_intrinsic_op.define('nir_intrinsic_store_global_amd', 671)
nir_intrinsic_store_global_block_intel = nir_intrinsic_op.define('nir_intrinsic_store_global_block_intel', 672)
nir_intrinsic_store_global_etna = nir_intrinsic_op.define('nir_intrinsic_store_global_etna', 673)
nir_intrinsic_store_global_ir3 = nir_intrinsic_op.define('nir_intrinsic_store_global_ir3', 674)
nir_intrinsic_store_hit_attrib_amd = nir_intrinsic_op.define('nir_intrinsic_store_hit_attrib_amd', 675)
nir_intrinsic_store_local_pixel_agx = nir_intrinsic_op.define('nir_intrinsic_store_local_pixel_agx', 676)
nir_intrinsic_store_local_shared_r600 = nir_intrinsic_op.define('nir_intrinsic_store_local_shared_r600', 677)
nir_intrinsic_store_output = nir_intrinsic_op.define('nir_intrinsic_store_output', 678)
nir_intrinsic_store_per_primitive_output = nir_intrinsic_op.define('nir_intrinsic_store_per_primitive_output', 679)
nir_intrinsic_store_per_primitive_payload_intel = nir_intrinsic_op.define('nir_intrinsic_store_per_primitive_payload_intel', 680)
nir_intrinsic_store_per_vertex_output = nir_intrinsic_op.define('nir_intrinsic_store_per_vertex_output', 681)
nir_intrinsic_store_per_view_output = nir_intrinsic_op.define('nir_intrinsic_store_per_view_output', 682)
nir_intrinsic_store_preamble = nir_intrinsic_op.define('nir_intrinsic_store_preamble', 683)
nir_intrinsic_store_raw_output_pan = nir_intrinsic_op.define('nir_intrinsic_store_raw_output_pan', 684)
nir_intrinsic_store_reg = nir_intrinsic_op.define('nir_intrinsic_store_reg', 685)
nir_intrinsic_store_reg_indirect = nir_intrinsic_op.define('nir_intrinsic_store_reg_indirect', 686)
nir_intrinsic_store_scalar_arg_amd = nir_intrinsic_op.define('nir_intrinsic_store_scalar_arg_amd', 687)
nir_intrinsic_store_scratch = nir_intrinsic_op.define('nir_intrinsic_store_scratch', 688)
nir_intrinsic_store_shared = nir_intrinsic_op.define('nir_intrinsic_store_shared', 689)
nir_intrinsic_store_shared2_amd = nir_intrinsic_op.define('nir_intrinsic_store_shared2_amd', 690)
nir_intrinsic_store_shared_block_intel = nir_intrinsic_op.define('nir_intrinsic_store_shared_block_intel', 691)
nir_intrinsic_store_shared_ir3 = nir_intrinsic_op.define('nir_intrinsic_store_shared_ir3', 692)
nir_intrinsic_store_shared_unlock_nv = nir_intrinsic_op.define('nir_intrinsic_store_shared_unlock_nv', 693)
nir_intrinsic_store_ssbo = nir_intrinsic_op.define('nir_intrinsic_store_ssbo', 694)
nir_intrinsic_store_ssbo_block_intel = nir_intrinsic_op.define('nir_intrinsic_store_ssbo_block_intel', 695)
nir_intrinsic_store_ssbo_intel = nir_intrinsic_op.define('nir_intrinsic_store_ssbo_intel', 696)
nir_intrinsic_store_ssbo_ir3 = nir_intrinsic_op.define('nir_intrinsic_store_ssbo_ir3', 697)
nir_intrinsic_store_stack = nir_intrinsic_op.define('nir_intrinsic_store_stack', 698)
nir_intrinsic_store_task_payload = nir_intrinsic_op.define('nir_intrinsic_store_task_payload', 699)
nir_intrinsic_store_tf_r600 = nir_intrinsic_op.define('nir_intrinsic_store_tf_r600', 700)
nir_intrinsic_store_tlb_sample_color_v3d = nir_intrinsic_op.define('nir_intrinsic_store_tlb_sample_color_v3d', 701)
nir_intrinsic_store_uvs_agx = nir_intrinsic_op.define('nir_intrinsic_store_uvs_agx', 702)
nir_intrinsic_store_vector_arg_amd = nir_intrinsic_op.define('nir_intrinsic_store_vector_arg_amd', 703)
nir_intrinsic_store_zs_agx = nir_intrinsic_op.define('nir_intrinsic_store_zs_agx', 704)
nir_intrinsic_strict_wqm_coord_amd = nir_intrinsic_op.define('nir_intrinsic_strict_wqm_coord_amd', 705)
nir_intrinsic_subfm_nv = nir_intrinsic_op.define('nir_intrinsic_subfm_nv', 706)
nir_intrinsic_suclamp_nv = nir_intrinsic_op.define('nir_intrinsic_suclamp_nv', 707)
nir_intrinsic_sueau_nv = nir_intrinsic_op.define('nir_intrinsic_sueau_nv', 708)
nir_intrinsic_suldga_nv = nir_intrinsic_op.define('nir_intrinsic_suldga_nv', 709)
nir_intrinsic_sustga_nv = nir_intrinsic_op.define('nir_intrinsic_sustga_nv', 710)
nir_intrinsic_task_payload_atomic = nir_intrinsic_op.define('nir_intrinsic_task_payload_atomic', 711)
nir_intrinsic_task_payload_atomic_swap = nir_intrinsic_op.define('nir_intrinsic_task_payload_atomic_swap', 712)
nir_intrinsic_terminate = nir_intrinsic_op.define('nir_intrinsic_terminate', 713)
nir_intrinsic_terminate_if = nir_intrinsic_op.define('nir_intrinsic_terminate_if', 714)
nir_intrinsic_terminate_ray = nir_intrinsic_op.define('nir_intrinsic_terminate_ray', 715)
nir_intrinsic_trace_ray = nir_intrinsic_op.define('nir_intrinsic_trace_ray', 716)
nir_intrinsic_trace_ray_intel = nir_intrinsic_op.define('nir_intrinsic_trace_ray_intel', 717)
nir_intrinsic_unit_test_amd = nir_intrinsic_op.define('nir_intrinsic_unit_test_amd', 718)
nir_intrinsic_unit_test_divergent_amd = nir_intrinsic_op.define('nir_intrinsic_unit_test_divergent_amd', 719)
nir_intrinsic_unit_test_uniform_amd = nir_intrinsic_op.define('nir_intrinsic_unit_test_uniform_amd', 720)
nir_intrinsic_unpin_cx_handle_nv = nir_intrinsic_op.define('nir_intrinsic_unpin_cx_handle_nv', 721)
nir_intrinsic_use = nir_intrinsic_op.define('nir_intrinsic_use', 722)
nir_intrinsic_vild_nv = nir_intrinsic_op.define('nir_intrinsic_vild_nv', 723)
nir_intrinsic_vote_all = nir_intrinsic_op.define('nir_intrinsic_vote_all', 724)
nir_intrinsic_vote_any = nir_intrinsic_op.define('nir_intrinsic_vote_any', 725)
nir_intrinsic_vote_feq = nir_intrinsic_op.define('nir_intrinsic_vote_feq', 726)
nir_intrinsic_vote_ieq = nir_intrinsic_op.define('nir_intrinsic_vote_ieq', 727)
nir_intrinsic_vulkan_resource_index = nir_intrinsic_op.define('nir_intrinsic_vulkan_resource_index', 728)
nir_intrinsic_vulkan_resource_reindex = nir_intrinsic_op.define('nir_intrinsic_vulkan_resource_reindex', 729)
nir_intrinsic_write_invocation_amd = nir_intrinsic_op.define('nir_intrinsic_write_invocation_amd', 730)
nir_intrinsic_xfb_counter_sub_gfx11_amd = nir_intrinsic_op.define('nir_intrinsic_xfb_counter_sub_gfx11_amd', 731)
nir_last_intrinsic = nir_intrinsic_op.define('nir_last_intrinsic', 731)
nir_num_intrinsics = nir_intrinsic_op.define('nir_num_intrinsics', 732)
nir_intrinsic_instr: TypeAlias = struct_nir_intrinsic_instr
# Memory-ordering semantics flags; power-of-two values so they can be OR-ed
# together (ACQ_REL == ACQUIRE | RELEASE == 3).
class nir_memory_semantics(Annotated[int, ctypes.c_uint32], c.Enum): pass
NIR_MEMORY_ACQUIRE = nir_memory_semantics.define('NIR_MEMORY_ACQUIRE', 1)
NIR_MEMORY_RELEASE = nir_memory_semantics.define('NIR_MEMORY_RELEASE', 2)
NIR_MEMORY_ACQ_REL = nir_memory_semantics.define('NIR_MEMORY_ACQ_REL', 3)
NIR_MEMORY_MAKE_AVAILABLE = nir_memory_semantics.define('NIR_MEMORY_MAKE_AVAILABLE', 4)
NIR_MEMORY_MAKE_VISIBLE = nir_memory_semantics.define('NIR_MEMORY_MAKE_VISIBLE', 8)
# Per-intrinsic semantic flags (bitmask values); used by
# struct_nir_intrinsic_info.flags below.
class nir_intrinsic_semantic_flag(Annotated[int, ctypes.c_uint32], c.Enum): pass
NIR_INTRINSIC_CAN_ELIMINATE = nir_intrinsic_semantic_flag.define('NIR_INTRINSIC_CAN_ELIMINATE', 1)
NIR_INTRINSIC_CAN_REORDER = nir_intrinsic_semantic_flag.define('NIR_INTRINSIC_CAN_REORDER', 2)
NIR_INTRINSIC_SUBGROUP = nir_intrinsic_semantic_flag.define('NIR_INTRINSIC_SUBGROUP', 4)
NIR_INTRINSIC_QUADGROUP = nir_intrinsic_semantic_flag.define('NIR_INTRINSIC_QUADGROUP', 8)
# Mirror of C struct nir_io_semantics: a 4-byte all-bitfield struct.
# The three ints after each field type appear to encode
# (byte offset, bit width, bit shift) for bitfield members -- TODO confirm
# against the c.record helper's convention.
@c.record
class struct_nir_io_semantics(c.Struct):
  SIZE = 4
  location: Annotated[Annotated[int, ctypes.c_uint32], 0, 7, 0]
  num_slots: Annotated[Annotated[int, ctypes.c_uint32], 0, 6, 7]
  dual_source_blend_index: Annotated[Annotated[int, ctypes.c_uint32], 1, 1, 5]
  fb_fetch_output: Annotated[Annotated[int, ctypes.c_uint32], 1, 1, 6]
  fb_fetch_output_coherent: Annotated[Annotated[int, ctypes.c_uint32], 1, 1, 7]
  gs_streams: Annotated[Annotated[int, ctypes.c_uint32], 2, 8, 0]
  medium_precision: Annotated[Annotated[int, ctypes.c_uint32], 3, 1, 0]
  per_view: Annotated[Annotated[int, ctypes.c_uint32], 3, 1, 1]
  high_16bits: Annotated[Annotated[int, ctypes.c_uint32], 3, 1, 2]
  high_dvec2: Annotated[Annotated[int, ctypes.c_uint32], 3, 1, 3]
  no_varying: Annotated[Annotated[int, ctypes.c_uint32], 3, 1, 4]
  no_sysval_output: Annotated[Annotated[int, ctypes.c_uint32], 3, 1, 5]
  interp_explicit_strict: Annotated[Annotated[int, ctypes.c_uint32], 3, 1, 6]
  _pad: Annotated[Annotated[int, ctypes.c_uint32], 3, 1, 7]
nir_io_semantics: TypeAlias = struct_nir_io_semantics
# Mirror of C struct nir_io_xfb: two packed transform-feedback output slots.
# NOTE(review): struct_nir_io_xfb_out is referenced before its definition
# below; presumably the file relies on deferred annotation evaluation
# (from __future__ import annotations) -- verify at the top of the file.
@c.record
class struct_nir_io_xfb(c.Struct):
  SIZE = 4
  out: Annotated[c.Array[struct_nir_io_xfb_out, Literal[2]], 0]
# One 2-byte xfb output entry: two 4-bit fields packed in byte 0, plus a
# whole-byte offset at byte 1.
@c.record
class struct_nir_io_xfb_out(c.Struct):
  SIZE = 2
  num_components: Annotated[uint8_t, 0, 4, 0]
  buffer: Annotated[uint8_t, 0, 4, 4]
  offset: Annotated[uint8_t, 1]
nir_io_xfb: TypeAlias = struct_nir_io_xfb
# C function binding: resolved from the loaded library; the `...` body is a
# stub replaced by the dll.bind machinery.
@dll.bind
def nir_instr_xfb_write_mask(instr:c.POINTER[nir_intrinsic_instr]) -> Annotated[int, ctypes.c_uint32]: ...
# Mirror of C struct nir_intrinsic_info (112 bytes): static metadata about one
# intrinsic opcode (name, source/dest component counts, const-index layout,
# semantic flags). Second Annotated arg is the field's byte offset.
@c.record
class struct_nir_intrinsic_info(c.Struct):
  SIZE = 112
  name: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 0]
  num_srcs: Annotated[uint8_t, 8]
  src_components: Annotated[c.Array[int8_t, Literal[11]], 9]
  has_dest: Annotated[Annotated[bool, ctypes.c_bool], 20]
  dest_components: Annotated[uint8_t, 21]
  dest_bit_sizes: Annotated[uint8_t, 22]
  bit_size_src: Annotated[int8_t, 23]
  num_indices: Annotated[uint8_t, 24]
  indices: Annotated[c.Array[uint8_t, Literal[8]], 25]
  index_map: Annotated[c.Array[uint8_t, Literal[75]], 33]
  flags: Annotated[nir_intrinsic_semantic_flag, 108]
nir_intrinsic_info: TypeAlias = struct_nir_intrinsic_info
# Global per-intrinsic metadata table (one entry per opcode, 732 total);
# the try/except tolerates libraries that do not export the symbol.
try: nir_intrinsic_infos = c.Array[nir_intrinsic_info, Literal[732]].in_dll(dll, 'nir_intrinsic_infos') # type: ignore
except (ValueError,AttributeError): pass
# C function bindings: intrinsic-instruction query/manipulation helpers.
# Bodies are `...` stubs; dll.bind resolves each symbol from the loaded
# library using the annotated ctypes signature.
@dll.bind
def nir_intrinsic_src_components(intr:c.POINTER[nir_intrinsic_instr], srcn:Annotated[int, ctypes.c_uint32]) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def nir_intrinsic_dest_components(intr:c.POINTER[nir_intrinsic_instr]) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def nir_intrinsic_instr_src_type(intrin:c.POINTER[nir_intrinsic_instr], src:Annotated[int, ctypes.c_uint32]) -> nir_alu_type: ...
@dll.bind
def nir_intrinsic_instr_dest_type(intrin:c.POINTER[nir_intrinsic_instr]) -> nir_alu_type: ...
@dll.bind
def nir_intrinsic_copy_const_indices(dst:c.POINTER[nir_intrinsic_instr], src:c.POINTER[nir_intrinsic_instr]) -> None: ...
@dll.bind
def nir_image_intrinsic_coord_components(instr:c.POINTER[nir_intrinsic_instr]) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def nir_rewrite_image_intrinsic(instr:c.POINTER[nir_intrinsic_instr], handle:c.POINTER[nir_def], bindless:Annotated[bool, ctypes.c_bool]) -> None: ...
@dll.bind
def nir_intrinsic_can_reorder(instr:c.POINTER[nir_intrinsic_instr]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_intrinsic_writes_external_memory(instr:c.POINTER[nir_intrinsic_instr]) -> Annotated[bool, ctypes.c_bool]: ...
# Kinds of sources a texture instruction may carry (coordinate, LOD, derefs,
# backend-specific payloads, ...); nir_num_tex_src_types is the count.
class enum_nir_tex_src_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
nir_tex_src_coord = enum_nir_tex_src_type.define('nir_tex_src_coord', 0)
nir_tex_src_projector = enum_nir_tex_src_type.define('nir_tex_src_projector', 1)
nir_tex_src_comparator = enum_nir_tex_src_type.define('nir_tex_src_comparator', 2)
nir_tex_src_offset = enum_nir_tex_src_type.define('nir_tex_src_offset', 3)
nir_tex_src_bias = enum_nir_tex_src_type.define('nir_tex_src_bias', 4)
nir_tex_src_lod = enum_nir_tex_src_type.define('nir_tex_src_lod', 5)
nir_tex_src_min_lod = enum_nir_tex_src_type.define('nir_tex_src_min_lod', 6)
nir_tex_src_lod_bias_min_agx = enum_nir_tex_src_type.define('nir_tex_src_lod_bias_min_agx', 7)
nir_tex_src_ms_index = enum_nir_tex_src_type.define('nir_tex_src_ms_index', 8)
nir_tex_src_ms_mcs_intel = enum_nir_tex_src_type.define('nir_tex_src_ms_mcs_intel', 9)
nir_tex_src_ddx = enum_nir_tex_src_type.define('nir_tex_src_ddx', 10)
nir_tex_src_ddy = enum_nir_tex_src_type.define('nir_tex_src_ddy', 11)
nir_tex_src_texture_deref = enum_nir_tex_src_type.define('nir_tex_src_texture_deref', 12)
nir_tex_src_sampler_deref = enum_nir_tex_src_type.define('nir_tex_src_sampler_deref', 13)
nir_tex_src_texture_offset = enum_nir_tex_src_type.define('nir_tex_src_texture_offset', 14)
nir_tex_src_sampler_offset = enum_nir_tex_src_type.define('nir_tex_src_sampler_offset', 15)
nir_tex_src_texture_handle = enum_nir_tex_src_type.define('nir_tex_src_texture_handle', 16)
nir_tex_src_sampler_handle = enum_nir_tex_src_type.define('nir_tex_src_sampler_handle', 17)
nir_tex_src_sampler_deref_intrinsic = enum_nir_tex_src_type.define('nir_tex_src_sampler_deref_intrinsic', 18)
nir_tex_src_texture_deref_intrinsic = enum_nir_tex_src_type.define('nir_tex_src_texture_deref_intrinsic', 19)
nir_tex_src_plane = enum_nir_tex_src_type.define('nir_tex_src_plane', 20)
nir_tex_src_backend1 = enum_nir_tex_src_type.define('nir_tex_src_backend1', 21)
nir_tex_src_backend2 = enum_nir_tex_src_type.define('nir_tex_src_backend2', 22)
nir_num_tex_src_types = enum_nir_tex_src_type.define('nir_num_tex_src_types', 23)
nir_tex_src_type: TypeAlias = enum_nir_tex_src_type
# Mirror of C struct nir_tex_src (40 bytes): one tagged source operand of a
# texture instruction -- the SSA source plus a tag saying what it is.
@c.record
class struct_nir_tex_src(c.Struct):
  SIZE = 40
  src: Annotated[nir_src, 0]
  src_type: Annotated[nir_tex_src_type, 32]
nir_tex_src: TypeAlias = struct_nir_tex_src
# Texture operation opcodes (sample, fetch, query, gather and vendor variants).
class enum_nir_texop(Annotated[int, ctypes.c_uint32], c.Enum): pass
nir_texop_tex = enum_nir_texop.define('nir_texop_tex', 0)
nir_texop_txb = enum_nir_texop.define('nir_texop_txb', 1)
nir_texop_txl = enum_nir_texop.define('nir_texop_txl', 2)
nir_texop_txd = enum_nir_texop.define('nir_texop_txd', 3)
nir_texop_txf = enum_nir_texop.define('nir_texop_txf', 4)
nir_texop_txf_ms = enum_nir_texop.define('nir_texop_txf_ms', 5)
nir_texop_txf_ms_fb = enum_nir_texop.define('nir_texop_txf_ms_fb', 6)
nir_texop_txf_ms_mcs_intel = enum_nir_texop.define('nir_texop_txf_ms_mcs_intel', 7)
nir_texop_txs = enum_nir_texop.define('nir_texop_txs', 8)
nir_texop_lod = enum_nir_texop.define('nir_texop_lod', 9)
nir_texop_tg4 = enum_nir_texop.define('nir_texop_tg4', 10)
nir_texop_query_levels = enum_nir_texop.define('nir_texop_query_levels', 11)
nir_texop_texture_samples = enum_nir_texop.define('nir_texop_texture_samples', 12)
nir_texop_samples_identical = enum_nir_texop.define('nir_texop_samples_identical', 13)
nir_texop_tex_prefetch = enum_nir_texop.define('nir_texop_tex_prefetch', 14)
nir_texop_lod_bias = enum_nir_texop.define('nir_texop_lod_bias', 15)
nir_texop_fragment_fetch_amd = enum_nir_texop.define('nir_texop_fragment_fetch_amd', 16)
nir_texop_fragment_mask_fetch_amd = enum_nir_texop.define('nir_texop_fragment_mask_fetch_amd', 17)
nir_texop_descriptor_amd = enum_nir_texop.define('nir_texop_descriptor_amd', 18)
nir_texop_sampler_descriptor_amd = enum_nir_texop.define('nir_texop_sampler_descriptor_amd', 19)
nir_texop_image_min_lod_agx = enum_nir_texop.define('nir_texop_image_min_lod_agx', 20)
nir_texop_has_custom_border_color_agx = enum_nir_texop.define('nir_texop_has_custom_border_color_agx', 21)
nir_texop_custom_border_color_agx = enum_nir_texop.define('nir_texop_custom_border_color_agx', 22)
nir_texop_hdr_dim_nv = enum_nir_texop.define('nir_texop_hdr_dim_nv', 23)
nir_texop_tex_type_nv = enum_nir_texop.define('nir_texop_tex_type_nv', 24)
nir_texop: TypeAlias = enum_nir_texop
# Mirror of C struct nir_tex_instr (128 bytes): a texture instruction.
# Fields at byte 100 are packed bitfields (the extra Annotated ints appear to
# be bit width / bit shift -- TODO confirm against c.record). `_def` is the
# SSA destination; `_def` avoids shadowing the Python keyword `def`.
# NOTE(review): enum_glsl_sampler_dim is referenced before its definition
# below; presumably deferred annotation evaluation is in effect.
@c.record
class struct_nir_tex_instr(c.Struct):
  SIZE = 128
  instr: Annotated[nir_instr, 0]
  sampler_dim: Annotated[enum_glsl_sampler_dim, 32]
  dest_type: Annotated[nir_alu_type, 36]
  op: Annotated[nir_texop, 40]
  _def: Annotated[nir_def, 48]
  src: Annotated[c.POINTER[nir_tex_src], 80]
  num_srcs: Annotated[Annotated[int, ctypes.c_uint32], 88]
  coord_components: Annotated[Annotated[int, ctypes.c_uint32], 92]
  is_array: Annotated[Annotated[bool, ctypes.c_bool], 96]
  is_shadow: Annotated[Annotated[bool, ctypes.c_bool], 97]
  is_new_style_shadow: Annotated[Annotated[bool, ctypes.c_bool], 98]
  is_sparse: Annotated[Annotated[bool, ctypes.c_bool], 99]
  component: Annotated[Annotated[int, ctypes.c_uint32], 100, 2, 0]
  array_is_lowered_cube: Annotated[Annotated[int, ctypes.c_uint32], 100, 1, 2]
  is_gather_implicit_lod: Annotated[Annotated[int, ctypes.c_uint32], 100, 1, 3]
  skip_helpers: Annotated[Annotated[int, ctypes.c_uint32], 100, 1, 4]
  tg4_offsets: Annotated[c.Array[c.Array[int8_t, Literal[2]], Literal[4]], 101]
  texture_non_uniform: Annotated[Annotated[bool, ctypes.c_bool], 109]
  sampler_non_uniform: Annotated[Annotated[bool, ctypes.c_bool], 110]
  offset_non_uniform: Annotated[Annotated[bool, ctypes.c_bool], 111]
  texture_index: Annotated[Annotated[int, ctypes.c_uint32], 112]
  sampler_index: Annotated[Annotated[int, ctypes.c_uint32], 116]
  backend_flags: Annotated[uint32_t, 120]
# GLSL sampler dimensionalities (1D/2D/3D/cube/rect/buffer/external/MS/subpass).
class enum_glsl_sampler_dim(Annotated[int, ctypes.c_uint32], c.Enum): pass
GLSL_SAMPLER_DIM_1D = enum_glsl_sampler_dim.define('GLSL_SAMPLER_DIM_1D', 0)
GLSL_SAMPLER_DIM_2D = enum_glsl_sampler_dim.define('GLSL_SAMPLER_DIM_2D', 1)
GLSL_SAMPLER_DIM_3D = enum_glsl_sampler_dim.define('GLSL_SAMPLER_DIM_3D', 2)
GLSL_SAMPLER_DIM_CUBE = enum_glsl_sampler_dim.define('GLSL_SAMPLER_DIM_CUBE', 3)
GLSL_SAMPLER_DIM_RECT = enum_glsl_sampler_dim.define('GLSL_SAMPLER_DIM_RECT', 4)
GLSL_SAMPLER_DIM_BUF = enum_glsl_sampler_dim.define('GLSL_SAMPLER_DIM_BUF', 5)
GLSL_SAMPLER_DIM_EXTERNAL = enum_glsl_sampler_dim.define('GLSL_SAMPLER_DIM_EXTERNAL', 6)
GLSL_SAMPLER_DIM_MS = enum_glsl_sampler_dim.define('GLSL_SAMPLER_DIM_MS', 7)
GLSL_SAMPLER_DIM_SUBPASS = enum_glsl_sampler_dim.define('GLSL_SAMPLER_DIM_SUBPASS', 8)
GLSL_SAMPLER_DIM_SUBPASS_MS = enum_glsl_sampler_dim.define('GLSL_SAMPLER_DIM_SUBPASS_MS', 9)
nir_tex_instr: TypeAlias = struct_nir_tex_instr
# C function bindings: texture-instruction query/manipulation helpers
# (`...` bodies are stubs replaced by the dll.bind machinery).
@dll.bind
def nir_tex_instr_need_sampler(instr:c.POINTER[nir_tex_instr]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_tex_instr_result_size(instr:c.POINTER[nir_tex_instr]) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def nir_tex_instr_is_query(instr:c.POINTER[nir_tex_instr]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_tex_instr_has_implicit_derivative(instr:c.POINTER[nir_tex_instr]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_tex_instr_src_type(instr:c.POINTER[nir_tex_instr], src:Annotated[int, ctypes.c_uint32]) -> nir_alu_type: ...
@dll.bind
def nir_tex_instr_src_size(instr:c.POINTER[nir_tex_instr], src:Annotated[int, ctypes.c_uint32]) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def nir_tex_instr_add_src(tex:c.POINTER[nir_tex_instr], src_type:nir_tex_src_type, src:c.POINTER[nir_def]) -> None: ...
@dll.bind
def nir_tex_instr_remove_src(tex:c.POINTER[nir_tex_instr], src_idx:Annotated[int, ctypes.c_uint32]) -> None: ...
@dll.bind
def nir_tex_instr_has_explicit_tg4_offsets(tex:c.POINTER[nir_tex_instr]) -> Annotated[bool, ctypes.c_bool]: ...
# Mirror of C struct nir_load_const_instr. `value` is a zero-length array at
# the end of the struct (C flexible array member: constants trail the struct).
@c.record
class struct_nir_load_const_instr(c.Struct):
  SIZE = 64
  instr: Annotated[nir_instr, 0]
  _def: Annotated[nir_def, 32]
  value: Annotated[c.Array[nir_const_value, Literal[0]], 64]
nir_load_const_instr: TypeAlias = struct_nir_load_const_instr
# Control-flow jump kinds carried by struct_nir_jump_instr below.
class nir_jump_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
nir_jump_return = nir_jump_type.define('nir_jump_return', 0)
nir_jump_halt = nir_jump_type.define('nir_jump_halt', 1)
nir_jump_break = nir_jump_type.define('nir_jump_break', 2)
nir_jump_continue = nir_jump_type.define('nir_jump_continue', 3)
nir_jump_goto = nir_jump_type.define('nir_jump_goto', 4)
nir_jump_goto_if = nir_jump_type.define('nir_jump_goto_if', 5)
# Mirror of C struct nir_jump_instr: a jump plus its (possibly unused)
# condition and target/else-target blocks.
@c.record
class struct_nir_jump_instr(c.Struct):
  SIZE = 88
  instr: Annotated[nir_instr, 0]
  type: Annotated[nir_jump_type, 32]
  condition: Annotated[nir_src, 40]
  target: Annotated[c.POINTER[nir_block], 72]
  else_target: Annotated[c.POINTER[nir_block], 80]
nir_jump_instr: TypeAlias = struct_nir_jump_instr
# Mirror of C struct nir_undef_instr: an instruction producing an undefined
# SSA value (`_def` avoids shadowing the Python keyword `def`).
@c.record
class struct_nir_undef_instr(c.Struct):
  SIZE = 64
  instr: Annotated[nir_instr, 0]
  _def: Annotated[nir_def, 32]
nir_undef_instr: TypeAlias = struct_nir_undef_instr
# Mirror of C struct nir_phi_src: one (predecessor block, source) pair in a
# phi node's source list; `node` links it into the exec_list below.
@c.record
class struct_nir_phi_src(c.Struct):
  SIZE = 56
  node: Annotated[struct_exec_node, 0]
  pred: Annotated[c.POINTER[nir_block], 16]
  src: Annotated[nir_src, 24]
nir_phi_src: TypeAlias = struct_nir_phi_src
# Mirror of C struct nir_phi_instr: an SSA phi with an exec_list of
# nir_phi_src entries and its destination value.
@c.record
class struct_nir_phi_instr(c.Struct):
  SIZE = 96
  instr: Annotated[nir_instr, 0]
  srcs: Annotated[struct_exec_list, 32]
  _def: Annotated[nir_def, 64]
nir_phi_instr: TypeAlias = struct_nir_phi_instr
# Mirror of C struct nir_parallel_copy_entry: one copy in a parallel-copy
# instruction; src_is_reg/dest_is_reg select which arm of the dest record is
# meaningful.
# NOTE(review): struct_nir_parallel_copy_entry_dest is referenced before its
# definition below; presumably deferred annotation evaluation is in effect.
@c.record
class struct_nir_parallel_copy_entry(c.Struct):
  SIZE = 88
  node: Annotated[struct_exec_node, 0]
  src_is_reg: Annotated[Annotated[bool, ctypes.c_bool], 16]
  dest_is_reg: Annotated[Annotated[bool, ctypes.c_bool], 17]
  src: Annotated[nir_src, 24]
  dest: Annotated[struct_nir_parallel_copy_entry_dest, 56]
# Destination of a parallel-copy entry: both members share offset 0, i.e.
# this mirrors a C union (SSA def vs. register source).
@c.record
class struct_nir_parallel_copy_entry_dest(c.Struct):
  SIZE = 32
  _def: Annotated[nir_def, 0]
  reg: Annotated[nir_src, 0]
nir_parallel_copy_entry: TypeAlias = struct_nir_parallel_copy_entry
# Mirror of C struct nir_parallel_copy_instr: an exec_list of the entries
# above.
@c.record
class struct_nir_parallel_copy_instr(c.Struct):
  SIZE = 64
  instr: Annotated[nir_instr, 0]
  entries: Annotated[struct_exec_list, 32]
nir_parallel_copy_instr: TypeAlias = struct_nir_parallel_copy_instr
# Mirror of C struct nir_instr_debug_info: source-location metadata
# (file/line/column, SPIR-V offset, variable name) attached to an instruction.
@c.record
class struct_nir_instr_debug_info(c.Struct):
  SIZE = 64
  filename: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 0]
  line: Annotated[uint32_t, 8]
  column: Annotated[uint32_t, 12]
  spirv_offset: Annotated[uint32_t, 16]
  nir_line: Annotated[uint32_t, 20]
  variable_name: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 24]
  instr: Annotated[nir_instr, 32]
nir_instr_debug_info: TypeAlias = struct_nir_instr_debug_info
# A single scalar component of an SSA def: (def pointer, component index).
@c.record
class struct_nir_scalar(c.Struct):
  SIZE = 16
  _def: Annotated[c.POINTER[nir_def], 0]
  comp: Annotated[Annotated[int, ctypes.c_uint32], 8]
nir_scalar: TypeAlias = struct_nir_scalar
# Native-function stub: the implementation lives in the loaded library.
@dll.bind
def nir_scalar_chase_movs(s:nir_scalar) -> nir_scalar: ...
# Result of chasing a resource binding: descriptor set/binding plus up to 4
# index sources.
@c.record
class struct_nir_binding(c.Struct):
  SIZE = 168
  success: Annotated[Annotated[bool, ctypes.c_bool], 0]
  var: Annotated[c.POINTER[nir_variable], 8]
  desc_set: Annotated[Annotated[int, ctypes.c_uint32], 16]
  binding: Annotated[Annotated[int, ctypes.c_uint32], 20]
  num_indices: Annotated[Annotated[int, ctypes.c_uint32], 24]
  indices: Annotated[c.Array[nir_src, Literal[4]], 32]
  read_first_invocation: Annotated[Annotated[bool, ctypes.c_bool], 160]
nir_binding: TypeAlias = struct_nir_binding
# Native-function stubs bound via @dll.bind.
@dll.bind
def nir_chase_binding(rsrc:nir_src) -> nir_binding: ...
@dll.bind
def nir_get_binding_variable(shader:c.POINTER[nir_shader], binding:nir_binding) -> c.POINTER[nir_variable]: ...
@dll.bind
def nir_block_contains_work(block:c.POINTER[nir_block]) -> Annotated[bool, ctypes.c_bool]: ...
# Hints controlling how an if/else is flattened by the backend.
class nir_selection_control(Annotated[int, ctypes.c_uint32], c.Enum): pass
nir_selection_control_none = nir_selection_control.define('nir_selection_control_none', 0)
nir_selection_control_flatten = nir_selection_control.define('nir_selection_control_flatten', 1)
nir_selection_control_dont_flatten = nir_selection_control.define('nir_selection_control_dont_flatten', 2)
nir_selection_control_divergent_always_taken = nir_selection_control.define('nir_selection_control_divergent_always_taken', 3)
# An if/else control-flow node: condition plus then/else block lists.
@c.record
class struct_nir_if(c.Struct):
  SIZE = 136
  cf_node: Annotated[nir_cf_node, 0]  # embedded CF-node header
  condition: Annotated[nir_src, 32]
  control: Annotated[nir_selection_control, 64]
  then_list: Annotated[struct_exec_list, 72]
  else_list: Annotated[struct_exec_list, 104]
nir_if: TypeAlias = struct_nir_if
# Describes one conditional break out of a loop (used by loop analysis).
@c.record
class struct_nir_loop_terminator(c.Struct):
  SIZE = 56
  nif: Annotated[c.POINTER[nir_if], 0]
  conditional_instr: Annotated[c.POINTER[nir_instr], 8]
  break_block: Annotated[c.POINTER[nir_block], 16]
  continue_from_block: Annotated[c.POINTER[nir_block], 24]
  continue_from_then: Annotated[Annotated[bool, ctypes.c_bool], 32]
  induction_rhs: Annotated[Annotated[bool, ctypes.c_bool], 33]
  exact_trip_count_unknown: Annotated[Annotated[bool, ctypes.c_bool], 34]
  loop_terminator_link: Annotated[struct_list_head, 40]
nir_loop_terminator: TypeAlias = struct_nir_loop_terminator
# An induction variable found by loop analysis: its basis, def, and the
# init/update sources.
@c.record
class struct_nir_loop_induction_variable(c.Struct):
  SIZE = 32
  basis: Annotated[c.POINTER[nir_def], 0]
  _def: Annotated[c.POINTER[nir_def], 8]
  init_src: Annotated[c.POINTER[nir_src], 16]
  update_src: Annotated[c.POINTER[nir_alu_src], 24]
nir_loop_induction_variable: TypeAlias = struct_nir_loop_induction_variable
# Results of loop analysis: cost estimate, trip counts, terminators, and a
# hash table of induction variables.
@c.record
class struct_nir_loop_info(c.Struct):
  SIZE = 56
  instr_cost: Annotated[Annotated[int, ctypes.c_uint32], 0]
  has_soft_fp64: Annotated[Annotated[bool, ctypes.c_bool], 4]
  guessed_trip_count: Annotated[Annotated[int, ctypes.c_uint32], 8]
  max_trip_count: Annotated[Annotated[int, ctypes.c_uint32], 12]
  exact_trip_count_known: Annotated[Annotated[bool, ctypes.c_bool], 16]
  force_unroll: Annotated[Annotated[bool, ctypes.c_bool], 17]
  complex_loop: Annotated[Annotated[bool, ctypes.c_bool], 18]
  limiting_terminator: Annotated[c.POINTER[nir_loop_terminator], 24]
  loop_terminator_list: Annotated[struct_list_head, 32]
  induction_vars: Annotated[c.POINTER[struct_hash_table], 48]
# Mesa util open-addressing hash table (layout only; operations are native).
@c.record
class struct_hash_table(c.Struct):
  SIZE = 72
  table: Annotated[c.POINTER[struct_hash_entry], 0]
  key_hash_function: Annotated[c.CFUNCTYPE[uint32_t, [ctypes.c_void_p]], 8]
  key_equals_function: Annotated[c.CFUNCTYPE[Annotated[bool, ctypes.c_bool], [ctypes.c_void_p, ctypes.c_void_p]], 16]
  deleted_key: Annotated[ctypes.c_void_p, 24]
  size: Annotated[uint32_t, 32]
  rehash: Annotated[uint32_t, 36]
  size_magic: Annotated[uint64_t, 40]
  rehash_magic: Annotated[uint64_t, 48]
  max_entries: Annotated[uint32_t, 56]
  size_index: Annotated[uint32_t, 60]
  entries: Annotated[uint32_t, 64]
  deleted_entries: Annotated[uint32_t, 68]
# One slot of struct_hash_table: cached hash plus key/data pointers.
@c.record
class struct_hash_entry(c.Struct):
  SIZE = 24
  hash: Annotated[uint32_t, 0]
  key: Annotated[ctypes.c_void_p, 8]
  data: Annotated[ctypes.c_void_p, 16]
nir_loop_info: TypeAlias = struct_nir_loop_info
# Unrolling hint attached to a loop.
class nir_loop_control(Annotated[int, ctypes.c_uint32], c.Enum): pass
nir_loop_control_none = nir_loop_control.define('nir_loop_control_none', 0)
nir_loop_control_unroll = nir_loop_control.define('nir_loop_control_unroll', 1)
nir_loop_control_dont_unroll = nir_loop_control.define('nir_loop_control_dont_unroll', 2)
# A loop control-flow node: body/continue block lists plus analysis info.
@c.record
class struct_nir_loop(c.Struct):
  SIZE = 112
  cf_node: Annotated[nir_cf_node, 0]
  body: Annotated[struct_exec_list, 32]
  continue_list: Annotated[struct_exec_list, 64]
  info: Annotated[c.POINTER[nir_loop_info], 96]
  control: Annotated[nir_loop_control, 104]
  partially_unrolled: Annotated[Annotated[bool, ctypes.c_bool], 108]
  divergent_continue: Annotated[Annotated[bool, ctypes.c_bool], 109]
  divergent_break: Annotated[Annotated[bool, ctypes.c_bool], 110]
nir_loop: TypeAlias = struct_nir_loop
# C callback types used by filtering/vectorizing passes below.
nir_intrin_filter_cb: TypeAlias = c.CFUNCTYPE[Annotated[bool, ctypes.c_bool], [c.POINTER[struct_nir_intrinsic_instr], ctypes.c_void_p]]
nir_vectorize_cb: TypeAlias = c.CFUNCTYPE[Annotated[int, ctypes.c_ubyte], [c.POINTER[struct_nir_instr], ctypes.c_void_p]]
# --- Native-function stubs (bodies are '...'; @dll.bind resolves each symbol
# --- in the loaded library): shader/variable/function/CF-node construction.
@dll.bind
def nir_remove_non_entrypoints(shader:c.POINTER[nir_shader]) -> None: ...
@dll.bind
def nir_remove_non_exported(shader:c.POINTER[nir_shader]) -> None: ...
@dll.bind
def nir_remove_entrypoints(shader:c.POINTER[nir_shader]) -> None: ...
@dll.bind
def nir_fixup_is_exported(shader:c.POINTER[nir_shader]) -> None: ...
shader_info: TypeAlias = struct_shader_info
@dll.bind
def nir_shader_create(mem_ctx:ctypes.c_void_p, stage:gl_shader_stage, options:c.POINTER[nir_shader_compiler_options], si:c.POINTER[shader_info]) -> c.POINTER[nir_shader]: ...
@dll.bind
def nir_shader_add_variable(shader:c.POINTER[nir_shader], var:c.POINTER[nir_variable]) -> None: ...
@dll.bind
def nir_variable_create(shader:c.POINTER[nir_shader], mode:nir_variable_mode, type:c.POINTER[struct_glsl_type], name:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> c.POINTER[nir_variable]: ...
@dll.bind
def nir_local_variable_create(impl:c.POINTER[nir_function_impl], type:c.POINTER[struct_glsl_type], name:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> c.POINTER[nir_variable]: ...
@dll.bind
def nir_state_variable_create(shader:c.POINTER[nir_shader], type:c.POINTER[struct_glsl_type], name:c.POINTER[Annotated[bytes, ctypes.c_char]], tokens:c.Array[gl_state_index16, Literal[4]]) -> c.POINTER[nir_variable]: ...
@dll.bind
def nir_get_variable_with_location(shader:c.POINTER[nir_shader], mode:nir_variable_mode, location:Annotated[int, ctypes.c_int32], type:c.POINTER[struct_glsl_type]) -> c.POINTER[nir_variable]: ...
@dll.bind
def nir_create_variable_with_location(shader:c.POINTER[nir_shader], mode:nir_variable_mode, location:Annotated[int, ctypes.c_int32], type:c.POINTER[struct_glsl_type]) -> c.POINTER[nir_variable]: ...
@dll.bind
def nir_find_variable_with_location(shader:c.POINTER[nir_shader], mode:nir_variable_mode, location:Annotated[int, ctypes.c_uint32]) -> c.POINTER[nir_variable]: ...
@dll.bind
def nir_find_variable_with_driver_location(shader:c.POINTER[nir_shader], mode:nir_variable_mode, location:Annotated[int, ctypes.c_uint32]) -> c.POINTER[nir_variable]: ...
@dll.bind
def nir_find_state_variable(s:c.POINTER[nir_shader], tokens:c.Array[gl_state_index16, Literal[4]]) -> c.POINTER[nir_variable]: ...
@dll.bind
def nir_find_sampler_variable_with_tex_index(shader:c.POINTER[nir_shader], texture_index:Annotated[int, ctypes.c_uint32]) -> c.POINTER[nir_variable]: ...
@dll.bind
def nir_sort_variables_with_modes(shader:c.POINTER[nir_shader], compar:c.CFUNCTYPE[Annotated[int, ctypes.c_int32], [c.POINTER[nir_variable], c.POINTER[nir_variable]]], modes:nir_variable_mode) -> None: ...
@dll.bind
def nir_function_create(shader:c.POINTER[nir_shader], name:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> c.POINTER[nir_function]: ...
@dll.bind
def nir_function_impl_create(func:c.POINTER[nir_function]) -> c.POINTER[nir_function_impl]: ...
@dll.bind
def nir_function_impl_create_bare(shader:c.POINTER[nir_shader]) -> c.POINTER[nir_function_impl]: ...
@dll.bind
def nir_block_create(shader:c.POINTER[nir_shader]) -> c.POINTER[nir_block]: ...
@dll.bind
def nir_if_create(shader:c.POINTER[nir_shader]) -> c.POINTER[nir_if]: ...
# --- Native-function stubs: metadata management and instruction constructors.
@dll.bind
def nir_loop_create(shader:c.POINTER[nir_shader]) -> c.POINTER[nir_loop]: ...
@dll.bind
def nir_cf_node_get_function(node:c.POINTER[nir_cf_node]) -> c.POINTER[nir_function_impl]: ...
@dll.bind
def nir_metadata_require(impl:c.POINTER[nir_function_impl], required:nir_metadata) -> None: ...
@dll.bind
def nir_shader_preserve_all_metadata(shader:c.POINTER[nir_shader]) -> None: ...
@dll.bind
def nir_metadata_invalidate(shader:c.POINTER[nir_shader]) -> None: ...
@dll.bind
def nir_progress(progress:Annotated[bool, ctypes.c_bool], impl:c.POINTER[nir_function_impl], preserved:nir_metadata) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_alu_instr_create(shader:c.POINTER[nir_shader], op:nir_op) -> c.POINTER[nir_alu_instr]: ...
@dll.bind
def nir_deref_instr_create(shader:c.POINTER[nir_shader], deref_type:nir_deref_type) -> c.POINTER[nir_deref_instr]: ...
@dll.bind
def nir_jump_instr_create(shader:c.POINTER[nir_shader], type:nir_jump_type) -> c.POINTER[nir_jump_instr]: ...
@dll.bind
def nir_load_const_instr_create(shader:c.POINTER[nir_shader], num_components:Annotated[int, ctypes.c_uint32], bit_size:Annotated[int, ctypes.c_uint32]) -> c.POINTER[nir_load_const_instr]: ...
@dll.bind
def nir_intrinsic_instr_create(shader:c.POINTER[nir_shader], op:nir_intrinsic_op) -> c.POINTER[nir_intrinsic_instr]: ...
@dll.bind
def nir_call_instr_create(shader:c.POINTER[nir_shader], callee:c.POINTER[nir_function]) -> c.POINTER[nir_call_instr]: ...
@dll.bind
def nir_tex_instr_create(shader:c.POINTER[nir_shader], num_srcs:Annotated[int, ctypes.c_uint32]) -> c.POINTER[nir_tex_instr]: ...
@dll.bind
def nir_phi_instr_create(shader:c.POINTER[nir_shader]) -> c.POINTER[nir_phi_instr]: ...
@dll.bind
def nir_phi_instr_add_src(instr:c.POINTER[nir_phi_instr], pred:c.POINTER[nir_block], src:c.POINTER[nir_def]) -> c.POINTER[nir_phi_src]: ...
@dll.bind
def nir_parallel_copy_instr_create(shader:c.POINTER[nir_shader]) -> c.POINTER[nir_parallel_copy_instr]: ...
@dll.bind
def nir_undef_instr_create(shader:c.POINTER[nir_shader], num_components:Annotated[int, ctypes.c_uint32], bit_size:Annotated[int, ctypes.c_uint32]) -> c.POINTER[nir_undef_instr]: ...
@dll.bind
def nir_alu_binop_identity(binop:nir_op, bit_size:Annotated[int, ctypes.c_uint32]) -> nir_const_value: ...
# Where a cursor points relative to a block or instruction.
class nir_cursor_option(Annotated[int, ctypes.c_uint32], c.Enum): pass
nir_cursor_before_block = nir_cursor_option.define('nir_cursor_before_block', 0)
nir_cursor_after_block = nir_cursor_option.define('nir_cursor_after_block', 1)
nir_cursor_before_instr = nir_cursor_option.define('nir_cursor_before_instr', 2)
nir_cursor_after_instr = nir_cursor_option.define('nir_cursor_after_instr', 3)
# An insertion point. block and instr share offset 8 (a C union); which one is
# valid is presumably selected by 'option' — TODO confirm.
@c.record
class struct_nir_cursor(c.Struct):
  SIZE = 16
  option: Annotated[nir_cursor_option, 0]
  block: Annotated[c.POINTER[nir_block], 8]
  instr: Annotated[c.POINTER[nir_instr], 8]
nir_cursor: TypeAlias = struct_nir_cursor
# --- Native-function stubs: instruction insertion/removal and src/def queries.
@dll.bind
def nir_cursors_equal(a:nir_cursor, b:nir_cursor) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_instr_insert(cursor:nir_cursor, instr:c.POINTER[nir_instr]) -> None: ...
@dll.bind
def nir_instr_move(cursor:nir_cursor, instr:c.POINTER[nir_instr]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_instr_remove_v(instr:c.POINTER[nir_instr]) -> None: ...
@dll.bind
def nir_instr_free(instr:c.POINTER[nir_instr]) -> None: ...
@dll.bind
def nir_instr_free_list(list:c.POINTER[struct_exec_list]) -> None: ...
@dll.bind
def nir_instr_free_and_dce(instr:c.POINTER[nir_instr]) -> nir_cursor: ...
@dll.bind
def nir_instr_def(instr:c.POINTER[nir_instr]) -> c.POINTER[nir_def]: ...
# Callback types for def/src iteration helpers.
nir_foreach_def_cb: TypeAlias = c.CFUNCTYPE[Annotated[bool, ctypes.c_bool], [c.POINTER[struct_nir_def], ctypes.c_void_p]]
nir_foreach_src_cb: TypeAlias = c.CFUNCTYPE[Annotated[bool, ctypes.c_bool], [c.POINTER[struct_nir_src], ctypes.c_void_p]]
@dll.bind
def nir_foreach_phi_src_leaving_block(instr:c.POINTER[nir_block], cb:nir_foreach_src_cb, state:ctypes.c_void_p) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_src_as_const_value(src:nir_src) -> c.POINTER[nir_const_value]: ...
@dll.bind
def nir_src_as_string(src:nir_src) -> c.POINTER[Annotated[bytes, ctypes.c_char]]: ...
@dll.bind
def nir_src_is_always_uniform(src:nir_src) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_srcs_equal(src1:nir_src, src2:nir_src) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_instrs_equal(instr1:c.POINTER[nir_instr], instr2:c.POINTER[nir_instr]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_src_get_block(src:c.POINTER[nir_src]) -> c.POINTER[nir_block]: ...
@dll.bind
def nir_instr_init_src(instr:c.POINTER[nir_instr], src:c.POINTER[nir_src], _def:c.POINTER[nir_def]) -> None: ...
@dll.bind
def nir_instr_clear_src(instr:c.POINTER[nir_instr], src:c.POINTER[nir_src]) -> None: ...
@dll.bind
def nir_instr_move_src(dest_instr:c.POINTER[nir_instr], dest:c.POINTER[nir_src], src:c.POINTER[nir_src]) -> None: ...
@dll.bind
def nir_instr_is_before(first:c.POINTER[nir_instr], second:c.POINTER[nir_instr]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_def_init(instr:c.POINTER[nir_instr], _def:c.POINTER[nir_def], num_components:Annotated[int, ctypes.c_uint32], bit_size:Annotated[int, ctypes.c_uint32]) -> None: ...
@dll.bind
def nir_def_rewrite_uses(_def:c.POINTER[nir_def], new_ssa:c.POINTER[nir_def]) -> None: ...
@dll.bind
def nir_def_rewrite_uses_src(_def:c.POINTER[nir_def], new_src:nir_src) -> None: ...
@dll.bind
def nir_def_rewrite_uses_after(_def:c.POINTER[nir_def], new_ssa:c.POINTER[nir_def], after_me:c.POINTER[nir_instr]) -> None: ...
@dll.bind
def nir_src_components_read(src:c.POINTER[nir_src]) -> nir_component_mask_t: ...
@dll.bind
def nir_def_components_read(_def:c.POINTER[nir_def]) -> nir_component_mask_t: ...
@dll.bind
def nir_def_all_uses_are_fsat(_def:c.POINTER[nir_def]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_def_all_uses_ignore_sign_bit(_def:c.POINTER[nir_def]) -> Annotated[bool, ctypes.c_bool]: ...
# --- Native-function stubs: CF-tree/block traversal and indexing helpers.
@dll.bind
def nir_sort_unstructured_blocks(impl:c.POINTER[nir_function_impl]) -> None: ...
@dll.bind
def nir_block_unstructured_next(block:c.POINTER[nir_block]) -> c.POINTER[nir_block]: ...
@dll.bind
def nir_unstructured_start_block(impl:c.POINTER[nir_function_impl]) -> c.POINTER[nir_block]: ...
@dll.bind
def nir_block_cf_tree_next(block:c.POINTER[nir_block]) -> c.POINTER[nir_block]: ...
@dll.bind
def nir_block_cf_tree_prev(block:c.POINTER[nir_block]) -> c.POINTER[nir_block]: ...
@dll.bind
def nir_cf_node_cf_tree_first(node:c.POINTER[nir_cf_node]) -> c.POINTER[nir_block]: ...
@dll.bind
def nir_cf_node_cf_tree_last(node:c.POINTER[nir_cf_node]) -> c.POINTER[nir_block]: ...
@dll.bind
def nir_cf_node_cf_tree_next(node:c.POINTER[nir_cf_node]) -> c.POINTER[nir_block]: ...
@dll.bind
def nir_cf_node_cf_tree_prev(node:c.POINTER[nir_cf_node]) -> c.POINTER[nir_block]: ...
@dll.bind
def nir_block_get_following_if(block:c.POINTER[nir_block]) -> c.POINTER[nir_if]: ...
@dll.bind
def nir_block_get_following_loop(block:c.POINTER[nir_block]) -> c.POINTER[nir_loop]: ...
@dll.bind
def nir_block_get_predecessors_sorted(block:c.POINTER[nir_block], mem_ctx:ctypes.c_void_p) -> c.POINTER[c.POINTER[nir_block]]: ...
@dll.bind
def nir_index_ssa_defs(impl:c.POINTER[nir_function_impl]) -> None: ...
@dll.bind
def nir_index_instrs(impl:c.POINTER[nir_function_impl]) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def nir_index_blocks(impl:c.POINTER[nir_function_impl]) -> None: ...
@dll.bind
def nir_shader_clear_pass_flags(shader:c.POINTER[nir_shader]) -> None: ...
@dll.bind
def nir_shader_index_vars(shader:c.POINTER[nir_shader], modes:nir_variable_mode) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def nir_function_impl_index_vars(impl:c.POINTER[nir_function_impl]) -> Annotated[int, ctypes.c_uint32]: ...
# glibc FILE (struct _IO_FILE) layout, needed because the print/dump entry
# points below take a FILE*. Offsets are the glibc x86-64 ABI as emitted by the
# bindings generator.
@c.record
class struct__IO_FILE(c.Struct):
  SIZE = 216
  _flags: Annotated[Annotated[int, ctypes.c_int32], 0]
  _IO_read_ptr: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 8]
  _IO_read_end: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 16]
  _IO_read_base: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 24]
  _IO_write_base: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 32]
  _IO_write_ptr: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 40]
  _IO_write_end: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 48]
  _IO_buf_base: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 56]
  _IO_buf_end: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 64]
  _IO_save_base: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 72]
  _IO_backup_base: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 80]
  _IO_save_end: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 88]
  _markers: Annotated[c.POINTER[struct__IO_marker], 96]
  _chain: Annotated[c.POINTER[struct__IO_FILE], 104]
  _fileno: Annotated[Annotated[int, ctypes.c_int32], 112]
  _flags2: Annotated[Annotated[int, ctypes.c_int32], 116]
  _old_offset: Annotated[Annotated[int, ctypes.c_int64], 120]
  _cur_column: Annotated[Annotated[int, ctypes.c_uint16], 128]
  _vtable_offset: Annotated[Annotated[int, ctypes.c_byte], 130]
  _shortbuf: Annotated[c.Array[Annotated[bytes, ctypes.c_char], Literal[1]], 131]
  _lock: Annotated[c.POINTER[_IO_lock_t], 136]
  _offset: Annotated[Annotated[int, ctypes.c_int64], 144]
  _codecvt: Annotated[c.POINTER[struct__IO_codecvt], 152]
  _wide_data: Annotated[c.POINTER[struct__IO_wide_data], 160]
  _freeres_list: Annotated[c.POINTER[struct__IO_FILE], 168]
  _freeres_buf: Annotated[ctypes.c_void_p, 176]
  __pad5: Annotated[size_t, 184]
  _mode: Annotated[Annotated[int, ctypes.c_int32], 192]
  _unused2: Annotated[c.Array[Annotated[bytes, ctypes.c_char], Literal[20]], 196]
FILE: TypeAlias = struct__IO_FILE
# Opaque libc helper structs (only ever used behind pointers) and scalar aliases.
class struct__IO_marker(ctypes.Structure): pass
__off_t: TypeAlias = Annotated[int, ctypes.c_int64]
_IO_lock_t: TypeAlias = None
__off64_t: TypeAlias = Annotated[int, ctypes.c_int64]
class struct__IO_codecvt(ctypes.Structure): pass
class struct__IO_wide_data(ctypes.Structure): pass
size_t: TypeAlias = Annotated[int, ctypes.c_uint64]
# --- Native-function stubs: textual dumping of shaders/instructions to a FILE*.
@dll.bind
def nir_print_shader(shader:c.POINTER[nir_shader], fp:c.POINTER[FILE]) -> None: ...
@dll.bind
def nir_print_function_body(impl:c.POINTER[nir_function_impl], fp:c.POINTER[FILE]) -> None: ...
@dll.bind
def nir_print_shader_annotated(shader:c.POINTER[nir_shader], fp:c.POINTER[FILE], errors:c.POINTER[struct_hash_table]) -> None: ...
@dll.bind
def nir_print_instr(instr:c.POINTER[nir_instr], fp:c.POINTER[FILE]) -> None: ...
@dll.bind
def nir_print_deref(deref:c.POINTER[nir_deref_instr], fp:c.POINTER[FILE]) -> None: ...
# Mesa logging severities (lower value = more severe).
class enum_mesa_log_level(Annotated[int, ctypes.c_uint32], c.Enum): pass
MESA_LOG_ERROR = enum_mesa_log_level.define('MESA_LOG_ERROR', 0)
MESA_LOG_WARN = enum_mesa_log_level.define('MESA_LOG_WARN', 1)
MESA_LOG_INFO = enum_mesa_log_level.define('MESA_LOG_INFO', 2)
MESA_LOG_DEBUG = enum_mesa_log_level.define('MESA_LOG_DEBUG', 3)
# --- Native-function stubs: logging and string conversion of shaders.
@dll.bind
def nir_log_shader_annotated_tagged(level:enum_mesa_log_level, tag:c.POINTER[Annotated[bytes, ctypes.c_char]], shader:c.POINTER[nir_shader], annotations:c.POINTER[struct_hash_table]) -> None: ...
@dll.bind
def nir_shader_as_str(nir:c.POINTER[nir_shader], mem_ctx:ctypes.c_void_p) -> c.POINTER[Annotated[bytes, ctypes.c_char]]: ...
@dll.bind
def nir_shader_as_str_annotated(nir:c.POINTER[nir_shader], annotations:c.POINTER[struct_hash_table], mem_ctx:ctypes.c_void_p) -> c.POINTER[Annotated[bytes, ctypes.c_char]]: ...
@dll.bind
def nir_instr_as_str(instr:c.POINTER[nir_instr], mem_ctx:ctypes.c_void_p) -> c.POINTER[Annotated[bytes, ctypes.c_char]]: ...
@dll.bind
def nir_shader_gather_debug_info(shader:c.POINTER[nir_shader], filename:c.POINTER[Annotated[bytes, ctypes.c_char]], first_line:uint32_t) -> c.POINTER[Annotated[bytes, ctypes.c_char]]: ...
# --- Native-function stubs: cloning, serialization round-trip, validation.
@dll.bind
def nir_instr_clone(s:c.POINTER[nir_shader], orig:c.POINTER[nir_instr]) -> c.POINTER[nir_instr]: ...
@dll.bind
def nir_instr_clone_deep(s:c.POINTER[nir_shader], orig:c.POINTER[nir_instr], remap_table:c.POINTER[struct_hash_table]) -> c.POINTER[nir_instr]: ...
@dll.bind
def nir_alu_instr_clone(s:c.POINTER[nir_shader], orig:c.POINTER[nir_alu_instr]) -> c.POINTER[nir_alu_instr]: ...
@dll.bind
def nir_shader_clone(mem_ctx:ctypes.c_void_p, s:c.POINTER[nir_shader]) -> c.POINTER[nir_shader]: ...
@dll.bind
def nir_function_clone(ns:c.POINTER[nir_shader], fxn:c.POINTER[nir_function]) -> c.POINTER[nir_function]: ...
@dll.bind
def nir_function_impl_clone(shader:c.POINTER[nir_shader], fi:c.POINTER[nir_function_impl]) -> c.POINTER[nir_function_impl]: ...
@dll.bind
def nir_function_impl_clone_remap_globals(shader:c.POINTER[nir_shader], fi:c.POINTER[nir_function_impl], remap_table:c.POINTER[struct_hash_table]) -> c.POINTER[nir_function_impl]: ...
@dll.bind
def nir_constant_clone(c:c.POINTER[nir_constant], var:c.POINTER[nir_variable]) -> c.POINTER[nir_constant]: ...
@dll.bind
def nir_variable_clone(c:c.POINTER[nir_variable], shader:c.POINTER[nir_shader]) -> c.POINTER[nir_variable]: ...
@dll.bind
def nir_shader_replace(dest:c.POINTER[nir_shader], src:c.POINTER[nir_shader]) -> None: ...
@dll.bind
def nir_shader_serialize_deserialize(s:c.POINTER[nir_shader]) -> None: ...
@dll.bind
def nir_validate_shader(shader:c.POINTER[nir_shader], when:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> None: ...
@dll.bind
def nir_validate_ssa_dominance(shader:c.POINTER[nir_shader], when:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> None: ...
@dll.bind
def nir_metadata_set_validation_flag(shader:c.POINTER[nir_shader]) -> None: ...
@dll.bind
def nir_metadata_check_validation_flag(shader:c.POINTER[nir_shader]) -> None: ...
@dll.bind
def nir_metadata_require_all(shader:c.POINTER[nir_shader]) -> None: ...
# Callback type: filter instructions by a per-component write mask.
nir_instr_writemask_filter_cb: TypeAlias = c.CFUNCTYPE[Annotated[bool, ctypes.c_bool], [c.POINTER[struct_nir_instr], Annotated[int, ctypes.c_uint32], ctypes.c_void_p]]
# nir_builder: cursor-based instruction emission state used by lowering passes.
@c.record
class struct_nir_builder(c.Struct):
  SIZE = 40
  cursor: Annotated[nir_cursor, 0]
  exact: Annotated[Annotated[bool, ctypes.c_bool], 16]
  fp_fast_math: Annotated[uint32_t, 20]
  shader: Annotated[c.POINTER[nir_shader], 24]
  impl: Annotated[c.POINTER[nir_function_impl], 32]
# Callback type used by the generic lower-instructions drivers below.
nir_lower_instr_cb: TypeAlias = c.CFUNCTYPE[c.POINTER[struct_nir_def], [c.POINTER[struct_nir_builder], c.POINTER[struct_nir_instr], ctypes.c_void_p]]
@dll.bind
def nir_function_impl_lower_instructions(impl:c.POINTER[nir_function_impl], filter:nir_instr_filter_cb, lower:nir_lower_instr_cb, cb_data:ctypes.c_void_p) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_shader_lower_instructions(shader:c.POINTER[nir_shader], filter:nir_instr_filter_cb, lower:nir_lower_instr_cb, cb_data:ctypes.c_void_p) -> Annotated[bool, ctypes.c_bool]: ...
# --- Native-function stubs: dominance analysis and CFG/dom-tree dumping.
@dll.bind
def nir_calc_dominance_impl(impl:c.POINTER[nir_function_impl]) -> None: ...
@dll.bind
def nir_calc_dominance(shader:c.POINTER[nir_shader]) -> None: ...
@dll.bind
def nir_dominance_lca(b1:c.POINTER[nir_block], b2:c.POINTER[nir_block]) -> c.POINTER[nir_block]: ...
@dll.bind
def nir_block_dominates(parent:c.POINTER[nir_block], child:c.POINTER[nir_block]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_block_is_unreachable(block:c.POINTER[nir_block]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_dump_dom_tree_impl(impl:c.POINTER[nir_function_impl], fp:c.POINTER[FILE]) -> None: ...
@dll.bind
def nir_dump_dom_tree(shader:c.POINTER[nir_shader], fp:c.POINTER[FILE]) -> None: ...
@dll.bind
def nir_dump_dom_frontier_impl(impl:c.POINTER[nir_function_impl], fp:c.POINTER[FILE]) -> None: ...
@dll.bind
def nir_dump_dom_frontier(shader:c.POINTER[nir_shader], fp:c.POINTER[FILE]) -> None: ...
@dll.bind
def nir_dump_cfg_impl(impl:c.POINTER[nir_function_impl], fp:c.POINTER[FILE]) -> None: ...
@dll.bind
def nir_dump_cfg(shader:c.POINTER[nir_shader], fp:c.POINTER[FILE]) -> None: ...
@dll.bind
def nir_gs_count_vertices_and_primitives(shader:c.POINTER[nir_shader], out_vtxcnt:c.POINTER[Annotated[int, ctypes.c_int32]], out_prmcnt:c.POINTER[Annotated[int, ctypes.c_int32]], out_decomposed_prmcnt:c.POINTER[Annotated[int, ctypes.c_int32]], num_streams:Annotated[int, ctypes.c_uint32]) -> None: ...
# Grouping policy for nir_group_loads.
class nir_load_grouping(Annotated[int, ctypes.c_uint32], c.Enum): pass
nir_group_all = nir_load_grouping.define('nir_group_all', 0)
nir_group_same_resource_only = nir_load_grouping.define('nir_group_same_resource_only', 1)
@dll.bind
def nir_group_loads(shader:c.POINTER[nir_shader], grouping:nir_load_grouping, max_distance:Annotated[int, ctypes.c_uint32]) -> Annotated[bool, ctypes.c_bool]: ...
# --- Native-function stubs: variable splitting, inlining, and lowering passes.
# --- Most return a bool "progress" flag.
@dll.bind
def nir_shrink_vec_array_vars(shader:c.POINTER[nir_shader], modes:nir_variable_mode) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_split_array_vars(shader:c.POINTER[nir_shader], modes:nir_variable_mode) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_split_var_copies(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_split_per_member_structs(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_split_struct_vars(shader:c.POINTER[nir_shader], modes:nir_variable_mode) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_returns_impl(impl:c.POINTER[nir_function_impl]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_returns(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
nir_builder: TypeAlias = struct_nir_builder
@dll.bind
def nir_inline_function_impl(b:c.POINTER[nir_builder], impl:c.POINTER[nir_function_impl], params:c.POINTER[c.POINTER[nir_def]], shader_var_remap:c.POINTER[struct_hash_table]) -> c.POINTER[nir_def]: ...
@dll.bind
def nir_inline_functions(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_cleanup_functions(shader:c.POINTER[nir_shader]) -> None: ...
@dll.bind
def nir_link_shader_functions(shader:c.POINTER[nir_shader], link_shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_calls_to_builtins(s:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_find_inlinable_uniforms(shader:c.POINTER[nir_shader]) -> None: ...
@dll.bind
def nir_inline_uniforms(shader:c.POINTER[nir_shader], num_uniforms:Annotated[int, ctypes.c_uint32], uniform_values:c.POINTER[uint32_t], uniform_dw_offsets:c.POINTER[uint16_t]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_collect_src_uniforms(src:c.POINTER[nir_src], component:Annotated[int, ctypes.c_int32], uni_offsets:c.POINTER[uint32_t], num_offsets:c.POINTER[uint8_t], max_num_bo:Annotated[int, ctypes.c_uint32], max_offset:Annotated[int, ctypes.c_uint32]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_add_inlinable_uniforms(cond:c.POINTER[nir_src], info:c.POINTER[nir_loop_info], uni_offsets:c.POINTER[uint32_t], num_offsets:c.POINTER[uint8_t], max_num_bo:Annotated[int, ctypes.c_uint32], max_offset:Annotated[int, ctypes.c_uint32]) -> None: ...
@dll.bind
def nir_propagate_invariant(shader:c.POINTER[nir_shader], invariant_prim:Annotated[bool, ctypes.c_bool]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_var_copy_instr(copy:c.POINTER[nir_intrinsic_instr], shader:c.POINTER[nir_shader]) -> None: ...
@dll.bind
def nir_lower_deref_copy_instr(b:c.POINTER[nir_builder], copy:c.POINTER[nir_intrinsic_instr]) -> None: ...
@dll.bind
def nir_lower_var_copies(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_memcpy(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_memcpy(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_fixup_deref_modes(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_fixup_deref_types(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_global_vars_to_local(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_constant_to_temp(shader:c.POINTER[nir_shader]) -> None: ...
# Bitmask flags (note power-of-two values) selecting which array-deref-of-vec
# cases to lower.
class nir_lower_array_deref_of_vec_options(Annotated[int, ctypes.c_uint32], c.Enum): pass
nir_lower_direct_array_deref_of_vec_load = nir_lower_array_deref_of_vec_options.define('nir_lower_direct_array_deref_of_vec_load', 1)
nir_lower_indirect_array_deref_of_vec_load = nir_lower_array_deref_of_vec_options.define('nir_lower_indirect_array_deref_of_vec_load', 2)
nir_lower_direct_array_deref_of_vec_store = nir_lower_array_deref_of_vec_options.define('nir_lower_direct_array_deref_of_vec_store', 4)
nir_lower_indirect_array_deref_of_vec_store = nir_lower_array_deref_of_vec_options.define('nir_lower_indirect_array_deref_of_vec_store', 8)
@dll.bind
def nir_lower_array_deref_of_vec(shader:c.POINTER[nir_shader], modes:nir_variable_mode, filter:c.CFUNCTYPE[Annotated[bool, ctypes.c_bool], [c.POINTER[nir_variable]]], options:nir_lower_array_deref_of_vec_options) -> Annotated[bool, ctypes.c_bool]: ...
# --- Native-function stubs: indirect-deref lowering, info gathering, and
# --- cross-stage varying linking.
@dll.bind
def nir_lower_indirect_derefs(shader:c.POINTER[nir_shader], modes:nir_variable_mode, max_lower_array_len:uint32_t) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_indirect_var_derefs(shader:c.POINTER[nir_shader], vars:c.POINTER[struct_set]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_locals_to_regs(shader:c.POINTER[nir_shader], bool_bitsize:uint8_t) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_io_vars_to_temporaries(shader:c.POINTER[nir_shader], entrypoint:c.POINTER[nir_function_impl], outputs:Annotated[bool, ctypes.c_bool], inputs:Annotated[bool, ctypes.c_bool]) -> Annotated[bool, ctypes.c_bool]: ...
# Callback computing (size, alignment) for a glsl_type via its out-pointers.
glsl_type_size_align_func: TypeAlias = c.CFUNCTYPE[None, [c.POINTER[struct_glsl_type], c.POINTER[Annotated[int, ctypes.c_uint32]], c.POINTER[Annotated[int, ctypes.c_uint32]]]]
@dll.bind
def nir_lower_vars_to_scratch(shader:c.POINTER[nir_shader], modes:nir_variable_mode, size_threshold:Annotated[int, ctypes.c_int32], variable_size_align:glsl_type_size_align_func, scratch_layout_size_align:glsl_type_size_align_func) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_scratch_to_var(nir:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_clip_halfz(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_shader_gather_info(shader:c.POINTER[nir_shader], entrypoint:c.POINTER[nir_function_impl]) -> None: ...
@dll.bind
def nir_gather_types(impl:c.POINTER[nir_function_impl], float_types:c.POINTER[Annotated[int, ctypes.c_uint32]], int_types:c.POINTER[Annotated[int, ctypes.c_uint32]]) -> None: ...
@dll.bind
def nir_remove_unused_varyings(producer:c.POINTER[nir_shader], consumer:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_remove_unused_io_vars(shader:c.POINTER[nir_shader], mode:nir_variable_mode, used_by_other_stage:c.POINTER[uint64_t], used_by_other_stage_patches:c.POINTER[uint64_t]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_compact_varyings(producer:c.POINTER[nir_shader], consumer:c.POINTER[nir_shader], default_to_smooth_interp:Annotated[bool, ctypes.c_bool]) -> None: ...
@dll.bind
def nir_link_xfb_varyings(producer:c.POINTER[nir_shader], consumer:c.POINTER[nir_shader]) -> None: ...
@dll.bind
def nir_link_opt_varyings(producer:c.POINTER[nir_shader], consumer:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_link_varying_precision(producer:c.POINTER[nir_shader], consumer:c.POINTER[nir_shader]) -> None: ...
@dll.bind
def nir_clone_uniform_variable(nir:c.POINTER[nir_shader], uniform:c.POINTER[nir_variable], spirv:Annotated[bool, ctypes.c_bool]) -> c.POINTER[nir_variable]: ...
@dll.bind
def nir_clone_deref_instr(b:c.POINTER[nir_builder], var:c.POINTER[nir_variable], deref:c.POINTER[nir_deref_instr]) -> c.POINTER[nir_deref_instr]: ...
# Bitmask result of nir_opt_varyings: which side(s) of the producer/consumer pair changed.
class nir_opt_varyings_progress(Annotated[int, ctypes.c_uint32], c.Enum): pass
nir_progress_producer = nir_opt_varyings_progress.define('nir_progress_producer', 1)
nir_progress_consumer = nir_opt_varyings_progress.define('nir_progress_consumer', 2)
@dll.bind
def nir_opt_varyings(producer:c.POINTER[nir_shader], consumer:c.POINTER[nir_shader], spirv:Annotated[bool, ctypes.c_bool], max_uniform_components:Annotated[int, ctypes.c_uint32], max_ubos_per_stage:Annotated[int, ctypes.c_uint32], debug_no_algebraic:Annotated[bool, ctypes.c_bool]) -> nir_opt_varyings_progress: ...
# gl_varying_slot: slot indices for inter-stage shader varyings (auto-generated table).
# NOTE(review): several numeric values are shared by more than one name below
# (24, 26, 27, 28) — presumably stage-dependent aliases inherited from the C enum;
# do not "fix" the duplicates.
class gl_varying_slot(Annotated[int, ctypes.c_uint32], c.Enum): pass
VARYING_SLOT_POS = gl_varying_slot.define('VARYING_SLOT_POS', 0)
VARYING_SLOT_COL0 = gl_varying_slot.define('VARYING_SLOT_COL0', 1)
VARYING_SLOT_COL1 = gl_varying_slot.define('VARYING_SLOT_COL1', 2)
VARYING_SLOT_FOGC = gl_varying_slot.define('VARYING_SLOT_FOGC', 3)
VARYING_SLOT_TEX0 = gl_varying_slot.define('VARYING_SLOT_TEX0', 4)
VARYING_SLOT_TEX1 = gl_varying_slot.define('VARYING_SLOT_TEX1', 5)
VARYING_SLOT_TEX2 = gl_varying_slot.define('VARYING_SLOT_TEX2', 6)
VARYING_SLOT_TEX3 = gl_varying_slot.define('VARYING_SLOT_TEX3', 7)
VARYING_SLOT_TEX4 = gl_varying_slot.define('VARYING_SLOT_TEX4', 8)
VARYING_SLOT_TEX5 = gl_varying_slot.define('VARYING_SLOT_TEX5', 9)
VARYING_SLOT_TEX6 = gl_varying_slot.define('VARYING_SLOT_TEX6', 10)
VARYING_SLOT_TEX7 = gl_varying_slot.define('VARYING_SLOT_TEX7', 11)
VARYING_SLOT_PSIZ = gl_varying_slot.define('VARYING_SLOT_PSIZ', 12)
VARYING_SLOT_BFC0 = gl_varying_slot.define('VARYING_SLOT_BFC0', 13)
VARYING_SLOT_BFC1 = gl_varying_slot.define('VARYING_SLOT_BFC1', 14)
VARYING_SLOT_EDGE = gl_varying_slot.define('VARYING_SLOT_EDGE', 15)
VARYING_SLOT_CLIP_VERTEX = gl_varying_slot.define('VARYING_SLOT_CLIP_VERTEX', 16)
VARYING_SLOT_CLIP_DIST0 = gl_varying_slot.define('VARYING_SLOT_CLIP_DIST0', 17)
VARYING_SLOT_CLIP_DIST1 = gl_varying_slot.define('VARYING_SLOT_CLIP_DIST1', 18)
VARYING_SLOT_CULL_DIST0 = gl_varying_slot.define('VARYING_SLOT_CULL_DIST0', 19)
VARYING_SLOT_CULL_DIST1 = gl_varying_slot.define('VARYING_SLOT_CULL_DIST1', 20)
VARYING_SLOT_PRIMITIVE_ID = gl_varying_slot.define('VARYING_SLOT_PRIMITIVE_ID', 21)
VARYING_SLOT_LAYER = gl_varying_slot.define('VARYING_SLOT_LAYER', 22)
VARYING_SLOT_VIEWPORT = gl_varying_slot.define('VARYING_SLOT_VIEWPORT', 23)
VARYING_SLOT_FACE = gl_varying_slot.define('VARYING_SLOT_FACE', 24)
VARYING_SLOT_PNTC = gl_varying_slot.define('VARYING_SLOT_PNTC', 25)
VARYING_SLOT_TESS_LEVEL_OUTER = gl_varying_slot.define('VARYING_SLOT_TESS_LEVEL_OUTER', 26)
VARYING_SLOT_TESS_LEVEL_INNER = gl_varying_slot.define('VARYING_SLOT_TESS_LEVEL_INNER', 27)
VARYING_SLOT_BOUNDING_BOX0 = gl_varying_slot.define('VARYING_SLOT_BOUNDING_BOX0', 28)
VARYING_SLOT_BOUNDING_BOX1 = gl_varying_slot.define('VARYING_SLOT_BOUNDING_BOX1', 29)
VARYING_SLOT_VIEW_INDEX = gl_varying_slot.define('VARYING_SLOT_VIEW_INDEX', 30)
VARYING_SLOT_VIEWPORT_MASK = gl_varying_slot.define('VARYING_SLOT_VIEWPORT_MASK', 31)
# Aliased slots: these reuse values 24/26/27/28 assigned above.
VARYING_SLOT_PRIMITIVE_SHADING_RATE = gl_varying_slot.define('VARYING_SLOT_PRIMITIVE_SHADING_RATE', 24)
VARYING_SLOT_PRIMITIVE_COUNT = gl_varying_slot.define('VARYING_SLOT_PRIMITIVE_COUNT', 26)
VARYING_SLOT_PRIMITIVE_INDICES = gl_varying_slot.define('VARYING_SLOT_PRIMITIVE_INDICES', 27)
VARYING_SLOT_TASK_COUNT = gl_varying_slot.define('VARYING_SLOT_TASK_COUNT', 28)
VARYING_SLOT_CULL_PRIMITIVE = gl_varying_slot.define('VARYING_SLOT_CULL_PRIMITIVE', 28)
# Generic varyings VAR0..VAR31 occupy 32..63.
VARYING_SLOT_VAR0 = gl_varying_slot.define('VARYING_SLOT_VAR0', 32)
VARYING_SLOT_VAR1 = gl_varying_slot.define('VARYING_SLOT_VAR1', 33)
VARYING_SLOT_VAR2 = gl_varying_slot.define('VARYING_SLOT_VAR2', 34)
VARYING_SLOT_VAR3 = gl_varying_slot.define('VARYING_SLOT_VAR3', 35)
VARYING_SLOT_VAR4 = gl_varying_slot.define('VARYING_SLOT_VAR4', 36)
VARYING_SLOT_VAR5 = gl_varying_slot.define('VARYING_SLOT_VAR5', 37)
VARYING_SLOT_VAR6 = gl_varying_slot.define('VARYING_SLOT_VAR6', 38)
VARYING_SLOT_VAR7 = gl_varying_slot.define('VARYING_SLOT_VAR7', 39)
VARYING_SLOT_VAR8 = gl_varying_slot.define('VARYING_SLOT_VAR8', 40)
VARYING_SLOT_VAR9 = gl_varying_slot.define('VARYING_SLOT_VAR9', 41)
VARYING_SLOT_VAR10 = gl_varying_slot.define('VARYING_SLOT_VAR10', 42)
VARYING_SLOT_VAR11 = gl_varying_slot.define('VARYING_SLOT_VAR11', 43)
VARYING_SLOT_VAR12 = gl_varying_slot.define('VARYING_SLOT_VAR12', 44)
VARYING_SLOT_VAR13 = gl_varying_slot.define('VARYING_SLOT_VAR13', 45)
VARYING_SLOT_VAR14 = gl_varying_slot.define('VARYING_SLOT_VAR14', 46)
VARYING_SLOT_VAR15 = gl_varying_slot.define('VARYING_SLOT_VAR15', 47)
VARYING_SLOT_VAR16 = gl_varying_slot.define('VARYING_SLOT_VAR16', 48)
VARYING_SLOT_VAR17 = gl_varying_slot.define('VARYING_SLOT_VAR17', 49)
VARYING_SLOT_VAR18 = gl_varying_slot.define('VARYING_SLOT_VAR18', 50)
VARYING_SLOT_VAR19 = gl_varying_slot.define('VARYING_SLOT_VAR19', 51)
VARYING_SLOT_VAR20 = gl_varying_slot.define('VARYING_SLOT_VAR20', 52)
VARYING_SLOT_VAR21 = gl_varying_slot.define('VARYING_SLOT_VAR21', 53)
VARYING_SLOT_VAR22 = gl_varying_slot.define('VARYING_SLOT_VAR22', 54)
VARYING_SLOT_VAR23 = gl_varying_slot.define('VARYING_SLOT_VAR23', 55)
VARYING_SLOT_VAR24 = gl_varying_slot.define('VARYING_SLOT_VAR24', 56)
VARYING_SLOT_VAR25 = gl_varying_slot.define('VARYING_SLOT_VAR25', 57)
VARYING_SLOT_VAR26 = gl_varying_slot.define('VARYING_SLOT_VAR26', 58)
VARYING_SLOT_VAR27 = gl_varying_slot.define('VARYING_SLOT_VAR27', 59)
VARYING_SLOT_VAR28 = gl_varying_slot.define('VARYING_SLOT_VAR28', 60)
VARYING_SLOT_VAR29 = gl_varying_slot.define('VARYING_SLOT_VAR29', 61)
VARYING_SLOT_VAR30 = gl_varying_slot.define('VARYING_SLOT_VAR30', 62)
VARYING_SLOT_VAR31 = gl_varying_slot.define('VARYING_SLOT_VAR31', 63)
# Tessellation patch slots PATCH0..PATCH31 occupy 64..95.
VARYING_SLOT_PATCH0 = gl_varying_slot.define('VARYING_SLOT_PATCH0', 64)
VARYING_SLOT_PATCH1 = gl_varying_slot.define('VARYING_SLOT_PATCH1', 65)
VARYING_SLOT_PATCH2 = gl_varying_slot.define('VARYING_SLOT_PATCH2', 66)
VARYING_SLOT_PATCH3 = gl_varying_slot.define('VARYING_SLOT_PATCH3', 67)
VARYING_SLOT_PATCH4 = gl_varying_slot.define('VARYING_SLOT_PATCH4', 68)
VARYING_SLOT_PATCH5 = gl_varying_slot.define('VARYING_SLOT_PATCH5', 69)
VARYING_SLOT_PATCH6 = gl_varying_slot.define('VARYING_SLOT_PATCH6', 70)
VARYING_SLOT_PATCH7 = gl_varying_slot.define('VARYING_SLOT_PATCH7', 71)
VARYING_SLOT_PATCH8 = gl_varying_slot.define('VARYING_SLOT_PATCH8', 72)
VARYING_SLOT_PATCH9 = gl_varying_slot.define('VARYING_SLOT_PATCH9', 73)
VARYING_SLOT_PATCH10 = gl_varying_slot.define('VARYING_SLOT_PATCH10', 74)
VARYING_SLOT_PATCH11 = gl_varying_slot.define('VARYING_SLOT_PATCH11', 75)
VARYING_SLOT_PATCH12 = gl_varying_slot.define('VARYING_SLOT_PATCH12', 76)
VARYING_SLOT_PATCH13 = gl_varying_slot.define('VARYING_SLOT_PATCH13', 77)
VARYING_SLOT_PATCH14 = gl_varying_slot.define('VARYING_SLOT_PATCH14', 78)
VARYING_SLOT_PATCH15 = gl_varying_slot.define('VARYING_SLOT_PATCH15', 79)
VARYING_SLOT_PATCH16 = gl_varying_slot.define('VARYING_SLOT_PATCH16', 80)
VARYING_SLOT_PATCH17 = gl_varying_slot.define('VARYING_SLOT_PATCH17', 81)
VARYING_SLOT_PATCH18 = gl_varying_slot.define('VARYING_SLOT_PATCH18', 82)
VARYING_SLOT_PATCH19 = gl_varying_slot.define('VARYING_SLOT_PATCH19', 83)
VARYING_SLOT_PATCH20 = gl_varying_slot.define('VARYING_SLOT_PATCH20', 84)
VARYING_SLOT_PATCH21 = gl_varying_slot.define('VARYING_SLOT_PATCH21', 85)
VARYING_SLOT_PATCH22 = gl_varying_slot.define('VARYING_SLOT_PATCH22', 86)
VARYING_SLOT_PATCH23 = gl_varying_slot.define('VARYING_SLOT_PATCH23', 87)
VARYING_SLOT_PATCH24 = gl_varying_slot.define('VARYING_SLOT_PATCH24', 88)
VARYING_SLOT_PATCH25 = gl_varying_slot.define('VARYING_SLOT_PATCH25', 89)
VARYING_SLOT_PATCH26 = gl_varying_slot.define('VARYING_SLOT_PATCH26', 90)
VARYING_SLOT_PATCH27 = gl_varying_slot.define('VARYING_SLOT_PATCH27', 91)
VARYING_SLOT_PATCH28 = gl_varying_slot.define('VARYING_SLOT_PATCH28', 92)
VARYING_SLOT_PATCH29 = gl_varying_slot.define('VARYING_SLOT_PATCH29', 93)
VARYING_SLOT_PATCH30 = gl_varying_slot.define('VARYING_SLOT_PATCH30', 94)
VARYING_SLOT_PATCH31 = gl_varying_slot.define('VARYING_SLOT_PATCH31', 95)
# 16-bit packed generic varyings occupy 96..111.
VARYING_SLOT_VAR0_16BIT = gl_varying_slot.define('VARYING_SLOT_VAR0_16BIT', 96)
VARYING_SLOT_VAR1_16BIT = gl_varying_slot.define('VARYING_SLOT_VAR1_16BIT', 97)
VARYING_SLOT_VAR2_16BIT = gl_varying_slot.define('VARYING_SLOT_VAR2_16BIT', 98)
VARYING_SLOT_VAR3_16BIT = gl_varying_slot.define('VARYING_SLOT_VAR3_16BIT', 99)
VARYING_SLOT_VAR4_16BIT = gl_varying_slot.define('VARYING_SLOT_VAR4_16BIT', 100)
VARYING_SLOT_VAR5_16BIT = gl_varying_slot.define('VARYING_SLOT_VAR5_16BIT', 101)
VARYING_SLOT_VAR6_16BIT = gl_varying_slot.define('VARYING_SLOT_VAR6_16BIT', 102)
VARYING_SLOT_VAR7_16BIT = gl_varying_slot.define('VARYING_SLOT_VAR7_16BIT', 103)
VARYING_SLOT_VAR8_16BIT = gl_varying_slot.define('VARYING_SLOT_VAR8_16BIT', 104)
VARYING_SLOT_VAR9_16BIT = gl_varying_slot.define('VARYING_SLOT_VAR9_16BIT', 105)
VARYING_SLOT_VAR10_16BIT = gl_varying_slot.define('VARYING_SLOT_VAR10_16BIT', 106)
VARYING_SLOT_VAR11_16BIT = gl_varying_slot.define('VARYING_SLOT_VAR11_16BIT', 107)
VARYING_SLOT_VAR12_16BIT = gl_varying_slot.define('VARYING_SLOT_VAR12_16BIT', 108)
VARYING_SLOT_VAR13_16BIT = gl_varying_slot.define('VARYING_SLOT_VAR13_16BIT', 109)
VARYING_SLOT_VAR14_16BIT = gl_varying_slot.define('VARYING_SLOT_VAR14_16BIT', 110)
VARYING_SLOT_VAR15_16BIT = gl_varying_slot.define('VARYING_SLOT_VAR15_16BIT', 111)
NUM_TOTAL_VARYING_SLOTS = gl_varying_slot.define('NUM_TOTAL_VARYING_SLOTS', 112)
# Slot classification / IO bookkeeping bindings; each takes the varying slot and the
# shader stage that consumes it.
@dll.bind
def nir_slot_is_sysval_output(slot:gl_varying_slot, next_shader:gl_shader_stage) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_slot_is_varying(slot:gl_varying_slot, next_shader:gl_shader_stage) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_slot_is_sysval_output_and_varying(slot:gl_varying_slot, next_shader:gl_shader_stage) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_remove_varying(intr:c.POINTER[nir_intrinsic_instr], next_shader:gl_shader_stage) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_remove_sysval_output(intr:c.POINTER[nir_intrinsic_instr], next_shader:gl_shader_stage) -> Annotated[bool, ctypes.c_bool]: ...
# type_size here is a C callback: (glsl_type*, bool) -> int32.
@dll.bind
def nir_lower_amul(shader:c.POINTER[nir_shader], type_size:c.CFUNCTYPE[Annotated[int, ctypes.c_int32], [c.POINTER[struct_glsl_type], Annotated[bool, ctypes.c_bool]]]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_ubo_vec4(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_sort_variables_by_location(shader:c.POINTER[nir_shader], mode:nir_variable_mode) -> None: ...
@dll.bind
def nir_assign_io_var_locations(shader:c.POINTER[nir_shader], mode:nir_variable_mode, size:c.POINTER[Annotated[int, ctypes.c_uint32]], stage:gl_shader_stage) -> None: ...
@dll.bind
def nir_opt_clip_cull_const(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
# Flag bits controlling nir_lower_io (power-of-two values, combinable as a bitmask).
class nir_lower_io_options(Annotated[int, ctypes.c_uint32], c.Enum): pass
nir_lower_io_lower_64bit_to_32 = nir_lower_io_options.define('nir_lower_io_lower_64bit_to_32', 1)
nir_lower_io_lower_64bit_float_to_32 = nir_lower_io_options.define('nir_lower_io_lower_64bit_float_to_32', 2)
nir_lower_io_lower_64bit_to_32_new = nir_lower_io_options.define('nir_lower_io_lower_64bit_to_32_new', 4)
nir_lower_io_use_interpolated_input_intrinsics = nir_lower_io_options.define('nir_lower_io_use_interpolated_input_intrinsics', 8)
@dll.bind
def nir_lower_io(shader:c.POINTER[nir_shader], modes:nir_variable_mode, type_size:c.CFUNCTYPE[Annotated[int, ctypes.c_int32], [c.POINTER[struct_glsl_type], Annotated[bool, ctypes.c_bool]]], _3:nir_lower_io_options) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_io_add_const_offset_to_base(nir:c.POINTER[nir_shader], modes:nir_variable_mode) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_io_passes(nir:c.POINTER[nir_shader], renumber_vs_inputs:Annotated[bool, ctypes.c_bool]) -> None: ...
@dll.bind
def nir_io_add_intrinsic_xfb_info(nir:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_io_indirect_loads(nir:c.POINTER[nir_shader], modes:nir_variable_mode) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_vars_to_explicit_types(shader:c.POINTER[nir_shader], modes:nir_variable_mode, type_info:glsl_type_size_align_func) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_gather_explicit_io_initializers(shader:c.POINTER[nir_shader], dst:ctypes.c_void_p, dst_size:size_t, mode:nir_variable_mode) -> None: ...
@dll.bind
def nir_lower_vec3_to_vec4(shader:c.POINTER[nir_shader], modes:nir_variable_mode) -> Annotated[bool, ctypes.c_bool]: ...
# Address representations for explicit-IO lowering (sequential enum, not a bitmask).
class nir_address_format(Annotated[int, ctypes.c_uint32], c.Enum): pass
nir_address_format_32bit_global = nir_address_format.define('nir_address_format_32bit_global', 0)
nir_address_format_64bit_global = nir_address_format.define('nir_address_format_64bit_global', 1)
nir_address_format_2x32bit_global = nir_address_format.define('nir_address_format_2x32bit_global', 2)
nir_address_format_64bit_global_32bit_offset = nir_address_format.define('nir_address_format_64bit_global_32bit_offset', 3)
nir_address_format_64bit_bounded_global = nir_address_format.define('nir_address_format_64bit_bounded_global', 4)
nir_address_format_32bit_index_offset = nir_address_format.define('nir_address_format_32bit_index_offset', 5)
nir_address_format_32bit_index_offset_pack64 = nir_address_format.define('nir_address_format_32bit_index_offset_pack64', 6)
nir_address_format_vec2_index_32bit_offset = nir_address_format.define('nir_address_format_vec2_index_32bit_offset', 7)
nir_address_format_62bit_generic = nir_address_format.define('nir_address_format_62bit_generic', 8)
nir_address_format_32bit_offset = nir_address_format.define('nir_address_format_32bit_offset', 9)
nir_address_format_32bit_offset_as_64bit = nir_address_format.define('nir_address_format_32bit_offset_as_64bit', 10)
nir_address_format_logical = nir_address_format.define('nir_address_format_logical', 11)
# Queries describing a given address format.
@dll.bind
def nir_address_format_bit_size(addr_format:nir_address_format) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def nir_address_format_num_components(addr_format:nir_address_format) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def nir_address_format_null_value(addr_format:nir_address_format) -> c.POINTER[nir_const_value]: ...
# Builder helpers producing new nir_def address values.
@dll.bind
def nir_build_addr_iadd(b:c.POINTER[nir_builder], addr:c.POINTER[nir_def], addr_format:nir_address_format, modes:nir_variable_mode, offset:c.POINTER[nir_def]) -> c.POINTER[nir_def]: ...
@dll.bind
def nir_build_addr_iadd_imm(b:c.POINTER[nir_builder], addr:c.POINTER[nir_def], addr_format:nir_address_format, modes:nir_variable_mode, offset:int64_t) -> c.POINTER[nir_def]: ...
@dll.bind
def nir_build_addr_ieq(b:c.POINTER[nir_builder], addr0:c.POINTER[nir_def], addr1:c.POINTER[nir_def], addr_format:nir_address_format) -> c.POINTER[nir_def]: ...
@dll.bind
def nir_build_addr_isub(b:c.POINTER[nir_builder], addr0:c.POINTER[nir_def], addr1:c.POINTER[nir_def], addr_format:nir_address_format) -> c.POINTER[nir_def]: ...
@dll.bind
def nir_explicit_io_address_from_deref(b:c.POINTER[nir_builder], deref:c.POINTER[nir_deref_instr], base_addr:c.POINTER[nir_def], addr_format:nir_address_format) -> c.POINTER[nir_def]: ...
@dll.bind
def nir_get_explicit_deref_align(deref:c.POINTER[nir_deref_instr], default_to_type_align:Annotated[bool, ctypes.c_bool], align_mul:c.POINTER[uint32_t], align_offset:c.POINTER[uint32_t]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_explicit_io_instr(b:c.POINTER[nir_builder], io_instr:c.POINTER[nir_intrinsic_instr], addr:c.POINTER[nir_def], addr_format:nir_address_format) -> None: ...
@dll.bind
def nir_lower_explicit_io(shader:c.POINTER[nir_shader], modes:nir_variable_mode, _2:nir_address_format) -> Annotated[bool, ctypes.c_bool]: ...
# Strategy used to realign sub-word memory accesses after size lowering.
class nir_mem_access_shift_method(Annotated[int, ctypes.c_uint32], c.Enum): pass
nir_mem_access_shift_method_scalar = nir_mem_access_shift_method.define('nir_mem_access_shift_method_scalar', 0)
nir_mem_access_shift_method_shift64 = nir_mem_access_shift_method.define('nir_mem_access_shift_method_shift64', 1)
nir_mem_access_shift_method_bytealign_amd = nir_mem_access_shift_method.define('nir_mem_access_shift_method_bytealign_amd', 2)
# C struct mirror; the second Annotated argument is the field's byte offset.
# SIZE/offsets encode the C ABI layout — never hand-edit them.
@c.record
class struct_nir_mem_access_size_align(c.Struct):
  SIZE = 8
  num_components: Annotated[uint8_t, 0]
  bit_size: Annotated[uint8_t, 1]
  align: Annotated[uint16_t, 2]
  shift: Annotated[nir_mem_access_shift_method, 4]
nir_mem_access_size_align: TypeAlias = struct_nir_mem_access_size_align
# Memory-access qualifier bitmask (all values are powers of two; values may be OR'd).
class enum_gl_access_qualifier(Annotated[int, ctypes.c_uint32], c.Enum): pass
ACCESS_COHERENT = enum_gl_access_qualifier.define('ACCESS_COHERENT', 1)
ACCESS_RESTRICT = enum_gl_access_qualifier.define('ACCESS_RESTRICT', 2)
ACCESS_VOLATILE = enum_gl_access_qualifier.define('ACCESS_VOLATILE', 4)
ACCESS_NON_READABLE = enum_gl_access_qualifier.define('ACCESS_NON_READABLE', 8)
ACCESS_NON_WRITEABLE = enum_gl_access_qualifier.define('ACCESS_NON_WRITEABLE', 16)
ACCESS_NON_UNIFORM = enum_gl_access_qualifier.define('ACCESS_NON_UNIFORM', 32)
ACCESS_CAN_REORDER = enum_gl_access_qualifier.define('ACCESS_CAN_REORDER', 64)
ACCESS_NON_TEMPORAL = enum_gl_access_qualifier.define('ACCESS_NON_TEMPORAL', 128)
ACCESS_INCLUDE_HELPERS = enum_gl_access_qualifier.define('ACCESS_INCLUDE_HELPERS', 256)
ACCESS_IS_SWIZZLED_AMD = enum_gl_access_qualifier.define('ACCESS_IS_SWIZZLED_AMD', 512)
ACCESS_USES_FORMAT_AMD = enum_gl_access_qualifier.define('ACCESS_USES_FORMAT_AMD', 1024)
ACCESS_FMASK_LOWERED_AMD = enum_gl_access_qualifier.define('ACCESS_FMASK_LOWERED_AMD', 2048)
ACCESS_CAN_SPECULATE = enum_gl_access_qualifier.define('ACCESS_CAN_SPECULATE', 4096)
ACCESS_CP_GE_COHERENT_AMD = enum_gl_access_qualifier.define('ACCESS_CP_GE_COHERENT_AMD', 8192)
ACCESS_IN_BOUNDS = enum_gl_access_qualifier.define('ACCESS_IN_BOUNDS', 16384)
ACCESS_KEEP_SCALAR = enum_gl_access_qualifier.define('ACCESS_KEEP_SCALAR', 32768)
ACCESS_SMEM_AMD = enum_gl_access_qualifier.define('ACCESS_SMEM_AMD', 65536)
# Callback computing the allowed size/align for one memory access; returns
# struct_nir_mem_access_size_align by value.
nir_lower_mem_access_bit_sizes_cb: TypeAlias = c.CFUNCTYPE[struct_nir_mem_access_size_align, [nir_intrinsic_op, Annotated[int, ctypes.c_ubyte], Annotated[int, ctypes.c_ubyte], Annotated[int, ctypes.c_uint32], Annotated[int, ctypes.c_uint32], Annotated[bool, ctypes.c_bool], enum_gl_access_qualifier, ctypes.c_void_p]]
# Options struct for nir_lower_mem_access_bit_sizes; Annotated offsets mirror the
# C layout (note the 3-byte pad between the bool at 12 and the pointer at 16).
@c.record
class struct_nir_lower_mem_access_bit_sizes_options(c.Struct):
  SIZE = 24
  callback: Annotated[nir_lower_mem_access_bit_sizes_cb, 0]
  modes: Annotated[nir_variable_mode, 8]
  may_lower_unaligned_stores_to_atomics: Annotated[Annotated[bool, ctypes.c_bool], 12]
  cb_data: Annotated[ctypes.c_void_p, 16]
nir_lower_mem_access_bit_sizes_options: TypeAlias = struct_nir_lower_mem_access_bit_sizes_options
@dll.bind
def nir_lower_mem_access_bit_sizes(shader:c.POINTER[nir_shader], options:c.POINTER[nir_lower_mem_access_bit_sizes_options]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_robust_access(s:c.POINTER[nir_shader], filter:nir_intrin_filter_cb, data:ctypes.c_void_p) -> Annotated[bool, ctypes.c_bool]: ...
# Callback deciding whether two memory intrinsics may be vectorized together.
nir_should_vectorize_mem_func: TypeAlias = c.CFUNCTYPE[Annotated[bool, ctypes.c_bool], [Annotated[int, ctypes.c_uint32], Annotated[int, ctypes.c_uint32], Annotated[int, ctypes.c_uint32], Annotated[int, ctypes.c_uint32], Annotated[int, ctypes.c_int64], c.POINTER[struct_nir_intrinsic_instr], c.POINTER[struct_nir_intrinsic_instr], ctypes.c_void_p]]
# Options for nir_opt_load_store_vectorize; Annotated offsets mirror the C layout.
@c.record
class struct_nir_load_store_vectorize_options(c.Struct):
  SIZE = 32
  callback: Annotated[nir_should_vectorize_mem_func, 0]
  modes: Annotated[nir_variable_mode, 8]
  robust_modes: Annotated[nir_variable_mode, 12]
  cb_data: Annotated[ctypes.c_void_p, 16]
  has_shared2_amd: Annotated[Annotated[bool, ctypes.c_bool], 24]
nir_load_store_vectorize_options: TypeAlias = struct_nir_load_store_vectorize_options
@dll.bind
def nir_opt_load_store_vectorize(shader:c.POINTER[nir_shader], options:c.POINTER[nir_load_store_vectorize_options]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_load_store_update_alignments(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
# Callback deciding whether an instruction should be rematerialized across a shader call.
nir_lower_shader_calls_should_remat_func: TypeAlias = c.CFUNCTYPE[Annotated[bool, ctypes.c_bool], [c.POINTER[struct_nir_instr], ctypes.c_void_p]]
# Options for nir_lower_shader_calls; Annotated offsets mirror the C layout
# (pointer fields are 8-byte aligned, hence the gap after the bool at 8).
@c.record
class struct_nir_lower_shader_calls_options(c.Struct):
  SIZE = 48
  address_format: Annotated[nir_address_format, 0]
  stack_alignment: Annotated[Annotated[int, ctypes.c_uint32], 4]
  localized_loads: Annotated[Annotated[bool, ctypes.c_bool], 8]
  vectorizer_callback: Annotated[nir_should_vectorize_mem_func, 16]
  vectorizer_data: Annotated[ctypes.c_void_p, 24]
  should_remat_callback: Annotated[nir_lower_shader_calls_should_remat_func, 32]
  should_remat_data: Annotated[ctypes.c_void_p, 40]
nir_lower_shader_calls_options: TypeAlias = struct_nir_lower_shader_calls_options
# Returns resume shaders through the out-pointer pair (array pointer + count).
@dll.bind
def nir_lower_shader_calls(shader:c.POINTER[nir_shader], options:c.POINTER[nir_lower_shader_calls_options], resume_shaders_out:c.POINTER[c.POINTER[c.POINTER[nir_shader]]], num_resume_shaders_out:c.POINTER[uint32_t], mem_ctx:ctypes.c_void_p) -> Annotated[bool, ctypes.c_bool]: ...
# Accessors locating particular sources of IO intrinsics: *_src_number variants
# return the source index, *_src variants return the nir_src pointer itself.
@dll.bind
def nir_get_io_offset_src_number(instr:c.POINTER[nir_intrinsic_instr]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def nir_get_io_index_src_number(instr:c.POINTER[nir_intrinsic_instr]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def nir_get_io_arrayed_index_src_number(instr:c.POINTER[nir_intrinsic_instr]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def nir_get_io_offset_src(instr:c.POINTER[nir_intrinsic_instr]) -> c.POINTER[nir_src]: ...
@dll.bind
def nir_get_io_index_src(instr:c.POINTER[nir_intrinsic_instr]) -> c.POINTER[nir_src]: ...
@dll.bind
def nir_get_io_arrayed_index_src(instr:c.POINTER[nir_intrinsic_instr]) -> c.POINTER[nir_src]: ...
@dll.bind
def nir_get_shader_call_payload_src(call:c.POINTER[nir_intrinsic_instr]) -> c.POINTER[nir_src]: ...
@dll.bind
def nir_is_output_load(intr:c.POINTER[nir_intrinsic_instr]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_is_arrayed_io(var:c.POINTER[nir_variable], stage:gl_shader_stage) -> Annotated[bool, ctypes.c_bool]: ...
# SSA conversion passes (per-impl and whole-shader variants).
@dll.bind
def nir_lower_reg_intrinsics_to_ssa_impl(impl:c.POINTER[nir_function_impl]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_reg_intrinsics_to_ssa(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_vars_to_ssa(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_remove_dead_derefs(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_remove_dead_derefs_impl(impl:c.POINTER[nir_function_impl]) -> Annotated[bool, ctypes.c_bool]: ...
# Options for nir_remove_dead_variables: an optional per-variable veto callback
# plus its user-data pointer. Annotated offsets mirror the C layout.
@c.record
class struct_nir_remove_dead_variables_options(c.Struct):
  SIZE = 16
  can_remove_var: Annotated[c.CFUNCTYPE[Annotated[bool, ctypes.c_bool], [c.POINTER[nir_variable], ctypes.c_void_p]], 0]
  can_remove_var_data: Annotated[ctypes.c_void_p, 8]
nir_remove_dead_variables_options: TypeAlias = struct_nir_remove_dead_variables_options
@dll.bind
def nir_remove_dead_variables(shader:c.POINTER[nir_shader], modes:nir_variable_mode, options:c.POINTER[nir_remove_dead_variables_options]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_variable_initializers(shader:c.POINTER[nir_shader], modes:nir_variable_mode) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_zero_initialize_shared_memory(shader:c.POINTER[nir_shader], shared_size:Annotated[int, ctypes.c_uint32], chunk_size:Annotated[int, ctypes.c_uint32]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_clear_shared_memory(shader:c.POINTER[nir_shader], shared_size:Annotated[int, ctypes.c_uint32], chunk_size:Annotated[int, ctypes.c_uint32]) -> Annotated[bool, ctypes.c_bool]: ...
# Flag bits controlling nir_opt_move_to_top (power-of-two values, combinable).
class nir_opt_move_to_top_options(Annotated[int, ctypes.c_uint32], c.Enum): pass
nir_move_to_entry_block_only = nir_opt_move_to_top_options.define('nir_move_to_entry_block_only', 1)
nir_move_to_top_input_loads = nir_opt_move_to_top_options.define('nir_move_to_top_input_loads', 2)
nir_move_to_top_load_smem_amd = nir_opt_move_to_top_options.define('nir_move_to_top_load_smem_amd', 4)
@dll.bind
def nir_opt_move_to_top(nir:c.POINTER[nir_shader], options:nir_opt_move_to_top_options) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_move_vec_src_uses_to_dest(shader:c.POINTER[nir_shader], skip_const_srcs:Annotated[bool, ctypes.c_bool]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_move_output_stores_to_end(nir:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_vec_to_regs(shader:c.POINTER[nir_shader], cb:nir_instr_writemask_filter_cb, _data:ctypes.c_void_p) -> Annotated[bool, ctypes.c_bool]: ...
# Comparison functions (e.g. for alpha testing); sequential enum values.
class enum_compare_func(Annotated[int, ctypes.c_uint32], c.Enum): pass
COMPARE_FUNC_NEVER = enum_compare_func.define('COMPARE_FUNC_NEVER', 0)
COMPARE_FUNC_LESS = enum_compare_func.define('COMPARE_FUNC_LESS', 1)
COMPARE_FUNC_EQUAL = enum_compare_func.define('COMPARE_FUNC_EQUAL', 2)
COMPARE_FUNC_LEQUAL = enum_compare_func.define('COMPARE_FUNC_LEQUAL', 3)
COMPARE_FUNC_GREATER = enum_compare_func.define('COMPARE_FUNC_GREATER', 4)
COMPARE_FUNC_NOTEQUAL = enum_compare_func.define('COMPARE_FUNC_NOTEQUAL', 5)
COMPARE_FUNC_GEQUAL = enum_compare_func.define('COMPARE_FUNC_GEQUAL', 6)
COMPARE_FUNC_ALWAYS = enum_compare_func.define('COMPARE_FUNC_ALWAYS', 7)
@dll.bind
def nir_lower_alpha_test(shader:c.POINTER[nir_shader], func:enum_compare_func, alpha_to_one:Annotated[bool, ctypes.c_bool], alpha_ref_state_tokens:c.POINTER[gl_state_index16]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_alpha_to_coverage(shader:c.POINTER[nir_shader], nr_samples:uint8_t, has_intrinsic:Annotated[bool, ctypes.c_bool]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_alpha_to_one(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_alu(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_flrp(shader:c.POINTER[nir_shader], lowering_mask:Annotated[int, ctypes.c_uint32], always_precise:Annotated[bool, ctypes.c_bool]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_scale_fdiv(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
# Scalarization and type-conversion lowering passes. The cb/data pairs are
# optional native filter callbacks with a user-data pointer.
@dll.bind
def nir_lower_alu_to_scalar(shader:c.POINTER[nir_shader], cb:nir_instr_filter_cb, data:ctypes.c_void_p) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_alu_width(shader:c.POINTER[nir_shader], cb:nir_vectorize_cb, data:ctypes.c_void_p) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_alu_vec8_16_srcs(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_bool_to_bitsize(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_bool_to_float(shader:c.POINTER[nir_shader], has_fcsel_ne:Annotated[bool, ctypes.c_bool]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_bool_to_int32(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_simplify_convert_alu_types(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_const_arrays_to_uniforms(shader:c.POINTER[nir_shader], max_uniform_components:Annotated[int, ctypes.c_uint32]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_convert_alu_types(shader:c.POINTER[nir_shader], should_lower:c.CFUNCTYPE[Annotated[bool, ctypes.c_bool], [c.POINTER[nir_intrinsic_instr]]]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_constant_convert_alu_types(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_alu_conversion_to_intrinsic(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_int_to_float(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_load_const_to_scalar(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_read_invocation_to_scalar(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_phis_to_scalar(shader:c.POINTER[nir_shader], cb:nir_vectorize_cb, data:ctypes.c_void_p) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_all_phis_to_scalar(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
# IO variable reshaping, passthrough-shader construction, and fragment-stage
# lowering bindings.
@dll.bind
def nir_lower_io_array_vars_to_elements(producer:c.POINTER(nir_shader) if False else c.POINTER[nir_shader], consumer:c.POINTER[nir_shader]) -> None: ...
@dll.bind
def nir_lower_io_array_vars_to_elements_no_indirects(shader:c.POINTER[nir_shader], outputs_only:Annotated[bool, ctypes.c_bool]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_io_to_scalar(shader:c.POINTER[nir_shader], mask:nir_variable_mode, filter:nir_instr_filter_cb, filter_data:ctypes.c_void_p) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_io_vars_to_scalar(shader:c.POINTER[nir_shader], mask:nir_variable_mode) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_vectorize_io_vars(shader:c.POINTER[nir_shader], mask:nir_variable_mode) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_tess_level_array_vars_to_vec(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
# Constructors returning a newly built passthrough shader.
@dll.bind
def nir_create_passthrough_tcs_impl(options:c.POINTER[nir_shader_compiler_options], locations:c.POINTER[Annotated[int, ctypes.c_uint32]], num_locations:Annotated[int, ctypes.c_uint32], patch_vertices:uint8_t) -> c.POINTER[nir_shader]: ...
@dll.bind
def nir_create_passthrough_tcs(options:c.POINTER[nir_shader_compiler_options], vs:c.POINTER[nir_shader], patch_vertices:uint8_t) -> c.POINTER[nir_shader]: ...
@dll.bind
def nir_create_passthrough_gs(options:c.POINTER[nir_shader_compiler_options], prev_stage:c.POINTER[nir_shader], primitive_type:enum_mesa_prim, output_primitive_type:enum_mesa_prim, emulate_edgeflags:Annotated[bool, ctypes.c_bool], force_line_strip_out:Annotated[bool, ctypes.c_bool], passthrough_prim_id:Annotated[bool, ctypes.c_bool]) -> c.POINTER[nir_shader]: ...
@dll.bind
def nir_lower_fragcolor(shader:c.POINTER[nir_shader], max_cbufs:Annotated[int, ctypes.c_uint32]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_fragcoord_wtrans(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_frag_coord_to_pixel_coord(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_frag_coord_to_pixel_coord(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_viewport_transform(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_uniforms_to_ubo(shader:c.POINTER[nir_shader], dword_packed:Annotated[bool, ctypes.c_bool], load_vec4:Annotated[bool, ctypes.c_bool]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_is_helper_invocation(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_single_sampled(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_atomics(shader:c.POINTER[nir_shader], filter:nir_instr_filter_cb) -> Annotated[bool, ctypes.c_bool]: ...
# Generated mirror of the C struct `nir_lower_subgroups_options` (SIZE is the
# C sizeof in bytes). NOTE(review): the Annotated metadata appears to be
# (byte offset) for plain fields and (byte offset, bit width, bit position)
# for bit-fields -- generated to match the C ABI, do not edit by hand.
@c.record
class struct_nir_lower_subgroups_options(c.Struct):
  SIZE = 24
  filter: Annotated[nir_instr_filter_cb, 0]
  filter_data: Annotated[ctypes.c_void_p, 8]
  subgroup_size: Annotated[uint8_t, 16]
  ballot_bit_size: Annotated[uint8_t, 17]
  ballot_components: Annotated[uint8_t, 18]
  lower_to_scalar: Annotated[Annotated[bool, ctypes.c_bool], 19, 1, 0]
  lower_vote_trivial: Annotated[Annotated[bool, ctypes.c_bool], 19, 1, 1]
  lower_vote_feq: Annotated[Annotated[bool, ctypes.c_bool], 19, 1, 2]
  lower_vote_ieq: Annotated[Annotated[bool, ctypes.c_bool], 19, 1, 3]
  lower_vote_bool_eq: Annotated[Annotated[bool, ctypes.c_bool], 19, 1, 4]
  lower_first_invocation_to_ballot: Annotated[Annotated[bool, ctypes.c_bool], 19, 1, 5]
  lower_read_first_invocation: Annotated[Annotated[bool, ctypes.c_bool], 19, 1, 6]
  lower_subgroup_masks: Annotated[Annotated[bool, ctypes.c_bool], 19, 1, 7]
  lower_relative_shuffle: Annotated[Annotated[bool, ctypes.c_bool], 20, 1, 0]
  lower_shuffle_to_32bit: Annotated[Annotated[bool, ctypes.c_bool], 20, 1, 1]
  lower_shuffle_to_swizzle_amd: Annotated[Annotated[bool, ctypes.c_bool], 20, 1, 2]
  lower_shuffle: Annotated[Annotated[bool, ctypes.c_bool], 20, 1, 3]
  lower_quad: Annotated[Annotated[bool, ctypes.c_bool], 20, 1, 4]
  lower_quad_broadcast_dynamic: Annotated[Annotated[bool, ctypes.c_bool], 20, 1, 5]
  lower_quad_broadcast_dynamic_to_const: Annotated[Annotated[bool, ctypes.c_bool], 20, 1, 6]
  lower_quad_vote: Annotated[Annotated[bool, ctypes.c_bool], 20, 1, 7]
  lower_elect: Annotated[Annotated[bool, ctypes.c_bool], 21, 1, 0]
  lower_read_invocation_to_cond: Annotated[Annotated[bool, ctypes.c_bool], 21, 1, 1]
  lower_rotate_to_shuffle: Annotated[Annotated[bool, ctypes.c_bool], 21, 1, 2]
  lower_rotate_clustered_to_shuffle: Annotated[Annotated[bool, ctypes.c_bool], 21, 1, 3]
  lower_ballot_bit_count_to_mbcnt_amd: Annotated[Annotated[bool, ctypes.c_bool], 21, 1, 4]
  lower_inverse_ballot: Annotated[Annotated[bool, ctypes.c_bool], 21, 1, 5]
  lower_reduce: Annotated[Annotated[bool, ctypes.c_bool], 21, 1, 6]
  lower_boolean_reduce: Annotated[Annotated[bool, ctypes.c_bool], 21, 1, 7]
  lower_boolean_shuffle: Annotated[Annotated[bool, ctypes.c_bool], 22, 1, 0]
# Convenience alias matching the C typedef name.
nir_lower_subgroups_options: TypeAlias = struct_nir_lower_subgroups_options
@dll.bind
def nir_lower_subgroups(shader:c.POINTER[nir_shader], options:c.POINTER[nir_lower_subgroups_options]) -> Annotated[bool, ctypes.c_bool]: ...
# Generated FFI stubs; signatures mirror the native prototypes.
@dll.bind
def nir_lower_system_values(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_build_lowered_load_helper_invocation(b:c.POINTER[nir_builder]) -> c.POINTER[nir_def]: ...
# Generated mirror of the C struct `nir_lower_compute_system_values_options`.
# NOTE(review): bit-field flags share byte offsets 0 and 1; `num_workgroups`
# is a plain uint32_t[3] at byte offset 4.
@c.record
class struct_nir_lower_compute_system_values_options(c.Struct):
  SIZE = 16
  has_base_global_invocation_id: Annotated[Annotated[bool, ctypes.c_bool], 0, 1, 0]
  has_base_workgroup_id: Annotated[Annotated[bool, ctypes.c_bool], 0, 1, 1]
  has_global_size: Annotated[Annotated[bool, ctypes.c_bool], 0, 1, 2]
  shuffle_local_ids_for_quad_derivatives: Annotated[Annotated[bool, ctypes.c_bool], 0, 1, 3]
  lower_local_invocation_index: Annotated[Annotated[bool, ctypes.c_bool], 0, 1, 4]
  lower_cs_local_id_to_index: Annotated[Annotated[bool, ctypes.c_bool], 0, 1, 5]
  lower_workgroup_id_to_index: Annotated[Annotated[bool, ctypes.c_bool], 0, 1, 6]
  global_id_is_32bit: Annotated[Annotated[bool, ctypes.c_bool], 0, 1, 7]
  shortcut_1d_workgroup_id: Annotated[Annotated[bool, ctypes.c_bool], 1, 1, 0]
  num_workgroups: Annotated[c.Array[uint32_t, Literal[3]], 4]
nir_lower_compute_system_values_options: TypeAlias = struct_nir_lower_compute_system_values_options
@dll.bind
def nir_lower_compute_system_values(shader:c.POINTER[nir_shader], options:c.POINTER[nir_lower_compute_system_values_options]) -> Annotated[bool, ctypes.c_bool]: ...
# Generated mirror of `nir_lower_sysvals_to_varyings_options`: three one-bit
# flags packed into a single byte.
@c.record
class struct_nir_lower_sysvals_to_varyings_options(c.Struct):
  SIZE = 1
  frag_coord: Annotated[Annotated[bool, ctypes.c_bool], 0, 1, 0]
  front_face: Annotated[Annotated[bool, ctypes.c_bool], 0, 1, 1]
  point_coord: Annotated[Annotated[bool, ctypes.c_bool], 0, 1, 2]
nir_lower_sysvals_to_varyings_options: TypeAlias = struct_nir_lower_sysvals_to_varyings_options
@dll.bind
def nir_lower_sysvals_to_varyings(shader:c.POINTER[nir_shader], options:c.POINTER[nir_lower_sysvals_to_varyings_options]) -> Annotated[bool, ctypes.c_bool]: ...
# C enum `nir_lower_tex_packing` (ubyte-sized); values mirror the C header.
class enum_nir_lower_tex_packing(Annotated[int, ctypes.c_ubyte], c.Enum): pass
nir_lower_tex_packing_none = enum_nir_lower_tex_packing.define('nir_lower_tex_packing_none', 0)
nir_lower_tex_packing_16 = enum_nir_lower_tex_packing.define('nir_lower_tex_packing_16', 1)
nir_lower_tex_packing_8 = enum_nir_lower_tex_packing.define('nir_lower_tex_packing_8', 2)
# Generated mirror of the large C struct `nir_lower_tex_options` (sizeof 416).
# Field names/offsets are machine-generated from the C header; do not edit.
@c.record
class struct_nir_lower_tex_options(c.Struct):
  SIZE = 416
  lower_txp: Annotated[Annotated[int, ctypes.c_uint32], 0]
  lower_txp_array: Annotated[Annotated[bool, ctypes.c_bool], 4]
  lower_txf_offset: Annotated[Annotated[bool, ctypes.c_bool], 5]
  lower_rect_offset: Annotated[Annotated[bool, ctypes.c_bool], 6]
  lower_offset_filter: Annotated[nir_instr_filter_cb, 8]
  lower_rect: Annotated[Annotated[bool, ctypes.c_bool], 16]
  lower_1d: Annotated[Annotated[bool, ctypes.c_bool], 17]
  lower_1d_shadow: Annotated[Annotated[bool, ctypes.c_bool], 18]
  lower_y_uv_external: Annotated[Annotated[int, ctypes.c_uint32], 20]
  lower_y_vu_external: Annotated[Annotated[int, ctypes.c_uint32], 24]
  lower_y_u_v_external: Annotated[Annotated[int, ctypes.c_uint32], 28]
  lower_yx_xuxv_external: Annotated[Annotated[int, ctypes.c_uint32], 32]
  lower_yx_xvxu_external: Annotated[Annotated[int, ctypes.c_uint32], 36]
  lower_xy_uxvx_external: Annotated[Annotated[int, ctypes.c_uint32], 40]
  lower_xy_vxux_external: Annotated[Annotated[int, ctypes.c_uint32], 44]
  lower_ayuv_external: Annotated[Annotated[int, ctypes.c_uint32], 48]
  lower_xyuv_external: Annotated[Annotated[int, ctypes.c_uint32], 52]
  lower_yuv_external: Annotated[Annotated[int, ctypes.c_uint32], 56]
  lower_yu_yv_external: Annotated[Annotated[int, ctypes.c_uint32], 60]
  lower_yv_yu_external: Annotated[Annotated[int, ctypes.c_uint32], 64]
  lower_y41x_external: Annotated[Annotated[int, ctypes.c_uint32], 68]
  lower_sx10_external: Annotated[Annotated[int, ctypes.c_uint32], 72]
  lower_sx12_external: Annotated[Annotated[int, ctypes.c_uint32], 76]
  bt709_external: Annotated[Annotated[int, ctypes.c_uint32], 80]
  bt2020_external: Annotated[Annotated[int, ctypes.c_uint32], 84]
  yuv_full_range_external: Annotated[Annotated[int, ctypes.c_uint32], 88]
  saturate_s: Annotated[Annotated[int, ctypes.c_uint32], 92]
  saturate_t: Annotated[Annotated[int, ctypes.c_uint32], 96]
  saturate_r: Annotated[Annotated[int, ctypes.c_uint32], 100]
  swizzle_result: Annotated[Annotated[int, ctypes.c_uint32], 104]
  # 32 4-byte swizzles / 32 float scale factors, stored inline in the struct.
  swizzles: Annotated[c.Array[c.Array[uint8_t, Literal[4]], Literal[32]], 108]
  scale_factors: Annotated[c.Array[Annotated[float, ctypes.c_float], Literal[32]], 236]
  lower_srgb: Annotated[Annotated[int, ctypes.c_uint32], 364]
  lower_txd_cube_map: Annotated[Annotated[bool, ctypes.c_bool], 368]
  lower_txd_3d: Annotated[Annotated[bool, ctypes.c_bool], 369]
  lower_txd_array: Annotated[Annotated[bool, ctypes.c_bool], 370]
  lower_txd_shadow: Annotated[Annotated[bool, ctypes.c_bool], 371]
  lower_txd: Annotated[Annotated[bool, ctypes.c_bool], 372]
  lower_txd_clamp: Annotated[Annotated[bool, ctypes.c_bool], 373]
  lower_txb_shadow_clamp: Annotated[Annotated[bool, ctypes.c_bool], 374]
  lower_txd_shadow_clamp: Annotated[Annotated[bool, ctypes.c_bool], 375]
  lower_txd_offset_clamp: Annotated[Annotated[bool, ctypes.c_bool], 376]
  lower_txd_clamp_bindless_sampler: Annotated[Annotated[bool, ctypes.c_bool], 377]
  lower_txd_clamp_if_sampler_index_not_lt_16: Annotated[Annotated[bool, ctypes.c_bool], 378]
  lower_txs_lod: Annotated[Annotated[bool, ctypes.c_bool], 379]
  lower_txs_cube_array: Annotated[Annotated[bool, ctypes.c_bool], 380]
  lower_tg4_broadcom_swizzle: Annotated[Annotated[bool, ctypes.c_bool], 381]
  lower_tg4_offsets: Annotated[Annotated[bool, ctypes.c_bool], 382]
  lower_to_fragment_fetch_amd: Annotated[Annotated[bool, ctypes.c_bool], 383]
  # C function pointer: enum nir_lower_tex_packing (*)(const nir_tex_instr *, void *)
  lower_tex_packing_cb: Annotated[c.CFUNCTYPE[enum_nir_lower_tex_packing, [c.POINTER[nir_tex_instr], ctypes.c_void_p]], 384]
  lower_tex_packing_data: Annotated[ctypes.c_void_p, 392]
  lower_lod_zero_width: Annotated[Annotated[bool, ctypes.c_bool], 400]
  lower_sampler_lod_bias: Annotated[Annotated[bool, ctypes.c_bool], 401]
  lower_invalid_implicit_lod: Annotated[Annotated[bool, ctypes.c_bool], 402]
  lower_index_to_offset: Annotated[Annotated[bool, ctypes.c_bool], 403]
  callback_data: Annotated[ctypes.c_void_p, 408]
nir_lower_tex_options: TypeAlias = struct_nir_lower_tex_options
@dll.bind
def nir_lower_tex(shader:c.POINTER[nir_shader], options:c.POINTER[nir_lower_tex_options]) -> Annotated[bool, ctypes.c_bool]: ...
# Generated mirrors of `nir_lower_tex_shadow_swizzle` (four 3-bit swizzle
# selectors packed into 4 bytes) and `nir_lower_image_options`, plus their
# pass entry points.
@c.record
class struct_nir_lower_tex_shadow_swizzle(c.Struct):
  SIZE = 4
  swizzle_r: Annotated[Annotated[int, ctypes.c_uint32], 0, 3, 0]
  swizzle_g: Annotated[Annotated[int, ctypes.c_uint32], 0, 3, 3]
  swizzle_b: Annotated[Annotated[int, ctypes.c_uint32], 0, 3, 6]
  swizzle_a: Annotated[Annotated[int, ctypes.c_uint32], 1, 3, 1]
nir_lower_tex_shadow_swizzle: TypeAlias = struct_nir_lower_tex_shadow_swizzle
@dll.bind
def nir_lower_tex_shadow(s:c.POINTER[nir_shader], n_states:Annotated[int, ctypes.c_uint32], compare_func:c.POINTER[enum_compare_func], tex_swizzles:c.POINTER[nir_lower_tex_shadow_swizzle], is_fixed_point_format:Annotated[bool, ctypes.c_bool]) -> Annotated[bool, ctypes.c_bool]: ...
@c.record
class struct_nir_lower_image_options(c.Struct):
  SIZE = 3
  lower_cube_size: Annotated[Annotated[bool, ctypes.c_bool], 0]
  lower_to_fragment_mask_load_amd: Annotated[Annotated[bool, ctypes.c_bool], 1]
  lower_image_samples_to_one: Annotated[Annotated[bool, ctypes.c_bool], 2]
nir_lower_image_options: TypeAlias = struct_nir_lower_image_options
@dll.bind
def nir_lower_image(nir:c.POINTER[nir_shader], options:c.POINTER[nir_lower_image_options]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_image_atomics_to_global(s:c.POINTER[nir_shader], filter:nir_intrin_filter_cb, data:ctypes.c_void_p) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_readonly_images_to_tex(shader:c.POINTER[nir_shader], per_variable:Annotated[bool, ctypes.c_bool]) -> Annotated[bool, ctypes.c_bool]: ...
# Non-uniform-access lowering: bitmask enum (values are powers of two, except
# the trailing *_count which is the number of types), two C callback typedefs,
# the options struct, and the query/opt/lower entry points.
class enum_nir_lower_non_uniform_access_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
nir_lower_non_uniform_ubo_access = enum_nir_lower_non_uniform_access_type.define('nir_lower_non_uniform_ubo_access', 1)
nir_lower_non_uniform_ssbo_access = enum_nir_lower_non_uniform_access_type.define('nir_lower_non_uniform_ssbo_access', 2)
nir_lower_non_uniform_texture_access = enum_nir_lower_non_uniform_access_type.define('nir_lower_non_uniform_texture_access', 4)
nir_lower_non_uniform_image_access = enum_nir_lower_non_uniform_access_type.define('nir_lower_non_uniform_image_access', 8)
nir_lower_non_uniform_get_ssbo_size = enum_nir_lower_non_uniform_access_type.define('nir_lower_non_uniform_get_ssbo_size', 16)
nir_lower_non_uniform_texture_offset_access = enum_nir_lower_non_uniform_access_type.define('nir_lower_non_uniform_texture_offset_access', 32)
nir_lower_non_uniform_access_type_count = enum_nir_lower_non_uniform_access_type.define('nir_lower_non_uniform_access_type_count', 6)
nir_lower_non_uniform_src_access_callback: TypeAlias = c.CFUNCTYPE[Annotated[bool, ctypes.c_bool], [c.POINTER[struct_nir_tex_instr], Annotated[int, ctypes.c_uint32], ctypes.c_void_p]]
nir_lower_non_uniform_access_callback: TypeAlias = c.CFUNCTYPE[Annotated[int, ctypes.c_uint16], [c.POINTER[struct_nir_src], ctypes.c_void_p]]
@c.record
class struct_nir_lower_non_uniform_access_options(c.Struct):
  SIZE = 32
  types: Annotated[enum_nir_lower_non_uniform_access_type, 0]
  tex_src_callback: Annotated[nir_lower_non_uniform_src_access_callback, 8]
  callback: Annotated[nir_lower_non_uniform_access_callback, 16]
  callback_data: Annotated[ctypes.c_void_p, 24]
nir_lower_non_uniform_access_options: TypeAlias = struct_nir_lower_non_uniform_access_options
@dll.bind
def nir_has_non_uniform_access(shader:c.POINTER[nir_shader], types:enum_nir_lower_non_uniform_access_type) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_non_uniform_access(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_non_uniform_access(shader:c.POINTER[nir_shader], options:c.POINTER[nir_lower_non_uniform_access_options]) -> Annotated[bool, ctypes.c_bool]: ...
# Generated option structs + entry points for the integer-division and
# input-attachment lowering passes.
@c.record
class struct_nir_lower_idiv_options(c.Struct):
  SIZE = 1
  allow_fp16: Annotated[Annotated[bool, ctypes.c_bool], 0]
nir_lower_idiv_options: TypeAlias = struct_nir_lower_idiv_options
@dll.bind
def nir_lower_idiv(shader:c.POINTER[nir_shader], options:c.POINTER[nir_lower_idiv_options]) -> Annotated[bool, ctypes.c_bool]: ...
@c.record
class struct_nir_input_attachment_options(c.Struct):
  SIZE = 12
  use_ia_coord_intrin: Annotated[Annotated[bool, ctypes.c_bool], 0]
  use_fragcoord_sysval: Annotated[Annotated[bool, ctypes.c_bool], 1]
  use_layer_id_sysval: Annotated[Annotated[bool, ctypes.c_bool], 2]
  use_view_id_for_layer: Annotated[Annotated[bool, ctypes.c_bool], 3]
  unscaled_depth_stencil_ir3: Annotated[Annotated[bool, ctypes.c_bool], 4]
  unscaled_input_attachment_ir3: Annotated[uint32_t, 8]
nir_input_attachment_options: TypeAlias = struct_nir_input_attachment_options
@dll.bind
def nir_lower_input_attachments(shader:c.POINTER[nir_shader], options:c.POINTER[nir_input_attachment_options]) -> Annotated[bool, ctypes.c_bool]: ...
# Generated FFI stubs for clip-plane, point-size, color and edge-flag passes;
# signatures mirror the native prototypes.
@dll.bind
def nir_lower_clip_vs(shader:c.POINTER[nir_shader], ucp_enables:Annotated[int, ctypes.c_uint32], use_vars:Annotated[bool, ctypes.c_bool], use_clipdist_array:Annotated[bool, ctypes.c_bool], clipplane_state_tokens:c.Array[c.Array[gl_state_index16, Literal[4]], Literal[0]]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_clip_gs(shader:c.POINTER[nir_shader], ucp_enables:Annotated[int, ctypes.c_uint32], use_clipdist_array:Annotated[bool, ctypes.c_bool], clipplane_state_tokens:c.Array[c.Array[gl_state_index16, Literal[4]], Literal[0]]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_clip_fs(shader:c.POINTER[nir_shader], ucp_enables:Annotated[int, ctypes.c_uint32], use_clipdist_array:Annotated[bool, ctypes.c_bool], use_load_interp:Annotated[bool, ctypes.c_bool]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_clip_cull_distance_to_vec4s(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_clip_cull_distance_array_vars(nir:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_clip_disable(shader:c.POINTER[nir_shader], clip_plane_enable:Annotated[int, ctypes.c_uint32]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_point_size_mov(shader:c.POINTER[nir_shader], pointsize_state_tokens:c.POINTER[gl_state_index16]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_frexp(nir:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_two_sided_color(shader:c.POINTER[nir_shader], face_sysval:Annotated[bool, ctypes.c_bool]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_clamp_color_outputs(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_flatshade(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_passthrough_edgeflags(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_patch_vertices(nir:c.POINTER[nir_shader], static_count:Annotated[int, ctypes.c_uint32], uniform_state_tokens:c.POINTER[gl_state_index16]) -> Annotated[bool, ctypes.c_bool]: ...
# Generated mirror of `nir_lower_wpos_ytransform_options` plus window-position
# related pass stubs.
@c.record
class struct_nir_lower_wpos_ytransform_options(c.Struct):
  SIZE = 10
  state_tokens: Annotated[c.Array[gl_state_index16, Literal[4]], 0]
  fs_coord_origin_upper_left: Annotated[Annotated[bool, ctypes.c_bool], 8, 1, 0]
  fs_coord_origin_lower_left: Annotated[Annotated[bool, ctypes.c_bool], 8, 1, 1]
  fs_coord_pixel_center_integer: Annotated[Annotated[bool, ctypes.c_bool], 8, 1, 2]
  fs_coord_pixel_center_half_integer: Annotated[Annotated[bool, ctypes.c_bool], 8, 1, 3]
nir_lower_wpos_ytransform_options: TypeAlias = struct_nir_lower_wpos_ytransform_options
@dll.bind
def nir_lower_wpos_ytransform(shader:c.POINTER[nir_shader], options:c.POINTER[nir_lower_wpos_ytransform_options]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_wpos_center(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_pntc_ytransform(shader:c.POINTER[nir_shader], clipplane_state_tokens:c.Array[c.Array[gl_state_index16, Literal[4]], Literal[0]]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_wrmasks(shader:c.POINTER[nir_shader], cb:nir_instr_filter_cb, data:ctypes.c_void_p) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_fb_read(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
# Generated mirrors of `nir_lower_drawpixels_options` and
# `nir_lower_bitmap_options` plus their pass entry points.
@c.record
class struct_nir_lower_drawpixels_options(c.Struct):
  SIZE = 36
  texcoord_state_tokens: Annotated[c.Array[gl_state_index16, Literal[4]], 0]
  scale_state_tokens: Annotated[c.Array[gl_state_index16, Literal[4]], 8]
  bias_state_tokens: Annotated[c.Array[gl_state_index16, Literal[4]], 16]
  drawpix_sampler: Annotated[Annotated[int, ctypes.c_uint32], 24]
  pixelmap_sampler: Annotated[Annotated[int, ctypes.c_uint32], 28]
  pixel_maps: Annotated[Annotated[bool, ctypes.c_bool], 32, 1, 0]
  scale_and_bias: Annotated[Annotated[bool, ctypes.c_bool], 32, 1, 1]
nir_lower_drawpixels_options: TypeAlias = struct_nir_lower_drawpixels_options
@dll.bind
def nir_lower_drawpixels(shader:c.POINTER[nir_shader], options:c.POINTER[nir_lower_drawpixels_options]) -> Annotated[bool, ctypes.c_bool]: ...
@c.record
class struct_nir_lower_bitmap_options(c.Struct):
  SIZE = 8
  sampler: Annotated[Annotated[int, ctypes.c_uint32], 0]
  swizzle_xxxx: Annotated[Annotated[bool, ctypes.c_bool], 4]
nir_lower_bitmap_options: TypeAlias = struct_nir_lower_bitmap_options
@dll.bind
def nir_lower_bitmap(shader:c.POINTER[nir_shader], options:c.POINTER[nir_lower_bitmap_options]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_atomics_to_ssbo(shader:c.POINTER[nir_shader], offset_align_state:Annotated[int, ctypes.c_uint32]) -> Annotated[bool, ctypes.c_bool]: ...
# GS-intrinsics lowering bitmask enum, task-shader options, and the bit-size
# lowering callback typedef; all generated from the C headers.
class nir_lower_gs_intrinsics_flags(Annotated[int, ctypes.c_uint32], c.Enum): pass
nir_lower_gs_intrinsics_per_stream = nir_lower_gs_intrinsics_flags.define('nir_lower_gs_intrinsics_per_stream', 1)
nir_lower_gs_intrinsics_count_primitives = nir_lower_gs_intrinsics_flags.define('nir_lower_gs_intrinsics_count_primitives', 2)
nir_lower_gs_intrinsics_count_vertices_per_primitive = nir_lower_gs_intrinsics_flags.define('nir_lower_gs_intrinsics_count_vertices_per_primitive', 4)
nir_lower_gs_intrinsics_overwrite_incomplete = nir_lower_gs_intrinsics_flags.define('nir_lower_gs_intrinsics_overwrite_incomplete', 8)
@dll.bind
def nir_lower_gs_intrinsics(shader:c.POINTER[nir_shader], options:nir_lower_gs_intrinsics_flags) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_halt_to_return(nir:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_tess_coord_z(shader:c.POINTER[nir_shader], triangles:Annotated[bool, ctypes.c_bool]) -> Annotated[bool, ctypes.c_bool]: ...
@c.record
class struct_nir_lower_task_shader_options(c.Struct):
  SIZE = 8
  payload_to_shared_for_atomics: Annotated[Annotated[bool, ctypes.c_bool], 0, 1, 0]
  payload_to_shared_for_small_types: Annotated[Annotated[bool, ctypes.c_bool], 0, 1, 1]
  payload_offset_in_bytes: Annotated[uint32_t, 4]
nir_lower_task_shader_options: TypeAlias = struct_nir_lower_task_shader_options
# NOTE(review): nir_lower_task_shader takes its options struct BY VALUE (no
# c.POINTER wrapper), unlike most other passes here -- matches the C prototype.
@dll.bind
def nir_lower_task_shader(shader:c.POINTER[nir_shader], options:nir_lower_task_shader_options) -> Annotated[bool, ctypes.c_bool]: ...
nir_lower_bit_size_callback: TypeAlias = c.CFUNCTYPE[Annotated[int, ctypes.c_uint32], [c.POINTER[struct_nir_instr], ctypes.c_void_p]]
@dll.bind
def nir_lower_bit_size(shader:c.POINTER[nir_shader], callback:nir_lower_bit_size_callback, callback_data:ctypes.c_void_p) -> Annotated[bool, ctypes.c_bool]: ...
# Conversion-splitting options struct plus 64-bit / double-precision lowering
# and IO-intrinsic helper stubs; all generated.
@c.record
class struct_nir_split_conversions_options(c.Struct):
  SIZE = 24
  callback: Annotated[nir_lower_bit_size_callback, 0]
  callback_data: Annotated[ctypes.c_void_p, 8]
  has_convert_alu_types: Annotated[Annotated[bool, ctypes.c_bool], 16]
nir_split_conversions_options: TypeAlias = struct_nir_split_conversions_options
@dll.bind
def nir_split_conversions(shader:c.POINTER[nir_shader], options:c.POINTER[nir_split_conversions_options]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_split_64bit_vec3_and_vec4(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_int64_op_to_options_mask(opcode:nir_op) -> nir_lower_int64_options: ...
@dll.bind
def nir_lower_int64(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_int64_float_conversions(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_doubles_op_to_options_mask(opcode:nir_op) -> nir_lower_doubles_options: ...
@dll.bind
def nir_lower_doubles(shader:c.POINTER[nir_shader], softfp64:c.POINTER[nir_shader], options:nir_lower_doubles_options) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_pack(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_get_io_intrinsic(instr:c.POINTER[nir_instr], modes:nir_variable_mode, out_mode:c.POINTER[nir_variable_mode]) -> c.POINTER[nir_intrinsic_instr]: ...
@dll.bind
def nir_recompute_io_bases(nir:c.POINTER[nir_shader], modes:nir_variable_mode) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_mediump_vars(nir:c.POINTER[nir_shader], modes:nir_variable_mode) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_mediump_io(nir:c.POINTER[nir_shader], modes:nir_variable_mode, varying_mask:uint64_t, use_16bit_slots:Annotated[bool, ctypes.c_bool]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_clear_mediump_io_flag(nir:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
# 16-bit texture/image optimization option structs, sampler-source constraint
# table type, and their entry points; all generated from the C headers.
@c.record
class struct_nir_opt_tex_srcs_options(c.Struct):
  SIZE = 8
  sampler_dims: Annotated[Annotated[int, ctypes.c_uint32], 0]
  src_types: Annotated[Annotated[int, ctypes.c_uint32], 4]
nir_opt_tex_srcs_options: TypeAlias = struct_nir_opt_tex_srcs_options
@c.record
class struct_nir_opt_16bit_tex_image_options(c.Struct):
  SIZE = 24
  rounding_mode: Annotated[nir_rounding_mode, 0]
  opt_tex_dest_types: Annotated[nir_alu_type, 4]
  opt_image_dest_types: Annotated[nir_alu_type, 5]
  integer_dest_saturates: Annotated[Annotated[bool, ctypes.c_bool], 6]
  opt_image_store_data: Annotated[Annotated[bool, ctypes.c_bool], 7]
  opt_image_srcs: Annotated[Annotated[bool, ctypes.c_bool], 8]
  opt_srcs_options_count: Annotated[Annotated[int, ctypes.c_uint32], 12]
  opt_srcs_options: Annotated[c.POINTER[nir_opt_tex_srcs_options], 16]
nir_opt_16bit_tex_image_options: TypeAlias = struct_nir_opt_16bit_tex_image_options
@dll.bind
def nir_opt_16bit_tex_image(nir:c.POINTER[nir_shader], options:c.POINTER[nir_opt_16bit_tex_image_options]) -> Annotated[bool, ctypes.c_bool]: ...
@c.record
class struct_nir_tex_src_type_constraint(c.Struct):
  SIZE = 8
  legalize_type: Annotated[Annotated[bool, ctypes.c_bool], 0]
  bit_size: Annotated[uint8_t, 1]
  match_src: Annotated[nir_tex_src_type, 4]
nir_tex_src_type_constraint: TypeAlias = struct_nir_tex_src_type_constraint
# Fixed-length C array typedef (23 entries, one per nir_tex_src_type in the C enum).
nir_tex_src_type_constraints: TypeAlias = c.Array[struct_nir_tex_src_type_constraint, Literal[23]]
@dll.bind
def nir_legalize_16bit_sampler_srcs(nir:c.POINTER[nir_shader], constraints:nir_tex_src_type_constraints) -> Annotated[bool, ctypes.c_bool]: ...
# Point-size / texcoord-replace stubs plus the interpolation and discard-if
# bitmask enums (values are powers of two) and their pass entry points.
@dll.bind
def nir_lower_point_size(shader:c.POINTER[nir_shader], min:Annotated[float, ctypes.c_float], max:Annotated[float, ctypes.c_float]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_default_point_size(nir:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_texcoord_replace(s:c.POINTER[nir_shader], coord_replace:Annotated[int, ctypes.c_uint32], point_coord_is_sysval:Annotated[bool, ctypes.c_bool], yinvert:Annotated[bool, ctypes.c_bool]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_texcoord_replace_late(s:c.POINTER[nir_shader], coord_replace:Annotated[int, ctypes.c_uint32], point_coord_is_sysval:Annotated[bool, ctypes.c_bool]) -> Annotated[bool, ctypes.c_bool]: ...
class nir_lower_interpolation_options(Annotated[int, ctypes.c_uint32], c.Enum): pass
nir_lower_interpolation_at_sample = nir_lower_interpolation_options.define('nir_lower_interpolation_at_sample', 2)
nir_lower_interpolation_at_offset = nir_lower_interpolation_options.define('nir_lower_interpolation_at_offset', 4)
nir_lower_interpolation_centroid = nir_lower_interpolation_options.define('nir_lower_interpolation_centroid', 8)
nir_lower_interpolation_pixel = nir_lower_interpolation_options.define('nir_lower_interpolation_pixel', 16)
nir_lower_interpolation_sample = nir_lower_interpolation_options.define('nir_lower_interpolation_sample', 32)
@dll.bind
def nir_lower_interpolation(shader:c.POINTER[nir_shader], options:nir_lower_interpolation_options) -> Annotated[bool, ctypes.c_bool]: ...
class nir_lower_discard_if_options(Annotated[int, ctypes.c_uint32], c.Enum): pass
nir_lower_demote_if_to_cf = nir_lower_discard_if_options.define('nir_lower_demote_if_to_cf', 1)
nir_lower_terminate_if_to_cf = nir_lower_discard_if_options.define('nir_lower_terminate_if_to_cf', 2)
nir_move_terminate_out_of_loops = nir_lower_discard_if_options.define('nir_move_terminate_out_of_loops', 4)
@dll.bind
def nir_lower_discard_if(shader:c.POINTER[nir_shader], options:nir_lower_discard_if_options) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_terminate_to_demote(nir:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_memory_model(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_goto_ifs(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_continue_constructs(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
# Multiview lowering options (passed by value to the stubs below, matching the
# C prototypes) and the fp16-cast rounding-mode bitmask enum.
@c.record
class struct_nir_lower_multiview_options(c.Struct):
  SIZE = 16
  view_mask: Annotated[uint32_t, 0]
  allowed_per_view_outputs: Annotated[uint64_t, 8]
nir_lower_multiview_options: TypeAlias = struct_nir_lower_multiview_options
@dll.bind
def nir_shader_uses_view_index(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_can_lower_multiview(shader:c.POINTER[nir_shader], options:nir_lower_multiview_options) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_multiview(shader:c.POINTER[nir_shader], options:nir_lower_multiview_options) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_view_index_to_device_index(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
class nir_lower_fp16_cast_options(Annotated[int, ctypes.c_uint32], c.Enum): pass
nir_lower_fp16_rtz = nir_lower_fp16_cast_options.define('nir_lower_fp16_rtz', 1)
nir_lower_fp16_rtne = nir_lower_fp16_cast_options.define('nir_lower_fp16_rtne', 2)
nir_lower_fp16_ru = nir_lower_fp16_cast_options.define('nir_lower_fp16_ru', 4)
nir_lower_fp16_rd = nir_lower_fp16_cast_options.define('nir_lower_fp16_rd', 8)
# 15 == rtz|rtne|ru|rd, i.e. the union of the four rounding-mode bits above.
nir_lower_fp16_all = nir_lower_fp16_cast_options.define('nir_lower_fp16_all', 15)
nir_lower_fp16_split_fp64 = nir_lower_fp16_cast_options.define('nir_lower_fp16_split_fp64', 16)
@dll.bind
def nir_lower_fp16_casts(shader:c.POINTER[nir_shader], options:nir_lower_fp16_cast_options) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_normalize_cubemap_coords(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_shader_supports_implicit_lod(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
# Generated FFI stubs for liveness/divergence analysis, LCSSA conversion,
# SSA repair / out-of-SSA, and sampler/image deref lowering helpers.
@dll.bind
def nir_live_defs_impl(impl:c.POINTER[nir_function_impl]) -> None: ...
@dll.bind
def nir_get_live_defs(cursor:nir_cursor, mem_ctx:ctypes.c_void_p) -> c.POINTER[Annotated[int, ctypes.c_uint32]]: ...
@dll.bind
def nir_loop_analyze_impl(impl:c.POINTER[nir_function_impl], indirect_mask:nir_variable_mode, force_unroll_sampler_indirect:Annotated[bool, ctypes.c_bool]) -> None: ...
@dll.bind
def nir_defs_interfere(a:c.POINTER[nir_def], b:c.POINTER[nir_def]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_repair_ssa_impl(impl:c.POINTER[nir_function_impl]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_repair_ssa(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_convert_loop_to_lcssa(loop:c.POINTER[nir_loop]) -> None: ...
@dll.bind
def nir_convert_to_lcssa(shader:c.POINTER[nir_shader], skip_invariants:Annotated[bool, ctypes.c_bool], skip_bool_invariants:Annotated[bool, ctypes.c_bool]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_divergence_analysis_impl(impl:c.POINTER[nir_function_impl], options:nir_divergence_options) -> None: ...
@dll.bind
def nir_divergence_analysis(shader:c.POINTER[nir_shader]) -> None: ...
@dll.bind
def nir_vertex_divergence_analysis(shader:c.POINTER[nir_shader]) -> None: ...
@dll.bind
def nir_has_divergent_loop(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_rewrite_uses_to_load_reg(b:c.POINTER[nir_builder], old:c.POINTER[nir_def], reg:c.POINTER[nir_def]) -> None: ...
@dll.bind
def nir_convert_from_ssa(shader:c.POINTER[nir_shader], phi_webs_only:Annotated[bool, ctypes.c_bool], consider_divergence:Annotated[bool, ctypes.c_bool]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_phis_to_regs_block(block:c.POINTER[nir_block], place_writes_in_imm_preds:Annotated[bool, ctypes.c_bool]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_ssa_defs_to_regs_block(block:c.POINTER[nir_block]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_rematerialize_deref_in_use_blocks(instr:c.POINTER[nir_deref_instr]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_rematerialize_derefs_in_use_blocks_impl(impl:c.POINTER[nir_function_impl]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_samplers(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_cl_images(shader:c.POINTER[nir_shader], lower_image_derefs:Annotated[bool, ctypes.c_bool], lower_sampler_derefs:Annotated[bool, ctypes.c_bool]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_dedup_inline_samplers(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
# SSBO, helper-write, and printf lowering: generated option structs plus
# their pass entry points.
@c.record
class struct_nir_lower_ssbo_options(c.Struct):
  SIZE = 2
  native_loads: Annotated[Annotated[bool, ctypes.c_bool], 0]
  native_offset: Annotated[Annotated[bool, ctypes.c_bool], 1]
nir_lower_ssbo_options: TypeAlias = struct_nir_lower_ssbo_options
@dll.bind
def nir_lower_ssbo(shader:c.POINTER[nir_shader], opts:c.POINTER[nir_lower_ssbo_options]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_helper_writes(shader:c.POINTER[nir_shader], lower_plain_stores:Annotated[bool, ctypes.c_bool]) -> Annotated[bool, ctypes.c_bool]: ...
@c.record
class struct_nir_lower_printf_options(c.Struct):
  SIZE = 12
  max_buffer_size: Annotated[Annotated[int, ctypes.c_uint32], 0]
  ptr_bit_size: Annotated[Annotated[int, ctypes.c_uint32], 4]
  hash_format_strings: Annotated[Annotated[bool, ctypes.c_bool], 8]
nir_lower_printf_options: TypeAlias = struct_nir_lower_printf_options
@dll.bind
def nir_lower_printf(nir:c.POINTER[nir_shader], options:c.POINTER[nir_lower_printf_options]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_printf_buffer(nir:c.POINTER[nir_shader], address:uint64_t, size:uint32_t) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_comparison_pre_impl(impl:c.POINTER[nir_function_impl]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_comparison_pre(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
# Mirror of C `struct nir_opt_access_options` (single bool, 1 byte).
@c.record
class struct_nir_opt_access_options(c.Struct):
  SIZE = 1
  is_vulkan: Annotated[Annotated[bool, ctypes.c_bool], 0]
nir_opt_access_options: TypeAlias = struct_nir_opt_access_options
# Access optimization plus the nir_opt_algebraic pass family and constant
# folding; all take a shader pointer and return a progress bool.
@dll.bind
def nir_opt_access(shader:c.POINTER[nir_shader], options:c.POINTER[nir_opt_access_options]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_algebraic(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_algebraic_before_ffma(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_algebraic_before_lower_int64(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_algebraic_late(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_algebraic_distribute_src_mods(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_algebraic_integer_promotion(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_reassociate_matrix_mul(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_constant_folding(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
# C function-pointer type for the barrier-combining callback
# (two intrinsic instrs + user data -> bool), and the pass that uses it.
nir_combine_barrier_cb: TypeAlias = c.CFUNCTYPE[Annotated[bool, ctypes.c_bool], [c.POINTER[struct_nir_intrinsic_instr], c.POINTER[struct_nir_intrinsic_instr], ctypes.c_void_p]]
@dll.bind
def nir_opt_combine_barriers(shader:c.POINTER[nir_shader], combine_cb:nir_combine_barrier_cb, data:ctypes.c_void_p) -> Annotated[bool, ctypes.c_bool]: ...
# Mirror of the C `mesa_scope` enum (uint32 values registered via
# c.Enum.define; values must match the native enum).
class mesa_scope(Annotated[int, ctypes.c_uint32], c.Enum): pass
SCOPE_NONE = mesa_scope.define('SCOPE_NONE', 0)
SCOPE_INVOCATION = mesa_scope.define('SCOPE_INVOCATION', 1)
SCOPE_SUBGROUP = mesa_scope.define('SCOPE_SUBGROUP', 2)
SCOPE_SHADER_CALL = mesa_scope.define('SCOPE_SHADER_CALL', 3)
SCOPE_WORKGROUP = mesa_scope.define('SCOPE_WORKGROUP', 4)
SCOPE_QUEUE_FAMILY = mesa_scope.define('SCOPE_QUEUE_FAMILY', 5)
SCOPE_DEVICE = mesa_scope.define('SCOPE_DEVICE', 6)
# Core nir_opt_* / copy-prop / CSE / DCE pass entry points; all return a
# progress bool per the annotated return type.
@dll.bind
def nir_opt_acquire_release_barriers(shader:c.POINTER[nir_shader], max_scope:mesa_scope) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_barrier_modes(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_minimize_call_live_states(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_combine_stores(shader:c.POINTER[nir_shader], modes:nir_variable_mode) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_copy_prop_impl(impl:c.POINTER[nir_function_impl]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_copy_prop(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_copy_prop_vars(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_cse(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_dce(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_dead_cf(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_dead_write_vars(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_deref_impl(impl:c.POINTER[nir_function_impl]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_deref(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_find_array_copies(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_def_is_frag_coord_z(_def:c.POINTER[nir_def]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_fragdepth(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_gcm(shader:c.POINTER[nir_shader], value_number:Annotated[bool, ctypes.c_bool]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_generate_bfi(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_idiv_const(shader:c.POINTER[nir_shader], min_bit_size:Annotated[int, ctypes.c_uint32]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_mqsad(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
# Bitmask-style enum for nir_opt_if (values 1 and 2 are distinct bits).
class nir_opt_if_options(Annotated[int, ctypes.c_uint32], c.Enum): pass
nir_opt_if_optimize_phi_true_false = nir_opt_if_options.define('nir_opt_if_optimize_phi_true_false', 1)
nir_opt_if_avoid_64bit_phis = nir_opt_if_options.define('nir_opt_if_avoid_64bit_phis', 2)
@dll.bind
def nir_opt_if(shader:c.POINTER[nir_shader], options:nir_opt_if_options) -> Annotated[bool, ctypes.c_bool]: ...
# Intrinsic, large-constant, LICM and loop optimization pass entry points.
@dll.bind
def nir_opt_intrinsics(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_large_constants(shader:c.POINTER[nir_shader], size_align:glsl_type_size_align_func, threshold:Annotated[int, ctypes.c_uint32]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_licm(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_loop(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_loop_unroll(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
# Bitmask enum selecting which instruction kinds nir_opt_sink/nir_opt_move
# may relocate (each value is a power of two).
class nir_move_options(Annotated[int, ctypes.c_uint32], c.Enum): pass
nir_move_const_undef = nir_move_options.define('nir_move_const_undef', 1)
nir_move_load_ubo = nir_move_options.define('nir_move_load_ubo', 2)
nir_move_load_input = nir_move_options.define('nir_move_load_input', 4)
nir_move_comparisons = nir_move_options.define('nir_move_comparisons', 8)
nir_move_copies = nir_move_options.define('nir_move_copies', 16)
nir_move_load_ssbo = nir_move_options.define('nir_move_load_ssbo', 32)
nir_move_load_uniform = nir_move_options.define('nir_move_load_uniform', 64)
nir_move_alu = nir_move_options.define('nir_move_alu', 128)
nir_dont_move_byte_word_vecs = nir_move_options.define('nir_dont_move_byte_word_vecs', 256)
# Instruction movement query and sink/move passes parameterized by
# nir_move_options.
@dll.bind
def nir_can_move_instr(instr:c.POINTER[nir_instr], options:nir_move_options) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_sink(shader:c.POINTER[nir_shader], options:nir_move_options) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_move(shader:c.POINTER[nir_shader], options:nir_move_options) -> Annotated[bool, ctypes.c_bool]: ...
# Mirror of C `struct nir_opt_offsets_options` (48 bytes). Offsets jump from
# 16 to 24 / 40 to reflect native alignment padding -- layout is ABI, do not
# hand-edit.
@c.record
class struct_nir_opt_offsets_options(c.Struct):
  SIZE = 48
  uniform_max: Annotated[uint32_t, 0]
  ubo_vec4_max: Annotated[uint32_t, 4]
  shared_max: Annotated[uint32_t, 8]
  shared_atomic_max: Annotated[uint32_t, 12]
  buffer_max: Annotated[uint32_t, 16]
  max_offset_cb: Annotated[c.CFUNCTYPE[uint32_t, [c.POINTER[nir_intrinsic_instr], ctypes.c_void_p]], 24]
  max_offset_data: Annotated[ctypes.c_void_p, 32]
  allow_offset_wrap: Annotated[Annotated[bool, ctypes.c_bool], 40]
nir_opt_offsets_options: TypeAlias = struct_nir_opt_offsets_options
@dll.bind
def nir_opt_offsets(shader:c.POINTER[nir_shader], options:c.POINTER[nir_opt_offsets_options]) -> Annotated[bool, ctypes.c_bool]: ...
# Mirror of C `struct nir_opt_peephole_select_options` (8 bytes) plus the
# pass that consumes it.
@c.record
class struct_nir_opt_peephole_select_options(c.Struct):
  SIZE = 8
  limit: Annotated[Annotated[int, ctypes.c_uint32], 0]
  indirect_load_ok: Annotated[Annotated[bool, ctypes.c_bool], 4]
  expensive_alu_ok: Annotated[Annotated[bool, ctypes.c_bool], 5]
  discard_ok: Annotated[Annotated[bool, ctypes.c_bool], 6]
nir_opt_peephole_select_options: TypeAlias = struct_nir_opt_peephole_select_options
@dll.bind
def nir_opt_peephole_select(shader:c.POINTER[nir_shader], options:c.POINTER[nir_opt_peephole_select_options]) -> Annotated[bool, ctypes.c_bool]: ...
# Remaining nir_opt_* passes (phi, shrink, undef, uniform, vectorize,
# ray-query, tex) plus nir_sweep (returns None, unlike the bool passes).
@dll.bind
def nir_opt_reassociate_bfi(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_rematerialize_compares(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_remove_phis(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_remove_single_src_phis_block(block:c.POINTER[nir_block]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_phi_precision(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_phi_to_bool(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_shrink_stores(shader:c.POINTER[nir_shader], shrink_image_store:Annotated[bool, ctypes.c_bool]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_shrink_vectors(shader:c.POINTER[nir_shader], shrink_start:Annotated[bool, ctypes.c_bool]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_undef(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_undef_to_zero(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_uniform_atomics(shader:c.POINTER[nir_shader], fs_atomics_predicated:Annotated[bool, ctypes.c_bool]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_uniform_subgroup(shader:c.POINTER[nir_shader], _1:c.POINTER[nir_lower_subgroups_options]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_vectorize(shader:c.POINTER[nir_shader], filter:nir_vectorize_cb, data:ctypes.c_void_p) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_vectorize_io(shader:c.POINTER[nir_shader], modes:nir_variable_mode, allow_holes:Annotated[bool, ctypes.c_bool]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_move_discards_to_top(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_ray_queries(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_ray_query_ranges(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_opt_tex_skip_helpers(shader:c.POINTER[nir_shader], no_add_divergence:Annotated[bool, ctypes.c_bool]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_sweep(shader:c.POINTER[nir_shader]) -> None: ...
# Mirror of the C `gl_system_value` enum: every shader system value
# (subgroup, vertex, fragment, tess, compute, ray-tracing, mesh, vendor).
# Values 0..99 must stay in lockstep with the native enum -- regenerate
# rather than edit.
class gl_system_value(Annotated[int, ctypes.c_uint32], c.Enum): pass
SYSTEM_VALUE_SUBGROUP_SIZE = gl_system_value.define('SYSTEM_VALUE_SUBGROUP_SIZE', 0)
SYSTEM_VALUE_SUBGROUP_INVOCATION = gl_system_value.define('SYSTEM_VALUE_SUBGROUP_INVOCATION', 1)
SYSTEM_VALUE_SUBGROUP_EQ_MASK = gl_system_value.define('SYSTEM_VALUE_SUBGROUP_EQ_MASK', 2)
SYSTEM_VALUE_SUBGROUP_GE_MASK = gl_system_value.define('SYSTEM_VALUE_SUBGROUP_GE_MASK', 3)
SYSTEM_VALUE_SUBGROUP_GT_MASK = gl_system_value.define('SYSTEM_VALUE_SUBGROUP_GT_MASK', 4)
SYSTEM_VALUE_SUBGROUP_LE_MASK = gl_system_value.define('SYSTEM_VALUE_SUBGROUP_LE_MASK', 5)
SYSTEM_VALUE_SUBGROUP_LT_MASK = gl_system_value.define('SYSTEM_VALUE_SUBGROUP_LT_MASK', 6)
SYSTEM_VALUE_NUM_SUBGROUPS = gl_system_value.define('SYSTEM_VALUE_NUM_SUBGROUPS', 7)
SYSTEM_VALUE_SUBGROUP_ID = gl_system_value.define('SYSTEM_VALUE_SUBGROUP_ID', 8)
SYSTEM_VALUE_VERTEX_ID = gl_system_value.define('SYSTEM_VALUE_VERTEX_ID', 9)
SYSTEM_VALUE_INSTANCE_ID = gl_system_value.define('SYSTEM_VALUE_INSTANCE_ID', 10)
SYSTEM_VALUE_INSTANCE_INDEX = gl_system_value.define('SYSTEM_VALUE_INSTANCE_INDEX', 11)
SYSTEM_VALUE_VERTEX_ID_ZERO_BASE = gl_system_value.define('SYSTEM_VALUE_VERTEX_ID_ZERO_BASE', 12)
SYSTEM_VALUE_BASE_VERTEX = gl_system_value.define('SYSTEM_VALUE_BASE_VERTEX', 13)
SYSTEM_VALUE_FIRST_VERTEX = gl_system_value.define('SYSTEM_VALUE_FIRST_VERTEX', 14)
SYSTEM_VALUE_IS_INDEXED_DRAW = gl_system_value.define('SYSTEM_VALUE_IS_INDEXED_DRAW', 15)
SYSTEM_VALUE_BASE_INSTANCE = gl_system_value.define('SYSTEM_VALUE_BASE_INSTANCE', 16)
SYSTEM_VALUE_DRAW_ID = gl_system_value.define('SYSTEM_VALUE_DRAW_ID', 17)
SYSTEM_VALUE_INVOCATION_ID = gl_system_value.define('SYSTEM_VALUE_INVOCATION_ID', 18)
SYSTEM_VALUE_FRAG_COORD = gl_system_value.define('SYSTEM_VALUE_FRAG_COORD', 19)
SYSTEM_VALUE_PIXEL_COORD = gl_system_value.define('SYSTEM_VALUE_PIXEL_COORD', 20)
SYSTEM_VALUE_FRAG_COORD_Z = gl_system_value.define('SYSTEM_VALUE_FRAG_COORD_Z', 21)
SYSTEM_VALUE_FRAG_COORD_W = gl_system_value.define('SYSTEM_VALUE_FRAG_COORD_W', 22)
SYSTEM_VALUE_POINT_COORD = gl_system_value.define('SYSTEM_VALUE_POINT_COORD', 23)
SYSTEM_VALUE_LINE_COORD = gl_system_value.define('SYSTEM_VALUE_LINE_COORD', 24)
SYSTEM_VALUE_FRONT_FACE = gl_system_value.define('SYSTEM_VALUE_FRONT_FACE', 25)
SYSTEM_VALUE_FRONT_FACE_FSIGN = gl_system_value.define('SYSTEM_VALUE_FRONT_FACE_FSIGN', 26)
SYSTEM_VALUE_SAMPLE_ID = gl_system_value.define('SYSTEM_VALUE_SAMPLE_ID', 27)
SYSTEM_VALUE_SAMPLE_POS = gl_system_value.define('SYSTEM_VALUE_SAMPLE_POS', 28)
SYSTEM_VALUE_SAMPLE_POS_OR_CENTER = gl_system_value.define('SYSTEM_VALUE_SAMPLE_POS_OR_CENTER', 29)
SYSTEM_VALUE_SAMPLE_MASK_IN = gl_system_value.define('SYSTEM_VALUE_SAMPLE_MASK_IN', 30)
SYSTEM_VALUE_LAYER_ID = gl_system_value.define('SYSTEM_VALUE_LAYER_ID', 31)
SYSTEM_VALUE_HELPER_INVOCATION = gl_system_value.define('SYSTEM_VALUE_HELPER_INVOCATION', 32)
SYSTEM_VALUE_COLOR0 = gl_system_value.define('SYSTEM_VALUE_COLOR0', 33)
SYSTEM_VALUE_COLOR1 = gl_system_value.define('SYSTEM_VALUE_COLOR1', 34)
SYSTEM_VALUE_TESS_COORD = gl_system_value.define('SYSTEM_VALUE_TESS_COORD', 35)
SYSTEM_VALUE_VERTICES_IN = gl_system_value.define('SYSTEM_VALUE_VERTICES_IN', 36)
SYSTEM_VALUE_PRIMITIVE_ID = gl_system_value.define('SYSTEM_VALUE_PRIMITIVE_ID', 37)
SYSTEM_VALUE_TESS_LEVEL_OUTER = gl_system_value.define('SYSTEM_VALUE_TESS_LEVEL_OUTER', 38)
SYSTEM_VALUE_TESS_LEVEL_INNER = gl_system_value.define('SYSTEM_VALUE_TESS_LEVEL_INNER', 39)
SYSTEM_VALUE_TESS_LEVEL_OUTER_DEFAULT = gl_system_value.define('SYSTEM_VALUE_TESS_LEVEL_OUTER_DEFAULT', 40)
SYSTEM_VALUE_TESS_LEVEL_INNER_DEFAULT = gl_system_value.define('SYSTEM_VALUE_TESS_LEVEL_INNER_DEFAULT', 41)
SYSTEM_VALUE_LOCAL_INVOCATION_ID = gl_system_value.define('SYSTEM_VALUE_LOCAL_INVOCATION_ID', 42)
SYSTEM_VALUE_LOCAL_INVOCATION_INDEX = gl_system_value.define('SYSTEM_VALUE_LOCAL_INVOCATION_INDEX', 43)
SYSTEM_VALUE_GLOBAL_INVOCATION_ID = gl_system_value.define('SYSTEM_VALUE_GLOBAL_INVOCATION_ID', 44)
SYSTEM_VALUE_BASE_GLOBAL_INVOCATION_ID = gl_system_value.define('SYSTEM_VALUE_BASE_GLOBAL_INVOCATION_ID', 45)
SYSTEM_VALUE_GLOBAL_INVOCATION_INDEX = gl_system_value.define('SYSTEM_VALUE_GLOBAL_INVOCATION_INDEX', 46)
SYSTEM_VALUE_WORKGROUP_ID = gl_system_value.define('SYSTEM_VALUE_WORKGROUP_ID', 47)
SYSTEM_VALUE_BASE_WORKGROUP_ID = gl_system_value.define('SYSTEM_VALUE_BASE_WORKGROUP_ID', 48)
SYSTEM_VALUE_WORKGROUP_INDEX = gl_system_value.define('SYSTEM_VALUE_WORKGROUP_INDEX', 49)
SYSTEM_VALUE_NUM_WORKGROUPS = gl_system_value.define('SYSTEM_VALUE_NUM_WORKGROUPS', 50)
SYSTEM_VALUE_WORKGROUP_SIZE = gl_system_value.define('SYSTEM_VALUE_WORKGROUP_SIZE', 51)
SYSTEM_VALUE_GLOBAL_GROUP_SIZE = gl_system_value.define('SYSTEM_VALUE_GLOBAL_GROUP_SIZE', 52)
SYSTEM_VALUE_WORK_DIM = gl_system_value.define('SYSTEM_VALUE_WORK_DIM', 53)
SYSTEM_VALUE_USER_DATA_AMD = gl_system_value.define('SYSTEM_VALUE_USER_DATA_AMD', 54)
SYSTEM_VALUE_DEVICE_INDEX = gl_system_value.define('SYSTEM_VALUE_DEVICE_INDEX', 55)
SYSTEM_VALUE_VIEW_INDEX = gl_system_value.define('SYSTEM_VALUE_VIEW_INDEX', 56)
SYSTEM_VALUE_VERTEX_CNT = gl_system_value.define('SYSTEM_VALUE_VERTEX_CNT', 57)
SYSTEM_VALUE_BARYCENTRIC_PERSP_PIXEL = gl_system_value.define('SYSTEM_VALUE_BARYCENTRIC_PERSP_PIXEL', 58)
SYSTEM_VALUE_BARYCENTRIC_PERSP_SAMPLE = gl_system_value.define('SYSTEM_VALUE_BARYCENTRIC_PERSP_SAMPLE', 59)
SYSTEM_VALUE_BARYCENTRIC_PERSP_CENTROID = gl_system_value.define('SYSTEM_VALUE_BARYCENTRIC_PERSP_CENTROID', 60)
SYSTEM_VALUE_BARYCENTRIC_PERSP_CENTER_RHW = gl_system_value.define('SYSTEM_VALUE_BARYCENTRIC_PERSP_CENTER_RHW', 61)
SYSTEM_VALUE_BARYCENTRIC_LINEAR_PIXEL = gl_system_value.define('SYSTEM_VALUE_BARYCENTRIC_LINEAR_PIXEL', 62)
SYSTEM_VALUE_BARYCENTRIC_LINEAR_CENTROID = gl_system_value.define('SYSTEM_VALUE_BARYCENTRIC_LINEAR_CENTROID', 63)
SYSTEM_VALUE_BARYCENTRIC_LINEAR_SAMPLE = gl_system_value.define('SYSTEM_VALUE_BARYCENTRIC_LINEAR_SAMPLE', 64)
SYSTEM_VALUE_BARYCENTRIC_PULL_MODEL = gl_system_value.define('SYSTEM_VALUE_BARYCENTRIC_PULL_MODEL', 65)
SYSTEM_VALUE_BARYCENTRIC_PERSP_COORD = gl_system_value.define('SYSTEM_VALUE_BARYCENTRIC_PERSP_COORD', 66)
SYSTEM_VALUE_BARYCENTRIC_LINEAR_COORD = gl_system_value.define('SYSTEM_VALUE_BARYCENTRIC_LINEAR_COORD', 67)
SYSTEM_VALUE_RAY_LAUNCH_ID = gl_system_value.define('SYSTEM_VALUE_RAY_LAUNCH_ID', 68)
SYSTEM_VALUE_RAY_LAUNCH_SIZE = gl_system_value.define('SYSTEM_VALUE_RAY_LAUNCH_SIZE', 69)
SYSTEM_VALUE_RAY_WORLD_ORIGIN = gl_system_value.define('SYSTEM_VALUE_RAY_WORLD_ORIGIN', 70)
SYSTEM_VALUE_RAY_WORLD_DIRECTION = gl_system_value.define('SYSTEM_VALUE_RAY_WORLD_DIRECTION', 71)
SYSTEM_VALUE_RAY_OBJECT_ORIGIN = gl_system_value.define('SYSTEM_VALUE_RAY_OBJECT_ORIGIN', 72)
SYSTEM_VALUE_RAY_OBJECT_DIRECTION = gl_system_value.define('SYSTEM_VALUE_RAY_OBJECT_DIRECTION', 73)
SYSTEM_VALUE_RAY_T_MIN = gl_system_value.define('SYSTEM_VALUE_RAY_T_MIN', 74)
SYSTEM_VALUE_RAY_T_MAX = gl_system_value.define('SYSTEM_VALUE_RAY_T_MAX', 75)
SYSTEM_VALUE_RAY_OBJECT_TO_WORLD = gl_system_value.define('SYSTEM_VALUE_RAY_OBJECT_TO_WORLD', 76)
SYSTEM_VALUE_RAY_WORLD_TO_OBJECT = gl_system_value.define('SYSTEM_VALUE_RAY_WORLD_TO_OBJECT', 77)
SYSTEM_VALUE_RAY_HIT_KIND = gl_system_value.define('SYSTEM_VALUE_RAY_HIT_KIND', 78)
SYSTEM_VALUE_RAY_FLAGS = gl_system_value.define('SYSTEM_VALUE_RAY_FLAGS', 79)
SYSTEM_VALUE_RAY_GEOMETRY_INDEX = gl_system_value.define('SYSTEM_VALUE_RAY_GEOMETRY_INDEX', 80)
SYSTEM_VALUE_RAY_INSTANCE_CUSTOM_INDEX = gl_system_value.define('SYSTEM_VALUE_RAY_INSTANCE_CUSTOM_INDEX', 81)
SYSTEM_VALUE_CULL_MASK = gl_system_value.define('SYSTEM_VALUE_CULL_MASK', 82)
SYSTEM_VALUE_RAY_TRIANGLE_VERTEX_POSITIONS = gl_system_value.define('SYSTEM_VALUE_RAY_TRIANGLE_VERTEX_POSITIONS', 83)
SYSTEM_VALUE_MESH_VIEW_COUNT = gl_system_value.define('SYSTEM_VALUE_MESH_VIEW_COUNT', 84)
SYSTEM_VALUE_MESH_VIEW_INDICES = gl_system_value.define('SYSTEM_VALUE_MESH_VIEW_INDICES', 85)
SYSTEM_VALUE_GS_HEADER_IR3 = gl_system_value.define('SYSTEM_VALUE_GS_HEADER_IR3', 86)
SYSTEM_VALUE_TCS_HEADER_IR3 = gl_system_value.define('SYSTEM_VALUE_TCS_HEADER_IR3', 87)
SYSTEM_VALUE_REL_PATCH_ID_IR3 = gl_system_value.define('SYSTEM_VALUE_REL_PATCH_ID_IR3', 88)
SYSTEM_VALUE_FRAG_SHADING_RATE = gl_system_value.define('SYSTEM_VALUE_FRAG_SHADING_RATE', 89)
SYSTEM_VALUE_FULLY_COVERED = gl_system_value.define('SYSTEM_VALUE_FULLY_COVERED', 90)
SYSTEM_VALUE_FRAG_SIZE = gl_system_value.define('SYSTEM_VALUE_FRAG_SIZE', 91)
SYSTEM_VALUE_FRAG_INVOCATION_COUNT = gl_system_value.define('SYSTEM_VALUE_FRAG_INVOCATION_COUNT', 92)
SYSTEM_VALUE_SHADER_INDEX = gl_system_value.define('SYSTEM_VALUE_SHADER_INDEX', 93)
SYSTEM_VALUE_COALESCED_INPUT_COUNT = gl_system_value.define('SYSTEM_VALUE_COALESCED_INPUT_COUNT', 94)
SYSTEM_VALUE_WARPS_PER_SM_NV = gl_system_value.define('SYSTEM_VALUE_WARPS_PER_SM_NV', 95)
SYSTEM_VALUE_SM_COUNT_NV = gl_system_value.define('SYSTEM_VALUE_SM_COUNT_NV', 96)
SYSTEM_VALUE_WARP_ID_NV = gl_system_value.define('SYSTEM_VALUE_WARP_ID_NV', 97)
SYSTEM_VALUE_SM_ID_NV = gl_system_value.define('SYSTEM_VALUE_SM_ID_NV', 98)
SYSTEM_VALUE_MAX = gl_system_value.define('SYSTEM_VALUE_MAX', 99)
# Converters between gl_system_value and the corresponding intrinsic op.
@dll.bind
def nir_intrinsic_from_system_value(val:gl_system_value) -> nir_intrinsic_op: ...
@dll.bind
def nir_system_value_from_intrinsic(intrin:nir_intrinsic_op) -> gl_system_value: ...
# Mirror of C `struct nir_unsigned_upper_bound_config` (164 bytes; fixed-size
# arrays use c.Array with a Literal length) and the range-analysis entry
# points that consume it.
@c.record
class struct_nir_unsigned_upper_bound_config(c.Struct):
  SIZE = 164
  min_subgroup_size: Annotated[Annotated[int, ctypes.c_uint32], 0]
  max_subgroup_size: Annotated[Annotated[int, ctypes.c_uint32], 4]
  max_workgroup_invocations: Annotated[Annotated[int, ctypes.c_uint32], 8]
  max_workgroup_count: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[3]], 12]
  max_workgroup_size: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[3]], 24]
  vertex_attrib_max: Annotated[c.Array[uint32_t, Literal[32]], 36]
nir_unsigned_upper_bound_config: TypeAlias = struct_nir_unsigned_upper_bound_config
@dll.bind
def nir_unsigned_upper_bound(shader:c.POINTER[nir_shader], range_ht:c.POINTER[struct_hash_table], scalar:nir_scalar, config:c.POINTER[nir_unsigned_upper_bound_config]) -> uint32_t: ...
@dll.bind
def nir_addition_might_overflow(shader:c.POINTER[nir_shader], range_ht:c.POINTER[struct_hash_table], ssa:nir_scalar, const_val:Annotated[int, ctypes.c_uint32], config:c.POINTER[nir_unsigned_upper_bound_config]) -> Annotated[bool, ctypes.c_bool]: ...
# Mirror of C `struct nir_opt_preamble_options` (56 bytes; function-pointer
# fields at 8-byte-aligned offsets) and nir_opt_preamble itself.
@c.record
class struct_nir_opt_preamble_options(c.Struct):
  SIZE = 56
  drawid_uniform: Annotated[Annotated[bool, ctypes.c_bool], 0]
  subgroup_size_uniform: Annotated[Annotated[bool, ctypes.c_bool], 1]
  load_workgroup_size_allowed: Annotated[Annotated[bool, ctypes.c_bool], 2]
  def_size: Annotated[c.CFUNCTYPE[None, [c.POINTER[nir_def], c.POINTER[Annotated[int, ctypes.c_uint32]], c.POINTER[Annotated[int, ctypes.c_uint32]], c.POINTER[nir_preamble_class]]], 8]
  preamble_storage_size: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[2]], 16]
  instr_cost_cb: Annotated[c.CFUNCTYPE[Annotated[float, ctypes.c_float], [c.POINTER[nir_instr], ctypes.c_void_p]], 24]
  rewrite_cost_cb: Annotated[c.CFUNCTYPE[Annotated[float, ctypes.c_float], [c.POINTER[nir_def], ctypes.c_void_p]], 32]
  avoid_instr_cb: Annotated[nir_instr_filter_cb, 40]
  cb_data: Annotated[ctypes.c_void_p, 48]
nir_opt_preamble_options: TypeAlias = struct_nir_opt_preamble_options
@dll.bind
def nir_opt_preamble(shader:c.POINTER[nir_shader], options:c.POINTER[nir_opt_preamble_options], size:c.POINTER[Annotated[int, ctypes.c_uint32]]) -> Annotated[bool, ctypes.c_bool]: ...
# Preamble accessor, smoothing lowering, mod analysis and misc helpers.
@dll.bind
def nir_shader_get_preamble(shader:c.POINTER[nir_shader]) -> c.POINTER[nir_function_impl]: ...
@dll.bind
def nir_lower_point_smooth(shader:c.POINTER[nir_shader], set_barycentrics:Annotated[bool, ctypes.c_bool]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_lower_poly_line_smooth(shader:c.POINTER[nir_shader], num_smooth_aa_sample:Annotated[int, ctypes.c_uint32]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_mod_analysis(val:nir_scalar, val_type:nir_alu_type, div:Annotated[int, ctypes.c_uint32], mod:c.POINTER[Annotated[int, ctypes.c_uint32]]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_remove_tex_shadow(shader:c.POINTER[nir_shader], textures_bitmask:Annotated[int, ctypes.c_uint32]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_trivialize_registers(s:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_static_workgroup_size(s:c.POINTER[nir_shader]) -> Annotated[int, ctypes.c_uint32]: ...
# Opaque handle (no fields exposed -- plain ctypes.Structure) for the
# use-dominance analysis state, plus its query API.
class struct_nir_use_dominance_state(ctypes.Structure): pass
nir_use_dominance_state: TypeAlias = struct_nir_use_dominance_state
@dll.bind
def nir_calc_use_dominance_impl(impl:c.POINTER[nir_function_impl], post_dominance:Annotated[bool, ctypes.c_bool]) -> c.POINTER[nir_use_dominance_state]: ...
@dll.bind
def nir_get_immediate_use_dominator(state:c.POINTER[nir_use_dominance_state], instr:c.POINTER[nir_instr]) -> c.POINTER[nir_instr]: ...
@dll.bind
def nir_use_dominance_lca(state:c.POINTER[nir_use_dominance_state], i1:c.POINTER[nir_instr], i2:c.POINTER[nir_instr]) -> c.POINTER[nir_instr]: ...
@dll.bind
def nir_instr_dominates_use(state:c.POINTER[nir_use_dominance_state], parent:c.POINTER[nir_instr], child:c.POINTER[nir_instr]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_print_use_dominators(state:c.POINTER[nir_use_dominance_state], instructions:c.POINTER[c.POINTER[nir_instr]], num_instructions:Annotated[int, ctypes.c_uint32]) -> None: ...
# Output-dependency records: nir_output_deps is an array of 112 per-output
# entries (16 bytes each => SIZE 1792). Note the element record is declared
# after its use -- the generator relies on lazy annotation evaluation.
@c.record
class nir_output_deps(c.Struct):
  SIZE = 1792
  output: Annotated[c.Array[nir_output_deps_output, Literal[112]], 0]
@c.record
class nir_output_deps_output(c.Struct):
  SIZE = 16
  instr_list: Annotated[c.POINTER[c.POINTER[nir_instr]], 0]
  num_instr: Annotated[Annotated[int, ctypes.c_uint32], 8]
@dll.bind
def nir_gather_output_dependencies(nir:c.POINTER[nir_shader], deps:c.POINTER[nir_output_deps]) -> None: ...
@dll.bind
def nir_free_output_dependencies(deps:c.POINTER[nir_output_deps]) -> None: ...
# Input-to-output dependency records (112 entries of 116 bytes => SIZE
# 12992) and the gather/print entry points.
@c.record
class nir_input_to_output_deps(c.Struct):
  SIZE = 12992
  output: Annotated[c.Array[nir_input_to_output_deps_output, Literal[112]], 0]
@c.record
class nir_input_to_output_deps_output(c.Struct):
  SIZE = 116
  inputs: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[28]], 0]
  defined: Annotated[Annotated[bool, ctypes.c_bool], 112]
  uses_ssbo_reads: Annotated[Annotated[bool, ctypes.c_bool], 113]
  uses_image_reads: Annotated[Annotated[bool, ctypes.c_bool], 114]
@dll.bind
def nir_gather_input_to_output_dependencies(nir:c.POINTER[nir_shader], out_deps:c.POINTER[nir_input_to_output_deps]) -> None: ...
@dll.bind
def nir_print_input_to_output_deps(deps:c.POINTER[nir_input_to_output_deps], nir:c.POINTER[nir_shader], f:c.POINTER[FILE]) -> None: ...
# Clipper variable grouping record: three 28-word bitset arrays laid out
# back to back (offsets 0/112/224, SIZE 336).
@c.record
class nir_output_clipper_var_groups(c.Struct):
  SIZE = 336
  pos_only: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[28]], 0]
  var_only: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[28]], 112]
  both: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[28]], 224]
@dll.bind
def nir_gather_output_clipper_var_groups(nir:c.POINTER[nir_shader], groups:c.POINTER[nir_output_clipper_var_groups]) -> None: ...
# nir_builder construction (returned by value) and the per-instruction-kind
# pass callback function-pointer types.
@dll.bind
def nir_builder_init_simple_shader(stage:gl_shader_stage, options:c.POINTER[nir_shader_compiler_options], name:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> nir_builder: ...
nir_instr_pass_cb: TypeAlias = c.CFUNCTYPE[Annotated[bool, ctypes.c_bool], [c.POINTER[struct_nir_builder], c.POINTER[struct_nir_instr], ctypes.c_void_p]]
nir_intrinsic_pass_cb: TypeAlias = c.CFUNCTYPE[Annotated[bool, ctypes.c_bool], [c.POINTER[struct_nir_builder], c.POINTER[struct_nir_intrinsic_instr], ctypes.c_void_p]]
nir_alu_pass_cb: TypeAlias = c.CFUNCTYPE[Annotated[bool, ctypes.c_bool], [c.POINTER[struct_nir_builder], c.POINTER[struct_nir_alu_instr], ctypes.c_void_p]]
nir_tex_pass_cb: TypeAlias = c.CFUNCTYPE[Annotated[bool, ctypes.c_bool], [c.POINTER[struct_nir_builder], c.POINTER[struct_nir_tex_instr], ctypes.c_void_p]]
nir_phi_pass_cb: TypeAlias = c.CFUNCTYPE[Annotated[bool, ctypes.c_bool], [c.POINTER[struct_nir_builder], c.POINTER[struct_nir_phi_instr], ctypes.c_void_p]]
# nir_builder API: instruction insertion, ALU construction (1-4 sources),
# control-flow push/pop (if/else/loop), system-value loads, type conversion
# and small convenience emitters.
@dll.bind
def nir_builder_instr_insert(build:c.POINTER[nir_builder], instr:c.POINTER[nir_instr]) -> None: ...
@dll.bind
def nir_builder_instr_insert_at_top(build:c.POINTER[nir_builder], instr:c.POINTER[nir_instr]) -> None: ...
@dll.bind
def nir_build_alu(build:c.POINTER[nir_builder], op:nir_op, src0:c.POINTER[nir_def], src1:c.POINTER[nir_def], src2:c.POINTER[nir_def], src3:c.POINTER[nir_def]) -> c.POINTER[nir_def]: ...
@dll.bind
def nir_build_alu1(build:c.POINTER[nir_builder], op:nir_op, src0:c.POINTER[nir_def]) -> c.POINTER[nir_def]: ...
@dll.bind
def nir_build_alu2(build:c.POINTER[nir_builder], op:nir_op, src0:c.POINTER[nir_def], src1:c.POINTER[nir_def]) -> c.POINTER[nir_def]: ...
@dll.bind
def nir_build_alu3(build:c.POINTER[nir_builder], op:nir_op, src0:c.POINTER[nir_def], src1:c.POINTER[nir_def], src2:c.POINTER[nir_def]) -> c.POINTER[nir_def]: ...
@dll.bind
def nir_build_alu4(build:c.POINTER[nir_builder], op:nir_op, src0:c.POINTER[nir_def], src1:c.POINTER[nir_def], src2:c.POINTER[nir_def], src3:c.POINTER[nir_def]) -> c.POINTER[nir_def]: ...
@dll.bind
def nir_build_alu_src_arr(build:c.POINTER[nir_builder], op:nir_op, srcs:c.POINTER[c.POINTER[nir_def]]) -> c.POINTER[nir_def]: ...
@dll.bind
def nir_build_tex_deref_instr(build:c.POINTER[nir_builder], op:nir_texop, texture:c.POINTER[nir_deref_instr], sampler:c.POINTER[nir_deref_instr], num_extra_srcs:Annotated[int, ctypes.c_uint32], extra_srcs:c.POINTER[nir_tex_src]) -> c.POINTER[nir_def]: ...
@dll.bind
def nir_builder_cf_insert(build:c.POINTER[nir_builder], cf:c.POINTER[nir_cf_node]) -> None: ...
@dll.bind
def nir_builder_is_inside_cf(build:c.POINTER[nir_builder], cf_node:c.POINTER[nir_cf_node]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def nir_push_if(build:c.POINTER[nir_builder], condition:c.POINTER[nir_def]) -> c.POINTER[nir_if]: ...
@dll.bind
def nir_push_else(build:c.POINTER[nir_builder], nif:c.POINTER[nir_if]) -> c.POINTER[nir_if]: ...
@dll.bind
def nir_pop_if(build:c.POINTER[nir_builder], nif:c.POINTER[nir_if]) -> None: ...
@dll.bind
def nir_if_phi(build:c.POINTER[nir_builder], then_def:c.POINTER[nir_def], else_def:c.POINTER[nir_def]) -> c.POINTER[nir_def]: ...
@dll.bind
def nir_push_loop(build:c.POINTER[nir_builder]) -> c.POINTER[nir_loop]: ...
@dll.bind
def nir_push_continue(build:c.POINTER[nir_builder], loop:c.POINTER[nir_loop]) -> c.POINTER[nir_loop]: ...
@dll.bind
def nir_pop_loop(build:c.POINTER[nir_builder], loop:c.POINTER[nir_loop]) -> None: ...
@dll.bind
def nir_builder_alu_instr_finish_and_insert(build:c.POINTER[nir_builder], instr:c.POINTER[nir_alu_instr]) -> c.POINTER[nir_def]: ...
@dll.bind
def nir_load_system_value(build:c.POINTER[nir_builder], op:nir_intrinsic_op, index:Annotated[int, ctypes.c_int32], num_components:Annotated[int, ctypes.c_uint32], bit_size:Annotated[int, ctypes.c_uint32]) -> c.POINTER[nir_def]: ...
@dll.bind
def nir_type_convert(b:c.POINTER[nir_builder], src:c.POINTER[nir_def], src_type:nir_alu_type, dest_type:nir_alu_type, rnd:nir_rounding_mode) -> c.POINTER[nir_def]: ...
@dll.bind
def nir_vec_scalars(build:c.POINTER[nir_builder], comp:c.POINTER[nir_scalar], num_components:Annotated[int, ctypes.c_uint32]) -> c.POINTER[nir_def]: ...
@dll.bind
def nir_ssa_for_alu_src(build:c.POINTER[nir_builder], instr:c.POINTER[nir_alu_instr], srcn:Annotated[int, ctypes.c_uint32]) -> c.POINTER[nir_def]: ...
@dll.bind
def nir_build_string(build:c.POINTER[nir_builder], value:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> c.POINTER[nir_def]: ...
@dll.bind
def nir_compare_func(b:c.POINTER[nir_builder], func:enum_compare_func, src0:c.POINTER[nir_def], src1:c.POINTER[nir_def]) -> c.POINTER[nir_def]: ...
@dll.bind
def nir_gen_rect_vertices(b:c.POINTER[nir_builder], z:c.POINTER[nir_def], w:c.POINTER[nir_def]) -> c.POINTER[nir_def]: ...
@dll.bind
def nir_printf_fmt(b:c.POINTER[nir_builder], ptr_bit_size:Annotated[int, ctypes.c_uint32], fmt:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> None: ...
@dll.bind
def nir_printf_fmt_at_px(b:c.POINTER[nir_builder], ptr_bit_size:Annotated[int, ctypes.c_uint32], x:Annotated[int, ctypes.c_uint32], y:Annotated[int, ctypes.c_uint32], fmt:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> None: ...
@dll.bind
def nir_call_serialized(build:c.POINTER[nir_builder], serialized:c.POINTER[uint32_t], serialized_size_B:size_t, args:c.POINTER[c.POINTER[nir_def]]) -> c.POINTER[nir_def]: ...
# Mirror of the C `nir_lower_packing_op` enum: pack/unpack op selectors,
# with nir_lower_packing_num_ops as the count sentinel.
class nir_lower_packing_op(Annotated[int, ctypes.c_uint32], c.Enum): pass
nir_lower_packing_op_pack_64_2x32 = nir_lower_packing_op.define('nir_lower_packing_op_pack_64_2x32', 0)
nir_lower_packing_op_unpack_64_2x32 = nir_lower_packing_op.define('nir_lower_packing_op_unpack_64_2x32', 1)
nir_lower_packing_op_pack_64_4x16 = nir_lower_packing_op.define('nir_lower_packing_op_pack_64_4x16', 2)
nir_lower_packing_op_unpack_64_4x16 = nir_lower_packing_op.define('nir_lower_packing_op_unpack_64_4x16', 3)
nir_lower_packing_op_pack_32_2x16 = nir_lower_packing_op.define('nir_lower_packing_op_pack_32_2x16', 4)
nir_lower_packing_op_unpack_32_2x16 = nir_lower_packing_op.define('nir_lower_packing_op_unpack_32_2x16', 5)
nir_lower_packing_op_pack_32_4x8 = nir_lower_packing_op.define('nir_lower_packing_op_pack_32_4x8', 6)
nir_lower_packing_op_unpack_32_4x8 = nir_lower_packing_op.define('nir_lower_packing_op_unpack_32_4x8', 7)
nir_lower_packing_num_ops = nir_lower_packing_op.define('nir_lower_packing_num_ops', 8)
# Mirror of Mesa's growable `struct blob` write buffer (32 bytes) and the
# shader serializer that writes into it.
@c.record
class struct_blob(c.Struct):
  SIZE = 32
  data: Annotated[c.POINTER[uint8_t], 0]
  allocated: Annotated[size_t, 8]
  size: Annotated[size_t, 16]
  fixed_allocation: Annotated[Annotated[bool, ctypes.c_bool], 24]
  out_of_memory: Annotated[Annotated[bool, ctypes.c_bool], 25]
@dll.bind
def nir_serialize(blob:c.POINTER[struct_blob], nir:c.POINTER[nir_shader], strip:Annotated[bool, ctypes.c_bool]) -> None: ...
# Mirror of `struct blob_reader` (read cursor over a serialized blob) and
# the shader/function (de)serialization entry points.
@c.record
class struct_blob_reader(c.Struct):
  SIZE = 32
  data: Annotated[c.POINTER[uint8_t], 0]
  end: Annotated[c.POINTER[uint8_t], 8]
  current: Annotated[c.POINTER[uint8_t], 16]
  overrun: Annotated[Annotated[bool, ctypes.c_bool], 24]
@dll.bind
def nir_deserialize(mem_ctx:ctypes.c_void_p, options:c.POINTER[struct_nir_shader_compiler_options], blob:c.POINTER[struct_blob_reader]) -> c.POINTER[nir_shader]: ...
@dll.bind
def nir_serialize_function(blob:c.POINTER[struct_blob], fxn:c.POINTER[nir_function]) -> None: ...
@dll.bind
def nir_deserialize_function(mem_ctx:ctypes.c_void_p, options:c.POINTER[struct_nir_shader_compiler_options], blob:c.POINTER[struct_blob_reader]) -> c.POINTER[nir_function]: ...
# C enum mirror: indices naming the const-index slots an intrinsic can carry
# (base offset, write mask, alignment, atomic op, ...). Values must match the
# C header exactly — generated table, do not reorder or renumber.
class nir_intrinsic_index_flag(Annotated[int, ctypes.c_uint32], c.Enum): pass
NIR_INTRINSIC_BASE = nir_intrinsic_index_flag.define('NIR_INTRINSIC_BASE', 0)
NIR_INTRINSIC_WRITE_MASK = nir_intrinsic_index_flag.define('NIR_INTRINSIC_WRITE_MASK', 1)
NIR_INTRINSIC_STREAM_ID = nir_intrinsic_index_flag.define('NIR_INTRINSIC_STREAM_ID', 2)
NIR_INTRINSIC_UCP_ID = nir_intrinsic_index_flag.define('NIR_INTRINSIC_UCP_ID', 3)
NIR_INTRINSIC_RANGE_BASE = nir_intrinsic_index_flag.define('NIR_INTRINSIC_RANGE_BASE', 4)
NIR_INTRINSIC_RANGE = nir_intrinsic_index_flag.define('NIR_INTRINSIC_RANGE', 5)
NIR_INTRINSIC_DESC_SET = nir_intrinsic_index_flag.define('NIR_INTRINSIC_DESC_SET', 6)
NIR_INTRINSIC_BINDING = nir_intrinsic_index_flag.define('NIR_INTRINSIC_BINDING', 7)
NIR_INTRINSIC_COMPONENT = nir_intrinsic_index_flag.define('NIR_INTRINSIC_COMPONENT', 8)
NIR_INTRINSIC_COLUMN = nir_intrinsic_index_flag.define('NIR_INTRINSIC_COLUMN', 9)
NIR_INTRINSIC_INTERP_MODE = nir_intrinsic_index_flag.define('NIR_INTRINSIC_INTERP_MODE', 10)
NIR_INTRINSIC_REDUCTION_OP = nir_intrinsic_index_flag.define('NIR_INTRINSIC_REDUCTION_OP', 11)
NIR_INTRINSIC_CLUSTER_SIZE = nir_intrinsic_index_flag.define('NIR_INTRINSIC_CLUSTER_SIZE', 12)
NIR_INTRINSIC_PARAM_IDX = nir_intrinsic_index_flag.define('NIR_INTRINSIC_PARAM_IDX', 13)
NIR_INTRINSIC_IMAGE_DIM = nir_intrinsic_index_flag.define('NIR_INTRINSIC_IMAGE_DIM', 14)
NIR_INTRINSIC_IMAGE_ARRAY = nir_intrinsic_index_flag.define('NIR_INTRINSIC_IMAGE_ARRAY', 15)
NIR_INTRINSIC_FORMAT = nir_intrinsic_index_flag.define('NIR_INTRINSIC_FORMAT', 16)
NIR_INTRINSIC_ACCESS = nir_intrinsic_index_flag.define('NIR_INTRINSIC_ACCESS', 17)
NIR_INTRINSIC_CALL_IDX = nir_intrinsic_index_flag.define('NIR_INTRINSIC_CALL_IDX', 18)
NIR_INTRINSIC_STACK_SIZE = nir_intrinsic_index_flag.define('NIR_INTRINSIC_STACK_SIZE', 19)
NIR_INTRINSIC_ALIGN_MUL = nir_intrinsic_index_flag.define('NIR_INTRINSIC_ALIGN_MUL', 20)
NIR_INTRINSIC_ALIGN_OFFSET = nir_intrinsic_index_flag.define('NIR_INTRINSIC_ALIGN_OFFSET', 21)
NIR_INTRINSIC_DESC_TYPE = nir_intrinsic_index_flag.define('NIR_INTRINSIC_DESC_TYPE', 22)
NIR_INTRINSIC_SRC_TYPE = nir_intrinsic_index_flag.define('NIR_INTRINSIC_SRC_TYPE', 23)
NIR_INTRINSIC_DEST_TYPE = nir_intrinsic_index_flag.define('NIR_INTRINSIC_DEST_TYPE', 24)
NIR_INTRINSIC_SRC_BASE_TYPE = nir_intrinsic_index_flag.define('NIR_INTRINSIC_SRC_BASE_TYPE', 25)
NIR_INTRINSIC_SRC_BASE_TYPE2 = nir_intrinsic_index_flag.define('NIR_INTRINSIC_SRC_BASE_TYPE2', 26)
NIR_INTRINSIC_DEST_BASE_TYPE = nir_intrinsic_index_flag.define('NIR_INTRINSIC_DEST_BASE_TYPE', 27)
NIR_INTRINSIC_SWIZZLE_MASK = nir_intrinsic_index_flag.define('NIR_INTRINSIC_SWIZZLE_MASK', 28)
NIR_INTRINSIC_FETCH_INACTIVE = nir_intrinsic_index_flag.define('NIR_INTRINSIC_FETCH_INACTIVE', 29)
NIR_INTRINSIC_OFFSET0 = nir_intrinsic_index_flag.define('NIR_INTRINSIC_OFFSET0', 30)
NIR_INTRINSIC_OFFSET1 = nir_intrinsic_index_flag.define('NIR_INTRINSIC_OFFSET1', 31)
NIR_INTRINSIC_ST64 = nir_intrinsic_index_flag.define('NIR_INTRINSIC_ST64', 32)
NIR_INTRINSIC_ARG_UPPER_BOUND_U32_AMD = nir_intrinsic_index_flag.define('NIR_INTRINSIC_ARG_UPPER_BOUND_U32_AMD', 33)
NIR_INTRINSIC_DST_ACCESS = nir_intrinsic_index_flag.define('NIR_INTRINSIC_DST_ACCESS', 34)
NIR_INTRINSIC_SRC_ACCESS = nir_intrinsic_index_flag.define('NIR_INTRINSIC_SRC_ACCESS', 35)
NIR_INTRINSIC_DRIVER_LOCATION = nir_intrinsic_index_flag.define('NIR_INTRINSIC_DRIVER_LOCATION', 36)
NIR_INTRINSIC_MEMORY_SEMANTICS = nir_intrinsic_index_flag.define('NIR_INTRINSIC_MEMORY_SEMANTICS', 37)
NIR_INTRINSIC_MEMORY_MODES = nir_intrinsic_index_flag.define('NIR_INTRINSIC_MEMORY_MODES', 38)
NIR_INTRINSIC_MEMORY_SCOPE = nir_intrinsic_index_flag.define('NIR_INTRINSIC_MEMORY_SCOPE', 39)
NIR_INTRINSIC_EXECUTION_SCOPE = nir_intrinsic_index_flag.define('NIR_INTRINSIC_EXECUTION_SCOPE', 40)
NIR_INTRINSIC_IO_SEMANTICS = nir_intrinsic_index_flag.define('NIR_INTRINSIC_IO_SEMANTICS', 41)
NIR_INTRINSIC_IO_XFB = nir_intrinsic_index_flag.define('NIR_INTRINSIC_IO_XFB', 42)
NIR_INTRINSIC_IO_XFB2 = nir_intrinsic_index_flag.define('NIR_INTRINSIC_IO_XFB2', 43)
NIR_INTRINSIC_RAY_QUERY_VALUE = nir_intrinsic_index_flag.define('NIR_INTRINSIC_RAY_QUERY_VALUE', 44)
NIR_INTRINSIC_COMMITTED = nir_intrinsic_index_flag.define('NIR_INTRINSIC_COMMITTED', 45)
NIR_INTRINSIC_ROUNDING_MODE = nir_intrinsic_index_flag.define('NIR_INTRINSIC_ROUNDING_MODE', 46)
NIR_INTRINSIC_SATURATE = nir_intrinsic_index_flag.define('NIR_INTRINSIC_SATURATE', 47)
NIR_INTRINSIC_SYNCHRONOUS = nir_intrinsic_index_flag.define('NIR_INTRINSIC_SYNCHRONOUS', 48)
NIR_INTRINSIC_VALUE_ID = nir_intrinsic_index_flag.define('NIR_INTRINSIC_VALUE_ID', 49)
NIR_INTRINSIC_SIGN_EXTEND = nir_intrinsic_index_flag.define('NIR_INTRINSIC_SIGN_EXTEND', 50)
NIR_INTRINSIC_FLAGS = nir_intrinsic_index_flag.define('NIR_INTRINSIC_FLAGS', 51)
NIR_INTRINSIC_ATOMIC_OP = nir_intrinsic_index_flag.define('NIR_INTRINSIC_ATOMIC_OP', 52)
NIR_INTRINSIC_RESOURCE_BLOCK_INTEL = nir_intrinsic_index_flag.define('NIR_INTRINSIC_RESOURCE_BLOCK_INTEL', 53)
NIR_INTRINSIC_RESOURCE_ACCESS_INTEL = nir_intrinsic_index_flag.define('NIR_INTRINSIC_RESOURCE_ACCESS_INTEL', 54)
NIR_INTRINSIC_NUM_COMPONENTS = nir_intrinsic_index_flag.define('NIR_INTRINSIC_NUM_COMPONENTS', 55)
NIR_INTRINSIC_NUM_ARRAY_ELEMS = nir_intrinsic_index_flag.define('NIR_INTRINSIC_NUM_ARRAY_ELEMS', 56)
NIR_INTRINSIC_BIT_SIZE = nir_intrinsic_index_flag.define('NIR_INTRINSIC_BIT_SIZE', 57)
NIR_INTRINSIC_DIVERGENT = nir_intrinsic_index_flag.define('NIR_INTRINSIC_DIVERGENT', 58)
NIR_INTRINSIC_LEGACY_FABS = nir_intrinsic_index_flag.define('NIR_INTRINSIC_LEGACY_FABS', 59)
NIR_INTRINSIC_LEGACY_FNEG = nir_intrinsic_index_flag.define('NIR_INTRINSIC_LEGACY_FNEG', 60)
NIR_INTRINSIC_LEGACY_FSAT = nir_intrinsic_index_flag.define('NIR_INTRINSIC_LEGACY_FSAT', 61)
NIR_INTRINSIC_CMAT_DESC = nir_intrinsic_index_flag.define('NIR_INTRINSIC_CMAT_DESC', 62)
NIR_INTRINSIC_MATRIX_LAYOUT = nir_intrinsic_index_flag.define('NIR_INTRINSIC_MATRIX_LAYOUT', 63)
NIR_INTRINSIC_CMAT_SIGNED_MASK = nir_intrinsic_index_flag.define('NIR_INTRINSIC_CMAT_SIGNED_MASK', 64)
NIR_INTRINSIC_ALU_OP = nir_intrinsic_index_flag.define('NIR_INTRINSIC_ALU_OP', 65)
NIR_INTRINSIC_NEG_LO_AMD = nir_intrinsic_index_flag.define('NIR_INTRINSIC_NEG_LO_AMD', 66)
NIR_INTRINSIC_NEG_HI_AMD = nir_intrinsic_index_flag.define('NIR_INTRINSIC_NEG_HI_AMD', 67)
NIR_INTRINSIC_SYSTOLIC_DEPTH = nir_intrinsic_index_flag.define('NIR_INTRINSIC_SYSTOLIC_DEPTH', 68)
NIR_INTRINSIC_REPEAT_COUNT = nir_intrinsic_index_flag.define('NIR_INTRINSIC_REPEAT_COUNT', 69)
NIR_INTRINSIC_DST_CMAT_DESC = nir_intrinsic_index_flag.define('NIR_INTRINSIC_DST_CMAT_DESC', 70)
NIR_INTRINSIC_SRC_CMAT_DESC = nir_intrinsic_index_flag.define('NIR_INTRINSIC_SRC_CMAT_DESC', 71)
NIR_INTRINSIC_EXPLICIT_COORD = nir_intrinsic_index_flag.define('NIR_INTRINSIC_EXPLICIT_COORD', 72)
NIR_INTRINSIC_FMT_IDX = nir_intrinsic_index_flag.define('NIR_INTRINSIC_FMT_IDX', 73)
NIR_INTRINSIC_PREAMBLE_CLASS = nir_intrinsic_index_flag.define('NIR_INTRINSIC_PREAMBLE_CLASS', 74)
NIR_INTRINSIC_NUM_INDEX_FLAGS = nir_intrinsic_index_flag.define('NIR_INTRINSIC_NUM_INDEX_FLAGS', 75)
# Global char*[75] of human-readable names for the 75 real flags above (the NUM
# sentinel has no name). Wrapped in try/except because the symbol may be absent
# from the loaded library build.
try: nir_intrinsic_index_names = c.Array[c.POINTER[Annotated[bytes, ctypes.c_char]], Literal[75]].in_dll(dll, 'nir_intrinsic_index_names') # type: ignore
except (ValueError,AttributeError): pass
# C enum mirror: NVIDIA device category (integrated / discrete / SoC); stored in one byte.
class enum_nv_device_type(Annotated[int, ctypes.c_ubyte], c.Enum): pass
NV_DEVICE_TYPE_IGP = enum_nv_device_type.define('NV_DEVICE_TYPE_IGP', 0)
NV_DEVICE_TYPE_DIS = enum_nv_device_type.define('NV_DEVICE_TYPE_DIS', 1)
NV_DEVICE_TYPE_SOC = enum_nv_device_type.define('NV_DEVICE_TYPE_SOC', 2)
# GPU description consumed by the NAK compiler below.
# NOTE: `pci` references struct_nv_device_info_pci, which is defined after this
# class — resolution relies on the binding framework's deferred annotation handling.
@c.record
class struct_nv_device_info(c.Struct):
  SIZE = 128
  type: Annotated[enum_nv_device_type, 0]
  device_id: Annotated[uint16_t, 2]
  chipset: Annotated[uint16_t, 4]
  device_name: Annotated[c.Array[Annotated[bytes, ctypes.c_char], Literal[64]], 6]    # fixed char[64]
  chipset_name: Annotated[c.Array[Annotated[bytes, ctypes.c_char], Literal[16]], 70]  # fixed char[16]
  pci: Annotated[struct_nv_device_info_pci, 86]
  sm: Annotated[uint8_t, 92]                 # SM architecture version
  gpc_count: Annotated[uint8_t, 93]
  tpc_count: Annotated[uint16_t, 94]
  mp_per_tpc: Annotated[uint8_t, 96]
  max_warps_per_mp: Annotated[uint8_t, 97]
  # Class IDs for the hardware engines (copy/2d/3d/m2mf/compute).
  cls_copy: Annotated[uint16_t, 98]
  cls_eng2d: Annotated[uint16_t, 100]
  cls_eng3d: Annotated[uint16_t, 102]
  cls_m2mf: Annotated[uint16_t, 104]
  cls_compute: Annotated[uint16_t, 106]
  vram_size_B: Annotated[uint64_t, 112]      # bytes (note 4-byte gap before here for alignment)
  bar_size_B: Annotated[uint64_t, 120]
# PCI address of the device (domain:bus:dev.func) plus revision.
@c.record
class struct_nv_device_info_pci(c.Struct):
  SIZE = 6
  domain: Annotated[uint16_t, 0]
  bus: Annotated[uint8_t, 2]
  dev: Annotated[uint8_t, 3]
  func: Annotated[uint8_t, 4]
  revision_id: Annotated[uint8_t, 5]
# Opaque handle to the NAK compiler; only ever used behind a pointer.
class struct_nak_compiler(ctypes.Structure): pass
# Compiler lifecycle: create from a device description, destroy when done.
@dll.bind
def nak_compiler_create(dev:c.POINTER[struct_nv_device_info]) -> c.POINTER[struct_nak_compiler]: ...
@dll.bind
def nak_compiler_destroy(nak:c.POINTER[struct_nak_compiler]) -> None: ...
# Bitmask of active debug flags for this compiler instance.
@dll.bind
def nak_debug_flags(nak:c.POINTER[struct_nak_compiler]) -> uint64_t: ...
# NIR compiler options this backend expects shaders to be built with.
@dll.bind
def nak_nir_options(nak:c.POINTER[struct_nak_compiler]) -> c.POINTER[struct_nir_shader_compiler_options]: ...
# Shader preparation passes run before/independent of final compilation.
@dll.bind
def nak_preprocess_nir(nir:c.POINTER[nir_shader], nak:c.POINTER[struct_nak_compiler]) -> None: ...
# Returns whether the pass made progress (bool), per usual NIR pass convention — TODO confirm.
@dll.bind
def nak_nir_lower_image_addrs(nir:c.POINTER[nir_shader], nak:c.POINTER[struct_nak_compiler]) -> Annotated[bool, ctypes.c_bool]: ...
# One sample position packed as two 4-bit fixed-point coords in a single byte.
# Bitfield syntax: Annotated[type, byte_offset, bit_width, bit_shift].
@c.record
class struct_nak_sample_location(c.Struct):
  SIZE = 1
  x_u4: Annotated[uint8_t, 0, 4, 0]   # low nibble
  y_u4: Annotated[uint8_t, 0, 4, 4]   # high nibble
# 16-bit per-sample coverage mask.
@c.record
class struct_nak_sample_mask(c.Struct):
  SIZE = 2
  sample_mask: Annotated[uint16_t, 0]
# Fragment-shader compile key: state that specializes FS codegen.
@c.record
class struct_nak_fs_key(c.Struct):
  SIZE = 12
  zs_self_dep: Annotated[Annotated[bool, ctypes.c_bool], 0]
  force_sample_shading: Annotated[Annotated[bool, ctypes.c_bool], 1]
  uses_underestimate: Annotated[Annotated[bool, ctypes.c_bool], 2]
  sample_info_cb: Annotated[uint8_t, 3]              # constant-buffer index holding sample info
  sample_locations_offset: Annotated[uint32_t, 4]    # byte offset into that cbuf
  sample_masks_offset: Annotated[uint32_t, 8]
# Final lowering before codegen; fs_key may be NULL for non-fragment stages — TODO confirm.
@dll.bind
def nak_postprocess_nir(nir:c.POINTER[nir_shader], nak:c.POINTER[struct_nak_compiler], robust2_modes:nir_variable_mode, fs_key:c.POINTER[struct_nak_fs_key]) -> None: ...
# Tessellation-state enums (all single-byte): primitive domain, partitioning
# spacing, and output primitive kind/winding.
class enum_nak_ts_domain(Annotated[int, ctypes.c_ubyte], c.Enum): pass
NAK_TS_DOMAIN_ISOLINE = enum_nak_ts_domain.define('NAK_TS_DOMAIN_ISOLINE', 0)
NAK_TS_DOMAIN_TRIANGLE = enum_nak_ts_domain.define('NAK_TS_DOMAIN_TRIANGLE', 1)
NAK_TS_DOMAIN_QUAD = enum_nak_ts_domain.define('NAK_TS_DOMAIN_QUAD', 2)
class enum_nak_ts_spacing(Annotated[int, ctypes.c_ubyte], c.Enum): pass
NAK_TS_SPACING_INTEGER = enum_nak_ts_spacing.define('NAK_TS_SPACING_INTEGER', 0)
NAK_TS_SPACING_FRACT_ODD = enum_nak_ts_spacing.define('NAK_TS_SPACING_FRACT_ODD', 1)
NAK_TS_SPACING_FRACT_EVEN = enum_nak_ts_spacing.define('NAK_TS_SPACING_FRACT_EVEN', 2)
class enum_nak_ts_prims(Annotated[int, ctypes.c_ubyte], c.Enum): pass
NAK_TS_PRIMS_POINTS = enum_nak_ts_prims.define('NAK_TS_PRIMS_POINTS', 0)
NAK_TS_PRIMS_LINES = enum_nak_ts_prims.define('NAK_TS_PRIMS_LINES', 1)
NAK_TS_PRIMS_TRIANGLES_CW = enum_nak_ts_prims.define('NAK_TS_PRIMS_TRIANGLES_CW', 2)
NAK_TS_PRIMS_TRIANGLES_CCW = enum_nak_ts_prims.define('NAK_TS_PRIMS_TRIANGLES_CCW', 3)
# Transform-feedback layout: per-buffer stride/stream/attr-count plus a
# [4 buffers][128 attrs] index table.
@c.record
class struct_nak_xfb_info(c.Struct):
  SIZE = 536
  stride: Annotated[c.Array[uint32_t, Literal[4]], 0]
  stream: Annotated[c.Array[uint8_t, Literal[4]], 16]
  attr_count: Annotated[c.Array[uint8_t, Literal[4]], 20]
  attr_index: Annotated[c.Array[c.Array[uint8_t, Literal[128]], Literal[4]], 24]
# Metadata produced by compilation: stats, per-stage data, and the raw shader header.
@c.record
class struct_nak_shader_info(c.Struct):
  SIZE = 728
  stage: Annotated[gl_shader_stage, 0]
  sm: Annotated[uint8_t, 4]
  num_gprs: Annotated[uint8_t, 5]
  num_control_barriers: Annotated[uint8_t, 6]
  _pad0: Annotated[uint8_t, 7]
  max_warps_per_sm: Annotated[uint32_t, 8]
  # Compile statistics (instruction count, estimated cycles, spill/fill counts).
  num_instrs: Annotated[uint32_t, 12]
  num_static_cycles: Annotated[uint32_t, 16]
  num_spills_to_mem: Annotated[uint32_t, 20]
  num_fills_from_mem: Annotated[uint32_t, 24]
  num_spills_to_reg: Annotated[uint32_t, 28]
  num_fills_from_reg: Annotated[uint32_t, 32]
  slm_size: Annotated[uint32_t, 36]
  crs_size: Annotated[uint32_t, 40]
  # cs/fs/ts/_pad all live at offset 44: this is a C union keyed by `stage`.
  cs: Annotated[struct_nak_shader_info_cs, 44]
  fs: Annotated[struct_nak_shader_info_fs, 44]
  ts: Annotated[struct_nak_shader_info_ts, 44]
  _pad: Annotated[c.Array[uint8_t, Literal[12]], 44]
  vtg: Annotated[struct_nak_shader_info_vtg, 56]
  hdr: Annotated[c.Array[uint32_t, Literal[32]], 600]  # raw 32-dword shader program header
# Stage-specific members of the union at struct_nak_shader_info offset 44,
# plus the vertex/tess/geometry (VTG) block at offset 56. All padded to a
# common 12-byte union size.
@c.record
class struct_nak_shader_info_cs(c.Struct):
  SIZE = 12
  local_size: Annotated[c.Array[uint16_t, Literal[3]], 0]  # workgroup x/y/z
  smem_size: Annotated[uint16_t, 6]                        # shared memory bytes
  _pad: Annotated[c.Array[uint8_t, Literal[4]], 8]
@c.record
class struct_nak_shader_info_fs(c.Struct):
  SIZE = 12
  writes_depth: Annotated[Annotated[bool, ctypes.c_bool], 0]
  reads_sample_mask: Annotated[Annotated[bool, ctypes.c_bool], 1]
  post_depth_coverage: Annotated[Annotated[bool, ctypes.c_bool], 2]
  uses_sample_shading: Annotated[Annotated[bool, ctypes.c_bool], 3]
  early_fragment_tests: Annotated[Annotated[bool, ctypes.c_bool], 4]
  _pad: Annotated[c.Array[uint8_t, Literal[7]], 5]
@c.record
class struct_nak_shader_info_ts(c.Struct):
  SIZE = 12
  domain: Annotated[enum_nak_ts_domain, 0]
  spacing: Annotated[enum_nak_ts_spacing, 1]
  prims: Annotated[enum_nak_ts_prims, 2]
  _pad: Annotated[c.Array[uint8_t, Literal[9]], 3]
@c.record
class struct_nak_shader_info_vtg(c.Struct):
  SIZE = 544
  writes_layer: Annotated[Annotated[bool, ctypes.c_bool], 0]
  writes_point_size: Annotated[Annotated[bool, ctypes.c_bool], 1]
  writes_vprs_table_index: Annotated[Annotated[bool, ctypes.c_bool], 2]
  clip_enable: Annotated[uint8_t, 3]   # per-plane enable bitmask
  cull_enable: Annotated[uint8_t, 4]
  _pad: Annotated[c.Array[uint8_t, Literal[3]], 5]
  xfb: Annotated[struct_nak_xfb_info, 8]
# Result of a compilation: metadata + machine code + optional disassembly text.
@c.record
class struct_nak_shader_bin(c.Struct):
  SIZE = 752
  info: Annotated[struct_nak_shader_info, 0]
  code_size: Annotated[uint32_t, 728]                 # bytes in `code`
  code: Annotated[ctypes.c_void_p, 736]               # machine code blob (4-byte alignment gap before this)
  asm_str: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 744]  # disassembly; populated when dump_asm was set — TODO confirm
@dll.bind
def nak_shader_bin_destroy(bin:c.POINTER[struct_nak_shader_bin]) -> None: ...
# Main entry point: NIR in, shader binary out. Caller frees via nak_shader_bin_destroy.
@dll.bind
def nak_compile_shader(nir:c.POINTER[nir_shader], dump_asm:Annotated[bool, ctypes.c_bool], nak:c.POINTER[struct_nak_compiler], robust2_modes:nir_variable_mode, fs_key:c.POINTER[struct_nak_fs_key]) -> c.POINTER[struct_nak_shader_bin]: ...
# One constant-buffer binding recorded in a QMD (queue meta data) descriptor.
@c.record
class struct_nak_qmd_cbuf(c.Struct):
  SIZE = 16
  index: Annotated[uint32_t, 0]
  size: Annotated[uint32_t, 4]
  addr: Annotated[uint64_t, 8]   # GPU virtual address
# Everything needed to fill a compute-dispatch QMD: shader address, shared
# memory sizes, grid size, and up to 8 cbuf bindings.
@c.record
class struct_nak_qmd_info(c.Struct):
  SIZE = 160
  addr: Annotated[uint64_t, 0]
  smem_size: Annotated[uint16_t, 8]
  smem_max: Annotated[uint16_t, 10]
  global_size: Annotated[c.Array[uint32_t, Literal[3]], 12]
  num_cbufs: Annotated[uint32_t, 24]   # how many entries of `cbufs` are valid
  cbufs: Annotated[c.Array[struct_nak_qmd_cbuf, Literal[8]], 32]
# QMD size is device-dependent; query it, then fill a caller-provided buffer.
@dll.bind
def nak_qmd_size_B(dev:c.POINTER[struct_nv_device_info]) -> uint32_t: ...
@dll.bind
def nak_fill_qmd(dev:c.POINTER[struct_nv_device_info], info:c.POINTER[struct_nak_shader_info], qmd_info:c.POINTER[struct_nak_qmd_info], qmd_out:ctypes.c_void_p, qmd_size:size_t) -> None: ...
# Bit-range descriptors ([start, end) bit positions) telling callers where
# fields live inside a device-specific QMD, so they can patch QMDs directly.
@c.record
class struct_nak_qmd_dispatch_size_layout(c.Struct):
  SIZE = 12
  x_start: Annotated[uint16_t, 0]
  x_end: Annotated[uint16_t, 2]
  y_start: Annotated[uint16_t, 4]
  y_end: Annotated[uint16_t, 6]
  z_start: Annotated[uint16_t, 8]
  z_end: Annotated[uint16_t, 10]
# Returned by value (small POD struct).
@dll.bind
def nak_get_qmd_dispatch_size_layout(dev:c.POINTER[struct_nv_device_info]) -> struct_nak_qmd_dispatch_size_layout: ...
# Where a cbuf descriptor's shifted address lo/hi bits sit in the QMD.
@c.record
class struct_nak_qmd_cbuf_desc_layout(c.Struct):
  SIZE = 10
  addr_shift: Annotated[uint16_t, 0]
  addr_lo_start: Annotated[uint16_t, 2]
  addr_lo_end: Annotated[uint16_t, 4]
  addr_hi_start: Annotated[uint16_t, 6]
  addr_hi_end: Annotated[uint16_t, 8]
@dll.bind
def nak_get_qmd_cbuf_desc_layout(dev:c.POINTER[struct_nv_device_info], idx:uint8_t) -> struct_nak_qmd_cbuf_desc_layout: ...
# LLVM context handle + ownership flag (owned => this wrapper should dispose it).
@c.record
class struct_lp_context_ref(c.Struct):
  SIZE = 16
  ref: Annotated[LLVMContextRef, 0]
  owned: Annotated[Annotated[bool, ctypes.c_bool], 8]
class struct_LLVMOpaqueContext(ctypes.Structure): pass
LLVMContextRef: TypeAlias = c.POINTER[struct_LLVMOpaqueContext]
lp_context_ref: TypeAlias = struct_lp_context_ref
# Opaque pass-manager wrapper and its lifecycle: create (returns success bool),
# run over a module with a target machine, dispose.
class struct_lp_passmgr(ctypes.Structure): pass
class struct_LLVMOpaqueModule(ctypes.Structure): pass
LLVMModuleRef: TypeAlias = c.POINTER[struct_LLVMOpaqueModule]
@dll.bind
def lp_passmgr_create(module:LLVMModuleRef, mgr:c.POINTER[c.POINTER[struct_lp_passmgr]]) -> Annotated[bool, ctypes.c_bool]: ...
class struct_LLVMOpaqueTargetMachine(ctypes.Structure): pass
LLVMTargetMachineRef: TypeAlias = c.POINTER[struct_LLVMOpaqueTargetMachine]
@dll.bind
def lp_passmgr_run(mgr:c.POINTER[struct_lp_passmgr], module:LLVMModuleRef, tm:LLVMTargetMachineRef, module_name:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> None: ...
@dll.bind
def lp_passmgr_dispose(mgr:c.POINTER[struct_lp_passmgr]) -> None: ...
# On-disk JIT object cache: raw bytes plus an opaque per-JIT cache handle.
@c.record
class struct_lp_cached_code(c.Struct):
  SIZE = 32
  data: Annotated[ctypes.c_void_p, 0]
  data_size: Annotated[size_t, 8]
  dont_cache: Annotated[Annotated[bool, ctypes.c_bool], 16]
  jit_obj_cache: Annotated[ctypes.c_void_p, 24]
class struct_lp_generated_code(ctypes.Structure): pass
# Target library info for a given triple (create/dispose pair).
class struct_LLVMOpaqueTargetLibraryInfotData(ctypes.Structure): pass
LLVMTargetLibraryInfoRef: TypeAlias = c.POINTER[struct_LLVMOpaqueTargetLibraryInfotData]
@dll.bind
def gallivm_create_target_library_info(triple:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> LLVMTargetLibraryInfoRef: ...
@dll.bind
def gallivm_dispose_target_library_info(library_info:LLVMTargetLibraryInfoRef) -> None: ...
# One-time global LLVM/target initialization.
@dll.bind
def lp_set_target_options() -> None: ...
@dll.bind
def lp_bld_init_native_targets() -> None: ...
class struct_LLVMOpaqueExecutionEngine(ctypes.Structure): pass
LLVMExecutionEngineRef: TypeAlias = c.POINTER[struct_LLVMOpaqueExecutionEngine]
class struct_LLVMOpaqueMCJITMemoryManager(ctypes.Structure): pass
LLVMMCJITMemoryManagerRef: TypeAlias = c.POINTER[struct_LLVMOpaqueMCJITMemoryManager]
# Build an MCJIT engine for a module; OutJIT/OutCode/OutError are out-params,
# returns a nonzero error code convention per C API — TODO confirm.
@dll.bind
def lp_build_create_jit_compiler_for_module(OutJIT:c.POINTER[LLVMExecutionEngineRef], OutCode:c.POINTER[c.POINTER[struct_lp_generated_code]], cache_out:c.POINTER[struct_lp_cached_code], M:LLVMModuleRef, MM:LLVMMCJITMemoryManagerRef, OptLevel:Annotated[int, ctypes.c_uint32], OutError:c.POINTER[c.POINTER[Annotated[bytes, ctypes.c_char]]]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def lp_free_generated_code(code:c.POINTER[struct_lp_generated_code]) -> None: ...
@dll.bind
def lp_get_default_memory_manager() -> LLVMMCJITMemoryManagerRef: ...
@dll.bind
def lp_free_memory_manager(memorymgr:LLVMMCJITMemoryManagerRef) -> None: ...
class struct_LLVMOpaqueValue(ctypes.Structure): pass
LLVMValueRef: TypeAlias = c.POINTER[struct_LLVMOpaqueValue]
@dll.bind
def lp_get_called_value(call:LLVMValueRef) -> LLVMValueRef: ...
@dll.bind
def lp_is_function(v:LLVMValueRef) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def lp_free_objcache(objcache:ctypes.c_void_p) -> None: ...
@dll.bind
def lp_set_module_stack_alignment_override(M:LLVMModuleRef, align:Annotated[int, ctypes.c_uint32]) -> None: ...
# Global: native SIMD width in bits; may be missing from some library builds.
try: lp_native_vector_width = Annotated[int, ctypes.c_uint32].in_dll(dll, 'lp_native_vector_width') # type: ignore
except (ValueError,AttributeError): pass
# Packed description of a vector element type: six 1-bit flags, then a 14-bit
# element width (bits) and 14-bit vector length spilling into the next dword.
# Bitfield syntax: Annotated[type, byte_offset, bit_width, bit_shift].
@c.record
class struct_lp_type(c.Struct):
  SIZE = 8
  floating: Annotated[Annotated[int, ctypes.c_uint32], 0, 1, 0]
  fixed: Annotated[Annotated[int, ctypes.c_uint32], 0, 1, 1]
  sign: Annotated[Annotated[int, ctypes.c_uint32], 0, 1, 2]
  norm: Annotated[Annotated[int, ctypes.c_uint32], 0, 1, 3]
  signed_zero_preserve: Annotated[Annotated[int, ctypes.c_uint32], 0, 1, 4]
  nan_preserve: Annotated[Annotated[int, ctypes.c_uint32], 0, 1, 5]
  width: Annotated[Annotated[int, ctypes.c_uint32], 0, 14, 6]
  length: Annotated[Annotated[int, ctypes.c_uint32], 4, 14, 0]
# Per-type IR-building state: cached LLVM types and common constants for one lp_type.
@c.record
class struct_lp_build_context(c.Struct):
  SIZE = 72
  gallivm: Annotated[c.POINTER[struct_gallivm_state], 0]
  type: Annotated[struct_lp_type, 8]
  elem_type: Annotated[LLVMTypeRef, 16]
  vec_type: Annotated[LLVMTypeRef, 24]
  int_elem_type: Annotated[LLVMTypeRef, 32]
  int_vec_type: Annotated[LLVMTypeRef, 40]
  # Pre-built constants of vec_type.
  undef: Annotated[LLVMValueRef, 48]
  zero: Annotated[LLVMValueRef, 56]
  one: Annotated[LLVMValueRef, 64]
# Top-level gallivm JIT session: one LLVM module/context/builder plus the
# engine, pass manager, memory manager, caches, and various hook functions.
@c.record
class struct_gallivm_state(c.Struct):
  SIZE = 192
  module_name: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 0]
  file_name: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 8]
  module: Annotated[LLVMModuleRef, 16]
  target: Annotated[LLVMTargetDataRef, 24]
  engine: Annotated[LLVMExecutionEngineRef, 32]
  passmgr: Annotated[c.POINTER[struct_lp_passmgr], 40]
  memorymgr: Annotated[LLVMMCJITMemoryManagerRef, 48]
  code: Annotated[c.POINTER[struct_lp_generated_code], 56]
  context: Annotated[LLVMContextRef, 64]
  builder: Annotated[LLVMBuilderRef, 72]
  di_builder: Annotated[LLVMDIBuilderRef, 80]        # debug-info builder
  cache: Annotated[c.POINTER[struct_lp_cached_code], 88]
  compiled: Annotated[Annotated[int, ctypes.c_uint32], 96]
  # Hook functions/types installed into generated code (coroutine alloc/free, printf, timing).
  coro_malloc_hook: Annotated[LLVMValueRef, 104]
  coro_free_hook: Annotated[LLVMValueRef, 112]
  debug_printf_hook: Annotated[LLVMValueRef, 120]
  coro_malloc_hook_type: Annotated[LLVMTypeRef, 128]
  coro_free_hook_type: Annotated[LLVMTypeRef, 136]
  di_function: Annotated[LLVMMetadataRef, 144]
  file: Annotated[LLVMMetadataRef, 152]
  get_time_hook: Annotated[LLVMValueRef, 160]
  texture_descriptor: Annotated[LLVMValueRef, 168]
  texture_dynamic_state: Annotated[c.POINTER[struct_lp_jit_texture], 176]
  sampler_descriptor: Annotated[LLVMValueRef, 184]
class struct_LLVMOpaqueType(ctypes.Structure): pass
LLVMTypeRef: TypeAlias = c.POINTER[struct_LLVMOpaqueType]
# Conversions from the packed lp_type description to concrete LLVM types
# (element vs. full vector; float vs. same-width integer variants).
@dll.bind
def lp_build_elem_type(gallivm:c.POINTER[struct_gallivm_state], type:struct_lp_type) -> LLVMTypeRef: ...
@dll.bind
def lp_build_vec_type(gallivm:c.POINTER[struct_gallivm_state], type:struct_lp_type) -> LLVMTypeRef: ...
# Sanity checks: does the LLVM type/value match the lp_type description?
@dll.bind
def lp_check_elem_type(type:struct_lp_type, elem_type:LLVMTypeRef) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def lp_check_vec_type(type:struct_lp_type, vec_type:LLVMTypeRef) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def lp_check_value(type:struct_lp_type, val:LLVMValueRef) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def lp_build_int_elem_type(gallivm:c.POINTER[struct_gallivm_state], type:struct_lp_type) -> LLVMTypeRef: ...
@dll.bind
def lp_build_int_vec_type(gallivm:c.POINTER[struct_gallivm_state], type:struct_lp_type) -> LLVMTypeRef: ...
# lp_type -> lp_type transforms (scalar element, unsigned/signed int view, double width).
@dll.bind
def lp_elem_type(type:struct_lp_type) -> struct_lp_type: ...
@dll.bind
def lp_uint_type(type:struct_lp_type) -> struct_lp_type: ...
@dll.bind
def lp_int_type(type:struct_lp_type) -> struct_lp_type: ...
@dll.bind
def lp_wider_type(type:struct_lp_type) -> struct_lp_type: ...
@dll.bind
def lp_sizeof_llvm_type(t:LLVMTypeRef) -> Annotated[int, ctypes.c_uint32]: ...
# Mirror of LLVM-C's LLVMTypeKind enum. Note the gap: 15 is unassigned here
# (matches the C header's numbering) — do not "fix" by renumbering.
class LLVMTypeKind(Annotated[int, ctypes.c_uint32], c.Enum): pass
LLVMVoidTypeKind = LLVMTypeKind.define('LLVMVoidTypeKind', 0)
LLVMHalfTypeKind = LLVMTypeKind.define('LLVMHalfTypeKind', 1)
LLVMFloatTypeKind = LLVMTypeKind.define('LLVMFloatTypeKind', 2)
LLVMDoubleTypeKind = LLVMTypeKind.define('LLVMDoubleTypeKind', 3)
LLVMX86_FP80TypeKind = LLVMTypeKind.define('LLVMX86_FP80TypeKind', 4)
LLVMFP128TypeKind = LLVMTypeKind.define('LLVMFP128TypeKind', 5)
LLVMPPC_FP128TypeKind = LLVMTypeKind.define('LLVMPPC_FP128TypeKind', 6)
LLVMLabelTypeKind = LLVMTypeKind.define('LLVMLabelTypeKind', 7)
LLVMIntegerTypeKind = LLVMTypeKind.define('LLVMIntegerTypeKind', 8)
LLVMFunctionTypeKind = LLVMTypeKind.define('LLVMFunctionTypeKind', 9)
LLVMStructTypeKind = LLVMTypeKind.define('LLVMStructTypeKind', 10)
LLVMArrayTypeKind = LLVMTypeKind.define('LLVMArrayTypeKind', 11)
LLVMPointerTypeKind = LLVMTypeKind.define('LLVMPointerTypeKind', 12)
LLVMVectorTypeKind = LLVMTypeKind.define('LLVMVectorTypeKind', 13)
LLVMMetadataTypeKind = LLVMTypeKind.define('LLVMMetadataTypeKind', 14)
LLVMTokenTypeKind = LLVMTypeKind.define('LLVMTokenTypeKind', 16)
LLVMScalableVectorTypeKind = LLVMTypeKind.define('LLVMScalableVectorTypeKind', 17)
LLVMBFloatTypeKind = LLVMTypeKind.define('LLVMBFloatTypeKind', 18)
LLVMX86_AMXTypeKind = LLVMTypeKind.define('LLVMX86_AMXTypeKind', 19)
LLVMTargetExtTypeKind = LLVMTypeKind.define('LLVMTargetExtTypeKind', 20)
# Debug helpers: type-kind name string, type dump, context init, IR statistics.
@dll.bind
def lp_typekind_name(t:LLVMTypeKind) -> c.POINTER[Annotated[bytes, ctypes.c_char]]: ...
@dll.bind
def lp_dump_llvmtype(t:LLVMTypeRef) -> None: ...
@dll.bind
def lp_build_context_init(bld:c.POINTER[struct_lp_build_context], gallivm:c.POINTER[struct_gallivm_state], type:struct_lp_type) -> None: ...
@dll.bind
def lp_build_count_ir_module(module:LLVMModuleRef) -> Annotated[int, ctypes.c_uint32]: ...
# Texture state as seen by JIT-compiled code: base pointer, dimensions, and
# per-mip strides/offsets (16 mip levels).
# NOTE: `residency` shares offset 16 with `row_stride` — a C union overlay.
@c.record
class struct_lp_jit_texture(c.Struct):
  SIZE = 216
  base: Annotated[ctypes.c_void_p, 0]
  width: Annotated[uint32_t, 8]
  height: Annotated[uint16_t, 12]
  depth: Annotated[uint16_t, 14]
  row_stride: Annotated[c.Array[uint32_t, Literal[16]], 16]
  img_stride: Annotated[c.Array[uint32_t, Literal[16]], 80]
  residency: Annotated[ctypes.c_void_p, 16]   # overlays row_stride (union member)
  first_level: Annotated[uint8_t, 144]
  last_level: Annotated[uint8_t, 145]
  mip_offsets: Annotated[c.Array[uint32_t, Literal[16]], 148]
  sampler_index: Annotated[uint32_t, 212]
# Remaining opaque LLVM-C handle types referenced by the structs above.
class struct_LLVMOpaqueTargetData(ctypes.Structure): pass
LLVMTargetDataRef: TypeAlias = c.POINTER[struct_LLVMOpaqueTargetData]
class struct_LLVMOpaqueBuilder(ctypes.Structure): pass
LLVMBuilderRef: TypeAlias = c.POINTER[struct_LLVMOpaqueBuilder]
class struct_LLVMOpaqueDIBuilder(ctypes.Structure): pass
LLVMDIBuilderRef: TypeAlias = c.POINTER[struct_LLVMOpaqueDIBuilder]
class struct_LLVMOpaqueMetadata(ctypes.Structure): pass
LLVMMetadataRef: TypeAlias = c.POINTER[struct_LLVMOpaqueMetadata]
# gallivm session lifecycle and compile/JIT entry points.
@dll.bind
def lp_build_init_native_width() -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def lp_build_init() -> Annotated[bool, ctypes.c_bool]: ...
# Create a session; `context` may carry a caller-owned LLVM context, `cache` an object cache.
@dll.bind
def gallivm_create(name:c.POINTER[Annotated[bytes, ctypes.c_char]], context:c.POINTER[lp_context_ref], cache:c.POINTER[struct_lp_cached_code]) -> c.POINTER[struct_gallivm_state]: ...
@dll.bind
def gallivm_destroy(gallivm:c.POINTER[struct_gallivm_state]) -> None: ...
# Drop the IR after compilation to reclaim memory (compiled code stays valid).
@dll.bind
def gallivm_free_ir(gallivm:c.POINTER[struct_gallivm_state]) -> None: ...
@dll.bind
def gallivm_verify_function(gallivm:c.POINTER[struct_gallivm_state], func:LLVMValueRef) -> None: ...
# Map an IR symbol to a host address (for calling back into host code).
@dll.bind
def gallivm_add_global_mapping(gallivm:c.POINTER[struct_gallivm_state], sym:LLVMValueRef, addr:ctypes.c_void_p) -> None: ...
@dll.bind
def gallivm_compile_module(gallivm:c.POINTER[struct_gallivm_state]) -> None: ...
# Generic function pointer returned for JITted entry points; caller casts to the real signature.
func_pointer: TypeAlias = c.CFUNCTYPE[None, []]
@dll.bind
def gallivm_jit_function(gallivm:c.POINTER[struct_gallivm_state], func:LLVMValueRef, func_name:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> func_pointer: ...
@dll.bind
def gallivm_stub_func(gallivm:c.POINTER[struct_gallivm_state], func:LLVMValueRef) -> None: ...
@dll.bind
def gallivm_get_perf_flags() -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def lp_init_clock_hook(gallivm:c.POINTER[struct_gallivm_state]) -> None: ...
@dll.bind
def lp_init_env_options() -> None: ...
# Parameter bundle handed to the TGSI/NIR -> LLVM shader translator: exec mask,
# resource pointers, per-stage interface vtables, and misc builder state.
@c.record
class struct_lp_build_tgsi_params(c.Struct):
  SIZE = 248
  type: Annotated[struct_lp_type, 0]
  mask: Annotated[c.POINTER[struct_lp_build_mask_context], 8]
  consts_ptr: Annotated[LLVMValueRef, 16]
  const_sizes_ptr: Annotated[LLVMValueRef, 24]
  system_values: Annotated[c.POINTER[struct_lp_bld_tgsi_system_values], 32]
  inputs: Annotated[c.POINTER[c.Array[LLVMValueRef, Literal[4]]], 40]   # [attrib][4 channels]
  num_inputs: Annotated[Annotated[int, ctypes.c_int32], 48]
  context_type: Annotated[LLVMTypeRef, 56]
  context_ptr: Annotated[LLVMValueRef, 64]
  resources_type: Annotated[LLVMTypeRef, 72]
  resources_ptr: Annotated[LLVMValueRef, 80]
  thread_data_type: Annotated[LLVMTypeRef, 88]
  thread_data_ptr: Annotated[LLVMValueRef, 96]
  sampler: Annotated[c.POINTER[struct_lp_build_sampler_soa], 104]
  info: Annotated[c.POINTER[struct_tgsi_shader_info], 112]
  # Per-stage interface vtables (geometry/tess-ctrl/tess-eval/mesh/fragment).
  gs_iface: Annotated[c.POINTER[struct_lp_build_gs_iface], 120]
  tcs_iface: Annotated[c.POINTER[struct_lp_build_tcs_iface], 128]
  tes_iface: Annotated[c.POINTER[struct_lp_build_tes_iface], 136]
  mesh_iface: Annotated[c.POINTER[struct_lp_build_mesh_iface], 144]
  ssbo_ptr: Annotated[LLVMValueRef, 152]
  ssbo_sizes_ptr: Annotated[LLVMValueRef, 160]
  image: Annotated[c.POINTER[struct_lp_build_image_soa], 168]
  shared_ptr: Annotated[LLVMValueRef, 176]
  payload_ptr: Annotated[LLVMValueRef, 184]
  coro: Annotated[c.POINTER[struct_lp_build_coro_suspend_info], 192]
  fs_iface: Annotated[c.POINTER[struct_lp_build_fs_iface], 200]
  gs_vertex_streams: Annotated[Annotated[int, ctypes.c_uint32], 208]
  current_func: Annotated[LLVMValueRef, 216]
  fns: Annotated[c.POINTER[struct_hash_table], 224]
  scratch_ptr: Annotated[LLVMValueRef, 232]
  call_context_ptr: Annotated[LLVMValueRef, 240]
# Execution-mask state: skip-branch context plus the mask variable itself.
@c.record
class struct_lp_build_mask_context(c.Struct):
  SIZE = 40
  skip: Annotated[struct_lp_build_skip_context, 0]
  reg_type: Annotated[LLVMTypeRef, 16]
  var_type: Annotated[LLVMTypeRef, 24]
  var: Annotated[LLVMValueRef, 32]
# Branch target for skipping masked-off code.
@c.record
class struct_lp_build_skip_context(c.Struct):
  SIZE = 16
  gallivm: Annotated[c.POINTER[struct_gallivm_state], 0]
  block: Annotated[LLVMBasicBlockRef, 8]
class struct_LLVMOpaqueBasicBlock(ctypes.Structure): pass
LLVMBasicBlockRef: TypeAlias = c.POINTER[struct_LLVMOpaqueBasicBlock]
# Pre-loaded LLVM values for shader system inputs (vertex/instance IDs, compute
# thread/block coordinates, tessellation values, sample state, ...). NULL for
# values the current shader doesn't use — TODO confirm against translator.
@c.record
class struct_lp_bld_tgsi_system_values(c.Struct):
  SIZE = 272
  instance_id: Annotated[LLVMValueRef, 0]
  base_instance: Annotated[LLVMValueRef, 8]
  vertex_id: Annotated[LLVMValueRef, 16]
  vertex_id_nobase: Annotated[LLVMValueRef, 24]
  prim_id: Annotated[LLVMValueRef, 32]
  basevertex: Annotated[LLVMValueRef, 40]
  firstvertex: Annotated[LLVMValueRef, 48]
  invocation_id: Annotated[LLVMValueRef, 56]
  draw_id: Annotated[LLVMValueRef, 64]
  # Compute dispatch coordinates, one value per x/y/z axis.
  thread_id: Annotated[c.Array[LLVMValueRef, Literal[3]], 72]
  block_id: Annotated[c.Array[LLVMValueRef, Literal[3]], 96]
  grid_size: Annotated[c.Array[LLVMValueRef, Literal[3]], 120]
  front_facing: Annotated[LLVMValueRef, 144]
  work_dim: Annotated[LLVMValueRef, 152]
  block_size: Annotated[c.Array[LLVMValueRef, Literal[3]], 160]
  # Tessellation inputs.
  tess_coord: Annotated[LLVMValueRef, 184]
  tess_outer: Annotated[LLVMValueRef, 192]
  tess_inner: Annotated[LLVMValueRef, 200]
  vertices_in: Annotated[LLVMValueRef, 208]
  # Per-sample state.
  sample_id: Annotated[LLVMValueRef, 216]
  sample_pos_type: Annotated[LLVMTypeRef, 224]
  sample_pos: Annotated[LLVMValueRef, 232]
  sample_mask_in: Annotated[LLVMValueRef, 240]
  view_index: Annotated[LLVMValueRef, 248]
  subgroup_id: Annotated[LLVMValueRef, 256]
  num_subgroups: Annotated[LLVMValueRef, 264]
@c.record
class struct_lp_build_sampler_soa(c.Struct):
SIZE = 16
emit_tex_sample: Annotated[c.CFUNCTYPE[None, [c.POINTER[struct_lp_build_sampler_soa], c.POINTER[struct_gallivm_state], c.POINTER[struct_lp_sampler_params]]], 0]
emit_size_query: Annotated[c.CFUNCTYPE[None, [c.POINTER[struct_lp_build_sampler_soa], c.POINTER[struct_gallivm_state], c.POINTER[struct_lp_sampler_size_query_params]]], 8]
@c.record
class struct_lp_sampler_params(c.Struct):
SIZE = 152
type: Annotated[struct_lp_type, 0]
texture_index: Annotated[Annotated[int, ctypes.c_uint32], 8]
sampler_index: Annotated[Annotated[int, ctypes.c_uint32], 12]
texture_index_offset: Annotated[LLVMValueRef, 16]
sample_key: Annotated[Annotated[int, ctypes.c_uint32], 24]
resources_type: Annotated[LLVMTypeRef, 32]
resources_ptr: Annotated[LLVMValueRef, 40]
thread_data_type: Annotated[LLVMTypeRef, 48]
thread_data_ptr: Annotated[LLVMValueRef, 56]
coords: Annotated[c.POINTER[LLVMValueRef], 64]
offsets: Annotated[c.POINTER[LLVMValueRef], 72]
ms_index: Annotated[LLVMValueRef, 80]
lod: Annotated[LLVMValueRef, 88]
min_lod: Annotated[LLVMValueRef, 96]
derivs: Annotated[c.POINTER[struct_lp_derivatives], 104]
texel: Annotated[c.POINTER[LLVMValueRef], 112]
texture_resource: Annotated[LLVMValueRef, 120]
sampler_resource: Annotated[LLVMValueRef, 128]
exec_mask: Annotated[LLVMValueRef, 136]
exec_mask_nz: Annotated[Annotated[bool, ctypes.c_bool], 144]
# Explicit texture-coordinate derivatives: 3 LLVM values each for d/dx and d/dy.
@c.record
class struct_lp_derivatives(c.Struct):
  SIZE = 48
  ddx: Annotated[c.Array[LLVMValueRef, Literal[3]], 0]
  ddy: Annotated[c.Array[LLVMValueRef, Literal[3]], 24]
# Parameter bundle for emit_size_query (textureSize / samples queries).
# sizes_out receives the resulting LLVM values.
@c.record
class struct_lp_sampler_size_query_params(c.Struct):
  SIZE = 96
  int_type: Annotated[struct_lp_type, 0]
  texture_unit: Annotated[Annotated[int, ctypes.c_uint32], 8]
  texture_unit_offset: Annotated[LLVMValueRef, 16]
  target: Annotated[Annotated[int, ctypes.c_uint32], 24]
  resources_type: Annotated[LLVMTypeRef, 32]
  resources_ptr: Annotated[LLVMValueRef, 40]
  is_sviewinfo: Annotated[Annotated[bool, ctypes.c_bool], 48]
  samples_only: Annotated[Annotated[bool, ctypes.c_bool], 49]
  ms: Annotated[Annotated[bool, ctypes.c_bool], 50]
  lod_property: Annotated[enum_lp_sampler_lod_property, 52]
  explicit_lod: Annotated[LLVMValueRef, 56]
  sizes_out: Annotated[c.POINTER[LLVMValueRef], 64]
  resource: Annotated[LLVMValueRef, 72]
  exec_mask: Annotated[LLVMValueRef, 80]
  exec_mask_nz: Annotated[Annotated[bool, ctypes.c_bool], 88]
  format: Annotated[enum_pipe_format, 92]
# How the LOD varies across the SIMD vector: one scalar, per-element, or per-quad.
class enum_lp_sampler_lod_property(Annotated[int, ctypes.c_uint32], c.Enum): pass
LP_SAMPLER_LOD_SCALAR = enum_lp_sampler_lod_property.define('LP_SAMPLER_LOD_SCALAR', 0)
LP_SAMPLER_LOD_PER_ELEMENT = enum_lp_sampler_lod_property.define('LP_SAMPLER_LOD_PER_ELEMENT', 1)
LP_SAMPLER_LOD_PER_QUAD = enum_lp_sampler_lod_property.define('LP_SAMPLER_LOD_PER_QUAD', 2)
# TGSI shader reflection info: per-slot input/output semantics, declared resources,
# and a large set of "uses_*/writes_*" feature flags scanned from the shader.
# Offsets are byte positions in the 2744-byte C struct — do not hand-edit.
@c.record
class struct_tgsi_shader_info(c.Struct):
  SIZE = 2744
  num_inputs: Annotated[uint8_t, 0]
  num_outputs: Annotated[uint8_t, 1]
  input_semantic_name: Annotated[c.Array[uint8_t, Literal[80]], 2]
  input_semantic_index: Annotated[c.Array[uint8_t, Literal[80]], 82]
  input_interpolate: Annotated[c.Array[uint8_t, Literal[80]], 162]
  input_interpolate_loc: Annotated[c.Array[uint8_t, Literal[80]], 242]
  input_usage_mask: Annotated[c.Array[uint8_t, Literal[80]], 322]
  output_semantic_name: Annotated[c.Array[uint8_t, Literal[80]], 402]
  output_semantic_index: Annotated[c.Array[uint8_t, Literal[80]], 482]
  output_usagemask: Annotated[c.Array[uint8_t, Literal[80]], 562]
  output_streams: Annotated[c.Array[uint8_t, Literal[80]], 642]
  num_system_values: Annotated[uint8_t, 722]
  system_value_semantic_name: Annotated[c.Array[uint8_t, Literal[80]], 723]
  processor: Annotated[uint8_t, 803]
  file_mask: Annotated[c.Array[uint32_t, Literal[15]], 804]
  file_count: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[15]], 864]
  file_max: Annotated[c.Array[Annotated[int, ctypes.c_int32], Literal[15]], 924]
  const_file_max: Annotated[c.Array[Annotated[int, ctypes.c_int32], Literal[32]], 984]
  const_buffers_declared: Annotated[Annotated[int, ctypes.c_uint32], 1112]
  samplers_declared: Annotated[Annotated[int, ctypes.c_uint32], 1116]
  sampler_targets: Annotated[c.Array[uint8_t, Literal[128]], 1120]
  sampler_type: Annotated[c.Array[uint8_t, Literal[128]], 1248]
  num_stream_output_components: Annotated[c.Array[uint8_t, Literal[4]], 1376]
  input_array_first: Annotated[c.Array[uint8_t, Literal[80]], 1380]
  output_array_first: Annotated[c.Array[uint8_t, Literal[80]], 1460]
  immediate_count: Annotated[Annotated[int, ctypes.c_uint32], 1540]
  num_instructions: Annotated[Annotated[int, ctypes.c_uint32], 1544]
  opcode_count: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[252]], 1548]
  reads_pervertex_outputs: Annotated[Annotated[bool, ctypes.c_bool], 2556]
  reads_perpatch_outputs: Annotated[Annotated[bool, ctypes.c_bool], 2557]
  reads_tessfactor_outputs: Annotated[Annotated[bool, ctypes.c_bool], 2558]
  reads_z: Annotated[Annotated[bool, ctypes.c_bool], 2559]
  writes_z: Annotated[Annotated[bool, ctypes.c_bool], 2560]
  writes_stencil: Annotated[Annotated[bool, ctypes.c_bool], 2561]
  writes_samplemask: Annotated[Annotated[bool, ctypes.c_bool], 2562]
  writes_edgeflag: Annotated[Annotated[bool, ctypes.c_bool], 2563]
  uses_kill: Annotated[Annotated[bool, ctypes.c_bool], 2564]
  uses_instanceid: Annotated[Annotated[bool, ctypes.c_bool], 2565]
  uses_vertexid: Annotated[Annotated[bool, ctypes.c_bool], 2566]
  uses_vertexid_nobase: Annotated[Annotated[bool, ctypes.c_bool], 2567]
  uses_basevertex: Annotated[Annotated[bool, ctypes.c_bool], 2568]
  uses_primid: Annotated[Annotated[bool, ctypes.c_bool], 2569]
  uses_frontface: Annotated[Annotated[bool, ctypes.c_bool], 2570]
  uses_invocationid: Annotated[Annotated[bool, ctypes.c_bool], 2571]
  uses_grid_size: Annotated[Annotated[bool, ctypes.c_bool], 2572]
  writes_position: Annotated[Annotated[bool, ctypes.c_bool], 2573]
  writes_psize: Annotated[Annotated[bool, ctypes.c_bool], 2574]
  writes_clipvertex: Annotated[Annotated[bool, ctypes.c_bool], 2575]
  writes_viewport_index: Annotated[Annotated[bool, ctypes.c_bool], 2576]
  writes_layer: Annotated[Annotated[bool, ctypes.c_bool], 2577]
  writes_memory: Annotated[Annotated[bool, ctypes.c_bool], 2578]
  uses_fbfetch: Annotated[Annotated[bool, ctypes.c_bool], 2579]
  num_written_culldistance: Annotated[Annotated[int, ctypes.c_uint32], 2580]
  num_written_clipdistance: Annotated[Annotated[int, ctypes.c_uint32], 2584]
  images_declared: Annotated[Annotated[int, ctypes.c_uint32], 2588]
  msaa_images_declared: Annotated[Annotated[int, ctypes.c_uint32], 2592]
  images_buffers: Annotated[Annotated[int, ctypes.c_uint32], 2596]
  shader_buffers_declared: Annotated[Annotated[int, ctypes.c_uint32], 2600]
  shader_buffers_load: Annotated[Annotated[int, ctypes.c_uint32], 2604]
  shader_buffers_store: Annotated[Annotated[int, ctypes.c_uint32], 2608]
  shader_buffers_atomic: Annotated[Annotated[int, ctypes.c_uint32], 2612]
  hw_atomic_declared: Annotated[Annotated[int, ctypes.c_uint32], 2616]
  indirect_files: Annotated[Annotated[int, ctypes.c_uint32], 2620]
  dim_indirect_files: Annotated[Annotated[int, ctypes.c_uint32], 2624]
  properties: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[29]], 2628]
# Geometry-shader codegen callbacks: fetch an input, emit a vertex,
# end a primitive, and run the GS epilogue.
@c.record
class struct_lp_build_gs_iface(c.Struct):
  SIZE = 32
  fetch_input: Annotated[c.CFUNCTYPE[LLVMValueRef, [c.POINTER[struct_lp_build_gs_iface], c.POINTER[struct_lp_build_context], Annotated[bool, ctypes.c_bool], LLVMValueRef, Annotated[bool, ctypes.c_bool], LLVMValueRef, LLVMValueRef]], 0]
  emit_vertex: Annotated[c.CFUNCTYPE[None, [c.POINTER[struct_lp_build_gs_iface], c.POINTER[struct_lp_build_context], c.POINTER[c.Array[LLVMValueRef, Literal[4]]], LLVMValueRef, LLVMValueRef, LLVMValueRef]], 8]
  end_primitive: Annotated[c.CFUNCTYPE[None, [c.POINTER[struct_lp_build_gs_iface], c.POINTER[struct_lp_build_context], LLVMValueRef, LLVMValueRef, LLVMValueRef, LLVMValueRef, Annotated[int, ctypes.c_uint32]]], 16]
  gs_epilogue: Annotated[c.CFUNCTYPE[None, [c.POINTER[struct_lp_build_gs_iface], LLVMValueRef, LLVMValueRef, Annotated[int, ctypes.c_uint32]]], 24]
# Tessellation-control-shader codegen callbacks: prologue/epilogue/barrier plus
# store-output and fetch-input/output hooks.
@c.record
class struct_lp_build_tcs_iface(c.Struct):
  SIZE = 48
  emit_prologue: Annotated[c.CFUNCTYPE[None, [c.POINTER[struct_lp_build_context]]], 0]
  emit_epilogue: Annotated[c.CFUNCTYPE[None, [c.POINTER[struct_lp_build_context]]], 8]
  emit_barrier: Annotated[c.CFUNCTYPE[None, [c.POINTER[struct_lp_build_context]]], 16]
  emit_store_output: Annotated[c.CFUNCTYPE[None, [c.POINTER[struct_lp_build_tcs_iface], c.POINTER[struct_lp_build_context], Annotated[int, ctypes.c_uint32], Annotated[bool, ctypes.c_bool], LLVMValueRef, Annotated[bool, ctypes.c_bool], LLVMValueRef, Annotated[bool, ctypes.c_bool], LLVMValueRef, LLVMValueRef, LLVMValueRef]], 24]
  emit_fetch_input: Annotated[c.CFUNCTYPE[LLVMValueRef, [c.POINTER[struct_lp_build_tcs_iface], c.POINTER[struct_lp_build_context], Annotated[bool, ctypes.c_bool], LLVMValueRef, Annotated[bool, ctypes.c_bool], LLVMValueRef, Annotated[bool, ctypes.c_bool], LLVMValueRef]], 32]
  emit_fetch_output: Annotated[c.CFUNCTYPE[LLVMValueRef, [c.POINTER[struct_lp_build_tcs_iface], c.POINTER[struct_lp_build_context], Annotated[bool, ctypes.c_bool], LLVMValueRef, Annotated[bool, ctypes.c_bool], LLVMValueRef, Annotated[bool, ctypes.c_bool], LLVMValueRef, uint32_t]], 40]
# Tessellation-evaluation-shader codegen callbacks: fetch per-vertex and
# per-patch inputs.
@c.record
class struct_lp_build_tes_iface(c.Struct):
  SIZE = 16
  fetch_vertex_input: Annotated[c.CFUNCTYPE[LLVMValueRef, [c.POINTER[struct_lp_build_tes_iface], c.POINTER[struct_lp_build_context], Annotated[bool, ctypes.c_bool], LLVMValueRef, Annotated[bool, ctypes.c_bool], LLVMValueRef, Annotated[bool, ctypes.c_bool], LLVMValueRef]], 0]
  fetch_patch_input: Annotated[c.CFUNCTYPE[LLVMValueRef, [c.POINTER[struct_lp_build_tes_iface], c.POINTER[struct_lp_build_context], Annotated[bool, ctypes.c_bool], LLVMValueRef, LLVMValueRef]], 8]
# Mesh-shader codegen callbacks: store an output and report vertex/primitive counts.
@c.record
class struct_lp_build_mesh_iface(c.Struct):
  SIZE = 16
  emit_store_output: Annotated[c.CFUNCTYPE[None, [c.POINTER[struct_lp_build_mesh_iface], c.POINTER[struct_lp_build_context], Annotated[int, ctypes.c_uint32], Annotated[bool, ctypes.c_bool], LLVMValueRef, Annotated[bool, ctypes.c_bool], LLVMValueRef, Annotated[bool, ctypes.c_bool], LLVMValueRef, LLVMValueRef, LLVMValueRef]], 0]
  emit_vertex_and_primitive_count: Annotated[c.CFUNCTYPE[None, [c.POINTER[struct_lp_build_mesh_iface], c.POINTER[struct_lp_build_context], LLVMValueRef, LLVMValueRef]], 8]
# SoA image-op callback table: emit an image load/store/atomic op or a size query.
@c.record
class struct_lp_build_image_soa(c.Struct):
  SIZE = 16
  emit_op: Annotated[c.CFUNCTYPE[None, [c.POINTER[struct_lp_build_image_soa], c.POINTER[struct_gallivm_state], c.POINTER[struct_lp_img_params]]], 0]
  emit_size_query: Annotated[c.CFUNCTYPE[None, [c.POINTER[struct_lp_build_image_soa], c.POINTER[struct_gallivm_state], c.POINTER[struct_lp_sampler_size_query_params]]], 8]
# Parameter bundle for emit_op: image index/target, the op (including the atomic
# RMW op for atomics), coords, input operands and the output pointer.
@c.record
class struct_lp_img_params(c.Struct):
  SIZE = 192
  type: Annotated[struct_lp_type, 0]
  image_index: Annotated[Annotated[int, ctypes.c_uint32], 8]
  image_index_offset: Annotated[LLVMValueRef, 16]
  img_op: Annotated[Annotated[int, ctypes.c_uint32], 24]
  target: Annotated[Annotated[int, ctypes.c_uint32], 28]
  packed_op: Annotated[Annotated[int, ctypes.c_uint32], 32]
  op: Annotated[LLVMAtomicRMWBinOp, 36]
  exec_mask: Annotated[LLVMValueRef, 40]
  exec_mask_nz: Annotated[Annotated[bool, ctypes.c_bool], 48]
  resources_type: Annotated[LLVMTypeRef, 56]
  resources_ptr: Annotated[LLVMValueRef, 64]
  thread_data_type: Annotated[LLVMTypeRef, 72]
  thread_data_ptr: Annotated[LLVMValueRef, 80]
  coords: Annotated[c.POINTER[LLVMValueRef], 88]
  ms_index: Annotated[LLVMValueRef, 96]
  indata: Annotated[c.Array[LLVMValueRef, Literal[4]], 104]
  indata2: Annotated[c.Array[LLVMValueRef, Literal[4]], 136]
  outdata: Annotated[c.POINTER[LLVMValueRef], 168]
  resource: Annotated[LLVMValueRef, 176]
  format: Annotated[enum_pipe_format, 184]
# LLVM-C atomicrmw operation codes (mirrors LLVMAtomicRMWBinOp from llvm-c/Core.h);
# the numeric values must match the LLVM C API exactly.
class LLVMAtomicRMWBinOp(Annotated[int, ctypes.c_uint32], c.Enum): pass
LLVMAtomicRMWBinOpXchg = LLVMAtomicRMWBinOp.define('LLVMAtomicRMWBinOpXchg', 0)
LLVMAtomicRMWBinOpAdd = LLVMAtomicRMWBinOp.define('LLVMAtomicRMWBinOpAdd', 1)
LLVMAtomicRMWBinOpSub = LLVMAtomicRMWBinOp.define('LLVMAtomicRMWBinOpSub', 2)
LLVMAtomicRMWBinOpAnd = LLVMAtomicRMWBinOp.define('LLVMAtomicRMWBinOpAnd', 3)
LLVMAtomicRMWBinOpNand = LLVMAtomicRMWBinOp.define('LLVMAtomicRMWBinOpNand', 4)
LLVMAtomicRMWBinOpOr = LLVMAtomicRMWBinOp.define('LLVMAtomicRMWBinOpOr', 5)
LLVMAtomicRMWBinOpXor = LLVMAtomicRMWBinOp.define('LLVMAtomicRMWBinOpXor', 6)
LLVMAtomicRMWBinOpMax = LLVMAtomicRMWBinOp.define('LLVMAtomicRMWBinOpMax', 7)
LLVMAtomicRMWBinOpMin = LLVMAtomicRMWBinOp.define('LLVMAtomicRMWBinOpMin', 8)
LLVMAtomicRMWBinOpUMax = LLVMAtomicRMWBinOp.define('LLVMAtomicRMWBinOpUMax', 9)
LLVMAtomicRMWBinOpUMin = LLVMAtomicRMWBinOp.define('LLVMAtomicRMWBinOpUMin', 10)
LLVMAtomicRMWBinOpFAdd = LLVMAtomicRMWBinOp.define('LLVMAtomicRMWBinOpFAdd', 11)
LLVMAtomicRMWBinOpFSub = LLVMAtomicRMWBinOp.define('LLVMAtomicRMWBinOpFSub', 12)
LLVMAtomicRMWBinOpFMax = LLVMAtomicRMWBinOp.define('LLVMAtomicRMWBinOpFMax', 13)
LLVMAtomicRMWBinOpFMin = LLVMAtomicRMWBinOp.define('LLVMAtomicRMWBinOpFMin', 14)
LLVMAtomicRMWBinOpUIncWrap = LLVMAtomicRMWBinOp.define('LLVMAtomicRMWBinOpUIncWrap', 15)
LLVMAtomicRMWBinOpUDecWrap = LLVMAtomicRMWBinOp.define('LLVMAtomicRMWBinOpUDecWrap', 16)
LLVMAtomicRMWBinOpUSubCond = LLVMAtomicRMWBinOp.define('LLVMAtomicRMWBinOpUSubCond', 17)
LLVMAtomicRMWBinOpUSubSat = LLVMAtomicRMWBinOp.define('LLVMAtomicRMWBinOpUSubSat', 18)
class struct_lp_build_coro_suspend_info(ctypes.Structure): pass
# Fragment-shader codegen callbacks: attribute interpolation and framebuffer fetch.
@c.record
class struct_lp_build_fs_iface(c.Struct):
  SIZE = 16
  interp_fn: Annotated[c.CFUNCTYPE[LLVMValueRef, [c.POINTER[struct_lp_build_fs_iface], c.POINTER[struct_lp_build_context], Annotated[int, ctypes.c_uint32], Annotated[int, ctypes.c_uint32], Annotated[bool, ctypes.c_bool], Annotated[bool, ctypes.c_bool], LLVMValueRef, c.Array[LLVMValueRef, Literal[2]]]], 0]
  fb_fetch: Annotated[c.CFUNCTYPE[None, [c.POINTER[struct_lp_build_fs_iface], c.POINTER[struct_lp_build_context], Annotated[int, ctypes.c_int32], c.Array[LLVMValueRef, Literal[4]]]], 8]
# FFI stubs: translate a NIR shader (whole shader / single function impl) to LLVM
# IR in SoA form; results are written through the `outputs` array.
@dll.bind
def lp_build_nir_soa(gallivm:c.POINTER[struct_gallivm_state], shader:c.POINTER[struct_nir_shader], params:c.POINTER[struct_lp_build_tgsi_params], outputs:c.POINTER[c.Array[LLVMValueRef, Literal[4]]]) -> None: ...
@dll.bind
def lp_build_nir_soa_func(gallivm:c.POINTER[struct_gallivm_state], shader:c.POINTER[struct_nir_shader], impl:c.POINTER[nir_function_impl], params:c.POINTER[struct_lp_build_tgsi_params], outputs:c.POINTER[c.Array[LLVMValueRef, Literal[4]]]) -> None: ...
# AoS texture-sampling callback table: single fetch-texel hook.
@c.record
class struct_lp_build_sampler_aos(c.Struct):
  SIZE = 8
  emit_fetch_texel: Annotated[c.CFUNCTYPE[LLVMValueRef, [c.POINTER[struct_lp_build_sampler_aos], c.POINTER[struct_lp_build_context], enum_tgsi_texture_type, Annotated[int, ctypes.c_uint32], LLVMValueRef, struct_lp_derivatives, enum_lp_build_tex_modifier]], 0]
# TGSI texture targets (1D/2D/3D/cube/rect, array and shadow/MSAA variants).
class enum_tgsi_texture_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
TGSI_TEXTURE_BUFFER = enum_tgsi_texture_type.define('TGSI_TEXTURE_BUFFER', 0)
TGSI_TEXTURE_1D = enum_tgsi_texture_type.define('TGSI_TEXTURE_1D', 1)
TGSI_TEXTURE_2D = enum_tgsi_texture_type.define('TGSI_TEXTURE_2D', 2)
TGSI_TEXTURE_3D = enum_tgsi_texture_type.define('TGSI_TEXTURE_3D', 3)
TGSI_TEXTURE_CUBE = enum_tgsi_texture_type.define('TGSI_TEXTURE_CUBE', 4)
TGSI_TEXTURE_RECT = enum_tgsi_texture_type.define('TGSI_TEXTURE_RECT', 5)
TGSI_TEXTURE_SHADOW1D = enum_tgsi_texture_type.define('TGSI_TEXTURE_SHADOW1D', 6)
TGSI_TEXTURE_SHADOW2D = enum_tgsi_texture_type.define('TGSI_TEXTURE_SHADOW2D', 7)
TGSI_TEXTURE_SHADOWRECT = enum_tgsi_texture_type.define('TGSI_TEXTURE_SHADOWRECT', 8)
TGSI_TEXTURE_1D_ARRAY = enum_tgsi_texture_type.define('TGSI_TEXTURE_1D_ARRAY', 9)
TGSI_TEXTURE_2D_ARRAY = enum_tgsi_texture_type.define('TGSI_TEXTURE_2D_ARRAY', 10)
TGSI_TEXTURE_SHADOW1D_ARRAY = enum_tgsi_texture_type.define('TGSI_TEXTURE_SHADOW1D_ARRAY', 11)
TGSI_TEXTURE_SHADOW2D_ARRAY = enum_tgsi_texture_type.define('TGSI_TEXTURE_SHADOW2D_ARRAY', 12)
TGSI_TEXTURE_SHADOWCUBE = enum_tgsi_texture_type.define('TGSI_TEXTURE_SHADOWCUBE', 13)
TGSI_TEXTURE_2D_MSAA = enum_tgsi_texture_type.define('TGSI_TEXTURE_2D_MSAA', 14)
TGSI_TEXTURE_2D_ARRAY_MSAA = enum_tgsi_texture_type.define('TGSI_TEXTURE_2D_ARRAY_MSAA', 15)
TGSI_TEXTURE_CUBE_ARRAY = enum_tgsi_texture_type.define('TGSI_TEXTURE_CUBE_ARRAY', 16)
TGSI_TEXTURE_SHADOWCUBE_ARRAY = enum_tgsi_texture_type.define('TGSI_TEXTURE_SHADOWCUBE_ARRAY', 17)
TGSI_TEXTURE_UNKNOWN = enum_tgsi_texture_type.define('TGSI_TEXTURE_UNKNOWN', 18)
TGSI_TEXTURE_COUNT = enum_tgsi_texture_type.define('TGSI_TEXTURE_COUNT', 19)
# Texture-instruction modifiers (projection, LOD bias, explicit LOD/derivatives).
class enum_lp_build_tex_modifier(Annotated[int, ctypes.c_uint32], c.Enum): pass
LP_BLD_TEX_MODIFIER_NONE = enum_lp_build_tex_modifier.define('LP_BLD_TEX_MODIFIER_NONE', 0)
LP_BLD_TEX_MODIFIER_PROJECTED = enum_lp_build_tex_modifier.define('LP_BLD_TEX_MODIFIER_PROJECTED', 1)
LP_BLD_TEX_MODIFIER_LOD_BIAS = enum_lp_build_tex_modifier.define('LP_BLD_TEX_MODIFIER_LOD_BIAS', 2)
LP_BLD_TEX_MODIFIER_EXPLICIT_LOD = enum_lp_build_tex_modifier.define('LP_BLD_TEX_MODIFIER_EXPLICIT_LOD', 3)
LP_BLD_TEX_MODIFIER_EXPLICIT_DERIV = enum_lp_build_tex_modifier.define('LP_BLD_TEX_MODIFIER_EXPLICIT_DERIV', 4)
LP_BLD_TEX_MODIFIER_LOD_ZERO = enum_lp_build_tex_modifier.define('LP_BLD_TEX_MODIFIER_LOD_ZERO', 5)
# FFI stub: translate a NIR shader to LLVM IR in AoS form.
@dll.bind
def lp_build_nir_aos(gallivm:c.POINTER[struct_gallivm_state], shader:c.POINTER[struct_nir_shader], type:struct_lp_type, swizzles:c.Array[Annotated[int, ctypes.c_ubyte], Literal[4]], consts_ptr:LLVMValueRef, inputs:c.POINTER[LLVMValueRef], outputs:c.POINTER[LLVMValueRef], sampler:c.POINTER[struct_lp_build_sampler_aos]) -> None: ...
# An LLVM function handle paired with its function type.
@c.record
class struct_lp_build_fn(c.Struct):
  SIZE = 16
  fn_type: Annotated[LLVMTypeRef, 0]
  fn: Annotated[LLVMValueRef, 8]
# FFI stubs: NIR optimization/lowering passes and helpers mapping NIR
# atomics / tex / image intrinsics onto the lp codegen parameter structs.
@dll.bind
def lp_build_nir_soa_prepasses(nir:c.POINTER[struct_nir_shader]) -> None: ...
@dll.bind
def lp_build_opt_nir(nir:c.POINTER[struct_nir_shader]) -> None: ...
@dll.bind
def lp_translate_atomic_op(op:nir_atomic_op) -> LLVMAtomicRMWBinOp: ...
@dll.bind
def lp_build_nir_sample_key(stage:gl_shader_stage, instr:c.POINTER[nir_tex_instr]) -> uint32_t: ...
@dll.bind
def lp_img_op_from_intrinsic(params:c.POINTER[struct_lp_img_params], instr:c.POINTER[nir_intrinsic_instr]) -> None: ...
@dll.bind
def lp_packed_img_op_from_intrinsic(instr:c.POINTER[nir_intrinsic_instr]) -> uint32_t: ...
# Argument slots of the compute-shader call-context (context/resources/shared/
# scratch plus thread/block/grid id and size components).
class enum_lp_nir_call_context_args(Annotated[int, ctypes.c_uint32], c.Enum): pass
LP_NIR_CALL_CONTEXT_CONTEXT = enum_lp_nir_call_context_args.define('LP_NIR_CALL_CONTEXT_CONTEXT', 0)
LP_NIR_CALL_CONTEXT_RESOURCES = enum_lp_nir_call_context_args.define('LP_NIR_CALL_CONTEXT_RESOURCES', 1)
LP_NIR_CALL_CONTEXT_SHARED = enum_lp_nir_call_context_args.define('LP_NIR_CALL_CONTEXT_SHARED', 2)
LP_NIR_CALL_CONTEXT_SCRATCH = enum_lp_nir_call_context_args.define('LP_NIR_CALL_CONTEXT_SCRATCH', 3)
LP_NIR_CALL_CONTEXT_WORK_DIM = enum_lp_nir_call_context_args.define('LP_NIR_CALL_CONTEXT_WORK_DIM', 4)
LP_NIR_CALL_CONTEXT_THREAD_ID_0 = enum_lp_nir_call_context_args.define('LP_NIR_CALL_CONTEXT_THREAD_ID_0', 5)
LP_NIR_CALL_CONTEXT_THREAD_ID_1 = enum_lp_nir_call_context_args.define('LP_NIR_CALL_CONTEXT_THREAD_ID_1', 6)
LP_NIR_CALL_CONTEXT_THREAD_ID_2 = enum_lp_nir_call_context_args.define('LP_NIR_CALL_CONTEXT_THREAD_ID_2', 7)
LP_NIR_CALL_CONTEXT_BLOCK_ID_0 = enum_lp_nir_call_context_args.define('LP_NIR_CALL_CONTEXT_BLOCK_ID_0', 8)
LP_NIR_CALL_CONTEXT_BLOCK_ID_1 = enum_lp_nir_call_context_args.define('LP_NIR_CALL_CONTEXT_BLOCK_ID_1', 9)
LP_NIR_CALL_CONTEXT_BLOCK_ID_2 = enum_lp_nir_call_context_args.define('LP_NIR_CALL_CONTEXT_BLOCK_ID_2', 10)
LP_NIR_CALL_CONTEXT_GRID_SIZE_0 = enum_lp_nir_call_context_args.define('LP_NIR_CALL_CONTEXT_GRID_SIZE_0', 11)
LP_NIR_CALL_CONTEXT_GRID_SIZE_1 = enum_lp_nir_call_context_args.define('LP_NIR_CALL_CONTEXT_GRID_SIZE_1', 12)
LP_NIR_CALL_CONTEXT_GRID_SIZE_2 = enum_lp_nir_call_context_args.define('LP_NIR_CALL_CONTEXT_GRID_SIZE_2', 13)
LP_NIR_CALL_CONTEXT_BLOCK_SIZE_0 = enum_lp_nir_call_context_args.define('LP_NIR_CALL_CONTEXT_BLOCK_SIZE_0', 14)
LP_NIR_CALL_CONTEXT_BLOCK_SIZE_1 = enum_lp_nir_call_context_args.define('LP_NIR_CALL_CONTEXT_BLOCK_SIZE_1', 15)
LP_NIR_CALL_CONTEXT_BLOCK_SIZE_2 = enum_lp_nir_call_context_args.define('LP_NIR_CALL_CONTEXT_BLOCK_SIZE_2', 16)
LP_NIR_CALL_CONTEXT_MAX_ARGS = enum_lp_nir_call_context_args.define('LP_NIR_CALL_CONTEXT_MAX_ARGS', 17)
# FFI stubs: build the CS call-context type and struct/array/pointer accessors
# that emit the corresponding LLVM loads/stores ("2" variants take explicit types).
@dll.bind
def lp_build_cs_func_call_context(gallivm:c.POINTER[struct_gallivm_state], length:Annotated[int, ctypes.c_int32], context_type:LLVMTypeRef, resources_type:LLVMTypeRef) -> LLVMTypeRef: ...
@dll.bind
def lp_build_struct_get_ptr2(gallivm:c.POINTER[struct_gallivm_state], ptr_type:LLVMTypeRef, ptr:LLVMValueRef, member:Annotated[int, ctypes.c_uint32], name:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> LLVMValueRef: ...
@dll.bind
def lp_build_struct_get2(gallivm:c.POINTER[struct_gallivm_state], ptr_type:LLVMTypeRef, ptr:LLVMValueRef, member:Annotated[int, ctypes.c_uint32], name:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> LLVMValueRef: ...
@dll.bind
def lp_build_array_get_ptr2(gallivm:c.POINTER[struct_gallivm_state], array_type:LLVMTypeRef, ptr:LLVMValueRef, index:LLVMValueRef) -> LLVMValueRef: ...
@dll.bind
def lp_build_array_get2(gallivm:c.POINTER[struct_gallivm_state], array_type:LLVMTypeRef, ptr:LLVMValueRef, index:LLVMValueRef) -> LLVMValueRef: ...
@dll.bind
def lp_build_pointer_get2(builder:LLVMBuilderRef, ptr_type:LLVMTypeRef, ptr:LLVMValueRef, index:LLVMValueRef) -> LLVMValueRef: ...
@dll.bind
def lp_build_pointer_get_unaligned2(builder:LLVMBuilderRef, ptr_type:LLVMTypeRef, ptr:LLVMValueRef, index:LLVMValueRef, alignment:Annotated[int, ctypes.c_uint32]) -> LLVMValueRef: ...
@dll.bind
def lp_build_pointer_set(builder:LLVMBuilderRef, ptr:LLVMValueRef, index:LLVMValueRef, value:LLVMValueRef) -> None: ...
@dll.bind
def lp_build_pointer_set_unaligned(builder:LLVMBuilderRef, ptr:LLVMValueRef, index:LLVMValueRef, value:LLVMValueRef, alignment:Annotated[int, ctypes.c_uint32]) -> None: ...
# Dynamic sampler-state accessor table: one callback per texture/sampler property
# (dimensions, mip levels, strides, pointers, LOD clamps, border color, ...);
# each returns the property as an LLVM value for the given unit.
@c.record
class struct_lp_sampler_dynamic_state(c.Struct):
  SIZE = 144
  width: Annotated[c.CFUNCTYPE[LLVMValueRef, [c.POINTER[struct_gallivm_state], LLVMTypeRef, LLVMValueRef, Annotated[int, ctypes.c_uint32], LLVMValueRef]], 0]
  height: Annotated[c.CFUNCTYPE[LLVMValueRef, [c.POINTER[struct_gallivm_state], LLVMTypeRef, LLVMValueRef, Annotated[int, ctypes.c_uint32], LLVMValueRef]], 8]
  depth: Annotated[c.CFUNCTYPE[LLVMValueRef, [c.POINTER[struct_gallivm_state], LLVMTypeRef, LLVMValueRef, Annotated[int, ctypes.c_uint32], LLVMValueRef]], 16]
  first_level: Annotated[c.CFUNCTYPE[LLVMValueRef, [c.POINTER[struct_gallivm_state], LLVMTypeRef, LLVMValueRef, Annotated[int, ctypes.c_uint32], LLVMValueRef]], 24]
  last_level: Annotated[c.CFUNCTYPE[LLVMValueRef, [c.POINTER[struct_gallivm_state], LLVMTypeRef, LLVMValueRef, Annotated[int, ctypes.c_uint32], LLVMValueRef]], 32]
  row_stride: Annotated[c.CFUNCTYPE[LLVMValueRef, [c.POINTER[struct_gallivm_state], LLVMTypeRef, LLVMValueRef, Annotated[int, ctypes.c_uint32], LLVMValueRef, c.POINTER[LLVMTypeRef]]], 40]
  img_stride: Annotated[c.CFUNCTYPE[LLVMValueRef, [c.POINTER[struct_gallivm_state], LLVMTypeRef, LLVMValueRef, Annotated[int, ctypes.c_uint32], LLVMValueRef, c.POINTER[LLVMTypeRef]]], 48]
  base_ptr: Annotated[c.CFUNCTYPE[LLVMValueRef, [c.POINTER[struct_gallivm_state], LLVMTypeRef, LLVMValueRef, Annotated[int, ctypes.c_uint32], LLVMValueRef]], 56]
  mip_offsets: Annotated[c.CFUNCTYPE[LLVMValueRef, [c.POINTER[struct_gallivm_state], LLVMTypeRef, LLVMValueRef, Annotated[int, ctypes.c_uint32], LLVMValueRef, c.POINTER[LLVMTypeRef]]], 64]
  num_samples: Annotated[c.CFUNCTYPE[LLVMValueRef, [c.POINTER[struct_gallivm_state], LLVMTypeRef, LLVMValueRef, Annotated[int, ctypes.c_uint32], LLVMValueRef]], 72]
  sample_stride: Annotated[c.CFUNCTYPE[LLVMValueRef, [c.POINTER[struct_gallivm_state], LLVMTypeRef, LLVMValueRef, Annotated[int, ctypes.c_uint32], LLVMValueRef]], 80]
  min_lod: Annotated[c.CFUNCTYPE[LLVMValueRef, [c.POINTER[struct_gallivm_state], LLVMTypeRef, LLVMValueRef, Annotated[int, ctypes.c_uint32]]], 88]
  max_lod: Annotated[c.CFUNCTYPE[LLVMValueRef, [c.POINTER[struct_gallivm_state], LLVMTypeRef, LLVMValueRef, Annotated[int, ctypes.c_uint32]]], 96]
  lod_bias: Annotated[c.CFUNCTYPE[LLVMValueRef, [c.POINTER[struct_gallivm_state], LLVMTypeRef, LLVMValueRef, Annotated[int, ctypes.c_uint32]]], 104]
  border_color: Annotated[c.CFUNCTYPE[LLVMValueRef, [c.POINTER[struct_gallivm_state], LLVMTypeRef, LLVMValueRef, Annotated[int, ctypes.c_uint32]]], 112]
  cache_ptr: Annotated[c.CFUNCTYPE[LLVMValueRef, [c.POINTER[struct_gallivm_state], LLVMTypeRef, LLVMValueRef, Annotated[int, ctypes.c_uint32]]], 120]
  residency: Annotated[c.CFUNCTYPE[LLVMValueRef, [c.POINTER[struct_gallivm_state], LLVMTypeRef, LLVMValueRef, Annotated[int, ctypes.c_uint32], LLVMValueRef]], 128]
  base_offset: Annotated[c.CFUNCTYPE[LLVMValueRef, [c.POINTER[struct_gallivm_state], LLVMTypeRef, LLVMValueRef, Annotated[int, ctypes.c_uint32], LLVMValueRef]], 136]
# JIT-visible buffer binding. `u` and `f` share byte offset 0 — this mirrors a
# C union (same pointer viewed as uint32 or float data).
@c.record
class struct_lp_jit_buffer(c.Struct):
  SIZE = 16
  u: Annotated[c.POINTER[uint32_t], 0]
  f: Annotated[c.POINTER[Annotated[float, ctypes.c_float]], 0]
  num_elements: Annotated[uint32_t, 8]
# Field indices of struct_lp_jit_buffer as seen by generated LLVM IR.
class _anonenum0(Annotated[int, ctypes.c_uint32], c.Enum): pass
LP_JIT_BUFFER_BASE = _anonenum0.define('LP_JIT_BUFFER_BASE', 0)
LP_JIT_BUFFER_NUM_ELEMENTS = _anonenum0.define('LP_JIT_BUFFER_NUM_ELEMENTS', 1)
LP_JIT_BUFFER_NUM_FIELDS = _anonenum0.define('LP_JIT_BUFFER_NUM_FIELDS', 2)
# FFI stubs: emit LLVM code loading a descriptor/buffer base pointer or element
# count from the buffers array (clamped by buffers_limit).
@dll.bind
def lp_llvm_descriptor_base(gallivm:c.POINTER[struct_gallivm_state], buffers_ptr:LLVMValueRef, index:LLVMValueRef, buffers_limit:Annotated[int, ctypes.c_uint32]) -> LLVMValueRef: ...
@dll.bind
def lp_llvm_buffer_base(gallivm:c.POINTER[struct_gallivm_state], buffers_ptr:LLVMValueRef, buffers_offset:LLVMValueRef, buffers_limit:Annotated[int, ctypes.c_uint32]) -> LLVMValueRef: ...
@dll.bind
def lp_llvm_buffer_num_elements(gallivm:c.POINTER[struct_gallivm_state], buffers_ptr:LLVMValueRef, buffers_offset:LLVMValueRef, buffers_limit:Annotated[int, ctypes.c_uint32]) -> LLVMValueRef: ...
# Field indices of the JIT texture struct (declared elsewhere in this file).
class _anonenum1(Annotated[int, ctypes.c_uint32], c.Enum): pass
LP_JIT_TEXTURE_BASE = _anonenum1.define('LP_JIT_TEXTURE_BASE', 0)
LP_JIT_TEXTURE_WIDTH = _anonenum1.define('LP_JIT_TEXTURE_WIDTH', 1)
LP_JIT_TEXTURE_HEIGHT = _anonenum1.define('LP_JIT_TEXTURE_HEIGHT', 2)
LP_JIT_TEXTURE_DEPTH = _anonenum1.define('LP_JIT_TEXTURE_DEPTH', 3)
LP_JIT_TEXTURE_ROW_STRIDE = _anonenum1.define('LP_JIT_TEXTURE_ROW_STRIDE', 4)
LP_JIT_TEXTURE_IMG_STRIDE = _anonenum1.define('LP_JIT_TEXTURE_IMG_STRIDE', 5)
LP_JIT_TEXTURE_FIRST_LEVEL = _anonenum1.define('LP_JIT_TEXTURE_FIRST_LEVEL', 6)
LP_JIT_TEXTURE_LAST_LEVEL = _anonenum1.define('LP_JIT_TEXTURE_LAST_LEVEL', 7)
LP_JIT_TEXTURE_MIP_OFFSETS = _anonenum1.define('LP_JIT_TEXTURE_MIP_OFFSETS', 8)
LP_JIT_SAMPLER_INDEX_DUMMY = _anonenum1.define('LP_JIT_SAMPLER_INDEX_DUMMY', 9)
LP_JIT_TEXTURE_NUM_FIELDS = _anonenum1.define('LP_JIT_TEXTURE_NUM_FIELDS', 10)
# JIT-visible sampler state: LOD clamps, LOD bias and RGBA border color.
@c.record
class struct_lp_jit_sampler(c.Struct):
  SIZE = 28
  min_lod: Annotated[Annotated[float, ctypes.c_float], 0]
  max_lod: Annotated[Annotated[float, ctypes.c_float], 4]
  lod_bias: Annotated[Annotated[float, ctypes.c_float], 8]
  border_color: Annotated[c.Array[Annotated[float, ctypes.c_float], Literal[4]], 12]
# Field indices of struct_lp_jit_sampler.
class _anonenum2(Annotated[int, ctypes.c_uint32], c.Enum): pass
LP_JIT_SAMPLER_MIN_LOD = _anonenum2.define('LP_JIT_SAMPLER_MIN_LOD', 0)
LP_JIT_SAMPLER_MAX_LOD = _anonenum2.define('LP_JIT_SAMPLER_MAX_LOD', 1)
LP_JIT_SAMPLER_LOD_BIAS = _anonenum2.define('LP_JIT_SAMPLER_LOD_BIAS', 2)
LP_JIT_SAMPLER_BORDER_COLOR = _anonenum2.define('LP_JIT_SAMPLER_BORDER_COLOR', 3)
LP_JIT_SAMPLER_NUM_FIELDS = _anonenum2.define('LP_JIT_SAMPLER_NUM_FIELDS', 4)
# JIT-visible image binding: base pointer, dimensions, sample/row/image strides,
# residency pointer and base offset.
@c.record
class struct_lp_jit_image(c.Struct):
  SIZE = 48
  base: Annotated[ctypes.c_void_p, 0]
  width: Annotated[uint32_t, 8]
  height: Annotated[uint16_t, 12]
  depth: Annotated[uint16_t, 14]
  num_samples: Annotated[uint8_t, 16]
  sample_stride: Annotated[uint32_t, 20]
  row_stride: Annotated[uint32_t, 24]
  img_stride: Annotated[uint32_t, 28]
  residency: Annotated[ctypes.c_void_p, 32]
  base_offset: Annotated[uint32_t, 40]
# Field indices of struct_lp_jit_image.
class _anonenum3(Annotated[int, ctypes.c_uint32], c.Enum): pass
LP_JIT_IMAGE_BASE = _anonenum3.define('LP_JIT_IMAGE_BASE', 0)
LP_JIT_IMAGE_WIDTH = _anonenum3.define('LP_JIT_IMAGE_WIDTH', 1)
LP_JIT_IMAGE_HEIGHT = _anonenum3.define('LP_JIT_IMAGE_HEIGHT', 2)
LP_JIT_IMAGE_DEPTH = _anonenum3.define('LP_JIT_IMAGE_DEPTH', 3)
LP_JIT_IMAGE_NUM_SAMPLES = _anonenum3.define('LP_JIT_IMAGE_NUM_SAMPLES', 4)
LP_JIT_IMAGE_SAMPLE_STRIDE = _anonenum3.define('LP_JIT_IMAGE_SAMPLE_STRIDE', 5)
LP_JIT_IMAGE_ROW_STRIDE = _anonenum3.define('LP_JIT_IMAGE_ROW_STRIDE', 6)
LP_JIT_IMAGE_IMG_STRIDE = _anonenum3.define('LP_JIT_IMAGE_IMG_STRIDE', 7)
LP_JIT_IMAGE_RESIDENCY = _anonenum3.define('LP_JIT_IMAGE_RESIDENCY', 8)
LP_JIT_IMAGE_BASE_OFFSET = _anonenum3.define('LP_JIT_IMAGE_BASE_OFFSET', 9)
LP_JIT_IMAGE_NUM_FIELDS = _anonenum3.define('LP_JIT_IMAGE_NUM_FIELDS', 10)
# All resources visible to a JIT-compiled shader: fixed-size arrays of constant
# buffers, SSBOs, textures, samplers and images.
@c.record
class struct_lp_jit_resources(c.Struct):
  SIZE = 32384
  constants: Annotated[c.Array[struct_lp_jit_buffer, Literal[16]], 0]
  ssbos: Annotated[c.Array[struct_lp_jit_buffer, Literal[32]], 256]
  textures: Annotated[c.Array[struct_lp_jit_texture, Literal[128]], 768]
  samplers: Annotated[c.Array[struct_lp_jit_sampler, Literal[32]], 28416]
  images: Annotated[c.Array[struct_lp_jit_image, Literal[64]], 29312]
# Field indices of struct_lp_jit_resources.
class _anonenum4(Annotated[int, ctypes.c_uint32], c.Enum): pass
LP_JIT_RES_CONSTANTS = _anonenum4.define('LP_JIT_RES_CONSTANTS', 0)
LP_JIT_RES_SSBOS = _anonenum4.define('LP_JIT_RES_SSBOS', 1)
LP_JIT_RES_TEXTURES = _anonenum4.define('LP_JIT_RES_TEXTURES', 2)
LP_JIT_RES_SAMPLERS = _anonenum4.define('LP_JIT_RES_SAMPLERS', 3)
LP_JIT_RES_IMAGES = _anonenum4.define('LP_JIT_RES_IMAGES', 4)
LP_JIT_RES_COUNT = _anonenum4.define('LP_JIT_RES_COUNT', 5)
# FFI stub: build the LLVM type corresponding to struct_lp_jit_resources.
@dll.bind
def lp_build_jit_resources_type(gallivm:c.POINTER[struct_gallivm_state]) -> LLVMTypeRef: ...
# Field indices of the JIT vertex-header struct built below.
class _anonenum5(Annotated[int, ctypes.c_uint32], c.Enum): pass
LP_JIT_VERTEX_HEADER_VERTEX_ID = _anonenum5.define('LP_JIT_VERTEX_HEADER_VERTEX_ID', 0)
LP_JIT_VERTEX_HEADER_CLIP_POS = _anonenum5.define('LP_JIT_VERTEX_HEADER_CLIP_POS', 1)
LP_JIT_VERTEX_HEADER_DATA = _anonenum5.define('LP_JIT_VERTEX_HEADER_DATA', 2)
# FFI stubs: build JIT-side LLVM types (vertex header, sample/size/image function
# types) and fill a dynamic-state table with the standard JIT accessors.
@dll.bind
def lp_build_create_jit_vertex_header_type(gallivm:c.POINTER[struct_gallivm_state], data_elems:Annotated[int, ctypes.c_int32]) -> LLVMTypeRef: ...
@dll.bind
def lp_build_jit_fill_sampler_dynamic_state(state:c.POINTER[struct_lp_sampler_dynamic_state]) -> None: ...
@dll.bind
def lp_build_jit_fill_image_dynamic_state(state:c.POINTER[struct_lp_sampler_dynamic_state]) -> None: ...
@dll.bind
def lp_build_sample_function_type(gallivm:c.POINTER[struct_gallivm_state], sample_key:uint32_t) -> LLVMTypeRef: ...
@dll.bind
def lp_build_size_function_type(gallivm:c.POINTER[struct_gallivm_state], params:c.POINTER[struct_lp_sampler_size_query_params]) -> LLVMTypeRef: ...
@dll.bind
def lp_build_image_function_type(gallivm:c.POINTER[struct_gallivm_state], params:c.POINTER[struct_lp_img_params], ms:Annotated[bool, ctypes.c_bool], is64:Annotated[bool, ctypes.c_bool]) -> LLVMTypeRef: ...
# Combined static + dynamic texture state captured for a bindless texture handle.
@c.record
class struct_lp_texture_handle_state(c.Struct):
  SIZE = 232
  static_state: Annotated[struct_lp_static_texture_state, 0]
  dynamic_state: Annotated[struct_lp_jit_texture, 16]
# Compile-time texture state baked into generated code: formats, swizzles,
# target, power-of-two flags and tiling. Fields with four Annotated arguments
# are C bit-fields — presumably (byte offset, bit width, bit shift); confirm
# against the binding generator before relying on this.
@c.record
class struct_lp_static_texture_state(c.Struct):
  SIZE = 12
  format: Annotated[enum_pipe_format, 0]
  res_format: Annotated[enum_pipe_format, 4]
  swizzle_r: Annotated[Annotated[int, ctypes.c_uint32], 8, 3, 0]
  swizzle_g: Annotated[Annotated[int, ctypes.c_uint32], 8, 3, 3]
  swizzle_b: Annotated[Annotated[int, ctypes.c_uint32], 8, 3, 6]
  swizzle_a: Annotated[Annotated[int, ctypes.c_uint32], 9, 3, 1]
  target: Annotated[enum_pipe_texture_target, 9, 5, 4]
  res_target: Annotated[enum_pipe_texture_target, 10, 5, 1]
  pot_width: Annotated[Annotated[int, ctypes.c_uint32], 10, 1, 6]
  pot_height: Annotated[Annotated[int, ctypes.c_uint32], 10, 1, 7]
  pot_depth: Annotated[Annotated[int, ctypes.c_uint32], 11, 1, 0]
  level_zero_only: Annotated[Annotated[int, ctypes.c_uint32], 11, 1, 1]
  tiled: Annotated[Annotated[int, ctypes.c_uint32], 11, 1, 2]
  tiled_samples: Annotated[Annotated[int, ctypes.c_uint32], 11, 5, 3]
# Gallium pipe texture targets (buffer, 1D/2D/3D, cube, rect, array variants).
class enum_pipe_texture_target(Annotated[int, ctypes.c_uint32], c.Enum): pass
PIPE_BUFFER = enum_pipe_texture_target.define('PIPE_BUFFER', 0)
PIPE_TEXTURE_1D = enum_pipe_texture_target.define('PIPE_TEXTURE_1D', 1)
PIPE_TEXTURE_2D = enum_pipe_texture_target.define('PIPE_TEXTURE_2D', 2)
PIPE_TEXTURE_3D = enum_pipe_texture_target.define('PIPE_TEXTURE_3D', 3)
PIPE_TEXTURE_CUBE = enum_pipe_texture_target.define('PIPE_TEXTURE_CUBE', 4)
PIPE_TEXTURE_RECT = enum_pipe_texture_target.define('PIPE_TEXTURE_RECT', 5)
PIPE_TEXTURE_1D_ARRAY = enum_pipe_texture_target.define('PIPE_TEXTURE_1D_ARRAY', 6)
PIPE_TEXTURE_2D_ARRAY = enum_pipe_texture_target.define('PIPE_TEXTURE_2D_ARRAY', 7)
PIPE_TEXTURE_CUBE_ARRAY = enum_pipe_texture_target.define('PIPE_TEXTURE_CUBE_ARRAY', 8)
PIPE_MAX_TEXTURE_TYPES = enum_pipe_texture_target.define('PIPE_MAX_TEXTURE_TYPES', 9)
# Per-texture table of JIT-compiled entry points (sample/fetch/size/image
# functions) plus the state they were compiled for and sampled/storage flags.
@c.record
class struct_lp_texture_functions(c.Struct):
  SIZE = 296
  sample_functions: Annotated[c.POINTER[c.POINTER[ctypes.c_void_p]], 0]
  sampler_count: Annotated[uint32_t, 8]
  fetch_functions: Annotated[c.POINTER[ctypes.c_void_p], 16]
  size_function: Annotated[ctypes.c_void_p, 24]
  samples_function: Annotated[ctypes.c_void_p, 32]
  image_functions: Annotated[c.POINTER[ctypes.c_void_p], 40]
  state: Annotated[struct_lp_texture_handle_state, 48]
  sampled: Annotated[Annotated[bool, ctypes.c_bool], 280]
  storage: Annotated[Annotated[bool, ctypes.c_bool], 281]
  matrix: Annotated[ctypes.c_void_p, 288]
# Small handle pairing a function-table pointer with a sampler index (16 bytes).
@c.record
class struct_lp_texture_handle(c.Struct):
  SIZE = 16
  functions: Annotated[ctypes.c_void_p, 0]
  sampler_index: Annotated[uint32_t, 8]
# JIT-visible bindless texture descriptor (24 bytes).
@c.record
class struct_lp_jit_bindless_texture(c.Struct):
  SIZE = 24
  base: Annotated[ctypes.c_void_p, 0]
  residency: Annotated[ctypes.c_void_p, 8]
  sampler_index: Annotated[uint32_t, 16]
# 64-byte descriptor. texture/image/buffer/accel_struct all start at offset 0,
# i.e. the native struct holds them in a union — only one view is valid at a time.
@c.record
class struct_lp_descriptor(c.Struct):
  SIZE = 64
  texture: Annotated[struct_lp_jit_bindless_texture, 0]
  sampler: Annotated[struct_lp_jit_sampler, 24]
  image: Annotated[struct_lp_jit_image, 0]
  buffer: Annotated[struct_lp_jit_buffer, 0]
  accel_struct: Annotated[uint64_t, 0]
  functions: Annotated[ctypes.c_void_p, 56]
# Foreign-function stubs for llvmpipe control-flow helpers: "skip" blocks
# (conditional early-exit regions) and execution-mask management. Bodies are
# `...` — `dll.bind` wires each signature to the native symbol of the same name.
@dll.bind
def lp_build_flow_skip_begin(ctx:c.POINTER[struct_lp_build_skip_context], gallivm:c.POINTER[struct_gallivm_state]) -> None: ...
@dll.bind
def lp_build_flow_skip_cond_break(ctx:c.POINTER[struct_lp_build_skip_context], cond:LLVMValueRef) -> None: ...
@dll.bind
def lp_build_flow_skip_end(ctx:c.POINTER[struct_lp_build_skip_context]) -> None: ...
@dll.bind
def lp_build_mask_begin(mask:c.POINTER[struct_lp_build_mask_context], gallivm:c.POINTER[struct_gallivm_state], type:struct_lp_type, value:LLVMValueRef) -> None: ...
@dll.bind
def lp_build_mask_value(mask:c.POINTER[struct_lp_build_mask_context]) -> LLVMValueRef: ...
@dll.bind
def lp_build_mask_update(mask:c.POINTER[struct_lp_build_mask_context], value:LLVMValueRef) -> None: ...
@dll.bind
def lp_build_mask_force(mask:c.POINTER[struct_lp_build_mask_context], value:LLVMValueRef) -> None: ...
@dll.bind
def lp_build_mask_check(mask:c.POINTER[struct_lp_build_mask_context]) -> None: ...
@dll.bind
def lp_build_mask_end(mask:c.POINTER[struct_lp_build_mask_context]) -> LLVMValueRef: ...
# State carried across lp_build_loop_begin/end: current block, the loop
# counter (variable + value + LLVM type) and the owning gallivm context.
@c.record
class struct_lp_build_loop_state(c.Struct):
  SIZE = 40
  block: Annotated[LLVMBasicBlockRef, 0]
  counter_var: Annotated[LLVMValueRef, 8]
  counter: Annotated[LLVMValueRef, 16]
  counter_type: Annotated[LLVMTypeRef, 24]
  gallivm: Annotated[c.POINTER[struct_gallivm_state], 32]
# Bindings for the basic (do/while-style) loop builder operating on
# struct_lp_build_loop_state.
@dll.bind
def lp_build_loop_begin(state:c.POINTER[struct_lp_build_loop_state], gallivm:c.POINTER[struct_gallivm_state], start:LLVMValueRef) -> None: ...
@dll.bind
def lp_build_loop_end(state:c.POINTER[struct_lp_build_loop_state], end:LLVMValueRef, step:LLVMValueRef) -> None: ...
@dll.bind
def lp_build_loop_force_set_counter(state:c.POINTER[struct_lp_build_loop_state], end:LLVMValueRef) -> None: ...
@dll.bind
def lp_build_loop_force_reload_counter(state:c.POINTER[struct_lp_build_loop_state]) -> None: ...
# LLVM-C integer comparison predicates (values start at 32, matching
# LLVMIntEQ = 32 in the LLVM-C API), plus the loop-end variant that takes one.
class LLVMIntPredicate(Annotated[int, ctypes.c_uint32], c.Enum): pass
LLVMIntEQ = LLVMIntPredicate.define('LLVMIntEQ', 32)
LLVMIntNE = LLVMIntPredicate.define('LLVMIntNE', 33)
LLVMIntUGT = LLVMIntPredicate.define('LLVMIntUGT', 34)
LLVMIntUGE = LLVMIntPredicate.define('LLVMIntUGE', 35)
LLVMIntULT = LLVMIntPredicate.define('LLVMIntULT', 36)
LLVMIntULE = LLVMIntPredicate.define('LLVMIntULE', 37)
LLVMIntSGT = LLVMIntPredicate.define('LLVMIntSGT', 38)
LLVMIntSGE = LLVMIntPredicate.define('LLVMIntSGE', 39)
LLVMIntSLT = LLVMIntPredicate.define('LLVMIntSLT', 40)
LLVMIntSLE = LLVMIntPredicate.define('LLVMIntSLE', 41)
@dll.bind
def lp_build_loop_end_cond(state:c.POINTER[struct_lp_build_loop_state], end:LLVMValueRef, step:LLVMValueRef, cond:LLVMIntPredicate) -> None: ...
# State for a structured for-loop: begin/body/exit basic blocks plus counter,
# step, end bound and the comparison predicate used for the loop condition.
@c.record
class struct_lp_build_for_loop_state(c.Struct):
  SIZE = 80
  begin: Annotated[LLVMBasicBlockRef, 0]
  body: Annotated[LLVMBasicBlockRef, 8]
  exit: Annotated[LLVMBasicBlockRef, 16]
  counter_var: Annotated[LLVMValueRef, 24]
  counter: Annotated[LLVMValueRef, 32]
  counter_type: Annotated[LLVMTypeRef, 40]
  step: Annotated[LLVMValueRef, 48]
  cond: Annotated[LLVMIntPredicate, 56]
  end: Annotated[LLVMValueRef, 64]
  gallivm: Annotated[c.POINTER[struct_gallivm_state], 72]
# For-loop builder entry/exit bindings.
@dll.bind
def lp_build_for_loop_begin(state:c.POINTER[struct_lp_build_for_loop_state], gallivm:c.POINTER[struct_gallivm_state], start:LLVMValueRef, llvm_cond:LLVMIntPredicate, end:LLVMValueRef, step:LLVMValueRef) -> None: ...
@dll.bind
def lp_build_for_loop_end(state:c.POINTER[struct_lp_build_for_loop_state]) -> None: ...
# State for if/else/endif construction: the condition value and the four
# basic blocks involved (entry, true, false, merge).
@c.record
class struct_lp_build_if_state(c.Struct):
  SIZE = 48
  gallivm: Annotated[c.POINTER[struct_gallivm_state], 0]
  condition: Annotated[LLVMValueRef, 8]
  entry_block: Annotated[LLVMBasicBlockRef, 16]
  true_block: Annotated[LLVMBasicBlockRef, 24]
  false_block: Annotated[LLVMBasicBlockRef, 32]
  merge_block: Annotated[LLVMBasicBlockRef, 40]
# Bindings for conditional construction (if/else/endif), block insertion,
# and entry-block alloca helpers.
@dll.bind
def lp_build_if(ctx:c.POINTER[struct_lp_build_if_state], gallivm:c.POINTER[struct_gallivm_state], condition:LLVMValueRef) -> None: ...
@dll.bind
def lp_build_else(ctx:c.POINTER[struct_lp_build_if_state]) -> None: ...
@dll.bind
def lp_build_endif(ctx:c.POINTER[struct_lp_build_if_state]) -> None: ...
@dll.bind
def lp_build_insert_new_block(gallivm:c.POINTER[struct_gallivm_state], name:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> LLVMBasicBlockRef: ...
@dll.bind
def lp_create_builder_at_entry(gallivm:c.POINTER[struct_gallivm_state]) -> LLVMBuilderRef: ...
@dll.bind
def lp_build_alloca(gallivm:c.POINTER[struct_gallivm_state], type:LLVMTypeRef, name:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> LLVMValueRef: ...
@dll.bind
def lp_build_alloca_undef(gallivm:c.POINTER[struct_gallivm_state], type:LLVMTypeRef, name:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> LLVMValueRef: ...
@dll.bind
def lp_build_array_alloca(gallivm:c.POINTER[struct_gallivm_state], type:LLVMTypeRef, count:LLVMValueRef, name:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> LLVMValueRef: ...
# lp_type introspection helpers (mantissa bits, scale/offset, min/max/eps)
# followed by constant-value builders (undef/zero/one, scalar/vector/AOS
# constants, mask vectors, strings, and function-pointer constants).
@dll.bind
def lp_mantissa(type:struct_lp_type) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def lp_const_shift(type:struct_lp_type) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def lp_const_offset(type:struct_lp_type) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def lp_const_scale(type:struct_lp_type) -> Annotated[float, ctypes.c_double]: ...
@dll.bind
def lp_const_min(type:struct_lp_type) -> Annotated[float, ctypes.c_double]: ...
@dll.bind
def lp_const_max(type:struct_lp_type) -> Annotated[float, ctypes.c_double]: ...
@dll.bind
def lp_const_eps(type:struct_lp_type) -> Annotated[float, ctypes.c_double]: ...
@dll.bind
def lp_build_undef(gallivm:c.POINTER[struct_gallivm_state], type:struct_lp_type) -> LLVMValueRef: ...
@dll.bind
def lp_build_zero(gallivm:c.POINTER[struct_gallivm_state], type:struct_lp_type) -> LLVMValueRef: ...
@dll.bind
def lp_build_one(gallivm:c.POINTER[struct_gallivm_state], type:struct_lp_type) -> LLVMValueRef: ...
@dll.bind
def lp_build_const_elem(gallivm:c.POINTER[struct_gallivm_state], type:struct_lp_type, val:Annotated[float, ctypes.c_double]) -> LLVMValueRef: ...
@dll.bind
def lp_build_const_vec(gallivm:c.POINTER[struct_gallivm_state], type:struct_lp_type, val:Annotated[float, ctypes.c_double]) -> LLVMValueRef: ...
@dll.bind
def lp_build_const_int_vec(gallivm:c.POINTER[struct_gallivm_state], type:struct_lp_type, val:Annotated[int, ctypes.c_int64]) -> LLVMValueRef: ...
@dll.bind
def lp_build_const_channel_vec(gallivm:c.POINTER[struct_gallivm_state], type:struct_lp_type) -> LLVMValueRef: ...
@dll.bind
def lp_build_const_aos(gallivm:c.POINTER[struct_gallivm_state], type:struct_lp_type, r:Annotated[float, ctypes.c_double], g:Annotated[float, ctypes.c_double], b:Annotated[float, ctypes.c_double], a:Annotated[float, ctypes.c_double], swizzle:c.POINTER[Annotated[int, ctypes.c_ubyte]]) -> LLVMValueRef: ...
@dll.bind
def lp_build_const_mask_aos(gallivm:c.POINTER[struct_gallivm_state], type:struct_lp_type, mask:Annotated[int, ctypes.c_uint32], channels:Annotated[int, ctypes.c_uint32]) -> LLVMValueRef: ...
@dll.bind
def lp_build_const_mask_aos_swizzled(gallivm:c.POINTER[struct_gallivm_state], type:struct_lp_type, mask:Annotated[int, ctypes.c_uint32], channels:Annotated[int, ctypes.c_uint32], swizzle:c.POINTER[Annotated[int, ctypes.c_ubyte]]) -> LLVMValueRef: ...
@dll.bind
def lp_build_const_string(gallivm:c.POINTER[struct_gallivm_state], str:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> LLVMValueRef: ...
@dll.bind
def lp_build_const_func_pointer(gallivm:c.POINTER[struct_gallivm_state], ptr:ctypes.c_void_p, ret_type:LLVMTypeRef, arg_types:c.POINTER[LLVMTypeRef], num_args:Annotated[int, ctypes.c_uint32], name:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> LLVMValueRef: ...
@dll.bind
def lp_build_const_func_pointer_from_type(gallivm:c.POINTER[struct_gallivm_state], ptr:ctypes.c_void_p, function_type:LLVMTypeRef, name:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> LLVMValueRef: ...
# freedreno device-info record (764 bytes): chip identity, tiling/GMEM
# geometry, and per-generation sub-structs. num_sp_cores and num_ccu share
# offset 68, i.e. a union in the native struct.
@c.record
class struct_fd_dev_info(c.Struct):
  SIZE = 764
  chip: Annotated[uint8_t, 0]
  tile_align_w: Annotated[uint32_t, 4]
  tile_align_h: Annotated[uint32_t, 8]
  gmem_align_w: Annotated[uint32_t, 12]
  gmem_align_h: Annotated[uint32_t, 16]
  tile_max_w: Annotated[uint32_t, 20]
  tile_max_h: Annotated[uint32_t, 24]
  num_vsc_pipes: Annotated[uint32_t, 28]
  cs_shared_mem_size: Annotated[uint32_t, 32]
  wave_granularity: Annotated[Annotated[int, ctypes.c_int32], 36]
  highest_bank_bit: Annotated[uint32_t, 40]
  ubwc_swizzle: Annotated[uint32_t, 44]
  macrotile_mode: Annotated[uint32_t, 48]
  fibers_per_sp: Annotated[uint32_t, 52]
  threadsize_base: Annotated[uint32_t, 56]
  max_waves: Annotated[uint32_t, 60]
  compute_lb_size: Annotated[uint32_t, 64]
  num_sp_cores: Annotated[uint32_t, 68]
  num_ccu: Annotated[uint32_t, 68]
  a6xx: Annotated[struct_fd_dev_info_a6xx, 72]
  a7xx: Annotated[struct_fd_dev_info_a7xx, 728]
# Adreno a6xx-generation feature/quirk flags and tuning values (656 bytes).
# Mostly booleans describing hardware capabilities; offsets are generated —
# do not edit by hand.
@c.record
class struct_fd_dev_info_a6xx(c.Struct):
  SIZE = 656
  reg_size_vec4: Annotated[uint32_t, 0]
  instr_cache_size: Annotated[uint32_t, 4]
  has_hw_multiview: Annotated[Annotated[bool, ctypes.c_bool], 8]
  has_fs_tex_prefetch: Annotated[Annotated[bool, ctypes.c_bool], 9]
  supports_multiview_mask: Annotated[Annotated[bool, ctypes.c_bool], 10]
  concurrent_resolve: Annotated[Annotated[bool, ctypes.c_bool], 11]
  has_z24uint_s8uint: Annotated[Annotated[bool, ctypes.c_bool], 12]
  tess_use_shared: Annotated[Annotated[bool, ctypes.c_bool], 13]
  has_legacy_pipeline_shading_rate: Annotated[Annotated[bool, ctypes.c_bool], 14]
  storage_16bit: Annotated[Annotated[bool, ctypes.c_bool], 15]
  indirect_draw_wfm_quirk: Annotated[Annotated[bool, ctypes.c_bool], 16]
  depth_bounds_require_depth_test_quirk: Annotated[Annotated[bool, ctypes.c_bool], 17]
  has_tex_filter_cubic: Annotated[Annotated[bool, ctypes.c_bool], 18]
  has_separate_chroma_filter: Annotated[Annotated[bool, ctypes.c_bool], 19]
  has_sample_locations: Annotated[Annotated[bool, ctypes.c_bool], 20]
  has_cp_reg_write: Annotated[Annotated[bool, ctypes.c_bool], 21]
  has_8bpp_ubwc: Annotated[Annotated[bool, ctypes.c_bool], 22]
  has_lpac: Annotated[Annotated[bool, ctypes.c_bool], 23]
  has_getfiberid: Annotated[Annotated[bool, ctypes.c_bool], 24]
  mov_half_shared_quirk: Annotated[Annotated[bool, ctypes.c_bool], 25]
  has_movs: Annotated[Annotated[bool, ctypes.c_bool], 26]
  has_dp2acc: Annotated[Annotated[bool, ctypes.c_bool], 27]
  has_dp4acc: Annotated[Annotated[bool, ctypes.c_bool], 28]
  enable_lrz_fast_clear: Annotated[Annotated[bool, ctypes.c_bool], 29]
  has_lrz_dir_tracking: Annotated[Annotated[bool, ctypes.c_bool], 30]
  lrz_track_quirk: Annotated[Annotated[bool, ctypes.c_bool], 31]
  has_lrz_feedback: Annotated[Annotated[bool, ctypes.c_bool], 32]
  has_per_view_viewport: Annotated[Annotated[bool, ctypes.c_bool], 33]
  has_gmem_fast_clear: Annotated[Annotated[bool, ctypes.c_bool], 34]
  sysmem_per_ccu_depth_cache_size: Annotated[uint32_t, 36]
  sysmem_per_ccu_color_cache_size: Annotated[uint32_t, 40]
  gmem_ccu_color_cache_fraction: Annotated[uint32_t, 44]
  prim_alloc_threshold: Annotated[uint32_t, 48]
  vs_max_inputs_count: Annotated[uint32_t, 52]
  supports_double_threadsize: Annotated[Annotated[bool, ctypes.c_bool], 56]
  has_sampler_minmax: Annotated[Annotated[bool, ctypes.c_bool], 57]
  broken_ds_ubwc_quirk: Annotated[Annotated[bool, ctypes.c_bool], 58]
  has_scalar_alu: Annotated[Annotated[bool, ctypes.c_bool], 59]
  has_early_preamble: Annotated[Annotated[bool, ctypes.c_bool], 60]
  has_isam_v: Annotated[Annotated[bool, ctypes.c_bool], 61]
  has_ssbo_imm_offsets: Annotated[Annotated[bool, ctypes.c_bool], 62]
  has_coherent_ubwc_flag_caches: Annotated[Annotated[bool, ctypes.c_bool], 63]
  has_attachment_shading_rate: Annotated[Annotated[bool, ctypes.c_bool], 64]
  has_ubwc_linear_mipmap_fallback: Annotated[Annotated[bool, ctypes.c_bool], 65]
  predtf_nop_quirk: Annotated[Annotated[bool, ctypes.c_bool], 66]
  prede_nop_quirk: Annotated[Annotated[bool, ctypes.c_bool], 67]
  has_sad: Annotated[Annotated[bool, ctypes.c_bool], 68]
  is_a702: Annotated[Annotated[bool, ctypes.c_bool], 69]
  magic: Annotated[struct_fd_dev_info_a6xx_magic, 72]
  magic_raw: Annotated[c.Array[struct_fd_dev_info_a6xx_magic_raw, Literal[64]], 128]
  max_sets: Annotated[uint32_t, 640]
  line_width_min: Annotated[Annotated[float, ctypes.c_float], 644]
  line_width_max: Annotated[Annotated[float, ctypes.c_float], 648]
  has_bin_mask: Annotated[Annotated[bool, ctypes.c_bool], 652]
# Per-chip "magic" register values written verbatim to the named a6xx
# hardware registers (56 bytes).
@c.record
class struct_fd_dev_info_a6xx_magic(c.Struct):
  SIZE = 56
  PC_POWER_CNTL: Annotated[uint32_t, 0]
  TPL1_DBG_ECO_CNTL: Annotated[uint32_t, 4]
  GRAS_DBG_ECO_CNTL: Annotated[uint32_t, 8]
  SP_CHICKEN_BITS: Annotated[uint32_t, 12]
  UCHE_CLIENT_PF: Annotated[uint32_t, 16]
  PC_MODE_CNTL: Annotated[uint32_t, 20]
  SP_DBG_ECO_CNTL: Annotated[uint32_t, 24]
  RB_DBG_ECO_CNTL: Annotated[uint32_t, 28]
  RB_DBG_ECO_CNTL_blit: Annotated[uint32_t, 32]
  HLSQ_DBG_ECO_CNTL: Annotated[uint32_t, 36]
  RB_UNKNOWN_8E01: Annotated[uint32_t, 40]
  VPC_DBG_ECO_CNTL: Annotated[uint32_t, 44]
  UCHE_UNKNOWN_0E12: Annotated[uint32_t, 48]
  RB_CCU_DBG_ECO_CNTL: Annotated[uint32_t, 52]
# Raw (register address, value) pair used by the magic_raw array above.
@c.record
class struct_fd_dev_info_a6xx_magic_raw(c.Struct):
  SIZE = 8
  reg: Annotated[uint32_t, 0]
  value: Annotated[uint32_t, 4]
# Adreno a7xx-generation feature/quirk flags (36 bytes).
@c.record
class struct_fd_dev_info_a7xx(c.Struct):
  SIZE = 36
  stsc_duplication_quirk: Annotated[Annotated[bool, ctypes.c_bool], 0]
  has_event_write_sample_count: Annotated[Annotated[bool, ctypes.c_bool], 1]
  has_64b_ssbo_atomics: Annotated[Annotated[bool, ctypes.c_bool], 2]
  cmdbuf_start_a725_quirk: Annotated[Annotated[bool, ctypes.c_bool], 3]
  load_inline_uniforms_via_preamble_ldgk: Annotated[Annotated[bool, ctypes.c_bool], 4]
  load_shader_consts_via_preamble: Annotated[Annotated[bool, ctypes.c_bool], 5]
  has_gmem_vpc_attr_buf: Annotated[Annotated[bool, ctypes.c_bool], 6]
  sysmem_vpc_attr_buf_size: Annotated[uint32_t, 8]
  gmem_vpc_attr_buf_size: Annotated[uint32_t, 12]
  supports_uav_ubwc: Annotated[Annotated[bool, ctypes.c_bool], 16]
  ubwc_unorm_snorm_int_compatible: Annotated[Annotated[bool, ctypes.c_bool], 17]
  fs_must_have_non_zero_constlen_quirk: Annotated[Annotated[bool, ctypes.c_bool], 18]
  gs_vpc_adjacency_quirk: Annotated[Annotated[bool, ctypes.c_bool], 19]
  enable_tp_ubwc_flag_hint: Annotated[Annotated[bool, ctypes.c_bool], 20]
  storage_8bit: Annotated[Annotated[bool, ctypes.c_bool], 21]
  ubwc_all_formats_compatible: Annotated[Annotated[bool, ctypes.c_bool], 22]
  has_compliant_dp4acc: Annotated[Annotated[bool, ctypes.c_bool], 23]
  has_generic_clear: Annotated[Annotated[bool, ctypes.c_bool], 24]
  r8g8_faulty_fast_clear_quirk: Annotated[Annotated[bool, ctypes.c_bool], 25]
  ubwc_coherency_quirk: Annotated[Annotated[bool, ctypes.c_bool], 26]
  has_persistent_counter: Annotated[Annotated[bool, ctypes.c_bool], 27]
  has_primitive_shading_rate: Annotated[Annotated[bool, ctypes.c_bool], 28]
  reading_shading_rate_requires_smask_quirk: Annotated[Annotated[bool, ctypes.c_bool], 29]
  has_ray_intersection: Annotated[Annotated[bool, ctypes.c_bool], 30]
  has_sw_fuse: Annotated[Annotated[bool, ctypes.c_bool], 31]
  has_rt_workaround: Annotated[Annotated[bool, ctypes.c_bool], 32]
  has_alias_rt: Annotated[Annotated[bool, ctypes.c_bool], 33]
  has_abs_bin_mask: Annotated[Annotated[bool, ctypes.c_bool], 34]
  new_control_regs: Annotated[Annotated[bool, ctypes.c_bool], 35]
# Device identity used to look up a struct_fd_dev_info (gpu_id + chip_id).
@c.record
class struct_fd_dev_id(c.Struct):
  SIZE = 16
  gpu_id: Annotated[uint32_t, 0]
  chip_id: Annotated[uint64_t, 8]
# Device-info lookup bindings (by id or by name) plus an opaque forward
# declaration for ir3's register-allocation set.
@dll.bind
def fd_dev_info_raw(id:c.POINTER[struct_fd_dev_id]) -> c.POINTER[struct_fd_dev_info]: ...
@dll.bind
def fd_dev_info(id:c.POINTER[struct_fd_dev_id]) -> struct_fd_dev_info: ...
@dll.bind
def fd_dev_info_raw_by_name(name:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> c.POINTER[struct_fd_dev_info]: ...
@dll.bind
def fd_dev_name(id:c.POINTER[struct_fd_dev_id]) -> c.POINTER[Annotated[bytes, ctypes.c_char]]: ...
@dll.bind
def fd_dev_info_apply_dbg_options(info:c.POINTER[struct_fd_dev_info]) -> None: ...
# Opaque type: layout not exposed, only used behind pointers.
class struct_ir3_ra_reg_set(ctypes.Structure): pass
# ir3 shader container (1216 bytes): NIR source, stream-output info, variant
# list and cache keys. cs and vs share offset 800 (a union of per-stage data).
@c.record
class struct_ir3_shader(c.Struct):
  SIZE = 1216
  type: Annotated[gl_shader_stage, 0]
  id: Annotated[uint32_t, 4]
  variant_count: Annotated[uint32_t, 8]
  initial_variants_done: Annotated[Annotated[bool, ctypes.c_bool], 12]
  compiler: Annotated[c.POINTER[struct_ir3_compiler], 16]
  options: Annotated[struct_ir3_shader_options, 24]
  nir_finalized: Annotated[Annotated[bool, ctypes.c_bool], 252]
  nir: Annotated[c.POINTER[struct_nir_shader], 256]
  stream_output: Annotated[struct_ir3_stream_output_info, 264]
  cs: Annotated[struct_ir3_shader_cs, 800]
  vs: Annotated[struct_ir3_shader_vs, 800]
  variants: Annotated[c.POINTER[struct_ir3_shader_variant], 1064]
  variants_lock: Annotated[mtx_t, 1072]
  cache_key: Annotated[cache_key, 1112]
  key_mask: Annotated[struct_ir3_shader_key, 1132]
# Caller-supplied options for ir3_compiler_create (32 bytes).
@c.record
class struct_ir3_compiler_options(c.Struct):
  SIZE = 32
  push_ubo_with_preamble: Annotated[Annotated[bool, ctypes.c_bool], 0]
  disable_cache: Annotated[Annotated[bool, ctypes.c_bool], 1]
  bindless_fb_read_descriptor: Annotated[Annotated[int, ctypes.c_int32], 4]
  bindless_fb_read_slot: Annotated[Annotated[int, ctypes.c_int32], 8]
  storage_16bit: Annotated[Annotated[bool, ctypes.c_bool], 12]
  storage_8bit: Annotated[Annotated[bool, ctypes.c_bool], 13]
  lower_base_vertex: Annotated[Annotated[bool, ctypes.c_bool], 14]
  shared_push_consts: Annotated[Annotated[bool, ctypes.c_bool], 15]
  dual_color_blend_by_location: Annotated[Annotated[bool, ctypes.c_bool], 16]
  uche_trap_base: Annotated[uint64_t, 24]
# ir3 compiler state (456 bytes): device handles, NIR options, and a large
# set of per-generation hardware capability flags and limits consulted
# during compilation.
@c.record
class struct_ir3_compiler(c.Struct):
  SIZE = 456
  dev: Annotated[c.POINTER[struct_fd_device], 0]
  dev_id: Annotated[c.POINTER[struct_fd_dev_id], 8]
  gen: Annotated[uint8_t, 16]
  shader_count: Annotated[uint32_t, 20]
  disk_cache: Annotated[c.POINTER[struct_disk_cache], 24]
  nir_options: Annotated[struct_nir_shader_compiler_options, 32]
  options: Annotated[struct_ir3_compiler_options, 280]
  is_64bit: Annotated[Annotated[bool, ctypes.c_bool], 312]
  flat_bypass: Annotated[Annotated[bool, ctypes.c_bool], 313]
  levels_add_one: Annotated[Annotated[bool, ctypes.c_bool], 314]
  unminify_coords: Annotated[Annotated[bool, ctypes.c_bool], 315]
  txf_ms_with_isaml: Annotated[Annotated[bool, ctypes.c_bool], 316]
  array_index_add_half: Annotated[Annotated[bool, ctypes.c_bool], 317]
  samgq_workaround: Annotated[Annotated[bool, ctypes.c_bool], 318]
  tess_use_shared: Annotated[Annotated[bool, ctypes.c_bool], 319]
  mergedregs: Annotated[Annotated[bool, ctypes.c_bool], 320]
  max_const_pipeline: Annotated[uint16_t, 322]
  max_const_geom: Annotated[uint16_t, 324]
  max_const_frag: Annotated[uint16_t, 326]
  max_const_safe: Annotated[uint16_t, 328]
  max_const_compute: Annotated[uint16_t, 330]
  compute_lb_size: Annotated[uint32_t, 332]
  instr_align: Annotated[uint32_t, 336]
  const_upload_unit: Annotated[uint32_t, 340]
  threadsize_base: Annotated[uint32_t, 344]
  wave_granularity: Annotated[uint32_t, 348]
  max_waves: Annotated[uint32_t, 352]
  reg_size_vec4: Annotated[uint32_t, 356]
  local_mem_size: Annotated[uint32_t, 360]
  branchstack_size: Annotated[uint32_t, 364]
  pvtmem_per_fiber_align: Annotated[uint32_t, 368]
  has_clip_cull: Annotated[Annotated[bool, ctypes.c_bool], 372]
  has_pvtmem: Annotated[Annotated[bool, ctypes.c_bool], 373]
  has_isam_ssbo: Annotated[Annotated[bool, ctypes.c_bool], 374]
  has_isam_v: Annotated[Annotated[bool, ctypes.c_bool], 375]
  has_ssbo_imm_offsets: Annotated[Annotated[bool, ctypes.c_bool], 376]
  has_getfiberid: Annotated[Annotated[bool, ctypes.c_bool], 377]
  mov_half_shared_quirk: Annotated[Annotated[bool, ctypes.c_bool], 378]
  has_movs: Annotated[Annotated[bool, ctypes.c_bool], 379]
  has_shfl: Annotated[Annotated[bool, ctypes.c_bool], 380]
  has_bitwise_triops: Annotated[Annotated[bool, ctypes.c_bool], 381]
  num_predicates: Annotated[uint32_t, 384]
  bitops_can_write_predicates: Annotated[Annotated[bool, ctypes.c_bool], 388]
  has_branch_and_or: Annotated[Annotated[bool, ctypes.c_bool], 389]
  has_predication: Annotated[Annotated[bool, ctypes.c_bool], 390]
  predtf_nop_quirk: Annotated[Annotated[bool, ctypes.c_bool], 391]
  prede_nop_quirk: Annotated[Annotated[bool, ctypes.c_bool], 392]
  max_variable_workgroup_size: Annotated[uint32_t, 396]
  has_dp2acc: Annotated[Annotated[bool, ctypes.c_bool], 400]
  has_dp4acc: Annotated[Annotated[bool, ctypes.c_bool], 401]
  has_compliant_dp4acc: Annotated[Annotated[bool, ctypes.c_bool], 402]
  bool_type: Annotated[type_t, 404]
  has_shared_regfile: Annotated[Annotated[bool, ctypes.c_bool], 408]
  has_preamble: Annotated[Annotated[bool, ctypes.c_bool], 409]
  shared_consts_base_offset: Annotated[uint16_t, 410]
  shared_consts_size: Annotated[uint64_t, 416]
  geom_shared_consts_size_quirk: Annotated[uint64_t, 424]
  has_fs_tex_prefetch: Annotated[Annotated[bool, ctypes.c_bool], 432]
  stsc_duplication_quirk: Annotated[Annotated[bool, ctypes.c_bool], 433]
  load_shader_consts_via_preamble: Annotated[Annotated[bool, ctypes.c_bool], 434]
  load_inline_uniforms_via_preamble_ldgk: Annotated[Annotated[bool, ctypes.c_bool], 435]
  has_scalar_alu: Annotated[Annotated[bool, ctypes.c_bool], 436]
  fs_must_have_non_zero_constlen_quirk: Annotated[Annotated[bool, ctypes.c_bool], 437]
  has_early_preamble: Annotated[Annotated[bool, ctypes.c_bool], 438]
  has_rpt_bary_f: Annotated[Annotated[bool, ctypes.c_bool], 439]
  has_alias_tex: Annotated[Annotated[bool, ctypes.c_bool], 440]
  has_alias_rt: Annotated[Annotated[bool, ctypes.c_bool], 441]
  reading_shading_rate_requires_smask_quirk: Annotated[Annotated[bool, ctypes.c_bool], 442]
  delay_slots: Annotated[struct_ir3_compiler_delay_slots, 444]
# Opaque forward declarations (used only behind pointers), then ir3's value
# type enum. Note TYPE_ATOMIC_U64 and TYPE_U8 share value 6 — as generated.
class struct_fd_device(ctypes.Structure): pass
class struct_disk_cache(ctypes.Structure): pass
class type_t(Annotated[int, ctypes.c_uint32], c.Enum): pass
TYPE_F16 = type_t.define('TYPE_F16', 0)
TYPE_F32 = type_t.define('TYPE_F32', 1)
TYPE_U16 = type_t.define('TYPE_U16', 2)
TYPE_U32 = type_t.define('TYPE_U32', 3)
TYPE_S16 = type_t.define('TYPE_S16', 4)
TYPE_S32 = type_t.define('TYPE_S32', 5)
TYPE_ATOMIC_U64 = type_t.define('TYPE_ATOMIC_U64', 6)
TYPE_U8 = type_t.define('TYPE_U8', 6)
TYPE_U8_32 = type_t.define('TYPE_U8_32', 7)
# Instruction-scheduling delay-slot counts for the target generation.
@c.record
class struct_ir3_compiler_delay_slots(c.Struct):
  SIZE = 12
  alu_to_alu: Annotated[Annotated[int, ctypes.c_uint32], 0]
  non_alu: Annotated[Annotated[int, ctypes.c_uint32], 4]
  cat3_src2_read: Annotated[Annotated[int, ctypes.c_uint32], 8]
# Compiler lifecycle and disk-cache setup bindings.
@dll.bind
def ir3_compiler_destroy(compiler:c.POINTER[struct_ir3_compiler]) -> None: ...
@dll.bind
def ir3_compiler_create(dev:c.POINTER[struct_fd_device], dev_id:c.POINTER[struct_fd_dev_id], dev_info:c.POINTER[struct_fd_dev_info], options:c.POINTER[struct_ir3_compiler_options]) -> c.POINTER[struct_ir3_compiler]: ...
@dll.bind
def ir3_disk_cache_init(compiler:c.POINTER[struct_ir3_compiler]) -> None: ...
@dll.bind
def ir3_disk_cache_init_shader_key(compiler:c.POINTER[struct_ir3_compiler], shader:c.POINTER[struct_ir3_shader]) -> None: ...
# A compiled shader variant (2040 bytes): binary, IR, constant state,
# input/output maps and resource usage flags. tess/gs/fs/cs all sit at
# offset 1484 — a per-stage union in the native struct.
@c.record
class struct_ir3_shader_variant(c.Struct):
  SIZE = 2040
  bo: Annotated[c.POINTER[struct_fd_bo], 0]
  id: Annotated[uint32_t, 8]
  shader_id: Annotated[uint32_t, 12]
  key: Annotated[struct_ir3_shader_key, 16]
  binning_pass: Annotated[Annotated[bool, ctypes.c_bool], 96]
  binning: Annotated[c.POINTER[struct_ir3_shader_variant], 104]
  nonbinning: Annotated[c.POINTER[struct_ir3_shader_variant], 112]
  ir: Annotated[c.POINTER[struct_ir3], 120]
  next: Annotated[c.POINTER[struct_ir3_shader_variant], 128]
  type: Annotated[gl_shader_stage, 136]
  compiler: Annotated[c.POINTER[struct_ir3_compiler], 144]
  name: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 152]
  constant_data: Annotated[ctypes.c_void_p, 160]
  disasm_info: Annotated[struct_ir3_disasm_info, 168]
  bin: Annotated[c.POINTER[uint32_t], 192]
  const_state: Annotated[c.POINTER[struct_ir3_const_state], 200]
  imm_state: Annotated[struct_ir3_imm_const_state, 208]
  info: Annotated[struct_ir3_info, 224]
  sha1_str: Annotated[c.Array[Annotated[bytes, ctypes.c_char], Literal[41]], 288]
  shader_options: Annotated[struct_ir3_shader_options, 332]
  constant_data_size: Annotated[uint32_t, 560]
  branchstack: Annotated[Annotated[int, ctypes.c_uint32], 564]
  loops: Annotated[Annotated[int, ctypes.c_uint32], 568]
  instrlen: Annotated[Annotated[int, ctypes.c_uint32], 572]
  constlen: Annotated[Annotated[int, ctypes.c_uint32], 576]
  pvtmem_size: Annotated[Annotated[int, ctypes.c_uint32], 580]
  pvtmem_per_wave: Annotated[Annotated[bool, ctypes.c_bool], 584]
  multi_pos_output: Annotated[Annotated[bool, ctypes.c_bool], 585]
  dual_src_blend: Annotated[Annotated[bool, ctypes.c_bool], 586]
  early_preamble: Annotated[Annotated[bool, ctypes.c_bool], 587]
  shared_size: Annotated[Annotated[int, ctypes.c_uint32], 588]
  frag_face: Annotated[Annotated[bool, ctypes.c_bool], 592]
  color0_mrt: Annotated[Annotated[bool, ctypes.c_bool], 593]
  fragcoord_compmask: Annotated[uint8_t, 594]
  outputs_count: Annotated[Annotated[int, ctypes.c_uint32], 596]
  outputs: Annotated[c.Array[struct_ir3_shader_output, Literal[34]], 600]
  writes_pos: Annotated[Annotated[bool, ctypes.c_bool], 736]
  writes_smask: Annotated[Annotated[bool, ctypes.c_bool], 737]
  writes_psize: Annotated[Annotated[bool, ctypes.c_bool], 738]
  writes_viewport: Annotated[Annotated[bool, ctypes.c_bool], 739]
  writes_stencilref: Annotated[Annotated[bool, ctypes.c_bool], 740]
  writes_shading_rate: Annotated[Annotated[bool, ctypes.c_bool], 741]
  output_size: Annotated[uint32_t, 744]
  input_size: Annotated[uint32_t, 748]
  output_loc: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[45]], 752]
  inputs_count: Annotated[Annotated[int, ctypes.c_uint32], 932]
  inputs: Annotated[c.Array[struct_ir3_shader_variant_input, Literal[34]], 936]
  reads_primid: Annotated[Annotated[bool, ctypes.c_bool], 1106]
  reads_shading_rate: Annotated[Annotated[bool, ctypes.c_bool], 1107]
  reads_smask: Annotated[Annotated[bool, ctypes.c_bool], 1108]
  total_in: Annotated[Annotated[int, ctypes.c_uint32], 1112]
  sysval_in: Annotated[Annotated[int, ctypes.c_uint32], 1116]
  varying_in: Annotated[Annotated[int, ctypes.c_uint32], 1120]
  image_mapping: Annotated[struct_ir3_ibo_mapping, 1124]
  num_samp: Annotated[Annotated[int, ctypes.c_int32], 1224]
  fb_read: Annotated[Annotated[bool, ctypes.c_bool], 1228]
  has_ssbo: Annotated[Annotated[bool, ctypes.c_bool], 1229]
  bindless_tex: Annotated[Annotated[bool, ctypes.c_bool], 1230]
  bindless_samp: Annotated[Annotated[bool, ctypes.c_bool], 1231]
  bindless_ibo: Annotated[Annotated[bool, ctypes.c_bool], 1232]
  bindless_ubo: Annotated[Annotated[bool, ctypes.c_bool], 1233]
  need_pixlod: Annotated[Annotated[bool, ctypes.c_bool], 1234]
  need_full_quad: Annotated[Annotated[bool, ctypes.c_bool], 1235]
  need_driver_params: Annotated[Annotated[bool, ctypes.c_bool], 1236]
  no_earlyz: Annotated[Annotated[bool, ctypes.c_bool], 1237]
  has_kill: Annotated[Annotated[bool, ctypes.c_bool], 1238]
  per_samp: Annotated[Annotated[bool, ctypes.c_bool], 1239]
  post_depth_coverage: Annotated[Annotated[bool, ctypes.c_bool], 1240]
  empty: Annotated[Annotated[bool, ctypes.c_bool], 1241]
  writes_only_color: Annotated[Annotated[bool, ctypes.c_bool], 1242]
  mergedregs: Annotated[Annotated[bool, ctypes.c_bool], 1243]
  clip_mask: Annotated[uint8_t, 1244]
  cull_mask: Annotated[uint8_t, 1245]
  astc_srgb: Annotated[struct_ir3_shader_variant_astc_srgb, 1248]
  tg4: Annotated[struct_ir3_shader_variant_tg4, 1320]
  num_sampler_prefetch: Annotated[uint32_t, 1392]
  sampler_prefetch: Annotated[c.Array[struct_ir3_sampler_prefetch, Literal[4]], 1396]
  prefetch_bary_type: Annotated[enum_ir3_bary, 1460]
  prefetch_end_of_quad: Annotated[Annotated[bool, ctypes.c_bool], 1464]
  local_size: Annotated[c.Array[uint16_t, Literal[3]], 1466]
  local_size_variable: Annotated[Annotated[bool, ctypes.c_bool], 1472]
  has_barrier: Annotated[Annotated[bool, ctypes.c_bool], 1473]
  num_ssbos: Annotated[Annotated[int, ctypes.c_uint32], 1476]
  num_uavs: Annotated[Annotated[int, ctypes.c_uint32], 1480]
  tess: Annotated[struct_ir3_shader_variant_tess, 1484]
  gs: Annotated[struct_ir3_shader_variant_gs, 1484]
  fs: Annotated[struct_ir3_shader_variant_fs, 1484]
  cs: Annotated[struct_ir3_shader_variant_cs, 1484]
  vtxid_base: Annotated[uint32_t, 1500]
  stream_output: Annotated[struct_ir3_stream_output_info, 1504]
# Variant (de)serialization (blob + disk cache) and the main NIR -> ir3
# compile entry point.
@dll.bind
def ir3_retrieve_variant(blob:c.POINTER[struct_blob_reader], compiler:c.POINTER[struct_ir3_compiler], mem_ctx:ctypes.c_void_p) -> c.POINTER[struct_ir3_shader_variant]: ...
@dll.bind
def ir3_store_variant(blob:c.POINTER[struct_blob], v:c.POINTER[struct_ir3_shader_variant]) -> None: ...
@dll.bind
def ir3_disk_cache_retrieve(shader:c.POINTER[struct_ir3_shader], v:c.POINTER[struct_ir3_shader_variant]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def ir3_disk_cache_store(shader:c.POINTER[struct_ir3_shader], v:c.POINTER[struct_ir3_shader_variant]) -> None: ...
@dll.bind
def ir3_get_compiler_options(compiler:c.POINTER[struct_ir3_compiler]) -> c.POINTER[nir_shader_compiler_options]: ...
@dll.bind
def ir3_compile_shader_nir(compiler:c.POINTER[struct_ir3_compiler], shader:c.POINTER[struct_ir3_shader], so:c.POINTER[struct_ir3_shader_variant]) -> Annotated[int, ctypes.c_int32]: ...
# ir3 debug bitmask flags (power-of-two values, combinable), two exported
# globals looked up in the library (guarded: the symbols may be absent in
# some builds, hence try/except around in_dll), and a helper binding.
class enum_ir3_shader_debug(Annotated[int, ctypes.c_uint32], c.Enum): pass
IR3_DBG_SHADER_VS = enum_ir3_shader_debug.define('IR3_DBG_SHADER_VS', 1)
IR3_DBG_SHADER_TCS = enum_ir3_shader_debug.define('IR3_DBG_SHADER_TCS', 2)
IR3_DBG_SHADER_TES = enum_ir3_shader_debug.define('IR3_DBG_SHADER_TES', 4)
IR3_DBG_SHADER_GS = enum_ir3_shader_debug.define('IR3_DBG_SHADER_GS', 8)
IR3_DBG_SHADER_FS = enum_ir3_shader_debug.define('IR3_DBG_SHADER_FS', 16)
IR3_DBG_SHADER_CS = enum_ir3_shader_debug.define('IR3_DBG_SHADER_CS', 32)
IR3_DBG_DISASM = enum_ir3_shader_debug.define('IR3_DBG_DISASM', 64)
IR3_DBG_OPTMSGS = enum_ir3_shader_debug.define('IR3_DBG_OPTMSGS', 128)
IR3_DBG_FORCES2EN = enum_ir3_shader_debug.define('IR3_DBG_FORCES2EN', 256)
IR3_DBG_NOUBOOPT = enum_ir3_shader_debug.define('IR3_DBG_NOUBOOPT', 512)
IR3_DBG_NOFP16 = enum_ir3_shader_debug.define('IR3_DBG_NOFP16', 1024)
IR3_DBG_NOCACHE = enum_ir3_shader_debug.define('IR3_DBG_NOCACHE', 2048)
IR3_DBG_SPILLALL = enum_ir3_shader_debug.define('IR3_DBG_SPILLALL', 4096)
IR3_DBG_NOPREAMBLE = enum_ir3_shader_debug.define('IR3_DBG_NOPREAMBLE', 8192)
IR3_DBG_SHADER_INTERNAL = enum_ir3_shader_debug.define('IR3_DBG_SHADER_INTERNAL', 16384)
IR3_DBG_FULLSYNC = enum_ir3_shader_debug.define('IR3_DBG_FULLSYNC', 32768)
IR3_DBG_FULLNOP = enum_ir3_shader_debug.define('IR3_DBG_FULLNOP', 65536)
IR3_DBG_NOEARLYPREAMBLE = enum_ir3_shader_debug.define('IR3_DBG_NOEARLYPREAMBLE', 131072)
IR3_DBG_NODESCPREFETCH = enum_ir3_shader_debug.define('IR3_DBG_NODESCPREFETCH', 262144)
IR3_DBG_EXPANDRPT = enum_ir3_shader_debug.define('IR3_DBG_EXPANDRPT', 524288)
IR3_DBG_ASM_ROUNDTRIP = enum_ir3_shader_debug.define('IR3_DBG_ASM_ROUNDTRIP', 1048576)
IR3_DBG_SCHEDMSGS = enum_ir3_shader_debug.define('IR3_DBG_SCHEDMSGS', 2097152)
IR3_DBG_RAMSGS = enum_ir3_shader_debug.define('IR3_DBG_RAMSGS', 4194304)
IR3_DBG_NOALIASTEX = enum_ir3_shader_debug.define('IR3_DBG_NOALIASTEX', 8388608)
IR3_DBG_NOALIASRT = enum_ir3_shader_debug.define('IR3_DBG_NOALIASRT', 16777216)
try: ir3_shader_debug = enum_ir3_shader_debug.in_dll(dll, 'ir3_shader_debug') # type: ignore
except (ValueError,AttributeError): pass
try: ir3_shader_override_path = c.POINTER[Annotated[bytes, ctypes.c_char]].in_dll(dll, 'ir3_shader_override_path') # type: ignore
except (ValueError,AttributeError): pass
@dll.bind
def ir3_shader_debug_as_string() -> c.POINTER[Annotated[bytes, ctypes.c_char]]: ...
# Compute-stage driver parameters uploaded as shader constants (64 bytes).
@c.record
class struct_ir3_driver_params_cs(c.Struct):
  SIZE = 64
  num_work_groups_x: Annotated[uint32_t, 0]
  num_work_groups_y: Annotated[uint32_t, 4]
  num_work_groups_z: Annotated[uint32_t, 8]
  work_dim: Annotated[uint32_t, 12]
  base_group_x: Annotated[uint32_t, 16]
  base_group_y: Annotated[uint32_t, 20]
  base_group_z: Annotated[uint32_t, 24]
  subgroup_size: Annotated[uint32_t, 28]
  local_group_size_x: Annotated[uint32_t, 32]
  local_group_size_y: Annotated[uint32_t, 36]
  local_group_size_z: Annotated[uint32_t, 40]
  subgroup_id_shift: Annotated[uint32_t, 44]
  workgroup_id_x: Annotated[uint32_t, 48]
  workgroup_id_y: Annotated[uint32_t, 52]
  workgroup_id_z: Annotated[uint32_t, 56]
  __pad: Annotated[uint32_t, 60]
# Driver-supplied uniform parameters for vertex shaders (160 bytes).
# NOTE(review): `ucp` forward-references struct_ir3_driver_params_vs_ucp,
# which is defined just below — relies on lazy annotation evaluation by the
# c.record machinery (or `from __future__ import annotations`); confirm.
@c.record
class struct_ir3_driver_params_vs(c.Struct):
  SIZE = 160
  draw_id: Annotated[uint32_t, 0]
  vtxid_base: Annotated[uint32_t, 4]
  instid_base: Annotated[uint32_t, 8]
  vtxcnt_max: Annotated[uint32_t, 12]
  is_indexed_draw: Annotated[uint32_t, 16]
  ucp: Annotated[c.Array[struct_ir3_driver_params_vs_ucp, Literal[8]], 20]
  __pad_37_39: Annotated[c.Array[uint32_t, Literal[3]], 148]
# One user clip plane entry (4 x 32-bit components) used by the vs params
# struct above; stored as uint32_t raw words, not floats, in this layout.
@c.record
class struct_ir3_driver_params_vs_ucp(c.Struct):
  SIZE = 16
  x: Annotated[uint32_t, 0]
  y: Annotated[uint32_t, 4]
  z: Annotated[uint32_t, 8]
  w: Annotated[uint32_t, 12]
# Driver params for tessellation control shaders: default outer/inner
# tessellation levels plus tail padding to 32 bytes.
@c.record
class struct_ir3_driver_params_tcs(c.Struct):
  SIZE = 32
  default_outer_level_x: Annotated[uint32_t, 0]
  default_outer_level_y: Annotated[uint32_t, 4]
  default_outer_level_z: Annotated[uint32_t, 8]
  default_outer_level_w: Annotated[uint32_t, 12]
  default_inner_level_x: Annotated[uint32_t, 16]
  default_inner_level_y: Annotated[uint32_t, 20]
  __pad_06_07: Annotated[c.Array[uint32_t, Literal[2]], 24]
# Driver params for fragment shaders; padded fields keep the live values at
# 16-byte (vec4) boundaries, matching the generated C layout.
@c.record
class struct_ir3_driver_params_fs(c.Struct):
  SIZE = 52
  subgroup_size: Annotated[uint32_t, 0]
  __pad_01_03: Annotated[c.Array[uint32_t, Literal[3]], 4]
  frag_invocation_count: Annotated[uint32_t, 16]
  __pad_05_07: Annotated[c.Array[uint32_t, Literal[3]], 20]
  frag_size: Annotated[uint32_t, 32]
  __pad_09: Annotated[uint32_t, 36]
  frag_offset: Annotated[uint32_t, 40]
  __pad_11_12: Annotated[c.Array[uint32_t, Literal[2]], 44]
# Barycentric interpolation modes (perspective / linear x pixel / sample /
# centroid); IJ_COUNT is the number of real modes, not a mode itself.
class enum_ir3_bary(Annotated[int, ctypes.c_uint32], c.Enum): pass
IJ_PERSP_PIXEL = enum_ir3_bary.define('IJ_PERSP_PIXEL', 0)
IJ_PERSP_SAMPLE = enum_ir3_bary.define('IJ_PERSP_SAMPLE', 1)
IJ_PERSP_CENTROID = enum_ir3_bary.define('IJ_PERSP_CENTROID', 2)
IJ_PERSP_CENTER_RHW = enum_ir3_bary.define('IJ_PERSP_CENTER_RHW', 3)
IJ_LINEAR_PIXEL = enum_ir3_bary.define('IJ_LINEAR_PIXEL', 4)
IJ_LINEAR_CENTROID = enum_ir3_bary.define('IJ_LINEAR_CENTROID', 5)
IJ_LINEAR_SAMPLE = enum_ir3_bary.define('IJ_LINEAR_SAMPLE', 6)
IJ_COUNT = enum_ir3_bary.define('IJ_COUNT', 7)
# Wave (subgroup) size policy: force single, allow either, or force double.
class enum_ir3_wavesize_option(Annotated[int, ctypes.c_uint32], c.Enum): pass
IR3_SINGLE_ONLY = enum_ir3_wavesize_option.define('IR3_SINGLE_ONLY', 0)
IR3_SINGLE_OR_DOUBLE = enum_ir3_wavesize_option.define('IR3_SINGLE_OR_DOUBLE', 1)
IR3_DOUBLE_ONLY = enum_ir3_wavesize_option.define('IR3_DOUBLE_ONLY', 2)
# Identity of a UBO: NIR def for a global base address, block index, and
# bindless addressing info. `_global` is the Python-safe rename of C `global`.
@c.record
class struct_ir3_ubo_info(c.Struct):
  SIZE = 16
  global_base: Annotated[c.POINTER[struct_nir_def], 0]
  block: Annotated[uint32_t, 8]
  bindless_base: Annotated[uint16_t, 12]
  bindless: Annotated[Annotated[bool, ctypes.c_bool], 14]
  _global: Annotated[Annotated[bool, ctypes.c_bool], 15]
# A [start, end) byte range within one UBO plus the const-file offset it is
# uploaded to (semantics per ir3 headers — offsets here are the struct layout).
@c.record
class struct_ir3_ubo_range(c.Struct):
  SIZE = 32
  ubo: Annotated[struct_ir3_ubo_info, 0]
  offset: Annotated[uint32_t, 16]
  start: Annotated[uint32_t, 20]
  end: Annotated[uint32_t, 24]
# Result of UBO-range analysis: up to 32 ranges, how many are enabled, and a
# total size (32 * 32-byte ranges = 1024 bytes, then two uint32 tail fields).
@c.record
class struct_ir3_ubo_analysis_state(c.Struct):
  SIZE = 1032
  range: Annotated[c.Array[struct_ir3_ubo_range, Literal[32]], 0]
  num_enabled: Annotated[uint32_t, 1024]
  size: Annotated[uint32_t, 1028]
# How push constants are delivered: none, per-stage, shared, or shared via
# the preamble.
class enum_ir3_push_consts_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
IR3_PUSH_CONSTS_NONE = enum_ir3_push_consts_type.define('IR3_PUSH_CONSTS_NONE', 0)
IR3_PUSH_CONSTS_PER_STAGE = enum_ir3_push_consts_type.define('IR3_PUSH_CONSTS_PER_STAGE', 1)
IR3_PUSH_CONSTS_SHARED = enum_ir3_push_consts_type.define('IR3_PUSH_CONSTS_SHARED', 2)
IR3_PUSH_CONSTS_SHARED_PREAMBLE = enum_ir3_push_consts_type.define('IR3_PUSH_CONSTS_SHARED_PREAMBLE', 3)
# Descriptor for a driver-internal UBO: signed index (int32_t, so presumably
# -1 means "unused" — confirm) and its size.
@c.record
class struct_ir3_driver_ubo(c.Struct):
  SIZE = 8
  idx: Annotated[int32_t, 0]
  size: Annotated[uint32_t, 4]
# Categories of constant-file allocations; IR3_CONST_ALLOC_MAX (12) is the
# count and sizes the `consts` array in struct_ir3_const_allocations below.
class enum_ir3_const_alloc_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
IR3_CONST_ALLOC_PUSH_CONSTS = enum_ir3_const_alloc_type.define('IR3_CONST_ALLOC_PUSH_CONSTS', 0)
IR3_CONST_ALLOC_DYN_DESCRIPTOR_OFFSET = enum_ir3_const_alloc_type.define('IR3_CONST_ALLOC_DYN_DESCRIPTOR_OFFSET', 1)
IR3_CONST_ALLOC_INLINE_UNIFORM_ADDRS = enum_ir3_const_alloc_type.define('IR3_CONST_ALLOC_INLINE_UNIFORM_ADDRS', 2)
IR3_CONST_ALLOC_DRIVER_PARAMS = enum_ir3_const_alloc_type.define('IR3_CONST_ALLOC_DRIVER_PARAMS', 3)
IR3_CONST_ALLOC_UBO_RANGES = enum_ir3_const_alloc_type.define('IR3_CONST_ALLOC_UBO_RANGES', 4)
IR3_CONST_ALLOC_PREAMBLE = enum_ir3_const_alloc_type.define('IR3_CONST_ALLOC_PREAMBLE', 5)
IR3_CONST_ALLOC_GLOBAL = enum_ir3_const_alloc_type.define('IR3_CONST_ALLOC_GLOBAL', 6)
IR3_CONST_ALLOC_UBO_PTRS = enum_ir3_const_alloc_type.define('IR3_CONST_ALLOC_UBO_PTRS', 7)
IR3_CONST_ALLOC_IMAGE_DIMS = enum_ir3_const_alloc_type.define('IR3_CONST_ALLOC_IMAGE_DIMS', 8)
IR3_CONST_ALLOC_TFBO = enum_ir3_const_alloc_type.define('IR3_CONST_ALLOC_TFBO', 9)
IR3_CONST_ALLOC_PRIMITIVE_PARAM = enum_ir3_const_alloc_type.define('IR3_CONST_ALLOC_PRIMITIVE_PARAM', 10)
IR3_CONST_ALLOC_PRIMITIVE_MAP = enum_ir3_const_alloc_type.define('IR3_CONST_ALLOC_PRIMITIVE_MAP', 11)
IR3_CONST_ALLOC_MAX = enum_ir3_const_alloc_type.define('IR3_CONST_ALLOC_MAX', 12)
# One constant-file allocation, measured in vec4 units (offset/size plus a
# reserved size and alignment).
@c.record
class struct_ir3_const_allocation(c.Struct):
  SIZE = 16
  offset_vec4: Annotated[uint32_t, 0]
  size_vec4: Annotated[uint32_t, 4]
  reserved_size_vec4: Annotated[uint32_t, 8]
  reserved_align_vec4: Annotated[uint32_t, 12]
# One allocation slot per enum_ir3_const_alloc_type value (12 x 16 bytes),
# plus the running max offset and reserved total.
@c.record
class struct_ir3_const_allocations(c.Struct):
  SIZE = 200
  consts: Annotated[c.Array[struct_ir3_const_allocation, Literal[12]], 0]
  max_const_offset_vec4: Annotated[uint32_t, 192]
  reserved_vec4: Annotated[uint32_t, 196]
# Per-image dimension constants: a 32-bit mask, a count, and one offset per
# possible image (32 entries).
@c.record
class struct_ir3_const_image_dims(c.Struct):
  SIZE = 136
  mask: Annotated[uint32_t, 0]
  count: Annotated[uint32_t, 4]
  off: Annotated[c.Array[uint32_t, Literal[32]], 8]
# Immediate constants lowered into the const file: capacity (`size`), number
# used (`count`), and a pointer to the raw 32-bit values.
@c.record
class struct_ir3_imm_const_state(c.Struct):
  SIZE = 16
  size: Annotated[Annotated[int, ctypes.c_uint32], 0]
  count: Annotated[Annotated[int, ctypes.c_uint32], 4]
  values: Annotated[c.POINTER[uint32_t], 8]
# Aggregate constant-file state for a shader: UBO counts, driver UBO
# descriptors, allocations, image dims, UBO analysis, push-const delivery.
@c.record
class struct_ir3_const_state(c.Struct):
  SIZE = 1424
  num_ubos: Annotated[Annotated[int, ctypes.c_uint32], 0]
  num_app_ubos: Annotated[Annotated[int, ctypes.c_uint32], 4]
  num_driver_params: Annotated[Annotated[int, ctypes.c_uint32], 8]
  consts_ubo: Annotated[struct_ir3_driver_ubo, 12]
  driver_params_ubo: Annotated[struct_ir3_driver_ubo, 20]
  primitive_map_ubo: Annotated[struct_ir3_driver_ubo, 28]
  primitive_param_ubo: Annotated[struct_ir3_driver_ubo, 36]
  allocs: Annotated[struct_ir3_const_allocations, 44]
  image_dims: Annotated[struct_ir3_const_image_dims, 244]
  ubo_state: Annotated[struct_ir3_ubo_analysis_state, 384]
  push_consts_type: Annotated[enum_ir3_push_consts_type, 1416]
# One transform-feedback output, packed into 4 bytes of bitfields. The
# 4-argument Annotated form appears to encode (type, byte offset, bit width,
# bit shift) — confirm against the c.record helper.
@c.record
class struct_ir3_stream_output(c.Struct):
  SIZE = 4
  register_index: Annotated[Annotated[int, ctypes.c_uint32], 0, 6, 0]
  start_component: Annotated[Annotated[int, ctypes.c_uint32], 0, 2, 6]
  num_components: Annotated[Annotated[int, ctypes.c_uint32], 1, 3, 0]
  output_buffer: Annotated[Annotated[int, ctypes.c_uint32], 1, 3, 3]
  dst_offset: Annotated[Annotated[int, ctypes.c_uint32], 1, 16, 6]
  stream: Annotated[Annotated[int, ctypes.c_uint32], 3, 2, 6]
# Transform-feedback configuration: per-buffer strides, stream bookkeeping,
# and up to 128 packed output records.
@c.record
class struct_ir3_stream_output_info(c.Struct):
  SIZE = 532
  num_outputs: Annotated[Annotated[int, ctypes.c_uint32], 0]
  stride: Annotated[c.Array[uint16_t, Literal[4]], 4]
  streams_written: Annotated[uint8_t, 12]
  buffer_to_stream: Annotated[c.Array[uint8_t, Literal[4]], 13]
  output: Annotated[c.Array[struct_ir3_stream_output, Literal[128]], 20]
# Description of a texture-sample prefetch: source/dest registers, sampler and
# texture ids (regular and bindless), write mask, precision, and the tex
# opcode to issue (see opc_t defined below).
@c.record
class struct_ir3_sampler_prefetch(c.Struct):
  SIZE = 16
  src: Annotated[uint8_t, 0]
  bindless: Annotated[Annotated[bool, ctypes.c_bool], 1]
  samp_id: Annotated[uint8_t, 2]
  tex_id: Annotated[uint8_t, 3]
  samp_bindless_id: Annotated[uint16_t, 4]
  tex_bindless_id: Annotated[uint16_t, 6]
  dst: Annotated[uint8_t, 8]
  wrmask: Annotated[uint8_t, 9]
  half_precision: Annotated[uint8_t, 10]
  tex_opc: Annotated[opc_t, 12]
# ir3 instruction opcodes. Values cluster in bands of 128; the bands appear to
# correspond to the instruction categories whose payload structs (cat0..cat7)
# are defined later in this file — inferred from the value ranges and struct
# names; confirm against the ir3 ISA headers. *_MACRO values are
# compiler-internal pseudo-ops, as are the OPC_META_* entries at 1024+.
class opc_t(Annotated[int, ctypes.c_uint32], c.Enum): pass
# -- band 0: flow control / branches (cat0 payload) --
OPC_NOP = opc_t.define('OPC_NOP', 0)
OPC_JUMP = opc_t.define('OPC_JUMP', 2)
OPC_CALL = opc_t.define('OPC_CALL', 3)
OPC_RET = opc_t.define('OPC_RET', 4)
OPC_KILL = opc_t.define('OPC_KILL', 5)
OPC_END = opc_t.define('OPC_END', 6)
OPC_EMIT = opc_t.define('OPC_EMIT', 7)
OPC_CUT = opc_t.define('OPC_CUT', 8)
OPC_CHMASK = opc_t.define('OPC_CHMASK', 9)
OPC_CHSH = opc_t.define('OPC_CHSH', 10)
OPC_FLOW_REV = opc_t.define('OPC_FLOW_REV', 11)
OPC_BKT = opc_t.define('OPC_BKT', 16)
OPC_STKS = opc_t.define('OPC_STKS', 17)
OPC_STKR = opc_t.define('OPC_STKR', 18)
OPC_XSET = opc_t.define('OPC_XSET', 19)
OPC_XCLR = opc_t.define('OPC_XCLR', 20)
OPC_GETONE = opc_t.define('OPC_GETONE', 21)
OPC_DBG = opc_t.define('OPC_DBG', 22)
OPC_SHPS = opc_t.define('OPC_SHPS', 23)
OPC_SHPE = opc_t.define('OPC_SHPE', 24)
OPC_GETLAST = opc_t.define('OPC_GETLAST', 25)
OPC_PREDT = opc_t.define('OPC_PREDT', 29)
OPC_PREDF = opc_t.define('OPC_PREDF', 30)
OPC_PREDE = opc_t.define('OPC_PREDE', 31)
OPC_BR = opc_t.define('OPC_BR', 40)
OPC_BRAO = opc_t.define('OPC_BRAO', 41)
OPC_BRAA = opc_t.define('OPC_BRAA', 42)
OPC_BRAC = opc_t.define('OPC_BRAC', 43)
OPC_BANY = opc_t.define('OPC_BANY', 44)
OPC_BALL = opc_t.define('OPC_BALL', 45)
OPC_BRAX = opc_t.define('OPC_BRAX', 46)
OPC_DEMOTE = opc_t.define('OPC_DEMOTE', 47)
# -- band 128: moves / conversions (cat1 payload) --
OPC_MOV = opc_t.define('OPC_MOV', 128)
OPC_MOVP = opc_t.define('OPC_MOVP', 129)
OPC_MOVS = opc_t.define('OPC_MOVS', 130)
OPC_MOVMSK = opc_t.define('OPC_MOVMSK', 131)
OPC_SWZ = opc_t.define('OPC_SWZ', 132)
OPC_GAT = opc_t.define('OPC_GAT', 133)
OPC_SCT = opc_t.define('OPC_SCT', 134)
OPC_MOV_IMMED = opc_t.define('OPC_MOV_IMMED', 168)
OPC_MOV_CONST = opc_t.define('OPC_MOV_CONST', 169)
OPC_MOV_GPR = opc_t.define('OPC_MOV_GPR', 170)
OPC_MOV_RELGPR = opc_t.define('OPC_MOV_RELGPR', 171)
OPC_MOV_RELCONST = opc_t.define('OPC_MOV_RELCONST', 172)
OPC_MOVS_IMMED = opc_t.define('OPC_MOVS_IMMED', 173)
OPC_MOVS_A0 = opc_t.define('OPC_MOVS_A0', 174)
OPC_BALLOT_MACRO = opc_t.define('OPC_BALLOT_MACRO', 178)
OPC_ANY_MACRO = opc_t.define('OPC_ANY_MACRO', 179)
OPC_ALL_MACRO = opc_t.define('OPC_ALL_MACRO', 180)
OPC_ELECT_MACRO = opc_t.define('OPC_ELECT_MACRO', 181)
OPC_READ_COND_MACRO = opc_t.define('OPC_READ_COND_MACRO', 182)
OPC_READ_FIRST_MACRO = opc_t.define('OPC_READ_FIRST_MACRO', 183)
OPC_SHPS_MACRO = opc_t.define('OPC_SHPS_MACRO', 184)
OPC_READ_GETLAST_MACRO = opc_t.define('OPC_READ_GETLAST_MACRO', 185)
OPC_SCAN_MACRO = opc_t.define('OPC_SCAN_MACRO', 186)
OPC_SCAN_CLUSTERS_MACRO = opc_t.define('OPC_SCAN_CLUSTERS_MACRO', 188)
# -- band 256: two-source ALU ops (cat2 payload) --
OPC_ADD_F = opc_t.define('OPC_ADD_F', 256)
OPC_MIN_F = opc_t.define('OPC_MIN_F', 257)
OPC_MAX_F = opc_t.define('OPC_MAX_F', 258)
OPC_MUL_F = opc_t.define('OPC_MUL_F', 259)
OPC_SIGN_F = opc_t.define('OPC_SIGN_F', 260)
OPC_CMPS_F = opc_t.define('OPC_CMPS_F', 261)
OPC_ABSNEG_F = opc_t.define('OPC_ABSNEG_F', 262)
OPC_CMPV_F = opc_t.define('OPC_CMPV_F', 263)
OPC_FLOOR_F = opc_t.define('OPC_FLOOR_F', 265)
OPC_CEIL_F = opc_t.define('OPC_CEIL_F', 266)
OPC_RNDNE_F = opc_t.define('OPC_RNDNE_F', 267)
OPC_RNDAZ_F = opc_t.define('OPC_RNDAZ_F', 268)
OPC_TRUNC_F = opc_t.define('OPC_TRUNC_F', 269)
OPC_ADD_U = opc_t.define('OPC_ADD_U', 272)
OPC_ADD_S = opc_t.define('OPC_ADD_S', 273)
OPC_SUB_U = opc_t.define('OPC_SUB_U', 274)
OPC_SUB_S = opc_t.define('OPC_SUB_S', 275)
OPC_CMPS_U = opc_t.define('OPC_CMPS_U', 276)
OPC_CMPS_S = opc_t.define('OPC_CMPS_S', 277)
OPC_MIN_U = opc_t.define('OPC_MIN_U', 278)
OPC_MIN_S = opc_t.define('OPC_MIN_S', 279)
OPC_MAX_U = opc_t.define('OPC_MAX_U', 280)
OPC_MAX_S = opc_t.define('OPC_MAX_S', 281)
OPC_ABSNEG_S = opc_t.define('OPC_ABSNEG_S', 282)
OPC_AND_B = opc_t.define('OPC_AND_B', 284)
OPC_OR_B = opc_t.define('OPC_OR_B', 285)
OPC_NOT_B = opc_t.define('OPC_NOT_B', 286)
OPC_XOR_B = opc_t.define('OPC_XOR_B', 287)
OPC_CMPV_U = opc_t.define('OPC_CMPV_U', 289)
OPC_CMPV_S = opc_t.define('OPC_CMPV_S', 290)
OPC_MUL_U24 = opc_t.define('OPC_MUL_U24', 304)
OPC_MUL_S24 = opc_t.define('OPC_MUL_S24', 305)
OPC_MULL_U = opc_t.define('OPC_MULL_U', 306)
OPC_BFREV_B = opc_t.define('OPC_BFREV_B', 307)
OPC_CLZ_S = opc_t.define('OPC_CLZ_S', 308)
OPC_CLZ_B = opc_t.define('OPC_CLZ_B', 309)
OPC_SHL_B = opc_t.define('OPC_SHL_B', 310)
OPC_SHR_B = opc_t.define('OPC_SHR_B', 311)
OPC_ASHR_B = opc_t.define('OPC_ASHR_B', 312)
OPC_BARY_F = opc_t.define('OPC_BARY_F', 313)
OPC_MGEN_B = opc_t.define('OPC_MGEN_B', 314)
OPC_GETBIT_B = opc_t.define('OPC_GETBIT_B', 315)
OPC_SETRM = opc_t.define('OPC_SETRM', 316)
OPC_CBITS_B = opc_t.define('OPC_CBITS_B', 317)
OPC_SHB = opc_t.define('OPC_SHB', 318)
OPC_MSAD = opc_t.define('OPC_MSAD', 319)
OPC_FLAT_B = opc_t.define('OPC_FLAT_B', 320)
# -- band 384: three-source ops (cat3 payload) --
OPC_MAD_U16 = opc_t.define('OPC_MAD_U16', 384)
OPC_MADSH_U16 = opc_t.define('OPC_MADSH_U16', 385)
OPC_MAD_S16 = opc_t.define('OPC_MAD_S16', 386)
OPC_MADSH_M16 = opc_t.define('OPC_MADSH_M16', 387)
OPC_MAD_U24 = opc_t.define('OPC_MAD_U24', 388)
OPC_MAD_S24 = opc_t.define('OPC_MAD_S24', 389)
OPC_MAD_F16 = opc_t.define('OPC_MAD_F16', 390)
OPC_MAD_F32 = opc_t.define('OPC_MAD_F32', 391)
OPC_SEL_B16 = opc_t.define('OPC_SEL_B16', 392)
OPC_SEL_B32 = opc_t.define('OPC_SEL_B32', 393)
OPC_SEL_S16 = opc_t.define('OPC_SEL_S16', 394)
OPC_SEL_S32 = opc_t.define('OPC_SEL_S32', 395)
OPC_SEL_F16 = opc_t.define('OPC_SEL_F16', 396)
OPC_SEL_F32 = opc_t.define('OPC_SEL_F32', 397)
OPC_SAD_S16 = opc_t.define('OPC_SAD_S16', 398)
OPC_SAD_S32 = opc_t.define('OPC_SAD_S32', 399)
OPC_SHRM = opc_t.define('OPC_SHRM', 400)
OPC_SHLM = opc_t.define('OPC_SHLM', 401)
OPC_SHRG = opc_t.define('OPC_SHRG', 402)
OPC_SHLG = opc_t.define('OPC_SHLG', 403)
OPC_ANDG = opc_t.define('OPC_ANDG', 404)
OPC_DP2ACC = opc_t.define('OPC_DP2ACC', 405)
OPC_DP4ACC = opc_t.define('OPC_DP4ACC', 406)
OPC_WMM = opc_t.define('OPC_WMM', 407)
OPC_WMM_ACCU = opc_t.define('OPC_WMM_ACCU', 408)
# -- band 512: special-function unit (transcendentals) --
OPC_RCP = opc_t.define('OPC_RCP', 512)
OPC_RSQ = opc_t.define('OPC_RSQ', 513)
OPC_LOG2 = opc_t.define('OPC_LOG2', 514)
OPC_EXP2 = opc_t.define('OPC_EXP2', 515)
OPC_SIN = opc_t.define('OPC_SIN', 516)
OPC_COS = opc_t.define('OPC_COS', 517)
OPC_SQRT = opc_t.define('OPC_SQRT', 518)
OPC_HRSQ = opc_t.define('OPC_HRSQ', 521)
OPC_HLOG2 = opc_t.define('OPC_HLOG2', 522)
OPC_HEXP2 = opc_t.define('OPC_HEXP2', 523)
# -- band 640: texture / sampling (cat5 payload) --
OPC_ISAM = opc_t.define('OPC_ISAM', 640)
OPC_ISAML = opc_t.define('OPC_ISAML', 641)
OPC_ISAMM = opc_t.define('OPC_ISAMM', 642)
OPC_SAM = opc_t.define('OPC_SAM', 643)
OPC_SAMB = opc_t.define('OPC_SAMB', 644)
OPC_SAML = opc_t.define('OPC_SAML', 645)
OPC_SAMGQ = opc_t.define('OPC_SAMGQ', 646)
OPC_GETLOD = opc_t.define('OPC_GETLOD', 647)
OPC_CONV = opc_t.define('OPC_CONV', 648)
OPC_CONVM = opc_t.define('OPC_CONVM', 649)
OPC_GETSIZE = opc_t.define('OPC_GETSIZE', 650)
OPC_GETBUF = opc_t.define('OPC_GETBUF', 651)
OPC_GETPOS = opc_t.define('OPC_GETPOS', 652)
OPC_GETINFO = opc_t.define('OPC_GETINFO', 653)
OPC_DSX = opc_t.define('OPC_DSX', 654)
OPC_DSY = opc_t.define('OPC_DSY', 655)
OPC_GATHER4R = opc_t.define('OPC_GATHER4R', 656)
OPC_GATHER4G = opc_t.define('OPC_GATHER4G', 657)
OPC_GATHER4B = opc_t.define('OPC_GATHER4B', 658)
OPC_GATHER4A = opc_t.define('OPC_GATHER4A', 659)
OPC_SAMGP0 = opc_t.define('OPC_SAMGP0', 660)
OPC_SAMGP1 = opc_t.define('OPC_SAMGP1', 661)
OPC_SAMGP2 = opc_t.define('OPC_SAMGP2', 662)
OPC_SAMGP3 = opc_t.define('OPC_SAMGP3', 663)
OPC_DSXPP_1 = opc_t.define('OPC_DSXPP_1', 664)
OPC_DSYPP_1 = opc_t.define('OPC_DSYPP_1', 665)
OPC_RGETPOS = opc_t.define('OPC_RGETPOS', 666)
OPC_RGETINFO = opc_t.define('OPC_RGETINFO', 667)
OPC_BRCST_ACTIVE = opc_t.define('OPC_BRCST_ACTIVE', 668)
OPC_QUAD_SHUFFLE_BRCST = opc_t.define('OPC_QUAD_SHUFFLE_BRCST', 669)
OPC_QUAD_SHUFFLE_HORIZ = opc_t.define('OPC_QUAD_SHUFFLE_HORIZ', 670)
OPC_QUAD_SHUFFLE_VERT = opc_t.define('OPC_QUAD_SHUFFLE_VERT', 671)
OPC_QUAD_SHUFFLE_DIAG = opc_t.define('OPC_QUAD_SHUFFLE_DIAG', 672)
OPC_TCINV = opc_t.define('OPC_TCINV', 673)
OPC_DSXPP_MACRO = opc_t.define('OPC_DSXPP_MACRO', 675)
OPC_DSYPP_MACRO = opc_t.define('OPC_DSYPP_MACRO', 676)
# -- band 768: memory loads/stores/atomics (cat6 payload) --
OPC_LDG = opc_t.define('OPC_LDG', 768)
OPC_LDL = opc_t.define('OPC_LDL', 769)
OPC_LDP = opc_t.define('OPC_LDP', 770)
OPC_STG = opc_t.define('OPC_STG', 771)
OPC_STL = opc_t.define('OPC_STL', 772)
OPC_STP = opc_t.define('OPC_STP', 773)
OPC_LDIB = opc_t.define('OPC_LDIB', 774)
OPC_G2L = opc_t.define('OPC_G2L', 775)
OPC_L2G = opc_t.define('OPC_L2G', 776)
OPC_PREFETCH = opc_t.define('OPC_PREFETCH', 777)
OPC_LDLW = opc_t.define('OPC_LDLW', 778)
OPC_STLW = opc_t.define('OPC_STLW', 779)
OPC_RESFMT = opc_t.define('OPC_RESFMT', 782)
OPC_RESINFO = opc_t.define('OPC_RESINFO', 783)
OPC_ATOMIC_ADD = opc_t.define('OPC_ATOMIC_ADD', 784)
OPC_ATOMIC_SUB = opc_t.define('OPC_ATOMIC_SUB', 785)
OPC_ATOMIC_XCHG = opc_t.define('OPC_ATOMIC_XCHG', 786)
OPC_ATOMIC_INC = opc_t.define('OPC_ATOMIC_INC', 787)
OPC_ATOMIC_DEC = opc_t.define('OPC_ATOMIC_DEC', 788)
OPC_ATOMIC_CMPXCHG = opc_t.define('OPC_ATOMIC_CMPXCHG', 789)
OPC_ATOMIC_MIN = opc_t.define('OPC_ATOMIC_MIN', 790)
OPC_ATOMIC_MAX = opc_t.define('OPC_ATOMIC_MAX', 791)
OPC_ATOMIC_AND = opc_t.define('OPC_ATOMIC_AND', 792)
OPC_ATOMIC_OR = opc_t.define('OPC_ATOMIC_OR', 793)
OPC_ATOMIC_XOR = opc_t.define('OPC_ATOMIC_XOR', 794)
OPC_LDGB = opc_t.define('OPC_LDGB', 795)
OPC_STGB = opc_t.define('OPC_STGB', 796)
OPC_STIB = opc_t.define('OPC_STIB', 797)
OPC_LDC = opc_t.define('OPC_LDC', 798)
OPC_LDLV = opc_t.define('OPC_LDLV', 799)
OPC_PIPR = opc_t.define('OPC_PIPR', 800)
OPC_PIPC = opc_t.define('OPC_PIPC', 801)
OPC_EMIT2 = opc_t.define('OPC_EMIT2', 802)
OPC_ENDLS = opc_t.define('OPC_ENDLS', 803)
OPC_GETSPID = opc_t.define('OPC_GETSPID', 804)
OPC_GETWID = opc_t.define('OPC_GETWID', 805)
OPC_GETFIBERID = opc_t.define('OPC_GETFIBERID', 806)
OPC_SHFL = opc_t.define('OPC_SHFL', 807)
OPC_STC = opc_t.define('OPC_STC', 808)
OPC_RESINFO_B = opc_t.define('OPC_RESINFO_B', 809)
OPC_LDIB_B = opc_t.define('OPC_LDIB_B', 810)
OPC_STIB_B = opc_t.define('OPC_STIB_B', 811)
OPC_ATOMIC_B_ADD = opc_t.define('OPC_ATOMIC_B_ADD', 812)
OPC_ATOMIC_B_SUB = opc_t.define('OPC_ATOMIC_B_SUB', 813)
OPC_ATOMIC_B_XCHG = opc_t.define('OPC_ATOMIC_B_XCHG', 814)
OPC_ATOMIC_B_INC = opc_t.define('OPC_ATOMIC_B_INC', 815)
OPC_ATOMIC_B_DEC = opc_t.define('OPC_ATOMIC_B_DEC', 816)
OPC_ATOMIC_B_CMPXCHG = opc_t.define('OPC_ATOMIC_B_CMPXCHG', 817)
OPC_ATOMIC_B_MIN = opc_t.define('OPC_ATOMIC_B_MIN', 818)
OPC_ATOMIC_B_MAX = opc_t.define('OPC_ATOMIC_B_MAX', 819)
OPC_ATOMIC_B_AND = opc_t.define('OPC_ATOMIC_B_AND', 820)
OPC_ATOMIC_B_OR = opc_t.define('OPC_ATOMIC_B_OR', 821)
OPC_ATOMIC_B_XOR = opc_t.define('OPC_ATOMIC_B_XOR', 822)
OPC_ATOMIC_S_ADD = opc_t.define('OPC_ATOMIC_S_ADD', 823)
OPC_ATOMIC_S_SUB = opc_t.define('OPC_ATOMIC_S_SUB', 824)
OPC_ATOMIC_S_XCHG = opc_t.define('OPC_ATOMIC_S_XCHG', 825)
OPC_ATOMIC_S_INC = opc_t.define('OPC_ATOMIC_S_INC', 826)
OPC_ATOMIC_S_DEC = opc_t.define('OPC_ATOMIC_S_DEC', 827)
OPC_ATOMIC_S_CMPXCHG = opc_t.define('OPC_ATOMIC_S_CMPXCHG', 828)
OPC_ATOMIC_S_MIN = opc_t.define('OPC_ATOMIC_S_MIN', 829)
OPC_ATOMIC_S_MAX = opc_t.define('OPC_ATOMIC_S_MAX', 830)
OPC_ATOMIC_S_AND = opc_t.define('OPC_ATOMIC_S_AND', 831)
OPC_ATOMIC_S_OR = opc_t.define('OPC_ATOMIC_S_OR', 832)
OPC_ATOMIC_S_XOR = opc_t.define('OPC_ATOMIC_S_XOR', 833)
OPC_ATOMIC_G_ADD = opc_t.define('OPC_ATOMIC_G_ADD', 834)
OPC_ATOMIC_G_SUB = opc_t.define('OPC_ATOMIC_G_SUB', 835)
OPC_ATOMIC_G_XCHG = opc_t.define('OPC_ATOMIC_G_XCHG', 836)
OPC_ATOMIC_G_INC = opc_t.define('OPC_ATOMIC_G_INC', 837)
OPC_ATOMIC_G_DEC = opc_t.define('OPC_ATOMIC_G_DEC', 838)
OPC_ATOMIC_G_CMPXCHG = opc_t.define('OPC_ATOMIC_G_CMPXCHG', 839)
OPC_ATOMIC_G_MIN = opc_t.define('OPC_ATOMIC_G_MIN', 840)
OPC_ATOMIC_G_MAX = opc_t.define('OPC_ATOMIC_G_MAX', 841)
OPC_ATOMIC_G_AND = opc_t.define('OPC_ATOMIC_G_AND', 842)
OPC_ATOMIC_G_OR = opc_t.define('OPC_ATOMIC_G_OR', 843)
OPC_ATOMIC_G_XOR = opc_t.define('OPC_ATOMIC_G_XOR', 844)
OPC_LDG_A = opc_t.define('OPC_LDG_A', 845)
OPC_STG_A = opc_t.define('OPC_STG_A', 846)
OPC_SPILL_MACRO = opc_t.define('OPC_SPILL_MACRO', 847)
OPC_RELOAD_MACRO = opc_t.define('OPC_RELOAD_MACRO', 848)
OPC_LDC_K = opc_t.define('OPC_LDC_K', 849)
OPC_STSC = opc_t.define('OPC_STSC', 850)
OPC_LDG_K = opc_t.define('OPC_LDG_K', 851)
OPC_PUSH_CONSTS_LOAD_MACRO = opc_t.define('OPC_PUSH_CONSTS_LOAD_MACRO', 852)
OPC_RAY_INTERSECTION = opc_t.define('OPC_RAY_INTERSECTION', 858)
OPC_RESBASE = opc_t.define('OPC_RESBASE', 859)
# -- band 896: barriers / fences / cache maintenance (cat7 payload) --
OPC_BAR = opc_t.define('OPC_BAR', 896)
OPC_FENCE = opc_t.define('OPC_FENCE', 897)
OPC_SLEEP = opc_t.define('OPC_SLEEP', 898)
OPC_ICINV = opc_t.define('OPC_ICINV', 899)
OPC_DCCLN = opc_t.define('OPC_DCCLN', 900)
OPC_DCINV = opc_t.define('OPC_DCINV', 901)
OPC_DCFLU = opc_t.define('OPC_DCFLU', 902)
OPC_LOCK = opc_t.define('OPC_LOCK', 903)
OPC_UNLOCK = opc_t.define('OPC_UNLOCK', 904)
OPC_ALIAS = opc_t.define('OPC_ALIAS', 905)
OPC_CCINV = opc_t.define('OPC_CCINV', 906)
# -- band 1024: compiler-internal meta instructions (never encoded) --
OPC_META_INPUT = opc_t.define('OPC_META_INPUT', 1024)
OPC_META_SPLIT = opc_t.define('OPC_META_SPLIT', 1026)
OPC_META_COLLECT = opc_t.define('OPC_META_COLLECT', 1027)
OPC_META_TEX_PREFETCH = opc_t.define('OPC_META_TEX_PREFETCH', 1028)
OPC_META_PARALLEL_COPY = opc_t.define('OPC_META_PARALLEL_COPY', 1029)
OPC_META_PHI = opc_t.define('OPC_META_PHI', 1030)
OPC_META_RAW = opc_t.define('OPC_META_RAW', 1031)
# Shader variant key. The leading bitfields (Annotated[..., byte, bits, shift])
# overlay the `_global` uint32 at offset 0 — same offset means a C union view
# of the packed flags. `_global` is the Python-safe rename of C `global`.
@c.record
class struct_ir3_shader_key(c.Struct):
  SIZE = 80
  ucp_enables: Annotated[Annotated[int, ctypes.c_uint32], 0, 8, 0]
  has_per_samp: Annotated[Annotated[int, ctypes.c_uint32], 1, 1, 0]
  sample_shading: Annotated[Annotated[int, ctypes.c_uint32], 1, 1, 1]
  msaa: Annotated[Annotated[int, ctypes.c_uint32], 1, 1, 2]
  rasterflat: Annotated[Annotated[int, ctypes.c_uint32], 1, 1, 3]
  tessellation: Annotated[Annotated[int, ctypes.c_uint32], 1, 2, 4]
  has_gs: Annotated[Annotated[int, ctypes.c_uint32], 1, 1, 6]
  tcs_store_primid: Annotated[Annotated[int, ctypes.c_uint32], 1, 1, 7]
  safe_constlen: Annotated[Annotated[int, ctypes.c_uint32], 2, 1, 0]
  force_dual_color_blend: Annotated[Annotated[int, ctypes.c_uint32], 2, 1, 1]
  _global: Annotated[uint32_t, 0]
  vsamples: Annotated[uint32_t, 4]
  fsamples: Annotated[uint32_t, 8]
  vastc_srgb: Annotated[uint16_t, 12]
  fastc_srgb: Annotated[uint16_t, 14]
  vsampler_swizzles: Annotated[c.Array[uint16_t, Literal[16]], 16]
  fsampler_swizzles: Annotated[c.Array[uint16_t, Literal[16]], 48]
# Bidirectional SSBO/image <-> texture-slot remapping tables (32 entries each
# direction) plus the resulting texture count and base slot.
@c.record
class struct_ir3_ibo_mapping(c.Struct):
  SIZE = 98
  ssbo_to_tex: Annotated[c.Array[uint8_t, Literal[32]], 0]
  image_to_tex: Annotated[c.Array[uint8_t, Literal[32]], 32]
  tex_to_image: Annotated[c.Array[uint8_t, Literal[32]], 64]
  num_tex: Annotated[uint8_t, 96]
  tex_base: Annotated[uint8_t, 97]
# Disassembly capture: a flag requesting it and char* pointers for the NIR
# and ir3 disassembly text.
@c.record
class struct_ir3_disasm_info(c.Struct):
  SIZE = 24
  write_disasm: Annotated[Annotated[bool, ctypes.c_bool], 0]
  nir: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 8]
  disasm: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 16]
# NIR-level options passed to the compiler; currently just robustness modes.
@c.record
class struct_ir3_shader_nir_options(c.Struct):
  SIZE = 4
  robust_modes: Annotated[nir_variable_mode, 0]
# Per-shader compile options: wave-size policy, push-constant delivery and
# range, pre-reserved const allocations, NIR options, and a remap flag.
@c.record
class struct_ir3_shader_options(c.Struct):
  SIZE = 228
  api_wavesize: Annotated[enum_ir3_wavesize_option, 0]
  real_wavesize: Annotated[enum_ir3_wavesize_option, 4]
  push_consts_type: Annotated[enum_ir3_push_consts_type, 8]
  push_consts_base: Annotated[uint32_t, 12]
  push_consts_dwords: Annotated[uint32_t, 16]
  const_allocs: Annotated[struct_ir3_const_allocations, 20]
  nir_options: Annotated[struct_ir3_shader_nir_options, 220]
  fragdata_dynamic_remap: Annotated[Annotated[bool, ctypes.c_bool], 224]
# One shader output: varying slot, register id, view index, and two bitfields
# packed into byte 3 (aliased component mask, half-precision flag).
@c.record
class struct_ir3_shader_output(c.Struct):
  SIZE = 4
  slot: Annotated[uint8_t, 0]
  regid: Annotated[uint8_t, 1]
  view: Annotated[uint8_t, 2]
  aliased_components: Annotated[uint8_t, 3, 4, 0]
  half: Annotated[Annotated[bool, ctypes.c_bool], 3, 1, 4]
# Opaque handle type (freedreno buffer object); only used via pointers, so no
# field layout is exposed.
class struct_fd_bo(ctypes.Structure): pass
# Top-level ir3 IR container: compiler back-pointer, shader stage, several
# (count, capacity, pointer) growable instruction arrays, and the block/array
# lists. The *_count/*_sz/ptr triples follow the generated array convention.
@c.record
class struct_ir3(c.Struct):
  SIZE = 152
  compiler: Annotated[c.POINTER[struct_ir3_compiler], 0]
  type: Annotated[gl_shader_stage, 8]
  inputs_count: Annotated[Annotated[int, ctypes.c_uint32], 12]
  inputs_sz: Annotated[Annotated[int, ctypes.c_uint32], 16]
  inputs: Annotated[c.POINTER[c.POINTER[struct_ir3_instruction]], 24]
  baryfs_count: Annotated[Annotated[int, ctypes.c_uint32], 32]
  baryfs_sz: Annotated[Annotated[int, ctypes.c_uint32], 36]
  baryfs: Annotated[c.POINTER[c.POINTER[struct_ir3_instruction]], 40]
  a0_users_count: Annotated[Annotated[int, ctypes.c_uint32], 48]
  a0_users_sz: Annotated[Annotated[int, ctypes.c_uint32], 52]
  a0_users: Annotated[c.POINTER[c.POINTER[struct_ir3_instruction]], 56]
  a1_users_count: Annotated[Annotated[int, ctypes.c_uint32], 64]
  a1_users_sz: Annotated[Annotated[int, ctypes.c_uint32], 68]
  a1_users: Annotated[c.POINTER[c.POINTER[struct_ir3_instruction]], 72]
  astc_srgb_count: Annotated[Annotated[int, ctypes.c_uint32], 80]
  astc_srgb_sz: Annotated[Annotated[int, ctypes.c_uint32], 84]
  astc_srgb: Annotated[c.POINTER[c.POINTER[struct_ir3_instruction]], 88]
  tg4_count: Annotated[Annotated[int, ctypes.c_uint32], 96]
  tg4_sz: Annotated[Annotated[int, ctypes.c_uint32], 100]
  tg4: Annotated[c.POINTER[c.POINTER[struct_ir3_instruction]], 104]
  block_list: Annotated[struct_list_head, 112]
  array_list: Annotated[struct_list_head, 128]
  instr_count: Annotated[Annotated[int, ctypes.c_uint32], 144]
# One ir3 instruction. All the cat0..raw members share offset 48 — a C union
# of per-category payloads selected by `opc` (see the opc_t value bands).
@c.record
class struct_ir3_instruction(c.Struct):
  SIZE = 184
  block: Annotated[c.POINTER[struct_ir3_block], 0]
  opc: Annotated[opc_t, 8]
  flags: Annotated[enum_ir3_instruction_flags, 12]
  repeat: Annotated[uint8_t, 16]
  nop: Annotated[uint8_t, 17]
  srcs_count: Annotated[Annotated[int, ctypes.c_uint32], 20]
  dsts_count: Annotated[Annotated[int, ctypes.c_uint32], 24]
  dsts: Annotated[c.POINTER[c.POINTER[struct_ir3_register]], 32]
  srcs: Annotated[c.POINTER[c.POINTER[struct_ir3_register]], 40]
  # union payload at offset 48, discriminated by opc:
  cat0: Annotated[struct_ir3_instruction_cat0, 48]
  cat1: Annotated[struct_ir3_instruction_cat1, 48]
  cat2: Annotated[struct_ir3_instruction_cat2, 48]
  cat3: Annotated[struct_ir3_instruction_cat3, 48]
  cat5: Annotated[struct_ir3_instruction_cat5, 48]
  cat6: Annotated[struct_ir3_instruction_cat6, 48]
  cat7: Annotated[struct_ir3_instruction_cat7, 48]
  split: Annotated[struct_ir3_instruction_split, 48]
  end: Annotated[struct_ir3_instruction_end, 48]
  phi: Annotated[struct_ir3_instruction_phi, 48]
  prefetch: Annotated[struct_ir3_instruction_prefetch, 48]
  input: Annotated[struct_ir3_instruction_input, 48]
  push_consts: Annotated[struct_ir3_instruction_push_consts, 48]
  raw: Annotated[struct_ir3_instruction_raw, 48]
  ip: Annotated[uint32_t, 80]
  data: Annotated[ctypes.c_void_p, 88]
  uses: Annotated[c.POINTER[struct_set], 96]
  use_count: Annotated[Annotated[int, ctypes.c_int32], 104]
  address: Annotated[c.POINTER[struct_ir3_register], 112]
  deps_count: Annotated[Annotated[int, ctypes.c_uint32], 120]
  deps_sz: Annotated[Annotated[int, ctypes.c_uint32], 124]
  deps: Annotated[c.POINTER[c.POINTER[struct_ir3_instruction]], 128]
  barrier_class: Annotated[struct_ir3_instruction_barrier_class, 136]
  barrier_conflict: Annotated[struct_ir3_instruction_barrier_class, 140]
  node: Annotated[struct_list_head, 144]
  rpt_node: Annotated[struct_list_head, 160]
  serialno: Annotated[uint32_t, 176]
  line: Annotated[Annotated[int, ctypes.c_int32], 180]
# A basic block in the ir3 CFG: intrusive list node, instruction list, up to
# two successors, predecessor/physical-edge arrays, IP range, and dominator
# tree data (imm_dom, dom_children, pre/post indices).
@c.record
class struct_ir3_block(c.Struct):
  SIZE = 200
  node: Annotated[struct_list_head, 0]
  shader: Annotated[c.POINTER[struct_ir3], 16]
  nblock: Annotated[c.POINTER[struct_nir_block], 24]
  instr_list: Annotated[struct_list_head, 32]
  successors: Annotated[c.Array[c.POINTER[struct_ir3_block], Literal[2]], 48]
  divergent_condition: Annotated[Annotated[bool, ctypes.c_bool], 64]
  predecessors_count: Annotated[Annotated[int, ctypes.c_uint32], 68]
  predecessors_sz: Annotated[Annotated[int, ctypes.c_uint32], 72]
  predecessors: Annotated[c.POINTER[c.POINTER[struct_ir3_block]], 80]
  physical_predecessors_count: Annotated[Annotated[int, ctypes.c_uint32], 88]
  physical_predecessors_sz: Annotated[Annotated[int, ctypes.c_uint32], 92]
  physical_predecessors: Annotated[c.POINTER[c.POINTER[struct_ir3_block]], 96]
  physical_successors_count: Annotated[Annotated[int, ctypes.c_uint32], 104]
  physical_successors_sz: Annotated[Annotated[int, ctypes.c_uint32], 108]
  physical_successors: Annotated[c.POINTER[c.POINTER[struct_ir3_block]], 112]
  start_ip: Annotated[uint16_t, 120]
  end_ip: Annotated[uint16_t, 122]
  reconvergence_point: Annotated[Annotated[bool, ctypes.c_bool], 124]
  in_early_preamble: Annotated[Annotated[bool, ctypes.c_bool], 125]
  keeps_count: Annotated[Annotated[int, ctypes.c_uint32], 128]
  keeps_sz: Annotated[Annotated[int, ctypes.c_uint32], 132]
  keeps: Annotated[c.POINTER[c.POINTER[struct_ir3_instruction]], 136]
  data: Annotated[ctypes.c_void_p, 144]
  index: Annotated[uint32_t, 152]
  imm_dom: Annotated[c.POINTER[struct_ir3_block], 160]
  dom_children_count: Annotated[Annotated[int, ctypes.c_uint32], 168]
  dom_children_sz: Annotated[Annotated[int, ctypes.c_uint32], 172]
  dom_children: Annotated[c.POINTER[c.POINTER[struct_ir3_block]], 176]
  dom_pre_index: Annotated[uint32_t, 184]
  dom_post_index: Annotated[uint32_t, 188]
  loop_depth: Annotated[uint32_t, 192]
# Per-instruction flag bits (OR-able). Note IR3_INSTR_MARK and
# IR3_INSTR_SHARED_SPILL deliberately share value 65536 — two names for the
# same bit in the generated headers.
class enum_ir3_instruction_flags(Annotated[int, ctypes.c_uint32], c.Enum): pass
IR3_INSTR_SY = enum_ir3_instruction_flags.define('IR3_INSTR_SY', 1)
IR3_INSTR_SS = enum_ir3_instruction_flags.define('IR3_INSTR_SS', 2)
IR3_INSTR_JP = enum_ir3_instruction_flags.define('IR3_INSTR_JP', 4)
IR3_INSTR_EQ = enum_ir3_instruction_flags.define('IR3_INSTR_EQ', 8)
IR3_INSTR_UL = enum_ir3_instruction_flags.define('IR3_INSTR_UL', 16)
IR3_INSTR_3D = enum_ir3_instruction_flags.define('IR3_INSTR_3D', 32)
IR3_INSTR_A = enum_ir3_instruction_flags.define('IR3_INSTR_A', 64)
IR3_INSTR_O = enum_ir3_instruction_flags.define('IR3_INSTR_O', 128)
IR3_INSTR_P = enum_ir3_instruction_flags.define('IR3_INSTR_P', 256)
IR3_INSTR_S = enum_ir3_instruction_flags.define('IR3_INSTR_S', 512)
IR3_INSTR_S2EN = enum_ir3_instruction_flags.define('IR3_INSTR_S2EN', 1024)
IR3_INSTR_SAT = enum_ir3_instruction_flags.define('IR3_INSTR_SAT', 2048)
IR3_INSTR_B = enum_ir3_instruction_flags.define('IR3_INSTR_B', 4096)
IR3_INSTR_NONUNIF = enum_ir3_instruction_flags.define('IR3_INSTR_NONUNIF', 8192)
IR3_INSTR_A1EN = enum_ir3_instruction_flags.define('IR3_INSTR_A1EN', 16384)
IR3_INSTR_U = enum_ir3_instruction_flags.define('IR3_INSTR_U', 32768)
IR3_INSTR_MARK = enum_ir3_instruction_flags.define('IR3_INSTR_MARK', 65536)
IR3_INSTR_SHARED_SPILL = enum_ir3_instruction_flags.define('IR3_INSTR_SHARED_SPILL', 65536)
IR3_INSTR_UNUSED = enum_ir3_instruction_flags.define('IR3_INSTR_UNUSED', 131072)
IR3_INSTR_NEEDS_HELPERS = enum_ir3_instruction_flags.define('IR3_INSTR_NEEDS_HELPERS', 262144)
IR3_INSTR_V = enum_ir3_instruction_flags.define('IR3_INSTR_V', 524288)
IR3_INSTR_INV_1D = enum_ir3_instruction_flags.define('IR3_INSTR_INV_1D', 1048576)
IR3_INSTR_IMM_OFFSET = enum_ir3_instruction_flags.define('IR3_INSTR_IMM_OFFSET', 2097152)
# An ir3 register operand. iim_val / uim_val / fim_val / array all sit at
# offset 16 — a C union whose active member depends on the register flags
# (IMMED vs ARRAY, see enum_ir3_register_flags). `_def` renames C `def`.
@c.record
class struct_ir3_register(c.Struct):
  SIZE = 80
  flags: Annotated[enum_ir3_register_flags, 0]
  name: Annotated[Annotated[int, ctypes.c_uint32], 4]
  wrmask: Annotated[Annotated[int, ctypes.c_uint32], 8, 16, 0]
  size: Annotated[Annotated[int, ctypes.c_uint32], 10, 16, 0]
  num: Annotated[uint16_t, 12]
  iim_val: Annotated[int32_t, 16]
  uim_val: Annotated[uint32_t, 16]
  fim_val: Annotated[Annotated[float, ctypes.c_float], 16]
  array: Annotated[struct_ir3_register_array, 16]
  instr: Annotated[c.POINTER[struct_ir3_instruction], 24]
  _def: Annotated[c.POINTER[struct_ir3_register], 32]
  tied: Annotated[c.POINTER[struct_ir3_register], 40]
  spill_slot: Annotated[Annotated[int, ctypes.c_uint32], 48]
  next_use: Annotated[Annotated[int, ctypes.c_uint32], 52]
  merge_set_offset: Annotated[Annotated[int, ctypes.c_uint32], 56]
  merge_set: Annotated[c.POINTER[struct_ir3_merge_set], 64]
  interval_start: Annotated[Annotated[int, ctypes.c_uint32], 72]
  interval_end: Annotated[Annotated[int, ctypes.c_uint32], 76]
# Register-operand flag bits (OR-able powers of two) describing operand kind
# (CONST/IMMED/SSA/ARRAY), modifiers (negate/abs/not), and liveness marks.
class enum_ir3_register_flags(Annotated[int, ctypes.c_uint32], c.Enum): pass
IR3_REG_CONST = enum_ir3_register_flags.define('IR3_REG_CONST', 1)
IR3_REG_IMMED = enum_ir3_register_flags.define('IR3_REG_IMMED', 2)
IR3_REG_HALF = enum_ir3_register_flags.define('IR3_REG_HALF', 4)
IR3_REG_SHARED = enum_ir3_register_flags.define('IR3_REG_SHARED', 8)
IR3_REG_RELATIV = enum_ir3_register_flags.define('IR3_REG_RELATIV', 16)
IR3_REG_R = enum_ir3_register_flags.define('IR3_REG_R', 32)
IR3_REG_FNEG = enum_ir3_register_flags.define('IR3_REG_FNEG', 64)
IR3_REG_FABS = enum_ir3_register_flags.define('IR3_REG_FABS', 128)
IR3_REG_SNEG = enum_ir3_register_flags.define('IR3_REG_SNEG', 256)
IR3_REG_SABS = enum_ir3_register_flags.define('IR3_REG_SABS', 512)
IR3_REG_BNOT = enum_ir3_register_flags.define('IR3_REG_BNOT', 1024)
IR3_REG_EI = enum_ir3_register_flags.define('IR3_REG_EI', 2048)
IR3_REG_SSA = enum_ir3_register_flags.define('IR3_REG_SSA', 4096)
IR3_REG_ARRAY = enum_ir3_register_flags.define('IR3_REG_ARRAY', 8192)
IR3_REG_KILL = enum_ir3_register_flags.define('IR3_REG_KILL', 16384)
IR3_REG_FIRST_KILL = enum_ir3_register_flags.define('IR3_REG_FIRST_KILL', 32768)
IR3_REG_UNUSED = enum_ir3_register_flags.define('IR3_REG_UNUSED', 65536)
IR3_REG_EARLY_CLOBBER = enum_ir3_register_flags.define('IR3_REG_EARLY_CLOBBER', 131072)
IR3_REG_LAST_USE = enum_ir3_register_flags.define('IR3_REG_LAST_USE', 262144)
IR3_REG_PREDICATE = enum_ir3_register_flags.define('IR3_REG_PREDICATE', 524288)
IR3_REG_RT = enum_ir3_register_flags.define('IR3_REG_RT', 1048576)
IR3_REG_ALIAS = enum_ir3_register_flags.define('IR3_REG_ALIAS', 2097152)
IR3_REG_FIRST_ALIAS = enum_ir3_register_flags.define('IR3_REG_FIRST_ALIAS', 4194304)
# Generated struct mirror: field annotations are Annotated[ctype, byte_offset].
# SIZE is the C sizeof; do not edit these values by hand -- they must match the
# native library's ABI exactly.
@c.record
class struct_ir3_register_array(c.Struct):
  SIZE = 6
  id: Annotated[uint16_t, 0]
  offset: Annotated[int16_t, 2]
  base: Annotated[uint16_t, 4]
# Merge set used by ir3 register allocation (referenced back at the register
# struct above via a POINTER field).
@c.record
class struct_ir3_merge_set(c.Struct):
  SIZE = 32
  preferred_reg: Annotated[uint16_t, 0]
  size: Annotated[uint16_t, 2]
  alignment: Annotated[uint16_t, 4]
  interval_start: Annotated[Annotated[int, ctypes.c_uint32], 8]
  spill_slot: Annotated[Annotated[int, ctypes.c_uint32], 12]
  regs_count: Annotated[Annotated[int, ctypes.c_uint32], 16]
  # Pointer to an array of `regs_count` register pointers.
  regs: Annotated[c.POINTER[c.POINTER[struct_ir3_register]], 24]
# Per-category instruction payloads. "cat0" etc. are the ir3 ISA's instruction
# categories (flow control, moves, ALU, ...) -- names inferred from Mesa ir3,
# verify against instr-a3xx.h / ir3.h.
@c.record
class struct_ir3_instruction_cat0(c.Struct):
  SIZE = 32
  inv1: Annotated[Annotated[bytes, ctypes.c_char], 0]
  inv2: Annotated[Annotated[bytes, ctypes.c_char], 1]
  immed: Annotated[Annotated[int, ctypes.c_int32], 4]
  # Branch target block and its label string.
  target: Annotated[c.POINTER[struct_ir3_block], 8]
  target_label: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 16]
  idx: Annotated[Annotated[int, ctypes.c_uint32], 24]
@c.record
class struct_ir3_instruction_cat1(c.Struct):
  SIZE = 16
  src_type: Annotated[type_t, 0]
  dst_type: Annotated[type_t, 4]
  round: Annotated[round_t, 8]
  reduce_op: Annotated[reduce_op_t, 12]
# Rounding mode for conversions (cat1).
class round_t(Annotated[int, ctypes.c_uint32], c.Enum): pass
ROUND_ZERO = round_t.define('ROUND_ZERO', 0)
ROUND_EVEN = round_t.define('ROUND_EVEN', 1)
ROUND_POS_INF = round_t.define('ROUND_POS_INF', 2)
ROUND_NEG_INF = round_t.define('ROUND_NEG_INF', 3)
# Reduction operator; _U/_S/_F/_B suffixes follow the unsigned/signed/float/
# bitwise naming convention used throughout these bindings.
class reduce_op_t(Annotated[int, ctypes.c_uint32], c.Enum): pass
REDUCE_OP_ADD_U = reduce_op_t.define('REDUCE_OP_ADD_U', 0)
REDUCE_OP_ADD_F = reduce_op_t.define('REDUCE_OP_ADD_F', 1)
REDUCE_OP_MUL_U = reduce_op_t.define('REDUCE_OP_MUL_U', 2)
REDUCE_OP_MUL_F = reduce_op_t.define('REDUCE_OP_MUL_F', 3)
REDUCE_OP_MIN_U = reduce_op_t.define('REDUCE_OP_MIN_U', 4)
REDUCE_OP_MIN_S = reduce_op_t.define('REDUCE_OP_MIN_S', 5)
REDUCE_OP_MIN_F = reduce_op_t.define('REDUCE_OP_MIN_F', 6)
REDUCE_OP_MAX_U = reduce_op_t.define('REDUCE_OP_MAX_U', 7)
REDUCE_OP_MAX_S = reduce_op_t.define('REDUCE_OP_MAX_S', 8)
REDUCE_OP_MAX_F = reduce_op_t.define('REDUCE_OP_MAX_F', 9)
REDUCE_OP_AND_B = reduce_op_t.define('REDUCE_OP_AND_B', 10)
REDUCE_OP_OR_B = reduce_op_t.define('REDUCE_OP_OR_B', 11)
REDUCE_OP_XOR_B = reduce_op_t.define('REDUCE_OP_XOR_B', 12)
# cat2 payload: just a comparison condition code.
@c.record
class struct_ir3_instruction_cat2(c.Struct):
  SIZE = 4
  condition: Annotated[struct_ir3_instruction_cat2_condition, 0]
# Standard six-way comparison conditions (LT/LE/GT/GE/EQ/NE).
class struct_ir3_instruction_cat2_condition(Annotated[int, ctypes.c_uint32], c.Enum): pass
IR3_COND_LT = struct_ir3_instruction_cat2_condition.define('IR3_COND_LT', 0)
IR3_COND_LE = struct_ir3_instruction_cat2_condition.define('IR3_COND_LE', 1)
IR3_COND_GT = struct_ir3_instruction_cat2_condition.define('IR3_COND_GT', 2)
IR3_COND_GE = struct_ir3_instruction_cat2_condition.define('IR3_COND_GE', 3)
IR3_COND_EQ = struct_ir3_instruction_cat2_condition.define('IR3_COND_EQ', 4)
IR3_COND_NE = struct_ir3_instruction_cat2_condition.define('IR3_COND_NE', 5)
# cat3 payload: source signedness / packing selectors plus an operand-swap flag.
@c.record
class struct_ir3_instruction_cat3(c.Struct):
  SIZE = 12
  signedness: Annotated[struct_ir3_instruction_cat3_signedness, 0]
  packed: Annotated[struct_ir3_instruction_cat3_packed, 4]
  swapped: Annotated[Annotated[bool, ctypes.c_bool], 8]
class struct_ir3_instruction_cat3_signedness(Annotated[int, ctypes.c_uint32], c.Enum): pass
IR3_SRC_UNSIGNED = struct_ir3_instruction_cat3_signedness.define('IR3_SRC_UNSIGNED', 0)
IR3_SRC_MIXED = struct_ir3_instruction_cat3_signedness.define('IR3_SRC_MIXED', 1)
class struct_ir3_instruction_cat3_packed(Annotated[int, ctypes.c_uint32], c.Enum): pass
IR3_SRC_PACKED_LOW = struct_ir3_instruction_cat3_packed.define('IR3_SRC_PACKED_LOW', 0)
IR3_SRC_PACKED_HIGH = struct_ir3_instruction_cat3_packed.define('IR3_SRC_PACKED_HIGH', 1)
# cat5 payload (texture/sampler ops). Four-element Annotated fields are
# bitfields: Annotated[ctype, byte_offset, bit_width, bit_shift] -- generator
# convention, confirm against the binding helper `c.record`.
@c.record
class struct_ir3_instruction_cat5(c.Struct):
  SIZE = 16
  samp: Annotated[Annotated[int, ctypes.c_uint32], 0]
  tex: Annotated[Annotated[int, ctypes.c_uint32], 4]
  tex_base: Annotated[Annotated[int, ctypes.c_uint32], 8, 3, 0]
  cluster_size: Annotated[Annotated[int, ctypes.c_uint32], 8, 4, 3]
  type: Annotated[type_t, 12]
# cat6 payload (memory ops): type, destination offset, immediate value, and a
# packed word of bitfields at offset 12.
@c.record
class struct_ir3_instruction_cat6(c.Struct):
  SIZE = 16
  type: Annotated[type_t, 0]
  dst_offset: Annotated[Annotated[int, ctypes.c_int32], 4]
  iim_val: Annotated[Annotated[int, ctypes.c_int32], 8]
  d: Annotated[Annotated[int, ctypes.c_uint32], 12, 3, 0]
  typed: Annotated[Annotated[bool, ctypes.c_bool], 12, 1, 3]
  base: Annotated[Annotated[int, ctypes.c_uint32], 12, 3, 4]
  shfl_mode: Annotated[ir3_shfl_mode, 12, 3, 7]
# Subgroup shuffle modes; note the values are sparse (no 0, 4, 5).
class ir3_shfl_mode(Annotated[int, ctypes.c_uint32], c.Enum): pass
SHFL_XOR = ir3_shfl_mode.define('SHFL_XOR', 1)
SHFL_UP = ir3_shfl_mode.define('SHFL_UP', 2)
SHFL_DOWN = ir3_shfl_mode.define('SHFL_DOWN', 3)
SHFL_RUP = ir3_shfl_mode.define('SHFL_RUP', 6)
SHFL_RDOWN = ir3_shfl_mode.define('SHFL_RDOWN', 7)
# cat7 payload: four single-bit flags (w/r/l/g) packed into the first word,
# then alias-table metadata.
@c.record
class struct_ir3_instruction_cat7(c.Struct):
  SIZE = 16
  w: Annotated[Annotated[int, ctypes.c_uint32], 0, 1, 0]
  r: Annotated[Annotated[int, ctypes.c_uint32], 0, 1, 1]
  l: Annotated[Annotated[int, ctypes.c_uint32], 0, 1, 2]
  g: Annotated[Annotated[int, ctypes.c_uint32], 0, 1, 3]
  alias_scope: Annotated[ir3_alias_scope, 4]
  alias_table_size_minus_one: Annotated[Annotated[int, ctypes.c_uint32], 8]
  alias_type_float: Annotated[Annotated[bool, ctypes.c_bool], 12]
# What an alias table entry refers to: textures, render targets, or memory.
class ir3_alias_scope(Annotated[int, ctypes.c_uint32], c.Enum): pass
ALIAS_TEX = ir3_alias_scope.define('ALIAS_TEX', 0)
ALIAS_RT = ir3_alias_scope.define('ALIAS_RT', 1)
ALIAS_MEM = ir3_alias_scope.define('ALIAS_MEM', 2)
# Payloads for ir3 pseudo-instructions (split/end/phi/prefetch/input/
# push-consts/raw). Same Annotated[ctype, byte_offset] layout convention as the
# structs above.
@c.record
class struct_ir3_instruction_split(c.Struct):
  SIZE = 4
  off: Annotated[Annotated[int, ctypes.c_int32], 0]
@c.record
class struct_ir3_instruction_end(c.Struct):
  SIZE = 8
  # Pointer to an array of output indices.
  outidxs: Annotated[c.POINTER[Annotated[int, ctypes.c_uint32]], 0]
@c.record
class struct_ir3_instruction_phi(c.Struct):
  SIZE = 16
  # Opaque pointer (generator emitted void* -- original C type unknown here).
  nphi: Annotated[ctypes.c_void_p, 0]
  comp: Annotated[Annotated[int, ctypes.c_uint32], 8]
@c.record
class struct_ir3_instruction_prefetch(c.Struct):
  SIZE = 16
  samp: Annotated[Annotated[int, ctypes.c_uint32], 0]
  tex: Annotated[Annotated[int, ctypes.c_uint32], 4]
  input_offset: Annotated[Annotated[int, ctypes.c_uint32], 8]
  samp_base: Annotated[Annotated[int, ctypes.c_uint32], 12, 3, 0]
  tex_base: Annotated[Annotated[int, ctypes.c_uint32], 12, 3, 3]
@c.record
class struct_ir3_instruction_input(c.Struct):
  SIZE = 8
  inidx: Annotated[Annotated[int, ctypes.c_int32], 0]
  sysval: Annotated[gl_system_value, 4]
@c.record
class struct_ir3_instruction_push_consts(c.Struct):
  SIZE = 12
  src_base: Annotated[Annotated[int, ctypes.c_uint32], 0]
  src_size: Annotated[Annotated[int, ctypes.c_uint32], 4]
  dst_base: Annotated[Annotated[int, ctypes.c_uint32], 8]
@c.record
class struct_ir3_instruction_raw(c.Struct):
  SIZE = 8
  # Raw 64-bit encoded instruction word.
  value: Annotated[uint64_t, 0]
# Barrier classes: a bitmask describing what kinds of access a barrier orders
# (shared/image/buffer/array/private read vs. write, plus const writes and
# active-fiber access). Power-of-two values, OR-able.
class struct_ir3_instruction_barrier_class(Annotated[int, ctypes.c_uint32], c.Enum): pass
IR3_BARRIER_EVERYTHING = struct_ir3_instruction_barrier_class.define('IR3_BARRIER_EVERYTHING', 1)
IR3_BARRIER_SHARED_R = struct_ir3_instruction_barrier_class.define('IR3_BARRIER_SHARED_R', 2)
IR3_BARRIER_SHARED_W = struct_ir3_instruction_barrier_class.define('IR3_BARRIER_SHARED_W', 4)
IR3_BARRIER_IMAGE_R = struct_ir3_instruction_barrier_class.define('IR3_BARRIER_IMAGE_R', 8)
IR3_BARRIER_IMAGE_W = struct_ir3_instruction_barrier_class.define('IR3_BARRIER_IMAGE_W', 16)
IR3_BARRIER_BUFFER_R = struct_ir3_instruction_barrier_class.define('IR3_BARRIER_BUFFER_R', 32)
IR3_BARRIER_BUFFER_W = struct_ir3_instruction_barrier_class.define('IR3_BARRIER_BUFFER_W', 64)
IR3_BARRIER_ARRAY_R = struct_ir3_instruction_barrier_class.define('IR3_BARRIER_ARRAY_R', 128)
IR3_BARRIER_ARRAY_W = struct_ir3_instruction_barrier_class.define('IR3_BARRIER_ARRAY_W', 256)
IR3_BARRIER_PRIVATE_R = struct_ir3_instruction_barrier_class.define('IR3_BARRIER_PRIVATE_R', 512)
IR3_BARRIER_PRIVATE_W = struct_ir3_instruction_barrier_class.define('IR3_BARRIER_PRIVATE_W', 1024)
IR3_BARRIER_CONST_W = struct_ir3_instruction_barrier_class.define('IR3_BARRIER_CONST_W', 2048)
IR3_BARRIER_ACTIVE_FIBERS_R = struct_ir3_instruction_barrier_class.define('IR3_BARRIER_ACTIVE_FIBERS_R', 4096)
IR3_BARRIER_ACTIVE_FIBERS_W = struct_ir3_instruction_barrier_class.define('IR3_BARRIER_ACTIVE_FIBERS_W', 8192)
# Compiled-shader statistics block: sizes, instruction counts per kind,
# register/const high-water marks, and scheduling stall counters.
@c.record
class struct_ir3_info(c.Struct):
  SIZE = 64
  size: Annotated[uint32_t, 0]
  constant_data_offset: Annotated[uint32_t, 4]
  sizedwords: Annotated[uint16_t, 8]
  instrs_count: Annotated[uint16_t, 10]
  preamble_instrs_count: Annotated[uint16_t, 12]
  nops_count: Annotated[uint16_t, 14]
  mov_count: Annotated[uint16_t, 16]
  cov_count: Annotated[uint16_t, 18]
  stp_count: Annotated[uint16_t, 20]
  ldp_count: Annotated[uint16_t, 22]
  # Highest full/half register and const used (signed: presumably -1 = none).
  max_reg: Annotated[int8_t, 24]
  max_half_reg: Annotated[int8_t, 25]
  max_const: Annotated[int16_t, 26]
  max_waves: Annotated[int8_t, 28]
  subgroup_size: Annotated[uint8_t, 29]
  double_threadsize: Annotated[Annotated[bool, ctypes.c_bool], 30]
  multi_dword_ldp_stp: Annotated[Annotated[bool, ctypes.c_bool], 31]
  early_preamble: Annotated[Annotated[bool, ctypes.c_bool], 32]
  uses_ray_intersection: Annotated[Annotated[bool, ctypes.c_bool], 33]
  # ss/sy sync-flag counts and stall estimates (names from ir3 scheduling).
  ss: Annotated[uint16_t, 34]
  sy: Annotated[uint16_t, 36]
  sstall: Annotated[uint16_t, 38]
  systall: Annotated[uint16_t, 40]
  last_baryf: Annotated[uint16_t, 42]
  last_helper: Annotated[uint16_t, 44]
  # Instruction count per ISA category (8 categories).
  instrs_per_cat: Annotated[c.Array[uint16_t, Literal[8]], 46]
# Per-input record of a shader variant: slot/regid/compmask/inloc bytes plus a
# packed flags byte (bitfields at byte 4).
@c.record
class struct_ir3_shader_variant_input(c.Struct):
  SIZE = 5
  slot: Annotated[uint8_t, 0]
  regid: Annotated[uint8_t, 1]
  compmask: Annotated[uint8_t, 2]
  inloc: Annotated[uint8_t, 3]
  sysval: Annotated[Annotated[bool, ctypes.c_bool], 4, 1, 0]
  bary: Annotated[Annotated[bool, ctypes.c_bool], 4, 1, 1]
  rasterflat: Annotated[Annotated[bool, ctypes.c_bool], 4, 1, 2]
  half: Annotated[Annotated[bool, ctypes.c_bool], 4, 1, 3]
  flat: Annotated[Annotated[bool, ctypes.c_bool], 4, 1, 4]
# ASTC-sRGB and TG4 workaround tables share the same shape: a base, a count,
# and up to 16 original texture indices.
@c.record
class struct_ir3_shader_variant_astc_srgb(c.Struct):
  SIZE = 72
  base: Annotated[Annotated[int, ctypes.c_uint32], 0]
  count: Annotated[Annotated[int, ctypes.c_uint32], 4]
  orig_idx: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[16]], 8]
@c.record
class struct_ir3_shader_variant_tg4(c.Struct):
  SIZE = 72
  base: Annotated[Annotated[int, ctypes.c_uint32], 0]
  count: Annotated[Annotated[int, ctypes.c_uint32], 4]
  orig_idx: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[16]], 8]
# Tessellation state of a variant; spacing/ccw/point_mode share a bitfield byte.
@c.record
class struct_ir3_shader_variant_tess(c.Struct):
  SIZE = 8
  primitive_mode: Annotated[enum_tess_primitive_mode, 0]
  tcs_vertices_out: Annotated[uint8_t, 4]
  spacing: Annotated[enum_gl_tess_spacing, 5, 2, 0]
  ccw: Annotated[Annotated[bool, ctypes.c_bool], 5, 1, 2]
  point_mode: Annotated[Annotated[bool, ctypes.c_bool], 5, 1, 3]
class enum_gl_tess_spacing(Annotated[int, ctypes.c_uint32], c.Enum): pass
TESS_SPACING_UNSPECIFIED = enum_gl_tess_spacing.define('TESS_SPACING_UNSPECIFIED', 0)
TESS_SPACING_EQUAL = enum_gl_tess_spacing.define('TESS_SPACING_EQUAL', 1)
TESS_SPACING_FRACTIONAL_ODD = enum_gl_tess_spacing.define('TESS_SPACING_FRACTIONAL_ODD', 2)
TESS_SPACING_FRACTIONAL_EVEN = enum_gl_tess_spacing.define('TESS_SPACING_FRACTIONAL_EVEN', 3)
# Per-stage variant state: geometry, fragment, and compute shaders.
@c.record
class struct_ir3_shader_variant_gs(c.Struct):
  SIZE = 6
  output_primitive: Annotated[uint16_t, 0]
  vertices_out: Annotated[uint16_t, 2]
  invocations: Annotated[uint8_t, 4]
  vertices_in: Annotated[uint8_t, 5, 3, 0]
@c.record
class struct_ir3_shader_variant_fs(c.Struct):
  SIZE = 8
  early_fragment_tests: Annotated[Annotated[bool, ctypes.c_bool], 0, 1, 0]
  color_is_dual_source: Annotated[Annotated[bool, ctypes.c_bool], 0, 1, 1]
  uses_fbfetch_output: Annotated[Annotated[bool, ctypes.c_bool], 0, 1, 2]
  fbfetch_coherent: Annotated[Annotated[bool, ctypes.c_bool], 0, 1, 3]
  depth_layout: Annotated[enum_gl_frag_depth_layout, 4]
@c.record
class struct_ir3_shader_variant_cs(c.Struct):
  SIZE = 16
  req_local_mem: Annotated[Annotated[int, ctypes.c_uint32], 0]
  force_linear_dispatch: Annotated[Annotated[bool, ctypes.c_bool], 4]
  local_invocation_id: Annotated[uint32_t, 8]
  work_group_id: Annotated[uint32_t, 12]
# Shader-level (not variant-level) compute state: a prefix of the variant
# struct above.
@c.record
class struct_ir3_shader_cs(c.Struct):
  SIZE = 8
  req_local_mem: Annotated[Annotated[int, ctypes.c_uint32], 0]
  force_linear_dispatch: Annotated[Annotated[bool, ctypes.c_bool], 4]
# Vertex-shader state: cache of passthrough TCS shaders keyed by patch size
# (32 slots; see ir3_shader_passthrough_tcs binding below in this file).
@c.record
class struct_ir3_shader_vs(c.Struct):
  SIZE = 264
  passthrough_tcs_compiled: Annotated[Annotated[int, ctypes.c_uint32], 0]
  passthrough_tcs: Annotated[c.Array[c.POINTER[struct_ir3_shader], Literal[32]], 8]
# glibc pthread mutex mirror. The three fields all sit at offset 0 -- this
# reproduces the C union of struct/char[40]/long inside pthread_mutex_t.
@c.record
class pthread_mutex_t(c.Struct):
  SIZE = 40
  __data: Annotated[struct___pthread_mutex_s, 0]
  __size: Annotated[c.Array[Annotated[bytes, ctypes.c_char], Literal[40]], 0]
  __align: Annotated[Annotated[int, ctypes.c_int64], 0]
# C11 threads alias: mtx_t is pthread_mutex_t on this platform.
mtx_t: TypeAlias = pthread_mutex_t
@c.record
class struct___pthread_mutex_s(c.Struct):
  SIZE = 40
  __lock: Annotated[Annotated[int, ctypes.c_int32], 0]
  __count: Annotated[Annotated[int, ctypes.c_uint32], 4]
  __owner: Annotated[Annotated[int, ctypes.c_int32], 8]
  __nusers: Annotated[Annotated[int, ctypes.c_uint32], 12]
  __kind: Annotated[Annotated[int, ctypes.c_int32], 16]
  __spins: Annotated[Annotated[int, ctypes.c_int16], 20]
  __elision: Annotated[Annotated[int, ctypes.c_int16], 22]
  __list: Annotated[struct___pthread_internal_list, 24]
# Intrusive doubly-linked list node used by glibc's robust-mutex list.
@c.record
class struct___pthread_internal_list(c.Struct):
  SIZE = 16
  __prev: Annotated[c.POINTER[struct___pthread_internal_list], 0]
  __next: Annotated[c.POINTER[struct___pthread_internal_list], 8]
__pthread_list_t: TypeAlias = struct___pthread_internal_list
# 20-byte cache key (size matches a SHA-1 digest -- TODO confirm).
cache_key: TypeAlias = c.Array[Annotated[int, ctypes.c_ubyte], Literal[20]]
# --- native function bindings ---------------------------------------------
# Each stub below binds a symbol from the loaded shared library; the Python
# body is empty (`...`) and the annotations carry the C signature. Bodies are
# generated -- semantics live in the native ir3 library.
@dll.bind
def ir3_const_ensure_imm_size(v:c.POINTER[struct_ir3_shader_variant], size:Annotated[int, ctypes.c_uint32]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def ir3_const_imm_index_to_reg(const_state:c.POINTER[struct_ir3_const_state], i:Annotated[int, ctypes.c_uint32]) -> uint16_t: ...
@dll.bind
def ir3_const_find_imm(v:c.POINTER[struct_ir3_shader_variant], imm:uint32_t) -> uint16_t: ...
@dll.bind
def ir3_const_add_imm(v:c.POINTER[struct_ir3_shader_variant], imm:uint32_t) -> uint16_t: ...
# Shader/variant lifecycle: assemble, create/get variants, destroy, disasm.
@dll.bind
def ir3_shader_assemble(v:c.POINTER[struct_ir3_shader_variant]) -> ctypes.c_void_p: ...
@dll.bind
def ir3_shader_create_variant(shader:c.POINTER[struct_ir3_shader], key:c.POINTER[struct_ir3_shader_key], keep_ir:Annotated[bool, ctypes.c_bool]) -> c.POINTER[struct_ir3_shader_variant]: ...
@dll.bind
def ir3_shader_get_variant(shader:c.POINTER[struct_ir3_shader], key:c.POINTER[struct_ir3_shader_key], binning_pass:Annotated[bool, ctypes.c_bool], keep_ir:Annotated[bool, ctypes.c_bool], created:c.POINTER[Annotated[bool, ctypes.c_bool]]) -> c.POINTER[struct_ir3_shader_variant]: ...
@dll.bind
def ir3_shader_from_nir(compiler:c.POINTER[struct_ir3_compiler], nir:c.POINTER[nir_shader], options:c.POINTER[struct_ir3_shader_options], stream_output:c.POINTER[struct_ir3_stream_output_info]) -> c.POINTER[struct_ir3_shader]: ...
@dll.bind
def ir3_trim_constlen(variants:c.POINTER[c.POINTER[struct_ir3_shader_variant]], compiler:c.POINTER[struct_ir3_compiler]) -> uint32_t: ...
@dll.bind
def ir3_shader_passthrough_tcs(vs:c.POINTER[struct_ir3_shader], patch_vertices:Annotated[int, ctypes.c_uint32]) -> c.POINTER[struct_ir3_shader]: ...
@dll.bind
def ir3_shader_destroy(shader:c.POINTER[struct_ir3_shader]) -> None: ...
@dll.bind
def ir3_shader_disasm(so:c.POINTER[struct_ir3_shader_variant], bin:c.POINTER[uint32_t], out:c.POINTER[FILE]) -> None: ...
@dll.bind
def ir3_shader_outputs(so:c.POINTER[struct_ir3_shader]) -> uint64_t: ...
@dll.bind
def ir3_glsl_type_size(type:c.POINTER[struct_glsl_type], bindless:Annotated[bool, ctypes.c_bool]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def ir3_shader_get_subgroup_size(compiler:c.POINTER[struct_ir3_compiler], options:c.POINTER[struct_ir3_shader_options], stage:gl_shader_stage, subgroup_size:c.POINTER[Annotated[int, ctypes.c_uint32]], max_subgroup_size:c.POINTER[Annotated[int, ctypes.c_uint32]]) -> None: ...
# Inter-stage linkage map: up to 32 variables, a 128-bit varying mask
# (4 x uint32), and special locations (primid/viewid/clip planes).
@c.record
class struct_ir3_shader_linkage(c.Struct):
  SIZE = 152
  max_loc: Annotated[uint8_t, 0]
  cnt: Annotated[uint8_t, 1]
  varmask: Annotated[c.Array[uint32_t, Literal[4]], 4]
  var: Annotated[c.Array[struct_ir3_shader_linkage_var, Literal[32]], 20]
  primid_loc: Annotated[uint8_t, 148]
  viewid_loc: Annotated[uint8_t, 149]
  clip0_loc: Annotated[uint8_t, 150]
  clip1_loc: Annotated[uint8_t, 151]
# One linked variable: slot, register id, component mask, and location.
@c.record
class struct_ir3_shader_linkage_var(c.Struct):
  SIZE = 4
  slot: Annotated[uint8_t, 0]
  regid: Annotated[uint8_t, 1]
  compmask: Annotated[uint8_t, 2]
  loc: Annotated[uint8_t, 3]
# NIR lowering / optimization pass bindings. Most follow the NIR pass
# convention of returning bool = "made progress" -- inferred from the
# signatures, confirm against the Mesa ir3_nir headers.
@dll.bind
def print_raw(out:c.POINTER[FILE], data:c.POINTER[Annotated[int, ctypes.c_uint32]], size:size_t) -> None: ...
@dll.bind
def ir3_link_stream_out(l:c.POINTER[struct_ir3_shader_linkage], v:c.POINTER[struct_ir3_shader_variant]) -> None: ...
@dll.bind
def ir3_nir_apply_trig_workarounds(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def ir3_nir_lower_imul(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def ir3_nir_lower_io_offsets(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def ir3_nir_lower_load_barycentric_at_sample(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def ir3_nir_lower_load_barycentric_at_offset(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def ir3_nir_lower_push_consts_to_preamble(nir:c.POINTER[nir_shader], v:c.POINTER[struct_ir3_shader_variant]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def ir3_nir_lower_driver_params_to_ubo(nir:c.POINTER[nir_shader], v:c.POINTER[struct_ir3_shader_variant]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def ir3_nir_move_varying_inputs(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def ir3_nir_coord_offset(ssa:c.POINTER[nir_def], bary_type:c.POINTER[gl_system_value]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def ir3_nir_lower_tex_prefetch(shader:c.POINTER[nir_shader], prefetch_bary_type:c.POINTER[enum_ir3_bary]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def ir3_nir_lower_layer_id(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def ir3_nir_lower_frag_shading_rate(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def ir3_nir_lower_primitive_shading_rate(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
# Tessellation / geometry lowering passes take the pipeline topology.
@dll.bind
def ir3_nir_lower_to_explicit_output(shader:c.POINTER[nir_shader], v:c.POINTER[struct_ir3_shader_variant], topology:Annotated[int, ctypes.c_uint32]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def ir3_nir_lower_to_explicit_input(shader:c.POINTER[nir_shader], v:c.POINTER[struct_ir3_shader_variant]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def ir3_nir_lower_tess_ctrl(shader:c.POINTER[nir_shader], v:c.POINTER[struct_ir3_shader_variant], topology:Annotated[int, ctypes.c_uint32]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def ir3_nir_lower_tess_eval(shader:c.POINTER[nir_shader], v:c.POINTER[struct_ir3_shader_variant], topology:Annotated[int, ctypes.c_uint32]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def ir3_nir_lower_gs(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def ir3_supports_vectorized_nir_op(op:nir_op) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def ir3_nir_vectorize_filter(instr:c.POINTER[nir_instr], data:ctypes.c_void_p) -> uint8_t: ...
# 64-bit lowering passes.
@dll.bind
def ir3_nir_lower_64b_intrinsics(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def ir3_nir_lower_64b_undef(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def ir3_nir_lower_64b_global(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def ir3_nir_lower_64b_regs(shader:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def ir3_mem_access_size_align(intrin:nir_intrinsic_op, bytes:uint8_t, bit_size:uint8_t, align:uint32_t, align_offset:uint32_t, offset_is_const:Annotated[bool, ctypes.c_bool], access:enum_gl_access_qualifier, cb_data:ctypes.c_void_p) -> nir_mem_access_size_align: ...
@dll.bind
def ir3_nir_opt_branch_and_or_not(nir:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def ir3_nir_opt_triops_bitwise(nir:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def ir3_optimize_loop(compiler:c.POINTER[struct_ir3_compiler], options:c.POINTER[struct_ir3_shader_nir_options], s:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def ir3_nir_lower_io_vars_to_temporaries(s:c.POINTER[nir_shader]) -> None: ...
@dll.bind
def ir3_finalize_nir(compiler:c.POINTER[struct_ir3_compiler], options:c.POINTER[struct_ir3_shader_nir_options], s:c.POINTER[nir_shader]) -> None: ...
@dll.bind
def ir3_nir_post_finalize(shader:c.POINTER[struct_ir3_shader]) -> None: ...
@dll.bind
def ir3_nir_lower_variant(so:c.POINTER[struct_ir3_shader_variant], options:c.POINTER[struct_ir3_shader_nir_options], s:c.POINTER[nir_shader]) -> None: ...
# Constant-state and const-register allocation helpers.
@dll.bind
def ir3_setup_const_state(nir:c.POINTER[nir_shader], v:c.POINTER[struct_ir3_shader_variant], const_state:c.POINTER[struct_ir3_const_state]) -> None: ...
@dll.bind
def ir3_const_state_get_free_space(v:c.POINTER[struct_ir3_shader_variant], const_state:c.POINTER[struct_ir3_const_state], align_vec4:uint32_t) -> uint32_t: ...
@dll.bind
def ir3_const_alloc(const_alloc:c.POINTER[struct_ir3_const_allocations], type:enum_ir3_const_alloc_type, size_vec4:uint32_t, align_vec4:uint32_t) -> None: ...
@dll.bind
def ir3_const_reserve_space(const_alloc:c.POINTER[struct_ir3_const_allocations], type:enum_ir3_const_alloc_type, size_vec4:uint32_t, align_vec4:uint32_t) -> None: ...
@dll.bind
def ir3_const_free_reserved_space(const_alloc:c.POINTER[struct_ir3_const_allocations], type:enum_ir3_const_alloc_type) -> None: ...
@dll.bind
def ir3_const_alloc_all_reserved_space(const_alloc:c.POINTER[struct_ir3_const_allocations]) -> None: ...
@dll.bind
def ir3_nir_scan_driver_consts(compiler:c.POINTER[struct_ir3_compiler], shader:c.POINTER[nir_shader], image_dims:c.POINTER[struct_ir3_const_image_dims]) -> uint32_t: ...
@dll.bind
def ir3_alloc_driver_params(const_alloc:c.POINTER[struct_ir3_const_allocations], num_driver_params:c.POINTER[uint32_t], compiler:c.POINTER[struct_ir3_compiler], shader_type:enum_pipe_shader_type) -> None: ...
@dll.bind
def ir3_nir_lower_load_constant(nir:c.POINTER[nir_shader], v:c.POINTER[struct_ir3_shader_variant]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def ir3_nir_analyze_ubo_ranges(nir:c.POINTER[nir_shader], v:c.POINTER[struct_ir3_shader_variant]) -> None: ...
@dll.bind
def ir3_nir_lower_ubo_loads(nir:c.POINTER[nir_shader], v:c.POINTER[struct_ir3_shader_variant]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def ir3_nir_lower_const_global_loads(nir:c.POINTER[nir_shader], v:c.POINTER[struct_ir3_shader_variant]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def ir3_nir_fixup_load_const_ir3(nir:c.POINTER[nir_shader]) -> Annotated[bool, ctypes.c_bool]: ...
# Preamble (early shader prologue) optimization passes.
@dll.bind
def ir3_nir_opt_preamble(nir:c.POINTER[nir_shader], v:c.POINTER[struct_ir3_shader_variant]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def ir3_nir_opt_prefetch_descriptors(nir:c.POINTER[nir_shader], v:c.POINTER[struct_ir3_shader_variant]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def ir3_nir_lower_preamble(nir:c.POINTER[nir_shader], v:c.POINTER[struct_ir3_shader_variant]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def ir3_nir_try_propagate_bit_shift(b:c.POINTER[nir_builder], offset:c.POINTER[nir_def], shift:int32_t) -> c.POINTER[nir_def]: ...
@dll.bind
def ir3_nir_lower_subgroups_filter(instr:c.POINTER[nir_instr], data:ctypes.c_void_p) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def ir3_nir_lower_shuffle(nir:c.POINTER[nir_shader], shader:c.POINTER[struct_ir3_shader]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def ir3_nir_opt_subgroups(nir:c.POINTER[nir_shader], v:c.POINTER[struct_ir3_shader_variant]) -> Annotated[bool, ctypes.c_bool]: ...
# Driver UBO helpers: create/load driver-managed uniform buffers in NIR.
@dll.bind
def ir3_get_shared_driver_ubo(b:c.POINTER[nir_builder], ubo:c.POINTER[struct_ir3_driver_ubo]) -> c.POINTER[nir_def]: ...
@dll.bind
def ir3_get_driver_ubo(b:c.POINTER[nir_builder], ubo:c.POINTER[struct_ir3_driver_ubo]) -> c.POINTER[nir_def]: ...
@dll.bind
def ir3_get_driver_consts_ubo(b:c.POINTER[nir_builder], v:c.POINTER[struct_ir3_shader_variant]) -> c.POINTER[nir_def]: ...
@dll.bind
def ir3_update_driver_ubo(nir:c.POINTER[nir_shader], ubo:c.POINTER[struct_ir3_driver_ubo], name:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> None: ...
@dll.bind
def ir3_load_shared_driver_ubo(b:c.POINTER[nir_builder], components:Annotated[int, ctypes.c_uint32], ubo:c.POINTER[struct_ir3_driver_ubo], offset:Annotated[int, ctypes.c_uint32]) -> c.POINTER[nir_def]: ...
@dll.bind
def ir3_load_driver_ubo(b:c.POINTER[nir_builder], components:Annotated[int, ctypes.c_uint32], ubo:c.POINTER[struct_ir3_driver_ubo], offset:Annotated[int, ctypes.c_uint32]) -> c.POINTER[nir_def]: ...
@dll.bind
def ir3_load_driver_ubo_indirect(b:c.POINTER[nir_builder], components:Annotated[int, ctypes.c_uint32], ubo:c.POINTER[struct_ir3_driver_ubo], base:Annotated[int, ctypes.c_uint32], offset:c.POINTER[nir_def], range:Annotated[int, ctypes.c_uint32]) -> c.POINTER[nir_def]: ...
@dll.bind
def ir3_def_is_rematerializable_for_preamble(_def:c.POINTER[nir_def], preamble_defs:c.POINTER[c.POINTER[nir_def]]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def ir3_rematerialize_def_for_preamble(b:c.POINTER[nir_builder], _def:c.POINTER[nir_def], instr_set:c.POINTER[struct_set], preamble_defs:c.POINTER[c.POINTER[nir_def]]) -> c.POINTER[nir_def]: ...
# Out-parameter struct for ir3_get_driver_param_info below.
@c.record
class struct_driver_param_info(c.Struct):
  SIZE = 8
  offset: Annotated[uint32_t, 0]
  extra_size: Annotated[uint32_t, 4]
@dll.bind
def ir3_get_driver_param_info(shader:c.POINTER[nir_shader], intr:c.POINTER[nir_intrinsic_instr], param_info:c.POINTER[struct_driver_param_info]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def ir3_nir_max_imm_offset(intrin:c.POINTER[nir_intrinsic_instr], data:ctypes.c_void_p) -> uint32_t: ...
@dll.bind
def ir3_nir_intrinsic_barycentric_sysval(intr:c.POINTER[nir_intrinsic_instr]) -> gl_system_value: ...
# GLSL type-system bindings: singleton refcounting, (de)serialization to a
# blob, and small query helpers.
@dll.bind
def glsl_type_singleton_init_or_ref() -> None: ...
@dll.bind
def glsl_type_singleton_decref() -> None: ...
@dll.bind
def encode_type_to_blob(blob:c.POINTER[struct_blob], type:c.POINTER[glsl_type]) -> None: ...
@dll.bind
def decode_type_from_blob(blob:c.POINTER[struct_blob_reader]) -> c.POINTER[glsl_type]: ...
@dll.bind
def glsl_apply_signedness_to_base_type(type:enum_glsl_base_type, signedness:Annotated[bool, ctypes.c_bool]) -> enum_glsl_base_type: ...
@dll.bind
def glsl_get_sampler_dim_coordinate_components(dim:enum_glsl_sampler_dim) -> Annotated[int, ctypes.c_int32]: ...
# Matrix storage layout of a GLSL interface member.
class enum_glsl_matrix_layout(Annotated[int, ctypes.c_uint32], c.Enum): pass
GLSL_MATRIX_LAYOUT_INHERITED = enum_glsl_matrix_layout.define('GLSL_MATRIX_LAYOUT_INHERITED', 0)
GLSL_MATRIX_LAYOUT_COLUMN_MAJOR = enum_glsl_matrix_layout.define('GLSL_MATRIX_LAYOUT_COLUMN_MAJOR', 1)
GLSL_MATRIX_LAYOUT_ROW_MAJOR = enum_glsl_matrix_layout.define('GLSL_MATRIX_LAYOUT_ROW_MAJOR', 2)
# Anonymous C enum (generator names it _anonenum6): GLSL ES precision
# qualifiers.
class _anonenum6(Annotated[int, ctypes.c_uint32], c.Enum): pass
GLSL_PRECISION_NONE = _anonenum6.define('GLSL_PRECISION_NONE', 0)
GLSL_PRECISION_HIGH = _anonenum6.define('GLSL_PRECISION_HIGH', 1)
GLSL_PRECISION_MEDIUM = _anonenum6.define('GLSL_PRECISION_MEDIUM', 2)
GLSL_PRECISION_LOW = _anonenum6.define('GLSL_PRECISION_LOW', 3)
# Cooperative-matrix usage role (A/B operand or accumulator).
class enum_glsl_cmat_use(Annotated[int, ctypes.c_uint32], c.Enum): pass
GLSL_CMAT_USE_NONE = enum_glsl_cmat_use.define('GLSL_CMAT_USE_NONE', 0)
GLSL_CMAT_USE_A = enum_glsl_cmat_use.define('GLSL_CMAT_USE_A', 1)
GLSL_CMAT_USE_B = enum_glsl_cmat_use.define('GLSL_CMAT_USE_B', 2)
GLSL_CMAT_USE_ACCUMULATOR = enum_glsl_cmat_use.define('GLSL_CMAT_USE_ACCUMULATOR', 3)
@dll.bind
def glsl_get_type_name(type:c.POINTER[glsl_type]) -> c.POINTER[Annotated[bytes, ctypes.c_char]]: ...
@dll.bind
def glsl_type_is_vector(t:c.POINTER[glsl_type]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def glsl_type_is_scalar(t:c.POINTER[glsl_type]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def glsl_type_is_vector_or_scalar(t:c.POINTER[glsl_type]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def glsl_type_is_matrix(t:c.POINTER[glsl_type]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def glsl_type_is_array_or_matrix(t:c.POINTER[glsl_type]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def glsl_type_is_dual_slot(t:c.POINTER[glsl_type]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def glsl_type_is_leaf(type:c.POINTER[glsl_type]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def glsl_get_bare_type(t:c.POINTER[glsl_type]) -> c.POINTER[glsl_type]: ...
@dll.bind
def glsl_get_scalar_type(t:c.POINTER[glsl_type]) -> c.POINTER[glsl_type]: ...
@dll.bind
def glsl_get_base_glsl_type(t:c.POINTER[glsl_type]) -> c.POINTER[glsl_type]: ...
@dll.bind
def glsl_get_length(t:c.POINTER[glsl_type]) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def glsl_type_wrap_in_arrays(t:c.POINTER[glsl_type], arrays:c.POINTER[glsl_type]) -> c.POINTER[glsl_type]: ...
@dll.bind
def glsl_get_aoa_size(t:c.POINTER[glsl_type]) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def glsl_get_array_element(t:c.POINTER[glsl_type]) -> c.POINTER[glsl_type]: ...
@dll.bind
def glsl_without_array(t:c.POINTER[glsl_type]) -> c.POINTER[glsl_type]: ...
@dll.bind
def glsl_without_array_or_matrix(t:c.POINTER[glsl_type]) -> c.POINTER[glsl_type]: ...
@dll.bind
def glsl_get_cmat_element(t:c.POINTER[glsl_type]) -> c.POINTER[glsl_type]: ...
@dll.bind
def glsl_get_cmat_description(t:c.POINTER[glsl_type]) -> c.POINTER[struct_glsl_cmat_description]: ...
@dll.bind
def glsl_atomic_size(type:c.POINTER[glsl_type]) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def glsl_type_contains_32bit(t:c.POINTER[glsl_type]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def glsl_type_contains_64bit(t:c.POINTER[glsl_type]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def glsl_type_contains_image(t:c.POINTER[glsl_type]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def glsl_contains_atomic(t:c.POINTER[glsl_type]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def glsl_contains_double(t:c.POINTER[glsl_type]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def glsl_contains_integer(t:c.POINTER[glsl_type]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def glsl_contains_opaque(t:c.POINTER[glsl_type]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def glsl_contains_sampler(t:c.POINTER[glsl_type]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def glsl_contains_array(t:c.POINTER[glsl_type]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def glsl_contains_subroutine(t:c.POINTER[glsl_type]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def glsl_get_sampler_coordinate_components(t:c.POINTER[glsl_type]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def glsl_type_compare_no_precision(a:c.POINTER[glsl_type], b:c.POINTER[glsl_type]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def glsl_record_compare(a:c.POINTER[glsl_type], b:c.POINTER[glsl_type], match_name:Annotated[bool, ctypes.c_bool], match_locations:Annotated[bool, ctypes.c_bool], match_precision:Annotated[bool, ctypes.c_bool]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def glsl_get_struct_field(t:c.POINTER[glsl_type], index:Annotated[int, ctypes.c_uint32]) -> c.POINTER[glsl_type]: ...
@dll.bind
def glsl_get_struct_field_data(t:c.POINTER[glsl_type], index:Annotated[int, ctypes.c_uint32]) -> c.POINTER[glsl_struct_field]: ...
@dll.bind
def glsl_get_struct_location_offset(t:c.POINTER[glsl_type], length:Annotated[int, ctypes.c_uint32]) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def glsl_get_field_index(t:c.POINTER[glsl_type], name:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def glsl_get_field_type(t:c.POINTER[glsl_type], name:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> c.POINTER[glsl_type]: ...
# Generated bindings for glsl_type constructors: vector types of every base
# type/width, explicit-layout simple types, sampler/texture/image types,
# arrays, cooperative-matrix types, and structs.
@dll.bind
def glsl_vec_type(components:Annotated[int, ctypes.c_uint32]) -> c.POINTER[glsl_type]: ...
@dll.bind
def glsl_f16vec_type(components:Annotated[int, ctypes.c_uint32]) -> c.POINTER[glsl_type]: ...
@dll.bind
def glsl_bf16vec_type(components:Annotated[int, ctypes.c_uint32]) -> c.POINTER[glsl_type]: ...
@dll.bind
def glsl_e4m3fnvec_type(components:Annotated[int, ctypes.c_uint32]) -> c.POINTER[glsl_type]: ...
@dll.bind
def glsl_e5m2vec_type(components:Annotated[int, ctypes.c_uint32]) -> c.POINTER[glsl_type]: ...
@dll.bind
def glsl_dvec_type(components:Annotated[int, ctypes.c_uint32]) -> c.POINTER[glsl_type]: ...
@dll.bind
def glsl_ivec_type(components:Annotated[int, ctypes.c_uint32]) -> c.POINTER[glsl_type]: ...
@dll.bind
def glsl_uvec_type(components:Annotated[int, ctypes.c_uint32]) -> c.POINTER[glsl_type]: ...
@dll.bind
def glsl_bvec_type(components:Annotated[int, ctypes.c_uint32]) -> c.POINTER[glsl_type]: ...
@dll.bind
def glsl_i64vec_type(components:Annotated[int, ctypes.c_uint32]) -> c.POINTER[glsl_type]: ...
@dll.bind
def glsl_u64vec_type(components:Annotated[int, ctypes.c_uint32]) -> c.POINTER[glsl_type]: ...
@dll.bind
def glsl_i16vec_type(components:Annotated[int, ctypes.c_uint32]) -> c.POINTER[glsl_type]: ...
@dll.bind
def glsl_u16vec_type(components:Annotated[int, ctypes.c_uint32]) -> c.POINTER[glsl_type]: ...
@dll.bind
def glsl_i8vec_type(components:Annotated[int, ctypes.c_uint32]) -> c.POINTER[glsl_type]: ...
@dll.bind
def glsl_u8vec_type(components:Annotated[int, ctypes.c_uint32]) -> c.POINTER[glsl_type]: ...
@dll.bind
def glsl_simple_explicit_type(base_type:Annotated[int, ctypes.c_uint32], rows:Annotated[int, ctypes.c_uint32], columns:Annotated[int, ctypes.c_uint32], explicit_stride:Annotated[int, ctypes.c_uint32], row_major:Annotated[bool, ctypes.c_bool], explicit_alignment:Annotated[int, ctypes.c_uint32]) -> c.POINTER[glsl_type]: ...
@dll.bind
def glsl_sampler_type(dim:enum_glsl_sampler_dim, shadow:Annotated[bool, ctypes.c_bool], array:Annotated[bool, ctypes.c_bool], type:enum_glsl_base_type) -> c.POINTER[glsl_type]: ...
@dll.bind
def glsl_bare_sampler_type() -> c.POINTER[glsl_type]: ...
@dll.bind
def glsl_bare_shadow_sampler_type() -> c.POINTER[glsl_type]: ...
@dll.bind
def glsl_texture_type(dim:enum_glsl_sampler_dim, array:Annotated[bool, ctypes.c_bool], type:enum_glsl_base_type) -> c.POINTER[glsl_type]: ...
@dll.bind
def glsl_image_type(dim:enum_glsl_sampler_dim, array:Annotated[bool, ctypes.c_bool], type:enum_glsl_base_type) -> c.POINTER[glsl_type]: ...
@dll.bind
def glsl_array_type(element:c.POINTER[glsl_type], array_size:Annotated[int, ctypes.c_uint32], explicit_stride:Annotated[int, ctypes.c_uint32]) -> c.POINTER[glsl_type]: ...
@dll.bind
def glsl_cmat_type(desc:c.POINTER[struct_glsl_cmat_description]) -> c.POINTER[glsl_type]: ...
@dll.bind
def glsl_struct_type_with_explicit_alignment(fields:c.POINTER[glsl_struct_field], num_fields:Annotated[int, ctypes.c_uint32], name:c.POINTER[Annotated[bytes, ctypes.c_char]], packed:Annotated[bool, ctypes.c_bool], explicit_alignment:Annotated[int, ctypes.c_uint32]) -> c.POINTER[glsl_type]: ...
# GLSL interface-block packing modes (uint32-backed generated enum);
# members are registered via the generator's Enum.define helper.
class enum_glsl_interface_packing(Annotated[int, ctypes.c_uint32], c.Enum): pass
GLSL_INTERFACE_PACKING_STD140 = enum_glsl_interface_packing.define('GLSL_INTERFACE_PACKING_STD140', 0)
GLSL_INTERFACE_PACKING_SHARED = enum_glsl_interface_packing.define('GLSL_INTERFACE_PACKING_SHARED', 1)
GLSL_INTERFACE_PACKING_PACKED = enum_glsl_interface_packing.define('GLSL_INTERFACE_PACKING_PACKED', 2)
GLSL_INTERFACE_PACKING_STD430 = enum_glsl_interface_packing.define('GLSL_INTERFACE_PACKING_STD430', 3)
# Bindings for interface/subroutine type construction and whole-type
# transformations (row/column extraction, explicit-layout conversion,
# 16-bit conversion, vector replacement, channel extraction).
@dll.bind
def glsl_interface_type(fields:c.POINTER[glsl_struct_field], num_fields:Annotated[int, ctypes.c_uint32], packing:enum_glsl_interface_packing, row_major:Annotated[bool, ctypes.c_bool], block_name:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> c.POINTER[glsl_type]: ...
@dll.bind
def glsl_subroutine_type(subroutine_name:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> c.POINTER[glsl_type]: ...
@dll.bind
def glsl_get_row_type(t:c.POINTER[glsl_type]) -> c.POINTER[glsl_type]: ...
@dll.bind
def glsl_get_column_type(t:c.POINTER[glsl_type]) -> c.POINTER[glsl_type]: ...
@dll.bind
def glsl_get_explicit_type_for_size_align(type:c.POINTER[glsl_type], type_info:glsl_type_size_align_func, size:c.POINTER[Annotated[int, ctypes.c_uint32]], alignment:c.POINTER[Annotated[int, ctypes.c_uint32]]) -> c.POINTER[glsl_type]: ...
@dll.bind
def glsl_type_replace_vec3_with_vec4(type:c.POINTER[glsl_type]) -> c.POINTER[glsl_type]: ...
@dll.bind
def glsl_float16_type(t:c.POINTER[glsl_type]) -> c.POINTER[glsl_type]: ...
@dll.bind
def glsl_int16_type(t:c.POINTER[glsl_type]) -> c.POINTER[glsl_type]: ...
@dll.bind
def glsl_uint16_type(t:c.POINTER[glsl_type]) -> c.POINTER[glsl_type]: ...
@dll.bind
def glsl_type_to_16bit(old_type:c.POINTER[glsl_type]) -> c.POINTER[glsl_type]: ...
@dll.bind
def glsl_replace_vector_type(t:c.POINTER[glsl_type], components:Annotated[int, ctypes.c_uint32]) -> c.POINTER[glsl_type]: ...
@dll.bind
def glsl_channel_type(t:c.POINTER[glsl_type]) -> c.POINTER[glsl_type]: ...
# Bindings for type arithmetic and layout/size queries: slot counting,
# OpenCL size/alignment, std140/std430 layout rules, and size/align
# callback helpers that fill out-parameter uint32 pointers.
@dll.bind
def glsl_get_mul_type(type_a:c.POINTER[glsl_type], type_b:c.POINTER[glsl_type]) -> c.POINTER[glsl_type]: ...
@dll.bind
def glsl_type_get_sampler_count(t:c.POINTER[glsl_type]) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def glsl_type_get_texture_count(t:c.POINTER[glsl_type]) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def glsl_type_get_image_count(t:c.POINTER[glsl_type]) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def glsl_count_vec4_slots(t:c.POINTER[glsl_type], is_gl_vertex_input:Annotated[bool, ctypes.c_bool], is_bindless:Annotated[bool, ctypes.c_bool]) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def glsl_count_dword_slots(t:c.POINTER[glsl_type], is_bindless:Annotated[bool, ctypes.c_bool]) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def glsl_get_component_slots(t:c.POINTER[glsl_type]) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def glsl_get_component_slots_aligned(t:c.POINTER[glsl_type], offset:Annotated[int, ctypes.c_uint32]) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def glsl_varying_count(t:c.POINTER[glsl_type]) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def glsl_type_uniform_locations(t:c.POINTER[glsl_type]) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def glsl_get_cl_size(t:c.POINTER[glsl_type]) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def glsl_get_cl_alignment(t:c.POINTER[glsl_type]) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def glsl_get_cl_type_size_align(t:c.POINTER[glsl_type], size:c.POINTER[Annotated[int, ctypes.c_uint32]], align:c.POINTER[Annotated[int, ctypes.c_uint32]]) -> None: ...
@dll.bind
def glsl_get_internal_ifc_packing(t:c.POINTER[glsl_type], std430_supported:Annotated[bool, ctypes.c_bool]) -> enum_glsl_interface_packing: ...
@dll.bind
def glsl_get_std140_base_alignment(t:c.POINTER[glsl_type], row_major:Annotated[bool, ctypes.c_bool]) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def glsl_get_std140_size(t:c.POINTER[glsl_type], row_major:Annotated[bool, ctypes.c_bool]) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def glsl_get_std430_array_stride(t:c.POINTER[glsl_type], row_major:Annotated[bool, ctypes.c_bool]) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def glsl_get_std430_base_alignment(t:c.POINTER[glsl_type], row_major:Annotated[bool, ctypes.c_bool]) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def glsl_get_std430_size(t:c.POINTER[glsl_type], row_major:Annotated[bool, ctypes.c_bool]) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def glsl_get_explicit_size(t:c.POINTER[glsl_type], align_to_stride:Annotated[bool, ctypes.c_bool]) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def glsl_get_explicit_std140_type(t:c.POINTER[glsl_type], row_major:Annotated[bool, ctypes.c_bool]) -> c.POINTER[glsl_type]: ...
@dll.bind
def glsl_get_explicit_std430_type(t:c.POINTER[glsl_type], row_major:Annotated[bool, ctypes.c_bool]) -> c.POINTER[glsl_type]: ...
@dll.bind
def glsl_size_align_handle_array_and_structs(type:c.POINTER[glsl_type], size_align:glsl_type_size_align_func, size:c.POINTER[Annotated[int, ctypes.c_uint32]], align:c.POINTER[Annotated[int, ctypes.c_uint32]]) -> None: ...
@dll.bind
def glsl_get_natural_size_align_bytes(t:c.POINTER[glsl_type], size:c.POINTER[Annotated[int, ctypes.c_uint32]], align:c.POINTER[Annotated[int, ctypes.c_uint32]]) -> None: ...
@dll.bind
def glsl_get_word_size_align_bytes(type:c.POINTER[glsl_type], size:c.POINTER[Annotated[int, ctypes.c_uint32]], align:c.POINTER[Annotated[int, ctypes.c_uint32]]) -> None: ...
@dll.bind
def glsl_get_vec4_size_align_bytes(type:c.POINTER[glsl_type], size:c.POINTER[Annotated[int, ctypes.c_uint32]], align:c.POINTER[Annotated[int, ctypes.c_uint32]]) -> None: ...
# Bindings for the blob serialization API: a writer (struct_blob) with
# typed write/overwrite/reserve operations and a reader
# (struct_blob_reader) with the matching typed reads.
@dll.bind
def blob_init(blob:c.POINTER[struct_blob]) -> None: ...
@dll.bind
def blob_init_fixed(blob:c.POINTER[struct_blob], data:ctypes.c_void_p, size:size_t) -> None: ...
@dll.bind
def blob_finish_get_buffer(blob:c.POINTER[struct_blob], buffer:c.POINTER[ctypes.c_void_p], size:c.POINTER[size_t]) -> None: ...
@dll.bind
def blob_align(blob:c.POINTER[struct_blob], alignment:size_t) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def blob_write_bytes(blob:c.POINTER[struct_blob], bytes:ctypes.c_void_p, to_write:size_t) -> Annotated[bool, ctypes.c_bool]: ...
# Alias for C's intptr_t, used by the reserve/read-intptr entry points below.
intptr_t: TypeAlias = Annotated[int, ctypes.c_int64]
@dll.bind
def blob_reserve_bytes(blob:c.POINTER[struct_blob], to_write:size_t) -> intptr_t: ...
@dll.bind
def blob_reserve_uint32(blob:c.POINTER[struct_blob]) -> intptr_t: ...
@dll.bind
def blob_reserve_intptr(blob:c.POINTER[struct_blob]) -> intptr_t: ...
@dll.bind
def blob_overwrite_bytes(blob:c.POINTER[struct_blob], offset:size_t, bytes:ctypes.c_void_p, to_write:size_t) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def blob_write_uint8(blob:c.POINTER[struct_blob], value:uint8_t) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def blob_overwrite_uint8(blob:c.POINTER[struct_blob], offset:size_t, value:uint8_t) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def blob_write_uint16(blob:c.POINTER[struct_blob], value:uint16_t) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def blob_write_uint32(blob:c.POINTER[struct_blob], value:uint32_t) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def blob_overwrite_uint32(blob:c.POINTER[struct_blob], offset:size_t, value:uint32_t) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def blob_write_uint64(blob:c.POINTER[struct_blob], value:uint64_t) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def blob_write_intptr(blob:c.POINTER[struct_blob], value:intptr_t) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def blob_overwrite_intptr(blob:c.POINTER[struct_blob], offset:size_t, value:intptr_t) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def blob_write_string(blob:c.POINTER[struct_blob], str:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def blob_reader_init(blob:c.POINTER[struct_blob_reader], data:ctypes.c_void_p, size:size_t) -> None: ...
@dll.bind
def blob_reader_align(blob:c.POINTER[struct_blob_reader], alignment:size_t) -> None: ...
@dll.bind
def blob_read_bytes(blob:c.POINTER[struct_blob_reader], size:size_t) -> ctypes.c_void_p: ...
@dll.bind
def blob_copy_bytes(blob:c.POINTER[struct_blob_reader], dest:ctypes.c_void_p, size:size_t) -> None: ...
@dll.bind
def blob_skip_bytes(blob:c.POINTER[struct_blob_reader], size:size_t) -> None: ...
@dll.bind
def blob_read_uint8(blob:c.POINTER[struct_blob_reader]) -> uint8_t: ...
@dll.bind
def blob_read_uint16(blob:c.POINTER[struct_blob_reader]) -> uint16_t: ...
@dll.bind
def blob_read_uint32(blob:c.POINTER[struct_blob_reader]) -> uint32_t: ...
@dll.bind
def blob_read_uint64(blob:c.POINTER[struct_blob_reader]) -> uint64_t: ...
@dll.bind
def blob_read_intptr(blob:c.POINTER[struct_blob_reader]) -> intptr_t: ...
@dll.bind
def blob_read_string(blob:c.POINTER[struct_blob_reader]) -> c.POINTER[Annotated[bytes, ctypes.c_char]]: ...
# Bindings for the ralloc hierarchical allocator: context creation,
# (re)allocation with optional zeroing, ownership transfer
# (steal/adopt), destructors, and string helpers.
@dll.bind
def ralloc_context(ctx:ctypes.c_void_p) -> ctypes.c_void_p: ...
@dll.bind
def ralloc_size(ctx:ctypes.c_void_p, size:size_t) -> ctypes.c_void_p: ...
@dll.bind
def rzalloc_size(ctx:ctypes.c_void_p, size:size_t) -> ctypes.c_void_p: ...
@dll.bind
def reralloc_size(ctx:ctypes.c_void_p, ptr:ctypes.c_void_p, size:size_t) -> ctypes.c_void_p: ...
@dll.bind
def rerzalloc_size(ctx:ctypes.c_void_p, ptr:ctypes.c_void_p, old_size:size_t, new_size:size_t) -> ctypes.c_void_p: ...
@dll.bind
def ralloc_array_size(ctx:ctypes.c_void_p, size:size_t, count:Annotated[int, ctypes.c_uint32]) -> ctypes.c_void_p: ...
@dll.bind
def rzalloc_array_size(ctx:ctypes.c_void_p, size:size_t, count:Annotated[int, ctypes.c_uint32]) -> ctypes.c_void_p: ...
@dll.bind
def reralloc_array_size(ctx:ctypes.c_void_p, ptr:ctypes.c_void_p, size:size_t, count:Annotated[int, ctypes.c_uint32]) -> ctypes.c_void_p: ...
@dll.bind
def rerzalloc_array_size(ctx:ctypes.c_void_p, ptr:ctypes.c_void_p, size:size_t, old_count:Annotated[int, ctypes.c_uint32], new_count:Annotated[int, ctypes.c_uint32]) -> ctypes.c_void_p: ...
@dll.bind
def ralloc_free(ptr:ctypes.c_void_p) -> None: ...
@dll.bind
def ralloc_steal(new_ctx:ctypes.c_void_p, ptr:ctypes.c_void_p) -> None: ...
@dll.bind
def ralloc_adopt(new_ctx:ctypes.c_void_p, old_ctx:ctypes.c_void_p) -> None: ...
@dll.bind
def ralloc_parent(ptr:ctypes.c_void_p) -> ctypes.c_void_p: ...
@dll.bind
def ralloc_set_destructor(ptr:ctypes.c_void_p, destructor:c.CFUNCTYPE[None, [ctypes.c_void_p]]) -> None: ...
@dll.bind
def ralloc_memdup(ctx:ctypes.c_void_p, mem:ctypes.c_void_p, n:size_t) -> ctypes.c_void_p: ...
@dll.bind
def ralloc_strdup(ctx:ctypes.c_void_p, str:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> c.POINTER[Annotated[bytes, ctypes.c_char]]: ...
@dll.bind
def ralloc_strndup(ctx:ctypes.c_void_p, str:c.POINTER[Annotated[bytes, ctypes.c_char]], n:size_t) -> c.POINTER[Annotated[bytes, ctypes.c_char]]: ...
@dll.bind
def ralloc_strcat(dest:c.POINTER[c.POINTER[Annotated[bytes, ctypes.c_char]]], str:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def ralloc_strncat(dest:c.POINTER[c.POINTER[Annotated[bytes, ctypes.c_char]]], str:c.POINTER[Annotated[bytes, ctypes.c_char]], n:size_t) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def ralloc_str_append(dest:c.POINTER[c.POINTER[Annotated[bytes, ctypes.c_char]]], str:c.POINTER[Annotated[bytes, ctypes.c_char]], existing_length:size_t, str_size:size_t) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def ralloc_asprintf(ctx:ctypes.c_void_p, fmt:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> c.POINTER[Annotated[bytes, ctypes.c_char]]: ...
# Layout of the x86-64 System V va_list element (24 bytes); field offsets
# are explicit in the Annotated second argument. `va_list` is the usual
# one-element array of this tag.
@c.record
class struct___va_list_tag(c.Struct):
  SIZE = 24
  gp_offset: Annotated[Annotated[int, ctypes.c_uint32], 0]
  fp_offset: Annotated[Annotated[int, ctypes.c_uint32], 4]
  overflow_arg_area: Annotated[ctypes.c_void_p, 8]
  reg_save_area: Annotated[ctypes.c_void_p, 16]
va_list: TypeAlias = c.Array[struct___va_list_tag, Literal[1]]
# printf-style ralloc string builders; the v* variants take a va_list
# (defined just above) instead of variadic arguments.
@dll.bind
def ralloc_vasprintf(ctx:ctypes.c_void_p, fmt:c.POINTER[Annotated[bytes, ctypes.c_char]], args:va_list) -> c.POINTER[Annotated[bytes, ctypes.c_char]]: ...
@dll.bind
def ralloc_asprintf_rewrite_tail(str:c.POINTER[c.POINTER[Annotated[bytes, ctypes.c_char]]], start:c.POINTER[size_t], fmt:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def ralloc_vasprintf_rewrite_tail(str:c.POINTER[c.POINTER[Annotated[bytes, ctypes.c_char]]], start:c.POINTER[size_t], fmt:c.POINTER[Annotated[bytes, ctypes.c_char]], args:va_list) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def ralloc_asprintf_append(str:c.POINTER[c.POINTER[Annotated[bytes, ctypes.c_char]]], fmt:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def ralloc_vasprintf_append(str:c.POINTER[c.POINTER[Annotated[bytes, ctypes.c_char]]], fmt:c.POINTER[Annotated[bytes, ctypes.c_char]], args:va_list) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def ralloc_total_size(ptr:ctypes.c_void_p) -> size_t: ...
# Bindings for the gc_ctx allocator: aligned (z)alloc, free, context
# lookup, and the mark/sweep cycle (sweep_start -> mark_live -> sweep_end).
@dll.bind
def gc_context(parent:ctypes.c_void_p) -> c.POINTER[gc_ctx]: ...
@dll.bind
def gc_alloc_size(ctx:c.POINTER[gc_ctx], size:size_t, alignment:size_t) -> ctypes.c_void_p: ...
@dll.bind
def gc_zalloc_size(ctx:c.POINTER[gc_ctx], size:size_t, alignment:size_t) -> ctypes.c_void_p: ...
@dll.bind
def gc_free(ptr:ctypes.c_void_p) -> None: ...
@dll.bind
def gc_get_context(ptr:ctypes.c_void_p) -> c.POINTER[gc_ctx]: ...
@dll.bind
def gc_sweep_start(ctx:c.POINTER[gc_ctx]) -> None: ...
@dll.bind
def gc_mark_live(ctx:c.POINTER[gc_ctx], mem:ctypes.c_void_p) -> None: ...
@dll.bind
def gc_sweep_end(ctx:c.POINTER[gc_ctx]) -> None: ...
# Bindings for the linear (arena) allocator built on top of ralloc:
# opaque context, child allocations, array/string helpers, and
# printf-style appenders. struct_linear_ctx is opaque (no fields).
class struct_linear_ctx(ctypes.Structure): pass
linear_ctx: TypeAlias = struct_linear_ctx
@dll.bind
def linear_alloc_child(ctx:c.POINTER[linear_ctx], size:Annotated[int, ctypes.c_uint32]) -> ctypes.c_void_p: ...
# Options struct for linear_context_with_opts (4 bytes, one uint32 field).
@c.record
class linear_opts(c.Struct):
  SIZE = 4
  min_buffer_size: Annotated[Annotated[int, ctypes.c_uint32], 0]
@dll.bind
def linear_context(ralloc_ctx:ctypes.c_void_p) -> c.POINTER[linear_ctx]: ...
@dll.bind
def linear_context_with_opts(ralloc_ctx:ctypes.c_void_p, opts:c.POINTER[linear_opts]) -> c.POINTER[linear_ctx]: ...
@dll.bind
def linear_zalloc_child(ctx:c.POINTER[linear_ctx], size:Annotated[int, ctypes.c_uint32]) -> ctypes.c_void_p: ...
@dll.bind
def linear_free_context(ctx:c.POINTER[linear_ctx]) -> None: ...
@dll.bind
def ralloc_steal_linear_context(new_ralloc_ctx:ctypes.c_void_p, ctx:c.POINTER[linear_ctx]) -> None: ...
@dll.bind
def ralloc_parent_of_linear_context(ctx:c.POINTER[linear_ctx]) -> ctypes.c_void_p: ...
@dll.bind
def linear_alloc_child_array(ctx:c.POINTER[linear_ctx], size:size_t, count:Annotated[int, ctypes.c_uint32]) -> ctypes.c_void_p: ...
@dll.bind
def linear_zalloc_child_array(ctx:c.POINTER[linear_ctx], size:size_t, count:Annotated[int, ctypes.c_uint32]) -> ctypes.c_void_p: ...
@dll.bind
def linear_strdup(ctx:c.POINTER[linear_ctx], str:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> c.POINTER[Annotated[bytes, ctypes.c_char]]: ...
@dll.bind
def linear_asprintf(ctx:c.POINTER[linear_ctx], fmt:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> c.POINTER[Annotated[bytes, ctypes.c_char]]: ...
@dll.bind
def linear_vasprintf(ctx:c.POINTER[linear_ctx], fmt:c.POINTER[Annotated[bytes, ctypes.c_char]], args:va_list) -> c.POINTER[Annotated[bytes, ctypes.c_char]]: ...
@dll.bind
def linear_asprintf_append(ctx:c.POINTER[linear_ctx], str:c.POINTER[c.POINTER[Annotated[bytes, ctypes.c_char]]], fmt:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def linear_vasprintf_append(ctx:c.POINTER[linear_ctx], str:c.POINTER[c.POINTER[Annotated[bytes, ctypes.c_char]]], fmt:c.POINTER[Annotated[bytes, ctypes.c_char]], args:va_list) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def linear_asprintf_rewrite_tail(ctx:c.POINTER[linear_ctx], str:c.POINTER[c.POINTER[Annotated[bytes, ctypes.c_char]]], start:c.POINTER[size_t], fmt:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def linear_vasprintf_rewrite_tail(ctx:c.POINTER[linear_ctx], str:c.POINTER[c.POINTER[Annotated[bytes, ctypes.c_char]]], start:c.POINTER[size_t], fmt:c.POINTER[Annotated[bytes, ctypes.c_char]], args:va_list) -> Annotated[bool, ctypes.c_bool]: ...
@dll.bind
def linear_strcat(ctx:c.POINTER[linear_ctx], dest:c.POINTER[c.POINTER[Annotated[bytes, ctypes.c_char]]], str:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> Annotated[bool, ctypes.c_bool]: ...
# Anonymous C enum holding the flag accepted by ralloc_print_info's
# `flags` parameter.
class _anonenum7(Annotated[int, ctypes.c_uint32], c.Enum): pass
RALLOC_PRINT_INFO_SUMMARY_ONLY = _anonenum7.define('RALLOC_PRINT_INFO_SUMMARY_ONLY', 1)
@dll.bind
def ralloc_print_info(f:c.POINTER[FILE], p:ctypes.c_void_p, flags:Annotated[int, ctypes.c_uint32]) -> None: ...
# ISA decode/disassembly interface: option struct with per-field and
# per-instruction callbacks, helper structs referenced by those
# callbacks, and the ir3 disassemble/decode entry points.
# Field offsets (second Annotated arg) are explicit; SIZE is the struct's
# total byte size.
@c.record
class struct_isa_decode_options(c.Struct):
  SIZE = 80
  gpu_id: Annotated[uint32_t, 0]
  show_errors: Annotated[Annotated[bool, ctypes.c_bool], 4]
  max_errors: Annotated[Annotated[int, ctypes.c_uint32], 8]
  branch_labels: Annotated[Annotated[bool, ctypes.c_bool], 12]
  stop: Annotated[Annotated[bool, ctypes.c_bool], 13]
  cbdata: Annotated[ctypes.c_void_p, 16]
  field_cb: Annotated[c.CFUNCTYPE[None, [ctypes.c_void_p, c.POINTER[Annotated[bytes, ctypes.c_char]], c.POINTER[struct_isa_decode_value]]], 24]
  field_print_cb: Annotated[c.CFUNCTYPE[None, [c.POINTER[struct_isa_print_state], c.POINTER[Annotated[bytes, ctypes.c_char]], uint64_t]], 32]
  pre_instr_cb: Annotated[c.CFUNCTYPE[None, [ctypes.c_void_p, Annotated[int, ctypes.c_uint32], ctypes.c_void_p]], 40]
  post_instr_cb: Annotated[c.CFUNCTYPE[None, [ctypes.c_void_p, Annotated[int, ctypes.c_uint32], ctypes.c_void_p]], 48]
  no_match_cb: Annotated[c.CFUNCTYPE[None, [c.POINTER[FILE], c.POINTER[Annotated[int, ctypes.c_uint32]], size_t]], 56]
  entrypoint_count: Annotated[Annotated[int, ctypes.c_uint32], 64]
  entrypoints: Annotated[c.POINTER[struct_isa_entrypoint], 72]
# Value passed to field_cb: a string pointer and a 64-bit numeric view.
@c.record
class struct_isa_decode_value(c.Struct):
  SIZE = 16
  str: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 0]
  num: Annotated[uint64_t, 8]
# Print state passed to field_print_cb: output FILE* and current column.
@c.record
class struct_isa_print_state(c.Struct):
  SIZE = 16
  out: Annotated[c.POINTER[FILE], 0]
  line_column: Annotated[Annotated[int, ctypes.c_uint32], 8]
# Named entrypoint (name + byte offset) listed in the options struct.
@c.record
class struct_isa_entrypoint(c.Struct):
  SIZE = 16
  name: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 0]
  offset: Annotated[uint32_t, 8]
@dll.bind
def ir3_isa_disasm(bin:ctypes.c_void_p, sz:Annotated[int, ctypes.c_int32], out:c.POINTER[FILE], options:c.POINTER[struct_isa_decode_options]) -> None: ...
@dll.bind
def ir3_isa_decode(out:ctypes.c_void_p, bin:ctypes.c_void_p, options:c.POINTER[struct_isa_decode_options]) -> Annotated[bool, ctypes.c_bool]: ...
# Opaque decode scope handle used by ir3_isa_get_gpu_id.
class struct_decode_scope(ctypes.Structure): pass
@dll.bind
def ir3_isa_get_gpu_id(scope:c.POINTER[struct_decode_scope]) -> uint32_t: ...
# Module-level handles to the library's builtin glsl_type singletons,
# resolved by symbol name with Structure.in_dll. Each lookup is wrapped
# in try/except so symbols missing from this particular dll build are
# silently skipped (the module-level name is then simply absent).
try: glsl_type_builtin_error = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_error') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_void = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_void') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_bool = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_bool') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_bvec2 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_bvec2') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_bvec3 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_bvec3') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_bvec4 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_bvec4') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_bvec5 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_bvec5') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_bvec8 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_bvec8') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_bvec16 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_bvec16') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_int = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_int') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_ivec2 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_ivec2') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_ivec3 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_ivec3') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_ivec4 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_ivec4') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_ivec5 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_ivec5') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_ivec8 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_ivec8') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_ivec16 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_ivec16') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_uint = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_uint') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_uvec2 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_uvec2') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_uvec3 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_uvec3') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_uvec4 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_uvec4') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_uvec5 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_uvec5') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_uvec8 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_uvec8') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_uvec16 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_uvec16') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_float = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_float') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_vec2 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_vec2') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_vec3 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_vec3') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_vec4 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_vec4') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_vec5 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_vec5') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_vec8 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_vec8') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_vec16 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_vec16') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_float16_t = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_float16_t') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_f16vec2 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_f16vec2') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_f16vec3 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_f16vec3') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_f16vec4 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_f16vec4') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_f16vec5 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_f16vec5') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_f16vec8 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_f16vec8') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_f16vec16 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_f16vec16') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_double = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_double') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_dvec2 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_dvec2') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_dvec3 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_dvec3') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_dvec4 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_dvec4') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_dvec5 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_dvec5') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_dvec8 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_dvec8') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_dvec16 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_dvec16') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_int64_t = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_int64_t') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_i64vec2 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_i64vec2') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_i64vec3 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_i64vec3') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_i64vec4 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_i64vec4') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_i64vec5 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_i64vec5') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_i64vec8 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_i64vec8') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_i64vec16 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_i64vec16') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_uint64_t = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_uint64_t') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_u64vec2 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_u64vec2') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_u64vec3 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_u64vec3') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_u64vec4 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_u64vec4') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_u64vec5 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_u64vec5') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_u64vec8 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_u64vec8') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_u64vec16 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_u64vec16') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_int16_t = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_int16_t') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_i16vec2 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_i16vec2') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_i16vec3 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_i16vec3') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_i16vec4 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_i16vec4') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_i16vec5 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_i16vec5') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_i16vec8 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_i16vec8') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_i16vec16 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_i16vec16') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_uint16_t = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_uint16_t') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_u16vec2 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_u16vec2') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_u16vec3 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_u16vec3') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_u16vec4 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_u16vec4') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_u16vec5 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_u16vec5') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_u16vec8 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_u16vec8') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_u16vec16 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_u16vec16') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_int8_t = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_int8_t') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_i8vec2 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_i8vec2') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_i8vec3 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_i8vec3') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_i8vec4 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_i8vec4') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_i8vec5 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_i8vec5') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_i8vec8 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_i8vec8') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_i8vec16 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_i8vec16') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_uint8_t = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_uint8_t') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_u8vec2 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_u8vec2') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_u8vec3 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_u8vec3') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_u8vec4 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_u8vec4') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_u8vec5 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_u8vec5') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_u8vec8 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_u8vec8') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_u8vec16 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_u8vec16') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_bfloat16_t = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_bfloat16_t') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_bf16vec2 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_bf16vec2') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_bf16vec3 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_bf16vec3') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_bf16vec4 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_bf16vec4') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_bf16vec5 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_bf16vec5') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_bf16vec8 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_bf16vec8') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_bf16vec16 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_bf16vec16') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_e4m3fn_t = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_e4m3fn_t') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_e4m3fnvec2 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_e4m3fnvec2') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_e4m3fnvec3 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_e4m3fnvec3') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_e4m3fnvec4 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_e4m3fnvec4') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_e4m3fnvec5 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_e4m3fnvec5') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_e4m3fnvec8 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_e4m3fnvec8') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_e4m3fnvec16 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_e4m3fnvec16') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_e5m2_t = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_e5m2_t') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_e5m2vec2 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_e5m2vec2') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_e5m2vec3 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_e5m2vec3') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_e5m2vec4 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_e5m2vec4') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_e5m2vec5 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_e5m2vec5') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_e5m2vec8 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_e5m2vec8') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_e5m2vec16 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_e5m2vec16') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_mat2 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_mat2') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_mat3 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_mat3') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_mat4 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_mat4') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_mat2x3 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_mat2x3') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_mat2x4 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_mat2x4') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_mat3x2 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_mat3x2') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_mat3x4 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_mat3x4') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_mat4x2 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_mat4x2') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_mat4x3 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_mat4x3') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_f16mat2 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_f16mat2') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_f16mat3 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_f16mat3') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_f16mat4 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_f16mat4') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_f16mat2x3 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_f16mat2x3') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_f16mat2x4 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_f16mat2x4') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_f16mat3x2 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_f16mat3x2') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_f16mat3x4 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_f16mat3x4') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_f16mat4x2 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_f16mat4x2') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_f16mat4x3 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_f16mat4x3') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_dmat2 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_dmat2') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_dmat3 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_dmat3') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_dmat4 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_dmat4') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_dmat2x3 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_dmat2x3') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_dmat2x4 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_dmat2x4') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_dmat3x2 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_dmat3x2') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_dmat3x4 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_dmat3x4') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_dmat4x2 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_dmat4x2') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_dmat4x3 = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_dmat4x3') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_atomic_uint = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_atomic_uint') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_sampler = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_sampler') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_sampler1D = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_sampler1D') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_sampler2D = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_sampler2D') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_sampler3D = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_sampler3D') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_samplerCube = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_samplerCube') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_sampler1DArray = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_sampler1DArray') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_sampler2DArray = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_sampler2DArray') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_samplerCubeArray = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_samplerCubeArray') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_sampler2DRect = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_sampler2DRect') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_samplerBuffer = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_samplerBuffer') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_sampler2DMS = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_sampler2DMS') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_sampler2DMSArray = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_sampler2DMSArray') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_isampler1D = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_isampler1D') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_isampler2D = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_isampler2D') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_isampler3D = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_isampler3D') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_isamplerCube = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_isamplerCube') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_isampler1DArray = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_isampler1DArray') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_isampler2DArray = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_isampler2DArray') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_isamplerCubeArray = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_isamplerCubeArray') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_isampler2DRect = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_isampler2DRect') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_isamplerBuffer = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_isamplerBuffer') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_isampler2DMS = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_isampler2DMS') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_isampler2DMSArray = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_isampler2DMSArray') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_usampler1D = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_usampler1D') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_usampler2D = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_usampler2D') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_usampler3D = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_usampler3D') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_usamplerCube = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_usamplerCube') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_usampler1DArray = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_usampler1DArray') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_usampler2DArray = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_usampler2DArray') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_usamplerCubeArray = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_usamplerCubeArray') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_usampler2DRect = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_usampler2DRect') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_usamplerBuffer = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_usamplerBuffer') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_usampler2DMS = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_usampler2DMS') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_usampler2DMSArray = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_usampler2DMSArray') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_samplerShadow = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_samplerShadow') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_sampler1DShadow = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_sampler1DShadow') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_sampler2DShadow = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_sampler2DShadow') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_samplerCubeShadow = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_samplerCubeShadow') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_sampler1DArrayShadow = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_sampler1DArrayShadow') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_sampler2DArrayShadow = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_sampler2DArrayShadow') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_samplerCubeArrayShadow = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_samplerCubeArrayShadow') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_sampler2DRectShadow = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_sampler2DRectShadow') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_samplerExternalOES = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_samplerExternalOES') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_texture1D = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_texture1D') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_texture2D = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_texture2D') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_texture3D = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_texture3D') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_textureCube = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_textureCube') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_texture1DArray = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_texture1DArray') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_texture2DArray = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_texture2DArray') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_textureCubeArray = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_textureCubeArray') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_texture2DRect = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_texture2DRect') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_textureBuffer = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_textureBuffer') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_texture2DMS = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_texture2DMS') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_texture2DMSArray = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_texture2DMSArray') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_itexture1D = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_itexture1D') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_itexture2D = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_itexture2D') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_itexture3D = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_itexture3D') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_itextureCube = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_itextureCube') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_itexture1DArray = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_itexture1DArray') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_itexture2DArray = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_itexture2DArray') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_itextureCubeArray = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_itextureCubeArray') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_itexture2DRect = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_itexture2DRect') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_itextureBuffer = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_itextureBuffer') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_itexture2DMS = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_itexture2DMS') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_itexture2DMSArray = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_itexture2DMSArray') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_utexture1D = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_utexture1D') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_utexture2D = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_utexture2D') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_utexture3D = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_utexture3D') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_utextureCube = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_utextureCube') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_utexture1DArray = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_utexture1DArray') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_utexture2DArray = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_utexture2DArray') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_utextureCubeArray = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_utextureCubeArray') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_utexture2DRect = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_utexture2DRect') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_utextureBuffer = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_utextureBuffer') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_utexture2DMS = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_utexture2DMS') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_utexture2DMSArray = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_utexture2DMSArray') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_textureExternalOES = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_textureExternalOES') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_vtexture1D = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_vtexture1D') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_vtexture2D = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_vtexture2D') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_vtexture3D = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_vtexture3D') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_vtexture2DMS = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_vtexture2DMS') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_vtexture2DMSArray = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_vtexture2DMSArray') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_vtexture1DArray = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_vtexture1DArray') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_vtexture2DArray = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_vtexture2DArray') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_vtextureBuffer = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_vtextureBuffer') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_image1D = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_image1D') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_image2D = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_image2D') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_image3D = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_image3D') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_image2DRect = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_image2DRect') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_imageCube = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_imageCube') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_imageBuffer = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_imageBuffer') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_image1DArray = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_image1DArray') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_image2DArray = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_image2DArray') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_imageCubeArray = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_imageCubeArray') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_image2DMS = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_image2DMS') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_image2DMSArray = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_image2DMSArray') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_iimage1D = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_iimage1D') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_iimage2D = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_iimage2D') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_iimage3D = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_iimage3D') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_iimage2DRect = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_iimage2DRect') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_iimageCube = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_iimageCube') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_iimageBuffer = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_iimageBuffer') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_iimage1DArray = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_iimage1DArray') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_iimage2DArray = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_iimage2DArray') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_iimageCubeArray = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_iimageCubeArray') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_iimage2DMS = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_iimage2DMS') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_iimage2DMSArray = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_iimage2DMSArray') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_uimage1D = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_uimage1D') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_uimage2D = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_uimage2D') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_uimage3D = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_uimage3D') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_uimage2DRect = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_uimage2DRect') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_uimageCube = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_uimageCube') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_uimageBuffer = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_uimageBuffer') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_uimage1DArray = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_uimage1DArray') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_uimage2DArray = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_uimage2DArray') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_uimageCubeArray = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_uimageCubeArray') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_uimage2DMS = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_uimage2DMS') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_uimage2DMSArray = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_uimage2DMSArray') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_i64image1D = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_i64image1D') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_i64image2D = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_i64image2D') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_i64image3D = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_i64image3D') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_i64image2DRect = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_i64image2DRect') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_i64imageCube = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_i64imageCube') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_i64imageBuffer = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_i64imageBuffer') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_i64image1DArray = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_i64image1DArray') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_i64image2DArray = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_i64image2DArray') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_i64imageCubeArray = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_i64imageCubeArray') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_i64image2DMS = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_i64image2DMS') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_i64image2DMSArray = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_i64image2DMSArray') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_u64image1D = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_u64image1D') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_u64image2D = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_u64image2D') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_u64image3D = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_u64image3D') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_u64image2DRect = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_u64image2DRect') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_u64imageCube = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_u64imageCube') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_u64imageBuffer = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_u64imageBuffer') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_u64image1DArray = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_u64image1DArray') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_u64image2DArray = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_u64image2DArray') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_u64imageCubeArray = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_u64imageCubeArray') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_u64image2DMS = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_u64image2DMS') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_u64image2DMSArray = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_u64image2DMSArray') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_vbuffer = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_vbuffer') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_vimage1D = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_vimage1D') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_vimage2D = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_vimage2D') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_vimage3D = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_vimage3D') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_vimage2DMS = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_vimage2DMS') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_vimage2DMSArray = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_vimage2DMSArray') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_vimage1DArray = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_vimage1DArray') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_vimage2DArray = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_vimage2DArray') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_subpassInput = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_subpassInput') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_subpassInputMS = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_subpassInputMS') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_isubpassInput = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_isubpassInput') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_isubpassInputMS = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_isubpassInputMS') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_usubpassInput = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_usubpassInput') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_usubpassInputMS = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_usubpassInputMS') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_textureSubpassInput = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_textureSubpassInput') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_textureSubpassInputMS = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_textureSubpassInputMS') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_itextureSubpassInput = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_itextureSubpassInput') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_itextureSubpassInputMS = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_itextureSubpassInputMS') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_utextureSubpassInput = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_utextureSubpassInput') # type: ignore
except (ValueError,AttributeError): pass
try: glsl_type_builtin_utextureSubpassInputMS = struct_glsl_type.in_dll(dll, 'glsl_type_builtin_utextureSubpassInputMS') # type: ignore
except (ValueError,AttributeError): pass
# Adreno a6xx/a7xx render-state enums. Each class is a 32-bit unsigned enum;
# the .define() calls register name/value pairs on the project's c.Enum base
# and bind the same names at module level. The numeric values mirror the
# hardware register field encodings (presumably generated from Mesa's
# freedreno a6xx register XML — TODO confirm). Autogenerated: do not edit.
class enum_a6xx_shift_amount(Annotated[int, ctypes.c_uint32], c.Enum): pass
NO_SHIFT = enum_a6xx_shift_amount.define('NO_SHIFT', 0)
HALF_PIXEL_SHIFT = enum_a6xx_shift_amount.define('HALF_PIXEL_SHIFT', 1)
FULL_PIXEL_SHIFT = enum_a6xx_shift_amount.define('FULL_PIXEL_SHIFT', 2)
class enum_a6xx_sequenced_thread_dist(Annotated[int, ctypes.c_uint32], c.Enum): pass
DIST_SCREEN_COORD = enum_a6xx_sequenced_thread_dist.define('DIST_SCREEN_COORD', 0)
DIST_ALL_TO_RB0 = enum_a6xx_sequenced_thread_dist.define('DIST_ALL_TO_RB0', 1)
class enum_a6xx_single_prim_mode(Annotated[int, ctypes.c_uint32], c.Enum): pass
NO_FLUSH = enum_a6xx_single_prim_mode.define('NO_FLUSH', 0)
FLUSH_PER_OVERLAP_AND_OVERWRITE = enum_a6xx_single_prim_mode.define('FLUSH_PER_OVERLAP_AND_OVERWRITE', 1)
FLUSH_PER_OVERLAP = enum_a6xx_single_prim_mode.define('FLUSH_PER_OVERLAP', 3)
class enum_a6xx_raster_mode(Annotated[int, ctypes.c_uint32], c.Enum): pass
TYPE_TILED = enum_a6xx_raster_mode.define('TYPE_TILED', 0)
TYPE_WRITER = enum_a6xx_raster_mode.define('TYPE_WRITER', 1)
class enum_a6xx_raster_direction(Annotated[int, ctypes.c_uint32], c.Enum): pass
LR_TB = enum_a6xx_raster_direction.define('LR_TB', 0)
RL_TB = enum_a6xx_raster_direction.define('RL_TB', 1)
LR_BT = enum_a6xx_raster_direction.define('LR_BT', 2)
# NOTE(review): 'RB_BT' breaks the LR/RL naming pattern (looks like 'RL_BT');
# it matches the generator's output, so leave as-is and verify upstream.
RB_BT = enum_a6xx_raster_direction.define('RB_BT', 3)
class enum_a6xx_render_mode(Annotated[int, ctypes.c_uint32], c.Enum): pass
RENDERING_PASS = enum_a6xx_render_mode.define('RENDERING_PASS', 0)
BINNING_PASS = enum_a6xx_render_mode.define('BINNING_PASS', 1)
class enum_a6xx_buffers_location(Annotated[int, ctypes.c_uint32], c.Enum): pass
BUFFERS_IN_GMEM = enum_a6xx_buffers_location.define('BUFFERS_IN_GMEM', 0)
BUFFERS_IN_SYSMEM = enum_a6xx_buffers_location.define('BUFFERS_IN_SYSMEM', 3)
class enum_a6xx_lrz_feedback_mask(Annotated[int, ctypes.c_uint32], c.Enum): pass
LRZ_FEEDBACK_NONE = enum_a6xx_lrz_feedback_mask.define('LRZ_FEEDBACK_NONE', 0)
LRZ_FEEDBACK_EARLY_Z = enum_a6xx_lrz_feedback_mask.define('LRZ_FEEDBACK_EARLY_Z', 1)
LRZ_FEEDBACK_EARLY_Z_LATE_Z = enum_a6xx_lrz_feedback_mask.define('LRZ_FEEDBACK_EARLY_Z_LATE_Z', 2)
LRZ_FEEDBACK_EARLY_Z_OR_EARLY_Z_LATE_Z = enum_a6xx_lrz_feedback_mask.define('LRZ_FEEDBACK_EARLY_Z_OR_EARLY_Z_LATE_Z', 3)
LRZ_FEEDBACK_LATE_Z = enum_a6xx_lrz_feedback_mask.define('LRZ_FEEDBACK_LATE_Z', 4)
class enum_a6xx_fsr_combiner(Annotated[int, ctypes.c_uint32], c.Enum): pass
FSR_COMBINER_OP_KEEP = enum_a6xx_fsr_combiner.define('FSR_COMBINER_OP_KEEP', 0)
FSR_COMBINER_OP_REPLACE = enum_a6xx_fsr_combiner.define('FSR_COMBINER_OP_REPLACE', 1)
FSR_COMBINER_OP_MIN = enum_a6xx_fsr_combiner.define('FSR_COMBINER_OP_MIN', 2)
FSR_COMBINER_OP_MAX = enum_a6xx_fsr_combiner.define('FSR_COMBINER_OP_MAX', 3)
FSR_COMBINER_OP_MUL = enum_a6xx_fsr_combiner.define('FSR_COMBINER_OP_MUL', 4)
class enum_a6xx_lrz_dir_status(Annotated[int, ctypes.c_uint32], c.Enum): pass
LRZ_DIR_LE = enum_a6xx_lrz_dir_status.define('LRZ_DIR_LE', 1)
LRZ_DIR_GE = enum_a6xx_lrz_dir_status.define('LRZ_DIR_GE', 2)
LRZ_DIR_INVALID = enum_a6xx_lrz_dir_status.define('LRZ_DIR_INVALID', 3)
class enum_a6xx_fragcoord_sample_mode(Annotated[int, ctypes.c_uint32], c.Enum): pass
FRAGCOORD_CENTER = enum_a6xx_fragcoord_sample_mode.define('FRAGCOORD_CENTER', 0)
FRAGCOORD_SAMPLE = enum_a6xx_fragcoord_sample_mode.define('FRAGCOORD_SAMPLE', 3)
class enum_a6xx_rotation(Annotated[int, ctypes.c_uint32], c.Enum): pass
ROTATE_0 = enum_a6xx_rotation.define('ROTATE_0', 0)
ROTATE_90 = enum_a6xx_rotation.define('ROTATE_90', 1)
ROTATE_180 = enum_a6xx_rotation.define('ROTATE_180', 2)
ROTATE_270 = enum_a6xx_rotation.define('ROTATE_270', 3)
ROTATE_HFLIP = enum_a6xx_rotation.define('ROTATE_HFLIP', 4)
ROTATE_VFLIP = enum_a6xx_rotation.define('ROTATE_VFLIP', 5)
class enum_a6xx_blit_event_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
BLIT_EVENT_STORE = enum_a6xx_blit_event_type.define('BLIT_EVENT_STORE', 0)
BLIT_EVENT_STORE_AND_CLEAR = enum_a6xx_blit_event_type.define('BLIT_EVENT_STORE_AND_CLEAR', 1)
BLIT_EVENT_CLEAR = enum_a6xx_blit_event_type.define('BLIT_EVENT_CLEAR', 2)
BLIT_EVENT_LOAD = enum_a6xx_blit_event_type.define('BLIT_EVENT_LOAD', 3)
class enum_a7xx_blit_clear_mode(Annotated[int, ctypes.c_uint32], c.Enum): pass
CLEAR_MODE_SYSMEM = enum_a7xx_blit_clear_mode.define('CLEAR_MODE_SYSMEM', 0)
CLEAR_MODE_GMEM = enum_a7xx_blit_clear_mode.define('CLEAR_MODE_GMEM', 1)
class enum_a6xx_ccu_cache_size(Annotated[int, ctypes.c_uint32], c.Enum): pass
CCU_CACHE_SIZE_FULL = enum_a6xx_ccu_cache_size.define('CCU_CACHE_SIZE_FULL', 0)
CCU_CACHE_SIZE_HALF = enum_a6xx_ccu_cache_size.define('CCU_CACHE_SIZE_HALF', 1)
CCU_CACHE_SIZE_QUARTER = enum_a6xx_ccu_cache_size.define('CCU_CACHE_SIZE_QUARTER', 2)
CCU_CACHE_SIZE_EIGHTH = enum_a6xx_ccu_cache_size.define('CCU_CACHE_SIZE_EIGHTH', 3)
class enum_a7xx_concurrent_resolve_mode(Annotated[int, ctypes.c_uint32], c.Enum): pass
CONCURRENT_RESOLVE_MODE_DISABLED = enum_a7xx_concurrent_resolve_mode.define('CONCURRENT_RESOLVE_MODE_DISABLED', 0)
CONCURRENT_RESOLVE_MODE_1 = enum_a7xx_concurrent_resolve_mode.define('CONCURRENT_RESOLVE_MODE_1', 1)
CONCURRENT_RESOLVE_MODE_2 = enum_a7xx_concurrent_resolve_mode.define('CONCURRENT_RESOLVE_MODE_2', 2)
class enum_a7xx_concurrent_unresolve_mode(Annotated[int, ctypes.c_uint32], c.Enum): pass
CONCURRENT_UNRESOLVE_MODE_DISABLED = enum_a7xx_concurrent_unresolve_mode.define('CONCURRENT_UNRESOLVE_MODE_DISABLED', 0)
CONCURRENT_UNRESOLVE_MODE_PARTIAL = enum_a7xx_concurrent_unresolve_mode.define('CONCURRENT_UNRESOLVE_MODE_PARTIAL', 1)
CONCURRENT_UNRESOLVE_MODE_FULL = enum_a7xx_concurrent_unresolve_mode.define('CONCURRENT_UNRESOLVE_MODE_FULL', 3)
class enum_a6xx_varying_interp_mode(Annotated[int, ctypes.c_uint32], c.Enum): pass
INTERP_SMOOTH = enum_a6xx_varying_interp_mode.define('INTERP_SMOOTH', 0)
INTERP_FLAT = enum_a6xx_varying_interp_mode.define('INTERP_FLAT', 1)
INTERP_ZERO = enum_a6xx_varying_interp_mode.define('INTERP_ZERO', 2)
INTERP_ONE = enum_a6xx_varying_interp_mode.define('INTERP_ONE', 3)
class enum_a6xx_varying_ps_repl_mode(Annotated[int, ctypes.c_uint32], c.Enum): pass
PS_REPL_NONE = enum_a6xx_varying_ps_repl_mode.define('PS_REPL_NONE', 0)
PS_REPL_S = enum_a6xx_varying_ps_repl_mode.define('PS_REPL_S', 1)
PS_REPL_T = enum_a6xx_varying_ps_repl_mode.define('PS_REPL_T', 2)
PS_REPL_ONE_MINUS_T = enum_a6xx_varying_ps_repl_mode.define('PS_REPL_ONE_MINUS_T', 3)
class enum_a6xx_threadsize(Annotated[int, ctypes.c_uint32], c.Enum): pass
THREAD64 = enum_a6xx_threadsize.define('THREAD64', 0)
THREAD128 = enum_a6xx_threadsize.define('THREAD128', 1)
class enum_a6xx_const_ram_mode(Annotated[int, ctypes.c_uint32], c.Enum): pass
CONSTLEN_128 = enum_a6xx_const_ram_mode.define('CONSTLEN_128', 0)
CONSTLEN_192 = enum_a6xx_const_ram_mode.define('CONSTLEN_192', 1)
CONSTLEN_256 = enum_a6xx_const_ram_mode.define('CONSTLEN_256', 2)
CONSTLEN_512 = enum_a6xx_const_ram_mode.define('CONSTLEN_512', 3)
class enum_a7xx_workitem_rast_order(Annotated[int, ctypes.c_uint32], c.Enum): pass
WORKITEMRASTORDER_LINEAR = enum_a7xx_workitem_rast_order.define('WORKITEMRASTORDER_LINEAR', 0)
WORKITEMRASTORDER_TILED = enum_a7xx_workitem_rast_order.define('WORKITEMRASTORDER_TILED', 1)
class enum_a6xx_bindless_descriptor_size(Annotated[int, ctypes.c_uint32], c.Enum): pass
BINDLESS_DESCRIPTOR_16B = enum_a6xx_bindless_descriptor_size.define('BINDLESS_DESCRIPTOR_16B', 1)
BINDLESS_DESCRIPTOR_64B = enum_a6xx_bindless_descriptor_size.define('BINDLESS_DESCRIPTOR_64B', 3)
class enum_a6xx_isam_mode(Annotated[int, ctypes.c_uint32], c.Enum): pass
ISAMMODE_CL = enum_a6xx_isam_mode.define('ISAMMODE_CL', 1)
ISAMMODE_GL = enum_a6xx_isam_mode.define('ISAMMODE_GL', 2)
class enum_a6xx_sp_a2d_output_ifmt_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
OUTPUT_IFMT_2D_FLOAT = enum_a6xx_sp_a2d_output_ifmt_type.define('OUTPUT_IFMT_2D_FLOAT', 0)
OUTPUT_IFMT_2D_SINT = enum_a6xx_sp_a2d_output_ifmt_type.define('OUTPUT_IFMT_2D_SINT', 1)
OUTPUT_IFMT_2D_UINT = enum_a6xx_sp_a2d_output_ifmt_type.define('OUTPUT_IFMT_2D_UINT', 2)
class enum_a6xx_coord_round(Annotated[int, ctypes.c_uint32], c.Enum): pass
COORD_TRUNCATE = enum_a6xx_coord_round.define('COORD_TRUNCATE', 0)
COORD_ROUND_NEAREST_EVEN = enum_a6xx_coord_round.define('COORD_ROUND_NEAREST_EVEN', 1)
class enum_a6xx_nearest_mode(Annotated[int, ctypes.c_uint32], c.Enum): pass
ROUND_CLAMP_TRUNCATE = enum_a6xx_nearest_mode.define('ROUND_CLAMP_TRUNCATE', 0)
CLAMP_ROUND_TRUNCATE = enum_a6xx_nearest_mode.define('CLAMP_ROUND_TRUNCATE', 1)
class enum_a7xx_cs_yalign(Annotated[int, ctypes.c_uint32], c.Enum): pass
# NOTE(review): CS_YALIGN_* encodings run opposite to their names (1 -> 8,
# 8 -> 1); this mirrors the hardware field encoding — confirm against the XML.
CS_YALIGN_1 = enum_a7xx_cs_yalign.define('CS_YALIGN_1', 8)
CS_YALIGN_2 = enum_a7xx_cs_yalign.define('CS_YALIGN_2', 4)
CS_YALIGN_4 = enum_a7xx_cs_yalign.define('CS_YALIGN_4', 2)
CS_YALIGN_8 = enum_a7xx_cs_yalign.define('CS_YALIGN_8', 1)
# VGT event opcodes (CP_EVENT_WRITE payloads). NOTE(review): many names share
# a numeric value (e.g. VIZQUERY_START/HLSQ_FLUSH = 7, CACHE_FLUSH_AND_INV_EVENT/
# RB_DONE_TS = 22, CACHE_INVALIDATE/CACHE_CLEAN = 49): the same event id means
# different things on different GPU generations, so the generator emits every
# alias. Autogenerated — do not edit by hand.
class enum_vgt_event_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
VS_DEALLOC = enum_vgt_event_type.define('VS_DEALLOC', 0)
PS_DEALLOC = enum_vgt_event_type.define('PS_DEALLOC', 1)
VS_DONE_TS = enum_vgt_event_type.define('VS_DONE_TS', 2)
PS_DONE_TS = enum_vgt_event_type.define('PS_DONE_TS', 3)
CACHE_FLUSH_TS = enum_vgt_event_type.define('CACHE_FLUSH_TS', 4)
CONTEXT_DONE = enum_vgt_event_type.define('CONTEXT_DONE', 5)
CACHE_FLUSH = enum_vgt_event_type.define('CACHE_FLUSH', 6)
VIZQUERY_START = enum_vgt_event_type.define('VIZQUERY_START', 7)
HLSQ_FLUSH = enum_vgt_event_type.define('HLSQ_FLUSH', 7)
VIZQUERY_END = enum_vgt_event_type.define('VIZQUERY_END', 8)
SC_WAIT_WC = enum_vgt_event_type.define('SC_WAIT_WC', 9)
WRITE_PRIMITIVE_COUNTS = enum_vgt_event_type.define('WRITE_PRIMITIVE_COUNTS', 9)
START_PRIMITIVE_CTRS = enum_vgt_event_type.define('START_PRIMITIVE_CTRS', 11)
STOP_PRIMITIVE_CTRS = enum_vgt_event_type.define('STOP_PRIMITIVE_CTRS', 12)
RST_PIX_CNT = enum_vgt_event_type.define('RST_PIX_CNT', 13)
RST_VTX_CNT = enum_vgt_event_type.define('RST_VTX_CNT', 14)
TILE_FLUSH = enum_vgt_event_type.define('TILE_FLUSH', 15)
STAT_EVENT = enum_vgt_event_type.define('STAT_EVENT', 16)
CACHE_FLUSH_AND_INV_TS_EVENT = enum_vgt_event_type.define('CACHE_FLUSH_AND_INV_TS_EVENT', 20)
ZPASS_DONE = enum_vgt_event_type.define('ZPASS_DONE', 21)
CACHE_FLUSH_AND_INV_EVENT = enum_vgt_event_type.define('CACHE_FLUSH_AND_INV_EVENT', 22)
RB_DONE_TS = enum_vgt_event_type.define('RB_DONE_TS', 22)
PERFCOUNTER_START = enum_vgt_event_type.define('PERFCOUNTER_START', 23)
PERFCOUNTER_STOP = enum_vgt_event_type.define('PERFCOUNTER_STOP', 24)
VS_FETCH_DONE = enum_vgt_event_type.define('VS_FETCH_DONE', 27)
FACENESS_FLUSH = enum_vgt_event_type.define('FACENESS_FLUSH', 28)
WT_DONE_TS = enum_vgt_event_type.define('WT_DONE_TS', 8)
START_FRAGMENT_CTRS = enum_vgt_event_type.define('START_FRAGMENT_CTRS', 13)
STOP_FRAGMENT_CTRS = enum_vgt_event_type.define('STOP_FRAGMENT_CTRS', 14)
START_COMPUTE_CTRS = enum_vgt_event_type.define('START_COMPUTE_CTRS', 15)
STOP_COMPUTE_CTRS = enum_vgt_event_type.define('STOP_COMPUTE_CTRS', 16)
FLUSH_SO_0 = enum_vgt_event_type.define('FLUSH_SO_0', 17)
FLUSH_SO_1 = enum_vgt_event_type.define('FLUSH_SO_1', 18)
FLUSH_SO_2 = enum_vgt_event_type.define('FLUSH_SO_2', 19)
FLUSH_SO_3 = enum_vgt_event_type.define('FLUSH_SO_3', 20)
PC_CCU_INVALIDATE_DEPTH = enum_vgt_event_type.define('PC_CCU_INVALIDATE_DEPTH', 24)
PC_CCU_INVALIDATE_COLOR = enum_vgt_event_type.define('PC_CCU_INVALIDATE_COLOR', 25)
PC_CCU_RESOLVE_TS = enum_vgt_event_type.define('PC_CCU_RESOLVE_TS', 26)
PC_CCU_FLUSH_DEPTH_TS = enum_vgt_event_type.define('PC_CCU_FLUSH_DEPTH_TS', 28)
PC_CCU_FLUSH_COLOR_TS = enum_vgt_event_type.define('PC_CCU_FLUSH_COLOR_TS', 29)
BLIT = enum_vgt_event_type.define('BLIT', 30)
LRZ_FLIP_BUFFER = enum_vgt_event_type.define('LRZ_FLIP_BUFFER', 36)
LRZ_CLEAR = enum_vgt_event_type.define('LRZ_CLEAR', 37)
LRZ_FLUSH = enum_vgt_event_type.define('LRZ_FLUSH', 38)
BLIT_OP_FILL_2D = enum_vgt_event_type.define('BLIT_OP_FILL_2D', 39)
BLIT_OP_COPY_2D = enum_vgt_event_type.define('BLIT_OP_COPY_2D', 40)
UNK_40 = enum_vgt_event_type.define('UNK_40', 40)
LRZ_Q_CACHE_INVALIDATE = enum_vgt_event_type.define('LRZ_Q_CACHE_INVALIDATE', 41)
BLIT_OP_SCALE_2D = enum_vgt_event_type.define('BLIT_OP_SCALE_2D', 42)
CONTEXT_DONE_2D = enum_vgt_event_type.define('CONTEXT_DONE_2D', 43)
UNK_2C = enum_vgt_event_type.define('UNK_2C', 44)
UNK_2D = enum_vgt_event_type.define('UNK_2D', 45)
CACHE_INVALIDATE = enum_vgt_event_type.define('CACHE_INVALIDATE', 49)
LABEL = enum_vgt_event_type.define('LABEL', 63)
DUMMY_EVENT = enum_vgt_event_type.define('DUMMY_EVENT', 1)
CCU_INVALIDATE_DEPTH = enum_vgt_event_type.define('CCU_INVALIDATE_DEPTH', 24)
CCU_INVALIDATE_COLOR = enum_vgt_event_type.define('CCU_INVALIDATE_COLOR', 25)
CCU_RESOLVE_CLEAN = enum_vgt_event_type.define('CCU_RESOLVE_CLEAN', 26)
CCU_FLUSH_DEPTH = enum_vgt_event_type.define('CCU_FLUSH_DEPTH', 28)
CCU_FLUSH_COLOR = enum_vgt_event_type.define('CCU_FLUSH_COLOR', 29)
CCU_RESOLVE = enum_vgt_event_type.define('CCU_RESOLVE', 30)
CCU_END_RESOLVE_GROUP = enum_vgt_event_type.define('CCU_END_RESOLVE_GROUP', 31)
CCU_CLEAN_DEPTH = enum_vgt_event_type.define('CCU_CLEAN_DEPTH', 32)
CCU_CLEAN_COLOR = enum_vgt_event_type.define('CCU_CLEAN_COLOR', 33)
CACHE_RESET = enum_vgt_event_type.define('CACHE_RESET', 48)
CACHE_CLEAN = enum_vgt_event_type.define('CACHE_CLEAN', 49)
CACHE_FLUSH7 = enum_vgt_event_type.define('CACHE_FLUSH7', 50)
CACHE_INVALIDATE7 = enum_vgt_event_type.define('CACHE_INVALIDATE7', 51)
# Draw-initiator primitive types. Values 31..62 encode tessellation patch
# primitives with 1..32 control points (DI_PT_PATCHES0 = 1 control point).
# Autogenerated — do not edit by hand.
class enum_pc_di_primtype(Annotated[int, ctypes.c_uint32], c.Enum): pass
DI_PT_NONE = enum_pc_di_primtype.define('DI_PT_NONE', 0)
DI_PT_POINTLIST_PSIZE = enum_pc_di_primtype.define('DI_PT_POINTLIST_PSIZE', 1)
DI_PT_LINELIST = enum_pc_di_primtype.define('DI_PT_LINELIST', 2)
DI_PT_LINESTRIP = enum_pc_di_primtype.define('DI_PT_LINESTRIP', 3)
DI_PT_TRILIST = enum_pc_di_primtype.define('DI_PT_TRILIST', 4)
DI_PT_TRIFAN = enum_pc_di_primtype.define('DI_PT_TRIFAN', 5)
DI_PT_TRISTRIP = enum_pc_di_primtype.define('DI_PT_TRISTRIP', 6)
DI_PT_LINELOOP = enum_pc_di_primtype.define('DI_PT_LINELOOP', 7)
DI_PT_RECTLIST = enum_pc_di_primtype.define('DI_PT_RECTLIST', 8)
DI_PT_POINTLIST = enum_pc_di_primtype.define('DI_PT_POINTLIST', 9)
DI_PT_LINE_ADJ = enum_pc_di_primtype.define('DI_PT_LINE_ADJ', 10)
DI_PT_LINESTRIP_ADJ = enum_pc_di_primtype.define('DI_PT_LINESTRIP_ADJ', 11)
DI_PT_TRI_ADJ = enum_pc_di_primtype.define('DI_PT_TRI_ADJ', 12)
DI_PT_TRISTRIP_ADJ = enum_pc_di_primtype.define('DI_PT_TRISTRIP_ADJ', 13)
DI_PT_PATCHES0 = enum_pc_di_primtype.define('DI_PT_PATCHES0', 31)
DI_PT_PATCHES1 = enum_pc_di_primtype.define('DI_PT_PATCHES1', 32)
DI_PT_PATCHES2 = enum_pc_di_primtype.define('DI_PT_PATCHES2', 33)
DI_PT_PATCHES3 = enum_pc_di_primtype.define('DI_PT_PATCHES3', 34)
DI_PT_PATCHES4 = enum_pc_di_primtype.define('DI_PT_PATCHES4', 35)
DI_PT_PATCHES5 = enum_pc_di_primtype.define('DI_PT_PATCHES5', 36)
DI_PT_PATCHES6 = enum_pc_di_primtype.define('DI_PT_PATCHES6', 37)
DI_PT_PATCHES7 = enum_pc_di_primtype.define('DI_PT_PATCHES7', 38)
DI_PT_PATCHES8 = enum_pc_di_primtype.define('DI_PT_PATCHES8', 39)
DI_PT_PATCHES9 = enum_pc_di_primtype.define('DI_PT_PATCHES9', 40)
DI_PT_PATCHES10 = enum_pc_di_primtype.define('DI_PT_PATCHES10', 41)
DI_PT_PATCHES11 = enum_pc_di_primtype.define('DI_PT_PATCHES11', 42)
DI_PT_PATCHES12 = enum_pc_di_primtype.define('DI_PT_PATCHES12', 43)
DI_PT_PATCHES13 = enum_pc_di_primtype.define('DI_PT_PATCHES13', 44)
DI_PT_PATCHES14 = enum_pc_di_primtype.define('DI_PT_PATCHES14', 45)
DI_PT_PATCHES15 = enum_pc_di_primtype.define('DI_PT_PATCHES15', 46)
DI_PT_PATCHES16 = enum_pc_di_primtype.define('DI_PT_PATCHES16', 47)
DI_PT_PATCHES17 = enum_pc_di_primtype.define('DI_PT_PATCHES17', 48)
DI_PT_PATCHES18 = enum_pc_di_primtype.define('DI_PT_PATCHES18', 49)
DI_PT_PATCHES19 = enum_pc_di_primtype.define('DI_PT_PATCHES19', 50)
DI_PT_PATCHES20 = enum_pc_di_primtype.define('DI_PT_PATCHES20', 51)
DI_PT_PATCHES21 = enum_pc_di_primtype.define('DI_PT_PATCHES21', 52)
DI_PT_PATCHES22 = enum_pc_di_primtype.define('DI_PT_PATCHES22', 53)
DI_PT_PATCHES23 = enum_pc_di_primtype.define('DI_PT_PATCHES23', 54)
DI_PT_PATCHES24 = enum_pc_di_primtype.define('DI_PT_PATCHES24', 55)
DI_PT_PATCHES25 = enum_pc_di_primtype.define('DI_PT_PATCHES25', 56)
DI_PT_PATCHES26 = enum_pc_di_primtype.define('DI_PT_PATCHES26', 57)
DI_PT_PATCHES27 = enum_pc_di_primtype.define('DI_PT_PATCHES27', 58)
DI_PT_PATCHES28 = enum_pc_di_primtype.define('DI_PT_PATCHES28', 59)
DI_PT_PATCHES29 = enum_pc_di_primtype.define('DI_PT_PATCHES29', 60)
DI_PT_PATCHES30 = enum_pc_di_primtype.define('DI_PT_PATCHES30', 61)
DI_PT_PATCHES31 = enum_pc_di_primtype.define('DI_PT_PATCHES31', 62)
# Draw-initiator auxiliary fields: index source, face culling, index size and
# visibility-stream usage. Autogenerated — do not edit by hand.
class enum_pc_di_src_sel(Annotated[int, ctypes.c_uint32], c.Enum): pass
DI_SRC_SEL_DMA = enum_pc_di_src_sel.define('DI_SRC_SEL_DMA', 0)
DI_SRC_SEL_IMMEDIATE = enum_pc_di_src_sel.define('DI_SRC_SEL_IMMEDIATE', 1)
DI_SRC_SEL_AUTO_INDEX = enum_pc_di_src_sel.define('DI_SRC_SEL_AUTO_INDEX', 2)
DI_SRC_SEL_AUTO_XFB = enum_pc_di_src_sel.define('DI_SRC_SEL_AUTO_XFB', 3)
class enum_pc_di_face_cull_sel(Annotated[int, ctypes.c_uint32], c.Enum): pass
DI_FACE_CULL_NONE = enum_pc_di_face_cull_sel.define('DI_FACE_CULL_NONE', 0)
DI_FACE_CULL_FETCH = enum_pc_di_face_cull_sel.define('DI_FACE_CULL_FETCH', 1)
DI_FACE_BACKFACE_CULL = enum_pc_di_face_cull_sel.define('DI_FACE_BACKFACE_CULL', 2)
DI_FACE_FRONTFACE_CULL = enum_pc_di_face_cull_sel.define('DI_FACE_FRONTFACE_CULL', 3)
class enum_pc_di_index_size(Annotated[int, ctypes.c_uint32], c.Enum): pass
# NOTE(review): INDEX_SIZE_IGN, INDEX_SIZE_16_BIT and INDEX_SIZE_INVALID all
# alias value 0 — this mirrors the upstream register definitions.
INDEX_SIZE_IGN = enum_pc_di_index_size.define('INDEX_SIZE_IGN', 0)
INDEX_SIZE_16_BIT = enum_pc_di_index_size.define('INDEX_SIZE_16_BIT', 0)
INDEX_SIZE_32_BIT = enum_pc_di_index_size.define('INDEX_SIZE_32_BIT', 1)
INDEX_SIZE_8_BIT = enum_pc_di_index_size.define('INDEX_SIZE_8_BIT', 2)
INDEX_SIZE_INVALID = enum_pc_di_index_size.define('INDEX_SIZE_INVALID', 0)
class enum_pc_di_vis_cull_mode(Annotated[int, ctypes.c_uint32], c.Enum): pass
IGNORE_VISIBILITY = enum_pc_di_vis_cull_mode.define('IGNORE_VISIBILITY', 0)
USE_VISIBILITY = enum_pc_di_vis_cull_mode.define('USE_VISIBILITY', 1)
# PM4 packet-type tags carried in the top bits of a packet header dword:
# TYPE1/TYPE4 = 0x40000000, TYPE2 = 0x80000000, TYPE3 = 0xC0000000,
# TYPE7 = 0x70000000 (TYPE1 and TYPE4 intentionally share an encoding).
class enum_adreno_pm4_packet_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
CP_TYPE0_PKT = enum_adreno_pm4_packet_type.define('CP_TYPE0_PKT', 0)
CP_TYPE1_PKT = enum_adreno_pm4_packet_type.define('CP_TYPE1_PKT', 1073741824)
CP_TYPE2_PKT = enum_adreno_pm4_packet_type.define('CP_TYPE2_PKT', 2147483648)
CP_TYPE3_PKT = enum_adreno_pm4_packet_type.define('CP_TYPE3_PKT', 3221225472)
CP_TYPE4_PKT = enum_adreno_pm4_packet_type.define('CP_TYPE4_PKT', 1073741824)
CP_TYPE7_PKT = enum_adreno_pm4_packet_type.define('CP_TYPE7_PKT', 1879048192)
# PM4 TYPE3/TYPE7 packet opcodes. NOTE(review): many opcodes are reused across
# GPU generations with different meanings (e.g. CP_SET_BIN_DATA/CP_SET_BIN_DATA5
# both 47, CP_BLIT/CP_IM_STORE both 44, CP_LOAD_STATE6_GEOM 50 vs
# CP_COND_INDIRECT_BUFFER_PFD 50) — the generator emits every alias; which name
# applies depends on the target chip. Autogenerated — do not edit by hand.
class enum_adreno_pm4_type3_packets(Annotated[int, ctypes.c_uint32], c.Enum): pass
CP_ME_INIT = enum_adreno_pm4_type3_packets.define('CP_ME_INIT', 72)
CP_NOP = enum_adreno_pm4_type3_packets.define('CP_NOP', 16)
CP_PREEMPT_ENABLE = enum_adreno_pm4_type3_packets.define('CP_PREEMPT_ENABLE', 28)
CP_PREEMPT_TOKEN = enum_adreno_pm4_type3_packets.define('CP_PREEMPT_TOKEN', 30)
CP_INDIRECT_BUFFER = enum_adreno_pm4_type3_packets.define('CP_INDIRECT_BUFFER', 63)
CP_INDIRECT_BUFFER_CHAIN = enum_adreno_pm4_type3_packets.define('CP_INDIRECT_BUFFER_CHAIN', 87)
CP_INDIRECT_BUFFER_PFD = enum_adreno_pm4_type3_packets.define('CP_INDIRECT_BUFFER_PFD', 55)
CP_WAIT_FOR_IDLE = enum_adreno_pm4_type3_packets.define('CP_WAIT_FOR_IDLE', 38)
CP_WAIT_REG_MEM = enum_adreno_pm4_type3_packets.define('CP_WAIT_REG_MEM', 60)
CP_WAIT_REG_EQ = enum_adreno_pm4_type3_packets.define('CP_WAIT_REG_EQ', 82)
CP_WAIT_REG_GTE = enum_adreno_pm4_type3_packets.define('CP_WAIT_REG_GTE', 83)
CP_WAIT_UNTIL_READ = enum_adreno_pm4_type3_packets.define('CP_WAIT_UNTIL_READ', 92)
CP_WAIT_IB_PFD_COMPLETE = enum_adreno_pm4_type3_packets.define('CP_WAIT_IB_PFD_COMPLETE', 93)
CP_REG_RMW = enum_adreno_pm4_type3_packets.define('CP_REG_RMW', 33)
CP_SET_BIN_DATA = enum_adreno_pm4_type3_packets.define('CP_SET_BIN_DATA', 47)
CP_SET_BIN_DATA5 = enum_adreno_pm4_type3_packets.define('CP_SET_BIN_DATA5', 47)
CP_REG_TO_MEM = enum_adreno_pm4_type3_packets.define('CP_REG_TO_MEM', 62)
CP_MEM_WRITE = enum_adreno_pm4_type3_packets.define('CP_MEM_WRITE', 61)
CP_MEM_WRITE_CNTR = enum_adreno_pm4_type3_packets.define('CP_MEM_WRITE_CNTR', 79)
CP_COND_EXEC = enum_adreno_pm4_type3_packets.define('CP_COND_EXEC', 68)
CP_COND_WRITE = enum_adreno_pm4_type3_packets.define('CP_COND_WRITE', 69)
CP_COND_WRITE5 = enum_adreno_pm4_type3_packets.define('CP_COND_WRITE5', 69)
CP_EVENT_WRITE = enum_adreno_pm4_type3_packets.define('CP_EVENT_WRITE', 70)
CP_EVENT_WRITE7 = enum_adreno_pm4_type3_packets.define('CP_EVENT_WRITE7', 70)
CP_EVENT_WRITE_SHD = enum_adreno_pm4_type3_packets.define('CP_EVENT_WRITE_SHD', 88)
CP_EVENT_WRITE_CFL = enum_adreno_pm4_type3_packets.define('CP_EVENT_WRITE_CFL', 89)
CP_EVENT_WRITE_ZPD = enum_adreno_pm4_type3_packets.define('CP_EVENT_WRITE_ZPD', 91)
CP_RUN_OPENCL = enum_adreno_pm4_type3_packets.define('CP_RUN_OPENCL', 49)
CP_DRAW_INDX = enum_adreno_pm4_type3_packets.define('CP_DRAW_INDX', 34)
CP_DRAW_INDX_2 = enum_adreno_pm4_type3_packets.define('CP_DRAW_INDX_2', 54)
CP_DRAW_INDX_BIN = enum_adreno_pm4_type3_packets.define('CP_DRAW_INDX_BIN', 52)
CP_DRAW_INDX_2_BIN = enum_adreno_pm4_type3_packets.define('CP_DRAW_INDX_2_BIN', 53)
CP_VIZ_QUERY = enum_adreno_pm4_type3_packets.define('CP_VIZ_QUERY', 35)
CP_SET_STATE = enum_adreno_pm4_type3_packets.define('CP_SET_STATE', 37)
CP_SET_CONSTANT = enum_adreno_pm4_type3_packets.define('CP_SET_CONSTANT', 45)
CP_IM_LOAD = enum_adreno_pm4_type3_packets.define('CP_IM_LOAD', 39)
CP_IM_LOAD_IMMEDIATE = enum_adreno_pm4_type3_packets.define('CP_IM_LOAD_IMMEDIATE', 43)
CP_LOAD_CONSTANT_CONTEXT = enum_adreno_pm4_type3_packets.define('CP_LOAD_CONSTANT_CONTEXT', 46)
CP_INVALIDATE_STATE = enum_adreno_pm4_type3_packets.define('CP_INVALIDATE_STATE', 59)
CP_SET_SHADER_BASES = enum_adreno_pm4_type3_packets.define('CP_SET_SHADER_BASES', 74)
CP_SET_BIN_MASK = enum_adreno_pm4_type3_packets.define('CP_SET_BIN_MASK', 80)
CP_SET_BIN_SELECT = enum_adreno_pm4_type3_packets.define('CP_SET_BIN_SELECT', 81)
CP_CONTEXT_UPDATE = enum_adreno_pm4_type3_packets.define('CP_CONTEXT_UPDATE', 94)
CP_INTERRUPT = enum_adreno_pm4_type3_packets.define('CP_INTERRUPT', 64)
CP_IM_STORE = enum_adreno_pm4_type3_packets.define('CP_IM_STORE', 44)
CP_SET_DRAW_INIT_FLAGS = enum_adreno_pm4_type3_packets.define('CP_SET_DRAW_INIT_FLAGS', 75)
CP_SET_PROTECTED_MODE = enum_adreno_pm4_type3_packets.define('CP_SET_PROTECTED_MODE', 95)
CP_BOOTSTRAP_UCODE = enum_adreno_pm4_type3_packets.define('CP_BOOTSTRAP_UCODE', 111)
CP_LOAD_STATE = enum_adreno_pm4_type3_packets.define('CP_LOAD_STATE', 48)
CP_LOAD_STATE4 = enum_adreno_pm4_type3_packets.define('CP_LOAD_STATE4', 48)
CP_COND_INDIRECT_BUFFER_PFE = enum_adreno_pm4_type3_packets.define('CP_COND_INDIRECT_BUFFER_PFE', 58)
CP_COND_INDIRECT_BUFFER_PFD = enum_adreno_pm4_type3_packets.define('CP_COND_INDIRECT_BUFFER_PFD', 50)
CP_INDIRECT_BUFFER_PFE = enum_adreno_pm4_type3_packets.define('CP_INDIRECT_BUFFER_PFE', 63)
CP_SET_BIN = enum_adreno_pm4_type3_packets.define('CP_SET_BIN', 76)
CP_TEST_TWO_MEMS = enum_adreno_pm4_type3_packets.define('CP_TEST_TWO_MEMS', 113)
CP_REG_WR_NO_CTXT = enum_adreno_pm4_type3_packets.define('CP_REG_WR_NO_CTXT', 120)
CP_RECORD_PFP_TIMESTAMP = enum_adreno_pm4_type3_packets.define('CP_RECORD_PFP_TIMESTAMP', 17)
CP_SET_SECURE_MODE = enum_adreno_pm4_type3_packets.define('CP_SET_SECURE_MODE', 102)
CP_WAIT_FOR_ME = enum_adreno_pm4_type3_packets.define('CP_WAIT_FOR_ME', 19)
CP_SET_DRAW_STATE = enum_adreno_pm4_type3_packets.define('CP_SET_DRAW_STATE', 67)
CP_DRAW_INDX_OFFSET = enum_adreno_pm4_type3_packets.define('CP_DRAW_INDX_OFFSET', 56)
CP_DRAW_INDIRECT = enum_adreno_pm4_type3_packets.define('CP_DRAW_INDIRECT', 40)
CP_DRAW_INDX_INDIRECT = enum_adreno_pm4_type3_packets.define('CP_DRAW_INDX_INDIRECT', 41)
CP_DRAW_INDIRECT_MULTI = enum_adreno_pm4_type3_packets.define('CP_DRAW_INDIRECT_MULTI', 42)
CP_DRAW_AUTO = enum_adreno_pm4_type3_packets.define('CP_DRAW_AUTO', 36)
CP_DRAW_PRED_ENABLE_GLOBAL = enum_adreno_pm4_type3_packets.define('CP_DRAW_PRED_ENABLE_GLOBAL', 25)
CP_DRAW_PRED_ENABLE_LOCAL = enum_adreno_pm4_type3_packets.define('CP_DRAW_PRED_ENABLE_LOCAL', 26)
CP_DRAW_PRED_SET = enum_adreno_pm4_type3_packets.define('CP_DRAW_PRED_SET', 78)
CP_WIDE_REG_WRITE = enum_adreno_pm4_type3_packets.define('CP_WIDE_REG_WRITE', 116)
CP_SCRATCH_TO_REG = enum_adreno_pm4_type3_packets.define('CP_SCRATCH_TO_REG', 77)
CP_REG_TO_SCRATCH = enum_adreno_pm4_type3_packets.define('CP_REG_TO_SCRATCH', 74)
CP_WAIT_MEM_WRITES = enum_adreno_pm4_type3_packets.define('CP_WAIT_MEM_WRITES', 18)
CP_COND_REG_EXEC = enum_adreno_pm4_type3_packets.define('CP_COND_REG_EXEC', 71)
CP_MEM_TO_REG = enum_adreno_pm4_type3_packets.define('CP_MEM_TO_REG', 66)
CP_EXEC_CS_INDIRECT = enum_adreno_pm4_type3_packets.define('CP_EXEC_CS_INDIRECT', 65)
CP_EXEC_CS = enum_adreno_pm4_type3_packets.define('CP_EXEC_CS', 51)
CP_PERFCOUNTER_ACTION = enum_adreno_pm4_type3_packets.define('CP_PERFCOUNTER_ACTION', 80)
CP_SMMU_TABLE_UPDATE = enum_adreno_pm4_type3_packets.define('CP_SMMU_TABLE_UPDATE', 83)
CP_SET_MARKER = enum_adreno_pm4_type3_packets.define('CP_SET_MARKER', 101)
CP_SET_PSEUDO_REG = enum_adreno_pm4_type3_packets.define('CP_SET_PSEUDO_REG', 86)
CP_CONTEXT_REG_BUNCH = enum_adreno_pm4_type3_packets.define('CP_CONTEXT_REG_BUNCH', 92)
CP_YIELD_ENABLE = enum_adreno_pm4_type3_packets.define('CP_YIELD_ENABLE', 28)
CP_SKIP_IB2_ENABLE_GLOBAL = enum_adreno_pm4_type3_packets.define('CP_SKIP_IB2_ENABLE_GLOBAL', 29)
CP_SKIP_IB2_ENABLE_LOCAL = enum_adreno_pm4_type3_packets.define('CP_SKIP_IB2_ENABLE_LOCAL', 35)
CP_SET_SUBDRAW_SIZE = enum_adreno_pm4_type3_packets.define('CP_SET_SUBDRAW_SIZE', 53)
CP_WHERE_AM_I = enum_adreno_pm4_type3_packets.define('CP_WHERE_AM_I', 98)
CP_SET_VISIBILITY_OVERRIDE = enum_adreno_pm4_type3_packets.define('CP_SET_VISIBILITY_OVERRIDE', 100)
CP_PREEMPT_ENABLE_GLOBAL = enum_adreno_pm4_type3_packets.define('CP_PREEMPT_ENABLE_GLOBAL', 105)
CP_PREEMPT_ENABLE_LOCAL = enum_adreno_pm4_type3_packets.define('CP_PREEMPT_ENABLE_LOCAL', 106)
CP_CONTEXT_SWITCH_YIELD = enum_adreno_pm4_type3_packets.define('CP_CONTEXT_SWITCH_YIELD', 107)
CP_SET_RENDER_MODE = enum_adreno_pm4_type3_packets.define('CP_SET_RENDER_MODE', 108)
CP_COMPUTE_CHECKPOINT = enum_adreno_pm4_type3_packets.define('CP_COMPUTE_CHECKPOINT', 110)
CP_MEM_TO_MEM = enum_adreno_pm4_type3_packets.define('CP_MEM_TO_MEM', 115)
CP_BLIT = enum_adreno_pm4_type3_packets.define('CP_BLIT', 44)
CP_REG_TEST = enum_adreno_pm4_type3_packets.define('CP_REG_TEST', 57)
CP_SET_MODE = enum_adreno_pm4_type3_packets.define('CP_SET_MODE', 99)
CP_LOAD_STATE6_GEOM = enum_adreno_pm4_type3_packets.define('CP_LOAD_STATE6_GEOM', 50)
CP_LOAD_STATE6_FRAG = enum_adreno_pm4_type3_packets.define('CP_LOAD_STATE6_FRAG', 52)
CP_LOAD_STATE6 = enum_adreno_pm4_type3_packets.define('CP_LOAD_STATE6', 54)
IN_IB_PREFETCH_END = enum_adreno_pm4_type3_packets.define('IN_IB_PREFETCH_END', 23)
IN_SUBBLK_PREFETCH = enum_adreno_pm4_type3_packets.define('IN_SUBBLK_PREFETCH', 31)
IN_INSTR_PREFETCH = enum_adreno_pm4_type3_packets.define('IN_INSTR_PREFETCH', 32)
IN_INSTR_MATCH = enum_adreno_pm4_type3_packets.define('IN_INSTR_MATCH', 71)
IN_CONST_PREFETCH = enum_adreno_pm4_type3_packets.define('IN_CONST_PREFETCH', 73)
IN_INCR_UPDT_STATE = enum_adreno_pm4_type3_packets.define('IN_INCR_UPDT_STATE', 85)
IN_INCR_UPDT_CONST = enum_adreno_pm4_type3_packets.define('IN_INCR_UPDT_CONST', 86)
IN_INCR_UPDT_INSTR = enum_adreno_pm4_type3_packets.define('IN_INCR_UPDT_INSTR', 87)
PKT4 = enum_adreno_pm4_type3_packets.define('PKT4', 4)
IN_IB_END = enum_adreno_pm4_type3_packets.define('IN_IB_END', 10)
IN_GMU_INTERRUPT = enum_adreno_pm4_type3_packets.define('IN_GMU_INTERRUPT', 11)
IN_PREEMPT = enum_adreno_pm4_type3_packets.define('IN_PREEMPT', 15)
CP_SCRATCH_WRITE = enum_adreno_pm4_type3_packets.define('CP_SCRATCH_WRITE', 76)
CP_REG_TO_MEM_OFFSET_MEM = enum_adreno_pm4_type3_packets.define('CP_REG_TO_MEM_OFFSET_MEM', 116)
CP_REG_TO_MEM_OFFSET_REG = enum_adreno_pm4_type3_packets.define('CP_REG_TO_MEM_OFFSET_REG', 114)
CP_WAIT_MEM_GTE = enum_adreno_pm4_type3_packets.define('CP_WAIT_MEM_GTE', 20)
CP_WAIT_TWO_REGS = enum_adreno_pm4_type3_packets.define('CP_WAIT_TWO_REGS', 112)
CP_MEMCPY = enum_adreno_pm4_type3_packets.define('CP_MEMCPY', 117)
CP_SET_BIN_DATA5_OFFSET = enum_adreno_pm4_type3_packets.define('CP_SET_BIN_DATA5_OFFSET', 46)
CP_SET_UNK_BIN_DATA = enum_adreno_pm4_type3_packets.define('CP_SET_UNK_BIN_DATA', 45)
CP_CONTEXT_SWITCH = enum_adreno_pm4_type3_packets.define('CP_CONTEXT_SWITCH', 84)
CP_SET_AMBLE = enum_adreno_pm4_type3_packets.define('CP_SET_AMBLE', 85)
CP_REG_WRITE = enum_adreno_pm4_type3_packets.define('CP_REG_WRITE', 109)
CP_START_BIN = enum_adreno_pm4_type3_packets.define('CP_START_BIN', 80)
CP_END_BIN = enum_adreno_pm4_type3_packets.define('CP_END_BIN', 81)
CP_PREEMPT_DISABLE = enum_adreno_pm4_type3_packets.define('CP_PREEMPT_DISABLE', 108)
CP_WAIT_TIMESTAMP = enum_adreno_pm4_type3_packets.define('CP_WAIT_TIMESTAMP', 20)
CP_GLOBAL_TIMESTAMP = enum_adreno_pm4_type3_packets.define('CP_GLOBAL_TIMESTAMP', 21)
CP_LOCAL_TIMESTAMP = enum_adreno_pm4_type3_packets.define('CP_LOCAL_TIMESTAMP', 22)
CP_THREAD_CONTROL = enum_adreno_pm4_type3_packets.define('CP_THREAD_CONTROL', 23)
CP_RESOURCE_LIST = enum_adreno_pm4_type3_packets.define('CP_RESOURCE_LIST', 24)
CP_BV_BR_COUNT_OPS = enum_adreno_pm4_type3_packets.define('CP_BV_BR_COUNT_OPS', 27)
CP_MODIFY_TIMESTAMP = enum_adreno_pm4_type3_packets.define('CP_MODIFY_TIMESTAMP', 28)
CP_CONTEXT_REG_BUNCH2 = enum_adreno_pm4_type3_packets.define('CP_CONTEXT_REG_BUNCH2', 93)
CP_MEM_TO_SCRATCH_MEM = enum_adreno_pm4_type3_packets.define('CP_MEM_TO_SCRATCH_MEM', 73)
CP_FIXED_STRIDE_DRAW_TABLE = enum_adreno_pm4_type3_packets.define('CP_FIXED_STRIDE_DRAW_TABLE', 127)
CP_RESET_CONTEXT_STATE = enum_adreno_pm4_type3_packets.define('CP_RESET_CONTEXT_STATE', 31)
CP_CCHE_INVALIDATE = enum_adreno_pm4_type3_packets.define('CP_CCHE_INVALIDATE', 58)
CP_SCOPE_CNTL = enum_adreno_pm4_type3_packets.define('CP_SCOPE_CNTL', 108)
class enum_adreno_state_block(Annotated[int, ctypes.c_uint32], c.Enum): pass
SB_VERT_TEX = enum_adreno_state_block.define('SB_VERT_TEX', 0)
SB_VERT_MIPADDR = enum_adreno_state_block.define('SB_VERT_MIPADDR', 1)
SB_FRAG_TEX = enum_adreno_state_block.define('SB_FRAG_TEX', 2)
SB_FRAG_MIPADDR = enum_adreno_state_block.define('SB_FRAG_MIPADDR', 3)
SB_VERT_SHADER = enum_adreno_state_block.define('SB_VERT_SHADER', 4)
SB_GEOM_SHADER = enum_adreno_state_block.define('SB_GEOM_SHADER', 5)
SB_FRAG_SHADER = enum_adreno_state_block.define('SB_FRAG_SHADER', 6)
SB_COMPUTE_SHADER = enum_adreno_state_block.define('SB_COMPUTE_SHADER', 7)
class enum_adreno_state_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
ST_SHADER = enum_adreno_state_type.define('ST_SHADER', 0)
ST_CONSTANTS = enum_adreno_state_type.define('ST_CONSTANTS', 1)
class enum_adreno_state_src(Annotated[int, ctypes.c_uint32], c.Enum): pass
SS_DIRECT = enum_adreno_state_src.define('SS_DIRECT', 0)
SS_INVALID_ALL_IC = enum_adreno_state_src.define('SS_INVALID_ALL_IC', 2)
SS_INVALID_PART_IC = enum_adreno_state_src.define('SS_INVALID_PART_IC', 3)
SS_INDIRECT = enum_adreno_state_src.define('SS_INDIRECT', 4)
SS_INDIRECT_TCM = enum_adreno_state_src.define('SS_INDIRECT_TCM', 5)
SS_INDIRECT_STM = enum_adreno_state_src.define('SS_INDIRECT_STM', 6)
# a4xx-generation state-block selectors; names encode shader stage (VS/HS/DS/GS/FS/CS)
# and payload kind (TEX/SHADER/SSBO). Values 6-7 are absent — gap is in the generated table.
class enum_a4xx_state_block(Annotated[int, ctypes.c_uint32], c.Enum): pass
SB4_VS_TEX = enum_a4xx_state_block.define('SB4_VS_TEX', 0)
SB4_HS_TEX = enum_a4xx_state_block.define('SB4_HS_TEX', 1)
SB4_DS_TEX = enum_a4xx_state_block.define('SB4_DS_TEX', 2)
SB4_GS_TEX = enum_a4xx_state_block.define('SB4_GS_TEX', 3)
SB4_FS_TEX = enum_a4xx_state_block.define('SB4_FS_TEX', 4)
SB4_CS_TEX = enum_a4xx_state_block.define('SB4_CS_TEX', 5)
SB4_VS_SHADER = enum_a4xx_state_block.define('SB4_VS_SHADER', 8)
SB4_HS_SHADER = enum_a4xx_state_block.define('SB4_HS_SHADER', 9)
SB4_DS_SHADER = enum_a4xx_state_block.define('SB4_DS_SHADER', 10)
SB4_GS_SHADER = enum_a4xx_state_block.define('SB4_GS_SHADER', 11)
SB4_FS_SHADER = enum_a4xx_state_block.define('SB4_FS_SHADER', 12)
SB4_CS_SHADER = enum_a4xx_state_block.define('SB4_CS_SHADER', 13)
SB4_SSBO = enum_a4xx_state_block.define('SB4_SSBO', 14)
SB4_CS_SSBO = enum_a4xx_state_block.define('SB4_CS_SSBO', 15)
# a4xx state-type selector (adds UBO on top of the generic shader/constants split).
class enum_a4xx_state_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
ST4_SHADER = enum_a4xx_state_type.define('ST4_SHADER', 0)
ST4_CONSTANTS = enum_a4xx_state_type.define('ST4_CONSTANTS', 1)
ST4_UBO = enum_a4xx_state_type.define('ST4_UBO', 2)
# a4xx state-source selector.
class enum_a4xx_state_src(Annotated[int, ctypes.c_uint32], c.Enum): pass
SS4_DIRECT = enum_a4xx_state_src.define('SS4_DIRECT', 0)
SS4_INDIRECT = enum_a4xx_state_src.define('SS4_INDIRECT', 2)
# a6xx-generation state-block selectors — same layout as the a4xx table,
# with UAV replacing SSBO at slots 14/15.
class enum_a6xx_state_block(Annotated[int, ctypes.c_uint32], c.Enum): pass
SB6_VS_TEX = enum_a6xx_state_block.define('SB6_VS_TEX', 0)
SB6_HS_TEX = enum_a6xx_state_block.define('SB6_HS_TEX', 1)
SB6_DS_TEX = enum_a6xx_state_block.define('SB6_DS_TEX', 2)
SB6_GS_TEX = enum_a6xx_state_block.define('SB6_GS_TEX', 3)
SB6_FS_TEX = enum_a6xx_state_block.define('SB6_FS_TEX', 4)
SB6_CS_TEX = enum_a6xx_state_block.define('SB6_CS_TEX', 5)
SB6_VS_SHADER = enum_a6xx_state_block.define('SB6_VS_SHADER', 8)
SB6_HS_SHADER = enum_a6xx_state_block.define('SB6_HS_SHADER', 9)
SB6_DS_SHADER = enum_a6xx_state_block.define('SB6_DS_SHADER', 10)
SB6_GS_SHADER = enum_a6xx_state_block.define('SB6_GS_SHADER', 11)
SB6_FS_SHADER = enum_a6xx_state_block.define('SB6_FS_SHADER', 12)
SB6_CS_SHADER = enum_a6xx_state_block.define('SB6_CS_SHADER', 13)
SB6_UAV = enum_a6xx_state_block.define('SB6_UAV', 14)
SB6_CS_UAV = enum_a6xx_state_block.define('SB6_CS_UAV', 15)
# a6xx state-type selector (adds UAV).
class enum_a6xx_state_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
ST6_SHADER = enum_a6xx_state_type.define('ST6_SHADER', 0)
ST6_CONSTANTS = enum_a6xx_state_type.define('ST6_CONSTANTS', 1)
ST6_UBO = enum_a6xx_state_type.define('ST6_UBO', 2)
ST6_UAV = enum_a6xx_state_type.define('ST6_UAV', 3)
# a6xx state-source selector (adds bindless/UBO modes).
class enum_a6xx_state_src(Annotated[int, ctypes.c_uint32], c.Enum): pass
SS6_DIRECT = enum_a6xx_state_src.define('SS6_DIRECT', 0)
SS6_BINDLESS = enum_a6xx_state_src.define('SS6_BINDLESS', 1)
SS6_INDIRECT = enum_a6xx_state_src.define('SS6_INDIRECT', 2)
SS6_UBO = enum_a6xx_state_src.define('SS6_UBO', 3)
# Index-buffer element width (8/16/32-bit indices).
class enum_a4xx_index_size(Annotated[int, ctypes.c_uint32], c.Enum): pass
INDEX4_SIZE_8_BIT = enum_a4xx_index_size.define('INDEX4_SIZE_8_BIT', 0)
INDEX4_SIZE_16_BIT = enum_a4xx_index_size.define('INDEX4_SIZE_16_BIT', 1)
INDEX4_SIZE_32_BIT = enum_a4xx_index_size.define('INDEX4_SIZE_32_BIT', 2)
# Tessellation patch primitive type.
class enum_a6xx_patch_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
TESS_QUADS = enum_a6xx_patch_type.define('TESS_QUADS', 0)
TESS_TRIANGLES = enum_a6xx_patch_type.define('TESS_TRIANGLES', 1)
TESS_ISOLINES = enum_a6xx_patch_type.define('TESS_ISOLINES', 2)
# Draw-indirect opcode variants (indexed / count-buffer combinations).
class enum_a6xx_draw_indirect_opcode(Annotated[int, ctypes.c_uint32], c.Enum): pass
INDIRECT_OP_NORMAL = enum_a6xx_draw_indirect_opcode.define('INDIRECT_OP_NORMAL', 2)
INDIRECT_OP_INDEXED = enum_a6xx_draw_indirect_opcode.define('INDIRECT_OP_INDEXED', 4)
INDIRECT_OP_INDIRECT_COUNT = enum_a6xx_draw_indirect_opcode.define('INDIRECT_OP_INDIRECT_COUNT', 6)
INDIRECT_OP_INDIRECT_COUNT_INDEXED = enum_a6xx_draw_indirect_opcode.define('INDIRECT_OP_INDIRECT_COUNT_INDEXED', 7)
# Draw-predication source / pass-condition selectors.
class enum_cp_draw_pred_src(Annotated[int, ctypes.c_uint32], c.Enum): pass
PRED_SRC_MEM = enum_cp_draw_pred_src.define('PRED_SRC_MEM', 5)
class enum_cp_draw_pred_test(Annotated[int, ctypes.c_uint32], c.Enum): pass
NE_0_PASS = enum_cp_draw_pred_test.define('NE_0_PASS', 0)
EQ_0_PASS = enum_cp_draw_pred_test.define('EQ_0_PASS', 1)
# Absolute-mask mode toggle for a7xx.
class enum_a7xx_abs_mask_mode(Annotated[int, ctypes.c_uint32], c.Enum): pass
ABS_MASK = enum_a7xx_abs_mask_mode.define('ABS_MASK', 1)
NO_ABS_MASK = enum_a7xx_abs_mask_mode.define('NO_ABS_MASK', 0)
# Conditional-write comparison functions (always / LT / LE / EQ / NE / GE / GT).
class enum_cp_cond_function(Annotated[int, ctypes.c_uint32], c.Enum): pass
WRITE_ALWAYS = enum_cp_cond_function.define('WRITE_ALWAYS', 0)
WRITE_LT = enum_cp_cond_function.define('WRITE_LT', 1)
WRITE_LE = enum_cp_cond_function.define('WRITE_LE', 2)
WRITE_EQ = enum_cp_cond_function.define('WRITE_EQ', 3)
WRITE_NE = enum_cp_cond_function.define('WRITE_NE', 4)
WRITE_GE = enum_cp_cond_function.define('WRITE_GE', 5)
WRITE_GT = enum_cp_cond_function.define('WRITE_GT', 6)
# Location the command processor polls when waiting (register / memory / scratch / on-chip).
class enum_poll_memory_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
POLL_REGISTER = enum_poll_memory_type.define('POLL_REGISTER', 0)
POLL_MEMORY = enum_poll_memory_type.define('POLL_MEMORY', 1)
POLL_SCRATCH = enum_poll_memory_type.define('POLL_SCRATCH', 2)
POLL_ON_CHIP = enum_poll_memory_type.define('POLL_ON_CHIP', 3)
# Render-mode command codes (binning / GMEM / 2D blit variants).
class enum_render_mode_cmd(Annotated[int, ctypes.c_uint32], c.Enum): pass
BYPASS = enum_render_mode_cmd.define('BYPASS', 1)
BINNING = enum_render_mode_cmd.define('BINNING', 2)
GMEM = enum_render_mode_cmd.define('GMEM', 3)
BLIT2D = enum_render_mode_cmd.define('BLIT2D', 5)
BLIT2DSCALE = enum_render_mode_cmd.define('BLIT2DSCALE', 7)
END2D = enum_render_mode_cmd.define('END2D', 8)
# Payload source for event-write packets (user value / timestamp / always-on counter / regs).
class enum_event_write_src(Annotated[int, ctypes.c_uint32], c.Enum): pass
EV_WRITE_USER_32B = enum_event_write_src.define('EV_WRITE_USER_32B', 0)
EV_WRITE_USER_64B = enum_event_write_src.define('EV_WRITE_USER_64B', 1)
EV_WRITE_TIMESTAMP_SUM = enum_event_write_src.define('EV_WRITE_TIMESTAMP_SUM', 2)
EV_WRITE_ALWAYSON = enum_event_write_src.define('EV_WRITE_ALWAYSON', 3)
EV_WRITE_REGS_CONTENT = enum_event_write_src.define('EV_WRITE_REGS_CONTENT', 4)
# Destination for event-write payloads (RAM vs. on-chip).
class enum_event_write_dst(Annotated[int, ctypes.c_uint32], c.Enum): pass
EV_DST_RAM = enum_event_write_dst.define('EV_DST_RAM', 0)
EV_DST_ONCHIP = enum_event_write_dst.define('EV_DST_ONCHIP', 1)
# CP_BLIT operation codes (fill / copy / scaled blit).
class enum_cp_blit_cmd(Annotated[int, ctypes.c_uint32], c.Enum): pass
BLIT_OP_FILL = enum_cp_blit_cmd.define('BLIT_OP_FILL', 0)
BLIT_OP_COPY = enum_cp_blit_cmd.define('BLIT_OP_COPY', 1)
BLIT_OP_SCALE = enum_cp_blit_cmd.define('BLIT_OP_SCALE', 3)
# SET_MARKER sub-mode selector.
class enum_set_marker_mode(Annotated[int, ctypes.c_uint32], c.Enum): pass
SET_RENDER_MODE = enum_set_marker_mode.define('SET_RENDER_MODE', 0)
SET_IFPC_MODE = enum_set_marker_mode.define('SET_IFPC_MODE', 1)
# Inter-frame power-collapse enable/disable. Note 0 = enable, 1 = disable (inverted sense).
class enum_a6xx_ifpc_mode(Annotated[int, ctypes.c_uint32], c.Enum): pass
IFPC_ENABLE = enum_a6xx_ifpc_mode.define('IFPC_ENABLE', 0)
IFPC_DISABLE = enum_a6xx_ifpc_mode.define('IFPC_DISABLE', 1)
# Render-phase markers emitted during binning/resolve; RM6_COMPUTE (8) marks compute work.
class enum_a6xx_marker(Annotated[int, ctypes.c_uint32], c.Enum): pass
RM6_DIRECT_RENDER = enum_a6xx_marker.define('RM6_DIRECT_RENDER', 1)
RM6_BIN_VISIBILITY = enum_a6xx_marker.define('RM6_BIN_VISIBILITY', 2)
RM6_BIN_DIRECT = enum_a6xx_marker.define('RM6_BIN_DIRECT', 3)
RM6_BIN_RENDER_START = enum_a6xx_marker.define('RM6_BIN_RENDER_START', 4)
RM6_BIN_END_OF_DRAWS = enum_a6xx_marker.define('RM6_BIN_END_OF_DRAWS', 5)
RM6_BIN_RESOLVE = enum_a6xx_marker.define('RM6_BIN_RESOLVE', 6)
RM6_BIN_RENDER_END = enum_a6xx_marker.define('RM6_BIN_RENDER_END', 7)
RM6_COMPUTE = enum_a6xx_marker.define('RM6_COMPUTE', 8)
RM6_BLIT2DSCALE = enum_a6xx_marker.define('RM6_BLIT2DSCALE', 12)
RM6_IB1LIST_START = enum_a6xx_marker.define('RM6_IB1LIST_START', 13)
RM6_IB1LIST_END = enum_a6xx_marker.define('RM6_IB1LIST_END', 14)
# Pseudo-register identifiers (save addresses, VSC bases, bindless descriptor bases).
class enum_pseudo_reg(Annotated[int, ctypes.c_uint32], c.Enum): pass
SMMU_INFO = enum_pseudo_reg.define('SMMU_INFO', 0)
NON_SECURE_SAVE_ADDR = enum_pseudo_reg.define('NON_SECURE_SAVE_ADDR', 1)
SECURE_SAVE_ADDR = enum_pseudo_reg.define('SECURE_SAVE_ADDR', 2)
NON_PRIV_SAVE_ADDR = enum_pseudo_reg.define('NON_PRIV_SAVE_ADDR', 3)
COUNTER = enum_pseudo_reg.define('COUNTER', 4)
VSC_PIPE_DATA_DRAW_BASE = enum_pseudo_reg.define('VSC_PIPE_DATA_DRAW_BASE', 8)
VSC_SIZE_BASE = enum_pseudo_reg.define('VSC_SIZE_BASE', 9)
VSC_PIPE_DATA_PRIM_BASE = enum_pseudo_reg.define('VSC_PIPE_DATA_PRIM_BASE', 10)
UNK_STRM_ADDRESS = enum_pseudo_reg.define('UNK_STRM_ADDRESS', 11)
UNK_STRM_SIZE_ADDRESS = enum_pseudo_reg.define('UNK_STRM_SIZE_ADDRESS', 12)
BINDLESS_BASE_0_ADDR = enum_pseudo_reg.define('BINDLESS_BASE_0_ADDR', 16)
BINDLESS_BASE_1_ADDR = enum_pseudo_reg.define('BINDLESS_BASE_1_ADDR', 17)
BINDLESS_BASE_2_ADDR = enum_pseudo_reg.define('BINDLESS_BASE_2_ADDR', 18)
BINDLESS_BASE_3_ADDR = enum_pseudo_reg.define('BINDLESS_BASE_3_ADDR', 19)
BINDLESS_BASE_4_ADDR = enum_pseudo_reg.define('BINDLESS_BASE_4_ADDR', 20)
BINDLESS_BASE_5_ADDR = enum_pseudo_reg.define('BINDLESS_BASE_5_ADDR', 21)
BINDLESS_BASE_6_ADDR = enum_pseudo_reg.define('BINDLESS_BASE_6_ADDR', 22)
# Source for comparisons: register file vs. scratch memory.
class enum_source_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
SOURCE_REG = enum_source_type.define('SOURCE_REG', 0)
SOURCE_SCRATCH_MEM = enum_source_type.define('SOURCE_SCRATCH_MEM', 1)
# Comparison mode for conditional execution packets.
class enum_compare_mode(Annotated[int, ctypes.c_uint32], c.Enum): pass
PRED_TEST = enum_compare_mode.define('PRED_TEST', 1)
REG_COMPARE = enum_compare_mode.define('REG_COMPARE', 2)
RENDER_MODE = enum_compare_mode.define('RENDER_MODE', 3)
REG_COMPARE_IMM = enum_compare_mode.define('REG_COMPARE_IMM', 4)
THREAD_MODE = enum_compare_mode.define('THREAD_MODE', 5)
# Preamble/postamble command-stream segment types.
class enum_amble_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
PREAMBLE_AMBLE_TYPE = enum_amble_type.define('PREAMBLE_AMBLE_TYPE', 0)
BIN_PREAMBLE_AMBLE_TYPE = enum_amble_type.define('BIN_PREAMBLE_AMBLE_TYPE', 1)
POSTAMBLE_AMBLE_TYPE = enum_amble_type.define('POSTAMBLE_AMBLE_TYPE', 2)
KMD_AMBLE_TYPE = enum_amble_type.define('KMD_AMBLE_TYPE', 3)
# Register-tracker bit flags (values are powers of two — usable as a bitmask).
class enum_reg_tracker(Annotated[int, ctypes.c_uint32], c.Enum): pass
TRACK_CNTL_REG = enum_reg_tracker.define('TRACK_CNTL_REG', 1)
TRACK_RENDER_CNTL = enum_reg_tracker.define('TRACK_RENDER_CNTL', 2)
UNK_EVENT_WRITE = enum_reg_tracker.define('UNK_EVENT_WRITE', 4)
TRACK_LRZ = enum_reg_tracker.define('TRACK_LRZ', 8)
# Timestamp-wait comparison source and wait location.
class enum_ts_wait_value_src(Annotated[int, ctypes.c_uint32], c.Enum): pass
TS_WAIT_GE_32B = enum_ts_wait_value_src.define('TS_WAIT_GE_32B', 0)
TS_WAIT_GE_64B = enum_ts_wait_value_src.define('TS_WAIT_GE_64B', 1)
TS_WAIT_GE_TIMESTAMP_SUM = enum_ts_wait_value_src.define('TS_WAIT_GE_TIMESTAMP_SUM', 2)
class enum_ts_wait_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
TS_WAIT_RAM = enum_ts_wait_type.define('TS_WAIT_RAM', 0)
TS_WAIT_ONCHIP = enum_ts_wait_type.define('TS_WAIT_ONCHIP', 1)
# Pipe-count synchronization operations between the BR and BV pipes.
class enum_pipe_count_op(Annotated[int, ctypes.c_uint32], c.Enum): pass
PIPE_CLEAR_BV_BR = enum_pipe_count_op.define('PIPE_CLEAR_BV_BR', 1)
PIPE_SET_BR_OFFSET = enum_pipe_count_op.define('PIPE_SET_BR_OFFSET', 2)
PIPE_BR_WAIT_FOR_BV = enum_pipe_count_op.define('PIPE_BR_WAIT_FOR_BV', 3)
PIPE_BV_WAIT_FOR_BR = enum_pipe_count_op.define('PIPE_BV_WAIT_FOR_BR', 4)
# Timestamp-modify operations (clear or accumulate global/local).
class enum_timestamp_op(Annotated[int, ctypes.c_uint32], c.Enum): pass
MODIFY_TIMESTAMP_CLEAR = enum_timestamp_op.define('MODIFY_TIMESTAMP_CLEAR', 0)
MODIFY_TIMESTAMP_ADD_GLOBAL = enum_timestamp_op.define('MODIFY_TIMESTAMP_ADD_GLOBAL', 1)
MODIFY_TIMESTAMP_ADD_LOCAL = enum_timestamp_op.define('MODIFY_TIMESTAMP_ADD_LOCAL', 2)
# CP_SET_THREAD target: BR, BV, or both (BOTH == BR|BV bitwise).
class enum_cp_thread(Annotated[int, ctypes.c_uint32], c.Enum): pass
CP_SET_THREAD_BR = enum_cp_thread.define('CP_SET_THREAD_BR', 1)
CP_SET_THREAD_BV = enum_cp_thread.define('CP_SET_THREAD_BV', 2)
CP_SET_THREAD_BOTH = enum_cp_thread.define('CP_SET_THREAD_BOTH', 3)
# CP scope selector (only INTERRUPTS defined here).
class enum_cp_scope(Annotated[int, ctypes.c_uint32], c.Enum): pass
INTERRUPTS = enum_cp_scope.define('INTERRUPTS', 0)
# Surface tiling mode; 2 and 3 are unnamed tiled layouts in the upstream header.
class enum_a6xx_tile_mode(Annotated[int, ctypes.c_uint32], c.Enum): pass
TILE6_LINEAR = enum_a6xx_tile_mode.define('TILE6_LINEAR', 0)
TILE6_2 = enum_a6xx_tile_mode.define('TILE6_2', 2)
TILE6_3 = enum_a6xx_tile_mode.define('TILE6_3', 3)
# a6xx surface/texture format table. Name encodes per-channel bit widths and the
# numeric interpretation (UNORM/SNORM/UINT/SINT/FLOAT/FIXED); gaps in the value
# sequence mirror the hardware header. FMT6_NONE (255) is the sentinel.
class enum_a6xx_format(Annotated[int, ctypes.c_uint32], c.Enum): pass
FMT6_A8_UNORM = enum_a6xx_format.define('FMT6_A8_UNORM', 2)
FMT6_8_UNORM = enum_a6xx_format.define('FMT6_8_UNORM', 3)
FMT6_8_SNORM = enum_a6xx_format.define('FMT6_8_SNORM', 4)
FMT6_8_UINT = enum_a6xx_format.define('FMT6_8_UINT', 5)
FMT6_8_SINT = enum_a6xx_format.define('FMT6_8_SINT', 6)
FMT6_4_4_4_4_UNORM = enum_a6xx_format.define('FMT6_4_4_4_4_UNORM', 8)
FMT6_5_5_5_1_UNORM = enum_a6xx_format.define('FMT6_5_5_5_1_UNORM', 10)
FMT6_1_5_5_5_UNORM = enum_a6xx_format.define('FMT6_1_5_5_5_UNORM', 12)
FMT6_5_6_5_UNORM = enum_a6xx_format.define('FMT6_5_6_5_UNORM', 14)
FMT6_8_8_UNORM = enum_a6xx_format.define('FMT6_8_8_UNORM', 15)
FMT6_8_8_SNORM = enum_a6xx_format.define('FMT6_8_8_SNORM', 16)
FMT6_8_8_UINT = enum_a6xx_format.define('FMT6_8_8_UINT', 17)
FMT6_8_8_SINT = enum_a6xx_format.define('FMT6_8_8_SINT', 18)
FMT6_L8_A8_UNORM = enum_a6xx_format.define('FMT6_L8_A8_UNORM', 19)
FMT6_16_UNORM = enum_a6xx_format.define('FMT6_16_UNORM', 21)
FMT6_16_SNORM = enum_a6xx_format.define('FMT6_16_SNORM', 22)
FMT6_16_FLOAT = enum_a6xx_format.define('FMT6_16_FLOAT', 23)
FMT6_16_UINT = enum_a6xx_format.define('FMT6_16_UINT', 24)
FMT6_16_SINT = enum_a6xx_format.define('FMT6_16_SINT', 25)
FMT6_8_8_8_UNORM = enum_a6xx_format.define('FMT6_8_8_8_UNORM', 33)
FMT6_8_8_8_SNORM = enum_a6xx_format.define('FMT6_8_8_8_SNORM', 34)
FMT6_8_8_8_UINT = enum_a6xx_format.define('FMT6_8_8_8_UINT', 35)
FMT6_8_8_8_SINT = enum_a6xx_format.define('FMT6_8_8_8_SINT', 36)
FMT6_8_8_8_8_UNORM = enum_a6xx_format.define('FMT6_8_8_8_8_UNORM', 48)
FMT6_8_8_8_X8_UNORM = enum_a6xx_format.define('FMT6_8_8_8_X8_UNORM', 49)
FMT6_8_8_8_8_SNORM = enum_a6xx_format.define('FMT6_8_8_8_8_SNORM', 50)
FMT6_8_8_8_8_UINT = enum_a6xx_format.define('FMT6_8_8_8_8_UINT', 51)
FMT6_8_8_8_8_SINT = enum_a6xx_format.define('FMT6_8_8_8_8_SINT', 52)
FMT6_9_9_9_E5_FLOAT = enum_a6xx_format.define('FMT6_9_9_9_E5_FLOAT', 53)
FMT6_10_10_10_2_UNORM = enum_a6xx_format.define('FMT6_10_10_10_2_UNORM', 54)
FMT6_10_10_10_2_UNORM_DEST = enum_a6xx_format.define('FMT6_10_10_10_2_UNORM_DEST', 55)
FMT6_10_10_10_2_SNORM = enum_a6xx_format.define('FMT6_10_10_10_2_SNORM', 57)
FMT6_10_10_10_2_UINT = enum_a6xx_format.define('FMT6_10_10_10_2_UINT', 58)
FMT6_10_10_10_2_SINT = enum_a6xx_format.define('FMT6_10_10_10_2_SINT', 59)
FMT6_11_11_10_FLOAT = enum_a6xx_format.define('FMT6_11_11_10_FLOAT', 66)
FMT6_16_16_UNORM = enum_a6xx_format.define('FMT6_16_16_UNORM', 67)
FMT6_16_16_SNORM = enum_a6xx_format.define('FMT6_16_16_SNORM', 68)
FMT6_16_16_FLOAT = enum_a6xx_format.define('FMT6_16_16_FLOAT', 69)
FMT6_16_16_UINT = enum_a6xx_format.define('FMT6_16_16_UINT', 70)
FMT6_16_16_SINT = enum_a6xx_format.define('FMT6_16_16_SINT', 71)
FMT6_32_UNORM = enum_a6xx_format.define('FMT6_32_UNORM', 72)
FMT6_32_SNORM = enum_a6xx_format.define('FMT6_32_SNORM', 73)
FMT6_32_FLOAT = enum_a6xx_format.define('FMT6_32_FLOAT', 74)
FMT6_32_UINT = enum_a6xx_format.define('FMT6_32_UINT', 75)
FMT6_32_SINT = enum_a6xx_format.define('FMT6_32_SINT', 76)
FMT6_32_FIXED = enum_a6xx_format.define('FMT6_32_FIXED', 77)
FMT6_16_16_16_UNORM = enum_a6xx_format.define('FMT6_16_16_16_UNORM', 88)
FMT6_16_16_16_SNORM = enum_a6xx_format.define('FMT6_16_16_16_SNORM', 89)
FMT6_16_16_16_FLOAT = enum_a6xx_format.define('FMT6_16_16_16_FLOAT', 90)
FMT6_16_16_16_UINT = enum_a6xx_format.define('FMT6_16_16_16_UINT', 91)
FMT6_16_16_16_SINT = enum_a6xx_format.define('FMT6_16_16_16_SINT', 92)
FMT6_16_16_16_16_UNORM = enum_a6xx_format.define('FMT6_16_16_16_16_UNORM', 96)
FMT6_16_16_16_16_SNORM = enum_a6xx_format.define('FMT6_16_16_16_16_SNORM', 97)
FMT6_16_16_16_16_FLOAT = enum_a6xx_format.define('FMT6_16_16_16_16_FLOAT', 98)
FMT6_16_16_16_16_UINT = enum_a6xx_format.define('FMT6_16_16_16_16_UINT', 99)
FMT6_16_16_16_16_SINT = enum_a6xx_format.define('FMT6_16_16_16_16_SINT', 100)
FMT6_32_32_UNORM = enum_a6xx_format.define('FMT6_32_32_UNORM', 101)
FMT6_32_32_SNORM = enum_a6xx_format.define('FMT6_32_32_SNORM', 102)
FMT6_32_32_FLOAT = enum_a6xx_format.define('FMT6_32_32_FLOAT', 103)
FMT6_32_32_UINT = enum_a6xx_format.define('FMT6_32_32_UINT', 104)
FMT6_32_32_SINT = enum_a6xx_format.define('FMT6_32_32_SINT', 105)
FMT6_32_32_FIXED = enum_a6xx_format.define('FMT6_32_32_FIXED', 106)
FMT6_32_32_32_UNORM = enum_a6xx_format.define('FMT6_32_32_32_UNORM', 112)
FMT6_32_32_32_SNORM = enum_a6xx_format.define('FMT6_32_32_32_SNORM', 113)
FMT6_32_32_32_UINT = enum_a6xx_format.define('FMT6_32_32_32_UINT', 114)
FMT6_32_32_32_SINT = enum_a6xx_format.define('FMT6_32_32_32_SINT', 115)
FMT6_32_32_32_FLOAT = enum_a6xx_format.define('FMT6_32_32_32_FLOAT', 116)
FMT6_32_32_32_FIXED = enum_a6xx_format.define('FMT6_32_32_32_FIXED', 117)
FMT6_32_32_32_32_UNORM = enum_a6xx_format.define('FMT6_32_32_32_32_UNORM', 128)
FMT6_32_32_32_32_SNORM = enum_a6xx_format.define('FMT6_32_32_32_32_SNORM', 129)
FMT6_32_32_32_32_FLOAT = enum_a6xx_format.define('FMT6_32_32_32_32_FLOAT', 130)
FMT6_32_32_32_32_UINT = enum_a6xx_format.define('FMT6_32_32_32_32_UINT', 131)
FMT6_32_32_32_32_SINT = enum_a6xx_format.define('FMT6_32_32_32_32_SINT', 132)
FMT6_32_32_32_32_FIXED = enum_a6xx_format.define('FMT6_32_32_32_32_FIXED', 133)
# Video/YUV formats (packed 4:2:2, NV12/NV21 planes, P010/TP10 variants).
FMT6_G8R8B8R8_422_UNORM = enum_a6xx_format.define('FMT6_G8R8B8R8_422_UNORM', 140)
FMT6_R8G8R8B8_422_UNORM = enum_a6xx_format.define('FMT6_R8G8R8B8_422_UNORM', 141)
FMT6_R8_G8B8_2PLANE_420_UNORM = enum_a6xx_format.define('FMT6_R8_G8B8_2PLANE_420_UNORM', 142)
FMT6_NV21 = enum_a6xx_format.define('FMT6_NV21', 143)
FMT6_R8_G8_B8_3PLANE_420_UNORM = enum_a6xx_format.define('FMT6_R8_G8_B8_3PLANE_420_UNORM', 144)
FMT6_Z24_UNORM_S8_UINT_AS_R8G8B8A8 = enum_a6xx_format.define('FMT6_Z24_UNORM_S8_UINT_AS_R8G8B8A8', 145)
FMT6_NV12_Y = enum_a6xx_format.define('FMT6_NV12_Y', 148)
FMT6_NV12_UV = enum_a6xx_format.define('FMT6_NV12_UV', 149)
FMT6_NV12_VU = enum_a6xx_format.define('FMT6_NV12_VU', 150)
FMT6_NV12_4R = enum_a6xx_format.define('FMT6_NV12_4R', 151)
FMT6_NV12_4R_Y = enum_a6xx_format.define('FMT6_NV12_4R_Y', 152)
FMT6_NV12_4R_UV = enum_a6xx_format.define('FMT6_NV12_4R_UV', 153)
FMT6_P010 = enum_a6xx_format.define('FMT6_P010', 154)
FMT6_P010_Y = enum_a6xx_format.define('FMT6_P010_Y', 155)
FMT6_P010_UV = enum_a6xx_format.define('FMT6_P010_UV', 156)
FMT6_TP10 = enum_a6xx_format.define('FMT6_TP10', 157)
FMT6_TP10_Y = enum_a6xx_format.define('FMT6_TP10_Y', 158)
FMT6_TP10_UV = enum_a6xx_format.define('FMT6_TP10_UV', 159)
FMT6_Z24_UNORM_S8_UINT = enum_a6xx_format.define('FMT6_Z24_UNORM_S8_UINT', 160)
# Compressed texture formats (ETC/DXT/RGTC/BPTC/ASTC).
FMT6_ETC2_RG11_UNORM = enum_a6xx_format.define('FMT6_ETC2_RG11_UNORM', 171)
FMT6_ETC2_RG11_SNORM = enum_a6xx_format.define('FMT6_ETC2_RG11_SNORM', 172)
FMT6_ETC2_R11_UNORM = enum_a6xx_format.define('FMT6_ETC2_R11_UNORM', 173)
FMT6_ETC2_R11_SNORM = enum_a6xx_format.define('FMT6_ETC2_R11_SNORM', 174)
FMT6_ETC1 = enum_a6xx_format.define('FMT6_ETC1', 175)
FMT6_ETC2_RGB8 = enum_a6xx_format.define('FMT6_ETC2_RGB8', 176)
FMT6_ETC2_RGBA8 = enum_a6xx_format.define('FMT6_ETC2_RGBA8', 177)
FMT6_ETC2_RGB8A1 = enum_a6xx_format.define('FMT6_ETC2_RGB8A1', 178)
FMT6_DXT1 = enum_a6xx_format.define('FMT6_DXT1', 179)
FMT6_DXT3 = enum_a6xx_format.define('FMT6_DXT3', 180)
FMT6_DXT5 = enum_a6xx_format.define('FMT6_DXT5', 181)
FMT6_RGTC1_UNORM = enum_a6xx_format.define('FMT6_RGTC1_UNORM', 182)
FMT6_RGTC1_UNORM_FAST = enum_a6xx_format.define('FMT6_RGTC1_UNORM_FAST', 183)
FMT6_RGTC1_SNORM = enum_a6xx_format.define('FMT6_RGTC1_SNORM', 184)
FMT6_RGTC1_SNORM_FAST = enum_a6xx_format.define('FMT6_RGTC1_SNORM_FAST', 185)
FMT6_RGTC2_UNORM = enum_a6xx_format.define('FMT6_RGTC2_UNORM', 186)
FMT6_RGTC2_UNORM_FAST = enum_a6xx_format.define('FMT6_RGTC2_UNORM_FAST', 187)
FMT6_RGTC2_SNORM = enum_a6xx_format.define('FMT6_RGTC2_SNORM', 188)
FMT6_RGTC2_SNORM_FAST = enum_a6xx_format.define('FMT6_RGTC2_SNORM_FAST', 189)
FMT6_BPTC_UFLOAT = enum_a6xx_format.define('FMT6_BPTC_UFLOAT', 190)
FMT6_BPTC_FLOAT = enum_a6xx_format.define('FMT6_BPTC_FLOAT', 191)
FMT6_BPTC = enum_a6xx_format.define('FMT6_BPTC', 192)
FMT6_ASTC_4x4 = enum_a6xx_format.define('FMT6_ASTC_4x4', 193)
FMT6_ASTC_5x4 = enum_a6xx_format.define('FMT6_ASTC_5x4', 194)
FMT6_ASTC_5x5 = enum_a6xx_format.define('FMT6_ASTC_5x5', 195)
FMT6_ASTC_6x5 = enum_a6xx_format.define('FMT6_ASTC_6x5', 196)
FMT6_ASTC_6x6 = enum_a6xx_format.define('FMT6_ASTC_6x6', 197)
FMT6_ASTC_8x5 = enum_a6xx_format.define('FMT6_ASTC_8x5', 198)
FMT6_ASTC_8x6 = enum_a6xx_format.define('FMT6_ASTC_8x6', 199)
FMT6_ASTC_8x8 = enum_a6xx_format.define('FMT6_ASTC_8x8', 200)
FMT6_ASTC_10x5 = enum_a6xx_format.define('FMT6_ASTC_10x5', 201)
FMT6_ASTC_10x6 = enum_a6xx_format.define('FMT6_ASTC_10x6', 202)
FMT6_ASTC_10x8 = enum_a6xx_format.define('FMT6_ASTC_10x8', 203)
FMT6_ASTC_10x10 = enum_a6xx_format.define('FMT6_ASTC_10x10', 204)
FMT6_ASTC_12x10 = enum_a6xx_format.define('FMT6_ASTC_12x10', 205)
FMT6_ASTC_12x12 = enum_a6xx_format.define('FMT6_ASTC_12x12', 206)
FMT6_Z24_UINT_S8_UINT = enum_a6xx_format.define('FMT6_Z24_UINT_S8_UINT', 234)
FMT6_NONE = enum_a6xx_format.define('FMT6_NONE', 255)
# Polygon rasterization mode (points / lines / filled triangles).
class enum_a6xx_polygon_mode(Annotated[int, ctypes.c_uint32], c.Enum): pass
POLYMODE6_POINTS = enum_a6xx_polygon_mode.define('POLYMODE6_POINTS', 1)
POLYMODE6_LINES = enum_a6xx_polygon_mode.define('POLYMODE6_LINES', 2)
POLYMODE6_TRIANGLES = enum_a6xx_polygon_mode.define('POLYMODE6_TRIANGLES', 3)
# Depth-buffer format selector.
class enum_a6xx_depth_format(Annotated[int, ctypes.c_uint32], c.Enum): pass
DEPTH6_NONE = enum_a6xx_depth_format.define('DEPTH6_NONE', 0)
DEPTH6_16 = enum_a6xx_depth_format.define('DEPTH6_16', 1)
DEPTH6_24_8 = enum_a6xx_depth_format.define('DEPTH6_24_8', 2)
DEPTH6_32 = enum_a6xx_depth_format.define('DEPTH6_32', 4)
# Internal shader-core RAM/tag block identifiers — presumably used for crash-dump
# snapshotting of TP/SP/HLSQ state; confirm against the kernel snapshot code.
class enum_a6xx_shader_id(Annotated[int, ctypes.c_uint32], c.Enum): pass
A6XX_TP0_TMO_DATA = enum_a6xx_shader_id.define('A6XX_TP0_TMO_DATA', 9)
A6XX_TP0_SMO_DATA = enum_a6xx_shader_id.define('A6XX_TP0_SMO_DATA', 10)
A6XX_TP0_MIPMAP_BASE_DATA = enum_a6xx_shader_id.define('A6XX_TP0_MIPMAP_BASE_DATA', 11)
A6XX_TP1_TMO_DATA = enum_a6xx_shader_id.define('A6XX_TP1_TMO_DATA', 25)
A6XX_TP1_SMO_DATA = enum_a6xx_shader_id.define('A6XX_TP1_SMO_DATA', 26)
A6XX_TP1_MIPMAP_BASE_DATA = enum_a6xx_shader_id.define('A6XX_TP1_MIPMAP_BASE_DATA', 27)
A6XX_SP_INST_DATA = enum_a6xx_shader_id.define('A6XX_SP_INST_DATA', 41)
A6XX_SP_LB_0_DATA = enum_a6xx_shader_id.define('A6XX_SP_LB_0_DATA', 42)
A6XX_SP_LB_1_DATA = enum_a6xx_shader_id.define('A6XX_SP_LB_1_DATA', 43)
A6XX_SP_LB_2_DATA = enum_a6xx_shader_id.define('A6XX_SP_LB_2_DATA', 44)
A6XX_SP_LB_3_DATA = enum_a6xx_shader_id.define('A6XX_SP_LB_3_DATA', 45)
A6XX_SP_LB_4_DATA = enum_a6xx_shader_id.define('A6XX_SP_LB_4_DATA', 46)
A6XX_SP_LB_5_DATA = enum_a6xx_shader_id.define('A6XX_SP_LB_5_DATA', 47)
A6XX_SP_CB_BINDLESS_DATA = enum_a6xx_shader_id.define('A6XX_SP_CB_BINDLESS_DATA', 48)
A6XX_SP_CB_LEGACY_DATA = enum_a6xx_shader_id.define('A6XX_SP_CB_LEGACY_DATA', 49)
A6XX_SP_GFX_UAV_BASE_DATA = enum_a6xx_shader_id.define('A6XX_SP_GFX_UAV_BASE_DATA', 50)
A6XX_SP_INST_TAG = enum_a6xx_shader_id.define('A6XX_SP_INST_TAG', 51)
A6XX_SP_CB_BINDLESS_TAG = enum_a6xx_shader_id.define('A6XX_SP_CB_BINDLESS_TAG', 52)
A6XX_SP_TMO_UMO_TAG = enum_a6xx_shader_id.define('A6XX_SP_TMO_UMO_TAG', 53)
A6XX_SP_SMO_TAG = enum_a6xx_shader_id.define('A6XX_SP_SMO_TAG', 54)
A6XX_SP_STATE_DATA = enum_a6xx_shader_id.define('A6XX_SP_STATE_DATA', 55)
A6XX_HLSQ_CHUNK_CVS_RAM = enum_a6xx_shader_id.define('A6XX_HLSQ_CHUNK_CVS_RAM', 73)
A6XX_HLSQ_CHUNK_CPS_RAM = enum_a6xx_shader_id.define('A6XX_HLSQ_CHUNK_CPS_RAM', 74)
A6XX_HLSQ_CHUNK_CVS_RAM_TAG = enum_a6xx_shader_id.define('A6XX_HLSQ_CHUNK_CVS_RAM_TAG', 75)
A6XX_HLSQ_CHUNK_CPS_RAM_TAG = enum_a6xx_shader_id.define('A6XX_HLSQ_CHUNK_CPS_RAM_TAG', 76)
A6XX_HLSQ_ICB_CVS_CB_BASE_TAG = enum_a6xx_shader_id.define('A6XX_HLSQ_ICB_CVS_CB_BASE_TAG', 77)
A6XX_HLSQ_ICB_CPS_CB_BASE_TAG = enum_a6xx_shader_id.define('A6XX_HLSQ_ICB_CPS_CB_BASE_TAG', 78)
A6XX_HLSQ_CVS_MISC_RAM = enum_a6xx_shader_id.define('A6XX_HLSQ_CVS_MISC_RAM', 80)
A6XX_HLSQ_CPS_MISC_RAM = enum_a6xx_shader_id.define('A6XX_HLSQ_CPS_MISC_RAM', 81)
A6XX_HLSQ_INST_RAM = enum_a6xx_shader_id.define('A6XX_HLSQ_INST_RAM', 82)
A6XX_HLSQ_GFX_CVS_CONST_RAM = enum_a6xx_shader_id.define('A6XX_HLSQ_GFX_CVS_CONST_RAM', 83)
A6XX_HLSQ_GFX_CPS_CONST_RAM = enum_a6xx_shader_id.define('A6XX_HLSQ_GFX_CPS_CONST_RAM', 84)
A6XX_HLSQ_CVS_MISC_RAM_TAG = enum_a6xx_shader_id.define('A6XX_HLSQ_CVS_MISC_RAM_TAG', 85)
A6XX_HLSQ_CPS_MISC_RAM_TAG = enum_a6xx_shader_id.define('A6XX_HLSQ_CPS_MISC_RAM_TAG', 86)
A6XX_HLSQ_INST_RAM_TAG = enum_a6xx_shader_id.define('A6XX_HLSQ_INST_RAM_TAG', 87)
A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG = enum_a6xx_shader_id.define('A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG', 88)
A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG = enum_a6xx_shader_id.define('A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG', 89)
A6XX_HLSQ_PWR_REST_RAM = enum_a6xx_shader_id.define('A6XX_HLSQ_PWR_REST_RAM', 90)
A6XX_HLSQ_PWR_REST_TAG = enum_a6xx_shader_id.define('A6XX_HLSQ_PWR_REST_TAG', 91)
A6XX_HLSQ_DATAPATH_META = enum_a6xx_shader_id.define('A6XX_HLSQ_DATAPATH_META', 96)
A6XX_HLSQ_FRONTEND_META = enum_a6xx_shader_id.define('A6XX_HLSQ_FRONTEND_META', 97)
A6XX_HLSQ_INDIRECT_META = enum_a6xx_shader_id.define('A6XX_HLSQ_INDIRECT_META', 98)
A6XX_HLSQ_BACKEND_META = enum_a6xx_shader_id.define('A6XX_HLSQ_BACKEND_META', 99)
A6XX_SP_LB_6_DATA = enum_a6xx_shader_id.define('A6XX_SP_LB_6_DATA', 112)
A6XX_SP_LB_7_DATA = enum_a6xx_shader_id.define('A6XX_SP_LB_7_DATA', 113)
A6XX_HLSQ_INST_RAM_1 = enum_a6xx_shader_id.define('A6XX_HLSQ_INST_RAM_1', 115)
# Debug-bus client identifiers for the a6xx GPU blocks (CP, RBBM, UCHE, per-instance
# RB/VFD/SP/TPL1/SPTP units, etc.). Values are hardware IDs; gaps mirror the header.
class enum_a6xx_debugbus_id(Annotated[int, ctypes.c_uint32], c.Enum): pass
A6XX_DBGBUS_CP = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_CP', 1)
A6XX_DBGBUS_RBBM = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_RBBM', 2)
A6XX_DBGBUS_VBIF = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_VBIF', 3)
A6XX_DBGBUS_HLSQ = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_HLSQ', 4)
A6XX_DBGBUS_UCHE = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_UCHE', 5)
A6XX_DBGBUS_DPM = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_DPM', 6)
A6XX_DBGBUS_TESS = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_TESS', 7)
A6XX_DBGBUS_PC = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_PC', 8)
A6XX_DBGBUS_VFDP = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_VFDP', 9)
A6XX_DBGBUS_VPC = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_VPC', 10)
A6XX_DBGBUS_TSE = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_TSE', 11)
A6XX_DBGBUS_RAS = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_RAS', 12)
A6XX_DBGBUS_VSC = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_VSC', 13)
A6XX_DBGBUS_COM = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_COM', 14)
A6XX_DBGBUS_LRZ = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_LRZ', 16)
A6XX_DBGBUS_A2D = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_A2D', 17)
A6XX_DBGBUS_CCUFCHE = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_CCUFCHE', 18)
A6XX_DBGBUS_GMU_CX = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_GMU_CX', 19)
A6XX_DBGBUS_RBP = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_RBP', 20)
A6XX_DBGBUS_DCS = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_DCS', 21)
A6XX_DBGBUS_DBGC = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_DBGC', 22)
A6XX_DBGBUS_CX = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_CX', 23)
A6XX_DBGBUS_GMU_GX = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_GMU_GX', 24)
A6XX_DBGBUS_TPFCHE = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_TPFCHE', 25)
A6XX_DBGBUS_GBIF_GX = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_GBIF_GX', 26)
A6XX_DBGBUS_GPC = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_GPC', 29)
A6XX_DBGBUS_LARC = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_LARC', 30)
A6XX_DBGBUS_HLSQ_SPTP = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_HLSQ_SPTP', 31)
A6XX_DBGBUS_RB_0 = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_RB_0', 32)
A6XX_DBGBUS_RB_1 = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_RB_1', 33)
A6XX_DBGBUS_RB_2 = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_RB_2', 34)
A6XX_DBGBUS_UCHE_WRAPPER = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_UCHE_WRAPPER', 36)
A6XX_DBGBUS_CCU_0 = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_CCU_0', 40)
A6XX_DBGBUS_CCU_1 = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_CCU_1', 41)
A6XX_DBGBUS_CCU_2 = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_CCU_2', 42)
A6XX_DBGBUS_VFD_0 = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_VFD_0', 56)
A6XX_DBGBUS_VFD_1 = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_VFD_1', 57)
A6XX_DBGBUS_VFD_2 = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_VFD_2', 58)
A6XX_DBGBUS_VFD_3 = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_VFD_3', 59)
A6XX_DBGBUS_VFD_4 = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_VFD_4', 60)
A6XX_DBGBUS_VFD_5 = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_VFD_5', 61)
A6XX_DBGBUS_SP_0 = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_SP_0', 64)
A6XX_DBGBUS_SP_1 = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_SP_1', 65)
A6XX_DBGBUS_SP_2 = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_SP_2', 66)
A6XX_DBGBUS_TPL1_0 = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_TPL1_0', 72)
A6XX_DBGBUS_TPL1_1 = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_TPL1_1', 73)
A6XX_DBGBUS_TPL1_2 = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_TPL1_2', 74)
A6XX_DBGBUS_TPL1_3 = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_TPL1_3', 75)
A6XX_DBGBUS_TPL1_4 = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_TPL1_4', 76)
A6XX_DBGBUS_TPL1_5 = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_TPL1_5', 77)
A6XX_DBGBUS_SPTP_0 = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_SPTP_0', 88)
A6XX_DBGBUS_SPTP_1 = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_SPTP_1', 89)
A6XX_DBGBUS_SPTP_2 = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_SPTP_2', 90)
A6XX_DBGBUS_SPTP_3 = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_SPTP_3', 91)
A6XX_DBGBUS_SPTP_4 = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_SPTP_4', 92)
A6XX_DBGBUS_SPTP_5 = enum_a6xx_debugbus_id.define('A6XX_DBGBUS_SPTP_5', 93)
# Internal 2D-engine sample format (note: declared in descending-value order).
class enum_a6xx_2d_ifmt(Annotated[int, ctypes.c_uint32], c.Enum): pass
R2D_INT32 = enum_a6xx_2d_ifmt.define('R2D_INT32', 7)
R2D_INT16 = enum_a6xx_2d_ifmt.define('R2D_INT16', 6)
R2D_INT8 = enum_a6xx_2d_ifmt.define('R2D_INT8', 5)
R2D_FLOAT32 = enum_a6xx_2d_ifmt.define('R2D_FLOAT32', 4)
R2D_FLOAT16 = enum_a6xx_2d_ifmt.define('R2D_FLOAT16', 3)
R2D_SNORM8 = enum_a6xx_2d_ifmt.define('R2D_SNORM8', 2)
R2D_UNORM8_SRGB = enum_a6xx_2d_ifmt.define('R2D_UNORM8_SRGB', 1)
R2D_UNORM8 = enum_a6xx_2d_ifmt.define('R2D_UNORM8', 0)
# Texture dimensionality / resource type.
class enum_a6xx_tex_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
A6XX_TEX_1D = enum_a6xx_tex_type.define('A6XX_TEX_1D', 0)
A6XX_TEX_2D = enum_a6xx_tex_type.define('A6XX_TEX_2D', 1)
A6XX_TEX_CUBE = enum_a6xx_tex_type.define('A6XX_TEX_CUBE', 2)
A6XX_TEX_3D = enum_a6xx_tex_type.define('A6XX_TEX_3D', 3)
A6XX_TEX_BUFFER = enum_a6xx_tex_type.define('A6XX_TEX_BUFFER', 4)
A6XX_TEX_IMG_BUFFER = enum_a6xx_tex_type.define('A6XX_TEX_IMG_BUFFER', 5)
# Early-Z / late-Z depth-test scheduling mode.
class enum_a6xx_ztest_mode(Annotated[int, ctypes.c_uint32], c.Enum): pass
A6XX_EARLY_Z = enum_a6xx_ztest_mode.define('A6XX_EARLY_Z', 0)
A6XX_LATE_Z = enum_a6xx_ztest_mode.define('A6XX_LATE_Z', 1)
A6XX_EARLY_Z_LATE_Z = enum_a6xx_ztest_mode.define('A6XX_EARLY_Z_LATE_Z', 2)
A6XX_INVALID_ZTEST = enum_a6xx_ztest_mode.define('A6XX_INVALID_ZTEST', 3)
# Tessellation spacing (value 1 absent — gap mirrors the header) and output topology.
class enum_a6xx_tess_spacing(Annotated[int, ctypes.c_uint32], c.Enum): pass
TESS_EQUAL = enum_a6xx_tess_spacing.define('TESS_EQUAL', 0)
TESS_FRACTIONAL_ODD = enum_a6xx_tess_spacing.define('TESS_FRACTIONAL_ODD', 2)
TESS_FRACTIONAL_EVEN = enum_a6xx_tess_spacing.define('TESS_FRACTIONAL_EVEN', 3)
class enum_a6xx_tess_output(Annotated[int, ctypes.c_uint32], c.Enum): pass
TESS_POINTS = enum_a6xx_tess_output.define('TESS_POINTS', 0)
TESS_LINES = enum_a6xx_tess_output.define('TESS_LINES', 1)
TESS_CW_TRIS = enum_a6xx_tess_output.define('TESS_CW_TRIS', 2)
TESS_CCW_TRIS = enum_a6xx_tess_output.define('TESS_CCW_TRIS', 3)
# Sampler filtering mode.
class enum_a6xx_tex_filter(Annotated[int, ctypes.c_uint32], c.Enum): pass
A6XX_TEX_NEAREST = enum_a6xx_tex_filter.define('A6XX_TEX_NEAREST', 0)
A6XX_TEX_LINEAR = enum_a6xx_tex_filter.define('A6XX_TEX_LINEAR', 1)
A6XX_TEX_ANISO = enum_a6xx_tex_filter.define('A6XX_TEX_ANISO', 2)
A6XX_TEX_CUBIC = enum_a6xx_tex_filter.define('A6XX_TEX_CUBIC', 3)
# Sampler wrap/clamp mode.
class enum_a6xx_tex_clamp(Annotated[int, ctypes.c_uint32], c.Enum): pass
A6XX_TEX_REPEAT = enum_a6xx_tex_clamp.define('A6XX_TEX_REPEAT', 0)
A6XX_TEX_CLAMP_TO_EDGE = enum_a6xx_tex_clamp.define('A6XX_TEX_CLAMP_TO_EDGE', 1)
A6XX_TEX_MIRROR_REPEAT = enum_a6xx_tex_clamp.define('A6XX_TEX_MIRROR_REPEAT', 2)
A6XX_TEX_CLAMP_TO_BORDER = enum_a6xx_tex_clamp.define('A6XX_TEX_CLAMP_TO_BORDER', 3)
A6XX_TEX_MIRROR_CLAMP = enum_a6xx_tex_clamp.define('A6XX_TEX_MIRROR_CLAMP', 4)
# Max anisotropy: encoded value n means 2**n samples (1..16).
class enum_a6xx_tex_aniso(Annotated[int, ctypes.c_uint32], c.Enum): pass
A6XX_TEX_ANISO_1 = enum_a6xx_tex_aniso.define('A6XX_TEX_ANISO_1', 0)
A6XX_TEX_ANISO_2 = enum_a6xx_tex_aniso.define('A6XX_TEX_ANISO_2', 1)
A6XX_TEX_ANISO_4 = enum_a6xx_tex_aniso.define('A6XX_TEX_ANISO_4', 2)
A6XX_TEX_ANISO_8 = enum_a6xx_tex_aniso.define('A6XX_TEX_ANISO_8', 3)
A6XX_TEX_ANISO_16 = enum_a6xx_tex_aniso.define('A6XX_TEX_ANISO_16', 4)
# Sampler reduction mode (average / min / max).
class enum_a6xx_reduction_mode(Annotated[int, ctypes.c_uint32], c.Enum): pass
A6XX_REDUCTION_MODE_AVERAGE = enum_a6xx_reduction_mode.define('A6XX_REDUCTION_MODE_AVERAGE', 0)
A6XX_REDUCTION_MODE_MIN = enum_a6xx_reduction_mode.define('A6XX_REDUCTION_MODE_MIN', 1)
A6XX_REDUCTION_MODE_MAX = enum_a6xx_reduction_mode.define('A6XX_REDUCTION_MODE_MAX', 2)
# Fast-path border colors; name encodes the RGBA value.
class enum_a6xx_fast_border_color(Annotated[int, ctypes.c_uint32], c.Enum): pass
A6XX_BORDER_COLOR_0_0_0_0 = enum_a6xx_fast_border_color.define('A6XX_BORDER_COLOR_0_0_0_0', 0)
A6XX_BORDER_COLOR_0_0_0_1 = enum_a6xx_fast_border_color.define('A6XX_BORDER_COLOR_0_0_0_1', 1)
A6XX_BORDER_COLOR_1_1_1_0 = enum_a6xx_fast_border_color.define('A6XX_BORDER_COLOR_1_1_1_0', 2)
A6XX_BORDER_COLOR_1_1_1_1 = enum_a6xx_fast_border_color.define('A6XX_BORDER_COLOR_1_1_1_1', 3)
# Texture channel swizzle source (X/Y/Z/W component, constant zero, constant one).
class enum_a6xx_tex_swiz(Annotated[int, ctypes.c_uint32], c.Enum): pass
A6XX_TEX_X = enum_a6xx_tex_swiz.define('A6XX_TEX_X', 0)
A6XX_TEX_Y = enum_a6xx_tex_swiz.define('A6XX_TEX_Y', 1)
A6XX_TEX_Z = enum_a6xx_tex_swiz.define('A6XX_TEX_Z', 2)
A6XX_TEX_W = enum_a6xx_tex_swiz.define('A6XX_TEX_W', 3)
A6XX_TEX_ZERO = enum_a6xx_tex_swiz.define('A6XX_TEX_ZERO', 4)
A6XX_TEX_ONE = enum_a6xx_tex_swiz.define('A6XX_TEX_ONE', 5)
# Finalize all ctypes record (struct/union) types declared above — presumably
# resolves forward references and lays out fields; see the `c` helper module.
c.init_records()
# --- NIR (Mesa IR) debug flags and helper macros, translated from nir.h ---
# Individual NIR_DEBUG bits; combine to select debug behaviors per shader stage.
NIR_DEBUG_CLONE = (1 << 0) # type: ignore
NIR_DEBUG_SERIALIZE = (1 << 1) # type: ignore
NIR_DEBUG_NOVALIDATE = (1 << 2) # type: ignore
NIR_DEBUG_EXTENDED_VALIDATION = (1 << 3) # type: ignore
NIR_DEBUG_TGSI = (1 << 4) # type: ignore
NIR_DEBUG_PRINT_VS = (1 << 5) # type: ignore
NIR_DEBUG_PRINT_TCS = (1 << 6) # type: ignore
NIR_DEBUG_PRINT_TES = (1 << 7) # type: ignore
NIR_DEBUG_PRINT_GS = (1 << 8) # type: ignore
NIR_DEBUG_PRINT_FS = (1 << 9) # type: ignore
NIR_DEBUG_PRINT_CS = (1 << 10) # type: ignore
NIR_DEBUG_PRINT_TS = (1 << 11) # type: ignore
NIR_DEBUG_PRINT_MS = (1 << 12) # type: ignore
NIR_DEBUG_PRINT_RGS = (1 << 13) # type: ignore
NIR_DEBUG_PRINT_AHS = (1 << 14) # type: ignore
NIR_DEBUG_PRINT_CHS = (1 << 15) # type: ignore
NIR_DEBUG_PRINT_MHS = (1 << 16) # type: ignore
NIR_DEBUG_PRINT_IS = (1 << 17) # type: ignore
NIR_DEBUG_PRINT_CBS = (1 << 18) # type: ignore
NIR_DEBUG_PRINT_KS = (1 << 19) # type: ignore
NIR_DEBUG_PRINT_NO_INLINE_CONSTS = (1 << 20) # type: ignore
NIR_DEBUG_PRINT_INTERNAL = (1 << 21) # type: ignore
NIR_DEBUG_PRINT_PASS_FLAGS = (1 << 22) # type: ignore
NIR_DEBUG_INVALIDATE_METADATA = (1 << 23) # type: ignore
NIR_DEBUG_PRINT_STRUCT_DECLS = (1 << 24) # type: ignore
# Union of every per-stage PRINT bit above.
NIR_DEBUG_PRINT = (NIR_DEBUG_PRINT_VS | NIR_DEBUG_PRINT_TCS | NIR_DEBUG_PRINT_TES | NIR_DEBUG_PRINT_GS | NIR_DEBUG_PRINT_FS | NIR_DEBUG_PRINT_CS | NIR_DEBUG_PRINT_TS | NIR_DEBUG_PRINT_MS | NIR_DEBUG_PRINT_RGS | NIR_DEBUG_PRINT_AHS | NIR_DEBUG_PRINT_CHS | NIR_DEBUG_PRINT_MHS | NIR_DEBUG_PRINT_IS | NIR_DEBUG_PRINT_CBS | NIR_DEBUG_PRINT_KS) # type: ignore
NIR_FALSE = 0 # type: ignore
# NOTE(review): in the C header `~0` is an unsigned all-ones word; in Python
# `~0` is -1 (same bit pattern only after masking to a fixed width) — confirm
# consumers mask this before comparing against hardware/C values.
NIR_TRUE = (~0) # type: ignore
NIR_MAX_VEC_COMPONENTS = 16 # type: ignore
NIR_MAX_MATRIX_COLUMNS = 4 # type: ignore
NIR_STREAM_PACKED = (1 << 8) # type: ignore
NIR_VARIABLE_NO_INDEX = ~0 # type: ignore
# nir_foreach_* C macros translated mechanically into lambdas; they only work
# if the referenced helpers (foreach_list_typed, nir_foreach_variable_with_modes,
# nir_var_* mode constants) are defined elsewhere in this module.
nir_foreach_variable_in_list = lambda var,var_list: foreach_list_typed(nir_variable, var, node, var_list) # type: ignore
nir_foreach_variable_in_list_safe = lambda var,var_list: foreach_list_typed_safe(nir_variable, var, node, var_list) # type: ignore
nir_foreach_shader_in_variable = lambda var,shader: nir_foreach_variable_with_modes(var, shader, nir_var_shader_in) # type: ignore
nir_foreach_shader_in_variable_safe = lambda var,shader: nir_foreach_variable_with_modes_safe(var, shader, nir_var_shader_in) # type: ignore
nir_foreach_shader_out_variable = lambda var,shader: nir_foreach_variable_with_modes(var, shader, nir_var_shader_out) # type: ignore
nir_foreach_shader_out_variable_safe = lambda var,shader: nir_foreach_variable_with_modes_safe(var, shader, nir_var_shader_out) # type: ignore
nir_foreach_uniform_variable = lambda var,shader: nir_foreach_variable_with_modes(var, shader, nir_var_uniform) # type: ignore
nir_foreach_uniform_variable_safe = lambda var,shader: nir_foreach_variable_with_modes_safe(var, shader, nir_var_uniform) # type: ignore
nir_foreach_image_variable = lambda var,shader: nir_foreach_variable_with_modes(var, shader, nir_var_image) # type: ignore
nir_foreach_image_variable_safe = lambda var,shader: nir_foreach_variable_with_modes_safe(var, shader, nir_var_image) # type: ignore
NIR_SRC_PARENT_IS_IF = (0x1) # type: ignore
NIR_ALU_MAX_INPUTS = NIR_MAX_VEC_COMPONENTS # type: ignore
NIR_INTRINSIC_MAX_CONST_INDEX = 8 # type: ignore
NIR_ALIGN_MUL_MAX = 0x40000000 # type: ignore
NIR_INTRINSIC_MAX_INPUTS = 11 # type: ignore
# Shader-logging convenience macros (severity-tagged wrappers).
nir_log_shadere = lambda s: nir_log_shader_annotated_tagged(MESA_LOG_ERROR, (MESA_LOG_TAG), (s), NULL) # type: ignore
nir_log_shaderw = lambda s: nir_log_shader_annotated_tagged(MESA_LOG_WARN, (MESA_LOG_TAG), (s), NULL) # type: ignore
nir_log_shaderi = lambda s: nir_log_shader_annotated_tagged(MESA_LOG_INFO, (MESA_LOG_TAG), (s), NULL) # type: ignore
nir_log_shader_annotated = lambda s,annotations: nir_log_shader_annotated_tagged(MESA_LOG_ERROR, (MESA_LOG_TAG), (s), annotations) # type: ignore
NIR_STRINGIZE = lambda x: NIR_STRINGIZE_INNER(x) # type: ignore
# --- NAK (NVIDIA compiler backend) constants ---
NVIDIA_VENDOR_ID = 0x10de # type: ignore
NAK_SUBGROUP_SIZE = 32 # type: ignore
NAK_QMD_ALIGN_B = 256 # type: ignore
NAK_MAX_QMD_SIZE_B = 384 # type: ignore
# NOTE(review): Python `/` makes this a float (96.0); the C original was
# integer division. Callers needing an int must convert — generator artifact.
NAK_MAX_QMD_DWORDS = (NAK_MAX_QMD_SIZE_B / 4) # type: ignore
# --- llvmpipe (LP) JIT constants and struct-field accessor macros ---
LP_MAX_VECTOR_WIDTH = 512 # type: ignore
LP_MIN_VECTOR_ALIGN = 64 # type: ignore
# NOTE(review): `/` yields a float (64.0) here, unlike the C integer division.
LP_MAX_VECTOR_LENGTH = (LP_MAX_VECTOR_WIDTH/8) # type: ignore
LP_RESV_FUNC_ARGS = 2 # type: ignore
LP_JIT_TEXTURE_SAMPLE_STRIDE = 15 # type: ignore
# lp_jit_* accessors: fetch a pointer to a named field of the JIT resources /
# vertex-header structs via lp_build_struct__get_ptr2 (defined elsewhere).
lp_jit_resources_constants = lambda _gallivm,_type,_ptr: lp_build_struct__get_ptr2(_gallivm, _type, _ptr, LP_JIT_RES_CONSTANTS, "constants") # type: ignore
lp_jit_resources_ssbos = lambda _gallivm,_type,_ptr: lp_build_struct__get_ptr2(_gallivm, _type, _ptr, LP_JIT_RES_SSBOS, "ssbos") # type: ignore
lp_jit_resources_textures = lambda _gallivm,_type,_ptr: lp_build_struct__get_ptr2(_gallivm, _type, _ptr, LP_JIT_RES_TEXTURES, "textures") # type: ignore
lp_jit_resources_samplers = lambda _gallivm,_type,_ptr: lp_build_struct__get_ptr2(_gallivm, _type, _ptr, LP_JIT_RES_SAMPLERS, "samplers") # type: ignore
lp_jit_resources_images = lambda _gallivm,_type,_ptr: lp_build_struct__get_ptr2(_gallivm, _type, _ptr, LP_JIT_RES_IMAGES, "images") # type: ignore
lp_jit_vertex_header_id = lambda _gallivm,_type,_ptr: lp_build_struct__get_ptr2(_gallivm, _type, _ptr, LP_JIT_VERTEX_HEADER_VERTEX_ID, "id") # type: ignore
lp_jit_vertex_header_clip_pos = lambda _gallivm,_type,_ptr: lp_build_struct__get_ptr2(_gallivm, _type, _ptr, LP_JIT_VERTEX_HEADER_CLIP_POS, "clip_pos") # type: ignore
lp_jit_vertex_header_data = lambda _gallivm,_type,_ptr: lp_build_struct__get_ptr2(_gallivm, _type, _ptr, LP_JIT_VERTEX_HEADER_DATA, "data") # type: ignore
LP_MAX_TEX_FUNC_ARGS = 32 # type: ignore
# --- freedreno/ir3 compiler constants and driver-param helper macros ---
A6XX_CCU_DEPTH_SIZE = (64 * 1024) # type: ignore
A6XX_CCU_GMEM_COLOR_SIZE = (16 * 1024) # type: ignore
# Offset/size of a struct member expressed in 32-bit dwords (rounded up).
dword_offsetof = lambda type,name: DIV_ROUND_UP(offsetof(type, name), 4) # type: ignore
dword_sizeof = lambda type: DIV_ROUND_UP(sizeof(type), 4) # type: ignore
# Per-stage driver-param dword offsets into the ir3 driver-params structs.
IR3_DP_CS = lambda name: dword_offsetof(struct_ir3_driver_params_cs, name) # type: ignore
IR3_DP_VS = lambda name: dword_offsetof(struct_ir3_driver_params_vs, name) # type: ignore
IR3_DP_TCS = lambda name: dword_offsetof(struct_ir3_driver_params_tcs, name) # type: ignore
IR3_DP_FS = lambda name: dword_offsetof(struct_ir3_driver_params_fs, name) # type: ignore
IR3_MAX_SHADER_BUFFERS = 32 # type: ignore
IR3_MAX_SHADER_IMAGES = 32 # type: ignore
IR3_MAX_SO_BUFFERS = 4 # type: ignore
IR3_MAX_SO_STREAMS = 4 # type: ignore
IR3_MAX_SO_OUTPUTS = 128 # type: ignore
IR3_MAX_UBO_PUSH_RANGES = 32 # type: ignore
IR3_MAX_SAMPLER_PREFETCH = 4 # type: ignore
IR3_SAMPLER_PREFETCH_CMD = 0x4 # type: ignore
IR3_SAMPLER_BINDLESS_PREFETCH_CMD = 0x6 # type: ignore
# Tessellation primitive modes used by ir3.
IR3_TESS_NONE = 0 # type: ignore
IR3_TESS_QUADS = 1 # type: ignore
IR3_TESS_TRIANGLES = 2 # type: ignore
IR3_TESS_ISOLINES = 3 # type: ignore
UAV_INVALID = 0xff # type: ignore
UAV_SSBO = 0x80 # type: ignore
HALF_REG_ID = 0x100 # type: ignore
# Garbage-collected allocation macros (gc_*_size / MAX2 / alignof defined elsewhere).
gc_alloc = lambda ctx,type,count: gc_alloc_size(ctx, sizeof(type) * (count), alignof(type)) # type: ignore
gc_zalloc = lambda ctx,type,count: gc_zalloc_size(ctx, sizeof(type) * (count), alignof(type)) # type: ignore
gc_alloc_zla = lambda ctx,type,type2,count: gc_alloc_size(ctx, sizeof(type) + sizeof(type2) * (count), MAX2(alignof(type), alignof(type2))) # type: ignore
gc_zalloc_zla = lambda ctx,type,type2,count: gc_zalloc_size(ctx, sizeof(type) + sizeof(type2) * (count), MAX2(alignof(type), alignof(type2))) # type: ignore
DECLARE_RALLOC_CXX_OPERATORS = lambda type: DECLARE_RALLOC_CXX_OPERATORS_TEMPLATE(type, ralloc_size) # type: ignore
DECLARE_RZALLOC_CXX_OPERATORS = lambda type: DECLARE_RALLOC_CXX_OPERATORS_TEMPLATE(type, rzalloc_size) # type: ignore
DECLARE_LINEAR_ALLOC_CXX_OPERATORS = lambda type: DECLARE_LINEAR_ALLOC_CXX_OPERATORS_TEMPLATE(type, linear_alloc_child) # type: ignore
DECLARE_LINEAR_ZALLOC_CXX_OPERATORS = lambda type: DECLARE_LINEAR_ALLOC_CXX_OPERATORS_TEMPLATE(type, linear_zalloc_child) # type: ignore
# NOTE(review): `scope` is not a parameter here — this lambda only works if a
# module-level `scope` exists; likely a C-macro translation artifact.
ISA_GPU_ID = lambda: ir3_isa_get_gpu_id(scope) # type: ignore
# NOTE(review): broken macro translation — references `struct_X` literally
# instead of deriving a name from the argument X; calling this raises
# NameError. Left as-is to stay faithful to the generator's output.
__struct__cast = lambda X: (struct_X) # type: ignore
# --- A6XX RBBM interrupt-0 mask bits (one bit per interrupt source) ---
# NOTE(review): CP_RB and PM4CPINTERRUPT share bit 0x8000 in the generated
# header — presumably generation-dependent aliases; verify against a6xx.xml.
A6XX_RBBM_INT_0_MASK_RBBM_GPU_IDLE = 0x00000001 # type: ignore
A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR = 0x00000002 # type: ignore
A6XX_RBBM_INT_0_MASK_CP_IPC_INTR_0 = 0x00000010 # type: ignore
A6XX_RBBM_INT_0_MASK_CP_IPC_INTR_1 = 0x00000020 # type: ignore
A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW = 0x00000040 # type: ignore
A6XX_RBBM_INT_0_MASK_RBBM_GPC_ERROR = 0x00000080 # type: ignore
A6XX_RBBM_INT_0_MASK_CP_SW = 0x00000100 # type: ignore
A6XX_RBBM_INT_0_MASK_CP_HW_ERROR = 0x00000200 # type: ignore
A6XX_RBBM_INT_0_MASK_CP_CCU_FLUSH_DEPTH_TS = 0x00000400 # type: ignore
A6XX_RBBM_INT_0_MASK_CP_CCU_FLUSH_COLOR_TS = 0x00000800 # type: ignore
A6XX_RBBM_INT_0_MASK_CP_CCU_RESOLVE_TS = 0x00001000 # type: ignore
A6XX_RBBM_INT_0_MASK_CP_IB2 = 0x00002000 # type: ignore
A6XX_RBBM_INT_0_MASK_CP_IB1 = 0x00004000 # type: ignore
A6XX_RBBM_INT_0_MASK_CP_RB = 0x00008000 # type: ignore
A6XX_RBBM_INT_0_MASK_PM4CPINTERRUPT = 0x00008000 # type: ignore
A6XX_RBBM_INT_0_MASK_PM4CPINTERRUPTLPAC = 0x00010000 # type: ignore
A6XX_RBBM_INT_0_MASK_CP_RB_DONE_TS = 0x00020000 # type: ignore
A6XX_RBBM_INT_0_MASK_CP_WT_DONE_TS = 0x00040000 # type: ignore
A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS = 0x00100000 # type: ignore
A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS_LPAC = 0x00200000 # type: ignore
A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW = 0x00400000 # type: ignore
A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT = 0x00800000 # type: ignore
A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS = 0x01000000 # type: ignore
A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR = 0x02000000 # type: ignore
A6XX_RBBM_INT_0_MASK_DEBBUS_INTR_0 = 0x04000000 # type: ignore
A6XX_RBBM_INT_0_MASK_DEBBUS_INTR_1 = 0x08000000 # type: ignore
A6XX_RBBM_INT_0_MASK_TSBWRITEERROR = 0x10000000 # type: ignore
A6XX_RBBM_INT_0_MASK_SWFUSEVIOLATION = 0x20000000 # type: ignore
A6XX_RBBM_INT_0_MASK_ISDB_CPU_IRQ = 0x40000000 # type: ignore
A6XX_RBBM_INT_0_MASK_ISDB_UNDER_DEBUG = 0x80000000 # type: ignore
# --- A6XX CP (command processor) error-interrupt bits; *_LPAC and *_BV are
# the low-priority-async-compute and "bin visibility" pipe variants.
A6XX_CP_INT_CP_OPCODE_ERROR = 0x00000001 # type: ignore
A6XX_CP_INT_CP_UCODE_ERROR = 0x00000002 # type: ignore
A6XX_CP_INT_CP_HW_FAULT_ERROR = 0x00000004 # type: ignore
A6XX_CP_INT_CP_REGISTER_PROTECTION_ERROR = 0x00000010 # type: ignore
A6XX_CP_INT_CP_AHB_ERROR = 0x00000020 # type: ignore
A6XX_CP_INT_CP_VSD_PARITY_ERROR = 0x00000040 # type: ignore
A6XX_CP_INT_CP_ILLEGAL_INSTR_ERROR = 0x00000080 # type: ignore
A6XX_CP_INT_CP_OPCODE_ERROR_LPAC = 0x00000100 # type: ignore
A6XX_CP_INT_CP_UCODE_ERROR_LPAC = 0x00000200 # type: ignore
A6XX_CP_INT_CP_HW_FAULT_ERROR_LPAC = 0x00000400 # type: ignore
A6XX_CP_INT_CP_REGISTER_PROTECTION_ERROR_LPAC = 0x00000800 # type: ignore
A6XX_CP_INT_CP_ILLEGAL_INSTR_ERROR_LPAC = 0x00001000 # type: ignore
A6XX_CP_INT_CP_OPCODE_ERROR_BV = 0x00002000 # type: ignore
A6XX_CP_INT_CP_UCODE_ERROR_BV = 0x00004000 # type: ignore
A6XX_CP_INT_CP_HW_FAULT_ERROR_BV = 0x00008000 # type: ignore
A6XX_CP_INT_CP_REGISTER_PROTECTION_ERROR_BV = 0x00010000 # type: ignore
A6XX_CP_INT_CP_ILLEGAL_INSTR_ERROR_BV = 0x00020000 # type: ignore
REG_A6XX_CP_RB_BASE = 0x00000800 # type: ignore
REG_A6XX_CP_RB_CNTL = 0x00000802 # type: ignore
REG_A6XX_CP_RB_RPTR_ADDR = 0x00000804 # type: ignore
REG_A6XX_CP_RB_RPTR = 0x00000806 # type: ignore
REG_A6XX_CP_RB_WPTR = 0x00000807 # type: ignore
REG_A6XX_CP_SQE_CNTL = 0x00000808 # type: ignore
REG_A6XX_CP_CP2GMU_STATUS = 0x00000812 # type: ignore
A6XX_CP_CP2GMU_STATUS_IFPC = 0x00000001 # type: ignore
REG_A6XX_CP_HW_FAULT = 0x00000821 # type: ignore
REG_A6XX_CP_INTERRUPT_STATUS = 0x00000823 # type: ignore
REG_A6XX_CP_PROTECT_STATUS = 0x00000824 # type: ignore
REG_A6XX_CP_STATUS_1 = 0x00000825 # type: ignore
REG_A6XX_CP_SQE_INSTR_BASE = 0x00000830 # type: ignore
REG_A6XX_CP_MISC_CNTL = 0x00000840 # type: ignore
REG_A6XX_CP_APRIV_CNTL = 0x00000844 # type: ignore
A6XX_CP_APRIV_CNTL_CDWRITE = 0x00000040 # type: ignore
A6XX_CP_APRIV_CNTL_CDREAD = 0x00000020 # type: ignore
A6XX_CP_APRIV_CNTL_RBRPWB = 0x00000008 # type: ignore
A6XX_CP_APRIV_CNTL_RBPRIVLEVEL = 0x00000004 # type: ignore
A6XX_CP_APRIV_CNTL_RBFETCH = 0x00000002 # type: ignore
A6XX_CP_APRIV_CNTL_ICACHE = 0x00000001 # type: ignore
REG_A6XX_CP_PREEMPT_THRESHOLD = 0x000008c0 # type: ignore
REG_A6XX_CP_ROQ_THRESHOLDS_1 = 0x000008c1 # type: ignore
A6XX_CP_ROQ_THRESHOLDS_1_MRB_START__MASK = 0x000000ff # type: ignore
A6XX_CP_ROQ_THRESHOLDS_1_MRB_START__SHIFT = 0 # type: ignore
A6XX_CP_ROQ_THRESHOLDS_1_VSD_START__MASK = 0x0000ff00 # type: ignore
A6XX_CP_ROQ_THRESHOLDS_1_VSD_START__SHIFT = 8 # type: ignore
A6XX_CP_ROQ_THRESHOLDS_1_IB1_START__MASK = 0x00ff0000 # type: ignore
A6XX_CP_ROQ_THRESHOLDS_1_IB1_START__SHIFT = 16 # type: ignore
A6XX_CP_ROQ_THRESHOLDS_1_IB2_START__MASK = 0xff000000 # type: ignore
A6XX_CP_ROQ_THRESHOLDS_1_IB2_START__SHIFT = 24 # type: ignore
REG_A6XX_CP_ROQ_THRESHOLDS_2 = 0x000008c2 # type: ignore
A6XX_CP_ROQ_THRESHOLDS_2_SDS_START__MASK = 0x000001ff # type: ignore
A6XX_CP_ROQ_THRESHOLDS_2_SDS_START__SHIFT = 0 # type: ignore
A6XX_CP_ROQ_THRESHOLDS_2_ROQ_SIZE__MASK = 0xffff0000 # type: ignore
A6XX_CP_ROQ_THRESHOLDS_2_ROQ_SIZE__SHIFT = 16 # type: ignore
REG_A6XX_CP_MEM_POOL_SIZE = 0x000008c3 # type: ignore
REG_A6XX_CP_CHICKEN_DBG = 0x00000841 # type: ignore
REG_A6XX_CP_ADDR_MODE_CNTL = 0x00000842 # type: ignore
REG_A6XX_CP_DBG_ECO_CNTL = 0x00000843 # type: ignore
REG_A6XX_CP_PROTECT_CNTL = 0x0000084f # type: ignore
A6XX_CP_PROTECT_CNTL_LAST_SPAN_INF_RANGE = 0x00000008 # type: ignore
A6XX_CP_PROTECT_CNTL_ACCESS_FAULT_ON_VIOL_EN = 0x00000002 # type: ignore
A6XX_CP_PROTECT_CNTL_ACCESS_PROT_EN = 0x00000001 # type: ignore
REG_A6XX_CP_SCRATCH = lambda i0: (0x00000883 + 0x1*i0 ) # type: ignore
REG_A6XX_CP_PROTECT = lambda i0: (0x00000850 + 0x1*i0 ) # type: ignore
A6XX_CP_PROTECT_REG_BASE_ADDR__MASK = 0x0003ffff # type: ignore
A6XX_CP_PROTECT_REG_BASE_ADDR__SHIFT = 0 # type: ignore
A6XX_CP_PROTECT_REG_MASK_LEN__MASK = 0x7ffc0000 # type: ignore
A6XX_CP_PROTECT_REG_MASK_LEN__SHIFT = 18 # type: ignore
A6XX_CP_PROTECT_REG_READ = 0x80000000 # type: ignore
REG_A6XX_CP_CONTEXT_SWITCH_CNTL = 0x000008a0 # type: ignore
A6XX_CP_CONTEXT_SWITCH_CNTL_STOP = 0x00000001 # type: ignore
A6XX_CP_CONTEXT_SWITCH_CNTL_LEVEL__MASK = 0x000000c0 # type: ignore
A6XX_CP_CONTEXT_SWITCH_CNTL_LEVEL__SHIFT = 6 # type: ignore
A6XX_CP_CONTEXT_SWITCH_CNTL_USES_GMEM = 0x00000100 # type: ignore
A6XX_CP_CONTEXT_SWITCH_CNTL_SKIP_SAVE_RESTORE = 0x00000200 # type: ignore
REG_A6XX_CP_CONTEXT_SWITCH_SMMU_INFO = 0x000008a1 # type: ignore
REG_A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR = 0x000008a3 # type: ignore
REG_A6XX_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR = 0x000008a5 # type: ignore
REG_A6XX_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR = 0x000008a7 # type: ignore
REG_A7XX_CP_CONTEXT_SWITCH_LEVEL_STATUS = 0x000008ab # type: ignore
REG_A6XX_CP_PERFCTR_CP_SEL = lambda i0: (0x000008d0 + 0x1*i0 ) # type: ignore
REG_A7XX_CP_BV_PERFCTR_CP_SEL = lambda i0: (0x000008e0 + 0x1*i0 ) # type: ignore
REG_A6XX_CP_CRASH_DUMP_SCRIPT_BASE = 0x00000900 # type: ignore
REG_A6XX_CP_CRASH_DUMP_CNTL = 0x00000902 # type: ignore
REG_A6XX_CP_CRASH_DUMP_STATUS = 0x00000903 # type: ignore
REG_A6XX_CP_SQE_STAT_ADDR = 0x00000908 # type: ignore
REG_A6XX_CP_SQE_STAT_DATA = 0x00000909 # type: ignore
REG_A6XX_CP_DRAW_STATE_ADDR = 0x0000090a # type: ignore
REG_A6XX_CP_DRAW_STATE_DATA = 0x0000090b # type: ignore
REG_A6XX_CP_ROQ_DBG_ADDR = 0x0000090c # type: ignore
REG_A6XX_CP_ROQ_DBG_DATA = 0x0000090d # type: ignore
REG_A6XX_CP_MEM_POOL_DBG_ADDR = 0x0000090e # type: ignore
REG_A6XX_CP_MEM_POOL_DBG_DATA = 0x0000090f # type: ignore
REG_A6XX_CP_SQE_UCODE_DBG_ADDR = 0x00000910 # type: ignore
REG_A6XX_CP_SQE_UCODE_DBG_DATA = 0x00000911 # type: ignore
REG_A6XX_CP_IB1_BASE = 0x00000928 # type: ignore
REG_A6XX_CP_IB1_REM_SIZE = 0x0000092a # type: ignore
REG_A6XX_CP_IB2_BASE = 0x0000092b # type: ignore
REG_A6XX_CP_IB2_REM_SIZE = 0x0000092d # type: ignore
REG_A6XX_CP_SDS_BASE = 0x0000092e # type: ignore
REG_A6XX_CP_SDS_REM_SIZE = 0x00000930 # type: ignore
REG_A6XX_CP_MRB_BASE = 0x00000931 # type: ignore
REG_A6XX_CP_MRB_REM_SIZE = 0x00000933 # type: ignore
REG_A6XX_CP_VSD_BASE = 0x00000934 # type: ignore
REG_A6XX_CP_ROQ_RB_STATUS = 0x00000939 # type: ignore
A6XX_CP_ROQ_RB_STATUS_RPTR__MASK = 0x000003ff # type: ignore
A6XX_CP_ROQ_RB_STATUS_RPTR__SHIFT = 0 # type: ignore
A6XX_CP_ROQ_RB_STATUS_WPTR__MASK = 0x03ff0000 # type: ignore
A6XX_CP_ROQ_RB_STATUS_WPTR__SHIFT = 16 # type: ignore
REG_A6XX_CP_ROQ_IB1_STATUS = 0x0000093a # type: ignore
A6XX_CP_ROQ_IB1_STATUS_RPTR__MASK = 0x000003ff # type: ignore
A6XX_CP_ROQ_IB1_STATUS_RPTR__SHIFT = 0 # type: ignore
A6XX_CP_ROQ_IB1_STATUS_WPTR__MASK = 0x03ff0000 # type: ignore
A6XX_CP_ROQ_IB1_STATUS_WPTR__SHIFT = 16 # type: ignore
REG_A6XX_CP_ROQ_IB2_STATUS = 0x0000093b # type: ignore
A6XX_CP_ROQ_IB2_STATUS_RPTR__MASK = 0x000003ff # type: ignore
A6XX_CP_ROQ_IB2_STATUS_RPTR__SHIFT = 0 # type: ignore
A6XX_CP_ROQ_IB2_STATUS_WPTR__MASK = 0x03ff0000 # type: ignore
A6XX_CP_ROQ_IB2_STATUS_WPTR__SHIFT = 16 # type: ignore
REG_A6XX_CP_ROQ_SDS_STATUS = 0x0000093c # type: ignore
A6XX_CP_ROQ_SDS_STATUS_RPTR__MASK = 0x000003ff # type: ignore
A6XX_CP_ROQ_SDS_STATUS_RPTR__SHIFT = 0 # type: ignore
A6XX_CP_ROQ_SDS_STATUS_WPTR__MASK = 0x03ff0000 # type: ignore
A6XX_CP_ROQ_SDS_STATUS_WPTR__SHIFT = 16 # type: ignore
REG_A6XX_CP_ROQ_MRB_STATUS = 0x0000093d # type: ignore
A6XX_CP_ROQ_MRB_STATUS_RPTR__MASK = 0x000003ff # type: ignore
A6XX_CP_ROQ_MRB_STATUS_RPTR__SHIFT = 0 # type: ignore
A6XX_CP_ROQ_MRB_STATUS_WPTR__MASK = 0x03ff0000 # type: ignore
A6XX_CP_ROQ_MRB_STATUS_WPTR__SHIFT = 16 # type: ignore
REG_A6XX_CP_ROQ_VSD_STATUS = 0x0000093e # type: ignore
A6XX_CP_ROQ_VSD_STATUS_RPTR__MASK = 0x000003ff # type: ignore
A6XX_CP_ROQ_VSD_STATUS_RPTR__SHIFT = 0 # type: ignore
A6XX_CP_ROQ_VSD_STATUS_WPTR__MASK = 0x03ff0000 # type: ignore
A6XX_CP_ROQ_VSD_STATUS_WPTR__SHIFT = 16 # type: ignore
REG_A6XX_CP_IB1_INIT_SIZE = 0x00000943 # type: ignore
REG_A6XX_CP_IB2_INIT_SIZE = 0x00000944 # type: ignore
REG_A6XX_CP_SDS_INIT_SIZE = 0x00000945 # type: ignore
REG_A6XX_CP_MRB_INIT_SIZE = 0x00000946 # type: ignore
REG_A6XX_CP_VSD_INIT_SIZE = 0x00000947 # type: ignore
REG_A6XX_CP_ROQ_AVAIL_RB = 0x00000948 # type: ignore
A6XX_CP_ROQ_AVAIL_RB_REM__MASK = 0xffff0000 # type: ignore
A6XX_CP_ROQ_AVAIL_RB_REM__SHIFT = 16 # type: ignore
REG_A6XX_CP_ROQ_AVAIL_IB1 = 0x00000949 # type: ignore
A6XX_CP_ROQ_AVAIL_IB1_REM__MASK = 0xffff0000 # type: ignore
A6XX_CP_ROQ_AVAIL_IB1_REM__SHIFT = 16 # type: ignore
REG_A6XX_CP_ROQ_AVAIL_IB2 = 0x0000094a # type: ignore
A6XX_CP_ROQ_AVAIL_IB2_REM__MASK = 0xffff0000 # type: ignore
A6XX_CP_ROQ_AVAIL_IB2_REM__SHIFT = 16 # type: ignore
REG_A6XX_CP_ROQ_AVAIL_SDS = 0x0000094b # type: ignore
A6XX_CP_ROQ_AVAIL_SDS_REM__MASK = 0xffff0000 # type: ignore
A6XX_CP_ROQ_AVAIL_SDS_REM__SHIFT = 16 # type: ignore
REG_A6XX_CP_ROQ_AVAIL_MRB = 0x0000094c # type: ignore
A6XX_CP_ROQ_AVAIL_MRB_REM__MASK = 0xffff0000 # type: ignore
A6XX_CP_ROQ_AVAIL_MRB_REM__SHIFT = 16 # type: ignore
REG_A6XX_CP_ROQ_AVAIL_VSD = 0x0000094d # type: ignore
A6XX_CP_ROQ_AVAIL_VSD_REM__MASK = 0xffff0000 # type: ignore
A6XX_CP_ROQ_AVAIL_VSD_REM__SHIFT = 16 # type: ignore
REG_A6XX_CP_ALWAYS_ON_COUNTER = 0x00000980 # type: ignore
REG_A6XX_CP_AHB_CNTL = 0x0000098d # type: ignore
REG_A6XX_CP_APERTURE_CNTL_HOST = 0x00000a00 # type: ignore
REG_A7XX_CP_APERTURE_CNTL_HOST = 0x00000a00 # type: ignore
A7XX_CP_APERTURE_CNTL_HOST_PIPE__MASK = 0x00003000 # type: ignore
A7XX_CP_APERTURE_CNTL_HOST_PIPE__SHIFT = 12 # type: ignore
A7XX_CP_APERTURE_CNTL_HOST_CLUSTER__MASK = 0x00000700 # type: ignore
A7XX_CP_APERTURE_CNTL_HOST_CLUSTER__SHIFT = 8 # type: ignore
A7XX_CP_APERTURE_CNTL_HOST_CONTEXT__MASK = 0x00000030 # type: ignore
A7XX_CP_APERTURE_CNTL_HOST_CONTEXT__SHIFT = 4 # type: ignore
REG_A6XX_CP_APERTURE_CNTL_SQE = 0x00000a01 # type: ignore
REG_A6XX_CP_APERTURE_CNTL_CD = 0x00000a03 # type: ignore
REG_A7XX_CP_APERTURE_CNTL_CD = 0x00000a03 # type: ignore
A7XX_CP_APERTURE_CNTL_CD_PIPE__MASK = 0x00003000 # type: ignore
A7XX_CP_APERTURE_CNTL_CD_PIPE__SHIFT = 12 # type: ignore
A7XX_CP_APERTURE_CNTL_CD_CLUSTER__MASK = 0x00000700 # type: ignore
A7XX_CP_APERTURE_CNTL_CD_CLUSTER__SHIFT = 8 # type: ignore
A7XX_CP_APERTURE_CNTL_CD_CONTEXT__MASK = 0x00000030 # type: ignore
A7XX_CP_APERTURE_CNTL_CD_CONTEXT__SHIFT = 4 # type: ignore
REG_A7XX_CP_BV_PROTECT_STATUS = 0x00000a61 # type: ignore
REG_A7XX_CP_BV_HW_FAULT = 0x00000a64 # type: ignore
REG_A7XX_CP_BV_DRAW_STATE_ADDR = 0x00000a81 # type: ignore
REG_A7XX_CP_BV_DRAW_STATE_DATA = 0x00000a82 # type: ignore
REG_A7XX_CP_BV_ROQ_DBG_ADDR = 0x00000a83 # type: ignore
REG_A7XX_CP_BV_ROQ_DBG_DATA = 0x00000a84 # type: ignore
REG_A7XX_CP_BV_SQE_UCODE_DBG_ADDR = 0x00000a85 # type: ignore
REG_A7XX_CP_BV_SQE_UCODE_DBG_DATA = 0x00000a86 # type: ignore
REG_A7XX_CP_BV_SQE_STAT_ADDR = 0x00000a87 # type: ignore
REG_A7XX_CP_BV_SQE_STAT_DATA = 0x00000a88 # type: ignore
REG_A7XX_CP_BV_MEM_POOL_DBG_ADDR = 0x00000a96 # type: ignore
REG_A7XX_CP_BV_MEM_POOL_DBG_DATA = 0x00000a97 # type: ignore
REG_A7XX_CP_BV_RB_RPTR_ADDR = 0x00000a98 # type: ignore
REG_A7XX_CP_RESOURCE_TABLE_DBG_ADDR = 0x00000a9a # type: ignore
REG_A7XX_CP_RESOURCE_TABLE_DBG_DATA = 0x00000a9b # type: ignore
REG_A7XX_CP_BV_APRIV_CNTL = 0x00000ad0 # type: ignore
REG_A7XX_CP_BV_CHICKEN_DBG = 0x00000ada # type: ignore
REG_A7XX_CP_LPAC_DRAW_STATE_ADDR = 0x00000b0a # type: ignore
REG_A7XX_CP_LPAC_DRAW_STATE_DATA = 0x00000b0b # type: ignore
REG_A7XX_CP_LPAC_ROQ_DBG_ADDR = 0x00000b0c # type: ignore
REG_A7XX_CP_SQE_AC_UCODE_DBG_ADDR = 0x00000b27 # type: ignore
REG_A7XX_CP_SQE_AC_UCODE_DBG_DATA = 0x00000b28 # type: ignore
REG_A7XX_CP_SQE_AC_STAT_ADDR = 0x00000b29 # type: ignore
REG_A7XX_CP_SQE_AC_STAT_DATA = 0x00000b2a # type: ignore
REG_A7XX_CP_LPAC_APRIV_CNTL = 0x00000b31 # type: ignore
REG_A6XX_CP_LPAC_PROG_FIFO_SIZE = 0x00000b34 # type: ignore
REG_A7XX_CP_LPAC_ROQ_DBG_DATA = 0x00000b35 # type: ignore
REG_A7XX_CP_LPAC_FIFO_DBG_DATA = 0x00000b36 # type: ignore
REG_A7XX_CP_LPAC_FIFO_DBG_ADDR = 0x00000b40 # type: ignore
REG_A6XX_CP_LPAC_SQE_CNTL = 0x00000b81 # type: ignore
REG_A6XX_CP_LPAC_SQE_INSTR_BASE = 0x00000b82 # type: ignore
REG_A7XX_CP_AQE_INSTR_BASE_0 = 0x00000b70 # type: ignore
REG_A7XX_CP_AQE_INSTR_BASE_1 = 0x00000b72 # type: ignore
REG_A7XX_CP_AQE_APRIV_CNTL = 0x00000b78 # type: ignore
REG_A7XX_CP_AQE_ROQ_DBG_ADDR_0 = 0x00000ba8 # type: ignore
REG_A7XX_CP_AQE_ROQ_DBG_ADDR_1 = 0x00000ba9 # type: ignore
REG_A7XX_CP_AQE_ROQ_DBG_DATA_0 = 0x00000bac # type: ignore
REG_A7XX_CP_AQE_ROQ_DBG_DATA_1 = 0x00000bad # type: ignore
REG_A7XX_CP_AQE_UCODE_DBG_ADDR_0 = 0x00000bb0 # type: ignore
REG_A7XX_CP_AQE_UCODE_DBG_ADDR_1 = 0x00000bb1 # type: ignore
REG_A7XX_CP_AQE_UCODE_DBG_DATA_0 = 0x00000bb4 # type: ignore
REG_A7XX_CP_AQE_UCODE_DBG_DATA_1 = 0x00000bb5 # type: ignore
REG_A7XX_CP_AQE_STAT_ADDR_0 = 0x00000bb8 # type: ignore
REG_A7XX_CP_AQE_STAT_ADDR_1 = 0x00000bb9 # type: ignore
REG_A7XX_CP_AQE_STAT_DATA_0 = 0x00000bbc # type: ignore
REG_A7XX_CP_AQE_STAT_DATA_1 = 0x00000bbd # type: ignore
REG_A6XX_VSC_ADDR_MODE_CNTL = 0x00000c01 # type: ignore
REG_A6XX_RBBM_GPR0_CNTL = 0x00000018 # type: ignore
REG_A6XX_RBBM_INT_0_STATUS = 0x00000201 # type: ignore
REG_A6XX_RBBM_STATUS = 0x00000210 # type: ignore
A6XX_RBBM_STATUS_GPU_BUSY_IGN_AHB = 0x00800000 # type: ignore
A6XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP = 0x00400000 # type: ignore
A6XX_RBBM_STATUS_HLSQ_BUSY = 0x00200000 # type: ignore
A6XX_RBBM_STATUS_VSC_BUSY = 0x00100000 # type: ignore
A6XX_RBBM_STATUS_TPL1_BUSY = 0x00080000 # type: ignore
A6XX_RBBM_STATUS_SP_BUSY = 0x00040000 # type: ignore
A6XX_RBBM_STATUS_UCHE_BUSY = 0x00020000 # type: ignore
A6XX_RBBM_STATUS_VPC_BUSY = 0x00010000 # type: ignore
A6XX_RBBM_STATUS_VFD_BUSY = 0x00008000 # type: ignore
A6XX_RBBM_STATUS_TESS_BUSY = 0x00004000 # type: ignore
A6XX_RBBM_STATUS_PC_VSD_BUSY = 0x00002000 # type: ignore
A6XX_RBBM_STATUS_PC_DCALL_BUSY = 0x00001000 # type: ignore
A6XX_RBBM_STATUS_COM_DCOM_BUSY = 0x00000800 # type: ignore
A6XX_RBBM_STATUS_LRZ_BUSY = 0x00000400 # type: ignore
A6XX_RBBM_STATUS_A2D_BUSY = 0x00000200 # type: ignore
A6XX_RBBM_STATUS_CCU_BUSY = 0x00000100 # type: ignore
A6XX_RBBM_STATUS_RB_BUSY = 0x00000080 # type: ignore
A6XX_RBBM_STATUS_RAS_BUSY = 0x00000040 # type: ignore
A6XX_RBBM_STATUS_TSE_BUSY = 0x00000020 # type: ignore
A6XX_RBBM_STATUS_VBIF_BUSY = 0x00000010 # type: ignore
A6XX_RBBM_STATUS_GFX_DBGC_BUSY = 0x00000008 # type: ignore
A6XX_RBBM_STATUS_CP_BUSY = 0x00000004 # type: ignore
A6XX_RBBM_STATUS_CP_AHB_BUSY_CP_MASTER = 0x00000002 # type: ignore
A6XX_RBBM_STATUS_CP_AHB_BUSY_CX_MASTER = 0x00000001 # type: ignore
REG_A6XX_RBBM_STATUS1 = 0x00000211 # type: ignore
REG_A6XX_RBBM_STATUS2 = 0x00000212 # type: ignore
REG_A6XX_RBBM_STATUS3 = 0x00000213 # type: ignore
A6XX_RBBM_STATUS3_SMMU_STALLED_ON_FAULT = 0x01000000 # type: ignore
REG_A6XX_RBBM_VBIF_GX_RESET_STATUS = 0x00000215 # type: ignore
REG_A7XX_RBBM_CLOCK_MODE_CP = 0x00000260 # type: ignore
REG_A7XX_RBBM_CLOCK_MODE_BV_LRZ = 0x00000284 # type: ignore
REG_A7XX_RBBM_CLOCK_MODE_BV_GRAS = 0x00000285 # type: ignore
REG_A7XX_RBBM_CLOCK_MODE2_GRAS = 0x00000286 # type: ignore
REG_A7XX_RBBM_CLOCK_MODE_BV_VFD = 0x00000287 # type: ignore
REG_A7XX_RBBM_CLOCK_MODE_BV_GPC = 0x00000288 # type: ignore
REG_A7XX_RBBM_SW_FUSE_INT_STATUS = 0x000002c0 # type: ignore
REG_A7XX_RBBM_SW_FUSE_INT_MASK = 0x000002c1 # type: ignore
REG_A6XX_RBBM_PERFCTR_CP = lambda i0: (0x00000400 + 0x2*i0 ) # type: ignore
REG_A6XX_RBBM_PERFCTR_RBBM = lambda i0: (0x0000041c + 0x2*i0 ) # type: ignore
REG_A6XX_RBBM_PERFCTR_PC = lambda i0: (0x00000424 + 0x2*i0 ) # type: ignore
REG_A6XX_RBBM_PERFCTR_VFD = lambda i0: (0x00000434 + 0x2*i0 ) # type: ignore
REG_A6XX_RBBM_PERFCTR_HLSQ = lambda i0: (0x00000444 + 0x2*i0 ) # type: ignore
REG_A6XX_RBBM_PERFCTR_VPC = lambda i0: (0x00000450 + 0x2*i0 ) # type: ignore
REG_A6XX_RBBM_PERFCTR_CCU = lambda i0: (0x0000045c + 0x2*i0 ) # type: ignore
REG_A6XX_RBBM_PERFCTR_TSE = lambda i0: (0x00000466 + 0x2*i0 ) # type: ignore
REG_A6XX_RBBM_PERFCTR_RAS = lambda i0: (0x0000046e + 0x2*i0 ) # type: ignore
REG_A6XX_RBBM_PERFCTR_UCHE = lambda i0: (0x00000476 + 0x2*i0 ) # type: ignore
REG_A6XX_RBBM_PERFCTR_TP = lambda i0: (0x0000048e + 0x2*i0 ) # type: ignore
REG_A6XX_RBBM_PERFCTR_SP = lambda i0: (0x000004a6 + 0x2*i0 ) # type: ignore
REG_A6XX_RBBM_PERFCTR_RB = lambda i0: (0x000004d6 + 0x2*i0 ) # type: ignore
REG_A6XX_RBBM_PERFCTR_VSC = lambda i0: (0x000004e6 + 0x2*i0 ) # type: ignore
REG_A6XX_RBBM_PERFCTR_LRZ = lambda i0: (0x000004ea + 0x2*i0 ) # type: ignore
REG_A6XX_RBBM_PERFCTR_CMP = lambda i0: (0x000004f2 + 0x2*i0 ) # type: ignore
REG_A7XX_RBBM_PERFCTR_CP = lambda i0: (0x00000300 + 0x2*i0 ) # type: ignore
REG_A7XX_RBBM_PERFCTR_RBBM = lambda i0: (0x0000031c + 0x2*i0 ) # type: ignore
REG_A7XX_RBBM_PERFCTR_PC = lambda i0: (0x00000324 + 0x2*i0 ) # type: ignore
REG_A7XX_RBBM_PERFCTR_VFD = lambda i0: (0x00000334 + 0x2*i0 ) # type: ignore
REG_A7XX_RBBM_PERFCTR_HLSQ = lambda i0: (0x00000344 + 0x2*i0 ) # type: ignore
REG_A7XX_RBBM_PERFCTR_VPC = lambda i0: (0x00000350 + 0x2*i0 ) # type: ignore
REG_A7XX_RBBM_PERFCTR_CCU = lambda i0: (0x0000035c + 0x2*i0 ) # type: ignore
REG_A7XX_RBBM_PERFCTR_TSE = lambda i0: (0x00000366 + 0x2*i0 ) # type: ignore
REG_A7XX_RBBM_PERFCTR_RAS = lambda i0: (0x0000036e + 0x2*i0 ) # type: ignore
REG_A7XX_RBBM_PERFCTR_UCHE = lambda i0: (0x00000376 + 0x2*i0 ) # type: ignore
REG_A7XX_RBBM_PERFCTR_TP = lambda i0: (0x0000038e + 0x2*i0 ) # type: ignore
REG_A7XX_RBBM_PERFCTR_SP = lambda i0: (0x000003a6 + 0x2*i0 ) # type: ignore
REG_A7XX_RBBM_PERFCTR_RB = lambda i0: (0x000003d6 + 0x2*i0 ) # type: ignore
REG_A7XX_RBBM_PERFCTR_VSC = lambda i0: (0x000003e6 + 0x2*i0 ) # type: ignore
REG_A7XX_RBBM_PERFCTR_LRZ = lambda i0: (0x000003ea + 0x2*i0 ) # type: ignore
REG_A7XX_RBBM_PERFCTR_CMP = lambda i0: (0x000003f2 + 0x2*i0 ) # type: ignore
REG_A7XX_RBBM_PERFCTR_UFC = lambda i0: (0x000003fa + 0x2*i0 ) # type: ignore
REG_A7XX_RBBM_PERFCTR2_HLSQ = lambda i0: (0x00000410 + 0x2*i0 ) # type: ignore
REG_A7XX_RBBM_PERFCTR2_CP = lambda i0: (0x0000041c + 0x2*i0 ) # type: ignore
REG_A7XX_RBBM_PERFCTR2_SP = lambda i0: (0x0000042a + 0x2*i0 ) # type: ignore
REG_A7XX_RBBM_PERFCTR2_TP = lambda i0: (0x00000442 + 0x2*i0 ) # type: ignore
REG_A7XX_RBBM_PERFCTR2_UFC = lambda i0: (0x0000044e + 0x2*i0 ) # type: ignore
REG_A7XX_RBBM_PERFCTR_BV_PC = lambda i0: (0x00000460 + 0x2*i0 ) # type: ignore
REG_A7XX_RBBM_PERFCTR_BV_VFD = lambda i0: (0x00000470 + 0x2*i0 ) # type: ignore
REG_A7XX_RBBM_PERFCTR_BV_VPC = lambda i0: (0x00000480 + 0x2*i0 ) # type: ignore
REG_A7XX_RBBM_PERFCTR_BV_TSE = lambda i0: (0x0000048c + 0x2*i0 ) # type: ignore
REG_A7XX_RBBM_PERFCTR_BV_RAS = lambda i0: (0x00000494 + 0x2*i0 ) # type: ignore
REG_A7XX_RBBM_PERFCTR_BV_LRZ = lambda i0: (0x0000049c + 0x2*i0 ) # type: ignore
REG_A6XX_RBBM_PERFCTR_CNTL = 0x00000500 # type: ignore
REG_A6XX_RBBM_PERFCTR_LOAD_CMD0 = 0x00000501 # type: ignore
REG_A6XX_RBBM_PERFCTR_LOAD_CMD1 = 0x00000502 # type: ignore
REG_A6XX_RBBM_PERFCTR_LOAD_CMD2 = 0x00000503 # type: ignore
REG_A6XX_RBBM_PERFCTR_LOAD_CMD3 = 0x00000504 # type: ignore
REG_A6XX_RBBM_PERFCTR_LOAD_VALUE_LO = 0x00000505 # type: ignore
REG_A6XX_RBBM_PERFCTR_LOAD_VALUE_HI = 0x00000506 # type: ignore
REG_A6XX_RBBM_PERFCTR_RBBM_SEL = lambda i0: (0x00000507 + 0x1*i0 ) # type: ignore
REG_A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED = 0x0000050b # type: ignore
REG_A6XX_RBBM_PERFCTR_SRAM_INIT_CMD = 0x0000050e # type: ignore
REG_A6XX_RBBM_PERFCTR_SRAM_INIT_STATUS = 0x0000050f # type: ignore
REG_A6XX_RBBM_ISDB_CNT = 0x00000533 # type: ignore
REG_A6XX_RBBM_NC_MODE_CNTL = 0x00000534 # type: ignore
REG_A7XX_RBBM_SNAPSHOT_STATUS = 0x00000535 # type: ignore
REG_A6XX_RBBM_PIPESTAT_IAVERTICES = 0x00000540 # type: ignore
REG_A6XX_RBBM_PIPESTAT_IAPRIMITIVES = 0x00000542 # type: ignore
REG_A6XX_RBBM_PIPESTAT_VSINVOCATIONS = 0x00000544 # type: ignore
REG_A6XX_RBBM_PIPESTAT_HSINVOCATIONS = 0x00000546 # type: ignore
REG_A6XX_RBBM_PIPESTAT_DSINVOCATIONS = 0x00000548 # type: ignore
REG_A6XX_RBBM_PIPESTAT_GSINVOCATIONS = 0x0000054a # type: ignore
REG_A6XX_RBBM_PIPESTAT_GSPRIMITIVES = 0x0000054c # type: ignore
REG_A6XX_RBBM_PIPESTAT_CINVOCATIONS = 0x0000054e # type: ignore
REG_A6XX_RBBM_PIPESTAT_CPRIMITIVES = 0x00000550 # type: ignore
REG_A6XX_RBBM_PIPESTAT_PSINVOCATIONS = 0x00000552 # type: ignore
REG_A6XX_RBBM_PIPESTAT_CSINVOCATIONS = 0x00000554 # type: ignore
REG_A6XX_RBBM_SECVID_TRUST_CNTL = 0x0000f400 # type: ignore
REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE = 0x0000f800 # type: ignore
REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE = 0x0000f802 # type: ignore
REG_A6XX_RBBM_SECVID_TSB_CNTL = 0x0000f803 # type: ignore
REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL = 0x0000f810 # type: ignore
REG_A7XX_RBBM_SECVID_TSB_STATUS = 0x0000fc00 # type: ignore
# RBBM bus-interface QoS / halt handshaking, interrupt mask/clear, and
# software-reset command registers.
REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL = 0x00000010 # type: ignore
REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL = 0x00000011 # type: ignore
REG_A6XX_RBBM_GBIF_HALT = 0x00000016 # type: ignore
REG_A6XX_RBBM_GBIF_HALT_ACK = 0x00000017 # type: ignore
REG_A6XX_RBBM_WAIT_FOR_GPU_IDLE_CMD = 0x0000001c # type: ignore
# Bitfield flag within RBBM_WAIT_FOR_GPU_IDLE_CMD.
A6XX_RBBM_WAIT_FOR_GPU_IDLE_CMD_WAIT_GPU_IDLE = 0x00000001 # type: ignore
# A7xx aliases share the A6xx offsets for the GBIF halt pair.
REG_A7XX_RBBM_GBIF_HALT = 0x00000016 # type: ignore
REG_A7XX_RBBM_GBIF_HALT_ACK = 0x00000017 # type: ignore
REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL = 0x0000001f # type: ignore
REG_A6XX_RBBM_INT_CLEAR_CMD = 0x00000037 # type: ignore
REG_A6XX_RBBM_INT_0_MASK = 0x00000038 # type: ignore
REG_A7XX_RBBM_INT_2_MASK = 0x0000003a # type: ignore
REG_A6XX_RBBM_SP_HYST_CNT = 0x00000042 # type: ignore
REG_A6XX_RBBM_SW_RESET_CMD = 0x00000043 # type: ignore
REG_A6XX_RBBM_RAC_THRESHOLD_CNT = 0x00000044 # type: ignore
REG_A6XX_RBBM_BLOCK_SW_RESET_CMD = 0x00000045 # type: ignore
REG_A6XX_RBBM_BLOCK_SW_RESET_CMD2 = 0x00000046 # type: ignore
# RBBM clock-gating control. Each hardware block instance (SP0-3, TP0-3,
# RB0-3, CCU0-3, RAC, UCHE, VFD, GPC, HLSQ, GMU_GX, ...) gets a family of
# CNTL (enable), DELAY (ungate delay) and HYST (hysteresis) registers at
# consecutive offsets.
REG_A7XX_RBBM_CLOCK_CNTL_GLOBAL = 0x000000ad # type: ignore
REG_A6XX_RBBM_CLOCK_CNTL = 0x000000ae # type: ignore
REG_A6XX_RBBM_CLOCK_CNTL_SP0 = 0x000000b0 # type: ignore
REG_A6XX_RBBM_CLOCK_CNTL_SP1 = 0x000000b1 # type: ignore
REG_A6XX_RBBM_CLOCK_CNTL_SP2 = 0x000000b2 # type: ignore
REG_A6XX_RBBM_CLOCK_CNTL_SP3 = 0x000000b3 # type: ignore
REG_A6XX_RBBM_CLOCK_CNTL2_SP0 = 0x000000b4 # type: ignore
REG_A6XX_RBBM_CLOCK_CNTL2_SP1 = 0x000000b5 # type: ignore
REG_A6XX_RBBM_CLOCK_CNTL2_SP2 = 0x000000b6 # type: ignore
REG_A6XX_RBBM_CLOCK_CNTL2_SP3 = 0x000000b7 # type: ignore
REG_A6XX_RBBM_CLOCK_DELAY_SP0 = 0x000000b8 # type: ignore
REG_A6XX_RBBM_CLOCK_DELAY_SP1 = 0x000000b9 # type: ignore
REG_A6XX_RBBM_CLOCK_DELAY_SP2 = 0x000000ba # type: ignore
REG_A6XX_RBBM_CLOCK_DELAY_SP3 = 0x000000bb # type: ignore
REG_A6XX_RBBM_CLOCK_HYST_SP0 = 0x000000bc # type: ignore
REG_A6XX_RBBM_CLOCK_HYST_SP1 = 0x000000bd # type: ignore
REG_A6XX_RBBM_CLOCK_HYST_SP2 = 0x000000be # type: ignore
REG_A6XX_RBBM_CLOCK_HYST_SP3 = 0x000000bf # type: ignore
REG_A6XX_RBBM_CLOCK_CNTL_TP0 = 0x000000c0 # type: ignore
REG_A6XX_RBBM_CLOCK_CNTL_TP1 = 0x000000c1 # type: ignore
REG_A6XX_RBBM_CLOCK_CNTL_TP2 = 0x000000c2 # type: ignore
REG_A6XX_RBBM_CLOCK_CNTL_TP3 = 0x000000c3 # type: ignore
REG_A6XX_RBBM_CLOCK_CNTL2_TP0 = 0x000000c4 # type: ignore
REG_A6XX_RBBM_CLOCK_CNTL2_TP1 = 0x000000c5 # type: ignore
REG_A6XX_RBBM_CLOCK_CNTL2_TP2 = 0x000000c6 # type: ignore
REG_A6XX_RBBM_CLOCK_CNTL2_TP3 = 0x000000c7 # type: ignore
REG_A6XX_RBBM_CLOCK_CNTL3_TP0 = 0x000000c8 # type: ignore
REG_A6XX_RBBM_CLOCK_CNTL3_TP1 = 0x000000c9 # type: ignore
REG_A6XX_RBBM_CLOCK_CNTL3_TP2 = 0x000000ca # type: ignore
REG_A6XX_RBBM_CLOCK_CNTL3_TP3 = 0x000000cb # type: ignore
REG_A6XX_RBBM_CLOCK_CNTL4_TP0 = 0x000000cc # type: ignore
REG_A6XX_RBBM_CLOCK_CNTL4_TP1 = 0x000000cd # type: ignore
REG_A6XX_RBBM_CLOCK_CNTL4_TP2 = 0x000000ce # type: ignore
REG_A6XX_RBBM_CLOCK_CNTL4_TP3 = 0x000000cf # type: ignore
REG_A6XX_RBBM_CLOCK_DELAY_TP0 = 0x000000d0 # type: ignore
REG_A6XX_RBBM_CLOCK_DELAY_TP1 = 0x000000d1 # type: ignore
REG_A6XX_RBBM_CLOCK_DELAY_TP2 = 0x000000d2 # type: ignore
REG_A6XX_RBBM_CLOCK_DELAY_TP3 = 0x000000d3 # type: ignore
REG_A6XX_RBBM_CLOCK_DELAY2_TP0 = 0x000000d4 # type: ignore
REG_A6XX_RBBM_CLOCK_DELAY2_TP1 = 0x000000d5 # type: ignore
REG_A6XX_RBBM_CLOCK_DELAY2_TP2 = 0x000000d6 # type: ignore
REG_A6XX_RBBM_CLOCK_DELAY2_TP3 = 0x000000d7 # type: ignore
REG_A6XX_RBBM_CLOCK_DELAY3_TP0 = 0x000000d8 # type: ignore
REG_A6XX_RBBM_CLOCK_DELAY3_TP1 = 0x000000d9 # type: ignore
REG_A6XX_RBBM_CLOCK_DELAY3_TP2 = 0x000000da # type: ignore
REG_A6XX_RBBM_CLOCK_DELAY3_TP3 = 0x000000db # type: ignore
REG_A6XX_RBBM_CLOCK_DELAY4_TP0 = 0x000000dc # type: ignore
REG_A6XX_RBBM_CLOCK_DELAY4_TP1 = 0x000000dd # type: ignore
REG_A6XX_RBBM_CLOCK_DELAY4_TP2 = 0x000000de # type: ignore
REG_A6XX_RBBM_CLOCK_DELAY4_TP3 = 0x000000df # type: ignore
REG_A6XX_RBBM_CLOCK_HYST_TP0 = 0x000000e0 # type: ignore
REG_A6XX_RBBM_CLOCK_HYST_TP1 = 0x000000e1 # type: ignore
REG_A6XX_RBBM_CLOCK_HYST_TP2 = 0x000000e2 # type: ignore
REG_A6XX_RBBM_CLOCK_HYST_TP3 = 0x000000e3 # type: ignore
REG_A6XX_RBBM_CLOCK_HYST2_TP0 = 0x000000e4 # type: ignore
REG_A6XX_RBBM_CLOCK_HYST2_TP1 = 0x000000e5 # type: ignore
REG_A6XX_RBBM_CLOCK_HYST2_TP2 = 0x000000e6 # type: ignore
REG_A6XX_RBBM_CLOCK_HYST2_TP3 = 0x000000e7 # type: ignore
REG_A6XX_RBBM_CLOCK_HYST3_TP0 = 0x000000e8 # type: ignore
REG_A6XX_RBBM_CLOCK_HYST3_TP1 = 0x000000e9 # type: ignore
REG_A6XX_RBBM_CLOCK_HYST3_TP2 = 0x000000ea # type: ignore
REG_A6XX_RBBM_CLOCK_HYST3_TP3 = 0x000000eb # type: ignore
REG_A6XX_RBBM_CLOCK_HYST4_TP0 = 0x000000ec # type: ignore
REG_A6XX_RBBM_CLOCK_HYST4_TP1 = 0x000000ed # type: ignore
REG_A6XX_RBBM_CLOCK_HYST4_TP2 = 0x000000ee # type: ignore
REG_A6XX_RBBM_CLOCK_HYST4_TP3 = 0x000000ef # type: ignore
REG_A6XX_RBBM_CLOCK_CNTL_RB0 = 0x000000f0 # type: ignore
REG_A6XX_RBBM_CLOCK_CNTL_RB1 = 0x000000f1 # type: ignore
REG_A6XX_RBBM_CLOCK_CNTL_RB2 = 0x000000f2 # type: ignore
REG_A6XX_RBBM_CLOCK_CNTL_RB3 = 0x000000f3 # type: ignore
REG_A6XX_RBBM_CLOCK_CNTL2_RB0 = 0x000000f4 # type: ignore
REG_A6XX_RBBM_CLOCK_CNTL2_RB1 = 0x000000f5 # type: ignore
REG_A6XX_RBBM_CLOCK_CNTL2_RB2 = 0x000000f6 # type: ignore
REG_A6XX_RBBM_CLOCK_CNTL2_RB3 = 0x000000f7 # type: ignore
REG_A6XX_RBBM_CLOCK_CNTL_CCU0 = 0x000000f8 # type: ignore
REG_A6XX_RBBM_CLOCK_CNTL_CCU1 = 0x000000f9 # type: ignore
REG_A6XX_RBBM_CLOCK_CNTL_CCU2 = 0x000000fa # type: ignore
REG_A6XX_RBBM_CLOCK_CNTL_CCU3 = 0x000000fb # type: ignore
REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0 = 0x00000100 # type: ignore
REG_A6XX_RBBM_CLOCK_HYST_RB_CCU1 = 0x00000101 # type: ignore
REG_A6XX_RBBM_CLOCK_HYST_RB_CCU2 = 0x00000102 # type: ignore
REG_A6XX_RBBM_CLOCK_HYST_RB_CCU3 = 0x00000103 # type: ignore
REG_A6XX_RBBM_CLOCK_CNTL_RAC = 0x00000104 # type: ignore
REG_A6XX_RBBM_CLOCK_CNTL2_RAC = 0x00000105 # type: ignore
REG_A6XX_RBBM_CLOCK_DELAY_RAC = 0x00000106 # type: ignore
REG_A6XX_RBBM_CLOCK_HYST_RAC = 0x00000107 # type: ignore
REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM = 0x00000108 # type: ignore
REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM = 0x00000109 # type: ignore
REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM = 0x0000010a # type: ignore
REG_A6XX_RBBM_CLOCK_CNTL_UCHE = 0x0000010b # type: ignore
REG_A6XX_RBBM_CLOCK_CNTL2_UCHE = 0x0000010c # type: ignore
REG_A6XX_RBBM_CLOCK_CNTL3_UCHE = 0x0000010d # type: ignore
REG_A6XX_RBBM_CLOCK_CNTL4_UCHE = 0x0000010e # type: ignore
REG_A6XX_RBBM_CLOCK_DELAY_UCHE = 0x0000010f # type: ignore
REG_A6XX_RBBM_CLOCK_HYST_UCHE = 0x00000110 # type: ignore
REG_A6XX_RBBM_CLOCK_MODE_VFD = 0x00000111 # type: ignore
REG_A6XX_RBBM_CLOCK_DELAY_VFD = 0x00000112 # type: ignore
REG_A6XX_RBBM_CLOCK_HYST_VFD = 0x00000113 # type: ignore
REG_A6XX_RBBM_CLOCK_MODE_GPC = 0x00000114 # type: ignore
REG_A6XX_RBBM_CLOCK_DELAY_GPC = 0x00000115 # type: ignore
REG_A6XX_RBBM_CLOCK_HYST_GPC = 0x00000116 # type: ignore
REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2 = 0x00000117 # type: ignore
REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX = 0x00000118 # type: ignore
REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX = 0x00000119 # type: ignore
REG_A6XX_RBBM_CLOCK_HYST_GMU_GX = 0x0000011a # type: ignore
REG_A6XX_RBBM_CLOCK_MODE_HLSQ = 0x0000011b # type: ignore
REG_A6XX_RBBM_CLOCK_DELAY_HLSQ = 0x0000011c # type: ignore
REG_A6XX_RBBM_CLOCK_HYST_HLSQ = 0x0000011d # type: ignore
REG_A7XX_RBBM_CGC_GLOBAL_LOAD_CMD = 0x0000011e # type: ignore
REG_A7XX_RBBM_CGC_P2S_TRIG_CMD = 0x0000011f # type: ignore
REG_A6XX_RBBM_CLOCK_CNTL_TEX_FCHE = 0x00000120 # type: ignore
REG_A6XX_RBBM_CLOCK_DELAY_TEX_FCHE = 0x00000121 # type: ignore
REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE = 0x00000122 # type: ignore
# NOTE: A7xx CGC_P2S_STATUS shares offset 0x122 with A6xx HYST_TEX_FCHE
# (same address, different meaning per generation).
REG_A7XX_RBBM_CGC_P2S_STATUS = 0x00000122 # type: ignore
A7XX_RBBM_CGC_P2S_STATUS_TXDONE = 0x00000001 # type: ignore
REG_A6XX_RBBM_CLOCK_CNTL_FCHE = 0x00000123 # type: ignore
REG_A6XX_RBBM_CLOCK_DELAY_FCHE = 0x00000124 # type: ignore
REG_A6XX_RBBM_CLOCK_HYST_FCHE = 0x00000125 # type: ignore
REG_A6XX_RBBM_CLOCK_CNTL_MHUB = 0x00000126 # type: ignore
REG_A6XX_RBBM_CLOCK_DELAY_MHUB = 0x00000127 # type: ignore
REG_A6XX_RBBM_CLOCK_HYST_MHUB = 0x00000128 # type: ignore
REG_A6XX_RBBM_CLOCK_DELAY_GLC = 0x00000129 # type: ignore
REG_A6XX_RBBM_CLOCK_HYST_GLC = 0x0000012a # type: ignore
REG_A6XX_RBBM_CLOCK_CNTL_GLC = 0x0000012b # type: ignore
REG_A7XX_RBBM_CLOCK_HYST2_VFD = 0x0000012f # type: ignore
REG_A6XX_RBBM_LPAC_GBIF_CLIENT_QOS_CNTL = 0x000005ff # type: ignore
# DBGC debug-bus configuration: source select (A-D), trace control, trigger
# masks, and per-nibble byte-lane selects. *__MASK / *__SHIFT pairs describe
# bitfields inside the preceding register.
REG_A6XX_DBGC_CFG_DBGBUS_SEL_A = 0x00000600 # type: ignore
REG_A6XX_DBGC_CFG_DBGBUS_SEL_B = 0x00000601 # type: ignore
REG_A6XX_DBGC_CFG_DBGBUS_SEL_C = 0x00000602 # type: ignore
REG_A6XX_DBGC_CFG_DBGBUS_SEL_D = 0x00000603 # type: ignore
A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__MASK = 0x000000ff # type: ignore
A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__SHIFT = 0 # type: ignore
A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__MASK = 0x0000ff00 # type: ignore
A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__SHIFT = 8 # type: ignore
REG_A6XX_DBGC_CFG_DBGBUS_CNTLT = 0x00000604 # type: ignore
A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK = 0x0000003f # type: ignore
A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT = 0 # type: ignore
A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK = 0x00007000 # type: ignore
A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT = 12 # type: ignore
A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK = 0xf0000000 # type: ignore
A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT = 28 # type: ignore
REG_A6XX_DBGC_CFG_DBGBUS_CNTLM = 0x00000605 # type: ignore
A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK = 0x0f000000 # type: ignore
A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT = 24 # type: ignore
REG_A6XX_DBGC_CFG_DBGBUS_IVTL_0 = 0x00000608 # type: ignore
REG_A6XX_DBGC_CFG_DBGBUS_IVTL_1 = 0x00000609 # type: ignore
REG_A6XX_DBGC_CFG_DBGBUS_IVTL_2 = 0x0000060a # type: ignore
REG_A6XX_DBGC_CFG_DBGBUS_IVTL_3 = 0x0000060b # type: ignore
REG_A6XX_DBGC_CFG_DBGBUS_MASKL_0 = 0x0000060c # type: ignore
REG_A6XX_DBGC_CFG_DBGBUS_MASKL_1 = 0x0000060d # type: ignore
REG_A6XX_DBGC_CFG_DBGBUS_MASKL_2 = 0x0000060e # type: ignore
REG_A6XX_DBGC_CFG_DBGBUS_MASKL_3 = 0x0000060f # type: ignore
# BYTEL_0/BYTEL_1: sixteen 4-bit lane selects packed into two dwords.
REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_0 = 0x00000610 # type: ignore
A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK = 0x0000000f # type: ignore
A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT = 0 # type: ignore
A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK = 0x000000f0 # type: ignore
A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT = 4 # type: ignore
A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK = 0x00000f00 # type: ignore
A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT = 8 # type: ignore
A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK = 0x0000f000 # type: ignore
A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT = 12 # type: ignore
A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK = 0x000f0000 # type: ignore
A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT = 16 # type: ignore
A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK = 0x00f00000 # type: ignore
A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT = 20 # type: ignore
A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK = 0x0f000000 # type: ignore
A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT = 24 # type: ignore
A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK = 0xf0000000 # type: ignore
A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT = 28 # type: ignore
REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_1 = 0x00000611 # type: ignore
A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK = 0x0000000f # type: ignore
A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT = 0 # type: ignore
A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK = 0x000000f0 # type: ignore
A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT = 4 # type: ignore
A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK = 0x00000f00 # type: ignore
A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT = 8 # type: ignore
A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK = 0x0000f000 # type: ignore
A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT = 12 # type: ignore
A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK = 0x000f0000 # type: ignore
A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT = 16 # type: ignore
A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK = 0x00f00000 # type: ignore
A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT = 20 # type: ignore
A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK = 0x0f000000 # type: ignore
A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT = 24 # type: ignore
A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK = 0xf0000000 # type: ignore
A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT = 28 # type: ignore
REG_A6XX_DBGC_CFG_DBGBUS_TRACE_BUF1 = 0x0000062f # type: ignore
REG_A6XX_DBGC_CFG_DBGBUS_TRACE_BUF2 = 0x00000630 # type: ignore
# VSC perf-counter select, HLSQ debug read aperture, and UCHE (unified L2
# cache) configuration registers.
REG_A6XX_VSC_PERFCTR_VSC_SEL = lambda i0: (0x00000cd8 + 0x1*i0 ) # type: ignore
REG_A7XX_VSC_UNKNOWN_0CD8 = 0x00000cd8 # type: ignore
A7XX_VSC_UNKNOWN_0CD8_BINNING = 0x00000001 # type: ignore
REG_A6XX_HLSQ_DBG_AHB_READ_APERTURE = 0x0000c800 # type: ignore
REG_A6XX_HLSQ_DBG_READ_SEL = 0x0000d000 # type: ignore
REG_A6XX_UCHE_ADDR_MODE_CNTL = 0x00000e00 # type: ignore
REG_A6XX_UCHE_MODE_CNTL = 0x00000e01 # type: ignore
REG_A6XX_UCHE_WRITE_RANGE_MAX = 0x00000e05 # type: ignore
REG_A6XX_UCHE_WRITE_THRU_BASE = 0x00000e07 # type: ignore
REG_A6XX_UCHE_TRAP_BASE = 0x00000e09 # type: ignore
REG_A6XX_UCHE_GMEM_RANGE_MIN = 0x00000e0b # type: ignore
REG_A6XX_UCHE_GMEM_RANGE_MAX = 0x00000e0d # type: ignore
REG_A6XX_UCHE_CACHE_WAYS = 0x00000e17 # type: ignore
REG_A6XX_UCHE_FILTER_CNTL = 0x00000e18 # type: ignore
REG_A6XX_UCHE_CLIENT_PF = 0x00000e19 # type: ignore
A6XX_UCHE_CLIENT_PF_PERFSEL__MASK = 0x000000ff # type: ignore
A6XX_UCHE_CLIENT_PF_PERFSEL__SHIFT = 0 # type: ignore
REG_A6XX_UCHE_PERFCTR_UCHE_SEL = lambda i0: (0x00000e1c + 0x1*i0 ) # type: ignore
REG_A6XX_UCHE_GBIF_GX_CONFIG = 0x00000e3a # type: ignore
REG_A6XX_UCHE_CMDQ_CONFIG = 0x00000e3c # type: ignore
# VBIF (legacy) and GBIF (newer) memory bus-interface registers: version,
# halt control, test bus, and performance/power counters.
REG_A6XX_VBIF_VERSION = 0x00003000 # type: ignore
REG_A6XX_VBIF_CLKON = 0x00003001 # type: ignore
A6XX_VBIF_CLKON_FORCE_ON_TESTBUS = 0x00000002 # type: ignore
REG_A6XX_VBIF_GATE_OFF_WRREQ_EN = 0x0000302a # type: ignore
REG_A6XX_VBIF_XIN_HALT_CTRL0 = 0x00003080 # type: ignore
REG_A6XX_VBIF_XIN_HALT_CTRL1 = 0x00003081 # type: ignore
REG_A6XX_VBIF_TEST_BUS_OUT_CTRL = 0x00003084 # type: ignore
REG_A6XX_VBIF_TEST_BUS1_CTRL0 = 0x00003085 # type: ignore
REG_A6XX_VBIF_TEST_BUS1_CTRL1 = 0x00003086 # type: ignore
A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL__MASK = 0x0000000f # type: ignore
A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL__SHIFT = 0 # type: ignore
REG_A6XX_VBIF_TEST_BUS2_CTRL0 = 0x00003087 # type: ignore
REG_A6XX_VBIF_TEST_BUS2_CTRL1 = 0x00003088 # type: ignore
A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL__MASK = 0x000001ff # type: ignore
A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL__SHIFT = 0 # type: ignore
REG_A6XX_VBIF_TEST_BUS_OUT = 0x0000308c # type: ignore
REG_A6XX_VBIF_PERF_CNT_SEL0 = 0x000030d0 # type: ignore
REG_A6XX_VBIF_PERF_CNT_SEL1 = 0x000030d1 # type: ignore
REG_A6XX_VBIF_PERF_CNT_SEL2 = 0x000030d2 # type: ignore
REG_A6XX_VBIF_PERF_CNT_SEL3 = 0x000030d3 # type: ignore
REG_A6XX_VBIF_PERF_CNT_LOW0 = 0x000030d8 # type: ignore
REG_A6XX_VBIF_PERF_CNT_LOW1 = 0x000030d9 # type: ignore
REG_A6XX_VBIF_PERF_CNT_LOW2 = 0x000030da # type: ignore
REG_A6XX_VBIF_PERF_CNT_LOW3 = 0x000030db # type: ignore
REG_A6XX_VBIF_PERF_CNT_HIGH0 = 0x000030e0 # type: ignore
REG_A6XX_VBIF_PERF_CNT_HIGH1 = 0x000030e1 # type: ignore
REG_A6XX_VBIF_PERF_CNT_HIGH2 = 0x000030e2 # type: ignore
REG_A6XX_VBIF_PERF_CNT_HIGH3 = 0x000030e3 # type: ignore
REG_A6XX_VBIF_PERF_PWR_CNT_EN0 = 0x00003100 # type: ignore
REG_A6XX_VBIF_PERF_PWR_CNT_EN1 = 0x00003101 # type: ignore
REG_A6XX_VBIF_PERF_PWR_CNT_EN2 = 0x00003102 # type: ignore
REG_A6XX_VBIF_PERF_PWR_CNT_LOW0 = 0x00003110 # type: ignore
REG_A6XX_VBIF_PERF_PWR_CNT_LOW1 = 0x00003111 # type: ignore
REG_A6XX_VBIF_PERF_PWR_CNT_LOW2 = 0x00003112 # type: ignore
REG_A6XX_VBIF_PERF_PWR_CNT_HIGH0 = 0x00003118 # type: ignore
REG_A6XX_VBIF_PERF_PWR_CNT_HIGH1 = 0x00003119 # type: ignore
REG_A6XX_VBIF_PERF_PWR_CNT_HIGH2 = 0x0000311a # type: ignore
# GBIF replaces VBIF on newer A6xx parts.
REG_A6XX_GBIF_SCACHE_CNTL0 = 0x00003c01 # type: ignore
REG_A6XX_GBIF_SCACHE_CNTL1 = 0x00003c02 # type: ignore
REG_A6XX_GBIF_QSB_SIDE0 = 0x00003c03 # type: ignore
REG_A6XX_GBIF_QSB_SIDE1 = 0x00003c04 # type: ignore
REG_A6XX_GBIF_QSB_SIDE2 = 0x00003c05 # type: ignore
REG_A6XX_GBIF_QSB_SIDE3 = 0x00003c06 # type: ignore
REG_A6XX_GBIF_HALT = 0x00003c45 # type: ignore
REG_A6XX_GBIF_HALT_ACK = 0x00003c46 # type: ignore
REG_A6XX_GBIF_PERF_PWR_CNT_EN = 0x00003cc0 # type: ignore
REG_A6XX_GBIF_PERF_PWR_CNT_CLR = 0x00003cc1 # type: ignore
REG_A6XX_GBIF_PERF_CNT_SEL = 0x00003cc2 # type: ignore
REG_A6XX_GBIF_PERF_PWR_CNT_SEL = 0x00003cc3 # type: ignore
REG_A6XX_GBIF_PERF_CNT_LOW0 = 0x00003cc4 # type: ignore
REG_A6XX_GBIF_PERF_CNT_LOW1 = 0x00003cc5 # type: ignore
REG_A6XX_GBIF_PERF_CNT_LOW2 = 0x00003cc6 # type: ignore
REG_A6XX_GBIF_PERF_CNT_LOW3 = 0x00003cc7 # type: ignore
REG_A6XX_GBIF_PERF_CNT_HIGH0 = 0x00003cc8 # type: ignore
REG_A6XX_GBIF_PERF_CNT_HIGH1 = 0x00003cc9 # type: ignore
REG_A6XX_GBIF_PERF_CNT_HIGH2 = 0x00003cca # type: ignore
REG_A6XX_GBIF_PERF_CNT_HIGH3 = 0x00003ccb # type: ignore
REG_A6XX_GBIF_PWR_CNT_LOW0 = 0x00003ccc # type: ignore
REG_A6XX_GBIF_PWR_CNT_LOW1 = 0x00003ccd # type: ignore
REG_A6XX_GBIF_PWR_CNT_LOW2 = 0x00003cce # type: ignore
REG_A6XX_GBIF_PWR_CNT_HIGH0 = 0x00003ccf # type: ignore
REG_A6XX_GBIF_PWR_CNT_HIGH1 = 0x00003cd0 # type: ignore
REG_A6XX_GBIF_PWR_CNT_HIGH2 = 0x00003cd1 # type: ignore
# VSC (visibility stream compressor) registers: bin sizing, per-pipe
# configuration, and the primitive/draw visibility stream buffers used by
# tiled (binning) rendering.
REG_A6XX_VSC_DBG_ECO_CNTL = 0x00000c00 # type: ignore
REG_A6XX_VSC_BIN_SIZE = 0x00000c02 # type: ignore
A6XX_VSC_BIN_SIZE_WIDTH__MASK = 0x000000ff # type: ignore
A6XX_VSC_BIN_SIZE_WIDTH__SHIFT = 0 # type: ignore
A6XX_VSC_BIN_SIZE_HEIGHT__MASK = 0x0001ff00 # type: ignore
A6XX_VSC_BIN_SIZE_HEIGHT__SHIFT = 8 # type: ignore
REG_A6XX_VSC_SIZE_BASE = 0x00000c03 # type: ignore
REG_A6XX_VSC_EXPANDED_BIN_CNTL = 0x00000c06 # type: ignore
A6XX_VSC_EXPANDED_BIN_CNTL_NX__MASK = 0x000007fe # type: ignore
A6XX_VSC_EXPANDED_BIN_CNTL_NX__SHIFT = 1 # type: ignore
A6XX_VSC_EXPANDED_BIN_CNTL_NY__MASK = 0x001ff800 # type: ignore
A6XX_VSC_EXPANDED_BIN_CNTL_NY__SHIFT = 11 # type: ignore
# Per-pipe config: i0 is the VSC pipe index (stride 1 dword).
REG_A6XX_VSC_PIPE_CONFIG = lambda i0: (0x00000c10 + 0x1*i0 ) # type: ignore
A6XX_VSC_PIPE_CONFIG_REG_X__MASK = 0x000003ff # type: ignore
A6XX_VSC_PIPE_CONFIG_REG_X__SHIFT = 0 # type: ignore
A6XX_VSC_PIPE_CONFIG_REG_Y__MASK = 0x000ffc00 # type: ignore
A6XX_VSC_PIPE_CONFIG_REG_Y__SHIFT = 10 # type: ignore
A6XX_VSC_PIPE_CONFIG_REG_W__MASK = 0x03f00000 # type: ignore
A6XX_VSC_PIPE_CONFIG_REG_W__SHIFT = 20 # type: ignore
A6XX_VSC_PIPE_CONFIG_REG_H__MASK = 0xfc000000 # type: ignore
A6XX_VSC_PIPE_CONFIG_REG_H__SHIFT = 26 # type: ignore
REG_A6XX_VSC_PIPE_DATA_PRIM_BASE = 0x00000c30 # type: ignore
REG_A6XX_VSC_PIPE_DATA_PRIM_STRIDE = 0x00000c32 # type: ignore
REG_A6XX_VSC_PIPE_DATA_PRIM_LENGTH = 0x00000c33 # type: ignore
REG_A6XX_VSC_PIPE_DATA_DRAW_BASE = 0x00000c34 # type: ignore
REG_A6XX_VSC_PIPE_DATA_DRAW_STRIDE = 0x00000c36 # type: ignore
REG_A6XX_VSC_PIPE_DATA_DRAW_LENGTH = 0x00000c37 # type: ignore
REG_A6XX_VSC_CHANNEL_VISIBILITY = lambda i0: (0x00000c38 + 0x1*i0 ) # type: ignore
REG_A6XX_VSC_PIPE_DATA_PRIM_SIZE = lambda i0: (0x00000c58 + 0x1*i0 ) # type: ignore
REG_A6XX_VSC_PIPE_DATA_DRAW_SIZE = lambda i0: (0x00000c78 + 0x1*i0 ) # type: ignore
REG_A7XX_VSC_UNKNOWN_0D08 = 0x00000d08 # type: ignore
REG_A7XX_UCHE_UNKNOWN_0E10 = 0x00000e10 # type: ignore
REG_A7XX_UCHE_UNKNOWN_0E11 = 0x00000e11 # type: ignore
REG_A6XX_UCHE_UNKNOWN_0E12 = 0x00000e12 # type: ignore
# GRAS_CL: rasterizer clipper registers — clip control, user clip/cull
# distance masks per shader stage, interpolation control, guardband, and
# per-viewport transform (offset/scale as raw 32-bit float registers).
REG_A6XX_GRAS_CL_CNTL = 0x00008000 # type: ignore
A6XX_GRAS_CL_CNTL_CLIP_DISABLE = 0x00000001 # type: ignore
A6XX_GRAS_CL_CNTL_ZNEAR_CLIP_DISABLE = 0x00000002 # type: ignore
A6XX_GRAS_CL_CNTL_ZFAR_CLIP_DISABLE = 0x00000004 # type: ignore
A6XX_GRAS_CL_CNTL_Z_CLAMP_ENABLE = 0x00000020 # type: ignore
A6XX_GRAS_CL_CNTL_ZERO_GB_SCALE_Z = 0x00000040 # type: ignore
A6XX_GRAS_CL_CNTL_VP_CLIP_CODE_IGNORE = 0x00000080 # type: ignore
A6XX_GRAS_CL_CNTL_VP_XFORM_DISABLE = 0x00000100 # type: ignore
A6XX_GRAS_CL_CNTL_PERSP_DIVISION_DISABLE = 0x00000200 # type: ignore
REG_A6XX_GRAS_CL_VS_CLIP_CULL_DISTANCE = 0x00008001 # type: ignore
A6XX_GRAS_CL_VS_CLIP_CULL_DISTANCE_CLIP_MASK__MASK = 0x000000ff # type: ignore
A6XX_GRAS_CL_VS_CLIP_CULL_DISTANCE_CLIP_MASK__SHIFT = 0 # type: ignore
A6XX_GRAS_CL_VS_CLIP_CULL_DISTANCE_CULL_MASK__MASK = 0x0000ff00 # type: ignore
A6XX_GRAS_CL_VS_CLIP_CULL_DISTANCE_CULL_MASK__SHIFT = 8 # type: ignore
REG_A6XX_GRAS_CL_DS_CLIP_CULL_DISTANCE = 0x00008002 # type: ignore
A6XX_GRAS_CL_DS_CLIP_CULL_DISTANCE_CLIP_MASK__MASK = 0x000000ff # type: ignore
A6XX_GRAS_CL_DS_CLIP_CULL_DISTANCE_CLIP_MASK__SHIFT = 0 # type: ignore
A6XX_GRAS_CL_DS_CLIP_CULL_DISTANCE_CULL_MASK__MASK = 0x0000ff00 # type: ignore
A6XX_GRAS_CL_DS_CLIP_CULL_DISTANCE_CULL_MASK__SHIFT = 8 # type: ignore
REG_A6XX_GRAS_CL_GS_CLIP_CULL_DISTANCE = 0x00008003 # type: ignore
A6XX_GRAS_CL_GS_CLIP_CULL_DISTANCE_CLIP_MASK__MASK = 0x000000ff # type: ignore
A6XX_GRAS_CL_GS_CLIP_CULL_DISTANCE_CLIP_MASK__SHIFT = 0 # type: ignore
A6XX_GRAS_CL_GS_CLIP_CULL_DISTANCE_CULL_MASK__MASK = 0x0000ff00 # type: ignore
A6XX_GRAS_CL_GS_CLIP_CULL_DISTANCE_CULL_MASK__SHIFT = 8 # type: ignore
REG_A6XX_GRAS_CL_ARRAY_SIZE = 0x00008004 # type: ignore
REG_A6XX_GRAS_CL_INTERP_CNTL = 0x00008005 # type: ignore
A6XX_GRAS_CL_INTERP_CNTL_IJ_PERSP_PIXEL = 0x00000001 # type: ignore
A6XX_GRAS_CL_INTERP_CNTL_IJ_PERSP_CENTROID = 0x00000002 # type: ignore
A6XX_GRAS_CL_INTERP_CNTL_IJ_PERSP_SAMPLE = 0x00000004 # type: ignore
A6XX_GRAS_CL_INTERP_CNTL_IJ_LINEAR_PIXEL = 0x00000008 # type: ignore
A6XX_GRAS_CL_INTERP_CNTL_IJ_LINEAR_CENTROID = 0x00000010 # type: ignore
A6XX_GRAS_CL_INTERP_CNTL_IJ_LINEAR_SAMPLE = 0x00000020 # type: ignore
A6XX_GRAS_CL_INTERP_CNTL_COORD_MASK__MASK = 0x000003c0 # type: ignore
A6XX_GRAS_CL_INTERP_CNTL_COORD_MASK__SHIFT = 6 # type: ignore
A6XX_GRAS_CL_INTERP_CNTL_UNK10 = 0x00000400 # type: ignore
A6XX_GRAS_CL_INTERP_CNTL_UNK11 = 0x00000800 # type: ignore
REG_A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ = 0x00008006 # type: ignore
A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK = 0x000001ff # type: ignore
A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT = 0 # type: ignore
A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK = 0x0007fc00 # type: ignore
A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT = 10 # type: ignore
REG_A7XX_GRAS_UNKNOWN_8007 = 0x00008007 # type: ignore
REG_A7XX_GRAS_UNKNOWN_8008 = 0x00008008 # type: ignore
REG_A7XX_GRAS_UNKNOWN_8009 = 0x00008009 # type: ignore
REG_A7XX_GRAS_UNKNOWN_800A = 0x0000800a # type: ignore
REG_A7XX_GRAS_UNKNOWN_800B = 0x0000800b # type: ignore
REG_A7XX_GRAS_UNKNOWN_800C = 0x0000800c # type: ignore
# Viewport array: i0 is the viewport index; each viewport spans 6 dwords
# (X/Y/Z offset and scale). Full-width masks: whole dword is the value.
REG_A6XX_GRAS_CL_VIEWPORT = lambda i0: (0x00008010 + 0x6*i0 ) # type: ignore
A6XX_GRAS_CL_VIEWPORT_XOFFSET__MASK = 0xffffffff # type: ignore
A6XX_GRAS_CL_VIEWPORT_XOFFSET__SHIFT = 0 # type: ignore
A6XX_GRAS_CL_VIEWPORT_XSCALE__MASK = 0xffffffff # type: ignore
A6XX_GRAS_CL_VIEWPORT_XSCALE__SHIFT = 0 # type: ignore
A6XX_GRAS_CL_VIEWPORT_YOFFSET__MASK = 0xffffffff # type: ignore
A6XX_GRAS_CL_VIEWPORT_YOFFSET__SHIFT = 0 # type: ignore
A6XX_GRAS_CL_VIEWPORT_YSCALE__MASK = 0xffffffff # type: ignore
A6XX_GRAS_CL_VIEWPORT_YSCALE__SHIFT = 0 # type: ignore
A6XX_GRAS_CL_VIEWPORT_ZOFFSET__MASK = 0xffffffff # type: ignore
A6XX_GRAS_CL_VIEWPORT_ZOFFSET__SHIFT = 0 # type: ignore
A6XX_GRAS_CL_VIEWPORT_ZSCALE__MASK = 0xffffffff # type: ignore
A6XX_GRAS_CL_VIEWPORT_ZSCALE__SHIFT = 0 # type: ignore
REG_A6XX_GRAS_CL_VIEWPORT_ZCLAMP = lambda i0: (0x00008070 + 0x2*i0 ) # type: ignore
A6XX_GRAS_CL_VIEWPORT_ZCLAMP_MIN__MASK = 0xffffffff # type: ignore
A6XX_GRAS_CL_VIEWPORT_ZCLAMP_MIN__SHIFT = 0 # type: ignore
A6XX_GRAS_CL_VIEWPORT_ZCLAMP_MAX__MASK = 0xffffffff # type: ignore
A6XX_GRAS_CL_VIEWPORT_ZCLAMP_MAX__SHIFT = 0 # type: ignore
# GRAS_SU: triangle setup-unit registers — cull/winding, line width, point
# size, polygon offset, depth buffer info, conservative rasterization, and
# per-stage layer/view-writing flags.
REG_A6XX_GRAS_SU_CNTL = 0x00008090 # type: ignore
A6XX_GRAS_SU_CNTL_CULL_FRONT = 0x00000001 # type: ignore
A6XX_GRAS_SU_CNTL_CULL_BACK = 0x00000002 # type: ignore
A6XX_GRAS_SU_CNTL_FRONT_CW = 0x00000004 # type: ignore
A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK = 0x000007f8 # type: ignore
A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__SHIFT = 3 # type: ignore
A6XX_GRAS_SU_CNTL_POLY_OFFSET = 0x00000800 # type: ignore
A6XX_GRAS_SU_CNTL_UNK12 = 0x00001000 # type: ignore
A6XX_GRAS_SU_CNTL_LINE_MODE__MASK = 0x00002000 # type: ignore
A6XX_GRAS_SU_CNTL_LINE_MODE__SHIFT = 13 # type: ignore
A6XX_GRAS_SU_CNTL_UNK15__MASK = 0x00018000 # type: ignore
A6XX_GRAS_SU_CNTL_UNK15__SHIFT = 15 # type: ignore
A6XX_GRAS_SU_CNTL_MULTIVIEW_ENABLE = 0x00020000 # type: ignore
A6XX_GRAS_SU_CNTL_RENDERTARGETINDEXINCR = 0x00040000 # type: ignore
A6XX_GRAS_SU_CNTL_VIEWPORTINDEXINCR = 0x00080000 # type: ignore
A6XX_GRAS_SU_CNTL_UNK20__MASK = 0x00700000 # type: ignore
A6XX_GRAS_SU_CNTL_UNK20__SHIFT = 20 # type: ignore
REG_A6XX_GRAS_SU_POINT_MINMAX = 0x00008091 # type: ignore
A6XX_GRAS_SU_POINT_MINMAX_MIN__MASK = 0x0000ffff # type: ignore
A6XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT = 0 # type: ignore
A6XX_GRAS_SU_POINT_MINMAX_MAX__MASK = 0xffff0000 # type: ignore
A6XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT = 16 # type: ignore
REG_A6XX_GRAS_SU_POINT_SIZE = 0x00008092 # type: ignore
A6XX_GRAS_SU_POINT_SIZE__MASK = 0x0000ffff # type: ignore
A6XX_GRAS_SU_POINT_SIZE__SHIFT = 0 # type: ignore
REG_A6XX_GRAS_SU_DEPTH_PLANE_CNTL = 0x00008094 # type: ignore
A6XX_GRAS_SU_DEPTH_PLANE_CNTL_Z_MODE__MASK = 0x00000003 # type: ignore
A6XX_GRAS_SU_DEPTH_PLANE_CNTL_Z_MODE__SHIFT = 0 # type: ignore
REG_A6XX_GRAS_SU_POLY_OFFSET_SCALE = 0x00008095 # type: ignore
A6XX_GRAS_SU_POLY_OFFSET_SCALE__MASK = 0xffffffff # type: ignore
A6XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT = 0 # type: ignore
REG_A6XX_GRAS_SU_POLY_OFFSET_OFFSET = 0x00008096 # type: ignore
A6XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK = 0xffffffff # type: ignore
A6XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT = 0 # type: ignore
REG_A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP = 0x00008097 # type: ignore
A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__MASK = 0xffffffff # type: ignore
A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__SHIFT = 0 # type: ignore
REG_A6XX_GRAS_SU_DEPTH_BUFFER_INFO = 0x00008098 # type: ignore
A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK = 0x00000007 # type: ignore
A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT = 0 # type: ignore
A6XX_GRAS_SU_DEPTH_BUFFER_INFO_UNK3 = 0x00000008 # type: ignore
REG_A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL = 0x00008099 # type: ignore
A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_CONSERVATIVERASEN = 0x00000001 # type: ignore
A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_SHIFTAMOUNT__MASK = 0x00000006 # type: ignore
A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_SHIFTAMOUNT__SHIFT = 1 # type: ignore
A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_INNERCONSERVATIVERASEN = 0x00000008 # type: ignore
A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_UNK4__MASK = 0x00000030 # type: ignore
A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_UNK4__SHIFT = 4 # type: ignore
REG_A6XX_GRAS_SU_PATH_RENDERING_CNTL = 0x0000809a # type: ignore
A6XX_GRAS_SU_PATH_RENDERING_CNTL_UNK0 = 0x00000001 # type: ignore
A6XX_GRAS_SU_PATH_RENDERING_CNTL_LINELENGTHEN = 0x00000002 # type: ignore
REG_A6XX_GRAS_SU_VS_SIV_CNTL = 0x0000809b # type: ignore
A6XX_GRAS_SU_VS_SIV_CNTL_WRITES_LAYER = 0x00000001 # type: ignore
A6XX_GRAS_SU_VS_SIV_CNTL_WRITES_VIEW = 0x00000002 # type: ignore
REG_A6XX_GRAS_SU_GS_SIV_CNTL = 0x0000809c # type: ignore
A6XX_GRAS_SU_GS_SIV_CNTL_WRITES_LAYER = 0x00000001 # type: ignore
A6XX_GRAS_SU_GS_SIV_CNTL_WRITES_VIEW = 0x00000002 # type: ignore
REG_A6XX_GRAS_SU_DS_SIV_CNTL = 0x0000809d # type: ignore
A6XX_GRAS_SU_DS_SIV_CNTL_WRITES_LAYER = 0x00000001 # type: ignore
A6XX_GRAS_SU_DS_SIV_CNTL_WRITES_VIEW = 0x00000002 # type: ignore
# GRAS_SC: scan-converter registers — bin control, MSAA sample counts and
# programmable sample positions, and the screen/viewport/window scissor
# rectangle arrays.
REG_A6XX_GRAS_SC_CNTL = 0x000080a0 # type: ignore
A6XX_GRAS_SC_CNTL_CCUSINGLECACHELINESIZE__MASK = 0x00000007 # type: ignore
A6XX_GRAS_SC_CNTL_CCUSINGLECACHELINESIZE__SHIFT = 0 # type: ignore
A6XX_GRAS_SC_CNTL_SINGLE_PRIM_MODE__MASK = 0x00000018 # type: ignore
A6XX_GRAS_SC_CNTL_SINGLE_PRIM_MODE__SHIFT = 3 # type: ignore
A6XX_GRAS_SC_CNTL_RASTER_MODE__MASK = 0x00000020 # type: ignore
A6XX_GRAS_SC_CNTL_RASTER_MODE__SHIFT = 5 # type: ignore
A6XX_GRAS_SC_CNTL_RASTER_DIRECTION__MASK = 0x000000c0 # type: ignore
A6XX_GRAS_SC_CNTL_RASTER_DIRECTION__SHIFT = 6 # type: ignore
A6XX_GRAS_SC_CNTL_SEQUENCED_THREAD_DISTRIBUTION__MASK = 0x00000100 # type: ignore
A6XX_GRAS_SC_CNTL_SEQUENCED_THREAD_DISTRIBUTION__SHIFT = 8 # type: ignore
A6XX_GRAS_SC_CNTL_UNK9 = 0x00000200 # type: ignore
A6XX_GRAS_SC_CNTL_ROTATION__MASK = 0x00000c00 # type: ignore
A6XX_GRAS_SC_CNTL_ROTATION__SHIFT = 10 # type: ignore
A6XX_GRAS_SC_CNTL_EARLYVIZOUTEN = 0x00001000 # type: ignore
REG_A6XX_GRAS_SC_BIN_CNTL = 0x000080a1 # type: ignore
A6XX_GRAS_SC_BIN_CNTL_BINW__MASK = 0x0000003f # type: ignore
A6XX_GRAS_SC_BIN_CNTL_BINW__SHIFT = 0 # type: ignore
A6XX_GRAS_SC_BIN_CNTL_BINH__MASK = 0x00007f00 # type: ignore
A6XX_GRAS_SC_BIN_CNTL_BINH__SHIFT = 8 # type: ignore
A6XX_GRAS_SC_BIN_CNTL_RENDER_MODE__MASK = 0x001c0000 # type: ignore
A6XX_GRAS_SC_BIN_CNTL_RENDER_MODE__SHIFT = 18 # type: ignore
A6XX_GRAS_SC_BIN_CNTL_FORCE_LRZ_WRITE_DIS = 0x00200000 # type: ignore
A6XX_GRAS_SC_BIN_CNTL_BUFFERS_LOCATION__MASK = 0x00c00000 # type: ignore
A6XX_GRAS_SC_BIN_CNTL_BUFFERS_LOCATION__SHIFT = 22 # type: ignore
A6XX_GRAS_SC_BIN_CNTL_LRZ_FEEDBACK_ZMODE_MASK__MASK = 0x07000000 # type: ignore
A6XX_GRAS_SC_BIN_CNTL_LRZ_FEEDBACK_ZMODE_MASK__SHIFT = 24 # type: ignore
A6XX_GRAS_SC_BIN_CNTL_UNK27 = 0x08000000 # type: ignore
REG_A6XX_GRAS_SC_RAS_MSAA_CNTL = 0x000080a2 # type: ignore
A6XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__MASK = 0x00000003 # type: ignore
A6XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__SHIFT = 0 # type: ignore
A6XX_GRAS_SC_RAS_MSAA_CNTL_UNK2 = 0x00000004 # type: ignore
A6XX_GRAS_SC_RAS_MSAA_CNTL_UNK3 = 0x00000008 # type: ignore
REG_A6XX_GRAS_SC_DEST_MSAA_CNTL = 0x000080a3 # type: ignore
A6XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__MASK = 0x00000003 # type: ignore
A6XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__SHIFT = 0 # type: ignore
A6XX_GRAS_SC_DEST_MSAA_CNTL_MSAA_DISABLE = 0x00000004 # type: ignore
REG_A6XX_GRAS_SC_MSAA_SAMPLE_POS_CNTL = 0x000080a4 # type: ignore
A6XX_GRAS_SC_MSAA_SAMPLE_POS_CNTL_UNK0 = 0x00000001 # type: ignore
A6XX_GRAS_SC_MSAA_SAMPLE_POS_CNTL_LOCATION_ENABLE = 0x00000002 # type: ignore
# Programmable MSAA sample positions: four samples per dword, 4-bit X/Y each.
REG_A6XX_GRAS_SC_PROGRAMMABLE_MSAA_POS_0 = 0x000080a5 # type: ignore
A6XX_GRAS_SC_PROGRAMMABLE_MSAA_POS_0_SAMPLE_0_X__MASK = 0x0000000f # type: ignore
A6XX_GRAS_SC_PROGRAMMABLE_MSAA_POS_0_SAMPLE_0_X__SHIFT = 0 # type: ignore
A6XX_GRAS_SC_PROGRAMMABLE_MSAA_POS_0_SAMPLE_0_Y__MASK = 0x000000f0 # type: ignore
A6XX_GRAS_SC_PROGRAMMABLE_MSAA_POS_0_SAMPLE_0_Y__SHIFT = 4 # type: ignore
A6XX_GRAS_SC_PROGRAMMABLE_MSAA_POS_0_SAMPLE_1_X__MASK = 0x00000f00 # type: ignore
A6XX_GRAS_SC_PROGRAMMABLE_MSAA_POS_0_SAMPLE_1_X__SHIFT = 8 # type: ignore
A6XX_GRAS_SC_PROGRAMMABLE_MSAA_POS_0_SAMPLE_1_Y__MASK = 0x0000f000 # type: ignore
A6XX_GRAS_SC_PROGRAMMABLE_MSAA_POS_0_SAMPLE_1_Y__SHIFT = 12 # type: ignore
A6XX_GRAS_SC_PROGRAMMABLE_MSAA_POS_0_SAMPLE_2_X__MASK = 0x000f0000 # type: ignore
A6XX_GRAS_SC_PROGRAMMABLE_MSAA_POS_0_SAMPLE_2_X__SHIFT = 16 # type: ignore
A6XX_GRAS_SC_PROGRAMMABLE_MSAA_POS_0_SAMPLE_2_Y__MASK = 0x00f00000 # type: ignore
A6XX_GRAS_SC_PROGRAMMABLE_MSAA_POS_0_SAMPLE_2_Y__SHIFT = 20 # type: ignore
A6XX_GRAS_SC_PROGRAMMABLE_MSAA_POS_0_SAMPLE_3_X__MASK = 0x0f000000 # type: ignore
A6XX_GRAS_SC_PROGRAMMABLE_MSAA_POS_0_SAMPLE_3_X__SHIFT = 24 # type: ignore
A6XX_GRAS_SC_PROGRAMMABLE_MSAA_POS_0_SAMPLE_3_Y__MASK = 0xf0000000 # type: ignore
A6XX_GRAS_SC_PROGRAMMABLE_MSAA_POS_0_SAMPLE_3_Y__SHIFT = 28 # type: ignore
REG_A6XX_GRAS_SC_PROGRAMMABLE_MSAA_POS_1 = 0x000080a6 # type: ignore
A6XX_GRAS_SC_PROGRAMMABLE_MSAA_POS_1_SAMPLE_0_X__MASK = 0x0000000f # type: ignore
A6XX_GRAS_SC_PROGRAMMABLE_MSAA_POS_1_SAMPLE_0_X__SHIFT = 0 # type: ignore
A6XX_GRAS_SC_PROGRAMMABLE_MSAA_POS_1_SAMPLE_0_Y__MASK = 0x000000f0 # type: ignore
A6XX_GRAS_SC_PROGRAMMABLE_MSAA_POS_1_SAMPLE_0_Y__SHIFT = 4 # type: ignore
A6XX_GRAS_SC_PROGRAMMABLE_MSAA_POS_1_SAMPLE_1_X__MASK = 0x00000f00 # type: ignore
A6XX_GRAS_SC_PROGRAMMABLE_MSAA_POS_1_SAMPLE_1_X__SHIFT = 8 # type: ignore
A6XX_GRAS_SC_PROGRAMMABLE_MSAA_POS_1_SAMPLE_1_Y__MASK = 0x0000f000 # type: ignore
A6XX_GRAS_SC_PROGRAMMABLE_MSAA_POS_1_SAMPLE_1_Y__SHIFT = 12 # type: ignore
A6XX_GRAS_SC_PROGRAMMABLE_MSAA_POS_1_SAMPLE_2_X__MASK = 0x000f0000 # type: ignore
A6XX_GRAS_SC_PROGRAMMABLE_MSAA_POS_1_SAMPLE_2_X__SHIFT = 16 # type: ignore
A6XX_GRAS_SC_PROGRAMMABLE_MSAA_POS_1_SAMPLE_2_Y__MASK = 0x00f00000 # type: ignore
A6XX_GRAS_SC_PROGRAMMABLE_MSAA_POS_1_SAMPLE_2_Y__SHIFT = 20 # type: ignore
A6XX_GRAS_SC_PROGRAMMABLE_MSAA_POS_1_SAMPLE_3_X__MASK = 0x0f000000 # type: ignore
A6XX_GRAS_SC_PROGRAMMABLE_MSAA_POS_1_SAMPLE_3_X__SHIFT = 24 # type: ignore
A6XX_GRAS_SC_PROGRAMMABLE_MSAA_POS_1_SAMPLE_3_Y__MASK = 0xf0000000 # type: ignore
A6XX_GRAS_SC_PROGRAMMABLE_MSAA_POS_1_SAMPLE_3_Y__SHIFT = 28 # type: ignore
REG_A7XX_GRAS_UNKNOWN_80A7 = 0x000080a7 # type: ignore
REG_A6XX_GRAS_UNKNOWN_80AF = 0x000080af # type: ignore
# Scissor arrays: i0 indexes the rectangle; each entry is 2 dwords (TL, BR),
# with X in the low and Y in the high 16 bits of each dword.
REG_A6XX_GRAS_SC_SCREEN_SCISSOR = lambda i0: (0x000080b0 + 0x2*i0 ) # type: ignore
A6XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK = 0x0000ffff # type: ignore
A6XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT = 0 # type: ignore
A6XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK = 0xffff0000 # type: ignore
A6XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT = 16 # type: ignore
A6XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK = 0x0000ffff # type: ignore
A6XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT = 0 # type: ignore
A6XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK = 0xffff0000 # type: ignore
A6XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT = 16 # type: ignore
REG_A6XX_GRAS_SC_VIEWPORT_SCISSOR = lambda i0: (0x000080d0 + 0x2*i0 ) # type: ignore
A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_X__MASK = 0x0000ffff # type: ignore
A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_X__SHIFT = 0 # type: ignore
A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_Y__MASK = 0xffff0000 # type: ignore
A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_Y__SHIFT = 16 # type: ignore
A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_X__MASK = 0x0000ffff # type: ignore
A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_X__SHIFT = 0 # type: ignore
A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_Y__MASK = 0xffff0000 # type: ignore
A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_Y__SHIFT = 16 # type: ignore
REG_A6XX_GRAS_SC_WINDOW_SCISSOR_TL = 0x000080f0 # type: ignore
A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK = 0x00003fff # type: ignore
A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT = 0 # type: ignore
A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK = 0x3fff0000 # type: ignore
A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT = 16 # type: ignore
REG_A6XX_GRAS_SC_WINDOW_SCISSOR_BR = 0x000080f1 # type: ignore
A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK = 0x00003fff # type: ignore
A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT = 0 # type: ignore
A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK = 0x3fff0000 # type: ignore
A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT = 16 # type: ignore
REG_A7XX_GRAS_VRS_CONFIG = 0x000080f4 # type: ignore
A7XX_GRAS_VRS_CONFIG_PIPELINE_FSR_ENABLE = 0x00000001 # type: ignore
A7XX_GRAS_VRS_CONFIG_FRAG_SIZE_X__MASK = 0x00000006 # type: ignore
A7XX_GRAS_VRS_CONFIG_FRAG_SIZE_X__SHIFT = 1 # type: ignore
A7XX_GRAS_VRS_CONFIG_FRAG_SIZE_Y__MASK = 0x00000018 # type: ignore
A7XX_GRAS_VRS_CONFIG_FRAG_SIZE_Y__SHIFT = 3 # type: ignore
A7XX_GRAS_VRS_CONFIG_COMBINER_OP_1__MASK = 0x000000e0 # type: ignore
A7XX_GRAS_VRS_CONFIG_COMBINER_OP_1__SHIFT = 5 # type: ignore
A7XX_GRAS_VRS_CONFIG_COMBINER_OP_2__MASK = 0x00000700 # type: ignore
A7XX_GRAS_VRS_CONFIG_COMBINER_OP_2__SHIFT = 8 # type: ignore
A7XX_GRAS_VRS_CONFIG_ATTACHMENT_FSR_ENABLE = 0x00002000 # type: ignore
A7XX_GRAS_VRS_CONFIG_PRIMITIVE_FSR_ENABLE = 0x00100000 # type: ignore
REG_A7XX_GRAS_QUALITY_BUFFER_INFO = 0x000080f5 # type: ignore
A7XX_GRAS_QUALITY_BUFFER_INFO_LAYERED = 0x00000001 # type: ignore
A7XX_GRAS_QUALITY_BUFFER_INFO_TILE_MODE__MASK = 0x00000006 # type: ignore
A7XX_GRAS_QUALITY_BUFFER_INFO_TILE_MODE__SHIFT = 1 # type: ignore
REG_A7XX_GRAS_QUALITY_BUFFER_DIMENSION = 0x000080f6 # type: ignore
A7XX_GRAS_QUALITY_BUFFER_DIMENSION_WIDTH__MASK = 0x0000ffff # type: ignore
A7XX_GRAS_QUALITY_BUFFER_DIMENSION_WIDTH__SHIFT = 0 # type: ignore
A7XX_GRAS_QUALITY_BUFFER_DIMENSION_HEIGHT__MASK = 0xffff0000 # type: ignore
A7XX_GRAS_QUALITY_BUFFER_DIMENSION_HEIGHT__SHIFT = 16 # type: ignore
REG_A7XX_GRAS_QUALITY_BUFFER_BASE = 0x000080f8 # type: ignore
REG_A7XX_GRAS_QUALITY_BUFFER_PITCH = 0x000080fa # type: ignore
A7XX_GRAS_QUALITY_BUFFER_PITCH_PITCH__MASK = 0x000000ff # type: ignore
A7XX_GRAS_QUALITY_BUFFER_PITCH_PITCH__SHIFT = 0 # type: ignore
A7XX_GRAS_QUALITY_BUFFER_PITCH_ARRAY_PITCH__MASK = 0x1ffffc00 # type: ignore
A7XX_GRAS_QUALITY_BUFFER_PITCH_ARRAY_PITCH__SHIFT = 10 # type: ignore
# GRAS LRZ (low-resolution Z / hierarchical depth) control, buffer
# layout and view info registers — generated defs.
REG_A6XX_GRAS_LRZ_CNTL = 0x00008100 # type: ignore
A6XX_GRAS_LRZ_CNTL_ENABLE = 0x00000001 # type: ignore
A6XX_GRAS_LRZ_CNTL_LRZ_WRITE = 0x00000002 # type: ignore
A6XX_GRAS_LRZ_CNTL_GREATER = 0x00000004 # type: ignore
A6XX_GRAS_LRZ_CNTL_FC_ENABLE = 0x00000008 # type: ignore
A6XX_GRAS_LRZ_CNTL_Z_WRITE_ENABLE = 0x00000010 # type: ignore
A6XX_GRAS_LRZ_CNTL_Z_BOUNDS_ENABLE = 0x00000020 # type: ignore
A6XX_GRAS_LRZ_CNTL_DIR__MASK = 0x000000c0 # type: ignore
A6XX_GRAS_LRZ_CNTL_DIR__SHIFT = 6 # type: ignore
A6XX_GRAS_LRZ_CNTL_DIR_WRITE = 0x00000100 # type: ignore
A6XX_GRAS_LRZ_CNTL_DISABLE_ON_WRONG_DIR = 0x00000200 # type: ignore
A6XX_GRAS_LRZ_CNTL_Z_FUNC__MASK = 0x00003800 # type: ignore
A6XX_GRAS_LRZ_CNTL_Z_FUNC__SHIFT = 11 # type: ignore
REG_A6XX_GRAS_LRZ_PS_INPUT_CNTL = 0x00008101 # type: ignore
A6XX_GRAS_LRZ_PS_INPUT_CNTL_SAMPLEID = 0x00000001 # type: ignore
A6XX_GRAS_LRZ_PS_INPUT_CNTL_FRAGCOORDSAMPLEMODE__MASK = 0x00000006 # type: ignore
A6XX_GRAS_LRZ_PS_INPUT_CNTL_FRAGCOORDSAMPLEMODE__SHIFT = 1 # type: ignore
REG_A6XX_GRAS_LRZ_MRT_BUFFER_INFO_0 = 0x00008102 # type: ignore
A6XX_GRAS_LRZ_MRT_BUFFER_INFO_0_COLOR_FORMAT__MASK = 0x000000ff # type: ignore
A6XX_GRAS_LRZ_MRT_BUFFER_INFO_0_COLOR_FORMAT__SHIFT = 0 # type: ignore
# BASE at 0x8103 and PITCH at 0x8105: the gap suggests BASE is a 64-bit
# (two-dword) address pair — same pattern as other *_BASE registers below.
REG_A6XX_GRAS_LRZ_BUFFER_BASE = 0x00008103 # type: ignore
REG_A6XX_GRAS_LRZ_BUFFER_PITCH = 0x00008105 # type: ignore
A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__MASK = 0x000000ff # type: ignore
A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__SHIFT = 0 # type: ignore
A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__MASK = 0x1ffffc00 # type: ignore
A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__SHIFT = 10 # type: ignore
REG_A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE = 0x00008106 # type: ignore
REG_A6XX_GRAS_LRZ_PS_SAMPLEFREQ_CNTL = 0x00008109 # type: ignore
A6XX_GRAS_LRZ_PS_SAMPLEFREQ_CNTL_PER_SAMP_MODE = 0x00000001 # type: ignore
REG_A6XX_GRAS_LRZ_VIEW_INFO = 0x0000810a # type: ignore
A6XX_GRAS_LRZ_VIEW_INFO_BASE_LAYER__MASK = 0x000007ff # type: ignore
A6XX_GRAS_LRZ_VIEW_INFO_BASE_LAYER__SHIFT = 0 # type: ignore
A6XX_GRAS_LRZ_VIEW_INFO_LAYER_COUNT__MASK = 0x07ff0000 # type: ignore
A6XX_GRAS_LRZ_VIEW_INFO_LAYER_COUNT__SHIFT = 16 # type: ignore
A6XX_GRAS_LRZ_VIEW_INFO_BASE_MIP_LEVEL__MASK = 0xf0000000 # type: ignore
A6XX_GRAS_LRZ_VIEW_INFO_BASE_MIP_LEVEL__SHIFT = 28 # type: ignore
REG_A7XX_GRAS_LRZ_CNTL2 = 0x0000810b # type: ignore
A7XX_GRAS_LRZ_CNTL2_DISABLE_ON_WRONG_DIR = 0x00000001 # type: ignore
A7XX_GRAS_LRZ_CNTL2_FC_ENABLE = 0x00000002 # type: ignore
REG_A6XX_GRAS_UNKNOWN_8110 = 0x00008110 # type: ignore
REG_A7XX_GRAS_LRZ_DEPTH_CLEAR = 0x00008111 # type: ignore
A7XX_GRAS_LRZ_DEPTH_CLEAR__MASK = 0xffffffff # type: ignore
A7XX_GRAS_LRZ_DEPTH_CLEAR__SHIFT = 0 # type: ignore
REG_A7XX_GRAS_LRZ_DEPTH_BUFFER_INFO = 0x00008113 # type: ignore
A7XX_GRAS_LRZ_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK = 0x00000007 # type: ignore
A7XX_GRAS_LRZ_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT = 0 # type: ignore
A7XX_GRAS_LRZ_DEPTH_BUFFER_INFO_UNK3 = 0x00000008 # type: ignore
REG_A7XX_GRAS_UNKNOWN_8120 = 0x00008120 # type: ignore
REG_A7XX_GRAS_UNKNOWN_8121 = 0x00008121 # type: ignore
# GRAS 2D blit engine (A2D) control, source/dest rectangles and scissor,
# followed by GRAS debug/addr-mode and perf-counter select registers.
REG_A6XX_GRAS_A2D_BLT_CNTL = 0x00008400 # type: ignore
A6XX_GRAS_A2D_BLT_CNTL_ROTATE__MASK = 0x00000007 # type: ignore
A6XX_GRAS_A2D_BLT_CNTL_ROTATE__SHIFT = 0 # type: ignore
A6XX_GRAS_A2D_BLT_CNTL_OVERWRITEEN = 0x00000008 # type: ignore
A6XX_GRAS_A2D_BLT_CNTL_UNK4__MASK = 0x00000070 # type: ignore
A6XX_GRAS_A2D_BLT_CNTL_UNK4__SHIFT = 4 # type: ignore
A6XX_GRAS_A2D_BLT_CNTL_SOLID_COLOR = 0x00000080 # type: ignore
A6XX_GRAS_A2D_BLT_CNTL_COLOR_FORMAT__MASK = 0x0000ff00 # type: ignore
A6XX_GRAS_A2D_BLT_CNTL_COLOR_FORMAT__SHIFT = 8 # type: ignore
A6XX_GRAS_A2D_BLT_CNTL_SCISSOR = 0x00010000 # type: ignore
A6XX_GRAS_A2D_BLT_CNTL_UNK17__MASK = 0x00060000 # type: ignore
A6XX_GRAS_A2D_BLT_CNTL_UNK17__SHIFT = 17 # type: ignore
A6XX_GRAS_A2D_BLT_CNTL_D24S8 = 0x00080000 # type: ignore
A6XX_GRAS_A2D_BLT_CNTL_MASK__MASK = 0x00f00000 # type: ignore
A6XX_GRAS_A2D_BLT_CNTL_MASK__SHIFT = 20 # type: ignore
A6XX_GRAS_A2D_BLT_CNTL_IFMT__MASK = 0x07000000 # type: ignore
A6XX_GRAS_A2D_BLT_CNTL_IFMT__SHIFT = 24 # type: ignore
A6XX_GRAS_A2D_BLT_CNTL_UNK27 = 0x08000000 # type: ignore
A6XX_GRAS_A2D_BLT_CNTL_UNK28 = 0x10000000 # type: ignore
A6XX_GRAS_A2D_BLT_CNTL_RASTER_MODE__MASK = 0x20000000 # type: ignore
A6XX_GRAS_A2D_BLT_CNTL_RASTER_MODE__SHIFT = 29 # type: ignore
A6XX_GRAS_A2D_BLT_CNTL_COPY = 0x40000000 # type: ignore
# Source rectangle bounds: 17-bit values stored at bit 8 (mask 0x01ffff00).
REG_A6XX_GRAS_A2D_SRC_XMIN = 0x00008401 # type: ignore
A6XX_GRAS_A2D_SRC_XMIN__MASK = 0x01ffff00 # type: ignore
A6XX_GRAS_A2D_SRC_XMIN__SHIFT = 8 # type: ignore
REG_A6XX_GRAS_A2D_SRC_XMAX = 0x00008402 # type: ignore
A6XX_GRAS_A2D_SRC_XMAX__MASK = 0x01ffff00 # type: ignore
A6XX_GRAS_A2D_SRC_XMAX__SHIFT = 8 # type: ignore
REG_A6XX_GRAS_A2D_SRC_YMIN = 0x00008403 # type: ignore
A6XX_GRAS_A2D_SRC_YMIN__MASK = 0x01ffff00 # type: ignore
A6XX_GRAS_A2D_SRC_YMIN__SHIFT = 8 # type: ignore
REG_A6XX_GRAS_A2D_SRC_YMAX = 0x00008404 # type: ignore
A6XX_GRAS_A2D_SRC_YMAX__MASK = 0x01ffff00 # type: ignore
A6XX_GRAS_A2D_SRC_YMAX__SHIFT = 8 # type: ignore
REG_A6XX_GRAS_A2D_DEST_TL = 0x00008405 # type: ignore
A6XX_GRAS_A2D_DEST_TL_X__MASK = 0x00003fff # type: ignore
A6XX_GRAS_A2D_DEST_TL_X__SHIFT = 0 # type: ignore
A6XX_GRAS_A2D_DEST_TL_Y__MASK = 0x3fff0000 # type: ignore
A6XX_GRAS_A2D_DEST_TL_Y__SHIFT = 16 # type: ignore
REG_A6XX_GRAS_A2D_DEST_BR = 0x00008406 # type: ignore
A6XX_GRAS_A2D_DEST_BR_X__MASK = 0x00003fff # type: ignore
A6XX_GRAS_A2D_DEST_BR_X__SHIFT = 0 # type: ignore
A6XX_GRAS_A2D_DEST_BR_Y__MASK = 0x3fff0000 # type: ignore
A6XX_GRAS_A2D_DEST_BR_Y__SHIFT = 16 # type: ignore
REG_A6XX_GRAS_2D_UNKNOWN_8407 = 0x00008407 # type: ignore
REG_A6XX_GRAS_2D_UNKNOWN_8408 = 0x00008408 # type: ignore
REG_A6XX_GRAS_2D_UNKNOWN_8409 = 0x00008409 # type: ignore
REG_A6XX_GRAS_A2D_SCISSOR_TL = 0x0000840a # type: ignore
A6XX_GRAS_A2D_SCISSOR_TL_X__MASK = 0x00003fff # type: ignore
A6XX_GRAS_A2D_SCISSOR_TL_X__SHIFT = 0 # type: ignore
A6XX_GRAS_A2D_SCISSOR_TL_Y__MASK = 0x3fff0000 # type: ignore
A6XX_GRAS_A2D_SCISSOR_TL_Y__SHIFT = 16 # type: ignore
REG_A6XX_GRAS_A2D_SCISSOR_BR = 0x0000840b # type: ignore
A6XX_GRAS_A2D_SCISSOR_BR_X__MASK = 0x00003fff # type: ignore
A6XX_GRAS_A2D_SCISSOR_BR_X__SHIFT = 0 # type: ignore
A6XX_GRAS_A2D_SCISSOR_BR_Y__MASK = 0x3fff0000 # type: ignore
A6XX_GRAS_A2D_SCISSOR_BR_Y__SHIFT = 16 # type: ignore
REG_A6XX_GRAS_DBG_ECO_CNTL = 0x00008600 # type: ignore
A6XX_GRAS_DBG_ECO_CNTL_UNK7 = 0x00000080 # type: ignore
A6XX_GRAS_DBG_ECO_CNTL_LRZCACHELOCKDIS = 0x00000800 # type: ignore
REG_A6XX_GRAS_ADDR_MODE_CNTL = 0x00008601 # type: ignore
REG_A7XX_GRAS_NC_MODE_CNTL = 0x00008602 # type: ignore
# Perf-counter select registers are small indexed arrays (stride 1 dword).
REG_A6XX_GRAS_PERFCTR_TSE_SEL = lambda i0: (0x00008610 + 0x1*i0 ) # type: ignore
REG_A6XX_GRAS_PERFCTR_RAS_SEL = lambda i0: (0x00008614 + 0x1*i0 ) # type: ignore
REG_A6XX_GRAS_PERFCTR_LRZ_SEL = lambda i0: (0x00008618 + 0x1*i0 ) # type: ignore
# RB (render backend) top-level control. Note: A6XX and A7XX variants of
# the same register share one offset (0x8800 / 0x8801) with different field
# layouts — both definitions are kept, per-generation code picks one.
REG_A6XX_RB_CNTL = 0x00008800 # type: ignore
A6XX_RB_CNTL_BINW__MASK = 0x0000003f # type: ignore
A6XX_RB_CNTL_BINW__SHIFT = 0 # type: ignore
A6XX_RB_CNTL_BINH__MASK = 0x00007f00 # type: ignore
A6XX_RB_CNTL_BINH__SHIFT = 8 # type: ignore
A6XX_RB_CNTL_RENDER_MODE__MASK = 0x001c0000 # type: ignore
A6XX_RB_CNTL_RENDER_MODE__SHIFT = 18 # type: ignore
A6XX_RB_CNTL_FORCE_LRZ_WRITE_DIS = 0x00200000 # type: ignore
A6XX_RB_CNTL_BUFFERS_LOCATION__MASK = 0x00c00000 # type: ignore
A6XX_RB_CNTL_BUFFERS_LOCATION__SHIFT = 22 # type: ignore
A6XX_RB_CNTL_LRZ_FEEDBACK_ZMODE_MASK__MASK = 0x07000000 # type: ignore
A6XX_RB_CNTL_LRZ_FEEDBACK_ZMODE_MASK__SHIFT = 24 # type: ignore
REG_A7XX_RB_CNTL = 0x00008800 # type: ignore
A7XX_RB_CNTL_BINW__MASK = 0x0000003f # type: ignore
A7XX_RB_CNTL_BINW__SHIFT = 0 # type: ignore
A7XX_RB_CNTL_BINH__MASK = 0x00007f00 # type: ignore
A7XX_RB_CNTL_BINH__SHIFT = 8 # type: ignore
A7XX_RB_CNTL_RENDER_MODE__MASK = 0x001c0000 # type: ignore
A7XX_RB_CNTL_RENDER_MODE__SHIFT = 18 # type: ignore
A7XX_RB_CNTL_FORCE_LRZ_WRITE_DIS = 0x00200000 # type: ignore
A7XX_RB_CNTL_LRZ_FEEDBACK_ZMODE_MASK__MASK = 0x07000000 # type: ignore
A7XX_RB_CNTL_LRZ_FEEDBACK_ZMODE_MASK__SHIFT = 24 # type: ignore
REG_A6XX_RB_RENDER_CNTL = 0x00008801 # type: ignore
A6XX_RB_RENDER_CNTL_CCUSINGLECACHELINESIZE__MASK = 0x00000038 # type: ignore
A6XX_RB_RENDER_CNTL_CCUSINGLECACHELINESIZE__SHIFT = 3 # type: ignore
A6XX_RB_RENDER_CNTL_EARLYVIZOUTEN = 0x00000040 # type: ignore
A6XX_RB_RENDER_CNTL_FS_DISABLE = 0x00000080 # type: ignore
# NOTE(review): UNK8 (bits 8-10) overlaps RASTER_MODE (bit 8) and
# RASTER_DIRECTION (bits 9-10) — alternate decodings of the same bits,
# as emitted by the upstream register database; not a typo.
A6XX_RB_RENDER_CNTL_UNK8__MASK = 0x00000700 # type: ignore
A6XX_RB_RENDER_CNTL_UNK8__SHIFT = 8 # type: ignore
A6XX_RB_RENDER_CNTL_RASTER_MODE__MASK = 0x00000100 # type: ignore
A6XX_RB_RENDER_CNTL_RASTER_MODE__SHIFT = 8 # type: ignore
A6XX_RB_RENDER_CNTL_RASTER_DIRECTION__MASK = 0x00000600 # type: ignore
A6XX_RB_RENDER_CNTL_RASTER_DIRECTION__SHIFT = 9 # type: ignore
A6XX_RB_RENDER_CNTL_CONSERVATIVERASEN = 0x00000800 # type: ignore
A6XX_RB_RENDER_CNTL_INNERCONSERVATIVERASEN = 0x00001000 # type: ignore
A6XX_RB_RENDER_CNTL_FLAG_DEPTH = 0x00004000 # type: ignore
A6XX_RB_RENDER_CNTL_FLAG_MRTS__MASK = 0x00ff0000 # type: ignore
A6XX_RB_RENDER_CNTL_FLAG_MRTS__SHIFT = 16 # type: ignore
REG_A7XX_RB_RENDER_CNTL = 0x00008801 # type: ignore
A7XX_RB_RENDER_CNTL_EARLYVIZOUTEN = 0x00000040 # type: ignore
A7XX_RB_RENDER_CNTL_FS_DISABLE = 0x00000080 # type: ignore
A7XX_RB_RENDER_CNTL_RASTER_MODE__MASK = 0x00000100 # type: ignore
A7XX_RB_RENDER_CNTL_RASTER_MODE__SHIFT = 8 # type: ignore
A7XX_RB_RENDER_CNTL_RASTER_DIRECTION__MASK = 0x00000600 # type: ignore
A7XX_RB_RENDER_CNTL_RASTER_DIRECTION__SHIFT = 9 # type: ignore
A7XX_RB_RENDER_CNTL_CONSERVATIVERASEN = 0x00000800 # type: ignore
A7XX_RB_RENDER_CNTL_INNERCONSERVATIVERASEN = 0x00001000 # type: ignore
# GRAS register (0x8116) emitted here out of address order — this ordering
# comes from the generator's source database; keep as-is.
REG_A7XX_GRAS_SU_RENDER_CNTL = 0x00008116 # type: ignore
A7XX_GRAS_SU_RENDER_CNTL_FS_DISABLE = 0x00000080 # type: ignore
# RB MSAA control and programmable sample positions (RB-side mirror of the
# GRAS_SC_PROGRAMMABLE_MSAA_POS registers earlier in this file).
REG_A6XX_RB_RAS_MSAA_CNTL = 0x00008802 # type: ignore
A6XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK = 0x00000003 # type: ignore
A6XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT = 0 # type: ignore
A6XX_RB_RAS_MSAA_CNTL_UNK2 = 0x00000004 # type: ignore
A6XX_RB_RAS_MSAA_CNTL_UNK3 = 0x00000008 # type: ignore
REG_A6XX_RB_DEST_MSAA_CNTL = 0x00008803 # type: ignore
A6XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK = 0x00000003 # type: ignore
A6XX_RB_DEST_MSAA_CNTL_SAMPLES__SHIFT = 0 # type: ignore
A6XX_RB_DEST_MSAA_CNTL_MSAA_DISABLE = 0x00000004 # type: ignore
REG_A6XX_RB_MSAA_SAMPLE_POS_CNTL = 0x00008804 # type: ignore
A6XX_RB_MSAA_SAMPLE_POS_CNTL_UNK0 = 0x00000001 # type: ignore
A6XX_RB_MSAA_SAMPLE_POS_CNTL_LOCATION_ENABLE = 0x00000002 # type: ignore
REG_A6XX_RB_PROGRAMMABLE_MSAA_POS_0 = 0x00008805 # type: ignore
A6XX_RB_PROGRAMMABLE_MSAA_POS_0_SAMPLE_0_X__MASK = 0x0000000f # type: ignore
A6XX_RB_PROGRAMMABLE_MSAA_POS_0_SAMPLE_0_X__SHIFT = 0 # type: ignore
A6XX_RB_PROGRAMMABLE_MSAA_POS_0_SAMPLE_0_Y__MASK = 0x000000f0 # type: ignore
A6XX_RB_PROGRAMMABLE_MSAA_POS_0_SAMPLE_0_Y__SHIFT = 4 # type: ignore
A6XX_RB_PROGRAMMABLE_MSAA_POS_0_SAMPLE_1_X__MASK = 0x00000f00 # type: ignore
A6XX_RB_PROGRAMMABLE_MSAA_POS_0_SAMPLE_1_X__SHIFT = 8 # type: ignore
A6XX_RB_PROGRAMMABLE_MSAA_POS_0_SAMPLE_1_Y__MASK = 0x0000f000 # type: ignore
A6XX_RB_PROGRAMMABLE_MSAA_POS_0_SAMPLE_1_Y__SHIFT = 12 # type: ignore
A6XX_RB_PROGRAMMABLE_MSAA_POS_0_SAMPLE_2_X__MASK = 0x000f0000 # type: ignore
A6XX_RB_PROGRAMMABLE_MSAA_POS_0_SAMPLE_2_X__SHIFT = 16 # type: ignore
A6XX_RB_PROGRAMMABLE_MSAA_POS_0_SAMPLE_2_Y__MASK = 0x00f00000 # type: ignore
A6XX_RB_PROGRAMMABLE_MSAA_POS_0_SAMPLE_2_Y__SHIFT = 20 # type: ignore
A6XX_RB_PROGRAMMABLE_MSAA_POS_0_SAMPLE_3_X__MASK = 0x0f000000 # type: ignore
A6XX_RB_PROGRAMMABLE_MSAA_POS_0_SAMPLE_3_X__SHIFT = 24 # type: ignore
A6XX_RB_PROGRAMMABLE_MSAA_POS_0_SAMPLE_3_Y__MASK = 0xf0000000 # type: ignore
A6XX_RB_PROGRAMMABLE_MSAA_POS_0_SAMPLE_3_Y__SHIFT = 28 # type: ignore
REG_A6XX_RB_PROGRAMMABLE_MSAA_POS_1 = 0x00008806 # type: ignore
A6XX_RB_PROGRAMMABLE_MSAA_POS_1_SAMPLE_0_X__MASK = 0x0000000f # type: ignore
A6XX_RB_PROGRAMMABLE_MSAA_POS_1_SAMPLE_0_X__SHIFT = 0 # type: ignore
A6XX_RB_PROGRAMMABLE_MSAA_POS_1_SAMPLE_0_Y__MASK = 0x000000f0 # type: ignore
A6XX_RB_PROGRAMMABLE_MSAA_POS_1_SAMPLE_0_Y__SHIFT = 4 # type: ignore
A6XX_RB_PROGRAMMABLE_MSAA_POS_1_SAMPLE_1_X__MASK = 0x00000f00 # type: ignore
A6XX_RB_PROGRAMMABLE_MSAA_POS_1_SAMPLE_1_X__SHIFT = 8 # type: ignore
A6XX_RB_PROGRAMMABLE_MSAA_POS_1_SAMPLE_1_Y__MASK = 0x0000f000 # type: ignore
A6XX_RB_PROGRAMMABLE_MSAA_POS_1_SAMPLE_1_Y__SHIFT = 12 # type: ignore
A6XX_RB_PROGRAMMABLE_MSAA_POS_1_SAMPLE_2_X__MASK = 0x000f0000 # type: ignore
A6XX_RB_PROGRAMMABLE_MSAA_POS_1_SAMPLE_2_X__SHIFT = 16 # type: ignore
A6XX_RB_PROGRAMMABLE_MSAA_POS_1_SAMPLE_2_Y__MASK = 0x00f00000 # type: ignore
A6XX_RB_PROGRAMMABLE_MSAA_POS_1_SAMPLE_2_Y__SHIFT = 20 # type: ignore
A6XX_RB_PROGRAMMABLE_MSAA_POS_1_SAMPLE_3_X__MASK = 0x0f000000 # type: ignore
A6XX_RB_PROGRAMMABLE_MSAA_POS_1_SAMPLE_3_X__SHIFT = 24 # type: ignore
A6XX_RB_PROGRAMMABLE_MSAA_POS_1_SAMPLE_3_Y__MASK = 0xf0000000 # type: ignore
A6XX_RB_PROGRAMMABLE_MSAA_POS_1_SAMPLE_3_Y__SHIFT = 28 # type: ignore
# RB interpolation / pixel-shader input-output control, per-MRT output
# masks, dithering, sRGB enables, and a run of unknown registers.
REG_A6XX_RB_INTERP_CNTL = 0x00008809 # type: ignore
A6XX_RB_INTERP_CNTL_IJ_PERSP_PIXEL = 0x00000001 # type: ignore
A6XX_RB_INTERP_CNTL_IJ_PERSP_CENTROID = 0x00000002 # type: ignore
A6XX_RB_INTERP_CNTL_IJ_PERSP_SAMPLE = 0x00000004 # type: ignore
A6XX_RB_INTERP_CNTL_IJ_LINEAR_PIXEL = 0x00000008 # type: ignore
A6XX_RB_INTERP_CNTL_IJ_LINEAR_CENTROID = 0x00000010 # type: ignore
A6XX_RB_INTERP_CNTL_IJ_LINEAR_SAMPLE = 0x00000020 # type: ignore
A6XX_RB_INTERP_CNTL_COORD_MASK__MASK = 0x000003c0 # type: ignore
A6XX_RB_INTERP_CNTL_COORD_MASK__SHIFT = 6 # type: ignore
A6XX_RB_INTERP_CNTL_UNK10 = 0x00000400 # type: ignore
REG_A6XX_RB_PS_INPUT_CNTL = 0x0000880a # type: ignore
A6XX_RB_PS_INPUT_CNTL_SAMPLEMASK = 0x00000001 # type: ignore
A6XX_RB_PS_INPUT_CNTL_POSTDEPTHCOVERAGE = 0x00000002 # type: ignore
A6XX_RB_PS_INPUT_CNTL_FACENESS = 0x00000004 # type: ignore
A6XX_RB_PS_INPUT_CNTL_SAMPLEID = 0x00000008 # type: ignore
A6XX_RB_PS_INPUT_CNTL_FRAGCOORDSAMPLEMODE__MASK = 0x00000030 # type: ignore
A6XX_RB_PS_INPUT_CNTL_FRAGCOORDSAMPLEMODE__SHIFT = 4 # type: ignore
A6XX_RB_PS_INPUT_CNTL_CENTERRHW = 0x00000040 # type: ignore
A6XX_RB_PS_INPUT_CNTL_LINELENGTHEN = 0x00000080 # type: ignore
A6XX_RB_PS_INPUT_CNTL_FOVEATION = 0x00000100 # type: ignore
REG_A6XX_RB_PS_OUTPUT_CNTL = 0x0000880b # type: ignore
A6XX_RB_PS_OUTPUT_CNTL_DUAL_COLOR_IN_ENABLE = 0x00000001 # type: ignore
A6XX_RB_PS_OUTPUT_CNTL_FRAG_WRITES_Z = 0x00000002 # type: ignore
A6XX_RB_PS_OUTPUT_CNTL_FRAG_WRITES_SAMPMASK = 0x00000004 # type: ignore
A6XX_RB_PS_OUTPUT_CNTL_FRAG_WRITES_STENCILREF = 0x00000008 # type: ignore
REG_A6XX_RB_PS_MRT_CNTL = 0x0000880c # type: ignore
A6XX_RB_PS_MRT_CNTL_MRT__MASK = 0x0000000f # type: ignore
A6XX_RB_PS_MRT_CNTL_MRT__SHIFT = 0 # type: ignore
# Per-render-target 4-bit component write masks, RT0..RT7 packed nibble-wise.
REG_A6XX_RB_PS_OUTPUT_MASK = 0x0000880d # type: ignore
A6XX_RB_PS_OUTPUT_MASK_RT0__MASK = 0x0000000f # type: ignore
A6XX_RB_PS_OUTPUT_MASK_RT0__SHIFT = 0 # type: ignore
A6XX_RB_PS_OUTPUT_MASK_RT1__MASK = 0x000000f0 # type: ignore
A6XX_RB_PS_OUTPUT_MASK_RT1__SHIFT = 4 # type: ignore
A6XX_RB_PS_OUTPUT_MASK_RT2__MASK = 0x00000f00 # type: ignore
A6XX_RB_PS_OUTPUT_MASK_RT2__SHIFT = 8 # type: ignore
A6XX_RB_PS_OUTPUT_MASK_RT3__MASK = 0x0000f000 # type: ignore
A6XX_RB_PS_OUTPUT_MASK_RT3__SHIFT = 12 # type: ignore
A6XX_RB_PS_OUTPUT_MASK_RT4__MASK = 0x000f0000 # type: ignore
A6XX_RB_PS_OUTPUT_MASK_RT4__SHIFT = 16 # type: ignore
A6XX_RB_PS_OUTPUT_MASK_RT5__MASK = 0x00f00000 # type: ignore
A6XX_RB_PS_OUTPUT_MASK_RT5__SHIFT = 20 # type: ignore
A6XX_RB_PS_OUTPUT_MASK_RT6__MASK = 0x0f000000 # type: ignore
A6XX_RB_PS_OUTPUT_MASK_RT6__SHIFT = 24 # type: ignore
A6XX_RB_PS_OUTPUT_MASK_RT7__MASK = 0xf0000000 # type: ignore
A6XX_RB_PS_OUTPUT_MASK_RT7__SHIFT = 28 # type: ignore
# Per-MRT 2-bit dither modes, MRT0..MRT7 packed in the low 16 bits.
REG_A6XX_RB_DITHER_CNTL = 0x0000880e # type: ignore
A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT0__MASK = 0x00000003 # type: ignore
A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT0__SHIFT = 0 # type: ignore
A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT1__MASK = 0x0000000c # type: ignore
A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT1__SHIFT = 2 # type: ignore
A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT2__MASK = 0x00000030 # type: ignore
A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT2__SHIFT = 4 # type: ignore
A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT3__MASK = 0x000000c0 # type: ignore
A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT3__SHIFT = 6 # type: ignore
A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT4__MASK = 0x00000300 # type: ignore
A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT4__SHIFT = 8 # type: ignore
A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT5__MASK = 0x00000c00 # type: ignore
A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT5__SHIFT = 10 # type: ignore
A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6__MASK = 0x00003000 # type: ignore
A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6__SHIFT = 12 # type: ignore
A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7__MASK = 0x0000c000 # type: ignore
A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7__SHIFT = 14 # type: ignore
REG_A6XX_RB_SRGB_CNTL = 0x0000880f # type: ignore
A6XX_RB_SRGB_CNTL_SRGB_MRT0 = 0x00000001 # type: ignore
A6XX_RB_SRGB_CNTL_SRGB_MRT1 = 0x00000002 # type: ignore
A6XX_RB_SRGB_CNTL_SRGB_MRT2 = 0x00000004 # type: ignore
A6XX_RB_SRGB_CNTL_SRGB_MRT3 = 0x00000008 # type: ignore
A6XX_RB_SRGB_CNTL_SRGB_MRT4 = 0x00000010 # type: ignore
A6XX_RB_SRGB_CNTL_SRGB_MRT5 = 0x00000020 # type: ignore
A6XX_RB_SRGB_CNTL_SRGB_MRT6 = 0x00000040 # type: ignore
A6XX_RB_SRGB_CNTL_SRGB_MRT7 = 0x00000080 # type: ignore
REG_A6XX_RB_PS_SAMPLEFREQ_CNTL = 0x00008810 # type: ignore
A6XX_RB_PS_SAMPLEFREQ_CNTL_PER_SAMP_MODE = 0x00000001 # type: ignore
REG_A6XX_RB_UNKNOWN_8811 = 0x00008811 # type: ignore
REG_A7XX_RB_UNKNOWN_8812 = 0x00008812 # type: ignore
REG_A6XX_RB_UNKNOWN_8818 = 0x00008818 # type: ignore
REG_A6XX_RB_UNKNOWN_8819 = 0x00008819 # type: ignore
REG_A6XX_RB_UNKNOWN_881A = 0x0000881a # type: ignore
REG_A6XX_RB_UNKNOWN_881B = 0x0000881b # type: ignore
REG_A6XX_RB_UNKNOWN_881C = 0x0000881c # type: ignore
REG_A6XX_RB_UNKNOWN_881D = 0x0000881d # type: ignore
REG_A6XX_RB_UNKNOWN_881E = 0x0000881e # type: ignore
# RB_MRT: indexed per-render-target register group (stride 8 dwords per MRT,
# i0 = MRT index). The field constants below apply to registers within each
# element (CONTROL, BLEND_CONTROL, BUF_INFO, PITCH, ...).
REG_A6XX_RB_MRT = lambda i0: (0x00008820 + 0x8*i0 ) # type: ignore
A6XX_RB_MRT_CONTROL_BLEND = 0x00000001 # type: ignore
A6XX_RB_MRT_CONTROL_BLEND2 = 0x00000002 # type: ignore
A6XX_RB_MRT_CONTROL_ROP_ENABLE = 0x00000004 # type: ignore
A6XX_RB_MRT_CONTROL_ROP_CODE__MASK = 0x00000078 # type: ignore
A6XX_RB_MRT_CONTROL_ROP_CODE__SHIFT = 3 # type: ignore
A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK = 0x00000780 # type: ignore
A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT = 7 # type: ignore
A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK = 0x0000001f # type: ignore
A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT = 0 # type: ignore
A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK = 0x000000e0 # type: ignore
A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT = 5 # type: ignore
A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK = 0x00001f00 # type: ignore
A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT = 8 # type: ignore
A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK = 0x001f0000 # type: ignore
A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT = 16 # type: ignore
A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK = 0x00e00000 # type: ignore
A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT = 21 # type: ignore
A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK = 0x1f000000 # type: ignore
A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT = 24 # type: ignore
# BUF_INFO has distinct A6XX vs A7XX layouts (A7XX adds LOSSLESSCOMPEN
# and MUTABLEEN bits); both decodings are kept.
A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK = 0x000000ff # type: ignore
A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT = 0 # type: ignore
A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK = 0x00000300 # type: ignore
A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT = 8 # type: ignore
A6XX_RB_MRT_BUF_INFO_UNK10 = 0x00000400 # type: ignore
A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK = 0x00006000 # type: ignore
A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT = 13 # type: ignore
A7XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK = 0x000000ff # type: ignore
A7XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT = 0 # type: ignore
A7XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK = 0x00000300 # type: ignore
A7XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT = 8 # type: ignore
A7XX_RB_MRT_BUF_INFO_UNK10 = 0x00000400 # type: ignore
A7XX_RB_MRT_BUF_INFO_LOSSLESSCOMPEN = 0x00000800 # type: ignore
A7XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK = 0x00006000 # type: ignore
A7XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT = 13 # type: ignore
A7XX_RB_MRT_BUF_INFO_MUTABLEEN = 0x00010000 # type: ignore
A6XX_RB_MRT_PITCH__MASK = 0xffffffff # type: ignore
A6XX_RB_MRT_PITCH__SHIFT = 0 # type: ignore
A6XX_RB_MRT_ARRAY_PITCH__MASK = 0xffffffff # type: ignore
A6XX_RB_MRT_ARRAY_PITCH__SHIFT = 0 # type: ignore
# Blend constant color as four full-width (FP32) R/G/B/A registers.
REG_A6XX_RB_BLEND_CONSTANT_RED_FP32 = 0x00008860 # type: ignore
A6XX_RB_BLEND_CONSTANT_RED_FP32__MASK = 0xffffffff # type: ignore
A6XX_RB_BLEND_CONSTANT_RED_FP32__SHIFT = 0 # type: ignore
REG_A6XX_RB_BLEND_CONSTANT_GREEN_FP32 = 0x00008861 # type: ignore
A6XX_RB_BLEND_CONSTANT_GREEN_FP32__MASK = 0xffffffff # type: ignore
A6XX_RB_BLEND_CONSTANT_GREEN_FP32__SHIFT = 0 # type: ignore
REG_A6XX_RB_BLEND_CONSTANT_BLUE_FP32 = 0x00008862 # type: ignore
A6XX_RB_BLEND_CONSTANT_BLUE_FP32__MASK = 0xffffffff # type: ignore
A6XX_RB_BLEND_CONSTANT_BLUE_FP32__SHIFT = 0 # type: ignore
REG_A6XX_RB_BLEND_CONSTANT_ALPHA_FP32 = 0x00008863 # type: ignore
A6XX_RB_BLEND_CONSTANT_ALPHA_FP32__MASK = 0xffffffff # type: ignore
A6XX_RB_BLEND_CONSTANT_ALPHA_FP32__SHIFT = 0 # type: ignore
REG_A6XX_RB_ALPHA_TEST_CNTL = 0x00008864 # type: ignore
A6XX_RB_ALPHA_TEST_CNTL_ALPHA_REF__MASK = 0x000000ff # type: ignore
A6XX_RB_ALPHA_TEST_CNTL_ALPHA_REF__SHIFT = 0 # type: ignore
A6XX_RB_ALPHA_TEST_CNTL_ALPHA_TEST = 0x00000100 # type: ignore
A6XX_RB_ALPHA_TEST_CNTL_ALPHA_TEST_FUNC__MASK = 0x00000e00 # type: ignore
A6XX_RB_ALPHA_TEST_CNTL_ALPHA_TEST_FUNC__SHIFT = 9 # type: ignore
REG_A6XX_RB_BLEND_CNTL = 0x00008865 # type: ignore
A6XX_RB_BLEND_CNTL_BLEND_READS_DEST__MASK = 0x000000ff # type: ignore
A6XX_RB_BLEND_CNTL_BLEND_READS_DEST__SHIFT = 0 # type: ignore
A6XX_RB_BLEND_CNTL_INDEPENDENT_BLEND = 0x00000100 # type: ignore
A6XX_RB_BLEND_CNTL_DUAL_COLOR_IN_ENABLE = 0x00000200 # type: ignore
A6XX_RB_BLEND_CNTL_ALPHA_TO_COVERAGE = 0x00000400 # type: ignore
A6XX_RB_BLEND_CNTL_ALPHA_TO_ONE = 0x00000800 # type: ignore
A6XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK = 0xffff0000 # type: ignore
A6XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT = 16 # type: ignore
# RB depth plane / depth test control, depth buffer layout and bounds,
# followed by stencil op control (front and back-face _BF fields).
REG_A6XX_RB_DEPTH_PLANE_CNTL = 0x00008870 # type: ignore
A6XX_RB_DEPTH_PLANE_CNTL_Z_MODE__MASK = 0x00000003 # type: ignore
A6XX_RB_DEPTH_PLANE_CNTL_Z_MODE__SHIFT = 0 # type: ignore
REG_A6XX_RB_DEPTH_CNTL = 0x00008871 # type: ignore
A6XX_RB_DEPTH_CNTL_Z_TEST_ENABLE = 0x00000001 # type: ignore
A6XX_RB_DEPTH_CNTL_Z_WRITE_ENABLE = 0x00000002 # type: ignore
A6XX_RB_DEPTH_CNTL_ZFUNC__MASK = 0x0000001c # type: ignore
A6XX_RB_DEPTH_CNTL_ZFUNC__SHIFT = 2 # type: ignore
A6XX_RB_DEPTH_CNTL_Z_CLAMP_ENABLE = 0x00000020 # type: ignore
A6XX_RB_DEPTH_CNTL_Z_READ_ENABLE = 0x00000040 # type: ignore
A6XX_RB_DEPTH_CNTL_Z_BOUNDS_ENABLE = 0x00000080 # type: ignore
# GRAS register (0x8114) out of address order — ordering follows the
# generator's source database.
REG_A6XX_GRAS_SU_DEPTH_CNTL = 0x00008114 # type: ignore
A6XX_GRAS_SU_DEPTH_CNTL_Z_TEST_ENABLE = 0x00000001 # type: ignore
REG_A6XX_RB_DEPTH_BUFFER_INFO = 0x00008872 # type: ignore
A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK = 0x00000007 # type: ignore
A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT = 0 # type: ignore
A6XX_RB_DEPTH_BUFFER_INFO_UNK3__MASK = 0x00000018 # type: ignore
A6XX_RB_DEPTH_BUFFER_INFO_UNK3__SHIFT = 3 # type: ignore
# A7XX variant of the same offset adds TILEMODE and LOSSLESSCOMPEN fields.
REG_A7XX_RB_DEPTH_BUFFER_INFO = 0x00008872 # type: ignore
A7XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK = 0x00000007 # type: ignore
A7XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT = 0 # type: ignore
A7XX_RB_DEPTH_BUFFER_INFO_UNK3__MASK = 0x00000018 # type: ignore
A7XX_RB_DEPTH_BUFFER_INFO_UNK3__SHIFT = 3 # type: ignore
A7XX_RB_DEPTH_BUFFER_INFO_TILEMODE__MASK = 0x00000060 # type: ignore
A7XX_RB_DEPTH_BUFFER_INFO_TILEMODE__SHIFT = 5 # type: ignore
A7XX_RB_DEPTH_BUFFER_INFO_LOSSLESSCOMPEN = 0x00000080 # type: ignore
REG_A6XX_RB_DEPTH_BUFFER_PITCH = 0x00008873 # type: ignore
A6XX_RB_DEPTH_BUFFER_PITCH__MASK = 0x00003fff # type: ignore
A6XX_RB_DEPTH_BUFFER_PITCH__SHIFT = 0 # type: ignore
REG_A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH = 0x00008874 # type: ignore
A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK = 0x0fffffff # type: ignore
A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT = 0 # type: ignore
REG_A6XX_RB_DEPTH_BUFFER_BASE = 0x00008875 # type: ignore
REG_A6XX_RB_DEPTH_GMEM_BASE = 0x00008877 # type: ignore
REG_A6XX_RB_DEPTH_BOUND_MIN = 0x00008878 # type: ignore
A6XX_RB_DEPTH_BOUND_MIN__MASK = 0xffffffff # type: ignore
A6XX_RB_DEPTH_BOUND_MIN__SHIFT = 0 # type: ignore
REG_A6XX_RB_DEPTH_BOUND_MAX = 0x00008879 # type: ignore
A6XX_RB_DEPTH_BOUND_MAX__MASK = 0xffffffff # type: ignore
A6XX_RB_DEPTH_BOUND_MAX__SHIFT = 0 # type: ignore
REG_A6XX_RB_STENCIL_CNTL = 0x00008880 # type: ignore
A6XX_RB_STENCIL_CNTL_STENCIL_ENABLE = 0x00000001 # type: ignore
A6XX_RB_STENCIL_CNTL_STENCIL_ENABLE_BF = 0x00000002 # type: ignore
A6XX_RB_STENCIL_CNTL_STENCIL_READ = 0x00000004 # type: ignore
A6XX_RB_STENCIL_CNTL_FUNC__MASK = 0x00000700 # type: ignore
A6XX_RB_STENCIL_CNTL_FUNC__SHIFT = 8 # type: ignore
A6XX_RB_STENCIL_CNTL_FAIL__MASK = 0x00003800 # type: ignore
A6XX_RB_STENCIL_CNTL_FAIL__SHIFT = 11 # type: ignore
A6XX_RB_STENCIL_CNTL_ZPASS__MASK = 0x0001c000 # type: ignore
A6XX_RB_STENCIL_CNTL_ZPASS__SHIFT = 14 # type: ignore
A6XX_RB_STENCIL_CNTL_ZFAIL__MASK = 0x000e0000 # type: ignore
A6XX_RB_STENCIL_CNTL_ZFAIL__SHIFT = 17 # type: ignore
A6XX_RB_STENCIL_CNTL_FUNC_BF__MASK = 0x00700000 # type: ignore
A6XX_RB_STENCIL_CNTL_FUNC_BF__SHIFT = 20 # type: ignore
A6XX_RB_STENCIL_CNTL_FAIL_BF__MASK = 0x03800000 # type: ignore
A6XX_RB_STENCIL_CNTL_FAIL_BF__SHIFT = 23 # type: ignore
A6XX_RB_STENCIL_CNTL_ZPASS_BF__MASK = 0x1c000000 # type: ignore
A6XX_RB_STENCIL_CNTL_ZPASS_BF__SHIFT = 26 # type: ignore
A6XX_RB_STENCIL_CNTL_ZFAIL_BF__MASK = 0xe0000000 # type: ignore
A6XX_RB_STENCIL_CNTL_ZFAIL_BF__SHIFT = 29 # type: ignore
# Stencil buffer layout, stencil ref/masks, window offset, RB LRZ enable,
# viewport Z clamp and resolve (tile store) control registers.
REG_A6XX_GRAS_SU_STENCIL_CNTL = 0x00008115 # type: ignore
A6XX_GRAS_SU_STENCIL_CNTL_STENCIL_ENABLE = 0x00000001 # type: ignore
REG_A6XX_RB_STENCIL_BUFFER_INFO = 0x00008881 # type: ignore
A6XX_RB_STENCIL_BUFFER_INFO_SEPARATE_STENCIL = 0x00000001 # type: ignore
A6XX_RB_STENCIL_BUFFER_INFO_UNK1 = 0x00000002 # type: ignore
REG_A7XX_RB_STENCIL_BUFFER_INFO = 0x00008881 # type: ignore
A7XX_RB_STENCIL_BUFFER_INFO_SEPARATE_STENCIL = 0x00000001 # type: ignore
A7XX_RB_STENCIL_BUFFER_INFO_UNK1 = 0x00000002 # type: ignore
A7XX_RB_STENCIL_BUFFER_INFO_TILEMODE__MASK = 0x0000000c # type: ignore
A7XX_RB_STENCIL_BUFFER_INFO_TILEMODE__SHIFT = 2 # type: ignore
REG_A6XX_RB_STENCIL_BUFFER_PITCH = 0x00008882 # type: ignore
A6XX_RB_STENCIL_BUFFER_PITCH__MASK = 0x00000fff # type: ignore
A6XX_RB_STENCIL_BUFFER_PITCH__SHIFT = 0 # type: ignore
REG_A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH = 0x00008883 # type: ignore
A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__MASK = 0x00ffffff # type: ignore
A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__SHIFT = 0 # type: ignore
REG_A6XX_RB_STENCIL_BUFFER_BASE = 0x00008884 # type: ignore
REG_A6XX_RB_STENCIL_GMEM_BASE = 0x00008886 # type: ignore
# Stencil reference / compare mask / write mask each carry a front-face
# byte (low) and a back-face (BF*) byte at bit 8.
REG_A6XX_RB_STENCIL_REF_CNTL = 0x00008887 # type: ignore
A6XX_RB_STENCIL_REF_CNTL_REF__MASK = 0x000000ff # type: ignore
A6XX_RB_STENCIL_REF_CNTL_REF__SHIFT = 0 # type: ignore
A6XX_RB_STENCIL_REF_CNTL_BFREF__MASK = 0x0000ff00 # type: ignore
A6XX_RB_STENCIL_REF_CNTL_BFREF__SHIFT = 8 # type: ignore
REG_A6XX_RB_STENCIL_MASK = 0x00008888 # type: ignore
A6XX_RB_STENCIL_MASK_MASK__MASK = 0x000000ff # type: ignore
A6XX_RB_STENCIL_MASK_MASK__SHIFT = 0 # type: ignore
A6XX_RB_STENCIL_MASK_BFMASK__MASK = 0x0000ff00 # type: ignore
A6XX_RB_STENCIL_MASK_BFMASK__SHIFT = 8 # type: ignore
REG_A6XX_RB_STENCIL_WRITE_MASK = 0x00008889 # type: ignore
A6XX_RB_STENCIL_WRITE_MASK_WRMASK__MASK = 0x000000ff # type: ignore
A6XX_RB_STENCIL_WRITE_MASK_WRMASK__SHIFT = 0 # type: ignore
A6XX_RB_STENCIL_WRITE_MASK_BFWRMASK__MASK = 0x0000ff00 # type: ignore
A6XX_RB_STENCIL_WRITE_MASK_BFWRMASK__SHIFT = 8 # type: ignore
REG_A6XX_RB_WINDOW_OFFSET = 0x00008890 # type: ignore
A6XX_RB_WINDOW_OFFSET_X__MASK = 0x00003fff # type: ignore
A6XX_RB_WINDOW_OFFSET_X__SHIFT = 0 # type: ignore
A6XX_RB_WINDOW_OFFSET_Y__MASK = 0x3fff0000 # type: ignore
A6XX_RB_WINDOW_OFFSET_Y__SHIFT = 16 # type: ignore
REG_A6XX_RB_SAMPLE_COUNTER_CNTL = 0x00008891 # type: ignore
A6XX_RB_SAMPLE_COUNTER_CNTL_DISABLE = 0x00000001 # type: ignore
A6XX_RB_SAMPLE_COUNTER_CNTL_COPY = 0x00000002 # type: ignore
REG_A6XX_RB_LRZ_CNTL = 0x00008898 # type: ignore
A6XX_RB_LRZ_CNTL_ENABLE = 0x00000001 # type: ignore
REG_A7XX_RB_UNKNOWN_8899 = 0x00008899 # type: ignore
REG_A6XX_RB_VIEWPORT_ZCLAMP_MIN = 0x000088c0 # type: ignore
A6XX_RB_VIEWPORT_ZCLAMP_MIN__MASK = 0xffffffff # type: ignore
A6XX_RB_VIEWPORT_ZCLAMP_MIN__SHIFT = 0 # type: ignore
REG_A6XX_RB_VIEWPORT_ZCLAMP_MAX = 0x000088c1 # type: ignore
A6XX_RB_VIEWPORT_ZCLAMP_MAX__MASK = 0xffffffff # type: ignore
A6XX_RB_VIEWPORT_ZCLAMP_MAX__SHIFT = 0 # type: ignore
REG_A6XX_RB_RESOLVE_CNTL_0 = 0x000088d0 # type: ignore
A6XX_RB_RESOLVE_CNTL_0_UNK0__MASK = 0x00001fff # type: ignore
A6XX_RB_RESOLVE_CNTL_0_UNK0__SHIFT = 0 # type: ignore
A6XX_RB_RESOLVE_CNTL_0_UNK16__MASK = 0x07ff0000 # type: ignore
A6XX_RB_RESOLVE_CNTL_0_UNK16__SHIFT = 16 # type: ignore
REG_A6XX_RB_RESOLVE_CNTL_1 = 0x000088d1 # type: ignore
A6XX_RB_RESOLVE_CNTL_1_X__MASK = 0x00003fff # type: ignore
A6XX_RB_RESOLVE_CNTL_1_X__SHIFT = 0 # type: ignore
A6XX_RB_RESOLVE_CNTL_1_Y__MASK = 0x3fff0000 # type: ignore
A6XX_RB_RESOLVE_CNTL_1_Y__SHIFT = 16 # type: ignore
REG_A6XX_RB_RESOLVE_CNTL_2 = 0x000088d2 # type: ignore
A6XX_RB_RESOLVE_CNTL_2_X__MASK = 0x00003fff # type: ignore
A6XX_RB_RESOLVE_CNTL_2_X__SHIFT = 0 # type: ignore
A6XX_RB_RESOLVE_CNTL_2_Y__MASK = 0x3fff0000 # type: ignore
A6XX_RB_RESOLVE_CNTL_2_Y__SHIFT = 16 # type: ignore
REG_A6XX_RB_RESOLVE_CNTL_3 = 0x000088d3 # type: ignore
A6XX_RB_RESOLVE_CNTL_3_BINW__MASK = 0x0000003f # type: ignore
A6XX_RB_RESOLVE_CNTL_3_BINW__SHIFT = 0 # type: ignore
A6XX_RB_RESOLVE_CNTL_3_BINH__MASK = 0x00007f00 # type: ignore
A6XX_RB_RESOLVE_CNTL_3_BINH__SHIFT = 8 # type: ignore
REG_A6XX_RB_RESOLVE_WINDOW_OFFSET = 0x000088d4 # type: ignore
A6XX_RB_RESOLVE_WINDOW_OFFSET_X__MASK = 0x00003fff # type: ignore
A6XX_RB_RESOLVE_WINDOW_OFFSET_X__SHIFT = 0 # type: ignore
A6XX_RB_RESOLVE_WINDOW_OFFSET_Y__MASK = 0x3fff0000 # type: ignore
A6XX_RB_RESOLVE_WINDOW_OFFSET_Y__SHIFT = 16 # type: ignore
REG_A6XX_RB_RESOLVE_GMEM_BUFFER_INFO = 0x000088d5 # type: ignore
A6XX_RB_RESOLVE_GMEM_BUFFER_INFO_SAMPLES__MASK = 0x00000018 # type: ignore
A6XX_RB_RESOLVE_GMEM_BUFFER_INFO_SAMPLES__SHIFT = 3 # type: ignore
REG_A6XX_RB_RESOLVE_GMEM_BUFFER_BASE = 0x000088d6 # type: ignore
REG_A6XX_RB_RESOLVE_SYSTEM_BUFFER_INFO = 0x000088d7 # type: ignore
A6XX_RB_RESOLVE_SYSTEM_BUFFER_INFO_TILE_MODE__MASK = 0x00000003 # type: ignore
A6XX_RB_RESOLVE_SYSTEM_BUFFER_INFO_TILE_MODE__SHIFT = 0 # type: ignore
A6XX_RB_RESOLVE_SYSTEM_BUFFER_INFO_FLAGS = 0x00000004 # type: ignore
A6XX_RB_RESOLVE_SYSTEM_BUFFER_INFO_SAMPLES__MASK = 0x00000018 # type: ignore
A6XX_RB_RESOLVE_SYSTEM_BUFFER_INFO_SAMPLES__SHIFT = 3 # type: ignore
A6XX_RB_RESOLVE_SYSTEM_BUFFER_INFO_COLOR_SWAP__MASK = 0x00000060 # type: ignore
A6XX_RB_RESOLVE_SYSTEM_BUFFER_INFO_COLOR_SWAP__SHIFT = 5 # type: ignore
A6XX_RB_RESOLVE_SYSTEM_BUFFER_INFO_COLOR_FORMAT__MASK = 0x00007f80 # type: ignore
A6XX_RB_RESOLVE_SYSTEM_BUFFER_INFO_COLOR_FORMAT__SHIFT = 7 # type: ignore
A6XX_RB_RESOLVE_SYSTEM_BUFFER_INFO_UNK15 = 0x00008000 # type: ignore
A6XX_RB_RESOLVE_SYSTEM_BUFFER_INFO_MUTABLEEN = 0x00010000 # type: ignore
REG_A6XX_RB_RESOLVE_SYSTEM_BUFFER_BASE = 0x000088d8 # type: ignore
REG_A6XX_RB_RESOLVE_SYSTEM_BUFFER_PITCH = 0x000088da # type: ignore
A6XX_RB_RESOLVE_SYSTEM_BUFFER_PITCH__MASK = 0x0000ffff # type: ignore
A6XX_RB_RESOLVE_SYSTEM_BUFFER_PITCH__SHIFT = 0 # type: ignore
REG_A6XX_RB_RESOLVE_SYSTEM_BUFFER_ARRAY_PITCH = 0x000088db # type: ignore
A6XX_RB_RESOLVE_SYSTEM_BUFFER_ARRAY_PITCH__MASK = 0x1fffffff # type: ignore
A6XX_RB_RESOLVE_SYSTEM_BUFFER_ARRAY_PITCH__SHIFT = 0 # type: ignore
REG_A6XX_RB_RESOLVE_SYSTEM_FLAG_BUFFER_BASE = 0x000088dc # type: ignore
REG_A6XX_RB_RESOLVE_SYSTEM_FLAG_BUFFER_PITCH = 0x000088de # type: ignore
A6XX_RB_RESOLVE_SYSTEM_FLAG_BUFFER_PITCH_PITCH__MASK = 0x000007ff # type: ignore
A6XX_RB_RESOLVE_SYSTEM_FLAG_BUFFER_PITCH_PITCH__SHIFT = 0 # type: ignore
A6XX_RB_RESOLVE_SYSTEM_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK = 0x0ffff800 # type: ignore
A6XX_RB_RESOLVE_SYSTEM_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT = 11 # type: ignore
REG_A6XX_RB_RESOLVE_CLEAR_COLOR_DW0 = 0x000088df # type: ignore
REG_A6XX_RB_RESOLVE_CLEAR_COLOR_DW1 = 0x000088e0 # type: ignore
REG_A6XX_RB_RESOLVE_CLEAR_COLOR_DW2 = 0x000088e1 # type: ignore
REG_A6XX_RB_RESOLVE_CLEAR_COLOR_DW3 = 0x000088e2 # type: ignore
REG_A6XX_RB_RESOLVE_OPERATION = 0x000088e3 # type: ignore
A6XX_RB_RESOLVE_OPERATION_TYPE__MASK = 0x00000003 # type: ignore
A6XX_RB_RESOLVE_OPERATION_TYPE__SHIFT = 0 # type: ignore
A6XX_RB_RESOLVE_OPERATION_SAMPLE_0 = 0x00000004 # type: ignore
A6XX_RB_RESOLVE_OPERATION_DEPTH = 0x00000008 # type: ignore
A6XX_RB_RESOLVE_OPERATION_CLEAR_MASK__MASK = 0x000000f0 # type: ignore
A6XX_RB_RESOLVE_OPERATION_CLEAR_MASK__SHIFT = 4 # type: ignore
A6XX_RB_RESOLVE_OPERATION_LAST__MASK = 0x00000300 # type: ignore
A6XX_RB_RESOLVE_OPERATION_LAST__SHIFT = 8 # type: ignore
A6XX_RB_RESOLVE_OPERATION_BUFFER_ID__MASK = 0x0000f000 # type: ignore
A6XX_RB_RESOLVE_OPERATION_BUFFER_ID__SHIFT = 12 # type: ignore
REG_A7XX_RB_CLEAR_TARGET = 0x000088e4 # type: ignore
A7XX_RB_CLEAR_TARGET_CLEAR_MODE__MASK = 0x00000001 # type: ignore
A7XX_RB_CLEAR_TARGET_CLEAR_MODE__SHIFT = 0 # type: ignore
REG_A7XX_RB_CCU_CACHE_CNTL = 0x000088e5 # type: ignore
A7XX_RB_CCU_CACHE_CNTL_DEPTH_OFFSET_HI__MASK = 0x00000001 # type: ignore
A7XX_RB_CCU_CACHE_CNTL_DEPTH_OFFSET_HI__SHIFT = 0 # type: ignore
A7XX_RB_CCU_CACHE_CNTL_COLOR_OFFSET_HI__MASK = 0x00000004 # type: ignore
A7XX_RB_CCU_CACHE_CNTL_COLOR_OFFSET_HI__SHIFT = 2 # type: ignore
A7XX_RB_CCU_CACHE_CNTL_DEPTH_CACHE_SIZE__MASK = 0x00000c00 # type: ignore
A7XX_RB_CCU_CACHE_CNTL_DEPTH_CACHE_SIZE__SHIFT = 10 # type: ignore
A7XX_RB_CCU_CACHE_CNTL_DEPTH_OFFSET__MASK = 0x001ff000 # type: ignore
A7XX_RB_CCU_CACHE_CNTL_DEPTH_OFFSET__SHIFT = 12 # type: ignore
A7XX_RB_CCU_CACHE_CNTL_COLOR_CACHE_SIZE__MASK = 0x00600000 # type: ignore
A7XX_RB_CCU_CACHE_CNTL_COLOR_CACHE_SIZE__SHIFT = 21 # type: ignore
A7XX_RB_CCU_CACHE_CNTL_COLOR_OFFSET__MASK = 0xff800000 # type: ignore
A7XX_RB_CCU_CACHE_CNTL_COLOR_OFFSET__SHIFT = 23 # type: ignore
REG_A6XX_RB_UNKNOWN_88F0 = 0x000088f0 # type: ignore
REG_A6XX_RB_UNK_FLAG_BUFFER_BASE = 0x000088f1 # type: ignore
REG_A6XX_RB_UNK_FLAG_BUFFER_PITCH = 0x000088f3 # type: ignore
A6XX_RB_UNK_FLAG_BUFFER_PITCH_PITCH__MASK = 0x000007ff # type: ignore
A6XX_RB_UNK_FLAG_BUFFER_PITCH_PITCH__SHIFT = 0 # type: ignore
A6XX_RB_UNK_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK = 0x00fff800 # type: ignore
A6XX_RB_UNK_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT = 11 # type: ignore
REG_A6XX_RB_VRS_CONFIG = 0x000088f4 # type: ignore
A6XX_RB_VRS_CONFIG_UNK2 = 0x00000004 # type: ignore
A6XX_RB_VRS_CONFIG_PIPELINE_FSR_ENABLE = 0x00000010 # type: ignore
A6XX_RB_VRS_CONFIG_ATTACHMENT_FSR_ENABLE = 0x00000020 # type: ignore
A6XX_RB_VRS_CONFIG_PRIMITIVE_FSR_ENABLE = 0x00040000 # type: ignore
REG_A7XX_RB_UNKNOWN_88F5 = 0x000088f5 # type: ignore
REG_A6XX_RB_DEPTH_FLAG_BUFFER_BASE = 0x00008900 # type: ignore
REG_A6XX_RB_DEPTH_FLAG_BUFFER_PITCH = 0x00008902 # type: ignore
A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_PITCH__MASK = 0x0000007f # type: ignore
A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_PITCH__SHIFT = 0 # type: ignore
A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_UNK8__MASK = 0x00000700 # type: ignore
A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_UNK8__SHIFT = 8 # type: ignore
A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK = 0x0ffff800 # type: ignore
A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT = 11 # type: ignore
REG_A6XX_RB_COLOR_FLAG_BUFFER = lambda i0: (0x00008903 + 0x3*i0 ) # type: ignore
A6XX_RB_COLOR_FLAG_BUFFER_PITCH_PITCH__MASK = 0x000007ff # type: ignore
A6XX_RB_COLOR_FLAG_BUFFER_PITCH_PITCH__SHIFT = 0 # type: ignore
A6XX_RB_COLOR_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK = 0x1ffff800 # type: ignore
A6XX_RB_COLOR_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT = 11 # type: ignore
REG_A6XX_RB_SAMPLE_COUNTER_BASE = 0x00008927 # type: ignore
REG_A6XX_RB_UNKNOWN_8A00 = 0x00008a00 # type: ignore
REG_A6XX_RB_UNKNOWN_8A10 = 0x00008a10 # type: ignore
REG_A6XX_RB_UNKNOWN_8A20 = 0x00008a20 # type: ignore
REG_A6XX_RB_UNKNOWN_8A30 = 0x00008a30 # type: ignore
# --- Adreno A6xx/A7xx RB 2D-blit ("A2D") engine registers, plus RB debug,
# CCU control, flat-compression (NC) mode, and performance-counter selects.
# Generated register table: do not hand-edit values.
REG_A6XX_RB_A2D_BLT_CNTL = 0x00008c00 # type: ignore
A6XX_RB_A2D_BLT_CNTL_ROTATE__MASK = 0x00000007 # type: ignore
A6XX_RB_A2D_BLT_CNTL_ROTATE__SHIFT = 0 # type: ignore
A6XX_RB_A2D_BLT_CNTL_OVERWRITEEN = 0x00000008 # type: ignore
A6XX_RB_A2D_BLT_CNTL_UNK4__MASK = 0x00000070 # type: ignore
A6XX_RB_A2D_BLT_CNTL_UNK4__SHIFT = 4 # type: ignore
A6XX_RB_A2D_BLT_CNTL_SOLID_COLOR = 0x00000080 # type: ignore
A6XX_RB_A2D_BLT_CNTL_COLOR_FORMAT__MASK = 0x0000ff00 # type: ignore
A6XX_RB_A2D_BLT_CNTL_COLOR_FORMAT__SHIFT = 8 # type: ignore
A6XX_RB_A2D_BLT_CNTL_SCISSOR = 0x00010000 # type: ignore
A6XX_RB_A2D_BLT_CNTL_UNK17__MASK = 0x00060000 # type: ignore
A6XX_RB_A2D_BLT_CNTL_UNK17__SHIFT = 17 # type: ignore
A6XX_RB_A2D_BLT_CNTL_D24S8 = 0x00080000 # type: ignore
A6XX_RB_A2D_BLT_CNTL_MASK__MASK = 0x00f00000 # type: ignore
A6XX_RB_A2D_BLT_CNTL_MASK__SHIFT = 20 # type: ignore
A6XX_RB_A2D_BLT_CNTL_IFMT__MASK = 0x07000000 # type: ignore
A6XX_RB_A2D_BLT_CNTL_IFMT__SHIFT = 24 # type: ignore
A6XX_RB_A2D_BLT_CNTL_UNK27 = 0x08000000 # type: ignore
A6XX_RB_A2D_BLT_CNTL_UNK28 = 0x10000000 # type: ignore
A6XX_RB_A2D_BLT_CNTL_RASTER_MODE__MASK = 0x20000000 # type: ignore
A6XX_RB_A2D_BLT_CNTL_RASTER_MODE__SHIFT = 29 # type: ignore
A6XX_RB_A2D_BLT_CNTL_COPY = 0x40000000 # type: ignore
REG_A6XX_RB_A2D_PIXEL_CNTL = 0x00008c01 # type: ignore
# 2D-blit destination surface description (format, tiling, swap, MSAA).
REG_A6XX_RB_A2D_DEST_BUFFER_INFO = 0x00008c17 # type: ignore
A6XX_RB_A2D_DEST_BUFFER_INFO_COLOR_FORMAT__MASK = 0x000000ff # type: ignore
A6XX_RB_A2D_DEST_BUFFER_INFO_COLOR_FORMAT__SHIFT = 0 # type: ignore
A6XX_RB_A2D_DEST_BUFFER_INFO_TILE_MODE__MASK = 0x00000300 # type: ignore
A6XX_RB_A2D_DEST_BUFFER_INFO_TILE_MODE__SHIFT = 8 # type: ignore
A6XX_RB_A2D_DEST_BUFFER_INFO_COLOR_SWAP__MASK = 0x00000c00 # type: ignore
A6XX_RB_A2D_DEST_BUFFER_INFO_COLOR_SWAP__SHIFT = 10 # type: ignore
A6XX_RB_A2D_DEST_BUFFER_INFO_FLAGS = 0x00001000 # type: ignore
A6XX_RB_A2D_DEST_BUFFER_INFO_SRGB = 0x00002000 # type: ignore
A6XX_RB_A2D_DEST_BUFFER_INFO_SAMPLES__MASK = 0x0000c000 # type: ignore
A6XX_RB_A2D_DEST_BUFFER_INFO_SAMPLES__SHIFT = 14 # type: ignore
A6XX_RB_A2D_DEST_BUFFER_INFO_MUTABLEEN = 0x00020000 # type: ignore
REG_A6XX_RB_A2D_DEST_BUFFER_BASE = 0x00008c18 # type: ignore
REG_A6XX_RB_A2D_DEST_BUFFER_PITCH = 0x00008c1a # type: ignore
A6XX_RB_A2D_DEST_BUFFER_PITCH__MASK = 0x0000ffff # type: ignore
A6XX_RB_A2D_DEST_BUFFER_PITCH__SHIFT = 0 # type: ignore
REG_A6XX_RB_A2D_DEST_BUFFER_BASE_1 = 0x00008c1b # type: ignore
REG_A6XX_RB_A2D_DEST_BUFFER_PITCH_1 = 0x00008c1d # type: ignore
A6XX_RB_A2D_DEST_BUFFER_PITCH_1__MASK = 0x0000ffff # type: ignore
A6XX_RB_A2D_DEST_BUFFER_PITCH_1__SHIFT = 0 # type: ignore
REG_A6XX_RB_A2D_DEST_BUFFER_BASE_2 = 0x00008c1e # type: ignore
REG_A6XX_RB_A2D_DEST_FLAG_BUFFER_BASE = 0x00008c20 # type: ignore
REG_A6XX_RB_A2D_DEST_FLAG_BUFFER_PITCH = 0x00008c22 # type: ignore
A6XX_RB_A2D_DEST_FLAG_BUFFER_PITCH__MASK = 0x000000ff # type: ignore
A6XX_RB_A2D_DEST_FLAG_BUFFER_PITCH__SHIFT = 0 # type: ignore
REG_A6XX_RB_A2D_DEST_FLAG_BUFFER_BASE_1 = 0x00008c23 # type: ignore
REG_A6XX_RB_A2D_DEST_FLAG_BUFFER_PITCH_1 = 0x00008c25 # type: ignore
A6XX_RB_A2D_DEST_FLAG_BUFFER_PITCH_1__MASK = 0x000000ff # type: ignore
A6XX_RB_A2D_DEST_FLAG_BUFFER_PITCH_1__SHIFT = 0 # type: ignore
REG_A6XX_RB_A2D_CLEAR_COLOR_DW0 = 0x00008c2c # type: ignore
REG_A6XX_RB_A2D_CLEAR_COLOR_DW1 = 0x00008c2d # type: ignore
REG_A6XX_RB_A2D_CLEAR_COLOR_DW2 = 0x00008c2e # type: ignore
REG_A6XX_RB_A2D_CLEAR_COLOR_DW3 = 0x00008c2f # type: ignore
REG_A7XX_RB_UNKNOWN_8C34 = 0x00008c34 # type: ignore
REG_A6XX_RB_UNKNOWN_8E01 = 0x00008e01 # type: ignore
REG_A6XX_RB_DBG_ECO_CNTL = 0x00008e04 # type: ignore
REG_A6XX_RB_ADDR_MODE_CNTL = 0x00008e05 # type: ignore
REG_A7XX_RB_CCU_DBG_ECO_CNTL = 0x00008e06 # type: ignore
# Note: A6xx and A7xx define the same 0x8e07 offset with different layouts.
REG_A6XX_RB_CCU_CNTL = 0x00008e07 # type: ignore
A6XX_RB_CCU_CNTL_GMEM_FAST_CLEAR_DISABLE = 0x00000001 # type: ignore
A6XX_RB_CCU_CNTL_CONCURRENT_RESOLVE = 0x00000004 # type: ignore
A6XX_RB_CCU_CNTL_DEPTH_OFFSET_HI__MASK = 0x00000080 # type: ignore
A6XX_RB_CCU_CNTL_DEPTH_OFFSET_HI__SHIFT = 7 # type: ignore
A6XX_RB_CCU_CNTL_COLOR_OFFSET_HI__MASK = 0x00000200 # type: ignore
A6XX_RB_CCU_CNTL_COLOR_OFFSET_HI__SHIFT = 9 # type: ignore
A6XX_RB_CCU_CNTL_DEPTH_CACHE_SIZE__MASK = 0x00000c00 # type: ignore
A6XX_RB_CCU_CNTL_DEPTH_CACHE_SIZE__SHIFT = 10 # type: ignore
A6XX_RB_CCU_CNTL_DEPTH_OFFSET__MASK = 0x001ff000 # type: ignore
A6XX_RB_CCU_CNTL_DEPTH_OFFSET__SHIFT = 12 # type: ignore
A6XX_RB_CCU_CNTL_COLOR_CACHE_SIZE__MASK = 0x00600000 # type: ignore
A6XX_RB_CCU_CNTL_COLOR_CACHE_SIZE__SHIFT = 21 # type: ignore
A6XX_RB_CCU_CNTL_COLOR_OFFSET__MASK = 0xff800000 # type: ignore
A6XX_RB_CCU_CNTL_COLOR_OFFSET__SHIFT = 23 # type: ignore
REG_A7XX_RB_CCU_CNTL = 0x00008e07 # type: ignore
A7XX_RB_CCU_CNTL_GMEM_FAST_CLEAR_DISABLE = 0x00000001 # type: ignore
A7XX_RB_CCU_CNTL_CONCURRENT_RESOLVE_MODE__MASK = 0x0000000c # type: ignore
A7XX_RB_CCU_CNTL_CONCURRENT_RESOLVE_MODE__SHIFT = 2 # type: ignore
A7XX_RB_CCU_CNTL_CONCURRENT_UNRESOLVE_MODE__MASK = 0x00000060 # type: ignore
A7XX_RB_CCU_CNTL_CONCURRENT_UNRESOLVE_MODE__SHIFT = 5 # type: ignore
REG_A6XX_RB_NC_MODE_CNTL = 0x00008e08 # type: ignore
A6XX_RB_NC_MODE_CNTL_MODE = 0x00000001 # type: ignore
A6XX_RB_NC_MODE_CNTL_LOWER_BIT__MASK = 0x00000006 # type: ignore
A6XX_RB_NC_MODE_CNTL_LOWER_BIT__SHIFT = 1 # type: ignore
A6XX_RB_NC_MODE_CNTL_MIN_ACCESS_LENGTH = 0x00000008 # type: ignore
A6XX_RB_NC_MODE_CNTL_AMSBC = 0x00000010 # type: ignore
A6XX_RB_NC_MODE_CNTL_UPPER_BIT__MASK = 0x00000400 # type: ignore
A6XX_RB_NC_MODE_CNTL_UPPER_BIT__SHIFT = 10 # type: ignore
A6XX_RB_NC_MODE_CNTL_RGB565_PREDICATOR = 0x00000800 # type: ignore
A6XX_RB_NC_MODE_CNTL_UNK12__MASK = 0x00003000 # type: ignore
A6XX_RB_NC_MODE_CNTL_UNK12__SHIFT = 12 # type: ignore
REG_A7XX_RB_UNKNOWN_8E09 = 0x00008e09 # type: ignore
# Performance-counter select register arrays (stride 0x1 dword per counter).
REG_A6XX_RB_PERFCTR_RB_SEL = lambda i0: (0x00008e10 + 0x1*i0 ) # type: ignore
REG_A6XX_RB_PERFCTR_CCU_SEL = lambda i0: (0x00008e18 + 0x1*i0 ) # type: ignore
REG_A6XX_RB_CMP_DBG_ECO_CNTL = 0x00008e28 # type: ignore
REG_A6XX_RB_PERFCTR_CMP_SEL = lambda i0: (0x00008e2c + 0x1*i0 ) # type: ignore
REG_A7XX_RB_PERFCTR_UFC_SEL = lambda i0: (0x00008e30 + 0x1*i0 ) # type: ignore
REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST = 0x00008e3b # type: ignore
REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD = 0x00008e3d # type: ignore
REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE_ENABLE = 0x00008e50 # type: ignore
REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE_ADDR = 0x00008e51 # type: ignore
REG_A7XX_RB_UNKNOWN_8E79 = 0x00008e79 # type: ignore
# --- Adreno A6xx/A7xx VPC (varying/position cache) registers: per-stage
# clip/cull distance locations, system-interpreted-value (SIV) locations,
# varying interpolation, stream-out (SO) mapping, and per-stage stride/loc
# control. "_V2" variants are the alternate register copies at 0x931x.
# Generated register table: do not hand-edit values.
REG_A6XX_VPC_GS_PARAM = 0x00009100 # type: ignore
A6XX_VPC_GS_PARAM_LINELENGTHLOC__MASK = 0x000000ff # type: ignore
A6XX_VPC_GS_PARAM_LINELENGTHLOC__SHIFT = 0 # type: ignore
REG_A6XX_VPC_VS_CLIP_CULL_CNTL = 0x00009101 # type: ignore
A6XX_VPC_VS_CLIP_CULL_CNTL_CLIP_MASK__MASK = 0x000000ff # type: ignore
A6XX_VPC_VS_CLIP_CULL_CNTL_CLIP_MASK__SHIFT = 0 # type: ignore
A6XX_VPC_VS_CLIP_CULL_CNTL_CLIP_DIST_03_LOC__MASK = 0x0000ff00 # type: ignore
A6XX_VPC_VS_CLIP_CULL_CNTL_CLIP_DIST_03_LOC__SHIFT = 8 # type: ignore
A6XX_VPC_VS_CLIP_CULL_CNTL_CLIP_DIST_47_LOC__MASK = 0x00ff0000 # type: ignore
A6XX_VPC_VS_CLIP_CULL_CNTL_CLIP_DIST_47_LOC__SHIFT = 16 # type: ignore
REG_A6XX_VPC_GS_CLIP_CULL_CNTL = 0x00009102 # type: ignore
A6XX_VPC_GS_CLIP_CULL_CNTL_CLIP_MASK__MASK = 0x000000ff # type: ignore
A6XX_VPC_GS_CLIP_CULL_CNTL_CLIP_MASK__SHIFT = 0 # type: ignore
A6XX_VPC_GS_CLIP_CULL_CNTL_CLIP_DIST_03_LOC__MASK = 0x0000ff00 # type: ignore
A6XX_VPC_GS_CLIP_CULL_CNTL_CLIP_DIST_03_LOC__SHIFT = 8 # type: ignore
A6XX_VPC_GS_CLIP_CULL_CNTL_CLIP_DIST_47_LOC__MASK = 0x00ff0000 # type: ignore
A6XX_VPC_GS_CLIP_CULL_CNTL_CLIP_DIST_47_LOC__SHIFT = 16 # type: ignore
REG_A6XX_VPC_DS_CLIP_CULL_CNTL = 0x00009103 # type: ignore
A6XX_VPC_DS_CLIP_CULL_CNTL_CLIP_MASK__MASK = 0x000000ff # type: ignore
A6XX_VPC_DS_CLIP_CULL_CNTL_CLIP_MASK__SHIFT = 0 # type: ignore
A6XX_VPC_DS_CLIP_CULL_CNTL_CLIP_DIST_03_LOC__MASK = 0x0000ff00 # type: ignore
A6XX_VPC_DS_CLIP_CULL_CNTL_CLIP_DIST_03_LOC__SHIFT = 8 # type: ignore
A6XX_VPC_DS_CLIP_CULL_CNTL_CLIP_DIST_47_LOC__MASK = 0x00ff0000 # type: ignore
A6XX_VPC_DS_CLIP_CULL_CNTL_CLIP_DIST_47_LOC__SHIFT = 16 # type: ignore
REG_A6XX_VPC_VS_CLIP_CULL_CNTL_V2 = 0x00009311 # type: ignore
A6XX_VPC_VS_CLIP_CULL_CNTL_V2_CLIP_MASK__MASK = 0x000000ff # type: ignore
A6XX_VPC_VS_CLIP_CULL_CNTL_V2_CLIP_MASK__SHIFT = 0 # type: ignore
A6XX_VPC_VS_CLIP_CULL_CNTL_V2_CLIP_DIST_03_LOC__MASK = 0x0000ff00 # type: ignore
A6XX_VPC_VS_CLIP_CULL_CNTL_V2_CLIP_DIST_03_LOC__SHIFT = 8 # type: ignore
A6XX_VPC_VS_CLIP_CULL_CNTL_V2_CLIP_DIST_47_LOC__MASK = 0x00ff0000 # type: ignore
A6XX_VPC_VS_CLIP_CULL_CNTL_V2_CLIP_DIST_47_LOC__SHIFT = 16 # type: ignore
REG_A6XX_VPC_GS_CLIP_CULL_CNTL_V2 = 0x00009312 # type: ignore
A6XX_VPC_GS_CLIP_CULL_CNTL_V2_CLIP_MASK__MASK = 0x000000ff # type: ignore
A6XX_VPC_GS_CLIP_CULL_CNTL_V2_CLIP_MASK__SHIFT = 0 # type: ignore
A6XX_VPC_GS_CLIP_CULL_CNTL_V2_CLIP_DIST_03_LOC__MASK = 0x0000ff00 # type: ignore
A6XX_VPC_GS_CLIP_CULL_CNTL_V2_CLIP_DIST_03_LOC__SHIFT = 8 # type: ignore
A6XX_VPC_GS_CLIP_CULL_CNTL_V2_CLIP_DIST_47_LOC__MASK = 0x00ff0000 # type: ignore
A6XX_VPC_GS_CLIP_CULL_CNTL_V2_CLIP_DIST_47_LOC__SHIFT = 16 # type: ignore
REG_A6XX_VPC_DS_CLIP_CULL_CNTL_V2 = 0x00009313 # type: ignore
A6XX_VPC_DS_CLIP_CULL_CNTL_V2_CLIP_MASK__MASK = 0x000000ff # type: ignore
A6XX_VPC_DS_CLIP_CULL_CNTL_V2_CLIP_MASK__SHIFT = 0 # type: ignore
A6XX_VPC_DS_CLIP_CULL_CNTL_V2_CLIP_DIST_03_LOC__MASK = 0x0000ff00 # type: ignore
A6XX_VPC_DS_CLIP_CULL_CNTL_V2_CLIP_DIST_03_LOC__SHIFT = 8 # type: ignore
A6XX_VPC_DS_CLIP_CULL_CNTL_V2_CLIP_DIST_47_LOC__MASK = 0x00ff0000 # type: ignore
A6XX_VPC_DS_CLIP_CULL_CNTL_V2_CLIP_DIST_47_LOC__SHIFT = 16 # type: ignore
# SIV = system interpreted values (layer / view / shading-rate output slots).
REG_A6XX_VPC_VS_SIV_CNTL = 0x00009104 # type: ignore
A6XX_VPC_VS_SIV_CNTL_LAYERLOC__MASK = 0x000000ff # type: ignore
A6XX_VPC_VS_SIV_CNTL_LAYERLOC__SHIFT = 0 # type: ignore
A6XX_VPC_VS_SIV_CNTL_VIEWLOC__MASK = 0x0000ff00 # type: ignore
A6XX_VPC_VS_SIV_CNTL_VIEWLOC__SHIFT = 8 # type: ignore
A6XX_VPC_VS_SIV_CNTL_SHADINGRATELOC__MASK = 0x00ff0000 # type: ignore
A6XX_VPC_VS_SIV_CNTL_SHADINGRATELOC__SHIFT = 16 # type: ignore
REG_A6XX_VPC_GS_SIV_CNTL = 0x00009105 # type: ignore
A6XX_VPC_GS_SIV_CNTL_LAYERLOC__MASK = 0x000000ff # type: ignore
A6XX_VPC_GS_SIV_CNTL_LAYERLOC__SHIFT = 0 # type: ignore
A6XX_VPC_GS_SIV_CNTL_VIEWLOC__MASK = 0x0000ff00 # type: ignore
A6XX_VPC_GS_SIV_CNTL_VIEWLOC__SHIFT = 8 # type: ignore
A6XX_VPC_GS_SIV_CNTL_SHADINGRATELOC__MASK = 0x00ff0000 # type: ignore
A6XX_VPC_GS_SIV_CNTL_SHADINGRATELOC__SHIFT = 16 # type: ignore
REG_A6XX_VPC_DS_SIV_CNTL = 0x00009106 # type: ignore
A6XX_VPC_DS_SIV_CNTL_LAYERLOC__MASK = 0x000000ff # type: ignore
A6XX_VPC_DS_SIV_CNTL_LAYERLOC__SHIFT = 0 # type: ignore
A6XX_VPC_DS_SIV_CNTL_VIEWLOC__MASK = 0x0000ff00 # type: ignore
A6XX_VPC_DS_SIV_CNTL_VIEWLOC__SHIFT = 8 # type: ignore
A6XX_VPC_DS_SIV_CNTL_SHADINGRATELOC__MASK = 0x00ff0000 # type: ignore
A6XX_VPC_DS_SIV_CNTL_SHADINGRATELOC__SHIFT = 16 # type: ignore
REG_A6XX_VPC_VS_SIV_CNTL_V2 = 0x00009314 # type: ignore
A6XX_VPC_VS_SIV_CNTL_V2_LAYERLOC__MASK = 0x000000ff # type: ignore
A6XX_VPC_VS_SIV_CNTL_V2_LAYERLOC__SHIFT = 0 # type: ignore
A6XX_VPC_VS_SIV_CNTL_V2_VIEWLOC__MASK = 0x0000ff00 # type: ignore
A6XX_VPC_VS_SIV_CNTL_V2_VIEWLOC__SHIFT = 8 # type: ignore
A6XX_VPC_VS_SIV_CNTL_V2_SHADINGRATELOC__MASK = 0x00ff0000 # type: ignore
A6XX_VPC_VS_SIV_CNTL_V2_SHADINGRATELOC__SHIFT = 16 # type: ignore
REG_A6XX_VPC_GS_SIV_CNTL_V2 = 0x00009315 # type: ignore
A6XX_VPC_GS_SIV_CNTL_V2_LAYERLOC__MASK = 0x000000ff # type: ignore
A6XX_VPC_GS_SIV_CNTL_V2_LAYERLOC__SHIFT = 0 # type: ignore
A6XX_VPC_GS_SIV_CNTL_V2_VIEWLOC__MASK = 0x0000ff00 # type: ignore
A6XX_VPC_GS_SIV_CNTL_V2_VIEWLOC__SHIFT = 8 # type: ignore
A6XX_VPC_GS_SIV_CNTL_V2_SHADINGRATELOC__MASK = 0x00ff0000 # type: ignore
A6XX_VPC_GS_SIV_CNTL_V2_SHADINGRATELOC__SHIFT = 16 # type: ignore
REG_A6XX_VPC_DS_SIV_CNTL_V2 = 0x00009316 # type: ignore
A6XX_VPC_DS_SIV_CNTL_V2_LAYERLOC__MASK = 0x000000ff # type: ignore
A6XX_VPC_DS_SIV_CNTL_V2_LAYERLOC__SHIFT = 0 # type: ignore
A6XX_VPC_DS_SIV_CNTL_V2_VIEWLOC__MASK = 0x0000ff00 # type: ignore
A6XX_VPC_DS_SIV_CNTL_V2_VIEWLOC__SHIFT = 8 # type: ignore
A6XX_VPC_DS_SIV_CNTL_V2_SHADINGRATELOC__MASK = 0x00ff0000 # type: ignore
A6XX_VPC_DS_SIV_CNTL_V2_SHADINGRATELOC__SHIFT = 16 # type: ignore
REG_A6XX_VPC_UNKNOWN_9107 = 0x00009107 # type: ignore
A6XX_VPC_UNKNOWN_9107_RASTER_DISCARD = 0x00000001 # type: ignore
A6XX_VPC_UNKNOWN_9107_UNK2 = 0x00000004 # type: ignore
REG_A6XX_VPC_RAST_CNTL = 0x00009108 # type: ignore
A6XX_VPC_RAST_CNTL_MODE__MASK = 0x00000003 # type: ignore
A6XX_VPC_RAST_CNTL_MODE__SHIFT = 0 # type: ignore
REG_A7XX_VPC_PC_CNTL = 0x00009109 # type: ignore
A7XX_VPC_PC_CNTL_PRIMITIVE_RESTART = 0x00000001 # type: ignore
A7XX_VPC_PC_CNTL_PROVOKING_VTX_LAST = 0x00000002 # type: ignore
A7XX_VPC_PC_CNTL_D3D_VERTEX_ORDERING = 0x00000004 # type: ignore
A7XX_VPC_PC_CNTL_UNK3 = 0x00000008 # type: ignore
REG_A7XX_VPC_GS_PARAM_0 = 0x0000910a # type: ignore
A7XX_VPC_GS_PARAM_0_GS_VERTICES_OUT__MASK = 0x000000ff # type: ignore
A7XX_VPC_GS_PARAM_0_GS_VERTICES_OUT__SHIFT = 0 # type: ignore
A7XX_VPC_GS_PARAM_0_GS_INVOCATIONS__MASK = 0x00007c00 # type: ignore
A7XX_VPC_GS_PARAM_0_GS_INVOCATIONS__SHIFT = 10 # type: ignore
A7XX_VPC_GS_PARAM_0_LINELENGTHEN = 0x00008000 # type: ignore
A7XX_VPC_GS_PARAM_0_GS_OUTPUT__MASK = 0x00030000 # type: ignore
A7XX_VPC_GS_PARAM_0_GS_OUTPUT__SHIFT = 16 # type: ignore
A7XX_VPC_GS_PARAM_0_UNK18 = 0x00040000 # type: ignore
REG_A7XX_VPC_STEREO_RENDERING_VIEWMASK = 0x0000910b # type: ignore
REG_A7XX_VPC_STEREO_RENDERING_CNTL = 0x0000910c # type: ignore
A7XX_VPC_STEREO_RENDERING_CNTL_ENABLE = 0x00000001 # type: ignore
A7XX_VPC_STEREO_RENDERING_CNTL_DISABLEMULTIPOS = 0x00000002 # type: ignore
A7XX_VPC_STEREO_RENDERING_CNTL_VIEWS__MASK = 0x0000007c # type: ignore
A7XX_VPC_STEREO_RENDERING_CNTL_VIEWS__SHIFT = 2 # type: ignore
# Varying interpolation / replace-mode register arrays.
REG_A6XX_VPC_VARYING_INTERP_MODE = lambda i0: (0x00009200 + 0x1*i0 ) # type: ignore
REG_A6XX_VPC_VARYING_REPLACE_MODE_0 = lambda i0: (0x00009208 + 0x1*i0 ) # type: ignore
REG_A6XX_VPC_UNKNOWN_9210 = 0x00009210 # type: ignore
REG_A6XX_VPC_UNKNOWN_9211 = 0x00009211 # type: ignore
REG_A6XX_VPC_VARYING_LM_TRANSFER_CNTL_0 = lambda i0: (0x00009212 + 0x1*i0 ) # type: ignore
# Stream-out (transform feedback) mapping table write port.
REG_A6XX_VPC_SO_MAPPING_WPTR = 0x00009216 # type: ignore
A6XX_VPC_SO_MAPPING_WPTR_ADDR__MASK = 0x000000ff # type: ignore
A6XX_VPC_SO_MAPPING_WPTR_ADDR__SHIFT = 0 # type: ignore
A6XX_VPC_SO_MAPPING_WPTR_RESET = 0x00010000 # type: ignore
REG_A6XX_VPC_SO_MAPPING_PORT = 0x00009217 # type: ignore
A6XX_VPC_SO_MAPPING_PORT_A_BUF__MASK = 0x00000003 # type: ignore
A6XX_VPC_SO_MAPPING_PORT_A_BUF__SHIFT = 0 # type: ignore
A6XX_VPC_SO_MAPPING_PORT_A_OFF__MASK = 0x000007fc # type: ignore
A6XX_VPC_SO_MAPPING_PORT_A_OFF__SHIFT = 2 # type: ignore
A6XX_VPC_SO_MAPPING_PORT_A_EN = 0x00000800 # type: ignore
A6XX_VPC_SO_MAPPING_PORT_B_BUF__MASK = 0x00003000 # type: ignore
A6XX_VPC_SO_MAPPING_PORT_B_BUF__SHIFT = 12 # type: ignore
A6XX_VPC_SO_MAPPING_PORT_B_OFF__MASK = 0x007fc000 # type: ignore
A6XX_VPC_SO_MAPPING_PORT_B_OFF__SHIFT = 14 # type: ignore
A6XX_VPC_SO_MAPPING_PORT_B_EN = 0x00800000 # type: ignore
REG_A6XX_VPC_SO_QUERY_BASE = 0x00009218 # type: ignore
# Register array: each stream-out buffer record is 0x7 dwords.
REG_A6XX_VPC_SO = lambda i0: (0x0000921a + 0x7*i0 ) # type: ignore
REG_A6XX_VPC_REPLACE_MODE_CNTL = 0x00009236 # type: ignore
A6XX_VPC_REPLACE_MODE_CNTL_INVERT = 0x00000001 # type: ignore
REG_A6XX_VPC_UNKNOWN_9300 = 0x00009300 # type: ignore
# Per-stage VPC output layout: stride in VPC, position / point-size slots.
REG_A6XX_VPC_VS_CNTL = 0x00009301 # type: ignore
A6XX_VPC_VS_CNTL_STRIDE_IN_VPC__MASK = 0x000000ff # type: ignore
A6XX_VPC_VS_CNTL_STRIDE_IN_VPC__SHIFT = 0 # type: ignore
A6XX_VPC_VS_CNTL_POSITIONLOC__MASK = 0x0000ff00 # type: ignore
A6XX_VPC_VS_CNTL_POSITIONLOC__SHIFT = 8 # type: ignore
A6XX_VPC_VS_CNTL_PSIZELOC__MASK = 0x00ff0000 # type: ignore
A6XX_VPC_VS_CNTL_PSIZELOC__SHIFT = 16 # type: ignore
A6XX_VPC_VS_CNTL_EXTRAPOS__MASK = 0x0f000000 # type: ignore
A6XX_VPC_VS_CNTL_EXTRAPOS__SHIFT = 24 # type: ignore
REG_A6XX_VPC_GS_CNTL = 0x00009302 # type: ignore
A6XX_VPC_GS_CNTL_STRIDE_IN_VPC__MASK = 0x000000ff # type: ignore
A6XX_VPC_GS_CNTL_STRIDE_IN_VPC__SHIFT = 0 # type: ignore
A6XX_VPC_GS_CNTL_POSITIONLOC__MASK = 0x0000ff00 # type: ignore
A6XX_VPC_GS_CNTL_POSITIONLOC__SHIFT = 8 # type: ignore
A6XX_VPC_GS_CNTL_PSIZELOC__MASK = 0x00ff0000 # type: ignore
A6XX_VPC_GS_CNTL_PSIZELOC__SHIFT = 16 # type: ignore
A6XX_VPC_GS_CNTL_EXTRAPOS__MASK = 0x0f000000 # type: ignore
A6XX_VPC_GS_CNTL_EXTRAPOS__SHIFT = 24 # type: ignore
REG_A6XX_VPC_DS_CNTL = 0x00009303 # type: ignore
A6XX_VPC_DS_CNTL_STRIDE_IN_VPC__MASK = 0x000000ff # type: ignore
A6XX_VPC_DS_CNTL_STRIDE_IN_VPC__SHIFT = 0 # type: ignore
A6XX_VPC_DS_CNTL_POSITIONLOC__MASK = 0x0000ff00 # type: ignore
A6XX_VPC_DS_CNTL_POSITIONLOC__SHIFT = 8 # type: ignore
A6XX_VPC_DS_CNTL_PSIZELOC__MASK = 0x00ff0000 # type: ignore
A6XX_VPC_DS_CNTL_PSIZELOC__SHIFT = 16 # type: ignore
A6XX_VPC_DS_CNTL_EXTRAPOS__MASK = 0x0f000000 # type: ignore
A6XX_VPC_DS_CNTL_EXTRAPOS__SHIFT = 24 # type: ignore
REG_A6XX_VPC_PS_CNTL = 0x00009304 # type: ignore
A6XX_VPC_PS_CNTL_NUMNONPOSVAR__MASK = 0x000000ff # type: ignore
A6XX_VPC_PS_CNTL_NUMNONPOSVAR__SHIFT = 0 # type: ignore
A6XX_VPC_PS_CNTL_PRIMIDLOC__MASK = 0x0000ff00 # type: ignore
A6XX_VPC_PS_CNTL_PRIMIDLOC__SHIFT = 8 # type: ignore
A6XX_VPC_PS_CNTL_VARYING = 0x00010000 # type: ignore
A6XX_VPC_PS_CNTL_VIEWIDLOC__MASK = 0xff000000 # type: ignore
A6XX_VPC_PS_CNTL_VIEWIDLOC__SHIFT = 24 # type: ignore
REG_A6XX_VPC_SO_CNTL = 0x00009305 # type: ignore
A6XX_VPC_SO_CNTL_BUF0_STREAM__MASK = 0x00000007 # type: ignore
A6XX_VPC_SO_CNTL_BUF0_STREAM__SHIFT = 0 # type: ignore
A6XX_VPC_SO_CNTL_BUF1_STREAM__MASK = 0x00000038 # type: ignore
A6XX_VPC_SO_CNTL_BUF1_STREAM__SHIFT = 3 # type: ignore
A6XX_VPC_SO_CNTL_BUF2_STREAM__MASK = 0x000001c0 # type: ignore
A6XX_VPC_SO_CNTL_BUF2_STREAM__SHIFT = 6 # type: ignore
A6XX_VPC_SO_CNTL_BUF3_STREAM__MASK = 0x00000e00 # type: ignore
A6XX_VPC_SO_CNTL_BUF3_STREAM__SHIFT = 9 # type: ignore
A6XX_VPC_SO_CNTL_STREAM_ENABLE__MASK = 0x00078000 # type: ignore
A6XX_VPC_SO_CNTL_STREAM_ENABLE__SHIFT = 15 # type: ignore
REG_A6XX_VPC_SO_OVERRIDE = 0x00009306 # type: ignore
A6XX_VPC_SO_OVERRIDE_DISABLE = 0x00000001 # type: ignore
REG_A6XX_VPC_PS_RAST_CNTL = 0x00009307 # type: ignore
A6XX_VPC_PS_RAST_CNTL_MODE__MASK = 0x00000003 # type: ignore
A6XX_VPC_PS_RAST_CNTL_MODE__SHIFT = 0 # type: ignore
REG_A7XX_VPC_ATTR_BUF_GMEM_SIZE = 0x00009308 # type: ignore
A7XX_VPC_ATTR_BUF_GMEM_SIZE_SIZE_GMEM__MASK = 0xffffffff # type: ignore
A7XX_VPC_ATTR_BUF_GMEM_SIZE_SIZE_GMEM__SHIFT = 0 # type: ignore
REG_A7XX_VPC_ATTR_BUF_GMEM_BASE = 0x00009309 # type: ignore
A7XX_VPC_ATTR_BUF_GMEM_BASE_BASE_GMEM__MASK = 0xffffffff # type: ignore
A7XX_VPC_ATTR_BUF_GMEM_BASE_BASE_GMEM__SHIFT = 0 # type: ignore
REG_A7XX_PC_ATTR_BUF_GMEM_SIZE = 0x00009b09 # type: ignore
A7XX_PC_ATTR_BUF_GMEM_SIZE_SIZE_GMEM__MASK = 0xffffffff # type: ignore
A7XX_PC_ATTR_BUF_GMEM_SIZE_SIZE_GMEM__SHIFT = 0 # type: ignore
REG_A6XX_VPC_DBG_ECO_CNTL = 0x00009600 # type: ignore
REG_A6XX_VPC_ADDR_MODE_CNTL = 0x00009601 # type: ignore
REG_A6XX_VPC_UNKNOWN_9602 = 0x00009602 # type: ignore
REG_A6XX_VPC_UNKNOWN_9603 = 0x00009603 # type: ignore
REG_A6XX_VPC_PERFCTR_VPC_SEL = lambda i0: (0x00009604 + 0x1*i0 ) # type: ignore
REG_A7XX_VPC_PERFCTR_VPC_SEL = lambda i0: (0x0000960b + 0x1*i0 ) # type: ignore
# --- Adreno A6xx/A7xx PC (primitive control) registers: tessellation
# (HS/DS) parameters, draw/kernel/event initiators, per-stage output CNTLs,
# rasterizer stream selection, and stereo rendering.
# Generated register table: do not hand-edit values.
REG_A6XX_PC_HS_PARAM_0 = 0x00009800 # type: ignore
REG_A6XX_PC_HS_PARAM_1 = 0x00009801 # type: ignore
A6XX_PC_HS_PARAM_1_SIZE__MASK = 0x000007ff # type: ignore
A6XX_PC_HS_PARAM_1_SIZE__SHIFT = 0 # type: ignore
A6XX_PC_HS_PARAM_1_UNK13 = 0x00002000 # type: ignore
REG_A6XX_PC_DS_PARAM = 0x00009802 # type: ignore
A6XX_PC_DS_PARAM_SPACING__MASK = 0x00000003 # type: ignore
A6XX_PC_DS_PARAM_SPACING__SHIFT = 0 # type: ignore
A6XX_PC_DS_PARAM_OUTPUT__MASK = 0x0000000c # type: ignore
A6XX_PC_DS_PARAM_OUTPUT__SHIFT = 2 # type: ignore
REG_A6XX_PC_RESTART_INDEX = 0x00009803 # type: ignore
REG_A6XX_PC_MODE_CNTL = 0x00009804 # type: ignore
REG_A6XX_PC_POWER_CNTL = 0x00009805 # type: ignore
REG_A6XX_PC_PS_CNTL = 0x00009806 # type: ignore
A6XX_PC_PS_CNTL_PRIMITIVEIDEN = 0x00000001 # type: ignore
REG_A6XX_PC_DGEN_SO_CNTL = 0x00009808 # type: ignore
A6XX_PC_DGEN_SO_CNTL_STREAM_ENABLE__MASK = 0x00078000 # type: ignore
A6XX_PC_DGEN_SO_CNTL_STREAM_ENABLE__SHIFT = 15 # type: ignore
REG_A6XX_PC_DGEN_SU_CONSERVATIVE_RAS_CNTL = 0x0000980a # type: ignore
A6XX_PC_DGEN_SU_CONSERVATIVE_RAS_CNTL_CONSERVATIVERASEN = 0x00000001 # type: ignore
REG_A6XX_PC_DRAW_INITIATOR = 0x00009840 # type: ignore
A6XX_PC_DRAW_INITIATOR_STATE_ID__MASK = 0x000000ff # type: ignore
A6XX_PC_DRAW_INITIATOR_STATE_ID__SHIFT = 0 # type: ignore
REG_A6XX_PC_KERNEL_INITIATOR = 0x00009841 # type: ignore
A6XX_PC_KERNEL_INITIATOR_STATE_ID__MASK = 0x000000ff # type: ignore
A6XX_PC_KERNEL_INITIATOR_STATE_ID__SHIFT = 0 # type: ignore
REG_A6XX_PC_EVENT_INITIATOR = 0x00009842 # type: ignore
A6XX_PC_EVENT_INITIATOR_STATE_ID__MASK = 0x00ff0000 # type: ignore
A6XX_PC_EVENT_INITIATOR_STATE_ID__SHIFT = 16 # type: ignore
A6XX_PC_EVENT_INITIATOR_EVENT__MASK = 0x0000007f # type: ignore
A6XX_PC_EVENT_INITIATOR_EVENT__SHIFT = 0 # type: ignore
REG_A6XX_PC_MARKER = 0x00009880 # type: ignore
# Note: the A6xx and A7xx variants of DGEN_RAST_CNTL live at different
# offsets (0x9981 vs 0x9809); same field layout.
REG_A6XX_PC_DGEN_RAST_CNTL = 0x00009981 # type: ignore
A6XX_PC_DGEN_RAST_CNTL_MODE__MASK = 0x00000003 # type: ignore
A6XX_PC_DGEN_RAST_CNTL_MODE__SHIFT = 0 # type: ignore
REG_A7XX_PC_DGEN_RAST_CNTL = 0x00009809 # type: ignore
A7XX_PC_DGEN_RAST_CNTL_MODE__MASK = 0x00000003 # type: ignore
A7XX_PC_DGEN_RAST_CNTL_MODE__SHIFT = 0 # type: ignore
REG_A6XX_VPC_RAST_STREAM_CNTL = 0x00009980 # type: ignore
A6XX_VPC_RAST_STREAM_CNTL_STREAM__MASK = 0x00000003 # type: ignore
A6XX_VPC_RAST_STREAM_CNTL_STREAM__SHIFT = 0 # type: ignore
A6XX_VPC_RAST_STREAM_CNTL_DISCARD = 0x00000004 # type: ignore
REG_A7XX_VPC_RAST_STREAM_CNTL = 0x00009107 # type: ignore
A7XX_VPC_RAST_STREAM_CNTL_STREAM__MASK = 0x00000003 # type: ignore
A7XX_VPC_RAST_STREAM_CNTL_STREAM__SHIFT = 0 # type: ignore
A7XX_VPC_RAST_STREAM_CNTL_DISCARD = 0x00000004 # type: ignore
REG_A7XX_VPC_RAST_STREAM_CNTL_V2 = 0x00009317 # type: ignore
A7XX_VPC_RAST_STREAM_CNTL_V2_STREAM__MASK = 0x00000003 # type: ignore
A7XX_VPC_RAST_STREAM_CNTL_V2_STREAM__SHIFT = 0 # type: ignore
A7XX_VPC_RAST_STREAM_CNTL_V2_DISCARD = 0x00000004 # type: ignore
REG_A7XX_PC_HS_BUFFER_SIZE = 0x00009885 # type: ignore
REG_A7XX_PC_TF_BUFFER_SIZE = 0x00009886 # type: ignore
REG_A6XX_PC_CNTL = 0x00009b00 # type: ignore
A6XX_PC_CNTL_PRIMITIVE_RESTART = 0x00000001 # type: ignore
A6XX_PC_CNTL_PROVOKING_VTX_LAST = 0x00000002 # type: ignore
A6XX_PC_CNTL_D3D_VERTEX_ORDERING = 0x00000004 # type: ignore
A6XX_PC_CNTL_UNK3 = 0x00000008 # type: ignore
# Per-stage PC output control (VS/GS/HS/DS share the same field layout).
REG_A6XX_PC_VS_CNTL = 0x00009b01 # type: ignore
A6XX_PC_VS_CNTL_STRIDE_IN_VPC__MASK = 0x000000ff # type: ignore
A6XX_PC_VS_CNTL_STRIDE_IN_VPC__SHIFT = 0 # type: ignore
A6XX_PC_VS_CNTL_PSIZE = 0x00000100 # type: ignore
A6XX_PC_VS_CNTL_LAYER = 0x00000200 # type: ignore
A6XX_PC_VS_CNTL_VIEW = 0x00000400 # type: ignore
A6XX_PC_VS_CNTL_PRIMITIVE_ID = 0x00000800 # type: ignore
A6XX_PC_VS_CNTL_CLIP_MASK__MASK = 0x00ff0000 # type: ignore
A6XX_PC_VS_CNTL_CLIP_MASK__SHIFT = 16 # type: ignore
A6XX_PC_VS_CNTL_SHADINGRATE = 0x01000000 # type: ignore
REG_A6XX_PC_GS_CNTL = 0x00009b02 # type: ignore
A6XX_PC_GS_CNTL_STRIDE_IN_VPC__MASK = 0x000000ff # type: ignore
A6XX_PC_GS_CNTL_STRIDE_IN_VPC__SHIFT = 0 # type: ignore
A6XX_PC_GS_CNTL_PSIZE = 0x00000100 # type: ignore
A6XX_PC_GS_CNTL_LAYER = 0x00000200 # type: ignore
A6XX_PC_GS_CNTL_VIEW = 0x00000400 # type: ignore
A6XX_PC_GS_CNTL_PRIMITIVE_ID = 0x00000800 # type: ignore
A6XX_PC_GS_CNTL_CLIP_MASK__MASK = 0x00ff0000 # type: ignore
A6XX_PC_GS_CNTL_CLIP_MASK__SHIFT = 16 # type: ignore
A6XX_PC_GS_CNTL_SHADINGRATE = 0x01000000 # type: ignore
REG_A6XX_PC_HS_CNTL = 0x00009b03 # type: ignore
A6XX_PC_HS_CNTL_STRIDE_IN_VPC__MASK = 0x000000ff # type: ignore
A6XX_PC_HS_CNTL_STRIDE_IN_VPC__SHIFT = 0 # type: ignore
A6XX_PC_HS_CNTL_PSIZE = 0x00000100 # type: ignore
A6XX_PC_HS_CNTL_LAYER = 0x00000200 # type: ignore
A6XX_PC_HS_CNTL_VIEW = 0x00000400 # type: ignore
A6XX_PC_HS_CNTL_PRIMITIVE_ID = 0x00000800 # type: ignore
A6XX_PC_HS_CNTL_CLIP_MASK__MASK = 0x00ff0000 # type: ignore
A6XX_PC_HS_CNTL_CLIP_MASK__SHIFT = 16 # type: ignore
A6XX_PC_HS_CNTL_SHADINGRATE = 0x01000000 # type: ignore
REG_A6XX_PC_DS_CNTL = 0x00009b04 # type: ignore
A6XX_PC_DS_CNTL_STRIDE_IN_VPC__MASK = 0x000000ff # type: ignore
A6XX_PC_DS_CNTL_STRIDE_IN_VPC__SHIFT = 0 # type: ignore
A6XX_PC_DS_CNTL_PSIZE = 0x00000100 # type: ignore
A6XX_PC_DS_CNTL_LAYER = 0x00000200 # type: ignore
A6XX_PC_DS_CNTL_VIEW = 0x00000400 # type: ignore
A6XX_PC_DS_CNTL_PRIMITIVE_ID = 0x00000800 # type: ignore
A6XX_PC_DS_CNTL_CLIP_MASK__MASK = 0x00ff0000 # type: ignore
A6XX_PC_DS_CNTL_CLIP_MASK__SHIFT = 16 # type: ignore
A6XX_PC_DS_CNTL_SHADINGRATE = 0x01000000 # type: ignore
REG_A6XX_PC_GS_PARAM_0 = 0x00009b05 # type: ignore
A6XX_PC_GS_PARAM_0_GS_VERTICES_OUT__MASK = 0x000000ff # type: ignore
A6XX_PC_GS_PARAM_0_GS_VERTICES_OUT__SHIFT = 0 # type: ignore
A6XX_PC_GS_PARAM_0_GS_INVOCATIONS__MASK = 0x00007c00 # type: ignore
A6XX_PC_GS_PARAM_0_GS_INVOCATIONS__SHIFT = 10 # type: ignore
A6XX_PC_GS_PARAM_0_LINELENGTHEN = 0x00008000 # type: ignore
A6XX_PC_GS_PARAM_0_GS_OUTPUT__MASK = 0x00030000 # type: ignore
A6XX_PC_GS_PARAM_0_GS_OUTPUT__SHIFT = 16 # type: ignore
A6XX_PC_GS_PARAM_0_UNK18 = 0x00040000 # type: ignore
REG_A6XX_PC_PRIMITIVE_CNTL_6 = 0x00009b06 # type: ignore
A6XX_PC_PRIMITIVE_CNTL_6_STRIDE_IN_VPC__MASK = 0x000007ff # type: ignore
A6XX_PC_PRIMITIVE_CNTL_6_STRIDE_IN_VPC__SHIFT = 0 # type: ignore
REG_A6XX_PC_STEREO_RENDERING_CNTL = 0x00009b07 # type: ignore
A6XX_PC_STEREO_RENDERING_CNTL_ENABLE = 0x00000001 # type: ignore
A6XX_PC_STEREO_RENDERING_CNTL_DISABLEMULTIPOS = 0x00000002 # type: ignore
A6XX_PC_STEREO_RENDERING_CNTL_VIEWS__MASK = 0x0000007c # type: ignore
A6XX_PC_STEREO_RENDERING_CNTL_VIEWS__SHIFT = 2 # type: ignore
REG_A6XX_PC_STEREO_RENDERING_VIEWMASK = 0x00009b08 # type: ignore
REG_A6XX_PC_2D_EVENT_CMD = 0x00009c00 # type: ignore
A6XX_PC_2D_EVENT_CMD_EVENT__MASK = 0x0000007f # type: ignore
A6XX_PC_2D_EVENT_CMD_EVENT__SHIFT = 0 # type: ignore
A6XX_PC_2D_EVENT_CMD_STATE_ID__MASK = 0x0000ff00 # type: ignore
A6XX_PC_2D_EVENT_CMD_STATE_ID__SHIFT = 8 # type: ignore
REG_A6XX_PC_DBG_ECO_CNTL = 0x00009e00 # type: ignore
REG_A6XX_PC_ADDR_MODE_CNTL = 0x00009e01 # type: ignore
REG_A6XX_PC_DMA_BASE = 0x00009e04 # type: ignore
REG_A6XX_PC_DMA_OFFSET = 0x00009e06 # type: ignore
REG_A6XX_PC_DMA_SIZE = 0x00009e07 # type: ignore
REG_A6XX_PC_TESS_BASE = 0x00009e08 # type: ignore
REG_A7XX_PC_TESS_BASE = 0x00009810 # type: ignore
REG_A6XX_PC_DRAWCALL_CNTL = 0x00009e0b # type: ignore
A6XX_PC_DRAWCALL_CNTL_PRIM_TYPE__MASK = 0x0000003f # type: ignore
A6XX_PC_DRAWCALL_CNTL_PRIM_TYPE__SHIFT = 0 # type: ignore
A6XX_PC_DRAWCALL_CNTL_SOURCE_SELECT__MASK = 0x000000c0 # type: ignore
A6XX_PC_DRAWCALL_CNTL_SOURCE_SELECT__SHIFT = 6 # type: ignore
A6XX_PC_DRAWCALL_CNTL_VIS_CULL__MASK = 0x00000300 # type: ignore
A6XX_PC_DRAWCALL_CNTL_VIS_CULL__SHIFT = 8 # type: ignore
A6XX_PC_DRAWCALL_CNTL_INDEX_SIZE__MASK = 0x00000c00 # type: ignore
A6XX_PC_DRAWCALL_CNTL_INDEX_SIZE__SHIFT = 10 # type: ignore
A6XX_PC_DRAWCALL_CNTL_PATCH_TYPE__MASK = 0x00003000 # type: ignore
A6XX_PC_DRAWCALL_CNTL_PATCH_TYPE__SHIFT = 12 # type: ignore
A6XX_PC_DRAWCALL_CNTL_GS_ENABLE = 0x00010000 # type: ignore
A6XX_PC_DRAWCALL_CNTL_TESS_ENABLE = 0x00020000 # type: ignore
REG_A6XX_PC_DRAWCALL_INSTANCE_NUM = 0x00009e0c # type: ignore
REG_A6XX_PC_DRAWCALL_SIZE = 0x00009e0d # type: ignore
REG_A6XX_PC_VIS_STREAM_CNTL = 0x00009e11 # type: ignore
A6XX_PC_VIS_STREAM_CNTL_UNK0__MASK = 0x0000ffff # type: ignore
A6XX_PC_VIS_STREAM_CNTL_UNK0__SHIFT = 0 # type: ignore
A6XX_PC_VIS_STREAM_CNTL_VSC_SIZE__MASK = 0x003f0000 # type: ignore
A6XX_PC_VIS_STREAM_CNTL_VSC_SIZE__SHIFT = 16 # type: ignore
A6XX_PC_VIS_STREAM_CNTL_VSC_N__MASK = 0x07c00000 # type: ignore
A6XX_PC_VIS_STREAM_CNTL_VSC_N__SHIFT = 22 # type: ignore
REG_A6XX_PC_PVIS_STREAM_BIN_BASE = 0x00009e12 # type: ignore
REG_A6XX_PC_DVIS_STREAM_BIN_BASE = 0x00009e14 # type: ignore
REG_A6XX_PC_DRAWCALL_CNTL_OVERRIDE = 0x00009e1c # type: ignore
A6XX_PC_DRAWCALL_CNTL_OVERRIDE_OVERRIDE = 0x00000001 # type: ignore
REG_A7XX_PC_UNKNOWN_9E24 = 0x00009e24 # type: ignore
REG_A6XX_PC_PERFCTR_PC_SEL = lambda i0: (0x00009e34 + 0x1*i0 ) # type: ignore
REG_A7XX_PC_PERFCTR_PC_SEL = lambda i0: (0x00009e42 + 0x1*i0 ) # type: ignore
REG_A6XX_PC_UNKNOWN_9E72 = 0x00009e72 # type: ignore
REG_A6XX_VFD_CNTL_0 = 0x0000a000 # type: ignore
A6XX_VFD_CNTL_0_FETCH_CNT__MASK = 0x0000003f # type: ignore
A6XX_VFD_CNTL_0_FETCH_CNT__SHIFT = 0 # type: ignore
A6XX_VFD_CNTL_0_DECODE_CNT__MASK = 0x00003f00 # type: ignore
A6XX_VFD_CNTL_0_DECODE_CNT__SHIFT = 8 # type: ignore
REG_A6XX_VFD_CNTL_1 = 0x0000a001 # type: ignore
A6XX_VFD_CNTL_1_REGID4VTX__MASK = 0x000000ff # type: ignore
A6XX_VFD_CNTL_1_REGID4VTX__SHIFT = 0 # type: ignore
A6XX_VFD_CNTL_1_REGID4INST__MASK = 0x0000ff00 # type: ignore
A6XX_VFD_CNTL_1_REGID4INST__SHIFT = 8 # type: ignore
A6XX_VFD_CNTL_1_REGID4PRIMID__MASK = 0x00ff0000 # type: ignore
A6XX_VFD_CNTL_1_REGID4PRIMID__SHIFT = 16 # type: ignore
A6XX_VFD_CNTL_1_REGID4VIEWID__MASK = 0xff000000 # type: ignore
A6XX_VFD_CNTL_1_REGID4VIEWID__SHIFT = 24 # type: ignore
REG_A6XX_VFD_CNTL_2 = 0x0000a002 # type: ignore
A6XX_VFD_CNTL_2_REGID_HSRELPATCHID__MASK = 0x000000ff # type: ignore
A6XX_VFD_CNTL_2_REGID_HSRELPATCHID__SHIFT = 0 # type: ignore
A6XX_VFD_CNTL_2_REGID_INVOCATIONID__MASK = 0x0000ff00 # type: ignore
A6XX_VFD_CNTL_2_REGID_INVOCATIONID__SHIFT = 8 # type: ignore
REG_A6XX_VFD_CNTL_3 = 0x0000a003 # type: ignore
A6XX_VFD_CNTL_3_REGID_DSPRIMID__MASK = 0x000000ff # type: ignore
A6XX_VFD_CNTL_3_REGID_DSPRIMID__SHIFT = 0 # type: ignore
A6XX_VFD_CNTL_3_REGID_DSRELPATCHID__MASK = 0x0000ff00 # type: ignore
A6XX_VFD_CNTL_3_REGID_DSRELPATCHID__SHIFT = 8 # type: ignore
A6XX_VFD_CNTL_3_REGID_TESSX__MASK = 0x00ff0000 # type: ignore
A6XX_VFD_CNTL_3_REGID_TESSX__SHIFT = 16 # type: ignore
A6XX_VFD_CNTL_3_REGID_TESSY__MASK = 0xff000000 # type: ignore
A6XX_VFD_CNTL_3_REGID_TESSY__SHIFT = 24 # type: ignore
REG_A6XX_VFD_CNTL_4 = 0x0000a004 # type: ignore
A6XX_VFD_CNTL_4_UNK0__MASK = 0x000000ff # type: ignore
A6XX_VFD_CNTL_4_UNK0__SHIFT = 0 # type: ignore
REG_A6XX_VFD_CNTL_5 = 0x0000a005 # type: ignore
A6XX_VFD_CNTL_5_REGID_GSHEADER__MASK = 0x000000ff # type: ignore
A6XX_VFD_CNTL_5_REGID_GSHEADER__SHIFT = 0 # type: ignore
A6XX_VFD_CNTL_5_UNK8__MASK = 0x0000ff00 # type: ignore
A6XX_VFD_CNTL_5_UNK8__SHIFT = 8 # type: ignore
REG_A6XX_VFD_CNTL_6 = 0x0000a006 # type: ignore
A6XX_VFD_CNTL_6_PRIMID4PSEN = 0x00000001 # type: ignore
REG_A6XX_VFD_RENDER_MODE = 0x0000a007 # type: ignore
A6XX_VFD_RENDER_MODE_RENDER_MODE__MASK = 0x00000007 # type: ignore
A6XX_VFD_RENDER_MODE_RENDER_MODE__SHIFT = 0 # type: ignore
REG_A6XX_VFD_STEREO_RENDERING_CNTL = 0x0000a008 # type: ignore
A6XX_VFD_STEREO_RENDERING_CNTL_ENABLE = 0x00000001 # type: ignore
A6XX_VFD_STEREO_RENDERING_CNTL_DISABLEMULTIPOS = 0x00000002 # type: ignore
A6XX_VFD_STEREO_RENDERING_CNTL_VIEWS__MASK = 0x0000007c # type: ignore
A6XX_VFD_STEREO_RENDERING_CNTL_VIEWS__SHIFT = 2 # type: ignore
REG_A6XX_VFD_MODE_CNTL = 0x0000a009 # type: ignore
A6XX_VFD_MODE_CNTL_VERTEX = 0x00000001 # type: ignore
A6XX_VFD_MODE_CNTL_INSTANCE = 0x00000002 # type: ignore
REG_A6XX_VFD_INDEX_OFFSET = 0x0000a00e # type: ignore
REG_A6XX_VFD_INSTANCE_START_OFFSET = 0x0000a00f # type: ignore
REG_A6XX_VFD_VERTEX_BUFFER = lambda i0: (0x0000a010 + 0x4*i0 ) # type: ignore
REG_A6XX_VFD_FETCH_INSTR = lambda i0: (0x0000a090 + 0x2*i0 ) # type: ignore
A6XX_VFD_FETCH_INSTR_INSTR_IDX__MASK = 0x0000001f # type: ignore
A6XX_VFD_FETCH_INSTR_INSTR_IDX__SHIFT = 0 # type: ignore
A6XX_VFD_FETCH_INSTR_INSTR_OFFSET__MASK = 0x0001ffe0 # type: ignore
A6XX_VFD_FETCH_INSTR_INSTR_OFFSET__SHIFT = 5 # type: ignore
A6XX_VFD_FETCH_INSTR_INSTR_INSTANCED = 0x00020000 # type: ignore
A6XX_VFD_FETCH_INSTR_INSTR_FORMAT__MASK = 0x0ff00000 # type: ignore
A6XX_VFD_FETCH_INSTR_INSTR_FORMAT__SHIFT = 20 # type: ignore
A6XX_VFD_FETCH_INSTR_INSTR_SWAP__MASK = 0x30000000 # type: ignore
A6XX_VFD_FETCH_INSTR_INSTR_SWAP__SHIFT = 28 # type: ignore
A6XX_VFD_FETCH_INSTR_INSTR_UNK30 = 0x40000000 # type: ignore
A6XX_VFD_FETCH_INSTR_INSTR_FLOAT = 0x80000000 # type: ignore
REG_A6XX_VFD_DEST_CNTL = lambda i0: (0x0000a0d0 + 0x1*i0 ) # type: ignore
A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK = 0x0000000f # type: ignore
A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK__SHIFT = 0 # type: ignore
A6XX_VFD_DEST_CNTL_INSTR_REGID__MASK = 0x00000ff0 # type: ignore
A6XX_VFD_DEST_CNTL_INSTR_REGID__SHIFT = 4 # type: ignore
REG_A6XX_VFD_POWER_CNTL = 0x0000a0f8 # type: ignore
REG_A7XX_VFD_DBG_ECO_CNTL = 0x0000a600 # type: ignore
REG_A6XX_VFD_ADDR_MODE_CNTL = 0x0000a601 # type: ignore
REG_A6XX_VFD_PERFCTR_VFD_SEL = lambda i0: (0x0000a610 + 0x1*i0 ) # type: ignore
REG_A7XX_VFD_PERFCTR_VFD_SEL = lambda i0: (0x0000a610 + 0x1*i0 ) # type: ignore
REG_A6XX_SP_VS_CNTL_0 = 0x0000a800 # type: ignore
A6XX_SP_VS_CNTL_0_THREADMODE__MASK = 0x00000001 # type: ignore
A6XX_SP_VS_CNTL_0_THREADMODE__SHIFT = 0 # type: ignore
A6XX_SP_VS_CNTL_0_HALFREGFOOTPRINT__MASK = 0x0000007e # type: ignore
A6XX_SP_VS_CNTL_0_HALFREGFOOTPRINT__SHIFT = 1 # type: ignore
A6XX_SP_VS_CNTL_0_FULLREGFOOTPRINT__MASK = 0x00001f80 # type: ignore
A6XX_SP_VS_CNTL_0_FULLREGFOOTPRINT__SHIFT = 7 # type: ignore
A6XX_SP_VS_CNTL_0_UNK13 = 0x00002000 # type: ignore
A6XX_SP_VS_CNTL_0_BRANCHSTACK__MASK = 0x000fc000 # type: ignore
A6XX_SP_VS_CNTL_0_BRANCHSTACK__SHIFT = 14 # type: ignore
A6XX_SP_VS_CNTL_0_MERGEDREGS = 0x00100000 # type: ignore
A6XX_SP_VS_CNTL_0_EARLYPREAMBLE = 0x00200000 # type: ignore
REG_A6XX_SP_VS_BOOLEAN_CF_MASK = 0x0000a801 # type: ignore
REG_A6XX_SP_VS_OUTPUT_CNTL = 0x0000a802 # type: ignore
A6XX_SP_VS_OUTPUT_CNTL_OUT__MASK = 0x0000003f # type: ignore
A6XX_SP_VS_OUTPUT_CNTL_OUT__SHIFT = 0 # type: ignore
A6XX_SP_VS_OUTPUT_CNTL_FLAGS_REGID__MASK = 0x00003fc0 # type: ignore
A6XX_SP_VS_OUTPUT_CNTL_FLAGS_REGID__SHIFT = 6 # type: ignore
REG_A6XX_SP_VS_OUTPUT = lambda i0: (0x0000a803 + 0x1*i0 ) # type: ignore
A6XX_SP_VS_OUTPUT_REG_A_REGID__MASK = 0x000000ff # type: ignore
A6XX_SP_VS_OUTPUT_REG_A_REGID__SHIFT = 0 # type: ignore
A6XX_SP_VS_OUTPUT_REG_A_COMPMASK__MASK = 0x00000f00 # type: ignore
A6XX_SP_VS_OUTPUT_REG_A_COMPMASK__SHIFT = 8 # type: ignore
A6XX_SP_VS_OUTPUT_REG_B_REGID__MASK = 0x00ff0000 # type: ignore
A6XX_SP_VS_OUTPUT_REG_B_REGID__SHIFT = 16 # type: ignore
A6XX_SP_VS_OUTPUT_REG_B_COMPMASK__MASK = 0x0f000000 # type: ignore
A6XX_SP_VS_OUTPUT_REG_B_COMPMASK__SHIFT = 24 # type: ignore
REG_A6XX_SP_VS_VPC_DEST = lambda i0: (0x0000a813 + 0x1*i0 ) # type: ignore
A6XX_SP_VS_VPC_DEST_REG_OUTLOC0__MASK = 0x000000ff # type: ignore
A6XX_SP_VS_VPC_DEST_REG_OUTLOC0__SHIFT = 0 # type: ignore
A6XX_SP_VS_VPC_DEST_REG_OUTLOC1__MASK = 0x0000ff00 # type: ignore
A6XX_SP_VS_VPC_DEST_REG_OUTLOC1__SHIFT = 8 # type: ignore
A6XX_SP_VS_VPC_DEST_REG_OUTLOC2__MASK = 0x00ff0000 # type: ignore
A6XX_SP_VS_VPC_DEST_REG_OUTLOC2__SHIFT = 16 # type: ignore
A6XX_SP_VS_VPC_DEST_REG_OUTLOC3__MASK = 0xff000000 # type: ignore
A6XX_SP_VS_VPC_DEST_REG_OUTLOC3__SHIFT = 24 # type: ignore
REG_A6XX_SP_VS_PROGRAM_COUNTER_OFFSET = 0x0000a81b # type: ignore
REG_A6XX_SP_VS_BASE = 0x0000a81c # type: ignore
REG_A6XX_SP_VS_PVT_MEM_PARAM = 0x0000a81e # type: ignore
A6XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK = 0x000000ff # type: ignore
A6XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT = 0 # type: ignore
A6XX_SP_VS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK = 0xff000000 # type: ignore
A6XX_SP_VS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT = 24 # type: ignore
REG_A6XX_SP_VS_PVT_MEM_BASE = 0x0000a81f # type: ignore
REG_A6XX_SP_VS_PVT_MEM_SIZE = 0x0000a821 # type: ignore
A6XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK = 0x0003ffff # type: ignore
A6XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT = 0 # type: ignore
A6XX_SP_VS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT = 0x80000000 # type: ignore
REG_A6XX_SP_VS_TSIZE = 0x0000a822 # type: ignore
REG_A6XX_SP_VS_CONFIG = 0x0000a823 # type: ignore
A6XX_SP_VS_CONFIG_BINDLESS_TEX = 0x00000001 # type: ignore
A6XX_SP_VS_CONFIG_BINDLESS_SAMP = 0x00000002 # type: ignore
A6XX_SP_VS_CONFIG_BINDLESS_UAV = 0x00000004 # type: ignore
A6XX_SP_VS_CONFIG_BINDLESS_UBO = 0x00000008 # type: ignore
A6XX_SP_VS_CONFIG_ENABLED = 0x00000100 # type: ignore
A6XX_SP_VS_CONFIG_NTEX__MASK = 0x0001fe00 # type: ignore
A6XX_SP_VS_CONFIG_NTEX__SHIFT = 9 # type: ignore
A6XX_SP_VS_CONFIG_NSAMP__MASK = 0x003e0000 # type: ignore
A6XX_SP_VS_CONFIG_NSAMP__SHIFT = 17 # type: ignore
A6XX_SP_VS_CONFIG_NUAV__MASK = 0x1fc00000 # type: ignore
A6XX_SP_VS_CONFIG_NUAV__SHIFT = 22 # type: ignore
REG_A6XX_SP_VS_INSTR_SIZE = 0x0000a824 # type: ignore
REG_A6XX_SP_VS_PVT_MEM_STACK_OFFSET = 0x0000a825 # type: ignore
A6XX_SP_VS_PVT_MEM_STACK_OFFSET_OFFSET__MASK = 0x0007ffff # type: ignore
A6XX_SP_VS_PVT_MEM_STACK_OFFSET_OFFSET__SHIFT = 0 # type: ignore
REG_A7XX_SP_VS_VGS_CNTL = 0x0000a82d # type: ignore
REG_A6XX_SP_HS_CNTL_0 = 0x0000a830 # type: ignore
A6XX_SP_HS_CNTL_0_THREADMODE__MASK = 0x00000001 # type: ignore
A6XX_SP_HS_CNTL_0_THREADMODE__SHIFT = 0 # type: ignore
A6XX_SP_HS_CNTL_0_HALFREGFOOTPRINT__MASK = 0x0000007e # type: ignore
A6XX_SP_HS_CNTL_0_HALFREGFOOTPRINT__SHIFT = 1 # type: ignore
A6XX_SP_HS_CNTL_0_FULLREGFOOTPRINT__MASK = 0x00001f80 # type: ignore
A6XX_SP_HS_CNTL_0_FULLREGFOOTPRINT__SHIFT = 7 # type: ignore
A6XX_SP_HS_CNTL_0_UNK13 = 0x00002000 # type: ignore
A6XX_SP_HS_CNTL_0_BRANCHSTACK__MASK = 0x000fc000 # type: ignore
A6XX_SP_HS_CNTL_0_BRANCHSTACK__SHIFT = 14 # type: ignore
A6XX_SP_HS_CNTL_0_EARLYPREAMBLE = 0x00100000 # type: ignore
REG_A6XX_SP_HS_CNTL_1 = 0x0000a831 # type: ignore
REG_A6XX_SP_HS_BOOLEAN_CF_MASK = 0x0000a832 # type: ignore
REG_A6XX_SP_HS_PROGRAM_COUNTER_OFFSET = 0x0000a833 # type: ignore
REG_A6XX_SP_HS_BASE = 0x0000a834 # type: ignore
REG_A6XX_SP_HS_PVT_MEM_PARAM = 0x0000a836 # type: ignore
A6XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK = 0x000000ff # type: ignore
A6XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT = 0 # type: ignore
A6XX_SP_HS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK = 0xff000000 # type: ignore
A6XX_SP_HS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT = 24 # type: ignore
REG_A6XX_SP_HS_PVT_MEM_BASE = 0x0000a837 # type: ignore
REG_A6XX_SP_HS_PVT_MEM_SIZE = 0x0000a839 # type: ignore
A6XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK = 0x0003ffff # type: ignore
A6XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT = 0 # type: ignore
A6XX_SP_HS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT = 0x80000000 # type: ignore
REG_A6XX_SP_HS_TSIZE = 0x0000a83a # type: ignore
REG_A6XX_SP_HS_CONFIG = 0x0000a83b # type: ignore
A6XX_SP_HS_CONFIG_BINDLESS_TEX = 0x00000001 # type: ignore
A6XX_SP_HS_CONFIG_BINDLESS_SAMP = 0x00000002 # type: ignore
A6XX_SP_HS_CONFIG_BINDLESS_UAV = 0x00000004 # type: ignore
A6XX_SP_HS_CONFIG_BINDLESS_UBO = 0x00000008 # type: ignore
A6XX_SP_HS_CONFIG_ENABLED = 0x00000100 # type: ignore
A6XX_SP_HS_CONFIG_NTEX__MASK = 0x0001fe00 # type: ignore
A6XX_SP_HS_CONFIG_NTEX__SHIFT = 9 # type: ignore
A6XX_SP_HS_CONFIG_NSAMP__MASK = 0x003e0000 # type: ignore
A6XX_SP_HS_CONFIG_NSAMP__SHIFT = 17 # type: ignore
A6XX_SP_HS_CONFIG_NUAV__MASK = 0x1fc00000 # type: ignore
A6XX_SP_HS_CONFIG_NUAV__SHIFT = 22 # type: ignore
REG_A6XX_SP_HS_INSTR_SIZE = 0x0000a83c # type: ignore
REG_A6XX_SP_HS_PVT_MEM_STACK_OFFSET = 0x0000a83d # type: ignore
A6XX_SP_HS_PVT_MEM_STACK_OFFSET_OFFSET__MASK = 0x0007ffff # type: ignore
A6XX_SP_HS_PVT_MEM_STACK_OFFSET_OFFSET__SHIFT = 0 # type: ignore
REG_A7XX_SP_HS_VGS_CNTL = 0x0000a82f # type: ignore
REG_A6XX_SP_DS_CNTL_0 = 0x0000a840 # type: ignore
A6XX_SP_DS_CNTL_0_THREADMODE__MASK = 0x00000001 # type: ignore
A6XX_SP_DS_CNTL_0_THREADMODE__SHIFT = 0 # type: ignore
A6XX_SP_DS_CNTL_0_HALFREGFOOTPRINT__MASK = 0x0000007e # type: ignore
A6XX_SP_DS_CNTL_0_HALFREGFOOTPRINT__SHIFT = 1 # type: ignore
A6XX_SP_DS_CNTL_0_FULLREGFOOTPRINT__MASK = 0x00001f80 # type: ignore
A6XX_SP_DS_CNTL_0_FULLREGFOOTPRINT__SHIFT = 7 # type: ignore
A6XX_SP_DS_CNTL_0_UNK13 = 0x00002000 # type: ignore
A6XX_SP_DS_CNTL_0_BRANCHSTACK__MASK = 0x000fc000 # type: ignore
A6XX_SP_DS_CNTL_0_BRANCHSTACK__SHIFT = 14 # type: ignore
A6XX_SP_DS_CNTL_0_EARLYPREAMBLE = 0x00100000 # type: ignore
REG_A6XX_SP_DS_BOOLEAN_CF_MASK = 0x0000a841 # type: ignore
REG_A6XX_SP_DS_OUTPUT_CNTL = 0x0000a842 # type: ignore
A6XX_SP_DS_OUTPUT_CNTL_OUT__MASK = 0x0000003f # type: ignore
A6XX_SP_DS_OUTPUT_CNTL_OUT__SHIFT = 0 # type: ignore
A6XX_SP_DS_OUTPUT_CNTL_FLAGS_REGID__MASK = 0x00003fc0 # type: ignore
A6XX_SP_DS_OUTPUT_CNTL_FLAGS_REGID__SHIFT = 6 # type: ignore
REG_A6XX_SP_DS_OUTPUT = lambda i0: (0x0000a843 + 0x1*i0 ) # type: ignore
A6XX_SP_DS_OUTPUT_REG_A_REGID__MASK = 0x000000ff # type: ignore
A6XX_SP_DS_OUTPUT_REG_A_REGID__SHIFT = 0 # type: ignore
A6XX_SP_DS_OUTPUT_REG_A_COMPMASK__MASK = 0x00000f00 # type: ignore
A6XX_SP_DS_OUTPUT_REG_A_COMPMASK__SHIFT = 8 # type: ignore
A6XX_SP_DS_OUTPUT_REG_B_REGID__MASK = 0x00ff0000 # type: ignore
A6XX_SP_DS_OUTPUT_REG_B_REGID__SHIFT = 16 # type: ignore
A6XX_SP_DS_OUTPUT_REG_B_COMPMASK__MASK = 0x0f000000 # type: ignore
A6XX_SP_DS_OUTPUT_REG_B_COMPMASK__SHIFT = 24 # type: ignore
REG_A6XX_SP_DS_VPC_DEST = lambda i0: (0x0000a853 + 0x1*i0 ) # type: ignore
A6XX_SP_DS_VPC_DEST_REG_OUTLOC0__MASK = 0x000000ff # type: ignore
A6XX_SP_DS_VPC_DEST_REG_OUTLOC0__SHIFT = 0 # type: ignore
A6XX_SP_DS_VPC_DEST_REG_OUTLOC1__MASK = 0x0000ff00 # type: ignore
A6XX_SP_DS_VPC_DEST_REG_OUTLOC1__SHIFT = 8 # type: ignore
A6XX_SP_DS_VPC_DEST_REG_OUTLOC2__MASK = 0x00ff0000 # type: ignore
A6XX_SP_DS_VPC_DEST_REG_OUTLOC2__SHIFT = 16 # type: ignore
A6XX_SP_DS_VPC_DEST_REG_OUTLOC3__MASK = 0xff000000 # type: ignore
A6XX_SP_DS_VPC_DEST_REG_OUTLOC3__SHIFT = 24 # type: ignore
REG_A6XX_SP_DS_PROGRAM_COUNTER_OFFSET = 0x0000a85b # type: ignore
REG_A6XX_SP_DS_BASE = 0x0000a85c # type: ignore
REG_A6XX_SP_DS_PVT_MEM_PARAM = 0x0000a85e # type: ignore
A6XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK = 0x000000ff # type: ignore
A6XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT = 0 # type: ignore
A6XX_SP_DS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK = 0xff000000 # type: ignore
A6XX_SP_DS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT = 24 # type: ignore
REG_A6XX_SP_DS_PVT_MEM_BASE = 0x0000a85f # type: ignore
REG_A6XX_SP_DS_PVT_MEM_SIZE = 0x0000a861 # type: ignore
A6XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK = 0x0003ffff # type: ignore
A6XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT = 0 # type: ignore
A6XX_SP_DS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT = 0x80000000 # type: ignore
REG_A6XX_SP_DS_TSIZE = 0x0000a862 # type: ignore
REG_A6XX_SP_DS_CONFIG = 0x0000a863 # type: ignore
A6XX_SP_DS_CONFIG_BINDLESS_TEX = 0x00000001 # type: ignore
A6XX_SP_DS_CONFIG_BINDLESS_SAMP = 0x00000002 # type: ignore
A6XX_SP_DS_CONFIG_BINDLESS_UAV = 0x00000004 # type: ignore
A6XX_SP_DS_CONFIG_BINDLESS_UBO = 0x00000008 # type: ignore
A6XX_SP_DS_CONFIG_ENABLED = 0x00000100 # type: ignore
A6XX_SP_DS_CONFIG_NTEX__MASK = 0x0001fe00 # type: ignore
A6XX_SP_DS_CONFIG_NTEX__SHIFT = 9 # type: ignore
A6XX_SP_DS_CONFIG_NSAMP__MASK = 0x003e0000 # type: ignore
A6XX_SP_DS_CONFIG_NSAMP__SHIFT = 17 # type: ignore
A6XX_SP_DS_CONFIG_NUAV__MASK = 0x1fc00000 # type: ignore
A6XX_SP_DS_CONFIG_NUAV__SHIFT = 22 # type: ignore
REG_A6XX_SP_DS_INSTR_SIZE = 0x0000a864 # type: ignore
REG_A6XX_SP_DS_PVT_MEM_STACK_OFFSET = 0x0000a865 # type: ignore
A6XX_SP_DS_PVT_MEM_STACK_OFFSET_OFFSET__MASK = 0x0007ffff # type: ignore
A6XX_SP_DS_PVT_MEM_STACK_OFFSET_OFFSET__SHIFT = 0 # type: ignore
REG_A7XX_SP_DS_VGS_CNTL = 0x0000a868 # type: ignore
REG_A6XX_SP_GS_CNTL_0 = 0x0000a870 # type: ignore
A6XX_SP_GS_CNTL_0_THREADMODE__MASK = 0x00000001 # type: ignore
A6XX_SP_GS_CNTL_0_THREADMODE__SHIFT = 0 # type: ignore
A6XX_SP_GS_CNTL_0_HALFREGFOOTPRINT__MASK = 0x0000007e # type: ignore
A6XX_SP_GS_CNTL_0_HALFREGFOOTPRINT__SHIFT = 1 # type: ignore
A6XX_SP_GS_CNTL_0_FULLREGFOOTPRINT__MASK = 0x00001f80 # type: ignore
A6XX_SP_GS_CNTL_0_FULLREGFOOTPRINT__SHIFT = 7 # type: ignore
A6XX_SP_GS_CNTL_0_UNK13 = 0x00002000 # type: ignore
A6XX_SP_GS_CNTL_0_BRANCHSTACK__MASK = 0x000fc000 # type: ignore
A6XX_SP_GS_CNTL_0_BRANCHSTACK__SHIFT = 14 # type: ignore
A6XX_SP_GS_CNTL_0_EARLYPREAMBLE = 0x00100000 # type: ignore
REG_A6XX_SP_GS_CNTL_1 = 0x0000a871 # type: ignore
REG_A6XX_SP_GS_BOOLEAN_CF_MASK = 0x0000a872 # type: ignore
REG_A6XX_SP_GS_OUTPUT_CNTL = 0x0000a873 # type: ignore
A6XX_SP_GS_OUTPUT_CNTL_OUT__MASK = 0x0000003f # type: ignore
A6XX_SP_GS_OUTPUT_CNTL_OUT__SHIFT = 0 # type: ignore
A6XX_SP_GS_OUTPUT_CNTL_FLAGS_REGID__MASK = 0x00003fc0 # type: ignore
A6XX_SP_GS_OUTPUT_CNTL_FLAGS_REGID__SHIFT = 6 # type: ignore
REG_A6XX_SP_GS_OUTPUT = lambda i0: (0x0000a874 + 0x1*i0 ) # type: ignore
A6XX_SP_GS_OUTPUT_REG_A_REGID__MASK = 0x000000ff # type: ignore
A6XX_SP_GS_OUTPUT_REG_A_REGID__SHIFT = 0 # type: ignore
A6XX_SP_GS_OUTPUT_REG_A_COMPMASK__MASK = 0x00000f00 # type: ignore
A6XX_SP_GS_OUTPUT_REG_A_COMPMASK__SHIFT = 8 # type: ignore
A6XX_SP_GS_OUTPUT_REG_B_REGID__MASK = 0x00ff0000 # type: ignore
A6XX_SP_GS_OUTPUT_REG_B_REGID__SHIFT = 16 # type: ignore
A6XX_SP_GS_OUTPUT_REG_B_COMPMASK__MASK = 0x0f000000 # type: ignore
A6XX_SP_GS_OUTPUT_REG_B_COMPMASK__SHIFT = 24 # type: ignore
REG_A6XX_SP_GS_VPC_DEST = lambda i0: (0x0000a884 + 0x1*i0 ) # type: ignore
A6XX_SP_GS_VPC_DEST_REG_OUTLOC0__MASK = 0x000000ff # type: ignore
A6XX_SP_GS_VPC_DEST_REG_OUTLOC0__SHIFT = 0 # type: ignore
A6XX_SP_GS_VPC_DEST_REG_OUTLOC1__MASK = 0x0000ff00 # type: ignore
A6XX_SP_GS_VPC_DEST_REG_OUTLOC1__SHIFT = 8 # type: ignore
A6XX_SP_GS_VPC_DEST_REG_OUTLOC2__MASK = 0x00ff0000 # type: ignore
A6XX_SP_GS_VPC_DEST_REG_OUTLOC2__SHIFT = 16 # type: ignore
A6XX_SP_GS_VPC_DEST_REG_OUTLOC3__MASK = 0xff000000 # type: ignore
A6XX_SP_GS_VPC_DEST_REG_OUTLOC3__SHIFT = 24 # type: ignore
REG_A6XX_SP_GS_PROGRAM_COUNTER_OFFSET = 0x0000a88c # type: ignore
REG_A6XX_SP_GS_BASE = 0x0000a88d # type: ignore
REG_A6XX_SP_GS_PVT_MEM_PARAM = 0x0000a88f # type: ignore
A6XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK = 0x000000ff # type: ignore
A6XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT = 0 # type: ignore
A6XX_SP_GS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK = 0xff000000 # type: ignore
A6XX_SP_GS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT = 24 # type: ignore
REG_A6XX_SP_GS_PVT_MEM_BASE = 0x0000a890 # type: ignore
REG_A6XX_SP_GS_PVT_MEM_SIZE = 0x0000a892 # type: ignore
A6XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK = 0x0003ffff # type: ignore
A6XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT = 0 # type: ignore
A6XX_SP_GS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT = 0x80000000 # type: ignore
REG_A6XX_SP_GS_TSIZE = 0x0000a893 # type: ignore
REG_A6XX_SP_GS_CONFIG = 0x0000a894 # type: ignore
A6XX_SP_GS_CONFIG_BINDLESS_TEX = 0x00000001 # type: ignore
A6XX_SP_GS_CONFIG_BINDLESS_SAMP = 0x00000002 # type: ignore
A6XX_SP_GS_CONFIG_BINDLESS_UAV = 0x00000004 # type: ignore
A6XX_SP_GS_CONFIG_BINDLESS_UBO = 0x00000008 # type: ignore
A6XX_SP_GS_CONFIG_ENABLED = 0x00000100 # type: ignore
A6XX_SP_GS_CONFIG_NTEX__MASK = 0x0001fe00 # type: ignore
A6XX_SP_GS_CONFIG_NTEX__SHIFT = 9 # type: ignore
A6XX_SP_GS_CONFIG_NSAMP__MASK = 0x003e0000 # type: ignore
A6XX_SP_GS_CONFIG_NSAMP__SHIFT = 17 # type: ignore
A6XX_SP_GS_CONFIG_NUAV__MASK = 0x1fc00000 # type: ignore
A6XX_SP_GS_CONFIG_NUAV__SHIFT = 22 # type: ignore
REG_A6XX_SP_GS_INSTR_SIZE = 0x0000a895 # type: ignore
REG_A6XX_SP_GS_PVT_MEM_STACK_OFFSET = 0x0000a896 # type: ignore
A6XX_SP_GS_PVT_MEM_STACK_OFFSET_OFFSET__MASK = 0x0007ffff # type: ignore
A6XX_SP_GS_PVT_MEM_STACK_OFFSET_OFFSET__SHIFT = 0 # type: ignore
REG_A7XX_SP_GS_VGS_CNTL = 0x0000a899 # type: ignore
REG_A6XX_SP_VS_SAMPLER_BASE = 0x0000a8a0 # type: ignore
REG_A6XX_SP_HS_SAMPLER_BASE = 0x0000a8a2 # type: ignore
REG_A6XX_SP_DS_SAMPLER_BASE = 0x0000a8a4 # type: ignore
REG_A6XX_SP_GS_SAMPLER_BASE = 0x0000a8a6 # type: ignore
REG_A6XX_SP_VS_TEXMEMOBJ_BASE = 0x0000a8a8 # type: ignore
REG_A6XX_SP_HS_TEXMEMOBJ_BASE = 0x0000a8aa # type: ignore
REG_A6XX_SP_DS_TEXMEMOBJ_BASE = 0x0000a8ac # type: ignore
REG_A6XX_SP_GS_TEXMEMOBJ_BASE = 0x0000a8ae # type: ignore
REG_A6XX_SP_PS_CNTL_0 = 0x0000a980 # type: ignore
A6XX_SP_PS_CNTL_0_THREADMODE__MASK = 0x00000001 # type: ignore
A6XX_SP_PS_CNTL_0_THREADMODE__SHIFT = 0 # type: ignore
A6XX_SP_PS_CNTL_0_HALFREGFOOTPRINT__MASK = 0x0000007e # type: ignore
A6XX_SP_PS_CNTL_0_HALFREGFOOTPRINT__SHIFT = 1 # type: ignore
A6XX_SP_PS_CNTL_0_FULLREGFOOTPRINT__MASK = 0x00001f80 # type: ignore
A6XX_SP_PS_CNTL_0_FULLREGFOOTPRINT__SHIFT = 7 # type: ignore
A6XX_SP_PS_CNTL_0_UNK13 = 0x00002000 # type: ignore
A6XX_SP_PS_CNTL_0_BRANCHSTACK__MASK = 0x000fc000 # type: ignore
A6XX_SP_PS_CNTL_0_BRANCHSTACK__SHIFT = 14 # type: ignore
A6XX_SP_PS_CNTL_0_THREADSIZE__MASK = 0x00100000 # type: ignore
A6XX_SP_PS_CNTL_0_THREADSIZE__SHIFT = 20 # type: ignore
A6XX_SP_PS_CNTL_0_UNK21 = 0x00200000 # type: ignore
A6XX_SP_PS_CNTL_0_VARYING = 0x00400000 # type: ignore
A6XX_SP_PS_CNTL_0_LODPIXMASK = 0x00800000 # type: ignore
A6XX_SP_PS_CNTL_0_INOUTREGOVERLAP = 0x01000000 # type: ignore
A6XX_SP_PS_CNTL_0_UNK25 = 0x02000000 # type: ignore
A6XX_SP_PS_CNTL_0_PIXLODENABLE = 0x04000000 # type: ignore
A6XX_SP_PS_CNTL_0_UNK27 = 0x08000000 # type: ignore
A6XX_SP_PS_CNTL_0_EARLYPREAMBLE = 0x10000000 # type: ignore
A6XX_SP_PS_CNTL_0_MERGEDREGS = 0x80000000 # type: ignore
REG_A6XX_SP_PS_BOOLEAN_CF_MASK = 0x0000a981 # type: ignore
REG_A6XX_SP_PS_PROGRAM_COUNTER_OFFSET = 0x0000a982 # type: ignore
REG_A6XX_SP_PS_BASE = 0x0000a983 # type: ignore
REG_A6XX_SP_PS_PVT_MEM_PARAM = 0x0000a985 # type: ignore
A6XX_SP_PS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK = 0x000000ff # type: ignore
A6XX_SP_PS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT = 0 # type: ignore
A6XX_SP_PS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK = 0xff000000 # type: ignore
A6XX_SP_PS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT = 24 # type: ignore
REG_A6XX_SP_PS_PVT_MEM_BASE = 0x0000a986 # type: ignore
REG_A6XX_SP_PS_PVT_MEM_SIZE = 0x0000a988 # type: ignore
A6XX_SP_PS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK = 0x0003ffff # type: ignore
A6XX_SP_PS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT = 0 # type: ignore
A6XX_SP_PS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT = 0x80000000 # type: ignore
REG_A6XX_SP_BLEND_CNTL = 0x0000a989 # type: ignore
A6XX_SP_BLEND_CNTL_ENABLE_BLEND__MASK = 0x000000ff # type: ignore
A6XX_SP_BLEND_CNTL_ENABLE_BLEND__SHIFT = 0 # type: ignore
A6XX_SP_BLEND_CNTL_UNK8 = 0x00000100 # type: ignore
A6XX_SP_BLEND_CNTL_DUAL_COLOR_IN_ENABLE = 0x00000200 # type: ignore
A6XX_SP_BLEND_CNTL_ALPHA_TO_COVERAGE = 0x00000400 # type: ignore
REG_A6XX_SP_SRGB_CNTL = 0x0000a98a # type: ignore
A6XX_SP_SRGB_CNTL_SRGB_MRT0 = 0x00000001 # type: ignore
A6XX_SP_SRGB_CNTL_SRGB_MRT1 = 0x00000002 # type: ignore
A6XX_SP_SRGB_CNTL_SRGB_MRT2 = 0x00000004 # type: ignore
A6XX_SP_SRGB_CNTL_SRGB_MRT3 = 0x00000008 # type: ignore
A6XX_SP_SRGB_CNTL_SRGB_MRT4 = 0x00000010 # type: ignore
A6XX_SP_SRGB_CNTL_SRGB_MRT5 = 0x00000020 # type: ignore
A6XX_SP_SRGB_CNTL_SRGB_MRT6 = 0x00000040 # type: ignore
A6XX_SP_SRGB_CNTL_SRGB_MRT7 = 0x00000080 # type: ignore
REG_A6XX_SP_PS_OUTPUT_MASK = 0x0000a98b # type: ignore
A6XX_SP_PS_OUTPUT_MASK_RT0__MASK = 0x0000000f # type: ignore
A6XX_SP_PS_OUTPUT_MASK_RT0__SHIFT = 0 # type: ignore
A6XX_SP_PS_OUTPUT_MASK_RT1__MASK = 0x000000f0 # type: ignore
A6XX_SP_PS_OUTPUT_MASK_RT1__SHIFT = 4 # type: ignore
A6XX_SP_PS_OUTPUT_MASK_RT2__MASK = 0x00000f00 # type: ignore
A6XX_SP_PS_OUTPUT_MASK_RT2__SHIFT = 8 # type: ignore
A6XX_SP_PS_OUTPUT_MASK_RT3__MASK = 0x0000f000 # type: ignore
A6XX_SP_PS_OUTPUT_MASK_RT3__SHIFT = 12 # type: ignore
A6XX_SP_PS_OUTPUT_MASK_RT4__MASK = 0x000f0000 # type: ignore
A6XX_SP_PS_OUTPUT_MASK_RT4__SHIFT = 16 # type: ignore
A6XX_SP_PS_OUTPUT_MASK_RT5__MASK = 0x00f00000 # type: ignore
A6XX_SP_PS_OUTPUT_MASK_RT5__SHIFT = 20 # type: ignore
A6XX_SP_PS_OUTPUT_MASK_RT6__MASK = 0x0f000000 # type: ignore
A6XX_SP_PS_OUTPUT_MASK_RT6__SHIFT = 24 # type: ignore
A6XX_SP_PS_OUTPUT_MASK_RT7__MASK = 0xf0000000 # type: ignore
A6XX_SP_PS_OUTPUT_MASK_RT7__SHIFT = 28 # type: ignore
REG_A6XX_SP_PS_OUTPUT_CNTL = 0x0000a98c # type: ignore
A6XX_SP_PS_OUTPUT_CNTL_DUAL_COLOR_IN_ENABLE = 0x00000001 # type: ignore
A6XX_SP_PS_OUTPUT_CNTL_DEPTH_REGID__MASK = 0x0000ff00 # type: ignore
A6XX_SP_PS_OUTPUT_CNTL_DEPTH_REGID__SHIFT = 8 # type: ignore
A6XX_SP_PS_OUTPUT_CNTL_SAMPMASK_REGID__MASK = 0x00ff0000 # type: ignore
A6XX_SP_PS_OUTPUT_CNTL_SAMPMASK_REGID__SHIFT = 16 # type: ignore
A6XX_SP_PS_OUTPUT_CNTL_STENCILREF_REGID__MASK = 0xff000000 # type: ignore
A6XX_SP_PS_OUTPUT_CNTL_STENCILREF_REGID__SHIFT = 24 # type: ignore
REG_A6XX_SP_PS_MRT_CNTL = 0x0000a98d # type: ignore
A6XX_SP_PS_MRT_CNTL_MRT__MASK = 0x0000000f # type: ignore
A6XX_SP_PS_MRT_CNTL_MRT__SHIFT = 0 # type: ignore
REG_A6XX_SP_PS_OUTPUT = lambda i0: (0x0000a98e + 0x1*i0 ) # type: ignore
A6XX_SP_PS_OUTPUT_REG_REGID__MASK = 0x000000ff # type: ignore
A6XX_SP_PS_OUTPUT_REG_REGID__SHIFT = 0 # type: ignore
A6XX_SP_PS_OUTPUT_REG_HALF_PRECISION = 0x00000100 # type: ignore
REG_A6XX_SP_PS_MRT = lambda i0: (0x0000a996 + 0x1*i0 ) # type: ignore
A6XX_SP_PS_MRT_REG_COLOR_FORMAT__MASK = 0x000000ff # type: ignore
A6XX_SP_PS_MRT_REG_COLOR_FORMAT__SHIFT = 0 # type: ignore
A6XX_SP_PS_MRT_REG_COLOR_SINT = 0x00000100 # type: ignore
A6XX_SP_PS_MRT_REG_COLOR_UINT = 0x00000200 # type: ignore
A6XX_SP_PS_MRT_REG_UNK10 = 0x00000400 # type: ignore
REG_A6XX_SP_PS_INITIAL_TEX_LOAD_CNTL = 0x0000a99e # type: ignore
A6XX_SP_PS_INITIAL_TEX_LOAD_CNTL_COUNT__MASK = 0x00000007 # type: ignore
A6XX_SP_PS_INITIAL_TEX_LOAD_CNTL_COUNT__SHIFT = 0 # type: ignore
A6XX_SP_PS_INITIAL_TEX_LOAD_CNTL_IJ_WRITE_DISABLE = 0x00000008 # type: ignore
A6XX_SP_PS_INITIAL_TEX_LOAD_CNTL_ENDOFQUAD = 0x00000010 # type: ignore
A6XX_SP_PS_INITIAL_TEX_LOAD_CNTL_WRITE_COLOR_TO_OUTPUT = 0x00000020 # type: ignore
A6XX_SP_PS_INITIAL_TEX_LOAD_CNTL_CONSTSLOTID__MASK = 0x00007fc0 # type: ignore
A6XX_SP_PS_INITIAL_TEX_LOAD_CNTL_CONSTSLOTID__SHIFT = 6 # type: ignore
A6XX_SP_PS_INITIAL_TEX_LOAD_CNTL_CONSTSLOTID4COORD__MASK = 0x01ff0000 # type: ignore
A6XX_SP_PS_INITIAL_TEX_LOAD_CNTL_CONSTSLOTID4COORD__SHIFT = 16 # type: ignore
REG_A6XX_SP_PS_INITIAL_TEX_LOAD = lambda i0: (0x0000a99f + 0x1*i0 ) # type: ignore
A6XX_SP_PS_INITIAL_TEX_LOAD_CMD_SRC__MASK = 0x0000007f # type: ignore
A6XX_SP_PS_INITIAL_TEX_LOAD_CMD_SRC__SHIFT = 0 # type: ignore
A6XX_SP_PS_INITIAL_TEX_LOAD_CMD_SAMP_ID__MASK = 0x00000780 # type: ignore
A6XX_SP_PS_INITIAL_TEX_LOAD_CMD_SAMP_ID__SHIFT = 7 # type: ignore
A6XX_SP_PS_INITIAL_TEX_LOAD_CMD_TEX_ID__MASK = 0x0000f800 # type: ignore
A6XX_SP_PS_INITIAL_TEX_LOAD_CMD_TEX_ID__SHIFT = 11 # type: ignore
A6XX_SP_PS_INITIAL_TEX_LOAD_CMD_DST__MASK = 0x003f0000 # type: ignore
A6XX_SP_PS_INITIAL_TEX_LOAD_CMD_DST__SHIFT = 16 # type: ignore
A6XX_SP_PS_INITIAL_TEX_LOAD_CMD_WRMASK__MASK = 0x03c00000 # type: ignore
A6XX_SP_PS_INITIAL_TEX_LOAD_CMD_WRMASK__SHIFT = 22 # type: ignore
A6XX_SP_PS_INITIAL_TEX_LOAD_CMD_HALF = 0x04000000 # type: ignore
A6XX_SP_PS_INITIAL_TEX_LOAD_CMD_UNK27 = 0x08000000 # type: ignore
A6XX_SP_PS_INITIAL_TEX_LOAD_CMD_BINDLESS = 0x10000000 # type: ignore
A6XX_SP_PS_INITIAL_TEX_LOAD_CMD_CMD__MASK = 0xe0000000 # type: ignore
A6XX_SP_PS_INITIAL_TEX_LOAD_CMD_CMD__SHIFT = 29 # type: ignore
REG_A7XX_SP_PS_INITIAL_TEX_LOAD = lambda i0: (0x0000a99f + 0x1*i0 ) # type: ignore
A7XX_SP_PS_INITIAL_TEX_LOAD_CMD_SRC__MASK = 0x0000007f # type: ignore
A7XX_SP_PS_INITIAL_TEX_LOAD_CMD_SRC__SHIFT = 0 # type: ignore
A7XX_SP_PS_INITIAL_TEX_LOAD_CMD_SAMP_ID__MASK = 0x00000380 # type: ignore
A7XX_SP_PS_INITIAL_TEX_LOAD_CMD_SAMP_ID__SHIFT = 7 # type: ignore
A7XX_SP_PS_INITIAL_TEX_LOAD_CMD_TEX_ID__MASK = 0x00001c00 # type: ignore
A7XX_SP_PS_INITIAL_TEX_LOAD_CMD_TEX_ID__SHIFT = 10 # type: ignore
A7XX_SP_PS_INITIAL_TEX_LOAD_CMD_DST__MASK = 0x0007e000 # type: ignore
A7XX_SP_PS_INITIAL_TEX_LOAD_CMD_DST__SHIFT = 13 # type: ignore
A7XX_SP_PS_INITIAL_TEX_LOAD_CMD_WRMASK__MASK = 0x00780000 # type: ignore
A7XX_SP_PS_INITIAL_TEX_LOAD_CMD_WRMASK__SHIFT = 19 # type: ignore
A7XX_SP_PS_INITIAL_TEX_LOAD_CMD_HALF = 0x00800000 # type: ignore
A7XX_SP_PS_INITIAL_TEX_LOAD_CMD_BINDLESS = 0x02000000 # type: ignore
A7XX_SP_PS_INITIAL_TEX_LOAD_CMD_CMD__MASK = 0x3c000000 # type: ignore
A7XX_SP_PS_INITIAL_TEX_LOAD_CMD_CMD__SHIFT = 26 # type: ignore
REG_A6XX_SP_PS_INITIAL_TEX_INDEX = lambda i0: (0x0000a9a3 + 0x1*i0 ) # type: ignore
A6XX_SP_PS_INITIAL_TEX_INDEX_CMD_SAMP_ID__MASK = 0x0000ffff # type: ignore
A6XX_SP_PS_INITIAL_TEX_INDEX_CMD_SAMP_ID__SHIFT = 0 # type: ignore
A6XX_SP_PS_INITIAL_TEX_INDEX_CMD_TEX_ID__MASK = 0xffff0000 # type: ignore
A6XX_SP_PS_INITIAL_TEX_INDEX_CMD_TEX_ID__SHIFT = 16 # type: ignore
REG_A6XX_SP_PS_TSIZE = 0x0000a9a7 # type: ignore
REG_A6XX_SP_UNKNOWN_A9A8 = 0x0000a9a8 # type: ignore
REG_A6XX_SP_PS_PVT_MEM_STACK_OFFSET = 0x0000a9a9 # type: ignore
A6XX_SP_PS_PVT_MEM_STACK_OFFSET_OFFSET__MASK = 0x0007ffff # type: ignore
A6XX_SP_PS_PVT_MEM_STACK_OFFSET_OFFSET__SHIFT = 0 # type: ignore
REG_A7XX_SP_PS_UNKNOWN_A9AB = 0x0000a9ab # type: ignore
REG_A6XX_SP_CS_CNTL_0 = 0x0000a9b0 # type: ignore
A6XX_SP_CS_CNTL_0_THREADMODE__MASK = 0x00000001 # type: ignore
A6XX_SP_CS_CNTL_0_THREADMODE__SHIFT = 0 # type: ignore
A6XX_SP_CS_CNTL_0_HALFREGFOOTPRINT__MASK = 0x0000007e # type: ignore
A6XX_SP_CS_CNTL_0_HALFREGFOOTPRINT__SHIFT = 1 # type: ignore
A6XX_SP_CS_CNTL_0_FULLREGFOOTPRINT__MASK = 0x00001f80 # type: ignore
A6XX_SP_CS_CNTL_0_FULLREGFOOTPRINT__SHIFT = 7 # type: ignore
A6XX_SP_CS_CNTL_0_UNK13 = 0x00002000 # type: ignore
A6XX_SP_CS_CNTL_0_BRANCHSTACK__MASK = 0x000fc000 # type: ignore
A6XX_SP_CS_CNTL_0_BRANCHSTACK__SHIFT = 14 # type: ignore
A6XX_SP_CS_CNTL_0_THREADSIZE__MASK = 0x00100000 # type: ignore
A6XX_SP_CS_CNTL_0_THREADSIZE__SHIFT = 20 # type: ignore
A6XX_SP_CS_CNTL_0_UNK21 = 0x00200000 # type: ignore
A6XX_SP_CS_CNTL_0_UNK22 = 0x00400000 # type: ignore
A6XX_SP_CS_CNTL_0_EARLYPREAMBLE = 0x00800000 # type: ignore
A6XX_SP_CS_CNTL_0_MERGEDREGS = 0x80000000 # type: ignore
REG_A6XX_SP_CS_CNTL_1 = 0x0000a9b1 # type: ignore
A6XX_SP_CS_CNTL_1_SHARED_SIZE__MASK = 0x0000001f # type: ignore
A6XX_SP_CS_CNTL_1_SHARED_SIZE__SHIFT = 0 # type: ignore
A6XX_SP_CS_CNTL_1_CONSTANTRAMMODE__MASK = 0x00000060 # type: ignore
A6XX_SP_CS_CNTL_1_CONSTANTRAMMODE__SHIFT = 5 # type: ignore
REG_A6XX_SP_CS_BOOLEAN_CF_MASK = 0x0000a9b2 # type: ignore
REG_A6XX_SP_CS_PROGRAM_COUNTER_OFFSET = 0x0000a9b3 # type: ignore
REG_A6XX_SP_CS_BASE = 0x0000a9b4 # type: ignore
REG_A6XX_SP_CS_PVT_MEM_PARAM = 0x0000a9b6 # type: ignore
A6XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK = 0x000000ff # type: ignore
A6XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT = 0 # type: ignore
A6XX_SP_CS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK = 0xff000000 # type: ignore
A6XX_SP_CS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT = 24 # type: ignore
REG_A6XX_SP_CS_PVT_MEM_BASE = 0x0000a9b7 # type: ignore
REG_A6XX_SP_CS_PVT_MEM_SIZE = 0x0000a9b9 # type: ignore
A6XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK = 0x0003ffff # type: ignore
A6XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT = 0 # type: ignore
A6XX_SP_CS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT = 0x80000000 # type: ignore
REG_A6XX_SP_CS_TSIZE = 0x0000a9ba # type: ignore
REG_A6XX_SP_CS_CONFIG = 0x0000a9bb # type: ignore
A6XX_SP_CS_CONFIG_BINDLESS_TEX = 0x00000001 # type: ignore
A6XX_SP_CS_CONFIG_BINDLESS_SAMP = 0x00000002 # type: ignore
A6XX_SP_CS_CONFIG_BINDLESS_UAV = 0x00000004 # type: ignore
A6XX_SP_CS_CONFIG_BINDLESS_UBO = 0x00000008 # type: ignore
A6XX_SP_CS_CONFIG_ENABLED = 0x00000100 # type: ignore
A6XX_SP_CS_CONFIG_NTEX__MASK = 0x0001fe00 # type: ignore
A6XX_SP_CS_CONFIG_NTEX__SHIFT = 9 # type: ignore
A6XX_SP_CS_CONFIG_NSAMP__MASK = 0x003e0000 # type: ignore
A6XX_SP_CS_CONFIG_NSAMP__SHIFT = 17 # type: ignore
A6XX_SP_CS_CONFIG_NUAV__MASK = 0x1fc00000 # type: ignore
A6XX_SP_CS_CONFIG_NUAV__SHIFT = 22 # type: ignore
REG_A6XX_SP_CS_INSTR_SIZE = 0x0000a9bc # type: ignore
REG_A6XX_SP_CS_PVT_MEM_STACK_OFFSET = 0x0000a9bd # type: ignore
A6XX_SP_CS_PVT_MEM_STACK_OFFSET_OFFSET__MASK = 0x0007ffff # type: ignore
A6XX_SP_CS_PVT_MEM_STACK_OFFSET_OFFSET__SHIFT = 0 # type: ignore
REG_A7XX_SP_CS_UNKNOWN_A9BE = 0x0000a9be # type: ignore
REG_A7XX_SP_CS_VGS_CNTL = 0x0000a9c5 # type: ignore
REG_A6XX_SP_CS_WIE_CNTL_0 = 0x0000a9c2 # type: ignore
A6XX_SP_CS_WIE_CNTL_0_WGIDCONSTID__MASK = 0x000000ff # type: ignore
A6XX_SP_CS_WIE_CNTL_0_WGIDCONSTID__SHIFT = 0 # type: ignore
A6XX_SP_CS_WIE_CNTL_0_WGSIZECONSTID__MASK = 0x0000ff00 # type: ignore
A6XX_SP_CS_WIE_CNTL_0_WGSIZECONSTID__SHIFT = 8 # type: ignore
A6XX_SP_CS_WIE_CNTL_0_WGOFFSETCONSTID__MASK = 0x00ff0000 # type: ignore
A6XX_SP_CS_WIE_CNTL_0_WGOFFSETCONSTID__SHIFT = 16 # type: ignore
A6XX_SP_CS_WIE_CNTL_0_LOCALIDREGID__MASK = 0xff000000 # type: ignore
A6XX_SP_CS_WIE_CNTL_0_LOCALIDREGID__SHIFT = 24 # type: ignore
REG_A6XX_SP_CS_WIE_CNTL_1 = 0x0000a9c3 # type: ignore
A6XX_SP_CS_WIE_CNTL_1_LINEARLOCALIDREGID__MASK = 0x000000ff # type: ignore
A6XX_SP_CS_WIE_CNTL_1_LINEARLOCALIDREGID__SHIFT = 0 # type: ignore
A6XX_SP_CS_WIE_CNTL_1_SINGLE_SP_CORE = 0x00000100 # type: ignore
A6XX_SP_CS_WIE_CNTL_1_THREADSIZE__MASK = 0x00000200 # type: ignore
A6XX_SP_CS_WIE_CNTL_1_THREADSIZE__SHIFT = 9 # type: ignore
A6XX_SP_CS_WIE_CNTL_1_THREADSIZE_SCALAR = 0x00000400 # type: ignore
REG_A7XX_SP_CS_WIE_CNTL_1 = 0x0000a9c3 # type: ignore
A7XX_SP_CS_WIE_CNTL_1_LINEARLOCALIDREGID__MASK = 0x000000ff # type: ignore
A7XX_SP_CS_WIE_CNTL_1_LINEARLOCALIDREGID__SHIFT = 0 # type: ignore
A7XX_SP_CS_WIE_CNTL_1_THREADSIZE__MASK = 0x00000100 # type: ignore
A7XX_SP_CS_WIE_CNTL_1_THREADSIZE__SHIFT = 8 # type: ignore
A7XX_SP_CS_WIE_CNTL_1_THREADSIZE_SCALAR = 0x00000200 # type: ignore
A7XX_SP_CS_WIE_CNTL_1_WORKITEMRASTORDER__MASK = 0x00008000 # type: ignore
A7XX_SP_CS_WIE_CNTL_1_WORKITEMRASTORDER__SHIFT = 15 # type: ignore
REG_A6XX_SP_PS_SAMPLER_BASE = 0x0000a9e0 # type: ignore
REG_A6XX_SP_CS_SAMPLER_BASE = 0x0000a9e2 # type: ignore
REG_A6XX_SP_PS_TEXMEMOBJ_BASE = 0x0000a9e4 # type: ignore
REG_A6XX_SP_CS_TEXMEMOBJ_BASE = 0x0000a9e6 # type: ignore
REG_A6XX_SP_CS_BINDLESS_BASE = lambda i0: (0x0000a9e8 + 0x2*i0 ) # type: ignore
A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK = 0x00000003 # type: ignore
A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT = 0 # type: ignore
A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK = 0xfffffffffffffffc # type: ignore
A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT = 2 # type: ignore
REG_A7XX_SP_CS_BINDLESS_BASE = lambda i0: (0x0000a9e8 + 0x2*i0 ) # type: ignore
A7XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK = 0x00000003 # type: ignore
A7XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT = 0 # type: ignore
A7XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK = 0xfffffffffffffffc # type: ignore
A7XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT = 2 # type: ignore
REG_A6XX_SP_CS_UAV_BASE = 0x0000a9f2 # type: ignore
REG_A7XX_SP_CS_UAV_BASE = 0x0000a9f8 # type: ignore
REG_A6XX_SP_CS_USIZE = 0x0000aa00 # type: ignore
REG_A7XX_SP_PS_VGS_CNTL = 0x0000aa01 # type: ignore
REG_A7XX_SP_PS_OUTPUT_CONST_CNTL = 0x0000aa02 # type: ignore
A7XX_SP_PS_OUTPUT_CONST_CNTL_ENABLED = 0x00000001 # type: ignore
REG_A7XX_SP_PS_OUTPUT_CONST_MASK = 0x0000aa03 # type: ignore
A7XX_SP_PS_OUTPUT_CONST_MASK_RT0__MASK = 0x0000000f # type: ignore
A7XX_SP_PS_OUTPUT_CONST_MASK_RT0__SHIFT = 0 # type: ignore
A7XX_SP_PS_OUTPUT_CONST_MASK_RT1__MASK = 0x000000f0 # type: ignore
A7XX_SP_PS_OUTPUT_CONST_MASK_RT1__SHIFT = 4 # type: ignore
A7XX_SP_PS_OUTPUT_CONST_MASK_RT2__MASK = 0x00000f00 # type: ignore
A7XX_SP_PS_OUTPUT_CONST_MASK_RT2__SHIFT = 8 # type: ignore
A7XX_SP_PS_OUTPUT_CONST_MASK_RT3__MASK = 0x0000f000 # type: ignore
A7XX_SP_PS_OUTPUT_CONST_MASK_RT3__SHIFT = 12 # type: ignore
A7XX_SP_PS_OUTPUT_CONST_MASK_RT4__MASK = 0x000f0000 # type: ignore
A7XX_SP_PS_OUTPUT_CONST_MASK_RT4__SHIFT = 16 # type: ignore
A7XX_SP_PS_OUTPUT_CONST_MASK_RT5__MASK = 0x00f00000 # type: ignore
A7XX_SP_PS_OUTPUT_CONST_MASK_RT5__SHIFT = 20 # type: ignore
A7XX_SP_PS_OUTPUT_CONST_MASK_RT6__MASK = 0x0f000000 # type: ignore
A7XX_SP_PS_OUTPUT_CONST_MASK_RT6__SHIFT = 24 # type: ignore
A7XX_SP_PS_OUTPUT_CONST_MASK_RT7__MASK = 0xf0000000 # type: ignore
A7XX_SP_PS_OUTPUT_CONST_MASK_RT7__SHIFT = 28 # type: ignore
REG_A6XX_SP_UNKNOWN_AAF2 = 0x0000aaf2 # type: ignore
REG_A6XX_SP_MODE_CNTL = 0x0000ab00 # type: ignore
A6XX_SP_MODE_CNTL_CONSTANT_DEMOTION_ENABLE = 0x00000001 # type: ignore
A6XX_SP_MODE_CNTL_ISAMMODE__MASK = 0x00000006 # type: ignore
A6XX_SP_MODE_CNTL_ISAMMODE__SHIFT = 1 # type: ignore
A6XX_SP_MODE_CNTL_SHARED_CONSTS_ENABLE = 0x00000008 # type: ignore
REG_A7XX_SP_UNKNOWN_AB01 = 0x0000ab01 # type: ignore
REG_A7XX_SP_UNKNOWN_AB02 = 0x0000ab02 # type: ignore
REG_A6XX_SP_PS_CONFIG = 0x0000ab04 # type: ignore
A6XX_SP_PS_CONFIG_BINDLESS_TEX = 0x00000001 # type: ignore
A6XX_SP_PS_CONFIG_BINDLESS_SAMP = 0x00000002 # type: ignore
A6XX_SP_PS_CONFIG_BINDLESS_UAV = 0x00000004 # type: ignore
A6XX_SP_PS_CONFIG_BINDLESS_UBO = 0x00000008 # type: ignore
A6XX_SP_PS_CONFIG_ENABLED = 0x00000100 # type: ignore
A6XX_SP_PS_CONFIG_NTEX__MASK = 0x0001fe00 # type: ignore
A6XX_SP_PS_CONFIG_NTEX__SHIFT = 9 # type: ignore
A6XX_SP_PS_CONFIG_NSAMP__MASK = 0x003e0000 # type: ignore
A6XX_SP_PS_CONFIG_NSAMP__SHIFT = 17 # type: ignore
A6XX_SP_PS_CONFIG_NUAV__MASK = 0x1fc00000 # type: ignore
A6XX_SP_PS_CONFIG_NUAV__SHIFT = 22 # type: ignore
REG_A6XX_SP_PS_INSTR_SIZE = 0x0000ab05 # type: ignore
REG_A6XX_SP_GFX_BINDLESS_BASE = lambda i0: (0x0000ab10 + 0x2*i0 ) # type: ignore
A6XX_SP_GFX_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK = 0x00000003 # type: ignore
A6XX_SP_GFX_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT = 0 # type: ignore
A6XX_SP_GFX_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK = 0xfffffffffffffffc # type: ignore
A6XX_SP_GFX_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT = 2 # type: ignore
REG_A7XX_SP_GFX_BINDLESS_BASE = lambda i0: (0x0000ab0a + 0x2*i0 ) # type: ignore
A7XX_SP_GFX_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK = 0x00000003 # type: ignore
A7XX_SP_GFX_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT = 0 # type: ignore
A7XX_SP_GFX_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK = 0xfffffffffffffffc # type: ignore
A7XX_SP_GFX_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT = 2 # type: ignore
REG_A6XX_SP_GFX_UAV_BASE = 0x0000ab1a # type: ignore
REG_A6XX_SP_GFX_USIZE = 0x0000ab20 # type: ignore
REG_A7XX_SP_UNKNOWN_AB22 = 0x0000ab22 # type: ignore
REG_A6XX_SP_A2D_OUTPUT_INFO = 0x0000acc0 # type: ignore
A6XX_SP_A2D_OUTPUT_INFO_HALF_PRECISION = 0x00000001 # type: ignore
A6XX_SP_A2D_OUTPUT_INFO_IFMT_TYPE__MASK = 0x00000006 # type: ignore
A6XX_SP_A2D_OUTPUT_INFO_IFMT_TYPE__SHIFT = 1 # type: ignore
A6XX_SP_A2D_OUTPUT_INFO_COLOR_FORMAT__MASK = 0x000007f8 # type: ignore
A6XX_SP_A2D_OUTPUT_INFO_COLOR_FORMAT__SHIFT = 3 # type: ignore
A6XX_SP_A2D_OUTPUT_INFO_SRGB = 0x00000800 # type: ignore
A6XX_SP_A2D_OUTPUT_INFO_MASK__MASK = 0x0000f000 # type: ignore
A6XX_SP_A2D_OUTPUT_INFO_MASK__SHIFT = 12 # type: ignore
REG_A7XX_SP_A2D_OUTPUT_INFO = 0x0000a9bf # type: ignore
A7XX_SP_A2D_OUTPUT_INFO_HALF_PRECISION = 0x00000001 # type: ignore
A7XX_SP_A2D_OUTPUT_INFO_IFMT_TYPE__MASK = 0x00000006 # type: ignore
A7XX_SP_A2D_OUTPUT_INFO_IFMT_TYPE__SHIFT = 1 # type: ignore
A7XX_SP_A2D_OUTPUT_INFO_COLOR_FORMAT__MASK = 0x000007f8 # type: ignore
A7XX_SP_A2D_OUTPUT_INFO_COLOR_FORMAT__SHIFT = 3 # type: ignore
A7XX_SP_A2D_OUTPUT_INFO_SRGB = 0x00000800 # type: ignore
A7XX_SP_A2D_OUTPUT_INFO_MASK__MASK = 0x0000f000 # type: ignore
A7XX_SP_A2D_OUTPUT_INFO_MASK__SHIFT = 12 # type: ignore
REG_A6XX_SP_DBG_ECO_CNTL = 0x0000ae00 # type: ignore
REG_A6XX_SP_ADDR_MODE_CNTL = 0x0000ae01 # type: ignore
REG_A6XX_SP_NC_MODE_CNTL = 0x0000ae02 # type: ignore
REG_A6XX_SP_CHICKEN_BITS = 0x0000ae03 # type: ignore
REG_A6XX_SP_NC_MODE_CNTL_2 = 0x0000ae04 # type: ignore
A6XX_SP_NC_MODE_CNTL_2_F16_NO_INF = 0x00000008 # type: ignore
REG_A7XX_SP_UNKNOWN_AE06 = 0x0000ae06 # type: ignore
REG_A7XX_SP_CHICKEN_BITS_1 = 0x0000ae08 # type: ignore
REG_A7XX_SP_CHICKEN_BITS_2 = 0x0000ae09 # type: ignore
REG_A7XX_SP_CHICKEN_BITS_3 = 0x0000ae0a # type: ignore
REG_A6XX_SP_PERFCTR_SHADER_MASK = 0x0000ae0f # type: ignore
A6XX_SP_PERFCTR_SHADER_MASK_VS = 0x00000001 # type: ignore
A6XX_SP_PERFCTR_SHADER_MASK_HS = 0x00000002 # type: ignore
A6XX_SP_PERFCTR_SHADER_MASK_DS = 0x00000004 # type: ignore
A6XX_SP_PERFCTR_SHADER_MASK_GS = 0x00000008 # type: ignore
A6XX_SP_PERFCTR_SHADER_MASK_FS = 0x00000010 # type: ignore
A6XX_SP_PERFCTR_SHADER_MASK_CS = 0x00000020 # type: ignore
REG_A6XX_SP_PERFCTR_SP_SEL = lambda i0: (0x0000ae10 + 0x1*i0 ) # type: ignore
REG_A7XX_SP_PERFCTR_HLSQ_SEL = lambda i0: (0x0000ae60 + 0x1*i0 ) # type: ignore
REG_A7XX_SP_UNKNOWN_AE6A = 0x0000ae6a # type: ignore
REG_A7XX_SP_UNKNOWN_AE6B = 0x0000ae6b # type: ignore
REG_A7XX_SP_HLSQ_DBG_ECO_CNTL = 0x0000ae6c # type: ignore
REG_A7XX_SP_READ_SEL = 0x0000ae6d # type: ignore
A7XX_SP_READ_SEL_LOCATION__MASK = 0x000c0000 # type: ignore
A7XX_SP_READ_SEL_LOCATION__SHIFT = 18 # type: ignore
A7XX_SP_READ_SEL_PIPE__MASK = 0x00030000 # type: ignore
A7XX_SP_READ_SEL_PIPE__SHIFT = 16 # type: ignore
A7XX_SP_READ_SEL_STATETYPE__MASK = 0x0000ff00 # type: ignore
A7XX_SP_READ_SEL_STATETYPE__SHIFT = 8 # type: ignore
A7XX_SP_READ_SEL_USPTP__MASK = 0x000000f0 # type: ignore
A7XX_SP_READ_SEL_USPTP__SHIFT = 4 # type: ignore
A7XX_SP_READ_SEL_SPTP__MASK = 0x0000000f # type: ignore
A7XX_SP_READ_SEL_SPTP__SHIFT = 0 # type: ignore
REG_A7XX_SP_DBG_CNTL = 0x0000ae71 # type: ignore
REG_A7XX_SP_UNKNOWN_AE73 = 0x0000ae73 # type: ignore
REG_A7XX_SP_PERFCTR_SP_SEL = lambda i0: (0x0000ae80 + 0x1*i0 ) # type: ignore
REG_A6XX_SP_CONTEXT_SWITCH_GFX_PREEMPTION_SAFE_MODE = 0x0000be22 # type: ignore
REG_A6XX_TPL1_CS_BORDER_COLOR_BASE = 0x0000b180 # type: ignore
REG_A6XX_SP_UNKNOWN_B182 = 0x0000b182 # type: ignore
REG_A6XX_SP_UNKNOWN_B183 = 0x0000b183 # type: ignore
REG_A6XX_SP_UNKNOWN_B190 = 0x0000b190 # type: ignore
REG_A6XX_SP_UNKNOWN_B191 = 0x0000b191 # type: ignore
REG_A6XX_TPL1_RAS_MSAA_CNTL = 0x0000b300 # type: ignore
A6XX_TPL1_RAS_MSAA_CNTL_SAMPLES__MASK = 0x00000003 # type: ignore
A6XX_TPL1_RAS_MSAA_CNTL_SAMPLES__SHIFT = 0 # type: ignore
A6XX_TPL1_RAS_MSAA_CNTL_UNK2__MASK = 0x0000000c # type: ignore
A6XX_TPL1_RAS_MSAA_CNTL_UNK2__SHIFT = 2 # type: ignore
REG_A6XX_TPL1_DEST_MSAA_CNTL = 0x0000b301 # type: ignore
A6XX_TPL1_DEST_MSAA_CNTL_SAMPLES__MASK = 0x00000003 # type: ignore
A6XX_TPL1_DEST_MSAA_CNTL_SAMPLES__SHIFT = 0 # type: ignore
A6XX_TPL1_DEST_MSAA_CNTL_MSAA_DISABLE = 0x00000004 # type: ignore
REG_A6XX_TPL1_GFX_BORDER_COLOR_BASE = 0x0000b302 # type: ignore
REG_A6XX_TPL1_MSAA_SAMPLE_POS_CNTL = 0x0000b304 # type: ignore
A6XX_TPL1_MSAA_SAMPLE_POS_CNTL_UNK0 = 0x00000001 # type: ignore
A6XX_TPL1_MSAA_SAMPLE_POS_CNTL_LOCATION_ENABLE = 0x00000002 # type: ignore
REG_A6XX_TPL1_PROGRAMMABLE_MSAA_POS_0 = 0x0000b305 # type: ignore
A6XX_TPL1_PROGRAMMABLE_MSAA_POS_0_SAMPLE_0_X__MASK = 0x0000000f # type: ignore
A6XX_TPL1_PROGRAMMABLE_MSAA_POS_0_SAMPLE_0_X__SHIFT = 0 # type: ignore
A6XX_TPL1_PROGRAMMABLE_MSAA_POS_0_SAMPLE_0_Y__MASK = 0x000000f0 # type: ignore
A6XX_TPL1_PROGRAMMABLE_MSAA_POS_0_SAMPLE_0_Y__SHIFT = 4 # type: ignore
A6XX_TPL1_PROGRAMMABLE_MSAA_POS_0_SAMPLE_1_X__MASK = 0x00000f00 # type: ignore
A6XX_TPL1_PROGRAMMABLE_MSAA_POS_0_SAMPLE_1_X__SHIFT = 8 # type: ignore
A6XX_TPL1_PROGRAMMABLE_MSAA_POS_0_SAMPLE_1_Y__MASK = 0x0000f000 # type: ignore
A6XX_TPL1_PROGRAMMABLE_MSAA_POS_0_SAMPLE_1_Y__SHIFT = 12 # type: ignore
A6XX_TPL1_PROGRAMMABLE_MSAA_POS_0_SAMPLE_2_X__MASK = 0x000f0000 # type: ignore
A6XX_TPL1_PROGRAMMABLE_MSAA_POS_0_SAMPLE_2_X__SHIFT = 16 # type: ignore
A6XX_TPL1_PROGRAMMABLE_MSAA_POS_0_SAMPLE_2_Y__MASK = 0x00f00000 # type: ignore
A6XX_TPL1_PROGRAMMABLE_MSAA_POS_0_SAMPLE_2_Y__SHIFT = 20 # type: ignore
A6XX_TPL1_PROGRAMMABLE_MSAA_POS_0_SAMPLE_3_X__MASK = 0x0f000000 # type: ignore
A6XX_TPL1_PROGRAMMABLE_MSAA_POS_0_SAMPLE_3_X__SHIFT = 24 # type: ignore
A6XX_TPL1_PROGRAMMABLE_MSAA_POS_0_SAMPLE_3_Y__MASK = 0xf0000000 # type: ignore
A6XX_TPL1_PROGRAMMABLE_MSAA_POS_0_SAMPLE_3_Y__SHIFT = 28 # type: ignore
REG_A6XX_TPL1_PROGRAMMABLE_MSAA_POS_1 = 0x0000b306 # type: ignore
A6XX_TPL1_PROGRAMMABLE_MSAA_POS_1_SAMPLE_0_X__MASK = 0x0000000f # type: ignore
A6XX_TPL1_PROGRAMMABLE_MSAA_POS_1_SAMPLE_0_X__SHIFT = 0 # type: ignore
A6XX_TPL1_PROGRAMMABLE_MSAA_POS_1_SAMPLE_0_Y__MASK = 0x000000f0 # type: ignore
A6XX_TPL1_PROGRAMMABLE_MSAA_POS_1_SAMPLE_0_Y__SHIFT = 4 # type: ignore
A6XX_TPL1_PROGRAMMABLE_MSAA_POS_1_SAMPLE_1_X__MASK = 0x00000f00 # type: ignore
A6XX_TPL1_PROGRAMMABLE_MSAA_POS_1_SAMPLE_1_X__SHIFT = 8 # type: ignore
A6XX_TPL1_PROGRAMMABLE_MSAA_POS_1_SAMPLE_1_Y__MASK = 0x0000f000 # type: ignore
A6XX_TPL1_PROGRAMMABLE_MSAA_POS_1_SAMPLE_1_Y__SHIFT = 12 # type: ignore
A6XX_TPL1_PROGRAMMABLE_MSAA_POS_1_SAMPLE_2_X__MASK = 0x000f0000 # type: ignore
A6XX_TPL1_PROGRAMMABLE_MSAA_POS_1_SAMPLE_2_X__SHIFT = 16 # type: ignore
A6XX_TPL1_PROGRAMMABLE_MSAA_POS_1_SAMPLE_2_Y__MASK = 0x00f00000 # type: ignore
A6XX_TPL1_PROGRAMMABLE_MSAA_POS_1_SAMPLE_2_Y__SHIFT = 20 # type: ignore
A6XX_TPL1_PROGRAMMABLE_MSAA_POS_1_SAMPLE_3_X__MASK = 0x0f000000 # type: ignore
A6XX_TPL1_PROGRAMMABLE_MSAA_POS_1_SAMPLE_3_X__SHIFT = 24 # type: ignore
A6XX_TPL1_PROGRAMMABLE_MSAA_POS_1_SAMPLE_3_Y__MASK = 0xf0000000 # type: ignore
A6XX_TPL1_PROGRAMMABLE_MSAA_POS_1_SAMPLE_3_Y__SHIFT = 28 # type: ignore
REG_A6XX_TPL1_WINDOW_OFFSET = 0x0000b307 # type: ignore
A6XX_TPL1_WINDOW_OFFSET_X__MASK = 0x00003fff # type: ignore
A6XX_TPL1_WINDOW_OFFSET_X__SHIFT = 0 # type: ignore
A6XX_TPL1_WINDOW_OFFSET_Y__MASK = 0x3fff0000 # type: ignore
A6XX_TPL1_WINDOW_OFFSET_Y__SHIFT = 16 # type: ignore
REG_A6XX_TPL1_MODE_CNTL = 0x0000b309 # type: ignore
A6XX_TPL1_MODE_CNTL_ISAMMODE__MASK = 0x00000003 # type: ignore
A6XX_TPL1_MODE_CNTL_ISAMMODE__SHIFT = 0 # type: ignore
A6XX_TPL1_MODE_CNTL_TEXCOORDROUNDMODE__MASK = 0x00000004 # type: ignore
A6XX_TPL1_MODE_CNTL_TEXCOORDROUNDMODE__SHIFT = 2 # type: ignore
A6XX_TPL1_MODE_CNTL_NEARESTMIPSNAP__MASK = 0x00000020 # type: ignore
A6XX_TPL1_MODE_CNTL_NEARESTMIPSNAP__SHIFT = 5 # type: ignore
A6XX_TPL1_MODE_CNTL_DESTDATATYPEOVERRIDE = 0x00000080 # type: ignore
REG_A7XX_SP_UNKNOWN_B310 = 0x0000b310 # type: ignore
REG_A6XX_TPL1_A2D_SRC_TEXTURE_INFO = 0x0000b4c0 # type: ignore
A6XX_TPL1_A2D_SRC_TEXTURE_INFO_COLOR_FORMAT__MASK = 0x000000ff # type: ignore
A6XX_TPL1_A2D_SRC_TEXTURE_INFO_COLOR_FORMAT__SHIFT = 0 # type: ignore
A6XX_TPL1_A2D_SRC_TEXTURE_INFO_TILE_MODE__MASK = 0x00000300 # type: ignore
A6XX_TPL1_A2D_SRC_TEXTURE_INFO_TILE_MODE__SHIFT = 8 # type: ignore
A6XX_TPL1_A2D_SRC_TEXTURE_INFO_COLOR_SWAP__MASK = 0x00000c00 # type: ignore
A6XX_TPL1_A2D_SRC_TEXTURE_INFO_COLOR_SWAP__SHIFT = 10 # type: ignore
A6XX_TPL1_A2D_SRC_TEXTURE_INFO_FLAGS = 0x00001000 # type: ignore
A6XX_TPL1_A2D_SRC_TEXTURE_INFO_SRGB = 0x00002000 # type: ignore
A6XX_TPL1_A2D_SRC_TEXTURE_INFO_SAMPLES__MASK = 0x0000c000 # type: ignore
A6XX_TPL1_A2D_SRC_TEXTURE_INFO_SAMPLES__SHIFT = 14 # type: ignore
A6XX_TPL1_A2D_SRC_TEXTURE_INFO_FILTER = 0x00010000 # type: ignore
A6XX_TPL1_A2D_SRC_TEXTURE_INFO_UNK17 = 0x00020000 # type: ignore
A6XX_TPL1_A2D_SRC_TEXTURE_INFO_SAMPLES_AVERAGE = 0x00040000 # type: ignore
A6XX_TPL1_A2D_SRC_TEXTURE_INFO_UNK19 = 0x00080000 # type: ignore
A6XX_TPL1_A2D_SRC_TEXTURE_INFO_UNK20 = 0x00100000 # type: ignore
A6XX_TPL1_A2D_SRC_TEXTURE_INFO_UNK21 = 0x00200000 # type: ignore
A6XX_TPL1_A2D_SRC_TEXTURE_INFO_UNK22 = 0x00400000 # type: ignore
A6XX_TPL1_A2D_SRC_TEXTURE_INFO_UNK23__MASK = 0x07800000 # type: ignore
A6XX_TPL1_A2D_SRC_TEXTURE_INFO_UNK23__SHIFT = 23 # type: ignore
A6XX_TPL1_A2D_SRC_TEXTURE_INFO_UNK28 = 0x10000000 # type: ignore
A6XX_TPL1_A2D_SRC_TEXTURE_INFO_MUTABLEEN = 0x20000000 # type: ignore
REG_A6XX_TPL1_A2D_SRC_TEXTURE_SIZE = 0x0000b4c1 # type: ignore
A6XX_TPL1_A2D_SRC_TEXTURE_SIZE_WIDTH__MASK = 0x00007fff # type: ignore
A6XX_TPL1_A2D_SRC_TEXTURE_SIZE_WIDTH__SHIFT = 0 # type: ignore
A6XX_TPL1_A2D_SRC_TEXTURE_SIZE_HEIGHT__MASK = 0x3fff8000 # type: ignore
A6XX_TPL1_A2D_SRC_TEXTURE_SIZE_HEIGHT__SHIFT = 15 # type: ignore
REG_A6XX_TPL1_A2D_SRC_TEXTURE_BASE = 0x0000b4c2 # type: ignore
REG_A6XX_TPL1_A2D_SRC_TEXTURE_PITCH = 0x0000b4c4 # type: ignore
A6XX_TPL1_A2D_SRC_TEXTURE_PITCH_UNK0__MASK = 0x000001ff # type: ignore
A6XX_TPL1_A2D_SRC_TEXTURE_PITCH_UNK0__SHIFT = 0 # type: ignore
A6XX_TPL1_A2D_SRC_TEXTURE_PITCH_PITCH__MASK = 0x00fffe00 # type: ignore
A6XX_TPL1_A2D_SRC_TEXTURE_PITCH_PITCH__SHIFT = 9 # type: ignore
REG_A7XX_TPL1_A2D_SRC_TEXTURE_INFO = 0x0000b2c0 # type: ignore
A7XX_TPL1_A2D_SRC_TEXTURE_INFO_COLOR_FORMAT__MASK = 0x000000ff # type: ignore
A7XX_TPL1_A2D_SRC_TEXTURE_INFO_COLOR_FORMAT__SHIFT = 0 # type: ignore
A7XX_TPL1_A2D_SRC_TEXTURE_INFO_TILE_MODE__MASK = 0x00000300 # type: ignore
A7XX_TPL1_A2D_SRC_TEXTURE_INFO_TILE_MODE__SHIFT = 8 # type: ignore
A7XX_TPL1_A2D_SRC_TEXTURE_INFO_COLOR_SWAP__MASK = 0x00000c00 # type: ignore
A7XX_TPL1_A2D_SRC_TEXTURE_INFO_COLOR_SWAP__SHIFT = 10 # type: ignore
A7XX_TPL1_A2D_SRC_TEXTURE_INFO_FLAGS = 0x00001000 # type: ignore
A7XX_TPL1_A2D_SRC_TEXTURE_INFO_SRGB = 0x00002000 # type: ignore
A7XX_TPL1_A2D_SRC_TEXTURE_INFO_SAMPLES__MASK = 0x0000c000 # type: ignore
A7XX_TPL1_A2D_SRC_TEXTURE_INFO_SAMPLES__SHIFT = 14 # type: ignore
A7XX_TPL1_A2D_SRC_TEXTURE_INFO_FILTER = 0x00010000 # type: ignore
A7XX_TPL1_A2D_SRC_TEXTURE_INFO_UNK17 = 0x00020000 # type: ignore
A7XX_TPL1_A2D_SRC_TEXTURE_INFO_SAMPLES_AVERAGE = 0x00040000 # type: ignore
A7XX_TPL1_A2D_SRC_TEXTURE_INFO_UNK19 = 0x00080000 # type: ignore
A7XX_TPL1_A2D_SRC_TEXTURE_INFO_UNK20 = 0x00100000 # type: ignore
A7XX_TPL1_A2D_SRC_TEXTURE_INFO_UNK21 = 0x00200000 # type: ignore
A7XX_TPL1_A2D_SRC_TEXTURE_INFO_UNK22 = 0x00400000 # type: ignore
A7XX_TPL1_A2D_SRC_TEXTURE_INFO_UNK23__MASK = 0x07800000 # type: ignore
A7XX_TPL1_A2D_SRC_TEXTURE_INFO_UNK23__SHIFT = 23 # type: ignore
A7XX_TPL1_A2D_SRC_TEXTURE_INFO_UNK28 = 0x10000000 # type: ignore
A7XX_TPL1_A2D_SRC_TEXTURE_INFO_MUTABLEEN = 0x20000000 # type: ignore
REG_A7XX_TPL1_A2D_SRC_TEXTURE_SIZE = 0x0000b2c1 # type: ignore
A7XX_TPL1_A2D_SRC_TEXTURE_SIZE_WIDTH__MASK = 0x00007fff # type: ignore
A7XX_TPL1_A2D_SRC_TEXTURE_SIZE_WIDTH__SHIFT = 0 # type: ignore
A7XX_TPL1_A2D_SRC_TEXTURE_SIZE_HEIGHT__MASK = 0x3fff8000 # type: ignore
A7XX_TPL1_A2D_SRC_TEXTURE_SIZE_HEIGHT__SHIFT = 15 # type: ignore
REG_A7XX_TPL1_A2D_SRC_TEXTURE_BASE = 0x0000b2c2 # type: ignore
REG_A7XX_TPL1_A2D_SRC_TEXTURE_PITCH = 0x0000b2c4 # type: ignore
A7XX_TPL1_A2D_SRC_TEXTURE_PITCH_PITCH__MASK = 0x00fffff8 # type: ignore
A7XX_TPL1_A2D_SRC_TEXTURE_PITCH_PITCH__SHIFT = 3 # type: ignore
REG_A6XX_TPL1_A2D_SRC_TEXTURE_BASE_1 = 0x0000b4c5 # type: ignore
REG_A6XX_TPL1_A2D_SRC_TEXTURE_PITCH_1 = 0x0000b4c7 # type: ignore
A6XX_TPL1_A2D_SRC_TEXTURE_PITCH_1__MASK = 0x00000fff # type: ignore
A6XX_TPL1_A2D_SRC_TEXTURE_PITCH_1__SHIFT = 0 # type: ignore
REG_A6XX_TPL1_A2D_SRC_TEXTURE_BASE_2 = 0x0000b4c8 # type: ignore
REG_A7XX_TPL1_A2D_SRC_TEXTURE_BASE_1 = 0x0000b2c5 # type: ignore
REG_A7XX_TPL1_A2D_SRC_TEXTURE_PITCH_1 = 0x0000b2c7 # type: ignore
A7XX_TPL1_A2D_SRC_TEXTURE_PITCH_1__MASK = 0x00000fff # type: ignore
A7XX_TPL1_A2D_SRC_TEXTURE_PITCH_1__SHIFT = 0 # type: ignore
REG_A7XX_TPL1_A2D_SRC_TEXTURE_BASE_2 = 0x0000b2c8 # type: ignore
REG_A6XX_TPL1_A2D_SRC_TEXTURE_FLAG_BASE = 0x0000b4ca # type: ignore
REG_A6XX_TPL1_A2D_SRC_TEXTURE_FLAG_PITCH = 0x0000b4cc # type: ignore
A6XX_TPL1_A2D_SRC_TEXTURE_FLAG_PITCH__MASK = 0x000000ff # type: ignore
A6XX_TPL1_A2D_SRC_TEXTURE_FLAG_PITCH__SHIFT = 0 # type: ignore
REG_A7XX_TPL1_A2D_SRC_TEXTURE_FLAG_BASE = 0x0000b2ca # type: ignore
REG_A7XX_TPL1_A2D_SRC_TEXTURE_FLAG_PITCH = 0x0000b2cc # type: ignore
A7XX_TPL1_A2D_SRC_TEXTURE_FLAG_PITCH__MASK = 0x000000ff # type: ignore
A7XX_TPL1_A2D_SRC_TEXTURE_FLAG_PITCH__SHIFT = 0 # type: ignore
REG_A6XX_SP_PS_UNKNOWN_B4CD = 0x0000b4cd # type: ignore
REG_A6XX_SP_PS_UNKNOWN_B4CE = 0x0000b4ce # type: ignore
REG_A6XX_SP_PS_UNKNOWN_B4CF = 0x0000b4cf # type: ignore
REG_A6XX_SP_PS_UNKNOWN_B4D0 = 0x0000b4d0 # type: ignore
REG_A6XX_SP_WINDOW_OFFSET = 0x0000b4d1 # type: ignore
A6XX_SP_WINDOW_OFFSET_X__MASK = 0x00003fff # type: ignore
A6XX_SP_WINDOW_OFFSET_X__SHIFT = 0 # type: ignore
A6XX_SP_WINDOW_OFFSET_Y__MASK = 0x3fff0000 # type: ignore
A6XX_SP_WINDOW_OFFSET_Y__SHIFT = 16 # type: ignore
REG_A7XX_SP_PS_UNKNOWN_B4CD = 0x0000b2cd # type: ignore
REG_A7XX_SP_PS_UNKNOWN_B4CE = 0x0000b2ce # type: ignore
REG_A7XX_SP_PS_UNKNOWN_B4CF = 0x0000b2cf # type: ignore
REG_A7XX_SP_PS_UNKNOWN_B4D0 = 0x0000b2d0 # type: ignore
REG_A7XX_TPL1_A2D_WINDOW_OFFSET = 0x0000b2d1 # type: ignore
A7XX_TPL1_A2D_WINDOW_OFFSET_X__MASK = 0x00003fff # type: ignore
A7XX_TPL1_A2D_WINDOW_OFFSET_X__SHIFT = 0 # type: ignore
A7XX_TPL1_A2D_WINDOW_OFFSET_Y__MASK = 0x3fff0000 # type: ignore
A7XX_TPL1_A2D_WINDOW_OFFSET_Y__SHIFT = 16 # type: ignore
REG_A7XX_TPL1_A2D_BLT_CNTL = 0x0000b2d2 # type: ignore
A7XX_TPL1_A2D_BLT_CNTL_RAW_COPY = 0x00000001 # type: ignore
A7XX_TPL1_A2D_BLT_CNTL_START_OFFSET_TEXELS__MASK = 0x003f0000 # type: ignore
A7XX_TPL1_A2D_BLT_CNTL_START_OFFSET_TEXELS__SHIFT = 16 # type: ignore
A7XX_TPL1_A2D_BLT_CNTL_TYPE__MASK = 0xe0000000 # type: ignore
A7XX_TPL1_A2D_BLT_CNTL_TYPE__SHIFT = 29 # type: ignore
REG_A7XX_SP_WINDOW_OFFSET = 0x0000ab21 # type: ignore
A7XX_SP_WINDOW_OFFSET_X__MASK = 0x00003fff # type: ignore
A7XX_SP_WINDOW_OFFSET_X__SHIFT = 0 # type: ignore
A7XX_SP_WINDOW_OFFSET_Y__MASK = 0x3fff0000 # type: ignore
A7XX_SP_WINDOW_OFFSET_Y__SHIFT = 16 # type: ignore
REG_A6XX_TPL1_DBG_ECO_CNTL = 0x0000b600 # type: ignore
REG_A6XX_TPL1_ADDR_MODE_CNTL = 0x0000b601 # type: ignore
REG_A6XX_TPL1_DBG_ECO_CNTL1 = 0x0000b602 # type: ignore
A6XX_TPL1_DBG_ECO_CNTL1_TP_UBWC_FLAG_HINT = 0x00040000 # type: ignore
REG_A6XX_TPL1_NC_MODE_CNTL = 0x0000b604 # type: ignore
A6XX_TPL1_NC_MODE_CNTL_MODE = 0x00000001 # type: ignore
A6XX_TPL1_NC_MODE_CNTL_LOWER_BIT__MASK = 0x00000006 # type: ignore
A6XX_TPL1_NC_MODE_CNTL_LOWER_BIT__SHIFT = 1 # type: ignore
A6XX_TPL1_NC_MODE_CNTL_MIN_ACCESS_LENGTH = 0x00000008 # type: ignore
A6XX_TPL1_NC_MODE_CNTL_UPPER_BIT__MASK = 0x00000010 # type: ignore
A6XX_TPL1_NC_MODE_CNTL_UPPER_BIT__SHIFT = 4 # type: ignore
A6XX_TPL1_NC_MODE_CNTL_UNK6__MASK = 0x000000c0 # type: ignore
A6XX_TPL1_NC_MODE_CNTL_UNK6__SHIFT = 6 # type: ignore
REG_A6XX_TPL1_UNKNOWN_B605 = 0x0000b605 # type: ignore
REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_0 = 0x0000b608 # type: ignore
REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_1 = 0x0000b609 # type: ignore
REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_2 = 0x0000b60a # type: ignore
REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_3 = 0x0000b60b # type: ignore
REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_4 = 0x0000b60c # type: ignore
REG_A7XX_TPL1_BICUBIC_WEIGHTS_TABLE_0 = 0x0000b608 # type: ignore
REG_A7XX_TPL1_BICUBIC_WEIGHTS_TABLE_1 = 0x0000b609 # type: ignore
REG_A7XX_TPL1_BICUBIC_WEIGHTS_TABLE_2 = 0x0000b60a # type: ignore
REG_A7XX_TPL1_BICUBIC_WEIGHTS_TABLE_3 = 0x0000b60b # type: ignore
REG_A7XX_TPL1_BICUBIC_WEIGHTS_TABLE_4 = 0x0000b60c # type: ignore
REG_A6XX_TPL1_PERFCTR_TP_SEL = lambda i0: (0x0000b610 + 0x1*i0 ) # type: ignore
REG_A7XX_TPL1_PERFCTR_TP_SEL = lambda i0: (0x0000b610 + 0x1*i0 ) # type: ignore
REG_A6XX_SP_VS_CONST_CONFIG = 0x0000b800 # type: ignore
A6XX_SP_VS_CONST_CONFIG_CONSTLEN__MASK = 0x000000ff # type: ignore
A6XX_SP_VS_CONST_CONFIG_CONSTLEN__SHIFT = 0 # type: ignore
A6XX_SP_VS_CONST_CONFIG_ENABLED = 0x00000100 # type: ignore
A6XX_SP_VS_CONST_CONFIG_READ_IMM_SHARED_CONSTS = 0x00000200 # type: ignore
REG_A6XX_SP_HS_CONST_CONFIG = 0x0000b801 # type: ignore
A6XX_SP_HS_CONST_CONFIG_CONSTLEN__MASK = 0x000000ff # type: ignore
A6XX_SP_HS_CONST_CONFIG_CONSTLEN__SHIFT = 0 # type: ignore
A6XX_SP_HS_CONST_CONFIG_ENABLED = 0x00000100 # type: ignore
A6XX_SP_HS_CONST_CONFIG_READ_IMM_SHARED_CONSTS = 0x00000200 # type: ignore
REG_A6XX_SP_DS_CONST_CONFIG = 0x0000b802 # type: ignore
A6XX_SP_DS_CONST_CONFIG_CONSTLEN__MASK = 0x000000ff # type: ignore
A6XX_SP_DS_CONST_CONFIG_CONSTLEN__SHIFT = 0 # type: ignore
A6XX_SP_DS_CONST_CONFIG_ENABLED = 0x00000100 # type: ignore
A6XX_SP_DS_CONST_CONFIG_READ_IMM_SHARED_CONSTS = 0x00000200 # type: ignore
REG_A6XX_SP_GS_CONST_CONFIG = 0x0000b803 # type: ignore
A6XX_SP_GS_CONST_CONFIG_CONSTLEN__MASK = 0x000000ff # type: ignore
A6XX_SP_GS_CONST_CONFIG_CONSTLEN__SHIFT = 0 # type: ignore
A6XX_SP_GS_CONST_CONFIG_ENABLED = 0x00000100 # type: ignore
A6XX_SP_GS_CONST_CONFIG_READ_IMM_SHARED_CONSTS = 0x00000200 # type: ignore
REG_A7XX_SP_VS_CONST_CONFIG = 0x0000a827 # type: ignore
A7XX_SP_VS_CONST_CONFIG_CONSTLEN__MASK = 0x000000ff # type: ignore
A7XX_SP_VS_CONST_CONFIG_CONSTLEN__SHIFT = 0 # type: ignore
A7XX_SP_VS_CONST_CONFIG_ENABLED = 0x00000100 # type: ignore
A7XX_SP_VS_CONST_CONFIG_READ_IMM_SHARED_CONSTS = 0x00000200 # type: ignore
REG_A7XX_SP_HS_CONST_CONFIG = 0x0000a83f # type: ignore
A7XX_SP_HS_CONST_CONFIG_CONSTLEN__MASK = 0x000000ff # type: ignore
A7XX_SP_HS_CONST_CONFIG_CONSTLEN__SHIFT = 0 # type: ignore
A7XX_SP_HS_CONST_CONFIG_ENABLED = 0x00000100 # type: ignore
A7XX_SP_HS_CONST_CONFIG_READ_IMM_SHARED_CONSTS = 0x00000200 # type: ignore
REG_A7XX_SP_DS_CONST_CONFIG = 0x0000a867 # type: ignore
A7XX_SP_DS_CONST_CONFIG_CONSTLEN__MASK = 0x000000ff # type: ignore
A7XX_SP_DS_CONST_CONFIG_CONSTLEN__SHIFT = 0 # type: ignore
A7XX_SP_DS_CONST_CONFIG_ENABLED = 0x00000100 # type: ignore
A7XX_SP_DS_CONST_CONFIG_READ_IMM_SHARED_CONSTS = 0x00000200 # type: ignore
REG_A7XX_SP_GS_CONST_CONFIG = 0x0000a898 # type: ignore
A7XX_SP_GS_CONST_CONFIG_CONSTLEN__MASK = 0x000000ff # type: ignore
A7XX_SP_GS_CONST_CONFIG_CONSTLEN__SHIFT = 0 # type: ignore
A7XX_SP_GS_CONST_CONFIG_ENABLED = 0x00000100 # type: ignore
A7XX_SP_GS_CONST_CONFIG_READ_IMM_SHARED_CONSTS = 0x00000200 # type: ignore
REG_A7XX_SP_RENDER_CNTL = 0x0000a9aa # type: ignore
A7XX_SP_RENDER_CNTL_FS_DISABLE = 0x00000001 # type: ignore
REG_A7XX_SP_DITHER_CNTL = 0x0000a9ac # type: ignore
A7XX_SP_DITHER_CNTL_DITHER_MODE_MRT0__MASK = 0x00000003 # type: ignore
A7XX_SP_DITHER_CNTL_DITHER_MODE_MRT0__SHIFT = 0 # type: ignore
A7XX_SP_DITHER_CNTL_DITHER_MODE_MRT1__MASK = 0x0000000c # type: ignore
A7XX_SP_DITHER_CNTL_DITHER_MODE_MRT1__SHIFT = 2 # type: ignore
A7XX_SP_DITHER_CNTL_DITHER_MODE_MRT2__MASK = 0x00000030 # type: ignore
A7XX_SP_DITHER_CNTL_DITHER_MODE_MRT2__SHIFT = 4 # type: ignore
A7XX_SP_DITHER_CNTL_DITHER_MODE_MRT3__MASK = 0x000000c0 # type: ignore
A7XX_SP_DITHER_CNTL_DITHER_MODE_MRT3__SHIFT = 6 # type: ignore
A7XX_SP_DITHER_CNTL_DITHER_MODE_MRT4__MASK = 0x00000300 # type: ignore
A7XX_SP_DITHER_CNTL_DITHER_MODE_MRT4__SHIFT = 8 # type: ignore
A7XX_SP_DITHER_CNTL_DITHER_MODE_MRT5__MASK = 0x00000c00 # type: ignore
A7XX_SP_DITHER_CNTL_DITHER_MODE_MRT5__SHIFT = 10 # type: ignore
A7XX_SP_DITHER_CNTL_DITHER_MODE_MRT6__MASK = 0x00003000 # type: ignore
A7XX_SP_DITHER_CNTL_DITHER_MODE_MRT6__SHIFT = 12 # type: ignore
A7XX_SP_DITHER_CNTL_DITHER_MODE_MRT7__MASK = 0x0000c000 # type: ignore
A7XX_SP_DITHER_CNTL_DITHER_MODE_MRT7__SHIFT = 14 # type: ignore
REG_A7XX_SP_VRS_CONFIG = 0x0000a9ad # type: ignore
A7XX_SP_VRS_CONFIG_PIPELINE_FSR_ENABLE = 0x00000001 # type: ignore
A7XX_SP_VRS_CONFIG_ATTACHMENT_FSR_ENABLE = 0x00000002 # type: ignore
A7XX_SP_VRS_CONFIG_PRIMITIVE_FSR_ENABLE = 0x00000008 # type: ignore
REG_A7XX_SP_PS_CNTL_1 = 0x0000a9ae # type: ignore
A7XX_SP_PS_CNTL_1_SYSVAL_REGS_COUNT__MASK = 0x000000ff # type: ignore
A7XX_SP_PS_CNTL_1_SYSVAL_REGS_COUNT__SHIFT = 0 # type: ignore
A7XX_SP_PS_CNTL_1_UNK8 = 0x00000100 # type: ignore
A7XX_SP_PS_CNTL_1_UNK9 = 0x00000200 # type: ignore
REG_A6XX_HLSQ_LOAD_STATE_GEOM_CMD = 0x0000b820 # type: ignore
REG_A6XX_HLSQ_LOAD_STATE_GEOM_EXT_SRC_ADDR = 0x0000b821 # type: ignore
REG_A6XX_HLSQ_LOAD_STATE_GEOM_DATA = 0x0000b823 # type: ignore
REG_A6XX_SP_PS_WAVE_CNTL = 0x0000b980 # type: ignore
A6XX_SP_PS_WAVE_CNTL_THREADSIZE__MASK = 0x00000001 # type: ignore
A6XX_SP_PS_WAVE_CNTL_THREADSIZE__SHIFT = 0 # type: ignore
A6XX_SP_PS_WAVE_CNTL_VARYINGS = 0x00000002 # type: ignore
A6XX_SP_PS_WAVE_CNTL_UNK2__MASK = 0x00000ffc # type: ignore
A6XX_SP_PS_WAVE_CNTL_UNK2__SHIFT = 2 # type: ignore
REG_A6XX_HLSQ_UNKNOWN_B981 = 0x0000b981 # type: ignore
REG_A6XX_SP_LB_PARAM_LIMIT = 0x0000b982 # type: ignore
A6XX_SP_LB_PARAM_LIMIT_PRIMALLOCTHRESHOLD__MASK = 0x00000007 # type: ignore
A6XX_SP_LB_PARAM_LIMIT_PRIMALLOCTHRESHOLD__SHIFT = 0 # type: ignore
REG_A6XX_SP_REG_PROG_ID_0 = 0x0000b983 # type: ignore
A6XX_SP_REG_PROG_ID_0_FACEREGID__MASK = 0x000000ff # type: ignore
A6XX_SP_REG_PROG_ID_0_FACEREGID__SHIFT = 0 # type: ignore
A6XX_SP_REG_PROG_ID_0_SAMPLEID__MASK = 0x0000ff00 # type: ignore
A6XX_SP_REG_PROG_ID_0_SAMPLEID__SHIFT = 8 # type: ignore
A6XX_SP_REG_PROG_ID_0_SAMPLEMASK__MASK = 0x00ff0000 # type: ignore
A6XX_SP_REG_PROG_ID_0_SAMPLEMASK__SHIFT = 16 # type: ignore
A6XX_SP_REG_PROG_ID_0_CENTERRHW__MASK = 0xff000000 # type: ignore
A6XX_SP_REG_PROG_ID_0_CENTERRHW__SHIFT = 24 # type: ignore
REG_A6XX_SP_REG_PROG_ID_1 = 0x0000b984 # type: ignore
A6XX_SP_REG_PROG_ID_1_IJ_PERSP_PIXEL__MASK = 0x000000ff # type: ignore
A6XX_SP_REG_PROG_ID_1_IJ_PERSP_PIXEL__SHIFT = 0 # type: ignore
A6XX_SP_REG_PROG_ID_1_IJ_LINEAR_PIXEL__MASK = 0x0000ff00 # type: ignore
A6XX_SP_REG_PROG_ID_1_IJ_LINEAR_PIXEL__SHIFT = 8 # type: ignore
A6XX_SP_REG_PROG_ID_1_IJ_PERSP_CENTROID__MASK = 0x00ff0000 # type: ignore
A6XX_SP_REG_PROG_ID_1_IJ_PERSP_CENTROID__SHIFT = 16 # type: ignore
A6XX_SP_REG_PROG_ID_1_IJ_LINEAR_CENTROID__MASK = 0xff000000 # type: ignore
A6XX_SP_REG_PROG_ID_1_IJ_LINEAR_CENTROID__SHIFT = 24 # type: ignore
REG_A6XX_SP_REG_PROG_ID_2 = 0x0000b985 # type: ignore
A6XX_SP_REG_PROG_ID_2_IJ_PERSP_SAMPLE__MASK = 0x000000ff # type: ignore
A6XX_SP_REG_PROG_ID_2_IJ_PERSP_SAMPLE__SHIFT = 0 # type: ignore
A6XX_SP_REG_PROG_ID_2_IJ_LINEAR_SAMPLE__MASK = 0x0000ff00 # type: ignore
A6XX_SP_REG_PROG_ID_2_IJ_LINEAR_SAMPLE__SHIFT = 8 # type: ignore
A6XX_SP_REG_PROG_ID_2_XYCOORDREGID__MASK = 0x00ff0000 # type: ignore
A6XX_SP_REG_PROG_ID_2_XYCOORDREGID__SHIFT = 16 # type: ignore
A6XX_SP_REG_PROG_ID_2_ZWCOORDREGID__MASK = 0xff000000 # type: ignore
A6XX_SP_REG_PROG_ID_2_ZWCOORDREGID__SHIFT = 24 # type: ignore
REG_A6XX_SP_REG_PROG_ID_3 = 0x0000b986 # type: ignore
A6XX_SP_REG_PROG_ID_3_LINELENGTHREGID__MASK = 0x000000ff # type: ignore
A6XX_SP_REG_PROG_ID_3_LINELENGTHREGID__SHIFT = 0 # type: ignore
A6XX_SP_REG_PROG_ID_3_FOVEATIONQUALITYREGID__MASK = 0x0000ff00 # type: ignore
A6XX_SP_REG_PROG_ID_3_FOVEATIONQUALITYREGID__SHIFT = 8 # type: ignore
REG_A6XX_SP_CS_CONST_CONFIG = 0x0000b987 # type: ignore
A6XX_SP_CS_CONST_CONFIG_CONSTLEN__MASK = 0x000000ff # type: ignore
A6XX_SP_CS_CONST_CONFIG_CONSTLEN__SHIFT = 0 # type: ignore
A6XX_SP_CS_CONST_CONFIG_ENABLED = 0x00000100 # type: ignore
A6XX_SP_CS_CONST_CONFIG_READ_IMM_SHARED_CONSTS = 0x00000200 # type: ignore
REG_A7XX_SP_PS_WAVE_CNTL = 0x0000a9c6 # type: ignore
A7XX_SP_PS_WAVE_CNTL_THREADSIZE__MASK = 0x00000001 # type: ignore
A7XX_SP_PS_WAVE_CNTL_THREADSIZE__SHIFT = 0 # type: ignore
A7XX_SP_PS_WAVE_CNTL_VARYINGS = 0x00000002 # type: ignore
A7XX_SP_PS_WAVE_CNTL_UNK2__MASK = 0x00000ffc # type: ignore
A7XX_SP_PS_WAVE_CNTL_UNK2__SHIFT = 2 # type: ignore
REG_A7XX_SP_LB_PARAM_LIMIT = 0x0000a9c7 # type: ignore
A7XX_SP_LB_PARAM_LIMIT_PRIMALLOCTHRESHOLD__MASK = 0x00000007 # type: ignore
A7XX_SP_LB_PARAM_LIMIT_PRIMALLOCTHRESHOLD__SHIFT = 0 # type: ignore
REG_A7XX_SP_REG_PROG_ID_0 = 0x0000a9c8 # type: ignore
A7XX_SP_REG_PROG_ID_0_FACEREGID__MASK = 0x000000ff # type: ignore
A7XX_SP_REG_PROG_ID_0_FACEREGID__SHIFT = 0 # type: ignore
A7XX_SP_REG_PROG_ID_0_SAMPLEID__MASK = 0x0000ff00 # type: ignore
A7XX_SP_REG_PROG_ID_0_SAMPLEID__SHIFT = 8 # type: ignore
A7XX_SP_REG_PROG_ID_0_SAMPLEMASK__MASK = 0x00ff0000 # type: ignore
A7XX_SP_REG_PROG_ID_0_SAMPLEMASK__SHIFT = 16 # type: ignore
A7XX_SP_REG_PROG_ID_0_CENTERRHW__MASK = 0xff000000 # type: ignore
A7XX_SP_REG_PROG_ID_0_CENTERRHW__SHIFT = 24 # type: ignore
REG_A7XX_SP_REG_PROG_ID_1 = 0x0000a9c9 # type: ignore
A7XX_SP_REG_PROG_ID_1_IJ_PERSP_PIXEL__MASK = 0x000000ff # type: ignore
A7XX_SP_REG_PROG_ID_1_IJ_PERSP_PIXEL__SHIFT = 0 # type: ignore
A7XX_SP_REG_PROG_ID_1_IJ_LINEAR_PIXEL__MASK = 0x0000ff00 # type: ignore
A7XX_SP_REG_PROG_ID_1_IJ_LINEAR_PIXEL__SHIFT = 8 # type: ignore
A7XX_SP_REG_PROG_ID_1_IJ_PERSP_CENTROID__MASK = 0x00ff0000 # type: ignore
A7XX_SP_REG_PROG_ID_1_IJ_PERSP_CENTROID__SHIFT = 16 # type: ignore
A7XX_SP_REG_PROG_ID_1_IJ_LINEAR_CENTROID__MASK = 0xff000000 # type: ignore
A7XX_SP_REG_PROG_ID_1_IJ_LINEAR_CENTROID__SHIFT = 24 # type: ignore
REG_A7XX_SP_REG_PROG_ID_2 = 0x0000a9ca # type: ignore
A7XX_SP_REG_PROG_ID_2_IJ_PERSP_SAMPLE__MASK = 0x000000ff # type: ignore
A7XX_SP_REG_PROG_ID_2_IJ_PERSP_SAMPLE__SHIFT = 0 # type: ignore
A7XX_SP_REG_PROG_ID_2_IJ_LINEAR_SAMPLE__MASK = 0x0000ff00 # type: ignore
A7XX_SP_REG_PROG_ID_2_IJ_LINEAR_SAMPLE__SHIFT = 8 # type: ignore
A7XX_SP_REG_PROG_ID_2_XYCOORDREGID__MASK = 0x00ff0000 # type: ignore
A7XX_SP_REG_PROG_ID_2_XYCOORDREGID__SHIFT = 16 # type: ignore
A7XX_SP_REG_PROG_ID_2_ZWCOORDREGID__MASK = 0xff000000 # type: ignore
A7XX_SP_REG_PROG_ID_2_ZWCOORDREGID__SHIFT = 24 # type: ignore
REG_A7XX_SP_REG_PROG_ID_3 = 0x0000a9cb # type: ignore
A7XX_SP_REG_PROG_ID_3_LINELENGTHREGID__MASK = 0x000000ff # type: ignore
A7XX_SP_REG_PROG_ID_3_LINELENGTHREGID__SHIFT = 0 # type: ignore
A7XX_SP_REG_PROG_ID_3_FOVEATIONQUALITYREGID__MASK = 0x0000ff00 # type: ignore
A7XX_SP_REG_PROG_ID_3_FOVEATIONQUALITYREGID__SHIFT = 8 # type: ignore
REG_A7XX_SP_CS_CONST_CONFIG = 0x0000a9cd # type: ignore
A7XX_SP_CS_CONST_CONFIG_CONSTLEN__MASK = 0x000000ff # type: ignore
A7XX_SP_CS_CONST_CONFIG_CONSTLEN__SHIFT = 0 # type: ignore
A7XX_SP_CS_CONST_CONFIG_ENABLED = 0x00000100 # type: ignore
A7XX_SP_CS_CONST_CONFIG_READ_IMM_SHARED_CONSTS = 0x00000200 # type: ignore
REG_A6XX_SP_CS_NDRANGE_0 = 0x0000b990 # type: ignore
A6XX_SP_CS_NDRANGE_0_KERNELDIM__MASK = 0x00000003 # type: ignore
A6XX_SP_CS_NDRANGE_0_KERNELDIM__SHIFT = 0 # type: ignore
A6XX_SP_CS_NDRANGE_0_LOCALSIZEX__MASK = 0x00000ffc # type: ignore
A6XX_SP_CS_NDRANGE_0_LOCALSIZEX__SHIFT = 2 # type: ignore
A6XX_SP_CS_NDRANGE_0_LOCALSIZEY__MASK = 0x003ff000 # type: ignore
A6XX_SP_CS_NDRANGE_0_LOCALSIZEY__SHIFT = 12 # type: ignore
A6XX_SP_CS_NDRANGE_0_LOCALSIZEZ__MASK = 0xffc00000 # type: ignore
A6XX_SP_CS_NDRANGE_0_LOCALSIZEZ__SHIFT = 22 # type: ignore
REG_A6XX_SP_CS_NDRANGE_1 = 0x0000b991 # type: ignore
A6XX_SP_CS_NDRANGE_1_GLOBALSIZE_X__MASK = 0xffffffff # type: ignore
A6XX_SP_CS_NDRANGE_1_GLOBALSIZE_X__SHIFT = 0 # type: ignore
REG_A6XX_SP_CS_NDRANGE_2 = 0x0000b992 # type: ignore
A6XX_SP_CS_NDRANGE_2_GLOBALOFF_X__MASK = 0xffffffff # type: ignore
A6XX_SP_CS_NDRANGE_2_GLOBALOFF_X__SHIFT = 0 # type: ignore
REG_A6XX_SP_CS_NDRANGE_3 = 0x0000b993 # type: ignore
A6XX_SP_CS_NDRANGE_3_GLOBALSIZE_Y__MASK = 0xffffffff # type: ignore
A6XX_SP_CS_NDRANGE_3_GLOBALSIZE_Y__SHIFT = 0 # type: ignore
REG_A6XX_SP_CS_NDRANGE_4 = 0x0000b994 # type: ignore
A6XX_SP_CS_NDRANGE_4_GLOBALOFF_Y__MASK = 0xffffffff # type: ignore
A6XX_SP_CS_NDRANGE_4_GLOBALOFF_Y__SHIFT = 0 # type: ignore
REG_A6XX_SP_CS_NDRANGE_5 = 0x0000b995 # type: ignore
A6XX_SP_CS_NDRANGE_5_GLOBALSIZE_Z__MASK = 0xffffffff # type: ignore
A6XX_SP_CS_NDRANGE_5_GLOBALSIZE_Z__SHIFT = 0 # type: ignore
REG_A6XX_SP_CS_NDRANGE_6 = 0x0000b996 # type: ignore
A6XX_SP_CS_NDRANGE_6_GLOBALOFF_Z__MASK = 0xffffffff # type: ignore
A6XX_SP_CS_NDRANGE_6_GLOBALOFF_Z__SHIFT = 0 # type: ignore
REG_A6XX_SP_CS_CONST_CONFIG_0 = 0x0000b997 # type: ignore
A6XX_SP_CS_CONST_CONFIG_0_WGIDCONSTID__MASK = 0x000000ff # type: ignore
A6XX_SP_CS_CONST_CONFIG_0_WGIDCONSTID__SHIFT = 0 # type: ignore
A6XX_SP_CS_CONST_CONFIG_0_WGSIZECONSTID__MASK = 0x0000ff00 # type: ignore
A6XX_SP_CS_CONST_CONFIG_0_WGSIZECONSTID__SHIFT = 8 # type: ignore
A6XX_SP_CS_CONST_CONFIG_0_WGOFFSETCONSTID__MASK = 0x00ff0000 # type: ignore
A6XX_SP_CS_CONST_CONFIG_0_WGOFFSETCONSTID__SHIFT = 16 # type: ignore
A6XX_SP_CS_CONST_CONFIG_0_LOCALIDREGID__MASK = 0xff000000 # type: ignore
A6XX_SP_CS_CONST_CONFIG_0_LOCALIDREGID__SHIFT = 24 # type: ignore
REG_A6XX_SP_CS_WGE_CNTL = 0x0000b998 # type: ignore
A6XX_SP_CS_WGE_CNTL_LINEARLOCALIDREGID__MASK = 0x000000ff # type: ignore
A6XX_SP_CS_WGE_CNTL_LINEARLOCALIDREGID__SHIFT = 0 # type: ignore
A6XX_SP_CS_WGE_CNTL_SINGLE_SP_CORE = 0x00000100 # type: ignore
A6XX_SP_CS_WGE_CNTL_THREADSIZE__MASK = 0x00000200 # type: ignore
A6XX_SP_CS_WGE_CNTL_THREADSIZE__SHIFT = 9 # type: ignore
A6XX_SP_CS_WGE_CNTL_THREADSIZE_SCALAR = 0x00000400 # type: ignore
REG_A6XX_SP_CS_KERNEL_GROUP_X = 0x0000b999 # type: ignore
REG_A6XX_SP_CS_KERNEL_GROUP_Y = 0x0000b99a # type: ignore
REG_A6XX_SP_CS_KERNEL_GROUP_Z = 0x0000b99b # type: ignore
REG_A7XX_SP_CS_NDRANGE_0 = 0x0000a9d4 # type: ignore
A7XX_SP_CS_NDRANGE_0_KERNELDIM__MASK = 0x00000003 # type: ignore
A7XX_SP_CS_NDRANGE_0_KERNELDIM__SHIFT = 0 # type: ignore
A7XX_SP_CS_NDRANGE_0_LOCALSIZEX__MASK = 0x00000ffc # type: ignore
A7XX_SP_CS_NDRANGE_0_LOCALSIZEX__SHIFT = 2 # type: ignore
A7XX_SP_CS_NDRANGE_0_LOCALSIZEY__MASK = 0x003ff000 # type: ignore
A7XX_SP_CS_NDRANGE_0_LOCALSIZEY__SHIFT = 12 # type: ignore
A7XX_SP_CS_NDRANGE_0_LOCALSIZEZ__MASK = 0xffc00000 # type: ignore
A7XX_SP_CS_NDRANGE_0_LOCALSIZEZ__SHIFT = 22 # type: ignore
REG_A7XX_SP_CS_NDRANGE_1 = 0x0000a9d5 # type: ignore
A7XX_SP_CS_NDRANGE_1_GLOBALSIZE_X__MASK = 0xffffffff # type: ignore
A7XX_SP_CS_NDRANGE_1_GLOBALSIZE_X__SHIFT = 0 # type: ignore
REG_A7XX_SP_CS_NDRANGE_2 = 0x0000a9d6 # type: ignore
A7XX_SP_CS_NDRANGE_2_GLOBALOFF_X__MASK = 0xffffffff # type: ignore
A7XX_SP_CS_NDRANGE_2_GLOBALOFF_X__SHIFT = 0 # type: ignore
REG_A7XX_SP_CS_NDRANGE_3 = 0x0000a9d7 # type: ignore
A7XX_SP_CS_NDRANGE_3_GLOBALSIZE_Y__MASK = 0xffffffff # type: ignore
A7XX_SP_CS_NDRANGE_3_GLOBALSIZE_Y__SHIFT = 0 # type: ignore
REG_A7XX_SP_CS_NDRANGE_4 = 0x0000a9d8 # type: ignore
A7XX_SP_CS_NDRANGE_4_GLOBALOFF_Y__MASK = 0xffffffff # type: ignore
A7XX_SP_CS_NDRANGE_4_GLOBALOFF_Y__SHIFT = 0 # type: ignore
REG_A7XX_SP_CS_NDRANGE_5 = 0x0000a9d9 # type: ignore
A7XX_SP_CS_NDRANGE_5_GLOBALSIZE_Z__MASK = 0xffffffff # type: ignore
A7XX_SP_CS_NDRANGE_5_GLOBALSIZE_Z__SHIFT = 0 # type: ignore
REG_A7XX_SP_CS_NDRANGE_6 = 0x0000a9da # type: ignore
A7XX_SP_CS_NDRANGE_6_GLOBALOFF_Z__MASK = 0xffffffff # type: ignore
A7XX_SP_CS_NDRANGE_6_GLOBALOFF_Z__SHIFT = 0 # type: ignore
REG_A7XX_SP_CS_KERNEL_GROUP_X = 0x0000a9dc # type: ignore
REG_A7XX_SP_CS_KERNEL_GROUP_Y = 0x0000a9dd # type: ignore
REG_A7XX_SP_CS_KERNEL_GROUP_Z = 0x0000a9de # type: ignore
REG_A7XX_SP_CS_WGE_CNTL = 0x0000a9db # type: ignore
A7XX_SP_CS_WGE_CNTL_LINEARLOCALIDREGID__MASK = 0x000000ff # type: ignore
A7XX_SP_CS_WGE_CNTL_LINEARLOCALIDREGID__SHIFT = 0 # type: ignore
A7XX_SP_CS_WGE_CNTL_THREADSIZE__MASK = 0x00000200 # type: ignore
A7XX_SP_CS_WGE_CNTL_THREADSIZE__SHIFT = 9 # type: ignore
A7XX_SP_CS_WGE_CNTL_WORKGROUPRASTORDERZFIRSTEN = 0x00000800 # type: ignore
A7XX_SP_CS_WGE_CNTL_WGTILEWIDTH__MASK = 0x03f00000 # type: ignore
A7XX_SP_CS_WGE_CNTL_WGTILEWIDTH__SHIFT = 20 # type: ignore
A7XX_SP_CS_WGE_CNTL_WGTILEHEIGHT__MASK = 0xfc000000 # type: ignore
A7XX_SP_CS_WGE_CNTL_WGTILEHEIGHT__SHIFT = 26 # type: ignore
REG_A7XX_SP_CS_NDRANGE_7 = 0x0000a9df # type: ignore
A7XX_SP_CS_NDRANGE_7_LOCALSIZEX__MASK = 0x00000ffc # type: ignore
A7XX_SP_CS_NDRANGE_7_LOCALSIZEX__SHIFT = 2 # type: ignore
A7XX_SP_CS_NDRANGE_7_LOCALSIZEY__MASK = 0x003ff000 # type: ignore
A7XX_SP_CS_NDRANGE_7_LOCALSIZEY__SHIFT = 12 # type: ignore
A7XX_SP_CS_NDRANGE_7_LOCALSIZEZ__MASK = 0xffc00000 # type: ignore
A7XX_SP_CS_NDRANGE_7_LOCALSIZEZ__SHIFT = 22 # type: ignore
REG_A6XX_HLSQ_LOAD_STATE_FRAG_CMD = 0x0000b9a0 # type: ignore
REG_A6XX_HLSQ_LOAD_STATE_FRAG_EXT_SRC_ADDR = 0x0000b9a1 # type: ignore
REG_A6XX_HLSQ_LOAD_STATE_FRAG_DATA = 0x0000b9a3 # type: ignore
def REG_A6XX_HLSQ_CS_BINDLESS_BASE(i0):  # type: ignore
    """Return the register offset for HLSQ_CS_BINDLESS_BASE element *i0* (base 0xb9c0, stride 2 dwords)."""
    return 0x0000b9c0 + 0x2 * i0
A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK = 0x00000003 # type: ignore
A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT = 0 # type: ignore
A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK = 0xfffffffffffffffc # type: ignore
A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT = 2 # type: ignore
REG_A6XX_HLSQ_CS_CTRL_REG1 = 0x0000b9d0 # type: ignore
A6XX_HLSQ_CS_CTRL_REG1_SHARED_SIZE__MASK = 0x0000001f # type: ignore
A6XX_HLSQ_CS_CTRL_REG1_SHARED_SIZE__SHIFT = 0 # type: ignore
A6XX_HLSQ_CS_CTRL_REG1_CONSTANTRAMMODE__MASK = 0x00000060 # type: ignore
A6XX_HLSQ_CS_CTRL_REG1_CONSTANTRAMMODE__SHIFT = 5 # type: ignore
REG_A6XX_SP_DRAW_INITIATOR = 0x0000bb00 # type: ignore
A6XX_SP_DRAW_INITIATOR_STATE_ID__MASK = 0x000000ff # type: ignore
A6XX_SP_DRAW_INITIATOR_STATE_ID__SHIFT = 0 # type: ignore
REG_A6XX_SP_KERNEL_INITIATOR = 0x0000bb01 # type: ignore
A6XX_SP_KERNEL_INITIATOR_STATE_ID__MASK = 0x000000ff # type: ignore
A6XX_SP_KERNEL_INITIATOR_STATE_ID__SHIFT = 0 # type: ignore
REG_A6XX_SP_EVENT_INITIATOR = 0x0000bb02 # type: ignore
A6XX_SP_EVENT_INITIATOR_STATE_ID__MASK = 0x00ff0000 # type: ignore
A6XX_SP_EVENT_INITIATOR_STATE_ID__SHIFT = 16 # type: ignore
A6XX_SP_EVENT_INITIATOR_EVENT__MASK = 0x0000007f # type: ignore
A6XX_SP_EVENT_INITIATOR_EVENT__SHIFT = 0 # type: ignore
REG_A6XX_SP_UPDATE_CNTL = 0x0000bb08 # type: ignore
A6XX_SP_UPDATE_CNTL_VS_STATE = 0x00000001 # type: ignore
A6XX_SP_UPDATE_CNTL_HS_STATE = 0x00000002 # type: ignore
A6XX_SP_UPDATE_CNTL_DS_STATE = 0x00000004 # type: ignore
A6XX_SP_UPDATE_CNTL_GS_STATE = 0x00000008 # type: ignore
A6XX_SP_UPDATE_CNTL_FS_STATE = 0x00000010 # type: ignore
A6XX_SP_UPDATE_CNTL_CS_STATE = 0x00000020 # type: ignore
A6XX_SP_UPDATE_CNTL_CS_UAV = 0x00000040 # type: ignore
A6XX_SP_UPDATE_CNTL_GFX_UAV = 0x00000080 # type: ignore
A6XX_SP_UPDATE_CNTL_CS_SHARED_CONST = 0x00080000 # type: ignore
A6XX_SP_UPDATE_CNTL_GFX_SHARED_CONST = 0x00000100 # type: ignore
A6XX_SP_UPDATE_CNTL_CS_BINDLESS__MASK = 0x00003e00 # type: ignore
A6XX_SP_UPDATE_CNTL_CS_BINDLESS__SHIFT = 9 # type: ignore
A6XX_SP_UPDATE_CNTL_GFX_BINDLESS__MASK = 0x0007c000 # type: ignore
A6XX_SP_UPDATE_CNTL_GFX_BINDLESS__SHIFT = 14 # type: ignore
REG_A7XX_SP_DRAW_INITIATOR = 0x0000ab1c # type: ignore
A7XX_SP_DRAW_INITIATOR_STATE_ID__MASK = 0x000000ff # type: ignore
A7XX_SP_DRAW_INITIATOR_STATE_ID__SHIFT = 0 # type: ignore
REG_A7XX_SP_KERNEL_INITIATOR = 0x0000ab1d # type: ignore
A7XX_SP_KERNEL_INITIATOR_STATE_ID__MASK = 0x000000ff # type: ignore
A7XX_SP_KERNEL_INITIATOR_STATE_ID__SHIFT = 0 # type: ignore
REG_A7XX_SP_EVENT_INITIATOR = 0x0000ab1e # type: ignore
A7XX_SP_EVENT_INITIATOR_STATE_ID__MASK = 0x00ff0000 # type: ignore
A7XX_SP_EVENT_INITIATOR_STATE_ID__SHIFT = 16 # type: ignore
A7XX_SP_EVENT_INITIATOR_EVENT__MASK = 0x0000007f # type: ignore
A7XX_SP_EVENT_INITIATOR_EVENT__SHIFT = 0 # type: ignore
REG_A7XX_SP_UPDATE_CNTL = 0x0000ab1f # type: ignore
A7XX_SP_UPDATE_CNTL_VS_STATE = 0x00000001 # type: ignore
A7XX_SP_UPDATE_CNTL_HS_STATE = 0x00000002 # type: ignore
A7XX_SP_UPDATE_CNTL_DS_STATE = 0x00000004 # type: ignore
A7XX_SP_UPDATE_CNTL_GS_STATE = 0x00000008 # type: ignore
A7XX_SP_UPDATE_CNTL_FS_STATE = 0x00000010 # type: ignore
A7XX_SP_UPDATE_CNTL_CS_STATE = 0x00000020 # type: ignore
A7XX_SP_UPDATE_CNTL_CS_UAV = 0x00000040 # type: ignore
A7XX_SP_UPDATE_CNTL_GFX_UAV = 0x00000080 # type: ignore
A7XX_SP_UPDATE_CNTL_CS_BINDLESS__MASK = 0x0001fe00 # type: ignore
A7XX_SP_UPDATE_CNTL_CS_BINDLESS__SHIFT = 9 # type: ignore
A7XX_SP_UPDATE_CNTL_GFX_BINDLESS__MASK = 0x01fe0000 # type: ignore
A7XX_SP_UPDATE_CNTL_GFX_BINDLESS__SHIFT = 17 # type: ignore
REG_A6XX_SP_PS_CONST_CONFIG = 0x0000bb10 # type: ignore
A6XX_SP_PS_CONST_CONFIG_CONSTLEN__MASK = 0x000000ff # type: ignore
A6XX_SP_PS_CONST_CONFIG_CONSTLEN__SHIFT = 0 # type: ignore
A6XX_SP_PS_CONST_CONFIG_ENABLED = 0x00000100 # type: ignore
A6XX_SP_PS_CONST_CONFIG_READ_IMM_SHARED_CONSTS = 0x00000200 # type: ignore
REG_A7XX_SP_PS_CONST_CONFIG = 0x0000ab03 # type: ignore
A7XX_SP_PS_CONST_CONFIG_CONSTLEN__MASK = 0x000000ff # type: ignore
A7XX_SP_PS_CONST_CONFIG_CONSTLEN__SHIFT = 0 # type: ignore
A7XX_SP_PS_CONST_CONFIG_ENABLED = 0x00000100 # type: ignore
A7XX_SP_PS_CONST_CONFIG_READ_IMM_SHARED_CONSTS = 0x00000200 # type: ignore
def REG_A7XX_SP_SHARED_CONSTANT_GFX_0(i0):  # type: ignore
    """Return the register offset for SP_SHARED_CONSTANT_GFX element *i0* (base 0xab40, stride 1 dword)."""
    return 0x0000ab40 + 0x1 * i0
REG_A6XX_HLSQ_SHARED_CONSTS = 0x0000bb11 # type: ignore
A6XX_HLSQ_SHARED_CONSTS_ENABLE = 0x00000001 # type: ignore
def REG_A6XX_HLSQ_BINDLESS_BASE(i0):  # type: ignore
    """Return the register offset for HLSQ_BINDLESS_BASE element *i0* (base 0xbb20, stride 2 dwords)."""
    return 0x0000bb20 + 0x2 * i0
A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK = 0x00000003 # type: ignore
A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT = 0 # type: ignore
A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK = 0xfffffffffffffffc # type: ignore
A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT = 2 # type: ignore
REG_A6XX_HLSQ_2D_EVENT_CMD = 0x0000bd80 # type: ignore
A6XX_HLSQ_2D_EVENT_CMD_STATE_ID__MASK = 0x0000ff00 # type: ignore
A6XX_HLSQ_2D_EVENT_CMD_STATE_ID__SHIFT = 8 # type: ignore
A6XX_HLSQ_2D_EVENT_CMD_EVENT__MASK = 0x0000007f # type: ignore
A6XX_HLSQ_2D_EVENT_CMD_EVENT__SHIFT = 0 # type: ignore
REG_A6XX_HLSQ_UNKNOWN_BE00 = 0x0000be00 # type: ignore
REG_A6XX_HLSQ_UNKNOWN_BE01 = 0x0000be01 # type: ignore
REG_A6XX_HLSQ_DBG_ECO_CNTL = 0x0000be04 # type: ignore
REG_A6XX_HLSQ_ADDR_MODE_CNTL = 0x0000be05 # type: ignore
REG_A6XX_HLSQ_UNKNOWN_BE08 = 0x0000be08 # type: ignore
def REG_A6XX_HLSQ_PERFCTR_HLSQ_SEL(i0):  # type: ignore
    """Return the register offset for HLSQ_PERFCTR_HLSQ_SEL element *i0* (base 0xbe10, stride 1 dword)."""
    return 0x0000be10 + 0x1 * i0
REG_A6XX_HLSQ_CONTEXT_SWITCH_GFX_PREEMPTION_SAFE_MODE = 0x0000be22 # type: ignore
REG_A7XX_SP_AHB_READ_APERTURE = 0x0000c000 # type: ignore
REG_A7XX_SP_UNKNOWN_0CE2 = 0x00000ce2 # type: ignore
REG_A7XX_SP_UNKNOWN_0CE4 = 0x00000ce4 # type: ignore
REG_A7XX_SP_UNKNOWN_0CE6 = 0x00000ce6 # type: ignore
REG_A6XX_CP_EVENT_START = 0x0000d600 # type: ignore
A6XX_CP_EVENT_START_STATE_ID__MASK = 0x000000ff # type: ignore
A6XX_CP_EVENT_START_STATE_ID__SHIFT = 0 # type: ignore
REG_A6XX_CP_EVENT_END = 0x0000d601 # type: ignore
A6XX_CP_EVENT_END_STATE_ID__MASK = 0x000000ff # type: ignore
A6XX_CP_EVENT_END_STATE_ID__SHIFT = 0 # type: ignore
REG_A6XX_CP_2D_EVENT_START = 0x0000d700 # type: ignore
A6XX_CP_2D_EVENT_START_STATE_ID__MASK = 0x000000ff # type: ignore
A6XX_CP_2D_EVENT_START_STATE_ID__SHIFT = 0 # type: ignore
REG_A6XX_CP_2D_EVENT_END = 0x0000d701 # type: ignore
A6XX_CP_2D_EVENT_END_STATE_ID__MASK = 0x000000ff # type: ignore
A6XX_CP_2D_EVENT_END_STATE_ID__SHIFT = 0 # type: ignore
REG_A6XX_PDC_GPU_ENABLE_PDC = 0x00001140 # type: ignore
REG_A6XX_PDC_GPU_SEQ_START_ADDR = 0x00001148 # type: ignore
REG_A6XX_PDC_GPU_TCS0_CONTROL = 0x00001540 # type: ignore
REG_A6XX_PDC_GPU_TCS0_CMD_ENABLE_BANK = 0x00001541 # type: ignore
REG_A6XX_PDC_GPU_TCS0_CMD_WAIT_FOR_CMPL_BANK = 0x00001542 # type: ignore
REG_A6XX_PDC_GPU_TCS0_CMD0_MSGID = 0x00001543 # type: ignore
REG_A6XX_PDC_GPU_TCS0_CMD0_ADDR = 0x00001544 # type: ignore
REG_A6XX_PDC_GPU_TCS0_CMD0_DATA = 0x00001545 # type: ignore
REG_A6XX_PDC_GPU_TCS1_CONTROL = 0x00001572 # type: ignore
REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK = 0x00001573 # type: ignore
REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK = 0x00001574 # type: ignore
REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID = 0x00001575 # type: ignore
REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR = 0x00001576 # type: ignore
REG_A6XX_PDC_GPU_TCS1_CMD0_DATA = 0x00001577 # type: ignore
REG_A6XX_PDC_GPU_TCS2_CONTROL = 0x000015a4 # type: ignore
REG_A6XX_PDC_GPU_TCS2_CMD_ENABLE_BANK = 0x000015a5 # type: ignore
REG_A6XX_PDC_GPU_TCS2_CMD_WAIT_FOR_CMPL_BANK = 0x000015a6 # type: ignore
REG_A6XX_PDC_GPU_TCS2_CMD0_MSGID = 0x000015a7 # type: ignore
REG_A6XX_PDC_GPU_TCS2_CMD0_ADDR = 0x000015a8 # type: ignore
REG_A6XX_PDC_GPU_TCS2_CMD0_DATA = 0x000015a9 # type: ignore
REG_A6XX_PDC_GPU_TCS3_CONTROL = 0x000015d6 # type: ignore
REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK = 0x000015d7 # type: ignore
REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK = 0x000015d8 # type: ignore
REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID = 0x000015d9 # type: ignore
REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR = 0x000015da # type: ignore
REG_A6XX_PDC_GPU_TCS3_CMD0_DATA = 0x000015db # type: ignore
REG_A6XX_PDC_GPU_SEQ_MEM_0 = 0x00000000 # type: ignore
REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_A = 0x00000000 # type: ignore
A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX__MASK = 0x000000ff # type: ignore
A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX__SHIFT = 0 # type: ignore
A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL__MASK = 0x0000ff00 # type: ignore
A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL__SHIFT = 8 # type: ignore
REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_B = 0x00000001 # type: ignore
REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_C = 0x00000002 # type: ignore
REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_D = 0x00000003 # type: ignore
REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLT = 0x00000004 # type: ignore
A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK = 0x0000003f # type: ignore
A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT = 0 # type: ignore
A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK = 0x00007000 # type: ignore
A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT = 12 # type: ignore
A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK = 0xf0000000 # type: ignore
A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT = 28 # type: ignore
REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLM = 0x00000005 # type: ignore
A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK = 0x0f000000 # type: ignore
A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT = 24 # type: ignore
REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0 = 0x00000008 # type: ignore
REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1 = 0x00000009 # type: ignore
REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2 = 0x0000000a # type: ignore
REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3 = 0x0000000b # type: ignore
REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0 = 0x0000000c # type: ignore
REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1 = 0x0000000d # type: ignore
REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2 = 0x0000000e # type: ignore
REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3 = 0x0000000f # type: ignore
REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0 = 0x00000010 # type: ignore
A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK = 0x0000000f # type: ignore
A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT = 0 # type: ignore
A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK = 0x000000f0 # type: ignore
A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT = 4 # type: ignore
A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK = 0x00000f00 # type: ignore
A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT = 8 # type: ignore
A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK = 0x0000f000 # type: ignore
A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT = 12 # type: ignore
A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK = 0x000f0000 # type: ignore
A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT = 16 # type: ignore
A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK = 0x00f00000 # type: ignore
A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT = 20 # type: ignore
A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK = 0x0f000000 # type: ignore
A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT = 24 # type: ignore
A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK = 0xf0000000 # type: ignore
A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT = 28 # type: ignore
REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1 = 0x00000011 # type: ignore
A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK = 0x0000000f # type: ignore
A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT = 0 # type: ignore
A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK = 0x000000f0 # type: ignore
A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT = 4 # type: ignore
A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK = 0x00000f00 # type: ignore
A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT = 8 # type: ignore
A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK = 0x0000f000 # type: ignore
A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT = 12 # type: ignore
A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK = 0x000f0000 # type: ignore
A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT = 16 # type: ignore
A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK = 0x00f00000 # type: ignore
A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT = 20 # type: ignore
A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK = 0x0f000000 # type: ignore
A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT = 24 # type: ignore
A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK = 0xf0000000 # type: ignore
A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT = 28 # type: ignore
REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1 = 0x0000002f # type: ignore
REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2 = 0x00000030 # type: ignore
REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_0 = 0x00000001 # type: ignore
REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_1 = 0x00000002 # type: ignore
REG_A7XX_CX_MISC_TCM_RET_CNTL = 0x00000039 # type: ignore
REG_A7XX_CX_MISC_SW_FUSE_VALUE = 0x00000400 # type: ignore
A7XX_CX_MISC_SW_FUSE_VALUE_FASTBLEND = 0x00000001 # type: ignore
A7XX_CX_MISC_SW_FUSE_VALUE_LPAC = 0x00000002 # type: ignore
A7XX_CX_MISC_SW_FUSE_VALUE_RAYTRACING = 0x00000004 # type: ignore
__struct__cast = lambda X: (struct_X) # type: ignore
REG_CP_LOAD_STATE_0 = 0x00000000 # type: ignore
CP_LOAD_STATE_0_DST_OFF__MASK = 0x0000ffff # type: ignore
CP_LOAD_STATE_0_DST_OFF__SHIFT = 0 # type: ignore
CP_LOAD_STATE_0_STATE_SRC__MASK = 0x00070000 # type: ignore
CP_LOAD_STATE_0_STATE_SRC__SHIFT = 16 # type: ignore
CP_LOAD_STATE_0_STATE_BLOCK__MASK = 0x00380000 # type: ignore
CP_LOAD_STATE_0_STATE_BLOCK__SHIFT = 19 # type: ignore
CP_LOAD_STATE_0_NUM_UNIT__MASK = 0xffc00000 # type: ignore
CP_LOAD_STATE_0_NUM_UNIT__SHIFT = 22 # type: ignore
REG_CP_LOAD_STATE_1 = 0x00000001 # type: ignore
CP_LOAD_STATE_1_STATE_TYPE__MASK = 0x00000003 # type: ignore
CP_LOAD_STATE_1_STATE_TYPE__SHIFT = 0 # type: ignore
CP_LOAD_STATE_1_EXT_SRC_ADDR__MASK = 0xfffffffc # type: ignore
CP_LOAD_STATE_1_EXT_SRC_ADDR__SHIFT = 2 # type: ignore
REG_CP_LOAD_STATE4_0 = 0x00000000 # type: ignore
CP_LOAD_STATE4_0_DST_OFF__MASK = 0x00003fff # type: ignore
CP_LOAD_STATE4_0_DST_OFF__SHIFT = 0 # type: ignore
CP_LOAD_STATE4_0_STATE_SRC__MASK = 0x00030000 # type: ignore
CP_LOAD_STATE4_0_STATE_SRC__SHIFT = 16 # type: ignore
CP_LOAD_STATE4_0_STATE_BLOCK__MASK = 0x003c0000 # type: ignore
CP_LOAD_STATE4_0_STATE_BLOCK__SHIFT = 18 # type: ignore
CP_LOAD_STATE4_0_NUM_UNIT__MASK = 0xffc00000 # type: ignore
CP_LOAD_STATE4_0_NUM_UNIT__SHIFT = 22 # type: ignore
REG_CP_LOAD_STATE4_1 = 0x00000001 # type: ignore
CP_LOAD_STATE4_1_STATE_TYPE__MASK = 0x00000003 # type: ignore
CP_LOAD_STATE4_1_STATE_TYPE__SHIFT = 0 # type: ignore
CP_LOAD_STATE4_1_EXT_SRC_ADDR__MASK = 0xfffffffc # type: ignore
CP_LOAD_STATE4_1_EXT_SRC_ADDR__SHIFT = 2 # type: ignore
REG_CP_LOAD_STATE4_2 = 0x00000002 # type: ignore
CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI__MASK = 0xffffffff # type: ignore
CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI__SHIFT = 0 # type: ignore
REG_CP_LOAD_STATE6_0 = 0x00000000 # type: ignore
CP_LOAD_STATE6_0_DST_OFF__MASK = 0x00003fff # type: ignore
CP_LOAD_STATE6_0_DST_OFF__SHIFT = 0 # type: ignore
CP_LOAD_STATE6_0_STATE_TYPE__MASK = 0x0000c000 # type: ignore
CP_LOAD_STATE6_0_STATE_TYPE__SHIFT = 14 # type: ignore
CP_LOAD_STATE6_0_STATE_SRC__MASK = 0x00030000 # type: ignore
CP_LOAD_STATE6_0_STATE_SRC__SHIFT = 16 # type: ignore
CP_LOAD_STATE6_0_STATE_BLOCK__MASK = 0x003c0000 # type: ignore
CP_LOAD_STATE6_0_STATE_BLOCK__SHIFT = 18 # type: ignore
CP_LOAD_STATE6_0_NUM_UNIT__MASK = 0xffc00000 # type: ignore
CP_LOAD_STATE6_0_NUM_UNIT__SHIFT = 22 # type: ignore
REG_CP_LOAD_STATE6_1 = 0x00000001 # type: ignore
CP_LOAD_STATE6_1_EXT_SRC_ADDR__MASK = 0xfffffffc # type: ignore
CP_LOAD_STATE6_1_EXT_SRC_ADDR__SHIFT = 2 # type: ignore
REG_CP_LOAD_STATE6_2 = 0x00000002 # type: ignore
CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI__MASK = 0xffffffff # type: ignore
CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI__SHIFT = 0 # type: ignore
REG_CP_LOAD_STATE6_EXT_SRC_ADDR = 0x00000001 # type: ignore
REG_CP_DRAW_INDX_0 = 0x00000000 # type: ignore
CP_DRAW_INDX_0_VIZ_QUERY__MASK = 0xffffffff # type: ignore
CP_DRAW_INDX_0_VIZ_QUERY__SHIFT = 0 # type: ignore
REG_CP_DRAW_INDX_1 = 0x00000001 # type: ignore
CP_DRAW_INDX_1_PRIM_TYPE__MASK = 0x0000003f # type: ignore
CP_DRAW_INDX_1_PRIM_TYPE__SHIFT = 0 # type: ignore
CP_DRAW_INDX_1_SOURCE_SELECT__MASK = 0x000000c0 # type: ignore
CP_DRAW_INDX_1_SOURCE_SELECT__SHIFT = 6 # type: ignore
CP_DRAW_INDX_1_VIS_CULL__MASK = 0x00000600 # type: ignore
CP_DRAW_INDX_1_VIS_CULL__SHIFT = 9 # type: ignore
CP_DRAW_INDX_1_INDEX_SIZE__MASK = 0x00000800 # type: ignore
CP_DRAW_INDX_1_INDEX_SIZE__SHIFT = 11 # type: ignore
CP_DRAW_INDX_1_NOT_EOP = 0x00001000 # type: ignore
CP_DRAW_INDX_1_SMALL_INDEX = 0x00002000 # type: ignore
CP_DRAW_INDX_1_PRE_DRAW_INITIATOR_ENABLE = 0x00004000 # type: ignore
CP_DRAW_INDX_1_NUM_INSTANCES__MASK = 0xff000000 # type: ignore
CP_DRAW_INDX_1_NUM_INSTANCES__SHIFT = 24 # type: ignore
REG_CP_DRAW_INDX_2 = 0x00000002 # type: ignore
CP_DRAW_INDX_2_NUM_INDICES__MASK = 0xffffffff # type: ignore
CP_DRAW_INDX_2_NUM_INDICES__SHIFT = 0 # type: ignore
REG_CP_DRAW_INDX_3 = 0x00000003 # type: ignore
CP_DRAW_INDX_3_INDX_BASE__MASK = 0xffffffff # type: ignore
CP_DRAW_INDX_3_INDX_BASE__SHIFT = 0 # type: ignore
REG_CP_DRAW_INDX_4 = 0x00000004 # type: ignore
CP_DRAW_INDX_4_INDX_SIZE__MASK = 0xffffffff # type: ignore
CP_DRAW_INDX_4_INDX_SIZE__SHIFT = 0 # type: ignore
REG_CP_DRAW_INDX_2_0 = 0x00000000 # type: ignore
CP_DRAW_INDX_2_0_VIZ_QUERY__MASK = 0xffffffff # type: ignore
CP_DRAW_INDX_2_0_VIZ_QUERY__SHIFT = 0 # type: ignore
REG_CP_DRAW_INDX_2_1 = 0x00000001 # type: ignore
CP_DRAW_INDX_2_1_PRIM_TYPE__MASK = 0x0000003f # type: ignore
CP_DRAW_INDX_2_1_PRIM_TYPE__SHIFT = 0 # type: ignore
CP_DRAW_INDX_2_1_SOURCE_SELECT__MASK = 0x000000c0 # type: ignore
CP_DRAW_INDX_2_1_SOURCE_SELECT__SHIFT = 6 # type: ignore
CP_DRAW_INDX_2_1_VIS_CULL__MASK = 0x00000600 # type: ignore
CP_DRAW_INDX_2_1_VIS_CULL__SHIFT = 9 # type: ignore
CP_DRAW_INDX_2_1_INDEX_SIZE__MASK = 0x00000800 # type: ignore
CP_DRAW_INDX_2_1_INDEX_SIZE__SHIFT = 11 # type: ignore
CP_DRAW_INDX_2_1_NOT_EOP = 0x00001000 # type: ignore
CP_DRAW_INDX_2_1_SMALL_INDEX = 0x00002000 # type: ignore
CP_DRAW_INDX_2_1_PRE_DRAW_INITIATOR_ENABLE = 0x00004000 # type: ignore
CP_DRAW_INDX_2_1_NUM_INSTANCES__MASK = 0xff000000 # type: ignore
CP_DRAW_INDX_2_1_NUM_INSTANCES__SHIFT = 24 # type: ignore
REG_CP_DRAW_INDX_2_2 = 0x00000002 # type: ignore
CP_DRAW_INDX_2_2_NUM_INDICES__MASK = 0xffffffff # type: ignore
CP_DRAW_INDX_2_2_NUM_INDICES__SHIFT = 0 # type: ignore
REG_CP_DRAW_INDX_OFFSET_0 = 0x00000000 # type: ignore
CP_DRAW_INDX_OFFSET_0_PRIM_TYPE__MASK = 0x0000003f # type: ignore
CP_DRAW_INDX_OFFSET_0_PRIM_TYPE__SHIFT = 0 # type: ignore
CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__MASK = 0x000000c0 # type: ignore
CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__SHIFT = 6 # type: ignore
CP_DRAW_INDX_OFFSET_0_VIS_CULL__MASK = 0x00000300 # type: ignore
CP_DRAW_INDX_OFFSET_0_VIS_CULL__SHIFT = 8 # type: ignore
CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__MASK = 0x00000c00 # type: ignore
CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__SHIFT = 10 # type: ignore
CP_DRAW_INDX_OFFSET_0_PATCH_TYPE__MASK = 0x00003000 # type: ignore
CP_DRAW_INDX_OFFSET_0_PATCH_TYPE__SHIFT = 12 # type: ignore
CP_DRAW_INDX_OFFSET_0_GS_ENABLE = 0x00010000 # type: ignore
CP_DRAW_INDX_OFFSET_0_TESS_ENABLE = 0x00020000 # type: ignore
REG_CP_DRAW_INDX_OFFSET_1 = 0x00000001 # type: ignore
CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES__MASK = 0xffffffff # type: ignore
CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES__SHIFT = 0 # type: ignore
REG_CP_DRAW_INDX_OFFSET_2 = 0x00000002 # type: ignore
CP_DRAW_INDX_OFFSET_2_NUM_INDICES__MASK = 0xffffffff # type: ignore
CP_DRAW_INDX_OFFSET_2_NUM_INDICES__SHIFT = 0 # type: ignore
REG_CP_DRAW_INDX_OFFSET_3 = 0x00000003 # type: ignore
CP_DRAW_INDX_OFFSET_3_FIRST_INDX__MASK = 0xffffffff # type: ignore
CP_DRAW_INDX_OFFSET_3_FIRST_INDX__SHIFT = 0 # type: ignore
REG_A5XX_CP_DRAW_INDX_OFFSET_4 = 0x00000004 # type: ignore
A5XX_CP_DRAW_INDX_OFFSET_4_INDX_BASE_LO__MASK = 0xffffffff # type: ignore
A5XX_CP_DRAW_INDX_OFFSET_4_INDX_BASE_LO__SHIFT = 0 # type: ignore
REG_A5XX_CP_DRAW_INDX_OFFSET_5 = 0x00000005 # type: ignore
A5XX_CP_DRAW_INDX_OFFSET_5_INDX_BASE_HI__MASK = 0xffffffff # type: ignore
A5XX_CP_DRAW_INDX_OFFSET_5_INDX_BASE_HI__SHIFT = 0 # type: ignore
REG_A5XX_CP_DRAW_INDX_OFFSET_INDX_BASE = 0x00000004 # type: ignore
REG_A5XX_CP_DRAW_INDX_OFFSET_6 = 0x00000006 # type: ignore
A5XX_CP_DRAW_INDX_OFFSET_6_MAX_INDICES__MASK = 0xffffffff # type: ignore
A5XX_CP_DRAW_INDX_OFFSET_6_MAX_INDICES__SHIFT = 0 # type: ignore
REG_CP_DRAW_INDX_OFFSET_4 = 0x00000004 # type: ignore
CP_DRAW_INDX_OFFSET_4_INDX_BASE__MASK = 0xffffffff # type: ignore
CP_DRAW_INDX_OFFSET_4_INDX_BASE__SHIFT = 0 # type: ignore
REG_CP_DRAW_INDX_OFFSET_5 = 0x00000005 # type: ignore
CP_DRAW_INDX_OFFSET_5_INDX_SIZE__MASK = 0xffffffff # type: ignore
CP_DRAW_INDX_OFFSET_5_INDX_SIZE__SHIFT = 0 # type: ignore
REG_A4XX_CP_DRAW_INDIRECT_0 = 0x00000000 # type: ignore
A4XX_CP_DRAW_INDIRECT_0_PRIM_TYPE__MASK = 0x0000003f # type: ignore
A4XX_CP_DRAW_INDIRECT_0_PRIM_TYPE__SHIFT = 0 # type: ignore
A4XX_CP_DRAW_INDIRECT_0_SOURCE_SELECT__MASK = 0x000000c0 # type: ignore
A4XX_CP_DRAW_INDIRECT_0_SOURCE_SELECT__SHIFT = 6 # type: ignore
A4XX_CP_DRAW_INDIRECT_0_VIS_CULL__MASK = 0x00000300 # type: ignore
A4XX_CP_DRAW_INDIRECT_0_VIS_CULL__SHIFT = 8 # type: ignore
A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE__MASK = 0x00000c00 # type: ignore
A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE__SHIFT = 10 # type: ignore
A4XX_CP_DRAW_INDIRECT_0_PATCH_TYPE__MASK = 0x00003000 # type: ignore
A4XX_CP_DRAW_INDIRECT_0_PATCH_TYPE__SHIFT = 12 # type: ignore
A4XX_CP_DRAW_INDIRECT_0_GS_ENABLE = 0x00010000 # type: ignore
A4XX_CP_DRAW_INDIRECT_0_TESS_ENABLE = 0x00020000 # type: ignore
REG_A4XX_CP_DRAW_INDIRECT_1 = 0x00000001 # type: ignore
A4XX_CP_DRAW_INDIRECT_1_INDIRECT__MASK = 0xffffffff # type: ignore
A4XX_CP_DRAW_INDIRECT_1_INDIRECT__SHIFT = 0 # type: ignore
REG_A5XX_CP_DRAW_INDIRECT_1 = 0x00000001 # type: ignore
A5XX_CP_DRAW_INDIRECT_1_INDIRECT_LO__MASK = 0xffffffff # type: ignore
A5XX_CP_DRAW_INDIRECT_1_INDIRECT_LO__SHIFT = 0 # type: ignore
REG_A5XX_CP_DRAW_INDIRECT_2 = 0x00000002 # type: ignore
A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI__MASK = 0xffffffff # type: ignore
A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI__SHIFT = 0 # type: ignore
REG_A5XX_CP_DRAW_INDIRECT_INDIRECT = 0x00000001 # type: ignore
REG_A4XX_CP_DRAW_INDX_INDIRECT_0 = 0x00000000 # type: ignore
A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE__MASK = 0x0000003f # type: ignore
A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE__SHIFT = 0 # type: ignore
A4XX_CP_DRAW_INDX_INDIRECT_0_SOURCE_SELECT__MASK = 0x000000c0 # type: ignore
A4XX_CP_DRAW_INDX_INDIRECT_0_SOURCE_SELECT__SHIFT = 6 # type: ignore
A4XX_CP_DRAW_INDX_INDIRECT_0_VIS_CULL__MASK = 0x00000300 # type: ignore
A4XX_CP_DRAW_INDX_INDIRECT_0_VIS_CULL__SHIFT = 8 # type: ignore
A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE__MASK = 0x00000c00 # type: ignore
A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE__SHIFT = 10 # type: ignore
A4XX_CP_DRAW_INDX_INDIRECT_0_PATCH_TYPE__MASK = 0x00003000 # type: ignore
A4XX_CP_DRAW_INDX_INDIRECT_0_PATCH_TYPE__SHIFT = 12 # type: ignore
A4XX_CP_DRAW_INDX_INDIRECT_0_GS_ENABLE = 0x00010000 # type: ignore
A4XX_CP_DRAW_INDX_INDIRECT_0_TESS_ENABLE = 0x00020000 # type: ignore
REG_A4XX_CP_DRAW_INDX_INDIRECT_1 = 0x00000001 # type: ignore
A4XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE__MASK = 0xffffffff # type: ignore
A4XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE__SHIFT = 0 # type: ignore
REG_A4XX_CP_DRAW_INDX_INDIRECT_2 = 0x00000002 # type: ignore
A4XX_CP_DRAW_INDX_INDIRECT_2_INDX_SIZE__MASK = 0xffffffff # type: ignore
A4XX_CP_DRAW_INDX_INDIRECT_2_INDX_SIZE__SHIFT = 0 # type: ignore
REG_A4XX_CP_DRAW_INDX_INDIRECT_3 = 0x00000003 # type: ignore
A4XX_CP_DRAW_INDX_INDIRECT_3_INDIRECT__MASK = 0xffffffff # type: ignore
A4XX_CP_DRAW_INDX_INDIRECT_3_INDIRECT__SHIFT = 0 # type: ignore
REG_A5XX_CP_DRAW_INDX_INDIRECT_1 = 0x00000001 # type: ignore
A5XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE_LO__MASK = 0xffffffff # type: ignore
A5XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE_LO__SHIFT = 0 # type: ignore
REG_A5XX_CP_DRAW_INDX_INDIRECT_2 = 0x00000002 # type: ignore
A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI__MASK = 0xffffffff # type: ignore
A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI__SHIFT = 0 # type: ignore
REG_A5XX_CP_DRAW_INDX_INDIRECT_INDX_BASE = 0x00000001 # type: ignore
REG_A5XX_CP_DRAW_INDX_INDIRECT_3 = 0x00000003 # type: ignore
A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES__MASK = 0xffffffff # type: ignore
A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES__SHIFT = 0 # type: ignore
REG_A5XX_CP_DRAW_INDX_INDIRECT_4 = 0x00000004 # type: ignore
A5XX_CP_DRAW_INDX_INDIRECT_4_INDIRECT_LO__MASK = 0xffffffff # type: ignore
A5XX_CP_DRAW_INDX_INDIRECT_4_INDIRECT_LO__SHIFT = 0 # type: ignore
REG_A5XX_CP_DRAW_INDX_INDIRECT_5 = 0x00000005 # type: ignore
A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI__MASK = 0xffffffff # type: ignore
A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI__SHIFT = 0 # type: ignore
REG_A5XX_CP_DRAW_INDX_INDIRECT_INDIRECT = 0x00000004 # type: ignore
REG_A6XX_CP_DRAW_INDIRECT_MULTI_0 = 0x00000000 # type: ignore
A6XX_CP_DRAW_INDIRECT_MULTI_0_PRIM_TYPE__MASK = 0x0000003f # type: ignore
A6XX_CP_DRAW_INDIRECT_MULTI_0_PRIM_TYPE__SHIFT = 0 # type: ignore
A6XX_CP_DRAW_INDIRECT_MULTI_0_SOURCE_SELECT__MASK = 0x000000c0 # type: ignore
A6XX_CP_DRAW_INDIRECT_MULTI_0_SOURCE_SELECT__SHIFT = 6 # type: ignore
A6XX_CP_DRAW_INDIRECT_MULTI_0_VIS_CULL__MASK = 0x00000300 # type: ignore
A6XX_CP_DRAW_INDIRECT_MULTI_0_VIS_CULL__SHIFT = 8 # type: ignore
A6XX_CP_DRAW_INDIRECT_MULTI_0_INDEX_SIZE__MASK = 0x00000c00 # type: ignore
A6XX_CP_DRAW_INDIRECT_MULTI_0_INDEX_SIZE__SHIFT = 10 # type: ignore
A6XX_CP_DRAW_INDIRECT_MULTI_0_PATCH_TYPE__MASK = 0x00003000 # type: ignore
A6XX_CP_DRAW_INDIRECT_MULTI_0_PATCH_TYPE__SHIFT = 12 # type: ignore
A6XX_CP_DRAW_INDIRECT_MULTI_0_GS_ENABLE = 0x00010000 # type: ignore
A6XX_CP_DRAW_INDIRECT_MULTI_0_TESS_ENABLE = 0x00020000 # type: ignore
REG_A6XX_CP_DRAW_INDIRECT_MULTI_1 = 0x00000001 # type: ignore
A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE__MASK = 0x0000000f # type: ignore
A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE__SHIFT = 0 # type: ignore
A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF__MASK = 0x003fff00 # type: ignore
A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF__SHIFT = 8 # type: ignore
REG_A6XX_CP_DRAW_INDIRECT_MULTI_DRAW_COUNT = 0x00000002 # type: ignore
REG_INDIRECT_OP_NORMAL_CP_DRAW_INDIRECT_MULTI_INDIRECT = 0x00000003 # type: ignore
REG_INDIRECT_OP_NORMAL_CP_DRAW_INDIRECT_MULTI_STRIDE = 0x00000005 # type: ignore
REG_INDIRECT_OP_INDEXED_CP_DRAW_INDIRECT_MULTI_INDEX = 0x00000003 # type: ignore
REG_INDIRECT_OP_INDEXED_CP_DRAW_INDIRECT_MULTI_MAX_INDICES = 0x00000005 # type: ignore
REG_INDIRECT_OP_INDEXED_CP_DRAW_INDIRECT_MULTI_INDIRECT = 0x00000006 # type: ignore
REG_INDIRECT_OP_INDEXED_CP_DRAW_INDIRECT_MULTI_STRIDE = 0x00000008 # type: ignore
REG_INDIRECT_OP_INDIRECT_COUNT_CP_DRAW_INDIRECT_MULTI_INDIRECT = 0x00000003 # type: ignore
REG_INDIRECT_OP_INDIRECT_COUNT_CP_DRAW_INDIRECT_MULTI_INDIRECT_COUNT = 0x00000005 # type: ignore
REG_INDIRECT_OP_INDIRECT_COUNT_CP_DRAW_INDIRECT_MULTI_STRIDE = 0x00000007 # type: ignore
REG_INDIRECT_OP_INDIRECT_COUNT_INDEXED_CP_DRAW_INDIRECT_MULTI_INDEX = 0x00000003 # type: ignore
REG_INDIRECT_OP_INDIRECT_COUNT_INDEXED_CP_DRAW_INDIRECT_MULTI_MAX_INDICES = 0x00000005 # type: ignore
REG_INDIRECT_OP_INDIRECT_COUNT_INDEXED_CP_DRAW_INDIRECT_MULTI_INDIRECT = 0x00000006 # type: ignore
REG_INDIRECT_OP_INDIRECT_COUNT_INDEXED_CP_DRAW_INDIRECT_MULTI_INDIRECT_COUNT = 0x00000008 # type: ignore
REG_INDIRECT_OP_INDIRECT_COUNT_INDEXED_CP_DRAW_INDIRECT_MULTI_STRIDE = 0x0000000a # type: ignore
REG_CP_DRAW_AUTO_0 = 0x00000000 # type: ignore
CP_DRAW_AUTO_0_PRIM_TYPE__MASK = 0x0000003f # type: ignore
CP_DRAW_AUTO_0_PRIM_TYPE__SHIFT = 0 # type: ignore
CP_DRAW_AUTO_0_SOURCE_SELECT__MASK = 0x000000c0 # type: ignore
CP_DRAW_AUTO_0_SOURCE_SELECT__SHIFT = 6 # type: ignore
CP_DRAW_AUTO_0_VIS_CULL__MASK = 0x00000300 # type: ignore
CP_DRAW_AUTO_0_VIS_CULL__SHIFT = 8 # type: ignore
CP_DRAW_AUTO_0_INDEX_SIZE__MASK = 0x00000c00 # type: ignore
CP_DRAW_AUTO_0_INDEX_SIZE__SHIFT = 10 # type: ignore
CP_DRAW_AUTO_0_PATCH_TYPE__MASK = 0x00003000 # type: ignore
CP_DRAW_AUTO_0_PATCH_TYPE__SHIFT = 12 # type: ignore
CP_DRAW_AUTO_0_GS_ENABLE = 0x00010000 # type: ignore
CP_DRAW_AUTO_0_TESS_ENABLE = 0x00020000 # type: ignore
REG_CP_DRAW_AUTO_1 = 0x00000001 # type: ignore
CP_DRAW_AUTO_1_NUM_INSTANCES__MASK = 0xffffffff # type: ignore
CP_DRAW_AUTO_1_NUM_INSTANCES__SHIFT = 0 # type: ignore
REG_CP_DRAW_AUTO_NUM_VERTICES_BASE = 0x00000002 # type: ignore
REG_CP_DRAW_AUTO_4 = 0x00000004 # type: ignore
CP_DRAW_AUTO_4_NUM_VERTICES_OFFSET__MASK = 0xffffffff # type: ignore
CP_DRAW_AUTO_4_NUM_VERTICES_OFFSET__SHIFT = 0 # type: ignore
REG_CP_DRAW_AUTO_5 = 0x00000005 # type: ignore
CP_DRAW_AUTO_5_STRIDE__MASK = 0xffffffff # type: ignore
CP_DRAW_AUTO_5_STRIDE__SHIFT = 0 # type: ignore
REG_CP_DRAW_PRED_ENABLE_GLOBAL_0 = 0x00000000 # type: ignore
CP_DRAW_PRED_ENABLE_GLOBAL_0_ENABLE = 0x00000001 # type: ignore
REG_CP_DRAW_PRED_ENABLE_LOCAL_0 = 0x00000000 # type: ignore
CP_DRAW_PRED_ENABLE_LOCAL_0_ENABLE = 0x00000001 # type: ignore
REG_CP_DRAW_PRED_SET_0 = 0x00000000 # type: ignore
CP_DRAW_PRED_SET_0_SRC__MASK = 0x000000f0 # type: ignore
CP_DRAW_PRED_SET_0_SRC__SHIFT = 4 # type: ignore
CP_DRAW_PRED_SET_0_TEST__MASK = 0x00000100 # type: ignore
CP_DRAW_PRED_SET_0_TEST__SHIFT = 8 # type: ignore
REG_CP_DRAW_PRED_SET_MEM_ADDR = 0x00000001 # type: ignore
REG_CP_SET_DRAW_STATE_ = lambda i0: (0x00000000 + 0x3*i0 ) # type: ignore
CP_SET_DRAW_STATE__0_COUNT__MASK = 0x0000ffff # type: ignore
CP_SET_DRAW_STATE__0_COUNT__SHIFT = 0 # type: ignore
CP_SET_DRAW_STATE__0_DIRTY = 0x00010000 # type: ignore
CP_SET_DRAW_STATE__0_DISABLE = 0x00020000 # type: ignore
CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS = 0x00040000 # type: ignore
CP_SET_DRAW_STATE__0_LOAD_IMMED = 0x00080000 # type: ignore
CP_SET_DRAW_STATE__0_BINNING = 0x00100000 # type: ignore
CP_SET_DRAW_STATE__0_GMEM = 0x00200000 # type: ignore
CP_SET_DRAW_STATE__0_SYSMEM = 0x00400000 # type: ignore
CP_SET_DRAW_STATE__0_GROUP_ID__MASK = 0x1f000000 # type: ignore
CP_SET_DRAW_STATE__0_GROUP_ID__SHIFT = 24 # type: ignore
CP_SET_DRAW_STATE__1_ADDR_LO__MASK = 0xffffffff # type: ignore
CP_SET_DRAW_STATE__1_ADDR_LO__SHIFT = 0 # type: ignore
CP_SET_DRAW_STATE__2_ADDR_HI__MASK = 0xffffffff # type: ignore
CP_SET_DRAW_STATE__2_ADDR_HI__SHIFT = 0 # type: ignore
REG_CP_SET_BIN_0 = 0x00000000 # type: ignore
REG_CP_SET_BIN_1 = 0x00000001 # type: ignore
CP_SET_BIN_1_X1__MASK = 0x0000ffff # type: ignore
CP_SET_BIN_1_X1__SHIFT = 0 # type: ignore
CP_SET_BIN_1_Y1__MASK = 0xffff0000 # type: ignore
CP_SET_BIN_1_Y1__SHIFT = 16 # type: ignore
REG_CP_SET_BIN_2 = 0x00000002 # type: ignore
CP_SET_BIN_2_X2__MASK = 0x0000ffff # type: ignore
CP_SET_BIN_2_X2__SHIFT = 0 # type: ignore
CP_SET_BIN_2_Y2__MASK = 0xffff0000 # type: ignore
CP_SET_BIN_2_Y2__SHIFT = 16 # type: ignore
REG_CP_SET_BIN_DATA_0 = 0x00000000 # type: ignore
CP_SET_BIN_DATA_0_BIN_DATA_ADDR__MASK = 0xffffffff # type: ignore
CP_SET_BIN_DATA_0_BIN_DATA_ADDR__SHIFT = 0 # type: ignore
REG_CP_SET_BIN_DATA_1 = 0x00000001 # type: ignore
CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__MASK = 0xffffffff # type: ignore
CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__SHIFT = 0 # type: ignore
REG_CP_SET_BIN_DATA5_0 = 0x00000000 # type: ignore
CP_SET_BIN_DATA5_0_VSC_MASK__MASK = 0x0000ffff # type: ignore
CP_SET_BIN_DATA5_0_VSC_MASK__SHIFT = 0 # type: ignore
CP_SET_BIN_DATA5_0_VSC_SIZE__MASK = 0x003f0000 # type: ignore
CP_SET_BIN_DATA5_0_VSC_SIZE__SHIFT = 16 # type: ignore
CP_SET_BIN_DATA5_0_VSC_N__MASK = 0x07c00000 # type: ignore
CP_SET_BIN_DATA5_0_VSC_N__SHIFT = 22 # type: ignore
CP_SET_BIN_DATA5_0_ABS_MASK__MASK = 0x10000000 # type: ignore
CP_SET_BIN_DATA5_0_ABS_MASK__SHIFT = 28 # type: ignore
REG_NO_ABS_MASK_CP_SET_BIN_DATA5_1 = 0x00000001 # type: ignore
NO_ABS_MASK_CP_SET_BIN_DATA5_1_BIN_DATA_ADDR_LO__MASK = 0xffffffff # type: ignore
NO_ABS_MASK_CP_SET_BIN_DATA5_1_BIN_DATA_ADDR_LO__SHIFT = 0 # type: ignore
REG_NO_ABS_MASK_CP_SET_BIN_DATA5_2 = 0x00000002 # type: ignore
NO_ABS_MASK_CP_SET_BIN_DATA5_2_BIN_DATA_ADDR_HI__MASK = 0xffffffff # type: ignore
NO_ABS_MASK_CP_SET_BIN_DATA5_2_BIN_DATA_ADDR_HI__SHIFT = 0 # type: ignore
REG_NO_ABS_MASK_CP_SET_BIN_DATA5_3 = 0x00000003 # type: ignore
NO_ABS_MASK_CP_SET_BIN_DATA5_3_BIN_SIZE_ADDRESS_LO__MASK = 0xffffffff # type: ignore
NO_ABS_MASK_CP_SET_BIN_DATA5_3_BIN_SIZE_ADDRESS_LO__SHIFT = 0 # type: ignore
REG_NO_ABS_MASK_CP_SET_BIN_DATA5_4 = 0x00000004 # type: ignore
NO_ABS_MASK_CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_HI__MASK = 0xffffffff # type: ignore
NO_ABS_MASK_CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_HI__SHIFT = 0 # type: ignore
REG_NO_ABS_MASK_CP_SET_BIN_DATA5_5 = 0x00000005 # type: ignore
NO_ABS_MASK_CP_SET_BIN_DATA5_5_BIN_PRIM_STRM_LO__MASK = 0xffffffff # type: ignore
NO_ABS_MASK_CP_SET_BIN_DATA5_5_BIN_PRIM_STRM_LO__SHIFT = 0 # type: ignore
REG_NO_ABS_MASK_CP_SET_BIN_DATA5_6 = 0x00000006 # type: ignore
NO_ABS_MASK_CP_SET_BIN_DATA5_6_BIN_PRIM_STRM_HI__MASK = 0xffffffff # type: ignore
NO_ABS_MASK_CP_SET_BIN_DATA5_6_BIN_PRIM_STRM_HI__SHIFT = 0 # type: ignore
REG_NO_ABS_MASK_CP_SET_BIN_DATA5_7 = 0x00000007 # type: ignore
REG_NO_ABS_MASK_CP_SET_BIN_DATA5_9 = 0x00000009 # type: ignore
REG_ABS_MASK_CP_SET_BIN_DATA5_ABS_MASK = 0x00000001 # type: ignore
REG_ABS_MASK_CP_SET_BIN_DATA5_2 = 0x00000002 # type: ignore
ABS_MASK_CP_SET_BIN_DATA5_2_BIN_DATA_ADDR_LO__MASK = 0xffffffff # type: ignore
ABS_MASK_CP_SET_BIN_DATA5_2_BIN_DATA_ADDR_LO__SHIFT = 0 # type: ignore
REG_ABS_MASK_CP_SET_BIN_DATA5_3 = 0x00000003 # type: ignore
ABS_MASK_CP_SET_BIN_DATA5_3_BIN_DATA_ADDR_HI__MASK = 0xffffffff # type: ignore
ABS_MASK_CP_SET_BIN_DATA5_3_BIN_DATA_ADDR_HI__SHIFT = 0 # type: ignore
REG_ABS_MASK_CP_SET_BIN_DATA5_4 = 0x00000004 # type: ignore
ABS_MASK_CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_LO__MASK = 0xffffffff # type: ignore
ABS_MASK_CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_LO__SHIFT = 0 # type: ignore
REG_ABS_MASK_CP_SET_BIN_DATA5_5 = 0x00000005 # type: ignore
ABS_MASK_CP_SET_BIN_DATA5_5_BIN_SIZE_ADDRESS_HI__MASK = 0xffffffff # type: ignore
ABS_MASK_CP_SET_BIN_DATA5_5_BIN_SIZE_ADDRESS_HI__SHIFT = 0 # type: ignore
REG_ABS_MASK_CP_SET_BIN_DATA5_6 = 0x00000006 # type: ignore
ABS_MASK_CP_SET_BIN_DATA5_6_BIN_PRIM_STRM_LO__MASK = 0xffffffff # type: ignore
ABS_MASK_CP_SET_BIN_DATA5_6_BIN_PRIM_STRM_LO__SHIFT = 0 # type: ignore
REG_ABS_MASK_CP_SET_BIN_DATA5_7 = 0x00000007 # type: ignore
ABS_MASK_CP_SET_BIN_DATA5_7_BIN_PRIM_STRM_HI__MASK = 0xffffffff # type: ignore
ABS_MASK_CP_SET_BIN_DATA5_7_BIN_PRIM_STRM_HI__SHIFT = 0 # type: ignore
REG_ABS_MASK_CP_SET_BIN_DATA5_8 = 0x00000008 # type: ignore
REG_ABS_MASK_CP_SET_BIN_DATA5_10 = 0x0000000a # type: ignore
REG_CP_SET_BIN_DATA5_OFFSET_0 = 0x00000000 # type: ignore
CP_SET_BIN_DATA5_OFFSET_0_VSC_MASK__MASK = 0x0000ffff # type: ignore
CP_SET_BIN_DATA5_OFFSET_0_VSC_MASK__SHIFT = 0 # type: ignore
CP_SET_BIN_DATA5_OFFSET_0_VSC_SIZE__MASK = 0x003f0000 # type: ignore
CP_SET_BIN_DATA5_OFFSET_0_VSC_SIZE__SHIFT = 16 # type: ignore
CP_SET_BIN_DATA5_OFFSET_0_VSC_N__MASK = 0x07c00000 # type: ignore
CP_SET_BIN_DATA5_OFFSET_0_VSC_N__SHIFT = 22 # type: ignore
CP_SET_BIN_DATA5_OFFSET_0_ABS_MASK__MASK = 0x10000000 # type: ignore
CP_SET_BIN_DATA5_OFFSET_0_ABS_MASK__SHIFT = 28 # type: ignore
REG_NO_ABS_MASK_CP_SET_BIN_DATA5_OFFSET_1 = 0x00000001 # type: ignore
NO_ABS_MASK_CP_SET_BIN_DATA5_OFFSET_1_BIN_DATA_OFFSET__MASK = 0xffffffff # type: ignore
NO_ABS_MASK_CP_SET_BIN_DATA5_OFFSET_1_BIN_DATA_OFFSET__SHIFT = 0 # type: ignore
REG_NO_ABS_MASK_CP_SET_BIN_DATA5_OFFSET_2 = 0x00000002 # type: ignore
NO_ABS_MASK_CP_SET_BIN_DATA5_OFFSET_2_BIN_SIZE_OFFSET__MASK = 0xffffffff # type: ignore
NO_ABS_MASK_CP_SET_BIN_DATA5_OFFSET_2_BIN_SIZE_OFFSET__SHIFT = 0 # type: ignore
REG_NO_ABS_MASK_CP_SET_BIN_DATA5_OFFSET_3 = 0x00000003 # type: ignore
NO_ABS_MASK_CP_SET_BIN_DATA5_OFFSET_3_BIN_DATA2_OFFSET__MASK = 0xffffffff # type: ignore
NO_ABS_MASK_CP_SET_BIN_DATA5_OFFSET_3_BIN_DATA2_OFFSET__SHIFT = 0 # type: ignore
REG_ABS_MASK_CP_SET_BIN_DATA5_OFFSET_ABS_MASK = 0x00000001 # type: ignore
REG_ABS_MASK_CP_SET_BIN_DATA5_OFFSET_2 = 0x00000002 # type: ignore
ABS_MASK_CP_SET_BIN_DATA5_OFFSET_2_BIN_DATA_OFFSET__MASK = 0xffffffff # type: ignore
ABS_MASK_CP_SET_BIN_DATA5_OFFSET_2_BIN_DATA_OFFSET__SHIFT = 0 # type: ignore
REG_ABS_MASK_CP_SET_BIN_DATA5_OFFSET_3 = 0x00000003 # type: ignore
ABS_MASK_CP_SET_BIN_DATA5_OFFSET_3_BIN_SIZE_OFFSET__MASK = 0xffffffff # type: ignore
ABS_MASK_CP_SET_BIN_DATA5_OFFSET_3_BIN_SIZE_OFFSET__SHIFT = 0 # type: ignore
REG_ABS_MASK_CP_SET_BIN_DATA5_OFFSET_4 = 0x00000004 # type: ignore
ABS_MASK_CP_SET_BIN_DATA5_OFFSET_4_BIN_DATA2_OFFSET__MASK = 0xffffffff # type: ignore
ABS_MASK_CP_SET_BIN_DATA5_OFFSET_4_BIN_DATA2_OFFSET__SHIFT = 0 # type: ignore
REG_CP_REG_RMW_0 = 0x00000000 # type: ignore
CP_REG_RMW_0_DST_REG__MASK = 0x0003ffff # type: ignore
CP_REG_RMW_0_DST_REG__SHIFT = 0 # type: ignore
CP_REG_RMW_0_DST_SCRATCH = 0x00080000 # type: ignore
CP_REG_RMW_0_SKIP_WAIT_FOR_ME = 0x00800000 # type: ignore
CP_REG_RMW_0_ROTATE__MASK = 0x1f000000 # type: ignore
CP_REG_RMW_0_ROTATE__SHIFT = 24 # type: ignore
CP_REG_RMW_0_SRC1_ADD = 0x20000000 # type: ignore
CP_REG_RMW_0_SRC1_IS_REG = 0x40000000 # type: ignore
CP_REG_RMW_0_SRC0_IS_REG = 0x80000000 # type: ignore
REG_CP_REG_RMW_1 = 0x00000001 # type: ignore
CP_REG_RMW_1_SRC0__MASK = 0xffffffff # type: ignore
CP_REG_RMW_1_SRC0__SHIFT = 0 # type: ignore
REG_CP_REG_RMW_2 = 0x00000002 # type: ignore
CP_REG_RMW_2_SRC1__MASK = 0xffffffff # type: ignore
CP_REG_RMW_2_SRC1__SHIFT = 0 # type: ignore
REG_CP_REG_TO_MEM_0 = 0x00000000 # type: ignore
CP_REG_TO_MEM_0_REG__MASK = 0x0003ffff # type: ignore
CP_REG_TO_MEM_0_REG__SHIFT = 0 # type: ignore
CP_REG_TO_MEM_0_CNT__MASK = 0x3ffc0000 # type: ignore
CP_REG_TO_MEM_0_CNT__SHIFT = 18 # type: ignore
CP_REG_TO_MEM_0_64B = 0x40000000 # type: ignore
CP_REG_TO_MEM_0_ACCUMULATE = 0x80000000 # type: ignore
REG_CP_REG_TO_MEM_1 = 0x00000001 # type: ignore
CP_REG_TO_MEM_1_DEST__MASK = 0xffffffff # type: ignore
CP_REG_TO_MEM_1_DEST__SHIFT = 0 # type: ignore
REG_CP_REG_TO_MEM_2 = 0x00000002 # type: ignore
CP_REG_TO_MEM_2_DEST_HI__MASK = 0xffffffff # type: ignore
CP_REG_TO_MEM_2_DEST_HI__SHIFT = 0 # type: ignore
REG_CP_REG_TO_MEM_OFFSET_REG_0 = 0x00000000 # type: ignore
CP_REG_TO_MEM_OFFSET_REG_0_REG__MASK = 0x0003ffff # type: ignore
CP_REG_TO_MEM_OFFSET_REG_0_REG__SHIFT = 0 # type: ignore
CP_REG_TO_MEM_OFFSET_REG_0_CNT__MASK = 0x3ffc0000 # type: ignore
CP_REG_TO_MEM_OFFSET_REG_0_CNT__SHIFT = 18 # type: ignore
CP_REG_TO_MEM_OFFSET_REG_0_64B = 0x40000000 # type: ignore
CP_REG_TO_MEM_OFFSET_REG_0_ACCUMULATE = 0x80000000 # type: ignore
REG_CP_REG_TO_MEM_OFFSET_REG_1 = 0x00000001 # type: ignore
CP_REG_TO_MEM_OFFSET_REG_1_DEST__MASK = 0xffffffff # type: ignore
CP_REG_TO_MEM_OFFSET_REG_1_DEST__SHIFT = 0 # type: ignore
REG_CP_REG_TO_MEM_OFFSET_REG_2 = 0x00000002 # type: ignore
CP_REG_TO_MEM_OFFSET_REG_2_DEST_HI__MASK = 0xffffffff # type: ignore
CP_REG_TO_MEM_OFFSET_REG_2_DEST_HI__SHIFT = 0 # type: ignore
REG_CP_REG_TO_MEM_OFFSET_REG_3 = 0x00000003 # type: ignore
CP_REG_TO_MEM_OFFSET_REG_3_OFFSET0__MASK = 0x0003ffff # type: ignore
CP_REG_TO_MEM_OFFSET_REG_3_OFFSET0__SHIFT = 0 # type: ignore
CP_REG_TO_MEM_OFFSET_REG_3_OFFSET0_SCRATCH = 0x00080000 # type: ignore
REG_CP_REG_TO_MEM_OFFSET_MEM_0 = 0x00000000 # type: ignore
CP_REG_TO_MEM_OFFSET_MEM_0_REG__MASK = 0x0003ffff # type: ignore
CP_REG_TO_MEM_OFFSET_MEM_0_REG__SHIFT = 0 # type: ignore
CP_REG_TO_MEM_OFFSET_MEM_0_CNT__MASK = 0x3ffc0000 # type: ignore
CP_REG_TO_MEM_OFFSET_MEM_0_CNT__SHIFT = 18 # type: ignore
CP_REG_TO_MEM_OFFSET_MEM_0_64B = 0x40000000 # type: ignore
CP_REG_TO_MEM_OFFSET_MEM_0_ACCUMULATE = 0x80000000 # type: ignore
REG_CP_REG_TO_MEM_OFFSET_MEM_1 = 0x00000001 # type: ignore
CP_REG_TO_MEM_OFFSET_MEM_1_DEST__MASK = 0xffffffff # type: ignore
CP_REG_TO_MEM_OFFSET_MEM_1_DEST__SHIFT = 0 # type: ignore
REG_CP_REG_TO_MEM_OFFSET_MEM_2 = 0x00000002 # type: ignore
CP_REG_TO_MEM_OFFSET_MEM_2_DEST_HI__MASK = 0xffffffff # type: ignore
CP_REG_TO_MEM_OFFSET_MEM_2_DEST_HI__SHIFT = 0 # type: ignore
REG_CP_REG_TO_MEM_OFFSET_MEM_3 = 0x00000003 # type: ignore
CP_REG_TO_MEM_OFFSET_MEM_3_OFFSET_LO__MASK = 0xffffffff # type: ignore
CP_REG_TO_MEM_OFFSET_MEM_3_OFFSET_LO__SHIFT = 0 # type: ignore
REG_CP_REG_TO_MEM_OFFSET_MEM_4 = 0x00000004 # type: ignore
CP_REG_TO_MEM_OFFSET_MEM_4_OFFSET_HI__MASK = 0xffffffff # type: ignore
CP_REG_TO_MEM_OFFSET_MEM_4_OFFSET_HI__SHIFT = 0 # type: ignore
REG_CP_MEM_TO_REG_0 = 0x00000000 # type: ignore
CP_MEM_TO_REG_0_REG__MASK = 0x0003ffff # type: ignore
CP_MEM_TO_REG_0_REG__SHIFT = 0 # type: ignore
CP_MEM_TO_REG_0_CNT__MASK = 0x3ff80000 # type: ignore
CP_MEM_TO_REG_0_CNT__SHIFT = 19 # type: ignore
CP_MEM_TO_REG_0_SHIFT_BY_2 = 0x40000000 # type: ignore
CP_MEM_TO_REG_0_UNK31 = 0x80000000 # type: ignore
REG_CP_MEM_TO_REG_1 = 0x00000001 # type: ignore
CP_MEM_TO_REG_1_SRC__MASK = 0xffffffff # type: ignore
CP_MEM_TO_REG_1_SRC__SHIFT = 0 # type: ignore
REG_CP_MEM_TO_REG_2 = 0x00000002 # type: ignore
CP_MEM_TO_REG_2_SRC_HI__MASK = 0xffffffff # type: ignore
CP_MEM_TO_REG_2_SRC_HI__SHIFT = 0 # type: ignore
REG_CP_MEM_TO_MEM_0 = 0x00000000 # type: ignore
CP_MEM_TO_MEM_0_NEG_A = 0x00000001 # type: ignore
CP_MEM_TO_MEM_0_NEG_B = 0x00000002 # type: ignore
CP_MEM_TO_MEM_0_NEG_C = 0x00000004 # type: ignore
CP_MEM_TO_MEM_0_DOUBLE = 0x20000000 # type: ignore
CP_MEM_TO_MEM_0_WAIT_FOR_MEM_WRITES = 0x40000000 # type: ignore
CP_MEM_TO_MEM_0_UNK31 = 0x80000000 # type: ignore
REG_CP_MEMCPY_0 = 0x00000000 # type: ignore
CP_MEMCPY_0_DWORDS__MASK = 0xffffffff # type: ignore
CP_MEMCPY_0_DWORDS__SHIFT = 0 # type: ignore
REG_CP_MEMCPY_1 = 0x00000001 # type: ignore
CP_MEMCPY_1_SRC_LO__MASK = 0xffffffff # type: ignore
CP_MEMCPY_1_SRC_LO__SHIFT = 0 # type: ignore
REG_CP_MEMCPY_2 = 0x00000002 # type: ignore
CP_MEMCPY_2_SRC_HI__MASK = 0xffffffff # type: ignore
CP_MEMCPY_2_SRC_HI__SHIFT = 0 # type: ignore
REG_CP_MEMCPY_3 = 0x00000003 # type: ignore
CP_MEMCPY_3_DST_LO__MASK = 0xffffffff # type: ignore
CP_MEMCPY_3_DST_LO__SHIFT = 0 # type: ignore
REG_CP_MEMCPY_4 = 0x00000004 # type: ignore
CP_MEMCPY_4_DST_HI__MASK = 0xffffffff # type: ignore
CP_MEMCPY_4_DST_HI__SHIFT = 0 # type: ignore
REG_CP_REG_TO_SCRATCH_0 = 0x00000000 # type: ignore
CP_REG_TO_SCRATCH_0_REG__MASK = 0x0003ffff # type: ignore
CP_REG_TO_SCRATCH_0_REG__SHIFT = 0 # type: ignore
CP_REG_TO_SCRATCH_0_SCRATCH__MASK = 0x00700000 # type: ignore
CP_REG_TO_SCRATCH_0_SCRATCH__SHIFT = 20 # type: ignore
CP_REG_TO_SCRATCH_0_CNT__MASK = 0x07000000 # type: ignore
CP_REG_TO_SCRATCH_0_CNT__SHIFT = 24 # type: ignore
CP_REG_TO_SCRATCH_0_SKIP_WAIT_FOR_ME = 0x08000000 # type: ignore
REG_CP_SCRATCH_TO_REG_0 = 0x00000000 # type: ignore
CP_SCRATCH_TO_REG_0_REG__MASK = 0x0003ffff # type: ignore
CP_SCRATCH_TO_REG_0_REG__SHIFT = 0 # type: ignore
CP_SCRATCH_TO_REG_0_UNK18 = 0x00040000 # type: ignore
CP_SCRATCH_TO_REG_0_SCRATCH__MASK = 0x00700000 # type: ignore
CP_SCRATCH_TO_REG_0_SCRATCH__SHIFT = 20 # type: ignore
CP_SCRATCH_TO_REG_0_CNT__MASK = 0x07000000 # type: ignore
CP_SCRATCH_TO_REG_0_CNT__SHIFT = 24 # type: ignore
REG_CP_SCRATCH_WRITE_0 = 0x00000000 # type: ignore
CP_SCRATCH_WRITE_0_SCRATCH__MASK = 0x00700000 # type: ignore
CP_SCRATCH_WRITE_0_SCRATCH__SHIFT = 20 # type: ignore
REG_CP_MEM_WRITE_0 = 0x00000000 # type: ignore
CP_MEM_WRITE_0_ADDR_LO__MASK = 0xffffffff # type: ignore
CP_MEM_WRITE_0_ADDR_LO__SHIFT = 0 # type: ignore
REG_CP_MEM_WRITE_1 = 0x00000001 # type: ignore
CP_MEM_WRITE_1_ADDR_HI__MASK = 0xffffffff # type: ignore
CP_MEM_WRITE_1_ADDR_HI__SHIFT = 0 # type: ignore
REG_CP_COND_WRITE_0 = 0x00000000 # type: ignore
CP_COND_WRITE_0_FUNCTION__MASK = 0x00000007 # type: ignore
CP_COND_WRITE_0_FUNCTION__SHIFT = 0 # type: ignore
CP_COND_WRITE_0_POLL_MEMORY = 0x00000010 # type: ignore
CP_COND_WRITE_0_WRITE_MEMORY = 0x00000100 # type: ignore
REG_CP_COND_WRITE_1 = 0x00000001 # type: ignore
CP_COND_WRITE_1_POLL_ADDR__MASK = 0xffffffff # type: ignore
CP_COND_WRITE_1_POLL_ADDR__SHIFT = 0 # type: ignore
REG_CP_COND_WRITE_2 = 0x00000002 # type: ignore
CP_COND_WRITE_2_REF__MASK = 0xffffffff # type: ignore
CP_COND_WRITE_2_REF__SHIFT = 0 # type: ignore
REG_CP_COND_WRITE_3 = 0x00000003 # type: ignore
CP_COND_WRITE_3_MASK__MASK = 0xffffffff # type: ignore
CP_COND_WRITE_3_MASK__SHIFT = 0 # type: ignore
REG_CP_COND_WRITE_4 = 0x00000004 # type: ignore
CP_COND_WRITE_4_WRITE_ADDR__MASK = 0xffffffff # type: ignore
CP_COND_WRITE_4_WRITE_ADDR__SHIFT = 0 # type: ignore
REG_CP_COND_WRITE_5 = 0x00000005 # type: ignore
CP_COND_WRITE_5_WRITE_DATA__MASK = 0xffffffff # type: ignore
CP_COND_WRITE_5_WRITE_DATA__SHIFT = 0 # type: ignore
REG_CP_COND_WRITE5_0 = 0x00000000 # type: ignore
CP_COND_WRITE5_0_FUNCTION__MASK = 0x00000007 # type: ignore
CP_COND_WRITE5_0_FUNCTION__SHIFT = 0 # type: ignore
CP_COND_WRITE5_0_SIGNED_COMPARE = 0x00000008 # type: ignore
CP_COND_WRITE5_0_POLL__MASK = 0x00000030 # type: ignore
CP_COND_WRITE5_0_POLL__SHIFT = 4 # type: ignore
CP_COND_WRITE5_0_WRITE_MEMORY = 0x00000100 # type: ignore
REG_CP_COND_WRITE5_1 = 0x00000001 # type: ignore
CP_COND_WRITE5_1_POLL_ADDR_LO__MASK = 0xffffffff # type: ignore
CP_COND_WRITE5_1_POLL_ADDR_LO__SHIFT = 0 # type: ignore
REG_CP_COND_WRITE5_2 = 0x00000002 # type: ignore
CP_COND_WRITE5_2_POLL_ADDR_HI__MASK = 0xffffffff # type: ignore
CP_COND_WRITE5_2_POLL_ADDR_HI__SHIFT = 0 # type: ignore
REG_CP_COND_WRITE5_3 = 0x00000003 # type: ignore
CP_COND_WRITE5_3_REF__MASK = 0xffffffff # type: ignore
CP_COND_WRITE5_3_REF__SHIFT = 0 # type: ignore
REG_CP_COND_WRITE5_4 = 0x00000004 # type: ignore
CP_COND_WRITE5_4_MASK__MASK = 0xffffffff # type: ignore
CP_COND_WRITE5_4_MASK__SHIFT = 0 # type: ignore
REG_CP_COND_WRITE5_5 = 0x00000005 # type: ignore
CP_COND_WRITE5_5_WRITE_ADDR_LO__MASK = 0xffffffff # type: ignore
CP_COND_WRITE5_5_WRITE_ADDR_LO__SHIFT = 0 # type: ignore
REG_CP_COND_WRITE5_6 = 0x00000006 # type: ignore
CP_COND_WRITE5_6_WRITE_ADDR_HI__MASK = 0xffffffff # type: ignore
CP_COND_WRITE5_6_WRITE_ADDR_HI__SHIFT = 0 # type: ignore
REG_CP_COND_WRITE5_7 = 0x00000007 # type: ignore
CP_COND_WRITE5_7_WRITE_DATA__MASK = 0xffffffff # type: ignore
CP_COND_WRITE5_7_WRITE_DATA__SHIFT = 0 # type: ignore
REG_CP_WAIT_MEM_GTE_0 = 0x00000000 # type: ignore
CP_WAIT_MEM_GTE_0_RESERVED__MASK = 0xffffffff # type: ignore
CP_WAIT_MEM_GTE_0_RESERVED__SHIFT = 0 # type: ignore
REG_CP_WAIT_MEM_GTE_1 = 0x00000001 # type: ignore
CP_WAIT_MEM_GTE_1_POLL_ADDR_LO__MASK = 0xffffffff # type: ignore
CP_WAIT_MEM_GTE_1_POLL_ADDR_LO__SHIFT = 0 # type: ignore
REG_CP_WAIT_MEM_GTE_2 = 0x00000002 # type: ignore
CP_WAIT_MEM_GTE_2_POLL_ADDR_HI__MASK = 0xffffffff # type: ignore
CP_WAIT_MEM_GTE_2_POLL_ADDR_HI__SHIFT = 0 # type: ignore
REG_CP_WAIT_MEM_GTE_3 = 0x00000003 # type: ignore
CP_WAIT_MEM_GTE_3_REF__MASK = 0xffffffff # type: ignore
CP_WAIT_MEM_GTE_3_REF__SHIFT = 0 # type: ignore
REG_CP_WAIT_REG_MEM_0 = 0x00000000 # type: ignore
CP_WAIT_REG_MEM_0_FUNCTION__MASK = 0x00000007 # type: ignore
CP_WAIT_REG_MEM_0_FUNCTION__SHIFT = 0 # type: ignore
CP_WAIT_REG_MEM_0_SIGNED_COMPARE = 0x00000008 # type: ignore
CP_WAIT_REG_MEM_0_POLL__MASK = 0x00000030 # type: ignore
CP_WAIT_REG_MEM_0_POLL__SHIFT = 4 # type: ignore
CP_WAIT_REG_MEM_0_WRITE_MEMORY = 0x00000100 # type: ignore
REG_CP_WAIT_REG_MEM_1 = 0x00000001 # type: ignore
CP_WAIT_REG_MEM_1_POLL_ADDR_LO__MASK = 0xffffffff # type: ignore
CP_WAIT_REG_MEM_1_POLL_ADDR_LO__SHIFT = 0 # type: ignore
REG_CP_WAIT_REG_MEM_2 = 0x00000002 # type: ignore
CP_WAIT_REG_MEM_2_POLL_ADDR_HI__MASK = 0xffffffff # type: ignore
CP_WAIT_REG_MEM_2_POLL_ADDR_HI__SHIFT = 0 # type: ignore
REG_CP_WAIT_REG_MEM_3 = 0x00000003 # type: ignore
CP_WAIT_REG_MEM_3_REF__MASK = 0xffffffff # type: ignore
CP_WAIT_REG_MEM_3_REF__SHIFT = 0 # type: ignore
REG_CP_WAIT_REG_MEM_4 = 0x00000004 # type: ignore
CP_WAIT_REG_MEM_4_MASK__MASK = 0xffffffff # type: ignore
CP_WAIT_REG_MEM_4_MASK__SHIFT = 0 # type: ignore
REG_CP_WAIT_REG_MEM_5 = 0x00000005 # type: ignore
CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES__MASK = 0xffffffff # type: ignore
CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES__SHIFT = 0 # type: ignore
REG_CP_WAIT_TWO_REGS_0 = 0x00000000 # type: ignore
CP_WAIT_TWO_REGS_0_REG0__MASK = 0x0003ffff # type: ignore
CP_WAIT_TWO_REGS_0_REG0__SHIFT = 0 # type: ignore
REG_CP_WAIT_TWO_REGS_1 = 0x00000001 # type: ignore
CP_WAIT_TWO_REGS_1_REG1__MASK = 0x0003ffff # type: ignore
CP_WAIT_TWO_REGS_1_REG1__SHIFT = 0 # type: ignore
REG_CP_WAIT_TWO_REGS_2 = 0x00000002 # type: ignore
CP_WAIT_TWO_REGS_2_REF__MASK = 0xffffffff # type: ignore
CP_WAIT_TWO_REGS_2_REF__SHIFT = 0 # type: ignore
REG_CP_DISPATCH_COMPUTE_0 = 0x00000000 # type: ignore
REG_CP_DISPATCH_COMPUTE_1 = 0x00000001 # type: ignore
CP_DISPATCH_COMPUTE_1_X__MASK = 0xffffffff # type: ignore
CP_DISPATCH_COMPUTE_1_X__SHIFT = 0 # type: ignore
REG_CP_DISPATCH_COMPUTE_2 = 0x00000002 # type: ignore
CP_DISPATCH_COMPUTE_2_Y__MASK = 0xffffffff # type: ignore
CP_DISPATCH_COMPUTE_2_Y__SHIFT = 0 # type: ignore
REG_CP_DISPATCH_COMPUTE_3 = 0x00000003 # type: ignore
CP_DISPATCH_COMPUTE_3_Z__MASK = 0xffffffff # type: ignore
CP_DISPATCH_COMPUTE_3_Z__SHIFT = 0 # type: ignore
REG_CP_SET_RENDER_MODE_0 = 0x00000000 # type: ignore
CP_SET_RENDER_MODE_0_MODE__MASK = 0x000001ff # type: ignore
CP_SET_RENDER_MODE_0_MODE__SHIFT = 0 # type: ignore
REG_CP_SET_RENDER_MODE_1 = 0x00000001 # type: ignore
CP_SET_RENDER_MODE_1_ADDR_0_LO__MASK = 0xffffffff # type: ignore
CP_SET_RENDER_MODE_1_ADDR_0_LO__SHIFT = 0 # type: ignore
REG_CP_SET_RENDER_MODE_2 = 0x00000002 # type: ignore
CP_SET_RENDER_MODE_2_ADDR_0_HI__MASK = 0xffffffff # type: ignore
CP_SET_RENDER_MODE_2_ADDR_0_HI__SHIFT = 0 # type: ignore
REG_CP_SET_RENDER_MODE_3 = 0x00000003 # type: ignore
CP_SET_RENDER_MODE_3_VSC_ENABLE = 0x00000008 # type: ignore
CP_SET_RENDER_MODE_3_GMEM_ENABLE = 0x00000010 # type: ignore
REG_CP_SET_RENDER_MODE_4 = 0x00000004 # type: ignore
REG_CP_SET_RENDER_MODE_5 = 0x00000005 # type: ignore
CP_SET_RENDER_MODE_5_ADDR_1_LEN__MASK = 0xffffffff # type: ignore
CP_SET_RENDER_MODE_5_ADDR_1_LEN__SHIFT = 0 # type: ignore
REG_CP_SET_RENDER_MODE_6 = 0x00000006 # type: ignore
CP_SET_RENDER_MODE_6_ADDR_1_LO__MASK = 0xffffffff # type: ignore
CP_SET_RENDER_MODE_6_ADDR_1_LO__SHIFT = 0 # type: ignore
REG_CP_SET_RENDER_MODE_7 = 0x00000007 # type: ignore
CP_SET_RENDER_MODE_7_ADDR_1_HI__MASK = 0xffffffff # type: ignore
CP_SET_RENDER_MODE_7_ADDR_1_HI__SHIFT = 0 # type: ignore
REG_CP_COMPUTE_CHECKPOINT_0 = 0x00000000 # type: ignore
CP_COMPUTE_CHECKPOINT_0_ADDR_0_LO__MASK = 0xffffffff # type: ignore
CP_COMPUTE_CHECKPOINT_0_ADDR_0_LO__SHIFT = 0 # type: ignore
REG_CP_COMPUTE_CHECKPOINT_1 = 0x00000001 # type: ignore
CP_COMPUTE_CHECKPOINT_1_ADDR_0_HI__MASK = 0xffffffff # type: ignore
CP_COMPUTE_CHECKPOINT_1_ADDR_0_HI__SHIFT = 0 # type: ignore
REG_CP_COMPUTE_CHECKPOINT_2 = 0x00000002 # type: ignore
REG_CP_COMPUTE_CHECKPOINT_3 = 0x00000003 # type: ignore
REG_CP_COMPUTE_CHECKPOINT_4 = 0x00000004 # type: ignore
CP_COMPUTE_CHECKPOINT_4_ADDR_1_LEN__MASK = 0xffffffff # type: ignore
CP_COMPUTE_CHECKPOINT_4_ADDR_1_LEN__SHIFT = 0 # type: ignore
REG_CP_COMPUTE_CHECKPOINT_5 = 0x00000005 # type: ignore
CP_COMPUTE_CHECKPOINT_5_ADDR_1_LO__MASK = 0xffffffff # type: ignore
CP_COMPUTE_CHECKPOINT_5_ADDR_1_LO__SHIFT = 0 # type: ignore
REG_CP_COMPUTE_CHECKPOINT_6 = 0x00000006 # type: ignore
CP_COMPUTE_CHECKPOINT_6_ADDR_1_HI__MASK = 0xffffffff # type: ignore
CP_COMPUTE_CHECKPOINT_6_ADDR_1_HI__SHIFT = 0 # type: ignore
REG_CP_COMPUTE_CHECKPOINT_7 = 0x00000007 # type: ignore
REG_CP_PERFCOUNTER_ACTION_0 = 0x00000000 # type: ignore
REG_CP_PERFCOUNTER_ACTION_1 = 0x00000001 # type: ignore
CP_PERFCOUNTER_ACTION_1_ADDR_0_LO__MASK = 0xffffffff # type: ignore
CP_PERFCOUNTER_ACTION_1_ADDR_0_LO__SHIFT = 0 # type: ignore
REG_CP_PERFCOUNTER_ACTION_2 = 0x00000002 # type: ignore
CP_PERFCOUNTER_ACTION_2_ADDR_0_HI__MASK = 0xffffffff # type: ignore
CP_PERFCOUNTER_ACTION_2_ADDR_0_HI__SHIFT = 0 # type: ignore
REG_CP_EVENT_WRITE_0 = 0x00000000 # type: ignore
CP_EVENT_WRITE_0_EVENT__MASK = 0x000000ff # type: ignore
CP_EVENT_WRITE_0_EVENT__SHIFT = 0 # type: ignore
CP_EVENT_WRITE_0_TIMESTAMP = 0x40000000 # type: ignore
CP_EVENT_WRITE_0_IRQ = 0x80000000 # type: ignore
REG_CP_EVENT_WRITE_1 = 0x00000001 # type: ignore
CP_EVENT_WRITE_1_ADDR_0_LO__MASK = 0xffffffff # type: ignore
CP_EVENT_WRITE_1_ADDR_0_LO__SHIFT = 0 # type: ignore
REG_CP_EVENT_WRITE_2 = 0x00000002 # type: ignore
CP_EVENT_WRITE_2_ADDR_0_HI__MASK = 0xffffffff # type: ignore
CP_EVENT_WRITE_2_ADDR_0_HI__SHIFT = 0 # type: ignore
REG_CP_EVENT_WRITE_3 = 0x00000003 # type: ignore
REG_CP_EVENT_WRITE7_0 = 0x00000000 # type: ignore
CP_EVENT_WRITE7_0_EVENT__MASK = 0x000000ff # type: ignore
CP_EVENT_WRITE7_0_EVENT__SHIFT = 0 # type: ignore
CP_EVENT_WRITE7_0_WRITE_SAMPLE_COUNT = 0x00001000 # type: ignore
CP_EVENT_WRITE7_0_SAMPLE_COUNT_END_OFFSET = 0x00002000 # type: ignore
CP_EVENT_WRITE7_0_WRITE_ACCUM_SAMPLE_COUNT_DIFF = 0x00004000 # type: ignore
CP_EVENT_WRITE7_0_INC_BV_COUNT = 0x00010000 # type: ignore
CP_EVENT_WRITE7_0_INC_BR_COUNT = 0x00020000 # type: ignore
CP_EVENT_WRITE7_0_CLEAR_RENDER_RESOURCE = 0x00040000 # type: ignore
CP_EVENT_WRITE7_0_CLEAR_LRZ_RESOURCE = 0x00080000 # type: ignore
CP_EVENT_WRITE7_0_WRITE_SRC__MASK = 0x00700000 # type: ignore
CP_EVENT_WRITE7_0_WRITE_SRC__SHIFT = 20 # type: ignore
CP_EVENT_WRITE7_0_WRITE_DST__MASK = 0x01000000 # type: ignore
CP_EVENT_WRITE7_0_WRITE_DST__SHIFT = 24 # type: ignore
CP_EVENT_WRITE7_0_WRITE_ENABLED = 0x08000000 # type: ignore
CP_EVENT_WRITE7_0_IRQ = 0x80000000 # type: ignore
REG_EV_DST_RAM_CP_EVENT_WRITE7_1 = 0x00000001 # type: ignore
REG_EV_DST_RAM_CP_EVENT_WRITE7_3 = 0x00000003 # type: ignore
EV_DST_RAM_CP_EVENT_WRITE7_3_PAYLOAD_0__MASK = 0xffffffff # type: ignore
EV_DST_RAM_CP_EVENT_WRITE7_3_PAYLOAD_0__SHIFT = 0 # type: ignore
REG_EV_DST_RAM_CP_EVENT_WRITE7_4 = 0x00000004 # type: ignore
EV_DST_RAM_CP_EVENT_WRITE7_4_PAYLOAD_1__MASK = 0xffffffff # type: ignore
EV_DST_RAM_CP_EVENT_WRITE7_4_PAYLOAD_1__SHIFT = 0 # type: ignore
REG_EV_DST_ONCHIP_CP_EVENT_WRITE7_1 = 0x00000001 # type: ignore
EV_DST_ONCHIP_CP_EVENT_WRITE7_1_ONCHIP_ADDR_0__MASK = 0xffffffff # type: ignore
EV_DST_ONCHIP_CP_EVENT_WRITE7_1_ONCHIP_ADDR_0__SHIFT = 0 # type: ignore
REG_EV_DST_ONCHIP_CP_EVENT_WRITE7_3 = 0x00000003 # type: ignore
EV_DST_ONCHIP_CP_EVENT_WRITE7_3_PAYLOAD_0__MASK = 0xffffffff # type: ignore
EV_DST_ONCHIP_CP_EVENT_WRITE7_3_PAYLOAD_0__SHIFT = 0 # type: ignore
REG_EV_DST_ONCHIP_CP_EVENT_WRITE7_4 = 0x00000004 # type: ignore
EV_DST_ONCHIP_CP_EVENT_WRITE7_4_PAYLOAD_1__MASK = 0xffffffff # type: ignore
EV_DST_ONCHIP_CP_EVENT_WRITE7_4_PAYLOAD_1__SHIFT = 0 # type: ignore
REG_CP_BLIT_0 = 0x00000000 # type: ignore
CP_BLIT_0_OP__MASK = 0x0000000f # type: ignore
CP_BLIT_0_OP__SHIFT = 0 # type: ignore
REG_CP_BLIT_1 = 0x00000001 # type: ignore
CP_BLIT_1_SRC_X1__MASK = 0x00003fff # type: ignore
CP_BLIT_1_SRC_X1__SHIFT = 0 # type: ignore
CP_BLIT_1_SRC_Y1__MASK = 0x3fff0000 # type: ignore
CP_BLIT_1_SRC_Y1__SHIFT = 16 # type: ignore
REG_CP_BLIT_2 = 0x00000002 # type: ignore
CP_BLIT_2_SRC_X2__MASK = 0x00003fff # type: ignore
CP_BLIT_2_SRC_X2__SHIFT = 0 # type: ignore
CP_BLIT_2_SRC_Y2__MASK = 0x3fff0000 # type: ignore
CP_BLIT_2_SRC_Y2__SHIFT = 16 # type: ignore
REG_CP_BLIT_3 = 0x00000003 # type: ignore
CP_BLIT_3_DST_X1__MASK = 0x00003fff # type: ignore
CP_BLIT_3_DST_X1__SHIFT = 0 # type: ignore
CP_BLIT_3_DST_Y1__MASK = 0x3fff0000 # type: ignore
CP_BLIT_3_DST_Y1__SHIFT = 16 # type: ignore
REG_CP_BLIT_4 = 0x00000004 # type: ignore
CP_BLIT_4_DST_X2__MASK = 0x00003fff # type: ignore
CP_BLIT_4_DST_X2__SHIFT = 0 # type: ignore
CP_BLIT_4_DST_Y2__MASK = 0x3fff0000 # type: ignore
CP_BLIT_4_DST_Y2__SHIFT = 16 # type: ignore
REG_CP_EXEC_CS_0 = 0x00000000 # type: ignore
REG_CP_EXEC_CS_1 = 0x00000001 # type: ignore
CP_EXEC_CS_1_NGROUPS_X__MASK = 0xffffffff # type: ignore
CP_EXEC_CS_1_NGROUPS_X__SHIFT = 0 # type: ignore
REG_CP_EXEC_CS_2 = 0x00000002 # type: ignore
CP_EXEC_CS_2_NGROUPS_Y__MASK = 0xffffffff # type: ignore
CP_EXEC_CS_2_NGROUPS_Y__SHIFT = 0 # type: ignore
REG_CP_EXEC_CS_3 = 0x00000003 # type: ignore
CP_EXEC_CS_3_NGROUPS_Z__MASK = 0xffffffff # type: ignore
CP_EXEC_CS_3_NGROUPS_Z__SHIFT = 0 # type: ignore
REG_A4XX_CP_EXEC_CS_INDIRECT_0 = 0x00000000 # type: ignore
REG_A4XX_CP_EXEC_CS_INDIRECT_1 = 0x00000001 # type: ignore
A4XX_CP_EXEC_CS_INDIRECT_1_ADDR__MASK = 0xffffffff # type: ignore
A4XX_CP_EXEC_CS_INDIRECT_1_ADDR__SHIFT = 0 # type: ignore
REG_A4XX_CP_EXEC_CS_INDIRECT_2 = 0x00000002 # type: ignore
A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEX__MASK = 0x00000ffc # type: ignore
A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEX__SHIFT = 2 # type: ignore
A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEY__MASK = 0x003ff000 # type: ignore
A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEY__SHIFT = 12 # type: ignore
A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ__MASK = 0xffc00000 # type: ignore
A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ__SHIFT = 22 # type: ignore
REG_A5XX_CP_EXEC_CS_INDIRECT_1 = 0x00000001 # type: ignore
A5XX_CP_EXEC_CS_INDIRECT_1_ADDR_LO__MASK = 0xffffffff # type: ignore
A5XX_CP_EXEC_CS_INDIRECT_1_ADDR_LO__SHIFT = 0 # type: ignore
REG_A5XX_CP_EXEC_CS_INDIRECT_2 = 0x00000002 # type: ignore
A5XX_CP_EXEC_CS_INDIRECT_2_ADDR_HI__MASK = 0xffffffff # type: ignore
A5XX_CP_EXEC_CS_INDIRECT_2_ADDR_HI__SHIFT = 0 # type: ignore
REG_A5XX_CP_EXEC_CS_INDIRECT_3 = 0x00000003 # type: ignore
A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX__MASK = 0x00000ffc # type: ignore
A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX__SHIFT = 2 # type: ignore
A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY__MASK = 0x003ff000 # type: ignore
A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY__SHIFT = 12 # type: ignore
A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ__MASK = 0xffc00000 # type: ignore
A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ__SHIFT = 22 # type: ignore
REG_A6XX_CP_SET_MARKER_0 = 0x00000000 # type: ignore
A6XX_CP_SET_MARKER_0_MARKER_MODE__MASK = 0x00000100 # type: ignore
A6XX_CP_SET_MARKER_0_MARKER_MODE__SHIFT = 8 # type: ignore
A6XX_CP_SET_MARKER_0_MODE__MASK = 0x0000000f # type: ignore
A6XX_CP_SET_MARKER_0_MODE__SHIFT = 0 # type: ignore
A6XX_CP_SET_MARKER_0_USES_GMEM = 0x00000010 # type: ignore
A6XX_CP_SET_MARKER_0_IFPC_MODE__MASK = 0x00000001 # type: ignore
A6XX_CP_SET_MARKER_0_IFPC_MODE__SHIFT = 0 # type: ignore
A6XX_CP_SET_MARKER_0_SHADER_USES_RT = 0x00000200 # type: ignore
A6XX_CP_SET_MARKER_0_RT_WA_START = 0x00000400 # type: ignore
A6XX_CP_SET_MARKER_0_RT_WA_END = 0x00000800 # type: ignore
REG_A6XX_CP_SET_PSEUDO_REG_ = lambda i0: (0x00000000 + 0x3*i0 ) # type: ignore
A6XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__MASK = 0x000007ff # type: ignore
A6XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__SHIFT = 0 # type: ignore
A6XX_CP_SET_PSEUDO_REG__1_LO__MASK = 0xffffffff # type: ignore
A6XX_CP_SET_PSEUDO_REG__1_LO__SHIFT = 0 # type: ignore
A6XX_CP_SET_PSEUDO_REG__2_HI__MASK = 0xffffffff # type: ignore
A6XX_CP_SET_PSEUDO_REG__2_HI__SHIFT = 0 # type: ignore
REG_A6XX_CP_REG_TEST_0 = 0x00000000 # type: ignore
A6XX_CP_REG_TEST_0_REG__MASK = 0x0003ffff # type: ignore
A6XX_CP_REG_TEST_0_REG__SHIFT = 0 # type: ignore
A6XX_CP_REG_TEST_0_SCRATCH_MEM_OFFSET__MASK = 0x0003ffff # type: ignore
A6XX_CP_REG_TEST_0_SCRATCH_MEM_OFFSET__SHIFT = 0 # type: ignore
A6XX_CP_REG_TEST_0_SOURCE__MASK = 0x00040000 # type: ignore
A6XX_CP_REG_TEST_0_SOURCE__SHIFT = 18 # type: ignore
A6XX_CP_REG_TEST_0_BIT__MASK = 0x01f00000 # type: ignore
A6XX_CP_REG_TEST_0_BIT__SHIFT = 20 # type: ignore
A6XX_CP_REG_TEST_0_SKIP_WAIT_FOR_ME = 0x02000000 # type: ignore
A6XX_CP_REG_TEST_0_PRED_BIT__MASK = 0x7c000000 # type: ignore
A6XX_CP_REG_TEST_0_PRED_BIT__SHIFT = 26 # type: ignore
A6XX_CP_REG_TEST_0_PRED_UPDATE = 0x80000000 # type: ignore
REG_A6XX_CP_REG_TEST_PRED_MASK = 0x00000001 # type: ignore
REG_A6XX_CP_REG_TEST_PRED_VAL = 0x00000002 # type: ignore
REG_CP_COND_REG_EXEC_0 = 0x00000000 # type: ignore
CP_COND_REG_EXEC_0_REG0__MASK = 0x0003ffff # type: ignore
CP_COND_REG_EXEC_0_REG0__SHIFT = 0 # type: ignore
CP_COND_REG_EXEC_0_PRED_BIT__MASK = 0x007c0000 # type: ignore
CP_COND_REG_EXEC_0_PRED_BIT__SHIFT = 18 # type: ignore
CP_COND_REG_EXEC_0_SKIP_WAIT_FOR_ME = 0x00800000 # type: ignore
CP_COND_REG_EXEC_0_ONCHIP_MEM = 0x01000000 # type: ignore
CP_COND_REG_EXEC_0_BINNING = 0x02000000 # type: ignore
CP_COND_REG_EXEC_0_GMEM = 0x04000000 # type: ignore
CP_COND_REG_EXEC_0_SYSMEM = 0x08000000 # type: ignore
CP_COND_REG_EXEC_0_BV = 0x02000000 # type: ignore
CP_COND_REG_EXEC_0_BR = 0x04000000 # type: ignore
CP_COND_REG_EXEC_0_LPAC = 0x08000000 # type: ignore
CP_COND_REG_EXEC_0_MODE__MASK = 0xf0000000 # type: ignore
CP_COND_REG_EXEC_0_MODE__SHIFT = 28 # type: ignore
REG_PRED_TEST_CP_COND_REG_EXEC_1 = 0x00000001 # type: ignore
PRED_TEST_CP_COND_REG_EXEC_1_DWORDS__MASK = 0x00ffffff # type: ignore
PRED_TEST_CP_COND_REG_EXEC_1_DWORDS__SHIFT = 0 # type: ignore
REG_REG_COMPARE_CP_COND_REG_EXEC_1 = 0x00000001 # type: ignore
REG_COMPARE_CP_COND_REG_EXEC_1_REG1__MASK = 0x0003ffff # type: ignore
REG_COMPARE_CP_COND_REG_EXEC_1_REG1__SHIFT = 0 # type: ignore
REG_COMPARE_CP_COND_REG_EXEC_1_ONCHIP_MEM = 0x01000000 # type: ignore
REG_RENDER_MODE_CP_COND_REG_EXEC_1 = 0x00000001 # type: ignore
RENDER_MODE_CP_COND_REG_EXEC_1_DWORDS__MASK = 0x00ffffff # type: ignore
RENDER_MODE_CP_COND_REG_EXEC_1_DWORDS__SHIFT = 0 # type: ignore
REG_REG_COMPARE_IMM_CP_COND_REG_EXEC_1 = 0x00000001 # type: ignore
REG_COMPARE_IMM_CP_COND_REG_EXEC_1_IMM__MASK = 0xffffffff # type: ignore
REG_COMPARE_IMM_CP_COND_REG_EXEC_1_IMM__SHIFT = 0 # type: ignore
REG_THREAD_MODE_CP_COND_REG_EXEC_1 = 0x00000001 # type: ignore
THREAD_MODE_CP_COND_REG_EXEC_1_DWORDS__MASK = 0x00ffffff # type: ignore
THREAD_MODE_CP_COND_REG_EXEC_1_DWORDS__SHIFT = 0 # type: ignore
REG_CP_COND_REG_EXEC_2 = 0x00000002 # type: ignore
CP_COND_REG_EXEC_2_DWORDS__MASK = 0x00ffffff # type: ignore
CP_COND_REG_EXEC_2_DWORDS__SHIFT = 0 # type: ignore
REG_CP_COND_EXEC_0 = 0x00000000 # type: ignore
CP_COND_EXEC_0_ADDR0_LO__MASK = 0xffffffff # type: ignore
CP_COND_EXEC_0_ADDR0_LO__SHIFT = 0 # type: ignore
REG_CP_COND_EXEC_1 = 0x00000001 # type: ignore
CP_COND_EXEC_1_ADDR0_HI__MASK = 0xffffffff # type: ignore
CP_COND_EXEC_1_ADDR0_HI__SHIFT = 0 # type: ignore
REG_CP_COND_EXEC_2 = 0x00000002 # type: ignore
CP_COND_EXEC_2_ADDR1_LO__MASK = 0xffffffff # type: ignore
CP_COND_EXEC_2_ADDR1_LO__SHIFT = 0 # type: ignore
REG_CP_COND_EXEC_3 = 0x00000003 # type: ignore
CP_COND_EXEC_3_ADDR1_HI__MASK = 0xffffffff # type: ignore
CP_COND_EXEC_3_ADDR1_HI__SHIFT = 0 # type: ignore
REG_CP_COND_EXEC_4 = 0x00000004 # type: ignore
CP_COND_EXEC_4_REF__MASK = 0xffffffff # type: ignore
CP_COND_EXEC_4_REF__SHIFT = 0 # type: ignore
REG_CP_COND_EXEC_5 = 0x00000005 # type: ignore
CP_COND_EXEC_5_DWORDS__MASK = 0xffffffff # type: ignore
CP_COND_EXEC_5_DWORDS__SHIFT = 0 # type: ignore
REG_CP_SET_AMBLE_0 = 0x00000000 # type: ignore
CP_SET_AMBLE_0_ADDR_LO__MASK = 0xffffffff # type: ignore
CP_SET_AMBLE_0_ADDR_LO__SHIFT = 0 # type: ignore
REG_CP_SET_AMBLE_1 = 0x00000001 # type: ignore
CP_SET_AMBLE_1_ADDR_HI__MASK = 0xffffffff # type: ignore
CP_SET_AMBLE_1_ADDR_HI__SHIFT = 0 # type: ignore
REG_CP_SET_AMBLE_2 = 0x00000002 # type: ignore
CP_SET_AMBLE_2_DWORDS__MASK = 0x000fffff # type: ignore
CP_SET_AMBLE_2_DWORDS__SHIFT = 0 # type: ignore
CP_SET_AMBLE_2_TYPE__MASK = 0x00300000 # type: ignore
CP_SET_AMBLE_2_TYPE__SHIFT = 20 # type: ignore
REG_CP_REG_WRITE_0 = 0x00000000 # type: ignore
CP_REG_WRITE_0_TRACKER__MASK = 0x0000000f # type: ignore
CP_REG_WRITE_0_TRACKER__SHIFT = 0 # type: ignore
REG_CP_REG_WRITE_1 = 0x00000001 # type: ignore
REG_CP_REG_WRITE_2 = 0x00000002 # type: ignore
REG_CP_SMMU_TABLE_UPDATE_0 = 0x00000000 # type: ignore
CP_SMMU_TABLE_UPDATE_0_TTBR0_LO__MASK = 0xffffffff # type: ignore
CP_SMMU_TABLE_UPDATE_0_TTBR0_LO__SHIFT = 0 # type: ignore
REG_CP_SMMU_TABLE_UPDATE_1 = 0x00000001 # type: ignore
CP_SMMU_TABLE_UPDATE_1_TTBR0_HI__MASK = 0x0000ffff # type: ignore
CP_SMMU_TABLE_UPDATE_1_TTBR0_HI__SHIFT = 0 # type: ignore
CP_SMMU_TABLE_UPDATE_1_ASID__MASK = 0xffff0000 # type: ignore
CP_SMMU_TABLE_UPDATE_1_ASID__SHIFT = 16 # type: ignore
REG_CP_SMMU_TABLE_UPDATE_2 = 0x00000002 # type: ignore
CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR__MASK = 0xffffffff # type: ignore
CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR__SHIFT = 0 # type: ignore
REG_CP_SMMU_TABLE_UPDATE_3 = 0x00000003 # type: ignore
CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK__MASK = 0xffffffff # type: ignore
CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK__SHIFT = 0 # type: ignore
REG_CP_START_BIN_BIN_COUNT = 0x00000000 # type: ignore
REG_CP_START_BIN_PREFIX_ADDR = 0x00000001 # type: ignore
REG_CP_START_BIN_PREFIX_DWORDS = 0x00000003 # type: ignore
REG_CP_START_BIN_BODY_DWORDS = 0x00000004 # type: ignore
REG_CP_WAIT_TIMESTAMP_0 = 0x00000000 # type: ignore
CP_WAIT_TIMESTAMP_0_WAIT_VALUE_SRC__MASK = 0x00000003 # type: ignore
CP_WAIT_TIMESTAMP_0_WAIT_VALUE_SRC__SHIFT = 0 # type: ignore
CP_WAIT_TIMESTAMP_0_WAIT_DST__MASK = 0x00000010 # type: ignore
CP_WAIT_TIMESTAMP_0_WAIT_DST__SHIFT = 4 # type: ignore
REG_TS_WAIT_RAM_CP_WAIT_TIMESTAMP_ADDR = 0x00000001 # type: ignore
REG_TS_WAIT_ONCHIP_CP_WAIT_TIMESTAMP_ONCHIP_ADDR_0 = 0x00000001 # type: ignore
REG_CP_WAIT_TIMESTAMP_SRC_0 = 0x00000003 # type: ignore
REG_CP_WAIT_TIMESTAMP_SRC_1 = 0x00000004 # type: ignore
REG_CP_BV_BR_COUNT_OPS_0 = 0x00000000 # type: ignore
CP_BV_BR_COUNT_OPS_0_OP__MASK = 0x0000000f # type: ignore
CP_BV_BR_COUNT_OPS_0_OP__SHIFT = 0 # type: ignore
REG_CP_BV_BR_COUNT_OPS_1 = 0x00000001 # type: ignore
CP_BV_BR_COUNT_OPS_1_BR_OFFSET__MASK = 0x0000ffff # type: ignore
CP_BV_BR_COUNT_OPS_1_BR_OFFSET__SHIFT = 0 # type: ignore
REG_CP_MODIFY_TIMESTAMP_0 = 0x00000000 # type: ignore
CP_MODIFY_TIMESTAMP_0_ADD__MASK = 0x000000ff # type: ignore
CP_MODIFY_TIMESTAMP_0_ADD__SHIFT = 0 # type: ignore
CP_MODIFY_TIMESTAMP_0_OP__MASK = 0xf0000000 # type: ignore
CP_MODIFY_TIMESTAMP_0_OP__SHIFT = 28 # type: ignore
REG_CP_MEM_TO_SCRATCH_MEM_0 = 0x00000000 # type: ignore
CP_MEM_TO_SCRATCH_MEM_0_CNT__MASK = 0x0000003f # type: ignore
CP_MEM_TO_SCRATCH_MEM_0_CNT__SHIFT = 0 # type: ignore
REG_CP_MEM_TO_SCRATCH_MEM_1 = 0x00000001 # type: ignore
CP_MEM_TO_SCRATCH_MEM_1_OFFSET__MASK = 0x0000003f # type: ignore
CP_MEM_TO_SCRATCH_MEM_1_OFFSET__SHIFT = 0 # type: ignore
REG_CP_MEM_TO_SCRATCH_MEM_2 = 0x00000002 # type: ignore
CP_MEM_TO_SCRATCH_MEM_2_SRC__MASK = 0xffffffff # type: ignore
CP_MEM_TO_SCRATCH_MEM_2_SRC__SHIFT = 0 # type: ignore
REG_CP_MEM_TO_SCRATCH_MEM_3 = 0x00000003 # type: ignore
CP_MEM_TO_SCRATCH_MEM_3_SRC_HI__MASK = 0xffffffff # type: ignore
CP_MEM_TO_SCRATCH_MEM_3_SRC_HI__SHIFT = 0 # type: ignore
REG_CP_THREAD_CONTROL_0 = 0x00000000 # type: ignore
CP_THREAD_CONTROL_0_THREAD__MASK = 0x00000003 # type: ignore
CP_THREAD_CONTROL_0_THREAD__SHIFT = 0 # type: ignore
CP_THREAD_CONTROL_0_CONCURRENT_BIN_DISABLE = 0x08000000 # type: ignore
CP_THREAD_CONTROL_0_SYNC_THREADS = 0x80000000 # type: ignore
REG_CP_FIXED_STRIDE_DRAW_TABLE_IB_BASE = 0x00000000 # type: ignore
REG_CP_FIXED_STRIDE_DRAW_TABLE_2 = 0x00000002 # type: ignore
CP_FIXED_STRIDE_DRAW_TABLE_2_IB_SIZE__MASK = 0x00000fff # type: ignore
CP_FIXED_STRIDE_DRAW_TABLE_2_IB_SIZE__SHIFT = 0 # type: ignore
CP_FIXED_STRIDE_DRAW_TABLE_2_STRIDE__MASK = 0xfff00000 # type: ignore
CP_FIXED_STRIDE_DRAW_TABLE_2_STRIDE__SHIFT = 20 # type: ignore
REG_CP_FIXED_STRIDE_DRAW_TABLE_3 = 0x00000003 # type: ignore
CP_FIXED_STRIDE_DRAW_TABLE_3_COUNT__MASK = 0xffffffff # type: ignore
CP_FIXED_STRIDE_DRAW_TABLE_3_COUNT__SHIFT = 0 # type: ignore
REG_CP_RESET_CONTEXT_STATE_0 = 0x00000000 # type: ignore
CP_RESET_CONTEXT_STATE_0_CLEAR_ON_CHIP_TS = 0x00000001 # type: ignore
CP_RESET_CONTEXT_STATE_0_CLEAR_RESOURCE_TABLE = 0x00000002 # type: ignore
CP_RESET_CONTEXT_STATE_0_CLEAR_BV_BR_COUNTER = 0x00000004 # type: ignore
CP_RESET_CONTEXT_STATE_0_RESET_GLOBAL_LOCAL_TS = 0x00000008 # type: ignore
REG_CP_SCOPE_CNTL_0 = 0x00000000 # type: ignore
CP_SCOPE_CNTL_0_DISABLE_PREEMPTION = 0x00000001 # type: ignore
CP_SCOPE_CNTL_0_SCOPE__MASK = 0xf0000000 # type: ignore
CP_SCOPE_CNTL_0_SCOPE__SHIFT = 28 # type: ignore
REG_A5XX_CP_INDIRECT_BUFFER_IB_BASE = 0x00000000 # type: ignore
REG_A5XX_CP_INDIRECT_BUFFER_2 = 0x00000002 # type: ignore
A5XX_CP_INDIRECT_BUFFER_2_IB_SIZE__MASK = 0x000fffff # type: ignore
A5XX_CP_INDIRECT_BUFFER_2_IB_SIZE__SHIFT = 0 # type: ignore
__struct__cast = lambda X: (struct_X) # type: ignore
__struct__cast = lambda X: (struct_X) # type: ignore
REG_A6XX_TEX_SAMP_0 = 0x00000000 # type: ignore
A6XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR = 0x00000001 # type: ignore
A6XX_TEX_SAMP_0_XY_MAG__MASK = 0x00000006 # type: ignore
A6XX_TEX_SAMP_0_XY_MAG__SHIFT = 1 # type: ignore
A6XX_TEX_SAMP_0_XY_MIN__MASK = 0x00000018 # type: ignore
A6XX_TEX_SAMP_0_XY_MIN__SHIFT = 3 # type: ignore
A6XX_TEX_SAMP_0_WRAP_S__MASK = 0x000000e0 # type: ignore
A6XX_TEX_SAMP_0_WRAP_S__SHIFT = 5 # type: ignore
A6XX_TEX_SAMP_0_WRAP_T__MASK = 0x00000700 # type: ignore
A6XX_TEX_SAMP_0_WRAP_T__SHIFT = 8 # type: ignore
A6XX_TEX_SAMP_0_WRAP_R__MASK = 0x00003800 # type: ignore
A6XX_TEX_SAMP_0_WRAP_R__SHIFT = 11 # type: ignore
A6XX_TEX_SAMP_0_ANISO__MASK = 0x0001c000 # type: ignore
A6XX_TEX_SAMP_0_ANISO__SHIFT = 14 # type: ignore
A6XX_TEX_SAMP_0_LOD_BIAS__MASK = 0xfff80000 # type: ignore
A6XX_TEX_SAMP_0_LOD_BIAS__SHIFT = 19 # type: ignore
REG_A6XX_TEX_SAMP_1 = 0x00000001 # type: ignore
A6XX_TEX_SAMP_1_CLAMPENABLE = 0x00000001 # type: ignore
A6XX_TEX_SAMP_1_COMPARE_FUNC__MASK = 0x0000000e # type: ignore
A6XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT = 1 # type: ignore
A6XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF = 0x00000010 # type: ignore
A6XX_TEX_SAMP_1_UNNORM_COORDS = 0x00000020 # type: ignore
A6XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR = 0x00000040 # type: ignore
A6XX_TEX_SAMP_1_MAX_LOD__MASK = 0x000fff00 # type: ignore
A6XX_TEX_SAMP_1_MAX_LOD__SHIFT = 8 # type: ignore
A6XX_TEX_SAMP_1_MIN_LOD__MASK = 0xfff00000 # type: ignore
A6XX_TEX_SAMP_1_MIN_LOD__SHIFT = 20 # type: ignore
REG_A6XX_TEX_SAMP_2 = 0x00000002 # type: ignore
A6XX_TEX_SAMP_2_REDUCTION_MODE__MASK = 0x00000003 # type: ignore
A6XX_TEX_SAMP_2_REDUCTION_MODE__SHIFT = 0 # type: ignore
A6XX_TEX_SAMP_2_FASTBORDERCOLOR__MASK = 0x0000000c # type: ignore
A6XX_TEX_SAMP_2_FASTBORDERCOLOR__SHIFT = 2 # type: ignore
A6XX_TEX_SAMP_2_FASTBORDERCOLOREN = 0x00000010 # type: ignore
A6XX_TEX_SAMP_2_CHROMA_LINEAR = 0x00000020 # type: ignore
A6XX_TEX_SAMP_2_BCOLOR__MASK = 0xffffff80 # type: ignore
A6XX_TEX_SAMP_2_BCOLOR__SHIFT = 7 # type: ignore
REG_A6XX_TEX_SAMP_3 = 0x00000003 # type: ignore
REG_A6XX_TEX_CONST_0 = 0x00000000 # type: ignore
A6XX_TEX_CONST_0_TILE_MODE__MASK = 0x00000003 # type: ignore
A6XX_TEX_CONST_0_TILE_MODE__SHIFT = 0 # type: ignore
A6XX_TEX_CONST_0_SRGB = 0x00000004 # type: ignore
A6XX_TEX_CONST_0_SWIZ_X__MASK = 0x00000070 # type: ignore
A6XX_TEX_CONST_0_SWIZ_X__SHIFT = 4 # type: ignore
A6XX_TEX_CONST_0_SWIZ_Y__MASK = 0x00000380 # type: ignore
A6XX_TEX_CONST_0_SWIZ_Y__SHIFT = 7 # type: ignore
A6XX_TEX_CONST_0_SWIZ_Z__MASK = 0x00001c00 # type: ignore
A6XX_TEX_CONST_0_SWIZ_Z__SHIFT = 10 # type: ignore
A6XX_TEX_CONST_0_SWIZ_W__MASK = 0x0000e000 # type: ignore
A6XX_TEX_CONST_0_SWIZ_W__SHIFT = 13 # type: ignore
A6XX_TEX_CONST_0_MIPLVLS__MASK = 0x000f0000 # type: ignore
A6XX_TEX_CONST_0_MIPLVLS__SHIFT = 16 # type: ignore
A6XX_TEX_CONST_0_CHROMA_MIDPOINT_X = 0x00010000 # type: ignore
A6XX_TEX_CONST_0_CHROMA_MIDPOINT_Y = 0x00040000 # type: ignore
A6XX_TEX_CONST_0_SAMPLES__MASK = 0x00300000 # type: ignore
A6XX_TEX_CONST_0_SAMPLES__SHIFT = 20 # type: ignore
A6XX_TEX_CONST_0_FMT__MASK = 0x3fc00000 # type: ignore
A6XX_TEX_CONST_0_FMT__SHIFT = 22 # type: ignore
A6XX_TEX_CONST_0_SWAP__MASK = 0xc0000000 # type: ignore
A6XX_TEX_CONST_0_SWAP__SHIFT = 30 # type: ignore
REG_A6XX_TEX_CONST_1 = 0x00000001 # type: ignore
A6XX_TEX_CONST_1_WIDTH__MASK = 0x00007fff # type: ignore
A6XX_TEX_CONST_1_WIDTH__SHIFT = 0 # type: ignore
A6XX_TEX_CONST_1_HEIGHT__MASK = 0x3fff8000 # type: ignore
A6XX_TEX_CONST_1_HEIGHT__SHIFT = 15 # type: ignore
A6XX_TEX_CONST_1_MUTABLEEN = 0x80000000 # type: ignore
REG_A6XX_TEX_CONST_2 = 0x00000002 # type: ignore
A6XX_TEX_CONST_2_STRUCTSIZETEXELS__MASK = 0x0000fff0 # type: ignore
A6XX_TEX_CONST_2_STRUCTSIZETEXELS__SHIFT = 4 # type: ignore
A6XX_TEX_CONST_2_STARTOFFSETTEXELS__MASK = 0x003f0000 # type: ignore
A6XX_TEX_CONST_2_STARTOFFSETTEXELS__SHIFT = 16 # type: ignore
A6XX_TEX_CONST_2_PITCHALIGN__MASK = 0x0000000f # type: ignore
A6XX_TEX_CONST_2_PITCHALIGN__SHIFT = 0 # type: ignore
A6XX_TEX_CONST_2_PITCH__MASK = 0x1fffff80 # type: ignore
A6XX_TEX_CONST_2_PITCH__SHIFT = 7 # type: ignore
A6XX_TEX_CONST_2_TYPE__MASK = 0xe0000000 # type: ignore
A6XX_TEX_CONST_2_TYPE__SHIFT = 29 # type: ignore
REG_A6XX_TEX_CONST_3 = 0x00000003 # type: ignore
A6XX_TEX_CONST_3_ARRAY_PITCH__MASK = 0x007fffff # type: ignore
A6XX_TEX_CONST_3_ARRAY_PITCH__SHIFT = 0 # type: ignore
A6XX_TEX_CONST_3_MIN_LAYERSZ__MASK = 0x07800000 # type: ignore
A6XX_TEX_CONST_3_MIN_LAYERSZ__SHIFT = 23 # type: ignore
A6XX_TEX_CONST_3_TILE_ALL = 0x08000000 # type: ignore
A6XX_TEX_CONST_3_FLAG = 0x10000000 # type: ignore
REG_A6XX_TEX_CONST_4 = 0x00000004 # type: ignore
A6XX_TEX_CONST_4_BASE_LO__MASK = 0xffffffe0 # type: ignore
A6XX_TEX_CONST_4_BASE_LO__SHIFT = 5 # type: ignore
REG_A6XX_TEX_CONST_5 = 0x00000005 # type: ignore
A6XX_TEX_CONST_5_BASE_HI__MASK = 0x0001ffff # type: ignore
A6XX_TEX_CONST_5_BASE_HI__SHIFT = 0 # type: ignore
A6XX_TEX_CONST_5_DEPTH__MASK = 0x3ffe0000 # type: ignore
A6XX_TEX_CONST_5_DEPTH__SHIFT = 17 # type: ignore
REG_A6XX_TEX_CONST_6 = 0x00000006 # type: ignore
A6XX_TEX_CONST_6_MIN_LOD_CLAMP__MASK = 0x00000fff # type: ignore
A6XX_TEX_CONST_6_MIN_LOD_CLAMP__SHIFT = 0 # type: ignore
A6XX_TEX_CONST_6_PLANE_PITCH__MASK = 0xffffff00 # type: ignore
A6XX_TEX_CONST_6_PLANE_PITCH__SHIFT = 8 # type: ignore
REG_A6XX_TEX_CONST_7 = 0x00000007 # type: ignore
A6XX_TEX_CONST_7_FLAG_LO__MASK = 0xffffffe0 # type: ignore
A6XX_TEX_CONST_7_FLAG_LO__SHIFT = 5 # type: ignore
REG_A6XX_TEX_CONST_8 = 0x00000008 # type: ignore
A6XX_TEX_CONST_8_FLAG_HI__MASK = 0x0001ffff # type: ignore
A6XX_TEX_CONST_8_FLAG_HI__SHIFT = 0 # type: ignore
REG_A6XX_TEX_CONST_9 = 0x00000009 # type: ignore
A6XX_TEX_CONST_9_FLAG_BUFFER_ARRAY_PITCH__MASK = 0x0001ffff # type: ignore
A6XX_TEX_CONST_9_FLAG_BUFFER_ARRAY_PITCH__SHIFT = 0 # type: ignore
REG_A6XX_TEX_CONST_10 = 0x0000000a # type: ignore
A6XX_TEX_CONST_10_FLAG_BUFFER_PITCH__MASK = 0x0000007f # type: ignore
A6XX_TEX_CONST_10_FLAG_BUFFER_PITCH__SHIFT = 0 # type: ignore
A6XX_TEX_CONST_10_FLAG_BUFFER_LOGW__MASK = 0x00000f00 # type: ignore
A6XX_TEX_CONST_10_FLAG_BUFFER_LOGW__SHIFT = 8 # type: ignore
A6XX_TEX_CONST_10_FLAG_BUFFER_LOGH__MASK = 0x0000f000 # type: ignore
A6XX_TEX_CONST_10_FLAG_BUFFER_LOGH__SHIFT = 12 # type: ignore
REG_A6XX_TEX_CONST_11 = 0x0000000b # type: ignore
REG_A6XX_TEX_CONST_12 = 0x0000000c # type: ignore
REG_A6XX_TEX_CONST_13 = 0x0000000d # type: ignore
REG_A6XX_TEX_CONST_14 = 0x0000000e # type: ignore
REG_A6XX_TEX_CONST_15 = 0x0000000f # type: ignore
REG_A6XX_UBO_0 = 0x00000000 # type: ignore
A6XX_UBO_0_BASE_LO__MASK = 0xffffffff # type: ignore
A6XX_UBO_0_BASE_LO__SHIFT = 0 # type: ignore
REG_A6XX_UBO_1 = 0x00000001 # type: ignore
A6XX_UBO_1_BASE_HI__MASK = 0x0001ffff # type: ignore
A6XX_UBO_1_BASE_HI__SHIFT = 0 # type: ignore
A6XX_UBO_1_SIZE__MASK = 0xfffe0000 # type: ignore
A6XX_UBO_1_SIZE__SHIFT = 17 # type: ignore
lvp_nir_options = gzip.decompress(base64.b64decode("H4sIAAAAAAAAA2NgZGRkYGAAkYxgCsQFsxigwgwQBoxmhCqFq2WEKwIrAEGIkQxoAEMALwCqVsCiGUwLMHA0QPn29nBJkswHANb8YpH4AAAA")) | {
"repo_id": "tinygrad/tinygrad",
"file_path": "tinygrad/runtime/autogen/mesa.py",
"license": "MIT License",
"lines": 14219,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
tinygrad/tinygrad:tinygrad/runtime/support/compiler_mesa.py | import base64, ctypes, pathlib, tempfile, hashlib
from tinygrad.device import Compiler
from tinygrad.helpers import cpu_objdump, system, data64
from tinygrad.runtime.autogen import mesa, llvm
from tinygrad.runtime.support.compiler_cpu import CPULLVMCompiler, expect, cerr
# NB: compilers assume mesa's glsl type cache is managed externally with mesa.glsl_type_singleton_init_or_ref() and mesa.glsl_type_singleton_decref()
def rzalloc(typ, ctx=None, **kwargs):
  """Allocate a zero-initialized `typ` with mesa's ralloc under parent `ctx`, setting the given fields on it."""
  raw = mesa.rzalloc_size(ctypes.cast(ctx, ctypes.c_void_p), ctypes.sizeof(typ))
  ptr = ctypes.cast(raw, ctypes.POINTER(typ))
  for field_name, field_value in kwargs.items():
    setattr(ptr.contents, field_name, field_value)
  return ptr
def deserialize(enc_src, opts):
  """Decode a base64-serialized NIR blob and deserialize it into a nir_shader using compiler options `opts`."""
  raw = base64.b64decode(enc_src)
  reader = mesa.struct_blob_reader()
  mesa.blob_reader_init(reader, raw, len(raw))
  opts_ptr = ctypes.cast(opts, ctypes.POINTER(mesa.nir_shader_compiler_options))
  return mesa.nir_deserialize(None, opts_ptr, reader)
class LVPCompiler(CPULLVMCompiler):
  """Compile serialized NIR compute shaders to native CPU object code via mesa's llvmpipe (gallivm) JIT path."""
  def __init__(self, cache_key="lvp"): CPULLVMCompiler.__init__(self, cache_key=f"compile_{cache_key}")
  def compile(self, src) -> bytes:
    # src is a base64-encoded NIR blob (see deserialize); codegen happens in the global LLVM context
    shader, ctx = deserialize(src, mesa.lvp_nir_options), llvm.LLVMGetGlobalContext()
    gallivm = mesa.gallivm_create(None, mesa.lp_context_ref(ctypes.cast(ctx, ctypes.POINTER(mesa.struct_LLVMOpaqueContext)), True), None).contents
    module, builder = ctypes.cast(gallivm.module, llvm.LLVMModuleRef), ctypes.cast(gallivm.builder, llvm.LLVMBuilderRef)
    # SoA build params: 4-wide vectors of signed 32-bit floats
    params = mesa.struct_lp_build_tgsi_params(mesa.struct_lp_type(floating=True, sign=True, width=32, length=4),
      resources_type=mesa.lp_build_jit_resources_type(gallivm), mask=ctypes.pointer(mesa.struct_lp_build_mask_context()))
    pt = llvm.LLVMPointerType(ctypes.cast(params.resources_type, llvm.LLVMTypeRef), 0)
    # emit a `void fn(resources*)` named after the shader, then generate its body from the NIR below
    fn = llvm.LLVMAddFunction(module, shader.contents.info.name, llvm.LLVMFunctionType(llvm.LLVMVoidTypeInContext(ctx), pt, 1, 0))
    llvm.LLVMPositionBuilderAtEnd(builder, llvm.LLVMAppendBasicBlockInContext(ctx, fn, b"entry"))
    # constants pointer is fetched out of the single resources-struct function argument
    params.consts_ptr = mesa.lp_build_struct_get_ptr2(gallivm, params.resources_type,
      ctypes.cast(llvm.LLVMGetParam(fn, 0), mesa.LLVMValueRef), mesa.LP_JIT_RES_CONSTANTS, b"constants")
    # execution mask is opened all-on and immediately closed before NIR codegen -- presumably just to initialize the mask struct; TODO confirm
    mesa.lp_build_mask_begin(params.mask, gallivm, params.type, mesa.lp_build_one(gallivm, params.type))
    mesa.lp_build_mask_end(params.mask)
    mesa.lp_build_nir_soa(gallivm, shader, params, None)
    llvm.LLVMBuildRetVoid(builder)
    mesa.gallivm_verify_function(gallivm, ctypes.cast(fn, mesa.LLVMValueRef))
    mesa.lp_passmgr_run(gallivm.passmgr, gallivm.module, ctypes.cast(self.target_machine, mesa.LLVMTargetMachineRef), gallivm.module_name)
    # lower the optimized module to a relocatable object file in memory and copy the bytes out before teardown
    obj_buf = expect(llvm.LLVMTargetMachineEmitToMemoryBuffer(self.target_machine, module, llvm.LLVMObjectFile, err:=cerr(),
      ctypes.pointer(buf:=llvm.LLVMMemoryBufferRef())), err, buf)
    obj = ctypes.string_at(llvm.LLVMGetBufferStart(obj_buf), llvm.LLVMGetBufferSize(obj_buf))
    mesa.gallivm_destroy(gallivm)
    mesa.ralloc_free(shader)
    return obj
  def disassemble(self, lib: bytes): cpu_objdump(lib)
class NAKCompiler(Compiler):
  """Compile serialized NIR shaders to NVIDIA machine code with mesa's NAK compiler.

  compile() returns the raw nak_shader_info struct bytes followed by the generated code;
  disassemble() strips that header again before handing the code to nvdisasm.
  """
  def __init__(self, arch, warps_per_sm, cache_key="nak"):
    self.arch, self.warps_per_sm = arch, warps_per_sm
    # arch[3:] is used both here as the numeric SM version and below as "SM{arch[3:]}" for nvdisasm,
    # so arch is presumably "sm_NN"-style -- TODO confirm exact format with callers
    self.cc = mesa.nak_compiler_create(mesa.struct_nv_device_info(sm=int(arch[3:]), max_warps_per_mp=warps_per_sm))
    self.nir_options = bytes(mesa.nak_nir_options(self.cc).contents)
    super().__init__(f"compile_{cache_key}_{arch}")
  def __del__(self): mesa.nak_compiler_destroy(self.cc)
  # self.cc is a C handle and can't be pickled; recreate the compiler from its constructor args instead
  def __reduce__(self): return NAKCompiler, (self.arch, self.warps_per_sm)
  def compile(self, src) -> bytes:
    shader = deserialize(src, self.nir_options)
    mesa.nak_preprocess_nir(shader, self.cc)
    # blob layout: struct nak_shader_info bytes, then code_size bytes of machine code
    ret = bytes((out:=mesa.nak_compile_shader(shader, False, self.cc, 0, None).contents).info) + ctypes.string_at(out.code, out.code_size)
    mesa.nak_shader_bin_destroy(out)
    mesa.ralloc_free(shader)
    return ret
  def disassemble(self, lib: bytes):
    try:
      # write the code (minus the info header) to a content-addressed temp file and shell out to nvdisasm
      fn = (pathlib.Path(tempfile.gettempdir()) / f"tinynak_{hashlib.md5(lib).hexdigest()}").as_posix()
      with open(fn, "wb") as f: f.write(lib[ctypes.sizeof(mesa.struct_nak_shader_info):])
      print(system(f"nvdisasm -b SM{self.arch[3:]} {fn}"))
    except Exception as e: print("Failed to generate SASS", str(e), "Make sure your PATH contains nvdisasm binary of compatible version.")
def disas_adreno(lib:bytes, gpu_id=630):
  """Disassemble Adreno (ir3) machine code to stdout, prefixing each instruction with its index and raw 64-bit encoding."""
  with tempfile.TemporaryFile('w+', buffering=1) as tf:
    # pre-instruction callback: writes "NNNN [hi_lo] " into the temp file before each disassembled line
    @ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_uint32, ctypes.c_void_p)
    def hd(data, n, instr):
      fst, snd = data64(ctypes.cast(instr, ctypes.POINTER(ctypes.c_uint64)).contents.value)
      print(f"{n:04} [{fst:08x}_{snd:08x}] ", end="", flush=True, file=tf)
    # give the C side a line-buffered FILE* over the same fd so Python (callback) and C writes interleave in order
    ctypes.CDLL(None).setlinebuf(fp:=ctypes.cast(ctypes.CDLL(None).fdopen(tf.fileno(), b"w"), ctypes.POINTER(mesa.struct__IO_FILE)))
    mesa.ir3_isa_disasm(lib, len(lib), fp, mesa.struct_isa_decode_options(gpu_id, True, 0, True, pre_instr_cb=hd))
    # everything landed in the temp file; rewind and dump it to stdout
    tf.seek(0)
    print(tf.read())
class IR3Compiler(Compiler):
  """Compile serialized NIR compute shaders to Qualcomm Adreno machine code via mesa's freedreno ir3 backend."""
  def __init__(self, chip_id, cache_key="ir3"):
    # gpu_id is built from three packed digits of chip_id (e.g. 0x06030001-style -> 630) -- presumably adreno numbering; verify against callers
    self.dev_id = mesa.struct_fd_dev_id(((chip_id >> 24) & 0xFF) * 100 + ((chip_id >> 16) & 0xFF) * 10 + ((chip_id >> 8) & 0xFF), chip_id)
    self.cc = mesa.ir3_compiler_create(None, self.dev_id, mesa.fd_dev_info(self.dev_id),
      mesa.struct_ir3_compiler_options(disable_cache=True)).contents
    self.cc.has_preamble = False
    self.nir_options = bytes(mesa.ir3_get_compiler_options(self.cc).contents)
    super().__init__(f"compile_{cache_key}")
  def __del__(self): mesa.ir3_compiler_destroy(self.cc)
  # self.cc/self.dev_id wrap C state; pickling rebuilds the compiler from the raw chip_id
  def __reduce__(self): return IR3Compiler, (self.dev_id.chip_id,)
  # ir3_shader_variant info: https://elixir.bootlin.com/mesa/mesa-25.3.0/source/src/freedreno/ir3/ir3_shader.c#L1099
  def compile(self, src) -> bytes:
    nir_shader = deserialize(src, self.nir_options)
    mesa.ir3_nir_lower_io_vars_to_temporaries(nir_shader)
    mesa.ir3_finalize_nir(self.cc, mesa.struct_ir3_shader_nir_options(), nir_shader)
    # build a compute ir3_shader with a single variant and hand both to the ir3 compiler
    shader = rzalloc(mesa.struct_ir3_shader, compiler=ctypes.pointer(self.cc), type=mesa.MESA_SHADER_COMPUTE, nir=nir_shader).contents
    mesa.ir3_nir_post_finalize(shader)
    v = rzalloc(mesa.struct_ir3_shader_variant, type=shader.type, compiler=ctypes.pointer(self.cc), key=mesa.struct_ir3_shader_key()).contents
    v.const_state, shader.variants, shader.variant_count = rzalloc(mesa.struct_ir3_const_state, ctypes.pointer(v)), ctypes.pointer(v), 1
    v.num_uavs = (info:=nir_shader.contents.info).num_ssbos + info.num_images
    assert not mesa.ir3_compile_shader_nir(self.cc, shader, v), "compilation failed"
    lib = ctypes.cast(mesa.ir3_shader_assemble(v), ctypes.POINTER(ctypes.c_uint32))
    # NB: bytes(v) means the pointers in v are no longer safe! a custom __reduce__ that supports pointers for c.Struct would make this simpler
    # blob layout (see unpack_lib): variant struct, const_state struct, immediate values, then the machine code
    ret = bytes(v) + bytes(v.const_state.contents) + ctypes.string_at(v.imm_state.values, v.imm_state.count * 4) + ctypes.string_at(lib, v.info.size)
    mesa.ralloc_free(ctypes.pointer(v))
    return ret
  @staticmethod
  def unpack_lib(lib: bytes) -> tuple[mesa.struct_ir3_shader_variant, mesa.struct_ir3_const_state, bytes, bytes]:
    """Split a compile() blob back into (variant struct, const state, immediates bytes, machine code bytes)."""
    shifted = lib[ctypes.sizeof(v:=mesa.struct_ir3_shader_variant.from_buffer_copy(lib)):]
    shifted = shifted[ctypes.sizeof(cs:=mesa.struct_ir3_const_state.from_buffer_copy(shifted)):]
    return v, cs, shifted[:v.imm_state.count * 4], shifted[v.imm_state.count * 4:]
  def disassemble(self, lib: bytes): disas_adreno(self.unpack_lib(lib)[3], self.dev_id.gpu_id)
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "tinygrad/runtime/support/compiler_mesa.py",
"license": "MIT License",
"lines": 103,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
tinygrad/tinygrad:extra/tinyfs/fetch_raid.py | import json, multiprocessing, functools
from pathlib import Path
from tinygrad.tensor import Tensor
from tinygrad.helpers import tqdm, getenv
raid_root = Path(getenv("RAID_ROOT", "/raid"))
def fetch_file(item):
  """Fetch one (relative path, {hash, size}) entry from tinyfs and materialize it under raid_root."""
  rel_path, info = item
  file_hash, file_size = info["hash"], info["size"]
  dest = raid_root / Path(rel_path)
  dest.parent.mkdir(parents=True, exist_ok=True)
  try:
    pt = Tensor(bytes.fromhex(file_hash), device="CPU").fs_load(file_size).to(f"disk:{dest.as_posix()}").realize()
  except Exception as e:
    print(f"error fetching {dest}, {file_hash}, {file_size}: {e}")
    raise
  # free the buffer right away so a large fetch run doesn't accumulate memory
  pt.uop.buffer.deallocate()
def fetch_mapping(h, l):
  """Fetch the JSON path -> {hash, size} mapping stored in tinyfs under hash `h` (byte length `l`); returns its items as a list."""
  raw = Tensor(bytes.fromhex(h)).fs_load(l).realize().data().tobytes().decode()
  return list(json.loads(raw).items())
if __name__ == "__main__":
  h, l = getenv("HASH", "d734f5e3be9f1e9d863bfaa4fc6c1ef2"), getenv("LENGTH", 175866113)
  # fetch the mapping in a single-process pool so the parent process stays clean for the worker pool below
  with multiprocessing.Pool(processes=1) as pool:
    mapped_files = pool.apply(functools.partial(fetch_mapping, h, l))
  print(f"fetched mapping for {len(mapped_files)} files")
  # fan the per-file fetches out across all cores; tqdm just drives the iterator for progress
  with multiprocessing.Pool(processes=multiprocessing.cpu_count()) as pool:
    for _ in tqdm(pool.imap_unordered(fetch_file, mapped_files), total=len(mapped_files)): pass
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "extra/tinyfs/fetch_raid.py",
"license": "MIT License",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
tinygrad/tinygrad:extra/tinyfs/upload_raid.py | from pathlib import Path
import multiprocessing, json
from tinygrad.tensor import Tensor
from tinygrad.helpers import tqdm
raid_root = Path("/raid")
def upload_file(path: Path):
pt = Tensor(path).realize()
h = pt.fs_store().realize()
pt.uop.realized.deallocate()
return h.data().hex(), path, pt.nbytes()
if __name__ == "__main__":
raid_files = sorted([p for p in raid_root.rglob("*") if p.is_file()])
print(f"found {len(raid_files)} files in /raid")
mapping = {}
with multiprocessing.Pool(processes=multiprocessing.cpu_count()) as pool:
for h, p, s in tqdm(pool.imap_unordered(upload_file, raid_files), total=len(raid_files)):
mapping[p.relative_to(raid_root).as_posix()] = {"hash": h, "size": s}
# sort the mapping by key
mapping = dict(sorted(mapping.items()))
mapping = json.dumps(mapping).encode()
mapping_tensor = Tensor(mapping, device="CPU")
h = mapping_tensor.fs_store().realize()
print(f"final hash: {h.data().hex()}, size: {len(mapping)}")
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "extra/tinyfs/upload_raid.py",
"license": "MIT License",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
tinygrad/tinygrad:test/external/external_test_dev_var.py | import subprocess, unittest, os, sys
from tinygrad.device import Device
class TestTinygradSlow(unittest.TestCase):
def test_env_overwrite_default_device(self):
subprocess.run([f'{Device.DEFAULT}=1 python3 -c "from tinygrad import Device; assert Device.DEFAULT == \\"{Device.DEFAULT}\\""'],
shell=True, check=True)
subprocess.run([f'DISK=1 {Device.DEFAULT}=1 python3 -c "from tinygrad import Device; assert Device.DEFAULT == \\"{Device.DEFAULT}\\""'],
shell=True, check=True)
subprocess.run([f'NPY=1 {Device.DEFAULT}=1 python3 -c "from tinygrad import Device; assert Device.DEFAULT == \\"{Device.DEFAULT}\\""'],
shell=True, check=True)
if Device.DEFAULT != "CPU":
# setting multiple devices fail
with self.assertRaises(subprocess.CalledProcessError):
subprocess.run([f'{Device.DEFAULT}=1 CPU=1 python3 -c "from tinygrad import Device; assert Device.DEFAULT == \\"{Device.DEFAULT}\\""'],
shell=True, check=True)
# setting device via DEV
subprocess.run([f'DEV={Device.DEFAULT.capitalize()} python3 -c "from tinygrad import Device; assert Device.DEFAULT == \\"{Device.DEFAULT}\\""'],
shell=True, check=True)
subprocess.run([f'DEV={Device.DEFAULT.lower()} python3 -c "from tinygrad import Device; assert Device.DEFAULT == \\"{Device.DEFAULT}\\""'],
shell=True, check=True)
subprocess.run([f'DEV={Device.DEFAULT.upper()} python3 -c "from tinygrad import Device; assert Device.DEFAULT == \\"{Device.DEFAULT}\\""'],
shell=True, check=True)
with self.assertRaises(subprocess.CalledProcessError):
subprocess.run([f'DEV={Device.DEFAULT} CPU=1 python3 -c "from tinygrad import Device; assert Device.DEFAULT == \\"{Device.DEFAULT}\\""'],
shell=True, check=True)
class TestRunAsModule(unittest.TestCase):
def test_module_runs(self):
p = subprocess.run([sys.executable, "-m", "tinygrad.device"],stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env={**os.environ, "DEBUG": "1"}, timeout=40,)
out = (p.stdout + p.stderr).decode()
self.assertEqual(p.returncode, 0, msg=out)
if __name__ == '__main__':
unittest.main()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/external/external_test_dev_var.py",
"license": "MIT License",
"lines": 33,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:tinygrad/runtime/ops_tinyfs.py | import socket, json, asyncio, threading, math
from contextlib import asynccontextmanager
from tinygrad.device import Compiled, Allocator
from tinygrad.helpers import DEBUG, getenv
from tinygrad import Tensor
TINYFS_ENDPOINT = getenv("TINYFS_ENDPOINT", "localhost:6767")
TINYFS_TIMEOUT = getenv("TINYFS_TIMEOUT", 60)
class TinyFSDevice(Compiled):
def __init__(self, device:str):
self.op = device[len("tinyfs:"):].upper()
super().__init__(device, TinyFSAllocator(self), None, None, None)
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((TINYFS_ENDPOINT.rsplit(":", 1)[0], int(TINYFS_ENDPOINT.rsplit(":", 1)[1])))
self.sock.settimeout(TINYFS_TIMEOUT)
self.sfile = self.sock.makefile("rwb")
# fetch node info
self.sfile.write(b"INFO\r\n")
self.sfile.flush()
info = self.sfile.readline()
self.node_info = json.loads(info)
if DEBUG >= 2: print(f"nodes: {self.node_info}")
# spawn thread for async copyout
self.start_event = threading.Event()
self.t = threading.Thread(target=self._start_thread, daemon=True)
self.t.start()
self.start_event.wait()
# connection pools
self.conn_pools: dict[str, asyncio.Queue] = {}
self.conn_pools_lock = asyncio.Lock()
def finalize(self):
self.sfile.close()
for pool in self.conn_pools.values():
while not pool.empty():
_, w = pool.get_nowait()
w.close()
asyncio.run_coroutine_threadsafe(w.wait_closed(), self.loop).result()
if hasattr(self, "loop"):
self.loop.call_soon_threadsafe(self.loop.stop)
self.t.join()
def _start_thread(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
self.start_event.set()
self.loop.run_forever()
self.loop.close()
@asynccontextmanager
async def connection(self, loc):
if loc not in self.conn_pools:
await self.conn_pools_lock.acquire()
if loc not in self.conn_pools:
self.conn_pools[loc] = asyncio.Queue(nw:=getenv("ASYNC_COPY_WORKERS", 4))
conn_tasks = [asyncio.open_connection(*self.node_info[loc][-1].rsplit(":", 1)) for _ in range(nw)]
connections = await asyncio.gather(*conn_tasks)
for reader, writer in connections: self.conn_pools[loc].put_nowait((reader, writer))
self.conn_pools_lock.release()
reader, writer = await self.conn_pools[loc].get()
try:
yield reader, writer
finally:
await self.conn_pools[loc].put((reader, writer))
class TinyFSBuffer:
def __init__(self, device:TinyFSDevice, size:int, offset=0, copyout_queue=None, hash_buf=None):
self.device, self.size, self.offset = device, size, offset
self.copyout_queue = copyout_queue or []
self.hash_buf = hash_buf or bytearray()
def __repr__(self): return f"<TinyFSBuffer size={self.size} offset={self.offset}>"
class TinyFSAllocator(Allocator[TinyFSDevice]):
def _alloc(self, size, options):
return TinyFSBuffer(self.dev, size)
def _copyin(self, dest:TinyFSBuffer, src:memoryview):
if DEBUG >= 2: print(f"Copying in {dest.size} bytes to TINYFS:{dest.device.op}")
self.dev.sfile.write(f"{dest.device.op}_IN {dest.size}\r\n".encode())
self.dev.sfile.write(src)
self.dev.sfile.flush()
if dest.device.op == "LOAD":
locs = self.dev.sfile.readline()
dest.copyout_queue = json.loads(locs)
dest.hash_buf = src.tobytes()
elif dest.device.op == "STORE":
expected_hashes = math.ceil(dest.size / Tensor.CHUNK_SIZE)
dest.hash_buf = bytearray(expected_hashes * 16)
self.dev.sfile.readinto(dest.hash_buf)
def _copyout(self, dest:memoryview, src:TinyFSBuffer):
if DEBUG >= 2: print(f"Copying out {src.size} bytes from TINYFS:{src.device.op}")
if src.device.op == "LOAD":
asyncio.run_coroutine_threadsafe(self._copyout_async(dest, src), src.device.loop).result()
elif src.device.op == "STORE":
dest[:] = src.hash_buf
async def _copyout_async(self, dest:memoryview, src:TinyFSBuffer):
async def _worker(i, loc):
async with self.dev.connection(loc) as (reader, writer):
ptr = i * Tensor.CHUNK_SIZE
size = min(len(dest[ptr:ptr+Tensor.CHUNK_SIZE]), Tensor.CHUNK_SIZE)
writer.write(f"CHUNK_OUT {size}\r\n".encode())
writer.write(src.hash_buf[i*16:(i+1)*16])
await asyncio.wait_for(writer.drain(), timeout=TINYFS_TIMEOUT)
chunk = await asyncio.wait_for(reader.readexactly(size), timeout=TINYFS_TIMEOUT)
view = dest[ptr:ptr+len(chunk)]
view[:] = chunk
del view
workers = [asyncio.create_task(_worker(i, loc)) for i, loc in enumerate(src.copyout_queue)]
await asyncio.gather(*workers)
def _offset(self, buf:TinyFSBuffer, size:int, offset:int):
return TinyFSBuffer(buf.device, size, offset, buf.copyout_queue, buf.hash_buf)
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "tinygrad/runtime/ops_tinyfs.py",
"license": "MIT License",
"lines": 104,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
tinygrad/tinygrad:extra/sqtt/roc.py | #!/usr/bin/env python3
import ctypes, pathlib, argparse, pickle, dataclasses, threading
from typing import Generator
from tinygrad.helpers import temp, unwrap, DEBUG
from tinygrad.runtime.ops_amd import ProfileSQTTEvent
from tinygrad.runtime.autogen import rocprof
from tinygrad.renderer.amd.dsl import Inst
from test.amd.disasm import disasm
@dataclasses.dataclass(frozen=True)
class InstExec:
typ:str
pc:int
stall:int
dur:int
time:int
@dataclasses.dataclass(frozen=True)
class WaveSlot:
wave_id:int
cu:int
simd:int
se:int
@property
def cu_loc(self) -> str: return f"SE:{self.se} CU:{self.cu}"
@property
def wave_loc(self) -> str: return f"{self.cu_loc} SIMD:{self.simd} W:{self.wave_id}"
@dataclasses.dataclass(frozen=True)
class WaveExec(WaveSlot):
begin_time:int
end_time:int
insts:bytearray
def unpack_insts(self) -> Generator[InstExec, None, None]:
sz = ctypes.sizeof(struct:=rocprof.rocprofiler_thread_trace_decoder_inst_t)
insts_array = (struct*(len(self.insts)//sz)).from_buffer(self.insts)
for inst in insts_array:
inst_typ = rocprof.enum_rocprofiler_thread_trace_decoder_inst_category_t.get(inst.category)
yield InstExec(inst_typ, inst.pc.address, inst.stall, inst.duration, inst.time)
@dataclasses.dataclass(frozen=True)
class OccEvent(WaveSlot):
time:int
start:int
RunKey = tuple[str, int]
class _ROCParseCtx:
def __init__(self, sqtt_evs:list[ProfileSQTTEvent], disasms:dict[str, dict[int, Inst]]):
self.sqtt_evs, self.disasms = iter(sqtt_evs), {k:{k2:(disasm(v2), v2.size()) for k2,v2 in v.items()} for k,v in disasms.items()}
self.inst_execs:dict[RunKey, list[WaveExec]] = {}
self.occ_events:dict[RunKey, list[OccEvent]] = {}
def next_sqtt(self):
x = next(self.sqtt_evs, None)
self.active_run = (x.kern, x.exec_tag) if x is not None else None
self.active_se = x.se if x is not None else None
self.active_blob = (ctypes.c_ubyte * len(x.blob)).from_buffer_copy(x.blob) if x is not None else None
return self.active_blob
def on_occupancy_ev(self, ev:rocprof.rocprofiler_thread_trace_decoder_occupancy_t):
if DEBUG >= 5: print(f"OCC {ev.time=} {self.active_se=} {ev.cu=} {ev.simd=} {ev.wave_id=} {ev.start=}")
self.occ_events.setdefault(unwrap(self.active_run), []).append(OccEvent(ev.wave_id, ev.cu, ev.simd, unwrap(self.active_se), ev.time, ev.start))
def on_wave_ev(self, ev:rocprof.rocprofiler_thread_trace_decoder_wave_t):
if DEBUG >= 5: print(f"WAVE {ev.wave_id=} {self.active_se=} {ev.cu=} {ev.simd=} {ev.contexts=} {ev.begin_time=} {ev.end_time=}")
# Skip wave events without instruction timings, occupancy events give the start and duration.
if ev.instructions_size == 0: return
insts_blob = bytearray(sz:=ev.instructions_size * ctypes.sizeof(rocprof.rocprofiler_thread_trace_decoder_inst_t))
ctypes.memmove((ctypes.c_char * sz).from_buffer(insts_blob), ev.instructions_array, sz)
self.inst_execs.setdefault(unwrap(self.active_run), []).append(WaveExec(ev.wave_id, ev.cu, ev.simd, unwrap(self.active_se), ev.begin_time,
ev.end_time, insts_blob))
def decode(sqtt_evs:list[ProfileSQTTEvent], disasms:dict[str, dict[int, Inst]]) -> _ROCParseCtx:
ROCParseCtx = _ROCParseCtx(sqtt_evs, disasms)
@rocprof.rocprof_trace_decoder_se_data_callback_t
def copy_cb(buf, buf_size, _):
if (prof_info:=ROCParseCtx.next_sqtt()) is None: return 0
buf[0] = ctypes.cast(prof_info, ctypes.POINTER(ctypes.c_ubyte))
buf_size[0] = len(prof_info)
return len(prof_info)
@rocprof.rocprof_trace_decoder_trace_callback_t
def trace_cb(record_type, events_ptr, n, _):
match record_type:
case rocprof.ROCPROFILER_THREAD_TRACE_DECODER_RECORD_OCCUPANCY:
for ev in (rocprof.rocprofiler_thread_trace_decoder_occupancy_t * n).from_address(events_ptr): ROCParseCtx.on_occupancy_ev(ev)
case rocprof.ROCPROFILER_THREAD_TRACE_DECODER_RECORD_WAVE:
for ev in (rocprof.rocprofiler_thread_trace_decoder_wave_t * n).from_address(events_ptr): ROCParseCtx.on_wave_ev(ev)
case rocprof.ROCPROFILER_THREAD_TRACE_DECODER_RECORD_REALTIME:
if DEBUG >= 5:
pairs = [(ev.shader_clock, ev.realtime_clock) for ev in (rocprof.rocprofiler_thread_trace_decoder_realtime_t * n).from_address(events_ptr)]
print(f"REALTIME {pairs}")
case _:
if DEBUG >= 5: print(rocprof.enum_rocprofiler_thread_trace_decoder_record_type_t.get(record_type), events_ptr, n)
return rocprof.ROCPROFILER_THREAD_TRACE_DECODER_STATUS_SUCCESS
@rocprof.rocprof_trace_decoder_isa_callback_t
def isa_cb(instr_ptr, mem_size_ptr, size_ptr, pc, _):
instr, mem_size_ptr[0] = ROCParseCtx.disasms[unwrap(ROCParseCtx.active_run)[0]][pc.address]
# this is the number of bytes to next instruction, set to 0 for end_pgm
if instr == "s_endpgm": mem_size_ptr[0] = 0
if (max_sz:=size_ptr[0]) == 0: return rocprof.ROCPROFILER_THREAD_TRACE_DECODER_STATUS_ERROR_OUT_OF_RESOURCES
# truncate the instr if it doesn't fit
if (str_sz:=len(instr_bytes:=instr.encode()))+1 > max_sz: str_sz = max_sz
ctypes.memmove(instr_ptr, instr_bytes, str_sz)
size_ptr[0] = str_sz
return rocprof.ROCPROFILER_THREAD_TRACE_DECODER_STATUS_SUCCESS
exc:Exception|None = None
def worker():
nonlocal exc
try: rocprof.rocprof_trace_decoder_parse_data(copy_cb, trace_cb, isa_cb, None)
except AttributeError as e:
exc = RuntimeError("Failed to find rocprof-trace-decoder. Run sudo ./extra/sqtt/install_rocprof_decoder.py to install")
exc.__cause__ = e
(t:=threading.Thread(target=worker, daemon=True)).start()
t.join()
if exc is not None:
raise exc
return ROCParseCtx
def print_data(data:dict) -> None:
from tabulate import tabulate
# plaintext
if "src" in data: print(data["src"])
# table format
elif "cols" in data:
print(tabulate([r[:len(data["cols"])] for r in data["rows"]], headers=data["cols"], tablefmt="github"))
def main() -> None:
import tinygrad.viz.serve as viz
viz.ctxs = []
parser = argparse.ArgumentParser()
parser.add_argument('--profile', type=pathlib.Path, metavar="PATH", help='Path to profile (optional file, default: latest profile)',
default=pathlib.Path(temp("profile.pkl", append_user=True)))
parser.add_argument('--kernel', type=str, default=None, metavar="NAME", help='Kernel to focus on (optional name, default: all kernels)')
parser.add_argument('-n', type=int, default=3, metavar="NUM", help='Max traces to print (optional number, default: 3 traces)')
args = parser.parse_args()
with args.profile.open("rb") as f: profile = pickle.load(f)
viz.get_profile(profile)
# List all kernels
if args.kernel is None:
for c in viz.ctxs:
print(c["name"])
for s in c["steps"]: print(" "+s["name"])
return None
# Find kernel trace
trace = next((c for c in viz.ctxs if c["name"] == f"Exec {args.kernel}"), None)
if not trace: raise RuntimeError(f"no matching trace for {args.kernel}")
n = 0
for s in trace["steps"]:
if "PKTS" in s["name"]: continue
print(s["name"])
data = viz.get_render(s["query"])
print_data(data)
n += 1
if n > args.n: break
if __name__ == "__main__":
main()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "extra/sqtt/roc.py",
"license": "MIT License",
"lines": 145,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
tinygrad/tinygrad:extra/weekly_commits_table.py | # extra/weekly_commits_table.py
import os, subprocess, datetime as dt
NAMES = ["chenyu","George Hotz","nimlgen","qazal","wozeparrot","Christopher Milan"]
REPO = os.environ.get("REPO_PATH",".")
today = dt.date.today()
days = [(today - dt.timedelta(i)).strftime("%Y-%m-%d") for i in range(6,-1,-1)]
seen = {d:{n:False for n in NAMES} for d in days}
cmd = ["git","-C",REPO,"log","--use-mailmap","--since=7 days ago","--no-merges",
"--date=short","--pretty=%ad%x09%aN%x09%ae"]
out = subprocess.run(cmd, capture_output=True, text=True).stdout.splitlines()
for line in out:
try: d, name, email = line.split("\t")
except: continue
if d in seen:
low = (name+" "+email).lower()
for n in NAMES:
if n.lower() in low: seen[d][n] = True
# --- width-aware padding so emoji align ---
try:
from wcwidth import wcswidth as _wcswidth
vlen = lambda s: _wcswidth(s)
except Exception:
vlen = lambda s: sum(2 if ch in "β
β" else 1 for ch in s)
pad = lambda s,w: s + " " * max(0, w - vlen(s))
w_date = 10
w_cols = [max(3, vlen(n)) for n in NAMES]
header = " | ".join([pad("date", w_date)] + [pad(n, w_cols[i]) for i,n in enumerate(NAMES)])
rule = "-+-".join(["-"*w_date] + ["-"*w for w in w_cols])
rows=[]
for d in days:
cells = ["β
" if seen[d][n] else "β" for n in NAMES]
rows.append(" | ".join([pad(d, w_date)] + [pad(c, w_cols[i]) for i,c in enumerate(cells)]))
print("** Commits by day (last 7) **")
print("```")
print("\n".join([header, rule] + rows))
print("```")
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "extra/weekly_commits_table.py",
"license": "MIT License",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
tinygrad/tinygrad:tinygrad/schedule/indexing.py | from typing import Iterator
import functools, itertools
from dataclasses import dataclass, field
from tinygrad.dtype import dtypes, AddrSpace
from tinygrad.uop.ops import PatternMatcher, UPat, Ops, UOp, resolve, GroupOp, graph_rewrite, sint, AxisType, profile_matches
from tinygrad.uop.ops import consumer_map_from_toposort, gate_kernel_sink
from tinygrad.uop.symbolic import symbolic, pm_simplify_valid, pm_drop_and_clauses
from tinygrad.helpers import argsort, all_same, cpu_profile, PCONTIG, colored
ALWAYS_CONTIGUOUS: set[Ops] = {Ops.CONTIGUOUS, Ops.ASSIGN, Ops.COPY, Ops.BUFFER, Ops.BUFFER_VIEW,
Ops.CONST, Ops.BIND, Ops.DEVICE, Ops.MSELECT, Ops.MSTACK, Ops.PARAM,
Ops.DEFINE_LOCAL, Ops.DEFINE_REG, Ops.LOAD, Ops.CALL, Ops.ENCDEC}
def realize(ctx:dict[UOp, None], tr:UOp) -> None: ctx[tr] = None
def realize_srcs(ctx:dict[UOp, None], rb:UOp) -> None:
for s in rb.src:
if s.base.op not in ALWAYS_CONTIGUOUS: ctx[s] = None
def realize_assign_src(ctx:dict[UOp, None], buf:UOp, x:UOp):
# don't realize COPY/BUFFER_VIEW/ENCDEC when they are the direct source of ASSIGN β the ASSIGN target buffer is the output
if x.op in {Ops.COPY, Ops.BUFFER_VIEW, Ops.ENCDEC} and x in ctx \
and not buf.op_in_backward_slice_with_self(Ops.SHRINK, Ops.PERMUTE, Ops.FLIP, Ops.PAD):
del ctx[x]
# you don't usually have to do this for assign unless there's a WAR hazard like TestAssign.test_assign_double_diamond_reduce
if buf.base in x.backward_slice_with_self: ctx[x] = None
pm_generate_realize_map = PatternMatcher([
# always realize SINK src
(UPat(Ops.SINK, name="s"), lambda ctx,s: ctx.update((x.base, None) for x in s.src if x.base.op not in ALWAYS_CONTIGUOUS)),
# always realize
(UPat({Ops.COPY, Ops.BUFFER_VIEW, Ops.CONTIGUOUS, Ops.STORE, Ops.ASSIGN, Ops.ENCDEC}, name="tr"), realize),
# realize srcs of these
(UPat((Ops.COPY, Ops.MSELECT, Ops.MSTACK, Ops.ENCDEC), name="rb"), realize_srcs),
# sometimes realize src of assign
(UPat(Ops.ASSIGN, src=(UPat.var("buf"), UPat.var("x"))), realize_assign_src),
])
@dataclass(frozen=True)
class BufferizeOpts:
# on AddrSpace.LOCAL, device is the id
device: str|tuple[str, ...]|int|None
addrspace: AddrSpace = AddrSpace.GLOBAL
removable: bool = True
@dataclass
class IndexingContext:
realize_map: dict[UOp, None|list[int]] = field(default_factory=dict)
range_map: dict[UOp, tuple[tuple[UOp, ...], tuple[UOp, ...]]] = field(default_factory=dict)
# create ranges
range_idx: Iterator[int] = field(default_factory=itertools.count)
def new_range(self, s:sint, axistype:AxisType=AxisType.LOOP) -> UOp:
if isinstance(s, UOp) and s.op is Ops.RANGE: return s
# if a range has a 1 src, it's the same as UOp.const(dtypes.index, 0)
return UOp.range(s, next(self.range_idx), axistype) if resolve(s!=1) else UOp.const(dtypes.index, 0)
def create_bufferize_and_index_based_on_ranges(ctx:IndexingContext, x:UOp):
if x.op in {Ops.BUFFERIZE, Ops.INDEX}: return None
new_srcs = []
for s in x.src:
new_src = s
if s.op in {Ops.PARAM, Ops.BUFFER_VIEW, Ops.MSTACK, Ops.MSELECT, Ops.AFTER}:
if x in ctx.range_map: new_src = new_src.index(*ctx.range_map[x][0])
elif s in ctx.realize_map:
realized_ranges = ctx.realize_map[s]
assert isinstance(realized_ranges, list), "realize map must contain range list"
closed_ranges = tuple([r for i,r in enumerate(ctx.range_map[s][1]) if i in realized_ranges])
if s.op is Ops.STORE:
# add the ends if this is a store
new_src = s.end(*[r for r in closed_ranges if r.op is Ops.RANGE])
del ctx.realize_map[s]
else:
# the Bufferize before a COPY is not removable. there should be a better way to do this
removable = x.op is not Ops.COPY and s.op not in ALWAYS_CONTIGUOUS
# None in the device assigns it a number later
opts = BufferizeOpts(device=s.device, removable=removable) if len(ctx.range_map[s][1]) == len(realized_ranges) else \
BufferizeOpts(device=s.device, addrspace=AddrSpace.LOCAL, removable=removable)
new_src = UOp(Ops.BUFFERIZE, s.dtype, src=(new_src,)+closed_ranges, arg=opts)
if x in ctx.range_map: new_src = new_src.index(*[r for i,r in enumerate(ctx.range_map[x][0]) if i in realized_ranges])
new_srcs.append(new_src)
# NOTE: do we need this?
return x.replace(src=tns) if x.src != (tns:=tuple(new_srcs)) else None
def convert_pad_to_where_to_keep_behavior_local(ctx:IndexingContext, x:UOp):
if x not in ctx.range_map: return None
valid: UOp = UOp.const(dtypes.bool, True).prod(*[r.get_valid() for r in ctx.range_map[x][0]])
ret = valid.where(x.src[0], UOp.const(x.dtype, 0))
ctx.range_map[ret] = ctx.range_map[x]
return ret
def convert_reduce_axis_to_reduce_with_ranges(ctx:IndexingContext, x:UOp):
# input ranges
new_ranges = [r for i,r in enumerate(ctx.range_map[x][0]) if i in x.arg[1]]
ret = UOp(Ops.REDUCE, x.dtype, src=(x.src[0],)+tuple(new_ranges), arg=x.arg[0])
ctx.range_map[ret] = ctx.range_map[x]
return ret
def remove_movement_op_after_rangeify(ctx:IndexingContext, x:UOp):
if x in ctx.range_map or x.src[0].op is Ops.INDEX: return x.src[0]
def handle_assign_mops(ctx:IndexingContext, assign:UOp, target:UOp, src:UOp):
if target.op in GroupOp.Movement and src.op is not Ops.CALL:
mops = []
while target.op in GroupOp.Movement:
mops.append((target.op, target.marg))
target = target.src[0]
if mops and assign in ctx.range_map:
ret = assign.replace(arg=tuple(mops))
ctx.range_map[ret] = ctx.range_map[assign]
return ret
return None
pm_apply_rangeify = PatternMatcher([
# REDUCE_AXIS -> REDUCE
(UPat(Ops.REDUCE_AXIS, name="x"), convert_reduce_axis_to_reduce_with_ranges),
# PAD -> WHERE
(UPat(Ops.PAD, name="x"), convert_pad_to_where_to_keep_behavior_local),
# store movement ops in ASSIGN arg
(UPat(Ops.ASSIGN, src=(UPat(name="target"), UPat(name="src")), name="assign"), handle_assign_mops),
# finally, apply_rangeify
(UPat(GroupOp.All, name="x"), create_bufferize_and_index_based_on_ranges),
# remove movement op
(UPat(GroupOp.Movement, name="x"), remove_movement_op_after_rangeify),
])
@functools.cache
def _apply_reshape(in_shape:tuple[sint,...], out_shape:tuple[sint, ...], urngs:UOp) -> UOp:
acc:sint = 1
axes_in:list[UOp] = []
for s,src in list(zip(out_shape, urngs.src))[::-1]:
axes_in.append(acc*src)
acc *= s
combined_axes = UOp.const(dtypes.index, 0).sum(*axes_in)
axes_out:list[UOp] = []
for s in in_shape[::-1]:
axes_out.append(combined_axes % s)
combined_axes //= s
# this simplify is doing a lot of heavy lifting. this is the replacement for the reshape view merging code
return graph_rewrite(UOp.sink(*axes_out[::-1]), symbolic+pm_simplify_valid+pm_drop_and_clauses, name="reshape")
# this is the definition of the movement ops
@functools.cache
def apply_movement_op(op:Ops, in_shape:tuple[sint,...], arg:tuple, rngs:tuple[UOp, ...]) -> tuple[UOp, ...]:
match op:
case Ops.SHRINK: rngs = tuple(a if ss == 0 else a+ss for a,(ss,_) in zip(rngs, arg))
case Ops.PERMUTE: rngs = tuple(rngs[p] for p in argsort(arg))
case Ops.FLIP: rngs = tuple(((s-1)-a) if f else a for a,s,f in zip(rngs, in_shape, arg))
case Ops.EXPAND: rngs = tuple(a if in_sh == out_sh else a.const_like(0) for a,in_sh,out_sh in zip(rngs, in_shape, arg))
case Ops.PAD:
# NOTE: the .where(r-s, i) is not inside the graph_rewrite so that `convert_pad_to_where_to_keep_behavior_local`
# wraps the pad with only the newly added valid
rngs = tuple(r if (s == 0 and e == 0) else graph_rewrite((r >= s) & (r < (sh+s)),
symbolic+pm_simplify_valid, name="pad").where(r-s, UOp.invalid()) for r,sh,(s,e) in zip(rngs, in_shape, arg))
case Ops.RESHAPE:
sink = UOp.sink(*rngs).simplify() # NOTE: this applies any commutative flips to the rngs early
sub_array = {r:UOp.range(r.src[0], i, AxisType.PLACEHOLDER) for i,r in enumerate(sink.ranges)}
rngs = _apply_reshape(in_shape, arg, sink.substitute(sub_array)).substitute({v:k for k,v in sub_array.items()}).src
case _: raise RuntimeError(f"{op} is not a MovementOp")
return rngs
@profile_matches
def run_rangeify(tsink:UOp, debug:bool=False) -> tuple[UOp, IndexingContext]:
if debug: print("**************************")
rctx = IndexingContext()
# get ops to realize
graph_rewrite(tsink, pm_generate_realize_map, ctx=rctx.realize_map, name="get realize")
# get the consumer map
with cpu_profile("consumer map in rangeify", "TINY"):
consumer_map = consumer_map_from_toposort(tsink_toposort:=tsink.toposort(gate_kernel_sink))
# explicit rangeify
ending_ranges: dict[UOp, list[UOp]] = {}
for x in reversed(tsink_toposort):
if x.op in {Ops.DEVICE, Ops.UNIQUE}: continue
# no ranges on kernels, they are internal
if x.op in {Ops.CALL, Ops.LINEAR}: continue
# no range on after
if x.op is Ops.AFTER: continue
# treat MSTACK/MSELECT like SINK
if x.op in {Ops.MSTACK, Ops.MSELECT}: continue
if x.dtype.scalar() == dtypes.index: continue # TODO: why do I need this?
ending_ranges[x] = sum([ending_ranges.get(u, []) for u in consumer_map[x]], [])
# *** the ranges on the output are
# 1. new if this op is realized
# 2. from the single consumer if this op only has one consumer
# 3. potentially new if this op has 2+ consumers
consumer_rngs = [rctx.range_map[c][0] for c in consumer_map[x] if c in rctx.range_map]
if x in rctx.realize_map:
# if this is in the realize_map, we create new ranges (at the output)
out_rngs = tuple(rctx.new_range(s) for s in x.shape)
# all ranges are ended now
ending_ranges[x] = []
# mark all ranges as ended
assert rctx.realize_map[x] is None
rctx.realize_map[x] = list(range(len(x.shape)))
elif len(consumer_rngs) == 0:
# if no consumers have ranges and this isn't realized, this doesn't have ranges either.
continue
elif len(consumer_rngs) == 1:
# if this has one consumer, it inherits the ranges from it
out_rngs = consumer_rngs[0]
elif len(consumer_rngs) > 1:
# if this has two consumers, we have to merge the ranges and might create new ones
all_rngs: list[tuple[UOp, ...]] = list(zip(*consumer_rngs))
rngs_valids = []
for valid_rngs in all_rngs:
local_rngs, valids = zip(*[(r.get_idx(), r.get_valid()) for r in valid_rngs])
rngs_valids.append((local_rngs, valids))
# TODO: in RANGEIFY > 1 all_all_same isn't required
all_all_same = all(all_same(local_rngs) for local_rngs,_ in rngs_valids)
_out_rngs = []
_realize_axis = []
for i,(local_rngs,valids) in enumerate(rngs_valids):
# we compare the ranges without their valids
if all_all_same or (PCONTIG and all_same(local_rngs)):
# the new valid is the OR of all the children valids
minimum_valid = UOp.const(dtypes.bool, False).sum(*valids)
_out_rngs.append(graph_rewrite(minimum_valid.where(local_rngs[0], UOp.invalid()), symbolic, name="minimum_valid"))
else:
_out_rngs.append(rctx.new_range(x.shape[i]))
_realize_axis.append(i)
out_rngs = tuple(_out_rngs)
# we have to (partially) realize here if there's new ranges
if len(_realize_axis): rctx.realize_map[x] = _realize_axis
# if this element is a reduce and there's ended ranges, we might have to end some other ranges
if len(ending_ranges[x]) and x.op in GroupOp.Elementwise.union({Ops.REDUCE_AXIS}):
_realize_axis = rctx.realize_map.get(x) or []
for i,r in enumerate(out_rngs):
if i in _realize_axis: continue
if not (PCONTIG > 1) or any(any(rr.arg > e.arg for e in ending_ranges[x]) for rr in r.ranges):
_realize_axis.append(i)
ending_ranges[x] = []
if len(_realize_axis):
rctx.realize_map[x] = _realize_axis
out_rngs = tuple([(rctx.new_range(x.shape[i]) if i in _realize_axis else r) for i,r in enumerate(out_rngs)])
# TODO: some ops don't have shape, enable this after the `.st` property is removed
#assert len(out_rngs) == len(x.shape), \
# f"shape len mismatch {len(out_rngs)} != {len(x.shape)} on {x.op} with {len(consumer_map[x])} consumers and realize {x in realize_map}"
# *** the ranges on the inputs are
# 1. swizzled for MovementOps
# 2. newly created for REDUCE_AXIS
# 3. passed through for everything else
rngs = out_rngs # rngs is the input ranges # pylint: disable=possibly-used-before-assignment
# apply movement ops
if x.op in GroupOp.Movement: rngs = apply_movement_op(x.op, x.src[0].shape, x.marg, rngs)
# if the EXPAND is used to inject a range, we don't mark it as ending_ranges. otherwise we do.
# NOTE: this doesn't actually always end a range, but this is why convs are realized, so for now we need it
if x.op is Ops.EXPAND and all(isinstance(y, int) or y.op is not Ops.RANGE for y in x.shape):
ending_ranges[x] += list(UOp.sink(*[ro for ri, ro in zip(rngs, out_rngs) if ri is not ro]).ranges.keys())
# REDUCE_AXIS creates ranges for the axes it is reducing
if x.op is Ops.REDUCE_AXIS:
rngs = tuple(rctx.new_range(s, axistype=AxisType.REDUCE) if i in x.arg[1] else r for i,(r,s) in enumerate(zip(rngs, x.src[0].shape)))
if debug:
realized_ranges = rctx.realize_map.get(x, None)
if x.op is Ops.RESHAPE or len(rngs) != len(out_rngs):
disp = render_ranges(rngs, realized=realized_ranges) + " -> " + render_ranges(out_rngs, realized=realized_ranges)
else:
disp = render_ranges(rngs, out_rngs, realized=realized_ranges)
print("***" if x in rctx.realize_map else " ",
f"{len(consumer_map[x]):2d} {str(x.op):20s} {str(x._shape):35s} {len(ending_ranges[x]):2d}", disp)
# assign to the range map. rngs are the input ranges, out_rngs are the output ranges, from the x op.
rctx.range_map[x] = (rngs, out_rngs)
tsink = graph_rewrite(tsink, pm_apply_rangeify, ctx=rctx, bottom_up=True, name="apply rangeify")
return tsink, rctx
def render_ranges(*rngs_list, realized) -> str:
disp = []
for i, rs in enumerate(zip(*[[r.render() for r in rngs] for rngs in rngs_list])):
rng = rs[0] if all_same(rs) else " -> ".join(rs)
if realized is not None and i in realized: rng = colored(rng, "yellow")
disp.append("["+rng+"]")
return ''.join(disp)
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "tinygrad/schedule/indexing.py",
"license": "MIT License",
"lines": 254,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
tinygrad/tinygrad:test/external/mlperf_stable_diffusion/external_test_train.py | import unittest, os
from tempfile import TemporaryDirectory
from tinygrad import Tensor
from tinygrad.helpers import getenv
from examples.mlperf.model_train import train_stable_diffusion
class TestTrain(unittest.TestCase):
def test_train_to_ckpt(self):
# train for num_steps, save checkpoint, and stop training
num_steps = 42
os.environ.update({"MODEL": "stable_diffusion", "TOTAL_CKPTS": "1", "CKPT_STEP_INTERVAL": str(num_steps), "GPUS": "8", "BS": "304"})
# NOTE: update these based on where data/checkpoints are on your system
if not getenv("DATADIR", ""): os.environ["DATADIR"] = "/raid/datasets/stable_diffusion"
if not getenv("CKPTDIR", ""): os.environ["CKPTDIR"] = "/raid/weights/stable_diffusion"
with TemporaryDirectory(prefix="test-train") as tmp:
os.environ["UNET_CKPTDIR"] = tmp
with Tensor.train():
saved_ckpts = train_stable_diffusion()
expected_ckpt = f"{tmp}/{num_steps}.safetensors"
assert len(saved_ckpts) == 1 and saved_ckpts[0] == expected_ckpt
if __name__=="__main__":
unittest.main() | {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/external/mlperf_stable_diffusion/external_test_train.py",
"license": "MIT License",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:test/external/mlperf_stable_diffusion/external_test_eval.py | import unittest, os
import numpy as np
from pathlib import Path
from tempfile import TemporaryDirectory
from tinygrad import Device, Tensor
from tinygrad.helpers import getenv, Context
from tinygrad.nn.state import safe_save, torch_load, get_parameters
from examples.mlperf.model_eval import eval_stable_diffusion, vae_decode
from examples.stable_diffusion import AutoencoderKL
def set_eval_params():
  """Seed the eval configuration into the environment; values already set via cli/env take precedence."""
  defaults = {"MODEL": "stable_diffusion", "GPUS": "8", "EVAL_SAMPLES": "600", "CONTEXT_BS": "816", "DENOISE_BS": "600", "DECODE_BS": "384",
              "INCEPTION_BS": "560", "CLIP_BS": "240", "DATADIR": "/raid/datasets/stable_diffusion", "CKPTDIR": "/raid/weights/stable_diffusion",
              "AMD_LLVM": "0"}
  # getenv returns the existing value when one is set, so this only fills in the gaps
  for key, fallback in defaults.items():
    os.environ[key] = getenv(key, fallback)
class TestEval(unittest.TestCase):
  """Eval-pipeline tests; expects mlperf SD data and checkpoints at the configured local paths."""
  def test_eval_ckpt(self):
    set_eval_params()
    with TemporaryDirectory(prefix="test-eval") as tmp:
      os.environ["EVAL_CKPT_DIR"] = tmp
      # NOTE Although this checkpoint has the original fully trained model from StabilityAI, we are using mlperf code that uses different
      # GroupNorm num_groups. Therefore, eval results may not reflect eval results on the original model.
      # The purpose of using this checkpoint is to have reproducible eval outputs.
      # Eval code expects file and weight names in a specific format, as .safetensors (not .ckpt), which is why we resave the checkpoint
      sd_v2 = torch_load(Path(getenv("CKPTDIR", "")) / "sd" / "512-base-ema.ckpt")["state_dict"]
      sd_v2 = {k.replace("model.diffusion_model.", "", 1): v for k,v in sd_v2.items() if k.startswith("model.diffusion_model.")}
      safe_save(sd_v2, f"{tmp}/0.safetensors")
      clip, fid, ckpt = eval_stable_diffusion()
      assert ckpt == 0
      if Device.DEFAULT == "NULL":
        # on the NULL device only coarse sanity checks on the returned scores are asserted
        assert clip == 0
        assert fid > 0 and fid < 1000
      else:
        # observed:
        # clip=0.08369670808315277, fid=301.05236173709545 (if SEED=12345, commit=c01b2c93076e80ae6d1ebca64bb8e83a54dadba6)
        # clip=0.08415728807449341, fid=300.3710877072948 (if SEED=12345, commit=179c7fcfe132f1a6344b57c9d8cef4eded586867)
        # clip=0.0828116238117218, fid=301.241909555543 (if SEED=98765, commit=c01b2c93076e80ae6d1ebca64bb8e83a54dadba6)
        np.testing.assert_allclose(fid, 301.147, rtol=0.1, atol=0)
        np.testing.assert_allclose(clip, 0.08325, rtol=0.1, atol=0)
  # only tested on 8xMI300x system
  @unittest.skipUnless(getenv("HANG_OK"), "expected to hang")
  def test_decoder_beam_hang(self):
    set_eval_params()
    for k,v in {"BEAM": "2", "HCQDEV_WAIT_TIMEOUT_MS": "300000", "BEAM_UOPS_MAX": "8000", "BEAM_UPCAST_MAX": "256", "BEAM_LOCAL_MAX": "1024",
                "BEAM_MIN_PROGRESS": "5", "IGNORE_JIT_FIRST_BEAM": "1"}.items():
      os.environ[k] = getenv(k, v)
    with Context(BEAM=int(os.environ["BEAM"])): # necessary because helpers.py has already set BEAM=0 and cached getenv for "BEAM"
      GPUS = [f"{Device.DEFAULT}:{i}" for i in range(getenv("GPUS", 8))]
      vae = AutoencoderKL()
      for p in get_parameters(vae): p.to_(GPUS).realize()
      x = Tensor.zeros(48,4,64,64).contiguous().to(GPUS).realize()
      x.uop = x.uop.multi(0)
      # run twice: the second pass exercises the jitted/beamed path that used to hang
      for _ in range(2): vae_decode(x, vae)
if __name__=="__main__":
unittest.main() | {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/external/mlperf_stable_diffusion/external_test_eval.py",
"license": "MIT License",
"lines": 55,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:test/external/mlperf_stable_diffusion/external_test_models.py | import unittest
import numpy as np
from pathlib import Path
from tinygrad import Tensor, dtypes, Device
from tinygrad.helpers import getenv
from tinygrad.nn.state import get_parameters
from extra.models import clip
from examples.mlperf.initializers import gelu_erf, init_stable_diffusion, attn_f32_softmax
from typing import Literal
clip_params = {"dims": 1024, "n_heads": 16, "layers": 24, "return_pooled": False, "ln_penultimate": True, "clip_tokenizer_version": "sd_mlperf_v5_0"}

def get_cond_stage_model(GPUS:list[str]|None=None) -> clip.FrozenOpenClipEmbedder:
  """Build the frozen OpenCLIP text encoder, optionally sharding its parameters across GPUS."""
  clip.gelu = gelu_erf  # mlperf reference swaps in the erf-based gelu
  model = clip.FrozenOpenClipEmbedder(**clip_params)
  if GPUS is not None and len(GPUS) > 1:
    for param in get_parameters(model):
      param.to_(GPUS)
  return model
def get_tokens(BS:int) -> Tensor: return Tensor([0] * 77 * BS, dtype=dtypes.int32).reshape(-1, 77)
class TestOpenClip(unittest.TestCase):
  """Tokenizer output, gelu init, and multi-GPU sharding checks for the mlperf OpenCLIP models."""
  def test_tokenizer(self):
    # a fixed prompt must encode to these exact ids, zero-padded to the 77-token context
    prompt = "Beautiful is better than ugly.\nExplicit is better than implicit.\nSimple is better than complex.\nComplex is better than complicated."
    model = get_cond_stage_model()
    tokens = model.tokenizer.encode(prompt, pad_with_zeros=True)
    expected = [49406, 1215, 533, 1539, 1126, 8159, 269, 33228, 533, 1539, 1126, 15269, 585, 269, 4129, 533, 1539, 1126, 6324, 269, 6324, 533,
                1539, 1126, 16621, 269, 49407] + [0]*50
    self.assertEqual(tokens, expected)
  def test_clip_gelu_init(self):
    # every transformer resblock must use the erf-based gelu patched in by get_cond_stage_model
    for resblock in get_cond_stage_model().model.transformer.resblocks:
      self.assertEqual(resblock.mlp.gelu, gelu_erf)
  def test_multigpu_clip_embed(self):
    BS = 304
    GPUS = [f"{Device.DEFAULT}:{i}" for i in range(8)]
    model = get_cond_stage_model(GPUS)
    tokens = get_tokens(BS)
    # tokens sharded on the batch axis should still produce one (BS, 77, 1024) fp32 embedding
    embeds = model.embed_tokens(tokens.shard(GPUS, axis=0)).realize()
    self.assertEqual(embeds.shape, (BS, 77, 1024))
    self.assertEqual(embeds.dtype, dtypes.float32)
  def test_multigpu_clip_score(self):
    BS = 240
    GPUS = [f"{Device.DEFAULT}:{i}" for i in range(8)]
    vision_cfg = {'width': 1280, 'layers': 32, 'd_head': 80, 'image_size': 224, 'patch_size': 14}
    text_cfg = {'width': 1024, 'n_heads': 16, 'layers': 24, 'vocab_size': 49408, 'ctx_length': 77}
    clip.gelu = gelu_erf
    clip_encoder = clip.OpenClipEncoder(1024, text_cfg, vision_cfg)
    for p in get_parameters(clip_encoder): p.to_(GPUS)
    tokens = get_tokens(BS)
    imgs = Tensor.zeros(BS,3,224,224).contiguous()
    # per-sample clip score with both inputs sharded on the batch axis
    scores = clip_encoder.get_clip_score(tokens.shard(GPUS, axis=0), imgs.shard(GPUS, axis=0)).realize()
    self.assertEqual(scores.shape, (BS,))
    self.assertEqual(scores.dtype, dtypes.float32)
class TestInitStableDiffusion(unittest.TestCase):
  """Checks init_stable_diffusion wiring for the mlperf train and eval model variants."""
  def setUp(self):
    # NOTE: set env variable based on where checkpoints are on the system
    self.CKPTDIR = Path(getenv("CKPTDIR", "/raid/weights/stable_diffusion"))
  def helper_test_init(self, version:Literal["v2-mlperf-train", "v2-mlperf-eval"]):
    # shared body for both variants; each concern runs in its own subTest
    model, unet, sqrt_acp, sqrt_omacp = init_stable_diffusion(version, self.CKPTDIR / "sd" / "512-base-ema.ckpt", ["CPU"])
    with self.subTest("test that StableDiffusion has correct models"):
      self.assertEqual(model.model.diffusion_model, unet)
      has_encoder = True if version=="v2-mlperf-eval" else False
      self.assertEqual(hasattr(model, "first_stage_model"), has_encoder, "only the eval model uses the encoder")
      self.assertTrue(isinstance(model.cond_stage_model, clip.FrozenOpenClipEmbedder))
    with self.subTest("test for mlperf unique attributes"):
      self.assertEqual(model.cond_stage_model.tokenizer.version, 'sd_mlperf_v5_0')
      self.assertEqual(unet.out[0].num_groups, 16)
      self.assertEqual(unet.input_blocks[1][1].norm.eps, 1e-6)
      self.assertEqual(unet.input_blocks[1][1].transformer_blocks[0].attn1.attn, attn_f32_softmax)
    with self.subTest("test loaded clip parameters"):
      # spot-check four values against the known checkpoint contents
      sample = model.cond_stage_model.model.transformer.resblocks[8].mlp.c_fc.bias.flatten()[42:46].numpy()
      expected = np.array([-0.49812260270118713, -0.3039605915546417, -0.40284937620162964, -0.45069342851638794], dtype=np.float32)
      np.testing.assert_allclose(sample, expected, rtol=1e-7, atol=0, err_msg="loaded clip parameters are incorrect")
    if version=="v2-mlperf-train":
      with self.subTest("test that zero_module worked"):
        self.assertTrue((unet.out[2].weight == 0).all().item(), "expected all zeroes")
        self.assertTrue((unet.out[2].bias == 0).all().item(), "expected all zeroes")
    elif version=="v2-mlperf-eval":
      with self.subTest("test loaded vae parameters"):
        sample = model.first_stage_model.decoder.up[0]['block'][1].conv2.weight.flatten()[42:46].numpy()
        expected = np.array([0.08192943036556244, 0.040095631033182144, 0.07541035860776901, 0.1475081741809845], dtype=np.float32)
        np.testing.assert_allclose(sample, expected, rtol=1e-7, atol=0, err_msg="loaded vae parameters are incorrect")
    # NOTE(review): indentation reconstructed — the two subTests below are assumed to run for both versions; confirm against upstream
    with self.subTest("check schedules"):
      # first and last entries of the returned schedules
      expected = np.array([0.9995748996734619, 0.06826484948396683], dtype=np.float32)
      np.testing.assert_allclose(sqrt_acp[[0,-1]].numpy(), expected, rtol=1e-7, atol=0, err_msg="sqrt_acp is incorrect")
      expected = np.array([0.029155133292078972, 0.9976672530174255], dtype=np.float32)
      np.testing.assert_allclose(sqrt_omacp[[0,-1]].numpy(), expected, rtol=1e-7, atol=0, err_msg="sqrt_omacp is incorrect")
    with self.subTest("check mixed precision"):
      out = unet.input_blocks[2][1].proj_in(Tensor.randn(320, dtype=dtypes.float32))
      self.assertEqual(out.dtype, dtypes.bfloat16, "expected float32 to be downcast to bfloat16 by Linear")
      out = unet.out[2](Tensor.randn(304,320,64,64, dtype=dtypes.float32))
      self.assertEqual(out.dtype, dtypes.bfloat16, "expected float32 to be downcast to bfloat16 by Conv2d")
      out = unet.input_blocks[1][1].transformer_blocks[0].norm1(Tensor.randn(320, dtype=dtypes.bfloat16))
      self.assertEqual(out.dtype, dtypes.float32, "expected bfloat16 to be upcast to float32 by LayerNorm")
      out = unet.input_blocks[5][0].in_layers[0](Tensor.randn(304, 640, dtype=dtypes.bfloat16))
      self.assertEqual(out.dtype, dtypes.float32, "expected bfloat16 to be upcast to float32 by GroupNorm")
  def test_train_model(self):
    self.helper_test_init("v2-mlperf-train")
  def test_eval_model(self):
    self.helper_test_init("v2-mlperf-eval")
if __name__=="__main__":
unittest.main() | {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/external/mlperf_stable_diffusion/external_test_models.py",
"license": "MIT License",
"lines": 98,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:tinygrad/codegen/simplify.py | import itertools
from tinygrad.uop.ops import UOp, PatternMatcher, UPat, Ops, graph_rewrite, _substitute, range_start
from tinygrad.uop.symbolic import symbolic
from tinygrad.helpers import partition
from tinygrad.dtype import dtypes, ImageDType
def flatten_range(r:UOp) -> UOp|None:
  """Replace the range srcs of r (REDUCE/STORE/END) with the flattened set of RANGEs they depend on.

  Returns None when r carries no range srcs, so the pattern matcher keeps the original uop.
  """
  off = range_start[r.op]  # index where the range srcs begin for this op
  rngs = r.src[off:]
  if not rngs: return None  # idiomatic emptiness check (was `if not len(rngs)`)
  # toposort of the sinked ranges yields every RANGE they transitively depend on, deduplicated and ordered
  new_rngs = [x for x in UOp.sink(*rngs).toposort() if x.op is Ops.RANGE]
  return r.replace(src=r.src[:off]+tuple(new_rngs))
# normalizes the range srcs of range-carrying ops via flatten_range
pm_flatten_range = PatternMatcher([
  # real ranges only
  (UPat((Ops.REDUCE, Ops.STORE, Ops.END), name="r"), flatten_range),
])
def count_divmod(x:UOp) -> int: return len([u for u in x.toposort() if u.op in {Ops.IDIV, Ops.MOD}])
def simplify_merge_adjacent(u:UOp) -> UOp|None:
  """Try merging pairs of ranges of u (END or REDUCE) into one range of the product size.

  A merge r0,r1 -> R (with R//s1 and R%s1 substituted back) is kept only when it does not
  increase the IDIV/MOD count, i.e. when the indexing actually simplifies.
  """
  # ranges grouped per REDUCE; two ranges may only merge if they appear in exactly the same reduces
  reduce_ranges = [x.ranges for x in u.backward_slice_with_self if x.op is Ops.REDUCE]
  # on END we only want to merge adjacent ranges, on REDUCE we want to try all combinations
  for r0, r1 in (zip(u.ended_ranges, u.ended_ranges[1:]) if u.op is Ops.END else itertools.permutations(u.ended_ranges, 2)):
    # check same type (the axis type is the last element of the range arg)
    if r0.arg[-1] == r1.arg[-1]:
      # check if the ranges to merge are in the same reduces
      if all((r0 in rngs) == (r1 in rngs) for rngs in reduce_ranges):
        s0, s1 = r0.src[0], r1.src[0]
        # do the merge: one range of size s0*s1 replaces both
        new_range = r0.replace(src=(s0*s1,))
        nidx = graph_rewrite(u, _substitute+symbolic+pm_flatten_range, ctx={r0:new_range//s1, r1:new_range%s1},
                             name=f"check_merge_{r0.arg[0]}_{r1.arg[0]}")
        # keep the rewrite only if it simplifies (divmod count did not grow)
        if count_divmod(nidx) <= count_divmod(u):
          u = nidx
  return u
# range-merging pass entry point: applies to loop ENDs and REDUCEs
pm_simplify_ranges = PatternMatcher([
  (UPat((Ops.END, Ops.REDUCE), name="u"), simplify_merge_adjacent),
])
def mark_range_mod(ctx:dict[UOp, UOp|None], r:UOp, c:UOp) -> None:
  # record r -> c when the range size is a constant divisible by c, marking r a candidate to split as (r//c)*c + (r%c)
  if r not in ctx and r.src[0].op is Ops.CONST and r.src[0].divides(c.arg) is not None: ctx[r] = c
def do_substitute(ctx:dict[UOp, UOp|None], x: UOp) -> UOp|None:
  """Split every marked range r into outer*c + inner and substitute the splits into the sink x."""
  def _split(rng:UOp, c:UOp) -> UOp:
    # outer part counts in steps of c (sub-index 0), inner part covers the remainder (sub-index 1)
    outer = rng.replace(src=(rng.src[0]//c,), arg=rng.arg[0:-1]+(0,rng.arg[-1]))
    inner = rng.replace(src=(c,), arg=rng.arg[0:-1]+(1,rng.arg[-1]))
    return outer*c + inner
  substitutions = {rng: _split(rng, c) for rng, c in ctx.items() if c is not None}
  if not substitutions: return None
  result = x.substitute(substitutions).simplify()
  ctx.clear()  # marks are consumed; reset for the next rewrite pass
  return result
def dont_sub_ranges_for_image(ctx:dict[UOp, UOp|None], x:UOp) -> None:
  # stores into image buffers veto the split: mark every range feeding this store as None (= do not split)
  if isinstance(x.src[0].src[0].dtype, ImageDType):
    for s in x.src[0].ranges: ctx[s] = None
# pass that splits ranges observed under a constant mod, except when vetoed by image stores
pm_split_ranges = PatternMatcher([
  # mark ranges taken mod a constant as split candidates
  (UPat(Ops.RANGE, name="r")%UPat.cvar("c"), mark_range_mod),
  # image stores remove their ranges from the candidate set
  (UPat(Ops.STORE, name="x"), dont_sub_ranges_for_image),
  # at the sink, apply all surviving splits at once
  (UPat(Ops.SINK, name="x"), do_substitute),
])
# **** reduce simplification ****

def no_range(u:UOp) -> bool:
  """True when u's backward slice (u included) contains no RANGE uop."""
  return all(x.op is not Ops.RANGE for x in u.backward_slice_with_self)
def reduce_unparented(red:UOp) -> UOp|None:
  """Drop ranges a REDUCE doesn't depend on, folding them in algebraically.

  An ADD reduce over an unused range of size N multiplies the value by N; a MUL reduce raises
  it to the N-th power; a MAX reduce simply drops the range.
  """
  if red.arg not in {Ops.ADD, Ops.MAX, Ops.MUL}: return None
  assert all(x.op is Ops.RANGE for x in red.src[1:]), "some reduce srcs aren't ranges"
  reduce_parented, reduce_unparented = partition(red.src[1:], lambda x: x in red.src[0].ranges)
  if len(reduce_unparented) == 0: return None
  # keep a REDUCE (with only the used ranges) unless none are left and the dtype already matches
  ret = red.replace(src=(red.src[0],)+tuple(reduce_parented)) if len(reduce_parented) or red.dtype != red.src[0].dtype else red.src[0]
  if red.arg is Ops.ADD:
    for r in reduce_unparented: ret = ret * r.src[0].cast(ret.dtype.scalar()).broadcast(ret.dtype.count)
  if red.arg is Ops.MUL:
    for r in reduce_unparented: ret = ret ** r.src[0].cast(ret.dtype.scalar()).broadcast(ret.dtype.count)
  return ret
pm_reduce_unparented = PatternMatcher([
  # remove any ranges from a REDUCE that aren't referenced in the reduce source
  (UPat(Ops.REDUCE, name="red"), reduce_unparented),
])
# rules that try to express an ADD reduce over a range in closed form (driven by reduce_collapse)
pm_reduce_collapse = pm_reduce_unparented + PatternMatcher([
  # lift x+y out of reduce on lt
  ((UPat.var("x")+UPat.var("y")).or_casted() < UPat.var("c"), lambda x,y,c: (x < (c.cast(y.dtype)-y)) if no_range(y) and no_range(c) else None),
  # lift x*y out of reduce
  ((UPat.var("x")*UPat.var("y")) < UPat.var("c"),
   lambda x,y,c: (x < ((c+y-1) // y)) if no_range(y) and no_range(c) and dtypes.is_int(y.dtype) and y.vmin > 0 else None),
  # fold the range
  # bound from below
  ((UPat(Ops.RANGE, name="r") < UPat.var("cut")).where(0, UPat.var("val")).reduce(UPat.var("r"), arg=Ops.ADD),
   lambda r,cut,val: (r.src[0]-cut).maximum(0).minimum(r.src[0]).cast(val.dtype) * val if no_range(val) else None),
  # bound from two sides
  (((UPat.var("r")<UPat.var("lower")).logical_not()&(UPat(Ops.RANGE, name="r")<UPat.var("upper"))).where(UPat.var("val"), 0).reduce(UPat.var("r"),
    arg=Ops.ADD), lambda r,lower,upper,val:
    (upper.minimum(r.src[0])-lower.maximum(0)).maximum(0).minimum(r.src[0]).cast(val.dtype) * val if no_range(val) else None),
  # bound from above
  ((UPat(Ops.RANGE, name="r") < UPat.var("cut")).where(UPat.var("val"), 0).reduce(UPat.var("r"), arg=Ops.ADD),
   lambda r,cut,val: cut.maximum(0).minimum(r.src[0]).cast(val.dtype) * val if no_range(val) else None),
  # REDUCE on ADD
  ((UPat.var("x")+UPat.var("y")).reduce(arg=Ops.ADD, allow_any_len=True, name="r"),
   lambda x,y,r: x.reduce(*r.src[1:], arg=Ops.ADD) + y.reduce(*r.src[1:],arg=Ops.ADD)),
  # AND on WHERE
  ((UPat(Ops.DEFINE_VAR, name="x") & UPat.var("y")).where(UPat.var("c"), 0).reduce(arg=Ops.ADD, allow_any_len=True, name="r"),
   lambda x,y,c,r: y.where(c, 0).reduce(*r.src[1:], arg=Ops.ADD)*x.cast(c.dtype)),
  # MUL casted bool
  ((UPat.var("x") * UPat.var("gate", dtype=dtypes.bool).cast()), lambda x,gate: gate.where(x, 0)),
])+symbolic
# extension of the collapse rules for reduces over gated loads (tensor-indexed-by-tensor)
pm_reduce_load_collapse = pm_reduce_collapse + PatternMatcher([
  # lift x+y out of reduce on ne
  ((UPat.var("x")+UPat.var("y")).or_casted() != UPat.var("c"), lambda x,y,c: (x != (c.cast(y.dtype)-y)) if no_range(y) and no_range(c) else None),
  # reduce on gated load becomes can substitute the range and remove the reduce
  ((UPat.var("idx")!=(UPat(Ops.RANGE, name="r").or_casted())).where(0, UPat.var("expr")).reduce(UPat.var("r"), arg=Ops.ADD),
   lambda r,idx,expr: (v:=(idx.cast(r.dtype) >= 0) & (idx.cast(r.dtype) < r.src[0])).where(expr.substitute({r:idx.cast(r.dtype).valid(v)}),0)),
])
def reduce_collapse(red:UOp, u:UOp, pm:PatternMatcher=pm_reduce_collapse) -> UOp|None:
  """Try to eliminate every range of an ADD reduce by rewriting its body into closed form.

  External inputs are temporarily replaced by DEFINE_VARs so the rewriter sees a small pure
  expression; if the rewritten sink still contains a RANGE, the collapse failed and None is returned.
  """
  for r in red.src[1:]:
    # restrict the toposort to the part of the graph that actually depends on this range
    included = u.toposort(gate=lambda x: r in x.ranges)
    if any(x.op in {Ops.STORE, Ops.REDUCE} for x in included): return None
    replaces: dict[UOp, UOp] = {}
    # NOTE(review): this loop rebinds the parameter u; the last toposort entry is used below — appears intentional, confirm
    for u in included:
      for s in u.src:
        if s in included or s in replaces or s.op in {Ops.CONST, Ops.VCONST, Ops.PARAM, Ops.DEFINE_LOCAL, Ops.DEFINE_VAR}: continue
        replaces[s] = UOp(Ops.DEFINE_VAR, dtype=s.dtype, arg=(f'in{len(replaces)}', s.vmin, s.vmax))
    collapse_fxn = u.substitute(replaces).reduce(r, arg=Ops.ADD)
    sink = graph_rewrite(collapse_fxn, pm, name="reduce_collapse")
    if not no_range(sink): return None  # the range survived: collapse failed
    u = sink.substitute({v:k for k,v in replaces.items()})  # swap the real inputs back in
  return u
# same collapse, but with the load-aware pattern set
def reduce_load_collapse(red:UOp, u:UOp) -> UOp|None: return reduce_collapse(red, u, pm=pm_reduce_load_collapse)
# remove REDUCE without loads (generic arange opt / indexing).
pm_reduce_simplify = pm_reduce_unparented + PatternMatcher([
  # collapse an ADD reduce whose body becomes range-free after rewriting
  (UPat(Ops.REDUCE, src=(UPat.var("u"),), allow_any_len=True, arg=Ops.ADD, name="red"), reduce_collapse),
])
# remove REDUCE on load, comes from indexing a tensor with another tensor
def no_load(u:UOp) -> bool:
  """True when u's backward slice (u included) contains no INDEX uop."""
  return all(x.op is not Ops.INDEX for x in u.backward_slice_with_self)
pm_load_collapse = PatternMatcher([
  # collapse an ADD reduce over a gated load (tensor indexed by another tensor)
  (UPat(Ops.REDUCE, arg=Ops.ADD, src=(UPat.var("u"), UPat()), name="red"), reduce_load_collapse),
  # we want to make sure we dont do math on a loaded index since that can cause overflow, this undoes the rule in pm_reduce_load_collapse
  ((UPat.var("x", dtypes.index)+UPat.var("y"))<UPat.var("c"), lambda x,y,c: x < c-y if no_load(y) and no_load(c) and not no_load(x) else None),
])
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "tinygrad/codegen/simplify.py",
"license": "MIT License",
"lines": 131,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
tinygrad/tinygrad:test/opt/test_tensor_cores.py | import numpy as np
import unittest
from dataclasses import replace
from tinygrad import Device, Tensor, dtypes
from tinygrad.tensor import _to_np_dtype
from tinygrad.uop.ops import Ops
from tinygrad.dtype import DType
from tinygrad.device import is_dtype_supported
from tinygrad.helpers import AMX, AMD_LLVM, CPU_LLVM, Context
from test.helpers import slow
from tinygrad.engine.realize import CompiledRunner, get_program
from tinygrad.codegen.opt import Opt, OptOps, KernelOptError
from tinygrad.codegen.opt.tc import amd_cdna_1616128
# TODO: write a clean version of this
from test.backend.test_linearizer import helper_realized_ast, helper_linearizer_opt
# NOTE: get_program always passes in Device[Device.DEFAULT].renderer explicitly for process_replay!!!
def helper_tc_ensure_uops_and_opts_count(N: int, M:int, K:int, dtype_in:DType, dtype_out:DType, axis:int=0, tc_select:int=-1, tc_opt:int=0,
                                         ensure_triggered:bool=True):
  """Compile an MxK @ KxN matmul with a TC opt applied and check whether tensor cores triggered.

  When ensure_triggered is True, require at least one WMMA uop and exactly one applied TC opt;
  otherwise require that applying the TC opt raises KernelOptError.
  """
  a, b = Tensor.rand(M, K, dtype=dtype_in), Tensor.rand(K, N, dtype=dtype_in)
  r = a.matmul(b, dtype=dtype_out)
  sched = r.schedule()
  realized_ast = sched[-1].ast
  opts_to_apply = [Opt(OptOps.TC, axis, (tc_select, tc_opt, 1))]
  if ensure_triggered:
    program = get_program(realized_ast, Device[Device.DEFAULT].renderer, opts=opts_to_apply)
    wmmas = len([uop for uop in program.uops if uop.op is Ops.WMMA])
    tcs = len([x for x in program.applied_opts if x.op is OptOps.TC])
    assert wmmas > 0, "tensor core not triggered"
    assert tcs == 1, "tensor core opt not included"
  else:
    # keep the try body minimal and assert in the else branch, so an AssertionError can never
    # be confused with the KernelOptError we expect (was: `assert False` inside the try)
    try:
      get_program(realized_ast, Device[Device.DEFAULT].renderer, opts=opts_to_apply)
    except KernelOptError:
      pass
    else:
      assert False, "OptOps.TC triggered, expected KernelOptError"
def helper_tc_allclose(N:int, M:int, K:int, dtype_in:DType, dtype_out:DType, axis:int=0, tc_select:int=-1, tc_opt:int=0, use_tensor_cores:int=1):
  """Run an MxK @ KxN matmul through the TC-optimized kernel and compare against numpy."""
  a, b = Tensor.rand(M, K, dtype=dtype_in), Tensor.rand(K, N, dtype=dtype_in)
  np_a, np_b = a.numpy(), b.numpy()
  r = a.matmul(b, dtype=dtype_out)
  if dtype_in == dtypes.bfloat16: r = r.float()  # numpy can't compare bf16 directly
  realized_ast, bufs = helper_realized_ast(r)
  opts = [Opt(op=OptOps.TC, axis=axis, arg=(tc_select, tc_opt, use_tensor_cores))]
  prg = CompiledRunner(replace(get_program(realized_ast, Device[Device.DEFAULT].renderer, opts=opts), device=Device.DEFAULT))
  if use_tensor_cores == 1: assert len([uop for uop in prg.p.uops if uop.op is Ops.WMMA]) > 0, "wmma not triggered"
  assert len([x for x in prg.p.uops[-1].arg.applied_opts if x.op is OptOps.TC]) == 1, "tensor core opt not included"
  prg.exec(bufs)
  # tolerances scale with input precision
  if dtype_in == dtypes.half: tc_atol, tc_rtol = 1e-2, 1e-3
  elif dtype_in == dtypes.bfloat16: tc_atol, tc_rtol = (1e-1, 2e-2) if dtype_out == dtypes.bfloat16 else (1e-2, 1e-2)
  else: tc_atol, tc_rtol = 5e-3, 1e-4
  c = bufs[0].numpy().reshape((M,N))
  np.testing.assert_allclose(c, np_a @ np_b, atol=tc_atol, rtol=tc_rtol)
class TestTensorCores(unittest.TestCase):
  """Checks that OptOps.TC emits WMMA uops, renders backend-specific code, and stays numerically correct."""
  # TODO: don't skip bf16 for real device (METAL, AMD)
  @Context(ALLOW_TF32=1)
  @unittest.skipUnless(Device[Device.DEFAULT].renderer.tensor_cores, "test requires tensor cores")
  def test_tensor_cores(self):
    # smallest matmul each tensor-core shape supports must match numpy
    for tc in Device[Device.DEFAULT].renderer.tensor_cores:
      if not is_dtype_supported(tc.dtype_in) or not is_dtype_supported(tc.dtype_out): continue
      # for AMX, tc.dims[2] == 1 so reduceop is None thus tensor_cores are not triggered
      helper_tc_allclose(tc.dims[0], tc.dims[1], 2 if AMX else tc.dims[2], tc.dtype_in, tc.dtype_out, axis=0, tc_opt=0)
  @Context(ALLOW_TF32=1)
  @unittest.skipIf(Device.DEFAULT == "PYTHON", "not generated on EMULATED device")
  @unittest.skipUnless(Device[Device.DEFAULT].renderer.tensor_cores, "test requires tensor cores")
  def test_tensor_cores_codegen(self):
    # rendered source must contain the backend-specific WMMA marker
    for tc in Device[Device.DEFAULT].renderer.tensor_cores:
      if not is_dtype_supported(tc.dtype_in) or not is_dtype_supported(tc.dtype_out): continue
      n, m, k = tc.dims[0], tc.dims[1], 2 if AMX else tc.dims[2]
      a, b = Tensor.rand(m, k, dtype=tc.dtype_in), Tensor.rand(k, n, dtype=tc.dtype_in)
      r = a.matmul(b, dtype=tc.dtype_out)
      prg = get_program(r.schedule()[-1].ast, Device[Device.DEFAULT].renderer, opts=[Opt(op=OptOps.TC, axis=0, arg=(-1, 2, 1))])
      if Device.DEFAULT == "CPU" and CPU_LLVM:
        assert "0x201000" in prg.src
      elif Device.DEFAULT == "AMD" and AMD_LLVM:
        assert "@llvm.amdgcn.wmma" in prg.src
      elif Device[Device.DEFAULT].renderer.suffix == "PTX":
        assert "mma.sync.aligned" in prg.src
      else:
        assert "__WMMA_" in prg.src
  @Context(ALLOW_TF32=1)
  @unittest.skipIf((Device.DEFAULT == "AMD") or (Device.DEFAULT == "PYTHON" and Device.default.renderer.device == "AMD"), "broken for AMD")
  @unittest.skipUnless(Device[Device.DEFAULT].renderer.tensor_cores, "test requires tensor cores")
  def test_tensor_cores_padded(self):
    # dims one past the native TC shape force the padded TC path (tc_opt=2)
    for tc in Device[Device.DEFAULT].renderer.tensor_cores:
      if not is_dtype_supported(tc.dtype_in) or not is_dtype_supported(tc.dtype_out): continue
      helper_tc_allclose(tc.dims[0]+(pad:=1), tc.dims[1]+pad, tc.dims[2]+pad, tc.dtype_in, tc.dtype_out, tc_opt=2)
  # AMD compiler bug: AMD miscompiles non-zero padded tc kernels with -O3, producing wrong results, nans or hang (see #9606)
  # Internal bug: zero-stride dimensions combined with a mask may produce wrong index/valid for pad == 1 on AMD
  @unittest.skipUnless((Device.DEFAULT == "AMD") or (Device.DEFAULT == "PYTHON" and Device.default.renderer.device == "AMD"), "test for AMD's tc")
  @unittest.skipUnless(Device[Device.DEFAULT].renderer.tensor_cores, "test requires tensor cores")
  @unittest.skip("warp elements not duplicated properly across lanes")
  def test_tensor_cores_padded_amd(self):
    for tc in Device[Device.DEFAULT].renderer.tensor_cores:
      if not is_dtype_supported(tc.dtype_in) or not is_dtype_supported(tc.dtype_out): continue
      helper_tc_allclose(tc.dims[0]+(pad:=1), tc.dims[1]+pad, tc.dims[2]+pad, tc.dtype_in, tc.dtype_out, tc_opt=2)
  @Context(ALLOW_TF32=1)
  @unittest.skipUnless(Device[Device.DEFAULT].renderer.tensor_cores, "test requires tensor cores")
  def test_tensor_cores_padded_uops(self):
    for tc in Device[Device.DEFAULT].renderer.tensor_cores:
      pad = 1
      # check that TC is triggered for TC_OPT=2
      helper_tc_ensure_uops_and_opts_count(tc.dims[0]+pad, tc.dims[1]+pad, tc.dims[2]+pad,
                                           tc.dtype_in, tc.dtype_out, tc_opt=2, ensure_triggered=True)
      # check that TC is not triggered for TC_OPT<2
      helper_tc_ensure_uops_and_opts_count(tc.dims[0]+pad, tc.dims[1]+pad, tc.dims[2]+pad,
                                           tc.dtype_in, tc.dtype_out, tc_opt=1, ensure_triggered=False)
      helper_tc_ensure_uops_and_opts_count(tc.dims[0]+pad, tc.dims[1]+pad, tc.dims[2]+pad,
                                           tc.dtype_in, tc.dtype_out, tc_opt=0, ensure_triggered=False)
      # check excessive padding doesn't trigger padded TC in TC_OPT=2
      helper_tc_ensure_uops_and_opts_count(tc.dims[0]//4, tc.dims[1], tc.dims[2], tc.dtype_in, tc.dtype_out, tc_opt=2, ensure_triggered=False)
      helper_tc_ensure_uops_and_opts_count(tc.dims[0], tc.dims[1]//4, tc.dims[2], tc.dtype_in, tc.dtype_out, tc_opt=2, ensure_triggered=False)
      if not AMX and tc not in amd_cdna_1616128: # AMX tc.dims[2] == 1
        helper_tc_ensure_uops_and_opts_count(tc.dims[0], tc.dims[1], tc.dims[2]//8, tc.dtype_in, tc.dtype_out, tc_opt=2, ensure_triggered=False)
  @Context(ALLOW_TF32=1)
  @unittest.skipIf(Device.DEFAULT == "PYTHON", "not generated on EMULATED device")
  @slow
  @unittest.skipUnless(Device[Device.DEFAULT].renderer.tensor_cores, "test requires tensor cores")
  def test_tensor_cores_multi_reduce(self):
    for tc in Device[Device.DEFAULT].renderer.tensor_cores:
      if not is_dtype_supported(tc.dtype_in) or not is_dtype_supported(tc.dtype_out): continue
      if tc.dtype_in is dtypes.bfloat16: continue # <-- broken with numpy
      # this will be a M=G16, N=G32, M=G16, M=G16, K=R16, K=R16, K=R16 with 9 choices of TC MNK axes
      golden_result = None
      for axis in range(9):
        a = Tensor.rand(16, 16, 29, 29, dtype=tc.dtype_in).realize()
        b = Tensor.rand(32, 16, 16, 16, dtype=tc.dtype_in).realize()
        c = a.conv2d(b, padding=1, dtype=tc.dtype_out)
        realized_ast, real_bufs = helper_realized_ast(c)
        program = get_program(realized_ast, Device[Device.DEFAULT].renderer, opts=[Opt(OptOps.TC, axis, (-1, 2, 1))])
        assert len([uop for uop in program.uops if uop.op is Ops.WMMA]) > 0, "tensor core not triggered"
        assert len([x for x in program.applied_opts if x.op is OptOps.TC]) == 1, "tensor core opt not included"
        prg = CompiledRunner(program)
        # TODO: support this even if numpy doesn't
        if _to_np_dtype(real_bufs[0].dtype) is None: continue
        real_bufs[0].copyin(np.zeros((real_bufs[0].size, ), dtype=_to_np_dtype(real_bufs[0].dtype)).data) # Zero to check that all values are filled
        prg.exec(real_bufs)
        result = np.frombuffer(real_bufs[0].as_memoryview(), _to_np_dtype(real_bufs[0].dtype))
        # ensure the results for each choice of axis matches
        if golden_result is None: golden_result = np.frombuffer(real_bufs[0].as_memoryview(), _to_np_dtype(real_bufs[0].dtype))
        np.testing.assert_allclose(result, golden_result, atol=0.1, rtol=0.2)
  @Context(ALLOW_TF32=1)
  @unittest.skipIf(Device.DEFAULT == "PYTHON", "slow on EMULATED device")
  @unittest.skipUnless(Device[Device.DEFAULT].renderer.tensor_cores, "test requires tensor cores")
  def test_tensor_cores_unroll_phi(self):
    tc = Device[Device.DEFAULT].renderer.tensor_cores[0]
    x, y = Tensor.rand(128, 128, dtype=tc.dtype_in), Tensor.rand(128, 128, dtype=tc.dtype_in)
    r = x.matmul(y, dtype=tc.dtype_out)
    opts = [Opt(OptOps.UNROLL, 0, 4)]
    ast = helper_linearizer_opt(r, [opts], apply_tc=True, atol=3e-2, rtol=1e-3)
    # the WMMA accumulator must not come straight from a STORE after unrolling
    for u in get_program(ast, Device[Device.DEFAULT].renderer, opts=opts).uops:
      if u.op is Ops.WMMA:
        assert u.src[-1].src[0].op != Ops.STORE
  @Context(ALLOW_TF32=1)
  @unittest.skipIf(Device.DEFAULT == "PYTHON", "slow on EMULATED device")
  @unittest.skipUnless(Device[Device.DEFAULT].renderer.tensor_cores, "test requires tensor cores")
  @unittest.skipIf(Device.DEFAULT in {"CPU"}, "CPU does not support using a different type for accumulation")
  def test_tensor_cores_unroll_casted_phi(self):
    # same as above but with a TC whose accumulator dtype differs from the input dtype
    tc = [tc for tc in Device[Device.DEFAULT].renderer.tensor_cores if tc.dtype_in != tc.dtype_out][0]
    x, y = Tensor.rand(128, 128, dtype=tc.dtype_in), Tensor.rand(128, 128, dtype=tc.dtype_in)
    r = x.matmul(y, dtype=tc.dtype_out)
    opts = [Opt(OptOps.UNROLL, 0, 4)]
    ast = helper_linearizer_opt(r, [opts], apply_tc=True, atol=3e-2, rtol=1e-3)
    for u in get_program(ast, Device[Device.DEFAULT].renderer, opts=opts).uops:
      if u.op is Ops.WMMA:
        #assert u.src[-1].dtype == dtypes.float.vec(prod(tc.thread_local_sizes[2]))
        assert u.src[-1].src[0].op != Ops.STORE
  @Context(ALLOW_TF32=1)
  @unittest.skipIf(Device.DEFAULT == "PYTHON", "slow on EMULATED device")
  @unittest.skipUnless(Device[Device.DEFAULT].renderer.tensor_cores, "test requires tensor cores")
  @unittest.skipIf(Device.DEFAULT in {"CPU"}, "CPU does not support using a different type for accumulation")
  def test_tensor_cores_unroll_casted_phi_with_children(self):
    # all STORE children are outside the loop
    tc = [tc for tc in Device[Device.DEFAULT].renderer.tensor_cores if tc.dtype_in != tc.dtype_out][0]
    x, y = Tensor.rand(128, 128, dtype=tc.dtype_in), Tensor.rand(128, 128, dtype=tc.dtype_in)
    r = x.matmul(y, dtype=tc.dtype_out).relu()
    opts = [Opt(OptOps.UNROLL, 0, 4)]
    ast = helper_linearizer_opt(r, [opts], apply_tc=True, atol=3e-2, rtol=1e-3)
    for u in get_program(ast, Device[Device.DEFAULT].renderer, opts=opts).uops:
      if u.op is Ops.WMMA:
        #assert u.src[-1].dtype == dtypes.float.vec(prod(tc.thread_local_sizes[2]))
        assert u.src[-1].src[0].op != Ops.STORE
if __name__ == '__main__':
unittest.main()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/opt/test_tensor_cores.py",
"license": "MIT License",
"lines": 181,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:test/opt/test_gen_float4.py | import unittest
from tinygrad import Device, Tensor, dtypes
from tinygrad.uop.ops import UOp, Ops
from tinygrad.codegen.opt import Opt, OptOps
from tinygrad.engine.realize import get_program
from tinygrad.helpers import AMX
@unittest.skipUnless(Device[Device.DEFAULT].renderer.supports_float4, "need backends that support float4")
class TestFloat4(unittest.TestCase):
@staticmethod
def count_float4(uops: list[UOp], n=4):
return (len([uop for uop in uops if uop.op is Ops.LOAD and uop.dtype == dtypes.float.vec(n)]),
len([uop for uop in uops if uop.op is Ops.STORE and uop.src[1].dtype == dtypes.float.vec(n)]))
@staticmethod
def count_half4(uops: list[UOp]):
return (len([uop for uop in uops if uop.op is Ops.LOAD and uop.dtype == dtypes.half.vec(4)]),
len([uop for uop in uops if uop.op is Ops.STORE and uop.src[1].dtype == dtypes.half.vec(4)]))
def test_float4_basic(self):
a = Tensor.empty(2, 8).realize()
b = Tensor.empty(2, 8).realize()
c = a + b
s = c.schedule()[0]
realized_ast = s.ast
opts_to_apply = [Opt(op=OptOps.UPCAST, axis=0, arg=4)]
program = get_program(realized_ast, renderer=Device[Device.DEFAULT].renderer, opts=opts_to_apply)
assert TestFloat4.count_float4(program.uops) == (2, 1)
@unittest.skipIf(Device.DEFAULT in {"CPU"} and AMX, "CPU with AMX upcasts float up to size 16")
def test_float4_multidim(self):
a = Tensor.empty(2, 8).realize()
b = Tensor.empty(2, 8).realize()
c = a + b
s = c.schedule()[0]
uops = get_program(s.ast, renderer=Device[Device.DEFAULT].renderer,
opts=[Opt(op=OptOps.UPCAST, axis=0, arg=4), Opt(op=OptOps.UPCAST, axis=0, arg=2)]).uops
assert TestFloat4.count_float4(uops) == (4, 2)
@unittest.skipUnless(Device.DEFAULT in {"CPU"} and AMX, "Only CPU with AMX upcasts float up to size 16")
def test_float4_multidim_amx(self):
def kernel_for_shape(size, shift):
a = Tensor.empty(2, size).realize()
b = Tensor.empty(2, size).realize()
c = a + b
s = c.schedule()[0]
return get_program(s.ast, renderer=Device[Device.DEFAULT].renderer,
opts=[Opt(op=OptOps.UPCAST, axis=0, arg=4), Opt(op=OptOps.UPCAST, axis=0, arg=shift)]).uops
sizes = [12, 8, 16]
shifts = [3, 2, 4]
expected_upcast_size = [4, 8, 16]
expected_output = [(6,3), (2,1), (2,1)]
for i in range(len(sizes)):
assert TestFloat4.count_float4(kernel_for_shape(sizes[i], shifts[i]), expected_upcast_size[i]) == expected_output[i]
def test_float4_unaligned_load(self):
a = Tensor.empty(9).realize().shrink(((1, 9),))
b = Tensor.empty(9).realize().shrink(((1, 9),))
c = a + b
s = c.schedule()[0]
realized_ast = s.ast
opts_to_apply = [Opt(op=OptOps.UPCAST, axis=0, arg=4)]
program = get_program(realized_ast, renderer=Device[Device.DEFAULT].renderer, opts=opts_to_apply)
assert TestFloat4.count_float4(program.uops) == (0, 1)
@unittest.skipIf(Device.DEFAULT in {"CPU"} and AMX, "CPU with AMX upcasts float up to size 16")
def test_float4_multidim_unaligned_load(self):
a = Tensor.empty(2, 9).realize().shrink(((0, 2), (1, 9),))
b = Tensor.empty(2, 9).realize().shrink(((0, 2), (1, 9),))
c = a + b
s = c.schedule()[0]
uops = get_program(s.ast, renderer=Device[Device.DEFAULT].renderer,
opts=[Opt(op=OptOps.UPCAST, axis=1, arg=4), Opt(op=OptOps.UPCAST, axis=1, arg=2)]).uops
assert TestFloat4.count_float4(uops) == (0, 2)
@unittest.skipUnless(Device.DEFAULT in {"CPU"} and AMX, "Only CPU with AMX upcasts float up to size 16")
def test_float4_multidim_unaligned_load_amx(self):
def kernel_for_shape(size, shift):
a = Tensor.empty(2, size).realize().shrink(((0, 2), (1, size),))
b = Tensor.empty(2, size).realize().shrink(((0, 2), (1, size),))
c = a + b
s = c.schedule()[0]
return get_program(s.ast, renderer=Device[Device.DEFAULT].renderer,
opts=[Opt(op=OptOps.UPCAST, axis=1, arg=4), Opt(op=OptOps.UPCAST, axis=1, arg=shift)]).uops
sizes = [13, 9, 17]
shifts = [3, 2, 4]
expected_upcast_size = [4, 8, 16]
expected_output = [(0,3), (0,1), (0,1)]
for i in range(len(sizes)):
assert TestFloat4.count_float4(kernel_for_shape(sizes[i], shifts[i]), expected_upcast_size[i]) == expected_output[i]
def test_float4_sometimes_unaligned(self):
a = Tensor.empty(1, 1, 8).realize()
b = Tensor.empty(1, 1, 5).realize().shrink(((0, 1), (0, 1), (1, 5)))
c = a.conv2d(b)
# only the first and last conv dot products are aligned in a, and b is never aligned, so no
# float4 should be emitted (the reduce axis of size 4 is the float4 axis here)
s = c.schedule()[0]
uops = get_program(s.ast, renderer=Device[Device.DEFAULT].renderer, opts=[Opt(op=OptOps.UNROLL, axis=0, arg=4)]).uops
assert TestFloat4.count_float4(uops) == (0, 0)
def test_float4_multidim_sometimes_unaligned(self):
a = Tensor.empty(1, 1, 7).realize()
b = Tensor.empty(1, 1, 5).realize().shrink(((0, 1), (0, 1), (1, 5)))
c = a.conv2d(b)
# the first conv dot product is aligned in a. If we upcast the output and reduce
# dimension, then we could do float4 for only that one set of loads, but we currently
# don't.
# UPDATE: now we do this fusion
s = c.schedule()[0]
uops = get_program(s.ast, renderer=Device[Device.DEFAULT].renderer,
opts=[Opt(op=OptOps.UPCAST, axis=0, arg=0), Opt(op=OptOps.UNROLL, axis=0, arg=0)]).uops
assert TestFloat4.count_float4(uops) in {(0,1), (1,1)}
def test_float4_expand(self):
a = Tensor.empty(9).realize().shrink(((1, 9),))
b = Tensor.empty(2).realize().reshape((2, 1)).expand((2,4)).reshape((8,))
c = a + b
# we will upcast the top axis of sz 4. they should not be coalesced into float4,
# since the top axis is not contiguous.
s = c.schedule()[0]
uops = get_program(s.ast, renderer=Device[Device.DEFAULT].renderer, opts=[Opt(op=OptOps.UPCAST, axis=0, arg=4)]).uops
assert TestFloat4.count_float4(uops) == (0, 1)
def test_float4_heterogeneous(self):
a = Tensor.empty(8).realize()
b = Tensor.empty(9).realize().shrink(((1, 9),))
c = a + b
# should float4 b but not a
s = c.schedule()[0]
uops = get_program(s.ast, renderer=Device[Device.DEFAULT].renderer, opts=[Opt(op=OptOps.UPCAST, axis=0, arg=4)]).uops
assert TestFloat4.count_float4(uops) == (1, 1)
if __name__ == '__main__':
unittest.main()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/opt/test_gen_float4.py",
"license": "MIT License",
"lines": 122,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:test/opt/test_kernel_opts.py | import unittest
from tinygrad import Device, Tensor, dtypes
from tinygrad.codegen.opt import Opt, OptOps, KernelOptError
# TODO: write a clean version of this
from test.backend.test_linearizer import helper_linearizer_opt
class TestKernelOpts(unittest.TestCase):
@unittest.skipUnless(Device[Device.DEFAULT].renderer.has_local, "test requires locals")
@unittest.skipUnless(Device[Device.DEFAULT].renderer.has_shared, "test requires shared")
def test_local_and_grouped_reduce(self):
N = 128
Tensor.manual_seed(1882)
a = Tensor.rand(4, 4, N, N)
b = Tensor.rand(4, 4, N)
r = (b.sqrt() + ((a+1).sum(axis=3).exp()))
helper_linearizer_opt(r, [
[Opt(OptOps.LOCAL, 0, 2)],
[Opt(OptOps.LOCAL, 0, 8)],
[Opt(OptOps.LOCAL, 0, 16)], # Checking how it works with locals
[Opt(OptOps.GROUPTOP, 0, 2)],
[Opt(OptOps.GROUPTOP, 0, 32)],
[Opt(OptOps.GROUPTOP, 0, 64)], # Checking how it works with grouped reduce
[Opt(OptOps.LOCAL, 0, 2), Opt(OptOps.GROUPTOP, 0, 2)],
[Opt(OptOps.LOCAL, 0, 16), Opt(OptOps.GROUPTOP, 0, 16)],
[Opt(OptOps.LOCAL, 0, 32), Opt(OptOps.GROUPTOP, 0, 2)],
# Checking how it works with locals + grouped reduce
[Opt(OptOps.LOCAL, 0, 2), Opt(OptOps.GROUPTOP, 0, 64)],
# Checking how it works with locals + grouped reduce + upcasts
[Opt(OptOps.LOCAL, 0, 2), Opt(OptOps.GROUPTOP, 0, 2), Opt(OptOps.UPCAST, 0, 8), Opt(OptOps.UNROLL, 1, 4)],
# many local + many group
[Opt(OptOps.GROUP, 0, 2)] * 4,
[Opt(OptOps.LOCAL, 0, 2)] * 4,
[Opt(OptOps.LOCAL, 0, 2), Opt(OptOps.GROUP, 0, 2)] * 4,
])
def test_upcasts(self):
N = 16
Tensor.manual_seed(1772)
a = Tensor.rand(N, N)
b = Tensor.rand(N, N)
r = (a+b).sqrt() * ((a+1).exp())
helper_linearizer_opt(r, [
[Opt(OptOps.UPCAST, 0, 2)],
[Opt(OptOps.UPCAST, 0, 4)],
[Opt(OptOps.UPCAST, 0, 8)], # Checking how it works with upcasts
])
def test_full_upcast(self):
Tensor.manual_seed(1772)
a = Tensor.rand(4)
b = Tensor.rand(4)
r = (a+b).sqrt() * ((a+1).exp())
helper_linearizer_opt(r, [
[Opt(OptOps.UPCAST, 0, 4)], # Checking how it works with upcasts
])
@unittest.skipUnless(Device[Device.DEFAULT].renderer.has_local, "test requires locals")
@unittest.skipUnless(Device[Device.DEFAULT].renderer.has_shared, "test requires shared")
def test_matmul(self):
N = 128
Tensor.manual_seed(1552)
a = Tensor.rand(N, N)
b = Tensor.rand(N, N)
r = a@b
helper_linearizer_opt(r, [
[Opt(OptOps.UPCAST, 0, 2)],
[Opt(OptOps.UPCAST, 0, 4), Opt(OptOps.UPCAST, 1, 4)], # Checking how it works with upcasts
[Opt(OptOps.LOCAL, 0, 2)],
[Opt(OptOps.LOCAL, 1, 32)],
[Opt(OptOps.LOCAL, 0, 4), Opt(OptOps.LOCAL, 1, 4)],
[Opt(OptOps.LOCAL, 0, 4), Opt(OptOps.LOCAL, 1, 32)],
[Opt(OptOps.LOCAL, 0, 16), Opt(OptOps.LOCAL, 1, 8)], # Checking how it works with locals
[Opt(OptOps.GROUPTOP, 0, 2)],
[Opt(OptOps.GROUPTOP, 0, 32)],
[Opt(OptOps.GROUPTOP, 0, 32), Opt(OptOps.UNROLL, 0, 4)], # Checking how it works with grouped_reduce
[Opt(OptOps.LOCAL, 0, 2), Opt(OptOps.LOCAL, 1, 2), Opt(OptOps.GROUPTOP, 0, 32)],
[Opt(OptOps.LOCAL, 0, 8), Opt(OptOps.GROUPTOP, 0, 32)],
[Opt(OptOps.LOCAL, 0, 4), Opt(OptOps.LOCAL, 0, 8), Opt(OptOps.GROUPTOP, 0, 4)], # Checking how it works with local+grouped_reduce
# Checking all together
[Opt(OptOps.LOCAL, 0, 4), Opt(OptOps.LOCAL, 0, 4), Opt(OptOps.GROUPTOP, 0, 8), Opt(OptOps.UNROLL, 0, 4), Opt(OptOps.UPCAST, 0, 4),
Opt(OptOps.UPCAST, 1, 2)],
# Full global upcast + local
[Opt(OptOps.LOCAL, 0, 4), Opt(OptOps.LOCAL, 0, 4), Opt(OptOps.GROUPTOP, 0, 8), Opt(OptOps.UNROLL, 0, 4), Opt(OptOps.UPCAST, 0, 8)],
])
@unittest.skipUnless(Device[Device.DEFAULT].renderer.has_local, "test requires locals")
@unittest.skipUnless(Device[Device.DEFAULT].renderer.has_shared, "test requires shared")
def test_double_reduce(self):
N = 128
Tensor.manual_seed(1552)
a = Tensor.rand(8, N, 8, N)
r = a.sum(axis=(1,3))
helper_linearizer_opt(r, [
# openCL / CL=1 is 256 max threads
[Opt(OptOps.GROUPTOP, 0, 2)], [Opt(OptOps.GROUPTOP, 0, 32)],
[Opt(OptOps.GROUPTOP, 1, 2)], [Opt(OptOps.GROUPTOP, 1, 32)], # Checking how it works with 1 grouped_reduce.
[Opt(OptOps.GROUPTOP, 0, 2), Opt(OptOps.GROUPTOP, 1, 2)],
[Opt(OptOps.GROUPTOP, 0, 16), Opt(OptOps.GROUPTOP, 1, 2)],
[Opt(OptOps.GROUPTOP, 0, 4), Opt(OptOps.GROUPTOP, 1, 64)], # Checking how it works with 2 grouped_reduces.
[Opt(OptOps.GROUPTOP, 0, 16), Opt(OptOps.GROUPTOP, 1, 2), Opt(OptOps.UNROLL, 0, 4)],
[Opt(OptOps.GROUPTOP, 0, 2), Opt(OptOps.GROUPTOP, 1, 32), Opt(OptOps.UNROLL, 2, 4)], # Checking how it works with 2 grouped_reduces + upcasts.
[Opt(OptOps.LOCAL, 0, 4), Opt(OptOps.LOCAL, 1, 4), Opt(OptOps.GROUPTOP, 0, 4), Opt(OptOps.GROUPTOP, 1, 4)],
# Checking how it works with 2 grouped_reduces + upcasts + locals.
[Opt(OptOps.LOCAL, 0, 4), Opt(OptOps.LOCAL, 1, 4), Opt(OptOps.GROUPTOP, 0, 2), Opt(OptOps.GROUPTOP, 1, 32), Opt(OptOps.UNROLL, 1, 4)],
[Opt(OptOps.LOCAL, 0, 2), Opt(OptOps.LOCAL, 1, 2), Opt(OptOps.GROUPTOP, 0, 8), Opt(OptOps.GROUPTOP, 1, 4), Opt(OptOps.UPCAST, 0, 2)],
[Opt(OptOps.LOCAL, 0, 2), Opt(OptOps.LOCAL, 1, 2), Opt(OptOps.GROUPTOP, 0, 8), Opt(OptOps.GROUPTOP, 1, 4), Opt(OptOps.UPCAST, 0, 2),
Opt(OptOps.UNROLL, 0, 4), Opt(OptOps.UNROLL, 1, 4)], # Checking how it works with 2 grouped_reduces + upcasts + locals.
[Opt(OptOps.LOCAL, 0, 4), Opt(OptOps.LOCAL, 1, 4), Opt(OptOps.GROUPTOP, 0, 4), Opt(OptOps.GROUPTOP, 1, 4), Opt(OptOps.UPCAST, 0, 2),
Opt(OptOps.UPCAST, 0, 2)], # No globals
])
@unittest.skipUnless(Device[Device.DEFAULT].renderer.tensor_cores, "test requires tensor cores")
@unittest.skipUnless(any(tc.dtype_in == tc.dtype_out == dtypes.half for tc in Device[Device.DEFAULT].renderer.tensor_cores),
"test requires tensor cores with accumulation in half") # testing with half suffices.
def test_tensor_core_opts(self):
N = 128
Tensor.manual_seed(1552)
a, b = Tensor.rand(N, N, dtype=dtypes.half), Tensor.rand(N, N, dtype=dtypes.half)
r = a.matmul(b, dtype=dtypes.half)
atol, rtol = 0.25, 0.01
helper_linearizer_opt(r, [
[],
[Opt(OptOps.UPCAST, 0, 4)],
[Opt(OptOps.UPCAST, 1, 4)],
[Opt(OptOps.UPCAST, 0, 4), Opt(OptOps.UPCAST, 1, 4)], # check upcasts
[Opt(OptOps.UNROLL, 0, 2)], # check unroll
[Opt(OptOps.UPCAST, 0, 4), Opt(OptOps.UNROLL, 0, 2)], # check combo of unroll and local
[Opt(OptOps.UPCAST, 0, 4), Opt(OptOps.UPCAST, 1, 4), Opt(OptOps.UNROLL, 0, 2)],
[Opt(OptOps.UPCAST, 0, 4), Opt(OptOps.UPCAST, 1, 4), Opt(OptOps.UNROLL, 0, 4)],
[Opt(OptOps.UPCAST, 1, 4), Opt(OptOps.UPCAST, 0, 4)], # check permutations
[Opt(OptOps.UNROLL, 0, 2), Opt(OptOps.UPCAST, 0, 4)],
[Opt(OptOps.UPCAST, 0, 4), Opt(OptOps.UNROLL, 0, 2), Opt(OptOps.UPCAST, 1, 4)],
[Opt(OptOps.UNROLL, 0, 2), Opt(OptOps.UPCAST, 1, 4), Opt(OptOps.UPCAST, 0, 4), Opt(OptOps.UNROLL, 0, 4)],
], apply_tc=True, atol=atol, rtol=rtol)
@unittest.skipUnless(Device[Device.DEFAULT].renderer.tensor_cores, "test requires tensor cores")
@unittest.skipUnless(any(tc.dtype_in == tc.dtype_out == dtypes.half for tc in Device[Device.DEFAULT].renderer.tensor_cores),
"test requires tensor cores with accumulation in half") # testing with half suffices.
@unittest.skipUnless(Device[Device.DEFAULT].renderer.has_local, "test requires locals")
def test_tensor_core_opts_locals(self):
N = 128
Tensor.manual_seed(1552)
a, b = Tensor.rand(N, N, dtype=dtypes.half), Tensor.rand(N, N, dtype=dtypes.half)
r = a.matmul(b, dtype=dtypes.half)
atol, rtol = 0.25, 0.01
helper_linearizer_opt(r, [
[Opt(OptOps.UNROLL, 0, 0)], # check full unroll of reduce with locals
[Opt(OptOps.LOCAL, 0, 4)], # check local
[Opt(OptOps.UPCAST, 0, 4), Opt(OptOps.UPCAST, 1, 4), Opt(OptOps.UNROLL, 0, 4), Opt(OptOps.LOCAL, 0, 2)],
[Opt(OptOps.LOCAL, 0, 2), Opt(OptOps.UPCAST, 1, 4), Opt(OptOps.UNROLL, 0, 2), Opt(OptOps.UPCAST, 0, 4)],
], apply_tc=True, atol=atol, rtol=rtol)
@unittest.skipUnless(Device[Device.DEFAULT].renderer.tensor_cores, "test requires tensor cores")
@unittest.skipUnless(Device[Device.DEFAULT].renderer.has_local, "test requires locals")
@unittest.skipUnless(Device[Device.DEFAULT].renderer.has_shared, "test requires shared memory")
@unittest.skipUnless(any(tc.dtype_in == tc.dtype_out == dtypes.half for tc in Device[Device.DEFAULT].renderer.tensor_cores),
"test requires tensor cores with accumulation in half") # testing with half suffices.
# NOTE: the METAL test is broken, likely due to a compiler bug. passes on CI with -O0 and with default opt level locally on M3
@unittest.skipIf(Device.DEFAULT == "METAL", "broken for METAL")
@unittest.skip("feature was removed")
def test_tensor_core_opts_group(self):
N = 128
Tensor.manual_seed(1552)
a, b = Tensor.rand(N, N, dtype=dtypes.half), Tensor.rand(N, N, dtype=dtypes.half)
r = a.matmul(b, dtype=dtypes.half)
atol, rtol = 0.25, 0.01
helper_linearizer_opt(r, [
[Opt(OptOps.GROUP, 0, 2)],
[Opt(OptOps.GROUPTOP, 0, 4)],
[Opt(OptOps.UPCAST, 0, 4), Opt(OptOps.GROUP, 0, 2)],
[Opt(OptOps.LOCAL, 0, 4), Opt(OptOps.GROUP, 0, 2)],
[Opt(OptOps.UNROLL, 0, 4), Opt(OptOps.GROUP, 0, 2)],
[Opt(OptOps.UPCAST, 0, 2), Opt(OptOps.LOCAL, 0, 2), Opt(OptOps.GROUP, 0, 2)],
[Opt(OptOps.LOCAL, 0, 2), Opt(OptOps.GROUPTOP, 0, 8), Opt(OptOps.UNROLL, 0, 2), Opt(OptOps.UPCAST, 1, 2)],
], apply_tc=True, atol=atol, rtol=rtol)
def test_padto_matmul(self):
N = 17
Tensor.manual_seed(289)
a = Tensor.rand(N, N)
b = Tensor.rand(N, N)
helper_linearizer_opt(a@b, [
[Opt(OptOps.PADTO, 0, 32)],
[Opt(OptOps.PADTO, 1, 32)],
[Opt(OptOps.PADTO, 2, 32)],
[Opt(OptOps.PADTO, 0, 32), Opt(OptOps.PADTO, 1, 32)],
[Opt(OptOps.PADTO, 0, 32), Opt(OptOps.PADTO, 1, 32), Opt(OptOps.PADTO, 2, 32)],
# can optimize further post PADTO
[Opt(OptOps.PADTO, 0, 32), Opt(OptOps.PADTO, 1, 32), Opt(OptOps.UPCAST, 0, 2), Opt(OptOps.UPCAST, 1, 2),],
])
def test_padto_upcasted_not_ok(self):
N = 4
a = Tensor.rand(N, N)
b = Tensor.rand(N, N)
helper_linearizer_opt(a@b, [
[Opt(OptOps.UPCAST, 0, 0)],
[Opt(OptOps.UPCAST, 1, 0)],
[Opt(OptOps.UNROLL, 0, 0)],
[Opt(OptOps.PADTO, 0, 8)],
[Opt(OptOps.PADTO, 1, 8)],
[Opt(OptOps.PADTO, 2, 8)],
])
with self.assertRaises(KernelOptError):
helper_linearizer_opt(a@b, [[Opt(OptOps.UPCAST, 0, 0), Opt(OptOps.PADTO, 1, 8)]])
with self.assertRaises(KernelOptError):
helper_linearizer_opt(a@b, [[Opt(OptOps.UPCAST, 1, 0), Opt(OptOps.PADTO, 1, 8)]])
with self.assertRaises(KernelOptError):
helper_linearizer_opt(a@b, [[Opt(OptOps.UNROLL, 0, 0), Opt(OptOps.PADTO, 2, 8)]])
def test_padto_sum_ok(self):
N = 18
# NOTE: this setup prevents 17 * 17 contiguous merged into one dimension
a = Tensor.rand(N, N).realize().shrink(((0, 17), (0, 17))) * 100
b = (Tensor.rand(N, N) < 0.5).realize().shrink(((0, 17), (0, 17)))
helper_linearizer_opt(a.sum(0), [
[Opt(OptOps.PADTO, 0, 32)],
[Opt(OptOps.PADTO, 0, 32), Opt(OptOps.UPCAST, 0, 8),],
])
helper_linearizer_opt(a.sum(1), [
[Opt(OptOps.PADTO, 0, 32)],
[Opt(OptOps.PADTO, 0, 32), Opt(OptOps.UPCAST, 0, 8),],
])
# can pad sum reduce axis if there's no unsafe ops prior to sum
for axis in (0, 1):
helper_linearizer_opt(a.sum(), [[Opt(OptOps.PADTO, axis, 32)],])
helper_linearizer_opt(a.sum(0), [[Opt(OptOps.PADTO, axis, 32)],])
helper_linearizer_opt(b.sum(), [[Opt(OptOps.PADTO, axis, 32)],])
helper_linearizer_opt(b.sum(0), [[Opt(OptOps.PADTO, axis, 32)],])
helper_linearizer_opt(b.sum(dtype=dtypes.bool), [[Opt(OptOps.PADTO, axis, 32)],])
# TODO: why?
if Device.DEFAULT != "WEBGPU":
helper_linearizer_opt(b.sum(0, dtype=dtypes.bool), [[Opt(OptOps.PADTO, axis, 32)],])
helper_linearizer_opt(b.sum(1, dtype=dtypes.bool), [[Opt(OptOps.PADTO, axis, 32)],])
# having unsafe ops after sum is fine
helper_linearizer_opt(a.sum().exp(), [[Opt(OptOps.PADTO, 0, 32)],])
helper_linearizer_opt(a.sum(0).exp(), [[Opt(OptOps.PADTO, 1, 32)],])
def test_padto_sum_not_ok(self):
N = 18
# NOTE: this setup prevents 17 * 17 contiguous merged into one dimension
a = Tensor.rand(N, N).shrink(((0, 17), (0, 17))).exp()
# exp is not safe to pad
with self.assertRaises(KernelOptError):
helper_linearizer_opt(a.exp().sum(), [[Opt(OptOps.PADTO, 0, 32)],])
with self.assertRaises(KernelOptError):
helper_linearizer_opt(a.exp().sum(0), [[Opt(OptOps.PADTO, 1, 32)],])
b = a < 1
# lt is not safe to pad
with self.assertRaises(KernelOptError):
helper_linearizer_opt(b.sum(), [[Opt(OptOps.PADTO, 0, 32)],])
with self.assertRaises(KernelOptError):
helper_linearizer_opt(b.sum(0), [[Opt(OptOps.PADTO, 1, 32)],])
def test_padto_max(self):
N = 18
# NOTE: this setup prevents 17 * 17 contiguous merged into one axis
a = -Tensor.rand(N, N).shrink(((0, 17), (0, 17))) * 100
helper_linearizer_opt(a.max(0), [
[Opt(OptOps.PADTO, 0, 32)],
[Opt(OptOps.PADTO, 0, 32), Opt(OptOps.UPCAST, 0, 8),],
])
helper_linearizer_opt(a.max(1), [
[Opt(OptOps.PADTO, 0, 32)],
[Opt(OptOps.PADTO, 0, 32), Opt(OptOps.UPCAST, 0, 8),],
])
# cannot pad max kernel on reduce
with self.assertRaises(KernelOptError):
helper_linearizer_opt(a.max(), [[Opt(OptOps.PADTO, 0, 32)],])
with self.assertRaises(KernelOptError):
helper_linearizer_opt(a.max(0), [[Opt(OptOps.PADTO, 1, 32)],])
def test_padto_where(self):
Tensor.manual_seed(0)
N = 17
a = (Tensor.randn(N, N).realize().max(axis=0, keepdim=True) > 1).where(1, 0)
helper_linearizer_opt(a.max(0), [
[Opt(OptOps.PADTO, 0, 32)],
[Opt(OptOps.PADTO, 0, 32), Opt(OptOps.UPCAST, 0, 8),],
])
def test_padto_where_multioutput(self):
Tensor.manual_seed(0)
N = 17
r = Tensor.randn(N, N).realize().max(axis=0, keepdim=True) > 1
a0 = r.where(1, 0)
a1 = r.where(2, 0)
helper_linearizer_opt([a0.max(0), a1.max(0)], [
[Opt(OptOps.PADTO, 0, 32)],
[Opt(OptOps.PADTO, 0, 32), Opt(OptOps.UPCAST, 0, 8),],
])
@unittest.skipUnless(Device[Device.DEFAULT].renderer.has_local, "test requires locals")
@unittest.skipUnless(Device[Device.DEFAULT].renderer.has_shared, "test requires shared")
def test_color_shapes_with_local(self):
N = 32
Tensor.manual_seed(1552)
a = Tensor.rand(N, N)
b = Tensor.rand(N, N)
r = a@b
opts_shapes = [
([Opt(OptOps.LOCAL, 0, 2)], [("blue",16),("blue",32),("cyan",2),("red",32)]),
([Opt(OptOps.LOCAL, 0, 2),Opt(OptOps.GROUP, 0, 2)], [("blue",16),("blue",32),("cyan",2),("green",2),("red",16)]),
# check to ensure local_dims are stable for full UNROLL of the first reduce
([Opt(OptOps.LOCAL, 0, 2),Opt(OptOps.UNROLL, 0, 0)], [("blue",16),("blue",32),("cyan",2),("magenta",32)]),
([Opt(OptOps.UNROLL, 0, 0),Opt(OptOps.LOCAL, 0, 2)], [("blue",16),("blue",32),("cyan",2),("magenta",32)]),
# check behavior for full UNROLL on an existing GROUP
([Opt(OptOps.LOCAL, 0, 2),Opt(OptOps.GROUP, 0, 0),Opt(OptOps.UNROLL, 0, 2)], [("blue",16),("blue",32),("cyan",2),("green",16),("magenta",2)]),
([Opt(OptOps.LOCAL, 0, 2),Opt(OptOps.GROUP, 0, 0),Opt(OptOps.UNROLL, 0, 0)], [("blue",16),("blue",32),("cyan",2),("magenta",32)]),
([Opt(OptOps.GROUP, 0, 0),Opt(OptOps.LOCAL, 0, 2),Opt(OptOps.UNROLL, 0, 0)], [("blue",16),("blue",32),("cyan",2),("magenta",32)]),
([Opt(OptOps.GROUP, 0, 2),Opt(OptOps.UNROLL, 0, 0)], [("blue",32),("blue",32),("red",16),("magenta",2)]),
]
helper_linearizer_opt(r, [x[0] for x in opts_shapes], color_sizes=[x[1] for x in opts_shapes])
@unittest.skipUnless(Device[Device.DEFAULT].renderer.has_local, "test requires locals")
@unittest.skipUnless(Device[Device.DEFAULT].renderer.has_shared, "test requires shared")
@unittest.skipUnless(Device[Device.DEFAULT].renderer.supports_float4, "test requires float4")
def test_arange_opts(self):
a = Tensor.arange(128)
# NOTE: arange no longer has reduce ops available for opt
helper_linearizer_opt(a, [
#[Opt(OptOps.GROUP, 0, 32)],
#[Opt(OptOps.GROUPTOP, 0, 32)],
[Opt(op=OptOps.LOCAL, axis=0, arg=8)],
[Opt(op=OptOps.LOCAL, axis=0, arg=8), Opt(op=OptOps.UPCAST, axis=0, arg=0)],
#[Opt(op=OptOps.LOCAL, axis=0, arg=8), Opt(op=OptOps.UPCAST, axis=0, arg=0), Opt(op=OptOps.GROUP, axis=0, arg=8)],
#[Opt(op=OptOps.LOCAL, axis=0, arg=8), Opt(op=OptOps.UPCAST, axis=0, arg=0), Opt(op=OptOps.GROUP, axis=0, arg=8), Opt(op=OptOps.UNROLL, axis=1, arg=4)], # noqa: E501
])
@unittest.skipUnless(Device[Device.DEFAULT].renderer.has_threads, "test requires threads")
@unittest.skipUnless(Device[Device.DEFAULT].renderer.global_max is not None and
Device[Device.DEFAULT].renderer.global_max[0] > 1, "test requires multicore")
def test_thread_opts(self):
a = Tensor.rand(4, 4, 4, 4)
b = Tensor.rand(4, 4, 4)
r = (b.sqrt() + ((a+1).sum(axis=3).exp()))
helper_linearizer_opt(r, [
[Opt(OptOps.THREAD, 0, 2)],
[Opt(OptOps.UPCAST, 0, 2), Opt(OptOps.THREAD, 0, 2)],
[Opt(OptOps.UPCAST, 0, 2), Opt(OptOps.THREAD, 0, 2), Opt(OptOps.UNROLL, 0, 2)],
] + [[Opt(OptOps.THREAD, 0, 4)] if Device[Device.DEFAULT].renderer.global_max[0] >= 4 else []]
+ [[Opt(OptOps.THREAD, 0, 8)] if Device[Device.DEFAULT].renderer.global_max[0] >= 8 else []])
def test_double_sum_group(self):
a = Tensor.rand(4, 4, 4)
r = a.sum((1, 2)).sum()
with self.assertRaises(KernelOptError):
helper_linearizer_opt(r, [[Opt(OptOps.GROUPTOP, 0, 16)],])
r = a.sum((1, 2)).sum()
with self.assertRaises(KernelOptError):
helper_linearizer_opt(r, [[Opt(OptOps.UNROLL, 1, 4), Opt(OptOps.GROUPTOP, 0, 16)],])
r = a.sum((1, 2)).sum()
with self.assertRaises(KernelOptError):
helper_linearizer_opt(r, [[Opt(OptOps.GROUPTOP, 1, 4), Opt(OptOps.GROUPTOP, 0, 16)],])
if __name__ == '__main__':
unittest.main()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/opt/test_kernel_opts.py",
"license": "MIT License",
"lines": 337,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:test/mockgpu/helpers.py | import ctypes, ctypes.util
from tinygrad.helpers import getenv
def _try_dlopen_gpuocelot():
GPUOCELOT_PATHS = [ctypes.util.find_library("gpuocelot")] if ctypes.util.find_library("gpuocelot") is not None else []
GPUOCELOT_PATHS += ["libgpuocelot.so", "/usr/local/lib/libgpuocelot.so",
"libgpuocelot.dylib", "/usr/local/lib/libgpuocelot.dylib", "/opt/homebrew/lib/libgpuocelot.dylib"]
for path in GPUOCELOT_PATHS:
try:
gpuocelot_lib = ctypes.CDLL(path)
gpuocelot_lib.ptx_run.argtypes = [ctypes.c_char_p, ctypes.c_int, ctypes.POINTER(ctypes.c_void_p), ctypes.c_int, ctypes.c_int,
ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int]
except OSError: pass
else: return gpuocelot_lib
print("Could not find libgpuocelot.so")
return None
class PythonRemu:
"""Python RDNA3/RDNA4 emulator wrapper that matches the libremu.so interface."""
valid_mem_ranges: set[tuple[int, int]] = set()
rsrc2: int = 0x19c # Default: USER_SGPR_COUNT=14, enable X and Y workgroup IDs
scratch_size: int = 0 # private_segment_fixed_size from kernel descriptor
arch: str = "rdna3" # Architecture: rdna3 or rdna4
user_data: list[int] = [] # All COMPUTE_USER_DATA registers (loaded into s[0:N])
def run_asm(self, lib: int, lib_sz: int, gx: int, gy: int, gz: int, lx: int, ly: int, lz: int, args_ptr: int) -> int:
from test.mockgpu.amd.emu import run_asm
return run_asm(lib, lib_sz, gx, gy, gz, lx, ly, lz, args_ptr, self.rsrc2, self.scratch_size, self.arch, self.user_data)
def _try_dlopen_remu():
# Use Python emulator only if PYTHON_REMU=1
if int(getenv("PYTHON_REMU", "1")):
return PythonRemu()
REMU_PATHS = ["extra/remu/target/release/libremu.so", "libremu.so", "/usr/local/lib/libremu.so",
"extra/remu/target/release/libremu.dylib", "libremu.dylib", "/usr/local/lib/libremu.dylib", "/opt/homebrew/lib/libremu.dylib"]
for path in REMU_PATHS:
try:
remu = ctypes.CDLL(path)
remu.run_asm.restype = ctypes.c_int32
remu.run_asm.argtypes = [ctypes.c_void_p, ctypes.c_uint32, ctypes.c_uint32, ctypes.c_uint32, ctypes.c_uint32,
ctypes.c_uint32, ctypes.c_uint32, ctypes.c_uint32, ctypes.c_void_p]
except OSError: pass
else: return remu
print("Could not find libremu.so")
return None
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/mockgpu/helpers.py",
"license": "MIT License",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:tinygrad/codegen/opt/postrange.py | from __future__ import annotations
import math, itertools
from collections import defaultdict
from typing import cast, Final
from tinygrad.uop.ops import PatternMatcher, UPat, Ops, UOp, KernelInfo, graph_rewrite, AxisType, ssimplify, GroupOp
from tinygrad.uop.ops import axis_letters, axis_colors, axis_to_pos
from tinygrad.device import Buffer
from tinygrad.dtype import dtypes, ImageDType
from tinygrad.helpers import colored, BEAM, getenv, DEBUG, to_function_name, NOOPT, argsort, round_up, prod, merge_dicts, get_single_element, flatten
from tinygrad.helpers import ALLOW_TF32, count, Context
from tinygrad.codegen.opt import Opt, OptOps, KernelOptError, check
from tinygrad.codegen.simplify import pm_flatten_range
from tinygrad.renderer import Renderer
remove_tags = PatternMatcher([(UPat(GroupOp.All, name="x"), lambda x: x.replace(tag=None) if x.tag is not None else None)])
class Scheduler:
def __init__(self, ast:UOp, ren:Renderer):
self.ast, self.ren = ast, ren
self.dont_use_locals = self.ast.arg.dont_use_locals if self.ast.arg is not None else False
self.applied_opts = list(self.ast.arg.applied_opts) if self.ast.arg is not None else []
self.opt_range = count(start=max([x.arg[0] for x in self.rngs], default=0)+1)
@property
def rngs(self):
# always in order by axistype
return sorted([u for u in self.ast.backward_slice if u.op is Ops.RANGE and u.vmax > 0], key=lambda x: (axis_to_pos[x.arg[-1]],) + x.arg[0:-1])
@property
def shape_len(self) -> int: return len(self.rngs)
@property
def full_shape(self): return [ssimplify(x.src[0]) for x in self.rngs]
@property
def axis_types(self) -> list[AxisType]: return [x.arg[-1] for x in self.rngs]
# strings like ['g0', 'g1', 'l0', 'l1', 'l2', 'l3', 'l4', 'l5', 'R0', 'r0', 'r1', 'r2', 'u0', 'u1', 'u2']
def shape_str(self) -> list[str]:
ret: list[str] = []
cnt: dict[AxisType, int] = {}
for x in self.axis_types:
cnt[x] = (cnt[x] + 1) if x in cnt else 0
ret.append(f"{axis_letters[x]}{cnt[x]}")
return ret
def shape_str_to_axis(self, nms:list[str]) -> tuple[int, ...]: return tuple([self.shape_str().index(x) for x in nms])
def copy(self) -> Scheduler:
ret = Scheduler(self.ast, self.ren)
ret.dont_use_locals = self.dont_use_locals
ret.applied_opts = self.applied_opts[:]
if hasattr(self, 'tensor_core'): ret.tensor_core = self.tensor_core
return ret
kernel_cnt: Final[defaultdict[str, int]] = defaultdict(int)
def get_optimized_ast(self, name_override:str|None=None) -> UOp:
if name_override is not None: name = name_override
else:
k_type = "r" if self.reduceop is not None else "E"
special_uops = sorted([x for x in self.ast.toposort() if x.op is Ops.SPECIAL], key=lambda x: x.arg)
special_ops = [colored(str(x.vmax+1), "blue" if x.arg[0] == "g" else "cyan") for x in special_uops]
name = k_type + colored('_', 'BLACK').join(['']+special_ops+[colored(x.src[0].render(), color) for x,color in zip(self.rngs, self.colors())])
Scheduler.kernel_cnt[(function_name := to_function_name(name))] += 1
num = f"n{Scheduler.kernel_cnt[function_name]-1}" if Scheduler.kernel_cnt[function_name] > 1 else ""
name += colored(num, 'BLACK')
self.ast = graph_rewrite(self.ast, pm_flatten_range, name="flatten range")
return self.ast.replace(arg=KernelInfo(name=name, applied_opts=tuple(self.applied_opts), dont_use_locals=self.dont_use_locals), tag=1)
def _output_rngs(self) -> list[UOp]:
return flatten([[r for r in UOp.sink(*s.src[1:]).ranges if r.arg[-1] != AxisType.REDUCE] for s in self.ast.src if s.op is Ops.END])
def _globalizable_rngs(self) -> list[UOp]:
ret = [r for r in self._output_rngs() if r.arg[-1] == AxisType.LOOP]
# exclude any output ranges from global that don't appear in all BUFFERIZE
for x in self.ast.toposort():
if x.op is Ops.BUFFERIZE:
ret = [r for r in ret if r in x.ranges]
return ret
def convert_loop_to_global(self) -> None:
if not self.ren.has_local: return
globalizible_rngs = self._globalizable_rngs()
rng = [x.replace(arg=x.arg[0:-1]+(AxisType.GLOBAL,)) if x in globalizible_rngs else x for x in self.rngs]
self.ast = self.ast.substitute(dict(zip(self.rngs, rng)))
def colors(self) -> list[str]:
output_rngs = self._output_rngs()
globalizible_rngs = self._globalizable_rngs()
ret = []
for x,r in zip(self.axis_types, self.rngs):
if self.dont_use_locals and x == AxisType.GLOBAL: ret.append("BLUE")
elif r not in output_rngs and x == AxisType.LOOP: ret.append("BLACK")
elif r not in globalizible_rngs and x == AxisType.LOOP: ret.append("white")
else: ret.append(axis_colors[x])
return ret
def colored_shape(self) -> str: return ' '.join([colored(f'{x.src[0].render():>4s}', color) for x,color in zip(self.rngs, self.colors())])
def shift_to(self, rng:UOp, amount:int, new_type:AxisType, top:bool=False, input_new_rng:UOp|None=None):
if (old_sz:=rng.src[0].divides(amount)) is None:
raise KernelOptError(f"{amount} can't divide {rng.src[0]} in {self.colored_shape()}")
new_rng = UOp.range(amount, next(self.opt_range), new_type) if input_new_rng is None else input_new_rng
replaced_rng = rng.replace(src=(UOp.const(dtypes.int, old_sz),))
sub_axis = (new_rng * old_sz + replaced_rng) if top else (replaced_rng * amount + new_rng)
self.ast = self.ast.substitute({rng:sub_axis}, name=f"shift {rng.arg[:-1]} {amount} {str(new_type).split('.')[1].lower()}")
return replaced_rng, new_rng
def ranges_of(self, *axis_type:AxisType) -> list[UOp]: return [r for r in self.rngs if r.arg[-1] in axis_type]
def axes_of(self, *axis_type:AxisType) -> list[int]: return [i for i,t in enumerate(self.axis_types) if t in axis_type]
def upcast_size(self): return prod(self.full_shape[a] for a in self.axes_of(AxisType.UPCAST, AxisType.UNROLL))
# copied from kernel.py
@property
def upcastable_dims(self) -> list[int]: return [i for i in self.axes_of(AxisType.GLOBAL, AxisType.LOCAL, AxisType.LOOP) \
if isinstance(s:=self.full_shape[i], int) and s > 1]
@property
def unrollable_dims(self) -> list[int]: return [i for i in self.axes_of(AxisType.GROUP_REDUCE, AxisType.REDUCE) \
if isinstance(s:=self.full_shape[i], int) and s > 1]
def real_axis(self, op:OptOps, axis:int|None) -> int:
try:
if axis is None or op is OptOps.TC: return -1
if op is OptOps.UNROLL: return self.unrollable_dims[axis]
if op in {OptOps.GROUP, OptOps.GROUPTOP}: return self.axes_of(AxisType.REDUCE)[axis]
check(axis < self.shape_len, f"invalid axis on {axis=} {op=} {self.shape_len=}")
return axis
except IndexError as e: raise KernelOptError from e
  def apply_opt(self, opt:Opt, append_opt:bool=True):
    """Apply one optimization to the kernel's AST.

    Dispatch is on opt.op:
      * NOLOCALS: only sets self.dont_use_locals (no AST rewrite).
      * LOCAL/UPCAST/UNROLL/GROUP/GROUPTOP/THREAD: split the targeted range via shift_to.
      * TC: tensor-core rewrite via _apply_tc_opt.
      * PADTO: round the axis size up to a multiple of opt.arg, masking the padded tail with an invalid index.
      * SWAP: exchange the arg ids of two GLOBAL ranges.
    All preconditions go through check(), which raises KernelOptError on violation.
    Returns the result of the underlying rewrite (e.g. shift_to's (replaced_rng, new_rng)) or None.
    """
    if opt.op is OptOps.NOLOCALS:
      check(all(x not in {AxisType.WARP, AxisType.LOCAL, AxisType.GROUP_REDUCE} for x in self.axis_types), "no locals can't have locals")
      if append_opt: self.applied_opts.append(opt)
      self.dont_use_locals = True
      return
    if opt.op in {OptOps.LOCAL, OptOps.GROUP, OptOps.GROUPTOP}:
      check(self.ren.has_local, "locals needed for opt")
    # resolve the range this opt targets; NOOP placeholder when the opt has no axis (e.g. TC)
    rng = self.rngs[real_axis] if (real_axis:=self.real_axis(opt.op, opt.axis)) >= 0 else UOp(Ops.NOOP)
    opt_to_at = {
      OptOps.LOCAL: AxisType.LOCAL, OptOps.UPCAST: AxisType.UPCAST,
      OptOps.UNROLL: AxisType.UNROLL, OptOps.GROUP: AxisType.GROUP_REDUCE,
      OptOps.GROUPTOP: AxisType.GROUP_REDUCE, OptOps.THREAD: AxisType.THREAD}
    ret = None
    if opt.op in opt_to_at:
      # opt.arg == 0 means "use the whole axis"
      amt:int = int(rng.vmax+1) if opt.arg == 0 else cast(int, opt.arg)
      # copied from kernel.py. prevents METAL compiler hangs
      if self.reduceop is not None and (opt.op in {OptOps.GROUP, OptOps.GROUPTOP} or \
          (self.group_for_reduces and opt.op not in {OptOps.NOLOCALS, OptOps.PADTO})):
        upcast_local_sz = prod([self.full_shape[a] for a in self.axes_of(AxisType.UPCAST, AxisType.WARP, AxisType.LOCAL, AxisType.GROUP_REDUCE)])
        smem_sz = amt*upcast_local_sz*self.reduceop.dtype.itemsize
        check(smem_sz <= self.ren.shared_max, f"exceeds maximum shared memory size: needs {smem_sz}, max {self.ren.shared_max}")
      if self.reduceop is not None and (opt.op in {OptOps.GROUP, OptOps.GROUPTOP}):
        # We currently don't support a GROUP inside another reduce, TODO: fix if-contexts
        reduce = [u for u in self.ast.backward_slice if u.op is Ops.REDUCE and rng in merge_dicts([r.ranges for r in u.src[1:]])][0]
        check(not any(u.arg[-1] in (AxisType.REDUCE, AxisType.UNROLL, AxisType.GROUP_REDUCE) for u in reduce.ranges),
              "cannot have a GROUP_REDUCE inside another reduce")
      if opt.op is OptOps.UNROLL:
        check(amt <= 32, "don't unroll more than 32")
        check(rng.arg[-1] in {AxisType.GROUP_REDUCE, AxisType.REDUCE}, "unroll is for GROUP_REDUCE/REDUCE")
      if opt.op is OptOps.UPCAST:
        check((self.ren is not None and self.ren.device == "DSP") or amt <= 16, "don't upcast more than 16")
        check(rng.arg[-1] in {AxisType.GLOBAL, AxisType.LOCAL, AxisType.LOOP}, f"upcast is for GLOBAL/LOCAL/LOOP, not {rng.arg[-1]}")
      if opt.op is OptOps.LOCAL:
        check(not self.dont_use_locals, "can't use locals")
        check(rng.arg[-1] in {AxisType.GLOBAL, AxisType.LOOP}, "local is for globals")
      if opt.op is OptOps.THREAD:
        check(self.ren is not None and self.ren.has_threads, "target does not support threads")
        check(self.ren is not None and self.ren.global_max is not None and amt <= self.ren.global_max[0], "too many threads")
        check(all(x is not AxisType.THREAD for x in self.axis_types), "already threaded")
        check(rng in self._globalizable_rngs(), "can't apply range to this dim")
      if opt.op in {OptOps.GROUP, OptOps.GROUPTOP}:
        check(all(x.op is not OptOps.TC for x in self.applied_opts), "no grouping with tensor cores") # TODO: why is this wrong?
        check(not self.dont_use_locals, "can't use locals")
        check(rng.arg[-1] == AxisType.REDUCE, "group is for reduce")
      # GROUPTOP/THREAD put the new range on the outer side of the split
      ret = self.shift_to(rng, amt, opt_to_at[opt.op], top=opt.op in {OptOps.GROUPTOP, OptOps.THREAD})
    elif opt.op is OptOps.TC:
      check(len(self.applied_opts) == 0, "tensor core opts must be first") # TODO: remove the need for this by having warps
      check(opt.axis is not None, "tensor core opts must have an axis")
      check(opt.arg is not None and isinstance(opt.arg, tuple) and len(opt.arg) == 3, "tensor core opts must have valid arg")
      check(-1 <= (tc_select:=cast(tuple, opt.arg)[0]) < len(self.ren.tensor_cores), "tensor core opts must have valid tc_select")
      check(0 <= (tc_opt:=cast(tuple, opt.arg)[1]) <= 2, "tensor core opts must have valid tc_opt")
      check(0 < (use_tensor_cores:=cast(tuple, opt.arg)[2]) <= 2, "use_tensor_cores value is not valid")
      try: ret = self._apply_tc_opt(use_tensor_cores, cast(int, opt.axis), tc_select, tc_opt)
      except ValueError as e: raise KernelOptError(str(e))
      check(ret is not None, "no tensor core available")
    elif opt.op is OptOps.PADTO:
      check(rng.src[0].op is Ops.CONST, "only pad const axes")
      check(rng.arg[-1] not in {AxisType.UPCAST, AxisType.UNROLL}, "cannot pad upcasted") # TODO: why is this wrong?
      check(rng.arg[-1] is not AxisType.THREAD, "cannot pad thread")
      # ok to pad SUM if all parent ALU ops have f(0) = 0
      if (r:=self.reduceop) is not None and rng.arg[-1] in (AxisType.GROUP_REDUCE, AxisType.REDUCE):
        check(r.arg[0] is Ops.ADD and not r.op_in_backward_slice_with_self(*GroupOp.UnsafePad), f"cannot pad {r}")
      new_sz = round_up(int(rng.vmax+1), cast(int, opt.arg))
      check(rng.vmax+1 > new_sz//4, "pad adds more than quadruple the work")
      # swap in the padded range and guard every buffer access with a validity mask
      replaced_rng = UOp.range(new_sz, *rng.arg)
      replaces = {rng:replaced_rng}
      valid = replaced_rng < rng.vmax+1
      for b in self.bufs:
        if rng in (i:=b.src[1].get_idx()).backward_slice_with_self:
          replaces[b] = b.replace(src=(b.src[0],(valid&b.src[1].get_valid()).where(i, UOp.invalid())))
      self.ast = self.ast.substitute(replaces, f"padto {rng.arg[:-1]} {opt.arg}")
    elif opt.op is OptOps.SWAP:
      try:
        altrng:UOp = self.rngs[opt.arg]
      except IndexError:
        raise KernelOptError
      check(rng.arg[-1] == AxisType.GLOBAL and altrng.arg[-1] == AxisType.GLOBAL, "swap only for globals")
      self.ast = self.ast.substitute({rng:rng.replace(arg=(*altrng.arg[0:-1], rng.arg[-1]), tag=1),
                                      altrng:altrng.replace(arg=(*rng.arg[0:-1], altrng.arg[-1]), tag=1)},
                                     name=f"swap {rng.arg[:-1]} {altrng.arg[:-1]}")
      self.ast = graph_rewrite(self.ast, remove_tags, name="swap remove tags")
    else:
      raise KernelOptError(f"unsupported opt {opt.op}")
    if append_opt: self.applied_opts.append(opt)
    return ret
  def _apply_tc_opt(self, use_tensor_cores:int, axis:int, tc_select:int, opt_level:int) -> None|list[UOp]:
    """Attempt the tensor-core rewrite on the first reduce (must be an ADD of a MUL).

    use_tensor_cores: 1 = emit a real WMMA op, 2 = apply the TC shape opts but keep the plain reduce.
    tc_select: -1 tries every tensor core the renderer offers, otherwise an index into ren.tensor_cores.
    opt_level: >= 2 allows PADTO of axes that don't divide the tc dims.
    Returns the chosen [in1, in0, reduce] ranges on success, None if no tensor core applies.
    """
    if not (reduceops := self.reduceops): raise KernelOptError("no reduce ops for TensorCore")
    reduceop = reduceops[0]
    if use_tensor_cores and reduceop.arg is Ops.ADD:
      # look through an optional CAST for the MUL that feeds the reduce
      mul = reduceop.src[0] if reduceop.src[0].op is not Ops.CAST else reduceop.src[0].src[0]
      if mul.op is not Ops.MUL: return None
      in0, in1 = mul.src
      try:
        tensor_cores = self.ren.tensor_cores if tc_select == -1 else [self.ren.tensor_cores[tc_select]]
      except IndexError:
        raise KernelOptError(f"invalid tensor core choice {tc_select}")
      for tc in tensor_cores:
        if self.ren.device in ("CUDA", "NV") and tc.dtype_in == dtypes.float and not ALLOW_TF32: continue
        if tc.dtype_in == in0.dtype.scalar() and tc.dtype_in == in1.dtype.scalar() and tc.dtype_out == reduceop.dtype.scalar():
          # tensor cores have three ranges. X, Y, and REDUCE
          in0_ranges = sorted([u for u in in0.ranges if u not in in1.ranges], key=lambda x: x.arg[0], reverse=True)
          in1_ranges = sorted([u for u in in1.ranges if u not in in0.ranges], key=lambda x: x.arg[0], reverse=True)
          red_ranges = sorted(reduceop.src[1:], key=lambda x: x.arg[0], reverse=True)
          if DEBUG >= 3:
            print(f"TC({axis}): {[(x.arg[0],x.vmax+1) for x in in0_ranges]}",
                  f"{[(x.arg[0],x.vmax+1) for x in in1_ranges]} {[(x.arg[0],x.vmax+1) for x in red_ranges]}")
          if not len(in0_ranges) or not len(in1_ranges) or not len(red_ranges): continue
          # pick ranges
          # NOTE: why are in1 and in0 switched?
          axis_choices = list(itertools.product(in1_ranges, in0_ranges, red_ranges))
          if not (axis < len(axis_choices)): continue
          axes = list(axis_choices[axis])
          # tag the reduceop
          self.ast = self.ast.substitute({reduceop: reduceop.replace(tag="TC")})
          # do optimizations and save the ranges
          try:
            for i,a in enumerate(axes):
              idx = self.rngs.index(a)
              if (a.vmax+1) % tc.dims[i] != 0:
                if opt_level < 2: raise KernelOptError("tc padding requires opt_level >= 2")
                # apply_opt should return the updated range?
                self.apply_opt(Opt(OptOps.PADTO, idx, tc.dims[i]), append_opt=False) # PADTO might fail
                axes[i] = self.rngs[idx]
          except KernelOptError: continue
          # we create the warp as a whole thing, in case some of these ranges are moved/removed later
          warp = UOp.range(tc.threads, -1, AxisType.WARP)
          ne: list[UOp] = []
          # tc.opts: "l<i>" splits axis i by 2 into LOCAL (consuming a warp bit), "u<i>" splits into UPCAST
          for opt in tc.opts:
            if opt[0] == "l":
              axes[int(opt[1])], new_range = self.shift_to(axes[int(opt[1])], 2, AxisType.LOCAL, input_new_rng=warp%2)
              warp //= 2
            elif opt[0] == "u":
              axes[int(opt[1])], new_range = self.shift_to(axes[int(opt[1])], 2, AxisType.UPCAST)
            else: raise RuntimeError(f"unsupported opt {opt[0]} in tensor cores")
            ne.append(new_range)
          for _, amt in tc.get_reduce_axes():
            axes[2], new_range = self.shift_to(axes[2], amt, AxisType.UNROLL)
            ne.append(new_range)
          if use_tensor_cores != 2:
            # fix the srcs
            reduceop = get_single_element([x for x in self.ast.toposort() if x.op is Ops.REDUCE and x.tag == "TC"])
            tne = [x.replace(tag=1) for x in ne]
            ret = reduceop.substitute(dict(zip(ne, tne)))
            srcs = list((ret.src[0] if ret.src[0].op is not Ops.CAST else ret.src[0].src[0]).src)
            srcs = [x.substitute(dict(zip(tne, [ne[i] for i in argsort(p)]))) for x,p in zip(srcs, tc.permutes_for_shape_str(tc.base_shape_str()))]
            # get reduce/upcast axes for the tensor cores
            tc_reduce_axes = self.shape_str_to_axis([f"r{i}" for i in range(len(tc.get_reduce_axes()))])
            base_upcast_axes = tuple([(s,2) for s in self.shape_str_to_axis(tc.base_upcast_axes())])
            tc_upcast_axes = tuple([base_upcast_axes[:int(math.log2(tc.elements_per_thread[i]))] for i in range(3)])
            # axes to range number (was done in lowerer)
            tc_upcast_axes = tuple([tuple([(self.rngs[a].arg[0], sz) for a,sz in v]) for v in tc_upcast_axes])
            tc_reduce_axes = tuple([self.rngs[a].arg[0] for a in tc_reduce_axes])
            # construct the op
            # TODO: remove tc_upcast_axes from the arg
            # do the reduce_axes always disappear? i think they don't
            # they need to be moved into the WMMA srcs
            wmma_arg = (str(tc), tc.dims, tc.dtype_in, tc.dtype_out, self.ren.device, tc.threads, tc_upcast_axes, ()) #, tc_reduce_axes)
            wmma = UOp(Ops.WMMA, dtype=tc.dtype_out.vec(tc.elements_per_thread[2]), src=(
              UOp(Ops.CONTRACT, dtype=srcs[0].dtype.vec(tc.elements_per_thread[0]), src=(srcs[0],), arg=tc_upcast_axes[0], tag=1),
              UOp(Ops.CONTRACT, dtype=srcs[1].dtype.vec(tc.elements_per_thread[1]), src=(srcs[1],), arg=tc_upcast_axes[1], tag=1),
              UOp.const(tc.dtype_out.vec(tc.elements_per_thread[2]), 0.0)), arg=wmma_arg, tag=1)
            tc_uop = UOp(Ops.UNROLL, tc.dtype_out, (wmma,), arg=tc_upcast_axes[2], tag=1)
            # preserve extra reduces
            reduce_ranges = [x for x in UOp.sink(*reduceop.src[1:]).toposort() if x.op is Ops.RANGE and x.arg[0] not in tc_reduce_axes]
            if len(reduce_ranges): tc_uop = UOp(Ops.REDUCE, tc_uop.dtype, (tc_uop,)+tuple(reduce_ranges), Ops.ADD)
          # NOTE(review): tc_uop is only assigned when use_tensor_cores != 2 — confirm the TC=2 path never reaches here unchanged
          self.ast = self.ast.substitute({reduceop: tc_uop})
          self.tensor_core = tc
          return axes
    return None
# helpers for hand_coded_optimizations
@property
def reduceops(self) -> list[UOp]: return [x for x in self.ast.backward_slice if x.op is Ops.REDUCE]
@property
def reduceop(self) -> UOp|None:
if not (red := self.reduceops): return None
return UOp(Ops.REDUCE_AXIS, red[0].dtype, red[0].src, (red[0].arg, ()))
@property
def bufs(self) -> list[UOp]: return [x for x in self.ast.toposort() if x.op is Ops.INDEX][::-1]
@property
def output_shape(self):
return [s if at not in {AxisType.REDUCE, AxisType.UNROLL, AxisType.GROUP_REDUCE} else 1 for s,at in zip(self.full_shape, self.axis_types)]
@property
def upcasted(self) -> int: return len(self.axes_of(AxisType.UPCAST, AxisType.UNROLL))
@property
def group_for_reduces(self) -> int: return len(self.axes_of(AxisType.GROUP_REDUCE))
def bufs_from_ast(ast:UOp, dname:str) -> list[Buffer]:
  """Allocate one Buffer on device `dname` for each PARAM in the ast, ordered by param index."""
  params = sorted((u for u in ast.backward_slice if u.op is Ops.PARAM), key=lambda u: u.arg)
  return [Buffer(dname, p.ptrdtype.size, p.dtype if isinstance(p.dtype, ImageDType) else p.dtype.base) for p in params]
def apply_opts(ast:UOp, ren:Renderer) -> UOp:
  """Optimize a kernel AST: apply explicit opts_to_apply when given, else BEAM search, else
  hand-coded heuristics (unless NOOPT). Returns the optimized AST (the input unchanged if tagged)."""
  if ast.tag is not None: return ast
  k = Scheduler(ast, ren)
  k.convert_loop_to_global()
  if ast.arg is not None and ast.arg.opts_to_apply is not None:
    for opt in ast.arg.opts_to_apply: k.apply_opt(opt)
  elif BEAM >= 1:
    from tinygrad.codegen.opt.search import beam_search
    rawbufs = bufs_from_ast(ast, ren.device)
    # beam search may open devices
    with Context(ALLOW_DEVICE_USAGE=1):
      k = beam_search(k, rawbufs, BEAM.value, bool(getenv("BEAM_ESTIMATE", 1)))
  elif not NOOPT and (ast.arg is None or ast.arg.applied_opts == ()):
    from tinygrad.codegen.opt.heuristic import hand_coded_optimizations
    # NOTE: hand_coded_optimizations doesn't support multiblock opts yet
    if not any(u.op is Ops.BUFFERIZE for u in ast.backward_slice):
      k = hand_coded_optimizations(k)
  return k.get_optimized_ast(name_override=ast.arg.name if ast.arg is not None and ast.arg.name != "test" else None)
def make_image(pa, off, idx):
  # Rewrite an INDEX of a non-image PARAM to an image dtype (imageh/imagef) when ImageDType.valid_dims
  # finds a usable (h, w) factoring; half-based images load as float and cast back to half.
  # Returns None (no rewrite) when the param is already an image or no valid dims exist.
  if not isinstance(dt:=pa.dtype, ImageDType) and (idx.tag is None or idx.tag) and (shapes:=ImageDType.valid_dims(dt)):
    new_pa = pa.replace(dtype=(dtypes.imageh if dt.base==dtypes.half else dtypes.imagef)(shapes[0] + (4,), shapes[0][1] * 4 * dt.itemsize))
    new_idx = idx.replace(src=(new_pa, off), dtype=dtypes.float if dt.base == dtypes.half else idx.dtype)
    return new_idx if idx.tag or dt.base == dtypes.float else new_idx.cast(dtypes.half)
# rewrite PARAM indexing into image-typed accesses where possible, and clean up the resulting double casts
pm_make_images = PatternMatcher([
  # ensure we don't create an unfoldable image store
  (UPat(Ops.STORE, src=(UPat.var("idx"),), allow_any_len=True, name="st"), lambda idx,st:
    st.replace(src=(idx.rtag(is_image:=any(c.op is Ops.RANGE and (c.vmax+1)%4 == 0 for c in idx.src[1].get_idx().split_uop(Ops.ADD))),
      st.src[1].cast(dtypes.float if is_image and ImageDType.valid_dims(idx.src[0].dtype) else idx.dtype.base)))),
  (UPat(Ops.INDEX, src=(UPat(Ops.PARAM, name="pa"), UPat.var("off")), name="idx"), make_image),
  # remove double cast from image loads / stores
  (UPat(Ops.INDEX, src=(UPat(Ops.PARAM, name="pa"),), allow_any_len=True, name="idx").cast(dtypes.half).cast(dtypes.float), lambda idx,pa:
    idx if isinstance(pa.dtype, ImageDType) else None),
  (UPat(Ops.STORE, src=(UPat(Ops.PARAM, name="pa").index(UPat()), UPat.var("val").cast(dtypes.half).cast(dtypes.float)), name="st"), lambda st,pa,val:
    st.replace(src=(st.src[0], val)) if isinstance(pa.dtype, ImageDType) else None),
])
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "tinygrad/codegen/opt/postrange.py",
"license": "MIT License",
"lines": 333,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
from dataclasses import dataclass, field, replace
import itertools
from tinygrad.dtype import dtypes, PtrDType, ImageDType, AddrSpace, Invalid
from tinygrad.uop.ops import PatternMatcher, UPat, Ops, UOp, resolve, GroupOp, _substitute, KernelInfo
from tinygrad.uop.ops import graph_rewrite, sint, AxisType, BottomUpGate, profile_matches, should_resolve_call
from tinygrad.uop.symbolic import symbolic
from tinygrad.helpers import prod, all_same, getenv, dedup, all_int, DEBUG, SPLIT_REDUCEOP, DEBUG_RANGEIFY, VIZ, MAX_KERNEL_BUFFERS
from tinygrad.helpers import PCONTIG, partition, get_single_element
from tinygrad.codegen.simplify import pm_flatten_range, pm_reduce_simplify
from tinygrad.codegen.opt import Opt
from tinygrad.schedule.indexing import run_rangeify, BufferizeOpts, ALWAYS_CONTIGUOUS, IndexingContext, apply_movement_op
from tinygrad.schedule.multi import multi_pm
# creation can recurse a lot
import sys
sys.setrecursionlimit(10000)
# desugaring rewrites applied before the main passes
pm_syntactic_sugar = PatternMatcher([
  # INDEX on ptr INDEX concats them
  (UPat(Ops.INDEX, name="i1").f(Ops.INDEX, name="i2", allow_any_len=True),
   lambda i1,i2: i2.replace(src=i1.src+i2.src[1:]) if isinstance(i1.dtype, PtrDType) and not isinstance(i2.dtype, PtrDType) else None),
])
# movement op on INDEX as a PatternMatcher
pm_mops = PatternMatcher([
  # apply the movement op directly to the index expressions via apply_movement_op
  (UPat(GroupOp.Movement, name="r").f(Ops.INDEX, allow_any_len=True, name="idx"),
   lambda r,idx: r.src[0].index(*apply_movement_op(r.op, r.src[0].shape, r.marg, idx.src[1:]), dtype=idx.dtype, arg=idx.arg)),
  # move movement ops after AFTER
  (UPat(GroupOp.Movement, name="r").after(name="a", allow_any_len=True),
   lambda r,a: UOp(r.op, r.dtype, (a.replace(src=(r.src[0],)+a.src[1:]),)+r.src[1:], r.arg)),
  (UPat(GroupOp.Movement, name="r").end(name="a", allow_any_len=True), lambda r,a: a.replace(src=(r.src[0],)+a.src[1:])),
])
# *****************
# 0. do some cleanup rewrites, mostly copied from the old stuff
def fix_assign_hazard(assign:UOp, target:UOp, src:UOp):
  """Force the ASSIGN source contiguous when it reads the target through an index-reordering
  movement op, which would otherwise create a read/write hazard."""
  # PERMUTE and FLIP reorder indices, SHRINK can have overlapping regions when dest is also shrunk
  unsafe = {Ops.PERMUTE, Ops.FLIP} | ({Ops.SHRINK} if target.op_in_backward_slice_with_self(Ops.SHRINK) else set())
  if any(s.op in unsafe and target.base in s.backward_slice for s in src.toposort(gate=lambda s:s.op not in ALWAYS_CONTIGUOUS)):
    return assign.replace(src=(target, src.contiguous()))
def normalize_assign_target_chain(assign:UOp, target:UOp, src:UOp):
  """Point an ASSIGN whose target is itself an ASSIGN chain at the chain's root buffer.

  When the RHS reads the intermediate assign result, it is made contiguous to break the dependency.
  """
  base = target
  while base.op is Ops.ASSIGN:
    base = base.src[0]
  # when RHS depends on the previous assign result, break with contiguous
  new_src = src.contiguous() if target in src.toposort() else src
  return assign.replace(src=(base, new_src))
def split_reduceop(reduce:UOp, x:UOp):
  """Split a large reduce into two kernels: a partial reduce into a buffer, then a final reduce.

  Only fires when SPLIT_REDUCEOP is set, all shapes are concrete ints, and the input/output size
  ratio crosses REDUCEOP_SPLIT_THRESHOLD; returns None otherwise.
  """
  if prod(reduce.shape) == 0: return None
  if not SPLIT_REDUCEOP or not all_int(x.shape) or (prod(x.shape)//prod(reduce.shape))<getenv("REDUCEOP_SPLIT_THRESHOLD", 32768): return None
  # if there are few globals, make some reduces into globals by splitting into two kernels
  # cap output buffer to 2**22: heuristic number of global outputs to achieve max occupancy with enough locals+upcasts for gemm
  # ~2**10 should be enough if GROUP is used
  # 256 split maximum should be "negligible reduce" for low prod(reduce.shape), 8 split minimum.
  # split is moved to the end to provide maximum locality for the second phase reduce.
  # get expanded by rangeifying the UOp x
  indexed = x.index(*[UOp.range(s, i) if resolve(s>1) else UOp.const(dtypes.index, 0) for i,s in enumerate(x.shape)])
  range_nums = [y.arg[0] for y in indexed.substitute({x.base:UOp(Ops.NOOP)}, extra_pm=pm_mops).ranges]
  is_expanded = [i not in range_nums for i in range(len(x.shape))]
  # only split a reduced axis that divides evenly and is not expanded
  if not (split_candidates:=[(i,d) for i in reduce.arg[1] for d in range(min(256,2**getenv("REDUCEOP_SPLIT_SIZE",22)//prod(reduce.shape)),8-1,-1)
                             if x.shape[i]%d==0 and not is_expanded[i]]): return None
  dim_to_split, divisor = split_candidates[0]
  splitted_shape = x.shape[:dim_to_split]+(divisor,)+(x.shape[dim_to_split]//divisor,)+x.shape[dim_to_split+1:]
  splitted = x.reshape(splitted_shape).permute(tuple([d for d in range(len(splitted_shape)) if d!=dim_to_split]+[dim_to_split]))
  if DEBUG >= 3: print(f"split {divisor}: {x.shape} -> {splitted.shape} -> {reduce.shape}")
  # reduce original axes, then split
  return splitted.r(*reduce.arg).contiguous().r(reduce.arg[0], (len(reduce.shape),)).reshape(reduce.shape)
# movement-op cleanup applied as part of earliest_rewrites
mop_cleanup = PatternMatcher([
  # merge adjacent RESHAPES
  (UPat(Ops.RESHAPE, src=(UPat(Ops.RESHAPE, name="x2"), UPat()), name="x"), lambda x,x2: x.replace(src=(x2.src[0], x.src[1]))),
])
# collect every PARAM uop into the ctx list (used by resolve_call)
pm_gather_params = PatternMatcher([ (UPat(Ops.PARAM, name="p"), lambda ctx, p: ctx.append(p)), ])
def resolve_call(c:UOp, allow_param_mismatch=True) -> UOp|None:
  """Inline a CALL uop by substituting its args for the PARAMs found in the called graph.

  Validates per-arg axis/max_shape/dtype agreement (TypeError on mismatch); returns None when
  should_resolve_call says this CALL isn't ready to resolve.
  """
  if not should_resolve_call(c): return None
  params: list[UOp] = []
  graph_rewrite(c.src[0], pm_gather_params, bottom_up=True, ctx=params, name="gather params")
  params = sorted(params, key=lambda x: x.arg)
  args = c.src[1:]
  # NOTE: this isn't really needed. it's okay if there's unused args in the function
  if not allow_param_mismatch:
    if [x.arg for x in params] != list(range(len(params))): raise RuntimeError(f"params not in order: {[x.arg for x in params]}")
    if len(params) != len(args): raise TypeError(f"expected {len(params)} args, got {len(args)}")
  # each param's arg is its position in the call's argument list
  dict_map = {x:args[x.arg] for x in params}
  for i, (p, a) in enumerate(dict_map.items()):
    if p.axis != a.axis: raise TypeError(f"arg {i} axis mismatch: expected {p.axis}, got {a.axis}")
    if p.max_shape != a.max_shape: raise TypeError(f"arg {i} shape mismatch: expected {p.shape}, got {a.shape}")
    if p.dtype != a.dtype: raise TypeError(f"arg {i} dtype mismatch: expected {p.dtype}, got {a.dtype}")
  return c.src[0].substitute(dict_map, walk=True)
# rewrites applied earliest: CALL inlining, reduce splitting, copy/assign canonicalization
earliest_rewrites = mop_cleanup+PatternMatcher([
  # resolve calls
  (UPat(Ops.CALL, name="c"), resolve_call),
  # split_reduceop
  (UPat(Ops.REDUCE_AXIS, name="reduce", src=(UPat.var("x"),)), split_reduceop),
  # remove DETACH/CONTIGUOUS_BACKWARD (TODO: this is copied in allocations)
  (UPat((Ops.DETACH, Ops.CONTIGUOUS_BACKWARD), name="x"), lambda x: x.src[0]),
  # remove contiguous on movement ops before a copy on disk
  (UPat(GroupOp.Movement-{Ops.SHRINK, Ops.RESHAPE}, name="x").f(Ops.CONTIGUOUS).f(Ops.COPY, allow_any_len=True, name="copy"),
   lambda x,copy: copy.replace(src=(x,)+copy.src[1:]) if isinstance(x.device, str) and x.device.startswith("DISK") else None),
  # push copy past movement ops to disk
  (UPat(GroupOp.Movement-{Ops.SHRINK, Ops.RESHAPE}, name="x").f(Ops.COPY, allow_any_len=True, name="copy"),
   lambda x,copy: x.replace(src=(copy.replace(src=(x.src[0],)+copy.src[1:]),)+x.src[1:]) \
     if isinstance(x.device, str) and x.device.startswith("DISK") else None),
  # ** copy rules **
  # COPY and source size need to match
  (UPat(Ops.COPY, src=(UPat(GroupOp.Movement, name="r"), UPat(name="d")), name="c"),
   lambda c,r,d: c.replace(src=(r.contiguous(), d)) if r.size != r.base.size else None),
  # copy only to different device
  (UPat(Ops.COPY, src=(UPat.var("x"), UPat()), name="copy"), lambda x,copy: x.f(Ops.NOOP) if x.device == copy.device else None),
  # ** assign rules **
  # collapse nested ASSIGN to the same buffer (e.g. __iadd__ in __setitem__)
  (UPat(Ops.ASSIGN, src=(UPat(name="target"), UPat(Ops.ASSIGN, src=(UPat(name="target"), UPat()), name="src"))), lambda target, src: src),
  # move bitcast from assign target to source: a.bitcast(X).assign(src) -> a.assign(src.bitcast(a.dtype))
  (UPat(Ops.ASSIGN, src=(UPat(Ops.BITCAST, src=(UPat(name="target"),)), UPat(name="src"))),
   lambda target, src: target.assign(src.bitcast(target.dtype))),
  # if assign target is itself an ASSIGN chain, canonicalize to the original buffer target
  (UPat(Ops.ASSIGN, src=(UPat(Ops.ASSIGN, name="target"), UPat(name="src")), allow_any_len=True, name="assign"), normalize_assign_target_chain),
  # make source contiguous if it has hazardous movement ops on the dest buffer
  (UPat(Ops.ASSIGN, src=(UPat.var("target"), UPat.var("src")), name="assign"), fix_assign_hazard),
])
# *****************
# 3.5 cleanups
# ops whose bufferizes are never optimized away (see cleanup_dead_axes / remove_bufferize)
ALWAYS_RUN_OPS = {Ops.CONTIGUOUS, Ops.COPY, Ops.ASSIGN, Ops.ENCDEC, Ops.NOOP}
# you don't know in the first pass if axes are going to die, this happens if there's an EXPAND to the left
def cleanup_dead_axes(b:UOp):
  """Drop BUFFERIZE ranges the source doesn't use: reshape them to size-1 axes, then expand back."""
  # don't optimize ALWAYS_RUN_OPS
  if b.src[0].op in ALWAYS_RUN_OPS: return None
  new_rng = []
  hit = False
  reshape: list[sint] = []
  for s,rng in zip(b.shape, b.src[1:]):
    # skip for symbolic. TODO: fix this
    if rng.op is Ops.RANGE and rng.src[0].op is not Ops.CONST: return None
    # CONSTs are already dead axes
    if rng.op is Ops.CONST or (rng.op is Ops.RANGE and rng not in b.src[0].ranges):
      reshape.append(1)
      hit = True
    else:
      reshape.append(s)
      new_rng.append(rng)
  if hit:
    return b.replace(src=b.src[0:1]+tuple(new_rng)).reshape(tuple(reshape)).expand(b.shape)
def gate_substitute(ctx, b:UOp) -> None:
  """Gate for bottom-up substitute: stop descending below nodes whose ranges contain none of the keys."""
  if not any(r in b.ranges for r in ctx.keys()): raise BottomUpGate()
# compiled=False: the gate function raises, which compiled matchers don't support
pm_gate_substitute = PatternMatcher([(UPat(GroupOp.All, name="b"), gate_substitute)], compiled=False)
# if a buffer is being stored just for permutes or something, remove it
# we want to reexpress the indexes of idx2 in terms of the implied b1
def remove_bufferize(src:UOp, buf:UOp, idx:UOp):
  """Cost model for eliminating a BUFFERIZE+INDEX roundtrip by substituting index expressions into src.

  Returns None to keep the buffer, a partial local bufferize for the pcontig case, or the
  fully-substituted src when the buffer is removed.
  """
  # see if we can't do it, should this ever hit?
  assert len(buf.src) == len(idx.src), f"index on wrong bufferize, {len(buf.src)} != {len(idx.src)}"
  assert all(x.op in {Ops.RANGE, Ops.CONST} for x in buf.src[1:])
  # if it's user contiguous, we never remove it
  if src.op in ALWAYS_RUN_OPS or not buf.arg.removable: return None
  # we don't want to bufferize threefry, also causes problems because not all platforms support long
  if src.op is not Ops.THREEFRY:
    # *** here is where we compute the cost ***
    # if we return None, the bufferize is kept
    accessed_buffers: list[UOp] = []
    indexes: list[UOp] = []
    reduces: list[UOp] = []
    # gathers buffers/indexes/reduces reachable from src, not descending past buffer boundaries
    def red_gate(x:UOp):
      if (x.op is Ops.BUFFERIZE and x.arg.addrspace == AddrSpace.GLOBAL) or x.op is Ops.MSTACK:
        accessed_buffers.append(x)
        return False
      if x.op is Ops.PARAM:
        accessed_buffers.append(x)
      if x.op is Ops.INDEX:
        indexes.append(x)
      if x.op is Ops.REDUCE: reduces.append(x)
      return True
    src.toposort(gate=red_gate)
    del red_gate
    accessed_buffers = dedup(accessed_buffers)
    # if this is generated from multiple buffers, don't remove this buffer
    if len(accessed_buffers) > 3 and not (PCONTIG > 2): return None
    # if any reduces access a buffer, don't remove this buffer
    buffer_in_reduce = False
    def buf_gate(x:UOp):
      nonlocal buffer_in_reduce
      if x.op in {Ops.PARAM, Ops.BUFFERIZE}: buffer_in_reduce = True
      return not buffer_in_reduce
    UOp.sink(*[x.src[0] for x in reduces]).toposort(gate=buf_gate)
    del buf_gate
    if buffer_in_reduce:
      if PCONTIG > 2:
        out_in_ratio = (prod(buf.shape)+1) / (sum([x.size for x in accessed_buffers])+1)
        if out_in_ratio < 10: return None
        # here we have to check the indexes, we might do a partial contig here
        local_indexes = [x for x in indexes if x.src[0].op is Ops.BUFFERIZE and x.src[0].arg.addrspace == AddrSpace.LOCAL]
        exclude_ranges = UOp.group(*[UOp.group(*x.src[1:]) for x in local_indexes]).ranges
        subs = [(k,v) for k,v in zip(buf.src[1:], idx.src[1:]) if k.op is not Ops.CONST]
        # if it's bufferized or a reduce, it's pcontig
        is_pcontig, is_subs = partition(subs, lambda x: x[0] in exclude_ranges or any([r.arg[-1] == AxisType.REDUCE for r in x[1].ranges]))
        if not len(is_subs):
          return None
        if len(is_pcontig):
          ret = src.substitute(dict(is_subs), extra_pm=pm_gate_substitute)
          return ret.bufferize(*[x[0] for x in is_pcontig], arg=BufferizeOpts(None, AddrSpace.LOCAL)).index(*[x[1] for x in is_pcontig])
      else:
        return None
  # if it makes it here, the bufferize is removed
  # this is the ranges replaced
  # NOTE: if buf src is a const, we don't replace it. if idx is Invalid (dead load), don't replace it either
  replaced = {k:v for k,v in zip(buf.src[1:], idx.src[1:]) if k.op is not Ops.CONST and not (v.op is Ops.CONST and v.arg is Invalid)}
  return src.substitute(replaced, extra_pm=pm_gate_substitute)
def remove_noop_bufferize(idx, b2):
  """Drop a BUFFERIZE that just re-stores an INDEX over exactly the same ranges (a no-op roundtrip)."""
  if idx.src[1:] != b2.src[1:] or idx.src[0].op is Ops.BUFFER_VIEW: return None
  inner = idx.src[0]
  if not b2.shape: return inner
  return inner.shrink(tuple((0, s) for s in b2.shape))
# const folding around BUFFERIZE/INDEX, plus stripping image dtypes where they can't apply
pm_const_buffer_folding = pm_mops+PatternMatcher([
  (UPat(Ops.BUFFERIZE, name="b"), cleanup_dead_axes),
  (UPat(GroupOp.All-{Ops.BUFFERIZE, Ops.PARAM}, name="x"), lambda x: x.replace(dtype=x.dtype.base) if isinstance(x.dtype, ImageDType) else None),
  (UPat((Ops.BUFFERIZE), name="x"), lambda x: x.replace(dtype=x.dtype.base) if isinstance(x.dtype, ImageDType)
    and (resolve(prod(x.dtype.shape)!=prod(x.shape)) or x.shape[-1]%4!=0) else None),
  # remove noop buffers. if we look at the next index we can remove even more of these
  (UPat(Ops.INDEX, name="idx").f(Ops.BUFFERIZE, allow_any_len=True, name="b2"), remove_noop_bufferize),
  # no buffers for const (ranges don't matter for const - it's the same value everywhere)
  (UPat(Ops.CONST, name='c').f(Ops.BUFFERIZE, allow_any_len=True, name="b"), lambda c,b: b.const_like(c.arg)),
  # indexing a const is a const
  (UPat(Ops.INDEX, src=(UPat(Ops.CONST, name="c"),),), lambda c: c),
  # copy on CONST is CONST
  (UPat(Ops.COPY, src=(UPat.cvar("x"), UPat()), name="copy"), lambda copy,x: copy.const_like(x.arg)),
  # hack if a noop turned to a const
  (UPat(Ops.NOOP, src=(UPat.cvar("c"),), name="noop"), lambda c,noop: c),
  # mstack on CONST is CONST
  (UPat(Ops.MSTACK, src=(UPat.var("s"),), allow_any_len=True).f(Ops.INDEX, allow_any_len=True),
   lambda s: UOp.const(c.dtype, c.arg) if (c:=s.base).op is Ops.CONST else None),
])
# drive remove_bufferize over every BUFFERIZE+INDEX pair
pm_remove_bufferize = PatternMatcher([
  # remove reindexing with cost function
  (UPat.var("src").f(Ops.BUFFERIZE, allow_any_len=True, name="buf").f(Ops.INDEX, allow_any_len=True, name="idx"), remove_bufferize),
])
def late_buffer_view(t:UOp, b:UOp):
  """On DISK/TINYFS devices, replace a BITCAST/CONTIGUOUS bufferize source with a
  BUFFER_VIEW (size, offset) over the underlying indexed buffer."""
  if not (isinstance(b.device, str) and b.device.startswith(("DISK", "TINYFS"))): return b
  shape = b.shape
  size = prod(shape)
  # walk up for the INDEX
  x = t
  while not any(u.op is Ops.INDEX for u in x.src):
    assert x.op not in GroupOp.Elementwise, "can't buffer view elementwise"
    x = x.src[0]
  x = next(u for u in x.src if u.op is Ops.INDEX)
  # scalar views take the literal index; otherwise offset is the minimum reachable index (clamped at 0)
  if len(shape) == 0: offset = x.src[1].arg
  else: offset = max(sum(idx.vmin for idx in x.src[1:]), 0)
  return b.replace(src=(UOp(Ops.BUFFER_VIEW, t.dtype, (x.base,), (size, offset)), b.src[1]))
# apply late_buffer_view to BITCAST/CONTIGUOUS bufferizes
to_bufferview = PatternMatcher([
  (UPat(Ops.BUFFERIZE, src=(UPat((Ops.BITCAST, Ops.CONTIGUOUS), name="t"), UPat()), name="b"), late_buffer_view),
])
# per-device kernel argument limits, consumed by limit_bufs
DEVICE_MAX_BUFS = {"METAL": 31, "WEBGPU": 8} # TODO: get from device?
def limit_bufs(ctx:IndexingContext, root:UOp):
  """If root reads more buffers than the device allows, bufferize its elementwise sources to split the kernel.

  The limit comes from MAX_KERNEL_BUFFERS or DEVICE_MAX_BUFS; one slot is reserved for the output.
  """
  if (device:=root._device) is None: return None # no device, index related calculations
  device = device if isinstance(device, str) else device[0].split(":")[0]
  if not (MAX_BUFS:=MAX_KERNEL_BUFFERS.value or DEVICE_MAX_BUFS.get(device, 0)): return None
  bufs: set[UOp] = set()
  def gate_input(u:UOp):
    # TODO: add cache to fix n^2
    if is_load:=(u.op in {Ops.BUFFERIZE, Ops.AFTER, Ops.PARAM, Ops.MSELECT, Ops.MSTACK, Ops.DEFINE_VAR}): bufs.add(u)
    return not is_load
  root.toposort(gate=gate_input)
  if len(bufs) > MAX_BUFS - 1: # NOTE: this -1 is for the output buffer
    srcs = []
    for s in root.src:
      if s.op in GroupOp.Elementwise and s._device is not None:
        # Insert bufferize: all AxisType.REDUCE before bufferize are AxisType.LOOP
        orig_ranges, end_ranges = s.ranges, [x.replace(arg=(next(ctx.range_idx), AxisType.LOOP)) if x.op is Ops.RANGE else x for x in s.ranges]
        s = s.substitute(dict(zip(orig_ranges, end_ranges))).bufferize(*end_ranges, arg=BufferizeOpts(device=s.device)).index(*orig_ranges)
      srcs.append(s)
    return root.replace(src=tuple(srcs))
# run limit_bufs on every binary/ternary op
pm_limit_bufs = PatternMatcher([(UPat(set.union(GroupOp.Binary, GroupOp.Ternary), name="root"), limit_bufs)])
# *****************
# 4. put in buffers for bufferize
# TODO: should BUFFERIZE look a lot more like STORE
# BUFFERIZE has device in arg
# BUFFERIZE doesn't have indexing, that's implied by the ranges it closes
# BUFFERIZE returns the BUFFER ready for INDEXing (doing this will make splitting a lot easier)
# NOTE: this has been fixed up a bit
def bufferize_to_store(ctx:itertools.count, x:UOp, idx:UOp, allow_locals=True):
  """Lower a BUFFERIZE into BUFFER/DEFINE_LOCAL plus a STORE that ends the ranges it closes.

  ASSIGN sources store back into their target buffer; GLOBAL bufferizes allocate a fresh BUFFER;
  locals (when allow_locals) get a DEFINE_LOCAL with a barrier after the store.
  ctx is a counter handing out unique buffer/local ids.
  """
  size = prod(x.shape)
  rngs = sorted(idx.ranges, key=lambda x: x.arg)
  assert size > 0 and isinstance(size, int), f"no zero sized or symbolic sized buffers {size}"
  sdtype = x.dtype.ptr(size=size, addrspace=x.arg.addrspace)
  if (assign := x.src[0]).op is Ops.ASSIGN:
    assign_target, assign_src = assign.src[0], assign.src[1]
    assert assign_target.op is Ops.INDEX, f"{assign_target.op} is not index"
    while assign_src.op is Ops.NOOP: assign_src = assign_src.src[0]
    # skip self-assign from same-device copy, otherwise create the store
    # in assign, this is the buffer size, not the bufferize size
    if assign_src is assign_target: ret = assign_target.src[0]
    else: ret = assign_target.src[0].after(assign_target.replace(dtype=sdtype).store(assign_src).end(*rngs))
    # replay the movement ops recorded on the assign
    for op, marg in reversed(assign.arg or ()): ret = ret._mop(op, marg)
    return ret
  # NOTE: the DEFINE_LOCAL needs to be disambiguated here
  if sdtype.addrspace == AddrSpace.GLOBAL:
    buf = UOp(Ops.BUFFER, x.dtype, (UOp(Ops.LUNIQUE, arg=next(ctx)), UOp(Ops.DEVICE, arg=x.arg.device)), size)
    do_store = buf.index(idx, dtype=sdtype).store(x.src[0]).end(*rngs)
    return buf.after(do_store)
  if allow_locals:
    # handle locals
    buf = UOp(Ops.DEFINE_LOCAL, sdtype, arg=next(ctx))
    do_store = buf.broadcast(x.src[1].dtype.count).index(idx, dtype=sdtype).store(x.src[0]).end(*rngs)
    return buf.after(do_store.barrier())
# collapse any BUFFERIZE to single input BUFFERIZE
def flatten_bufferize(x:UOp):
  """Rewrite a multi-range BUFFERIZE into one over a single flattened index, then reshape back
  (and shrink to the symbolic extents when any range is non-const)."""
  if len(x.src) == 2: return None
  ret = x.replace(src=(x.src[0], get_single_element(apply_movement_op(Ops.RESHAPE, (prod(x.shape),), x.shape, x.src[1:]))))
  rngs = x.src[1:]
  ret = ret.reshape(x.shape)
  if any(r.op is Ops.RANGE and r.src[0].op is not Ops.CONST for r in rngs):
    sym_shape = tuple([r.src[0] if r.op is not Ops.CONST else 1 for r in rngs])
    ret = ret.shrink(tuple([(0,x) for x in sym_shape]))
  return ret
pm_flatten_bufferize = PatternMatcher([(UPat(Ops.BUFFERIZE, name="x"), flatten_bufferize)])
# global pass: lower BUFFERIZE to real buffers+stores (locals disallowed), clean up MOPs/AFTERs
pm_add_buffers = pm_mops+pm_flatten_bufferize+to_bufferview+PatternMatcher([
  (UPat(Ops.BUFFERIZE, src=(UPat(), UPat(name="idx")), name="x"), lambda ctx,x,idx: bufferize_to_store(ctx, x, idx, allow_locals=False)),
  # move RESHAPEs through MSELECT/MSTACK
  (UPat((Ops.MSELECT, Ops.MSTACK), src=UPat(Ops.RESHAPE), name="m"),
   lambda m: m.replace(src=tuple([x.src[0].base for x in m.src])).reshape(m.shape)),
  # remove any RESHAPEs on KERNEL
  (UPat(Ops.CALL, name="k"), lambda k: k.replace(src=tuple(x.src[0] if x.op is Ops.RESHAPE else x for x in k.src))),
  # remove MOP on AFTER
  (UPat(Ops.AFTER, src=(UPat.var("x"), UPat(GroupOp.Movement, name="y"))), lambda x,y: x.after(y.src[0])),
  # remove double AFTER
  (UPat(Ops.AFTER, src=(UPat.var("x"), UPat(Ops.AFTER, name="y"))), lambda x,y: x.after(*y.src[1:]))
])
# local-allowed variant: a BUFFERIZE may also become a DEFINE_LOCAL (see bufferize_to_store)
pm_add_buffers_local = pm_mops+pm_flatten_bufferize+to_bufferview+PatternMatcher([
  (UPat(Ops.BUFFERIZE, src=(UPat(), UPat(name="idx")), name="x"), bufferize_to_store),
])
# *****************
# 5. split into kernels
@dataclass
class LocalAddBufferContext:
  """Mutable per-kernel state threaded through the kernel-split rewrites."""
  dg:int = 0  # next PARAM slot to hand out (see debuf)
  map:dict = field(default_factory=dict)  # buffer UOp -> AFTER/buffer that supplies it as a CALL arg
  vars:dict = field(default_factory=dict)  # BIND UOps encountered inside the kernel (value unused, dict keeps order)
  range:int = 0  # next renumbered RANGE id (see renumber_range)
  opts:tuple|None = None  # Opt tuple captured from a CONTIGUOUS arg (see get_contiguous)
def debuf(ctx:LocalAddBufferContext, buf:UOp):
  """Replace a BUFFER with a numbered PARAM, recording the original buffer in ctx.map."""
  ret = UOp(Ops.PARAM, buf.dtype.ptr(buf.size), arg=ctx.dg).reshape(buf.shape)
  # keep ctx.map insertion order aligned with PARAM numbering so CALL args line up
  if buf not in ctx.map: ctx.map[buf] = buf
  ctx.dg += 1
  return ret
def unbind_kernel(ctx:LocalAddBufferContext, b:UOp):
  """Record a BIND in the kernel context and strip it, leaving only the bound variable."""
  bound_var = b.src[0]
  ctx.vars[b] = None
  return bound_var
def handle_after(ctx:LocalAddBufferContext, after:UOp):
  """Detach an AFTER/MSTACK/MSELECT from the kernel body, recording the producer in ctx.map."""
  # local buffers stay inside the kernel; only global deps are lifted out
  if isinstance(after.dtype, PtrDType) and after.ptrdtype.addrspace == AddrSpace.LOCAL: return None
  buf = after.buf_uop
  # HACK to put the buffer in the MAP instead of MSTACK/MSELECT
  if buf.op in {Ops.MSTACK, Ops.MSELECT}: buf = buf.src[0]
  assert buf not in ctx.map
  ctx.map[buf] = after
  return buf
def renumber_range(ctx:LocalAddBufferContext, r:UOp):
  """Renumber tagged RANGEs sequentially from 0 so structurally-equal kernels dedup."""
  # only ranges still carrying the empty tag (set by pm_add_range_tags) are processed
  if r.tag == ():
    renumbered = r.replace(arg=(ctx.range,)+r.arg[1:], tag=None)
    ctx.range += 1
    return renumbered
  return None
def find_bufs(x:UOp):
  """Raise when the same buffer is reached through two different INDEX source ops in one kernel (a read/write cycle)."""
  idxs = [s for s in x.toposort(gate=lambda x: x.op is not Ops.AFTER) if s.op is Ops.INDEX]
  read_from: dict[UOp, Ops] = {}
  # setdefault records the first src op seen per buffer; a later mismatch means conflicting access
  if any((buf:=idx.buf_uop).op in {Ops.BUFFER, Ops.PARAM} and read_from.setdefault(buf, op:=idx.src[0].op) is not op for idx in idxs):
    raise RuntimeError(f"cycle detected while indexing {buf}")
# kernel-extraction rewrites: buffers become numbered PARAMs, BINDs become variables, ranges get renumbered
to_define_global = PatternMatcher([
  (UPat(Ops.STORE, name="x"), find_bufs),
  (UPat(Ops.BUFFER, name="buf"), debuf),
  (UPat(Ops.PARAM, src=(UPat(), UPat(Ops.DEVICE)), name="buf"), debuf),
  (UPat(Ops.PARAM, src=(UPat(), UPat(), UPat.cvar('vmin'), UPat.cvar('vmax'), UPat.var("nm")), name="v"),
   lambda v, vmin, vmax, nm: UOp.variable(nm.arg, vmin.arg, vmax.arg, v.dtype)),
  (UPat(Ops.INDEX, src=(UPat(Ops.DEFINE_VAR, name="v"),)), lambda v: v),
  (UPat(Ops.BIND, name="b"), unbind_kernel),
  (UPat((Ops.MSTACK, Ops.MSELECT, Ops.AFTER), name="after"), handle_after),
  # remove device from local BUFFERIZE
  (UPat(Ops.BUFFERIZE, name="b"), lambda b: b.replace(arg=replace(b.arg, device=None))),
  # remove UNIQUE/DEVICE to dedup CONST
  (UPat(Ops.CONST, name="c"), lambda c: c.replace(src=()) if len(c.src) else None),
  # renumber the ranges starting with 0 so that kernel deduping works
  (UPat(Ops.RANGE, name="r"), renumber_range),
])
def get_contiguous(ctx:LocalAddBufferContext, x:UOp):
  """Strip a CONTIGUOUS, capturing any Opt tuple stashed in its arg into the context."""
  arg = x.arg
  if isinstance(arg, tuple) and all(isinstance(y, Opt) for y in arg):
    ctx.opts = arg
  return x.src[0]
# cleanup of the extracted kernel body: strip CONTIGUOUS/NOOP, fix local-buffer load dtypes
rangeify_codegen = PatternMatcher([
  (UPat(Ops.CONTIGUOUS, name="x"), get_contiguous),
  # no NOOP in the kernel graph
  # TODO: this can be moved into codegen?
  (UPat(Ops.NOOP, name="x"), lambda x: x.src[0] if len(x.src) else None),
  # fix broadcast dtype
  (UPat(Ops.AFTER, name="a").broadcast(name="b"), lambda a,b: a.broadcast(len(b.src))),
  (UPat(Ops.DEFINE_LOCAL).f(Ops.AFTER, allow_any_len=True).broadcast(name="dg").f(Ops.INDEX, name="idx", allow_any_len=True),
   lambda dg,idx: None if isinstance(idx.dtype, (PtrDType, ImageDType)) else
   idx.replace(dtype=dg.dtype, arg=None).load(dtype=dg.dtype.base.scalar().vec(dg.dtype.vcount))),
  (UPat(Ops.AFTER, name="a").gep(name="b"), lambda a,b: a.gep(b.arg)),
  (UPat(Ops.DEFINE_LOCAL).f(Ops.AFTER, allow_any_len=True).gep(name="dg").f(Ops.INDEX, name="idx", allow_any_len=True),
   lambda dg,idx: None if isinstance(idx.dtype, (PtrDType, ImageDType)) else
   idx.replace(dtype=dg.dtype, arg=None).load(dtype=dg.dtype.base.scalar().vec(dg.dtype.vcount))),
])
# tag every RANGE with the empty tuple so renumber_range knows which ones still need renumbering
pm_add_range_tags = PatternMatcher([
  (UPat(Ops.RANGE, name="x"), lambda x: x.rtag(())),
])
def split_store(x:UOp) -> UOp|None:
  """Extract a fully-closed STORE/END into a standalone kernel: a CALL of a SINK (or COPY/BUFFER_VIEW/ENCDEC)."""
  # if we have any open ranges here, we don't split
  if x.ranges: return None
  # local kernel rewrite
  lctx = LocalAddBufferContext()
  ret = graph_rewrite(x, to_define_global+pm_flatten_range+rangeify_codegen, ctx=lctx, name="kernel split", bottom_up=True)
  # SINK requires all buffers on the same device, but COPY/BUFFER_VIEW/ENCDEC are cross-device or special hardware ops
  if ret.op is Ops.STORE: stored = ret.src[1]
  elif ret.op is Ops.END and ret.src[0].op is Ops.STORE: stored = ret.src[0].src[1]
  else: raise RuntimeError(f"unknown kernel type {ret.op}")
  if stored.op in {Ops.COPY, Ops.BUFFER_VIEW}: ret = stored.replace(src=stored.src + ret.ended_ranges)
  elif stored.op is Ops.ENCDEC: ret = stored
  else: ret = ret.sink(arg=KernelInfo(opts_to_apply=lctx.opts))
  # the CALL carries the kernel plus its buffers and bound variables as sources
  kernel = ret.call(*lctx.map.values(), *lctx.vars.keys())
  if ret.op is Ops.SINK and not all_same([x.device for x in kernel.src[1:] if x.op is not Ops.BIND]):
    raise RuntimeError(f"all buffers must be on the same device: {tuple(b.buf_uop for b in kernel.src[1:])}")
  return kernel
# every closed STORE/END becomes its own kernel via split_store
split_kernels = PatternMatcher([
  (UPat((Ops.STORE, Ops.END), name="x"), split_store),
])
@profile_matches
def get_kernel_graph(sink:UOp) -> UOp:
  """Run the full rangeify pipeline on a SINK: movement ops -> ranges -> buffers -> split kernels -> WAR deps."""
  tsink = graph_rewrite(sink, multi_pm, name="multi_pm")
  tsink = graph_rewrite(tsink, pm_syntactic_sugar+pm_mops+earliest_rewrites, bottom_up=True, name="earliest rewrites")
  # convert movement ops to ranges
  tsink, rctx = run_rangeify(tsink, bool(DEBUG_RANGEIFY))
  tsink = graph_rewrite(tsink, symbolic+pm_reduce_simplify+pm_const_buffer_folding+pm_remove_bufferize, name="symbolic+reduce_collapse+debuf")
  tsink = graph_rewrite(tsink, pm_limit_bufs, ctx=rctx, name="limit buffers")
  if VIZ: graph_rewrite(tsink, PatternMatcher([]), name="View Rangeify")
  # bufferize -> store
  # new LUNIQUE ids start above any already present so they stay unique
  lunique_start: int = max([-1]+[x.arg for x in tsink.toposort() if x.op is Ops.LUNIQUE]) + 1
  tsink = graph_rewrite(tsink, pm_add_buffers+pm_add_range_tags, ctx=itertools.count(lunique_start), bottom_up=True, name="bufferize to store")
  tsink = graph_rewrite(tsink, split_kernels, bottom_up=True, name="split kernels")
  # WAR deps: if kernel U reads buffer S, and S is also written by another kernel, S's write must wait for U to finish
  afters = [u for u in tsink.toposort() if u.op is Ops.AFTER]
  kernel_assign: dict[UOp, UOp] = {u.buf_uop:u for u in afters}
  assign_rep: dict[UOp, UOp] = {}
  for u in afters:
    for s in u.src[1].src:
      # TODO: this is probably broken for MSELECT/MSTACK
      if s.op not in {Ops.BUFFER, Ops.PARAM} or s is u.buf_uop or (a:=kernel_assign.get(s)) is None: continue
      if a.src[1] is u.src[1]: continue # same kernel (multi-output custom kernels)
      if any(x.op is Ops.AFTER and x.buf_uop is s for x in kernel_assign[u.buf_uop].backward_slice):
        raise RuntimeError(f"cycle detected in assign graph, buffers {s} and {u.buf_uop} have circular dependency")
      # append u to the writer's AFTER sources so the write waits for the read
      assign_rep[a] = kernel_assign[s] = a.replace(src=a.src+(u,))
  if assign_rep: tsink = graph_rewrite(tsink, _substitute, ctx=assign_rep, bottom_up=True, name="fix_assign")
  if VIZ: graph_rewrite(tsink, PatternMatcher([]), name="View Kernel Graph")
  return tsink
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "tinygrad/schedule/rangeify.py",
"license": "MIT License",
"lines": 440,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
tinygrad/tinygrad:extra/huggingface_onnx/huggingface_manager.py | import yaml
import time
import requests
import argparse
from pathlib import Path
from huggingface_hub import list_models, HfApi, snapshot_download
from tinygrad.helpers import _ensure_downloads_dir
DOWNLOADS_DIR = _ensure_downloads_dir() / "models"
from tinygrad.helpers import tqdm
def snapshot_download_with_retry(*, repo_id: str, allow_patterns: list[str]|tuple[str, ...]|None=None, local_dir: str|Path|None=None,
tries: int=2, **kwargs) -> Path:
for attempt in range(tries):
try:
return Path(snapshot_download(
repo_id=repo_id,
allow_patterns=allow_patterns,
local_dir=str(local_dir) if local_dir is not None else None,
**kwargs
))
except Exception as e:
if attempt == tries-1: raise
time.sleep(1)
# Constants for filtering models
HUGGINGFACE_URL = "https://huggingface.co"
# substrings of ONNX filenames to skip (variants we cannot run or validate)
SKIPPED_FILES = [
  "fp16", "int8", "uint8", "quantized", # numerical accuracy issues
  "avx2", "arm64", "avx512", "avx512_vnni", # numerical accuracy issues
  "q4", "q4f16", "bnb4", # unimplemented quantization
  "model_O4", # requires non cpu ort runner and MemcpyFromHost op
  "merged", # TODO implement attribute with graph type and Loop op
]
# full repo ids skipped entirely (unsupported ops or broken hub metadata)
SKIPPED_REPO_PATHS = [
  # Invalid model-index
  "AdamCodd/vit-base-nsfw-detector",
  # TODO: implement attribute with graph type and Loop op
  "minishlab/potion-base-8M", "minishlab/M2V_base_output", "minishlab/potion-retrieval-32M",
  # TODO: implement SimplifiedLayerNormalization, SkipSimplifiedLayerNormalization, GroupQueryAttention
  "HuggingFaceTB/SmolLM2-360M-Instruct",
  # TODO: implement SimplifiedLayerNormalization, SkipSimplifiedLayerNormalization, RotaryEmbedding, MultiHeadAttention
  "HuggingFaceTB/SmolLM2-1.7B-Instruct",
  # TODO: implement RandomNormalLike
  "stabilityai/stable-diffusion-xl-base-1.0", "stabilityai/sdxl-turbo", 'SimianLuo/LCM_Dreamshaper_v7',
  # TODO: implement NonZero
  "mangoapps/fb_zeroshot_mnli_onnx",
  # TODO huge Concat in here with 1024 (1, 3, 32, 32) Tensors, and maybe a MOD bug with const folding
  "briaai/RMBG-2.0",
]
class HuggingFaceONNXManager:
  """Discover popular ONNX repos on the HuggingFace hub, collect file metadata, and optionally download them."""
  def __init__(self):
    self.base_dir = Path(__file__).parent  # where the metadata YAML is written
    self.models_dir = DOWNLOADS_DIR        # where model files are downloaded
    self.api = HfApi()
  def discover_models(self, limit: int, sort: str = "downloads") -> list[str]:
    """Return up to `limit` repo ids tagged "onnx", sorted by `sort`, skipping known-bad repos."""
    print(f"Discovering top {limit} ONNX models sorted by {sort}...")
    # guard: with limit <= 0 the break below would never fire and we'd enumerate the whole hub
    if limit <= 0: return []
    repos = []
    i = 0
    for model in list_models(filter="onnx", sort=sort):
      if model.id in SKIPPED_REPO_PATHS:
        continue
      print(f" {i+1}/{limit}: {model.id} ({getattr(model, sort)})")
      repos.append(model.id)
      i += 1
      if i == limit:
        break
    print(f"Found {len(repos)} suitable ONNX models")
    return repos
  def collect_metadata(self, repos: list[str]) -> dict:
    """Build a metadata dict (file list + sizes) per repo; repos with no usable ONNX files are dropped."""
    print(f"Collecting metadata for {len(repos)} repositories...")
    metadata = {"repositories": {}}
    total_size = 0
    for repo in tqdm(repos, desc="Collecting metadata"):
      try:
        files_metadata = []
        model_info = self.api.model_info(repo)
        for file in model_info.siblings:
          filename = file.rfilename
          if not (filename.endswith('.onnx') or filename.endswith('.onnx_data')):
            continue
          if any(skip_str in filename for skip_str in SKIPPED_FILES):
            continue
          # Get file size from API or HEAD request
          try:
            # BUGFIX: URL previously contained a literal "(unknown)" placeholder instead of the filename
            head = requests.head(
              f"{HUGGINGFACE_URL}/{repo}/resolve/main/{filename}",
              allow_redirects=True,
              timeout=10
            )
            file_size = file.size or int(head.headers.get('Content-Length', 0))
          except requests.RequestException:
            file_size = file.size or 0
          files_metadata.append({
            "file": filename,
            "size": f"{file_size/1e6:.2f}MB"
          })
          total_size += file_size
        if files_metadata: # Only add repos with valid ONNX files
          metadata["repositories"][repo] = {
            "url": f"{HUGGINGFACE_URL}/{repo}",
            "download_path": None,
            "files": files_metadata,
          }
      except Exception as e:
        print(f"WARNING: Failed to collect metadata for {repo}: {e}")
        continue
    metadata['total_size'] = f"{total_size/1e9:.2f}GB"
    metadata['created_at'] = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
    print(f"Collected metadata for {len(metadata['repositories'])} repositories")
    print(f"Total estimated download size: {metadata['total_size']}")
    return metadata
  def download_models(self, metadata: dict) -> dict:
    """Download every repo listed in `metadata`, recording each download path (None on failure)."""
    self.models_dir.mkdir(parents=True, exist_ok=True)
    repos = metadata["repositories"]
    n = len(repos)
    print(f"Downloading {n} repositories to {self.models_dir}...")
    for i, (model_id, model_data) in enumerate(repos.items()):
      print(f" Downloading {i+1}/{n}: {model_id}...")
      try:
        # Download ONNX model files
        allow_patterns = [file_info["file"] for file_info in model_data["files"]]
        root_path = snapshot_download_with_retry(
          repo_id=model_id,
          allow_patterns=allow_patterns,
          local_dir=str(self.models_dir / model_id)
        )
        # Download config files (usually small)
        snapshot_download_with_retry(
          repo_id=model_id,
          allow_patterns=["*config.json"],
          local_dir=str(self.models_dir / model_id)
        )
        model_data["download_path"] = str(root_path)
        print(f" Downloaded to: {root_path}")
      except Exception as e:
        print(f" ERROR: Failed to download {model_id}: {e}")
        model_data["download_path"] = None
        continue
    successful_downloads = sum(1 for repo in repos.values() if repo["download_path"] is not None)
    print(f"Successfully downloaded {successful_downloads}/{n} repositories")
    print(f"All models saved to: {self.models_dir}")
    return metadata
  def save_metadata(self, metadata: dict, output_file: str):
    """Write `metadata` as YAML next to this script."""
    yaml_path = self.base_dir / output_file
    with open(yaml_path, 'w') as f:
      yaml.dump(metadata, f, sort_keys=False)
    print(f"Metadata saved to: {yaml_path}")
  def discover_and_download(self, limit: int, output_file: str = "huggingface_repos.yaml",
                            sort: str = "downloads", download: bool = True):
    """End-to-end workflow: discover repos, collect metadata, optionally download, then save the YAML."""
    print(f"Starting HuggingFace ONNX workflow...")
    print(f" Limit: {limit} models")
    print(f" Sort by: {sort}")
    print(f" Download: {'Yes' if download else 'No'}")
    print(f" Output: {output_file}")
    print("-" * 50)
    repos = self.discover_models(limit, sort)
    metadata = self.collect_metadata(repos)
    if download:
      metadata = self.download_models(metadata)
    self.save_metadata(metadata, output_file)
    print("-" * 50)
    print("Workflow completed successfully!")
    if download:
      successful = sum(1 for repo in metadata["repositories"].values()
                       if repo["download_path"] is not None)
      print(f"{successful}/{len(metadata['repositories'])} models downloaded")
    return metadata
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="HuggingFace ONNX Model Manager - Discover, collect metadata, and download ONNX models",
)
parser.add_argument("--limit", type=int, help="Number of top repositories to process")
parser.add_argument("--output", type=str, default="huggingface_repos.yaml",
help="Output YAML file name (default: huggingface_repos.yaml)")
parser.add_argument("--sort", type=str, default="downloads",
choices=["downloads", "likes", "created", "modified"],
help="Sort criteria for model discovery (default: downloads)")
parser.add_argument("--download", action="store_true", default=False,
help="Download models after collecting metadata")
args = parser.parse_args()
if not args.limit: parser.error("--limit is required")
manager = HuggingFaceONNXManager()
manager.discover_and_download(
limit=args.limit,
output_file=args.output,
sort=args.sort,
download=args.download
) | {
"repo_id": "tinygrad/tinygrad",
"file_path": "extra/huggingface_onnx/huggingface_manager.py",
"license": "MIT License",
"lines": 186,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
tinygrad/tinygrad:extra/hcq/hcq_smi.py | #!/usr/bin/env python3
import argparse, glob, os, time, subprocess, sys
from tinygrad.helpers import temp
def scan_devs_based_on_lock(prefix:str, args) -> list[str]:
  """Return device ids that own a `{prefix}_*.lock` file, optionally filtered by args.pci_bus.

  args is an argparse Namespace; not every subcommand defines pci_bus, so it is
  read with getattr (the original used the non-idiomatic `'pci_bus' in args.__dir__()`).
  """
  target_dev = getattr(args, 'pci_bus', "")
  devs = []
  for dev in glob.glob(temp(f'{prefix}_*.lock')):
    # strip the "{prefix}_" head and the ".lock" tail from the lock file's basename
    dev_id = dev.split('/')[-1][len(prefix)+1:-5]
    if dev_id.startswith(target_dev): devs.append(dev_id)
  return devs
# write 1 to the device's sysfs reset node to trigger a PCI function reset (needs root)
def _do_reset_device(pci_bus): os.system(f"sudo sh -c 'echo 1 > /sys/bus/pci/devices/{pci_bus}/reset'")
def _is_module_loaded(name: str) -> bool: return os.path.isdir(f"/sys/module/{name}")
def cmd_remove_module(args):
  """Unload the GPU kernel modules for the selected backend via `modprobe -r`.

  NVIDIA needs several modules removed in dependency order; AMD only amdgpu.
  Exits with modprobe's return code if unloading fails (modules in use).
  """
  modules = ["nvidia_drm", "nvidia_modeset", "nvidia_uvm", "nvidia", "ast"] if args.backend == "nv" else ["amdgpu"]
  to_unload = [m for m in modules if _is_module_loaded(m)]
  if not to_unload: print("Kernel modules are not loaded")
  else:
    print("Removing kernel modules:", ", ".join(to_unload))
    try: subprocess.run(["sudo", "modprobe", "-r", *to_unload], check=True)
    except subprocess.CalledProcessError as e:
      # BUGFIX: message previously contained mojibake ("β") where a dash belongs
      print("Failed to unload all modules — they may be in use.", file=sys.stderr)
      sys.exit(e.returncode)
def cmd_insert_module(args):
  """Reload the backend's kernel module from scratch: unload, reset devices, then insert."""
  cmd_remove_module(args)
  cmd_reset_devices(args)
  module = "nvidia" if args.backend == "nv" else "amdgpu"
  if _is_module_loaded(module):
    print(f"{module} kernel module already loaded")
    return
  print(f"Inserting kernel module: {module}")
  if args.backend == "nv":
    # running nvidia-smi loads the nvidia modules as a side effect
    subprocess.run(["nvidia-smi"], check=True)
  elif args.backend == "amd":
    subprocess.run(["sudo", "modprobe", "amdgpu"], check=True)
def cmd_reset_devices(args):
  """PCI-reset every locked device of the selected backend."""
  devs = scan_devs_based_on_lock({"amd":"am", "nv":"nv"}[args.backend], args)
  for dev in devs:
    print(f"Resetting device {dev}")
    # NOTE(review): AMD devices are intentionally skipped here — presumably reset is
    # handled by the amdgpu module reload path; confirm before changing
    if args.backend != "amd": _do_reset_device(dev)
    time.sleep(0.2)
def cmd_show_pids(args):
  """Print the pid holding each device's lock file (via `sudo lsof`)."""
  devs = scan_devs_based_on_lock(prefix:={"amd":"am", "nv":"nv"}[args.backend], args)
  for dev in devs:
    try:
      # lsof output: header line, then one row per holder; column 2 is the PID
      pid = subprocess.check_output(['sudo', 'lsof', temp(f'{prefix}_{dev}.lock')]).decode('utf-8').strip().split('\n')[1].split()[1]
      print(f"{dev}: {pid}")
    except subprocess.CalledProcessError: print(f"{dev}: No processes found using this device")
def cmd_kill_pids(args):
  """Repeatedly kill -9 whatever process holds each device's lock file, until none remain."""
  devs = scan_devs_based_on_lock(prefix:={"amd":"am", "nv":"nv"}[args.backend], args)
  for dev in devs:
    # bounded loop: multiple processes may hold the lock in turn
    for i in range(128):
      if i > 0: time.sleep(0.2)
      try:
        # lsof failing means no holder is left for this device
        try: pid = subprocess.check_output(['sudo', 'lsof', temp(f'{prefix}_{dev}.lock')]).decode('utf-8').strip().split('\n')[1].split()[1]
        except subprocess.CalledProcessError: break
        print(f"Killing process {pid} (which uses {dev})")
        subprocess.run(['sudo', 'kill', '-9', pid], check=True)
      except subprocess.CalledProcessError as e:
        print(f"Failed to kill process for device {dev}: {e}", file=sys.stderr)
def add_common_commands(parent_subparsers):
  """Register the shared subcommands (insmod/rmmod/reset/pids/kill_pids) on a backend parser.

  Each subparser dispatches through set_defaults(func=...) from the __main__ block.
  (The original reused one local name `p_reset` for three different subparsers; they
  now have distinct names for readability — no behavior change.)
  """
  p_insmod = parent_subparsers.add_parser("insmod", help="Insert a kernel module")
  p_insmod.set_defaults(func=cmd_insert_module)
  p_rmmod = parent_subparsers.add_parser("rmmod", help="Remove a kernel module")
  p_rmmod.set_defaults(func=cmd_remove_module)
  p_reset = parent_subparsers.add_parser("reset", help="Reset a device")
  p_reset.add_argument("--pci_bus", default="", help="PCI bus ID of the device to reset")
  p_reset.set_defaults(func=cmd_reset_devices)
  p_pids = parent_subparsers.add_parser("pids", help="Show pids of processes using the device")
  p_pids.add_argument("--pci_bus", default="", help="PCI bus ID of the device")
  p_pids.set_defaults(func=cmd_show_pids)
  p_kill_pids = parent_subparsers.add_parser("kill_pids", help="Kill pids of processes using the device")
  p_kill_pids.add_argument("--pci_bus", default="", help="PCI bus ID of the device")
  p_kill_pids.set_defaults(func=cmd_kill_pids)
if __name__ == "__main__":
  parser = argparse.ArgumentParser()
  # one subparser per hardware backend, each exposing the same command set
  backend_subparsers = parser.add_subparsers(dest="backend", required=True, metavar="{nv,amd}", help="Hardware backend to target")
  nv_parser = backend_subparsers.add_parser("nv", help="NVIDIA GPUs")
  nv_commands = nv_parser.add_subparsers(dest="command", required=True)
  add_common_commands(nv_commands)
  amd_parser = backend_subparsers.add_parser("amd", help="AMD GPUs")
  amd_commands = amd_parser.add_subparsers(dest="command", required=True)
  add_common_commands(amd_commands)
  args = parser.parse_args()
  if args.command is None:
    parser.print_help(sys.stderr)
    sys.exit(1)
  # dispatch to the handler registered via set_defaults(func=...)
  args.func(args)
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "extra/hcq/hcq_smi.py",
"license": "MIT License",
"lines": 87,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
tinygrad/tinygrad:tinygrad/runtime/autogen/ib.py | # mypy: disable-error-code="empty-body"
from __future__ import annotations
import ctypes
from typing import Annotated, Literal, TypeAlias
from tinygrad.runtime.support.c import _IO, _IOW, _IOR, _IOWR
from tinygrad.runtime.support import c
# NOTE: auto-generated libibverbs bindings — do not hand-edit field offsets/sizes
dll = c.DLL('ib', 'ibverbs', use_errno=True)
@c.record
class union_ibv_gid(c.Struct):
  SIZE = 16
  raw: Annotated[c.Array[uint8_t, Literal[16]], 0]
  _global: Annotated[union_ibv_gid_global, 0]
uint8_t: TypeAlias = Annotated[int, ctypes.c_ubyte]
@c.record
class union_ibv_gid_global(c.Struct):
  SIZE = 16
  subnet_prefix: Annotated[Annotated[int, ctypes.c_uint64], 0]
  interface_id: Annotated[Annotated[int, ctypes.c_uint64], 8]
__be64: TypeAlias = Annotated[int, ctypes.c_uint64]
# auto-generated: GID type enum and GID table entry
class enum_ibv_gid_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_GID_TYPE_IB = enum_ibv_gid_type.define('IBV_GID_TYPE_IB', 0)
IBV_GID_TYPE_ROCE_V1 = enum_ibv_gid_type.define('IBV_GID_TYPE_ROCE_V1', 1)
IBV_GID_TYPE_ROCE_V2 = enum_ibv_gid_type.define('IBV_GID_TYPE_ROCE_V2', 2)
@c.record
class struct_ibv_gid_entry(c.Struct):
  SIZE = 32
  gid: Annotated[union_ibv_gid, 0]
  gid_index: Annotated[uint32_t, 16]
  port_num: Annotated[uint32_t, 20]
  gid_type: Annotated[uint32_t, 24]
  ndev_ifindex: Annotated[uint32_t, 28]
uint32_t: TypeAlias = Annotated[int, ctypes.c_uint32]
# auto-generated: node/transport/capability/fork/atomic enums from ibverbs headers
class enum_ibv_node_type(Annotated[int, ctypes.c_int32], c.Enum): pass
IBV_NODE_UNKNOWN = enum_ibv_node_type.define('IBV_NODE_UNKNOWN', -1)
IBV_NODE_CA = enum_ibv_node_type.define('IBV_NODE_CA', 1)
IBV_NODE_SWITCH = enum_ibv_node_type.define('IBV_NODE_SWITCH', 2)
IBV_NODE_ROUTER = enum_ibv_node_type.define('IBV_NODE_ROUTER', 3)
IBV_NODE_RNIC = enum_ibv_node_type.define('IBV_NODE_RNIC', 4)
IBV_NODE_USNIC = enum_ibv_node_type.define('IBV_NODE_USNIC', 5)
IBV_NODE_USNIC_UDP = enum_ibv_node_type.define('IBV_NODE_USNIC_UDP', 6)
IBV_NODE_UNSPECIFIED = enum_ibv_node_type.define('IBV_NODE_UNSPECIFIED', 7)
class enum_ibv_transport_type(Annotated[int, ctypes.c_int32], c.Enum): pass
IBV_TRANSPORT_UNKNOWN = enum_ibv_transport_type.define('IBV_TRANSPORT_UNKNOWN', -1)
IBV_TRANSPORT_IB = enum_ibv_transport_type.define('IBV_TRANSPORT_IB', 0)
IBV_TRANSPORT_IWARP = enum_ibv_transport_type.define('IBV_TRANSPORT_IWARP', 1)
IBV_TRANSPORT_USNIC = enum_ibv_transport_type.define('IBV_TRANSPORT_USNIC', 2)
IBV_TRANSPORT_USNIC_UDP = enum_ibv_transport_type.define('IBV_TRANSPORT_USNIC_UDP', 3)
IBV_TRANSPORT_UNSPECIFIED = enum_ibv_transport_type.define('IBV_TRANSPORT_UNSPECIFIED', 4)
class enum_ibv_device_cap_flags(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_DEVICE_RESIZE_MAX_WR = enum_ibv_device_cap_flags.define('IBV_DEVICE_RESIZE_MAX_WR', 1)
IBV_DEVICE_BAD_PKEY_CNTR = enum_ibv_device_cap_flags.define('IBV_DEVICE_BAD_PKEY_CNTR', 2)
IBV_DEVICE_BAD_QKEY_CNTR = enum_ibv_device_cap_flags.define('IBV_DEVICE_BAD_QKEY_CNTR', 4)
IBV_DEVICE_RAW_MULTI = enum_ibv_device_cap_flags.define('IBV_DEVICE_RAW_MULTI', 8)
IBV_DEVICE_AUTO_PATH_MIG = enum_ibv_device_cap_flags.define('IBV_DEVICE_AUTO_PATH_MIG', 16)
IBV_DEVICE_CHANGE_PHY_PORT = enum_ibv_device_cap_flags.define('IBV_DEVICE_CHANGE_PHY_PORT', 32)
IBV_DEVICE_UD_AV_PORT_ENFORCE = enum_ibv_device_cap_flags.define('IBV_DEVICE_UD_AV_PORT_ENFORCE', 64)
IBV_DEVICE_CURR_QP_STATE_MOD = enum_ibv_device_cap_flags.define('IBV_DEVICE_CURR_QP_STATE_MOD', 128)
IBV_DEVICE_SHUTDOWN_PORT = enum_ibv_device_cap_flags.define('IBV_DEVICE_SHUTDOWN_PORT', 256)
IBV_DEVICE_INIT_TYPE = enum_ibv_device_cap_flags.define('IBV_DEVICE_INIT_TYPE', 512)
IBV_DEVICE_PORT_ACTIVE_EVENT = enum_ibv_device_cap_flags.define('IBV_DEVICE_PORT_ACTIVE_EVENT', 1024)
IBV_DEVICE_SYS_IMAGE_GUID = enum_ibv_device_cap_flags.define('IBV_DEVICE_SYS_IMAGE_GUID', 2048)
IBV_DEVICE_RC_RNR_NAK_GEN = enum_ibv_device_cap_flags.define('IBV_DEVICE_RC_RNR_NAK_GEN', 4096)
IBV_DEVICE_SRQ_RESIZE = enum_ibv_device_cap_flags.define('IBV_DEVICE_SRQ_RESIZE', 8192)
IBV_DEVICE_N_NOTIFY_CQ = enum_ibv_device_cap_flags.define('IBV_DEVICE_N_NOTIFY_CQ', 16384)
IBV_DEVICE_MEM_WINDOW = enum_ibv_device_cap_flags.define('IBV_DEVICE_MEM_WINDOW', 131072)
IBV_DEVICE_UD_IP_CSUM = enum_ibv_device_cap_flags.define('IBV_DEVICE_UD_IP_CSUM', 262144)
IBV_DEVICE_XRC = enum_ibv_device_cap_flags.define('IBV_DEVICE_XRC', 1048576)
IBV_DEVICE_MEM_MGT_EXTENSIONS = enum_ibv_device_cap_flags.define('IBV_DEVICE_MEM_MGT_EXTENSIONS', 2097152)
IBV_DEVICE_MEM_WINDOW_TYPE_2A = enum_ibv_device_cap_flags.define('IBV_DEVICE_MEM_WINDOW_TYPE_2A', 8388608)
IBV_DEVICE_MEM_WINDOW_TYPE_2B = enum_ibv_device_cap_flags.define('IBV_DEVICE_MEM_WINDOW_TYPE_2B', 16777216)
IBV_DEVICE_RC_IP_CSUM = enum_ibv_device_cap_flags.define('IBV_DEVICE_RC_IP_CSUM', 33554432)
IBV_DEVICE_RAW_IP_CSUM = enum_ibv_device_cap_flags.define('IBV_DEVICE_RAW_IP_CSUM', 67108864)
IBV_DEVICE_MANAGED_FLOW_STEERING = enum_ibv_device_cap_flags.define('IBV_DEVICE_MANAGED_FLOW_STEERING', 536870912)
class enum_ibv_fork_status(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_FORK_DISABLED = enum_ibv_fork_status.define('IBV_FORK_DISABLED', 0)
IBV_FORK_ENABLED = enum_ibv_fork_status.define('IBV_FORK_ENABLED', 1)
IBV_FORK_UNNEEDED = enum_ibv_fork_status.define('IBV_FORK_UNNEEDED', 2)
class enum_ibv_atomic_cap(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_ATOMIC_NONE = enum_ibv_atomic_cap.define('IBV_ATOMIC_NONE', 0)
IBV_ATOMIC_HCA = enum_ibv_atomic_cap.define('IBV_ATOMIC_HCA', 1)
IBV_ATOMIC_GLOB = enum_ibv_atomic_cap.define('IBV_ATOMIC_GLOB', 2)
# auto-generated: device-memory (DM) allocation attributes and handle
@c.record
class struct_ibv_alloc_dm_attr(c.Struct):
  SIZE = 16
  length: Annotated[size_t, 0]
  log_align_req: Annotated[uint32_t, 8]
  comp_mask: Annotated[uint32_t, 12]
size_t: TypeAlias = Annotated[int, ctypes.c_uint64]
class enum_ibv_dm_mask(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_DM_MASK_HANDLE = enum_ibv_dm_mask.define('IBV_DM_MASK_HANDLE', 1)
@c.record
class struct_ibv_dm(c.Struct):
  SIZE = 32
  context: Annotated[c.POINTER[struct_ibv_context], 0]
  memcpy_to_dm: Annotated[c.CFUNCTYPE[Annotated[int, ctypes.c_int32], [c.POINTER[struct_ibv_dm], uint64_t, ctypes.c_void_p, size_t]], 8]
  memcpy_from_dm: Annotated[c.CFUNCTYPE[Annotated[int, ctypes.c_int32], [ctypes.c_void_p, c.POINTER[struct_ibv_dm], uint64_t, size_t]], 16]
  comp_mask: Annotated[uint32_t, 24]
  handle: Annotated[uint32_t, 28]
# auto-generated: device context, device descriptor, and their internal op tables
@c.record
class struct_ibv_context(c.Struct):
  SIZE = 328
  device: Annotated[c.POINTER[struct_ibv_device], 0]
  ops: Annotated[struct_ibv_context_ops, 8]
  cmd_fd: Annotated[Annotated[int, ctypes.c_int32], 264]
  async_fd: Annotated[Annotated[int, ctypes.c_int32], 268]
  num_comp_vectors: Annotated[Annotated[int, ctypes.c_int32], 272]
  mutex: Annotated[pthread_mutex_t, 280]
  abi_compat: Annotated[ctypes.c_void_p, 320]
@c.record
class struct_ibv_device(c.Struct):
  SIZE = 664
  _ops: Annotated[struct__ibv_device_ops, 0]
  node_type: Annotated[enum_ibv_node_type, 16]
  transport_type: Annotated[enum_ibv_transport_type, 20]
  name: Annotated[c.Array[Annotated[bytes, ctypes.c_char], Literal[64]], 24]
  dev_name: Annotated[c.Array[Annotated[bytes, ctypes.c_char], Literal[64]], 88]
  dev_path: Annotated[c.Array[Annotated[bytes, ctypes.c_char], Literal[256]], 152]
  ibdev_path: Annotated[c.Array[Annotated[bytes, ctypes.c_char], Literal[256]], 408]
@c.record
class struct__ibv_device_ops(c.Struct):
  SIZE = 16
  _dummy1: Annotated[c.CFUNCTYPE[c.POINTER[struct_ibv_context], [c.POINTER[struct_ibv_device], Annotated[int, ctypes.c_int32]]], 0]
  _dummy2: Annotated[c.CFUNCTYPE[None, [c.POINTER[struct_ibv_context]]], 8]
# auto-generated: per-context verb function table (underscore-prefixed slots are ABI-compat placeholders)
@c.record
class struct_ibv_context_ops(c.Struct):
  SIZE = 256
  _compat_query_device: Annotated[c.CFUNCTYPE[Annotated[int, ctypes.c_int32], [c.POINTER[struct_ibv_context], c.POINTER[struct_ibv_device_attr]]], 0]
  _compat_query_port: Annotated[c.CFUNCTYPE[Annotated[int, ctypes.c_int32], [c.POINTER[struct_ibv_context], uint8_t, c.POINTER[struct__compat_ibv_port_attr]]], 8]
  _compat_alloc_pd: Annotated[c.CFUNCTYPE[ctypes.c_void_p, []], 16]
  _compat_dealloc_pd: Annotated[c.CFUNCTYPE[ctypes.c_void_p, []], 24]
  _compat_reg_mr: Annotated[c.CFUNCTYPE[ctypes.c_void_p, []], 32]
  _compat_rereg_mr: Annotated[c.CFUNCTYPE[ctypes.c_void_p, []], 40]
  _compat_dereg_mr: Annotated[c.CFUNCTYPE[ctypes.c_void_p, []], 48]
  alloc_mw: Annotated[c.CFUNCTYPE[c.POINTER[struct_ibv_mw], [c.POINTER[struct_ibv_pd], enum_ibv_mw_type]], 56]
  bind_mw: Annotated[c.CFUNCTYPE[Annotated[int, ctypes.c_int32], [c.POINTER[struct_ibv_qp], c.POINTER[struct_ibv_mw], c.POINTER[struct_ibv_mw_bind]]], 64]
  dealloc_mw: Annotated[c.CFUNCTYPE[Annotated[int, ctypes.c_int32], [c.POINTER[struct_ibv_mw]]], 72]
  _compat_create_cq: Annotated[c.CFUNCTYPE[ctypes.c_void_p, []], 80]
  poll_cq: Annotated[c.CFUNCTYPE[Annotated[int, ctypes.c_int32], [c.POINTER[struct_ibv_cq], Annotated[int, ctypes.c_int32], c.POINTER[struct_ibv_wc]]], 88]
  req_notify_cq: Annotated[c.CFUNCTYPE[Annotated[int, ctypes.c_int32], [c.POINTER[struct_ibv_cq], Annotated[int, ctypes.c_int32]]], 96]
  _compat_cq_event: Annotated[c.CFUNCTYPE[ctypes.c_void_p, []], 104]
  _compat_resize_cq: Annotated[c.CFUNCTYPE[ctypes.c_void_p, []], 112]
  _compat_destroy_cq: Annotated[c.CFUNCTYPE[ctypes.c_void_p, []], 120]
  _compat_create_srq: Annotated[c.CFUNCTYPE[ctypes.c_void_p, []], 128]
  _compat_modify_srq: Annotated[c.CFUNCTYPE[ctypes.c_void_p, []], 136]
  _compat_query_srq: Annotated[c.CFUNCTYPE[ctypes.c_void_p, []], 144]
  _compat_destroy_srq: Annotated[c.CFUNCTYPE[ctypes.c_void_p, []], 152]
  post_srq_recv: Annotated[c.CFUNCTYPE[Annotated[int, ctypes.c_int32], [c.POINTER[struct_ibv_srq], c.POINTER[struct_ibv_recv_wr], c.POINTER[c.POINTER[struct_ibv_recv_wr]]]], 160]
  _compat_create_qp: Annotated[c.CFUNCTYPE[ctypes.c_void_p, []], 168]
  _compat_query_qp: Annotated[c.CFUNCTYPE[ctypes.c_void_p, []], 176]
  _compat_modify_qp: Annotated[c.CFUNCTYPE[ctypes.c_void_p, []], 184]
  _compat_destroy_qp: Annotated[c.CFUNCTYPE[ctypes.c_void_p, []], 192]
  post_send: Annotated[c.CFUNCTYPE[Annotated[int, ctypes.c_int32], [c.POINTER[struct_ibv_qp], c.POINTER[struct_ibv_send_wr], c.POINTER[c.POINTER[struct_ibv_send_wr]]]], 200]
  post_recv: Annotated[c.CFUNCTYPE[Annotated[int, ctypes.c_int32], [c.POINTER[struct_ibv_qp], c.POINTER[struct_ibv_recv_wr], c.POINTER[c.POINTER[struct_ibv_recv_wr]]]], 208]
  _compat_create_ah: Annotated[c.CFUNCTYPE[ctypes.c_void_p, []], 216]
  _compat_destroy_ah: Annotated[c.CFUNCTYPE[ctypes.c_void_p, []], 224]
  _compat_attach_mcast: Annotated[c.CFUNCTYPE[ctypes.c_void_p, []], 232]
  _compat_detach_mcast: Annotated[c.CFUNCTYPE[ctypes.c_void_p, []], 240]
  _compat_async_event: Annotated[c.CFUNCTYPE[ctypes.c_void_p, []], 248]
# auto-generated: device attribute query result (ibv_query_device)
@c.record
class struct_ibv_device_attr(c.Struct):
  SIZE = 232
  fw_ver: Annotated[c.Array[Annotated[bytes, ctypes.c_char], Literal[64]], 0]
  node_guid: Annotated[Annotated[int, ctypes.c_uint64], 64]
  sys_image_guid: Annotated[Annotated[int, ctypes.c_uint64], 72]
  max_mr_size: Annotated[uint64_t, 80]
  page_size_cap: Annotated[uint64_t, 88]
  vendor_id: Annotated[uint32_t, 96]
  vendor_part_id: Annotated[uint32_t, 100]
  hw_ver: Annotated[uint32_t, 104]
  max_qp: Annotated[Annotated[int, ctypes.c_int32], 108]
  max_qp_wr: Annotated[Annotated[int, ctypes.c_int32], 112]
  device_cap_flags: Annotated[Annotated[int, ctypes.c_uint32], 116]
  max_sge: Annotated[Annotated[int, ctypes.c_int32], 120]
  max_sge_rd: Annotated[Annotated[int, ctypes.c_int32], 124]
  max_cq: Annotated[Annotated[int, ctypes.c_int32], 128]
  max_cqe: Annotated[Annotated[int, ctypes.c_int32], 132]
  max_mr: Annotated[Annotated[int, ctypes.c_int32], 136]
  max_pd: Annotated[Annotated[int, ctypes.c_int32], 140]
  max_qp_rd_atom: Annotated[Annotated[int, ctypes.c_int32], 144]
  max_ee_rd_atom: Annotated[Annotated[int, ctypes.c_int32], 148]
  max_res_rd_atom: Annotated[Annotated[int, ctypes.c_int32], 152]
  max_qp_init_rd_atom: Annotated[Annotated[int, ctypes.c_int32], 156]
  max_ee_init_rd_atom: Annotated[Annotated[int, ctypes.c_int32], 160]
  atomic_cap: Annotated[enum_ibv_atomic_cap, 164]
  max_ee: Annotated[Annotated[int, ctypes.c_int32], 168]
  max_rdd: Annotated[Annotated[int, ctypes.c_int32], 172]
  max_mw: Annotated[Annotated[int, ctypes.c_int32], 176]
  max_raw_ipv6_qp: Annotated[Annotated[int, ctypes.c_int32], 180]
  max_raw_ethy_qp: Annotated[Annotated[int, ctypes.c_int32], 184]
  max_mcast_grp: Annotated[Annotated[int, ctypes.c_int32], 188]
  max_mcast_qp_attach: Annotated[Annotated[int, ctypes.c_int32], 192]
  max_total_mcast_qp_attach: Annotated[Annotated[int, ctypes.c_int32], 196]
  max_ah: Annotated[Annotated[int, ctypes.c_int32], 200]
  max_fmr: Annotated[Annotated[int, ctypes.c_int32], 204]
  max_map_per_fmr: Annotated[Annotated[int, ctypes.c_int32], 208]
  max_srq: Annotated[Annotated[int, ctypes.c_int32], 212]
  max_srq_wr: Annotated[Annotated[int, ctypes.c_int32], 216]
  max_srq_sge: Annotated[Annotated[int, ctypes.c_int32], 220]
  max_pkeys: Annotated[uint16_t, 224]
  local_ca_ack_delay: Annotated[uint8_t, 226]
  phys_port_cnt: Annotated[uint8_t, 227]
uint64_t: TypeAlias = Annotated[int, ctypes.c_uint64]
uint16_t: TypeAlias = Annotated[int, ctypes.c_uint16]
# opaque forward declaration kept only for ABI-compat function-pointer signatures
class struct__compat_ibv_port_attr(ctypes.Structure): pass
@c.record
class struct_ibv_mw(c.Struct):
SIZE = 32
context: Annotated[c.POINTER[struct_ibv_context], 0]
pd: Annotated[c.POINTER[struct_ibv_pd], 8]
rkey: Annotated[uint32_t, 16]
handle: Annotated[uint32_t, 20]
type: Annotated[enum_ibv_mw_type, 24]
@c.record
class struct_ibv_pd(c.Struct):
SIZE = 16
context: Annotated[c.POINTER[struct_ibv_context], 0]
handle: Annotated[uint32_t, 8]
class enum_ibv_mw_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_MW_TYPE_1 = enum_ibv_mw_type.define('IBV_MW_TYPE_1', 1)
IBV_MW_TYPE_2 = enum_ibv_mw_type.define('IBV_MW_TYPE_2', 2)
# Queue pair (mirrors struct ibv_qp).
@c.record
class struct_ibv_qp(c.Struct):
  SIZE = 160
  context: Annotated[c.POINTER[struct_ibv_context], 0]
  qp_context: Annotated[ctypes.c_void_p, 8]
  pd: Annotated[c.POINTER[struct_ibv_pd], 16]
  send_cq: Annotated[c.POINTER[struct_ibv_cq], 24]
  recv_cq: Annotated[c.POINTER[struct_ibv_cq], 32]
  srq: Annotated[c.POINTER[struct_ibv_srq], 40]
  handle: Annotated[uint32_t, 48]
  qp_num: Annotated[uint32_t, 52]
  state: Annotated[enum_ibv_qp_state, 56]
  qp_type: Annotated[enum_ibv_qp_type, 60]
  # Embedded pthread synchronization objects from the C struct; mirrored only
  # for correct layout/size, not intended to be manipulated from Python.
  mutex: Annotated[pthread_mutex_t, 64]
  cond: Annotated[pthread_cond_t, 104]
  events_completed: Annotated[uint32_t, 152]
# Completion queue (mirrors struct ibv_cq).
@c.record
class struct_ibv_cq(c.Struct):
  SIZE = 128
  context: Annotated[c.POINTER[struct_ibv_context], 0]
  channel: Annotated[c.POINTER[struct_ibv_comp_channel], 8]
  cq_context: Annotated[ctypes.c_void_p, 16]
  handle: Annotated[uint32_t, 24]
  cqe: Annotated[Annotated[int, ctypes.c_int32], 28]
  mutex: Annotated[pthread_mutex_t, 32]
  cond: Annotated[pthread_cond_t, 72]
  comp_events_completed: Annotated[uint32_t, 120]
  async_events_completed: Annotated[uint32_t, 124]
# Completion event channel (mirrors struct ibv_comp_channel).
@c.record
class struct_ibv_comp_channel(c.Struct):
  SIZE = 16
  context: Annotated[c.POINTER[struct_ibv_context], 0]
  fd: Annotated[Annotated[int, ctypes.c_int32], 8]
  refcnt: Annotated[Annotated[int, ctypes.c_int32], 12]
# glibc pthread_mutex_t. The three fields all sit at offset 0: they emulate
# the C union of data struct / raw byte array / alignment member.
@c.record
class pthread_mutex_t(c.Struct):
  SIZE = 40
  __data: Annotated[struct___pthread_mutex_s, 0]
  __size: Annotated[c.Array[Annotated[bytes, ctypes.c_char], Literal[40]], 0]
  __align: Annotated[Annotated[int, ctypes.c_int64], 0]
# glibc internal mutex representation (struct __pthread_mutex_s).
@c.record
class struct___pthread_mutex_s(c.Struct):
  SIZE = 40
  __lock: Annotated[Annotated[int, ctypes.c_int32], 0]
  __count: Annotated[Annotated[int, ctypes.c_uint32], 4]
  __owner: Annotated[Annotated[int, ctypes.c_int32], 8]
  __nusers: Annotated[Annotated[int, ctypes.c_uint32], 12]
  __kind: Annotated[Annotated[int, ctypes.c_int32], 16]
  __spins: Annotated[Annotated[int, ctypes.c_int16], 20]
  __elision: Annotated[Annotated[int, ctypes.c_int16], 22]
  __list: Annotated[struct___pthread_internal_list, 24]
# glibc intrusive doubly-linked list node.
@c.record
class struct___pthread_internal_list(c.Struct):
  SIZE = 16
  __prev: Annotated[c.POINTER[struct___pthread_internal_list], 0]
  __next: Annotated[c.POINTER[struct___pthread_internal_list], 8]
__pthread_list_t: TypeAlias = struct___pthread_internal_list
# glibc pthread_cond_t; fields at offset 0 emulate the C union (see pthread_mutex_t).
@c.record
class pthread_cond_t(c.Struct):
  SIZE = 48
  __data: Annotated[struct___pthread_cond_s, 0]
  __size: Annotated[c.Array[Annotated[bytes, ctypes.c_char], Literal[48]], 0]
  __align: Annotated[Annotated[int, ctypes.c_int64], 0]
# glibc internal condition-variable representation (struct __pthread_cond_s).
@c.record
class struct___pthread_cond_s(c.Struct):
  SIZE = 48
  __wseq: Annotated[__atomic_wide_counter, 0]
  __g1_start: Annotated[__atomic_wide_counter, 8]
  __g_refs: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[2]], 16]
  __g_size: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[2]], 24]
  __g1_orig_size: Annotated[Annotated[int, ctypes.c_uint32], 32]
  __wrefs: Annotated[Annotated[int, ctypes.c_uint32], 36]
  __g_signals: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[2]], 40]
# 64-bit counter viewable as one u64 or a low/high u32 pair (C union).
@c.record
class __atomic_wide_counter(c.Struct):
  SIZE = 8
  __value64: Annotated[Annotated[int, ctypes.c_uint64], 0]
  __value32: Annotated[__atomic_wide_counter___value32, 0]
# 32-bit halves of __atomic_wide_counter (little-endian layout: low at 0).
@c.record
class __atomic_wide_counter___value32(c.Struct):
  SIZE = 8
  __low: Annotated[Annotated[int, ctypes.c_uint32], 0]
  __high: Annotated[Annotated[int, ctypes.c_uint32], 4]
# Shared receive queue (mirrors struct ibv_srq).
@c.record
class struct_ibv_srq(c.Struct):
  SIZE = 128
  context: Annotated[c.POINTER[struct_ibv_context], 0]
  srq_context: Annotated[ctypes.c_void_p, 8]
  pd: Annotated[c.POINTER[struct_ibv_pd], 16]
  handle: Annotated[uint32_t, 24]
  mutex: Annotated[pthread_mutex_t, 32]
  cond: Annotated[pthread_cond_t, 72]
  events_completed: Annotated[uint32_t, 120]
# QP state machine states (mirrors enum ibv_qp_state).
class enum_ibv_qp_state(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_QPS_RESET = enum_ibv_qp_state.define('IBV_QPS_RESET', 0)
IBV_QPS_INIT = enum_ibv_qp_state.define('IBV_QPS_INIT', 1)
IBV_QPS_RTR = enum_ibv_qp_state.define('IBV_QPS_RTR', 2)
IBV_QPS_RTS = enum_ibv_qp_state.define('IBV_QPS_RTS', 3)
IBV_QPS_SQD = enum_ibv_qp_state.define('IBV_QPS_SQD', 4)
IBV_QPS_SQE = enum_ibv_qp_state.define('IBV_QPS_SQE', 5)
IBV_QPS_ERR = enum_ibv_qp_state.define('IBV_QPS_ERR', 6)
IBV_QPS_UNKNOWN = enum_ibv_qp_state.define('IBV_QPS_UNKNOWN', 7)
# QP transport types (mirrors enum ibv_qp_type); values are non-contiguous by design.
class enum_ibv_qp_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_QPT_RC = enum_ibv_qp_type.define('IBV_QPT_RC', 2)
IBV_QPT_UC = enum_ibv_qp_type.define('IBV_QPT_UC', 3)
IBV_QPT_UD = enum_ibv_qp_type.define('IBV_QPT_UD', 4)
IBV_QPT_RAW_PACKET = enum_ibv_qp_type.define('IBV_QPT_RAW_PACKET', 8)
IBV_QPT_XRC_SEND = enum_ibv_qp_type.define('IBV_QPT_XRC_SEND', 9)
IBV_QPT_XRC_RECV = enum_ibv_qp_type.define('IBV_QPT_XRC_RECV', 10)
IBV_QPT_DRIVER = enum_ibv_qp_type.define('IBV_QPT_DRIVER', 255)
# Memory-window bind work request (mirrors struct ibv_mw_bind).
@c.record
class struct_ibv_mw_bind(c.Struct):
  SIZE = 48
  wr_id: Annotated[uint64_t, 0]
  send_flags: Annotated[Annotated[int, ctypes.c_uint32], 8]
  bind_info: Annotated[struct_ibv_mw_bind_info, 16]
# Parameters of an MW bind (mirrors struct ibv_mw_bind_info).
@c.record
class struct_ibv_mw_bind_info(c.Struct):
  SIZE = 32
  mr: Annotated[c.POINTER[struct_ibv_mr], 0]
  addr: Annotated[uint64_t, 8]
  length: Annotated[uint64_t, 16]
  mw_access_flags: Annotated[Annotated[int, ctypes.c_uint32], 24]
# Registered memory region (mirrors struct ibv_mr).
@c.record
class struct_ibv_mr(c.Struct):
  SIZE = 48
  context: Annotated[c.POINTER[struct_ibv_context], 0]
  pd: Annotated[c.POINTER[struct_ibv_pd], 8]
  addr: Annotated[ctypes.c_void_p, 16]
  length: Annotated[size_t, 24]
  handle: Annotated[uint32_t, 32]
  lkey: Annotated[uint32_t, 36]
  rkey: Annotated[uint32_t, 40]
# Work completion (mirrors struct ibv_wc). imm_data and invalidated_rkey
# both sit at offset 24: they emulate the C union in the header.
@c.record
class struct_ibv_wc(c.Struct):
  SIZE = 48
  wr_id: Annotated[uint64_t, 0]
  status: Annotated[enum_ibv_wc_status, 8]
  opcode: Annotated[enum_ibv_wc_opcode, 12]
  vendor_err: Annotated[uint32_t, 16]
  byte_len: Annotated[uint32_t, 20]
  imm_data: Annotated[Annotated[int, ctypes.c_uint32], 24]
  invalidated_rkey: Annotated[uint32_t, 24]
  qp_num: Annotated[uint32_t, 28]
  src_qp: Annotated[uint32_t, 32]
  wc_flags: Annotated[Annotated[int, ctypes.c_uint32], 36]
  pkey_index: Annotated[uint16_t, 40]
  slid: Annotated[uint16_t, 42]
  sl: Annotated[uint8_t, 44]
  dlid_path_bits: Annotated[uint8_t, 45]
# Completion status codes (mirrors enum ibv_wc_status).
class enum_ibv_wc_status(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_WC_SUCCESS = enum_ibv_wc_status.define('IBV_WC_SUCCESS', 0)
IBV_WC_LOC_LEN_ERR = enum_ibv_wc_status.define('IBV_WC_LOC_LEN_ERR', 1)
IBV_WC_LOC_QP_OP_ERR = enum_ibv_wc_status.define('IBV_WC_LOC_QP_OP_ERR', 2)
IBV_WC_LOC_EEC_OP_ERR = enum_ibv_wc_status.define('IBV_WC_LOC_EEC_OP_ERR', 3)
IBV_WC_LOC_PROT_ERR = enum_ibv_wc_status.define('IBV_WC_LOC_PROT_ERR', 4)
IBV_WC_WR_FLUSH_ERR = enum_ibv_wc_status.define('IBV_WC_WR_FLUSH_ERR', 5)
IBV_WC_MW_BIND_ERR = enum_ibv_wc_status.define('IBV_WC_MW_BIND_ERR', 6)
IBV_WC_BAD_RESP_ERR = enum_ibv_wc_status.define('IBV_WC_BAD_RESP_ERR', 7)
IBV_WC_LOC_ACCESS_ERR = enum_ibv_wc_status.define('IBV_WC_LOC_ACCESS_ERR', 8)
IBV_WC_REM_INV_REQ_ERR = enum_ibv_wc_status.define('IBV_WC_REM_INV_REQ_ERR', 9)
IBV_WC_REM_ACCESS_ERR = enum_ibv_wc_status.define('IBV_WC_REM_ACCESS_ERR', 10)
IBV_WC_REM_OP_ERR = enum_ibv_wc_status.define('IBV_WC_REM_OP_ERR', 11)
IBV_WC_RETRY_EXC_ERR = enum_ibv_wc_status.define('IBV_WC_RETRY_EXC_ERR', 12)
IBV_WC_RNR_RETRY_EXC_ERR = enum_ibv_wc_status.define('IBV_WC_RNR_RETRY_EXC_ERR', 13)
IBV_WC_LOC_RDD_VIOL_ERR = enum_ibv_wc_status.define('IBV_WC_LOC_RDD_VIOL_ERR', 14)
IBV_WC_REM_INV_RD_REQ_ERR = enum_ibv_wc_status.define('IBV_WC_REM_INV_RD_REQ_ERR', 15)
IBV_WC_REM_ABORT_ERR = enum_ibv_wc_status.define('IBV_WC_REM_ABORT_ERR', 16)
IBV_WC_INV_EECN_ERR = enum_ibv_wc_status.define('IBV_WC_INV_EECN_ERR', 17)
IBV_WC_INV_EEC_STATE_ERR = enum_ibv_wc_status.define('IBV_WC_INV_EEC_STATE_ERR', 18)
IBV_WC_FATAL_ERR = enum_ibv_wc_status.define('IBV_WC_FATAL_ERR', 19)
IBV_WC_RESP_TIMEOUT_ERR = enum_ibv_wc_status.define('IBV_WC_RESP_TIMEOUT_ERR', 20)
IBV_WC_GENERAL_ERR = enum_ibv_wc_status.define('IBV_WC_GENERAL_ERR', 21)
IBV_WC_TM_ERR = enum_ibv_wc_status.define('IBV_WC_TM_ERR', 22)
IBV_WC_TM_RNDV_INCOMPLETE = enum_ibv_wc_status.define('IBV_WC_TM_RNDV_INCOMPLETE', 23)
# Completion opcodes (mirrors enum ibv_wc_opcode); receive-side opcodes start at 128.
class enum_ibv_wc_opcode(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_WC_SEND = enum_ibv_wc_opcode.define('IBV_WC_SEND', 0)
IBV_WC_RDMA_WRITE = enum_ibv_wc_opcode.define('IBV_WC_RDMA_WRITE', 1)
IBV_WC_RDMA_READ = enum_ibv_wc_opcode.define('IBV_WC_RDMA_READ', 2)
IBV_WC_COMP_SWAP = enum_ibv_wc_opcode.define('IBV_WC_COMP_SWAP', 3)
IBV_WC_FETCH_ADD = enum_ibv_wc_opcode.define('IBV_WC_FETCH_ADD', 4)
IBV_WC_BIND_MW = enum_ibv_wc_opcode.define('IBV_WC_BIND_MW', 5)
IBV_WC_LOCAL_INV = enum_ibv_wc_opcode.define('IBV_WC_LOCAL_INV', 6)
IBV_WC_TSO = enum_ibv_wc_opcode.define('IBV_WC_TSO', 7)
IBV_WC_FLUSH = enum_ibv_wc_opcode.define('IBV_WC_FLUSH', 8)
IBV_WC_ATOMIC_WRITE = enum_ibv_wc_opcode.define('IBV_WC_ATOMIC_WRITE', 9)
IBV_WC_RECV = enum_ibv_wc_opcode.define('IBV_WC_RECV', 128)
IBV_WC_RECV_RDMA_WITH_IMM = enum_ibv_wc_opcode.define('IBV_WC_RECV_RDMA_WITH_IMM', 129)
IBV_WC_TM_ADD = enum_ibv_wc_opcode.define('IBV_WC_TM_ADD', 130)
IBV_WC_TM_DEL = enum_ibv_wc_opcode.define('IBV_WC_TM_DEL', 131)
IBV_WC_TM_SYNC = enum_ibv_wc_opcode.define('IBV_WC_TM_SYNC', 132)
IBV_WC_TM_RECV = enum_ibv_wc_opcode.define('IBV_WC_TM_RECV', 133)
IBV_WC_TM_NO_TAG = enum_ibv_wc_opcode.define('IBV_WC_TM_NO_TAG', 134)
IBV_WC_DRIVER1 = enum_ibv_wc_opcode.define('IBV_WC_DRIVER1', 135)
IBV_WC_DRIVER2 = enum_ibv_wc_opcode.define('IBV_WC_DRIVER2', 136)
IBV_WC_DRIVER3 = enum_ibv_wc_opcode.define('IBV_WC_DRIVER3', 137)
# Big-endian 32-bit value on the wire; stored as a plain u32 here.
__be32: TypeAlias = Annotated[int, ctypes.c_uint32]
# Receive work request: singly-linked list of scatter lists (mirrors struct ibv_recv_wr).
@c.record
class struct_ibv_recv_wr(c.Struct):
  SIZE = 32
  wr_id: Annotated[uint64_t, 0]
  next: Annotated[c.POINTER[struct_ibv_recv_wr], 8]
  sg_list: Annotated[c.POINTER[struct_ibv_sge], 16]
  num_sge: Annotated[Annotated[int, ctypes.c_int32], 24]
# Scatter/gather element (mirrors struct ibv_sge).
@c.record
class struct_ibv_sge(c.Struct):
  SIZE = 16
  addr: Annotated[uint64_t, 0]
  length: Annotated[uint32_t, 8]
  lkey: Annotated[uint32_t, 12]
# Send work request (mirrors struct ibv_send_wr). Overlapping offsets emulate
# the C unions: imm_data/invalidate_rkey at 36, bind_mw/tso at 80.
@c.record
class struct_ibv_send_wr(c.Struct):
  SIZE = 128
  wr_id: Annotated[uint64_t, 0]
  next: Annotated[c.POINTER[struct_ibv_send_wr], 8]
  sg_list: Annotated[c.POINTER[struct_ibv_sge], 16]
  num_sge: Annotated[Annotated[int, ctypes.c_int32], 24]
  opcode: Annotated[enum_ibv_wr_opcode, 28]
  send_flags: Annotated[Annotated[int, ctypes.c_uint32], 32]
  imm_data: Annotated[Annotated[int, ctypes.c_uint32], 36]
  invalidate_rkey: Annotated[uint32_t, 36]
  wr: Annotated[struct_ibv_send_wr_wr, 40]
  qp_type: Annotated[struct_ibv_send_wr_qp_type, 72]
  bind_mw: Annotated[struct_ibv_send_wr_bind_mw, 80]
  tso: Annotated[struct_ibv_send_wr_tso, 80]
# Send opcodes (mirrors enum ibv_wr_opcode); 12/13 are intentionally unassigned.
class enum_ibv_wr_opcode(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_WR_RDMA_WRITE = enum_ibv_wr_opcode.define('IBV_WR_RDMA_WRITE', 0)
IBV_WR_RDMA_WRITE_WITH_IMM = enum_ibv_wr_opcode.define('IBV_WR_RDMA_WRITE_WITH_IMM', 1)
IBV_WR_SEND = enum_ibv_wr_opcode.define('IBV_WR_SEND', 2)
IBV_WR_SEND_WITH_IMM = enum_ibv_wr_opcode.define('IBV_WR_SEND_WITH_IMM', 3)
IBV_WR_RDMA_READ = enum_ibv_wr_opcode.define('IBV_WR_RDMA_READ', 4)
IBV_WR_ATOMIC_CMP_AND_SWP = enum_ibv_wr_opcode.define('IBV_WR_ATOMIC_CMP_AND_SWP', 5)
IBV_WR_ATOMIC_FETCH_AND_ADD = enum_ibv_wr_opcode.define('IBV_WR_ATOMIC_FETCH_AND_ADD', 6)
IBV_WR_LOCAL_INV = enum_ibv_wr_opcode.define('IBV_WR_LOCAL_INV', 7)
IBV_WR_BIND_MW = enum_ibv_wr_opcode.define('IBV_WR_BIND_MW', 8)
IBV_WR_SEND_WITH_INV = enum_ibv_wr_opcode.define('IBV_WR_SEND_WITH_INV', 9)
IBV_WR_TSO = enum_ibv_wr_opcode.define('IBV_WR_TSO', 10)
IBV_WR_DRIVER1 = enum_ibv_wr_opcode.define('IBV_WR_DRIVER1', 11)
IBV_WR_FLUSH = enum_ibv_wr_opcode.define('IBV_WR_FLUSH', 14)
IBV_WR_ATOMIC_WRITE = enum_ibv_wr_opcode.define('IBV_WR_ATOMIC_WRITE', 15)
# Per-opcode payload union of a send WR (all members at offset 0).
@c.record
class struct_ibv_send_wr_wr(c.Struct):
  SIZE = 32
  rdma: Annotated[struct_ibv_send_wr_wr_rdma, 0]
  atomic: Annotated[struct_ibv_send_wr_wr_atomic, 0]
  ud: Annotated[struct_ibv_send_wr_wr_ud, 0]
# RDMA read/write target.
@c.record
class struct_ibv_send_wr_wr_rdma(c.Struct):
  SIZE = 16
  remote_addr: Annotated[uint64_t, 0]
  rkey: Annotated[uint32_t, 8]
# Atomic compare-and-swap / fetch-and-add operands.
@c.record
class struct_ibv_send_wr_wr_atomic(c.Struct):
  SIZE = 32
  remote_addr: Annotated[uint64_t, 0]
  compare_add: Annotated[uint64_t, 8]
  swap: Annotated[uint64_t, 16]
  rkey: Annotated[uint32_t, 24]
# Unreliable-datagram destination.
@c.record
class struct_ibv_send_wr_wr_ud(c.Struct):
  SIZE = 16
  ah: Annotated[c.POINTER[struct_ibv_ah], 0]
  remote_qpn: Annotated[uint32_t, 8]
  remote_qkey: Annotated[uint32_t, 12]
# Address handle (mirrors struct ibv_ah).
@c.record
class struct_ibv_ah(c.Struct):
  SIZE = 24
  context: Annotated[c.POINTER[struct_ibv_context], 0]
  pd: Annotated[c.POINTER[struct_ibv_pd], 8]
  handle: Annotated[uint32_t, 16]
# QP-type-specific union of a send WR.
@c.record
class struct_ibv_send_wr_qp_type(c.Struct):
  SIZE = 4
  xrc: Annotated[struct_ibv_send_wr_qp_type_xrc, 0]
# XRC target SRQ number.
@c.record
class struct_ibv_send_wr_qp_type_xrc(c.Struct):
  SIZE = 4
  remote_srqn: Annotated[uint32_t, 0]
# MW-bind payload of a send WR.
@c.record
class struct_ibv_send_wr_bind_mw(c.Struct):
  SIZE = 48
  mw: Annotated[c.POINTER[struct_ibv_mw], 0]
  rkey: Annotated[uint32_t, 8]
  bind_info: Annotated[struct_ibv_mw_bind_info, 16]
# TSO payload of a send WR.
@c.record
class struct_ibv_send_wr_tso(c.Struct):
  SIZE = 16
  hdr: Annotated[ctypes.c_void_p, 0]
  hdr_sz: Annotated[uint16_t, 8]
  mss: Annotated[uint16_t, 10]
# Input to extended device query (mirrors struct ibv_query_device_ex_input).
@c.record
class struct_ibv_query_device_ex_input(c.Struct):
  SIZE = 4
  comp_mask: Annotated[uint32_t, 0]
# On-demand-paging capability bits per transport.
class enum_ibv_odp_transport_cap_bits(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_ODP_SUPPORT_SEND = enum_ibv_odp_transport_cap_bits.define('IBV_ODP_SUPPORT_SEND', 1)
IBV_ODP_SUPPORT_RECV = enum_ibv_odp_transport_cap_bits.define('IBV_ODP_SUPPORT_RECV', 2)
IBV_ODP_SUPPORT_WRITE = enum_ibv_odp_transport_cap_bits.define('IBV_ODP_SUPPORT_WRITE', 4)
IBV_ODP_SUPPORT_READ = enum_ibv_odp_transport_cap_bits.define('IBV_ODP_SUPPORT_READ', 8)
IBV_ODP_SUPPORT_ATOMIC = enum_ibv_odp_transport_cap_bits.define('IBV_ODP_SUPPORT_ATOMIC', 16)
IBV_ODP_SUPPORT_SRQ_RECV = enum_ibv_odp_transport_cap_bits.define('IBV_ODP_SUPPORT_SRQ_RECV', 32)
# On-demand-paging capabilities (mirrors struct ibv_odp_caps).
@c.record
class struct_ibv_odp_caps(c.Struct):
  SIZE = 24
  general_caps: Annotated[uint64_t, 0]
  per_transport_caps: Annotated[struct_ibv_odp_caps_per_transport_caps, 8]
# Per-transport ODP capability masks.
@c.record
class struct_ibv_odp_caps_per_transport_caps(c.Struct):
  SIZE = 12
  rc_odp_caps: Annotated[uint32_t, 0]
  uc_odp_caps: Annotated[uint32_t, 4]
  ud_odp_caps: Annotated[uint32_t, 8]
# General ODP capability bits.
class enum_ibv_odp_general_caps(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_ODP_SUPPORT = enum_ibv_odp_general_caps.define('IBV_ODP_SUPPORT', 1)
IBV_ODP_SUPPORT_IMPLICIT = enum_ibv_odp_general_caps.define('IBV_ODP_SUPPORT_IMPLICIT', 2)
# TCP segmentation offload capabilities.
@c.record
class struct_ibv_tso_caps(c.Struct):
  SIZE = 8
  max_tso: Annotated[uint32_t, 0]
  supported_qpts: Annotated[uint32_t, 4]
# RSS hash function selector bits.
class enum_ibv_rx_hash_function_flags(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_RX_HASH_FUNC_TOEPLITZ = enum_ibv_rx_hash_function_flags.define('IBV_RX_HASH_FUNC_TOEPLITZ', 1)
# Packet fields that can feed the RX hash.
class enum_ibv_rx_hash_fields(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_RX_HASH_SRC_IPV4 = enum_ibv_rx_hash_fields.define('IBV_RX_HASH_SRC_IPV4', 1)
IBV_RX_HASH_DST_IPV4 = enum_ibv_rx_hash_fields.define('IBV_RX_HASH_DST_IPV4', 2)
IBV_RX_HASH_SRC_IPV6 = enum_ibv_rx_hash_fields.define('IBV_RX_HASH_SRC_IPV6', 4)
IBV_RX_HASH_DST_IPV6 = enum_ibv_rx_hash_fields.define('IBV_RX_HASH_DST_IPV6', 8)
IBV_RX_HASH_SRC_PORT_TCP = enum_ibv_rx_hash_fields.define('IBV_RX_HASH_SRC_PORT_TCP', 16)
IBV_RX_HASH_DST_PORT_TCP = enum_ibv_rx_hash_fields.define('IBV_RX_HASH_DST_PORT_TCP', 32)
IBV_RX_HASH_SRC_PORT_UDP = enum_ibv_rx_hash_fields.define('IBV_RX_HASH_SRC_PORT_UDP', 64)
IBV_RX_HASH_DST_PORT_UDP = enum_ibv_rx_hash_fields.define('IBV_RX_HASH_DST_PORT_UDP', 128)
IBV_RX_HASH_IPSEC_SPI = enum_ibv_rx_hash_fields.define('IBV_RX_HASH_IPSEC_SPI', 256)
IBV_RX_HASH_INNER = enum_ibv_rx_hash_fields.define('IBV_RX_HASH_INNER', 2147483648)
# Receive-side-scaling capabilities (note the 8-byte alignment gap before offset 16).
@c.record
class struct_ibv_rss_caps(c.Struct):
  SIZE = 32
  supported_qpts: Annotated[uint32_t, 0]
  max_rwq_indirection_tables: Annotated[uint32_t, 4]
  max_rwq_indirection_table_size: Annotated[uint32_t, 8]
  rx_hash_fields_mask: Annotated[uint64_t, 16]
  rx_hash_function: Annotated[uint8_t, 24]
# QP rate-limiting capabilities.
@c.record
class struct_ibv_packet_pacing_caps(c.Struct):
  SIZE = 12
  qp_rate_limit_min: Annotated[uint32_t, 0]
  qp_rate_limit_max: Annotated[uint32_t, 4]
  supported_qpts: Annotated[uint32_t, 8]
# Raw-packet QP capability bits.
class enum_ibv_raw_packet_caps(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_RAW_PACKET_CAP_CVLAN_STRIPPING = enum_ibv_raw_packet_caps.define('IBV_RAW_PACKET_CAP_CVLAN_STRIPPING', 1)
IBV_RAW_PACKET_CAP_SCATTER_FCS = enum_ibv_raw_packet_caps.define('IBV_RAW_PACKET_CAP_SCATTER_FCS', 2)
IBV_RAW_PACKET_CAP_IP_CSUM = enum_ibv_raw_packet_caps.define('IBV_RAW_PACKET_CAP_IP_CSUM', 4)
IBV_RAW_PACKET_CAP_DELAY_DROP = enum_ibv_raw_packet_caps.define('IBV_RAW_PACKET_CAP_DELAY_DROP', 8)
# Tag-matching capability bits.
class enum_ibv_tm_cap_flags(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_TM_CAP_RC = enum_ibv_tm_cap_flags.define('IBV_TM_CAP_RC', 1)
# Tag-matching capabilities (mirrors struct ibv_tm_caps).
@c.record
class struct_ibv_tm_caps(c.Struct):
  SIZE = 20
  max_rndv_hdr_size: Annotated[uint32_t, 0]
  max_num_tags: Annotated[uint32_t, 4]
  flags: Annotated[uint32_t, 8]
  max_ops: Annotated[uint32_t, 12]
  max_sge: Annotated[uint32_t, 16]
# CQ moderation limits.
@c.record
class struct_ibv_cq_moderation_caps(c.Struct):
  SIZE = 4
  max_cq_count: Annotated[uint16_t, 0]
  max_cq_period: Annotated[uint16_t, 2]
# Supported PCI atomic operand sizes.
class enum_ibv_pci_atomic_op_size(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_PCI_ATOMIC_OPERATION_4_BYTE_SIZE_SUP = enum_ibv_pci_atomic_op_size.define('IBV_PCI_ATOMIC_OPERATION_4_BYTE_SIZE_SUP', 1)
IBV_PCI_ATOMIC_OPERATION_8_BYTE_SIZE_SUP = enum_ibv_pci_atomic_op_size.define('IBV_PCI_ATOMIC_OPERATION_8_BYTE_SIZE_SUP', 2)
IBV_PCI_ATOMIC_OPERATION_16_BYTE_SIZE_SUP = enum_ibv_pci_atomic_op_size.define('IBV_PCI_ATOMIC_OPERATION_16_BYTE_SIZE_SUP', 4)
# PCI atomic operation capabilities.
@c.record
class struct_ibv_pci_atomic_caps(c.Struct):
  SIZE = 6
  fetch_add: Annotated[uint16_t, 0]
  swap: Annotated[uint16_t, 2]
  compare_swap: Annotated[uint16_t, 4]
# Extended device attributes (mirrors struct ibv_device_attr_ex):
# embeds the legacy struct ibv_device_attr plus the newer capability blocks.
@c.record
class struct_ibv_device_attr_ex(c.Struct):
  SIZE = 400
  orig_attr: Annotated[struct_ibv_device_attr, 0]
  comp_mask: Annotated[uint32_t, 232]
  odp_caps: Annotated[struct_ibv_odp_caps, 240]
  completion_timestamp_mask: Annotated[uint64_t, 264]
  hca_core_clock: Annotated[uint64_t, 272]
  device_cap_flags_ex: Annotated[uint64_t, 280]
  tso_caps: Annotated[struct_ibv_tso_caps, 288]
  rss_caps: Annotated[struct_ibv_rss_caps, 296]
  max_wq_type_rq: Annotated[uint32_t, 328]
  packet_pacing_caps: Annotated[struct_ibv_packet_pacing_caps, 332]
  raw_packet_caps: Annotated[uint32_t, 344]
  tm_caps: Annotated[struct_ibv_tm_caps, 348]
  cq_mod_caps: Annotated[struct_ibv_cq_moderation_caps, 368]
  max_dm_size: Annotated[uint64_t, 376]
  pci_atomic_caps: Annotated[struct_ibv_pci_atomic_caps, 384]
  xrc_odp_caps: Annotated[uint32_t, 392]
  phys_port_cnt_ex: Annotated[uint32_t, 396]
# Path MTU values (mirrors enum ibv_mtu); encoded, not byte counts.
class enum_ibv_mtu(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_MTU_256 = enum_ibv_mtu.define('IBV_MTU_256', 1)
IBV_MTU_512 = enum_ibv_mtu.define('IBV_MTU_512', 2)
IBV_MTU_1024 = enum_ibv_mtu.define('IBV_MTU_1024', 3)
IBV_MTU_2048 = enum_ibv_mtu.define('IBV_MTU_2048', 4)
IBV_MTU_4096 = enum_ibv_mtu.define('IBV_MTU_4096', 5)
# Physical port states (mirrors enum ibv_port_state).
class enum_ibv_port_state(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_PORT_NOP = enum_ibv_port_state.define('IBV_PORT_NOP', 0)
IBV_PORT_DOWN = enum_ibv_port_state.define('IBV_PORT_DOWN', 1)
IBV_PORT_INIT = enum_ibv_port_state.define('IBV_PORT_INIT', 2)
IBV_PORT_ARMED = enum_ibv_port_state.define('IBV_PORT_ARMED', 3)
IBV_PORT_ACTIVE = enum_ibv_port_state.define('IBV_PORT_ACTIVE', 4)
IBV_PORT_ACTIVE_DEFER = enum_ibv_port_state.define('IBV_PORT_ACTIVE_DEFER', 5)
# Anonymous C enum: link-layer identifiers.
class _anonenum0(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_LINK_LAYER_UNSPECIFIED = _anonenum0.define('IBV_LINK_LAYER_UNSPECIFIED', 0)
IBV_LINK_LAYER_INFINIBAND = _anonenum0.define('IBV_LINK_LAYER_INFINIBAND', 1)
IBV_LINK_LAYER_ETHERNET = _anonenum0.define('IBV_LINK_LAYER_ETHERNET', 2)
# Port capability bits (mirrors enum ibv_port_cap_flags); mask is sparse by design.
class enum_ibv_port_cap_flags(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_PORT_SM = enum_ibv_port_cap_flags.define('IBV_PORT_SM', 2)
IBV_PORT_NOTICE_SUP = enum_ibv_port_cap_flags.define('IBV_PORT_NOTICE_SUP', 4)
IBV_PORT_TRAP_SUP = enum_ibv_port_cap_flags.define('IBV_PORT_TRAP_SUP', 8)
IBV_PORT_OPT_IPD_SUP = enum_ibv_port_cap_flags.define('IBV_PORT_OPT_IPD_SUP', 16)
IBV_PORT_AUTO_MIGR_SUP = enum_ibv_port_cap_flags.define('IBV_PORT_AUTO_MIGR_SUP', 32)
IBV_PORT_SL_MAP_SUP = enum_ibv_port_cap_flags.define('IBV_PORT_SL_MAP_SUP', 64)
IBV_PORT_MKEY_NVRAM = enum_ibv_port_cap_flags.define('IBV_PORT_MKEY_NVRAM', 128)
IBV_PORT_PKEY_NVRAM = enum_ibv_port_cap_flags.define('IBV_PORT_PKEY_NVRAM', 256)
IBV_PORT_LED_INFO_SUP = enum_ibv_port_cap_flags.define('IBV_PORT_LED_INFO_SUP', 512)
IBV_PORT_SYS_IMAGE_GUID_SUP = enum_ibv_port_cap_flags.define('IBV_PORT_SYS_IMAGE_GUID_SUP', 2048)
IBV_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = enum_ibv_port_cap_flags.define('IBV_PORT_PKEY_SW_EXT_PORT_TRAP_SUP', 4096)
IBV_PORT_EXTENDED_SPEEDS_SUP = enum_ibv_port_cap_flags.define('IBV_PORT_EXTENDED_SPEEDS_SUP', 16384)
IBV_PORT_CAP_MASK2_SUP = enum_ibv_port_cap_flags.define('IBV_PORT_CAP_MASK2_SUP', 32768)
IBV_PORT_CM_SUP = enum_ibv_port_cap_flags.define('IBV_PORT_CM_SUP', 65536)
IBV_PORT_SNMP_TUNNEL_SUP = enum_ibv_port_cap_flags.define('IBV_PORT_SNMP_TUNNEL_SUP', 131072)
IBV_PORT_REINIT_SUP = enum_ibv_port_cap_flags.define('IBV_PORT_REINIT_SUP', 262144)
IBV_PORT_DEVICE_MGMT_SUP = enum_ibv_port_cap_flags.define('IBV_PORT_DEVICE_MGMT_SUP', 524288)
IBV_PORT_VENDOR_CLASS_SUP = enum_ibv_port_cap_flags.define('IBV_PORT_VENDOR_CLASS_SUP', 1048576)
IBV_PORT_DR_NOTICE_SUP = enum_ibv_port_cap_flags.define('IBV_PORT_DR_NOTICE_SUP', 2097152)
IBV_PORT_CAP_MASK_NOTICE_SUP = enum_ibv_port_cap_flags.define('IBV_PORT_CAP_MASK_NOTICE_SUP', 4194304)
IBV_PORT_BOOT_MGMT_SUP = enum_ibv_port_cap_flags.define('IBV_PORT_BOOT_MGMT_SUP', 8388608)
IBV_PORT_LINK_LATENCY_SUP = enum_ibv_port_cap_flags.define('IBV_PORT_LINK_LATENCY_SUP', 16777216)
IBV_PORT_CLIENT_REG_SUP = enum_ibv_port_cap_flags.define('IBV_PORT_CLIENT_REG_SUP', 33554432)
IBV_PORT_IP_BASED_GIDS = enum_ibv_port_cap_flags.define('IBV_PORT_IP_BASED_GIDS', 67108864)
# Second port capability mask (mirrors enum ibv_port_cap_flags2).
class enum_ibv_port_cap_flags2(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_PORT_SET_NODE_DESC_SUP = enum_ibv_port_cap_flags2.define('IBV_PORT_SET_NODE_DESC_SUP', 1)
IBV_PORT_INFO_EXT_SUP = enum_ibv_port_cap_flags2.define('IBV_PORT_INFO_EXT_SUP', 2)
IBV_PORT_VIRT_SUP = enum_ibv_port_cap_flags2.define('IBV_PORT_VIRT_SUP', 4)
IBV_PORT_SWITCH_PORT_STATE_TABLE_SUP = enum_ibv_port_cap_flags2.define('IBV_PORT_SWITCH_PORT_STATE_TABLE_SUP', 8)
IBV_PORT_LINK_WIDTH_2X_SUP = enum_ibv_port_cap_flags2.define('IBV_PORT_LINK_WIDTH_2X_SUP', 16)
IBV_PORT_LINK_SPEED_HDR_SUP = enum_ibv_port_cap_flags2.define('IBV_PORT_LINK_SPEED_HDR_SUP', 32)
IBV_PORT_LINK_SPEED_NDR_SUP = enum_ibv_port_cap_flags2.define('IBV_PORT_LINK_SPEED_NDR_SUP', 1024)
IBV_PORT_LINK_SPEED_XDR_SUP = enum_ibv_port_cap_flags2.define('IBV_PORT_LINK_SPEED_XDR_SUP', 4096)
# Per-port attributes (mirrors struct ibv_port_attr).
@c.record
class struct_ibv_port_attr(c.Struct):
  SIZE = 56
  state: Annotated[enum_ibv_port_state, 0]
  max_mtu: Annotated[enum_ibv_mtu, 4]
  active_mtu: Annotated[enum_ibv_mtu, 8]
  gid_tbl_len: Annotated[Annotated[int, ctypes.c_int32], 12]
  port_cap_flags: Annotated[uint32_t, 16]
  max_msg_sz: Annotated[uint32_t, 20]
  bad_pkey_cntr: Annotated[uint32_t, 24]
  qkey_viol_cntr: Annotated[uint32_t, 28]
  pkey_tbl_len: Annotated[uint16_t, 32]
  lid: Annotated[uint16_t, 34]
  sm_lid: Annotated[uint16_t, 36]
  lmc: Annotated[uint8_t, 38]
  max_vl_num: Annotated[uint8_t, 39]
  sm_sl: Annotated[uint8_t, 40]
  subnet_timeout: Annotated[uint8_t, 41]
  init_type_reply: Annotated[uint8_t, 42]
  active_width: Annotated[uint8_t, 43]
  active_speed: Annotated[uint8_t, 44]
  phys_state: Annotated[uint8_t, 45]
  link_layer: Annotated[uint8_t, 46]
  flags: Annotated[uint8_t, 47]
  port_cap_flags2: Annotated[uint16_t, 48]
  active_speed_ex: Annotated[uint32_t, 52]
# Asynchronous event types (mirrors enum ibv_event_type).
class enum_ibv_event_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_EVENT_CQ_ERR = enum_ibv_event_type.define('IBV_EVENT_CQ_ERR', 0)
IBV_EVENT_QP_FATAL = enum_ibv_event_type.define('IBV_EVENT_QP_FATAL', 1)
IBV_EVENT_QP_REQ_ERR = enum_ibv_event_type.define('IBV_EVENT_QP_REQ_ERR', 2)
IBV_EVENT_QP_ACCESS_ERR = enum_ibv_event_type.define('IBV_EVENT_QP_ACCESS_ERR', 3)
IBV_EVENT_COMM_EST = enum_ibv_event_type.define('IBV_EVENT_COMM_EST', 4)
IBV_EVENT_SQ_DRAINED = enum_ibv_event_type.define('IBV_EVENT_SQ_DRAINED', 5)
IBV_EVENT_PATH_MIG = enum_ibv_event_type.define('IBV_EVENT_PATH_MIG', 6)
IBV_EVENT_PATH_MIG_ERR = enum_ibv_event_type.define('IBV_EVENT_PATH_MIG_ERR', 7)
IBV_EVENT_DEVICE_FATAL = enum_ibv_event_type.define('IBV_EVENT_DEVICE_FATAL', 8)
IBV_EVENT_PORT_ACTIVE = enum_ibv_event_type.define('IBV_EVENT_PORT_ACTIVE', 9)
IBV_EVENT_PORT_ERR = enum_ibv_event_type.define('IBV_EVENT_PORT_ERR', 10)
IBV_EVENT_LID_CHANGE = enum_ibv_event_type.define('IBV_EVENT_LID_CHANGE', 11)
IBV_EVENT_PKEY_CHANGE = enum_ibv_event_type.define('IBV_EVENT_PKEY_CHANGE', 12)
IBV_EVENT_SM_CHANGE = enum_ibv_event_type.define('IBV_EVENT_SM_CHANGE', 13)
IBV_EVENT_SRQ_ERR = enum_ibv_event_type.define('IBV_EVENT_SRQ_ERR', 14)
IBV_EVENT_SRQ_LIMIT_REACHED = enum_ibv_event_type.define('IBV_EVENT_SRQ_LIMIT_REACHED', 15)
IBV_EVENT_QP_LAST_WQE_REACHED = enum_ibv_event_type.define('IBV_EVENT_QP_LAST_WQE_REACHED', 16)
IBV_EVENT_CLIENT_REREGISTER = enum_ibv_event_type.define('IBV_EVENT_CLIENT_REREGISTER', 17)
IBV_EVENT_GID_CHANGE = enum_ibv_event_type.define('IBV_EVENT_GID_CHANGE', 18)
IBV_EVENT_WQ_FATAL = enum_ibv_event_type.define('IBV_EVENT_WQ_FATAL', 19)
# Async event record (mirrors struct ibv_async_event).
@c.record
class struct_ibv_async_event(c.Struct):
  SIZE = 16
  element: Annotated[struct_ibv_async_event_element, 0]
  event_type: Annotated[enum_ibv_event_type, 8]
# Event source union: all members share offset 0; which one is valid
# depends on event_type.
@c.record
class struct_ibv_async_event_element(c.Struct):
  SIZE = 8
  cq: Annotated[c.POINTER[struct_ibv_cq], 0]
  qp: Annotated[c.POINTER[struct_ibv_qp], 0]
  srq: Annotated[c.POINTER[struct_ibv_srq], 0]
  wq: Annotated[c.POINTER[struct_ibv_wq], 0]
  port_num: Annotated[Annotated[int, ctypes.c_int32], 0]
# Work queue (mirrors struct ibv_wq); post_recv is a C function pointer field.
@c.record
class struct_ibv_wq(c.Struct):
  SIZE = 152
  context: Annotated[c.POINTER[struct_ibv_context], 0]
  wq_context: Annotated[ctypes.c_void_p, 8]
  pd: Annotated[c.POINTER[struct_ibv_pd], 16]
  cq: Annotated[c.POINTER[struct_ibv_cq], 24]
  wq_num: Annotated[uint32_t, 32]
  handle: Annotated[uint32_t, 36]
  state: Annotated[enum_ibv_wq_state, 40]
  wq_type: Annotated[enum_ibv_wq_type, 44]
  post_recv: Annotated[c.CFUNCTYPE[Annotated[int, ctypes.c_int32], [c.POINTER[struct_ibv_wq], c.POINTER[struct_ibv_recv_wr], c.POINTER[c.POINTER[struct_ibv_recv_wr]]]], 48]
  mutex: Annotated[pthread_mutex_t, 56]
  cond: Annotated[pthread_cond_t, 96]
  events_completed: Annotated[uint32_t, 144]
  comp_mask: Annotated[uint32_t, 148]
# Work-queue states (mirrors enum ibv_wq_state).
class enum_ibv_wq_state(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_WQS_RESET = enum_ibv_wq_state.define('IBV_WQS_RESET', 0)
IBV_WQS_RDY = enum_ibv_wq_state.define('IBV_WQS_RDY', 1)
IBV_WQS_ERR = enum_ibv_wq_state.define('IBV_WQS_ERR', 2)
IBV_WQS_UNKNOWN = enum_ibv_wq_state.define('IBV_WQS_UNKNOWN', 3)
# Work-queue types (mirrors enum ibv_wq_type).
class enum_ibv_wq_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_WQT_RQ = enum_ibv_wq_type.define('IBV_WQT_RQ', 0)
# Binds libibverbs' ibv_wc_status_str: returns a C string describing a status code.
@dll.bind
def ibv_wc_status_str(status:enum_ibv_wc_status) -> c.POINTER[Annotated[bytes, ctypes.c_char]]: ...
# Anonymous C enum: bit position of the IP-checksum-OK flag in wc_flags.
class _anonenum1(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_WC_IP_CSUM_OK_SHIFT = _anonenum1.define('IBV_WC_IP_CSUM_OK_SHIFT', 2)
# Per-completion fields requestable on an extended CQ (mirrors enum ibv_create_cq_wc_flags).
class enum_ibv_create_cq_wc_flags(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_WC_EX_WITH_BYTE_LEN = enum_ibv_create_cq_wc_flags.define('IBV_WC_EX_WITH_BYTE_LEN', 1)
IBV_WC_EX_WITH_IMM = enum_ibv_create_cq_wc_flags.define('IBV_WC_EX_WITH_IMM', 2)
IBV_WC_EX_WITH_QP_NUM = enum_ibv_create_cq_wc_flags.define('IBV_WC_EX_WITH_QP_NUM', 4)
IBV_WC_EX_WITH_SRC_QP = enum_ibv_create_cq_wc_flags.define('IBV_WC_EX_WITH_SRC_QP', 8)
IBV_WC_EX_WITH_SLID = enum_ibv_create_cq_wc_flags.define('IBV_WC_EX_WITH_SLID', 16)
IBV_WC_EX_WITH_SL = enum_ibv_create_cq_wc_flags.define('IBV_WC_EX_WITH_SL', 32)
IBV_WC_EX_WITH_DLID_PATH_BITS = enum_ibv_create_cq_wc_flags.define('IBV_WC_EX_WITH_DLID_PATH_BITS', 64)
IBV_WC_EX_WITH_COMPLETION_TIMESTAMP = enum_ibv_create_cq_wc_flags.define('IBV_WC_EX_WITH_COMPLETION_TIMESTAMP', 128)
IBV_WC_EX_WITH_CVLAN = enum_ibv_create_cq_wc_flags.define('IBV_WC_EX_WITH_CVLAN', 256)
IBV_WC_EX_WITH_FLOW_TAG = enum_ibv_create_cq_wc_flags.define('IBV_WC_EX_WITH_FLOW_TAG', 512)
IBV_WC_EX_WITH_TM_INFO = enum_ibv_create_cq_wc_flags.define('IBV_WC_EX_WITH_TM_INFO', 1024)
IBV_WC_EX_WITH_COMPLETION_TIMESTAMP_WALLCLOCK = enum_ibv_create_cq_wc_flags.define('IBV_WC_EX_WITH_COMPLETION_TIMESTAMP_WALLCLOCK', 2048)
# Anonymous C enum: union of the first seven WITH_* bits above (127 = 0x7f).
class _anonenum2(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_WC_STANDARD_FLAGS = _anonenum2.define('IBV_WC_STANDARD_FLAGS', 127)
# Anonymous C enum: union of all twelve WITH_* bits above (4095 = 0xfff).
class _anonenum3(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_CREATE_CQ_SUP_WC_FLAGS = _anonenum3.define('IBV_CREATE_CQ_SUP_WC_FLAGS', 4095)
# Flags reported in struct_ibv_wc.wc_flags (mirrors enum ibv_wc_flags).
class enum_ibv_wc_flags(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_WC_GRH = enum_ibv_wc_flags.define('IBV_WC_GRH', 1)
IBV_WC_WITH_IMM = enum_ibv_wc_flags.define('IBV_WC_WITH_IMM', 2)
IBV_WC_IP_CSUM_OK = enum_ibv_wc_flags.define('IBV_WC_IP_CSUM_OK', 4)
IBV_WC_WITH_INV = enum_ibv_wc_flags.define('IBV_WC_WITH_INV', 8)
IBV_WC_TM_SYNC_REQ = enum_ibv_wc_flags.define('IBV_WC_TM_SYNC_REQ', 16)
IBV_WC_TM_MATCH = enum_ibv_wc_flags.define('IBV_WC_TM_MATCH', 32)
IBV_WC_TM_DATA_VALID = enum_ibv_wc_flags.define('IBV_WC_TM_DATA_VALID', 64)
# Memory-region access permission bits (mirrors enum ibv_access_flags).
class enum_ibv_access_flags(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_ACCESS_LOCAL_WRITE = enum_ibv_access_flags.define('IBV_ACCESS_LOCAL_WRITE', 1)
IBV_ACCESS_REMOTE_WRITE = enum_ibv_access_flags.define('IBV_ACCESS_REMOTE_WRITE', 2)
IBV_ACCESS_REMOTE_READ = enum_ibv_access_flags.define('IBV_ACCESS_REMOTE_READ', 4)
IBV_ACCESS_REMOTE_ATOMIC = enum_ibv_access_flags.define('IBV_ACCESS_REMOTE_ATOMIC', 8)
IBV_ACCESS_MW_BIND = enum_ibv_access_flags.define('IBV_ACCESS_MW_BIND', 16)
IBV_ACCESS_ZERO_BASED = enum_ibv_access_flags.define('IBV_ACCESS_ZERO_BASED', 32)
IBV_ACCESS_ON_DEMAND = enum_ibv_access_flags.define('IBV_ACCESS_ON_DEMAND', 64)
IBV_ACCESS_HUGETLB = enum_ibv_access_flags.define('IBV_ACCESS_HUGETLB', 128)
IBV_ACCESS_FLUSH_GLOBAL = enum_ibv_access_flags.define('IBV_ACCESS_FLUSH_GLOBAL', 256)
IBV_ACCESS_FLUSH_PERSISTENT = enum_ibv_access_flags.define('IBV_ACCESS_FLUSH_PERSISTENT', 512)
IBV_ACCESS_RELAXED_ORDERING = enum_ibv_access_flags.define('IBV_ACCESS_RELAXED_ORDERING', 1048576)
@c.record
class struct_ibv_td_init_attr(c.Struct):
SIZE = 4
comp_mask: Annotated[uint32_t, 0]
@c.record
class struct_ibv_td(c.Struct):
SIZE = 8
context: Annotated[c.POINTER[struct_ibv_context], 0]
class enum_ibv_xrcd_init_attr_mask(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_XRCD_INIT_ATTR_FD = enum_ibv_xrcd_init_attr_mask.define('IBV_XRCD_INIT_ATTR_FD', 1)
IBV_XRCD_INIT_ATTR_OFLAGS = enum_ibv_xrcd_init_attr_mask.define('IBV_XRCD_INIT_ATTR_OFLAGS', 2)
IBV_XRCD_INIT_ATTR_RESERVED = enum_ibv_xrcd_init_attr_mask.define('IBV_XRCD_INIT_ATTR_RESERVED', 4)
@c.record
class struct_ibv_xrcd_init_attr(c.Struct):
SIZE = 12
comp_mask: Annotated[uint32_t, 0]
fd: Annotated[Annotated[int, ctypes.c_int32], 4]
oflags: Annotated[Annotated[int, ctypes.c_int32], 8]
@c.record
class struct_ibv_xrcd(c.Struct):
SIZE = 8
context: Annotated[c.POINTER[struct_ibv_context], 0]
class enum_ibv_rereg_mr_flags(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_REREG_MR_CHANGE_TRANSLATION = enum_ibv_rereg_mr_flags.define('IBV_REREG_MR_CHANGE_TRANSLATION', 1)
IBV_REREG_MR_CHANGE_PD = enum_ibv_rereg_mr_flags.define('IBV_REREG_MR_CHANGE_PD', 2)
IBV_REREG_MR_CHANGE_ACCESS = enum_ibv_rereg_mr_flags.define('IBV_REREG_MR_CHANGE_ACCESS', 4)
IBV_REREG_MR_FLAGS_SUPPORTED = enum_ibv_rereg_mr_flags.define('IBV_REREG_MR_FLAGS_SUPPORTED', 7)
# Global routing header info: GRH route attributes and the on-the-wire GRH itself.
@c.record
class struct_ibv_global_route(c.Struct):
  SIZE = 24
  dgid: Annotated[union_ibv_gid, 0]
  flow_label: Annotated[uint32_t, 16]
  sgid_index: Annotated[uint8_t, 20]
  hop_limit: Annotated[uint8_t, 21]
  traffic_class: Annotated[uint8_t, 22]
@c.record
class struct_ibv_grh(c.Struct):
  SIZE = 40
  version_tclass_flow: Annotated[Annotated[int, ctypes.c_uint32], 0]
  paylen: Annotated[Annotated[int, ctypes.c_uint16], 4]
  next_hdr: Annotated[uint8_t, 6]
  hop_limit: Annotated[uint8_t, 7]
  sgid: Annotated[union_ibv_gid, 8]
  dgid: Annotated[union_ibv_gid, 24]
# Big-endian 16-bit alias (byte order is by convention only; storage is plain u16).
__be16: TypeAlias = Annotated[int, ctypes.c_uint16]
# Static link rates. Values are protocol codes, not sorted by speed (e.g. 10 Gbps
# is 3 while 5 Gbps is 5) -- use the conversion helpers below to order them.
class enum_ibv_rate(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_RATE_MAX = enum_ibv_rate.define('IBV_RATE_MAX', 0)
IBV_RATE_2_5_GBPS = enum_ibv_rate.define('IBV_RATE_2_5_GBPS', 2)
IBV_RATE_5_GBPS = enum_ibv_rate.define('IBV_RATE_5_GBPS', 5)
IBV_RATE_10_GBPS = enum_ibv_rate.define('IBV_RATE_10_GBPS', 3)
IBV_RATE_20_GBPS = enum_ibv_rate.define('IBV_RATE_20_GBPS', 6)
IBV_RATE_30_GBPS = enum_ibv_rate.define('IBV_RATE_30_GBPS', 4)
IBV_RATE_40_GBPS = enum_ibv_rate.define('IBV_RATE_40_GBPS', 7)
IBV_RATE_60_GBPS = enum_ibv_rate.define('IBV_RATE_60_GBPS', 8)
IBV_RATE_80_GBPS = enum_ibv_rate.define('IBV_RATE_80_GBPS', 9)
IBV_RATE_120_GBPS = enum_ibv_rate.define('IBV_RATE_120_GBPS', 10)
IBV_RATE_14_GBPS = enum_ibv_rate.define('IBV_RATE_14_GBPS', 11)
IBV_RATE_56_GBPS = enum_ibv_rate.define('IBV_RATE_56_GBPS', 12)
IBV_RATE_112_GBPS = enum_ibv_rate.define('IBV_RATE_112_GBPS', 13)
IBV_RATE_168_GBPS = enum_ibv_rate.define('IBV_RATE_168_GBPS', 14)
IBV_RATE_25_GBPS = enum_ibv_rate.define('IBV_RATE_25_GBPS', 15)
IBV_RATE_100_GBPS = enum_ibv_rate.define('IBV_RATE_100_GBPS', 16)
IBV_RATE_200_GBPS = enum_ibv_rate.define('IBV_RATE_200_GBPS', 17)
IBV_RATE_300_GBPS = enum_ibv_rate.define('IBV_RATE_300_GBPS', 18)
IBV_RATE_28_GBPS = enum_ibv_rate.define('IBV_RATE_28_GBPS', 19)
IBV_RATE_50_GBPS = enum_ibv_rate.define('IBV_RATE_50_GBPS', 20)
IBV_RATE_400_GBPS = enum_ibv_rate.define('IBV_RATE_400_GBPS', 21)
IBV_RATE_600_GBPS = enum_ibv_rate.define('IBV_RATE_600_GBPS', 22)
IBV_RATE_800_GBPS = enum_ibv_rate.define('IBV_RATE_800_GBPS', 23)
IBV_RATE_1200_GBPS = enum_ibv_rate.define('IBV_RATE_1200_GBPS', 24)
# Library bindings converting between enum_ibv_rate codes and either a
# multiple of the 2.5 Gbps base rate or an absolute Mbps value.
@dll.bind
def ibv_rate_to_mult(rate:enum_ibv_rate) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def mult_to_ibv_rate(mult:Annotated[int, ctypes.c_int32]) -> enum_ibv_rate: ...
@dll.bind
def ibv_rate_to_mbps(rate:enum_ibv_rate) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def mbps_to_ibv_rate(mbps:Annotated[int, ctypes.c_int32]) -> enum_ibv_rate: ...
# Address-handle attributes (grh only meaningful when is_global is nonzero).
@c.record
class struct_ibv_ah_attr(c.Struct):
  SIZE = 32
  grh: Annotated[struct_ibv_global_route, 0]
  dlid: Annotated[uint16_t, 24]
  sl: Annotated[uint8_t, 26]
  src_path_bits: Annotated[uint8_t, 27]
  static_rate: Annotated[uint8_t, 28]
  is_global: Annotated[uint8_t, 29]
  port_num: Annotated[uint8_t, 30]
# Shared receive queue (SRQ): attribute masks, attributes, and both the basic
# and extended init-attr records (the _ex variant adds type/pd/xrcd/cq/tm_cap).
class enum_ibv_srq_attr_mask(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_SRQ_MAX_WR = enum_ibv_srq_attr_mask.define('IBV_SRQ_MAX_WR', 1)
IBV_SRQ_LIMIT = enum_ibv_srq_attr_mask.define('IBV_SRQ_LIMIT', 2)
@c.record
class struct_ibv_srq_attr(c.Struct):
  SIZE = 12
  max_wr: Annotated[uint32_t, 0]
  max_sge: Annotated[uint32_t, 4]
  srq_limit: Annotated[uint32_t, 8]
@c.record
class struct_ibv_srq_init_attr(c.Struct):
  SIZE = 24
  srq_context: Annotated[ctypes.c_void_p, 0]
  attr: Annotated[struct_ibv_srq_attr, 8]
class enum_ibv_srq_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_SRQT_BASIC = enum_ibv_srq_type.define('IBV_SRQT_BASIC', 0)
IBV_SRQT_XRC = enum_ibv_srq_type.define('IBV_SRQT_XRC', 1)
IBV_SRQT_TM = enum_ibv_srq_type.define('IBV_SRQT_TM', 2)
class enum_ibv_srq_init_attr_mask(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_SRQ_INIT_ATTR_TYPE = enum_ibv_srq_init_attr_mask.define('IBV_SRQ_INIT_ATTR_TYPE', 1)
IBV_SRQ_INIT_ATTR_PD = enum_ibv_srq_init_attr_mask.define('IBV_SRQ_INIT_ATTR_PD', 2)
IBV_SRQ_INIT_ATTR_XRCD = enum_ibv_srq_init_attr_mask.define('IBV_SRQ_INIT_ATTR_XRCD', 4)
IBV_SRQ_INIT_ATTR_CQ = enum_ibv_srq_init_attr_mask.define('IBV_SRQ_INIT_ATTR_CQ', 8)
IBV_SRQ_INIT_ATTR_TM = enum_ibv_srq_init_attr_mask.define('IBV_SRQ_INIT_ATTR_TM', 16)
IBV_SRQ_INIT_ATTR_RESERVED = enum_ibv_srq_init_attr_mask.define('IBV_SRQ_INIT_ATTR_RESERVED', 32)
@c.record
class struct_ibv_tm_cap(c.Struct):
  SIZE = 8
  max_num_tags: Annotated[uint32_t, 0]
  max_ops: Annotated[uint32_t, 4]
@c.record
class struct_ibv_srq_init_attr_ex(c.Struct):
  SIZE = 64
  srq_context: Annotated[ctypes.c_void_p, 0]
  attr: Annotated[struct_ibv_srq_attr, 8]
  comp_mask: Annotated[uint32_t, 20]
  srq_type: Annotated[enum_ibv_srq_type, 24]
  pd: Annotated[c.POINTER[struct_ibv_pd], 32]
  xrcd: Annotated[c.POINTER[struct_ibv_xrcd], 40]
  cq: Annotated[c.POINTER[struct_ibv_cq], 48]
  tm_cap: Annotated[struct_ibv_tm_cap, 56]
# Work queue (WQ): creation masks/flags plus the init-attr and modify-attr records.
class enum_ibv_wq_init_attr_mask(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_WQ_INIT_ATTR_FLAGS = enum_ibv_wq_init_attr_mask.define('IBV_WQ_INIT_ATTR_FLAGS', 1)
IBV_WQ_INIT_ATTR_RESERVED = enum_ibv_wq_init_attr_mask.define('IBV_WQ_INIT_ATTR_RESERVED', 2)
class enum_ibv_wq_flags(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_WQ_FLAGS_CVLAN_STRIPPING = enum_ibv_wq_flags.define('IBV_WQ_FLAGS_CVLAN_STRIPPING', 1)
IBV_WQ_FLAGS_SCATTER_FCS = enum_ibv_wq_flags.define('IBV_WQ_FLAGS_SCATTER_FCS', 2)
IBV_WQ_FLAGS_DELAY_DROP = enum_ibv_wq_flags.define('IBV_WQ_FLAGS_DELAY_DROP', 4)
IBV_WQ_FLAGS_PCI_WRITE_END_PADDING = enum_ibv_wq_flags.define('IBV_WQ_FLAGS_PCI_WRITE_END_PADDING', 8)
IBV_WQ_FLAGS_RESERVED = enum_ibv_wq_flags.define('IBV_WQ_FLAGS_RESERVED', 16)
@c.record
class struct_ibv_wq_init_attr(c.Struct):
  SIZE = 48
  wq_context: Annotated[ctypes.c_void_p, 0]
  wq_type: Annotated[enum_ibv_wq_type, 8]
  max_wr: Annotated[uint32_t, 12]
  max_sge: Annotated[uint32_t, 16]
  pd: Annotated[c.POINTER[struct_ibv_pd], 24]
  cq: Annotated[c.POINTER[struct_ibv_cq], 32]
  comp_mask: Annotated[uint32_t, 40]
  create_flags: Annotated[uint32_t, 44]
class enum_ibv_wq_attr_mask(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_WQ_ATTR_STATE = enum_ibv_wq_attr_mask.define('IBV_WQ_ATTR_STATE', 1)
IBV_WQ_ATTR_CURR_STATE = enum_ibv_wq_attr_mask.define('IBV_WQ_ATTR_CURR_STATE', 2)
IBV_WQ_ATTR_FLAGS = enum_ibv_wq_attr_mask.define('IBV_WQ_ATTR_FLAGS', 4)
IBV_WQ_ATTR_RESERVED = enum_ibv_wq_attr_mask.define('IBV_WQ_ATTR_RESERVED', 8)
@c.record
class struct_ibv_wq_attr(c.Struct):
  SIZE = 20
  attr_mask: Annotated[uint32_t, 0]
  wq_state: Annotated[enum_ibv_wq_state, 4]
  curr_wq_state: Annotated[enum_ibv_wq_state, 8]
  flags: Annotated[uint32_t, 12]
  flags_mask: Annotated[uint32_t, 16]
# Receive work queue (RWQ) indirection table: handle record, comp-mask enum,
# and init-attr (table size is given as log2 via log_ind_tbl_size).
@c.record
class struct_ibv_rwq_ind_table(c.Struct):
  SIZE = 24
  context: Annotated[c.POINTER[struct_ibv_context], 0]
  ind_tbl_handle: Annotated[Annotated[int, ctypes.c_int32], 8]
  ind_tbl_num: Annotated[Annotated[int, ctypes.c_int32], 12]
  comp_mask: Annotated[uint32_t, 16]
class enum_ibv_ind_table_init_attr_mask(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_CREATE_IND_TABLE_RESERVED = enum_ibv_ind_table_init_attr_mask.define('IBV_CREATE_IND_TABLE_RESERVED', 1)
@c.record
class struct_ibv_rwq_ind_table_init_attr(c.Struct):
  SIZE = 24
  log_ind_tbl_size: Annotated[uint32_t, 0]
  ind_tbl: Annotated[c.POINTER[c.POINTER[struct_ibv_wq]], 8]
  comp_mask: Annotated[uint32_t, 16]
# Queue pair (QP) capability limits and the basic QP creation attributes.
@c.record
class struct_ibv_qp_cap(c.Struct):
  SIZE = 20
  max_send_wr: Annotated[uint32_t, 0]
  max_recv_wr: Annotated[uint32_t, 4]
  max_send_sge: Annotated[uint32_t, 8]
  max_recv_sge: Annotated[uint32_t, 12]
  max_inline_data: Annotated[uint32_t, 16]
@c.record
class struct_ibv_qp_init_attr(c.Struct):
  SIZE = 64
  qp_context: Annotated[ctypes.c_void_p, 0]
  send_cq: Annotated[c.POINTER[struct_ibv_cq], 8]
  recv_cq: Annotated[c.POINTER[struct_ibv_cq], 16]
  srq: Annotated[c.POINTER[struct_ibv_srq], 24]
  cap: Annotated[struct_ibv_qp_cap, 32]
  qp_type: Annotated[enum_ibv_qp_type, 52]
  sq_sig_all: Annotated[Annotated[int, ctypes.c_int32], 56]
# QP creation bitmasks: extended init-attr comp_mask bits, create flags, and
# the per-opcode send-ops flags used with the extended QP (ibv_qp_ex) API.
class enum_ibv_qp_init_attr_mask(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_QP_INIT_ATTR_PD = enum_ibv_qp_init_attr_mask.define('IBV_QP_INIT_ATTR_PD', 1)
IBV_QP_INIT_ATTR_XRCD = enum_ibv_qp_init_attr_mask.define('IBV_QP_INIT_ATTR_XRCD', 2)
IBV_QP_INIT_ATTR_CREATE_FLAGS = enum_ibv_qp_init_attr_mask.define('IBV_QP_INIT_ATTR_CREATE_FLAGS', 4)
IBV_QP_INIT_ATTR_MAX_TSO_HEADER = enum_ibv_qp_init_attr_mask.define('IBV_QP_INIT_ATTR_MAX_TSO_HEADER', 8)
IBV_QP_INIT_ATTR_IND_TABLE = enum_ibv_qp_init_attr_mask.define('IBV_QP_INIT_ATTR_IND_TABLE', 16)
IBV_QP_INIT_ATTR_RX_HASH = enum_ibv_qp_init_attr_mask.define('IBV_QP_INIT_ATTR_RX_HASH', 32)
IBV_QP_INIT_ATTR_SEND_OPS_FLAGS = enum_ibv_qp_init_attr_mask.define('IBV_QP_INIT_ATTR_SEND_OPS_FLAGS', 64)
class enum_ibv_qp_create_flags(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_QP_CREATE_BLOCK_SELF_MCAST_LB = enum_ibv_qp_create_flags.define('IBV_QP_CREATE_BLOCK_SELF_MCAST_LB', 2)
IBV_QP_CREATE_SCATTER_FCS = enum_ibv_qp_create_flags.define('IBV_QP_CREATE_SCATTER_FCS', 256)
IBV_QP_CREATE_CVLAN_STRIPPING = enum_ibv_qp_create_flags.define('IBV_QP_CREATE_CVLAN_STRIPPING', 512)
IBV_QP_CREATE_SOURCE_QPN = enum_ibv_qp_create_flags.define('IBV_QP_CREATE_SOURCE_QPN', 1024)
IBV_QP_CREATE_PCI_WRITE_END_PADDING = enum_ibv_qp_create_flags.define('IBV_QP_CREATE_PCI_WRITE_END_PADDING', 2048)
class enum_ibv_qp_create_send_ops_flags(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_QP_EX_WITH_RDMA_WRITE = enum_ibv_qp_create_send_ops_flags.define('IBV_QP_EX_WITH_RDMA_WRITE', 1)
IBV_QP_EX_WITH_RDMA_WRITE_WITH_IMM = enum_ibv_qp_create_send_ops_flags.define('IBV_QP_EX_WITH_RDMA_WRITE_WITH_IMM', 2)
IBV_QP_EX_WITH_SEND = enum_ibv_qp_create_send_ops_flags.define('IBV_QP_EX_WITH_SEND', 4)
IBV_QP_EX_WITH_SEND_WITH_IMM = enum_ibv_qp_create_send_ops_flags.define('IBV_QP_EX_WITH_SEND_WITH_IMM', 8)
IBV_QP_EX_WITH_RDMA_READ = enum_ibv_qp_create_send_ops_flags.define('IBV_QP_EX_WITH_RDMA_READ', 16)
IBV_QP_EX_WITH_ATOMIC_CMP_AND_SWP = enum_ibv_qp_create_send_ops_flags.define('IBV_QP_EX_WITH_ATOMIC_CMP_AND_SWP', 32)
IBV_QP_EX_WITH_ATOMIC_FETCH_AND_ADD = enum_ibv_qp_create_send_ops_flags.define('IBV_QP_EX_WITH_ATOMIC_FETCH_AND_ADD', 64)
IBV_QP_EX_WITH_LOCAL_INV = enum_ibv_qp_create_send_ops_flags.define('IBV_QP_EX_WITH_LOCAL_INV', 128)
IBV_QP_EX_WITH_BIND_MW = enum_ibv_qp_create_send_ops_flags.define('IBV_QP_EX_WITH_BIND_MW', 256)
IBV_QP_EX_WITH_SEND_WITH_INV = enum_ibv_qp_create_send_ops_flags.define('IBV_QP_EX_WITH_SEND_WITH_INV', 512)
IBV_QP_EX_WITH_TSO = enum_ibv_qp_create_send_ops_flags.define('IBV_QP_EX_WITH_TSO', 1024)
IBV_QP_EX_WITH_FLUSH = enum_ibv_qp_create_send_ops_flags.define('IBV_QP_EX_WITH_FLUSH', 2048)
IBV_QP_EX_WITH_ATOMIC_WRITE = enum_ibv_qp_create_send_ops_flags.define('IBV_QP_EX_WITH_ATOMIC_WRITE', 4096)
# RSS hash configuration plus the extended QP init-attr; the first seven fields
# of the _ex record mirror struct_ibv_qp_init_attr at identical offsets.
@c.record
class struct_ibv_rx_hash_conf(c.Struct):
  SIZE = 24
  rx_hash_function: Annotated[uint8_t, 0]
  rx_hash_key_len: Annotated[uint8_t, 1]
  rx_hash_key: Annotated[c.POINTER[uint8_t], 8]
  rx_hash_fields_mask: Annotated[uint64_t, 16]
@c.record
class struct_ibv_qp_init_attr_ex(c.Struct):
  SIZE = 136
  qp_context: Annotated[ctypes.c_void_p, 0]
  send_cq: Annotated[c.POINTER[struct_ibv_cq], 8]
  recv_cq: Annotated[c.POINTER[struct_ibv_cq], 16]
  srq: Annotated[c.POINTER[struct_ibv_srq], 24]
  cap: Annotated[struct_ibv_qp_cap, 32]
  qp_type: Annotated[enum_ibv_qp_type, 52]
  sq_sig_all: Annotated[Annotated[int, ctypes.c_int32], 56]
  comp_mask: Annotated[uint32_t, 60]
  pd: Annotated[c.POINTER[struct_ibv_pd], 64]
  xrcd: Annotated[c.POINTER[struct_ibv_xrcd], 72]
  create_flags: Annotated[uint32_t, 80]
  max_tso_header: Annotated[uint16_t, 84]
  rwq_ind_tbl: Annotated[c.POINTER[struct_ibv_rwq_ind_table], 88]
  rx_hash_conf: Annotated[struct_ibv_rx_hash_conf, 96]
  source_qpn: Annotated[uint32_t, 120]
  send_ops_flags: Annotated[uint64_t, 128]
# Attributes for opening an existing (shareable XRC) QP by number.
class enum_ibv_qp_open_attr_mask(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_QP_OPEN_ATTR_NUM = enum_ibv_qp_open_attr_mask.define('IBV_QP_OPEN_ATTR_NUM', 1)
IBV_QP_OPEN_ATTR_XRCD = enum_ibv_qp_open_attr_mask.define('IBV_QP_OPEN_ATTR_XRCD', 2)
IBV_QP_OPEN_ATTR_CONTEXT = enum_ibv_qp_open_attr_mask.define('IBV_QP_OPEN_ATTR_CONTEXT', 4)
IBV_QP_OPEN_ATTR_TYPE = enum_ibv_qp_open_attr_mask.define('IBV_QP_OPEN_ATTR_TYPE', 8)
IBV_QP_OPEN_ATTR_RESERVED = enum_ibv_qp_open_attr_mask.define('IBV_QP_OPEN_ATTR_RESERVED', 16)
@c.record
class struct_ibv_qp_open_attr(c.Struct):
  SIZE = 32
  comp_mask: Annotated[uint32_t, 0]
  qp_num: Annotated[uint32_t, 4]
  xrcd: Annotated[c.POINTER[struct_ibv_xrcd], 8]
  qp_context: Annotated[ctypes.c_void_p, 16]
  qp_type: Annotated[enum_ibv_qp_type, 24]
# ibv_modify_qp attribute-mask bits (note RATE_LIMIT jumps to 1<<25), the
# query-data-in-order flag/caps bits, and path-migration states.
class enum_ibv_qp_attr_mask(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_QP_STATE = enum_ibv_qp_attr_mask.define('IBV_QP_STATE', 1)
IBV_QP_CUR_STATE = enum_ibv_qp_attr_mask.define('IBV_QP_CUR_STATE', 2)
IBV_QP_EN_SQD_ASYNC_NOTIFY = enum_ibv_qp_attr_mask.define('IBV_QP_EN_SQD_ASYNC_NOTIFY', 4)
IBV_QP_ACCESS_FLAGS = enum_ibv_qp_attr_mask.define('IBV_QP_ACCESS_FLAGS', 8)
IBV_QP_PKEY_INDEX = enum_ibv_qp_attr_mask.define('IBV_QP_PKEY_INDEX', 16)
IBV_QP_PORT = enum_ibv_qp_attr_mask.define('IBV_QP_PORT', 32)
IBV_QP_QKEY = enum_ibv_qp_attr_mask.define('IBV_QP_QKEY', 64)
IBV_QP_AV = enum_ibv_qp_attr_mask.define('IBV_QP_AV', 128)
IBV_QP_PATH_MTU = enum_ibv_qp_attr_mask.define('IBV_QP_PATH_MTU', 256)
IBV_QP_TIMEOUT = enum_ibv_qp_attr_mask.define('IBV_QP_TIMEOUT', 512)
IBV_QP_RETRY_CNT = enum_ibv_qp_attr_mask.define('IBV_QP_RETRY_CNT', 1024)
IBV_QP_RNR_RETRY = enum_ibv_qp_attr_mask.define('IBV_QP_RNR_RETRY', 2048)
IBV_QP_RQ_PSN = enum_ibv_qp_attr_mask.define('IBV_QP_RQ_PSN', 4096)
IBV_QP_MAX_QP_RD_ATOMIC = enum_ibv_qp_attr_mask.define('IBV_QP_MAX_QP_RD_ATOMIC', 8192)
IBV_QP_ALT_PATH = enum_ibv_qp_attr_mask.define('IBV_QP_ALT_PATH', 16384)
IBV_QP_MIN_RNR_TIMER = enum_ibv_qp_attr_mask.define('IBV_QP_MIN_RNR_TIMER', 32768)
IBV_QP_SQ_PSN = enum_ibv_qp_attr_mask.define('IBV_QP_SQ_PSN', 65536)
IBV_QP_MAX_DEST_RD_ATOMIC = enum_ibv_qp_attr_mask.define('IBV_QP_MAX_DEST_RD_ATOMIC', 131072)
IBV_QP_PATH_MIG_STATE = enum_ibv_qp_attr_mask.define('IBV_QP_PATH_MIG_STATE', 262144)
IBV_QP_CAP = enum_ibv_qp_attr_mask.define('IBV_QP_CAP', 524288)
IBV_QP_DEST_QPN = enum_ibv_qp_attr_mask.define('IBV_QP_DEST_QPN', 1048576)
IBV_QP_RATE_LIMIT = enum_ibv_qp_attr_mask.define('IBV_QP_RATE_LIMIT', 33554432)
class enum_ibv_query_qp_data_in_order_flags(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_QUERY_QP_DATA_IN_ORDER_RETURN_CAPS = enum_ibv_query_qp_data_in_order_flags.define('IBV_QUERY_QP_DATA_IN_ORDER_RETURN_CAPS', 1)
class enum_ibv_query_qp_data_in_order_caps(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_QUERY_QP_DATA_IN_ORDER_WHOLE_MSG = enum_ibv_query_qp_data_in_order_caps.define('IBV_QUERY_QP_DATA_IN_ORDER_WHOLE_MSG', 1)
IBV_QUERY_QP_DATA_IN_ORDER_ALIGNED_128_BYTES = enum_ibv_query_qp_data_in_order_caps.define('IBV_QUERY_QP_DATA_IN_ORDER_ALIGNED_128_BYTES', 2)
class enum_ibv_mig_state(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_MIG_MIGRATED = enum_ibv_mig_state.define('IBV_MIG_MIGRATED', 0)
IBV_MIG_REARM = enum_ibv_mig_state.define('IBV_MIG_REARM', 1)
IBV_MIG_ARMED = enum_ibv_mig_state.define('IBV_MIG_ARMED', 2)
# Full QP attribute record used by ibv_modify_qp/ibv_query_qp (which fields are
# valid is selected by enum_ibv_qp_attr_mask), and the QP rate-limit attrs.
@c.record
class struct_ibv_qp_attr(c.Struct):
  SIZE = 144
  qp_state: Annotated[enum_ibv_qp_state, 0]
  cur_qp_state: Annotated[enum_ibv_qp_state, 4]
  path_mtu: Annotated[enum_ibv_mtu, 8]
  path_mig_state: Annotated[enum_ibv_mig_state, 12]
  qkey: Annotated[uint32_t, 16]
  rq_psn: Annotated[uint32_t, 20]
  sq_psn: Annotated[uint32_t, 24]
  dest_qp_num: Annotated[uint32_t, 28]
  qp_access_flags: Annotated[Annotated[int, ctypes.c_uint32], 32]
  cap: Annotated[struct_ibv_qp_cap, 36]
  ah_attr: Annotated[struct_ibv_ah_attr, 56]
  alt_ah_attr: Annotated[struct_ibv_ah_attr, 88]
  pkey_index: Annotated[uint16_t, 120]
  alt_pkey_index: Annotated[uint16_t, 122]
  en_sqd_async_notify: Annotated[uint8_t, 124]
  sq_draining: Annotated[uint8_t, 125]
  max_rd_atomic: Annotated[uint8_t, 126]
  max_dest_rd_atomic: Annotated[uint8_t, 127]
  min_rnr_timer: Annotated[uint8_t, 128]
  port_num: Annotated[uint8_t, 129]
  timeout: Annotated[uint8_t, 130]
  retry_cnt: Annotated[uint8_t, 131]
  rnr_retry: Annotated[uint8_t, 132]
  alt_port_num: Annotated[uint8_t, 133]
  alt_timeout: Annotated[uint8_t, 134]
  rate_limit: Annotated[uint32_t, 136]
@c.record
class struct_ibv_qp_rate_limit_attr(c.Struct):
  SIZE = 16
  rate_limit: Annotated[uint32_t, 0]
  max_burst_sz: Annotated[uint32_t, 4]
  typical_pkt_sz: Annotated[uint16_t, 8]
  comp_mask: Annotated[uint32_t, 12]
# ibv_wr_opcode_str returns a C string (char*) naming a work-request opcode.
@dll.bind
def ibv_wr_opcode_str(opcode:enum_ibv_wr_opcode) -> c.POINTER[Annotated[bytes, ctypes.c_char]]: ...
# Per-work-request send flags.
class enum_ibv_send_flags(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_SEND_FENCE = enum_ibv_send_flags.define('IBV_SEND_FENCE', 1)
IBV_SEND_SIGNALED = enum_ibv_send_flags.define('IBV_SEND_SIGNALED', 2)
IBV_SEND_SOLICITED = enum_ibv_send_flags.define('IBV_SEND_SOLICITED', 4)
IBV_SEND_INLINE = enum_ibv_send_flags.define('IBV_SEND_INLINE', 8)
IBV_SEND_IP_CSUM = enum_ibv_send_flags.define('IBV_SEND_IP_CSUM', 16)
# Flush placement/selectivity options.
class enum_ibv_placement_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_FLUSH_GLOBAL = enum_ibv_placement_type.define('IBV_FLUSH_GLOBAL', 1)
IBV_FLUSH_PERSISTENT = enum_ibv_placement_type.define('IBV_FLUSH_PERSISTENT', 2)
class enum_ibv_selectivity_level(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_FLUSH_RANGE = enum_ibv_selectivity_level.define('IBV_FLUSH_RANGE', 0)
IBV_FLUSH_MR = enum_ibv_selectivity_level.define('IBV_FLUSH_MR', 1)
# Simple (addr, length) buffer descriptor.
@c.record
class struct_ibv_data_buf(c.Struct):
  SIZE = 16
  addr: Annotated[ctypes.c_void_p, 0]
  length: Annotated[size_t, 8]
# Tag-matching operation opcodes and flags.
class enum_ibv_ops_wr_opcode(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_WR_TAG_ADD = enum_ibv_ops_wr_opcode.define('IBV_WR_TAG_ADD', 0)
IBV_WR_TAG_DEL = enum_ibv_ops_wr_opcode.define('IBV_WR_TAG_DEL', 1)
IBV_WR_TAG_SYNC = enum_ibv_ops_wr_opcode.define('IBV_WR_TAG_SYNC', 2)
class enum_ibv_ops_flags(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_OPS_SIGNALED = enum_ibv_ops_flags.define('IBV_OPS_SIGNALED', 1)
IBV_OPS_TM_SYNC = enum_ibv_ops_flags.define('IBV_OPS_TM_SYNC', 2)
# Tag-matching work request (a singly-linked list via `next`). Note the nested
# records are referenced before they are declared below -- this presumably
# relies on postponed annotation evaluation at the top of the file.
@c.record
class struct_ibv_ops_wr(c.Struct):
  SIZE = 72
  wr_id: Annotated[uint64_t, 0]
  next: Annotated[c.POINTER[struct_ibv_ops_wr], 8]
  opcode: Annotated[enum_ibv_ops_wr_opcode, 16]
  flags: Annotated[Annotated[int, ctypes.c_int32], 20]
  tm: Annotated[struct_ibv_ops_wr_tm, 24]
@c.record
class struct_ibv_ops_wr_tm(c.Struct):
  SIZE = 48
  unexpected_cnt: Annotated[uint32_t, 0]
  handle: Annotated[uint32_t, 4]
  add: Annotated[struct_ibv_ops_wr_tm_add, 8]
@c.record
class struct_ibv_ops_wr_tm_add(c.Struct):
  SIZE = 40
  recv_wr_id: Annotated[uint64_t, 0]
  sg_list: Annotated[c.POINTER[struct_ibv_sge], 8]
  num_sge: Annotated[Annotated[int, ctypes.c_int32], 16]
  tag: Annotated[uint64_t, 24]
  mask: Annotated[uint64_t, 32]
# Extended QP: embeds the base QP at offset 0 followed by a table of function
# pointers implementing the work-request builder API (wr_start .. wr_complete).
@c.record
class struct_ibv_qp_ex(c.Struct):
  SIZE = 360
  qp_base: Annotated[struct_ibv_qp, 0]
  comp_mask: Annotated[uint64_t, 160]
  wr_id: Annotated[uint64_t, 168]
  wr_flags: Annotated[Annotated[int, ctypes.c_uint32], 176]
  wr_atomic_cmp_swp: Annotated[c.CFUNCTYPE[None, [c.POINTER[struct_ibv_qp_ex], uint32_t, uint64_t, uint64_t, uint64_t]], 184]
  wr_atomic_fetch_add: Annotated[c.CFUNCTYPE[None, [c.POINTER[struct_ibv_qp_ex], uint32_t, uint64_t, uint64_t]], 192]
  wr_bind_mw: Annotated[c.CFUNCTYPE[None, [c.POINTER[struct_ibv_qp_ex], c.POINTER[struct_ibv_mw], uint32_t, c.POINTER[struct_ibv_mw_bind_info]]], 200]
  wr_local_inv: Annotated[c.CFUNCTYPE[None, [c.POINTER[struct_ibv_qp_ex], uint32_t]], 208]
  wr_rdma_read: Annotated[c.CFUNCTYPE[None, [c.POINTER[struct_ibv_qp_ex], uint32_t, uint64_t]], 216]
  wr_rdma_write: Annotated[c.CFUNCTYPE[None, [c.POINTER[struct_ibv_qp_ex], uint32_t, uint64_t]], 224]
  wr_rdma_write_imm: Annotated[c.CFUNCTYPE[None, [c.POINTER[struct_ibv_qp_ex], uint32_t, uint64_t, Annotated[int, ctypes.c_uint32]]], 232]
  wr_send: Annotated[c.CFUNCTYPE[None, [c.POINTER[struct_ibv_qp_ex]]], 240]
  wr_send_imm: Annotated[c.CFUNCTYPE[None, [c.POINTER[struct_ibv_qp_ex], Annotated[int, ctypes.c_uint32]]], 248]
  wr_send_inv: Annotated[c.CFUNCTYPE[None, [c.POINTER[struct_ibv_qp_ex], uint32_t]], 256]
  wr_send_tso: Annotated[c.CFUNCTYPE[None, [c.POINTER[struct_ibv_qp_ex], ctypes.c_void_p, uint16_t, uint16_t]], 264]
  wr_set_ud_addr: Annotated[c.CFUNCTYPE[None, [c.POINTER[struct_ibv_qp_ex], c.POINTER[struct_ibv_ah], uint32_t, uint32_t]], 272]
  wr_set_xrc_srqn: Annotated[c.CFUNCTYPE[None, [c.POINTER[struct_ibv_qp_ex], uint32_t]], 280]
  wr_set_inline_data: Annotated[c.CFUNCTYPE[None, [c.POINTER[struct_ibv_qp_ex], ctypes.c_void_p, size_t]], 288]
  wr_set_inline_data_list: Annotated[c.CFUNCTYPE[None, [c.POINTER[struct_ibv_qp_ex], size_t, c.POINTER[struct_ibv_data_buf]]], 296]
  wr_set_sge: Annotated[c.CFUNCTYPE[None, [c.POINTER[struct_ibv_qp_ex], uint32_t, uint64_t, uint32_t]], 304]
  wr_set_sge_list: Annotated[c.CFUNCTYPE[None, [c.POINTER[struct_ibv_qp_ex], size_t, c.POINTER[struct_ibv_sge]]], 312]
  wr_start: Annotated[c.CFUNCTYPE[None, [c.POINTER[struct_ibv_qp_ex]]], 320]
  wr_complete: Annotated[c.CFUNCTYPE[Annotated[int, ctypes.c_int32], [c.POINTER[struct_ibv_qp_ex]]], 328]
  wr_abort: Annotated[c.CFUNCTYPE[None, [c.POINTER[struct_ibv_qp_ex]]], 336]
  wr_atomic_write: Annotated[c.CFUNCTYPE[None, [c.POINTER[struct_ibv_qp_ex], uint32_t, uint64_t, ctypes.c_void_p]], 344]
  wr_flush: Annotated[c.CFUNCTYPE[None, [c.POINTER[struct_ibv_qp_ex], uint32_t, uint64_t, size_t, uint8_t, uint8_t]], 352]
# Upcast helper: obtain the extended-QP view of a base QP pointer.
@dll.bind
def ibv_qp_to_qp_ex(qp:c.POINTER[struct_ibv_qp]) -> c.POINTER[struct_ibv_qp_ex]: ...
# Small auxiliary records: ECE negotiation data, extended-poll attrs, and the
# tag-matching completion info read back via struct_ibv_cq_ex.read_tm_info.
@c.record
class struct_ibv_ece(c.Struct):
  SIZE = 12
  vendor_id: Annotated[uint32_t, 0]
  options: Annotated[uint32_t, 4]
  comp_mask: Annotated[uint32_t, 8]
@c.record
class struct_ibv_poll_cq_attr(c.Struct):
  SIZE = 4
  comp_mask: Annotated[uint32_t, 0]
@c.record
class struct_ibv_wc_tm_info(c.Struct):
  SIZE = 16
  tag: Annotated[uint64_t, 0]
  priv: Annotated[uint32_t, 8]
# Extended CQ: plain state fields followed by a table of function pointers for
# the iterator-style poll API (start_poll/next_poll/end_poll) and per-entry
# read_* accessors of the current completion.
@c.record
class struct_ibv_cq_ex(c.Struct):
  SIZE = 288
  context: Annotated[c.POINTER[struct_ibv_context], 0]
  channel: Annotated[c.POINTER[struct_ibv_comp_channel], 8]
  cq_context: Annotated[ctypes.c_void_p, 16]
  handle: Annotated[uint32_t, 24]
  cqe: Annotated[Annotated[int, ctypes.c_int32], 28]
  mutex: Annotated[pthread_mutex_t, 32]
  cond: Annotated[pthread_cond_t, 72]
  comp_events_completed: Annotated[uint32_t, 120]
  async_events_completed: Annotated[uint32_t, 124]
  comp_mask: Annotated[uint32_t, 128]
  status: Annotated[enum_ibv_wc_status, 132]
  wr_id: Annotated[uint64_t, 136]
  start_poll: Annotated[c.CFUNCTYPE[Annotated[int, ctypes.c_int32], [c.POINTER[struct_ibv_cq_ex], c.POINTER[struct_ibv_poll_cq_attr]]], 144]
  next_poll: Annotated[c.CFUNCTYPE[Annotated[int, ctypes.c_int32], [c.POINTER[struct_ibv_cq_ex]]], 152]
  end_poll: Annotated[c.CFUNCTYPE[None, [c.POINTER[struct_ibv_cq_ex]]], 160]
  read_opcode: Annotated[c.CFUNCTYPE[enum_ibv_wc_opcode, [c.POINTER[struct_ibv_cq_ex]]], 168]
  read_vendor_err: Annotated[c.CFUNCTYPE[uint32_t, [c.POINTER[struct_ibv_cq_ex]]], 176]
  read_byte_len: Annotated[c.CFUNCTYPE[uint32_t, [c.POINTER[struct_ibv_cq_ex]]], 184]
  read_imm_data: Annotated[c.CFUNCTYPE[Annotated[int, ctypes.c_uint32], [c.POINTER[struct_ibv_cq_ex]]], 192]
  read_qp_num: Annotated[c.CFUNCTYPE[uint32_t, [c.POINTER[struct_ibv_cq_ex]]], 200]
  read_src_qp: Annotated[c.CFUNCTYPE[uint32_t, [c.POINTER[struct_ibv_cq_ex]]], 208]
  read_wc_flags: Annotated[c.CFUNCTYPE[Annotated[int, ctypes.c_uint32], [c.POINTER[struct_ibv_cq_ex]]], 216]
  read_slid: Annotated[c.CFUNCTYPE[uint32_t, [c.POINTER[struct_ibv_cq_ex]]], 224]
  read_sl: Annotated[c.CFUNCTYPE[uint8_t, [c.POINTER[struct_ibv_cq_ex]]], 232]
  read_dlid_path_bits: Annotated[c.CFUNCTYPE[uint8_t, [c.POINTER[struct_ibv_cq_ex]]], 240]
  read_completion_ts: Annotated[c.CFUNCTYPE[uint64_t, [c.POINTER[struct_ibv_cq_ex]]], 248]
  read_cvlan: Annotated[c.CFUNCTYPE[uint16_t, [c.POINTER[struct_ibv_cq_ex]]], 256]
  read_flow_tag: Annotated[c.CFUNCTYPE[uint32_t, [c.POINTER[struct_ibv_cq_ex]]], 264]
  read_tm_info: Annotated[c.CFUNCTYPE[None, [c.POINTER[struct_ibv_cq_ex], c.POINTER[struct_ibv_wc_tm_info]]], 272]
  read_completion_wallclock_ns: Annotated[c.CFUNCTYPE[uint64_t, [c.POINTER[struct_ibv_cq_ex]]], 280]
# CQ moderation: attribute mask plus the (count, period) moderation record.
class enum_ibv_cq_attr_mask(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_CQ_ATTR_MODERATE = enum_ibv_cq_attr_mask.define('IBV_CQ_ATTR_MODERATE', 1)
IBV_CQ_ATTR_RESERVED = enum_ibv_cq_attr_mask.define('IBV_CQ_ATTR_RESERVED', 2)
@c.record
class struct_ibv_moderate_cq(c.Struct):
  SIZE = 4
  cq_count: Annotated[uint16_t, 0]
  cq_period: Annotated[uint16_t, 2]
@c.record
class struct_ibv_modify_cq_attr(c.Struct):
  SIZE = 8
  attr_mask: Annotated[uint32_t, 0]
  moderate: Annotated[struct_ibv_moderate_cq, 4]
# Flow steering: attribute flags, rule types, and the spec-type discriminator
# values used in each flow-spec record's `type` field.
class enum_ibv_flow_flags(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_FLOW_ATTR_FLAGS_DONT_TRAP = enum_ibv_flow_flags.define('IBV_FLOW_ATTR_FLAGS_DONT_TRAP', 2)
IBV_FLOW_ATTR_FLAGS_EGRESS = enum_ibv_flow_flags.define('IBV_FLOW_ATTR_FLAGS_EGRESS', 4)
class enum_ibv_flow_attr_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_FLOW_ATTR_NORMAL = enum_ibv_flow_attr_type.define('IBV_FLOW_ATTR_NORMAL', 0)
IBV_FLOW_ATTR_ALL_DEFAULT = enum_ibv_flow_attr_type.define('IBV_FLOW_ATTR_ALL_DEFAULT', 1)
IBV_FLOW_ATTR_MC_DEFAULT = enum_ibv_flow_attr_type.define('IBV_FLOW_ATTR_MC_DEFAULT', 2)
IBV_FLOW_ATTR_SNIFFER = enum_ibv_flow_attr_type.define('IBV_FLOW_ATTR_SNIFFER', 3)
class enum_ibv_flow_spec_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_FLOW_SPEC_ETH = enum_ibv_flow_spec_type.define('IBV_FLOW_SPEC_ETH', 32)
IBV_FLOW_SPEC_IPV4 = enum_ibv_flow_spec_type.define('IBV_FLOW_SPEC_IPV4', 48)
IBV_FLOW_SPEC_IPV6 = enum_ibv_flow_spec_type.define('IBV_FLOW_SPEC_IPV6', 49)
IBV_FLOW_SPEC_IPV4_EXT = enum_ibv_flow_spec_type.define('IBV_FLOW_SPEC_IPV4_EXT', 50)
IBV_FLOW_SPEC_ESP = enum_ibv_flow_spec_type.define('IBV_FLOW_SPEC_ESP', 52)
IBV_FLOW_SPEC_TCP = enum_ibv_flow_spec_type.define('IBV_FLOW_SPEC_TCP', 64)
IBV_FLOW_SPEC_UDP = enum_ibv_flow_spec_type.define('IBV_FLOW_SPEC_UDP', 65)
IBV_FLOW_SPEC_VXLAN_TUNNEL = enum_ibv_flow_spec_type.define('IBV_FLOW_SPEC_VXLAN_TUNNEL', 80)
IBV_FLOW_SPEC_GRE = enum_ibv_flow_spec_type.define('IBV_FLOW_SPEC_GRE', 81)
IBV_FLOW_SPEC_MPLS = enum_ibv_flow_spec_type.define('IBV_FLOW_SPEC_MPLS', 96)
IBV_FLOW_SPEC_INNER = enum_ibv_flow_spec_type.define('IBV_FLOW_SPEC_INNER', 256)
IBV_FLOW_SPEC_ACTION_TAG = enum_ibv_flow_spec_type.define('IBV_FLOW_SPEC_ACTION_TAG', 4096)
IBV_FLOW_SPEC_ACTION_DROP = enum_ibv_flow_spec_type.define('IBV_FLOW_SPEC_ACTION_DROP', 4097)
IBV_FLOW_SPEC_ACTION_HANDLE = enum_ibv_flow_spec_type.define('IBV_FLOW_SPEC_ACTION_HANDLE', 4098)
IBV_FLOW_SPEC_ACTION_COUNT = enum_ibv_flow_spec_type.define('IBV_FLOW_SPEC_ACTION_COUNT', 4099)
# Flow-spec match records, each pairing a `val` filter with a `mask` filter of
# the same shape: Ethernet, IPv4, and extended IPv4.
@c.record
class struct_ibv_flow_eth_filter(c.Struct):
  SIZE = 16
  dst_mac: Annotated[c.Array[uint8_t, Literal[6]], 0]
  src_mac: Annotated[c.Array[uint8_t, Literal[6]], 6]
  ether_type: Annotated[uint16_t, 12]
  vlan_tag: Annotated[uint16_t, 14]
@c.record
class struct_ibv_flow_spec_eth(c.Struct):
  SIZE = 40
  type: Annotated[enum_ibv_flow_spec_type, 0]
  size: Annotated[uint16_t, 4]
  val: Annotated[struct_ibv_flow_eth_filter, 6]
  mask: Annotated[struct_ibv_flow_eth_filter, 22]
@c.record
class struct_ibv_flow_ipv4_filter(c.Struct):
  SIZE = 8
  src_ip: Annotated[uint32_t, 0]
  dst_ip: Annotated[uint32_t, 4]
@c.record
class struct_ibv_flow_spec_ipv4(c.Struct):
  SIZE = 24
  type: Annotated[enum_ibv_flow_spec_type, 0]
  size: Annotated[uint16_t, 4]
  val: Annotated[struct_ibv_flow_ipv4_filter, 8]
  mask: Annotated[struct_ibv_flow_ipv4_filter, 16]
@c.record
class struct_ibv_flow_ipv4_ext_filter(c.Struct):
  SIZE = 12
  src_ip: Annotated[uint32_t, 0]
  dst_ip: Annotated[uint32_t, 4]
  proto: Annotated[uint8_t, 8]
  tos: Annotated[uint8_t, 9]
  ttl: Annotated[uint8_t, 10]
  flags: Annotated[uint8_t, 11]
@c.record
class struct_ibv_flow_spec_ipv4_ext(c.Struct):
  SIZE = 32
  type: Annotated[enum_ibv_flow_spec_type, 0]
  size: Annotated[uint16_t, 4]
  val: Annotated[struct_ibv_flow_ipv4_ext_filter, 8]
  mask: Annotated[struct_ibv_flow_ipv4_ext_filter, 20]
# Flow-spec match records continued: IPv6, ESP, and TCP/UDP port filters.
@c.record
class struct_ibv_flow_ipv6_filter(c.Struct):
  SIZE = 40
  src_ip: Annotated[c.Array[uint8_t, Literal[16]], 0]
  dst_ip: Annotated[c.Array[uint8_t, Literal[16]], 16]
  flow_label: Annotated[uint32_t, 32]
  next_hdr: Annotated[uint8_t, 36]
  traffic_class: Annotated[uint8_t, 37]
  hop_limit: Annotated[uint8_t, 38]
@c.record
class struct_ibv_flow_spec_ipv6(c.Struct):
  SIZE = 88
  type: Annotated[enum_ibv_flow_spec_type, 0]
  size: Annotated[uint16_t, 4]
  val: Annotated[struct_ibv_flow_ipv6_filter, 8]
  mask: Annotated[struct_ibv_flow_ipv6_filter, 48]
@c.record
class struct_ibv_flow_esp_filter(c.Struct):
  SIZE = 8
  spi: Annotated[uint32_t, 0]
  seq: Annotated[uint32_t, 4]
@c.record
class struct_ibv_flow_spec_esp(c.Struct):
  SIZE = 24
  type: Annotated[enum_ibv_flow_spec_type, 0]
  size: Annotated[uint16_t, 4]
  val: Annotated[struct_ibv_flow_esp_filter, 8]
  mask: Annotated[struct_ibv_flow_esp_filter, 16]
@c.record
class struct_ibv_flow_tcp_udp_filter(c.Struct):
  SIZE = 4
  dst_port: Annotated[uint16_t, 0]
  src_port: Annotated[uint16_t, 2]
@c.record
class struct_ibv_flow_spec_tcp_udp(c.Struct):
  SIZE = 16
  type: Annotated[enum_ibv_flow_spec_type, 0]
  size: Annotated[uint16_t, 4]
  val: Annotated[struct_ibv_flow_tcp_udp_filter, 6]
  mask: Annotated[struct_ibv_flow_tcp_udp_filter, 10]
# Flow-spec match records continued: GRE, MPLS, tunnel-id filters, plus the
# tag and drop action specs.
@c.record
class struct_ibv_flow_gre_filter(c.Struct):
  SIZE = 8
  c_ks_res0_ver: Annotated[uint16_t, 0]
  protocol: Annotated[uint16_t, 2]
  key: Annotated[uint32_t, 4]
@c.record
class struct_ibv_flow_spec_gre(c.Struct):
  SIZE = 24
  type: Annotated[enum_ibv_flow_spec_type, 0]
  size: Annotated[uint16_t, 4]
  val: Annotated[struct_ibv_flow_gre_filter, 8]
  mask: Annotated[struct_ibv_flow_gre_filter, 16]
@c.record
class struct_ibv_flow_mpls_filter(c.Struct):
  SIZE = 4
  label: Annotated[uint32_t, 0]
@c.record
class struct_ibv_flow_spec_mpls(c.Struct):
  SIZE = 16
  type: Annotated[enum_ibv_flow_spec_type, 0]
  size: Annotated[uint16_t, 4]
  val: Annotated[struct_ibv_flow_mpls_filter, 8]
  mask: Annotated[struct_ibv_flow_mpls_filter, 12]
@c.record
class struct_ibv_flow_tunnel_filter(c.Struct):
  SIZE = 4
  tunnel_id: Annotated[uint32_t, 0]
@c.record
class struct_ibv_flow_spec_tunnel(c.Struct):
  SIZE = 16
  type: Annotated[enum_ibv_flow_spec_type, 0]
  size: Annotated[uint16_t, 4]
  val: Annotated[struct_ibv_flow_tunnel_filter, 8]
  mask: Annotated[struct_ibv_flow_tunnel_filter, 12]
@c.record
class struct_ibv_flow_spec_action_tag(c.Struct):
  SIZE = 12
  type: Annotated[enum_ibv_flow_spec_type, 0]
  size: Annotated[uint16_t, 4]
  tag_id: Annotated[uint32_t, 8]
@c.record
class struct_ibv_flow_spec_action_drop(c.Struct):
  SIZE = 8
  type: Annotated[enum_ibv_flow_spec_type, 0]
  size: Annotated[uint16_t, 4]
# Flow action specs that reference external objects (flow action handles and
# counters), plus the opaque handle records themselves.
@c.record
class struct_ibv_flow_spec_action_handle(c.Struct):
  SIZE = 16
  type: Annotated[enum_ibv_flow_spec_type, 0]
  size: Annotated[uint16_t, 4]
  action: Annotated[c.POINTER[struct_ibv_flow_action], 8]
@c.record
class struct_ibv_flow_action(c.Struct):
  SIZE = 8
  context: Annotated[c.POINTER[struct_ibv_context], 0]
@c.record
class struct_ibv_flow_spec_counter_action(c.Struct):
  SIZE = 16
  type: Annotated[enum_ibv_flow_spec_type, 0]
  size: Annotated[uint16_t, 4]
  counters: Annotated[c.POINTER[struct_ibv_counters], 8]
@c.record
class struct_ibv_counters(c.Struct):
  SIZE = 8
  context: Annotated[c.POINTER[struct_ibv_context], 0]
# C-union mapping: every member sits at offset 0 and SIZE (88) equals the
# largest member (the IPv6 spec); `hdr` gives the common type/size prefix.
@c.record
class struct_ibv_flow_spec(c.Struct):
  SIZE = 88
  hdr: Annotated[struct_ibv_flow_spec_hdr, 0]
  eth: Annotated[struct_ibv_flow_spec_eth, 0]
  ipv4: Annotated[struct_ibv_flow_spec_ipv4, 0]
  tcp_udp: Annotated[struct_ibv_flow_spec_tcp_udp, 0]
  ipv4_ext: Annotated[struct_ibv_flow_spec_ipv4_ext, 0]
  ipv6: Annotated[struct_ibv_flow_spec_ipv6, 0]
  esp: Annotated[struct_ibv_flow_spec_esp, 0]
  tunnel: Annotated[struct_ibv_flow_spec_tunnel, 0]
  gre: Annotated[struct_ibv_flow_spec_gre, 0]
  mpls: Annotated[struct_ibv_flow_spec_mpls, 0]
  flow_tag: Annotated[struct_ibv_flow_spec_action_tag, 0]
  drop: Annotated[struct_ibv_flow_spec_action_drop, 0]
  handle: Annotated[struct_ibv_flow_spec_action_handle, 0]
  flow_count: Annotated[struct_ibv_flow_spec_counter_action, 0]
@c.record
class struct_ibv_flow_spec_hdr(c.Struct):
  SIZE = 8
  type: Annotated[enum_ibv_flow_spec_type, 0]
  size: Annotated[uint16_t, 4]
# Flow-rule header (followed in memory by num_of_specs variable-length specs)
# and the created-flow handle record.
@c.record
class struct_ibv_flow_attr(c.Struct):
  SIZE = 20
  comp_mask: Annotated[uint32_t, 0]
  type: Annotated[enum_ibv_flow_attr_type, 4]
  size: Annotated[uint16_t, 8]
  priority: Annotated[uint16_t, 10]
  num_of_specs: Annotated[uint8_t, 12]
  port: Annotated[uint8_t, 13]
  flags: Annotated[uint32_t, 16]
@c.record
class struct_ibv_flow(c.Struct):
  SIZE = 24
  comp_mask: Annotated[uint32_t, 0]
  context: Annotated[c.POINTER[struct_ibv_context], 8]
  handle: Annotated[uint32_t, 16]
# comp_mask bits for struct_ibv_flow_action_esp_attr below.
class enum_ibv_flow_action_esp_mask(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_FLOW_ACTION_ESP_MASK_ESN = enum_ibv_flow_action_esp_mask.define('IBV_FLOW_ACTION_ESP_MASK_ESN', 1)
# Attributes for creating/modifying an IPsec-ESP flow action
# (ibv_create_flow_action_esp / ibv_modify_flow_action_esp).
# keymat_ptr/replay_ptr point at variable-sized blobs described by the
# corresponding *_proto enum and *_len fields.
@c.record
class struct_ibv_flow_action_esp_attr(c.Struct):
  SIZE = 56
  esp_attr: Annotated[c.POINTER[struct_ib_uverbs_flow_action_esp], 0]
  keymat_proto: Annotated[enum_ib_uverbs_flow_action_esp_keymat, 8]
  keymat_len: Annotated[uint16_t, 12]
  keymat_ptr: Annotated[ctypes.c_void_p, 16]
  replay_proto: Annotated[enum_ib_uverbs_flow_action_esp_replay, 24]
  replay_len: Annotated[uint16_t, 28]
  replay_ptr: Annotated[ctypes.c_void_p, 32]
  esp_encap: Annotated[c.POINTER[struct_ib_uverbs_flow_action_esp_encap], 40]
  comp_mask: Annotated[uint32_t, 48]
  esn: Annotated[uint32_t, 52]
# Kernel uapi mirror (rdma/ib_user_ioctl_verbs.h): core ESP parameters shared
# between user space and the kernel.
@c.record
class struct_ib_uverbs_flow_action_esp(c.Struct):
  SIZE = 24
  spi: Annotated[Annotated[int, ctypes.c_uint32], 0]
  seq: Annotated[Annotated[int, ctypes.c_uint32], 4]
  tfc_pad: Annotated[Annotated[int, ctypes.c_uint32], 8]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 12]
  hard_limit_pkts: Annotated[Annotated[int, ctypes.c_uint64], 16]
# Kernel fixed-width integer aliases used by the uapi mirrors in this file.
__u32: TypeAlias = Annotated[int, ctypes.c_uint32]
__u64: TypeAlias = Annotated[int, ctypes.c_uint64]
# Key-material format for an ESP action (only AES-GCM is defined).
class enum_ib_uverbs_flow_action_esp_keymat(Annotated[int, ctypes.c_uint32], c.Enum): pass
IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM = enum_ib_uverbs_flow_action_esp_keymat.define('IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM', 0)
# Anti-replay protection scheme for an ESP action.
class enum_ib_uverbs_flow_action_esp_replay(Annotated[int, ctypes.c_uint32], c.Enum): pass
IB_UVERBS_FLOW_ACTION_ESP_REPLAY_NONE = enum_ib_uverbs_flow_action_esp_replay.define('IB_UVERBS_FLOW_ACTION_ESP_REPLAY_NONE', 0)
IB_UVERBS_FLOW_ACTION_ESP_REPLAY_BMP = enum_ib_uverbs_flow_action_esp_replay.define('IB_UVERBS_FLOW_ACTION_ESP_REPLAY_BMP', 1)
# Linked list of encapsulation headers for ESP tunnel mode. The *_data_u64
# fields alias the pointers at the same offsets (C union of ptr and __u64,
# so the layout is identical on 32- and 64-bit ABIs).
@c.record
class struct_ib_uverbs_flow_action_esp_encap(c.Struct):
  SIZE = 24
  val_ptr: Annotated[ctypes.c_void_p, 0]
  val_ptr_data_u64: Annotated[Annotated[int, ctypes.c_uint64], 0]
  next_ptr: Annotated[c.POINTER[struct_ib_uverbs_flow_action_esp_encap], 8]
  next_ptr_data_u64: Annotated[Annotated[int, ctypes.c_uint64], 8]
  len: Annotated[Annotated[int, ctypes.c_uint16], 16]
  type: Annotated[Annotated[int, ctypes.c_uint16], 18]
__u16: TypeAlias = Annotated[int, ctypes.c_uint16]
# Anonymous C enum holding sysfs name/path length limits.
class _anonenum4(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_SYSFS_NAME_MAX = _anonenum4.define('IBV_SYSFS_NAME_MAX', 64)
IBV_SYSFS_PATH_MAX = _anonenum4.define('IBV_SYSFS_PATH_MAX', 256)
# comp_mask bits for struct_ibv_cq_init_attr_ex (which optional fields are set).
class enum_ibv_cq_init_attr_mask(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_CQ_INIT_ATTR_MASK_FLAGS = enum_ibv_cq_init_attr_mask.define('IBV_CQ_INIT_ATTR_MASK_FLAGS', 1)
IBV_CQ_INIT_ATTR_MASK_PD = enum_ibv_cq_init_attr_mask.define('IBV_CQ_INIT_ATTR_MASK_PD', 2)
# Values for struct_ibv_cq_init_attr_ex.flags.
class enum_ibv_create_cq_attr_flags(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_CREATE_CQ_ATTR_SINGLE_THREADED = enum_ibv_create_cq_attr_flags.define('IBV_CREATE_CQ_ATTR_SINGLE_THREADED', 1)
IBV_CREATE_CQ_ATTR_IGNORE_OVERRUN = enum_ibv_create_cq_attr_flags.define('IBV_CREATE_CQ_ATTR_IGNORE_OVERRUN', 2)
# Extended CQ creation attributes (ibv_create_cq_ex). wc_flags selects which
# work-completion fields to report; flags/parent_domain are only valid when the
# matching comp_mask bit is set.
@c.record
class struct_ibv_cq_init_attr_ex(c.Struct):
  SIZE = 56
  cqe: Annotated[uint32_t, 0]
  cq_context: Annotated[ctypes.c_void_p, 8]
  channel: Annotated[c.POINTER[struct_ibv_comp_channel], 16]
  comp_vector: Annotated[uint32_t, 24]
  wc_flags: Annotated[uint64_t, 32]
  comp_mask: Annotated[uint32_t, 40]
  flags: Annotated[uint32_t, 44]
  parent_domain: Annotated[c.POINTER[struct_ibv_pd], 48]
# comp_mask bits for struct_ibv_parent_domain_init_attr below.
class enum_ibv_parent_domain_init_attr_mask(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_PARENT_DOMAIN_INIT_ATTR_ALLOCATORS = enum_ibv_parent_domain_init_attr_mask.define('IBV_PARENT_DOMAIN_INIT_ATTR_ALLOCATORS', 1)
IBV_PARENT_DOMAIN_INIT_ATTR_PD_CONTEXT = enum_ibv_parent_domain_init_attr_mask.define('IBV_PARENT_DOMAIN_INIT_ATTR_PD_CONTEXT', 2)
# Parent-domain creation attributes (ibv_alloc_parent_domain); alloc/free are
# optional custom buffer allocator callbacks.
@c.record
class struct_ibv_parent_domain_init_attr(c.Struct):
  SIZE = 48
  pd: Annotated[c.POINTER[struct_ibv_pd], 0]
  td: Annotated[c.POINTER[struct_ibv_td], 8]
  comp_mask: Annotated[uint32_t, 16]
  alloc: Annotated[c.CFUNCTYPE[ctypes.c_void_p, [c.POINTER[struct_ibv_pd], ctypes.c_void_p, size_t, size_t, uint64_t]], 24]
  free: Annotated[c.CFUNCTYPE[None, [c.POINTER[struct_ibv_pd], ctypes.c_void_p, ctypes.c_void_p, uint64_t]], 32]
  pd_context: Annotated[ctypes.c_void_p, 40]
# Counters-object creation attributes (ibv_create_counters); currently only an
# extensibility mask.
@c.record
class struct_ibv_counters_init_attr(c.Struct):
  SIZE = 4
  comp_mask: Annotated[uint32_t, 0]
# What a counter measures when attached to a flow.
class enum_ibv_counter_description(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_COUNTER_PACKETS = enum_ibv_counter_description.define('IBV_COUNTER_PACKETS', 0)
IBV_COUNTER_BYTES = enum_ibv_counter_description.define('IBV_COUNTER_BYTES', 1)
# Binds one counter (by output index) to an object via attach_counters_point_flow.
@c.record
class struct_ibv_counter_attach_attr(c.Struct):
  SIZE = 12
  counter_desc: Annotated[enum_ibv_counter_description, 0]
  index: Annotated[uint32_t, 4]
  comp_mask: Annotated[uint32_t, 8]
# Flags for reading counters (read_counters vtable entry).
class enum_ibv_read_counters_flags(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_READ_COUNTERS_ATTR_PREFER_CACHED = enum_ibv_read_counters_flags.define('IBV_READ_COUNTERS_ATTR_PREFER_CACHED', 1)
# comp_mask bits selecting which values ibv_query_rt_values_ex should return.
class enum_ibv_values_mask(Annotated[int, ctypes.c_uint32], c.Enum): pass
IBV_VALUES_MASK_RAW_CLOCK = enum_ibv_values_mask.define('IBV_VALUES_MASK_RAW_CLOCK', 1)
IBV_VALUES_MASK_RESERVED = enum_ibv_values_mask.define('IBV_VALUES_MASK_RESERVED', 2)
# Output of ibv_query_rt_values_ex (e.g. the HCA raw clock).
@c.record
class struct_ibv_values_ex(c.Struct):
  SIZE = 24
  comp_mask: Annotated[uint32_t, 0]
  raw_clock: Annotated[struct_timespec, 8]
# Mirror of the libc struct timespec (64-bit sec/nsec layout).
@c.record
class struct_timespec(c.Struct):
  SIZE = 16
  tv_sec: Annotated[Annotated[int, ctypes.c_int64], 0]
  tv_nsec: Annotated[Annotated[int, ctypes.c_int64], 8]
__time_t: TypeAlias = Annotated[int, ctypes.c_int64]
__syscall_slong_t: TypeAlias = Annotated[int, ctypes.c_int64]
# Extended-verbs provider vtable. Each field is a C function pointer filled in
# by the provider driver; the embedded struct_ibv_context at offset 320 is the
# public handle, so verbs_get_ctx() recovers this struct by subtracting the
# context offset. The ABI grows downward: newer entries sit at lower offsets,
# `sz` records how much of the struct the provider actually populated.
@c.record
class struct_verbs_context(c.Struct):
  SIZE = 648
  query_port: Annotated[c.CFUNCTYPE[Annotated[int, ctypes.c_int32], [c.POINTER[struct_ibv_context], uint8_t, c.POINTER[struct_ibv_port_attr], size_t]], 0]
  advise_mr: Annotated[c.CFUNCTYPE[Annotated[int, ctypes.c_int32], [c.POINTER[struct_ibv_pd], enum_ib_uverbs_advise_mr_advice, uint32_t, c.POINTER[struct_ibv_sge], uint32_t]], 8]
  alloc_null_mr: Annotated[c.CFUNCTYPE[c.POINTER[struct_ibv_mr], [c.POINTER[struct_ibv_pd]]], 16]
  read_counters: Annotated[c.CFUNCTYPE[Annotated[int, ctypes.c_int32], [c.POINTER[struct_ibv_counters], c.POINTER[uint64_t], uint32_t, uint32_t]], 24]
  attach_counters_point_flow: Annotated[c.CFUNCTYPE[Annotated[int, ctypes.c_int32], [c.POINTER[struct_ibv_counters], c.POINTER[struct_ibv_counter_attach_attr], c.POINTER[struct_ibv_flow]]], 32]
  create_counters: Annotated[c.CFUNCTYPE[c.POINTER[struct_ibv_counters], [c.POINTER[struct_ibv_context], c.POINTER[struct_ibv_counters_init_attr]]], 40]
  destroy_counters: Annotated[c.CFUNCTYPE[Annotated[int, ctypes.c_int32], [c.POINTER[struct_ibv_counters]]], 48]
  reg_dm_mr: Annotated[c.CFUNCTYPE[c.POINTER[struct_ibv_mr], [c.POINTER[struct_ibv_pd], c.POINTER[struct_ibv_dm], uint64_t, size_t, Annotated[int, ctypes.c_uint32]]], 56]
  alloc_dm: Annotated[c.CFUNCTYPE[c.POINTER[struct_ibv_dm], [c.POINTER[struct_ibv_context], c.POINTER[struct_ibv_alloc_dm_attr]]], 64]
  free_dm: Annotated[c.CFUNCTYPE[Annotated[int, ctypes.c_int32], [c.POINTER[struct_ibv_dm]]], 72]
  modify_flow_action_esp: Annotated[c.CFUNCTYPE[Annotated[int, ctypes.c_int32], [c.POINTER[struct_ibv_flow_action], c.POINTER[struct_ibv_flow_action_esp_attr]]], 80]
  destroy_flow_action: Annotated[c.CFUNCTYPE[Annotated[int, ctypes.c_int32], [c.POINTER[struct_ibv_flow_action]]], 88]
  create_flow_action_esp: Annotated[c.CFUNCTYPE[c.POINTER[struct_ibv_flow_action], [c.POINTER[struct_ibv_context], c.POINTER[struct_ibv_flow_action_esp_attr]]], 96]
  modify_qp_rate_limit: Annotated[c.CFUNCTYPE[Annotated[int, ctypes.c_int32], [c.POINTER[struct_ibv_qp], c.POINTER[struct_ibv_qp_rate_limit_attr]]], 104]
  alloc_parent_domain: Annotated[c.CFUNCTYPE[c.POINTER[struct_ibv_pd], [c.POINTER[struct_ibv_context], c.POINTER[struct_ibv_parent_domain_init_attr]]], 112]
  dealloc_td: Annotated[c.CFUNCTYPE[Annotated[int, ctypes.c_int32], [c.POINTER[struct_ibv_td]]], 120]
  alloc_td: Annotated[c.CFUNCTYPE[c.POINTER[struct_ibv_td], [c.POINTER[struct_ibv_context], c.POINTER[struct_ibv_td_init_attr]]], 128]
  modify_cq: Annotated[c.CFUNCTYPE[Annotated[int, ctypes.c_int32], [c.POINTER[struct_ibv_cq], c.POINTER[struct_ibv_modify_cq_attr]]], 136]
  post_srq_ops: Annotated[c.CFUNCTYPE[Annotated[int, ctypes.c_int32], [c.POINTER[struct_ibv_srq], c.POINTER[struct_ibv_ops_wr], c.POINTER[c.POINTER[struct_ibv_ops_wr]]]], 144]
  destroy_rwq_ind_table: Annotated[c.CFUNCTYPE[Annotated[int, ctypes.c_int32], [c.POINTER[struct_ibv_rwq_ind_table]]], 152]
  create_rwq_ind_table: Annotated[c.CFUNCTYPE[c.POINTER[struct_ibv_rwq_ind_table], [c.POINTER[struct_ibv_context], c.POINTER[struct_ibv_rwq_ind_table_init_attr]]], 160]
  destroy_wq: Annotated[c.CFUNCTYPE[Annotated[int, ctypes.c_int32], [c.POINTER[struct_ibv_wq]]], 168]
  modify_wq: Annotated[c.CFUNCTYPE[Annotated[int, ctypes.c_int32], [c.POINTER[struct_ibv_wq], c.POINTER[struct_ibv_wq_attr]]], 176]
  create_wq: Annotated[c.CFUNCTYPE[c.POINTER[struct_ibv_wq], [c.POINTER[struct_ibv_context], c.POINTER[struct_ibv_wq_init_attr]]], 184]
  query_rt_values: Annotated[c.CFUNCTYPE[Annotated[int, ctypes.c_int32], [c.POINTER[struct_ibv_context], c.POINTER[struct_ibv_values_ex]]], 192]
  create_cq_ex: Annotated[c.CFUNCTYPE[c.POINTER[struct_ibv_cq_ex], [c.POINTER[struct_ibv_context], c.POINTER[struct_ibv_cq_init_attr_ex]]], 200]
  priv: Annotated[c.POINTER[struct_verbs_ex_private], 208]
  query_device_ex: Annotated[c.CFUNCTYPE[Annotated[int, ctypes.c_int32], [c.POINTER[struct_ibv_context], c.POINTER[struct_ibv_query_device_ex_input], c.POINTER[struct_ibv_device_attr_ex], size_t]], 216]
  ibv_destroy_flow: Annotated[c.CFUNCTYPE[Annotated[int, ctypes.c_int32], [c.POINTER[struct_ibv_flow]]], 224]
  ABI_placeholder2: Annotated[c.CFUNCTYPE[None, []], 232]
  ibv_create_flow: Annotated[c.CFUNCTYPE[c.POINTER[struct_ibv_flow], [c.POINTER[struct_ibv_qp], c.POINTER[struct_ibv_flow_attr]]], 240]
  ABI_placeholder1: Annotated[c.CFUNCTYPE[None, []], 248]
  open_qp: Annotated[c.CFUNCTYPE[c.POINTER[struct_ibv_qp], [c.POINTER[struct_ibv_context], c.POINTER[struct_ibv_qp_open_attr]]], 256]
  create_qp_ex: Annotated[c.CFUNCTYPE[c.POINTER[struct_ibv_qp], [c.POINTER[struct_ibv_context], c.POINTER[struct_ibv_qp_init_attr_ex]]], 264]
  get_srq_num: Annotated[c.CFUNCTYPE[Annotated[int, ctypes.c_int32], [c.POINTER[struct_ibv_srq], c.POINTER[uint32_t]]], 272]
  create_srq_ex: Annotated[c.CFUNCTYPE[c.POINTER[struct_ibv_srq], [c.POINTER[struct_ibv_context], c.POINTER[struct_ibv_srq_init_attr_ex]]], 280]
  open_xrcd: Annotated[c.CFUNCTYPE[c.POINTER[struct_ibv_xrcd], [c.POINTER[struct_ibv_context], c.POINTER[struct_ibv_xrcd_init_attr]]], 288]
  close_xrcd: Annotated[c.CFUNCTYPE[Annotated[int, ctypes.c_int32], [c.POINTER[struct_ibv_xrcd]]], 296]
  _ABI_placeholder3: Annotated[uint64_t, 304]
  sz: Annotated[size_t, 312]
  context: Annotated[struct_ibv_context, 320]
# Prefetch hints for the advise_mr verb (kernel uapi values).
class enum_ib_uverbs_advise_mr_advice(Annotated[int, ctypes.c_uint32], c.Enum): pass
IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH = enum_ib_uverbs_advise_mr_advice.define('IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH', 0)
IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE = enum_ib_uverbs_advise_mr_advice.define('IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE', 1)
IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT = enum_ib_uverbs_advise_mr_advice.define('IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT', 2)
# Opaque library-private struct; layout intentionally not exposed, so it is
# declared as an empty ctypes.Structure (pointer-only use).
class struct_verbs_ex_private(ctypes.Structure): pass
# --- libibverbs FFI bindings: device discovery, context lifetime, object
# import/export, and queries. Each stub's annotations drive the ctypes
# signature via @dll.bind; bodies are intentionally empty (`...`).
@dll.bind
def ibv_get_device_list(num_devices:c.POINTER[Annotated[int, ctypes.c_int32]]) -> c.POINTER[c.POINTER[struct_ibv_device]]: ...
@dll.bind
def ibv_free_device_list(list:c.POINTER[c.POINTER[struct_ibv_device]]) -> None: ...
@dll.bind
def ibv_get_device_name(device:c.POINTER[struct_ibv_device]) -> c.POINTER[Annotated[bytes, ctypes.c_char]]: ...
@dll.bind
def ibv_get_device_index(device:c.POINTER[struct_ibv_device]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def ibv_get_device_guid(device:c.POINTER[struct_ibv_device]) -> Annotated[int, ctypes.c_uint64]: ...
@dll.bind
def ibv_open_device(device:c.POINTER[struct_ibv_device]) -> c.POINTER[struct_ibv_context]: ...
@dll.bind
def ibv_close_device(context:c.POINTER[struct_ibv_context]) -> Annotated[int, ctypes.c_int32]: ...
# Import verbs objects shared from another process (by fd/handle).
@dll.bind
def ibv_import_device(cmd_fd:Annotated[int, ctypes.c_int32]) -> c.POINTER[struct_ibv_context]: ...
@dll.bind
def ibv_import_pd(context:c.POINTER[struct_ibv_context], pd_handle:uint32_t) -> c.POINTER[struct_ibv_pd]: ...
@dll.bind
def ibv_unimport_pd(pd:c.POINTER[struct_ibv_pd]) -> None: ...
@dll.bind
def ibv_import_mr(pd:c.POINTER[struct_ibv_pd], mr_handle:uint32_t) -> c.POINTER[struct_ibv_mr]: ...
@dll.bind
def ibv_unimport_mr(mr:c.POINTER[struct_ibv_mr]) -> None: ...
@dll.bind
def ibv_import_dm(context:c.POINTER[struct_ibv_context], dm_handle:uint32_t) -> c.POINTER[struct_ibv_dm]: ...
@dll.bind
def ibv_unimport_dm(dm:c.POINTER[struct_ibv_dm]) -> None: ...
# Async event handling and device/port/GID/pkey queries.
@dll.bind
def ibv_get_async_event(context:c.POINTER[struct_ibv_context], event:c.POINTER[struct_ibv_async_event]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def ibv_ack_async_event(event:c.POINTER[struct_ibv_async_event]) -> None: ...
@dll.bind
def ibv_query_device(context:c.POINTER[struct_ibv_context], device_attr:c.POINTER[struct_ibv_device_attr]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def ibv_query_port(context:c.POINTER[struct_ibv_context], port_num:uint8_t, port_attr:c.POINTER[struct__compat_ibv_port_attr]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def ibv_query_gid(context:c.POINTER[struct_ibv_context], port_num:uint8_t, index:Annotated[int, ctypes.c_int32], gid:c.POINTER[union_ibv_gid]) -> Annotated[int, ctypes.c_int32]: ...
# entry_size lets old binaries work with newer, larger struct_ibv_gid_entry.
@dll.bind
def _ibv_query_gid_ex(context:c.POINTER[struct_ibv_context], port_num:uint32_t, gid_index:uint32_t, entry:c.POINTER[struct_ibv_gid_entry], flags:uint32_t, entry_size:size_t) -> Annotated[int, ctypes.c_int32]: ...
# POSIX signed size type, used as the return of _ibv_query_gid_table.
ssize_t: TypeAlias = Annotated[int, ctypes.c_int64]
@dll.bind
def _ibv_query_gid_table(context:c.POINTER[struct_ibv_context], entries:c.POINTER[struct_ibv_gid_entry], max_entries:size_t, flags:uint32_t, entry_size:size_t) -> ssize_t: ...
@dll.bind
def ibv_query_pkey(context:c.POINTER[struct_ibv_context], port_num:uint8_t, index:Annotated[int, ctypes.c_int32], pkey:c.POINTER[Annotated[int, ctypes.c_uint16]]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def ibv_get_pkey_index(context:c.POINTER[struct_ibv_context], port_num:uint8_t, pkey:Annotated[int, ctypes.c_uint16]) -> Annotated[int, ctypes.c_int32]: ...
# --- Protection domains and memory registration.
@dll.bind
def ibv_alloc_pd(context:c.POINTER[struct_ibv_context]) -> c.POINTER[struct_ibv_pd]: ...
@dll.bind
def ibv_dealloc_pd(pd:c.POINTER[struct_ibv_pd]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def ibv_reg_mr_iova2(pd:c.POINTER[struct_ibv_pd], addr:ctypes.c_void_p, length:size_t, iova:uint64_t, access:Annotated[int, ctypes.c_uint32]) -> c.POINTER[struct_ibv_mr]: ...
@dll.bind
def ibv_reg_mr(pd:c.POINTER[struct_ibv_pd], addr:ctypes.c_void_p, length:size_t, access:Annotated[int, ctypes.c_int32]) -> c.POINTER[struct_ibv_mr]: ...
@dll.bind
def ibv_reg_mr_iova(pd:c.POINTER[struct_ibv_pd], addr:ctypes.c_void_p, length:size_t, iova:uint64_t, access:Annotated[int, ctypes.c_int32]) -> c.POINTER[struct_ibv_mr]: ...
@dll.bind
def ibv_reg_dmabuf_mr(pd:c.POINTER[struct_ibv_pd], offset:uint64_t, length:size_t, iova:uint64_t, fd:Annotated[int, ctypes.c_int32], access:Annotated[int, ctypes.c_int32]) -> c.POINTER[struct_ibv_mr]: ...
# Negative error codes returned by ibv_rereg_mr, describing how far the
# re-registration got before failing.
class enum_ibv_rereg_mr_err_code(Annotated[int, ctypes.c_int32], c.Enum): pass
IBV_REREG_MR_ERR_INPUT = enum_ibv_rereg_mr_err_code.define('IBV_REREG_MR_ERR_INPUT', -1)
IBV_REREG_MR_ERR_DONT_FORK_NEW = enum_ibv_rereg_mr_err_code.define('IBV_REREG_MR_ERR_DONT_FORK_NEW', -2)
IBV_REREG_MR_ERR_DO_FORK_OLD = enum_ibv_rereg_mr_err_code.define('IBV_REREG_MR_ERR_DO_FORK_OLD', -3)
IBV_REREG_MR_ERR_CMD = enum_ibv_rereg_mr_err_code.define('IBV_REREG_MR_ERR_CMD', -4)
IBV_REREG_MR_ERR_CMD_AND_DO_FORK_NEW = enum_ibv_rereg_mr_err_code.define('IBV_REREG_MR_ERR_CMD_AND_DO_FORK_NEW', -5)
@dll.bind
def ibv_rereg_mr(mr:c.POINTER[struct_ibv_mr], flags:Annotated[int, ctypes.c_int32], pd:c.POINTER[struct_ibv_pd], addr:ctypes.c_void_p, length:size_t, access:Annotated[int, ctypes.c_int32]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def ibv_dereg_mr(mr:c.POINTER[struct_ibv_mr]) -> Annotated[int, ctypes.c_int32]: ...
# --- Completion event channels.
@dll.bind
def ibv_create_comp_channel(context:c.POINTER[struct_ibv_context]) -> c.POINTER[struct_ibv_comp_channel]: ...
@dll.bind
def ibv_destroy_comp_channel(channel:c.POINTER[struct_ibv_comp_channel]) -> Annotated[int, ctypes.c_int32]: ...
# --- Completion queues, shared receive queues, queue pairs, address handles,
# multicast, and misc helpers. Int-returning calls follow the libibverbs
# convention of 0 on success / errno on failure.
@dll.bind
def ibv_create_cq(context:c.POINTER[struct_ibv_context], cqe:Annotated[int, ctypes.c_int32], cq_context:ctypes.c_void_p, channel:c.POINTER[struct_ibv_comp_channel], comp_vector:Annotated[int, ctypes.c_int32]) -> c.POINTER[struct_ibv_cq]: ...
@dll.bind
def ibv_resize_cq(cq:c.POINTER[struct_ibv_cq], cqe:Annotated[int, ctypes.c_int32]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def ibv_destroy_cq(cq:c.POINTER[struct_ibv_cq]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def ibv_get_cq_event(channel:c.POINTER[struct_ibv_comp_channel], cq:c.POINTER[c.POINTER[struct_ibv_cq]], cq_context:c.POINTER[ctypes.c_void_p]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def ibv_ack_cq_events(cq:c.POINTER[struct_ibv_cq], nevents:Annotated[int, ctypes.c_uint32]) -> None: ...
@dll.bind
def ibv_create_srq(pd:c.POINTER[struct_ibv_pd], srq_init_attr:c.POINTER[struct_ibv_srq_init_attr]) -> c.POINTER[struct_ibv_srq]: ...
@dll.bind
def ibv_modify_srq(srq:c.POINTER[struct_ibv_srq], srq_attr:c.POINTER[struct_ibv_srq_attr], srq_attr_mask:Annotated[int, ctypes.c_int32]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def ibv_query_srq(srq:c.POINTER[struct_ibv_srq], srq_attr:c.POINTER[struct_ibv_srq_attr]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def ibv_destroy_srq(srq:c.POINTER[struct_ibv_srq]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def ibv_create_qp(pd:c.POINTER[struct_ibv_pd], qp_init_attr:c.POINTER[struct_ibv_qp_init_attr]) -> c.POINTER[struct_ibv_qp]: ...
@dll.bind
def ibv_modify_qp(qp:c.POINTER[struct_ibv_qp], attr:c.POINTER[struct_ibv_qp_attr], attr_mask:Annotated[int, ctypes.c_int32]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def ibv_query_qp_data_in_order(qp:c.POINTER[struct_ibv_qp], op:enum_ibv_wr_opcode, flags:uint32_t) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def ibv_query_qp(qp:c.POINTER[struct_ibv_qp], attr:c.POINTER[struct_ibv_qp_attr], attr_mask:Annotated[int, ctypes.c_int32], init_attr:c.POINTER[struct_ibv_qp_init_attr]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def ibv_destroy_qp(qp:c.POINTER[struct_ibv_qp]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def ibv_create_ah(pd:c.POINTER[struct_ibv_pd], attr:c.POINTER[struct_ibv_ah_attr]) -> c.POINTER[struct_ibv_ah]: ...
@dll.bind
def ibv_init_ah_from_wc(context:c.POINTER[struct_ibv_context], port_num:uint8_t, wc:c.POINTER[struct_ibv_wc], grh:c.POINTER[struct_ibv_grh], ah_attr:c.POINTER[struct_ibv_ah_attr]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def ibv_create_ah_from_wc(pd:c.POINTER[struct_ibv_pd], wc:c.POINTER[struct_ibv_wc], grh:c.POINTER[struct_ibv_grh], port_num:uint8_t) -> c.POINTER[struct_ibv_ah]: ...
@dll.bind
def ibv_destroy_ah(ah:c.POINTER[struct_ibv_ah]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def ibv_attach_mcast(qp:c.POINTER[struct_ibv_qp], gid:c.POINTER[union_ibv_gid], lid:uint16_t) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def ibv_detach_mcast(qp:c.POINTER[struct_ibv_qp], gid:c.POINTER[union_ibv_gid], lid:uint16_t) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def ibv_fork_init() -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def ibv_is_fork_initialized() -> enum_ibv_fork_status: ...
# Enum-to-string helpers for diagnostics.
@dll.bind
def ibv_node_type_str(node_type:enum_ibv_node_type) -> c.POINTER[Annotated[bytes, ctypes.c_char]]: ...
@dll.bind
def ibv_port_state_str(port_state:enum_ibv_port_state) -> c.POINTER[Annotated[bytes, ctypes.c_char]]: ...
@dll.bind
def ibv_event_type_str(event:enum_ibv_event_type) -> c.POINTER[Annotated[bytes, ctypes.c_char]]: ...
@dll.bind
def ibv_resolve_eth_l2_from_gid(context:c.POINTER[struct_ibv_context], attr:c.POINTER[struct_ibv_ah_attr], eth_mac:c.Array[uint8_t, Literal[6]], vid:c.POINTER[uint16_t]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def ibv_set_ece(qp:c.POINTER[struct_ibv_qp], ece:c.POINTER[struct_ibv_ece]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def ibv_query_ece(qp:c.POINTER[struct_ibv_qp], ece:c.POINTER[struct_ibv_ece]) -> Annotated[int, ctypes.c_int32]: ...
# --- Kernel uapi enums (rdma/ib_user_ioctl_verbs.h). Values are bitmask flags
# or wire constants shared with the kernel; do not renumber.
class enum_ib_uverbs_core_support(Annotated[int, ctypes.c_uint32], c.Enum): pass
IB_UVERBS_CORE_SUPPORT_OPTIONAL_MR_ACCESS = enum_ib_uverbs_core_support.define('IB_UVERBS_CORE_SUPPORT_OPTIONAL_MR_ACCESS', 1)
# MR access-permission bits (passed as `access` to the ibv_reg_mr family).
class enum_ib_uverbs_access_flags(Annotated[int, ctypes.c_uint32], c.Enum): pass
IB_UVERBS_ACCESS_LOCAL_WRITE = enum_ib_uverbs_access_flags.define('IB_UVERBS_ACCESS_LOCAL_WRITE', 1)
IB_UVERBS_ACCESS_REMOTE_WRITE = enum_ib_uverbs_access_flags.define('IB_UVERBS_ACCESS_REMOTE_WRITE', 2)
IB_UVERBS_ACCESS_REMOTE_READ = enum_ib_uverbs_access_flags.define('IB_UVERBS_ACCESS_REMOTE_READ', 4)
IB_UVERBS_ACCESS_REMOTE_ATOMIC = enum_ib_uverbs_access_flags.define('IB_UVERBS_ACCESS_REMOTE_ATOMIC', 8)
IB_UVERBS_ACCESS_MW_BIND = enum_ib_uverbs_access_flags.define('IB_UVERBS_ACCESS_MW_BIND', 16)
IB_UVERBS_ACCESS_ZERO_BASED = enum_ib_uverbs_access_flags.define('IB_UVERBS_ACCESS_ZERO_BASED', 32)
IB_UVERBS_ACCESS_ON_DEMAND = enum_ib_uverbs_access_flags.define('IB_UVERBS_ACCESS_ON_DEMAND', 64)
IB_UVERBS_ACCESS_HUGETLB = enum_ib_uverbs_access_flags.define('IB_UVERBS_ACCESS_HUGETLB', 128)
IB_UVERBS_ACCESS_FLUSH_GLOBAL = enum_ib_uverbs_access_flags.define('IB_UVERBS_ACCESS_FLUSH_GLOBAL', 256)
IB_UVERBS_ACCESS_FLUSH_PERSISTENT = enum_ib_uverbs_access_flags.define('IB_UVERBS_ACCESS_FLUSH_PERSISTENT', 512)
IB_UVERBS_ACCESS_RELAXED_ORDERING = enum_ib_uverbs_access_flags.define('IB_UVERBS_ACCESS_RELAXED_ORDERING', 1048576)
IB_UVERBS_ACCESS_OPTIONAL_RANGE = enum_ib_uverbs_access_flags.define('IB_UVERBS_ACCESS_OPTIONAL_RANGE', 1072693248)
class enum_ib_uverbs_srq_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
IB_UVERBS_SRQT_BASIC = enum_ib_uverbs_srq_type.define('IB_UVERBS_SRQT_BASIC', 0)
IB_UVERBS_SRQT_XRC = enum_ib_uverbs_srq_type.define('IB_UVERBS_SRQT_XRC', 1)
IB_UVERBS_SRQT_TM = enum_ib_uverbs_srq_type.define('IB_UVERBS_SRQT_TM', 2)
class enum_ib_uverbs_wq_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
IB_UVERBS_WQT_RQ = enum_ib_uverbs_wq_type.define('IB_UVERBS_WQT_RQ', 0)
class enum_ib_uverbs_wq_flags(Annotated[int, ctypes.c_uint32], c.Enum): pass
IB_UVERBS_WQ_FLAGS_CVLAN_STRIPPING = enum_ib_uverbs_wq_flags.define('IB_UVERBS_WQ_FLAGS_CVLAN_STRIPPING', 1)
IB_UVERBS_WQ_FLAGS_SCATTER_FCS = enum_ib_uverbs_wq_flags.define('IB_UVERBS_WQ_FLAGS_SCATTER_FCS', 2)
IB_UVERBS_WQ_FLAGS_DELAY_DROP = enum_ib_uverbs_wq_flags.define('IB_UVERBS_WQ_FLAGS_DELAY_DROP', 4)
IB_UVERBS_WQ_FLAGS_PCI_WRITE_END_PADDING = enum_ib_uverbs_wq_flags.define('IB_UVERBS_WQ_FLAGS_PCI_WRITE_END_PADDING', 8)
# QP transport types; note the numbering is sparse (wire-protocol values).
class enum_ib_uverbs_qp_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
IB_UVERBS_QPT_RC = enum_ib_uverbs_qp_type.define('IB_UVERBS_QPT_RC', 2)
IB_UVERBS_QPT_UC = enum_ib_uverbs_qp_type.define('IB_UVERBS_QPT_UC', 3)
IB_UVERBS_QPT_UD = enum_ib_uverbs_qp_type.define('IB_UVERBS_QPT_UD', 4)
IB_UVERBS_QPT_RAW_PACKET = enum_ib_uverbs_qp_type.define('IB_UVERBS_QPT_RAW_PACKET', 8)
IB_UVERBS_QPT_XRC_INI = enum_ib_uverbs_qp_type.define('IB_UVERBS_QPT_XRC_INI', 9)
IB_UVERBS_QPT_XRC_TGT = enum_ib_uverbs_qp_type.define('IB_UVERBS_QPT_XRC_TGT', 10)
IB_UVERBS_QPT_DRIVER = enum_ib_uverbs_qp_type.define('IB_UVERBS_QPT_DRIVER', 255)
class enum_ib_uverbs_qp_create_flags(Annotated[int, ctypes.c_uint32], c.Enum): pass
IB_UVERBS_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = enum_ib_uverbs_qp_create_flags.define('IB_UVERBS_QP_CREATE_BLOCK_MULTICAST_LOOPBACK', 2)
IB_UVERBS_QP_CREATE_SCATTER_FCS = enum_ib_uverbs_qp_create_flags.define('IB_UVERBS_QP_CREATE_SCATTER_FCS', 256)
IB_UVERBS_QP_CREATE_CVLAN_STRIPPING = enum_ib_uverbs_qp_create_flags.define('IB_UVERBS_QP_CREATE_CVLAN_STRIPPING', 512)
IB_UVERBS_QP_CREATE_PCI_WRITE_END_PADDING = enum_ib_uverbs_qp_create_flags.define('IB_UVERBS_QP_CREATE_PCI_WRITE_END_PADDING', 2048)
IB_UVERBS_QP_CREATE_SQ_SIG_ALL = enum_ib_uverbs_qp_create_flags.define('IB_UVERBS_QP_CREATE_SQ_SIG_ALL', 4096)
# Port capability bitmask (query-port port_cap_flags). Values are individual
# bits; gaps in the sequence are bits reserved or defined elsewhere.
class enum_ib_uverbs_query_port_cap_flags(Annotated[int, ctypes.c_uint32], c.Enum): pass
IB_UVERBS_PCF_SM = enum_ib_uverbs_query_port_cap_flags.define('IB_UVERBS_PCF_SM', 2)
IB_UVERBS_PCF_NOTICE_SUP = enum_ib_uverbs_query_port_cap_flags.define('IB_UVERBS_PCF_NOTICE_SUP', 4)
IB_UVERBS_PCF_TRAP_SUP = enum_ib_uverbs_query_port_cap_flags.define('IB_UVERBS_PCF_TRAP_SUP', 8)
IB_UVERBS_PCF_OPT_IPD_SUP = enum_ib_uverbs_query_port_cap_flags.define('IB_UVERBS_PCF_OPT_IPD_SUP', 16)
IB_UVERBS_PCF_AUTO_MIGR_SUP = enum_ib_uverbs_query_port_cap_flags.define('IB_UVERBS_PCF_AUTO_MIGR_SUP', 32)
IB_UVERBS_PCF_SL_MAP_SUP = enum_ib_uverbs_query_port_cap_flags.define('IB_UVERBS_PCF_SL_MAP_SUP', 64)
IB_UVERBS_PCF_MKEY_NVRAM = enum_ib_uverbs_query_port_cap_flags.define('IB_UVERBS_PCF_MKEY_NVRAM', 128)
IB_UVERBS_PCF_PKEY_NVRAM = enum_ib_uverbs_query_port_cap_flags.define('IB_UVERBS_PCF_PKEY_NVRAM', 256)
IB_UVERBS_PCF_LED_INFO_SUP = enum_ib_uverbs_query_port_cap_flags.define('IB_UVERBS_PCF_LED_INFO_SUP', 512)
IB_UVERBS_PCF_SM_DISABLED = enum_ib_uverbs_query_port_cap_flags.define('IB_UVERBS_PCF_SM_DISABLED', 1024)
IB_UVERBS_PCF_SYS_IMAGE_GUID_SUP = enum_ib_uverbs_query_port_cap_flags.define('IB_UVERBS_PCF_SYS_IMAGE_GUID_SUP', 2048)
IB_UVERBS_PCF_PKEY_SW_EXT_PORT_TRAP_SUP = enum_ib_uverbs_query_port_cap_flags.define('IB_UVERBS_PCF_PKEY_SW_EXT_PORT_TRAP_SUP', 4096)
IB_UVERBS_PCF_EXTENDED_SPEEDS_SUP = enum_ib_uverbs_query_port_cap_flags.define('IB_UVERBS_PCF_EXTENDED_SPEEDS_SUP', 16384)
IB_UVERBS_PCF_CM_SUP = enum_ib_uverbs_query_port_cap_flags.define('IB_UVERBS_PCF_CM_SUP', 65536)
IB_UVERBS_PCF_SNMP_TUNNEL_SUP = enum_ib_uverbs_query_port_cap_flags.define('IB_UVERBS_PCF_SNMP_TUNNEL_SUP', 131072)
IB_UVERBS_PCF_REINIT_SUP = enum_ib_uverbs_query_port_cap_flags.define('IB_UVERBS_PCF_REINIT_SUP', 262144)
IB_UVERBS_PCF_DEVICE_MGMT_SUP = enum_ib_uverbs_query_port_cap_flags.define('IB_UVERBS_PCF_DEVICE_MGMT_SUP', 524288)
IB_UVERBS_PCF_VENDOR_CLASS_SUP = enum_ib_uverbs_query_port_cap_flags.define('IB_UVERBS_PCF_VENDOR_CLASS_SUP', 1048576)
IB_UVERBS_PCF_DR_NOTICE_SUP = enum_ib_uverbs_query_port_cap_flags.define('IB_UVERBS_PCF_DR_NOTICE_SUP', 2097152)
IB_UVERBS_PCF_CAP_MASK_NOTICE_SUP = enum_ib_uverbs_query_port_cap_flags.define('IB_UVERBS_PCF_CAP_MASK_NOTICE_SUP', 4194304)
IB_UVERBS_PCF_BOOT_MGMT_SUP = enum_ib_uverbs_query_port_cap_flags.define('IB_UVERBS_PCF_BOOT_MGMT_SUP', 8388608)
IB_UVERBS_PCF_LINK_LATENCY_SUP = enum_ib_uverbs_query_port_cap_flags.define('IB_UVERBS_PCF_LINK_LATENCY_SUP', 16777216)
IB_UVERBS_PCF_CLIENT_REG_SUP = enum_ib_uverbs_query_port_cap_flags.define('IB_UVERBS_PCF_CLIENT_REG_SUP', 33554432)
IB_UVERBS_PCF_LINK_SPEED_WIDTH_TABLE_SUP = enum_ib_uverbs_query_port_cap_flags.define('IB_UVERBS_PCF_LINK_SPEED_WIDTH_TABLE_SUP', 134217728)
IB_UVERBS_PCF_VENDOR_SPECIFIC_MADS_TABLE_SUP = enum_ib_uverbs_query_port_cap_flags.define('IB_UVERBS_PCF_VENDOR_SPECIFIC_MADS_TABLE_SUP', 268435456)
IB_UVERBS_PCF_MCAST_PKEY_TRAP_SUPPRESSION_SUP = enum_ib_uverbs_query_port_cap_flags.define('IB_UVERBS_PCF_MCAST_PKEY_TRAP_SUPPRESSION_SUP', 536870912)
IB_UVERBS_PCF_MCAST_FDB_TOP_SUP = enum_ib_uverbs_query_port_cap_flags.define('IB_UVERBS_PCF_MCAST_FDB_TOP_SUP', 1073741824)
IB_UVERBS_PCF_HIERARCHY_INFO_SUP = enum_ib_uverbs_query_port_cap_flags.define('IB_UVERBS_PCF_HIERARCHY_INFO_SUP', 2147483648)
# Defined out of numeric order in the original header; kept as generated.
IB_UVERBS_PCF_IP_BASED_GIDS = enum_ib_uverbs_query_port_cap_flags.define('IB_UVERBS_PCF_IP_BASED_GIDS', 67108864)
class enum_ib_uverbs_query_port_flags(Annotated[int, ctypes.c_uint32], c.Enum): pass
IB_UVERBS_QPF_GRH_REQUIRED = enum_ib_uverbs_query_port_flags.define('IB_UVERBS_QPF_GRH_REQUIRED', 1)
# IV generation algorithm for AES-GCM ESP key material.
class enum_ib_uverbs_flow_action_esp_keymat_aes_gcm_iv_algo(Annotated[int, ctypes.c_uint32], c.Enum): pass
IB_UVERBS_FLOW_ACTION_IV_ALGO_SEQ = enum_ib_uverbs_flow_action_esp_keymat_aes_gcm_iv_algo.define('IB_UVERBS_FLOW_ACTION_IV_ALGO_SEQ', 0)
# AES-GCM key material blob referenced by keymat_ptr when
# keymat_proto == IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM.
@c.record
class struct_ib_uverbs_flow_action_esp_keymat_aes_gcm(c.Struct):
  SIZE = 56
  iv: Annotated[Annotated[int, ctypes.c_uint64], 0]
  iv_algo: Annotated[Annotated[int, ctypes.c_uint32], 8]
  salt: Annotated[Annotated[int, ctypes.c_uint32], 12]
  icv_len: Annotated[Annotated[int, ctypes.c_uint32], 16]
  key_len: Annotated[Annotated[int, ctypes.c_uint32], 20]
  aes_key: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[8]], 24]
# Bitmap-based anti-replay window config (replay_ptr when
# replay_proto == IB_UVERBS_FLOW_ACTION_ESP_REPLAY_BMP).
@c.record
class struct_ib_uverbs_flow_action_esp_replay_bmp(c.Struct):
  SIZE = 4
  size: Annotated[Annotated[int, ctypes.c_uint32], 0]
# ESP action mode flags. Several names map to bit value 0: they are the
# default halves of two-option pairs (e.g. TUNNEL=0 vs TRANSPORT=2).
class enum_ib_uverbs_flow_action_esp_flags(Annotated[int, ctypes.c_uint32], c.Enum): pass
IB_UVERBS_FLOW_ACTION_ESP_FLAGS_INLINE_CRYPTO = enum_ib_uverbs_flow_action_esp_flags.define('IB_UVERBS_FLOW_ACTION_ESP_FLAGS_INLINE_CRYPTO', 0)
IB_UVERBS_FLOW_ACTION_ESP_FLAGS_FULL_OFFLOAD = enum_ib_uverbs_flow_action_esp_flags.define('IB_UVERBS_FLOW_ACTION_ESP_FLAGS_FULL_OFFLOAD', 1)
IB_UVERBS_FLOW_ACTION_ESP_FLAGS_TUNNEL = enum_ib_uverbs_flow_action_esp_flags.define('IB_UVERBS_FLOW_ACTION_ESP_FLAGS_TUNNEL', 0)
IB_UVERBS_FLOW_ACTION_ESP_FLAGS_TRANSPORT = enum_ib_uverbs_flow_action_esp_flags.define('IB_UVERBS_FLOW_ACTION_ESP_FLAGS_TRANSPORT', 2)
IB_UVERBS_FLOW_ACTION_ESP_FLAGS_DECRYPT = enum_ib_uverbs_flow_action_esp_flags.define('IB_UVERBS_FLOW_ACTION_ESP_FLAGS_DECRYPT', 0)
IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT = enum_ib_uverbs_flow_action_esp_flags.define('IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT', 4)
IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW = enum_ib_uverbs_flow_action_esp_flags.define('IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW', 8)
class enum_ib_uverbs_read_counters_flags(Annotated[int, ctypes.c_uint32], c.Enum): pass
IB_UVERBS_READ_COUNTERS_PREFER_CACHED = enum_ib_uverbs_read_counters_flags.define('IB_UVERBS_READ_COUNTERS_PREFER_CACHED', 1)
class enum_ib_uverbs_advise_mr_flag(Annotated[int, ctypes.c_uint32], c.Enum): pass
IB_UVERBS_ADVISE_MR_FLAG_FLUSH = enum_ib_uverbs_advise_mr_flag.define('IB_UVERBS_ADVISE_MR_FLAG_FLUSH', 1)
# Extended query-port response: the legacy response followed by newer fields.
@c.record
class struct_ib_uverbs_query_port_resp_ex(c.Struct):
  SIZE = 48
  legacy_resp: Annotated[struct_ib_uverbs_query_port_resp, 0]
  port_cap_flags2: Annotated[Annotated[int, ctypes.c_uint16], 40]
  reserved: Annotated[c.Array[Annotated[int, ctypes.c_ubyte], Literal[2]], 42]
  active_speed_ex: Annotated[Annotated[int, ctypes.c_uint32], 44]
# Legacy kernel query-port response; a tightly-packed wire struct, mostly
# single-byte fields.
@c.record
class struct_ib_uverbs_query_port_resp(c.Struct):
  SIZE = 40
  port_cap_flags: Annotated[Annotated[int, ctypes.c_uint32], 0]
  max_msg_sz: Annotated[Annotated[int, ctypes.c_uint32], 4]
  bad_pkey_cntr: Annotated[Annotated[int, ctypes.c_uint32], 8]
  qkey_viol_cntr: Annotated[Annotated[int, ctypes.c_uint32], 12]
  gid_tbl_len: Annotated[Annotated[int, ctypes.c_uint32], 16]
  pkey_tbl_len: Annotated[Annotated[int, ctypes.c_uint16], 20]
  lid: Annotated[Annotated[int, ctypes.c_uint16], 22]
  sm_lid: Annotated[Annotated[int, ctypes.c_uint16], 24]
  state: Annotated[Annotated[int, ctypes.c_ubyte], 26]
  max_mtu: Annotated[Annotated[int, ctypes.c_ubyte], 27]
  active_mtu: Annotated[Annotated[int, ctypes.c_ubyte], 28]
  lmc: Annotated[Annotated[int, ctypes.c_ubyte], 29]
  max_vl_num: Annotated[Annotated[int, ctypes.c_ubyte], 30]
  sm_sl: Annotated[Annotated[int, ctypes.c_ubyte], 31]
  subnet_timeout: Annotated[Annotated[int, ctypes.c_ubyte], 32]
  init_type_reply: Annotated[Annotated[int, ctypes.c_ubyte], 33]
  active_width: Annotated[Annotated[int, ctypes.c_ubyte], 34]
  active_speed: Annotated[Annotated[int, ctypes.c_ubyte], 35]
  phys_state: Annotated[Annotated[int, ctypes.c_ubyte], 36]
  link_layer: Annotated[Annotated[int, ctypes.c_ubyte], 37]
  flags: Annotated[Annotated[int, ctypes.c_ubyte], 38]
  reserved: Annotated[Annotated[int, ctypes.c_ubyte], 39]
__u8: TypeAlias = Annotated[int, ctypes.c_ubyte]
@c.record
class struct_ib_uverbs_qp_cap(c.Struct):
SIZE = 20
max_send_wr: Annotated[Annotated[int, ctypes.c_uint32], 0]
max_recv_wr: Annotated[Annotated[int, ctypes.c_uint32], 4]
max_send_sge: Annotated[Annotated[int, ctypes.c_uint32], 8]
max_recv_sge: Annotated[Annotated[int, ctypes.c_uint32], 12]
max_inline_data: Annotated[Annotated[int, ctypes.c_uint32], 16]
# Identifier of the kernel-side RDMA provider driver.
class enum_rdma_driver_id(Annotated[int, ctypes.c_uint32], c.Enum): pass
RDMA_DRIVER_UNKNOWN = enum_rdma_driver_id.define('RDMA_DRIVER_UNKNOWN', 0)
RDMA_DRIVER_MLX5 = enum_rdma_driver_id.define('RDMA_DRIVER_MLX5', 1)
RDMA_DRIVER_MLX4 = enum_rdma_driver_id.define('RDMA_DRIVER_MLX4', 2)
RDMA_DRIVER_CXGB3 = enum_rdma_driver_id.define('RDMA_DRIVER_CXGB3', 3)
RDMA_DRIVER_CXGB4 = enum_rdma_driver_id.define('RDMA_DRIVER_CXGB4', 4)
RDMA_DRIVER_MTHCA = enum_rdma_driver_id.define('RDMA_DRIVER_MTHCA', 5)
RDMA_DRIVER_BNXT_RE = enum_rdma_driver_id.define('RDMA_DRIVER_BNXT_RE', 6)
RDMA_DRIVER_OCRDMA = enum_rdma_driver_id.define('RDMA_DRIVER_OCRDMA', 7)
RDMA_DRIVER_NES = enum_rdma_driver_id.define('RDMA_DRIVER_NES', 8)
# I40IW and IRDMA intentionally share value 9: upstream renamed the driver and
# kept the old name as an alias of the same enum value. Not a bug.
RDMA_DRIVER_I40IW = enum_rdma_driver_id.define('RDMA_DRIVER_I40IW', 9)
RDMA_DRIVER_IRDMA = enum_rdma_driver_id.define('RDMA_DRIVER_IRDMA', 9)
RDMA_DRIVER_VMW_PVRDMA = enum_rdma_driver_id.define('RDMA_DRIVER_VMW_PVRDMA', 10)
RDMA_DRIVER_QEDR = enum_rdma_driver_id.define('RDMA_DRIVER_QEDR', 11)
RDMA_DRIVER_HNS = enum_rdma_driver_id.define('RDMA_DRIVER_HNS', 12)
RDMA_DRIVER_USNIC = enum_rdma_driver_id.define('RDMA_DRIVER_USNIC', 13)
RDMA_DRIVER_RXE = enum_rdma_driver_id.define('RDMA_DRIVER_RXE', 14)
RDMA_DRIVER_HFI1 = enum_rdma_driver_id.define('RDMA_DRIVER_HFI1', 15)
RDMA_DRIVER_QIB = enum_rdma_driver_id.define('RDMA_DRIVER_QIB', 16)
RDMA_DRIVER_EFA = enum_rdma_driver_id.define('RDMA_DRIVER_EFA', 17)
RDMA_DRIVER_SIW = enum_rdma_driver_id.define('RDMA_DRIVER_SIW', 18)
RDMA_DRIVER_ERDMA = enum_rdma_driver_id.define('RDMA_DRIVER_ERDMA', 19)
RDMA_DRIVER_MANA = enum_rdma_driver_id.define('RDMA_DRIVER_MANA', 20)
# GID type reported in GID table entries.
class enum_ib_uverbs_gid_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
IB_UVERBS_GID_TYPE_IB = enum_ib_uverbs_gid_type.define('IB_UVERBS_GID_TYPE_IB', 0)
IB_UVERBS_GID_TYPE_ROCE_V1 = enum_ib_uverbs_gid_type.define('IB_UVERBS_GID_TYPE_ROCE_V1', 1)
IB_UVERBS_GID_TYPE_ROCE_V2 = enum_ib_uverbs_gid_type.define('IB_UVERBS_GID_TYPE_ROCE_V2', 2)
# One GID table entry; the 128-bit GID is stored as two u64 words.
@c.record
class struct_ib_uverbs_gid_entry(c.Struct):
  SIZE = 32
  gid: Annotated[c.Array[Annotated[int, ctypes.c_uint64], Literal[2]], 0]
  gid_index: Annotated[Annotated[int, ctypes.c_uint32], 16]
  port_num: Annotated[Annotated[int, ctypes.c_uint32], 20]
  gid_type: Annotated[Annotated[int, ctypes.c_uint32], 24]
  netdev_ifindex: Annotated[Annotated[int, ctypes.c_uint32], 28]
# Command numbers for the legacy write() uverbs ABI.
class enum_ib_uverbs_write_cmds(Annotated[int, ctypes.c_uint32], c.Enum): pass
IB_USER_VERBS_CMD_GET_CONTEXT = enum_ib_uverbs_write_cmds.define('IB_USER_VERBS_CMD_GET_CONTEXT', 0)
IB_USER_VERBS_CMD_QUERY_DEVICE = enum_ib_uverbs_write_cmds.define('IB_USER_VERBS_CMD_QUERY_DEVICE', 1)
IB_USER_VERBS_CMD_QUERY_PORT = enum_ib_uverbs_write_cmds.define('IB_USER_VERBS_CMD_QUERY_PORT', 2)
IB_USER_VERBS_CMD_ALLOC_PD = enum_ib_uverbs_write_cmds.define('IB_USER_VERBS_CMD_ALLOC_PD', 3)
IB_USER_VERBS_CMD_DEALLOC_PD = enum_ib_uverbs_write_cmds.define('IB_USER_VERBS_CMD_DEALLOC_PD', 4)
IB_USER_VERBS_CMD_CREATE_AH = enum_ib_uverbs_write_cmds.define('IB_USER_VERBS_CMD_CREATE_AH', 5)
IB_USER_VERBS_CMD_MODIFY_AH = enum_ib_uverbs_write_cmds.define('IB_USER_VERBS_CMD_MODIFY_AH', 6)
IB_USER_VERBS_CMD_QUERY_AH = enum_ib_uverbs_write_cmds.define('IB_USER_VERBS_CMD_QUERY_AH', 7)
IB_USER_VERBS_CMD_DESTROY_AH = enum_ib_uverbs_write_cmds.define('IB_USER_VERBS_CMD_DESTROY_AH', 8)
IB_USER_VERBS_CMD_REG_MR = enum_ib_uverbs_write_cmds.define('IB_USER_VERBS_CMD_REG_MR', 9)
IB_USER_VERBS_CMD_REG_SMR = enum_ib_uverbs_write_cmds.define('IB_USER_VERBS_CMD_REG_SMR', 10)
IB_USER_VERBS_CMD_REREG_MR = enum_ib_uverbs_write_cmds.define('IB_USER_VERBS_CMD_REREG_MR', 11)
IB_USER_VERBS_CMD_QUERY_MR = enum_ib_uverbs_write_cmds.define('IB_USER_VERBS_CMD_QUERY_MR', 12)
IB_USER_VERBS_CMD_DEREG_MR = enum_ib_uverbs_write_cmds.define('IB_USER_VERBS_CMD_DEREG_MR', 13)
IB_USER_VERBS_CMD_ALLOC_MW = enum_ib_uverbs_write_cmds.define('IB_USER_VERBS_CMD_ALLOC_MW', 14)
IB_USER_VERBS_CMD_BIND_MW = enum_ib_uverbs_write_cmds.define('IB_USER_VERBS_CMD_BIND_MW', 15)
IB_USER_VERBS_CMD_DEALLOC_MW = enum_ib_uverbs_write_cmds.define('IB_USER_VERBS_CMD_DEALLOC_MW', 16)
IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL = enum_ib_uverbs_write_cmds.define('IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL', 17)
IB_USER_VERBS_CMD_CREATE_CQ = enum_ib_uverbs_write_cmds.define('IB_USER_VERBS_CMD_CREATE_CQ', 18)
IB_USER_VERBS_CMD_RESIZE_CQ = enum_ib_uverbs_write_cmds.define('IB_USER_VERBS_CMD_RESIZE_CQ', 19)
IB_USER_VERBS_CMD_DESTROY_CQ = enum_ib_uverbs_write_cmds.define('IB_USER_VERBS_CMD_DESTROY_CQ', 20)
IB_USER_VERBS_CMD_POLL_CQ = enum_ib_uverbs_write_cmds.define('IB_USER_VERBS_CMD_POLL_CQ', 21)
IB_USER_VERBS_CMD_PEEK_CQ = enum_ib_uverbs_write_cmds.define('IB_USER_VERBS_CMD_PEEK_CQ', 22)
IB_USER_VERBS_CMD_REQ_NOTIFY_CQ = enum_ib_uverbs_write_cmds.define('IB_USER_VERBS_CMD_REQ_NOTIFY_CQ', 23)
IB_USER_VERBS_CMD_CREATE_QP = enum_ib_uverbs_write_cmds.define('IB_USER_VERBS_CMD_CREATE_QP', 24)
IB_USER_VERBS_CMD_QUERY_QP = enum_ib_uverbs_write_cmds.define('IB_USER_VERBS_CMD_QUERY_QP', 25)
IB_USER_VERBS_CMD_MODIFY_QP = enum_ib_uverbs_write_cmds.define('IB_USER_VERBS_CMD_MODIFY_QP', 26)
IB_USER_VERBS_CMD_DESTROY_QP = enum_ib_uverbs_write_cmds.define('IB_USER_VERBS_CMD_DESTROY_QP', 27)
IB_USER_VERBS_CMD_POST_SEND = enum_ib_uverbs_write_cmds.define('IB_USER_VERBS_CMD_POST_SEND', 28)
IB_USER_VERBS_CMD_POST_RECV = enum_ib_uverbs_write_cmds.define('IB_USER_VERBS_CMD_POST_RECV', 29)
IB_USER_VERBS_CMD_ATTACH_MCAST = enum_ib_uverbs_write_cmds.define('IB_USER_VERBS_CMD_ATTACH_MCAST', 30)
IB_USER_VERBS_CMD_DETACH_MCAST = enum_ib_uverbs_write_cmds.define('IB_USER_VERBS_CMD_DETACH_MCAST', 31)
IB_USER_VERBS_CMD_CREATE_SRQ = enum_ib_uverbs_write_cmds.define('IB_USER_VERBS_CMD_CREATE_SRQ', 32)
IB_USER_VERBS_CMD_MODIFY_SRQ = enum_ib_uverbs_write_cmds.define('IB_USER_VERBS_CMD_MODIFY_SRQ', 33)
IB_USER_VERBS_CMD_QUERY_SRQ = enum_ib_uverbs_write_cmds.define('IB_USER_VERBS_CMD_QUERY_SRQ', 34)
IB_USER_VERBS_CMD_DESTROY_SRQ = enum_ib_uverbs_write_cmds.define('IB_USER_VERBS_CMD_DESTROY_SRQ', 35)
IB_USER_VERBS_CMD_POST_SRQ_RECV = enum_ib_uverbs_write_cmds.define('IB_USER_VERBS_CMD_POST_SRQ_RECV', 36)
IB_USER_VERBS_CMD_OPEN_XRCD = enum_ib_uverbs_write_cmds.define('IB_USER_VERBS_CMD_OPEN_XRCD', 37)
IB_USER_VERBS_CMD_CLOSE_XRCD = enum_ib_uverbs_write_cmds.define('IB_USER_VERBS_CMD_CLOSE_XRCD', 38)
IB_USER_VERBS_CMD_CREATE_XSRQ = enum_ib_uverbs_write_cmds.define('IB_USER_VERBS_CMD_CREATE_XSRQ', 39)
IB_USER_VERBS_CMD_OPEN_QP = enum_ib_uverbs_write_cmds.define('IB_USER_VERBS_CMD_OPEN_QP', 40)
# Extended (EX) command numbers; anonymous enum in the C header, hence the
# generated placeholder name. Values deliberately overlap the legacy ones above
# (e.g. CREATE_CQ = 18) because the EX namespace is selected by a flag bit.
class _anonenum5(Annotated[int, ctypes.c_uint32], c.Enum): pass
IB_USER_VERBS_EX_CMD_QUERY_DEVICE = _anonenum5.define('IB_USER_VERBS_EX_CMD_QUERY_DEVICE', 1)
IB_USER_VERBS_EX_CMD_CREATE_CQ = _anonenum5.define('IB_USER_VERBS_EX_CMD_CREATE_CQ', 18)
IB_USER_VERBS_EX_CMD_CREATE_QP = _anonenum5.define('IB_USER_VERBS_EX_CMD_CREATE_QP', 24)
IB_USER_VERBS_EX_CMD_MODIFY_QP = _anonenum5.define('IB_USER_VERBS_EX_CMD_MODIFY_QP', 26)
IB_USER_VERBS_EX_CMD_CREATE_FLOW = _anonenum5.define('IB_USER_VERBS_EX_CMD_CREATE_FLOW', 50)
IB_USER_VERBS_EX_CMD_DESTROY_FLOW = _anonenum5.define('IB_USER_VERBS_EX_CMD_DESTROY_FLOW', 51)
IB_USER_VERBS_EX_CMD_CREATE_WQ = _anonenum5.define('IB_USER_VERBS_EX_CMD_CREATE_WQ', 52)
IB_USER_VERBS_EX_CMD_MODIFY_WQ = _anonenum5.define('IB_USER_VERBS_EX_CMD_MODIFY_WQ', 53)
IB_USER_VERBS_EX_CMD_DESTROY_WQ = _anonenum5.define('IB_USER_VERBS_EX_CMD_DESTROY_WQ', 54)
IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL = _anonenum5.define('IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL', 55)
IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL = _anonenum5.define('IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL', 56)
IB_USER_VERBS_EX_CMD_MODIFY_CQ = _anonenum5.define('IB_USER_VERBS_EX_CMD_MODIFY_CQ', 57)
# Flush placement / selectivity levels (FLUSH work requests).
class enum_ib_placement_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
IB_FLUSH_GLOBAL = enum_ib_placement_type.define('IB_FLUSH_GLOBAL', 1)
IB_FLUSH_PERSISTENT = enum_ib_placement_type.define('IB_FLUSH_PERSISTENT', 2)
class enum_ib_selectivity_level(Annotated[int, ctypes.c_uint32], c.Enum): pass
IB_FLUSH_RANGE = enum_ib_selectivity_level.define('IB_FLUSH_RANGE', 0)
IB_FLUSH_MR = enum_ib_selectivity_level.define('IB_FLUSH_MR', 1)
# Async event record read from the async event file descriptor.
@c.record
class struct_ib_uverbs_async_event_desc(c.Struct):
  SIZE = 16
  element: Annotated[Annotated[int, ctypes.c_uint64], 0]
  event_type: Annotated[Annotated[int, ctypes.c_uint32], 8]
  reserved: Annotated[Annotated[int, ctypes.c_uint32], 12]
# Completion event record read from a completion channel fd.
@c.record
class struct_ib_uverbs_comp_event_desc(c.Struct):
  SIZE = 8
  cq_handle: Annotated[Annotated[int, ctypes.c_uint64], 0]
# CQ moderation limits advertised by the device.
@c.record
class struct_ib_uverbs_cq_moderation_caps(c.Struct):
  SIZE = 8
  max_cq_moderation_count: Annotated[Annotated[int, ctypes.c_uint16], 0]
  max_cq_moderation_period: Annotated[Annotated[int, ctypes.c_uint16], 2]
  reserved: Annotated[Annotated[int, ctypes.c_uint32], 4]
# Header prepended to every legacy write() command; sizes are in 4-byte words.
@c.record
class struct_ib_uverbs_cmd_hdr(c.Struct):
  SIZE = 8
  command: Annotated[Annotated[int, ctypes.c_uint32], 0]
  in_words: Annotated[Annotated[int, ctypes.c_uint16], 4]
  out_words: Annotated[Annotated[int, ctypes.c_uint16], 6]
# Additional header for extended (EX) commands; `response` is a user pointer.
@c.record
class struct_ib_uverbs_ex_cmd_hdr(c.Struct):
  SIZE = 16
  response: Annotated[Annotated[int, ctypes.c_uint64], 0]
  provider_in_words: Annotated[Annotated[int, ctypes.c_uint16], 8]
  provider_out_words: Annotated[Annotated[int, ctypes.c_uint16], 10]
  cmd_hdr_reserved: Annotated[Annotated[int, ctypes.c_uint32], 12]
# GET_CONTEXT request. The Literal[0] trailing array at offset == SIZE mirrors
# the C flexible array member for provider-specific data (same pattern below).
@c.record
class struct_ib_uverbs_get_context(c.Struct):
  SIZE = 8
  response: Annotated[Annotated[int, ctypes.c_uint64], 0]
  driver_data: Annotated[c.Array[Annotated[int, ctypes.c_uint64], Literal[0]], 8]
# GET_CONTEXT response.
@c.record
class struct_ib_uverbs_get_context_resp(c.Struct):
  SIZE = 8
  async_fd: Annotated[Annotated[int, ctypes.c_uint32], 0]
  num_comp_vectors: Annotated[Annotated[int, ctypes.c_uint32], 4]
  driver_data: Annotated[c.Array[Annotated[int, ctypes.c_uint64], Literal[0]], 8]
# QUERY_DEVICE request.
@c.record
class struct_ib_uverbs_query_device(c.Struct):
  SIZE = 8
  response: Annotated[Annotated[int, ctypes.c_uint64], 0]
  driver_data: Annotated[c.Array[Annotated[int, ctypes.c_uint64], Literal[0]], 8]
# QUERY_DEVICE response: device identity (GUIDs, vendor IDs) followed by the
# full set of resource limits. Offsets mirror the C layout exactly.
@c.record
class struct_ib_uverbs_query_device_resp(c.Struct):
  SIZE = 176
  fw_ver: Annotated[Annotated[int, ctypes.c_uint64], 0]
  node_guid: Annotated[Annotated[int, ctypes.c_uint64], 8]
  sys_image_guid: Annotated[Annotated[int, ctypes.c_uint64], 16]
  max_mr_size: Annotated[Annotated[int, ctypes.c_uint64], 24]
  page_size_cap: Annotated[Annotated[int, ctypes.c_uint64], 32]
  vendor_id: Annotated[Annotated[int, ctypes.c_uint32], 40]
  vendor_part_id: Annotated[Annotated[int, ctypes.c_uint32], 44]
  hw_ver: Annotated[Annotated[int, ctypes.c_uint32], 48]
  max_qp: Annotated[Annotated[int, ctypes.c_uint32], 52]
  max_qp_wr: Annotated[Annotated[int, ctypes.c_uint32], 56]
  device_cap_flags: Annotated[Annotated[int, ctypes.c_uint32], 60]
  max_sge: Annotated[Annotated[int, ctypes.c_uint32], 64]
  max_sge_rd: Annotated[Annotated[int, ctypes.c_uint32], 68]
  max_cq: Annotated[Annotated[int, ctypes.c_uint32], 72]
  max_cqe: Annotated[Annotated[int, ctypes.c_uint32], 76]
  max_mr: Annotated[Annotated[int, ctypes.c_uint32], 80]
  max_pd: Annotated[Annotated[int, ctypes.c_uint32], 84]
  max_qp_rd_atom: Annotated[Annotated[int, ctypes.c_uint32], 88]
  max_ee_rd_atom: Annotated[Annotated[int, ctypes.c_uint32], 92]
  max_res_rd_atom: Annotated[Annotated[int, ctypes.c_uint32], 96]
  max_qp_init_rd_atom: Annotated[Annotated[int, ctypes.c_uint32], 100]
  max_ee_init_rd_atom: Annotated[Annotated[int, ctypes.c_uint32], 104]
  atomic_cap: Annotated[Annotated[int, ctypes.c_uint32], 108]
  max_ee: Annotated[Annotated[int, ctypes.c_uint32], 112]
  max_rdd: Annotated[Annotated[int, ctypes.c_uint32], 116]
  max_mw: Annotated[Annotated[int, ctypes.c_uint32], 120]
  max_raw_ipv6_qp: Annotated[Annotated[int, ctypes.c_uint32], 124]
  max_raw_ethy_qp: Annotated[Annotated[int, ctypes.c_uint32], 128]
  max_mcast_grp: Annotated[Annotated[int, ctypes.c_uint32], 132]
  max_mcast_qp_attach: Annotated[Annotated[int, ctypes.c_uint32], 136]
  max_total_mcast_qp_attach: Annotated[Annotated[int, ctypes.c_uint32], 140]
  max_ah: Annotated[Annotated[int, ctypes.c_uint32], 144]
  max_fmr: Annotated[Annotated[int, ctypes.c_uint32], 148]
  max_map_per_fmr: Annotated[Annotated[int, ctypes.c_uint32], 152]
  max_srq: Annotated[Annotated[int, ctypes.c_uint32], 156]
  max_srq_wr: Annotated[Annotated[int, ctypes.c_uint32], 160]
  max_srq_sge: Annotated[Annotated[int, ctypes.c_uint32], 164]
  max_pkeys: Annotated[Annotated[int, ctypes.c_uint16], 168]
  local_ca_ack_delay: Annotated[Annotated[int, ctypes.c_ubyte], 170]
  phys_port_cnt: Annotated[Annotated[int, ctypes.c_ubyte], 171]
  reserved: Annotated[c.Array[Annotated[int, ctypes.c_ubyte], Literal[4]], 172]
# Extended QUERY_DEVICE request (comp_mask selects optional response fields).
@c.record
class struct_ib_uverbs_ex_query_device(c.Struct):
  SIZE = 8
  comp_mask: Annotated[Annotated[int, ctypes.c_uint32], 0]
  reserved: Annotated[Annotated[int, ctypes.c_uint32], 4]
# On-demand-paging capability block.
# NOTE(review): per_transport_caps references a class defined just below —
# presumably resolved lazily by the generated-bindings machinery.
@c.record
class struct_ib_uverbs_odp_caps(c.Struct):
  SIZE = 24
  general_caps: Annotated[Annotated[int, ctypes.c_uint64], 0]
  per_transport_caps: Annotated[struct_ib_uverbs_odp_caps_per_transport_caps, 8]
  reserved: Annotated[Annotated[int, ctypes.c_uint32], 20]
# Per-transport ODP capability bits (anonymous struct in the C header).
@c.record
class struct_ib_uverbs_odp_caps_per_transport_caps(c.Struct):
  SIZE = 12
  rc_odp_caps: Annotated[Annotated[int, ctypes.c_uint32], 0]
  uc_odp_caps: Annotated[Annotated[int, ctypes.c_uint32], 4]
  ud_odp_caps: Annotated[Annotated[int, ctypes.c_uint32], 8]
# RSS (receive-side scaling) capabilities.
@c.record
class struct_ib_uverbs_rss_caps(c.Struct):
  SIZE = 16
  supported_qpts: Annotated[Annotated[int, ctypes.c_uint32], 0]
  max_rwq_indirection_tables: Annotated[Annotated[int, ctypes.c_uint32], 4]
  max_rwq_indirection_table_size: Annotated[Annotated[int, ctypes.c_uint32], 8]
  reserved: Annotated[Annotated[int, ctypes.c_uint32], 12]
# Tag-matching capabilities.
@c.record
class struct_ib_uverbs_tm_caps(c.Struct):
  SIZE = 24
  max_rndv_hdr_size: Annotated[Annotated[int, ctypes.c_uint32], 0]
  max_num_tags: Annotated[Annotated[int, ctypes.c_uint32], 4]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 8]
  max_ops: Annotated[Annotated[int, ctypes.c_uint32], 12]
  max_sge: Annotated[Annotated[int, ctypes.c_uint32], 16]
  reserved: Annotated[Annotated[int, ctypes.c_uint32], 20]
# Extended QUERY_DEVICE response: embeds the legacy response at offset 0 and
# appends the optional capability blocks declared above.
@c.record
class struct_ib_uverbs_ex_query_device_resp(c.Struct):
  SIZE = 304
  base: Annotated[struct_ib_uverbs_query_device_resp, 0]
  comp_mask: Annotated[Annotated[int, ctypes.c_uint32], 176]
  response_length: Annotated[Annotated[int, ctypes.c_uint32], 180]
  odp_caps: Annotated[struct_ib_uverbs_odp_caps, 184]
  timestamp_mask: Annotated[Annotated[int, ctypes.c_uint64], 208]
  hca_core_clock: Annotated[Annotated[int, ctypes.c_uint64], 216]
  device_cap_flags_ex: Annotated[Annotated[int, ctypes.c_uint64], 224]
  rss_caps: Annotated[struct_ib_uverbs_rss_caps, 232]
  max_wq_type_rq: Annotated[Annotated[int, ctypes.c_uint32], 248]
  raw_packet_caps: Annotated[Annotated[int, ctypes.c_uint32], 252]
  tm_caps: Annotated[struct_ib_uverbs_tm_caps, 256]
  cq_moderation_caps: Annotated[struct_ib_uverbs_cq_moderation_caps, 280]
  max_dm_size: Annotated[Annotated[int, ctypes.c_uint64], 288]
  xrc_odp_caps: Annotated[Annotated[int, ctypes.c_uint32], 296]
  reserved: Annotated[Annotated[int, ctypes.c_uint32], 300]
# QUERY_PORT request; `response` is the user-space address for the reply.
@c.record
class struct_ib_uverbs_query_port(c.Struct):
  SIZE = 16
  response: Annotated[Annotated[int, ctypes.c_uint64], 0]
  port_num: Annotated[Annotated[int, ctypes.c_ubyte], 8]
  reserved: Annotated[c.Array[Annotated[int, ctypes.c_ubyte], Literal[7]], 9]
  driver_data: Annotated[c.Array[Annotated[int, ctypes.c_uint64], Literal[0]], 16]
# ALLOC_PD request/response (protection domain).
@c.record
class struct_ib_uverbs_alloc_pd(c.Struct):
  SIZE = 8
  response: Annotated[Annotated[int, ctypes.c_uint64], 0]
  driver_data: Annotated[c.Array[Annotated[int, ctypes.c_uint64], Literal[0]], 8]
@c.record
class struct_ib_uverbs_alloc_pd_resp(c.Struct):
  SIZE = 4
  pd_handle: Annotated[Annotated[int, ctypes.c_uint32], 0]
  driver_data: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[0]], 4]
# DEALLOC_PD request.
@c.record
class struct_ib_uverbs_dealloc_pd(c.Struct):
  SIZE = 4
  pd_handle: Annotated[Annotated[int, ctypes.c_uint32], 0]
# OPEN_XRCD request/response (XRC domain, opened via a file descriptor).
@c.record
class struct_ib_uverbs_open_xrcd(c.Struct):
  SIZE = 16
  response: Annotated[Annotated[int, ctypes.c_uint64], 0]
  fd: Annotated[Annotated[int, ctypes.c_uint32], 8]
  oflags: Annotated[Annotated[int, ctypes.c_uint32], 12]
  driver_data: Annotated[c.Array[Annotated[int, ctypes.c_uint64], Literal[0]], 16]
@c.record
class struct_ib_uverbs_open_xrcd_resp(c.Struct):
  SIZE = 4
  xrcd_handle: Annotated[Annotated[int, ctypes.c_uint32], 0]
  driver_data: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[0]], 4]
# CLOSE_XRCD request.
@c.record
class struct_ib_uverbs_close_xrcd(c.Struct):
  SIZE = 4
  xrcd_handle: Annotated[Annotated[int, ctypes.c_uint32], 0]
# REG_MR request: register the user buffer [start, start+length) with the
# given PD and access flags; hca_va is the virtual address seen by the HCA.
@c.record
class struct_ib_uverbs_reg_mr(c.Struct):
  SIZE = 40
  response: Annotated[Annotated[int, ctypes.c_uint64], 0]
  start: Annotated[Annotated[int, ctypes.c_uint64], 8]
  length: Annotated[Annotated[int, ctypes.c_uint64], 16]
  hca_va: Annotated[Annotated[int, ctypes.c_uint64], 24]
  pd_handle: Annotated[Annotated[int, ctypes.c_uint32], 32]
  access_flags: Annotated[Annotated[int, ctypes.c_uint32], 36]
  driver_data: Annotated[c.Array[Annotated[int, ctypes.c_uint64], Literal[0]], 40]
# REG_MR response: MR handle plus local/remote keys.
@c.record
class struct_ib_uverbs_reg_mr_resp(c.Struct):
  SIZE = 12
  mr_handle: Annotated[Annotated[int, ctypes.c_uint32], 0]
  lkey: Annotated[Annotated[int, ctypes.c_uint32], 4]
  rkey: Annotated[Annotated[int, ctypes.c_uint32], 8]
  driver_data: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[0]], 12]
# REREG_MR request: `flags` selects which attributes of the MR to change.
@c.record
class struct_ib_uverbs_rereg_mr(c.Struct):
  SIZE = 48
  response: Annotated[Annotated[int, ctypes.c_uint64], 0]
  mr_handle: Annotated[Annotated[int, ctypes.c_uint32], 8]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 12]
  start: Annotated[Annotated[int, ctypes.c_uint64], 16]
  length: Annotated[Annotated[int, ctypes.c_uint64], 24]
  hca_va: Annotated[Annotated[int, ctypes.c_uint64], 32]
  pd_handle: Annotated[Annotated[int, ctypes.c_uint32], 40]
  access_flags: Annotated[Annotated[int, ctypes.c_uint32], 44]
  driver_data: Annotated[c.Array[Annotated[int, ctypes.c_uint64], Literal[0]], 48]
@c.record
class struct_ib_uverbs_rereg_mr_resp(c.Struct):
  SIZE = 8
  lkey: Annotated[Annotated[int, ctypes.c_uint32], 0]
  rkey: Annotated[Annotated[int, ctypes.c_uint32], 4]
  driver_data: Annotated[c.Array[Annotated[int, ctypes.c_uint64], Literal[0]], 8]
# DEREG_MR request.
@c.record
class struct_ib_uverbs_dereg_mr(c.Struct):
  SIZE = 4
  mr_handle: Annotated[Annotated[int, ctypes.c_uint32], 0]
# ALLOC_MW request/response (memory window) and DEALLOC_MW request.
@c.record
class struct_ib_uverbs_alloc_mw(c.Struct):
  SIZE = 16
  response: Annotated[Annotated[int, ctypes.c_uint64], 0]
  pd_handle: Annotated[Annotated[int, ctypes.c_uint32], 8]
  mw_type: Annotated[Annotated[int, ctypes.c_ubyte], 12]
  reserved: Annotated[c.Array[Annotated[int, ctypes.c_ubyte], Literal[3]], 13]
  driver_data: Annotated[c.Array[Annotated[int, ctypes.c_uint64], Literal[0]], 16]
@c.record
class struct_ib_uverbs_alloc_mw_resp(c.Struct):
  SIZE = 8
  mw_handle: Annotated[Annotated[int, ctypes.c_uint32], 0]
  rkey: Annotated[Annotated[int, ctypes.c_uint32], 4]
  driver_data: Annotated[c.Array[Annotated[int, ctypes.c_uint64], Literal[0]], 8]
@c.record
class struct_ib_uverbs_dealloc_mw(c.Struct):
  SIZE = 4
  mw_handle: Annotated[Annotated[int, ctypes.c_uint32], 0]
# CREATE_COMP_CHANNEL request/response (completion event channel fd).
@c.record
class struct_ib_uverbs_create_comp_channel(c.Struct):
  SIZE = 8
  response: Annotated[Annotated[int, ctypes.c_uint64], 0]
@c.record
class struct_ib_uverbs_create_comp_channel_resp(c.Struct):
  SIZE = 4
  fd: Annotated[Annotated[int, ctypes.c_uint32], 0]
# CREATE_CQ request; comp_channel is signed (-1 means "no channel" in the ABI —
# note the c_int32 here vs. c_uint32 elsewhere).
@c.record
class struct_ib_uverbs_create_cq(c.Struct):
  SIZE = 32
  response: Annotated[Annotated[int, ctypes.c_uint64], 0]
  user_handle: Annotated[Annotated[int, ctypes.c_uint64], 8]
  cqe: Annotated[Annotated[int, ctypes.c_uint32], 16]
  comp_vector: Annotated[Annotated[int, ctypes.c_uint32], 20]
  comp_channel: Annotated[Annotated[int, ctypes.c_int32], 24]
  reserved: Annotated[Annotated[int, ctypes.c_uint32], 28]
  driver_data: Annotated[c.Array[Annotated[int, ctypes.c_uint64], Literal[0]], 32]
# Alias mirroring the kernel's __s32 typedef.
__s32: TypeAlias = Annotated[int, ctypes.c_int32]
# Flags accepted by the extended CREATE_CQ command.
class enum_ib_uverbs_ex_create_cq_flags(Annotated[int, ctypes.c_uint32], c.Enum): pass
IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION = enum_ib_uverbs_ex_create_cq_flags.define('IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION', 1)
IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN = enum_ib_uverbs_ex_create_cq_flags.define('IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN', 2)
# Extended CREATE_CQ request (no embedded response pointer; see ex_cmd_hdr).
@c.record
class struct_ib_uverbs_ex_create_cq(c.Struct):
  SIZE = 32
  user_handle: Annotated[Annotated[int, ctypes.c_uint64], 0]
  cqe: Annotated[Annotated[int, ctypes.c_uint32], 8]
  comp_vector: Annotated[Annotated[int, ctypes.c_uint32], 12]
  comp_channel: Annotated[Annotated[int, ctypes.c_int32], 16]
  comp_mask: Annotated[Annotated[int, ctypes.c_uint32], 20]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 24]
  reserved: Annotated[Annotated[int, ctypes.c_uint32], 28]
# CREATE_CQ responses (legacy and extended, the latter embedding the former).
@c.record
class struct_ib_uverbs_create_cq_resp(c.Struct):
  SIZE = 8
  cq_handle: Annotated[Annotated[int, ctypes.c_uint32], 0]
  cqe: Annotated[Annotated[int, ctypes.c_uint32], 4]
  driver_data: Annotated[c.Array[Annotated[int, ctypes.c_uint64], Literal[0]], 8]
@c.record
class struct_ib_uverbs_ex_create_cq_resp(c.Struct):
  SIZE = 16
  base: Annotated[struct_ib_uverbs_create_cq_resp, 0]
  comp_mask: Annotated[Annotated[int, ctypes.c_uint32], 8]
  response_length: Annotated[Annotated[int, ctypes.c_uint32], 12]
# RESIZE_CQ request/response.
@c.record
class struct_ib_uverbs_resize_cq(c.Struct):
  SIZE = 16
  response: Annotated[Annotated[int, ctypes.c_uint64], 0]
  cq_handle: Annotated[Annotated[int, ctypes.c_uint32], 8]
  cqe: Annotated[Annotated[int, ctypes.c_uint32], 12]
  driver_data: Annotated[c.Array[Annotated[int, ctypes.c_uint64], Literal[0]], 16]
@c.record
class struct_ib_uverbs_resize_cq_resp(c.Struct):
  SIZE = 8
  cqe: Annotated[Annotated[int, ctypes.c_uint32], 0]
  reserved: Annotated[Annotated[int, ctypes.c_uint32], 4]
  driver_data: Annotated[c.Array[Annotated[int, ctypes.c_uint64], Literal[0]], 8]
# POLL_CQ request: fetch up to `ne` completions from cq_handle.
@c.record
class struct_ib_uverbs_poll_cq(c.Struct):
  SIZE = 16
  response: Annotated[Annotated[int, ctypes.c_uint64], 0]
  cq_handle: Annotated[Annotated[int, ctypes.c_uint32], 8]
  ne: Annotated[Annotated[int, ctypes.c_uint32], 12]
# Work-completion opcodes reported in struct_ib_uverbs_wc.opcode.
class enum_ib_uverbs_wc_opcode(Annotated[int, ctypes.c_uint32], c.Enum): pass
IB_UVERBS_WC_SEND = enum_ib_uverbs_wc_opcode.define('IB_UVERBS_WC_SEND', 0)
IB_UVERBS_WC_RDMA_WRITE = enum_ib_uverbs_wc_opcode.define('IB_UVERBS_WC_RDMA_WRITE', 1)
IB_UVERBS_WC_RDMA_READ = enum_ib_uverbs_wc_opcode.define('IB_UVERBS_WC_RDMA_READ', 2)
IB_UVERBS_WC_COMP_SWAP = enum_ib_uverbs_wc_opcode.define('IB_UVERBS_WC_COMP_SWAP', 3)
IB_UVERBS_WC_FETCH_ADD = enum_ib_uverbs_wc_opcode.define('IB_UVERBS_WC_FETCH_ADD', 4)
IB_UVERBS_WC_BIND_MW = enum_ib_uverbs_wc_opcode.define('IB_UVERBS_WC_BIND_MW', 5)
IB_UVERBS_WC_LOCAL_INV = enum_ib_uverbs_wc_opcode.define('IB_UVERBS_WC_LOCAL_INV', 6)
IB_UVERBS_WC_TSO = enum_ib_uverbs_wc_opcode.define('IB_UVERBS_WC_TSO', 7)
IB_UVERBS_WC_FLUSH = enum_ib_uverbs_wc_opcode.define('IB_UVERBS_WC_FLUSH', 8)
IB_UVERBS_WC_ATOMIC_WRITE = enum_ib_uverbs_wc_opcode.define('IB_UVERBS_WC_ATOMIC_WRITE', 9)
# Work completion entry returned by POLL_CQ.
# NOTE(review): `ex` (struct_ib_uverbs_wc_ex) is defined below this class —
# presumably resolved lazily by the generator's annotation handling.
@c.record
class struct_ib_uverbs_wc(c.Struct):
  SIZE = 48
  wr_id: Annotated[Annotated[int, ctypes.c_uint64], 0]
  status: Annotated[Annotated[int, ctypes.c_uint32], 8]
  opcode: Annotated[Annotated[int, ctypes.c_uint32], 12]
  vendor_err: Annotated[Annotated[int, ctypes.c_uint32], 16]
  byte_len: Annotated[Annotated[int, ctypes.c_uint32], 20]
  ex: Annotated[struct_ib_uverbs_wc_ex, 24]
  qp_num: Annotated[Annotated[int, ctypes.c_uint32], 28]
  src_qp: Annotated[Annotated[int, ctypes.c_uint32], 32]
  wc_flags: Annotated[Annotated[int, ctypes.c_uint32], 36]
  pkey_index: Annotated[Annotated[int, ctypes.c_uint16], 40]
  slid: Annotated[Annotated[int, ctypes.c_uint16], 42]
  sl: Annotated[Annotated[int, ctypes.c_ubyte], 44]
  dlid_path_bits: Annotated[Annotated[int, ctypes.c_ubyte], 45]
  port_num: Annotated[Annotated[int, ctypes.c_ubyte], 46]
  reserved: Annotated[Annotated[int, ctypes.c_ubyte], 47]
# Models a C union: both members sit at offset 0 and the struct is 4 bytes.
@c.record
class struct_ib_uverbs_wc_ex(c.Struct):
  SIZE = 4
  imm_data: Annotated[Annotated[int, ctypes.c_uint32], 0]
  invalidate_rkey: Annotated[Annotated[int, ctypes.c_uint32], 0]
# POLL_CQ response: `count` completions follow as a flexible array of wc.
@c.record
class struct_ib_uverbs_poll_cq_resp(c.Struct):
  SIZE = 8
  count: Annotated[Annotated[int, ctypes.c_uint32], 0]
  reserved: Annotated[Annotated[int, ctypes.c_uint32], 4]
  wc: Annotated[c.Array[struct_ib_uverbs_wc, Literal[0]], 8]
# REQ_NOTIFY_CQ request.
@c.record
class struct_ib_uverbs_req_notify_cq(c.Struct):
  SIZE = 8
  cq_handle: Annotated[Annotated[int, ctypes.c_uint32], 0]
  solicited_only: Annotated[Annotated[int, ctypes.c_uint32], 4]
# DESTROY_CQ request/response.
@c.record
class struct_ib_uverbs_destroy_cq(c.Struct):
  SIZE = 16
  response: Annotated[Annotated[int, ctypes.c_uint64], 0]
  cq_handle: Annotated[Annotated[int, ctypes.c_uint32], 8]
  reserved: Annotated[Annotated[int, ctypes.c_uint32], 12]
@c.record
class struct_ib_uverbs_destroy_cq_resp(c.Struct):
  SIZE = 8
  comp_events_reported: Annotated[Annotated[int, ctypes.c_uint32], 0]
  async_events_reported: Annotated[Annotated[int, ctypes.c_uint32], 4]
# GRH routing information (used inside the address-handle attributes).
@c.record
class struct_ib_uverbs_global_route(c.Struct):
  SIZE = 24
  dgid: Annotated[c.Array[Annotated[int, ctypes.c_ubyte], Literal[16]], 0]
  flow_label: Annotated[Annotated[int, ctypes.c_uint32], 16]
  sgid_index: Annotated[Annotated[int, ctypes.c_ubyte], 20]
  hop_limit: Annotated[Annotated[int, ctypes.c_ubyte], 21]
  traffic_class: Annotated[Annotated[int, ctypes.c_ubyte], 22]
  reserved: Annotated[Annotated[int, ctypes.c_ubyte], 23]
# Address-handle attributes; embeds the global route at offset 0.
@c.record
class struct_ib_uverbs_ah_attr(c.Struct):
  SIZE = 32
  grh: Annotated[struct_ib_uverbs_global_route, 0]
  dlid: Annotated[Annotated[int, ctypes.c_uint16], 24]
  sl: Annotated[Annotated[int, ctypes.c_ubyte], 26]
  src_path_bits: Annotated[Annotated[int, ctypes.c_ubyte], 27]
  static_rate: Annotated[Annotated[int, ctypes.c_ubyte], 28]
  is_global: Annotated[Annotated[int, ctypes.c_ubyte], 29]
  port_num: Annotated[Annotated[int, ctypes.c_ubyte], 30]
  reserved: Annotated[Annotated[int, ctypes.c_ubyte], 31]
# Full QP attribute set (modify/query QP); qp_attr_mask selects valid fields.
@c.record
class struct_ib_uverbs_qp_attr(c.Struct):
  SIZE = 144
  qp_attr_mask: Annotated[Annotated[int, ctypes.c_uint32], 0]
  qp_state: Annotated[Annotated[int, ctypes.c_uint32], 4]
  cur_qp_state: Annotated[Annotated[int, ctypes.c_uint32], 8]
  path_mtu: Annotated[Annotated[int, ctypes.c_uint32], 12]
  path_mig_state: Annotated[Annotated[int, ctypes.c_uint32], 16]
  qkey: Annotated[Annotated[int, ctypes.c_uint32], 20]
  rq_psn: Annotated[Annotated[int, ctypes.c_uint32], 24]
  sq_psn: Annotated[Annotated[int, ctypes.c_uint32], 28]
  dest_qp_num: Annotated[Annotated[int, ctypes.c_uint32], 32]
  qp_access_flags: Annotated[Annotated[int, ctypes.c_uint32], 36]
  ah_attr: Annotated[struct_ib_uverbs_ah_attr, 40]
  alt_ah_attr: Annotated[struct_ib_uverbs_ah_attr, 72]
  max_send_wr: Annotated[Annotated[int, ctypes.c_uint32], 104]
  max_recv_wr: Annotated[Annotated[int, ctypes.c_uint32], 108]
  max_send_sge: Annotated[Annotated[int, ctypes.c_uint32], 112]
  max_recv_sge: Annotated[Annotated[int, ctypes.c_uint32], 116]
  max_inline_data: Annotated[Annotated[int, ctypes.c_uint32], 120]
  pkey_index: Annotated[Annotated[int, ctypes.c_uint16], 124]
  alt_pkey_index: Annotated[Annotated[int, ctypes.c_uint16], 126]
  en_sqd_async_notify: Annotated[Annotated[int, ctypes.c_ubyte], 128]
  sq_draining: Annotated[Annotated[int, ctypes.c_ubyte], 129]
  max_rd_atomic: Annotated[Annotated[int, ctypes.c_ubyte], 130]
  max_dest_rd_atomic: Annotated[Annotated[int, ctypes.c_ubyte], 131]
  min_rnr_timer: Annotated[Annotated[int, ctypes.c_ubyte], 132]
  port_num: Annotated[Annotated[int, ctypes.c_ubyte], 133]
  timeout: Annotated[Annotated[int, ctypes.c_ubyte], 134]
  retry_cnt: Annotated[Annotated[int, ctypes.c_ubyte], 135]
  rnr_retry: Annotated[Annotated[int, ctypes.c_ubyte], 136]
  alt_port_num: Annotated[Annotated[int, ctypes.c_ubyte], 137]
  alt_timeout: Annotated[Annotated[int, ctypes.c_ubyte], 138]
  reserved: Annotated[c.Array[Annotated[int, ctypes.c_ubyte], Literal[5]], 139]
# Legacy CREATE_QP request: handles of associated PD/CQs/SRQ plus capacity
# limits (same fields as struct_ib_uverbs_qp_cap, inlined here by the ABI).
@c.record
class struct_ib_uverbs_create_qp(c.Struct):
  SIZE = 56
  response: Annotated[Annotated[int, ctypes.c_uint64], 0]
  user_handle: Annotated[Annotated[int, ctypes.c_uint64], 8]
  pd_handle: Annotated[Annotated[int, ctypes.c_uint32], 16]
  send_cq_handle: Annotated[Annotated[int, ctypes.c_uint32], 20]
  recv_cq_handle: Annotated[Annotated[int, ctypes.c_uint32], 24]
  srq_handle: Annotated[Annotated[int, ctypes.c_uint32], 28]
  max_send_wr: Annotated[Annotated[int, ctypes.c_uint32], 32]
  max_recv_wr: Annotated[Annotated[int, ctypes.c_uint32], 36]
  max_send_sge: Annotated[Annotated[int, ctypes.c_uint32], 40]
  max_recv_sge: Annotated[Annotated[int, ctypes.c_uint32], 44]
  max_inline_data: Annotated[Annotated[int, ctypes.c_uint32], 48]
  sq_sig_all: Annotated[Annotated[int, ctypes.c_ubyte], 52]
  qp_type: Annotated[Annotated[int, ctypes.c_ubyte], 53]
  is_srq: Annotated[Annotated[int, ctypes.c_ubyte], 54]
  reserved: Annotated[Annotated[int, ctypes.c_ubyte], 55]
  driver_data: Annotated[c.Array[Annotated[int, ctypes.c_uint64], Literal[0]], 56]
class enum_ib_uverbs_create_qp_mask(Annotated[int, ctypes.c_uint32], c.Enum): pass
IB_UVERBS_CREATE_QP_MASK_IND_TABLE = enum_ib_uverbs_create_qp_mask.define('IB_UVERBS_CREATE_QP_MASK_IND_TABLE', 1)
class _anonenum6(Annotated[int, ctypes.c_uint32], c.Enum): pass
IB_UVERBS_CREATE_QP_SUP_COMP_MASK = _anonenum6.define('IB_UVERBS_CREATE_QP_SUP_COMP_MASK', 1)
@c.record
class struct_ib_uverbs_ex_create_qp(c.Struct):
SIZE = 64
user_handle: Annotated[Annotated[int, ctypes.c_uint64], 0]
pd_handle: Annotated[Annotated[int, ctypes.c_uint32], 8]
send_cq_handle: Annotated[Annotated[int, ctypes.c_uint32], 12]
recv_cq_handle: Annotated[Annotated[int, ctypes.c_uint32], 16]
srq_handle: Annotated[Annotated[int, ctypes.c_uint32], 20]
max_send_wr: Annotated[Annotated[int, ctypes.c_uint32], 24]
max_recv_wr: Annotated[Annotated[int, ctypes.c_uint32], 28]
max_send_sge: Annotated[Annotated[int, ctypes.c_uint32], 32]
max_recv_sge: Annotated[Annotated[int, ctypes.c_uint32], 36]
max_inline_data: Annotated[Annotated[int, ctypes.c_uint32], 40]
sq_sig_all: Annotated[Annotated[int, ctypes.c_ubyte], 44]
qp_type: Annotated[Annotated[int, ctypes.c_ubyte], 45]
is_srq: Annotated[Annotated[int, ctypes.c_ubyte], 46]
reserved: Annotated[Annotated[int, ctypes.c_ubyte], 47]
comp_mask: Annotated[Annotated[int, ctypes.c_uint32], 48]
create_flags: Annotated[Annotated[int, ctypes.c_uint32], 52]
rwq_ind_tbl_handle: Annotated[Annotated[int, ctypes.c_uint32], 56]
source_qpn: Annotated[Annotated[int, ctypes.c_uint32], 60]
@c.record
class struct_ib_uverbs_open_qp(c.Struct):
SIZE = 32
response: Annotated[Annotated[int, ctypes.c_uint64], 0]
user_handle: Annotated[Annotated[int, ctypes.c_uint64], 8]
pd_handle: Annotated[Annotated[int, ctypes.c_uint32], 16]
qpn: Annotated[Annotated[int, ctypes.c_uint32], 20]
qp_type: Annotated[Annotated[int, ctypes.c_ubyte], 24]
reserved: Annotated[c.Array[Annotated[int, ctypes.c_ubyte], Literal[7]], 25]
driver_data: Annotated[c.Array[Annotated[int, ctypes.c_uint64], Literal[0]], 32]
@c.record
class struct_ib_uverbs_create_qp_resp(c.Struct):
SIZE = 32
qp_handle: Annotated[Annotated[int, ctypes.c_uint32], 0]
qpn: Annotated[Annotated[int, ctypes.c_uint32], 4]
max_send_wr: Annotated[Annotated[int, ctypes.c_uint32], 8]
max_recv_wr: Annotated[Annotated[int, ctypes.c_uint32], 12]
max_send_sge: Annotated[Annotated[int, ctypes.c_uint32], 16]
max_recv_sge: Annotated[Annotated[int, ctypes.c_uint32], 20]
max_inline_data: Annotated[Annotated[int, ctypes.c_uint32], 24]
reserved: Annotated[Annotated[int, ctypes.c_uint32], 28]
driver_data: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[0]], 32]
@c.record
class struct_ib_uverbs_ex_create_qp_resp(c.Struct):
SIZE = 40
base: Annotated[struct_ib_uverbs_create_qp_resp, 0]
comp_mask: Annotated[Annotated[int, ctypes.c_uint32], 32]
response_length: Annotated[Annotated[int, ctypes.c_uint32], 36]
# Address-vector / destination description used inside QP query and modify
# commands (GID, LID, SL, path bits, rate, hop limit, etc.).
@c.record
class struct_ib_uverbs_qp_dest(c.Struct):
  SIZE = 32
  dgid: Annotated[c.Array[Annotated[int, ctypes.c_ubyte], Literal[16]], 0]
  flow_label: Annotated[Annotated[int, ctypes.c_uint32], 16]
  dlid: Annotated[Annotated[int, ctypes.c_uint16], 20]
  reserved: Annotated[Annotated[int, ctypes.c_uint16], 22]
  sgid_index: Annotated[Annotated[int, ctypes.c_ubyte], 24]
  hop_limit: Annotated[Annotated[int, ctypes.c_ubyte], 25]
  traffic_class: Annotated[Annotated[int, ctypes.c_ubyte], 26]
  sl: Annotated[Annotated[int, ctypes.c_ubyte], 27]
  src_path_bits: Annotated[Annotated[int, ctypes.c_ubyte], 28]
  static_rate: Annotated[Annotated[int, ctypes.c_ubyte], 29]
  is_global: Annotated[Annotated[int, ctypes.c_ubyte], 30]
  port_num: Annotated[Annotated[int, ctypes.c_ubyte], 31]
# "Query QP" command: response buffer pointer, QP handle, and the attribute
# mask selecting which attributes to query.
@c.record
class struct_ib_uverbs_query_qp(c.Struct):
  SIZE = 16
  response: Annotated[Annotated[int, ctypes.c_uint64], 0]
  qp_handle: Annotated[Annotated[int, ctypes.c_uint32], 8]
  attr_mask: Annotated[Annotated[int, ctypes.c_uint32], 12]
  # zero-length trailing array: driver-private payload starts at SIZE
  driver_data: Annotated[c.Array[Annotated[int, ctypes.c_uint64], Literal[0]], 16]
# "Query QP" response: primary and alternate path destinations followed by the
# full set of QP attributes (capabilities, PSNs, state, timers).
@c.record
class struct_ib_uverbs_query_qp_resp(c.Struct):
  SIZE = 128
  dest: Annotated[struct_ib_uverbs_qp_dest, 0]
  alt_dest: Annotated[struct_ib_uverbs_qp_dest, 32]
  max_send_wr: Annotated[Annotated[int, ctypes.c_uint32], 64]
  max_recv_wr: Annotated[Annotated[int, ctypes.c_uint32], 68]
  max_send_sge: Annotated[Annotated[int, ctypes.c_uint32], 72]
  max_recv_sge: Annotated[Annotated[int, ctypes.c_uint32], 76]
  max_inline_data: Annotated[Annotated[int, ctypes.c_uint32], 80]
  qkey: Annotated[Annotated[int, ctypes.c_uint32], 84]
  rq_psn: Annotated[Annotated[int, ctypes.c_uint32], 88]
  sq_psn: Annotated[Annotated[int, ctypes.c_uint32], 92]
  dest_qp_num: Annotated[Annotated[int, ctypes.c_uint32], 96]
  qp_access_flags: Annotated[Annotated[int, ctypes.c_uint32], 100]
  pkey_index: Annotated[Annotated[int, ctypes.c_uint16], 104]
  alt_pkey_index: Annotated[Annotated[int, ctypes.c_uint16], 106]
  qp_state: Annotated[Annotated[int, ctypes.c_ubyte], 108]
  cur_qp_state: Annotated[Annotated[int, ctypes.c_ubyte], 109]
  path_mtu: Annotated[Annotated[int, ctypes.c_ubyte], 110]
  path_mig_state: Annotated[Annotated[int, ctypes.c_ubyte], 111]
  sq_draining: Annotated[Annotated[int, ctypes.c_ubyte], 112]
  max_rd_atomic: Annotated[Annotated[int, ctypes.c_ubyte], 113]
  max_dest_rd_atomic: Annotated[Annotated[int, ctypes.c_ubyte], 114]
  min_rnr_timer: Annotated[Annotated[int, ctypes.c_ubyte], 115]
  port_num: Annotated[Annotated[int, ctypes.c_ubyte], 116]
  timeout: Annotated[Annotated[int, ctypes.c_ubyte], 117]
  retry_cnt: Annotated[Annotated[int, ctypes.c_ubyte], 118]
  rnr_retry: Annotated[Annotated[int, ctypes.c_ubyte], 119]
  alt_port_num: Annotated[Annotated[int, ctypes.c_ubyte], 120]
  alt_timeout: Annotated[Annotated[int, ctypes.c_ubyte], 121]
  sq_sig_all: Annotated[Annotated[int, ctypes.c_ubyte], 122]
  reserved: Annotated[c.Array[Annotated[int, ctypes.c_ubyte], Literal[5]], 123]
  # zero-length trailing array: driver-private payload starts at SIZE
  driver_data: Annotated[c.Array[Annotated[int, ctypes.c_uint64], Literal[0]], 128]
# "Modify QP" command: new attribute values plus attr_mask selecting which of
# them the kernel should apply.
@c.record
class struct_ib_uverbs_modify_qp(c.Struct):
  SIZE = 112
  dest: Annotated[struct_ib_uverbs_qp_dest, 0]
  alt_dest: Annotated[struct_ib_uverbs_qp_dest, 32]
  qp_handle: Annotated[Annotated[int, ctypes.c_uint32], 64]
  attr_mask: Annotated[Annotated[int, ctypes.c_uint32], 68]
  qkey: Annotated[Annotated[int, ctypes.c_uint32], 72]
  rq_psn: Annotated[Annotated[int, ctypes.c_uint32], 76]
  sq_psn: Annotated[Annotated[int, ctypes.c_uint32], 80]
  dest_qp_num: Annotated[Annotated[int, ctypes.c_uint32], 84]
  qp_access_flags: Annotated[Annotated[int, ctypes.c_uint32], 88]
  pkey_index: Annotated[Annotated[int, ctypes.c_uint16], 92]
  alt_pkey_index: Annotated[Annotated[int, ctypes.c_uint16], 94]
  qp_state: Annotated[Annotated[int, ctypes.c_ubyte], 96]
  cur_qp_state: Annotated[Annotated[int, ctypes.c_ubyte], 97]
  path_mtu: Annotated[Annotated[int, ctypes.c_ubyte], 98]
  path_mig_state: Annotated[Annotated[int, ctypes.c_ubyte], 99]
  en_sqd_async_notify: Annotated[Annotated[int, ctypes.c_ubyte], 100]
  max_rd_atomic: Annotated[Annotated[int, ctypes.c_ubyte], 101]
  max_dest_rd_atomic: Annotated[Annotated[int, ctypes.c_ubyte], 102]
  min_rnr_timer: Annotated[Annotated[int, ctypes.c_ubyte], 103]
  port_num: Annotated[Annotated[int, ctypes.c_ubyte], 104]
  timeout: Annotated[Annotated[int, ctypes.c_ubyte], 105]
  retry_cnt: Annotated[Annotated[int, ctypes.c_ubyte], 106]
  rnr_retry: Annotated[Annotated[int, ctypes.c_ubyte], 107]
  alt_port_num: Annotated[Annotated[int, ctypes.c_ubyte], 108]
  alt_timeout: Annotated[Annotated[int, ctypes.c_ubyte], 109]
  reserved: Annotated[c.Array[Annotated[int, ctypes.c_ubyte], Literal[2]], 110]
  # zero-length trailing array: driver-private payload starts at SIZE
  driver_data: Annotated[c.Array[Annotated[int, ctypes.c_uint64], Literal[0]], 112]
# Extended "modify QP": base command plus a rate-limit field; the response is
# the standard extended comp_mask / response_length pair.
@c.record
class struct_ib_uverbs_ex_modify_qp(c.Struct):
  SIZE = 120
  base: Annotated[struct_ib_uverbs_modify_qp, 0]
  rate_limit: Annotated[Annotated[int, ctypes.c_uint32], 112]
  reserved: Annotated[Annotated[int, ctypes.c_uint32], 116]
@c.record
class struct_ib_uverbs_ex_modify_qp_resp(c.Struct):
  SIZE = 8
  comp_mask: Annotated[Annotated[int, ctypes.c_uint32], 0]
  response_length: Annotated[Annotated[int, ctypes.c_uint32], 4]
# "Destroy QP" command and its response (count of async events that were
# reported on the QP before destruction).
@c.record
class struct_ib_uverbs_destroy_qp(c.Struct):
  SIZE = 16
  response: Annotated[Annotated[int, ctypes.c_uint64], 0]
  qp_handle: Annotated[Annotated[int, ctypes.c_uint32], 8]
  reserved: Annotated[Annotated[int, ctypes.c_uint32], 12]
@c.record
class struct_ib_uverbs_destroy_qp_resp(c.Struct):
  SIZE = 4
  events_reported: Annotated[Annotated[int, ctypes.c_uint32], 0]
# Scatter/gather element: a (address, length, lkey) triple referencing a
# registered memory region.
@c.record
class struct_ib_uverbs_sge(c.Struct):
  SIZE = 16
  addr: Annotated[Annotated[int, ctypes.c_uint64], 0]
  length: Annotated[Annotated[int, ctypes.c_uint32], 8]
  lkey: Annotated[Annotated[int, ctypes.c_uint32], 12]
# Work-request opcodes for posted sends (RDMA write/read, send, atomics, etc.).
# Values are sequential, matching the kernel's enum ib_uverbs_wr_opcode.
class enum_ib_uverbs_wr_opcode(Annotated[int, ctypes.c_uint32], c.Enum): pass
IB_UVERBS_WR_RDMA_WRITE = enum_ib_uverbs_wr_opcode.define('IB_UVERBS_WR_RDMA_WRITE', 0)
IB_UVERBS_WR_RDMA_WRITE_WITH_IMM = enum_ib_uverbs_wr_opcode.define('IB_UVERBS_WR_RDMA_WRITE_WITH_IMM', 1)
IB_UVERBS_WR_SEND = enum_ib_uverbs_wr_opcode.define('IB_UVERBS_WR_SEND', 2)
IB_UVERBS_WR_SEND_WITH_IMM = enum_ib_uverbs_wr_opcode.define('IB_UVERBS_WR_SEND_WITH_IMM', 3)
IB_UVERBS_WR_RDMA_READ = enum_ib_uverbs_wr_opcode.define('IB_UVERBS_WR_RDMA_READ', 4)
IB_UVERBS_WR_ATOMIC_CMP_AND_SWP = enum_ib_uverbs_wr_opcode.define('IB_UVERBS_WR_ATOMIC_CMP_AND_SWP', 5)
IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD = enum_ib_uverbs_wr_opcode.define('IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD', 6)
IB_UVERBS_WR_LOCAL_INV = enum_ib_uverbs_wr_opcode.define('IB_UVERBS_WR_LOCAL_INV', 7)
IB_UVERBS_WR_BIND_MW = enum_ib_uverbs_wr_opcode.define('IB_UVERBS_WR_BIND_MW', 8)
IB_UVERBS_WR_SEND_WITH_INV = enum_ib_uverbs_wr_opcode.define('IB_UVERBS_WR_SEND_WITH_INV', 9)
IB_UVERBS_WR_TSO = enum_ib_uverbs_wr_opcode.define('IB_UVERBS_WR_TSO', 10)
IB_UVERBS_WR_RDMA_READ_WITH_INV = enum_ib_uverbs_wr_opcode.define('IB_UVERBS_WR_RDMA_READ_WITH_INV', 11)
IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP = enum_ib_uverbs_wr_opcode.define('IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP', 12)
IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD = enum_ib_uverbs_wr_opcode.define('IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD', 13)
IB_UVERBS_WR_FLUSH = enum_ib_uverbs_wr_opcode.define('IB_UVERBS_WR_FLUSH', 14)
IB_UVERBS_WR_ATOMIC_WRITE = enum_ib_uverbs_wr_opcode.define('IB_UVERBS_WR_ATOMIC_WRITE', 15)
# Send work request and its embedded unions. Forward references to the _ex and
# _wr sub-records (defined just below) are resolved later by c.init_records().
@c.record
class struct_ib_uverbs_send_wr(c.Struct):
  SIZE = 56
  wr_id: Annotated[Annotated[int, ctypes.c_uint64], 0]
  num_sge: Annotated[Annotated[int, ctypes.c_uint32], 8]
  opcode: Annotated[Annotated[int, ctypes.c_uint32], 12]
  send_flags: Annotated[Annotated[int, ctypes.c_uint32], 16]
  ex: Annotated[struct_ib_uverbs_send_wr_ex, 20]
  wr: Annotated[struct_ib_uverbs_send_wr_wr, 24]
# C union: imm_data and invalidate_rkey share offset 0 (which is valid depends
# on the opcode).
@c.record
class struct_ib_uverbs_send_wr_ex(c.Struct):
  SIZE = 4
  imm_data: Annotated[Annotated[int, ctypes.c_uint32], 0]
  invalidate_rkey: Annotated[Annotated[int, ctypes.c_uint32], 0]
# C union: the rdma/atomic/ud variants all overlay offset 0; SIZE is that of
# the largest member (atomic, 32 bytes).
@c.record
class struct_ib_uverbs_send_wr_wr(c.Struct):
  SIZE = 32
  rdma: Annotated[struct_ib_uverbs_send_wr_wr_rdma, 0]
  atomic: Annotated[struct_ib_uverbs_send_wr_wr_atomic, 0]
  ud: Annotated[struct_ib_uverbs_send_wr_wr_ud, 0]
@c.record
class struct_ib_uverbs_send_wr_wr_rdma(c.Struct):
  SIZE = 16
  remote_addr: Annotated[Annotated[int, ctypes.c_uint64], 0]
  rkey: Annotated[Annotated[int, ctypes.c_uint32], 8]
  reserved: Annotated[Annotated[int, ctypes.c_uint32], 12]
@c.record
class struct_ib_uverbs_send_wr_wr_atomic(c.Struct):
  SIZE = 32
  remote_addr: Annotated[Annotated[int, ctypes.c_uint64], 0]
  compare_add: Annotated[Annotated[int, ctypes.c_uint64], 8]
  swap: Annotated[Annotated[int, ctypes.c_uint64], 16]
  rkey: Annotated[Annotated[int, ctypes.c_uint32], 24]
  reserved: Annotated[Annotated[int, ctypes.c_uint32], 28]
@c.record
class struct_ib_uverbs_send_wr_wr_ud(c.Struct):
  SIZE = 16
  ah: Annotated[Annotated[int, ctypes.c_uint32], 0]
  remote_qpn: Annotated[Annotated[int, ctypes.c_uint32], 4]
  remote_qkey: Annotated[Annotated[int, ctypes.c_uint32], 8]
  reserved: Annotated[Annotated[int, ctypes.c_uint32], 12]
# "Post send" command header; the variable-length array of work requests
# follows at offset SIZE. Response reports the index of the first bad WR.
@c.record
class struct_ib_uverbs_post_send(c.Struct):
  SIZE = 24
  response: Annotated[Annotated[int, ctypes.c_uint64], 0]
  qp_handle: Annotated[Annotated[int, ctypes.c_uint32], 8]
  wr_count: Annotated[Annotated[int, ctypes.c_uint32], 12]
  sge_count: Annotated[Annotated[int, ctypes.c_uint32], 16]
  wqe_size: Annotated[Annotated[int, ctypes.c_uint32], 20]
  send_wr: Annotated[c.Array[struct_ib_uverbs_send_wr, Literal[0]], 24]
@c.record
class struct_ib_uverbs_post_send_resp(c.Struct):
  SIZE = 4
  bad_wr: Annotated[Annotated[int, ctypes.c_uint32], 0]
# Receive work request and the "post recv" command/response (same flexible
# array pattern as post_send).
@c.record
class struct_ib_uverbs_recv_wr(c.Struct):
  SIZE = 16
  wr_id: Annotated[Annotated[int, ctypes.c_uint64], 0]
  num_sge: Annotated[Annotated[int, ctypes.c_uint32], 8]
  reserved: Annotated[Annotated[int, ctypes.c_uint32], 12]
@c.record
class struct_ib_uverbs_post_recv(c.Struct):
  SIZE = 24
  response: Annotated[Annotated[int, ctypes.c_uint64], 0]
  qp_handle: Annotated[Annotated[int, ctypes.c_uint32], 8]
  wr_count: Annotated[Annotated[int, ctypes.c_uint32], 12]
  sge_count: Annotated[Annotated[int, ctypes.c_uint32], 16]
  wqe_size: Annotated[Annotated[int, ctypes.c_uint32], 20]
  recv_wr: Annotated[c.Array[struct_ib_uverbs_recv_wr, Literal[0]], 24]
@c.record
class struct_ib_uverbs_post_recv_resp(c.Struct):
  SIZE = 4
  bad_wr: Annotated[Annotated[int, ctypes.c_uint32], 0]
# "Post recv" onto a shared receive queue (SRQ) — identical layout to
# post_recv but keyed by srq_handle.
@c.record
class struct_ib_uverbs_post_srq_recv(c.Struct):
  SIZE = 24
  response: Annotated[Annotated[int, ctypes.c_uint64], 0]
  srq_handle: Annotated[Annotated[int, ctypes.c_uint32], 8]
  wr_count: Annotated[Annotated[int, ctypes.c_uint32], 12]
  sge_count: Annotated[Annotated[int, ctypes.c_uint32], 16]
  wqe_size: Annotated[Annotated[int, ctypes.c_uint32], 20]
  recv: Annotated[c.Array[struct_ib_uverbs_recv_wr, Literal[0]], 24]
@c.record
class struct_ib_uverbs_post_srq_recv_resp(c.Struct):
  SIZE = 4
  bad_wr: Annotated[Annotated[int, ctypes.c_uint32], 0]
# Address handle (AH) commands: create (with embedded ah_attr), its response,
# and destroy.
@c.record
class struct_ib_uverbs_create_ah(c.Struct):
  SIZE = 56
  response: Annotated[Annotated[int, ctypes.c_uint64], 0]
  user_handle: Annotated[Annotated[int, ctypes.c_uint64], 8]
  pd_handle: Annotated[Annotated[int, ctypes.c_uint32], 16]
  reserved: Annotated[Annotated[int, ctypes.c_uint32], 20]
  attr: Annotated[struct_ib_uverbs_ah_attr, 24]
  driver_data: Annotated[c.Array[Annotated[int, ctypes.c_uint64], Literal[0]], 56]
@c.record
class struct_ib_uverbs_create_ah_resp(c.Struct):
  SIZE = 4
  ah_handle: Annotated[Annotated[int, ctypes.c_uint32], 0]
  driver_data: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[0]], 4]
@c.record
class struct_ib_uverbs_destroy_ah(c.Struct):
  SIZE = 4
  ah_handle: Annotated[Annotated[int, ctypes.c_uint32], 0]
# Multicast attach/detach commands — identical layouts: multicast GID, the QP
# to (de)associate, and the multicast LID.
@c.record
class struct_ib_uverbs_attach_mcast(c.Struct):
  SIZE = 24
  gid: Annotated[c.Array[Annotated[int, ctypes.c_ubyte], Literal[16]], 0]
  qp_handle: Annotated[Annotated[int, ctypes.c_uint32], 16]
  mlid: Annotated[Annotated[int, ctypes.c_uint16], 20]
  reserved: Annotated[Annotated[int, ctypes.c_uint16], 22]
  driver_data: Annotated[c.Array[Annotated[int, ctypes.c_uint64], Literal[0]], 24]
@c.record
class struct_ib_uverbs_detach_mcast(c.Struct):
  SIZE = 24
  gid: Annotated[c.Array[Annotated[int, ctypes.c_ubyte], Literal[16]], 0]
  qp_handle: Annotated[Annotated[int, ctypes.c_uint32], 16]
  mlid: Annotated[Annotated[int, ctypes.c_uint16], 20]
  reserved: Annotated[Annotated[int, ctypes.c_uint16], 22]
  driver_data: Annotated[c.Array[Annotated[int, ctypes.c_uint64], Literal[0]], 24]
# Flow-steering spec header and the Ethernet filter/spec. In every
# flow_spec_* record below, hdr and the type/size/reserved trio overlay the
# same bytes (a C anonymous union in the original header).
@c.record
class struct_ib_uverbs_flow_spec_hdr(c.Struct):
  SIZE = 8
  type: Annotated[Annotated[int, ctypes.c_uint32], 0]
  size: Annotated[Annotated[int, ctypes.c_uint16], 4]
  reserved: Annotated[Annotated[int, ctypes.c_uint16], 6]
  flow_spec_data: Annotated[c.Array[Annotated[int, ctypes.c_uint64], Literal[0]], 8]
@c.record
class struct_ib_uverbs_flow_eth_filter(c.Struct):
  SIZE = 16
  dst_mac: Annotated[c.Array[Annotated[int, ctypes.c_ubyte], Literal[6]], 0]
  src_mac: Annotated[c.Array[Annotated[int, ctypes.c_ubyte], Literal[6]], 6]
  ether_type: Annotated[Annotated[int, ctypes.c_uint16], 12]
  vlan_tag: Annotated[Annotated[int, ctypes.c_uint16], 14]
@c.record
class struct_ib_uverbs_flow_spec_eth(c.Struct):
  SIZE = 40
  hdr: Annotated[struct_ib_uverbs_flow_spec_hdr, 0]
  type: Annotated[Annotated[int, ctypes.c_uint32], 0]
  size: Annotated[Annotated[int, ctypes.c_uint16], 4]
  reserved: Annotated[Annotated[int, ctypes.c_uint16], 6]
  val: Annotated[struct_ib_uverbs_flow_eth_filter, 8]
  mask: Annotated[struct_ib_uverbs_flow_eth_filter, 24]
# IPv4 and TCP/UDP flow filters and specs: each spec carries a match value
# (val) and a bitmask (mask) selecting which filter bits to compare.
@c.record
class struct_ib_uverbs_flow_ipv4_filter(c.Struct):
  SIZE = 12
  src_ip: Annotated[Annotated[int, ctypes.c_uint32], 0]
  dst_ip: Annotated[Annotated[int, ctypes.c_uint32], 4]
  proto: Annotated[Annotated[int, ctypes.c_ubyte], 8]
  tos: Annotated[Annotated[int, ctypes.c_ubyte], 9]
  ttl: Annotated[Annotated[int, ctypes.c_ubyte], 10]
  flags: Annotated[Annotated[int, ctypes.c_ubyte], 11]
# hdr and type/size/reserved overlay the same bytes (C anonymous union).
@c.record
class struct_ib_uverbs_flow_spec_ipv4(c.Struct):
  SIZE = 32
  hdr: Annotated[struct_ib_uverbs_flow_spec_hdr, 0]
  type: Annotated[Annotated[int, ctypes.c_uint32], 0]
  size: Annotated[Annotated[int, ctypes.c_uint16], 4]
  reserved: Annotated[Annotated[int, ctypes.c_uint16], 6]
  val: Annotated[struct_ib_uverbs_flow_ipv4_filter, 8]
  mask: Annotated[struct_ib_uverbs_flow_ipv4_filter, 20]
@c.record
class struct_ib_uverbs_flow_tcp_udp_filter(c.Struct):
  SIZE = 4
  dst_port: Annotated[Annotated[int, ctypes.c_uint16], 0]
  src_port: Annotated[Annotated[int, ctypes.c_uint16], 2]
# hdr and type/size/reserved overlay the same bytes (C anonymous union).
@c.record
class struct_ib_uverbs_flow_spec_tcp_udp(c.Struct):
  SIZE = 16
  hdr: Annotated[struct_ib_uverbs_flow_spec_hdr, 0]
  type: Annotated[Annotated[int, ctypes.c_uint32], 0]
  size: Annotated[Annotated[int, ctypes.c_uint16], 4]
  reserved: Annotated[Annotated[int, ctypes.c_uint16], 6]
  val: Annotated[struct_ib_uverbs_flow_tcp_udp_filter, 8]
  mask: Annotated[struct_ib_uverbs_flow_tcp_udp_filter, 12]
# IPv6 flow filter and spec (val/mask pair, same pattern as the IPv4 spec).
@c.record
class struct_ib_uverbs_flow_ipv6_filter(c.Struct):
  SIZE = 40
  src_ip: Annotated[c.Array[Annotated[int, ctypes.c_ubyte], Literal[16]], 0]
  dst_ip: Annotated[c.Array[Annotated[int, ctypes.c_ubyte], Literal[16]], 16]
  flow_label: Annotated[Annotated[int, ctypes.c_uint32], 32]
  next_hdr: Annotated[Annotated[int, ctypes.c_ubyte], 36]
  traffic_class: Annotated[Annotated[int, ctypes.c_ubyte], 37]
  hop_limit: Annotated[Annotated[int, ctypes.c_ubyte], 38]
  reserved: Annotated[Annotated[int, ctypes.c_ubyte], 39]
# hdr and type/size/reserved overlay the same bytes (C anonymous union).
@c.record
class struct_ib_uverbs_flow_spec_ipv6(c.Struct):
  SIZE = 88
  hdr: Annotated[struct_ib_uverbs_flow_spec_hdr, 0]
  type: Annotated[Annotated[int, ctypes.c_uint32], 0]
  size: Annotated[Annotated[int, ctypes.c_uint16], 4]
  reserved: Annotated[Annotated[int, ctypes.c_uint16], 6]
  val: Annotated[struct_ib_uverbs_flow_ipv6_filter, 8]
  mask: Annotated[struct_ib_uverbs_flow_ipv6_filter, 48]
# Flow-steering action specs: tag (attach a tag_id to matching packets), drop,
# and handle/count (reference a separately created action/counters object).
# In each, hdr and type/size/reserved overlay the same bytes (C anonymous union).
@c.record
class struct_ib_uverbs_flow_spec_action_tag(c.Struct):
  SIZE = 16
  hdr: Annotated[struct_ib_uverbs_flow_spec_hdr, 0]
  type: Annotated[Annotated[int, ctypes.c_uint32], 0]
  size: Annotated[Annotated[int, ctypes.c_uint16], 4]
  reserved: Annotated[Annotated[int, ctypes.c_uint16], 6]
  tag_id: Annotated[Annotated[int, ctypes.c_uint32], 8]
  reserved1: Annotated[Annotated[int, ctypes.c_uint32], 12]
@c.record
class struct_ib_uverbs_flow_spec_action_drop(c.Struct):
  SIZE = 8
  hdr: Annotated[struct_ib_uverbs_flow_spec_hdr, 0]
  type: Annotated[Annotated[int, ctypes.c_uint32], 0]
  size: Annotated[Annotated[int, ctypes.c_uint16], 4]
  reserved: Annotated[Annotated[int, ctypes.c_uint16], 6]
@c.record
class struct_ib_uverbs_flow_spec_action_handle(c.Struct):
  SIZE = 16
  hdr: Annotated[struct_ib_uverbs_flow_spec_hdr, 0]
  type: Annotated[Annotated[int, ctypes.c_uint32], 0]
  size: Annotated[Annotated[int, ctypes.c_uint16], 4]
  reserved: Annotated[Annotated[int, ctypes.c_uint16], 6]
  handle: Annotated[Annotated[int, ctypes.c_uint32], 8]
  reserved1: Annotated[Annotated[int, ctypes.c_uint32], 12]
@c.record
class struct_ib_uverbs_flow_spec_action_count(c.Struct):
  SIZE = 16
  hdr: Annotated[struct_ib_uverbs_flow_spec_hdr, 0]
  type: Annotated[Annotated[int, ctypes.c_uint32], 0]
  size: Annotated[Annotated[int, ctypes.c_uint16], 4]
  reserved: Annotated[Annotated[int, ctypes.c_uint16], 6]
  handle: Annotated[Annotated[int, ctypes.c_uint32], 8]
  reserved1: Annotated[Annotated[int, ctypes.c_uint32], 12]
# Remaining flow filters/specs: tunnel (e.g. VXLAN id), ESP (IPsec spi/seq),
# GRE, and MPLS. Each spec follows the hdr + val/mask pattern; hdr and
# type/size/reserved overlay the same bytes (C anonymous union).
@c.record
class struct_ib_uverbs_flow_tunnel_filter(c.Struct):
  SIZE = 4
  tunnel_id: Annotated[Annotated[int, ctypes.c_uint32], 0]
@c.record
class struct_ib_uverbs_flow_spec_tunnel(c.Struct):
  SIZE = 16
  hdr: Annotated[struct_ib_uverbs_flow_spec_hdr, 0]
  type: Annotated[Annotated[int, ctypes.c_uint32], 0]
  size: Annotated[Annotated[int, ctypes.c_uint16], 4]
  reserved: Annotated[Annotated[int, ctypes.c_uint16], 6]
  val: Annotated[struct_ib_uverbs_flow_tunnel_filter, 8]
  mask: Annotated[struct_ib_uverbs_flow_tunnel_filter, 12]
@c.record
class struct_ib_uverbs_flow_spec_esp_filter(c.Struct):
  SIZE = 8
  spi: Annotated[Annotated[int, ctypes.c_uint32], 0]
  seq: Annotated[Annotated[int, ctypes.c_uint32], 4]
@c.record
class struct_ib_uverbs_flow_spec_esp(c.Struct):
  SIZE = 24
  hdr: Annotated[struct_ib_uverbs_flow_spec_hdr, 0]
  type: Annotated[Annotated[int, ctypes.c_uint32], 0]
  size: Annotated[Annotated[int, ctypes.c_uint16], 4]
  reserved: Annotated[Annotated[int, ctypes.c_uint16], 6]
  val: Annotated[struct_ib_uverbs_flow_spec_esp_filter, 8]
  mask: Annotated[struct_ib_uverbs_flow_spec_esp_filter, 16]
@c.record
class struct_ib_uverbs_flow_gre_filter(c.Struct):
  SIZE = 8
  c_ks_res0_ver: Annotated[Annotated[int, ctypes.c_uint16], 0]
  protocol: Annotated[Annotated[int, ctypes.c_uint16], 2]
  key: Annotated[Annotated[int, ctypes.c_uint32], 4]
@c.record
class struct_ib_uverbs_flow_spec_gre(c.Struct):
  SIZE = 24
  hdr: Annotated[struct_ib_uverbs_flow_spec_hdr, 0]
  type: Annotated[Annotated[int, ctypes.c_uint32], 0]
  size: Annotated[Annotated[int, ctypes.c_uint16], 4]
  reserved: Annotated[Annotated[int, ctypes.c_uint16], 6]
  val: Annotated[struct_ib_uverbs_flow_gre_filter, 8]
  mask: Annotated[struct_ib_uverbs_flow_gre_filter, 16]
@c.record
class struct_ib_uverbs_flow_mpls_filter(c.Struct):
  SIZE = 4
  label: Annotated[Annotated[int, ctypes.c_uint32], 0]
@c.record
class struct_ib_uverbs_flow_spec_mpls(c.Struct):
  SIZE = 16
  hdr: Annotated[struct_ib_uverbs_flow_spec_hdr, 0]
  type: Annotated[Annotated[int, ctypes.c_uint32], 0]
  size: Annotated[Annotated[int, ctypes.c_uint16], 4]
  reserved: Annotated[Annotated[int, ctypes.c_uint16], 6]
  val: Annotated[struct_ib_uverbs_flow_mpls_filter, 8]
  mask: Annotated[struct_ib_uverbs_flow_mpls_filter, 12]
# Flow attribute header (followed at offset SIZE by num_of_specs flow_spec
# entries) and the create/destroy flow commands.
@c.record
class struct_ib_uverbs_flow_attr(c.Struct):
  SIZE = 16
  type: Annotated[Annotated[int, ctypes.c_uint32], 0]
  size: Annotated[Annotated[int, ctypes.c_uint16], 4]
  priority: Annotated[Annotated[int, ctypes.c_uint16], 6]
  num_of_specs: Annotated[Annotated[int, ctypes.c_ubyte], 8]
  reserved: Annotated[c.Array[Annotated[int, ctypes.c_ubyte], Literal[2]], 9]
  port: Annotated[Annotated[int, ctypes.c_ubyte], 11]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 12]
  flow_specs: Annotated[c.Array[struct_ib_uverbs_flow_spec_hdr, Literal[0]], 16]
@c.record
class struct_ib_uverbs_create_flow(c.Struct):
  SIZE = 24
  comp_mask: Annotated[Annotated[int, ctypes.c_uint32], 0]
  qp_handle: Annotated[Annotated[int, ctypes.c_uint32], 4]
  flow_attr: Annotated[struct_ib_uverbs_flow_attr, 8]
@c.record
class struct_ib_uverbs_create_flow_resp(c.Struct):
  SIZE = 8
  comp_mask: Annotated[Annotated[int, ctypes.c_uint32], 0]
  flow_handle: Annotated[Annotated[int, ctypes.c_uint32], 4]
@c.record
class struct_ib_uverbs_destroy_flow(c.Struct):
  SIZE = 8
  comp_mask: Annotated[Annotated[int, ctypes.c_uint32], 0]
  flow_handle: Annotated[Annotated[int, ctypes.c_uint32], 4]
# Shared receive queue creation: basic create_srq, the extended create_xsrq
# (typed SRQs, e.g. XRC — adds tag/XRCD/CQ handles), and the shared response.
@c.record
class struct_ib_uverbs_create_srq(c.Struct):
  SIZE = 32
  response: Annotated[Annotated[int, ctypes.c_uint64], 0]
  user_handle: Annotated[Annotated[int, ctypes.c_uint64], 8]
  pd_handle: Annotated[Annotated[int, ctypes.c_uint32], 16]
  max_wr: Annotated[Annotated[int, ctypes.c_uint32], 20]
  max_sge: Annotated[Annotated[int, ctypes.c_uint32], 24]
  srq_limit: Annotated[Annotated[int, ctypes.c_uint32], 28]
  driver_data: Annotated[c.Array[Annotated[int, ctypes.c_uint64], Literal[0]], 32]
@c.record
class struct_ib_uverbs_create_xsrq(c.Struct):
  SIZE = 48
  response: Annotated[Annotated[int, ctypes.c_uint64], 0]
  user_handle: Annotated[Annotated[int, ctypes.c_uint64], 8]
  srq_type: Annotated[Annotated[int, ctypes.c_uint32], 16]
  pd_handle: Annotated[Annotated[int, ctypes.c_uint32], 20]
  max_wr: Annotated[Annotated[int, ctypes.c_uint32], 24]
  max_sge: Annotated[Annotated[int, ctypes.c_uint32], 28]
  srq_limit: Annotated[Annotated[int, ctypes.c_uint32], 32]
  max_num_tags: Annotated[Annotated[int, ctypes.c_uint32], 36]
  xrcd_handle: Annotated[Annotated[int, ctypes.c_uint32], 40]
  cq_handle: Annotated[Annotated[int, ctypes.c_uint32], 44]
  driver_data: Annotated[c.Array[Annotated[int, ctypes.c_uint64], Literal[0]], 48]
@c.record
class struct_ib_uverbs_create_srq_resp(c.Struct):
  SIZE = 16
  srq_handle: Annotated[Annotated[int, ctypes.c_uint32], 0]
  max_wr: Annotated[Annotated[int, ctypes.c_uint32], 4]
  max_sge: Annotated[Annotated[int, ctypes.c_uint32], 8]
  srqn: Annotated[Annotated[int, ctypes.c_uint32], 12]
  driver_data: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[0]], 16]
# SRQ modify/query/destroy commands and their responses.
@c.record
class struct_ib_uverbs_modify_srq(c.Struct):
  SIZE = 16
  srq_handle: Annotated[Annotated[int, ctypes.c_uint32], 0]
  attr_mask: Annotated[Annotated[int, ctypes.c_uint32], 4]
  max_wr: Annotated[Annotated[int, ctypes.c_uint32], 8]
  srq_limit: Annotated[Annotated[int, ctypes.c_uint32], 12]
  driver_data: Annotated[c.Array[Annotated[int, ctypes.c_uint64], Literal[0]], 16]
@c.record
class struct_ib_uverbs_query_srq(c.Struct):
  SIZE = 16
  response: Annotated[Annotated[int, ctypes.c_uint64], 0]
  srq_handle: Annotated[Annotated[int, ctypes.c_uint32], 8]
  reserved: Annotated[Annotated[int, ctypes.c_uint32], 12]
  driver_data: Annotated[c.Array[Annotated[int, ctypes.c_uint64], Literal[0]], 16]
@c.record
class struct_ib_uverbs_query_srq_resp(c.Struct):
  SIZE = 16
  max_wr: Annotated[Annotated[int, ctypes.c_uint32], 0]
  max_sge: Annotated[Annotated[int, ctypes.c_uint32], 4]
  srq_limit: Annotated[Annotated[int, ctypes.c_uint32], 8]
  reserved: Annotated[Annotated[int, ctypes.c_uint32], 12]
@c.record
class struct_ib_uverbs_destroy_srq(c.Struct):
  SIZE = 16
  response: Annotated[Annotated[int, ctypes.c_uint64], 0]
  srq_handle: Annotated[Annotated[int, ctypes.c_uint32], 8]
  reserved: Annotated[Annotated[int, ctypes.c_uint32], 12]
@c.record
class struct_ib_uverbs_destroy_srq_resp(c.Struct):
  SIZE = 4
  events_reported: Annotated[Annotated[int, ctypes.c_uint32], 0]
# Extended work-queue (WQ) commands: create/destroy/modify plus responses.
@c.record
class struct_ib_uverbs_ex_create_wq(c.Struct):
  SIZE = 40
  comp_mask: Annotated[Annotated[int, ctypes.c_uint32], 0]
  wq_type: Annotated[Annotated[int, ctypes.c_uint32], 4]
  user_handle: Annotated[Annotated[int, ctypes.c_uint64], 8]
  pd_handle: Annotated[Annotated[int, ctypes.c_uint32], 16]
  cq_handle: Annotated[Annotated[int, ctypes.c_uint32], 20]
  max_wr: Annotated[Annotated[int, ctypes.c_uint32], 24]
  max_sge: Annotated[Annotated[int, ctypes.c_uint32], 28]
  create_flags: Annotated[Annotated[int, ctypes.c_uint32], 32]
  reserved: Annotated[Annotated[int, ctypes.c_uint32], 36]
@c.record
class struct_ib_uverbs_ex_create_wq_resp(c.Struct):
  SIZE = 24
  comp_mask: Annotated[Annotated[int, ctypes.c_uint32], 0]
  response_length: Annotated[Annotated[int, ctypes.c_uint32], 4]
  wq_handle: Annotated[Annotated[int, ctypes.c_uint32], 8]
  max_wr: Annotated[Annotated[int, ctypes.c_uint32], 12]
  max_sge: Annotated[Annotated[int, ctypes.c_uint32], 16]
  wqn: Annotated[Annotated[int, ctypes.c_uint32], 20]
@c.record
class struct_ib_uverbs_ex_destroy_wq(c.Struct):
  SIZE = 8
  comp_mask: Annotated[Annotated[int, ctypes.c_uint32], 0]
  wq_handle: Annotated[Annotated[int, ctypes.c_uint32], 4]
@c.record
class struct_ib_uverbs_ex_destroy_wq_resp(c.Struct):
  SIZE = 16
  comp_mask: Annotated[Annotated[int, ctypes.c_uint32], 0]
  response_length: Annotated[Annotated[int, ctypes.c_uint32], 4]
  events_reported: Annotated[Annotated[int, ctypes.c_uint32], 8]
  reserved: Annotated[Annotated[int, ctypes.c_uint32], 12]
@c.record
class struct_ib_uverbs_ex_modify_wq(c.Struct):
  SIZE = 24
  attr_mask: Annotated[Annotated[int, ctypes.c_uint32], 0]
  wq_handle: Annotated[Annotated[int, ctypes.c_uint32], 4]
  wq_state: Annotated[Annotated[int, ctypes.c_uint32], 8]
  curr_wq_state: Annotated[Annotated[int, ctypes.c_uint32], 12]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 16]
  flags_mask: Annotated[Annotated[int, ctypes.c_uint32], 20]
# Receive WQ indirection table (RSS) create/destroy, plus CQ moderation
# attributes and the extended modify-CQ command.
@c.record
class struct_ib_uverbs_ex_create_rwq_ind_table(c.Struct):
  SIZE = 8
  comp_mask: Annotated[Annotated[int, ctypes.c_uint32], 0]
  log_ind_tbl_size: Annotated[Annotated[int, ctypes.c_uint32], 4]
  # flexible array: 2**log_ind_tbl_size WQ handles follow at offset SIZE
  wq_handles: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[0]], 8]
@c.record
class struct_ib_uverbs_ex_create_rwq_ind_table_resp(c.Struct):
  SIZE = 16
  comp_mask: Annotated[Annotated[int, ctypes.c_uint32], 0]
  response_length: Annotated[Annotated[int, ctypes.c_uint32], 4]
  ind_tbl_handle: Annotated[Annotated[int, ctypes.c_uint32], 8]
  ind_tbl_num: Annotated[Annotated[int, ctypes.c_uint32], 12]
@c.record
class struct_ib_uverbs_ex_destroy_rwq_ind_table(c.Struct):
  SIZE = 8
  comp_mask: Annotated[Annotated[int, ctypes.c_uint32], 0]
  ind_tbl_handle: Annotated[Annotated[int, ctypes.c_uint32], 4]
@c.record
class struct_ib_uverbs_cq_moderation(c.Struct):
  SIZE = 4
  cq_count: Annotated[Annotated[int, ctypes.c_uint16], 0]
  cq_period: Annotated[Annotated[int, ctypes.c_uint16], 2]
@c.record
class struct_ib_uverbs_ex_modify_cq(c.Struct):
  SIZE = 16
  cq_handle: Annotated[Annotated[int, ctypes.c_uint32], 0]
  attr_mask: Annotated[Annotated[int, ctypes.c_uint32], 4]
  attr: Annotated[struct_ib_uverbs_cq_moderation, 8]
  reserved: Annotated[Annotated[int, ctypes.c_uint32], 12]
# Device capability bit flags (64-bit). Values are single bits; gaps between
# consecutive entries correspond to bits not exposed via uverbs.
class enum_ib_uverbs_device_cap_flags(Annotated[int, ctypes.c_uint64], c.Enum): pass
IB_UVERBS_DEVICE_RESIZE_MAX_WR = enum_ib_uverbs_device_cap_flags.define('IB_UVERBS_DEVICE_RESIZE_MAX_WR', 1)
IB_UVERBS_DEVICE_BAD_PKEY_CNTR = enum_ib_uverbs_device_cap_flags.define('IB_UVERBS_DEVICE_BAD_PKEY_CNTR', 2)
IB_UVERBS_DEVICE_BAD_QKEY_CNTR = enum_ib_uverbs_device_cap_flags.define('IB_UVERBS_DEVICE_BAD_QKEY_CNTR', 4)
IB_UVERBS_DEVICE_RAW_MULTI = enum_ib_uverbs_device_cap_flags.define('IB_UVERBS_DEVICE_RAW_MULTI', 8)
IB_UVERBS_DEVICE_AUTO_PATH_MIG = enum_ib_uverbs_device_cap_flags.define('IB_UVERBS_DEVICE_AUTO_PATH_MIG', 16)
IB_UVERBS_DEVICE_CHANGE_PHY_PORT = enum_ib_uverbs_device_cap_flags.define('IB_UVERBS_DEVICE_CHANGE_PHY_PORT', 32)
IB_UVERBS_DEVICE_UD_AV_PORT_ENFORCE = enum_ib_uverbs_device_cap_flags.define('IB_UVERBS_DEVICE_UD_AV_PORT_ENFORCE', 64)
IB_UVERBS_DEVICE_CURR_QP_STATE_MOD = enum_ib_uverbs_device_cap_flags.define('IB_UVERBS_DEVICE_CURR_QP_STATE_MOD', 128)
IB_UVERBS_DEVICE_SHUTDOWN_PORT = enum_ib_uverbs_device_cap_flags.define('IB_UVERBS_DEVICE_SHUTDOWN_PORT', 256)
IB_UVERBS_DEVICE_PORT_ACTIVE_EVENT = enum_ib_uverbs_device_cap_flags.define('IB_UVERBS_DEVICE_PORT_ACTIVE_EVENT', 1024)
IB_UVERBS_DEVICE_SYS_IMAGE_GUID = enum_ib_uverbs_device_cap_flags.define('IB_UVERBS_DEVICE_SYS_IMAGE_GUID', 2048)
IB_UVERBS_DEVICE_RC_RNR_NAK_GEN = enum_ib_uverbs_device_cap_flags.define('IB_UVERBS_DEVICE_RC_RNR_NAK_GEN', 4096)
IB_UVERBS_DEVICE_SRQ_RESIZE = enum_ib_uverbs_device_cap_flags.define('IB_UVERBS_DEVICE_SRQ_RESIZE', 8192)
IB_UVERBS_DEVICE_N_NOTIFY_CQ = enum_ib_uverbs_device_cap_flags.define('IB_UVERBS_DEVICE_N_NOTIFY_CQ', 16384)
IB_UVERBS_DEVICE_MEM_WINDOW = enum_ib_uverbs_device_cap_flags.define('IB_UVERBS_DEVICE_MEM_WINDOW', 131072)
IB_UVERBS_DEVICE_UD_IP_CSUM = enum_ib_uverbs_device_cap_flags.define('IB_UVERBS_DEVICE_UD_IP_CSUM', 262144)
IB_UVERBS_DEVICE_XRC = enum_ib_uverbs_device_cap_flags.define('IB_UVERBS_DEVICE_XRC', 1048576)
IB_UVERBS_DEVICE_MEM_MGT_EXTENSIONS = enum_ib_uverbs_device_cap_flags.define('IB_UVERBS_DEVICE_MEM_MGT_EXTENSIONS', 2097152)
IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2A = enum_ib_uverbs_device_cap_flags.define('IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2A', 8388608)
IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2B = enum_ib_uverbs_device_cap_flags.define('IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2B', 16777216)
IB_UVERBS_DEVICE_RC_IP_CSUM = enum_ib_uverbs_device_cap_flags.define('IB_UVERBS_DEVICE_RC_IP_CSUM', 33554432)
IB_UVERBS_DEVICE_RAW_IP_CSUM = enum_ib_uverbs_device_cap_flags.define('IB_UVERBS_DEVICE_RAW_IP_CSUM', 67108864)
IB_UVERBS_DEVICE_MANAGED_FLOW_STEERING = enum_ib_uverbs_device_cap_flags.define('IB_UVERBS_DEVICE_MANAGED_FLOW_STEERING', 536870912)
IB_UVERBS_DEVICE_RAW_SCATTER_FCS = enum_ib_uverbs_device_cap_flags.define('IB_UVERBS_DEVICE_RAW_SCATTER_FCS', 17179869184)
IB_UVERBS_DEVICE_PCI_WRITE_END_PADDING = enum_ib_uverbs_device_cap_flags.define('IB_UVERBS_DEVICE_PCI_WRITE_END_PADDING', 68719476736)
IB_UVERBS_DEVICE_FLUSH_GLOBAL = enum_ib_uverbs_device_cap_flags.define('IB_UVERBS_DEVICE_FLUSH_GLOBAL', 274877906944)
IB_UVERBS_DEVICE_FLUSH_PERSISTENT = enum_ib_uverbs_device_cap_flags.define('IB_UVERBS_DEVICE_FLUSH_PERSISTENT', 549755813888)
IB_UVERBS_DEVICE_ATOMIC_WRITE = enum_ib_uverbs_device_cap_flags.define('IB_UVERBS_DEVICE_ATOMIC_WRITE', 1099511627776)
class enum_ib_uverbs_raw_packet_caps(Annotated[int, ctypes.c_uint32], c.Enum): pass
IB_UVERBS_RAW_PACKET_CAP_CVLAN_STRIPPING = enum_ib_uverbs_raw_packet_caps.define('IB_UVERBS_RAW_PACKET_CAP_CVLAN_STRIPPING', 1)
IB_UVERBS_RAW_PACKET_CAP_SCATTER_FCS = enum_ib_uverbs_raw_packet_caps.define('IB_UVERBS_RAW_PACKET_CAP_SCATTER_FCS', 2)
IB_UVERBS_RAW_PACKET_CAP_IP_CSUM = enum_ib_uverbs_raw_packet_caps.define('IB_UVERBS_RAW_PACKET_CAP_IP_CSUM', 4)
IB_UVERBS_RAW_PACKET_CAP_DELAY_DROP = enum_ib_uverbs_raw_packet_caps.define('IB_UVERBS_RAW_PACKET_CAP_DELAY_DROP', 8)
c.init_records()
vext_field_avail = lambda type,fld,sz: (offsetof(type, fld) < (sz)) # type: ignore
IBV_DEVICE_RAW_SCATTER_FCS = (1 << 34) # type: ignore
IBV_DEVICE_PCI_WRITE_END_PADDING = (1 << 36) # type: ignore
ibv_query_port = lambda context,port_num,port_attr: ___ibv_query_port(context, port_num, port_attr) # type: ignore
ibv_reg_mr = lambda pd,addr,length,access: __ibv_reg_mr(pd, addr, length, access, __builtin_constant_p( ((int)(access) & IBV_ACCESS_OPTIONAL_RANGE) == 0)) # type: ignore
ibv_reg_mr_iova = lambda pd,addr,length,iova,access: __ibv_reg_mr_iova(pd, addr, length, iova, access, __builtin_constant_p( ((access) & IBV_ACCESS_OPTIONAL_RANGE) == 0)) # type: ignore
ETHERNET_LL_SIZE = 6 # type: ignore
IB_ROCE_UDP_ENCAP_VALID_PORT_MIN = (0xC000) # type: ignore
IB_ROCE_UDP_ENCAP_VALID_PORT_MAX = (0xFFFF) # type: ignore
IB_GRH_FLOWLABEL_MASK = (0x000FFFFF) # type: ignore
IBV_FLOW_ACTION_ESP_KEYMAT_AES_GCM = IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM # type: ignore
IBV_FLOW_ACTION_IV_ALGO_SEQ = IB_UVERBS_FLOW_ACTION_IV_ALGO_SEQ # type: ignore
IBV_FLOW_ACTION_ESP_REPLAY_NONE = IB_UVERBS_FLOW_ACTION_ESP_REPLAY_NONE # type: ignore
IBV_FLOW_ACTION_ESP_REPLAY_BMP = IB_UVERBS_FLOW_ACTION_ESP_REPLAY_BMP # type: ignore
IBV_FLOW_ACTION_ESP_FLAGS_INLINE_CRYPTO = IB_UVERBS_FLOW_ACTION_ESP_FLAGS_INLINE_CRYPTO # type: ignore
IBV_FLOW_ACTION_ESP_FLAGS_FULL_OFFLOAD = IB_UVERBS_FLOW_ACTION_ESP_FLAGS_FULL_OFFLOAD # type: ignore
IBV_FLOW_ACTION_ESP_FLAGS_TUNNEL = IB_UVERBS_FLOW_ACTION_ESP_FLAGS_TUNNEL # type: ignore
IBV_FLOW_ACTION_ESP_FLAGS_TRANSPORT = IB_UVERBS_FLOW_ACTION_ESP_FLAGS_TRANSPORT # type: ignore
IBV_FLOW_ACTION_ESP_FLAGS_DECRYPT = IB_UVERBS_FLOW_ACTION_ESP_FLAGS_DECRYPT # type: ignore
IBV_FLOW_ACTION_ESP_FLAGS_ENCRYPT = IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT # type: ignore
IBV_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW = IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW # type: ignore
IBV_ADVISE_MR_ADVICE_PREFETCH = IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH # type: ignore
IBV_ADVISE_MR_ADVICE_PREFETCH_WRITE = IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE # type: ignore
IBV_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT = IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT # type: ignore
IBV_ADVISE_MR_FLAG_FLUSH = IB_UVERBS_ADVISE_MR_FLAG_FLUSH # type: ignore
IBV_QPF_GRH_REQUIRED = IB_UVERBS_QPF_GRH_REQUIRED # type: ignore
IBV_ACCESS_OPTIONAL_RANGE = IB_UVERBS_ACCESS_OPTIONAL_RANGE # type: ignore
IB_UVERBS_ACCESS_OPTIONAL_FIRST = (1 << 20) # type: ignore
IB_UVERBS_ACCESS_OPTIONAL_LAST = (1 << 29) # type: ignore
IB_USER_VERBS_ABI_VERSION = 6 # type: ignore
IB_USER_VERBS_CMD_THRESHOLD = 50 # type: ignore
IB_USER_VERBS_CMD_COMMAND_MASK = 0xff # type: ignore
IB_USER_VERBS_CMD_FLAG_EXTENDED = 0x80000000 # type: ignore
IB_USER_VERBS_MAX_LOG_IND_TBL_SIZE = 0x0d # type: ignore
IB_DEVICE_NAME_MAX = 64 # type: ignore | {
"repo_id": "tinygrad/tinygrad",
"file_path": "tinygrad/runtime/autogen/ib.py",
"license": "MIT License",
"lines": 3114,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
tinygrad/tinygrad:test/external/external_test_simple_tokenizer.py | import functools, multiprocessing
from transformers import AutoTokenizer
from datasets import load_dataset
from tinygrad.apps.llm import SimpleTokenizer
from tinygrad.helpers import tqdm, getenv, partition
@functools.cache
def get_tokenizers():
  """Construct (reference HF tokenizer, tinygrad SimpleTokenizer) once per process; cached for reuse."""
  print("getting tokenizers")
  reference = AutoTokenizer.from_pretrained("NousResearch/Meta-Llama-3-8B-Instruct")
  # split the vocab into special and normal tokens by id
  is_special = lambda e: e[1] in reference.all_special_ids
  special_tokens, normal_tokens = partition(((t, tid) for t, tid in reference.vocab.items()), is_special)
  return reference, SimpleTokenizer(dict(normal_tokens), dict(special_tokens))
def test_tokenize(samp) -> bool:
  """Check one (idx, text) sample: SimpleTokenizer must match the reference encode and round-trip its decode."""
  base_tokenizer, simple_tokenizer = get_tokenizers()
  idx, txt = samp
  try:
    simple_tokens = tuple(simple_tokenizer.encode(txt))
  except RuntimeError:
    # an encode failure is scored as a mismatch against the reference
    simple_tokens = ()
  base_tokens = tuple(base_tokenizer.encode(txt, add_special_tokens=False))
  if simple_tokens == base_tokens:
    # tokens agree; additionally require an exact decode round trip
    if simple_tokenizer.decode(simple_tokens) != txt:
      print(f"decode mismatch at {idx}")
      return False
    return True
  print(f"tokens mismatch at index: {idx}.\n")
  color_codes = [91, 92, 94, 93, 95]
  def color_tokens(tids):
    # cycle ANSI colors so adjacent token boundaries are visible
    pieces = [f"\033[{color_codes[i%len(color_codes)]}m{base_tokenizer.decode([t])}" for i, t in enumerate(tids)]
    return "".join(pieces) + "\033[0m"
  print("simple:  ", color_tokens(simple_tokens))
  print("official:", color_tokens(base_tokens) + "\n")
  return False
# use ALLOW_FAILED=-1 to go over the entire dataset without printing.
if __name__ == "__main__":
  print("loading datasets")
  ds = load_dataset("OpenAssistant/oasst1")
  loaded_ds = [(idx, el["text"]) for idx, el in enumerate(ds["train"])]
  print(f"loaded {len(loaded_ds)}")
  allow_failed = getenv("ALLOW_FAILED", 10)
  fail_count, total = 0, 0
  # each worker process builds its own tokenizer pair lazily via the functools.cache on get_tokenizers
  with multiprocessing.Pool(16) as pool:
    for good in tqdm(pool.imap_unordered(test_tokenize, loaded_ds), total=len(loaded_ds)):
      total += 1
      if not good:
        fail_count += 1
        # ALLOW_FAILED=-1 decrements to -2, -3, ... and never hits 0, so the run never breaks early
        allow_failed -= 1
        if allow_failed == 0: break
  print(f"{fail_count}/{total} samples are inconsistent with the official tokenizer.")
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/external/external_test_simple_tokenizer.py",
"license": "MIT License",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:extra/gemm/amd_uop_matmul.py | import numpy as np
from tinygrad import Tensor, Device, Context, GlobalCounters, dtypes
from tinygrad.uop.ops import UOp, KernelInfo, sint, AxisType
from tinygrad.engine.realize import ExecItem, get_runner
from tinygrad.dtype import AddrSpace
from tinygrad.helpers import getenv
# problem size: square N x N matmul (override with N=... in the environment)
N = getenv("N", 4096)
M = K = N
run_count = getenv("CNT", 5)
# ---------------------------
# launch/config constants
# ---------------------------
WARP_SIZE = 32
# Threadblock tile sizes (block-level tile of C that a block computes)
BLOCK_N = 128 # columns of C (N-dim) per block
BLOCK_M = 128 # rows of C (M-dim) per block
BLOCK_K = 8 # K-slice per block iteration
# Register tile sizes (per-thread accumulator tile of C)
TN = 4 # columns per thread
TM = 4 # rows per thread
# K5=1 selects the alternate "kernel 5" configuration (fewer threads, wider wave tile, padded As)
is_kernel5 = getenv("K5", 0)
THREADS_PER_BLOCK = 128 if is_kernel5 else 256
assert THREADS_PER_BLOCK % BLOCK_N == 0, "THREADS_PER_BLOCK must be divisible by BLOCK_N"
assert THREADS_PER_BLOCK % BLOCK_K == 0, "THREADS_PER_BLOCK must be divisible by BLOCK_K"
assert (BLOCK_N * BLOCK_K) % THREADS_PER_BLOCK == 0
assert (BLOCK_M * BLOCK_K) % THREADS_PER_BLOCK == 0
WARPS_PER_BLOCK = THREADS_PER_BLOCK // WARP_SIZE
# wave (warp) tile: portion of the block tile owned by one warp
WAVE_TILE_N = 128 if is_kernel5 else 64
WAVE_TILE_M = BLOCK_N * BLOCK_M // WARPS_PER_BLOCK // WAVE_TILE_N
assert BLOCK_N % WAVE_TILE_N == 0, "BN must be a multiple of WN"
assert BLOCK_M % WAVE_TILE_M == 0, "BM must be a multiple of WM"
WAVES_IN_BLOCK_X = BLOCK_N // WAVE_TILE_N
WAVES_IN_BLOCK_Y = BLOCK_M // WAVE_TILE_M
assert WAVES_IN_BLOCK_X * WAVES_IN_BLOCK_Y == WARPS_PER_BLOCK, "wave grid must match warps/block"
# 8 x 4 lane grid = 32 lanes = one warp; each lane owns a TM x TN register tile per iteration
LANES_PER_WAVE_X = 8
LANES_PER_WAVE_Y = 4
ITERS_PER_WAVE_N = WAVE_TILE_N // (LANES_PER_WAVE_X * TN)
ITERS_PER_WAVE_M = WAVE_TILE_M // (LANES_PER_WAVE_Y * TM)
assert WAVE_TILE_N % (LANES_PER_WAVE_X * TN) == 0, "WAVE_TILE_N must be divisible by LANES_PER_WAVE_X*TN"
assert WAVE_TILE_M % (LANES_PER_WAVE_Y * TM) == 0, "WAVE_TILE_M must be divisible by LANES_PER_WAVE_Y*TM"
def rngs_for_shape(shape:tuple[sint, ...], rng:int, axis_type=AxisType.LOOP):
  """Create one UOp.range per dimension of `shape`, with ids rng, rng+1, ..."""
  out = []
  for offset, size in enumerate(shape):
    out.append(UOp.range(size, rng+offset, axis_type))
  return out
def copy(dest:UOp, src:UOp, rng:int, set=False, upcast=False):
assert dest.shape == src.shape
rngs = rngs_for_shape(src.shape, rng, AxisType.UPCAST if upcast else AxisType.LOOP)
copy = dest[*rngs].store(src[*rngs]).end(*rngs)
return dest.after(copy) if set else copy
def hand_spec_kernel3():
  """Hand-written tiled matmul kernel spec built directly from UOps.

  Structure: per-block C tile -> K-tile reduce loop -> global->local (shared) copy ->
  inner-k loop -> local->register copy -> FMA into register accumulators -> register->global
  epilogue. Returns the sinked, simplified UOp graph.
  """
  # ---------------------------
  # block indices & placeholders
  # ---------------------------
  blockIdx_x = UOp.special(N // BLOCK_N, "gidx0")
  blockIdx_y = UOp.special(N // BLOCK_M, "gidx1")
  # slots: 0 = output C, 1 = A, 2 = B (matches buffer order passed in test_matmul)
  a = UOp.placeholder((N, N), dtypes.float, slot=1)
  b = UOp.placeholder((N, N), dtypes.float, slot=2)
  c = UOp.placeholder((N, N), dtypes.float, slot=0)
  # index the output with the globals
  c = c.reshape(M // BLOCK_M, BLOCK_M, N // BLOCK_N, BLOCK_N)[blockIdx_y, :, blockIdx_x, :]
  # open the main reduction range
  k_tile_range = UOp.range(N // BLOCK_K, 0, AxisType.REDUCE)
  a = a.reshape(M // BLOCK_M, BLOCK_M, N // BLOCK_K, BLOCK_K)[blockIdx_y, :, k_tile_range, :]
  b = b.reshape(N // BLOCK_K, BLOCK_K, N // BLOCK_N, BLOCK_N)[k_tile_range, :, blockIdx_x, :]
  # globals are no longer used, they are already in the indexes
  del blockIdx_y, blockIdx_x
  # ---------------------------
  # GLOBAL -> LOCAL (As, Bs)
  # ---------------------------
  tid = UOp.special(THREADS_PER_BLOCK, "lidx0")
  # A: read BM x BK tiles (permute on store into locals)
  # kernel5 pads the As stride by 4 floats (then shrinks back), presumably to avoid bank conflicts — TODO confirm
  BM_As_stride = (BLOCK_M + 4) if is_kernel5 else BLOCK_M
  As = UOp.placeholder((BLOCK_K, BM_As_stride), dtypes.float, slot=0, addrspace=AddrSpace.LOCAL).shrink_to((BLOCK_K, BLOCK_M))
  As_store = copy(As.permute((1,0)).reshape(-1, THREADS_PER_BLOCK)[:, tid], a.reshape(-1, THREADS_PER_BLOCK)[:, tid], rng=100)
  # B: read BK x BN tiles
  Bs = UOp.placeholder((BLOCK_K, BLOCK_N), dtypes.float, slot=1, addrspace=AddrSpace.LOCAL)
  Bs_store = copy(Bs.reshape(-1, THREADS_PER_BLOCK)[:, tid], b.reshape(-1, THREADS_PER_BLOCK)[:, tid], rng=200)
  # TODO: can we automate barrier?
  barrier = UOp.barrier(As_store, Bs_store)
  As, Bs = As.after(barrier), Bs.after(barrier)
  # open inner k range
  k = UOp.range(BLOCK_K, 3, AxisType.REDUCE)
  # ---------------------------
  # LOCAL -> REG (per-wave tiles)
  # ---------------------------
  # decompose the thread id into (wave x/y within block, lane x/y within wave)
  waveIdx = (tid // WARP_SIZE) % WAVES_IN_BLOCK_X
  waveIdy = (tid // WARP_SIZE) // WAVES_IN_BLOCK_X
  assert waveIdy.vmax+1 == WAVES_IN_BLOCK_Y
  laneIdx = (tid % WARP_SIZE) % LANES_PER_WAVE_X
  laneIdy = (tid % WARP_SIZE) // LANES_PER_WAVE_X
  assert laneIdy.vmax+1 == LANES_PER_WAVE_Y
  A_col = UOp.placeholder((ITERS_PER_WAVE_M, TM), dtypes.float, slot=0, addrspace=AddrSpace.REG)
  A_col = copy(A_col, As[k, :].reshape(WAVES_IN_BLOCK_Y, ITERS_PER_WAVE_M, LANES_PER_WAVE_Y, TM)[waveIdy, :, laneIdy, :], 300, set=True, upcast=True)
  B_row = UOp.placeholder((ITERS_PER_WAVE_N, TN), dtypes.float, slot=1, addrspace=AddrSpace.REG)
  B_row = copy(B_row, Bs[k, :].reshape(WAVES_IN_BLOCK_X, ITERS_PER_WAVE_N, LANES_PER_WAVE_X, TN)[waveIdx, :, laneIdx, :], 400, set=True, upcast=True)
  # ---------------------------
  # FMA: c_regs += A_col * B_row
  # ---------------------------
  c_regs = UOp.placeholder((ITERS_PER_WAVE_M, TM, ITERS_PER_WAVE_N, TN), dtypes.float, slot=2, addrspace=AddrSpace.REG)
  # zero-initialize the accumulators before the reduce loops consume them
  i = UOp.range(c_regs.size, 16)
  c_regs = c_regs.after(c_regs.flatten()[i].store(0.0).end(i))
  # TODO: why don't these work as upcast?
  # why if the ranges merge is it slow?!? (if you change the order on end, they will merge. big slowdown on METAL)
  iterWaveM, yt, iterWaveN, xt = rngs = rngs_for_shape(c_regs.shape, 500)
  sink = c_regs[*rngs].store(c_regs.after(k)[*rngs] + A_col[iterWaveM, yt] * B_row[iterWaveN, xt]).end(iterWaveM, iterWaveN, yt, xt)
  # Close k, sync, and close K tiles
  sink = sink.end(k).barrier().end(k_tile_range)
  # ---------------------------
  # REG -> GLOBAL (epilogue)
  # ---------------------------
  c = c.reshape(WAVES_IN_BLOCK_Y, ITERS_PER_WAVE_M, LANES_PER_WAVE_Y, TM,
                WAVES_IN_BLOCK_X, ITERS_PER_WAVE_N, LANES_PER_WAVE_X, TN)
  c = c[waveIdy, :, laneIdy, :,
        waveIdx, :, laneIdx, :]
  sink = copy(c, c_regs.after(sink), rng=600)
  return sink.sink(arg=KernelInfo(opts_to_apply=())).simplify()
def test_matmul(sink:UOp, dtype=dtypes.float32, N=N):
  """Benchmark the hand-written kernel `sink` on random N x N inputs and report TFLOPS.

  With VERIFY=1 (default), also compare against tinygrad's own matmul and raise if the
  mean squared error is too large.
  """
  gen = np.random.default_rng()
  a = Tensor(gen.random((N, N), dtype=np.float32)-0.5, dtype=dtype)
  b = Tensor(gen.random((N, N), dtype=np.float32)-0.5, dtype=dtype)
  hc = Tensor.empty(N, N, dtype=dtype)
  Tensor.realize(a, b, hc)
  # buffer order must match the placeholder slots: 0=output, 1=a, 2=b
  ei = ExecItem(sink, [t.uop.buffer for t in [hc, a, b]], prg=get_runner(Device.DEFAULT, sink))
  with Context(DEBUG=2):
    ets = [ei.run(wait=True) for _ in range(run_count)]
  print(f"REAL TFLOPS {N * N * N * 2 / min(ets) * 1e-12:.2f}")
  if not getenv("VERIFY", 1): return
  GlobalCounters.reset()
  with Context(DEBUG=2):
    tc = (a @ b).realize()
  with Context(DEBUG=0):
    err = (hc - tc).square().mean().item()
  print(f"mean squared error {err}")
  if err > 1e-06:
    raise RuntimeError("matmul is wrong!")
# entry point: build the hand-written kernel spec, then benchmark (and optionally verify) it
if __name__ == "__main__":
  test_matmul(hand_spec_kernel3(), N=N)
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "extra/gemm/amd_uop_matmul.py",
"license": "MIT License",
"lines": 136,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
tinygrad/tinygrad:test/external/external_llm_eval.py | # eval for tinygrad.apps.llm
import pyarrow.parquet as pq
from tinygrad.helpers import fetch, colored
from tinygrad.apps.llm import Transformer, SimpleTokenizer, models
from tinygrad import Tensor
# ARC-Challenge multiple-choice eval: ask the model for a single answer letter per question
# and track accuracy against the dataset's answerKey.
if __name__ == "__main__":
  dat = fetch("https://huggingface.co/datasets/allenai/ai2_arc/resolve/main/ARC-Challenge/test-00000-of-00001.parquet")
  table = pq.read_table(dat)
  # BUG FIX: `models` in tinygrad.apps.llm is keyed by names like "llama3.2:1b"; there is no
  # "1B" key, so the previous models["1B"] raised KeyError. Use the 1B llama key explicitly.
  model, kv = Transformer.from_gguf(Tensor.from_url(models["llama3.2:1b"]), max_context=4096)
  tok = SimpleTokenizer.from_gguf_kv(kv)
  bos_id: int = kv['tokenizer.ggml.bos_token_id']
  eos_id: int = kv['tokenizer.ggml.eos_token_id']
  num_correct, num_answered = 0, 0
  total_questions = len(table["question"])
  for question, choices, answer in zip(table["question"], table["choices"], table["answerKey"]):
    # build the multiple-choice prompt: question, lettered options, answer-letter instruction
    phrasing = f"Question: {question}\n\n" + \
      '\n'.join([f"{k}) {v}" for k,v in zip(choices['label'], choices['text'])]) +\
      "\n\nReply with the letter of the correct answer only."
    try:
      ids = [bos_id] + tok.role("user") + tok.encode(phrasing) + [eos_id] + tok.role("assistant") + tok.encode("Answer: ")
    except RuntimeError:
      # TODO: fix the tokenizer
      # BUG FIX: this used to `pass`, falling through to generate() with `ids` unbound on the
      # first failure or stale from the previous question; skip unencodable samples instead.
      continue
    # score only the first generated token (the answer letter)
    next_id = next(model.generate(ids))
    correct, given = answer.as_py().strip(), tok.decode([next_id]).strip()
    num_correct += correct == given
    num_answered += 1
    print(f"{num_answered:4d}/{total_questions:4d} "+\
      f"Correct Answer: {correct} "+\
      f"Given Answer: {colored(given, 'green' if correct==given else 'red')} "+\
      f"Percent: {num_correct*100.0/num_answered:.2f}%")
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/external/external_llm_eval.py",
"license": "MIT License",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:tinygrad/apps/llm.py | from __future__ import annotations
import sys, argparse, typing, re, unicodedata, json, uuid, time, functools
from tinygrad import Tensor, nn, UOp, TinyJit, getenv, function
from tinygrad.helpers import partition, DEBUG, Timing, GlobalCounters, stderr_log, colored
from tinygrad.viz.serve import TCPServerWithReuse, HTTPRequestHandler
class SimpleTokenizer:
  """Byte-level BPE tokenizer (GPT-2 style byte mapping, llama.cpp-style pretokenizer regex).

  `normal_tokens` maps the byte-encoder string form of each token to its id; `special_tokens`
  maps special-token strings (e.g. chat markers) to ids. `preset` selects the chat template
  used by role()/end_turn().
  """
  def __init__(self, normal_tokens:dict[str, int], special_tokens:dict[str, int], preset:str="llama3"):
    if preset not in ("llama3","llama-v3","llama-bpe","qwen2","olmo"): raise ValueError(f"Invalid tokenizer preset '{preset}'")
    # https://github.com/openai/gpt-2/blob/9b63575ef42771a015060c964af2c3da4cf7c8ab/src/encoder.py#L9
    bs = [*range(33, 127), *range(161, 173), *range(174, 256)] # bytes that map to themselves
    # maps each vocab character back to its raw byte value (inverse of the GPT-2 byte encoder)
    self._byte_decoder = {chr(b): b for b in bs} | {chr(256+i): b for i,b in enumerate(b for b in range(256) if b not in bs)}
    # https://github.com/ggml-org/llama.cpp/blob/94933c8c2eeaa9a7983e3f6c08af76bd86724094/src/llama-vocab.cpp#L286
    # 0x323b0 is one past the max codepoint in unicode categories L/N/Z (0x323af is max L)
    def ucat_range(pre: str): return "".join(re.escape(chr(cp)) for cp in range(0x323b0) if unicodedata.category(chr(cp)).startswith(pre))
    r_ws, r_p_N, r_p_L = r"\t\n\x0b\x0c\r\x85" + ucat_range("Z"), ucat_range("N"), ucat_range("L")
    # pretokenizer: splits text into contraction/word/number/punct/whitespace chunks before BPE
    self._split_to_word = re.compile("(?i:'s|'t|'re|'ve|'m|'ll|'d)|" + \
      f"[^\\r\\n{r_p_N}{r_p_L}]?[{r_p_L}]+|[{r_p_N}]{{1,3}}| ?[^{r_ws}{r_p_N}{r_p_L}]+[\\r\\n]*|[{r_ws}]*[\\r\\n]+|[{r_ws}]+(?![^{r_ws}])|[{r_ws}]+")
    # matches any special token literally; r"(?!)" never matches when there are none
    self._split_to_sentence = re.compile("|".join(re.escape(tok) for tok in special_tokens.keys()) if special_tokens else r"(?!)")
    # re-key the normal vocab by raw bytes for byte-level BPE merging
    self._normal_tokens = {bytes(self._byte_decoder[c] for c in tok): tid for tok, tid in normal_tokens.items()}
    self._special_tokens = special_tokens
    # reverse map for decode(): token id -> raw bytes
    self._tok2bytes = {tid: tok for tok, tid in self._normal_tokens.items()} | {tid: tok.encode() for tok, tid in self._special_tokens.items()}
    self.preset = preset
  @staticmethod
  def from_gguf_kv(kv:dict):
    """Build a SimpleTokenizer from GGUF metadata (tokenizer.ggml.* keys)."""
    # https://github.com/ggml-org/llama.cpp/blob/94933c8c2eeaa9a7983e3f6c08af76bd86724094/src/llama-vocab.cpp#L1818-L1820
    vocab: typing.Iterable[tuple[str, int]] = ((tok, idx) for idx, tok in enumerate(kv["tokenizer.ggml.tokens"]))
    normal_tokens, special_tokens = partition(vocab, lambda e: kv["tokenizer.ggml.token_type"][e[1]] == 1)
    return SimpleTokenizer(dict(normal_tokens), dict(special_tokens), kv["tokenizer.ggml.pre"])
  def _encode_word(self, word:bytes) -> list[int]:
    """BPE-merge one pretokenized chunk into token ids; raises RuntimeError if a part has no id."""
    if (early_token:=self._normal_tokens.get(word)) is not None: return [early_token]
    parts = [bytes([b]) for b in word]
    # greedily merge any parts that we can
    while True:
      # pick the adjacent pair whose merged token has the smallest id; -1 when nothing merges
      i = min([(sys.maxsize, -1)] + [(self._normal_tokens.get(parts[j]+parts[j+1], sys.maxsize), j) for j in range(len(parts)-1)])[1]
      if i == -1: break
      parts[i:i+2] = [parts[i] + parts[i+1]]
    try: return [self._normal_tokens[p] for p in parts]
    except KeyError: raise RuntimeError("token not found")
  def _encode_sentence(self, chunk:str) -> list[int]:
    """Encode special-token-free text: pretokenize into words, then BPE each word."""
    return [tok for word in self._split_to_word.findall(chunk) for tok in self._encode_word(word.encode())]
  def encode(self, text:str) -> list[int]:
    """Encode text to token ids; special tokens embedded in the text map to their own ids."""
    tokens: list[int] = []
    pos = 0
    for match in self._split_to_sentence.finditer(text):
      tokens.extend(self._encode_sentence(text[pos:match.start(0)]) + [self._special_tokens[text[match.start(0):match.end(0)]]])
      pos = match.end(0)
    return tokens + self._encode_sentence(text[pos:])
  # decode: token ids -> str; undecodable byte sequences become U+FFFD (errors='replace')
  def decode(self, ids:list[int]) -> str: return b''.join(self._tok2bytes[tid] for tid in ids).decode(errors='replace')
  def role(self, role:str):
    """Token ids for the chat-template header that opens a turn for `role`, per preset."""
    if self.preset == 'olmo': return self.encode("<|" + role + "|>\n") # OLMoE Instruct format
    if self.preset == 'qwen2': return self.encode("<|im_start|>" + role + "\n")
    return self.encode("<|start_header_id|>" + role + "<|end_header_id|>\n\n")
  def end_turn(self, eos_id:int):
    """Token ids that close a chat turn, per preset."""
    if self.preset == 'olmo': return self.encode("\n")
    if self.preset == 'qwen2': return [eos_id] + self.encode("\n")
    return [eos_id]
@functools.cache
def precompute_freqs_cis(dim: int, end: int, theta: float = 10000.0) -> Tensor:
  """RoPE angle table for positions [0, end): each row is [cos(angles) | sin(angles)].

  Cached per (dim, end, theta) so every block shares one realized table.
  """
  half = dim // 2
  inv_freq = 1.0 / (theta ** (Tensor.arange(0, dim, 2)[:half] / dim))
  # outer product: position index times per-channel inverse frequency
  angles = Tensor.arange(end).unsqueeze(dim=1) * inv_freq.unsqueeze(dim=0)
  return angles.cos().cat(angles.sin(), dim=-1).contiguous()
class ExpertWeights:
  """Expert-batched linear weights: like nn.Linear but with a leading num_experts axis.

  Weight shape: (num_experts, out_features, in_features).
  """
  def __init__(self, num_experts:int, in_features:int, out_features:int):
    self.weight = Tensor.zeros(num_experts, out_features, in_features)
  def __call__(self, sel:Tensor, x:Tensor) -> Tensor:
    # sel: (B, T, k) expert indices; x: (B, T, 1, in) or (B, T, k, in) -> output: (B, T, k, out)
    w_t = self.weight[sel].transpose(-1, -2)   # gather per-token expert weights, transposed for matmul
    return (x.unsqueeze(-2) @ w_t).squeeze(-2)
def apply_rope(x:Tensor, freqs_cis:Tensor) -> Tensor:
  """Apply rotary position embedding to x (half-split layout) using a [cos | sin] table."""
  assert x.shape[-1] % 2 == 0
  table = freqs_cis.reshape(1, 1, x.shape[2], -1)
  cos, sin = table.chunk(2, dim=-1)
  lo, hi = x.chunk(2, dim=-1)
  rotated_lo = lo * cos - hi * sin
  rotated_hi = hi * cos + lo * sin
  return rotated_lo.cat(rotated_hi, dim=-1)
class TransformerBlock:
  """One decoder block: RMSNorm + RoPE attention with KV cache, then a dense or MoE feed-forward.

  Set num_experts > 0 to use the MoE path; qk_norm > 0 adds Q/K RMSNorms (applied before or
  after the head reshape depending on whether qk_norm equals head_dim).
  """
  def __init__(self, dim:int, hidden_dim:int, n_heads:int, n_kv_heads:int, norm_eps:float, head_dim:int, rope_theta:float,
               max_context:int=0, qk_norm:int=0, num_experts:int=0, num_experts_per_tok:int=0):
    self.n_heads = n_heads
    self.n_kv_heads = n_kv_heads
    self.head_dim = head_dim
    self.rope_theta = rope_theta
    self.max_context = max_context
    self.qk_norm = qk_norm
    # --- attention projections (all linear, bias-free) ------------------
    q_proj_out = self.head_dim * n_heads
    kv_proj_out = self.head_dim * n_kv_heads
    self.attn_q = nn.Linear(dim, q_proj_out, bias=False)
    self.attn_k = nn.Linear(dim, kv_proj_out, bias=False)
    self.attn_v = nn.Linear(dim, kv_proj_out, bias=False)
    self.attn_output = nn.Linear(q_proj_out, dim, bias=False)
    # --- RMSNorms --------------------------------------------------------
    self.attn_norm = nn.RMSNorm(dim, norm_eps)
    self.ffn_norm = nn.RMSNorm(dim, norm_eps)
    if qk_norm: self.attn_q_norm, self.attn_k_norm = nn.RMSNorm(qk_norm, norm_eps), nn.RMSNorm(qk_norm, norm_eps)
    # --- feed-forward (MoE or dense) -------------------------------------
    if num_experts > 0:
      self.num_experts_per_tok = num_experts_per_tok
      self.ffn_gate_inp = nn.Linear(dim, num_experts, bias=False) # router
      self.ffn_gate_exps = ExpertWeights(num_experts, dim, hidden_dim)
      self.ffn_up_exps = ExpertWeights(num_experts, dim, hidden_dim)
      self.ffn_down_exps = ExpertWeights(num_experts, hidden_dim, dim)
    else:
      self.ffn_gate = nn.Linear(dim, hidden_dim, bias=False)
      self.ffn_up = nn.Linear(dim, hidden_dim, bias=False)
      self.ffn_down = nn.Linear(hidden_dim, dim, bias=False)
  @function
  def _attention(self, x:Tensor, start_pos:int|UOp) -> Tensor:
    """Self-attention with RoPE and KV cache update; returns x plus the attention output (residual)."""
    x_norm = self.attn_norm(x) # (B,T,D)
    q, k, v = self.attn_q(x_norm), self.attn_k(x_norm), self.attn_v(x_norm)
    # qk_norm != head_dim: normalize the full projection before splitting into heads
    if self.qk_norm and self.qk_norm != self.head_dim: q, k = self.attn_q_norm(q), self.attn_k_norm(k)
    B, T, _ = x.shape
    q = q.reshape(B, T, self.n_heads, self.head_dim).transpose(1, 2) # (B,H,T,Hd)
    k = k.reshape(B, T, self.n_kv_heads, self.head_dim).transpose(1, 2) # (B,KvH,T,Hd)
    v = v.reshape(B, T, self.n_kv_heads, self.head_dim).transpose(1, 2) # (B,KvH,T,Hd)
    # qk_norm == head_dim: normalize per head after the reshape
    if self.qk_norm == self.head_dim: q, k = self.attn_q_norm(q), self.attn_k_norm(k)
    freqs_cis = precompute_freqs_cis(self.head_dim, self.max_context, self.rope_theta)[start_pos:start_pos+T]
    q = apply_rope(q, freqs_cis)
    k = apply_rope(k, freqs_cis)
    # write this step's K/V into the cache and read back the whole [0, start_pos+T) prefix
    # TODO: fix assign to behave like this
    assigned_kv = self.cache_kv.uop.after(self.cache_kv[:, :, :, start_pos:start_pos+T, :].uop.assign(Tensor.stack(k, v).contiguous().uop))
    tensor_assigned_kv = Tensor(assigned_kv, device=assigned_kv.device)
    k = tensor_assigned_kv[0, :, :, 0:start_pos+T, :]
    v = tensor_assigned_kv[1, :, :, 0:start_pos+T, :]
    #self.cache_kv[:, :, :, start_pos:start_pos+T, :].assign(Tensor.stack(k, v))
    #k = self.cache_kv[0, :, :, 0:start_pos+T, :]
    #v = self.cache_kv[1, :, :, 0:start_pos+T, :]
    # NOTE: this mask is causal_lower_right, not the causal_upper_left generated by is_casual = True
    # TODO: this if statement should be removed and it shouldn't generate extra kernels
    mask = Tensor.full((1, 1, T, start_pos+T), float("-inf"), dtype=x.dtype, device=x.device).triu(start_pos+1) if T > 1 else None
    attn = q.scaled_dot_product_attention(k, v, attn_mask=mask, enable_gqa=True) # (B,H,T,Hd)
    attn = attn.transpose(1, 2).reshape(B, T, -1) # back to (B,T,D)
    attn = self.attn_output(attn)
    return x + attn
  @function(precompile=bool(getenv("PRECOMPILE", 0)))
  def _feed_forward(self, h: Tensor) -> Tensor:
    """Feed-forward with residual: top-k routed MoE when expert weights exist, else dense SwiGLU."""
    h_norm = self.ffn_norm(h)
    if hasattr(self, 'ffn_gate_exps'):
      x = h_norm.unsqueeze(2) # (B, T, 1, D) - add expert dim for broadcasting
      probs, sel = self.ffn_gate_inp(h_norm).softmax(-1).topk(self.num_experts_per_tok) # (B, T, k) each
      x_down = self.ffn_down_exps(sel, self.ffn_gate_exps(sel, x).silu() * self.ffn_up_exps(sel, x)) # (B, T, k, D)
      return h + (x_down * probs.unsqueeze(-1)).sum(axis=2) # (B, T, D)
    # TODO: remove the need for this contiguous
    gated = self.ffn_gate(h_norm).silu().contiguous() * self.ffn_up(h_norm)
    return h + self.ffn_down(gated)
  def __call__(self, x: Tensor, start_pos: int|UOp):
    """Run attention then feed-forward, lazily allocating the KV cache on first use."""
    if not hasattr(self, "cache_kv"):
      # TODO: how is the dtype of this determined?
      self.cache_kv = Tensor.zeros(2, x.shape[0], self.n_kv_heads, self.max_context, self.head_dim, device=x.device).contiguous().realize()
    return self._feed_forward(self._attention(x, start_pos)).contiguous()
class Transformer:
  """Decoder-only LLM: token embedding, a stack of TransformerBlocks, and greedy next-token output."""
  def __init__(self, *, num_blocks, dim, hidden_dim, n_heads, n_kv_heads, norm_eps, vocab_size, head_dim:int, rope_theta:float,
               max_context:int=0, qk_norm:int=0, num_experts:int=0, num_experts_per_tok:int=0):
    self.blk = [TransformerBlock(dim, hidden_dim, n_heads, n_kv_heads, norm_eps, head_dim, rope_theta, max_context, qk_norm,
                num_experts, num_experts_per_tok) for _ in range(num_blocks)]
    self.token_embd = nn.Embedding(vocab_size, dim)
    self.output_norm = nn.RMSNorm(dim, norm_eps)
    self.output = nn.Linear(dim, vocab_size, bias=False)
    self.max_context = max_context
    # JIT is used if T=1 and start_pos is a UOp. TODO: make this not needed by including T in the JIT and making start_pos always a UOp
    self.forward_jit = TinyJit(self.forward)
  def forward(self, tokens:Tensor, start_pos:int|UOp) -> Tensor:
    """Greedy decode step: returns the argmax next-token id for the last position, shape (B, 1)."""
    x = self.token_embd(tokens) # (B, T, D)
    for block in self.blk: x = block(x, start_pos)
    # TODO: add temperature
    return self.output(self.output_norm(x))[:, -1, :].softmax(-1, dtype="float").argmax(-1, keepdim=True)
  def __call__(self, tokens:Tensor, start_pos:int|UOp=0) -> Tensor:
    # dispatch to the JIT'd forward only for single-token steps with a symbolic position
    return (self.forward_jit if getenv("JIT", 1) and tokens.shape[1] == 1 and isinstance(start_pos, UOp) else self.forward)(tokens, start_pos)
  @staticmethod
  def from_gguf(gguf:Tensor, max_context:int|None=None, realize=bool(getenv("REALIZE", 1))) -> tuple[Transformer, dict]:
    """Build a Transformer (and return the GGUF metadata dict) from a GGUF file loaded as a Tensor.

    max_context caps the model's declared context length; realize=True materializes all weights up front.
    """
    # TODO: remove the need for copy to default device
    kv, state_dict = nn.state.gguf_load(gguf.to(None).realize())
    # all state items should be float16, not float32
    state_dict = {k:v.cast('float16') if getenv("HALF", 1) else v for k,v in state_dict.items()}
    # some models like Llama 3.2 don't have an output.weight, they just tie to the token_embd.weight
    if 'output.weight' not in state_dict: state_dict['output.weight'] = state_dict['token_embd.weight']
    arch = kv['general.architecture']
    max_context = min(max_context, kv[f'{arch}.context_length']) if max_context is not None else kv[f'{arch}.context_length']
    n_heads, n_kv_heads = kv[f'{arch}.attention.head_count'], kv[f'{arch}.attention.head_count_kv']
    # Permute Q/K weights from interleaved to half-split RoPE layout (llama-style models only)
    if arch == 'llama':
      for name in state_dict:
        if 'attn_q.weight' in name: state_dict[name] = state_dict[name].rearrange("(n h two) d -> (n two h) d", n=n_heads, two=2)
        if 'attn_k.weight' in name: state_dict[name] = state_dict[name].rearrange("(n h two) d -> (n two h) d", n=n_kv_heads, two=2)
    model = Transformer(num_blocks=kv[f'{arch}.block_count'], dim=kv[f'{arch}.embedding_length'],
                        hidden_dim=kv.get(f'{arch}.expert_feed_forward_length', kv[f'{arch}.feed_forward_length']),
                        n_heads=n_heads, n_kv_heads=n_kv_heads, norm_eps=kv[f'{arch}.attention.layer_norm_rms_epsilon'],
                        vocab_size=len(kv['tokenizer.ggml.tokens']),
                        head_dim=kv.get(f'{arch}.attention.key_length', kv[f'{arch}.embedding_length'] // n_heads),
                        rope_theta=kv[f'{arch}.rope.freq_base'], max_context=max_context,
                        qk_norm=int(state_dict['blk.0.attn_q_norm.weight'].shape[0]) if 'blk.0.attn_q_norm.weight' in state_dict else 0,
                        num_experts=kv.get(f'{arch}.expert_count', 0), num_experts_per_tok=kv.get(f'{arch}.expert_used_count', 0))
    nn.state.load_state_dict(model, state_dict, verbose=False, consume=True, realize=False) # NOTE: rope_freqs.weight (32,) is unused
    # NOTE: without this contiguous, it unpacks the weights from the model every time. we shouldn't need this, but for now it's faster
    if realize:
      for s in (params:=nn.state.get_parameters(model)): s.replace(s.contiguous())
      Tensor.realize(*params)
    return model, kv
  def generate(self, tokens:list[int], start_pos=0):
    """Greedily generate token ids one at a time (mutates `tokens`), up to max_context."""
    v_start_pos = UOp.variable("start_pos", 1, self.max_context-1)
    t = Tensor([tokens[start_pos:]], dtype="int32")
    while len(tokens) < self.max_context:
      # bind a symbolic start_pos for single-token steps (enables the JIT path in __call__)
      t = self(t, v_start_pos.bind(start_pos) if getenv("SYM", 1) and start_pos != 0 and t.shape[-1] == 1 else start_pos)
      next_id = int(t.item())
      tokens.append(next_id)
      start_pos = len(tokens) - 1
      yield next_id
# model name -> GGUF checkpoint URL on Hugging Face (consumed by Transformer.from_gguf via Tensor.from_url)
models = {
  "llama3.2:1b": "https://huggingface.co/bartowski/Llama-3.2-1B-Instruct-GGUF/resolve/main/Llama-3.2-1B-Instruct-Q6_K.gguf",
  "llama3.2:1b-q4": "https://huggingface.co/bartowski/Llama-3.2-1B-Instruct-GGUF/resolve/main/Llama-3.2-1B-Instruct-Q4_K_M.gguf",
  "llama3.2:3b": "https://huggingface.co/bartowski/Llama-3.2-3B-Instruct-GGUF/resolve/main/Llama-3.2-3B-Instruct-Q6_K.gguf",
  "llama3.2:3b-f16": "https://huggingface.co/bartowski/Llama-3.2-3B-Instruct-GGUF/resolve/main/Llama-3.2-3B-Instruct-f16.gguf",
  "llama3.1:8b": "https://huggingface.co/bartowski/Meta-Llama-3.1-8B-Instruct-GGUF/resolve/main/Meta-Llama-3.1-8B-Instruct-Q8_0.gguf",
  "qwen3:0.6b": "https://huggingface.co/Qwen/Qwen3-0.6B-GGUF/resolve/main/Qwen3-0.6B-Q8_0.gguf",
  "qwen3:1.7b": "https://huggingface.co/unsloth/Qwen3-1.7B-GGUF/resolve/main/Qwen3-1.7B-Q4_K_M.gguf",
  "qwen3:8b": "https://huggingface.co/Qwen/Qwen3-8B-GGUF/resolve/main/Qwen3-8B-Q4_K_M.gguf",
  "qwen3:30b-a3b": "https://huggingface.co/Qwen/Qwen3-30B-A3B-GGUF/resolve/main/Qwen3-30B-A3B-Q4_K_M.gguf",
  "olmoe": "https://huggingface.co/allenai/OLMoE-1B-7B-0924-Instruct-GGUF/resolve/main/olmoe-1b-7b-0924-instruct-q4_k_m.gguf",
}
# *** simple OpenAI compatible server on 11434 to match ollama ***
# OPENAI_BASE_URL=http://localhost:11434/v1 OPENAI_API_KEY=ollama uvx --from gpt-command-line gpt
CHAT_HTML = b'''<!DOCTYPE html><html><head><title>tinygrad chat</title><style>
* { margin: 0 }
body { background: #212121; color: #e3e3e3; font-family: system-ui;
height: 100vh; display: flex; flex-direction: column }
#chat { flex: 1; overflow-y: auto; padding: 20px }
.msg { padding: 10px 16px; margin: 8px 0; white-space: pre-wrap; border-radius: 18px }
.user { background: #2f2f2f; margin-left: auto; width: fit-content; max-width: 70% }
#input { max-width: 768px; width: 100%; margin: 20px auto; padding: 14px 20px;
background: #2f2f2f; color: inherit; font: inherit;
border: none; outline: none; resize: none; border-radius: 24px; field-sizing: content }
</style></head><body><div id="chat"></div>
<textarea id="input" rows="1" placeholder="Ask anything"></textarea>
<script>
input.onkeydown = (e) => { if (e.key === 'Enter' && !e.shiftKey && !e.isComposing) { e.preventDefault(); send() } }
const msgs = [];
async function send() {
if (!input.value.trim()) return;
msgs.push({role: 'user', content: input.value.trim()});
chat.innerHTML += '<div class="msg user">' + input.value.trim().replace(/</g, '<') + '</div>';
input.value = '';
const d = document.createElement('div'); d.className = 'msg'; chat.appendChild(d);
const r = await fetch('/v1/chat/completions', {method: 'POST', headers: {'Content-Type': 'application/json'},
body: JSON.stringify({model: 'llama', messages: msgs, stream: true})});
for (const rd = r.body.getReader(), dec = new TextDecoder();;) {
const {done, value} = await rd.read();
if (done) break;
for (const ln of dec.decode(value).split('\\n'))
if (ln.startsWith('data: ') && !ln.includes('[DONE]'))
try { d.textContent += JSON.parse(ln.slice(6)).choices[0]?.delta?.content || '' } catch {}
chat.scrollTop = chat.scrollHeight;
}
msgs.push({role: 'assistant', content: d.textContent});
}
</script></body></html>'''
class Handler(HTTPRequestHandler):
  """HTTP handler: serves the chat UI on GET and an OpenAI-compatible
  /v1/chat/completions endpoint on POST.

  Relies on module globals bound in __main__: model, tok, bos_id, eos_id.
  """
  def log_request(self, code='-', size='-'): pass  # silence per-request access logging
  def do_GET(self): self.send_data(CHAT_HTML, content_type="text/html")
  def run_model(self, ids:list[int], model_name:str, include_usage=False):
    """Generator of chat.completion.chunk dicts for the given prompt tokens.

    Yields: an initial role chunk, one chunk per generated token, a stop chunk,
    and optionally a usage chunk (prompt/completion/total token counts).
    """
    stderr_log(f"{self.path} {colored('--', 'BLACK')} in:{len(ids):5d} {colored('--', 'BLACK')} ")
    tmpl = {"id":f"chatcmpl-{uuid.uuid4().hex[:24]}", "object":"chat.completion.chunk", "created":int(time.time()), "model":model_name}
    yield {"choices": [{"index":0, "delta":{"role":"assistant","content":""}, "finish_reason":None}], **tmpl}
    out: list[int] = []
    st = time.perf_counter()
    for next_id in model.generate(ids):
      # pt (prefill end time) is bound by the walrus on the first generated token
      if len(out) == 0: stderr_log(f"prefill:{len(ids)/((pt:=time.perf_counter())-st):4.0f} tok/s {colored('--', 'BLACK')} ")
      if next_id == eos_id: break
      out.append(next_id)
      yield {"choices": [{"index":0, "delta":{"content":tok.decode([next_id])}, "finish_reason":None}], **tmpl}
    yield {"choices": [{"index":0, "delta":{},"finish_reason":"stop"}], **tmpl}
    if include_usage:
      yield {"choices": [], "usage": {"prompt_tokens": len(ids), "completion_tokens": len(out), "total_tokens": len(ids) + len(out)}, **tmpl}
    # NOTE(review): pt is unbound here if model.generate yields nothing at all -- confirm generate always yields at least once
    stderr_log(f"out:{len(out):5d} {colored('--', 'BLACK')} gen: {len(out)/(time.perf_counter()-pt):4.0f} tok/s\n")
  def do_POST(self):
    """Tokenize the chat messages, run the model, and reply streamed (SSE) or unstreamed."""
    raw_body = self.rfile.read(int(self.headers.get("Content-Length", "0")))
    body: dict[str, typing.Any] = json.loads(raw_body.decode("utf-8"))
    if DEBUG >= 1: print(json.dumps(body, indent=2))
    if self.path == "/v1/chat/completions":
      # extract tokens
      ids: list[int] = [bos_id] if bos_id is not None else []
      for msg in body["messages"]:
        ids += tok.role(msg["role"])
        # content can be a str or a list
        content = msg["content"]
        if isinstance(content, str): ids += tok.encode(content)
        elif isinstance(content, list):
          for c in content:
            if c["type"] == "text": ids += tok.encode(c["text"])
            else: raise RuntimeError(f"unhandled type: {c['type']}")
        else: raise RuntimeError(f"unknown content type: {type(content)}")
        ids += tok.end_turn(eos_id)
      ids += tok.role("assistant")
      # reply; usage is included when not streaming, or when stream_options asks for it
      chunks = self.run_model(ids, body["model"], not body.get("stream") or body.get("stream_options",{}).get("include_usage", False))
      if body.get("stream"): self.stream_json(chunks)
      else:
        out = []
        for c in chunks: out.append(c["choices"][0]["delta"].get("content", "") if c["choices"] else "")
        # NOTE: c is the last chunk from the loop above, reused for its id/model/created fields
        self.send_data(json.dumps({**c, "object":"chat.completion",
          "choices":[{"index":0, "message":{"role":"assistant","content":"".join(out)}, "finish_reason":"stop"}]}).encode())
    else:
      raise RuntimeError(f"unhandled path {self.path}")
if __name__ == "__main__":
  # CLI: pick a model, optionally benchmark or serve an OpenAI-compatible API,
  # otherwise drop into an interactive terminal chat loop.
  parser = argparse.ArgumentParser()
  parser.add_argument("--model", "-m", choices=list(models.keys()), default=list(models.keys())[0], help="Model choice")
  parser.add_argument("--max_context", type=int, default=4096, help="Max Context Length")
  parser.add_argument("--serve", nargs='?', type=int, const=11434, metavar="PORT", help="Run OpenAI compatible API (optional port, default 11434)")
  parser.add_argument("--benchmark", nargs='?', type=int, const=20, metavar="COUNT", help="Benchmark tok/s (optional count, default 20)")
  args = parser.parse_args()
  # load the model
  raw_model = Tensor.from_url(models[args.model])
  model, kv = Transformer.from_gguf(raw_model, args.max_context)
  if DEBUG >= 1 or args.benchmark:
    print(f"using model {args.model} with {raw_model.nbytes():,} bytes and {sum(x.numel() for x in nn.state.get_parameters(model)):,} params")
  del raw_model
  # TODO: why this is required to free the RAM of the GGUF copy?
  import gc
  gc.collect()
  # extract some metadata
  tok = SimpleTokenizer.from_gguf_kv(kv)
  bos_id: int|None = kv.get('tokenizer.ggml.bos_token_id') if kv.get('tokenizer.ggml.add_bos_token', True) else None
  eos_id: int = kv['tokenizer.ggml.eos_token_id']
  # do benchmark: time each single-token generation step
  if args.benchmark:
    gen = model.generate(toks:=[bos_id or 0], 0)
    for _ in range(args.benchmark):
      GlobalCounters.reset()
      with Timing(on_exit=lambda x: f", {1e9/x:6.2f} tok/s, {GlobalCounters.global_mem/x:7.2f} GB/s,"
                  f" {GlobalCounters.global_mem//1000000}/{GlobalCounters.mem_used//1000000} MB -- "+\
                  tok.decode(toks).replace("\n", "\\n")): next(gen)
    exit(0)
  # start server (blocks forever)
  if args.serve: TCPServerWithReuse(('', args.serve), Handler).serve_forever()
  # interactive chat
  ids: list[int] = [bos_id] if bos_id is not None else []
  while 1:
    # NOTE(review): only user-side tokens are appended to ids -- presumably the
    # model's KV cache (via start_pos) keeps the assistant context; confirm
    start_pos = max(len(ids) - 1, 0)
    try:
      ids += tok.role("user") + tok.encode(input('>>> ')) + tok.end_turn(eos_id) + tok.role("assistant")
    except EOFError:
      break
    for next_id in model.generate(ids, start_pos):
      sys.stdout.write(tok.decode([next_id]) if next_id != eos_id else "\n\n")
      sys.stdout.flush()
      if next_id == eos_id: break
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "tinygrad/apps/llm.py",
"license": "MIT License",
"lines": 343,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
tinygrad/tinygrad:tinygrad/codegen/gpudims.py | import math
from tinygrad.uop.ops import UOp, Ops, sint, PatternMatcher, UPat, KernelInfo, ssimplify, AxisType, sint_to_uop
from tinygrad.helpers import all_int, dedup, get_contraction
from tinygrad.dtype import dtypes, AddrSpace, Invalid
from tinygrad.renderer import Renderer
def _group_dims(dims:tuple[sint, ...], max_sizes:tuple[int, ...]):
  """Greedily merge adjacent dims until they fit max_sizes; None if impossible."""
  # TODO: symbolic shape
  if not all_int(dims): return dims
  while len(dims) > len(max_sizes) or any(d > m for d,m in zip(dims, max_sizes)):
    # find the first position whose product with its right neighbor still fits
    pos = next((i for i,m in enumerate(max_sizes) if i+1 < len(dims) and dims[i]*dims[i+1] <= m), None)
    if pos is None: return None
    dims = (*dims[:pos], dims[pos]*dims[pos+1], *dims[pos+2:])
  return dims
def _split_dims(dims, max_sizes):
if all(d <= m for d,m in zip(dims, max_sizes)): return dims
_dims = list(dims) + [1]*(3-len(dims))
for i in range(len(_dims)):
while _dims[i] > max_sizes[i]:
div = next((d for d in range(2, math.ceil(math.sqrt(_dims[i])) + 1) if (_dims[i] % d) == 0), 1)
if div == 1: raise RuntimeError(f"cannot limit dim {dims=}, {max_sizes=}")
_dims[i], _dims[(i+1)%len(_dims)] = _dims[i]//div, _dims[(i+1)%len(_dims)]*div
return tuple(_dims[:2] if _dims[2] == 1 else _dims[0] if _dims[1:3] == [1,1] else _dims)
def get_grouped_dims(prefix, dims:tuple[sint, ...], max_sizes:tuple[int, ...]|None, reverse=False) -> list[UOp]:
  """Create one SPECIAL index UOp per launch dim and map them back to `dims`.

  dims are grouped (merged) or split so they fit max_sizes; the returned list
  always has len(dims) index expressions, reconstructed with div/mod where the
  launch shape differs from dims. reverse processes dims back-to-front.
  """
  if reverse: return get_grouped_dims(prefix, dims[::-1], max_sizes)[::-1]
  if max_sizes is None: limited = dims
  else:
    # try to group first: (a, b, c, d) -> (ab, c, d)
    limited = grouped if (grouped := _group_dims(dims, max_sizes)) else dims
    # check if grouping failed
    if len(limited) > len(max_sizes): raise RuntimeError(f"cannot limit dim {dims=}, {max_sizes=}")
    # try to split up dims: (a,) -> (b, c)
    if limited == dims: limited = _split_dims(dims, max_sizes)
  # one SPECIAL per launch dim, named e.g. gidx0, gidx1, ...
  raw_idxs = [UOp(Ops.SPECIAL, dtypes.index, (sint_to_uop(s),), (f"{prefix}{i}")) for i,s in enumerate(limited)]
  if len(limited) < len(dims):
    # grouped case: unflatten each merged launch index into its original dims
    ret = []
    if (contraction:=get_contraction(dims, limited)) is None: raise RuntimeError(f"get_contraction should not be None {dims=} {limited=}")
    for idx, contraction_group in zip(raw_idxs, contraction):
      for c in contraction_group[:-1]:
        ret.append(idx % dims[c])
        idx //= dims[c]
      ret.append(idx)
    return ret
  elif (a:=len(limited)) > (b:=len(dims)):
    # split case: refold the extra launch dims into a single index
    if a == 2 and b == 1: return [raw_idxs[0] * limited[1] + raw_idxs[1]]
    if a == 3 and b == 1: return [(raw_idxs[0] * limited[1] + raw_idxs[1]) * limited[2] + raw_idxs[2]]
  if limited != dims:
    # same/overflowed rank but different sizes: go through a flat 1D index
    # Convert to 1D
    flat = raw_idxs[0]*limited[1]+raw_idxs[1] if len(limited) == 2 else raw_idxs[0]*(limited[1]*limited[2])+raw_idxs[1]*limited[2]+raw_idxs[2]
    # Get back original indices from 1D
    return [flat//dims[1], flat%dims[1]] if len(dims) == 2 else [flat//(dims[2]*dims[1]), (flat//dims[2])%dims[1], flat%dims[2]]
  return raw_idxs
def add_gpudims(ctx:Renderer, s:UOp):
  """Replace GLOBAL/THREAD and WARP/LOCAL/GROUP_REDUCE ranges under a SINK with
  SPECIAL gidx/lidx UOps sized to the renderer's launch limits.

  Returns the substituted sink, or None when there is nothing to lower.
  """
  if s.arg is None: return None
  s_topo = list(s.toposort())
  # already lowered if a SPECIAL is present
  if any(x.op is Ops.SPECIAL for x in s_topo): return None
  # get ranges, keyed by arg minus the trailing AxisType
  all_ranges = {x.arg[0:-1]:x for x in s_topo if x.op is Ops.RANGE}
  # extract global/local dims
  global_dims = sorted(dedup([x.arg[0:-1] for x in all_ranges.values() if x.arg[-1] in (AxisType.GLOBAL, AxisType.THREAD)]))
  local_dims = sorted(dedup([x.arg[0:-1] for x in all_ranges.values() if x.arg[-1] in (AxisType.WARP, AxisType.LOCAL, AxisType.GROUP_REDUCE)]))
  if not global_dims and not local_dims: return None
  # get global and local shape
  ranges = [all_ranges[r] for r in global_dims+local_dims if r in all_ranges]
  global_shape = tuple([ssimplify(r.src[0]) for r in ranges if r.arg[0:-1] in global_dims])
  local_shape = tuple([ssimplify(r.src[0]) for r in ranges if r.arg[0:-1] in local_dims])
  # get the idxs
  ki: KernelInfo = s.arg
  # CPU-thread renderers get a single core_id variable instead of launch indices
  if ctx.has_threads: idxs = [UOp.variable("core_id", 0, int(global_shape[0])-1, dtypes.int).cast(dtypes.index)]
  elif ki.dont_use_locals:
    assert not local_dims, "can't use locals if there's no local dims"
    idxs = get_grouped_dims("idx", global_shape, ctx.global_max, reverse=True)
  else:
    # define indexes for GPU-like execution
    idxs = get_grouped_dims("gidx", global_shape, ctx.global_max, reverse=True) + get_grouped_dims("lidx", local_shape, ctx.local_max)
  # apply to multiple ranges
  subs = {}
  for r in s_topo:
    # look for local INDEXes that are not used in the GLOBAL store, then add them as an INVALID
    if r.op is Ops.STORE and (idx := r.src[0]).src[0].ptrdtype.addrspace == AddrSpace.GLOBAL:
      missing_locals = [all_ranges[rng] for rng in local_dims if all_ranges[rng] not in idx.ranges]
      if len(missing_locals):
        assert len(idx.src) == 2, "index has 2 sources"
        # mask: only the threads where every missing local index is 0 perform the store
        mask: UOp = UOp.prod(*[x.eq(0) for x in missing_locals])
        subs[idx] = idx.replace(src=(idx.src[0], mask.broadcast(idx.src[1].dtype.count).where(idx.src[1], Invalid)))
    if r.op is not Ops.RANGE: continue
    try:
      ii = (global_dims+local_dims).index(r.arg[0:-1])
      # NOTE(review): r.arg[1] here vs r.arg[-1] used above for the axis type -- confirm the arg layout makes these equivalent
      if r.arg[1] == AxisType.REDUCE: continue
      subs[r] = idxs[ii]
    except ValueError: continue
  return s.substitute(subs)
# rewrite pass that applies add_gpudims to the kernel SINK
pm_add_gpudims = PatternMatcher([
  # add gpudims must be last
  (UPat(Ops.SINK, name="s"), add_gpudims),
])
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "tinygrad/codegen/gpudims.py",
"license": "MIT License",
"lines": 97,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
tinygrad/tinygrad:extra/gemm/halide_gemm.py | import numpy as np
import halide as hl
from tinygrad.helpers import Timing, getenv
# HL_DEBUG_CODEGEN=1
N = getenv("N", 1024)
def gemm_pipeline(gpu=False):
  """Build an NxN float32 GEMM as a Halide pipeline.

  Returns (C, A, B): the output Func and the two input buffer params.
  gpu selects a gpu_tile schedule; otherwise a tiled/parallel/vectorized CPU one.
  """
  # ---------------- Vars & Parameters ----------------
  i, j = hl.Var("i"), hl.Var("j") # output tile coordinates
  A = hl.InputBuffer(hl.Float(32), 2) # [M, K]
  B = hl.InputBuffer(hl.Float(32), 2) # [K, N]
  A.dim(0).set_bounds(0, N)
  A.dim(1).set_bounds(0, N)
  B.dim(0).set_bounds(0, N)
  B.dim(1).set_bounds(0, N)
  # ---------------- Definition ----------------
  k = hl.RDom([(0, N)])  # reduction over the shared K dimension
  partial = hl.Func("partial")
  partial[i, j] = 0.0
  partial[i, j] += A[i, k] * B[k, j]
  C = hl.Func("C")
  C[i, j] = partial[i, j]
  if not gpu:
    # ---------------- Schedule ----------------
    VEC = 16
    TILE_I = 64
    TILE_J = 64
    io, jo, ii, ji = hl.Var("io"), hl.Var("jo"), hl.Var("ii"), hl.Var("ji")
    C.update().tile(i, j, io, jo, ii, ji, TILE_I, TILE_J).fuse(io, jo, io).parallel(io).vectorize(ji, VEC)
  else:
    # ---------------- Schedule ----------------
    GRP_I = 8 # output tile size
    GRP_J = 16
    #partial.store_in(hl.MemoryType.Register)
    #partial.update().unroll(k, 4)
    io, jo, ii, ji = hl.Var(), hl.Var(), hl.Var(), hl.Var()
    C.gpu_tile(i, j, io, jo, ii, ji, GRP_I, GRP_J, hl.TailStrategy.RoundUp)
  return C, A, B
if __name__ == "__main__":
  # build the pipeline, run it twice (first run includes compile), and check vs numpy
  pipe, A, B = gemm_pipeline(gpu=True)
  # NOTE: the Metal target feature does nothing here
  target = hl.get_host_target().with_feature(hl.TargetFeature.Metal)
  a_np = np.random.randn(N, N).astype(np.float32)
  b_np = np.random.randn(N, N).astype(np.float32)
  # reverse order is correct! (presumably Halide's buffer axis order is the
  # transpose of numpy's -- confirm against the Halide Python binding docs)
  a_hal = hl.Buffer(b_np)
  b_hal = hl.Buffer(a_np)
  A.set(a_hal)
  B.set(b_hal)
  pipe.compile_to_lowered_stmt("/tmp/my_function.html", [A, B], hl.StmtOutputFormat.HTML, target=target)
  #exit(0)
  c_hal = hl.Buffer(hl.Float(32), [N,N])
  with Timing("halide gemm "):
    pipe.realize(c_hal, target)
    c_hal.copy_to_host()
  c_out = np.array(c_hal)
  print(c_out)
  # tinygrad gets 60 ms with no BEAM, 20 ms with BEAM on CPU
  with Timing("halide gemm "):
    pipe.realize(c_hal, target)
    c_hal.copy_to_host()
  # Check correctness
  with Timing("numpy gemm "):
    ref = a_np @ b_np
  max_err = np.abs(ref - c_out).max()
  print("Max absolute error:", max_err)
  assert max_err < 1e-4, "GEMM result incorrect!"
  print("Pipeline ran on", target)
  print("Success - GEMM Halide-Python output matches NumPy.")
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "extra/gemm/halide_gemm.py",
"license": "MIT License",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
tinygrad/tinygrad:extra/perfetto/to_perfetto.py | import sys, pickle, decimal, json
from tinygrad.device import ProfileDeviceEvent, ProfileGraphEvent
from tinygrad.helpers import tqdm, temp, ProfileEvent, ProfileRangeEvent, TracingKey
# device name -> (timestamp offset added onto the device clock, perfetto pid);
# populated by dev_ev_to_perfetto_json
devices:dict[str, tuple[decimal.Decimal, int]] = {}
def prep_ts(device:str, ts:decimal.Decimal):
  """Shift a device-local timestamp by the device's registered offset, as an int."""
  offset, _pid = devices[device]
  return int(decimal.Decimal(ts) + offset)
def dev_to_pid(device:str):
  """Perfetto process/thread ids for a device (one pid per device, tid always 0)."""
  _offset, pid = devices[device]
  return {"pid": pid, "tid": 0}
def dev_ev_to_perfetto_json(ev:ProfileDeviceEvent):
  """Register the device in the module-level table and emit its perfetto
  process_name/thread_name metadata ("M") records."""
  devices[ev.device] = (ev.tdiff, len(devices))
  pid = dev_to_pid(ev.device)['pid']
  return [{"name": "process_name", "ph": "M", "pid": pid, "args": {"name": ev.device}},
          {"name": "thread_name", "ph": "M", "pid": pid, "tid": 0, "args": {"name": ev.device}}]
def range_ev_to_perfetto_json(ev:ProfileRangeEvent):
  """One complete-slice ("X") perfetto record for a profiled range."""
  label = ev.name.display_name if isinstance(ev.name, TracingKey) else ev.name
  record = {"name": label, "ph": "X", "ts": prep_ts(ev.device, ev.st), "dur": float(ev.en-ev.st)}
  record.update(dev_to_pid(ev.device))
  return [record]
def graph_ev_to_perfetto_json(ev:ProfileGraphEvent, reccnt):
  """Convert a graph event into perfetto records: one "X" slice per entry plus
  flow-start ("s") / flow-finish ("f") arrows for every dependency edge.

  reccnt is the number of records already emitted; it offsets the flow ids so
  they stay unique across the whole trace.
  """
  ret = []
  for i,e in enumerate(ev.ents):
    st, en = ev.sigs[e.st_id], ev.sigs[e.en_id]
    name = e.name.display_name if isinstance(e.name, TracingKey) else e.name
    ret += [{"name": name, "ph": "X", "ts": prep_ts(e.device, st), "dur": float(en-st), **dev_to_pid(e.device)}]
    for dep in ev.deps[i]:
      d = ev.ents[dep]
      # the "s" record takes id reccnt+len(ret); the "f" record appended right
      # after uses len(ret)-1, so the pair shares one flow id
      ret += [{"ph": "s", **dev_to_pid(d.device), "id": reccnt+len(ret), "ts": prep_ts(d.device, ev.sigs[d.en_id]), "bp": "e"}]
      ret += [{"ph": "f", **dev_to_pid(e.device), "id": reccnt+len(ret)-1, "ts": prep_ts(e.device, st), "bp": "e"}]
  return ret
def to_perfetto(profile:list[ProfileEvent]):
  """Convert a list of ProfileEvents into a perfetto/chrome-trace JSON dict.

  Returns {"traceEvents": [...]}. Device events are converted first because
  range/graph conversion looks devices up in the module-level `devices` table.
  """
  # Start json with devices. Copy instead of += so the caller's list is not
  # mutated (a second call would otherwise see a duplicate "TINY" device event).
  profile = profile + [ProfileDeviceEvent("TINY")]
  prof_json = [x for ev in profile if isinstance(ev, ProfileDeviceEvent) for x in dev_ev_to_perfetto_json(ev)]
  for ev in tqdm(profile, desc="preparing profile"):
    if isinstance(ev, ProfileRangeEvent): prof_json += range_ev_to_perfetto_json(ev)
    elif isinstance(ev, ProfileGraphEvent): prof_json += graph_ev_to_perfetto_json(ev, reccnt=len(prof_json))
  return {"traceEvents": prof_json}
if __name__ == "__main__":
  # usage: to_perfetto.py <pickled profile file>
  fp = sys.argv[1]
  with open(fp, "rb") as f: profile = pickle.load(f)
  ret = to_perfetto(profile)
  # NOTE: fp is rebound by the walrus to the output path for the message below
  with open(fp:=temp("perfetto.json", append_user=True), "w") as f: json.dump(ret, f)
  print(f"Saved perfetto output to {fp}. You can use upload this to the perfetto UI or Chrome devtools.")
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "extra/perfetto/to_perfetto.py",
"license": "MIT License",
"lines": 38,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
tinygrad/tinygrad:tinygrad/runtime/support/nv/ip.py | from __future__ import annotations
import ctypes, time, array, struct, itertools, dataclasses
from typing import cast, Any
from tinygrad.runtime.autogen import nv, nv_570 as nv_gpu, pci
from tinygrad.helpers import to_mv, lo32, hi32, DEBUG, round_up, round_down, mv_address, fetch, wait_cond, ceildiv
from tinygrad.runtime.support.system import System
from tinygrad.runtime.support.elf import elf_loader
# Descriptor for a GR buffer: its size and whether it needs virtual / physical /
# local mappings -- presumably consumed by graphics context setup code; TODO confirm
@dataclasses.dataclass(frozen=True)
class GRBufDesc: size:int; virt:bool; phys:bool; local:bool=False # noqa: E702
class NV_IP:
  """Base class for a GPU IP block; subclasses override the init/fini hooks."""
  def __init__(self, nvdev): self.nvdev = nvdev
  def init_sw(self): pass # Prepare sw/allocations for this IP
  def init_hw(self): pass # Initialize hw for this IP
  def fini_hw(self): pass # Finalize hw for this IP
class NVRpcQueue:
  """One direction of the CPU<->GSP RPC ring buffer in shared system memory.

  tx is this queue's header; rx (only for the command queue) points at the
  completion queue's receive header so responses can be drained.
  """
  def __init__(self, gsp:NV_GSP, va:int, completion_q_va:int|None=None):
    self.tx = nv.msgqTxHeader.from_address(va)
    # entryOff == 0x1000 signals the queue header has been set up
    wait_cond(lambda: self.tx.entryOff, value=0x1000, msg="RPC queue not initialized")
    if completion_q_va is not None: self.rx = nv.msgqRxHeader.from_address(completion_q_va + nv.msgqTxHeader.from_address(completion_q_va).rxHdrOff)
    self.gsp, self.va, self.queue_va, self.seq = gsp, va, va + self.tx.entryOff, 0
    self.queue_mv = to_mv(self.queue_va, self.tx.msgSize * self.tx.msgCount)
  def _checksum(self, data:bytes):
    # XOR-fold the (zero-padded) payload as little-endian u64 words, then fold hi/lo 32 bits
    if (pad_len:=(-len(data)) % 8): data += b'\x00' * pad_len
    checksum = 0
    for offset in range(0, len(data), 8): checksum ^= struct.unpack_from('Q', data, offset)[0]
    return hi32(checksum) ^ lo32(checksum)
  def _send_rpc_record(self, func:int, msg:bytes):
    """Wrap msg in rpc + queue-element headers, copy into the ring (with wraparound), ring the doorbell."""
    header = nv.rpc_message_header_v(signature=nv.NV_VGPU_MSG_SIGNATURE_VALID, rpc_result=nv.NV_VGPU_MSG_RESULT_RPC_PENDING,
      rpc_result_private=nv.NV_VGPU_MSG_RESULT_RPC_PENDING, header_version=(3<<24), function=func, length=len(msg) + 0x20)
    msg = bytes(header) + msg
    phdr = nv.GSP_MSG_QUEUE_ELEMENT(elemCount=ceildiv(len(msg) + ctypes.sizeof(nv.GSP_MSG_QUEUE_ELEMENT), self.tx.msgSize), seqNum=self.seq)
    phdr.checkSum = self._checksum(bytes(phdr) + msg)
    msg = (bytes(phdr) + msg).ljust(phdr.elemCount * self.tx.msgSize, b'\x00')
    # copy in up to two pieces: tail of the ring, then (if wrapped) the head
    off, first = self.tx.writePtr * self.tx.msgSize, min(len(msg), len(self.queue_mv) - self.tx.writePtr * self.tx.msgSize)
    self.queue_mv[off:off+first] = msg[:first]
    if first < len(msg): self.queue_mv[:len(msg)-first] = msg[first:]
    self.tx.writePtr = (self.tx.writePtr + phdr.elemCount) % self.tx.msgCount
    System.memory_barrier()
    self.seq += 1
    # doorbell: notify the GSP a new record is queued
    self.gsp.nvdev.NV_PGSP_QUEUE_HEAD[0].write(0x0)
  def send_rpc(self, func:int, msg:bytes):
    # messages larger than 16 ring elements are split into CONTINUATION records
    max_payload = self.tx.msgSize * 16 - ctypes.sizeof(nv.GSP_MSG_QUEUE_ELEMENT) - ctypes.sizeof(nv.rpc_message_header_v)
    self._send_rpc_record(func, msg[:max_payload])
    for off in range(max_payload, len(msg), max_payload): self._send_rpc_record(nv.NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD, msg[off:off+max_payload])
  def read_resp(self):
    """Drain pending records, yielding (function, payload) for each one."""
    System.memory_barrier()
    while self.rx.readPtr != self.tx.writePtr:
      off = self.rx.readPtr * self.tx.msgSize
      hdr = nv.rpc_message_header_v.from_address(self.queue_va + off + 0x30)
      msg = self.queue_mv[off + 0x50 : off + 0x50 + hdr.length]
      # Handling special functions
      if hdr.function == nv.NV_VGPU_MSG_EVENT_GSP_RUN_CPU_SEQUENCER: self.gsp.run_cpu_seq(msg)
      elif hdr.function == nv.NV_VGPU_MSG_EVENT_OS_ERROR_LOG:
        print(f"nv {self.gsp.nvdev.devfmt}: GSP LOG: {msg[12:].tobytes().rstrip(bytes([0])).decode('utf-8')}")
      self.gsp.nvdev.is_err_state |= hdr.function in {nv.NV_VGPU_MSG_EVENT_OS_ERROR_LOG, nv.NV_VGPU_MSG_EVENT_MMU_FAULT_QUEUED}
      # Update the read pointer
      self.rx.readPtr = (self.rx.readPtr + round_up(hdr.length, self.tx.msgSize) // self.tx.msgSize) % self.tx.msgCount
      System.memory_barrier()
      if DEBUG >= 3:
        nm = nv.rpc_fns.get(hdr.function, nv.rpc_events.get(hdr.function, f'ev:{hdr.function:x}'))
        print(f"nv {self.gsp.nvdev.devfmt}: in RPC: {nm}, res:{hdr.rpc_result:#x}")
      if hdr.rpc_result != 0: raise RuntimeError(f"RPC call {hdr.function} failed with result {hdr.rpc_result}")
      yield hdr.function, msg
  def wait_resp(self, cmd:int, timeout:int=10000) -> memoryview:
    """Poll read_resp until a record for cmd arrives; timeout is in milliseconds."""
    start_time = int(time.perf_counter() * 1000)
    while (int(time.perf_counter() * 1000) - start_time) < timeout:
      if (msg:=next((message for func, message in self.read_resp() if func == cmd), None)) is not None: return msg
    raise RuntimeError(f"Timeout waiting for RPC response for command {cmd}")
class NV_FLCN(NV_IP):
  """Falcon/SEC2 microcontroller bring-up (pre-Hopper boot path).

  Runs the FWSEC ucode extracted from the VBIOS to set up the WPR2 region,
  then the Booter ucode on SEC2 to load the GSP firmware onto the GSP core.
  """
  def wait_for_reset(self):
    wait_cond(lambda _: self.nvdev.NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_PRIV_LEVEL_MASK.read_bitfields()['read_protection_level0'] == 1 and
      self.nvdev.NV_PGC6_AON_SECURE_SCRATCH_GROUP_05[0].read() & 0xff == 0xff, "waiting for reset")
  def init_sw(self):
    # register headers for the falcon/sec2/riscv blocks
    self.nvdev.include("src/common/inc/swref/published/ampere/ga102/dev_gsp.h")
    self.nvdev.include("src/common/inc/swref/published/ampere/ga102/dev_falcon_v4.h")
    self.nvdev.include("src/common/inc/swref/published/ampere/ga102/dev_falcon_v4_addendum.h")
    self.nvdev.include("src/common/inc/swref/published/ampere/ga102/dev_riscv_pri.h")
    self.nvdev.include("src/common/inc/swref/published/ampere/ga102/dev_fbif_v4.h")
    self.nvdev.include("src/common/inc/swref/published/ampere/ga102/dev_falcon_second_pri.h")
    self.nvdev.include("src/common/inc/swref/published/ampere/ga102/dev_sec_pri.h")
    self.nvdev.include("src/common/inc/swref/published/turing/tu102/dev_bus.h")
    self.prep_ucode()
    self.prep_booter()
  def prep_ucode(self):
    """Find FWSEC in the VBIOS expansion ROM and build a patched image that runs the FRTS command."""
    # read the VBIOS out of the MMIO window
    vbios_bytes, vbios_off = memoryview(bytes(array.array('I', self.nvdev.mmio[0x00300000//4:(0x00300000+0x100000)//4]))), 0
    # walk the PCI ROM images to locate the NV extension block
    while True:
      pci_blck = vbios_bytes[vbios_off + nv.OFFSETOF_PCI_EXP_ROM_PCI_DATA_STRUCT_PTR:].cast('H')[0]
      imglen = vbios_bytes[vbios_off + pci_blck + nv.OFFSETOF_PCI_DATA_STRUCT_IMAGE_LEN:].cast('H')[0] * nv.PCI_ROM_IMAGE_BLOCK_SIZE
      match vbios_bytes[vbios_off + pci_blck + nv.OFFSETOF_PCI_DATA_STRUCT_CODE_TYPE]:
        case nv.NV_BCRT_HASH_INFO_BASE_CODE_TYPE_VBIOS_BASE: block_size = imglen
        case nv.NV_BCRT_HASH_INFO_BASE_CODE_TYPE_VBIOS_EXT:
          expansion_rom_off = vbios_off - block_size
          break
      vbios_off += imglen
    # parse the BIT table ("BIT\0" signature) to find the falcon ucode table
    bit_header = nv.BIT_HEADER_V1_00.from_buffer_copy(vbios_bytes[(bit_addr:=0x1b0):bit_addr + ctypes.sizeof(nv.BIT_HEADER_V1_00)])
    assert bit_header.Signature == 0x00544942, f"Invalid BIT header signature {hex(bit_header.Signature)}"
    for i in range(bit_header.TokenEntries):
      bit = nv.BIT_TOKEN_V1_00.from_buffer_copy(vbios_bytes[bit_addr + bit_header.HeaderSize + i * bit_header.TokenSize:])
      if bit.TokenId != nv.BIT_TOKEN_FALCON_DATA or bit.DataVersion != 2 or bit.DataSize < nv.BIT_DATA_FALCON_DATA_V2_SIZE_4: continue
      falcon_data = nv.BIT_DATA_FALCON_DATA_V2.from_buffer_copy(vbios_bytes[bit.DataPtr & 0xffff:])
      ucode_hdr = nv.FALCON_UCODE_TABLE_HDR_V1.from_buffer_copy(vbios_bytes[(table_ptr:=expansion_rom_off + falcon_data.FalconUcodeTablePtr):])
      for j in range(ucode_hdr.EntryCount):
        ucode_entry = nv.FALCON_UCODE_TABLE_ENTRY_V1.from_buffer_copy(vbios_bytes[table_ptr + ucode_hdr.HeaderSize + j * ucode_hdr.EntrySize:])
        if ucode_entry.ApplicationID != nv.FALCON_UCODE_ENTRY_APPID_FWSEC_PROD: continue
        ucode_desc_hdr = nv.FALCON_UCODE_DESC_HEADER.from_buffer_copy(vbios_bytes[expansion_rom_off + ucode_entry.DescPtr:])
        ucode_desc_off = expansion_rom_off + ucode_entry.DescPtr
        ucode_desc_size = ucode_desc_hdr.vDesc >> 16
        self.desc_v3 = nv.FALCON_UCODE_DESC_V3.from_buffer_copy(vbios_bytes[ucode_desc_off:ucode_desc_off + ucode_desc_size])
        sig_total_size = ucode_desc_size - nv.FALCON_UCODE_DESC_V3_SIZE_44
        signature = vbios_bytes[ucode_desc_off + nv.FALCON_UCODE_DESC_V3_SIZE_44:][:sig_total_size]
        image = vbios_bytes[ucode_desc_off + ucode_desc_size:][:round_up(self.desc_v3.StoredSize, 256)]
        # FRTS region placed 2MB below the top of vram
        self.frts_offset = self.nvdev.vram_size - 0x100000 - 0x100000
        read_vbios_desc = nv.FWSECLIC_READ_VBIOS_DESC(version=0x1, size=ctypes.sizeof(nv.FWSECLIC_READ_VBIOS_DESC), flags=2)
        frst_reg_desc = nv.FWSECLIC_FRTS_REGION_DESC(version=0x1, size=ctypes.sizeof(nv.FWSECLIC_FRTS_REGION_DESC),
          frtsRegionOffset4K=self.frts_offset >> 12, frtsRegionSize=0x100, frtsRegionMediaType=2)
        frts_cmd = nv.FWSECLIC_FRTS_CMD(readVbiosDesc=read_vbios_desc, frtsRegionDesc=frst_reg_desc)
        def __patch(cmd_id, cmd):
          # patch the DMEM mapper inside the image so the ucode runs cmd_id with cmd as input
          patched_image = bytearray(image)
          dmem_offset = 0
          hdr = nv.FALCON_APPLICATION_INTERFACE_HEADER_V1.from_buffer_copy(image[(app_hdr_off:=self.desc_v3.IMEMLoadSize+self.desc_v3.InterfaceOffset):])
          ents = (nv.FALCON_APPLICATION_INTERFACE_ENTRY_V1 * hdr.entryCount).from_buffer_copy(image[app_hdr_off + ctypes.sizeof(hdr):])
          for i in range(hdr.entryCount):
            if ents[i].id == nv.FALCON_APPLICATION_INTERFACE_ENTRY_ID_DMEMMAPPER: dmem_offset = ents[i].dmemOffset
          # Patch image
          dmem = nv.FALCON_APPLICATION_INTERFACE_DMEM_MAPPER_V3.from_buffer_copy(image[(dmem_mapper_offset:=self.desc_v3.IMEMLoadSize+dmem_offset):])
          dmem.init_cmd = cmd_id
          patched_image[dmem_mapper_offset : dmem_mapper_offset+len(bytes(dmem))] = bytes(dmem)
          patched_image[(cmd_off:=self.desc_v3.IMEMLoadSize+dmem.cmd_in_buffer_offset) : cmd_off+len(cmd)] = cmd
          patched_image[(sig_off:=self.desc_v3.IMEMLoadSize+self.desc_v3.PKCDataOffset) : sig_off+0x180] = signature[-0x180:]
          return self.nvdev._alloc_sysmem(len(patched_image), contiguous=True, data=patched_image)
        _, self.frts_image_sysmem = __patch(0x15, bytes(frts_cmd))
  def prep_booter(self):
    """Patch the Booter-load ucode with its production signature and stage it in sysmem."""
    image = self.nvdev.extract_fw("kgspBinArchiveBooterLoadUcode", "image_prod_data")
    sig = self.nvdev.extract_fw("kgspBinArchiveBooterLoadUcode", "sig_prod_data")
    header = self.nvdev.extract_fw("kgspBinArchiveBooterLoadUcode", "header_prod_data")
    patch_loc = int.from_bytes(self.nvdev.extract_fw("kgspBinArchiveBooterLoadUcode", "patch_loc_data"), 'little')
    sig_len = len(sig) // int.from_bytes(self.nvdev.extract_fw("kgspBinArchiveBooterLoadUcode", "num_sigs_data"), 'little')
    patched_image = bytearray(image)
    patched_image[patch_loc:patch_loc+sig_len] = sig[:sig_len]
    _, self.booter_image_sysmem = self.nvdev._alloc_sysmem(len(patched_image), contiguous=True, data=patched_image)
    _, _, self.booter_data_off, self.booter_data_sz, _, self.booter_code_off, self.booter_code_sz, _, _ = struct.unpack("9I", header)
  def init_hw(self):
    """Boot sequence: FWSEC on the GSP falcon (sets up WPR2), then Booter on SEC2."""
    self.falcon, self.sec2 = 0x00110000, 0x00840000
    self.reset(self.falcon)
    self.execute_hs(self.falcon, self.frts_image_sysmem[0], code_off=0x0, data_off=self.desc_v3.IMEMLoadSize,
      imemPa=self.desc_v3.IMEMPhysBase, imemVa=self.desc_v3.IMEMVirtBase, imemSz=self.desc_v3.IMEMLoadSize,
      dmemPa=self.desc_v3.DMEMPhysBase, dmemVa=0x0, dmemSz=self.desc_v3.DMEMLoadSize,
      pkc_off=self.desc_v3.PKCDataOffset, engid=self.desc_v3.EngineIdMask, ucodeid=self.desc_v3.UcodeId)
    assert self.nvdev.NV_PFB_PRI_MMU_WPR2_ADDR_HI.read() != 0, "WPR2 is not initialized"
    self.reset(self.falcon, riscv=True)
    # set up the mailbox
    self.nvdev.NV_PGSP_FALCON_MAILBOX0.write(lo32(self.nvdev.gsp.libos_args_sysmem[0]))
    self.nvdev.NV_PGSP_FALCON_MAILBOX1.write(hi32(self.nvdev.gsp.libos_args_sysmem[0]))
    # booter
    self.reset(self.sec2)
    mbx = self.execute_hs(self.sec2, self.booter_image_sysmem[0], code_off=self.booter_code_off, data_off=self.booter_data_off,
      imemPa=0x0, imemVa=self.booter_code_off, imemSz=self.booter_code_sz, dmemPa=0x0, dmemVa=0x0, dmemSz=self.booter_data_sz,
      pkc_off=0x10, engid=1, ucodeid=3, mailbox=self.nvdev.gsp.wpr_meta_sysmem)
    assert mbx[0] == 0x0, f"Booter failed to execute, mailbox is {mbx[0]:08x}, {mbx[1]:08x}"
    self.nvdev.NV_PFALCON_FALCON_OS.with_base(self.falcon).write(0x0)
    assert self.nvdev.NV_PRISCV_RISCV_CPUCTL.with_base(self.falcon).read_bitfields()['active_stat'] == 1, "GSP Core is not active"
  def execute_dma(self, base:int, cmd:int, dest:int, mem_off:int, sysmem:int, size:int):
    """Copy `size` bytes from sysmem into falcon IMEM/DMEM, 256 bytes per transfer."""
    wait_cond(lambda: self.nvdev.NV_PFALCON_FALCON_DMATRFCMD.with_base(base).read_bitfields()['full'], value=0, msg="DMA does not progress")
    self.nvdev.NV_PFALCON_FALCON_DMATRFBASE.with_base(base).write(lo32(sysmem >> 8))
    self.nvdev.NV_PFALCON_FALCON_DMATRFBASE1.with_base(base).write(hi32(sysmem >> 8) & 0x1ff)
    xfered = 0
    while xfered < size:
      wait_cond(lambda: self.nvdev.NV_PFALCON_FALCON_DMATRFCMD.with_base(base).read_bitfields()['full'], value=0, msg="DMA does not progress")
      self.nvdev.NV_PFALCON_FALCON_DMATRFMOFFS.with_base(base).write(dest + xfered)
      self.nvdev.NV_PFALCON_FALCON_DMATRFFBOFFS.with_base(base).write(mem_off + xfered)
      self.nvdev.NV_PFALCON_FALCON_DMATRFCMD.with_base(base).write(cmd)
      xfered += 256
    wait_cond(lambda: self.nvdev.NV_PFALCON_FALCON_DMATRFCMD.with_base(base).read_bitfields()['idle'], msg="DMA does not complete")
  def start_cpu(self, base:int):
    # use the alias register when alias_en is set, otherwise the normal startcpu bit
    if self.nvdev.NV_PFALCON_FALCON_CPUCTL.with_base(base).read_bitfields()['alias_en'] == 1:
      self.nvdev.wreg(base + self.nvdev.NV_PFALCON_FALCON_CPUCTL_ALIAS, 0x2)
    else: self.nvdev.NV_PFALCON_FALCON_CPUCTL.with_base(base).write(startcpu=1)
  def wait_cpu_halted(self, base): wait_cond(lambda: self.nvdev.NV_PFALCON_FALCON_CPUCTL.with_base(base).read_bitfields()['halted'], msg="not halted")
  def execute_hs(self, base, img_sysmem, code_off, data_off, imemPa, imemVa, imemSz, dmemPa, dmemVa, dmemSz, pkc_off, engid, ucodeid, mailbox=None):
    """Load and run a heavy-secure ucode on the falcon at `base`.

    DMAs code into IMEM (secure) and data into DMEM, programs the boot ROM
    (PKC signature params, RSA3K), then starts the CPU and waits for halt.
    Returns the two mailbox registers if `mailbox` was supplied, else None.
    """
    self.disable_ctx_req(base)
    self.nvdev.NV_PFALCON_FBIF_TRANSCFG.with_base(base)[ctx_dma:=0].update(target=self.nvdev.NV_PFALCON_FBIF_TRANSCFG_TARGET_COHERENT_SYSMEM,
      mem_type=self.nvdev.NV_PFALCON_FBIF_TRANSCFG_MEM_TYPE_PHYSICAL)
    cmd = self.nvdev.NV_PFALCON_FALCON_DMATRFCMD.with_base(base).encode(write=0, size=self.nvdev.NV_PFALCON_FALCON_DMATRFCMD_SIZE_256B,
      ctxdma=ctx_dma, imem=1, sec=1)
    self.execute_dma(base, cmd, dest=imemPa, mem_off=imemVa, sysmem=img_sysmem+code_off-imemVa, size=imemSz)
    cmd = self.nvdev.NV_PFALCON_FALCON_DMATRFCMD.with_base(base).encode(write=0, size=self.nvdev.NV_PFALCON_FALCON_DMATRFCMD_SIZE_256B,
      ctxdma=ctx_dma, imem=0, sec=0)
    self.execute_dma(base, cmd, dest=dmemPa, mem_off=dmemVa, sysmem=img_sysmem+data_off-dmemVa, size=dmemSz)
    self.nvdev.NV_PFALCON2_FALCON_BROM_PARAADDR.with_base(base)[0].write(pkc_off)
    self.nvdev.NV_PFALCON2_FALCON_BROM_ENGIDMASK.with_base(base).write(engid)
    self.nvdev.NV_PFALCON2_FALCON_BROM_CURR_UCODE_ID.with_base(base).write(val=ucodeid)
    self.nvdev.NV_PFALCON2_FALCON_MOD_SEL.with_base(base).write(algo=self.nvdev.NV_PFALCON2_FALCON_MOD_SEL_ALGO_RSA3K)
    self.nvdev.NV_PFALCON_FALCON_BOOTVEC.with_base(base).write(imemVa)
    if mailbox is not None:
      self.nvdev.NV_PFALCON_FALCON_MAILBOX0.with_base(base).write(lo32(mailbox))
      self.nvdev.NV_PFALCON_FALCON_MAILBOX1.with_base(base).write(hi32(mailbox))
    self.start_cpu(base)
    self.wait_cpu_halted(base)
    if mailbox is not None:
      return self.nvdev.NV_PFALCON_FALCON_MAILBOX0.with_base(base).read(), self.nvdev.NV_PFALCON_FALCON_MAILBOX1.with_base(base).read()
  def disable_ctx_req(self, base:int):
    # allow physical DMA without a bound context
    self.nvdev.NV_PFALCON_FBIF_CTL.with_base(base).update(allow_phys_no_ctx=1)
    self.nvdev.NV_PFALCON_FALCON_DMACTL.with_base(base).write(0x0)
  def reset(self, base:int, riscv=False):
    """Pulse engine reset, wait for memory scrubbing, and select the falcon or riscv core."""
    engine_reg = self.nvdev.NV_PGSP_FALCON_ENGINE if base == self.falcon else self.nvdev.NV_PSEC_FALCON_ENGINE
    engine_reg.write(reset=1)
    time.sleep(0.1)
    engine_reg.write(reset=0)
    wait_cond(lambda: self.nvdev.NV_PFALCON_FALCON_HWCFG2.with_base(base).read_bitfields()['mem_scrubbing'], value=0, msg="Scrubbing not completed")
    if riscv: self.nvdev.NV_PRISCV_RISCV_BCR_CTRL.with_base(base).write(core_select=1, valid=0, brfetch=1)
    elif self.nvdev.NV_PFALCON_FALCON_HWCFG2.with_base(base).read_bitfields()['riscv'] == 1:
      self.nvdev.NV_PRISCV_RISCV_BCR_CTRL.with_base(base).write(core_select=0)
      wait_cond(lambda: self.nvdev.NV_PRISCV_RISCV_BCR_CTRL.with_base(base).read_bitfields()['valid'], msg="RISCV core not booted")
    self.nvdev.NV_PFALCON_FALCON_RM.with_base(base).write(self.nvdev.chip_id)
class NV_FLCN_COT(NV_IP):
  """Chain-of-trust boot path: asks the FSP (via MCTP messages over EMEM) to
  load and start the signed GSP FMC image, instead of driving falcons directly."""
  def wait_for_reset(self):
    self.nvdev.include("src/common/inc/swref/published/blackwell/gb202/dev_therm.h")
    wait_cond(lambda _: self.nvdev.NV_THERM_I2CS_SCRATCH.read() == 0xff, "waiting for reset")
  def init_sw(self):
    self.nvdev.include("src/common/inc/swref/published/ampere/ga102/dev_gsp.h")
    self.nvdev.include("src/common/inc/swref/published/hopper/gh100/dev_falcon_v4.h")
    self.nvdev.include("src/common/inc/swref/published/hopper/gh100/dev_vm.h")
    self.nvdev.include("src/common/inc/swref/published/hopper/gh100/dev_fsp_pri.h")
    self.nvdev.include("src/common/inc/swref/published/turing/tu102/dev_bus.h")
    self.nvdev.include("src/nvidia/arch/nvalloc/common/inc/fsp/fsp_mctp_format.h")
    self.nvdev.include("src/nvidia/arch/nvalloc/common/inc/fsp/fsp_emem_channels.h")
    self.fmc_boot_args, self.fmc_boot_args_sysmem = self.nvdev._alloc_boot_struct(nv.GSP_FMC_BOOT_PARAMS())
    self.init_fmc_image()
  def init_fmc_image(self):
    """Stage the signed GSP FMC image plus its hash/signature/public key."""
    self.fmc_booter_image = self.nvdev.extract_fw("kgspBinArchiveGspRmFmcGfwProdSigned", "ucode_image_data")
    self.fmc_booter_hash = memoryview(self.nvdev.extract_fw("kgspBinArchiveGspRmFmcGfwProdSigned", "ucode_hash_data")).cast('I')
    self.fmc_booter_sig = memoryview(self.nvdev.extract_fw("kgspBinArchiveGspRmFmcGfwProdSigned", "ucode_sig_data")).cast('I')
    # pad to a multiple of 4 bytes so the 'I' cast is valid
    self.fmc_booter_pkey = memoryview(self.nvdev.extract_fw("kgspBinArchiveGspRmFmcGfwProdSigned", "ucode_pkey_data") + b'\x00\x00\x00').cast('I')
    _, self.fmc_booter_sysmem = self.nvdev._alloc_sysmem(len(self.fmc_booter_image), contiguous=True, data=self.fmc_booter_image)
  def init_hw(self):
    """Fill the FMC boot params and send the COT command to the FSP."""
    self.falcon = 0x00110000
    self.fmc_boot_args.bootGspRmParams = nv.GSP_ACR_BOOT_GSP_RM_PARAMS(gspRmDescOffset=self.nvdev.gsp.wpr_meta_sysmem,
      gspRmDescSize=ctypes.sizeof(nv.GspFwWprMeta), target=nv.GSP_DMA_TARGET_COHERENT_SYSTEM, bIsGspRmBoot=True)
    self.fmc_boot_args.gspRmParams = nv.GSP_RM_PARAMS(bootArgsOffset=self.nvdev.gsp.libos_args_sysmem[0], target=nv.GSP_DMA_TARGET_COHERENT_SYSTEM)
    cot_payload = nv.NVDM_PAYLOAD_COT(version=0x2, size=ctypes.sizeof(nv.NVDM_PAYLOAD_COT), frtsVidmemOffset=0x1c00000, frtsVidmemSize=0x100000,
      gspBootArgsSysmemOffset=self.fmc_boot_args_sysmem, gspFmcSysmemOffset=self.fmc_booter_sysmem[0])
    for i,x in enumerate(self.fmc_booter_hash): cot_payload.hash384[i] = x
    for i,x in enumerate(self.fmc_booter_sig): cot_payload.signature[i] = x
    for i,x in enumerate(self.fmc_booter_pkey): cot_payload.publicKey[i] = x
    self.kfsp_send_msg(nv.NVDM_TYPE_COT, bytes(cot_payload))
    wait_cond(lambda: self.nvdev.NV_PFALCON_FALCON_HWCFG2.with_base(self.falcon).read_bitfields()['riscv_br_priv_lockdown'], value=0)
  def kfsp_send_msg(self, nvmd:int, buf:bytes):
    """Send a single-packet MCTP/NVDM message to the FSP over EMEM channel 0 and wait for its reply."""
    # All single-packets go to seid 0
    headers = int.to_bytes((1 << 31) | (1 << 30), 4, 'little') + int.to_bytes((0x7e << 0) | (0x10de << 8) | (nvmd << 24), 4, 'little')
    # NOTE(review): when len(buf) is already 4-aligned this appends 4 extra zero bytes -- confirm intended
    buf = headers + buf + (4 - (len(buf) % 4)) * b'\x00'
    assert len(buf) < 0x400, f"FSP message too long: {len(buf)} bytes, max 1024 bytes"
    # auto-incrementing write into EMEM, then publish via the queue head/tail registers
    self.nvdev.NV_PFSP_EMEMC[0].write(offs=0, blk=0, aincw=1, aincr=0)
    for i in range(0, len(buf), 4): self.nvdev.NV_PFSP_EMEMD[0].write(int.from_bytes(buf[i:i+4], 'little'))
    self.nvdev.NV_PFSP_QUEUE_TAIL[0].write(len(buf) - 4)
    self.nvdev.NV_PFSP_QUEUE_HEAD[0].write(0)
    # Waiting for a response
    wait_cond(lambda: self.nvdev.NV_PFSP_MSGQ_HEAD[0].read() != self.nvdev.NV_PFSP_MSGQ_TAIL[0].read(), msg="FSP didn't respond to message")
    # ack: drain the message queue by advancing tail to head
    self.nvdev.NV_PFSP_EMEMC[0].write(offs=0, blk=0, aincw=0, aincr=1)
    self.nvdev.NV_PFSP_MSGQ_TAIL[0].write(self.nvdev.NV_PFSP_MSGQ_HEAD[0].read())
class NV_GSP(NV_IP):
  """GSP-RM client.

  Builds all the sysmem structures GSP firmware consumes at boot (message queues,
  libos log regions, WPR meta, radix3-paged firmware image), then talks to the
  running firmware through a command/status queue pair using vGPU-style RPCs.
  """
  def init_sw(self):
    """Allocate boot structures, pre-queue the startup RPCs, and pick per-arch HW classes."""
    self.handle_gen = itertools.count(0xcf000000)  # generator for RM object/client handles
    self.init_rm_args()
    self.init_libos_args()
    self.init_wpr_meta()
    # Prefill cmd queue with info for gsp to start.
    self.rpc_set_gsp_system_info()
    self.rpc_set_registry_table()
    # Ampere defaults; newer architectures override below.
    self.gpfifo_class, self.compute_class, self.dma_class = nv_gpu.AMPERE_CHANNEL_GPFIFO_A, nv_gpu.AMPERE_COMPUTE_B, nv_gpu.AMPERE_DMA_COPY_B
    match self.nvdev.chip_name[:2]:
      case "AD": self.compute_class = nv_gpu.ADA_COMPUTE_A
      case "GB":
        self.gpfifo_class,self.compute_class,self.dma_class=nv_gpu.BLACKWELL_CHANNEL_GPFIFO_A,nv_gpu.BLACKWELL_COMPUTE_B,nv_gpu.BLACKWELL_DMA_COPY_B
  def init_rm_args(self, queue_size=0x40000):
    """Build the shared message-queue area: [page table | cmd queue | status queue]."""
    # Alloc queues
    # pte_cnt covers the two queues plus the pages holding the page table itself.
    pte_cnt = ((queue_pte_cnt:=(queue_size * 2) // 0x1000)) + round_up(queue_pte_cnt * 8, 0x1000) // 0x1000
    pt_size = round_up(pte_cnt * 8, 0x1000)
    queues_view, queues_sysmem = self.nvdev._alloc_sysmem(pt_size + queue_size * 2, contiguous=False)
    # Fill up ptes
    for i, sysmem in enumerate(queues_sysmem): queues_view.view(i * 0x8, 0x8, fmt='Q')[0] = sysmem
    # Fill up arguments
    queue_args = nv.MESSAGE_QUEUE_INIT_ARGUMENTS(sharedMemPhysAddr=queues_sysmem[0], pageTableEntryCount=pte_cnt, cmdQueueOffset=pt_size,
      statQueueOffset=pt_size + queue_size)
    _, self.rm_args_sysmem = self.nvdev._alloc_boot_struct(nv.GSP_ARGUMENTS_CACHED(bDmemStack=True, messageQueueInitArguments=queue_args))
    # Build command queue header
    self.cmd_q_va, self.stat_q_va = queues_view.addr + pt_size, queues_view.addr + pt_size + queue_size
    cmd_q_tx = nv.msgqTxHeader(version=0, size=queue_size, entryOff=0x1000, msgSize=0x1000, msgCount=(queue_size - 0x1000) // 0x1000,
      writePtr=0, flags=1, rxHdrOff=ctypes.sizeof(nv.msgqTxHeader))
    to_mv(self.cmd_q_va, ctypes.sizeof(nv.msgqTxHeader))[:] = bytes(cmd_q_tx)
    self.cmd_q = NVRpcQueue(self, self.cmd_q_va, None)
  def init_libos_args(self):
    """Describe the libos log buffers and the RMARGS region for the GSP bootloader."""
    _, logbuf_sysmem = self.nvdev._alloc_sysmem((2 << 20), contiguous=True)
    libos_args_view, self.libos_args_sysmem = self.nvdev._alloc_sysmem(0x1000, contiguous=True)
    libos_structs = (nv.LibosMemoryRegionInitArgument * 6).from_address(libos_args_view.addr)
    # Five 64KB log regions carved out of the single contiguous log buffer.
    for i, name in enumerate(["INIT", "INTR", "RM", "MNOC", "KRNL"]):
      libos_structs[i] = nv.LibosMemoryRegionInitArgument(kind=nv.LIBOS_MEMORY_REGION_CONTIGUOUS, loc=nv.LIBOS_MEMORY_REGION_LOC_SYSMEM, size=0x10000,
        id8=int.from_bytes(bytes(f"LOG{name}", 'utf-8'), 'big'), pa=logbuf_sysmem[0] + 0x10000 * i)
    # Last slot points libos at the cached GSP_ARGUMENTS struct.
    libos_structs[5] = nv.LibosMemoryRegionInitArgument(kind=nv.LIBOS_MEMORY_REGION_CONTIGUOUS, loc=nv.LIBOS_MEMORY_REGION_LOC_SYSMEM, size=0x1000,
      id8=int.from_bytes(bytes("RMARGS", 'utf-8'), 'big'), pa=self.rm_args_sysmem)
  def init_gsp_image(self):
    """Fetch GSP-RM firmware, build its radix3-paged layout in sysmem, and stage the per-chip signature."""
    fw = fetch("https://github.com/NVIDIA/linux-firmware/raw/refs/heads/nvidia-staging/nvidia/ga102/gsp/gsp-570.144.bin", subdir="fw").read_bytes()
    _, sections, _ = elf_loader(fw)
    self.gsp_image = next((sh.content for sh in sections if sh.name == ".fwimage"))
    signature = next((sh.content for sh in sections if sh.name == (f".fwsignature_{self.nvdev.chip_name[:4].lower()}x")))
    # Build radix3
    # npages[3] = image pages; each higher level holds pointers to the level below.
    npages = [0, 0, 0, round_up(len(self.gsp_image), 0x1000) // 0x1000]
    for i in range(3, 0, -1): npages[i-1] = ((npages[i] - 1) >> (nv.LIBOS_MEMORY_REGION_RADIX_PAGE_LOG2 - 3)) + 1
    offsets = [sum(npages[:i]) * 0x1000 for i in range(4)]
    radix_view, self.gsp_radix3_sysmem = self.nvdev._alloc_sysmem(offsets[-1] + len(self.gsp_image), contiguous=False)
    # Copy image
    radix_view.view(offsets[-1], len(self.gsp_image))[:] = self.gsp_image
    # Copy level and image pages.
    for i in range(0, 3):
      cur_offset = sum(npages[:i+1])
      radix_view.view(offsets[i], npages[i+1] * 8, fmt='Q')[:] = array.array('Q', self.gsp_radix3_sysmem[cur_offset:cur_offset+npages[i+1]])
    # Copy signature
    _, self.gsp_signature_sysmem = self.nvdev._alloc_sysmem(len(signature), contiguous=True, data=signature)
  def init_boot_binary_image(self):
    """Extract the RISC-V booter ucode + descriptor and stage the image in sysmem."""
    self.booter_image = self.nvdev.extract_fw("kgspBinArchiveGspRmBoot", "ucode_image_prod_data")
    self.booter_desc = nv.RM_RISCV_UCODE_DESC.from_buffer_copy(self.nvdev.extract_fw("kgspBinArchiveGspRmBoot", "ucode_desc_prod_data"))
    _, self.booter_sysmem = self.nvdev._alloc_sysmem(len(self.booter_image), contiguous=True, data=self.booter_image)
  def init_wpr_meta(self):
    """Fill GspFwWprMeta: where the images live in sysmem and (non-fmc) the vidmem WPR layout."""
    self.init_gsp_image()
    self.init_boot_binary_image()
    common = {'sizeOfBootloader':(boot_sz:=len(self.booter_image)), 'sysmemAddrOfBootloader':self.booter_sysmem[0],
      'sizeOfRadix3Elf':(radix3_sz:=len(self.gsp_image)), 'sysmemAddrOfRadix3Elf': self.gsp_radix3_sysmem[0],
      'sizeOfSignature': 0x1000, 'sysmemAddrOfSignature': self.gsp_signature_sysmem[0],
      'bootloaderCodeOffset': self.booter_desc.monitorCodeOffset, 'bootloaderDataOffset': self.booter_desc.monitorDataOffset,
      'bootloaderManifestOffset': self.booter_desc.manifestOffset, 'revision':nv.GSP_FW_WPR_META_REVISION, 'magic':nv.GSP_FW_WPR_META_MAGIC}
    if self.nvdev.fmc_boot:
      # fmc-boot chips: the FMC lays out the WPR itself, only sizes are provided.
      m = nv.GspFwWprMeta(**common, vgaWorkspaceSize=0x20000, pmuReservedSize=0x1820000, nonWprHeapSize=0x220000, gspFwHeapSize=0x8700000,
        frtsSize=0x100000)
    else:
      # Legacy path: compute the full top-down vidmem layout (VGA, FRTS, boot bin, fw, heap, non-WPR heap).
      m = nv.GspFwWprMeta(**common, vgaWorkspaceSize=(vga_sz:=0x100000), vgaWorkspaceOffset=(vga_off:=self.nvdev.vram_size-vga_sz),
        gspFwWprEnd=vga_off, frtsSize=(frts_sz:=0x100000), frtsOffset=(frts_off:=vga_off-frts_sz), bootBinOffset=(boot_off:=frts_off-boot_sz),
        gspFwOffset=(gsp_off:=round_down(boot_off-radix3_sz, 0x10000)), gspFwHeapSize=(gsp_heap_sz:=0x8100000), fbSize=self.nvdev.vram_size,
        gspFwHeapOffset=(gsp_heap_off:=round_down(gsp_off-gsp_heap_sz, 0x100000)), gspFwWprStart=(wpr_st:=round_down(gsp_heap_off-0x1000, 0x100000)),
        nonWprHeapSize=(non_wpr_sz:=0x100000), nonWprHeapOffset=(non_wpr_off:=round_down(wpr_st-non_wpr_sz, 0x100000)), gspFwRsvdStart=non_wpr_off)
      assert self.nvdev.flcn.frts_offset == m.frtsOffset, f"FRTS mismatch: {self.nvdev.flcn.frts_offset} != {m.frtsOffset}"
    self.wpr_meta, self.wpr_meta_sysmem = self.nvdev._alloc_boot_struct(m)
  def promote_ctx(self, client:int, subdevice:int, obj:int, ctxbufs:dict[int, GRBufDesc], bufs=None, virt=None, phys=None):
    """Promote (register) graphics context buffers for a channel; allocates any buffer not in `bufs`.

    Returns a dict of bufferId -> allocation so callers can re-promote the same buffers later.
    """
    res, prom = {}, nv_gpu.NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS(entryCount=len(ctxbufs), engineType=0x1, hChanClient=client, hObject=obj)
    for i,(buf,desc) in enumerate(ctxbufs.items()):
      # virt/phys arguments override the per-descriptor defaults when given.
      use_v, use_p = (desc.virt if virt is None else virt), (desc.phys if phys is None else phys)
      x = (bufs or {}).get(buf, self.nvdev.mm.valloc(desc.size, contiguous=True)) # allocate buffers
      prom.promoteEntry[i] = nv_gpu.NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY(bufferId=buf, gpuVirtAddr=x.va_addr if use_v else 0, bInitialize=use_p,
        gpuPhysAddr=x.paddrs[0][0] if use_p else 0, size=desc.size if use_p else 0, physAttr=0x4 if use_p else 0, bNonmapped=(use_p and not use_v))
      res[buf] = x
    self.rpc_rm_control(hObject=subdevice, cmd=nv_gpu.NV2080_CTRL_CMD_GPU_PROMOTE_CTX, params=prom, client=client)
    return res
  def init_golden_image(self):
    """One-time RM setup under the privileged client: vaspace PDE copy, a scratch gpfifo channel,
    golden graphics context buffers, and compute/DMA class allocation."""
    self.rpc_rm_alloc(hParent=0x0, hClass=0x0, params=nv_gpu.NV0000_ALLOC_PARAMETERS())
    dev = self.rpc_rm_alloc(hParent=self.priv_root, hClass=nv_gpu.NV01_DEVICE_0, params=nv_gpu.NV0080_ALLOC_PARAMETERS(hClientShare=self.priv_root))
    subdev = self.rpc_rm_alloc(hParent=dev, hClass=nv_gpu.NV20_SUBDEVICE_0, params=nv_gpu.NV2080_ALLOC_PARAMETERS())
    vaspace = self.rpc_rm_alloc(hParent=dev, hClass=nv_gpu.FERMI_VASPACE_A, params=nv_gpu.NV_VASPACE_ALLOCATION_PARAMETERS())
    # reserve 512MB for the reserved PDES
    res_va = self.nvdev.mm.alloc_vaddr(res_sz:=(512 << 20))
    bufs_p = nv_gpu.struct_NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS(pageSize=res_sz, numLevelsToCopy=3,
      virtAddrLo=res_va, virtAddrHi=res_va + res_sz - 1)
    for i,pt in enumerate(self.nvdev.mm.page_tables(res_va, size=res_sz)):
      bufs_p.levels[i] = nv_gpu.struct_NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS_level(physAddress=pt.paddr,
        size=self.nvdev.mm.pte_cnt[0] * 8 if i == 0 else 0x1000, pageShift=self.nvdev.mm.pte_covers[i].bit_length() - 1, aperture=1)
    self.rpc_rm_control(hObject=vaspace, cmd=nv_gpu.NV90F1_CTRL_CMD_VASPACE_COPY_SERVER_RESERVED_PDES, params=bufs_p)
    # Scratch gpfifo channel used only to build the golden context image.
    gpfifo_area = self.nvdev.mm.valloc(4 << 10, contiguous=True)
    userd = nv_gpu.NV_MEMORY_DESC_PARAMS(base=gpfifo_area.paddrs[0][0] + 0x20 * 8, size=0x20, addressSpace=2, cacheAttrib=0)
    gg_params = nv_gpu.NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS(gpFifoOffset=gpfifo_area.va_addr, gpFifoEntries=32, engineType=0x1, cid=3,
      hVASpace=vaspace, userdOffset=(ctypes.c_uint64*8)(0x20 * 8), userdMem=userd, internalFlags=0x1a, flags=0x200320)
    ch_gpfifo = self.rpc_rm_alloc(hParent=dev, hClass=self.gpfifo_class, params=gg_params)
    gr_ctx_bufs_info = self.rpc_rm_control(hObject=subdev, cmd=nv_gpu.NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO,
      params=nv_gpu.NV2080_CTRL_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO_PARAMS()).engineContextBuffersInfo[0]
    # Helper: padded/aligned size of the engine-context buffer at `idx`.
    def _ctx_info(idx, add=0, align=None): return round_up(gr_ctx_bufs_info.engine[idx].size + add, align or gr_ctx_bufs_info.engine[idx].alignment)
    # Setup graphics context
    gr_size = _ctx_info(nv_gpu.NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS, add=0x40000)
    patch_size = _ctx_info(nv_gpu.NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PATCH)
    cfgs_sizes = {x: _ctx_info(x + 14, align=(2 << 20) if x == 5 else None) for x in range(3, 11)} # indices 3-10 are mapped to 17-24
    self.grctx_bufs = {0: GRBufDesc(gr_size, phys=True, virt=True), 1: GRBufDesc(patch_size, phys=True, virt=True, local=True),
      2: GRBufDesc(patch_size, phys=True, virt=True), **{x: GRBufDesc(cfgs_sizes[x], phys=False, virt=True) for x in range(3, 7)},
      9: GRBufDesc(cfgs_sizes[9], phys=True, virt=True), 10: GRBufDesc(cfgs_sizes[10], phys=True, virt=False),
      11: GRBufDesc(cfgs_sizes[10], phys=True, virt=True)} # NOTE: 11 reuses cfgs_sizes[10]
    self.promote_ctx(self.priv_root, subdev, ch_gpfifo, {k:v for k, v in self.grctx_bufs.items() if not v.local})
    self.rpc_rm_alloc(hParent=ch_gpfifo, hClass=self.compute_class, params=None)
    self.rpc_rm_alloc(hParent=ch_gpfifo, hClass=self.dma_class, params=None)
  def init_hw(self):
    """Attach to the status queue, wait for GSP_INIT_DONE, reset BAR1 and build the golden context."""
    self.stat_q = NVRpcQueue(self, self.stat_q_va, self.cmd_q_va)
    self.cmd_q.rx = nv.msgqRxHeader.from_address(self.stat_q.va + self.stat_q.tx.rxHdrOff)
    self.stat_q.wait_resp(nv.NV_VGPU_MSG_EVENT_GSP_INIT_DONE)
    self.nvdev.NV_PBUS_BAR1_BLOCK.write(mode=0, target=0, ptr=0)
    if self.nvdev.fmc_boot: self.nvdev.NV_VIRTUAL_FUNCTION_PRIV_FUNC_BAR1_BLOCK_LOW_ADDR.write(mode=0, target=0, ptr=0)
    self.priv_root = 0xc1e00004  # fixed handle of the privileged internal client
    self.init_golden_image()
  def fini_hw(self): self.rpc_unloading_guest_driver()
  ### RPCs
  def rpc_alloc_memory(self, hDevice:int, hClass:int, paddrs:list[tuple[int,int]], length:int, flags:int, client:int|None=None) -> int:
    """Register a sysmem page list with RM as a memory object; returns the new handle."""
    assert all(sz == 0x1000 for _, sz in paddrs), f"all pages must be 4KB, got {[(hex(p), hex(sz)) for p, sz in paddrs]}"
    rpc = nv.rpc_alloc_memory_v(hClient=(client:=client or self.priv_root), hDevice=hDevice, hMemory=(handle:=next(self.handle_gen)),
      hClass=hClass, flags=flags, pteAdjust=0, format=6, length=length, pageCount=len(paddrs))
    rpc.pteDesc.idr, rpc.pteDesc.length = nv.NV_VGPU_PTEDESC_IDR_NONE, (len(paddrs) & 0xffff)
    payload = bytes(rpc) + b''.join(bytes(nv.struct_pte_desc_pte_pde(pte=(paddr >> 12))) for paddr, _ in paddrs)
    self.cmd_q.send_rpc(nv.NV_VGPU_MSG_FUNCTION_ALLOC_MEMORY, bytes(payload))
    self.stat_q.wait_resp(nv.NV_VGPU_MSG_FUNCTION_ALLOC_MEMORY)
    return handle
  def rpc_rm_alloc(self, hParent:int, hClass:int, params:Any, client=None) -> int:
    """NvRmAlloc over RPC. Handles per-class fixups (gpfifo backing memory, vaspace PDE wiring,
    compute-class context promotion) and returns the allocated object handle."""
    if hClass == self.gpfifo_class:
      # Channels need instance/ramfc/method-buffer backing memory filled in before the alloc.
      ramfc_alloc = self.nvdev.mm.valloc(0x1000, contiguous=True)
      params.ramfcMem = nv_gpu.NV_MEMORY_DESC_PARAMS(base=ramfc_alloc.paddrs[0][0], size=0x200, addressSpace=2, cacheAttrib=0)
      params.instanceMem = nv_gpu.NV_MEMORY_DESC_PARAMS(base=ramfc_alloc.paddrs[0][0], size=0x1000, addressSpace=2, cacheAttrib=0)
      _, method_sysmem = self.nvdev._alloc_sysmem(0x5000, contiguous=True)
      params.mthdbufMem = nv_gpu.NV_MEMORY_DESC_PARAMS(base=method_sysmem[0], size=0x5000, addressSpace=1, cacheAttrib=0)
      if client is not None and client != self.priv_root and params.hObjectError != 0:
        params.errorNotifierMem = nv_gpu.NV_MEMORY_DESC_PARAMS(base=0, size=0xecc, addressSpace=0, cacheAttrib=0)
      params.userdMem = nv_gpu.NV_MEMORY_DESC_PARAMS(base=params.hUserdMemory[0] + params.userdOffset[0], size=0x400, addressSpace=2, cacheAttrib=0)
    alloc_args = nv.rpc_gsp_rm_alloc_v(hClient=(client:=client or self.priv_root), hParent=hParent, hObject=(obj:=next(self.handle_gen)),
      hClass=hClass, flags=0x0, paramsSize=ctypes.sizeof(params) if params is not None else 0x0)
    self.cmd_q.send_rpc(nv.NV_VGPU_MSG_FUNCTION_GSP_RM_ALLOC, bytes(alloc_args) + (bytes(params) if params is not None else b''))
    self.stat_q.wait_resp(nv.NV_VGPU_MSG_FUNCTION_GSP_RM_ALLOC)
    if hClass == nv_gpu.FERMI_VASPACE_A and client != self.priv_root:
      # User vaspaces share the driver's page directory.
      self.rpc_set_page_directory(device=hParent, hVASpace=obj, pdir_paddr=self.nvdev.mm.root_page_table.paddr, client=client)
    if hClass == nv_gpu.NV01_DEVICE_0 and client != self.priv_root: self.device = obj # save user device handle
    if hClass == nv_gpu.NV20_SUBDEVICE_0: self.subdevice = obj # save subdevice handle
    if hClass == self.compute_class and client != self.priv_root:
      # Promote phys first (initializes the buffers), then virt mappings of the same allocations.
      phys_gr_ctx = self.promote_ctx(client, self.subdevice, hParent, {k:v for k,v in self.grctx_bufs.items() if k in [0, 1, 2]}, virt=False)
      self.promote_ctx(client, self.subdevice, hParent, {k:v for k,v in self.grctx_bufs.items() if k in [0, 1, 2]}, phys_gr_ctx, phys=False)
    return obj if hClass != nv_gpu.NV1_ROOT else client
  def rpc_rm_control(self, hObject:int, cmd:int, params:Any, client=None, extra=None):
    """NvRmControl over RPC; returns `params` re-read from the response buffer (or None)."""
    if cmd == nv_gpu.NVB0CC_CTRL_CMD_POWER_REQUEST_FEATURES:
      # Profiler power request implies granting all profiling permissions first.
      self.rpc_rm_control(hObject, nv_gpu.NVB0CC_CTRL_CMD_INTERNAL_PERMISSIONS_INIT, nv_gpu.NVB0CC_CTRL_INTERNAL_PERMISSIONS_INIT_PARAMS(
        bAdminProfilingPermitted=1, bDevProfilingPermitted=1, bCtxProfilingPermitted=1, bVideoMemoryProfilingPermitted=1,
        bSysMemoryProfilingPermitted=1), client=client)
    elif cmd == nv_gpu.NVB0CC_CTRL_CMD_ALLOC_PMA_STREAM:
      # PMA stream needs its two user buffers registered as RM memory objects first.
      params.hMemPmaBuffer = self.rpc_alloc_memory(self.device, nv_gpu.NV01_MEMORY_LIST_SYSTEM, extra[0].meta.mapping.paddrs, extra[0].size,
        pma_flags:=(nv_gpu.NVOS02_FLAGS_PHYSICALITY_NONCONTIGUOUS << 4 | nv_gpu.NVOS02_FLAGS_MAPPING_NO_MAP << 30), client=client)
      params.hMemPmaBytesAvailable = self.rpc_alloc_memory(self.device, nv_gpu.NV01_MEMORY_LIST_SYSTEM, extra[1].meta.mapping.paddrs, extra[1].size,
        pma_flags | nv_gpu.NVOS02_FLAGS_ALLOC_USER_READ_ONLY_YES << 21, client=client)
    control_args = nv.rpc_gsp_rm_control_v(hClient=(client:=client or self.priv_root), hObject=hObject, cmd=cmd, flags=0x0,
      paramsSize=ctypes.sizeof(params) if params is not None else 0x0)
    self.cmd_q.send_rpc(nv.NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL, bytes(control_args) + (bytes(params) if params is not None else b''))
    res = self.stat_q.wait_resp(nv.NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL)
    st = type(params).from_buffer_copy(res[len(bytes(control_args)):]) if params is not None else None
    # NOTE: gb20x requires the enable bit for token submission. Patch workSubmitToken here to maintain userspace compatibility.
    if self.nvdev.chip_name.startswith("GB2") and cmd == nv_gpu.NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN:
      cast(nv_gpu.NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN_PARAMS, st).workSubmitToken |= (1 << 30)
    return st
  def rpc_set_page_directory(self, device:int, hVASpace:int, pdir_paddr:int, client=None, pasid=0xffffffff):
    """Point a vaspace at an existing root page directory."""
    params = nv.struct_NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS_v1E_05(physAddress=pdir_paddr,
      numEntries=self.nvdev.mm.pte_cnt[0], flags=0x8, hVASpace=hVASpace, pasid=pasid, subDeviceId=1, chId=0) # flags field is all channels.
    alloc_args = nv.rpc_set_page_directory_v(hClient=client or self.priv_root, hDevice=device, pasid=pasid, params=params)
    self.cmd_q.send_rpc(nv.NV_VGPU_MSG_FUNCTION_SET_PAGE_DIRECTORY, bytes(alloc_args))
    self.stat_q.wait_resp(nv.NV_VGPU_MSG_FUNCTION_SET_PAGE_DIRECTORY)
  def rpc_set_gsp_system_info(self):
    """Queue the host/PCI description GSP reads at boot (no response expected yet)."""
    # Encode bus/device/function from a "0000:bb:dd.f" string; 0 for non-PCI (usb/remote) backends.
    def bdf_as_int(s): return 0x000 if s.startswith("usb") or s.startswith("remote") else (int(s[5:7],16)<<8) | (int(s[8:10],16)<<3) | int(s[-1],16)
    pcidev = self.nvdev.pci_dev
    data = nv.GspSystemInfo(gpuPhysAddr=pcidev.bar_info[0].addr, gpuPhysFbAddr=pcidev.bar_info[1].addr, gpuPhysInstAddr=pcidev.bar_info[3].addr,
      pciConfigMirrorBase=[0x88000, 0x92000][self.nvdev.fmc_boot], pciConfigMirrorSize=0x1000, nvDomainBusDeviceFunc=bdf_as_int(self.nvdev.devfmt),
      bIsPassthru=1, PCIDeviceID=pcidev.read_config(pci.PCI_VENDOR_ID, 4), PCISubDeviceID=pcidev.read_config(pci.PCI_SUBSYSTEM_VENDOR_ID, 4),
      PCIRevisionID=pcidev.read_config(pci.PCI_REVISION_ID, 1), maxUserVa=0x7ffffffff000)
    self.cmd_q.send_rpc(nv.NV_VGPU_MSG_FUNCTION_GSP_SET_SYSTEM_INFO, bytes(data))
  def rpc_unloading_guest_driver(self):
    """Tell GSP the driver is unloading (fast-unload path) and wait for the ack."""
    data = nv.rpc_unloading_guest_driver_v(bInPMTransition=0, bGc6Entering=0, newLevel=(__GPU_STATE_FLAGS_FAST_UNLOAD:=1 << 6))
    self.cmd_q.send_rpc(nv.NV_VGPU_MSG_FUNCTION_UNLOADING_GUEST_DRIVER, bytes(data))
    self.stat_q.wait_resp(nv.NV_VGPU_MSG_FUNCTION_UNLOADING_GUEST_DRIVER)
  def rpc_set_registry_table(self):
    """Queue the RM registry (header + entry array + packed key strings) for GSP boot."""
    table = {'RMForcePcieConfigSave': 0x1, 'RMSecBusResetEnable': 0x1}
    entries_bytes, data_bytes = bytes(), bytes()
    hdr_size, entries_size = ctypes.sizeof(nv.PACKED_REGISTRY_TABLE), ctypes.sizeof(nv.PACKED_REGISTRY_ENTRY) * len(table)
    for k,v in table.items():
      # nameOffset points past the header and all entries, into the string blob.
      entries_bytes += bytes(nv.PACKED_REGISTRY_ENTRY(nameOffset=hdr_size + entries_size + len(data_bytes),
        type=nv.REGISTRY_TABLE_ENTRY_TYPE_DWORD, data=v, length=4))
      data_bytes += k.encode('utf-8') + b'\x00'
    header = nv.PACKED_REGISTRY_TABLE(size=hdr_size + len(entries_bytes) + len(data_bytes), numEntries=len(table))
    self.cmd_q.send_rpc(nv.NV_VGPU_MSG_FUNCTION_SET_REGISTRY, bytes(header) + entries_bytes + data_bytes)
  def run_cpu_seq(self, seq_buf:memoryview):
    """Execute a GSP CPU-sequencer program: a stream of (opcode, args...) u32 words that
    drive register writes/polls, delays, and falcon core reset/start on GSP's behalf."""
    hdr = nv.rpc_run_cpu_sequencer_v17_00.from_address(mv_address(seq_buf))
    cmd_iter = iter(seq_buf[ctypes.sizeof(nv.rpc_run_cpu_sequencer_v17_00):].cast('I')[:hdr.cmdIndex])
    for op in cmd_iter:
      if op == 0x0: self.nvdev.wreg(next(cmd_iter), next(cmd_iter)) # reg write
      elif op == 0x1: # reg modify
        addr, val, mask = next(cmd_iter), next(cmd_iter), next(cmd_iter)
        self.nvdev.wreg(addr, (self.nvdev.rreg(addr) & ~mask) | (val & mask))
      elif op == 0x2: # reg poll
        # NOTE(review): two trailing operands are consumed and ignored — presumably
        # timeout/error fields of the sequencer opcode; confirm against RM sources.
        addr, mask, val, _, _ = next(cmd_iter), next(cmd_iter), next(cmd_iter), next(cmd_iter), next(cmd_iter)
        # addr/mask are passed as wait_cond args (not captured) so the lambda has no late-binding issue.
        wait_cond(lambda a, m: (self.nvdev.rreg(a) & m), addr, mask, value=val, msg=f"Register {addr:#x} not equal to {val:#x} after polling")
      elif op == 0x3: time.sleep(next(cmd_iter) / 1e6) # delay us
      elif op == 0x4: # save reg
        addr, index = next(cmd_iter), next(cmd_iter)
        hdr.regSaveArea[index] = self.nvdev.rreg(addr)
      elif op == 0x5: # core reset
        self.nvdev.flcn.reset(self.nvdev.flcn.falcon)
        self.nvdev.flcn.disable_ctx_req(self.nvdev.flcn.falcon)
      elif op == 0x6: self.nvdev.flcn.start_cpu(self.nvdev.flcn.falcon)
      elif op == 0x7: self.nvdev.flcn.wait_cpu_halted(self.nvdev.flcn.falcon)
      elif op == 0x8: # core resume
        self.nvdev.flcn.reset(self.nvdev.flcn.falcon, riscv=True)
        self.nvdev.NV_PGSP_FALCON_MAILBOX0.write(lo32(self.libos_args_sysmem[0]))
        self.nvdev.NV_PGSP_FALCON_MAILBOX1.write(hi32(self.libos_args_sysmem[0]))
        self.nvdev.flcn.start_cpu(self.nvdev.flcn.sec2)
        wait_cond(lambda: self.nvdev.NV_PGC6_BSI_SECURE_SCRATCH_14.read_bitfields()['boot_stage_3_handoff'], msg="SEC2 didn't hand off")
        mailbox = self.nvdev.NV_PFALCON_FALCON_MAILBOX0.with_base(self.nvdev.flcn.sec2).read()
        assert mailbox == 0x0, f"Falcon SEC2 failed to execute, mailbox is {mailbox:08x}"
      else: raise ValueError(f"Unknown op code {op} in run_cpu_seq")
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "tinygrad/runtime/support/nv/ip.py",
"license": "MIT License",
"lines": 499,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
tinygrad/tinygrad:tinygrad/runtime/support/nv/nvdev.py | from __future__ import annotations
import ctypes, time, functools, re, gzip, struct
from tinygrad.helpers import getenv, DEBUG, fetch, getbits
from tinygrad.runtime.support.memory import TLSFAllocator, MemoryManager, AddrSpace
from tinygrad.runtime.support.nv.ip import NV_FLCN, NV_FLCN_COT, NV_GSP
from tinygrad.runtime.support.system import PCIDevice, PCIDevImplBase, MMIOInterface
NV_DEBUG = getenv("NV_DEBUG", 0)
class NVReg:
  """One MMIO register located at `base + off`, with an optional named-bitfield map.

  `fields` maps field name -> (start_bit, end_bit), both inclusive; mask/encode/decode
  translate between keyword field values and the packed 32-bit register word.
  """
  def __init__(self, nvdev, base, off, fields=None):
    self.nvdev, self.base, self.off, self.fields = nvdev, base, off, fields
  def __getitem__(self, idx:int):
    # Indexed register arrays: here `off` is a callable mapping index -> byte offset.
    return NVReg(self.nvdev, self.base, self.off(idx), fields=self.fields)
  def add_field(self, name:str, start:int, end:int):
    self.fields[name] = (start, end)
  def with_base(self, base:int):
    # Same register definition rebased (e.g. per-falcon engine instances).
    return NVReg(self.nvdev, base + self.base, self.off, self.fields)
  def read(self):
    return self.nvdev.rreg(self.base + self.off)
  def read_bitfields(self) -> dict[str, int]:
    return self.decode(self.read())
  def write(self, _ini_val:int=0, **kwargs):
    self.nvdev.wreg(self.base + self.off, _ini_val | self.encode(**kwargs))
  def update(self, **kwargs):
    # Read-modify-write: clear only the named fields, then OR in their new values.
    self.write(self.read() & ~self.mask(*kwargs.keys()), **kwargs)
  def mask(self, *names):
    bits = 0
    for nm in names:
      lo, hi = self.fields[nm]
      bits |= ((1 << (hi - lo + 1)) - 1) << lo
    return bits
  def encode(self, **kwargs) -> int:
    word = 0
    for name, value in kwargs.items(): word |= value << self.fields[name][0]
    return word
  def decode(self, val: int) -> dict:
    return {name: getbits(val, start, end) for name, (start, end) in self.fields.items()}
class NVPageTableEntry:
  """One 4KB page-table page living in VRAM at `paddr`, at MMU level `lv`.

  Encodes/decodes PTE and PDE entries using the per-chip bitfield layouts that
  NVDev parses from the headers (pte_t/pde_t/dual_pde_t). The level just above the
  leaf is a "dual" PDE (two 64-bit words per entry: a 64K-page and a 4K-page pointer).
  """
  def __init__(self, nvdev, paddr, lv): self.nvdev, self.paddr, self.lv, self.entries = nvdev, paddr, lv, nvdev.vram.view(paddr, 0x1000, fmt='Q')
  # Dual PDE level = second-to-last level of the hierarchy (PDE0).
  def _is_dual_pde(self) -> bool: return self.lv == self.nvdev.mm.level_cnt - 2
  def set_entry(self, entry_id:int, paddr:int, table=False, uncached=False, aspace=AddrSpace.PHYS, snooped=False, frag=0, valid=True):
    """Write entry `entry_id`: a leaf PTE when table=False, else a pointer to a lower-level table."""
    if not table:
      # PTE: mmu v3 uses a 'pcf' permission/cache field where v2 had a 'vol' (volatile) bit.
      x = self.nvdev.pte_t.encode(valid=valid, address_sys=paddr >> 12, aperture=2 if aspace is AddrSpace.SYS else 0, kind=6,
        **({'pcf': int(uncached)} if self.nvdev.mmu_ver == 3 else {'vol': uncached}))
    else:
      # PDE: dual PDEs use the "_small" (4K) half; invalid PDEs are encoded as aperture 0.
      pde = self.nvdev.dual_pde_t if self._is_dual_pde() else self.nvdev.pde_t
      small, sys = ("_small" if self._is_dual_pde() else ""), "" if self.nvdev.mmu_ver == 3 else "_sys"
      x = pde.encode(is_pte=False, **{f'aperture{small}': 1 if valid else 0, f'address{small}{sys}': paddr >> 12},
        **({f'pcf{small}': 0b10} if self.nvdev.mmu_ver == 3 else {'no_ats': 1}))
    # Dual PDE entries are 128-bit, so they occupy two consecutive u64 slots.
    if self._is_dual_pde(): self.entries[2*entry_id], self.entries[2*entry_id+1] = x & 0xffffffffffffffff, x >> 64
    else: self.entries[entry_id] = x
  def entry(self, entry_id:int) -> int:
    # Raw entry value (reassembles the two u64 halves at the dual-PDE level).
    return (self.entries[2*entry_id+1]<<64) | self.entries[2*entry_id] if self._is_dual_pde() else self.entries[entry_id]
  def read_fields(self, entry_id:int) -> dict:
    """Decode entry `entry_id` into its named bitfields (PTE or PDE layout as appropriate)."""
    if self.is_page(entry_id): return self.nvdev.pte_t.decode(self.entry(entry_id))
    return (self.nvdev.dual_pde_t if self._is_dual_pde() else self.nvdev.pde_t).decode(self.entry(entry_id))
  # Bit 0 (is_pte) distinguishes PTE from PDE; the last level is always pages.
  def is_page(self, entry_id) -> bool: return (self.entry(entry_id) & 1 == 1) if self.lv < self.nvdev.mm.level_cnt - 1 else True
  # Huge pages (2M/512M) are only representable at the last three levels and need aligned paddrs.
  def supports_huge_page(self, paddr:int): return self.lv >= self.nvdev.mm.level_cnt - 3 and paddr % self.nvdev.mm.pte_covers[self.lv] == 0
  def valid(self, entry_id):
    """True if the entry maps something: PTE valid bit, or a non-zero PDE aperture."""
    if self.is_page(entry_id): return self.read_fields(entry_id)['valid']
    return self.read_fields(entry_id)['aperture_small' if self._is_dual_pde() else 'aperture'] != 0
  def address(self, entry_id:int) -> int:
    """Physical address the entry points to (field name varies by mmu version and level)."""
    small, sys = ("_small" if self._is_dual_pde() else ""), "_sys" if self.nvdev.mmu_ver == 2 or self.lv == self.nvdev.mm.level_cnt - 1 else ""
    return self.read_fields(entry_id)[f'address{small}{sys}'] << 12
class NVMemoryManager(MemoryManager):
  """GPU memory manager: shares one VA allocator across all NV devices and
  invalidates the MMU after every mapping change."""
  va_allocator = TLSFAllocator((1 << 44), base=0x1000000000) # global for all devices.
  # Flush the MMU after (un)mapping. NOTE(review): bit meanings presumably come from the
  # NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE field layout in dev_vm.h (all-VA/all-PDB/trigger) — confirm.
  def on_range_mapped(self): self.dev.NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE.write((1 << 0) | (1 << 1) | (1 << 6) | (1 << 31))
class NVDev(PCIDevImplBase):
  """A GSP-driven NVIDIA GPU behind a PCI device.

  Parses register/bitfield definitions straight out of the open-gpu-kernel-modules
  headers (see include()), sets up the MMU, then boots GSP-RM via the falcon/COT IP.
  """
  def __init__(self, pci_dev:PCIDevice):
    self.pci_dev, self.devfmt, self.mmio = pci_dev, pci_dev.pcibus, pci_dev.map_bar(0, fmt='I')
    self.smi_dev, self.is_booting, self.is_err_state = False, True, False
    self._early_ip_init()
    self._early_mmu_init()
    # No booting state, gsp client is reinited every run.
    self.is_booting = False
    for ip in [self.flcn, self.gsp]: ip.init_sw()
    for ip in [self.flcn, self.gsp]: ip.init_hw()
  def fini(self):
    # Teardown in reverse bring-up order.
    for ip in [self.gsp, self.flcn]: ip.fini_hw()
  # Look up a parsed register object by its header name.
  def reg(self, reg:str) -> NVReg: return self.__dict__[reg]
  def wreg(self, addr:int, value:int):
    """Raw 32-bit MMIO write at byte address `addr`."""
    self.mmio[addr // 4] = value
    if NV_DEBUG >= 4: print(f"wreg: {hex(addr)} = {hex(value)}")
  # Raw 32-bit MMIO read at byte address `addr`.
  def rreg(self, addr:int) -> int: return self.mmio[addr // 4]
  def _early_ip_init(self):
    """Identify the chip, reset it if a previous driver left WPR2 up, and pick the boot IPs."""
    self.reg_names:set[str] = set()
    self.reg_offsets:dict[str, tuple[int, int]] = {}
    self.include("src/common/inc/swref/published/nv_ref.h")
    self.include("src/common/inc/swref/published/turing/tu102/dev_fb.h")
    self.include("src/common/inc/swref/published/ampere/ga102/dev_gc6_island.h")
    self.include("src/common/inc/swref/published/ampere/ga102/dev_gc6_island_addendum.h")
    # A non-zero WPR2 means GSP was booted before; it can only be torn down by a full reset.
    if (needs_reset:=self.reg("NV_PFB_PRI_MMU_WPR2_ADDR_HI").read() != 0):
      if DEBUG >= 2: print(f"nv {self.devfmt}: WPR2 is up. Issuing a full reset.", flush=True)
      self.pci_dev.reset()
      time.sleep(0.1) # wait until device can respond again
    self.chip_id = self.reg("NV_PMC_BOOT_0").read()
    self.chip_details = self.reg("NV_PMC_BOOT_42").read_bitfields()
    self.chip_name = {0x17: "GA1", 0x19: "AD1", 0x1b: "GB2"}[self.chip_details['architecture']] + f"{self.chip_details['implementation']:02d}"
    self.fw_name = {"GB2": "GB202", "AD1": "AD102", "GA1": "GA102"}[self.chip_name[:3]]
    # Architectures >= 0x1a (Blackwell) use MMU v3 and the FSP chain-of-trust boot path.
    self.mmu_ver, self.fmc_boot = (3, True) if self.chip_details['architecture'] >= 0x1a else (2, False)
    self.flcn:NV_FLCN|NV_FLCN_COT = NV_FLCN_COT(self) if self.fmc_boot else NV_FLCN(self)
    self.gsp:NV_GSP = NV_GSP(self)
    if needs_reset: self.flcn.wait_for_reset()
  def _early_mmu_init(self):
    """Parse the PTE/PDE bitfield layouts for this mmu version and build the memory manager."""
    self.include("src/common/inc/swref/published/turing/tu102/dev_vm.h")
    # MMU Init
    # Register the PTE/PDE names *before* parsing dev_mmu.h so its bitfield #defines
    # attach to these pseudo-registers (they are never read/written via MMIO).
    self.reg_names.update(mmu_pd_names:=[f'NV_MMU_VER{self.mmu_ver}_PTE', f'NV_MMU_VER{self.mmu_ver}_PDE', f'NV_MMU_VER{self.mmu_ver}_DUAL_PDE'])
    for name in mmu_pd_names: self.__dict__[name] = NVReg(self, None, None, fields={})
    self.include(f"kernel-open/nvidia-uvm/hwref/{'hopper/gh100' if self.mmu_ver == 3 else 'turing/tu102'}/dev_mmu.h")
    self.pte_t, self.pde_t, self.dual_pde_t = tuple([self.__dict__[name] for name in mmu_pd_names])
    self.vram_size = self.reg("NV_PGC6_AON_SECURE_SCRATCH_GROUP_42").read() << 20
    self.vram, self.mmio = self.pci_dev.map_bar(1), self.pci_dev.map_bar(0, fmt='I')
    self.large_bar = self.vram.nbytes >= self.vram_size
    # UVM depth   HW level                            VA bits
    # 0           PDE4                                56:56 (hopper+)
    # 1           PDE3                                55:47
    # 2           PDE2                                46:38
    # 3           PDE1 (or 512M PTE)                  37:29
    # 4           PDE0 (dual 64k/4k PDE, or 2M PTE)   28:21
    # 5           PTE_64K / PTE_4K                    20:16 / 20:12
    bits, shifts = (56, [12, 21, 29, 38, 47, 56]) if self.mmu_ver == 3 else (48, [12, 21, 29, 38, 47])
    # tail vram reserved for falcon structs
    self.mm = NVMemoryManager(self, self.vram_size - (64 << 20), boot_size=(2 << 20), pt_t=NVPageTableEntry, va_bits=bits, va_shifts=shifts,
      va_base=0, palloc_ranges=[(x, x) for x in [512 << 20, 2 << 20, 4 << 10]], reserve_ptable=not self.large_bar)
  def _alloc_sysmem(self, size:int, vaddr:int=0, contiguous:bool=False, data:bytes|None=None) -> tuple[MMIOInterface, list[int]]:
    """Allocate host memory visible to the GPU; returns (cpu view, physical page addrs)."""
    view, paddrs = self.pci_dev.alloc_sysmem(size, vaddr, contiguous=contiguous)
    if data is not None: view[:size] = data
    return view, paddrs
  def _alloc_boot_struct(self, struct:ctypes.Structure) -> tuple[ctypes.Structure, int]:
    """Copy a ctypes struct into contiguous sysmem; returns (live struct view, phys addr)."""
    view, paddrs = self._alloc_sysmem(sz:=ctypes.sizeof(type(struct)), contiguous=True)
    view[:sz] = bytes(struct)
    # from_address gives a struct that reads/writes the shared memory directly.
    return type(struct).from_address(view.addr), paddrs[0]
  def _download(self, file:str) -> str:
    # Headers are fetched from a pinned open-gpu-kernel-modules commit and cached locally.
    url = f"https://raw.githubusercontent.com/NVIDIA/open-gpu-kernel-modules/8ec351aeb96a93a4bb69ccc12a542bf8a8df2b6f/{file}"
    return fetch(url, subdir="defines").read_text()
  def extract_fw(self, file:str, dname:str) -> bytes:
    # Extracts the firmware binary from the given header
    tname = file.replace("kgsp", "kgspGet")
    text = self._download(f"src/nvidia/generated/g_bindata_{tname}_{self.fw_name}.c")
    # info: the "COMPRESSION:" banner just above the array; sl: the hex bytes of the array body.
    info, sl = text[text[:text.index(dnm:=f'{file}_{self.fw_name}_{dname}')].rindex("COMPRESSION:"):][:16], text[text.index(dnm) + len(dnm) + 7:]
    image = bytes.fromhex(sl[:sl.find("};")].strip().replace("0x", "").replace(",", "").replace(" ", "").replace("\n", ""))
    # Compressed arrays are raw deflate streams: prepend a minimal gzip header to decompress.
    return gzip.decompress(struct.pack("<4BL2B", 0x1f, 0x8b, 8, 0, 0, 0, 3) + image) if "COMPRESSION: YES" in info else image
  def include(self, file:str):
    """Parse #defines from an open-gpu-kernel-modules header into NVReg objects and constants.

    Recognizes three shapes: `NAME hi:lo` (a bitfield, attached to an already-known register
    prefix), `NAME(i) expr` (an indexed register, kept as a lambda), and `NAME value`.
    """
    def _do_eval(s:str): return eval(s) # pylint: disable=eval-used
    # Base offsets added to every register whose name starts with the given prefix.
    regs_off = {'NV_PFALCON_FALCON': 0x0, 'NV_PGSP_FALCON': 0x0, 'NV_PSEC_FALCON': 0x0, 'NV_PRISCV_RISCV': 0x1000, 'NV_PGC6_AON': 0x0, 'NV_PFSP': 0x0,
      'NV_PGC6_BSI': 0x0, 'NV_PFALCON_FBIF': 0x600, 'NV_PFALCON2_FALCON': 0x1000, 'NV_PBUS': 0x0, 'NV_PFB': 0x0, 'NV_PMC': 0x0, 'NV_PGSP_QUEUE': 0x0,
      'NV_VIRTUAL_FUNCTION':0xb80000, "NV_THERM": 0x0}
    for raw in self._download(file).splitlines():
      if not raw.startswith("#define "): continue
      if m:=re.match(r'#define\s+(\w+)\s+([0-9\+\-\*\(\)]+):([0-9\+\-\*\(\)]+)', raw): # bitfields
        name, hi, lo = m.groups()
        reg = next((r for r in self.reg_names if name.startswith(r+"_")), None)
        # Field of a known register -> attach; otherwise remember it for a register defined later.
        if reg is not None: self.__dict__[reg].add_field(name[len(reg)+1:].lower(), _do_eval(lo), _do_eval(hi))
        else: self.reg_offsets[name] = (_do_eval(lo), _do_eval(hi))
        continue
      if m:=re.match(r'#define\s+(\w+)\s*\(\s*(\w+)\s*\)\s*(.+)', raw): # reg set
        # Strip line continuations and trailing comments before eval'ing the offset expression.
        fn = m.groups()[2].strip().rstrip('\\').split('/*')[0].rstrip()
        name, value = m.groups()[0], _do_eval(f"lambda {m.groups()[1]}: {fn}")
      elif m:=re.match(r'#define\s+(\w+)\s+([0-9A-Fa-fx]+)(?![^\n]*:)', raw): name, value = m.groups()[0], int(m.groups()[1], 0) # reg value
      else: continue
      reg_pref = next((prefix for prefix in regs_off.keys() if name.startswith(prefix)), None)
      # Skip names that are really <known_register>_<suffix> value defines, not new registers.
      not_already_reg = not any(name.startswith(r+"_") for r in self.reg_names)
      if reg_pref is not None and not_already_reg:
        # Adopt any bitfields that were seen before the register itself.
        fields = {k[len(name)+1:]: v for k, v in self.reg_offsets.items() if k.startswith(name+'_')}
        self.__dict__[name] = NVReg(self, regs_off[reg_pref], value, fields=fields)
        self.reg_names.add(name)
      else: self.__dict__[name] = value
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "tinygrad/runtime/support/nv/nvdev.py",
"license": "MIT License",
"lines": 150,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
tinygrad/tinygrad:test/external/external_benchmark_llama_schedule.py | from tinygrad import nn, Tensor, Device, dtypes
from tinygrad.helpers import Timing
from extra.models.llama import Transformer
from examples.llama3 import MODEL_PARAMS
if __name__ == "__main__":
Device.DEFAULT = "NULL"
Tensor.training = True
#model_size = "8B"
model_size = "405B"
with Timing("total "):
with Timing("***** create model in "):
model = Transformer(**MODEL_PARAMS[model_size]["args"], linear=nn.Linear, embedding=nn.Embedding,
max_context=1024, jit=True, disable_kv_cache=True)
with Timing("***** fake state in "):
Tensor.realize(*[p.assign(Tensor.empty(*p.shape, device=p.device, dtype=p.dtype)) for p in nn.state.get_parameters(model)])
with Timing("***** create optim in "):
opt = nn.optim.AdamW(nn.state.get_parameters(model))
with Timing("***** run model in "):
toks = Tensor.empty(1, 1024, dtype=dtypes.int)
out = model(toks, 0, temperature=float('nan'))
with Timing("***** backward in "):
out.mean().backward()
with Timing("***** realize in "):
out.realize()
with Timing("***** step in "):
opt.step()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/external/external_benchmark_llama_schedule.py",
"license": "MIT License",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:test/external/sglang_llama/external_llama_eval.py | from lm_eval import simple_evaluate
from lm_eval.api.instance import Instance
from lm_eval.api.model import LM
from lm_eval.tasks import TaskManager
from pathlib import Path
import json, argparse
from examples.llama3 import build_transformer, Tokenizer, MODEL_PARAMS
from tinygrad import Tensor, Device
from tinygrad.helpers import tqdm
class LLaMaAdaptor(LM):
def __init__(
self,
model_size: str,
checkpoint_path: Path,
max_length: int,
quantize: str | None,
):
super().__init__()
self.max_length = max_length
self.tokenizer = Tokenizer(str((checkpoint_path if checkpoint_path.is_dir() else checkpoint_path.parent) / "tokenizer.model"))
self.model = build_transformer(checkpoint_path, model_size=model_size, quantize=quantize, max_context=self.max_length)
self.last_seen_toks = []
def _prefill(self, toks, temperature) -> int:
start_pos = 0
# we can skip part of the prompt if it is the same as last
for i, (a, b) in enumerate(zip(toks, self.last_seen_toks)):
if a != b: break
else: i = min(len(toks), len(self.last_seen_toks))
start_pos += i
self.last_seen_toks = toks
toks = toks[i:]
# prefill the model
for tok in toks:
self.model(Tensor([[tok]]), start_pos, temperature).realize()
start_pos += 1
return start_pos
@property
def tokenizer_name(self) -> str: pass
def chat_template(self, chat_template: bool | str = False) -> str: pass
def apply_chat_template(self, chat_history: list[dict[str, str]], add_generation_prompt: bool = True) -> str:
ret = ""
for message in chat_history:
ret += f"<|start_header_id|>{message['role']}<|end_header_id|>\n\n{message['content'].strip()}<|eot_id|>"
if add_generation_prompt: ret += "<|start_header_id|>assistant<|end_header_id|>\n\n"
return ret
def generate_until(self, requests: list[Instance]) -> list[str]:
continuations = []
for request in tqdm(requests):
prompt, args = request.args
until = [self.tokenizer.encode(tok) for tok in args.get("until", [])]
toks = [self.tokenizer.bos_id] + self.tokenizer.encode(prompt,allow_special=True)
prompt_len = len(toks)
max_gen_toks = args.get("max_gen_toks") or args.get("max_length") or self.max_length-prompt_len
assert self.max_length >= max_gen_toks, "This eval needs a longer context length"
temperature = args.get("temperature", 0.0)
start_pos = self._prefill(toks[:-1], temperature)
for _ in range(max_gen_toks):
next_tok = self.model(Tensor([toks[start_pos:]]), start_pos, temperature).item()
if next_tok in self.tokenizer.stop_tokens or next_tok in until: break
toks.append(next_tok)
start_pos += 1
continuations.append(self.tokenizer.decode(toks[prompt_len:]))
return continuations
def loglikelihood(self, requests: list[Instance]) -> list[tuple[float, bool]]: raise NotImplementedError() # needs changes to extra/models/llama.py
def loglikelihood_rolling(self, requests: list[Instance]) -> list[tuple[float, bool]]: raise NotImplementedError()
if __name__ == '__main__':
print(f"using {Device.DEFAULT} backend")
parser = argparse.ArgumentParser(description='Run LLaMA evals in tinygrad', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--size', type=str, default="8B", help=f"Size of model to use [{', '.join(list(MODEL_PARAMS.keys()))}]")
parser.add_argument('--chat', action='store_true', help="Use chat model")
parser.add_argument('--ctx', type=int, default=8192, help="Max context length")
parser.add_argument('--quantize', type=str, default=None, help="Quantize the weights to int8 or int4 in memory")
parser.add_argument('--eval', type=str, default="mgsm_en_cot_sglang", help="Run in evaluation mode")
parser.add_argument('--limit', type=int, default=None, help="Limit tests in eval")
parser.add_argument('--num_fewshot', type=int, default=None, help="Number of examples to add to context")
parser.add_argument('--model', type=Path, default="./weights/LLaMa/", help="Location of the weights")
parser.add_argument('--output_path', type=Path, default=None, help="Location of the log file")
args = parser.parse_args()
# run eval and exit
adaptor = LLaMaAdaptor(model_size=args.size, quantize=args.quantize,
checkpoint_path=args.model, max_length=args.ctx)
task_manager = TaskManager(include_path="./")
results = simple_evaluate(model=adaptor, tasks=args.eval.split(","), task_manager=task_manager, apply_chat_template=args.chat,
num_fewshot=args.num_fewshot, limit=args.limit)
if args.output_path: args.output_path.write_text(json.dumps(results, indent=2))
for task_name, val in results["results"].items():
print(f"{task_name}:")
print("\n".join(f"\t{k}: {v}" for k, v in val.items() if k != "alias"))
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/external/sglang_llama/external_llama_eval.py",
"license": "MIT License",
"lines": 88,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:test/external/external_test_onnx_runner.py | import unittest, onnx, tempfile, pathlib
import numpy as np
from tinygrad import dtypes, Tensor
from tinygrad.uop.ops import Ops
from tinygrad.device import is_dtype_supported
from tinygrad.nn.onnx import OnnxRunner, OnnxDataType
from hypothesis import given, strategies as st
# copied from test_const_folding.py
def _check_ast_count(desired_count:int, t:Tensor):
# NOTE: this has side effect because everything can be scheduled only once
schedule = t.schedule()
asts = [s for s in schedule if s.ast.op is Ops.SINK]
assert len(asts) == desired_count, f"{len(asts)} != {desired_count}"
def build_onnx(nodes, from_disk:bool=True, **kwargs):
"""Helper to build and return an OnnxRunner from ONNX nodes."""
graph = onnx.helper.make_graph(nodes, 'test', kwargs.get('inputs', []), kwargs.get('outputs', []), kwargs.get('initializers', []))
model = onnx.helper.make_model(graph)
if from_disk:
with tempfile.TemporaryDirectory() as tmpdir:
tmp_path = pathlib.Path(tmpdir)
model_path = tmp_path / "model.onnx"
onnx.save(model, model_path)
runner = OnnxRunner(model_path)
else:
# use the in-memory method
runner = OnnxRunner(Tensor(model.SerializeToString(), device="PYTHON"))
return runner
class TestOnnxRunner(unittest.TestCase):
def _test_const_fold_unary_op(self, from_disk:bool):
runner = build_onnx(
nodes=[
onnx.helper.make_node('Expand', ['inp', 'shape'], ['expanded']),
onnx.helper.make_node('Exp', ['expanded'], ['output'])
],
outputs=[onnx.helper.make_tensor_value_info('output', onnx.TensorProto.FLOAT, (5,))],
initializers=[
onnx.helper.make_tensor('inp', onnx.TensorProto.FLOAT, (), [1.0]),
onnx.helper.make_tensor('shape', onnx.TensorProto.INT64, (1,), [5])
],
from_disk=from_disk)
output = runner({'inp': Tensor([1.0])})['output']
_check_ast_count(0, output)
def _test_const_fold_binary_op(self, from_disk:bool):
runner = build_onnx(
nodes=[onnx.helper.make_node('Add', ['inp', 'const'], ['output'])],
outputs=[onnx.helper.make_tensor_value_info('output', onnx.TensorProto.FLOAT, (4,))],
initializers=[
onnx.helper.make_tensor('inp', onnx.TensorProto.FLOAT, (4,), [1, 2, 3, 4]),
onnx.helper.make_tensor('const', onnx.TensorProto.FLOAT, (), [0])
],
from_disk=from_disk)
output = runner({'inp': Tensor([1, 2, 3, 4])})['output']
_check_ast_count(0, output)
@unittest.skip("const folding is removed")
def test_const_fold_from_disk(self):
self._test_const_fold_unary_op(True)
self._test_const_fold_binary_op(True)
@unittest.skip("const folding is removed")
def test_const_fold_from_memory(self):
self._test_const_fold_unary_op(False)
# TODO: understand this and fix this, bitcast related
# self._test_const_fold_binary_op(False)
def test_external_data_loading(self):
weights = np.arange(4, dtype=np.float32)
tensor_with_data = onnx.helper.make_tensor('weights', onnx.TensorProto.FLOAT, weights.shape, weights.tobytes(), raw=True)
graph = onnx.helper.make_graph(
nodes=[onnx.helper.make_node('Add', ['inp', 'weights'], ['output'])],
name='test_external',
inputs=[onnx.helper.make_tensor_value_info('inp', onnx.TensorProto.FLOAT, (1,))],
outputs=[onnx.helper.make_tensor_value_info('output', onnx.TensorProto.FLOAT, weights.shape)],
initializer=[tensor_with_data]
)
model = onnx.helper.make_model(graph)
with tempfile.TemporaryDirectory() as tmpdir:
tmp_path = pathlib.Path(tmpdir)
model_path = tmp_path / "model.onnx"
onnx.save_model(model, model_path, save_as_external_data=True, all_tensors_to_one_file=True, size_threshold=0, location="weights.onnx_data")
runner = OnnxRunner(model_path)
output = runner({'inp': Tensor([1])})['output']
np.testing.assert_equal(output.numpy(), weights + 1)
all_dtypes = list(OnnxDataType)
device_supported_dtypes = {odt for odt in OnnxDataType if is_dtype_supported(odt.to_dtype())}
class TestOnnxRunnerDtypes(unittest.TestCase):
"""
Internal tensors (initializers, attributes) fallback to default dtype if unsupported by device.
External tensors (inputs) preserve their original dtype - user must ensure compatibility with device.
"""
def _get_expected_dtype(self, onnx_dtype: int, is_input: bool):
true_dtype = OnnxDataType(onnx_dtype).to_dtype()
# inputs always preserve their true dtype.
if is_input:
return true_dtype
# supported types are always themselves.
if onnx_dtype in device_supported_dtypes:
return true_dtype
# otherwise it's an unsupported dtype that's internal to the ONNX model, which should fallback to default.
return dtypes.default_int if dtypes.is_int(true_dtype) else dtypes.default_float
@given(onnx_dtype=st.sampled_from(all_dtypes))
def test_input_dtype(self, onnx_dtype: int):
expected_dtype = self._get_expected_dtype(onnx_dtype, True)
runner = build_onnx(
nodes=[onnx.helper.make_node('Identity', ['input'], ['output'])],
inputs=[onnx.helper.make_tensor_value_info('input', onnx_dtype, ())],
outputs=[onnx.helper.make_tensor_value_info('output', onnx_dtype, ())],
from_disk=False)
self.assertEqual(runner.graph_inputs['input'].dtype, expected_dtype)
@given(onnx_dtype=st.sampled_from(all_dtypes))
def test_initializer_dtype(self, onnx_dtype: int):
expected_dtype = self._get_expected_dtype(onnx_dtype, False)
runner = build_onnx(
nodes=[onnx.helper.make_node('Identity', ['initializer'], ['output'])],
outputs=[onnx.helper.make_tensor_value_info('output', onnx_dtype, (2,))],
initializers=[onnx.helper.make_tensor('initializer', onnx_dtype, (2,), [1, 2])],
from_disk=False)
self.assertEqual(runner.graph_values['initializer'].dtype, expected_dtype)
@given(onnx_dtype=st.sampled_from(all_dtypes))
def test_node_attribute_dtype(self, onnx_dtype: int):
expected_dtype = self._get_expected_dtype(onnx_dtype, False)
value_tensor = onnx.helper.make_tensor('value', onnx_dtype, (2,), [1, 2])
runner = build_onnx(
nodes=[onnx.helper.make_node('Constant', [], ['output'], value=value_tensor)],
outputs=[onnx.helper.make_tensor_value_info('output', onnx_dtype, (2,))],
from_disk=False)
self.assertEqual(runner.graph_nodes[0].opts['value'].dtype, expected_dtype)
if __name__ == '__main__':
unittest.main() | {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/external/external_test_onnx_runner.py",
"license": "MIT License",
"lines": 126,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:tinygrad/runtime/support/memory.py | import collections, functools, dataclasses, enum
from typing import Any, ClassVar
from tinygrad.helpers import round_up, getenv
class BumpAllocator:
def __init__(self, size:int, base:int=0, wrap:bool=True): self.size, self.ptr, self.base, self.wrap = size, 0, base, wrap
def alloc(self, size:int, alignment:int=1) -> int:
if round_up(self.ptr, alignment) + size > self.size:
if not self.wrap: raise RuntimeError("Out of memory")
self.ptr = 0
self.ptr = (res:=round_up(self.ptr, alignment)) + size
return res + self.base
class TLSFAllocator:
"""
The allocator is based on the Two-Level Segregated Fit (TLSF) algorithm. The allocator maintains 2 level of buckets:
* 1st level is determined by the most significant bit of the size.
* 2nd level splits the covered memory of 1st level into @lv2_cnt entries.
For each allocation request, the allocator searches for the smallest block that can fit the requested size.
For each deallocation request, the allocator merges the block with its neighbors if they are free.
"""
def __init__(self, size:int, base:int=0, block_size:int=16, lv2_cnt:int=16):
self.size, self.base, self.block_size, self.l2_cnt = size, base, block_size, lv2_cnt.bit_length()
self.storage:list = [collections.defaultdict(list) for _ in range(size.bit_length() + 1)]
self.lv1_entries:list[int] = [0] * len(self.storage)
# self.blocks is more like a linked list, where each entry is a contiguous block.
self.blocks:dict[int, tuple[int, int|None, int|None, bool]] = {0: (size, None, None, True)} # size, next, prev, is_free
if size > 0: self._insert_block(0, size)
@functools.cache # pylint: disable=method-cache-max-size-none
def lv1(self, size): return size.bit_length()
@functools.cache # pylint: disable=method-cache-max-size-none
def lv2(self, size): return (size - (1 << (size.bit_length() - 1))) // (1 << max(0, size.bit_length() - self.l2_cnt))
def _insert_block(self, start:int, size:int, prev:int|None=None):
if prev is None: prev = self.blocks[start][2]
self.storage[self.lv1(size)][self.lv2(size)].append(start)
self.lv1_entries[self.lv1(size)] += 1
self.blocks[start] = (size, start + size, prev, True)
return self
def _remove_block(self, start:int, size:int, prev:int|None=None):
if prev is None: prev = self.blocks[start][2]
self.storage[self.lv1(size)][self.lv2(size)].remove(start)
self.lv1_entries[self.lv1(size)] -= 1
self.blocks[start] = (size, start + size, prev, False)
return self
def _split_block(self, start:int, size:int, new_size:int):
nxt = self.blocks[start][1]
assert self.blocks[start][3], "block must be free"
self._remove_block(start, size)._insert_block(start, new_size)._insert_block(start + new_size, size - new_size, prev=start)
if nxt in self.blocks: self.blocks[nxt] = (self.blocks[nxt][0], self.blocks[nxt][1], start + new_size, self.blocks[nxt][3])
return self
def _merge_right(self, start:int):
size, nxt, _, is_free = self.blocks[start]
assert is_free, "block must be free"
while is_free and nxt in self.blocks:
if (blk:=self.blocks[nxt])[3] is False: break
self._remove_block(start, size)._remove_block(nxt, blk[0])._insert_block(start, size:=size + blk[0])
assert self.blocks[start][1] == blk[1]
_, nxt, _, _ = self.blocks.pop(nxt)
if nxt in self.blocks: self.blocks[nxt] = (self.blocks[nxt][0], self.blocks[nxt][1], start, self.blocks[nxt][3])
def _merge_block(self, start:int):
# Go left while blocks are free. Then merge all them right.
while (x:=self.blocks[start][2]) is not None and self.blocks[x][3] is True: start = x
self._merge_right(start)
def alloc(self, req_size:int, align:int=1) -> int:
req_size = max(self.block_size, req_size) # at least block size.
size = max(self.block_size, req_size + align - 1)
# Round up the allocation size to the next bucket, so any entry there can fit the requested size.
size = round_up(size, (1 << size.bit_length() - self.l2_cnt))
# Search for the smallest block that can fit the requested size. Start with its bucket and go up until any block is found.
for l1 in range(self.lv1(size), len(self.storage)):
if self.lv1_entries[l1] == 0: continue
for l2 in range(self.lv2(size) if l1 == size.bit_length() else 0, (1 << self.l2_cnt)):
if len(self.storage[l1][l2]) > 0:
# Block start address.
start = self.storage[l1][l2][0]
nsize = self.blocks[start][0]
assert nsize >= size, "block must be larger"
# If request contains alignment, split the block into two parts.
if (new_start:=round_up(start, align)) != start:
self._split_block(start, nsize, new_start - start)
start, nsize = new_start, self.blocks[new_start][0]
# If the block is larger than the requested size, split it into two parts.
if nsize > req_size: self._split_block(start, nsize, req_size)
self._remove_block(start, req_size) # Mark the block as allocated.
return start + self.base
raise MemoryError(f"Can't allocate {req_size} bytes")
def free(self, start:int):
self._insert_block(start - self.base, self.blocks[start - self.base][0])._merge_block(start - self.base)
# Memory Management
class AddrSpace(enum.Enum): PHYS = enum.auto(); SYS = enum.auto(); PEER = enum.auto() # noqa: E702
@dataclasses.dataclass(frozen=True)
class VirtMapping: va_addr:int; size:int; paddrs:list[tuple[int, int]]; aspace:AddrSpace; uncached:bool=False; snooped:bool=False # noqa: E702
class PageTableTraverseContext:
def __init__(self, dev, pt, vaddr, create_pts=False, free_pts=False, boot=False):
self.dev, self.vaddr, self.create_pts, self.free_pts, self.boot = dev, vaddr - dev.mm.va_base, create_pts, free_pts, boot
self.pt_stack:list[tuple[Any, int, int]] = [(pt, self._pt_pte_idx(pt, self.vaddr), self._pt_pte_size(pt))]
def _pt_pte_cnt(self, lv): return self.dev.mm.pte_cnt[lv]
def _pt_pte_size(self, pt): return self.dev.mm.pte_covers[pt.lv]
def _pt_pte_idx(self, pt, va): return (va // self._pt_pte_size(pt)) % self._pt_pte_cnt(pt.lv)
def level_down(self):
pt, pte_idx, _ = self.pt_stack[-1]
if not pt.valid(pte_idx):
assert self.create_pts, "Not allowed to create new page table"
pt.set_entry(pte_idx, self.dev.mm.palloc(0x1000, zero=True, boot=self.boot, ptable=True), table=True, valid=True)
assert not pt.is_page(pte_idx), f"Must be table pt={pt.paddr:#x}, {pt.lv=} {pte_idx=} {pt.entry(pte_idx)=:#x}"
child_page_table = self.dev.mm.pt_t(self.dev, pt.address(pte_idx), lv=pt.lv+1)
self.pt_stack.append((child_page_table, self._pt_pte_idx(child_page_table, self.vaddr), self._pt_pte_size(child_page_table)))
return self.pt_stack[-1]
def _try_free_pt(self) -> bool:
pt, _, _ = self.pt_stack[-1]
if self.free_pts and pt != self.dev.mm.root_page_table and all(not pt.valid(i) for i in range(self._pt_pte_cnt(self.pt_stack[-1][0].lv))):
self.dev.mm.pfree(pt.paddr, ptable=True)
parent_pt, parent_pte_idx, _ = self.pt_stack[-2]
parent_pt.set_entry(parent_pte_idx, 0x0, valid=False)
return True
return False
def level_up(self):
while self._try_free_pt() or self.pt_stack[-1][1] == self._pt_pte_cnt(self.pt_stack[-1][0].lv):
pt, pt_cnt, _ = self.pt_stack.pop()
if pt_cnt == self._pt_pte_cnt(pt.lv): self.pt_stack[-1] = (self.pt_stack[-1][0], self.pt_stack[-1][1] + 1, self.pt_stack[-1][2])
def next(self, size:int, paddr:int|None=None, off:int=0):
while size > 0:
pt, pte_idx, pte_covers = self.pt_stack[-1]
if self.create_pts:
assert paddr is not None, "paddr must be provided when allocating new page tables"
while pte_covers > size or not pt.supports_huge_page(paddr+off) or self.vaddr&(pte_covers-1) != 0: pt, pte_idx, pte_covers = self.level_down()
else:
while not pt.is_page(pte_idx): pt, pte_idx, pte_covers = self.level_down()
entries = min(size // pte_covers, self._pt_pte_cnt(pt.lv) - pte_idx)
assert entries > 0, f"Invalid entries {size=:#x}, {pte_covers=:#x}"
yield off, pt, pte_idx, entries, pte_covers
size, off, self.vaddr = size - entries * pte_covers, off + entries * pte_covers, self.vaddr + entries * pte_covers
self.pt_stack[-1] = (pt, pte_idx + entries, pte_covers)
self.level_up()
class MemoryManager:
va_allocator: ClassVar[TLSFAllocator|None] = None
def __init__(self, dev, vram_size:int, boot_size:int, pt_t, va_bits:int, va_shifts:list[int], va_base:int,
palloc_ranges:list[tuple[int, int]], first_lv:int=0, reserve_ptable=False):
self.dev, self.vram_size, self.va_shifts, self.va_base, lvl_msb = dev, vram_size, va_shifts, va_base, va_shifts + [va_bits + 1]
self.pte_covers, self.pte_cnt = [1 << x for x in va_shifts][::-1], [1 << (lvl_msb[i+1] - lvl_msb[i]) for i in range(len(lvl_msb) - 1)][::-1]
self.pt_t, self.palloc_ranges, self.level_cnt, self.va_bits, self.reserve_ptable = pt_t, palloc_ranges, len(va_shifts), va_bits, reserve_ptable
self.boot_allocator = TLSFAllocator(boot_size, base=0)
self.ptable_allocator = TLSFAllocator(round_up(vram_size // 512, 1 << 20) if self.reserve_ptable else 0, base=self.boot_allocator.size)
self.pa_allocator = TLSFAllocator(vram_size - (off_sz:=self.boot_allocator.size + self.ptable_allocator.size), base=off_sz)
self.root_page_table = pt_t(self.dev, self.palloc(0x1000, zero=not self.dev.smi_dev, boot=True), lv=first_lv)
def _frag_size(self, va, sz, must_cover=True):
"""
Calculate the tlb fragment size for a given virtual address and size.
If must_cover is True, the fragment size must cover the size, otherwise the biggest fragment size that fits the size is returned.
Fragment 0 is 4KB, 1 is 8KB and so on.
"""
va_pwr2_div, sz_pwr2_div, sz_pwr2_max = va & -(va) if va > 0 else (1 << 63), sz & -(sz), (1 << (sz.bit_length() - 1))
return (min(va_pwr2_div, sz_pwr2_div) if must_cover else min(va_pwr2_div, sz_pwr2_max)).bit_length() - 1 - 12
def page_tables(self, vaddr:int, size:int):
ctx = PageTableTraverseContext(self.dev, self.root_page_table, vaddr, create_pts=True)
for _ in ctx.next(size, paddr=0): return [pt for pt, _, _ in ctx.pt_stack]
def map_range(self, vaddr:int, size:int, paddrs:list[tuple[int, int]], aspace:AddrSpace, uncached=False, snooped=False, boot=False) -> VirtMapping:
if getenv("MM_DEBUG", 0): print(f"mm {self.dev.devfmt}: mapping {vaddr=:#x} ({size=:#x})")
assert size == sum(p[1] for p in paddrs), f"Size mismatch {size=} {sum(p[1] for p in paddrs)=}"
ctx = PageTableTraverseContext(self.dev, self.root_page_table, vaddr, create_pts=True, boot=boot)
for paddr, psize in paddrs:
for off, pt, pte_idx, pte_cnt, pte_covers in ctx.next(psize, paddr=paddr):
for pte_off in range(pte_cnt):
assert not pt.valid(pte_idx + pte_off), f"PTE already mapped: {pt.entry(pte_idx + pte_off):#x}"
pt.set_entry(pte_idx + pte_off, paddr + off + pte_off * pte_covers, uncached=uncached, aspace=aspace, snooped=snooped,
frag=self._frag_size(ctx.vaddr+off, pte_cnt * pte_covers), valid=True)
self.on_range_mapped()
return VirtMapping(vaddr, size, paddrs, aspace=aspace, uncached=uncached, snooped=snooped)
def unmap_range(self, vaddr:int, size:int):
if getenv("MM_DEBUG", 0): print(f"mm {self.dev.devfmt}: unmapping {vaddr=:#x} ({size=:#x})")
ctx = PageTableTraverseContext(self.dev, self.root_page_table, vaddr, free_pts=True)
for _, pt, pte_idx, pte_cnt, _ in ctx.next(size):
for pte_id in range(pte_idx, pte_idx + pte_cnt):
assert pt.valid(pte_id), f"PTE not mapped: {pt.entry(pte_id):#x}"
pt.set_entry(pte_id, paddr=0x0, valid=False)
def on_range_mapped(self): pass
@classmethod
def alloc_vaddr(cls, size:int, align=0x1000) -> int:
assert cls.va_allocator is not None, "must be set"
return cls.va_allocator.alloc(size, max((1 << (size.bit_length() - 1)), align))
def valloc(self, size:int, align=0x1000, uncached=False, contiguous=False) -> VirtMapping:
# Alloc physical memory and map it to the virtual address
va = self.alloc_vaddr(size:=round_up(size, 0x1000), align)
if contiguous: paddrs = [(self.palloc(size, zero=True), size)]
else:
# Traverse the PT to find the largest contiguous sizes we need to allocate. Try to allocate the longest segment to reduce TLB pressure.
nxt_range, rem_size, paddrs = 0, size, []
while rem_size > 0:
while self.palloc_ranges[nxt_range][0] > rem_size: nxt_range += 1
try: paddrs += [(self.palloc(try_sz:=self.palloc_ranges[nxt_range][0], self.palloc_ranges[nxt_range][1], zero=False), try_sz)]
except MemoryError:
# Move to a smaller size and try again.
nxt_range += 1
if nxt_range == len(self.palloc_ranges):
for paddr, _ in paddrs: self.pa_allocator.free(paddr)
raise MemoryError(f"Failed to allocate memory. (total allocation size={size:#x}, current try={self.palloc_ranges[nxt_range-1]})")
continue
rem_size -= self.palloc_ranges[nxt_range][0]
return self.map_range(va, size, paddrs, aspace=AddrSpace.PHYS, uncached=uncached)
def vfree(self, vm:VirtMapping):
assert self.va_allocator is not None, "must be set"
self.unmap_range(vm.va_addr, vm.size)
self.va_allocator.free(vm.va_addr)
for paddr, _ in vm.paddrs: self.pa_allocator.free(paddr)
def palloc(self, size:int, align:int=0x1000, zero=True, boot=False, ptable=False) -> int:
assert self.dev.is_booting == boot, "During booting, only boot memory can be allocated"
allocator = self.boot_allocator if boot else (self.ptable_allocator if self.reserve_ptable and ptable else self.pa_allocator)
paddr = allocator.alloc(round_up(size, 0x1000), align)
if zero: self.dev.vram[paddr:paddr+size] = bytes(size)
return paddr
def pfree(self, paddr:int, ptable=False): (self.ptable_allocator if self.reserve_ptable and ptable else self.pa_allocator).free(paddr)
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "tinygrad/runtime/support/memory.py",
"license": "MIT License",
"lines": 209,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
tinygrad/tinygrad:tinygrad/runtime/support/system.py | from __future__ import annotations
import os, mmap, array, functools, ctypes, select, contextlib, dataclasses, sys, itertools, struct, socket, subprocess, time, enum
from typing import ClassVar
from tinygrad.helpers import round_up, getenv, OSX, temp, ceildiv, unwrap, fetch, system
from tinygrad.runtime.autogen import libc, pci, vfio, iokit, corefoundation
from tinygrad.runtime.support.hcq import FileIOInterface, MMIOInterface, HCQBuffer, hcq_filter_visible_devices
from tinygrad.runtime.support.memory import MemoryManager, VirtMapping, AddrSpace
from tinygrad.runtime.support.usb import ASM24Controller, USBMMIOInterface
MAP_FIXED, MAP_FIXED_NOREPLACE = 0x10, 0x100000
MAP_LOCKED, MAP_POPULATE, MAP_NORESERVE = 0 if OSX else 0x2000, getattr(mmap, "MAP_POPULATE", 0 if OSX else 0x008000), 0x400
@dataclasses.dataclass(frozen=True)
class PCIBarInfo: addr:int; size:int # noqa: E702
class _System:
def write_sysfs(self, path:str, value:str, msg:str, expected:str|None=None):
if FileIOInterface(path, os.O_RDONLY).read().splitlines()[0] != (expected or value):
os.system(cmd:=f"sudo sh -c 'echo {value} > {path}'")
if FileIOInterface(path, os.O_RDONLY).read().splitlines()[0] != (expected or value): raise RuntimeError(f"{msg}. Please run {cmd} manually.")
@functools.cached_property
def atomic_lib(self): return ctypes.CDLL(ctypes.util.find_library('atomic')) if sys.platform == "linux" else None
@functools.cached_property
def libsys(self): return ctypes.CDLL(ctypes.util.find_library("System"))
@functools.cached_property
def pagemap(self) -> FileIOInterface:
self.write_sysfs("/proc/sys/vm/compact_unevictable_allowed", "0", "Failed to disable migration of locked pages")
return FileIOInterface("/proc/self/pagemap", os.O_RDONLY)
@functools.cached_property
def vfio(self) -> FileIOInterface|None:
try:
if not FileIOInterface.exists("/sys/module/vfio"): os.system("sudo modprobe vfio-pci disable_idle_d3=1")
FileIOInterface("/sys/module/vfio/parameters/enable_unsafe_noiommu_mode", os.O_RDWR).write("1")
vfio_fd = FileIOInterface("/dev/vfio/vfio", os.O_RDWR)
vfio.VFIO_CHECK_EXTENSION(vfio_fd, vfio.VFIO_NOIOMMU_IOMMU)
return vfio_fd
except OSError: return None
def reserve_hugepages(self, cnt): os.system(f"sudo sh -c 'echo {cnt} > /proc/sys/vm/nr_hugepages'")
def memory_barrier(self): lib.atomic_thread_fence(__ATOMIC_SEQ_CST:=5) if (lib:=self.libsys if OSX else self.atomic_lib) is not None else None
def lock_memory(self, addr:int, size:int):
if libc.mlock(ctypes.c_void_p(addr), size): raise RuntimeError(f"Failed to lock memory at {addr:#x} with size {size:#x}")
def system_paddrs(self, vaddr:int, size:int) -> list[int]:
self.pagemap.seek(vaddr // mmap.PAGESIZE * 8)
return [(x & ((1<<55) - 1)) * mmap.PAGESIZE for x in array.array('Q', self.pagemap.read(size//mmap.PAGESIZE*8, binary=True))]
def pci_scan_bus(self, vendor:int, devices:list[tuple[int, list[int]]], base_class:int|None=None) -> list[str]:
all_devs = []
if OSX:
def read_prop(svc, key) -> int:
cfkey = corefoundation.CFStringCreateWithCString(None, key.encode(), corefoundation.kCFStringEncodingUTF8)
cfdata = ctypes.cast(iokit.IORegistryEntryCreateCFProperty(svc, ctypes.cast(cfkey, iokit.CFStringRef), None, 0), corefoundation.CFDataRef)
corefoundation.CFDataGetBytes(cfdata, corefoundation.CFRange(0, corefoundation.CFDataGetLength(cfdata)), buf:=(ctypes.c_uint8*8)())
return int.from_bytes(bytes(buf), "little")
iokit.IOServiceGetMatchingServices(0, iokit.IOServiceMatching(b"IOPCIDevice"), ctypes.byref(iterator:=ctypes.c_uint()))
while svc:=iokit.IOIteratorNext(iterator): all_devs.append((v:=read_prop(svc, "vendor-id"), d:=read_prop(svc, "device-id"), f"{v:x}:{d:x}"))
else:
for pcibus in FileIOInterface("/sys/bus/pci/devices").listdir():
if base_class is not None and int(FileIOInterface(f"/sys/bus/pci/devices/{pcibus}/class").read(), 16) >> 16 != base_class: continue
all_devs.append((int(FileIOInterface(f"/sys/bus/pci/devices/{pcibus}/vendor").read(), 16),
int(FileIOInterface(f"/sys/bus/pci/devices/{pcibus}/device").read(), 16), pcibus))
return sorted([val for vndr, device, val in all_devs if vndr == vendor and any((device & mask) in devlist for mask, devlist in devices)])
  def pci_setup_usb_bars(self, usb:ASM24Controller, gpu_bus:int, mem_base:int, pref_mem_base:int) -> dict[int, PCIBarInfo]:
    """Program the PCI bridge chain and the GPU's BARs through a USB config-space tunnel.

    Walks each bridge up to `gpu_bus`, assigning bus numbers and memory windows, attempts to resize
    BAR0 via the Resizable BAR capability, then sizes and assigns every GPU BAR from `mem_base`
    (non-prefetchable) and `pref_mem_base` (prefetchable). Returns {bar_index: PCIBarInfo}.
    """
    for bus in range(gpu_bus):
      # All 3 values must be written at the same time.
      buses = (0 << 0) | ((bus+1) << 8) | ((gpu_bus) << 16)
      usb.pcie_cfg_req(pci.PCI_PRIMARY_BUS, bus=bus, dev=0, fn=0, value=buses, size=4)
      # forward both memory windows (non-prefetchable and prefetchable) through this bridge
      usb.pcie_cfg_req(pci.PCI_MEMORY_BASE, bus=bus, dev=0, fn=0, value=(mem_base>>16) & 0xffff, size=2)
      usb.pcie_cfg_req(pci.PCI_MEMORY_LIMIT, bus=bus, dev=0, fn=0, value=0xffff, size=2)
      usb.pcie_cfg_req(pci.PCI_PREF_MEMORY_BASE, bus=bus, dev=0, fn=0, value=(pref_mem_base>>16) & 0xffff, size=2)
      usb.pcie_cfg_req(pci.PCI_PREF_MEMORY_LIMIT, bus=bus, dev=0, fn=0, value=0xffff, size=2)
      usb.pcie_cfg_req(pci.PCI_PREF_BASE_UPPER32, bus=bus, dev=0, fn=0, value=pref_mem_base >> 32, size=4)
      usb.pcie_cfg_req(pci.PCI_PREF_LIMIT_UPPER32, bus=bus, dev=0, fn=0, value=0xffffffff, size=4)
      usb.pcie_cfg_req(pci.PCI_COMMAND, bus=bus, dev=0, fn=0, value=pci.PCI_COMMAND_IO | pci.PCI_COMMAND_MEMORY | pci.PCI_COMMAND_MASTER, size=1)
    # resize bar 0
    cap_ptr = 0x100
    while cap_ptr:
      # walk the extended capability list looking for the Resizable BAR capability
      if pci.PCI_EXT_CAP_ID(hdr:=usb.pcie_cfg_req(cap_ptr, bus=gpu_bus, dev=0, fn=0, size=4)) == pci.PCI_EXT_CAP_ID_REBAR:
        cap = usb.pcie_cfg_req(cap_ptr + 0x04, bus=gpu_bus, dev=0, fn=0, size=4)
        # select the largest supported size from the capability bitmask (bits 4+)
        new_ctrl = (usb.pcie_cfg_req(cap_ptr + 0x08, bus=gpu_bus, dev=0, fn=0, size=4) & ~0x1F00) | ((int(cap >> 4).bit_length() - 1) << 8)
        usb.pcie_cfg_req(cap_ptr + 0x08, bus=gpu_bus, dev=0, fn=0, value=new_ctrl, size=4)
      cap_ptr = pci.PCI_EXT_CAP_NEXT(hdr)
    mem_space_addr, bar_off, bars = [mem_base, pref_mem_base], 0, {}
    while bar_off < 24:
      cfg = usb.pcie_cfg_req(pci.PCI_BASE_ADDRESS_0 + bar_off, bus=gpu_bus, dev=0, fn=0, size=4)
      bar_mem, bar_64 = bool(cfg & pci.PCI_BASE_ADDRESS_MEM_PREFETCH), cfg & pci.PCI_BASE_ADDRESS_MEM_TYPE_64
      if (cfg & pci.PCI_BASE_ADDRESS_SPACE) == pci.PCI_BASE_ADDRESS_SPACE_MEMORY:
        # classic BAR sizing: write all-ones, read back the address mask
        usb.pcie_cfg_req(pci.PCI_BASE_ADDRESS_0 + bar_off, bus=gpu_bus, dev=0, fn=0, value=0xffffffff, size=4)
        lo = (usb.pcie_cfg_req(pci.PCI_BASE_ADDRESS_0 + bar_off, bus=gpu_bus, dev=0, fn=0, size=4) & 0xfffffff0)
        if bar_64: usb.pcie_cfg_req(pci.PCI_BASE_ADDRESS_0 + bar_off + 4, bus=gpu_bus, dev=0, fn=0, value=0xffffffff, size=4)
        hi = (usb.pcie_cfg_req(pci.PCI_BASE_ADDRESS_0 + bar_off + 4, bus=gpu_bus, dev=0, fn=0, size=4) if bar_64 else 0)
        bar_size = ((~(((hi << 32) | lo) & ~0xf)) + 1) & (0xffffffffffffffff if bar_64 else 0xffffffff)
        # assign the BAR an address from the matching (prefetchable or not) memory window
        usb.pcie_cfg_req(pci.PCI_BASE_ADDRESS_0 + bar_off, bus=gpu_bus, dev=0, fn=0, value=mem_space_addr[bar_mem] & 0xffffffff, size=4)
        if bar_64: usb.pcie_cfg_req(pci.PCI_BASE_ADDRESS_0 + bar_off + 4, bus=gpu_bus, dev=0, fn=0, value=mem_space_addr[bar_mem] >> 32, size=4)
        bars[bar_off // 4] = PCIBarInfo(mem_space_addr[bar_mem], bar_size)
        mem_space_addr[bar_mem] += round_up(bar_size, 2 << 20)
      bar_off += 8 if bar_64 else 4
    usb.pcie_cfg_req(pci.PCI_COMMAND, bus=gpu_bus, dev=0, fn=0, value=pci.PCI_COMMAND_IO | pci.PCI_COMMAND_MEMORY | pci.PCI_COMMAND_MASTER, size=1)
    return bars
def flock_acquire(self, name:str) -> int:
import fcntl # to support windows
os.umask(0) # Set umask to 0 to allow creating files with 0666 permissions
# Avoid O_CREAT because we donβt want to re-create/replace an existing file (triggers extra perms checks) when opening as non-owner.
if os.path.exists(lock_name:=temp(name)): self.lock_fd = os.open(lock_name, os.O_RDWR)
else: self.lock_fd = os.open(lock_name, os.O_RDWR | os.O_CREAT | os.O_CLOEXEC, 0o666)
try: fcntl.flock(self.lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
except OSError: raise RuntimeError(f"Failed to acquire lock file {name}. `sudo lsof {lock_name}` may help identify the process holding the lock.")
return self.lock_fd
System = _System()  # module-level singleton: shared OS/PCI helper used by the device classes below
# *** PCI Devices
class PCIDevice:
  """Direct userspace handle to a PCI device via Linux sysfs (optionally through VFIO no-IOMMU).

  On construction: takes a per-device file lock, unbinds any kernel driver, optionally resizes
  BARs, enables the device (or binds vfio-pci), and opens config space plus BAR resource files.
  """
  def __init__(self, devpref:str, pcibus:str, bars:list[int], resize_bars:list[int]|None=None):
    # serialize access to this device across processes
    self.lock_fd = System.flock_acquire(f"{devpref.lower()}_{pcibus.lower()}.lock")
    self.pcibus, self.irq_poller = pcibus, None
    # probe access early so the user gets an actionable error message
    try: FileIOInterface(f"/sys/bus/pci/devices/{self.pcibus}/enable", os.O_RDWR)
    except PermissionError: raise PermissionError(f"Cannot access PCI device {pcibus}: run `extra/amdpci/setup_python_cap.sh` or use sudo")
    # detach whatever kernel driver is currently bound
    if FileIOInterface.exists(f"/sys/bus/pci/devices/{self.pcibus}/driver"):
      FileIOInterface(f"/sys/bus/pci/devices/{self.pcibus}/driver/unbind", os.O_WRONLY).write(self.pcibus)
    # grow each requested BAR to the largest size sysfs advertises
    for i in resize_bars or []:
      if FileIOInterface.exists(rpath:=f"/sys/bus/pci/devices/{self.pcibus}/resource{i}_resize"):
        try: FileIOInterface(rpath, os.O_RDWR).write(str(int(FileIOInterface(rpath, os.O_RDONLY).read(), 16).bit_length() - 1))
        except OSError as e: raise RuntimeError(f"Cannot resize BAR {i}: {e}. Ensure the resizable BAR option is enabled.") from e
    if getenv("VFIO", 0) and (vfio_fd:=System.vfio) is not None:
      # VFIO path: bind vfio-pci, join the no-IOMMU group, and route MSI interrupts to an eventfd
      FileIOInterface(f"/sys/bus/pci/devices/{self.pcibus}/driver_override", os.O_WRONLY).write("vfio-pci")
      FileIOInterface("/sys/bus/pci/drivers_probe", os.O_WRONLY).write(self.pcibus)
      iommu_group = FileIOInterface.readlink(f"/sys/bus/pci/devices/{self.pcibus}/iommu_group").split('/')[-1]
      self.vfio_group = FileIOInterface(f"/dev/vfio/noiommu-{iommu_group}", os.O_RDWR)
      vfio.VFIO_GROUP_SET_CONTAINER(self.vfio_group, ctypes.c_int(vfio_fd.fd))
      with contextlib.suppress(OSError): vfio.VFIO_SET_IOMMU(vfio_fd, vfio.VFIO_NOIOMMU_IOMMU) # set iommu works only once for the fd.
      self.vfio_dev = FileIOInterface(fd=vfio.VFIO_GROUP_GET_DEVICE_FD(self.vfio_group, ctypes.create_string_buffer(self.pcibus.encode())))
      self.irq_fd = FileIOInterface.eventfd(0, 0)
      self.irq_poller = select.poll()
      self.irq_poller.register(self.irq_fd.fd, select.POLLIN)
      irqs = vfio.struct_vfio_irq_set(index=vfio.VFIO_PCI_MSI_IRQ_INDEX, flags=vfio.VFIO_IRQ_SET_DATA_EVENTFD|vfio.VFIO_IRQ_SET_ACTION_TRIGGER,
        argsz=ctypes.sizeof(vfio.struct_vfio_irq_set), count=1, data=(ctypes.c_int * 1)(self.irq_fd.fd))
      vfio.VFIO_DEVICE_SET_IRQS(self.vfio_dev, irqs)
    else: FileIOInterface(f"/sys/bus/pci/devices/{self.pcibus}/enable", os.O_RDWR).write("1")
    self.cfg_fd = FileIOInterface(f"/sys/bus/pci/devices/{self.pcibus}/config", os.O_RDWR | os.O_SYNC | os.O_CLOEXEC)
    self.bar_fds = {b: FileIOInterface(f"/sys/bus/pci/devices/{self.pcibus}/resource{b}", os.O_RDWR | os.O_SYNC | os.O_CLOEXEC) for b in bars}
    # /sys/.../resource lists "start end flags" per BAR; size = end - start + 1
    res = FileIOInterface(f"/sys/bus/pci/devices/{self.pcibus}/resource", os.O_RDONLY).read().splitlines()
    self.bar_info = {j:PCIBarInfo(int(s,16), int(e,16)-int(s,16)+1) for j,(s,e,_) in enumerate(l.split() for l in res)}
  def alloc_sysmem(self, size:int, vaddr:int=0, contiguous:bool=False) -> tuple[MMIOInterface, list[int]]:
    """mmap locked anonymous system memory; return (view, physical address of every 4K page)."""
    assert not contiguous or size <= (2 << 20), "Contiguous allocation is only supported for sizes up to 2MB"
    # contiguous multi-page allocations are backed by MAP_HUGETLB (hence the 2MB cap above)
    flags = (libc.MAP_HUGETLB if contiguous and (size:=round_up(size, mmap.PAGESIZE)) > mmap.PAGESIZE else 0) | (MAP_FIXED if vaddr else 0)
    va = FileIOInterface.anon_mmap(vaddr, size, mmap.PROT_READ|mmap.PROT_WRITE, mmap.MAP_SHARED|mmap.MAP_ANONYMOUS|MAP_POPULATE|MAP_LOCKED|flags, 0)
    sysmem_view, paddrs = MMIOInterface(va, size), [(x, mmap.PAGESIZE) for x in System.system_paddrs(va, size)]
    return sysmem_view, [p + i for p, sz in paddrs for i in range(0, sz, 0x1000)][:ceildiv(size, 0x1000)]
  def read_config(self, offset:int, size:int): return int.from_bytes(self.cfg_fd.read(size, binary=True, offset=offset), byteorder='little')
  def write_config(self, offset:int, value:int, size:int): self.cfg_fd.write(value.to_bytes(size, byteorder='little'), binary=True, offset=offset)
  def map_bar(self, bar:int, off:int=0, addr:int=0, size:int|None=None, fmt='B') -> MMIOInterface:
    """mmap (part of) a BAR into this process and return an MMIO view over it."""
    fd, sz = self.bar_fds[bar], size or (self.bar_info[bar].size - off)
    libc.madvise(loc:=fd.mmap(addr, sz, mmap.PROT_READ | mmap.PROT_WRITE, mmap.MAP_SHARED | (MAP_FIXED if addr else 0), off), sz, libc.MADV_DONTFORK)
    return MMIOInterface(loc, sz, fmt=fmt)
  def reset(self): os.system(f"sudo sh -c 'echo 1 > /sys/bus/pci/devices/{self.pcibus}/reset'")
class USBPCIDevice(PCIDevice):
  """PCIDevice variant that reaches the device through an ASM24 USB bridge instead of sysfs."""
  def __init__(self, devpref:str, pcibus:str, bars:list[int], resize_bars:list[int]|None=None):
    self.lock_fd = System.flock_acquire(f"{devpref.lower()}_{pcibus.lower()}.lock")
    self.usb = ASM24Controller()
    self.pcibus = pcibus
    # enumerate and program the BARs over USB (GPU sits behind bridges at bus 4)
    self.bar_info = System.pci_setup_usb_bars(self.usb, gpu_bus=4, mem_base=0x10000000, pref_mem_base=(32 << 30))
  def map_bar(self, bar, off=0, addr=0, size=None, fmt='B'):
    """Return a USB-backed MMIO view of the given BAR (optionally offset / size-limited)."""
    info = self.bar_info[bar]
    return USBMMIOInterface(self.usb, info.addr + off, size or info.size, fmt)
  def dma_view(self, ctrl_addr, size):
    """View of controller-side memory rather than PCI BAR space."""
    return USBMMIOInterface(self.usb, ctrl_addr, size, fmt='B', pcimem=False)
class PCIDevImplBase:
  # per-device memory manager; concrete device implementations must provide this
  mm: MemoryManager
@dataclasses.dataclass
class PCIAllocationMeta:
  """Bookkeeping attached to a buffer allocated through the PCI interface."""
  mapping: VirtMapping
  has_cpu_mapping: bool
  hMemory: int = 0
class LNXPCIIfaceBase:
  """Linux PCI device interface: allocation, freeing and peer-mapping of device/system memory."""
  dev_impl:PCIDevImplBase
  gpus:ClassVar[list[str]] = []  # scanned once per subclass, shared by all instances
  def __init__(self, dev, dev_id, vendor, devices:list[tuple[int, list[int]]], bars, vram_bar, va_start, va_size, base_class:int|None=None):
    if len((cls:=type(self)).gpus) == 0:
      cls.gpus = hcq_filter_visible_devices(System.pci_scan_bus(vendor, devices, base_class))
    # Acquire va range to avoid collisions.
    FileIOInterface.anon_mmap(va_start, va_size, 0, mmap.MAP_PRIVATE | mmap.MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED_NOREPLACE, 0)
    self.pci_dev, self.dev, self.vram_bar = PCIDevice(dev.__class__.__name__[:2], cls.gpus[dev_id], bars=bars, resize_bars=[vram_bar]), dev, vram_bar
    # base address peers use to reach this device's VRAM through its BAR
    self.p2p_base_addr = self.pci_dev.bar_info[vram_bar].addr
  def alloc(self, size:int, host=False, uncached=False, cpu_access=False, contiguous=False, force_devmem=False, **kwargs) -> HCQBuffer:
    """Allocate system or device memory and map it into the device's address space."""
    # NOTE: logic on macos is different, since bar is small
    should_use_sysmem = host or ((cpu_access if OSX else (uncached and cpu_access)) and not force_devmem)
    if should_use_sysmem:
      vaddr = self.dev_impl.mm.alloc_vaddr(size:=round_up(size, mmap.PAGESIZE), align=mmap.PAGESIZE)
      memview, paddrs = self.pci_dev.alloc_sysmem(size, vaddr=vaddr, contiguous=contiguous)
      mapping = self.dev_impl.mm.map_range(vaddr, size, [(paddr, 0x1000) for paddr in paddrs], aspace=AddrSpace.SYS, snooped=True, uncached=True)
      return HCQBuffer(vaddr, size, meta=PCIAllocationMeta(mapping, has_cpu_mapping=True, hMemory=paddrs[0]), view=memview, owner=self.dev)
    # VRAM path; the allocation must be contiguous when the CPU will access it through the BAR
    mapping = self.dev_impl.mm.valloc(size:=round_up(size, 0x1000), uncached=uncached, contiguous=cpu_access)
    barview = self.pci_dev.map_bar(bar=self.vram_bar, off=mapping.paddrs[0][0], size=mapping.size) if cpu_access else None
    return HCQBuffer(mapping.va_addr, size, view=barview, meta=PCIAllocationMeta(mapping, cpu_access, hMemory=mapping.paddrs[0][0]), owner=self.dev)
  def free(self, b:HCQBuffer):
    """Unmap from peer devices, then release the allocation and (if any) its CPU mapping."""
    for dev in b.mapped_devs[1:]: dev.iface.dev_impl.mm.unmap_range(b.va_addr, b.size)
    if b.meta.mapping.aspace is AddrSpace.PHYS: self.dev_impl.mm.vfree(b.meta.mapping)
    if b.owner == self.dev and b.meta.has_cpu_mapping and not OSX: FileIOInterface.munmap(b.va_addr, b.size)
  def map(self, b:HCQBuffer):
    """Map a buffer owned by the CPU or another PCI device into this device's address space."""
    if b.owner is not None and b.owner._is_cpu():
      # CPU memory: pin it and translate to physical pages
      System.lock_memory(int(b.va_addr), b.size)
      paddrs, aspace = [(x, 0x1000) for x in System.system_paddrs(int(b.va_addr), round_up(b.size, 0x1000))], AddrSpace.SYS
      snooped, uncached = True, True
    elif (ifa:=getattr(b.owner, "iface", None)) is not None and isinstance(ifa, LNXPCIIfaceBase):
      snooped, uncached = True, b.meta.mapping.uncached
      if b.meta.mapping.aspace is AddrSpace.SYS: paddrs, aspace = b.meta.mapping.paddrs, AddrSpace.SYS
      elif hasattr(ifa.dev_impl, 'paddr2xgmi') and ifa.dev_impl.gmc.xgmi_seg_sz > 0:
        # peer reachable over xgmi: translate physical addrs into the peer address space
        paddrs, aspace = [(ifa.dev_impl.paddr2xgmi(p), sz) for p, sz in b.meta.mapping.paddrs], AddrSpace.PEER
      # otherwise p2p through the peer's VRAM BAR: offset physical addrs by its BAR base
      else: paddrs, aspace = [(p + ifa.p2p_base_addr, sz) for p, sz in b.meta.mapping.paddrs], AddrSpace.SYS
    else: raise RuntimeError(f"map failed: {b.owner} -> {self.dev}")
    self.dev_impl.mm.map_range(int(b.va_addr), round_up(b.size, 0x1000), paddrs, aspace=aspace, snooped=snooped, uncached=uncached)
# *** Remote PCI Devices
class RemoteCmd(enum.IntEnum):
  """Opcodes of the remote-PCI RPC protocol (wire values must stay stable)."""
  MAP_BAR = 1
  MAP_SYSMEM_FD = 2
  CFG_READ = 3
  CFG_WRITE = 4
  RESET = 5
  MMIO_READ = 6
  MMIO_WRITE = 7
class RemoteMMIOInterface(MMIOInterface):
  """MMIO view whose element reads/writes are forwarded over the remote-PCI RPC socket."""
  def __init__(self, dev:RemotePCIDevice, residx:int, nbytes:int, fmt='B', off=0):
    # el_sz: bytes per element of the struct format `fmt`; off is a byte offset into the resource
    self.dev, self.residx, self.nbytes, self.fmt, self.off, self.el_sz = dev, residx, nbytes, fmt, off, struct.calcsize(fmt)
  def __getitem__(self, index):
    # normalize a scalar index to a one-element slice, then fetch the byte range remotely
    sl = index if isinstance(index, slice) else slice(index, index + 1)
    start, stop = (sl.start or 0) * self.el_sz, (sl.stop or len(self)) * self.el_sz
    data = self.dev._bulk_read(RemoteCmd.MMIO_READ, self.residx, self.off + start, stop - start)
    # 'B' format returns raw bytes; anything else is unpacked little-endian into a list
    result = data if self.fmt == 'B' else list(struct.unpack(f'<{(stop - start) // self.el_sz}{self.fmt}', data))
    return result if isinstance(index, slice) else result[0]
  def __setitem__(self, index, val):
    start = (index.start or 0) * self.el_sz if isinstance(index, slice) else index * self.el_sz
    # slice assignment takes a sequence (or raw bytes for 'B'); scalar assignment packs one element
    data = (val if self.fmt == 'B' else struct.pack(f'<{len(val)}{self.fmt}', *val)) if isinstance(index, slice) else struct.pack(f'<{self.fmt}', val)
    self.dev._bulk_write(RemoteCmd.MMIO_WRITE, self.residx, self.off + start, data)
  def view(self, offset:int=0, size:int|None=None, fmt=None):
    # offset/size are in bytes (offset is added directly to the running byte offset)
    return RemoteMMIOInterface(self.dev, self.residx, size or (self.nbytes - offset), fmt or self.fmt, self.off + offset)
class RemotePCIDevice(PCIDevice):
  """PCIDevice proxied over a socket: config space, BARs and sysmem are served by a remote helper."""
  def __init__(self, devpref:str, pcibus:str, bars:list[int], sock:socket.socket):
    self.lock_fd = System.flock_acquire(f"{devpref.lower()}_{pcibus.lower()}.lock")
    self.pcibus, self.sock = pcibus, sock
    # large socket buffers: BAR and sysmem traffic moves bulk data through this socket
    for buft in [socket.SO_SNDBUF, socket.SO_RCVBUF]: self.sock.setsockopt(socket.SOL_SOCKET, buft, 64 << 20)
    self.bar_info = {b: PCIBarInfo(0, self._rpc(RemoteCmd.MAP_BAR, b)[0]) for b in bars}
  def _recvall(self, n:int) -> bytes:
    """Read exactly n bytes from the socket; raise if the peer closes early."""
    data = b''
    while len(data) < n and (chunk:=self.sock.recv(n - len(data))): data += chunk
    if len(data) < n: raise RuntimeError("Connection closed")
    return data
  def _recv_with_fd(self) -> tuple[bytes, int|None]:
    # 17-byte response header ('<BQQ') plus one file descriptor in ancillary data
    msg, anc, _, _ = self.sock.recvmsg(17, socket.CMSG_LEN(4))
    return msg, struct.unpack('<i', anc[0][2][:4])[0]
  def _rpc(self, cmd:int, *args:int, readout_size:int=0, has_fd=False) -> tuple[int, int, bytes|None, int|None]:
    """Send one request and return (ret0, ret1, payload or None, fd or None).

    Request wire format '<BBQQQ': command byte, one byte arg, three u64 args (missing args zero-padded).
    Response '<BQQ': status byte (nonzero = error; then ret0 is the error-message length) and two u64s.
    """
    self.sock.sendall(struct.pack('<BBQQQ', cmd, *(*args, 0, 0, 0, 0)[:4]))
    msg, fd = self._recv_with_fd() if has_fd else (self._recvall(17), None)
    if (resp:=struct.unpack('<BQQ', msg))[0] != 0:
      raise RuntimeError(f"RPC failed: {self._recvall(resp[1]).decode('utf-8') if resp[1] > 0 else 'unknown error'}")
    return (resp[1], resp[2]) + ((self._recvall(readout_size) if readout_size > 0 else None),) + (fd,)
  def _bulk_read(self, cmd:int, idx:int, offset:int, size:int) -> bytes: return unwrap(self._rpc(cmd, idx, offset, size, readout_size=size)[2])
  def _bulk_write(self, cmd:int, idx:int, offset:int, data:bytes): self.sock.sendall(struct.pack('<BBQQQ', cmd, idx, offset, len(data), 0) + data)
  def alloc_sysmem(self, size:int, vaddr:int=0, contiguous:bool=False) -> tuple[MMIOInterface, list[int]]:
    """Ask the remote to allocate system memory; mmap the returned fd locally and parse the paddr table."""
    mapped_size, _, _, fd = self._rpc(RemoteCmd.MAP_SYSMEM_FD, 0, 0, size, has_fd=True)
    memview = MMIOInterface(FileIOInterface(fd=fd).mmap(0, mapped_size, mmap.PROT_READ | mmap.PROT_WRITE, mmap.MAP_SHARED, 0), mapped_size, fmt='B')
    # paddrs are returned as (paddr, size) pairs until a (paddr=0, size=0) terminator in the beginning of the mapping.
    paddrs_raw = list(itertools.takewhile(lambda p: p[1] != 0, zip(memview.view(fmt='Q')[0::2], memview.view(fmt='Q')[1::2])))
    return memview, [p + i for p, sz in paddrs_raw for i in range(0, sz, 0x1000)][:ceildiv(size, 0x1000)]
  def read_config(self, offset:int, size:int): return self._rpc(RemoteCmd.CFG_READ, 0, offset, size)[0]
  def write_config(self, offset:int, value:int, size:int): self._rpc(RemoteCmd.CFG_WRITE, 0, offset, size, value)
  def reset(self): self._rpc(RemoteCmd.RESET, 0, 0, 0)
  def map_bar(self, bar:int, off:int=0, addr:int=0, size:int|None=None, fmt='B') -> MMIOInterface:
    return RemoteMMIOInterface(self, bar, size or self.bar_info[bar].size, fmt).view(off, size, fmt)
class APLRemotePCIDevice(RemotePCIDevice):
  """RemotePCIDevice that connects to (and if needed spawns) the local TinyGPU helper app."""
  APP_PATH = "/Applications/TinyGPU.app/Contents/MacOS/TinyGPU"
  @staticmethod
  def install_tinygpu():
    """Download TinyGPU.app into /Applications and run its installer."""
    print("Downloading TinyGPU.app...")
    system(f"ditto -xk {fetch('https://github.com/nimlgen/tinygpu_releases/raw/8120b5508b43149d27bf22f9a4e6d7c5a4b401e9/TinyGPU.zip')} /Applications")
    print(system(f"{APLRemotePCIDevice.APP_PATH} install"))
  def __init__(self, devpref:str, pcibus:str, bars:list[int], resize_bars:list[int]|None=None):
    sock_path, sock = getenv("APL_REMOTE_SOCK", temp("tinygpu.sock")), socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    # retry the connect for up to ~5s; spawn the server process on the first failed attempt
    for i in range(100):
      with contextlib.suppress(ConnectionRefusedError, FileNotFoundError):
        sock.connect(sock_path)
        break
      if i == 0: subprocess.Popen([self.APP_PATH, "server", sock_path], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
      time.sleep(0.05)
    else: raise RuntimeError(f"Failed to connect to TinyGPU server at {sock_path}.")  # for-else: all attempts failed
    super().__init__(devpref, pcibus, bars, sock)
class APLRemoteIfaceBase(LNXPCIIfaceBase):
  """PCI interface for macOS: devices are reached through the TinyGPU helper instead of sysfs."""
  def __init__(self, dev, dev_id, vendor, devices:list[tuple[int, list[int]]], bars, vram_bar, va_start, va_size, base_class:int|None=None):
    if not (cls:=type(self)).gpus:
      cls.gpus = System.pci_scan_bus(vendor, devices, base_class)
      if not cls.gpus: raise RuntimeError("No supported GPUs found")
      # the helper app is required to talk to the hardware; install it on first use
      if not os.path.exists(APLRemotePCIDevice.APP_PATH): APLRemotePCIDevice.install_tinygpu()
    if dev_id >= len(cls.gpus): raise RuntimeError(f"No device found for {dev_id}. Requesting more devices than the system has ({cls.gpus})?")
    self.pci_dev = APLRemotePCIDevice(dev.__class__.__name__[:2], f'remote:{dev_id}', bars)
    self.dev, self.vram_bar = dev, vram_bar
  def free(self, b:HCQBuffer):
    # NOTE(review): unlike the Linux path there is no vfree/munmap here — presumably the remote helper
    # owns the backing memory; confirm before relying on this for leak accounting
    for dev in b.mapped_devs[1:]: dev.iface.dev_impl.mm.unmap_range(b.va_addr, b.size)
  def map(self, b:HCQBuffer): raise RuntimeError(f"P2P mapping not supported for remote devices: {b.owner} -> {self.dev}")
PCIIfaceBase:type = APLRemoteIfaceBase if OSX else LNXPCIIfaceBase  # macOS goes through the TinyGPU helper; elsewhere use the native Linux interface
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "tinygrad/runtime/support/system.py",
"license": "MIT License",
"lines": 279,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
tinygrad/tinygrad:test/unit/test_dtype_spec.py | import unittest, math, subprocess
from tinygrad.tensor import Tensor, dtypes, Device
from tinygrad.dtype import DType, DTYPES_DICT
from tinygrad.device import is_dtype_supported
from tinygrad.helpers import getenv, DEBUG
from test.helpers import slow
from hypothesis import given, settings, strategies as strat
import numpy as np
import torch
# cap hypothesis examples for CI speed; derandomize when DERANDOMIZE_CI is set
settings.register_profile("my_profile", max_examples=50, deadline=None, derandomize=getenv("DERANDOMIZE_CI", False))
settings.load_profile("my_profile")
core_dtypes = list(DTYPES_DICT.values())
# subsets of core dtypes actually supported on the default device
dtype_ints = [dt for dt in core_dtypes if dtypes.is_int(dt) and is_dtype_supported(dt)]
dtype_floats = [dt for dt in core_dtypes if dtypes.is_float(dt) and is_dtype_supported(dt)]
# largest finite values representable in the two fp8 formats
FP8E4M3_MAX = 448.0
FP8E5M2_MAX = 57344.0
def _assert_eq(tensor:Tensor, target_dtype:DType, target, tol_target_dtype:float=1e-7):
  """Assert `tensor` has dtype `target_dtype` and values close to `target` (dtype-dependent tolerance)."""
  if DEBUG >= 2: print(tensor.numpy())
  # low-precision formats get looser relative tolerances
  rtol = {dtypes.float16:1e-3, dtypes.bfloat16:1e-2, dtypes.fp8e4m3:1e-1, dtypes.fp8e5m2:5e-1}.get(target_dtype, tol_target_dtype)
  try:
    assert tensor.dtype == target_dtype
    np.testing.assert_allclose(tensor.numpy(), target, rtol=rtol)
  except AssertionError as e:
    raise AssertionError(f"\ntensor {tensor.numpy()} dtype {tensor.dtype} does not match target {target} with dtype {target_dtype}") from e
class TestTypeSpec(unittest.TestCase):
  """Checks the dtypes produced by tensor-creation ops under every supported (default_int, default_float) pair."""
  def setUp(self):
    # save the global defaults so each test can mutate them freely
    self.old_default_int, self.old_default_float = dtypes.default_int, dtypes.default_float
  def tearDown(self):
    dtypes.default_int, dtypes.default_float = self.old_default_int, self.old_default_float
  @unittest.skip("this test is slow and spawning whole pythons")
  def test_env_set_default_float(self):
    # check default
    subprocess.run(['python3 -c "from tinygrad import dtypes; assert dtypes.default_float == dtypes.float"'],
                   shell=True, check=True)
    # check change
    subprocess.run(['DEFAULT_FLOAT=HALF python3 -c "from tinygrad import dtypes; assert dtypes.default_float == dtypes.half"'],
                   shell=True, check=True)
    # check invalid
    with self.assertRaises(subprocess.CalledProcessError):
      subprocess.run(['DEFAULT_FLOAT=INT32 python3 -c "from tinygrad import dtypes"'],
                     shell=True, check=True)
    with self.assertRaises(subprocess.CalledProcessError):
      subprocess.run(['DEFAULT_FLOAT=TYPO python3 -c "from tinygrad import dtypes"'],
                     shell=True, check=True)
  @unittest.skipUnless(is_dtype_supported(dtypes.int8), f"no int8 on {Device.DEFAULT}")
  def test_dtype_str_arg(self):
    # string dtype args must behave exactly like their DType counterparts
    n = np.random.normal(0, 1, (10, 10)).astype(np.float32)
    tested = 0
    for dtype_str, dtype in [
      ("bool", dtypes.bool), ("int8", dtypes.int8), ("int", dtypes.int), ("uint32", dtypes.uint32), ("float32", dtypes.float32)]:
      np.testing.assert_equal(Tensor(n, dtype=dtype_str).numpy(), Tensor(n, dtype=dtype).numpy())
      np.testing.assert_equal(Tensor(n).cast(dtype_str).numpy(), Tensor(n).cast(dtype).numpy())
      if dtype.itemsize == 4:
        np.testing.assert_equal(Tensor(n).bitcast(dtype_str).numpy(), Tensor(n).bitcast(dtype).numpy())
        tested += 1
    assert tested == 3
    with self.assertRaises(AttributeError): Tensor([1, 2, 3], dtype="nonexistdtype")
    with self.assertRaises(AttributeError): Tensor([1, 2, 3], dtype="")
    np.testing.assert_equal(Tensor(n).sum(dtype="int16").numpy(), Tensor(n).sum(dtype=dtypes.int16).numpy())
  @given(strat.sampled_from(dtype_ints), strat.sampled_from(dtype_floats))
  def test_creation(self, default_int, default_float):
    dtypes.default_int, dtypes.default_float = default_int, default_float
    _assert_eq(Tensor(True), dtypes.bool, True)
    _assert_eq(Tensor(None), dtypes.default_float, [])
    _assert_eq(Tensor(2), dtypes.default_int, 2)
    _assert_eq(Tensor(2.34), dtypes.default_float, 2.34)
    _assert_eq(Tensor([]), dtypes.default_float, [])
    _assert_eq(Tensor([1]), dtypes.default_int, [1])
    _assert_eq(Tensor([1.1]), dtypes.default_float, [1.1])
    _assert_eq(Tensor.eye(0), dtypes.default_float, np.eye(0))
    _assert_eq(Tensor.eye(3), dtypes.default_float, np.eye(3))
    _assert_eq(Tensor.eye(3, dtype=dtypes.int64), dtypes.int64, np.eye(3))
    if is_dtype_supported(dtypes.float16):
      _assert_eq(Tensor.eye(3, dtype=dtypes.float16), dtypes.float16, np.eye(3))
  @given(strat.sampled_from(dtype_ints), strat.sampled_from(dtype_floats))
  def test_full(self, default_int, default_float):
    dtypes.default_int, dtypes.default_float = default_int, default_float
    _assert_eq(Tensor.zeros((2, 3)), dtypes.default_float, np.zeros((2, 3)))
    _assert_eq(Tensor.zeros((2, 3), dtype=dtypes.int64), dtypes.int64, np.zeros((2, 3)))
    if is_dtype_supported(dtypes.float16):
      _assert_eq(Tensor.zeros((2, 3), dtype=dtypes.float16), dtypes.float16, np.zeros((2, 3)))
    _assert_eq(Tensor.ones((2, 3)), dtypes.default_float, np.ones((2, 3)))
    _assert_eq(Tensor.ones((2, 3), dtype=dtypes.int64), dtypes.int64, np.ones((2, 3)))
    if is_dtype_supported(dtypes.float16):
      _assert_eq(Tensor.ones((2, 3), dtype=dtypes.float16), dtypes.float16, np.ones((2, 3)))
    # fill value picks the default dtype; an explicit dtype always wins
    _assert_eq(Tensor.full((2, 3), 3.0), dtypes.default_float, np.full((2, 3), 3.0))
    _assert_eq(Tensor.full((2, 3), 3), dtypes.default_int, np.full((2, 3), 3))
    _assert_eq(Tensor.full((2, 3), True), dtypes.bool, np.full((2, 3), True))
    _assert_eq(Tensor.full((2, 3), 3, dtype=dtypes.int64), dtypes.int64, np.full((2, 3), 3))
    _assert_eq(Tensor.full((2, 3), 3.0, dtype=dtypes.int64), dtypes.int64, np.full((2, 3), 3))
    if is_dtype_supported(dtypes.float16):
      _assert_eq(Tensor.full((2, 3), 3, dtype=dtypes.float16), dtypes.float16, np.full((2, 3), 3))
      _assert_eq(Tensor.full((2, 3), 3.0, dtype=dtypes.float16), dtypes.float16, np.full((2, 3), 3))
  @given(strat.sampled_from(dtype_ints), strat.sampled_from(dtype_floats))
  def test_reduce_0d_default(self, default_int, default_float):
    dtypes.default_int, dtypes.default_float = default_int, default_float
    _assert_eq(Tensor.ones((2,3,0)).sum(2), dtypes.default_float, np.zeros((2, 3)))
    # TODO: what should this one be?
    # _assert_eq(Tensor.ones((2,3,0), dtype=dtypes.default_int).sum(2), dtypes.default_int, np.zeros((2, 3)))
    _assert_eq(Tensor.ones((2,3,0), dtype=dtypes.int32).sum(2), dtypes.int32, np.zeros((2, 3)))
  @given(strat.sampled_from(dtype_ints), strat.sampled_from(dtype_floats))
  def test_arange(self, default_int, default_float):
    dtypes.default_int, dtypes.default_float = default_int, default_float
    _assert_eq(Tensor.arange(5), dtypes.default_int, np.arange(5))
    _assert_eq(Tensor.arange(120), dtypes.default_int, np.arange(120))
    _assert_eq(Tensor.arange(5.0), dtypes.default_float, np.arange(5))
    if is_dtype_supported(dtypes.int16):
      _assert_eq(Tensor.arange(5, dtype=dtypes.int16), dtypes.int16, np.arange(5))
    _assert_eq(Tensor.arange(5, dtype=dtypes.int64), dtypes.int64, np.arange(5))
    if is_dtype_supported(dtypes.float16):
      _assert_eq(Tensor.arange(5, dtype=dtypes.float16), dtypes.float16, np.arange(5))
    _assert_eq(Tensor.arange(3, 9, 0.7), dtypes.default_float, np.arange(3, 9, 0.7), 1e-6 if Device.DEFAULT == "WEBGPU" else 1e-7)
    _assert_eq(Tensor.arange(3, 8.5, 3), dtypes.default_float, np.arange(3, 8.5, 3))
    # stop-start and step have different signs
    _assert_eq(Tensor.arange(3, 5, -2), dtypes.default_int, np.arange(3, 5, -2))
    _assert_eq(Tensor.arange(5.0, 3.0), dtypes.default_float, np.arange(5.0, 3.0))
class TestAutoCastType(unittest.TestCase):
  """Checks dtype promotion/preservation rules for unary ops, reductions, gradients and softmax."""
  def setUp(self):
    # save the global defaults so each test can mutate them freely
    self.old_default_int, self.old_default_float = dtypes.default_int, dtypes.default_float
  def tearDown(self):
    dtypes.default_int, dtypes.default_float = self.old_default_int, self.old_default_float
  @given(strat.sampled_from([d for d in core_dtypes if dtypes.is_int(d) and is_dtype_supported(d)]))
  def test_int_to_float_unary_func(self, dtype):
    # transcendental-style unary ops on int tensors must promote and match torch numerically
    for func in [
      lambda t: t.exp(),
      lambda t: t.exp2(),
      lambda t: t.log(),
      lambda t: t.log2(),
      lambda t: t.sqrt(),
      lambda t: t.rsqrt(),
      lambda t: t.sin(),
      lambda t: t.cos(),
      lambda t: t.tan(),
      lambda t: t.sigmoid(),
    ]:
      a = [2, 3, 4]
      # float16 can have larger precision errors
      np.testing.assert_allclose(func(Tensor(a, dtype=dtype)).numpy(), func(torch.tensor(a)), rtol=1e-3, atol=1e-3)
  @unittest.skipUnless(is_dtype_supported(dtypes.float16), "need float16")
  def test_sum_dtype_arg(self):
    t = Tensor([40000, 40000], dtype=dtypes.float16)
    # default float16 sum returns in float16, overflowed in this case
    assert t.sum().dtype == dtypes.float16
    assert math.isinf(t.sum().numpy().item())
    # specifying dtype and it's not downcasted
    assert t.sum(dtype=dtypes.float32).dtype == dtypes.float32
    np.testing.assert_allclose(t.sum(dtype=dtypes.float32).numpy(), 80000)
  def test_prod_dtype_arg(self):
    t = Tensor([100, 200], dtype=dtypes.int32)
    assert t.prod().dtype == dtypes.int32
    np.testing.assert_allclose(t.prod().numpy(), 20000)
    assert t.prod(dtype=dtypes.float32).dtype == dtypes.float32
    np.testing.assert_allclose(t.prod(dtype=dtypes.float32).numpy(), 20000)
  def test_gradient_dtype(self):
    # gradients must come back in the tensor's own dtype for every (default, tensor) dtype pair
    old_default_float = dtypes.default_float
    for default_dtype in dtypes.floats:
      if not is_dtype_supported(default_dtype): continue
      dtypes.default_float = default_dtype
      for dtype in dtypes.floats:
        if not is_dtype_supported(dtype): continue
        if DEBUG >= 2:
          print(f"testing {default_dtype=}, {dtype=}")
        a = Tensor([1, 2, 3], dtype=dtype, requires_grad=True)
        b = (a * 5).sum()
        b.backward() # if there is dtype mismatch, lazy should assert
        assert a.grad.dtype == a.dtype
        np.testing.assert_allclose(a.grad.numpy(), [5, 5, 5])
    dtypes.default_float = old_default_float
  @unittest.skipIf(Device.DEFAULT == "PYTHON", "very slow")
  @slow
  @unittest.skipIf(Device.DEFAULT == "WEBGPU", "Binding size is larger than the maximum storage buffer binding size")
  @unittest.skipUnless(is_dtype_supported(dtypes.half), "need half")
  def test_mean_half_precision_underflow(self):
    # tiny values: the mean must not underflow in half precision
    N = 10000
    x = 0.001
    t = Tensor([[x]], dtype=dtypes.half, requires_grad=True).expand(N, N).contiguous()
    np.testing.assert_allclose(t.mean(axis=1).numpy(), np.array([x] * N, dtype=np.float16), rtol=1e-3)
  @unittest.skip("this test only works with SPLIT_REDUCEOP=1")
  @unittest.skipUnless(is_dtype_supported(dtypes.half), "need half")
  def test_mean_half_precision_overflow(self):
    # values near the half max: the mean (and its gradient) must not overflow
    N = 256
    t = Tensor([60000] * N*N, dtype=dtypes.half, requires_grad=True).reshape(N, N)
    np.testing.assert_allclose(t.mean().numpy(), 60000)
    t.square().mean().backward()
    np.testing.assert_allclose(t.grad.numpy().flatten(), [60000 * 2 / (N*N)] * N*N)
  @unittest.skipIf(Device.DEFAULT == "WEBGPU", "Precision error")
  @unittest.skipUnless(is_dtype_supported(dtypes.half), "need half")
  def test_softmax_dtype(self):
    # softmax keeps the input dtype by default; an explicit dtype arg overrides it
    data = [1, 2, 3]
    t = Tensor(data, dtype=dtypes.half)
    tt = torch.tensor(data, dtype=torch.half)
    out = t.softmax(0)
    self.assertEqual(out.dtype, dtypes.half)
    np.testing.assert_allclose(out.numpy(), tt.softmax(0).numpy(), rtol=1e-3)
    out = t.softmax(0, dtype=dtypes.float)
    self.assertEqual(out.dtype, dtypes.float)
    np.testing.assert_allclose(out.numpy(), tt.softmax(0, dtype=torch.float).numpy(), rtol=1e-3)
    out = t.log_softmax(0)
    self.assertEqual(out.dtype, dtypes.half)
    np.testing.assert_allclose(out.numpy(), tt.log_softmax(0).numpy(), rtol=1e-3)
    out = t.log_softmax(0, dtype=dtypes.float)
    self.assertEqual(out.dtype, dtypes.float)
    np.testing.assert_allclose(out.numpy(), tt.log_softmax(0, dtype=torch.float).numpy(), rtol=1e-3)
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/unit/test_dtype_spec.py",
"license": "MIT License",
"lines": 203,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:test/external/external_benchmark_keccak.py | from tinygrad import Tensor, dtypes
from tinygrad.engine.jit import TinyJit
from tinygrad.helpers import Timing, getenv
if __name__ == "__main__":
  # benchmark parameters, all overridable via environment variables
  BS = getenv("BS", 2**14)
  BLOCKSIZE = getenv("BLOCKSIZE", 4096)
  HASHFN = getenv("HASHFN", "shake_128")
  NRUNS = getenv("NRUNS", 5)
  # JIT the hash so repeated runs measure kernel time, not compilation
  @TinyJit
  def hasher(data: Tensor): return data.keccak(HASHFN)
  t = Tensor.randn(BS, BLOCKSIZE, dtype=dtypes.uint8).realize()
  ds_mib = t.nbytes() / 1024**2
  print(f"--- benchmarking (hash: {HASHFN}, data size: {ds_mib} MiB, block size: {BLOCKSIZE} B, batch size: {BS})")
  for i in range(NRUNS):
    # Timing passes elapsed ns to the second arg, which formats the throughput
    with Timing(f"run: {i+1}, elapsed time: ", (lambda et: f", throughput: {ds_mib / (et*1e-9):.2f} MiB/s")):
      hasher(t).realize()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/external/external_benchmark_keccak.py",
"license": "MIT License",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:test/external/external_test_keccak.py | import unittest, zipfile, re
from tinygrad import Tensor
from tinygrad.helpers import fetch, tqdm
# NIST CAVP byte-oriented test vector archives for SHA-3 and SHAKE
SHA3_URL = "https://csrc.nist.gov/CSRC/media/Projects/Cryptographic-Algorithm-Validation-Program/documents/sha3/sha-3bytetestvectors.zip"
SHAKE_URL = "https://csrc.nist.gov/CSRC/media/Projects/Cryptographic-Algorithm-Validation-Program/documents/sha3/shakebytetestvectors.zip"
class TestExternalKeccak(unittest.TestCase):
  """Checks tinygrad's keccak implementation against the official NIST test vectors."""
  def test_sha3_224(self): self.check_nist_vectors(SHA3_URL, ["SHA3_224LongMsg.rsp", "SHA3_224ShortMsg.rsp"], "sha3_224")
  def test_sha3_256(self): self.check_nist_vectors(SHA3_URL, ["SHA3_256LongMsg.rsp", "SHA3_256ShortMsg.rsp"], "sha3_256")
  def test_shake_128(self): self.check_nist_vectors(SHAKE_URL, ["SHAKE128LongMsg.rsp", "SHAKE128ShortMsg.rsp"], "shake_128")
  def check_nist_vectors(self, url: str, filenames: list[str], preset: str):
    """Download the vector zip, parse its Len/Msg/MD|Output entries, and verify each digest (< 8192-bit messages)."""
    # .rsp files contain entries of the form: Len = <bits>, Msg = <hex>, MD|Output = <hex>
    pattern = r"Len\s*=\s*(?P<Len>\d+)\s+Msg\s*=\s*(?P<Msg>[0-9a-fA-F\s]+)\s+(MD|Output)\s*=\s*(?P<Output>[0-9a-fA-F]+)"
    vecs_zip = fetch(url)
    for filename in filenames:
      vecs = zipfile.ZipFile(vecs_zip).open(filename).read().decode()
      vectors = [ (l, bytes.fromhex(match["Msg"].lower()), bytes.fromhex(match["Output"].lower()))
                  for match in re.finditer(pattern, vecs) if (l:=int(match["Len"])) < 8192 ]
      self.assertTrue(len(vectors) > 0)
      print("file", filename)
      for data_len, data, output in tqdm(vectors):
        # Len is in bits; truncate the message buffer to its byte length
        tinyout = bytes(Tensor(data[:data_len//8]).keccak(preset).data())
        self.assertEqual(tinyout, output)
if __name__ == '__main__':
  # run all NIST vector tests when executed as a script
  unittest.main()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/external/external_test_keccak.py",
"license": "MIT License",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:extra/mmapeak/mmapeak.py | import os
# TODO: there is a timing bug without this
os.environ["AMD_AQL"] = "1"
from tinygrad import Tensor, Device
from tinygrad.helpers import getenv
from tinygrad.uop.ops import UOp, Ops, KernelInfo
from tinygrad.renderer import Estimates
from tinygrad.renderer.amd.dsl import Reg, Inst, s, v
NUM_WORKGROUPS = 96  # default launch grid size; rebound per-arch in __main__
WAVE_SIZE = 32  # threads per wave (the CDNA branch below rebinds this to 64)
NUM_WAVES = 4  # waves per workgroup (block_size = WAVE_SIZE * NUM_WAVES)
FLOPS_PER_MATMUL = 16*16*16*2  # 16x16x16 tile, mul+add = 2 ops; rebound for wider-K instructions below
INTERNAL_LOOP = getenv("LOOP", 10_000)  # hardware loop iterations per kernel launch
INSTRUCTIONS_PER_LOOP = 200  # matmul instructions unrolled inside the loop body
def repeat(insts:list[Inst], n:int, counter_sreg:Reg) -> list[Inst]:
  """Wrap `insts` in a hardware countdown loop that executes them `n` times, then s_endpgm.

  `counter_sreg` is clobbered: it is initialized to n and decremented to 0.
  """
  insts_bytes = b"".join([inst.to_bytes() for inst in insts])
  sub_inst, cmp_inst = s_sub_u32(counter_sreg, counter_sreg, 1), s_cmp_lg_i32(counter_sreg, 0)
  # loop body size in bytes: the repeated instructions plus the decrement and compare
  loop_sz = len(insts_bytes) + sub_inst.size() + cmp_inst.size()
  # backwards branch: simm16 is a dword (4-byte) offset relative to the instruction after
  # the branch (hence the +1), encoded as 16-bit two's complement via the & 0xFFFF mask
  branch_inst = s_cbranch_scc1(simm16=-((loop_sz // 4) + 1) & 0xFFFF)
  return [s_mov_b32(counter_sreg, n)] + insts + [sub_inst, cmp_inst, branch_inst, s_endpgm()]
def launchBenchmark(instruction, vgprIndices, dense=True, accum=False, **kwargs):
  """Build a kernel that spams `instruction` in a hardware loop and print measured T(FL)OPS.

  vgprIndices are the bounds used to slice the `v` register file for the operands:
  dense/accum variants reuse the same VGPR range for both sources; the sparse
  (swmmac) form takes separate A/B ranges plus a sparsity-index register.
  """
  if accum:
    # accumulate variant: C/D in place (acc_cd=1), extra encoding fields via kwargs
    inst = instruction(v[0:vgprIndices[0]], v[vgprIndices[1]:vgprIndices[2]], v[vgprIndices[1]:vgprIndices[2]], 1, acc_cd=1, **kwargs)
  elif dense:
    inst = instruction(v[0:vgprIndices[0]], v[vgprIndices[1]:vgprIndices[2]], v[vgprIndices[1]:vgprIndices[2]], 1)
  else:
    # sparse (swmmac) variant: separate A and B ranges plus an index register
    inst = instruction(v[0:vgprIndices[0]], v[vgprIndices[1]:vgprIndices[2]], v[vgprIndices[3]:vgprIndices[4]], v[vgprIndices[5]])
  insts = repeat([inst for _ in range(INSTRUCTIONS_PER_LOOP)], n=INTERNAL_LOOP, counter_sreg=s[1])
  def fxn(A:UOp) -> UOp:
    # custom kernel: dummy sink with hand-written instruction stream attached via Ops.LINEAR
    threads = UOp.special(WAVE_SIZE * NUM_WAVES, "lidx0")
    gidx = UOp.special(NUM_WORKGROUPS, "gidx0")
    FLOPs = FLOPS_PER_MATMUL * NUM_WAVES * NUM_WORKGROUPS * INTERNAL_LOOP * INSTRUCTIONS_PER_LOOP
    sink = UOp.sink(A.base, threads, gidx, arg=KernelInfo(inst.op.name.lower(), estimates=Estimates(ops=FLOPs, mem=0)))
    return UOp(Ops.PROGRAM, src=(sink, UOp(Ops.DEVICE, arg="AMD"), UOp(Ops.LINEAR, src=tuple([UOp(Ops.INS, arg=x) for x in insts]))))
  dummy = Tensor.zeros(1).contiguous().realize()
  out = Tensor.custom_kernel(dummy, fxn=fxn)[0]
  ei = out.schedule()[-1].lower()
  # best-of-2 to hide warmup effects
  elapsed = min([ei.run(wait=True) for _ in range(2)])
  FLOPs = FLOPS_PER_MATMUL * NUM_WAVES * NUM_WORKGROUPS * INTERNAL_LOOP * INSTRUCTIONS_PER_LOOP
  # NOTE(review): kernel name above uses inst.op.name while this print uses inst.op_name —
  # presumably both attributes exist on Inst; verify against the dsl module
  print(f"{inst.op_name.lower():<29} : {FLOPs/elapsed/10**12:.2f} T(FL)OPS")
if __name__=="__main__":
  # dispatch on GPU architecture: each branch wildcard-imports that ISA's instruction
  # encoders and re-tunes the launch constants before running the benchmarks
  DEV = Device[Device.DEFAULT]
  arch = DEV.renderer.arch
  if arch in {'gfx1100', 'gfx1103', 'gfx1151'}:
    from tinygrad.runtime.autogen.amd.rdna3.ins import *
    if arch == 'gfx1103': NUM_WORKGROUPS = 8
    if arch == 'gfx1151': NUM_WORKGROUPS = 32
    launchBenchmark(v_wmma_bf16_16x16x16_bf16, (7,8,15))
    launchBenchmark(v_wmma_f16_16x16x16_f16, (7,8,15))
    launchBenchmark(v_wmma_f32_16x16x16_bf16, (7,8,15))
    launchBenchmark(v_wmma_f32_16x16x16_f16, (7,8,15))
    launchBenchmark(v_wmma_i32_16x16x16_iu4, (7,8,9))
    launchBenchmark(v_wmma_i32_16x16x16_iu8, (7,8,11))
  elif arch in {'gfx1200', 'gfx1201'}:
    from tinygrad.runtime.autogen.amd.rdna4.ins import *
    # this instruction does not exist in the rdna4 isa, use the co version
    s_sub_u32 = s_sub_co_u32
    NUM_WORKGROUPS = 64
    launchBenchmark(v_wmma_bf16_16x16x16_bf16, (3,4,7))
    launchBenchmark(v_wmma_f16_16x16x16_f16, (3,4,7))
    launchBenchmark(v_wmma_f32_16x16x16_bf16, (7,8,11))
    launchBenchmark(v_wmma_f32_16x16x16_f16, (7,8,11))
    launchBenchmark(v_wmma_i32_16x16x16_iu4, (7,8,8))
    launchBenchmark(v_wmma_i32_16x16x16_iu8, (7,8,9))
    launchBenchmark(v_wmma_f32_16x16x16_fp8_fp8, (7,8,9))
    launchBenchmark(v_wmma_f32_16x16x16_fp8_bf8, (7,8,9))
    launchBenchmark(v_wmma_f32_16x16x16_bf8_fp8, (7,8,9))
    launchBenchmark(v_wmma_f32_16x16x16_bf8_bf8, (7,8,9))
    # K doubles to 32 for the remaining instructions, so ops per matmul double too
    FLOPS_PER_MATMUL = 16*16*32*2
    launchBenchmark(v_wmma_i32_16x16x32_iu4, (7,8,9))
    launchBenchmark(v_swmmac_f32_16x16x32_f16, (7,8,11,12,19,20), False)
    launchBenchmark(v_swmmac_f32_16x16x32_bf16, (7,8,11,12,19,20), False)
    launchBenchmark(v_swmmac_f16_16x16x32_f16, (3,4,7,8,15,16), False)
    launchBenchmark(v_swmmac_bf16_16x16x32_bf16, (3,4,7,8,15,16), False)
    launchBenchmark(v_swmmac_i32_16x16x32_iu8, (7,8,9,10,13,14), False)
    launchBenchmark(v_swmmac_i32_16x16x32_iu4, (7,8,8,9,10,11), False)
    launchBenchmark(v_swmmac_f32_16x16x32_fp8_fp8, (7,8,9,10,13,14), False)
    launchBenchmark(v_swmmac_f32_16x16x32_fp8_bf8, (7,8,9,10,13,14), False)
    launchBenchmark(v_swmmac_f32_16x16x32_bf8_fp8, (7,8,9,10,13,14), False)
    launchBenchmark(v_swmmac_f32_16x16x32_bf8_bf8, (7,8,9,10,13,14), False)
    FLOPS_PER_MATMUL = 16*16*64*2
    launchBenchmark(v_swmmac_i32_16x16x64_iu4, (7,8,9,10,13,14), False)
  elif arch == 'gfx950':
    from tinygrad.runtime.autogen.amd.cdna.ins import *
    # CDNA: 64-wide waves
    NUM_WORKGROUPS = 256
    WAVE_SIZE = 64
    NUM_WAVES = 4
    launchBenchmark(v_mfma_f32_16x16x16_f16, (3,0,1), accum=True)
    launchBenchmark(v_mfma_f32_16x16x16_bf16, (3,0,1), accum=True)
    FLOPS_PER_MATMUL = 16*16*32*2
    launchBenchmark(v_mfma_f32_16x16x32_f16, (3,0,3), accum=True)
    launchBenchmark(v_mfma_f32_16x16x32_bf16, (3,0,3), accum=True)
    FLOPS_PER_MATMUL = 16*16*128*2
    launchBenchmark(v_mfma_f32_16x16x128_f8f6f4, (3,0,7), accum=True) # fp8
    launchBenchmark(v_mfma_f32_16x16x128_f8f6f4, (3,0,5), accum=True, cbsz=2, blgp=2) # fp6
    launchBenchmark(v_mfma_f32_16x16x128_f8f6f4, (3,0,3), accum=True, cbsz=4, blgp=4) # fp4
  else:
    raise RuntimeError(f"arch {arch} not supported.")
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "extra/mmapeak/mmapeak.py",
"license": "MIT License",
"lines": 98,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
tinygrad/tinygrad:tinygrad/runtime/autogen/comgr_3.py | # mypy: disable-error-code="empty-body"
from __future__ import annotations
import ctypes
from typing import Annotated, Literal, TypeAlias
from tinygrad.runtime.support.c import _IO, _IOW, _IOR, _IOWR
from tinygrad.runtime.support import c
import os
dll = c.DLL('comgr_3', [os.getenv('ROCM_PATH', '/opt/rocm')+'/lib/libamd_comgr.so', 'amd_comgr'])
class amd_comgr_status_s(Annotated[int, ctypes.c_uint32], c.Enum): pass
AMD_COMGR_STATUS_SUCCESS = amd_comgr_status_s.define('AMD_COMGR_STATUS_SUCCESS', 0)
AMD_COMGR_STATUS_ERROR = amd_comgr_status_s.define('AMD_COMGR_STATUS_ERROR', 1)
AMD_COMGR_STATUS_ERROR_INVALID_ARGUMENT = amd_comgr_status_s.define('AMD_COMGR_STATUS_ERROR_INVALID_ARGUMENT', 2)
AMD_COMGR_STATUS_ERROR_OUT_OF_RESOURCES = amd_comgr_status_s.define('AMD_COMGR_STATUS_ERROR_OUT_OF_RESOURCES', 3)
amd_comgr_status_t: TypeAlias = amd_comgr_status_s
class amd_comgr_language_s(Annotated[int, ctypes.c_uint32], c.Enum): pass
AMD_COMGR_LANGUAGE_NONE = amd_comgr_language_s.define('AMD_COMGR_LANGUAGE_NONE', 0)
AMD_COMGR_LANGUAGE_OPENCL_1_2 = amd_comgr_language_s.define('AMD_COMGR_LANGUAGE_OPENCL_1_2', 1)
AMD_COMGR_LANGUAGE_OPENCL_2_0 = amd_comgr_language_s.define('AMD_COMGR_LANGUAGE_OPENCL_2_0', 2)
AMD_COMGR_LANGUAGE_HIP = amd_comgr_language_s.define('AMD_COMGR_LANGUAGE_HIP', 3)
AMD_COMGR_LANGUAGE_LLVM_IR = amd_comgr_language_s.define('AMD_COMGR_LANGUAGE_LLVM_IR', 4)
AMD_COMGR_LANGUAGE_LAST = amd_comgr_language_s.define('AMD_COMGR_LANGUAGE_LAST', 4)
amd_comgr_language_t: TypeAlias = amd_comgr_language_s
@dll.bind
def amd_comgr_status_string(status:amd_comgr_status_t, status_string:c.POINTER[c.POINTER[Annotated[bytes, ctypes.c_char]]]) -> amd_comgr_status_t: ...
size_t: TypeAlias = Annotated[int, ctypes.c_uint64]
@dll.bind
def amd_comgr_get_version(major:c.POINTER[size_t], minor:c.POINTER[size_t]) -> None: ...
class amd_comgr_data_kind_s(Annotated[int, ctypes.c_uint32], c.Enum): pass
AMD_COMGR_DATA_KIND_UNDEF = amd_comgr_data_kind_s.define('AMD_COMGR_DATA_KIND_UNDEF', 0)
AMD_COMGR_DATA_KIND_SOURCE = amd_comgr_data_kind_s.define('AMD_COMGR_DATA_KIND_SOURCE', 1)
AMD_COMGR_DATA_KIND_INCLUDE = amd_comgr_data_kind_s.define('AMD_COMGR_DATA_KIND_INCLUDE', 2)
AMD_COMGR_DATA_KIND_PRECOMPILED_HEADER = amd_comgr_data_kind_s.define('AMD_COMGR_DATA_KIND_PRECOMPILED_HEADER', 3)
AMD_COMGR_DATA_KIND_DIAGNOSTIC = amd_comgr_data_kind_s.define('AMD_COMGR_DATA_KIND_DIAGNOSTIC', 4)
AMD_COMGR_DATA_KIND_LOG = amd_comgr_data_kind_s.define('AMD_COMGR_DATA_KIND_LOG', 5)
AMD_COMGR_DATA_KIND_BC = amd_comgr_data_kind_s.define('AMD_COMGR_DATA_KIND_BC', 6)
AMD_COMGR_DATA_KIND_RELOCATABLE = amd_comgr_data_kind_s.define('AMD_COMGR_DATA_KIND_RELOCATABLE', 7)
AMD_COMGR_DATA_KIND_EXECUTABLE = amd_comgr_data_kind_s.define('AMD_COMGR_DATA_KIND_EXECUTABLE', 8)
AMD_COMGR_DATA_KIND_BYTES = amd_comgr_data_kind_s.define('AMD_COMGR_DATA_KIND_BYTES', 9)
AMD_COMGR_DATA_KIND_FATBIN = amd_comgr_data_kind_s.define('AMD_COMGR_DATA_KIND_FATBIN', 16)
AMD_COMGR_DATA_KIND_AR = amd_comgr_data_kind_s.define('AMD_COMGR_DATA_KIND_AR', 17)
AMD_COMGR_DATA_KIND_BC_BUNDLE = amd_comgr_data_kind_s.define('AMD_COMGR_DATA_KIND_BC_BUNDLE', 18)
AMD_COMGR_DATA_KIND_AR_BUNDLE = amd_comgr_data_kind_s.define('AMD_COMGR_DATA_KIND_AR_BUNDLE', 19)
AMD_COMGR_DATA_KIND_OBJ_BUNDLE = amd_comgr_data_kind_s.define('AMD_COMGR_DATA_KIND_OBJ_BUNDLE', 20)
AMD_COMGR_DATA_KIND_SPIRV = amd_comgr_data_kind_s.define('AMD_COMGR_DATA_KIND_SPIRV', 21)
AMD_COMGR_DATA_KIND_LAST = amd_comgr_data_kind_s.define('AMD_COMGR_DATA_KIND_LAST', 21)
amd_comgr_data_kind_t: TypeAlias = amd_comgr_data_kind_s
@c.record
class amd_comgr_data_s(c.Struct):
SIZE = 8
handle: Annotated[uint64_t, 0]
uint64_t: TypeAlias = Annotated[int, ctypes.c_uint64]
amd_comgr_data_t: TypeAlias = amd_comgr_data_s
@c.record
class amd_comgr_data_set_s(c.Struct):
SIZE = 8
handle: Annotated[uint64_t, 0]
amd_comgr_data_set_t: TypeAlias = amd_comgr_data_set_s
@c.record
class amd_comgr_action_info_s(c.Struct):
SIZE = 8
handle: Annotated[uint64_t, 0]
amd_comgr_action_info_t: TypeAlias = amd_comgr_action_info_s
@c.record
class amd_comgr_metadata_node_s(c.Struct):
SIZE = 8
handle: Annotated[uint64_t, 0]
amd_comgr_metadata_node_t: TypeAlias = amd_comgr_metadata_node_s
@c.record
class amd_comgr_symbol_s(c.Struct):
SIZE = 8
handle: Annotated[uint64_t, 0]
amd_comgr_symbol_t: TypeAlias = amd_comgr_symbol_s
@c.record
class amd_comgr_disassembly_info_s(c.Struct):
SIZE = 8
handle: Annotated[uint64_t, 0]
amd_comgr_disassembly_info_t: TypeAlias = amd_comgr_disassembly_info_s
@c.record
class amd_comgr_symbolizer_info_s(c.Struct):
SIZE = 8
handle: Annotated[uint64_t, 0]
amd_comgr_symbolizer_info_t: TypeAlias = amd_comgr_symbolizer_info_s
@dll.bind
def amd_comgr_get_isa_count(count:c.POINTER[size_t]) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_get_isa_name(index:size_t, isa_name:c.POINTER[c.POINTER[Annotated[bytes, ctypes.c_char]]]) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_get_isa_metadata(isa_name:c.POINTER[Annotated[bytes, ctypes.c_char]], metadata:c.POINTER[amd_comgr_metadata_node_t]) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_create_data(kind:amd_comgr_data_kind_t, data:c.POINTER[amd_comgr_data_t]) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_release_data(data:amd_comgr_data_t) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_get_data_kind(data:amd_comgr_data_t, kind:c.POINTER[amd_comgr_data_kind_t]) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_set_data(data:amd_comgr_data_t, size:size_t, bytes:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_set_data_from_file_slice(data:amd_comgr_data_t, file_descriptor:Annotated[int, ctypes.c_int32], offset:uint64_t, size:uint64_t) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_set_data_name(data:amd_comgr_data_t, name:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_get_data(data:amd_comgr_data_t, size:c.POINTER[size_t], bytes:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_get_data_name(data:amd_comgr_data_t, size:c.POINTER[size_t], name:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_get_data_isa_name(data:amd_comgr_data_t, size:c.POINTER[size_t], isa_name:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_create_symbolizer_info(code_object:amd_comgr_data_t, print_symbol_callback:c.CFUNCTYPE[None, [c.POINTER[Annotated[bytes, ctypes.c_char]], ctypes.c_void_p]], symbolizer_info:c.POINTER[amd_comgr_symbolizer_info_t]) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_destroy_symbolizer_info(symbolizer_info:amd_comgr_symbolizer_info_t) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_symbolize(symbolizer_info:amd_comgr_symbolizer_info_t, address:uint64_t, is_code:Annotated[bool, ctypes.c_bool], user_data:ctypes.c_void_p) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_get_data_metadata(data:amd_comgr_data_t, metadata:c.POINTER[amd_comgr_metadata_node_t]) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_destroy_metadata(metadata:amd_comgr_metadata_node_t) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_create_data_set(data_set:c.POINTER[amd_comgr_data_set_t]) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_destroy_data_set(data_set:amd_comgr_data_set_t) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_data_set_add(data_set:amd_comgr_data_set_t, data:amd_comgr_data_t) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_data_set_remove(data_set:amd_comgr_data_set_t, data_kind:amd_comgr_data_kind_t) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_action_data_count(data_set:amd_comgr_data_set_t, data_kind:amd_comgr_data_kind_t, count:c.POINTER[size_t]) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_action_data_get_data(data_set:amd_comgr_data_set_t, data_kind:amd_comgr_data_kind_t, index:size_t, data:c.POINTER[amd_comgr_data_t]) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_create_action_info(action_info:c.POINTER[amd_comgr_action_info_t]) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_destroy_action_info(action_info:amd_comgr_action_info_t) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_action_info_set_isa_name(action_info:amd_comgr_action_info_t, isa_name:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_action_info_get_isa_name(action_info:amd_comgr_action_info_t, size:c.POINTER[size_t], isa_name:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_action_info_set_language(action_info:amd_comgr_action_info_t, language:amd_comgr_language_t) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_action_info_get_language(action_info:amd_comgr_action_info_t, language:c.POINTER[amd_comgr_language_t]) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_action_info_set_option_list(action_info:amd_comgr_action_info_t, options:c.Array[c.POINTER[Annotated[bytes, ctypes.c_char]], Literal[0]], count:size_t) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_action_info_get_option_list_count(action_info:amd_comgr_action_info_t, count:c.POINTER[size_t]) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_action_info_get_option_list_item(action_info:amd_comgr_action_info_t, index:size_t, size:c.POINTER[size_t], option:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_action_info_set_bundle_entry_ids(action_info:amd_comgr_action_info_t, bundle_entry_ids:c.Array[c.POINTER[Annotated[bytes, ctypes.c_char]], Literal[0]], count:size_t) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_action_info_get_bundle_entry_id_count(action_info:amd_comgr_action_info_t, count:c.POINTER[size_t]) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_action_info_get_bundle_entry_id(action_info:amd_comgr_action_info_t, index:size_t, size:c.POINTER[size_t], bundle_entry_id:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_action_info_set_vfs(action_info:amd_comgr_action_info_t, should_use_vfs:Annotated[bool, ctypes.c_bool]) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_action_info_set_device_lib_linking(action_info:amd_comgr_action_info_t, should_link_device_libs:Annotated[bool, ctypes.c_bool]) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_action_info_set_working_directory_path(action_info:amd_comgr_action_info_t, path:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_action_info_get_working_directory_path(action_info:amd_comgr_action_info_t, size:c.POINTER[size_t], path:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_action_info_set_logging(action_info:amd_comgr_action_info_t, logging:Annotated[bool, ctypes.c_bool]) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_action_info_get_logging(action_info:amd_comgr_action_info_t, logging:c.POINTER[Annotated[bool, ctypes.c_bool]]) -> amd_comgr_status_t: ...
class amd_comgr_action_kind_s(Annotated[int, ctypes.c_uint32], c.Enum): pass
AMD_COMGR_ACTION_SOURCE_TO_PREPROCESSOR = amd_comgr_action_kind_s.define('AMD_COMGR_ACTION_SOURCE_TO_PREPROCESSOR', 0)
AMD_COMGR_ACTION_ADD_PRECOMPILED_HEADERS = amd_comgr_action_kind_s.define('AMD_COMGR_ACTION_ADD_PRECOMPILED_HEADERS', 1)
AMD_COMGR_ACTION_COMPILE_SOURCE_TO_BC = amd_comgr_action_kind_s.define('AMD_COMGR_ACTION_COMPILE_SOURCE_TO_BC', 2)
AMD_COMGR_ACTION_LINK_BC_TO_BC = amd_comgr_action_kind_s.define('AMD_COMGR_ACTION_LINK_BC_TO_BC', 3)
AMD_COMGR_ACTION_CODEGEN_BC_TO_RELOCATABLE = amd_comgr_action_kind_s.define('AMD_COMGR_ACTION_CODEGEN_BC_TO_RELOCATABLE', 4)
AMD_COMGR_ACTION_CODEGEN_BC_TO_ASSEMBLY = amd_comgr_action_kind_s.define('AMD_COMGR_ACTION_CODEGEN_BC_TO_ASSEMBLY', 5)
AMD_COMGR_ACTION_LINK_RELOCATABLE_TO_RELOCATABLE = amd_comgr_action_kind_s.define('AMD_COMGR_ACTION_LINK_RELOCATABLE_TO_RELOCATABLE', 6)
AMD_COMGR_ACTION_LINK_RELOCATABLE_TO_EXECUTABLE = amd_comgr_action_kind_s.define('AMD_COMGR_ACTION_LINK_RELOCATABLE_TO_EXECUTABLE', 7)
AMD_COMGR_ACTION_ASSEMBLE_SOURCE_TO_RELOCATABLE = amd_comgr_action_kind_s.define('AMD_COMGR_ACTION_ASSEMBLE_SOURCE_TO_RELOCATABLE', 8)
AMD_COMGR_ACTION_DISASSEMBLE_RELOCATABLE_TO_SOURCE = amd_comgr_action_kind_s.define('AMD_COMGR_ACTION_DISASSEMBLE_RELOCATABLE_TO_SOURCE', 9)
AMD_COMGR_ACTION_DISASSEMBLE_EXECUTABLE_TO_SOURCE = amd_comgr_action_kind_s.define('AMD_COMGR_ACTION_DISASSEMBLE_EXECUTABLE_TO_SOURCE', 10)
AMD_COMGR_ACTION_DISASSEMBLE_BYTES_TO_SOURCE = amd_comgr_action_kind_s.define('AMD_COMGR_ACTION_DISASSEMBLE_BYTES_TO_SOURCE', 11)
AMD_COMGR_ACTION_COMPILE_SOURCE_WITH_DEVICE_LIBS_TO_BC = amd_comgr_action_kind_s.define('AMD_COMGR_ACTION_COMPILE_SOURCE_WITH_DEVICE_LIBS_TO_BC', 12)
AMD_COMGR_ACTION_COMPILE_SOURCE_TO_RELOCATABLE = amd_comgr_action_kind_s.define('AMD_COMGR_ACTION_COMPILE_SOURCE_TO_RELOCATABLE', 13)
AMD_COMGR_ACTION_COMPILE_SOURCE_TO_EXECUTABLE = amd_comgr_action_kind_s.define('AMD_COMGR_ACTION_COMPILE_SOURCE_TO_EXECUTABLE', 14)
AMD_COMGR_ACTION_UNBUNDLE = amd_comgr_action_kind_s.define('AMD_COMGR_ACTION_UNBUNDLE', 15)
AMD_COMGR_ACTION_COMPILE_SPIRV_TO_RELOCATABLE = amd_comgr_action_kind_s.define('AMD_COMGR_ACTION_COMPILE_SPIRV_TO_RELOCATABLE', 16)
AMD_COMGR_ACTION_TRANSLATE_SPIRV_TO_BC = amd_comgr_action_kind_s.define('AMD_COMGR_ACTION_TRANSLATE_SPIRV_TO_BC', 19)
AMD_COMGR_ACTION_LAST = amd_comgr_action_kind_s.define('AMD_COMGR_ACTION_LAST', 19)
amd_comgr_action_kind_t: TypeAlias = amd_comgr_action_kind_s
@dll.bind
def amd_comgr_do_action(kind:amd_comgr_action_kind_t, info:amd_comgr_action_info_t, input:amd_comgr_data_set_t, result:amd_comgr_data_set_t) -> amd_comgr_status_t: ...
class amd_comgr_metadata_kind_s(Annotated[int, ctypes.c_uint32], c.Enum): pass
AMD_COMGR_METADATA_KIND_NULL = amd_comgr_metadata_kind_s.define('AMD_COMGR_METADATA_KIND_NULL', 0)
AMD_COMGR_METADATA_KIND_STRING = amd_comgr_metadata_kind_s.define('AMD_COMGR_METADATA_KIND_STRING', 1)
AMD_COMGR_METADATA_KIND_MAP = amd_comgr_metadata_kind_s.define('AMD_COMGR_METADATA_KIND_MAP', 2)
AMD_COMGR_METADATA_KIND_LIST = amd_comgr_metadata_kind_s.define('AMD_COMGR_METADATA_KIND_LIST', 3)
AMD_COMGR_METADATA_KIND_LAST = amd_comgr_metadata_kind_s.define('AMD_COMGR_METADATA_KIND_LAST', 3)
amd_comgr_metadata_kind_t: TypeAlias = amd_comgr_metadata_kind_s
@dll.bind
def amd_comgr_get_metadata_kind(metadata:amd_comgr_metadata_node_t, kind:c.POINTER[amd_comgr_metadata_kind_t]) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_get_metadata_string(metadata:amd_comgr_metadata_node_t, size:c.POINTER[size_t], string:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_get_metadata_map_size(metadata:amd_comgr_metadata_node_t, size:c.POINTER[size_t]) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_iterate_map_metadata(metadata:amd_comgr_metadata_node_t, callback:c.CFUNCTYPE[amd_comgr_status_t, [amd_comgr_metadata_node_t, amd_comgr_metadata_node_t, ctypes.c_void_p]], user_data:ctypes.c_void_p) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_metadata_lookup(metadata:amd_comgr_metadata_node_t, key:c.POINTER[Annotated[bytes, ctypes.c_char]], value:c.POINTER[amd_comgr_metadata_node_t]) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_get_metadata_list_size(metadata:amd_comgr_metadata_node_t, size:c.POINTER[size_t]) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_index_list_metadata(metadata:amd_comgr_metadata_node_t, index:size_t, value:c.POINTER[amd_comgr_metadata_node_t]) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_iterate_symbols(data:amd_comgr_data_t, callback:c.CFUNCTYPE[amd_comgr_status_t, [amd_comgr_symbol_t, ctypes.c_void_p]], user_data:ctypes.c_void_p) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_symbol_lookup(data:amd_comgr_data_t, name:c.POINTER[Annotated[bytes, ctypes.c_char]], symbol:c.POINTER[amd_comgr_symbol_t]) -> amd_comgr_status_t: ...
class amd_comgr_symbol_type_s(Annotated[int, ctypes.c_int32], c.Enum): pass
AMD_COMGR_SYMBOL_TYPE_UNKNOWN = amd_comgr_symbol_type_s.define('AMD_COMGR_SYMBOL_TYPE_UNKNOWN', -1)
AMD_COMGR_SYMBOL_TYPE_NOTYPE = amd_comgr_symbol_type_s.define('AMD_COMGR_SYMBOL_TYPE_NOTYPE', 0)
AMD_COMGR_SYMBOL_TYPE_OBJECT = amd_comgr_symbol_type_s.define('AMD_COMGR_SYMBOL_TYPE_OBJECT', 1)
AMD_COMGR_SYMBOL_TYPE_FUNC = amd_comgr_symbol_type_s.define('AMD_COMGR_SYMBOL_TYPE_FUNC', 2)
AMD_COMGR_SYMBOL_TYPE_SECTION = amd_comgr_symbol_type_s.define('AMD_COMGR_SYMBOL_TYPE_SECTION', 3)
AMD_COMGR_SYMBOL_TYPE_FILE = amd_comgr_symbol_type_s.define('AMD_COMGR_SYMBOL_TYPE_FILE', 4)
AMD_COMGR_SYMBOL_TYPE_COMMON = amd_comgr_symbol_type_s.define('AMD_COMGR_SYMBOL_TYPE_COMMON', 5)
AMD_COMGR_SYMBOL_TYPE_AMDGPU_HSA_KERNEL = amd_comgr_symbol_type_s.define('AMD_COMGR_SYMBOL_TYPE_AMDGPU_HSA_KERNEL', 10)
amd_comgr_symbol_type_t: TypeAlias = amd_comgr_symbol_type_s
class amd_comgr_symbol_info_s(Annotated[int, ctypes.c_uint32], c.Enum): pass
AMD_COMGR_SYMBOL_INFO_NAME_LENGTH = amd_comgr_symbol_info_s.define('AMD_COMGR_SYMBOL_INFO_NAME_LENGTH', 0)
AMD_COMGR_SYMBOL_INFO_NAME = amd_comgr_symbol_info_s.define('AMD_COMGR_SYMBOL_INFO_NAME', 1)
AMD_COMGR_SYMBOL_INFO_TYPE = amd_comgr_symbol_info_s.define('AMD_COMGR_SYMBOL_INFO_TYPE', 2)
AMD_COMGR_SYMBOL_INFO_SIZE = amd_comgr_symbol_info_s.define('AMD_COMGR_SYMBOL_INFO_SIZE', 3)
AMD_COMGR_SYMBOL_INFO_IS_UNDEFINED = amd_comgr_symbol_info_s.define('AMD_COMGR_SYMBOL_INFO_IS_UNDEFINED', 4)
AMD_COMGR_SYMBOL_INFO_VALUE = amd_comgr_symbol_info_s.define('AMD_COMGR_SYMBOL_INFO_VALUE', 5)
AMD_COMGR_SYMBOL_INFO_LAST = amd_comgr_symbol_info_s.define('AMD_COMGR_SYMBOL_INFO_LAST', 5)
amd_comgr_symbol_info_t: TypeAlias = amd_comgr_symbol_info_s
@dll.bind
def amd_comgr_symbol_get_info(symbol:amd_comgr_symbol_t, attribute:amd_comgr_symbol_info_t, value:ctypes.c_void_p) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_create_disassembly_info(isa_name:c.POINTER[Annotated[bytes, ctypes.c_char]], read_memory_callback:c.CFUNCTYPE[uint64_t, [uint64_t, c.POINTER[Annotated[bytes, ctypes.c_char]], uint64_t, ctypes.c_void_p]], print_instruction_callback:c.CFUNCTYPE[None, [c.POINTER[Annotated[bytes, ctypes.c_char]], ctypes.c_void_p]], print_address_annotation_callback:c.CFUNCTYPE[None, [uint64_t, ctypes.c_void_p]], disassembly_info:c.POINTER[amd_comgr_disassembly_info_t]) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_destroy_disassembly_info(disassembly_info:amd_comgr_disassembly_info_t) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_disassemble_instruction(disassembly_info:amd_comgr_disassembly_info_t, address:uint64_t, user_data:ctypes.c_void_p, size:c.POINTER[uint64_t]) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_demangle_symbol_name(mangled_symbol_name:amd_comgr_data_t, demangled_symbol_name:c.POINTER[amd_comgr_data_t]) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_populate_mangled_names(data:amd_comgr_data_t, count:c.POINTER[size_t]) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_get_mangled_name(data:amd_comgr_data_t, index:size_t, size:c.POINTER[size_t], mangled_name:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_populate_name_expression_map(data:amd_comgr_data_t, count:c.POINTER[size_t]) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_map_name_expression_to_symbol_name(data:amd_comgr_data_t, size:c.POINTER[size_t], name_expression:c.POINTER[Annotated[bytes, ctypes.c_char]], symbol_name:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> amd_comgr_status_t: ...
@c.record
class code_object_info_s(c.Struct):
SIZE = 24
isa: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 0]
size: Annotated[size_t, 8]
offset: Annotated[uint64_t, 16]
amd_comgr_code_object_info_t: TypeAlias = code_object_info_s
@dll.bind
def amd_comgr_lookup_code_object(data:amd_comgr_data_t, info_list:c.POINTER[amd_comgr_code_object_info_t], info_list_size:size_t) -> amd_comgr_status_t: ...
@dll.bind
def amd_comgr_map_elf_virtual_address_to_code_object_offset(data:amd_comgr_data_t, elf_virtual_address:uint64_t, code_object_offset:c.POINTER[uint64_t], slice_size:c.POINTER[uint64_t], nobits:c.POINTER[Annotated[bool, ctypes.c_bool]]) -> amd_comgr_status_t: ...
c.init_records()
AMD_COMGR_DEPRECATED = lambda msg: __attribute__((deprecated(msg))) # type: ignore
AMD_COMGR_INTERFACE_VERSION_MAJOR = 3 # type: ignore
AMD_COMGR_INTERFACE_VERSION_MINOR = 0 # type: ignore | {
"repo_id": "tinygrad/tinygrad",
"file_path": "tinygrad/runtime/autogen/comgr_3.py",
"license": "MIT License",
"lines": 264,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
tinygrad/tinygrad:extra/bench_log.py | import time, atexit, uuid
from enum import Enum
from tinygrad.device import Device
from tinygrad.helpers import DEBUG, ContextVar, getenv, GlobalCounters
BENCHMARK_LOG = ContextVar("BENCHMARK_LOG", "")
if BENCHMARK_LOG:
from influxdb_client_3 import InfluxDBClient3, Point, WriteOptions, write_client_options
from influxdb_client_3.write_client.client.write_api import WriteType
class BenchEvent(Enum):
  """Phases of a benchmark run whose durations (wall and/or kernel time) are collected."""
  LOAD_WEIGHTS = "load_weights"
  STEP = "step"
  FULL = "full"
  MLPERF_INIT = "mlperf_init"
  MLPERF_RUN = "mlperf_run"
class InstantBenchEvent(Enum):
  """One-shot scalar measurements logged via log_event_instant."""
  GFLOPS = "gflops"
# event -> collected samples. BenchEvent keys map to {"wall": [...], "kernel": [...]},
# InstantBenchEvent keys map to a flat list of values. Initialized by clear_events().
_events = {}
def clear_events():
  """Reset the global _events table: fresh empty sample lists for every known event."""
  _events.update({evt: {"wall": [], "kernel": []} for evt in BenchEvent})
  _events.update({evt: [] for evt in InstantBenchEvent})
clear_events()
class WallTimeEvent:
  """Context manager that appends the wall-clock seconds of its body to _events[event]["wall"]."""
  def __init__(self, event:BenchEvent):
    self.event = event
  def __enter__(self):
    # monotonic clock: immune to system clock adjustments
    self.start = time.monotonic()
    return self
  def __exit__(self, *_):
    self.time = time.monotonic() - self.start
    _events[self.event]["wall"].append(self.time)
    return False  # never swallow exceptions
class KernelTimeEvent:
  """Context manager that records the GPU kernel time (GlobalCounters.time_sum_s delta) of its body."""
  def __init__(self, event:BenchEvent):
    # per the error message, kernel timings are only collected when DEBUG >= 2
    if DEBUG < 2:
      raise Exception("KernelTimeEvent should only be used in DEBUG >= 2")
    self.event = event
  def __enter__(self):
    self.start = GlobalCounters.time_sum_s
    return self
  def __exit__(self, *_):
    _events[self.event]["kernel"].append(GlobalCounters.time_sum_s - self.start)
    return False  # never swallow exceptions
def log_event_instant(event:InstantBenchEvent, value:float):
  """Record a single instantaneous measurement (e.g. GFLOPS) for `event`."""
  _events[event].append(value)
if BENCHMARK_LOG:
  # InfluxDB connection settings, all from the environment
  INFLUXDB_HOST = getenv("INFLUXDB_HOST", "")
  INFLUXDB_ORG = getenv("INFLUXDB_ORG", "tiny")
  INFLUXDB_TOKEN = getenv("INFLUXDB_TOKEN", "")
  def _create_point(run_id, i, attempt, ref, commit, name, value, run):
    # one InfluxDB point per sample, tagged with run identity and CI metadata
    point = Point(BENCHMARK_LOG.value).tag("id", run_id).tag("index", i)
    point = point.tag("device", Device.DEFAULT)
    point = point.tag("attempt", attempt).tag("ref", ref).tag("commit", commit)
    point = point.field(name, value).field("x", run)
    return point
  @atexit.register
  def write_events():
    """Flush all collected samples from _events to InfluxDB at interpreter exit (no-op if empty)."""
    # see if there are any events to write
    have_events = False
    for event in _events:
      if isinstance(event, BenchEvent):
        for event_type, values in _events[event].items():
          if len(values) > 0:
            have_events = True
      else:
        if len(_events[event]) > 0:
          have_events = True
    if not have_events:
      return
    # pull from github envvars
    ref = getenv("GITHUB_REF_NAME", "")
    commit = getenv("GITHUB_SHA", "")
    run = getenv("GITHUB_RUN_NUMBER", "")
    attempt = getenv("GITHUB_RUN_ATTEMPT", "")
    points = []
    for event in _events:
      # a fresh uuid per event groups that event's samples together
      run_id = str(uuid.uuid4())
      if isinstance(event, BenchEvent):
        for event_type, values in _events[event].items():
          for i, value in enumerate(values):
            point = _create_point(run_id, i, attempt, ref, commit, f"{event.value}_{event_type}", value, run)
            points.append(point)
      else:
        for i, value in enumerate(_events[event]):
          point = _create_point(run_id, i, attempt, ref, commit, event.value, value, run)
          points.append(point)
    # synchronous write with bounded exponential-backoff retries
    write_options = WriteOptions(write_type=WriteType.synchronous, retry_interval=5000, max_retries=5, max_retry_delay=30000, exponential_base=2)
    wco = write_client_options(write_options=write_options)
    with InfluxDBClient3(
      host=INFLUXDB_HOST,
      org=INFLUXDB_ORG,
      token=INFLUXDB_TOKEN,
      auth_scheme="Basic",
      database="benchmarks",
      write_client_options=wco) as client:
      client.write(points)
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "extra/bench_log.py",
"license": "MIT License",
"lines": 97,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
tinygrad/tinygrad:test/testextra/test_bench_log.py | import unittest, time
from unittest.case import skipIf
from extra.bench_log import BenchEvent, InstantBenchEvent, WallTimeEvent, KernelTimeEvent, log_event_instant, _events, clear_events
from tinygrad.helpers import Context, CI
from tinygrad.tensor import Tensor
from tinygrad.device import Device
_SKIP_KERNEL_TIMING = Device.DEFAULT == "WEBGPU" # WEBGPU kernel timing not supported
class TestBenchLog(unittest.TestCase):
  def setUp(self):
    # start every test with an empty global event log
    clear_events()
  def test_log_single_wall_time(self):
    """One WallTimeEvent per BenchEvent yields exactly one positive wall sample each."""
    for event in BenchEvent:
      with WallTimeEvent(event):
        time.sleep(0.1)
    # check event list
    for event in BenchEvent:
      self.assertEqual(len(_events[event]["wall"]), 1)
      self.assertGreater(_events[event]["wall"][0], 0)
  def test_log_double_wall_time(self):
    """Two WallTimeEvents per BenchEvent (second pass reversed) yield two positive samples each."""
    for event in BenchEvent:
      with WallTimeEvent(event):
        time.sleep(0.1)
    for event in reversed(BenchEvent):
      with WallTimeEvent(event):
        time.sleep(0.2)
    # check event list
    for event in BenchEvent:
      self.assertEqual(len(_events[event]["wall"]), 2)
      self.assertGreater(_events[event]["wall"][0], 0)
      self.assertGreater(_events[event]["wall"][1], 0)
  @skipIf(CI or _SKIP_KERNEL_TIMING, "ci timing is not accurate")
  def test_log_single_kernel_time(self):
    """Kernel time must be positive and strictly less than the wall time of the same work."""
    wall_times = []
    with Context(DEBUG=2):  # KernelTimeEvent requires DEBUG >= 2
      for event in BenchEvent:
        with KernelTimeEvent(event):
          st = time.perf_counter()
          Tensor.rand(32, 32).sum().realize().item()
          wall_times.append(time.perf_counter() - st)
    # check event list
    for event in BenchEvent:
      self.assertEqual(len(_events[event]["kernel"]), 1)
      self.assertLess(_events[event]["kernel"][0], wall_times[0])
      self.assertGreater(_events[event]["kernel"][0], 0)
@skipIf((CI and Device.DEFAULT == "CUDA") or _SKIP_KERNEL_TIMING, "ci cuda timing is not accurate")
def test_interleaved_wall_kernel_time(self):
wall_times = []
with Context(DEBUG=2):
for event in BenchEvent:
with KernelTimeEvent(event):
st = time.perf_counter()
Tensor.rand(32, 32).sum().realize().item()
wall_times.append(time.perf_counter() - st)
with WallTimeEvent(event):
st = time.perf_counter()
Tensor.rand(32, 32).sum().realize().item()
wall_times.append(time.perf_counter() - st)
# check event list
for event in BenchEvent:
self.assertEqual(len(_events[event]["wall"]), 1)
self.assertEqual(len(_events[event]["kernel"]), 1)
self.assertLess(_events[event]["kernel"][0], wall_times[0])
self.assertGreater(_events[event]["kernel"][0], 0)
@skipIf((CI and Device.DEFAULT == "CUDA") or _SKIP_KERNEL_TIMING, "ci cuda timing is not accurate")
def test_stacked_wall_kernel_time(self):
with Context(DEBUG=2):
for event in BenchEvent:
with KernelTimeEvent(event):
with WallTimeEvent(event):
Tensor.rand(32, 32).sum().realize().item()
for event in BenchEvent:
with WallTimeEvent(event):
with KernelTimeEvent(event):
Tensor.rand(32, 32).sum().realize().item()
for event in BenchEvent:
self.assertEqual(len(_events[event]["wall"]), 2)
self.assertEqual(len(_events[event]["kernel"]), 2)
self.assertLess(_events[event]["kernel"][0], _events[event]["wall"][0])
self.assertGreater(_events[event]["kernel"][0], 0)
self.assertLess(_events[event]["kernel"][1], _events[event]["wall"][1])
self.assertGreater(_events[event]["kernel"][1], 0)
def test_log_instant_event(self):
for event in InstantBenchEvent:
log_event_instant(event, 1000)
# check event list
for event in InstantBenchEvent:
self.assertEqual(len(_events[event]), 1)
self.assertEqual(_events[event][0], 1000)
if __name__ == '__main__':
unittest.main()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/testextra/test_bench_log.py",
"license": "MIT License",
"lines": 90,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:examples/minrf.py | # much taken from https://github.com/cloneofsimo/minRF
from tinygrad import Tensor, nn, GlobalCounters, TinyJit
from tinygrad.helpers import getenv, trange
from extra.models.llama import Attention, FeedForward, precompute_freqs_cis
def modulate(x:Tensor, shift:Tensor, scale:Tensor) -> Tensor:
  """Apply adaLN-style affine modulation along dim 1: x * (1 + scale) + shift."""
  gain = scale.unsqueeze(1) + 1
  offset = shift.unsqueeze(1)
  return x * gain + offset
# TODO: why doesn't the TimestepEmbedder from minRF work?
class TimestepEmbedder:
  """Embed a scalar timestep through a two-layer MLP with SiLU in between."""
  def __init__(self, hidden_size):
    # 1 -> hidden -> hidden; input is the raw scalar timestep, not a sinusoidal code
    self.mlp = [nn.Linear(1, hidden_size), Tensor.silu, nn.Linear(hidden_size, hidden_size)]
  def __call__(self, t:Tensor):
    col = t.reshape(-1, 1)
    return col.sequential(self.mlp)
class TransformerBlock:
  """DiT block: self-attention + FFN, both shift/scale/gate-modulated by adaLN."""
  def __init__(self, dim, n_heads, norm_eps=1e-5):
    self.attention = Attention(dim, n_heads)
    self.feed_forward = FeedForward(dim, 4*dim)
    self.attention_norm = nn.LayerNorm(dim, eps=norm_eps)
    self.ffn_norm = nn.LayerNorm(dim, eps=norm_eps)
    # emits (shift, scale, gate) for the attention branch and again for the MLP branch
    self.adaLN_modulation = nn.Linear(dim, 6 * dim, bias=True)
  def __call__(self, x:Tensor, freqs_cis:Tensor, adaln_input:Tensor):
    # chunk order must match this unpack: msa triple first, then mlp triple
    shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.adaLN_modulation(adaln_input.silu()).chunk(6, dim=1)
    x = x + gate_msa.unsqueeze(1) * self.attention(modulate(self.attention_norm(x), shift_msa, scale_msa), 0, freqs_cis)
    x = x + gate_mlp.unsqueeze(1) * self.feed_forward(modulate(self.ffn_norm(x), shift_mlp, scale_mlp))
    # NOTE(review): contiguous()/contiguous_backward() presumably split the schedule
    # per block for fwd and bwd passes — confirm against tinygrad scheduling docs
    return x.contiguous().contiguous_backward()
class FinalLayer:
  """Final DiT projection: adaLN-modulated LayerNorm then a linear map to
  patch_size*patch_size*out_channels per token."""
  def __init__(self, dim, patch_size, out_channels):
    self.norm_final = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6)
    self.linear = nn.Linear(dim, patch_size*patch_size*out_channels, bias=True)
    self.adaLN_modulation = nn.Linear(dim, 2 * dim, bias=True)
    # init weights/bias to 0 so the model initially outputs zeros from this head
    self.linear.weight.replace(self.linear.weight.zeros_like().contiguous())
    self.linear.bias.replace(self.linear.bias.zeros_like().contiguous())
  def __call__(self, x:Tensor, c:Tensor):
    # only shift and scale here — no gating on the final projection
    shift, scale = self.adaLN_modulation(c.silu()).chunk(2, dim=1)
    x = modulate(self.norm_final(x), shift, scale)
    return self.linear(x)
# channels=1, input_size=32, dim=64, n_layers=6, n_heads=4, num_classes=10
class DiT_Llama:
  """Diffusion transformer (DiT) with Llama-style blocks for rectified flow.

  Consumes (B, in_channels, H, W) images split into patch_size x patch_size
  patches after a conv stem; class labels are embedded, with index
  num_classes serving as the "null" label for classifier-free guidance.
  """
  def __init__(self, in_channels=1, dim=64, n_layers=6, n_heads=4, num_classes=10, patch_size=2):
    self.patch_size = patch_size
    self.out_channels = in_channels
    self.num_classes = num_classes
    # small conv stem run before patchification; keeps spatial size (stride 1, pad 2)
    self.init_conv_seq = [
      nn.Conv2d(in_channels, dim // 2, kernel_size=5, padding=2, stride=1), Tensor.silu, nn.GroupNorm(32, dim//2),
      nn.Conv2d(dim //2, dim // 2, kernel_size=5, padding=2, stride=1), Tensor.silu, nn.GroupNorm(32, dim//2),
    ]
    self.x_embedder = nn.Linear(self.patch_size * self.patch_size * dim // 2, dim, bias=True)
    self.t_embedder = TimestepEmbedder(dim)
    # +1 embedding row: index num_classes is the unconditional/null label
    self.y_embedder = nn.Embedding(num_classes+1, dim)
    self.final_layer = FinalLayer(dim, self.patch_size, self.out_channels)
    self.freqs_cis = precompute_freqs_cis(dim // n_heads, 4096)
    self.layers = [TransformerBlock(dim, n_heads) for _ in range(n_layers)]

  def unpatchify(self, x:Tensor):
    """(B, num_patches, p*p*c) -> (B, c, H, W); assumes a square patch grid."""
    c, p = self.out_channels, self.patch_size
    h = w = int(x.shape[1] ** 0.5)
    x = x.reshape(shape=(x.shape[0], h, w, p, p, c))
    x = x.rearrange("n h w p q c -> n c h p w q")
    # h == w by construction above, so h * p is used for both spatial dims
    return x.reshape(shape=(x.shape[0], c, h * p, h * p))

  def patchify(self, x:Tensor):
    """(B, C, H, W) -> (B, num_patches, C*p*p)."""
    B, C, H, W = x.shape
    x = x.reshape(B, C, H // self.patch_size, self.patch_size, W // self.patch_size, self.patch_size)
    x = x.permute(0, 2, 4, 1, 3, 5).flatten(-3).flatten(1, 2)
    return x # B <H*W ish> <C*patch_size*patch_size>

  def __call__(self, x:Tensor, t:Tensor, y:Tensor) -> Tensor:
    """Predict the flow field for images x at timesteps t with class labels y."""
    x = x.sequential(self.init_conv_seq)
    x = self.patchify(x)
    x = self.x_embedder(x)
    # conditioning vector: timestep embedding + label embedding, shared by all blocks
    adaln_input = self.t_embedder(t) + self.y_embedder(y)
    adaln_input = adaln_input.contiguous()
    for layer in self.layers:
      x = layer(x, self.freqs_cis[:, :x.size(1)], adaln_input=adaln_input)
    x = self.final_layer(x, adaln_input)
    return self.unpatchify(x)

  def rf(self, x:Tensor, cond:Tensor):
    """Rectified-flow training loss (MSE against the true noise direction)."""
    b = x.shape[0]
    # self.ln is True
    # t = sigmoid(normal) -> logit-normal timesteps in (0, 1)
    t = Tensor.randn((b,)).sigmoid()
    texp = t.view([b, *([1] * len(x.shape[1:]))])
    # conditional dropout
    # with prob 0.1, replace the label by the null class to train for CFG
    dropout_prob = 0.1
    cond = (Tensor.rand(cond.shape[0]) < dropout_prob).where(cond.full_like(self.num_classes), cond)
    # this is rectified flow
    z1 = x.randn_like()
    zt = (1 - texp) * x + texp * z1
    vtheta = self(zt, t, cond)
    # MSE loss
    return ((z1 - x) - vtheta).square().mean()

  def sample(self, z, cond, null_cond, sample_steps=50, cfg=2.0):
    """Euler-integrate the learned flow from noise z back toward data.

    Uses classifier-free guidance with weight cfg against null_cond and
    returns the list of intermediate states, starting with z.
    """
    b = z.size(0)
    dt = Tensor.full((b,)+(1,)*len(z.shape[1:]), fill_value=1.0/sample_steps).contiguous()
    images = [z]
    for i in range(sample_steps, 0, -1):
      t = Tensor.full((b,), fill_value=i/sample_steps).contiguous()
      vc = self(z, t, cond)
      vu = self(z, t, null_cond)
      # guided velocity: vu + cfg * (vc - vu)
      vc = vu + cfg * (vc - vu)
      z = z - dt * vc
      z = z.contiguous()
      images.append(z)
    return images
def mviz(t:Tensor):
  """Render a batch of grayscale images side by side as ANSI terminal art.

  t: (B, 1, H, W) tensor with values in [-1, 1]. Images are laid out
  horizontally and printed using the 24-step ANSI grayscale ramp (232..255).

  Generalized: the original hard-coded the 32-pixel height; the image height
  and width are now taken from t's shape, so any square-or-not H x W works.
  """
  assert len(t.shape) == 4 and t.shape[1] == 1
  h = t.shape[2]
  # lay the B images out side by side: (1, H, B, W) -> (H, B*W)
  ft = t.permute(1, 2, 0, 3).reshape(h, -1)
  assert ft.shape[-1] % t.shape[3] == 0
  print("")
  # map [-1, 1] -> [0, 1] -> grayscale color codes 232 + int(x*23)
  for y in ((ft + 1) / 2).clamp(0, 1).tolist():
    ln = [f"\033[38;5;{232+int(x*23)}mββ" for x in y]
    print(''.join(ln) + "\033[0m")
if __name__ == "__main__":
  X_train, Y_train, X_test, Y_test = nn.datasets.mnist()
  # pad 28x28 MNIST to 32x32 and rescale pixel values to [-1, 1]
  X_train = X_train.pad((2,2,2,2))
  X_train = ((X_train.float()/255)-0.5)/0.5
  Y_train = Y_train.int()
  model = DiT_Llama(patch_size=getenv("PATCH_SIZE", 2))
  for r in nn.state.get_parameters(model): r.realize()
  optimizer = nn.optim.Adam(nn.state.get_parameters(model), lr=5e-4)

  @TinyJit
  @Tensor.train()
  def train_step():
    # OVERFIT trains on the fixed batch of index 0; otherwise sample a random batch
    if getenv("OVERFIT"): samples = Tensor.zeros(getenv("BS", 256), dtype='int')
    else: samples = Tensor.randint(getenv("BS", 256), high=X_train.shape[0])
    optimizer.zero_grad()
    loss = model.rf(X_train[samples], Y_train[samples])
    loss.backward()
    optimizer.step()
    return loss

  @TinyJit
  def sample(z:Tensor, cond:Tensor) -> Tensor:
    # class 10 is the learned "null" label used for classifier-free guidance
    return model.sample(z, cond, Tensor.full_like(cond, 10), sample_steps=getenv("SAMPLE_STEPS", 20))[-1]

  for steps in (t:=trange(getenv("STEPS", 5000))):
    # visualize three sampled digits (labels 5, 0, 4) every 10 steps
    if steps%10 == 0: mviz(sample(Tensor.randn(3, 1, 32, 32), Tensor([5,0,4], dtype='int')))
    GlobalCounters.reset()
    loss = train_step()
    t.set_description(f"loss: {loss.item():9.2f}")
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "examples/minrf.py",
"license": "MIT License",
"lines": 131,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
tinygrad/tinygrad:extra/usbgpu/legacy/nvme_speed.py | import array, time, ctypes, struct, random
from hexdump import hexdump
from tinygrad.runtime.support.usb import ASM24Controller, WriteOp, ScsiWriteOp
from tinygrad.runtime.autogen import pci
from tinygrad.helpers import Timing
from tinygrad import Device
usb = ASM24Controller()

def real_scsi_write(buf:bytes=b"", lba:int=0):
  """Issue a single SCSI write of `buf` at `lba` through the controller.

  Fixed: the original body referenced `self`, `buf` and `lba`, none of which
  exist at module scope; route the op through the module-level `usb` instead.
  The defaults keep the original zero-argument call signature valid.
  """
  usb.exec_ops([ScsiWriteOp(buf, lba)])

# benchmark: 256 x 4 KiB SCSI writes to consecutive LBAs, printing MB/s each
for blk in range(256):
  xxx = (ctypes.c_uint8 * 4096)()
  fill = random.randint(0, 255)
  # NOTE: the original reused `i` for this inner loop, clobbering the outer
  # index so every iteration wrote to the same lba (0x1000 + 4095)
  for j in range(len(xxx)): xxx[j] = fill
  # print(fill, usb.read(0xf000, 0x10))
  st = time.perf_counter_ns()
  usb.scsi_write(bytes(xxx), lba=0x1000 + blk)
  en = time.perf_counter_ns()
  print("mb/s is ", (0x1000) / (en - st) * 1e9 / 1024 / 1024)
exit(0)
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "extra/usbgpu/legacy/nvme_speed.py",
"license": "MIT License",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
tinygrad/tinygrad:extra/usbgpu/legacy/patch_exp.py | #!/usr/bin/env python3
import sys
import zlib
def patch(input_filepath, output_filepath, patches):
  """Apply byte patches to a firmware image and rewrite its trailer.

  Each patch is (offset, expected_bytes, new_bytes); expected_bytes must match
  the current file contents, guarding against patching the wrong image. After
  patching, the trailer is fixed up: byte -5 becomes the 8-bit sum of
  data[4:-6] and the last four bytes its little-endian CRC32.

  Returns True on success and writes output_filepath; returns False (without
  writing) if a patch would run past the end of the image.
  Raises ValueError on a length mismatch, AssertionError on a content mismatch.
  """
  with open(input_filepath, 'rb') as infile:
    data = bytearray(infile.read())
  for offset, expected_bytes, new_bytes in patches:
    if len(expected_bytes) != len(new_bytes):
      # fixed: lengths were print()ed to stdout before raising; include them in the error
      raise ValueError(f"Expected bytes and new bytes must be the same length ({len(expected_bytes)} != {len(new_bytes)})")
    if offset + len(new_bytes) > len(data): return False
    current_bytes = data[offset:offset + len(expected_bytes)]
    assert bytes(current_bytes) == expected_bytes, f"Expected {expected_bytes} at offset {offset:x}, but got {current_bytes}"
    data[offset:offset + len(new_bytes)] = new_bytes
  # trailer fixup: 8-bit checksum then little-endian CRC32, both over data[4:-6]
  data[-5] = sum(data[4:-6]) & 0xff
  data[-4:] = zlib.crc32(data[4:-6]).to_bytes(4, 'little')
  with open(output_filepath, 'wb') as outfile:
    outfile.write(data)
  return True
# Each patch: (file_offset, expected_bytes, replacement_bytes). Offsets carry
# a "+ 4" — presumably skipping a 4-byte image header; confirm against the
# trailer math in patch() which also starts at data[4].
patches = [
  # (0x3903 + 1 + 4, b'\x8a', b'\x8b'),
  # (0x3cf9 + 1 + 4, b'\x8a', b'\x8b'), # this is the one which triggered...
  (0x2a0d + 1 + 4, b'\x0a', b'\x05'), # write handle exit with code 5 (?)
  # (0x40e1 + 4, b'\x90\x06\xe6\x04\xf0\x78\x0d\xe6\xfe\x24\x71\x12\x1b\x0b\x60\x0b\x74\x08', b'\x7f\x00\x12\x53\x21\x12\x1c\xfc\x74\x01\xf6\x90\x90\x94\x74\x10\xf0\x22')
  # (0x29ad + 1 + 4, b'\x09', b'\x05'), # write handle exit with code 5 (?)
  # (0x40ef + 0 + 4, b'\x60', b'\x70'), # jz -> jnz
  # (0x40e1 + 0 + 4, b'\x90', b'\x22'), # jmp -> ret
  # (0x40fa + 0 + 4, b'\x80', b'\x22'),
  # (0x40e1 + 0 + 4, b'\x90\x06\xe6\x04\xf0', b'\x7f\x00\x02\x41\x7c'), # jmp -> ret
]
# number of trap handlers installed so far; each gets a 0x20-byte handler slot
next_traphandler = 0

def add_traphandler(addr, sec):
  """Append patches installing a hit-counting trampoline at firmware `addr`.

  `sec` is the original instruction bytes at `addr` (at least 3 bytes). The
  site is overwritten with a jump to a handler slot at 0x6000+n*0x20, padded
  to len(sec); the handler saves registers, bumps a counter byte at 0x3000+n,
  restores, re-executes the displaced bytes `sec`, and jumps back to
  addr+len(sec).
  NOTE(review): byte meanings (0x02 = LJMP, 0x22 = RET, push/pop sequences)
  are inferred from standard 8051 encoding — confirm against the firmware
  disassembly.
  """
  global next_traphandler, patches
  trap_addr = 0x6000 + next_traphandler * 0x20
  return_addr = addr + len(sec)
  cntr_addr = 0x3000 + next_traphandler
  patches += [
    (addr + 4, sec, b'\x02' + trap_addr.to_bytes(2, 'big') + b'\x22'*(len(sec)-3)),
    (trap_addr + 4, b'\x00' * (21 + len(sec)),
     b'\xc0\xe0\xc0\x82\xc0\x83\x90' + cntr_addr.to_bytes(2, 'big') + b'\xe0\x04\xf0\xd0\x83\xd0\x82\xd0\xe0' + sec + b'\x02' + return_addr.to_bytes(2, 'big')),
  ]
  next_traphandler += 1

# candidate trap sites explored during reverse engineering (all disabled)
# add_traphandler(0x0206, b'\xed\x54\x06') # fill_scsi_resp
# add_traphandler(0x40d9, b'\x78\x6a\xe6') # fill_scsi_to_usb_transport
# add_traphandler(0x4d44, b'\x78\x6a\xe6') # FUN_CODE_4d44
# add_traphandler(0x4784, b'\x78\x6a\xe6') # FUN_CODE_4784
# add_traphandler(0x3e81, b'\x90\xc5\x16') # FUN_CODE_3e81
# add_traphandler(0x32a5, b'\x78\x6a\xe6') # FUN_CODE_32a5
# add_traphandler(0x2a10, b'\x90\xc4\x51') # FUN_CODE_2a10
# add_traphandler(0x2608, b'\x12\x16\x87') # FUN_CODE_2608
# add_traphandler(0x0e78, b'\x90\xc8\x02') # main usb entry
# add_traphandler(0x102f, b'\x12\x18\x0d') # possible scsi entry parser
# add_traphandler(0x1198, b'\x12\x18\x0d') # close_to_scsi_parse_1_and_set_c47a_to_0xff caller to scsi
# add_traphandler(0x180d, b'\x90\x0a\x7d') # close_to_scsi_parse
# add_traphandler(0x1114, b'\x75\x37\x00') # entry into if ((DAT_EXTMEM_c802 >> 2 & 1) != 0) { in main usb entry
# add_traphandler(0x113a, b'\x90\x90\x00') # exit from scsi parse loop
# add_traphandler(0x117b, b'\xd0\x07\xd0\x06') # exit from main usb entry
# add_traphandler(0x2f81, b'\x90\x0a\x59') # main loop? 8
# add_traphandler(0xc7a7, b'\x90\x09\xfa') # call smth in write path 9
# add_traphandler(0x2fcb, b'\x90\x0a\x59') # if ((DAT_EXTMEM_0ae2 != 0) && (DAT_EXTMEM_0ae2 != 0x10)) {
# add_traphandler(0x2fc0, b'\x90\x0a\xe2') # submain loop 11
# add_traphandler(0x30be, b'\x90\x0a\x5a') # aft sub loop 12
# add_traphandler(0x3076, b'\x12\x03\x59') # call to call_wait_for_nvme??(); 13
# add_traphandler(0x30ad, b'\x12\x04\xe4') # call to call_wait_for_nvme??(); 14
# add_traphandler(0x2608, b'\x12\x16\x87') # FUN_CODE_2608
# add_traphandler(0x10ee, b'\x90\x04\x64') # iniside trap handler
# add_traphandler(0x10e0, b'\x90\xc8\x06') # iniside trap handler
# add_traphandler(0x4977, b'\x90\x0a\xa8') # waiter for nvme???
# replaced bare `assert` (silently stripped under python -O) with an explicit check
if patch(sys.argv[1], sys.argv[2], patches) is not True:
  raise SystemExit("patch failed: a patch was out of range for the input image")
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "extra/usbgpu/legacy/patch_exp.py",
"license": "MIT License",
"lines": 73,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
tinygrad/tinygrad:extra/usbgpu/legacy/wr_speed.py | import array, time, ctypes, struct, random
from hexdump import hexdump
from tinygrad.runtime.support.usb import ASMController, WriteOp
from tinygrad.runtime.autogen import pci
from tinygrad.helpers import Timing
from tinygrad import Device
# Benchmark: time 64 SCSI writes of a 4 KiB pattern buffer, then 64 reads
# of the 0xF000 window, via the ASMedia bridge controller.
usb = ASMController()

buf = (ctypes.c_uint8 * 4096)()
pattern = random.randint(0, 255)
for idx in range(len(buf)):
  buf[idx] = pattern
print(pattern, usb.read(0xf000, 0x10))

with Timing():
  for _ in range(64):
    usb.scsi_write(buf)
with Timing():
  for _ in range(64):
    usb.read(0xf000, 0x1000)
exit(0)
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "extra/usbgpu/legacy/wr_speed.py",
"license": "MIT License",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
tinygrad/tinygrad:extra/usbgpu/patch.py | #!/usr/bin/env python3
import os, zlib, struct, hashlib
from tinygrad.helpers import getenv
from tinygrad.runtime.support.usb import USB3
# (vendor_id, device_id) pairs of bridge controllers this patcher knows how to flash
SUPPORTED_CONTROLLERS = [
  (0x174C, 0x2464),
  (0x174C, 0x2463),
  (0xADD1, 0x0001),
]
# USBDEV="vvvv:dddd" overrides the list, trying that device first.
# Fixed: the original inserted a one-shot *generator* instead of a tuple; it
# unpacks once but breaks any later iteration or repr of the list.
if getenv("USBDEV", ""): SUPPORTED_CONTROLLERS.insert(0, tuple(int(x, 16) for x in getenv("USBDEV", "").split(":")))
def patch(input_filepath, file_hash, patches):
  """Read a firmware image, verify its MD5, apply byte patches, fix the trailer.

  patches: iterable of (offset, expected_bytes, new_bytes); expected_bytes
  must equal the current contents at offset. The trailer is then rewritten:
  byte -5 is the 8-bit sum of data[4:-6], the last four bytes its
  little-endian CRC32.

  Returns the patched image as a bytearray, or False if a patch would run
  past the end of the image.
  Raises ValueError on a hash or length mismatch, AssertionError on a
  content mismatch.
  """
  with open(input_filepath, 'rb') as infile:
    data = bytearray(infile.read())
  if_hash = hashlib.md5(data).hexdigest()
  if if_hash != file_hash:
    raise ValueError(f"File hash mismatch: expected {file_hash}, got {if_hash}")
  for offset, expected_bytes, new_bytes in patches:
    if len(expected_bytes) != len(new_bytes):
      raise ValueError("Expected bytes and new bytes must be the same length")
    if offset + len(new_bytes) > len(data): return False
    current_bytes = data[offset:offset + len(expected_bytes)]
    assert bytes(current_bytes) == expected_bytes, f"Expected {expected_bytes} at offset {offset:x}, but got {current_bytes}"
    data[offset:offset + len(new_bytes)] = new_bytes
  # trailer fixup: 8-bit checksum then little-endian CRC32, both over data[4:-6]
  data[-5] = sum(data[4:-6]) & 0xff
  data[-4:] = zlib.crc32(data[4:-6]).to_bytes(4, 'little')
  return data
# Download (if needed), patch and flash the ASMedia bridge firmware, then
# write two 128-byte config pages and the patched image in two halves.
path = os.path.dirname(os.path.abspath(__file__))
file_hash = "5284e618d96ef804c06f47f3b73656b7"
file_path = os.path.join(path, "Software/AS_USB4_240417_85_00_00.bin")
if not os.path.exists(file_path):
  # fetch the stock firmware from the archived station-drivers mirror
  url = "https://web.archive.org/web/20250430124720/https://www.station-drivers.com/index.php/en/component/remository/func-download/6341/chk,3ef8b04704a18eb2fc57ff60382379ad/no_html,1/lang,en-gb/"
  os.system(f'curl -o "{path}/fw.zip" "{url}"')
  os.system(f'unzip -o "{path}/fw.zip" "Software/AS_USB4_240417_85_00_00.bin" -d "{path}"')
# single byte patch at 0x2a0d+1 (+4 header) — same patch as extra/usbgpu/legacy/patch_exp.py
patches = [(0x2a0d + 1 + 4, b'\x0a', b'\x05')]
patched_fw = patch(file_path, file_hash, patches)
# try each known controller until one opens
dev = None
for vendor, device in SUPPORTED_CONTROLLERS:
  try:
    dev = USB3(vendor, device, 0x81, 0x83, 0x02, 0x04, use_bot=True)
    break
  except RuntimeError: pass
if dev is None:
  raise RuntimeError('Could not open controller. You can set USBDEV environment variable to your device\'s vendor and device ID (e.g., USBDEV="174C:2464")')
# two 128-byte config pages; bytes are mostly 0xFF filler around ASCII strings
# (e.g. "tiny", "USB 3.2 PCIe TinyEnclosure") and ID/checksum fields
config1 = bytes([
  0xFF, 0xFF, 0xFF, 0xFF, 0x41, 0x41, 0x41, 0x41, 0x42, 0x42, 0x42, 0x42, 0x30, 0x30, 0x36, 0x30,
  0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x74, 0x69, 0x6E, 0x79, 0xFF, 0xFF, 0xFF, 0xFF,
  0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
  0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x74, 0x69, 0x6E, 0x79,
  0xFF, 0xFF, 0xFF, 0xFF, 0x55, 0x53, 0x42, 0x20, 0x33, 0x2E, 0x32, 0x20, 0x50, 0x43, 0x49, 0x65,
  0x20, 0x54, 0x69, 0x6E, 0x79, 0x45, 0x6E, 0x63, 0x6C, 0x6F, 0x73, 0x75, 0x72, 0x65, 0xFF, 0xFF,
  0xFF, 0xFF, 0xFF, 0xFF, 0x54, 0x69, 0x6E, 0x79, 0x45, 0x6E, 0x63, 0x6C, 0x6F, 0x73, 0x75, 0x72,
  0x65, 0xFF, 0xFF, 0xFF, 0xD1, 0xAD, 0x01, 0x00, 0x00, 0x01, 0xCF, 0xFF, 0x02, 0xFF, 0x5A, 0x94])
config2 = bytes([
  0xFF, 0xFF, 0xFF, 0xFF, 0x47, 0x6F, 0x70, 0x6F, 0x64, 0x20, 0x47, 0x72, 0x6F, 0x75, 0x70, 0x20,
  0x4C, 0x69, 0x6D, 0x69, 0x74, 0x65, 0x64, 0x2E, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
  0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x55, 0x53, 0x42, 0x34,
  0x20, 0x4E, 0x56, 0x4D, 0x65, 0x20, 0x53, 0x53, 0x44, 0x20, 0x50, 0x72, 0x6F, 0x20, 0x45, 0x6E,
  0x63, 0x6C, 0x6F, 0x73, 0x75, 0x72, 0x65, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
  0xFF, 0xFF, 0xFF, 0xFF, 0x8C, 0xBF, 0xFF, 0x97, 0xC1, 0xF3, 0xFF, 0xFF, 0x01, 0x2D, 0x66, 0xD6,
  0x66, 0x06, 0x00, 0xC0, 0x87, 0x01, 0x5A, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xCA, 0x01, 0x66, 0xD6,
  0xE3, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0xFF, 0xFF, 0x01, 0x00, 0xA5, 0x67])
# the firmware image is flashed in two pieces, split at 0xff00
part1 = patched_fw[:0xff00]
part2 = patched_fw[0xff00:]
# config patch
cdb = struct.pack('>BBB12x', 0xe1, 0x50, 0x0)
dev.send_batch(cdbs=[cdb], odata=[config1])
cdb = struct.pack('>BBB12x', 0xe1, 0x50, 0x1)
dev.send_batch(cdbs=[cdb], odata=[config2])
# firmware halves: opcode 0xe3, subcommands 0x50 then 0xd0, length-prefixed
cdb = struct.pack('>BBI', 0xe3, 0x50, len(part1))
dev.send_batch(cdbs=[cdb], odata=[part1])
cdb = struct.pack('>BBI', 0xe3, 0xd0, len(part2))
dev.send_batch(cdbs=[cdb], odata=[part2])
# commit/finalize command — NOTE(review): semantics of 0xe8/0x51 inferred from
# context (sent last, no payload); confirm against the vendor protocol
cdb = struct.pack('>BB13x', 0xe8, 0x51)
dev.send_batch(cdbs=[cdb])
print("done, you can disconnect the controller!")
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "extra/usbgpu/patch.py",
"license": "MIT License",
"lines": 79,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
tinygrad/tinygrad:extra/usbgpu/scan_pci.py | import array, time
from hexdump import hexdump
from tinygrad.runtime.support.usb import ASM24Controller
from tinygrad.runtime.autogen import pci
usb = ASM24Controller()

def print_cfg(bus, dev):
  """Read the 256-byte PCI config space of (bus, dev, fn 0), hexdump it, and
  return it as a bytearray."""
  words = [usb.pcie_cfg_req(off, bus=bus, dev=dev, fn=0, value=None, size=4) for off in range(0, 256, 4)]
  print("bus={}, dev={}".format(bus, dev))
  dmp = bytearray(array.array('I', words))
  hexdump(dmp)
  return dmp
def rescan_bus(bus, gpu_bus):
  """Renumber the bridge at (bus, dev 0), pulse its secondary-bus reset, and
  program its memory windows so downstream devices re-enumerate."""
  print("set PCI_SUBORDINATE_BUS bus={} to {}".format(bus, gpu_bus))
  usb.pcie_cfg_req(pci.PCI_SUBORDINATE_BUS, bus=bus, dev=0, fn=0, value=gpu_bus, size=1)
  usb.pcie_cfg_req(pci.PCI_SECONDARY_BUS, bus=bus, dev=0, fn=0, value=bus+1, size=1)
  usb.pcie_cfg_req(pci.PCI_PRIMARY_BUS, bus=bus, dev=0, fn=0, value=max(0, bus-1), size=1)
  print("rescan bus={}".format(bus))
  # pulse secondary-bus reset, wait, then restore parity/SERR reporting
  usb.pcie_cfg_req(pci.PCI_BRIDGE_CONTROL, bus=bus, dev=0, fn=0, value=pci.PCI_BRIDGE_CTL_BUS_RESET, size=1)
  time.sleep(0.1)
  usb.pcie_cfg_req(pci.PCI_BRIDGE_CONTROL, bus=bus, dev=0, fn=0, value=pci.PCI_BRIDGE_CTL_PARITY|pci.PCI_BRIDGE_CTL_SERR, size=1)
  # bridge memory window registers — presumably the standard 64 KiB-granular
  # base/limit encoding; confirm against the PCI-to-PCI bridge spec
  usb.pcie_cfg_req(pci.PCI_MEMORY_BASE, bus=bus, dev=0, fn=0, value=0x1000, size=2)
  usb.pcie_cfg_req(pci.PCI_MEMORY_LIMIT, bus=bus, dev=0, fn=0, value=0x2000, size=2)
  usb.pcie_cfg_req(pci.PCI_PREF_MEMORY_BASE, bus=bus, dev=0, fn=0, value=0x2000, size=2)
  usb.pcie_cfg_req(pci.PCI_PREF_MEMORY_LIMIT, bus=bus, dev=0, fn=0, value=0xffff, size=2)

# walk the bridge chain hop by hop, renumbering/resetting toward the GPU on bus 4
print_cfg(0, 0)
rescan_bus(0, gpu_bus=4)
print_cfg(1, 0)
rescan_bus(1, gpu_bus=4)
time.sleep(0.1)
print_cfg(2, 0)

def setup_bus(bus, gpu_bus):
  """Like rescan_bus, but additionally enables IO/memory/bus-master in
  PCI_COMMAND and does not sleep after the reset pulse."""
  print("setup bus={}".format(bus))
  usb.pcie_cfg_req(pci.PCI_SUBORDINATE_BUS, bus=bus, dev=0, fn=0, value=gpu_bus, size=1)
  usb.pcie_cfg_req(pci.PCI_SECONDARY_BUS, bus=bus, dev=0, fn=0, value=bus+1, size=1)
  usb.pcie_cfg_req(pci.PCI_PRIMARY_BUS, bus=bus, dev=0, fn=0, value=max(0, bus-1), size=1)
  usb.pcie_cfg_req(pci.PCI_BRIDGE_CONTROL, bus=bus, dev=0, fn=0, value=pci.PCI_BRIDGE_CTL_BUS_RESET, size=1)
  usb.pcie_cfg_req(pci.PCI_BRIDGE_CONTROL, bus=bus, dev=0, fn=0, value=pci.PCI_BRIDGE_CTL_PARITY|pci.PCI_BRIDGE_CTL_SERR, size=1)
  usb.pcie_cfg_req(pci.PCI_COMMAND, bus=bus, dev=0, fn=0, value=pci.PCI_COMMAND_IO | pci.PCI_COMMAND_MEMORY | pci.PCI_COMMAND_MASTER, size=1)
  usb.pcie_cfg_req(pci.PCI_MEMORY_BASE, bus=bus, dev=0, fn=0, value=0x1000, size=2)
  usb.pcie_cfg_req(pci.PCI_MEMORY_LIMIT, bus=bus, dev=0, fn=0, value=0x2000, size=2)
  usb.pcie_cfg_req(pci.PCI_PREF_MEMORY_BASE, bus=bus, dev=0, fn=0, value=0x2000, size=2)
  usb.pcie_cfg_req(pci.PCI_PREF_MEMORY_LIMIT, bus=bus, dev=0, fn=0, value=0xffff, size=2)

setup_bus(2, gpu_bus=4)
print_cfg(3, 0)
setup_bus(3, gpu_bus=4)
dmp = print_cfg(4, 0)
print(dmp[0:4])
# first 4 config bytes are little-endian vendor:device; all accepted IDs are
# vendor 0x1002 with device 0x7480 / 0x744c / 0x7550
assert dmp[0:4] in (b"\x02\x10\x80\x74", b"\x02\x10\x4c\x74", b"\x02\x10\x50\x75"), "GPU NOT FOUND!"
print("GPU FOUND!")
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "extra/usbgpu/scan_pci.py",
"license": "MIT License",
"lines": 51,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
tinygrad/tinygrad:test/external/external_test_usb_asm24.py | import unittest, time
from tinygrad.runtime.support.usb import ASM24Controller
from tinygrad.helpers import Timing
from tinygrad import Tensor, Device
import numpy as np
class TestASMController(unittest.TestCase):
  """Smoke and speed tests for the ASM24 USB bridge controller (needs hardware)."""
  @classmethod
  def setUpClass(cls):
    # one shared controller handle for all tests in this class
    cls.ctrl = ASM24Controller()

  def test_write_and_read(self):
    # round-trip a small buffer through controller memory at 0xF000
    base = 0xF000
    data = b"hello!"
    self.ctrl.write(base, data)
    out = self.ctrl.read(base, len(data))
    self.assertEqual(out, data)

  def test_scsi_write_and_read_from_f000(self):
    # a 4 KiB SCSI write at lba 0 must be readable back from the 0xF000 window
    payload = bytes([0x5B]) * 4096
    self.ctrl.scsi_write(payload, lba=0)
    back = self.ctrl.read(0xF000, len(payload))
    self.assertEqual(back, payload)

  def test_scsi_write_speed_4k(self):
    # timing-only: prints the duration, makes no assertion
    payload = bytes([0x5A]) * 4096
    start = time.perf_counter()
    self.ctrl.scsi_write(payload, lba=0)
    dur_ms = (time.perf_counter() - start) * 1000
    print(f"scsi_write 4K took {dur_ms:.3f} ms")

  def test_read_speed_4k(self):
    # times a 4 KiB read and verifies the data survived
    payload = bytes([0xA5]) * 4096
    self.ctrl.write(0xF000, payload)
    start = time.perf_counter()
    out = self.ctrl.read(0xF000, 4096)
    dur_ms = (time.perf_counter() - start) * 1000
    print(f"read 4K took {dur_ms:.3f} ms")
    self.assertEqual(out, payload)
class TestDevCopySpeeds(unittest.TestCase):
  """Host<->device copy bandwidth and correctness checks for USB-attached AMD devices."""
  @classmethod
  def setUpClass(cls):
    cls.sz = 512
    cls.dev = Device["AMD"]
    if not cls.dev.is_usb(): raise unittest.SkipTest("only test this on USB devices")

  def testCopyCPUtoDefault(self):
    # copyin: CPU -> default device, printing MB/s per iteration
    for _ in range(10):
      t = Tensor.ones(self.sz, self.sz, device="CPU").contiguous().realize()
      with Timing(f"copyin of {t.nbytes()/1e6:.2f} MB: ", on_exit=lambda ns: f" @ {t.nbytes()/ns * 1e3:.2f} MB/s"): # noqa: F821
        t.to(Device.DEFAULT).realize()
        Device[Device.DEFAULT].synchronize()
      del t

  def testCopyDefaulttoCPU(self):
    # copyout: default device -> CPU, printing MB/s per iteration
    t = Tensor.ones(self.sz, self.sz).contiguous().realize()
    for _ in range(10):
      with Timing(f"copyout of {t.nbytes()/1e6:.2f} MB: ", on_exit=lambda ns: f" @ {t.nbytes()/ns * 1e3:.2f} MB/s"):
        t.to('CPU').realize()

  def testValidateCopies(self):
    # round-trip correctness: data must survive CPU -> device -> CPU bit-exact
    t = Tensor.randn(self.sz, self.sz, device="CPU").contiguous().realize()
    x = t.to(Device.DEFAULT).realize()
    Device[Device.DEFAULT].synchronize()
    y = x.to('CPU').realize()
    np.testing.assert_equal(t.numpy(), y.numpy())
    del x, y, t

if __name__ == "__main__":
  unittest.main()
"repo_id": "tinygrad/tinygrad",
"file_path": "test/external/external_test_usb_asm24.py",
"license": "MIT License",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:tinygrad/runtime/autogen/libusb.py | # mypy: disable-error-code="empty-body"
from __future__ import annotations
import ctypes
from typing import Annotated, Literal, TypeAlias
from tinygrad.runtime.support.c import _IO, _IOW, _IOR, _IOWR
from tinygrad.runtime.support import c
# NOTE(review): this file is autogenerated; comments below are review aids and
# would be lost on regeneration.
# handle to the shared libusb-1.0 library; all bindings resolve through it
dll = c.DLL('libusb', 'usb-1.0')
# USB device/interface class codes (bDeviceClass / bInterfaceClass)
class enum_libusb_class_code(Annotated[int, ctypes.c_uint32], c.Enum): pass
LIBUSB_CLASS_PER_INTERFACE = enum_libusb_class_code.define('LIBUSB_CLASS_PER_INTERFACE', 0)
LIBUSB_CLASS_AUDIO = enum_libusb_class_code.define('LIBUSB_CLASS_AUDIO', 1)
LIBUSB_CLASS_COMM = enum_libusb_class_code.define('LIBUSB_CLASS_COMM', 2)
LIBUSB_CLASS_HID = enum_libusb_class_code.define('LIBUSB_CLASS_HID', 3)
LIBUSB_CLASS_PHYSICAL = enum_libusb_class_code.define('LIBUSB_CLASS_PHYSICAL', 5)
LIBUSB_CLASS_IMAGE = enum_libusb_class_code.define('LIBUSB_CLASS_IMAGE', 6)
LIBUSB_CLASS_PTP = enum_libusb_class_code.define('LIBUSB_CLASS_PTP', 6)
LIBUSB_CLASS_PRINTER = enum_libusb_class_code.define('LIBUSB_CLASS_PRINTER', 7)
LIBUSB_CLASS_MASS_STORAGE = enum_libusb_class_code.define('LIBUSB_CLASS_MASS_STORAGE', 8)
LIBUSB_CLASS_HUB = enum_libusb_class_code.define('LIBUSB_CLASS_HUB', 9)
LIBUSB_CLASS_DATA = enum_libusb_class_code.define('LIBUSB_CLASS_DATA', 10)
LIBUSB_CLASS_SMART_CARD = enum_libusb_class_code.define('LIBUSB_CLASS_SMART_CARD', 11)
LIBUSB_CLASS_CONTENT_SECURITY = enum_libusb_class_code.define('LIBUSB_CLASS_CONTENT_SECURITY', 13)
LIBUSB_CLASS_VIDEO = enum_libusb_class_code.define('LIBUSB_CLASS_VIDEO', 14)
LIBUSB_CLASS_PERSONAL_HEALTHCARE = enum_libusb_class_code.define('LIBUSB_CLASS_PERSONAL_HEALTHCARE', 15)
LIBUSB_CLASS_DIAGNOSTIC_DEVICE = enum_libusb_class_code.define('LIBUSB_CLASS_DIAGNOSTIC_DEVICE', 220)
LIBUSB_CLASS_WIRELESS = enum_libusb_class_code.define('LIBUSB_CLASS_WIRELESS', 224)
LIBUSB_CLASS_MISCELLANEOUS = enum_libusb_class_code.define('LIBUSB_CLASS_MISCELLANEOUS', 239)
LIBUSB_CLASS_APPLICATION = enum_libusb_class_code.define('LIBUSB_CLASS_APPLICATION', 254)
LIBUSB_CLASS_VENDOR_SPEC = enum_libusb_class_code.define('LIBUSB_CLASS_VENDOR_SPEC', 255)
# descriptor types (bDescriptorType)
class enum_libusb_descriptor_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
LIBUSB_DT_DEVICE = enum_libusb_descriptor_type.define('LIBUSB_DT_DEVICE', 1)
LIBUSB_DT_CONFIG = enum_libusb_descriptor_type.define('LIBUSB_DT_CONFIG', 2)
LIBUSB_DT_STRING = enum_libusb_descriptor_type.define('LIBUSB_DT_STRING', 3)
LIBUSB_DT_INTERFACE = enum_libusb_descriptor_type.define('LIBUSB_DT_INTERFACE', 4)
LIBUSB_DT_ENDPOINT = enum_libusb_descriptor_type.define('LIBUSB_DT_ENDPOINT', 5)
LIBUSB_DT_INTERFACE_ASSOCIATION = enum_libusb_descriptor_type.define('LIBUSB_DT_INTERFACE_ASSOCIATION', 11)
LIBUSB_DT_BOS = enum_libusb_descriptor_type.define('LIBUSB_DT_BOS', 15)
LIBUSB_DT_DEVICE_CAPABILITY = enum_libusb_descriptor_type.define('LIBUSB_DT_DEVICE_CAPABILITY', 16)
LIBUSB_DT_HID = enum_libusb_descriptor_type.define('LIBUSB_DT_HID', 33)
LIBUSB_DT_REPORT = enum_libusb_descriptor_type.define('LIBUSB_DT_REPORT', 34)
LIBUSB_DT_PHYSICAL = enum_libusb_descriptor_type.define('LIBUSB_DT_PHYSICAL', 35)
LIBUSB_DT_HUB = enum_libusb_descriptor_type.define('LIBUSB_DT_HUB', 41)
LIBUSB_DT_SUPERSPEED_HUB = enum_libusb_descriptor_type.define('LIBUSB_DT_SUPERSPEED_HUB', 42)
LIBUSB_DT_SS_ENDPOINT_COMPANION = enum_libusb_descriptor_type.define('LIBUSB_DT_SS_ENDPOINT_COMPANION', 48)
# endpoint direction bit (bit 7 of bEndpointAddress)
class enum_libusb_endpoint_direction(Annotated[int, ctypes.c_uint32], c.Enum): pass
LIBUSB_ENDPOINT_OUT = enum_libusb_endpoint_direction.define('LIBUSB_ENDPOINT_OUT', 0)
LIBUSB_ENDPOINT_IN = enum_libusb_endpoint_direction.define('LIBUSB_ENDPOINT_IN', 128)
# endpoint transfer types (low bits of bmAttributes)
class enum_libusb_endpoint_transfer_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
LIBUSB_ENDPOINT_TRANSFER_TYPE_CONTROL = enum_libusb_endpoint_transfer_type.define('LIBUSB_ENDPOINT_TRANSFER_TYPE_CONTROL', 0)
LIBUSB_ENDPOINT_TRANSFER_TYPE_ISOCHRONOUS = enum_libusb_endpoint_transfer_type.define('LIBUSB_ENDPOINT_TRANSFER_TYPE_ISOCHRONOUS', 1)
LIBUSB_ENDPOINT_TRANSFER_TYPE_BULK = enum_libusb_endpoint_transfer_type.define('LIBUSB_ENDPOINT_TRANSFER_TYPE_BULK', 2)
LIBUSB_ENDPOINT_TRANSFER_TYPE_INTERRUPT = enum_libusb_endpoint_transfer_type.define('LIBUSB_ENDPOINT_TRANSFER_TYPE_INTERRUPT', 3)
# standard control-request codes (bRequest)
class enum_libusb_standard_request(Annotated[int, ctypes.c_uint32], c.Enum): pass
LIBUSB_REQUEST_GET_STATUS = enum_libusb_standard_request.define('LIBUSB_REQUEST_GET_STATUS', 0)
LIBUSB_REQUEST_CLEAR_FEATURE = enum_libusb_standard_request.define('LIBUSB_REQUEST_CLEAR_FEATURE', 1)
LIBUSB_REQUEST_SET_FEATURE = enum_libusb_standard_request.define('LIBUSB_REQUEST_SET_FEATURE', 3)
LIBUSB_REQUEST_SET_ADDRESS = enum_libusb_standard_request.define('LIBUSB_REQUEST_SET_ADDRESS', 5)
LIBUSB_REQUEST_GET_DESCRIPTOR = enum_libusb_standard_request.define('LIBUSB_REQUEST_GET_DESCRIPTOR', 6)
LIBUSB_REQUEST_SET_DESCRIPTOR = enum_libusb_standard_request.define('LIBUSB_REQUEST_SET_DESCRIPTOR', 7)
LIBUSB_REQUEST_GET_CONFIGURATION = enum_libusb_standard_request.define('LIBUSB_REQUEST_GET_CONFIGURATION', 8)
LIBUSB_REQUEST_SET_CONFIGURATION = enum_libusb_standard_request.define('LIBUSB_REQUEST_SET_CONFIGURATION', 9)
LIBUSB_REQUEST_GET_INTERFACE = enum_libusb_standard_request.define('LIBUSB_REQUEST_GET_INTERFACE', 10)
LIBUSB_REQUEST_SET_INTERFACE = enum_libusb_standard_request.define('LIBUSB_REQUEST_SET_INTERFACE', 11)
LIBUSB_REQUEST_SYNCH_FRAME = enum_libusb_standard_request.define('LIBUSB_REQUEST_SYNCH_FRAME', 12)
LIBUSB_REQUEST_SET_SEL = enum_libusb_standard_request.define('LIBUSB_REQUEST_SET_SEL', 48)
LIBUSB_SET_ISOCH_DELAY = enum_libusb_standard_request.define('LIBUSB_SET_ISOCH_DELAY', 49)
# bmRequestType type field
class enum_libusb_request_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
LIBUSB_REQUEST_TYPE_STANDARD = enum_libusb_request_type.define('LIBUSB_REQUEST_TYPE_STANDARD', 0)
LIBUSB_REQUEST_TYPE_CLASS = enum_libusb_request_type.define('LIBUSB_REQUEST_TYPE_CLASS', 32)
LIBUSB_REQUEST_TYPE_VENDOR = enum_libusb_request_type.define('LIBUSB_REQUEST_TYPE_VENDOR', 64)
LIBUSB_REQUEST_TYPE_RESERVED = enum_libusb_request_type.define('LIBUSB_REQUEST_TYPE_RESERVED', 96)
# bmRequestType recipient field
class enum_libusb_request_recipient(Annotated[int, ctypes.c_uint32], c.Enum): pass
LIBUSB_RECIPIENT_DEVICE = enum_libusb_request_recipient.define('LIBUSB_RECIPIENT_DEVICE', 0)
LIBUSB_RECIPIENT_INTERFACE = enum_libusb_request_recipient.define('LIBUSB_RECIPIENT_INTERFACE', 1)
LIBUSB_RECIPIENT_ENDPOINT = enum_libusb_request_recipient.define('LIBUSB_RECIPIENT_ENDPOINT', 2)
LIBUSB_RECIPIENT_OTHER = enum_libusb_request_recipient.define('LIBUSB_RECIPIENT_OTHER', 3)
# isochronous endpoint synchronization / usage types
class enum_libusb_iso_sync_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
LIBUSB_ISO_SYNC_TYPE_NONE = enum_libusb_iso_sync_type.define('LIBUSB_ISO_SYNC_TYPE_NONE', 0)
LIBUSB_ISO_SYNC_TYPE_ASYNC = enum_libusb_iso_sync_type.define('LIBUSB_ISO_SYNC_TYPE_ASYNC', 1)
LIBUSB_ISO_SYNC_TYPE_ADAPTIVE = enum_libusb_iso_sync_type.define('LIBUSB_ISO_SYNC_TYPE_ADAPTIVE', 2)
LIBUSB_ISO_SYNC_TYPE_SYNC = enum_libusb_iso_sync_type.define('LIBUSB_ISO_SYNC_TYPE_SYNC', 3)
class enum_libusb_iso_usage_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
LIBUSB_ISO_USAGE_TYPE_DATA = enum_libusb_iso_usage_type.define('LIBUSB_ISO_USAGE_TYPE_DATA', 0)
LIBUSB_ISO_USAGE_TYPE_FEEDBACK = enum_libusb_iso_usage_type.define('LIBUSB_ISO_USAGE_TYPE_FEEDBACK', 1)
LIBUSB_ISO_USAGE_TYPE_IMPLICIT = enum_libusb_iso_usage_type.define('LIBUSB_ISO_USAGE_TYPE_IMPLICIT', 2)
# supported-speed bitmask (wSpeedsSupported) and capability attribute bits
class enum_libusb_supported_speed(Annotated[int, ctypes.c_uint32], c.Enum): pass
LIBUSB_LOW_SPEED_OPERATION = enum_libusb_supported_speed.define('LIBUSB_LOW_SPEED_OPERATION', 1)
LIBUSB_FULL_SPEED_OPERATION = enum_libusb_supported_speed.define('LIBUSB_FULL_SPEED_OPERATION', 2)
LIBUSB_HIGH_SPEED_OPERATION = enum_libusb_supported_speed.define('LIBUSB_HIGH_SPEED_OPERATION', 4)
LIBUSB_SUPER_SPEED_OPERATION = enum_libusb_supported_speed.define('LIBUSB_SUPER_SPEED_OPERATION', 8)
class enum_libusb_usb_2_0_extension_attributes(Annotated[int, ctypes.c_uint32], c.Enum): pass
LIBUSB_BM_LPM_SUPPORT = enum_libusb_usb_2_0_extension_attributes.define('LIBUSB_BM_LPM_SUPPORT', 2)
class enum_libusb_ss_usb_device_capability_attributes(Annotated[int, ctypes.c_uint32], c.Enum): pass
LIBUSB_BM_LTM_SUPPORT = enum_libusb_ss_usb_device_capability_attributes.define('LIBUSB_BM_LTM_SUPPORT', 2)
# BOS device-capability types
class enum_libusb_bos_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
LIBUSB_BT_WIRELESS_USB_DEVICE_CAPABILITY = enum_libusb_bos_type.define('LIBUSB_BT_WIRELESS_USB_DEVICE_CAPABILITY', 1)
LIBUSB_BT_USB_2_0_EXTENSION = enum_libusb_bos_type.define('LIBUSB_BT_USB_2_0_EXTENSION', 2)
LIBUSB_BT_SS_USB_DEVICE_CAPABILITY = enum_libusb_bos_type.define('LIBUSB_BT_SS_USB_DEVICE_CAPABILITY', 3)
LIBUSB_BT_CONTAINER_ID = enum_libusb_bos_type.define('LIBUSB_BT_CONTAINER_ID', 4)
LIBUSB_BT_PLATFORM_DESCRIPTOR = enum_libusb_bos_type.define('LIBUSB_BT_PLATFORM_DESCRIPTOR', 5)
# @c.record classes describe a C struct layout: SIZE is sizeof() in bytes and
# the second argument of each Annotated[...] is the field's byte offset.
# Standard USB device descriptor (mirrors struct libusb_device_descriptor).
@c.record
class struct_libusb_device_descriptor(c.Struct):
  SIZE = 18
  bLength: Annotated[uint8_t, 0]
  bDescriptorType: Annotated[uint8_t, 1]
  bcdUSB: Annotated[uint16_t, 2]
  bDeviceClass: Annotated[uint8_t, 4]
  bDeviceSubClass: Annotated[uint8_t, 5]
  bDeviceProtocol: Annotated[uint8_t, 6]
  bMaxPacketSize0: Annotated[uint8_t, 7]
  idVendor: Annotated[uint16_t, 8]
  idProduct: Annotated[uint16_t, 10]
  bcdDevice: Annotated[uint16_t, 12]
  iManufacturer: Annotated[uint8_t, 14]
  iProduct: Annotated[uint8_t, 15]
  iSerialNumber: Annotated[uint8_t, 16]
  bNumConfigurations: Annotated[uint8_t, 17]
# NOTE(review): these aliases are defined AFTER their first use above —
# presumably harmless because annotations are evaluated lazily and resolved by
# c.init_records() at the end of the module; confirm the file enables
# `from __future__ import annotations`.
uint8_t: TypeAlias = Annotated[int, ctypes.c_ubyte]
uint16_t: TypeAlias = Annotated[int, ctypes.c_uint16]
# Endpoint descriptor; `extra`/`extra_length` expose any unparsed extra
# descriptor bytes that followed it in the configuration blob.
@c.record
class struct_libusb_endpoint_descriptor(c.Struct):
  SIZE = 32
  bLength: Annotated[uint8_t, 0]
  bDescriptorType: Annotated[uint8_t, 1]
  bEndpointAddress: Annotated[uint8_t, 2]
  bmAttributes: Annotated[uint8_t, 3]
  wMaxPacketSize: Annotated[uint16_t, 4]
  bInterval: Annotated[uint8_t, 6]
  bRefresh: Annotated[uint8_t, 7]
  bSynchAddress: Annotated[uint8_t, 8]
  extra: Annotated[c.POINTER[Annotated[int, ctypes.c_ubyte]], 16]
  extra_length: Annotated[Annotated[int, ctypes.c_int32], 24]
# Interface Association Descriptor (groups interfaces into one function).
@c.record
class struct_libusb_interface_association_descriptor(c.Struct):
  SIZE = 8
  bLength: Annotated[uint8_t, 0]
  bDescriptorType: Annotated[uint8_t, 1]
  bFirstInterface: Annotated[uint8_t, 2]
  bInterfaceCount: Annotated[uint8_t, 3]
  bFunctionClass: Annotated[uint8_t, 4]
  bFunctionSubClass: Annotated[uint8_t, 5]
  bFunctionProtocol: Annotated[uint8_t, 6]
  iFunction: Annotated[uint8_t, 7]
# (pointer, length) pair returned by the IAD lookup functions below.
@c.record
class struct_libusb_interface_association_descriptor_array(c.Struct):
  SIZE = 16
  iad: Annotated[c.POINTER[struct_libusb_interface_association_descriptor], 0]
  length: Annotated[Annotated[int, ctypes.c_int32], 8]
# One alternate setting of an interface, with its endpoint array.
@c.record
class struct_libusb_interface_descriptor(c.Struct):
  SIZE = 40
  bLength: Annotated[uint8_t, 0]
  bDescriptorType: Annotated[uint8_t, 1]
  bInterfaceNumber: Annotated[uint8_t, 2]
  bAlternateSetting: Annotated[uint8_t, 3]
  bNumEndpoints: Annotated[uint8_t, 4]
  bInterfaceClass: Annotated[uint8_t, 5]
  bInterfaceSubClass: Annotated[uint8_t, 6]
  bInterfaceProtocol: Annotated[uint8_t, 7]
  iInterface: Annotated[uint8_t, 8]
  endpoint: Annotated[c.POINTER[struct_libusb_endpoint_descriptor], 16]
  extra: Annotated[c.POINTER[Annotated[int, ctypes.c_ubyte]], 24]
  extra_length: Annotated[Annotated[int, ctypes.c_int32], 32]
# An interface: array of its alternate settings.
@c.record
class struct_libusb_interface(c.Struct):
  SIZE = 16
  altsetting: Annotated[c.POINTER[struct_libusb_interface_descriptor], 0]
  num_altsetting: Annotated[Annotated[int, ctypes.c_int32], 8]
# Parsed configuration descriptor with its interface array.
@c.record
class struct_libusb_config_descriptor(c.Struct):
  SIZE = 40
  bLength: Annotated[uint8_t, 0]
  bDescriptorType: Annotated[uint8_t, 1]
  wTotalLength: Annotated[uint16_t, 2]
  bNumInterfaces: Annotated[uint8_t, 4]
  bConfigurationValue: Annotated[uint8_t, 5]
  iConfiguration: Annotated[uint8_t, 6]
  bmAttributes: Annotated[uint8_t, 7]
  MaxPower: Annotated[uint8_t, 8]
  interface: Annotated[c.POINTER[struct_libusb_interface], 16]
  extra: Annotated[c.POINTER[Annotated[int, ctypes.c_ubyte]], 24]
  extra_length: Annotated[Annotated[int, ctypes.c_int32], 32]
# SuperSpeed endpoint companion descriptor.
@c.record
class struct_libusb_ss_endpoint_companion_descriptor(c.Struct):
  SIZE = 6
  bLength: Annotated[uint8_t, 0]
  bDescriptorType: Annotated[uint8_t, 1]
  bMaxBurst: Annotated[uint8_t, 2]
  bmAttributes: Annotated[uint8_t, 3]
  wBytesPerInterval: Annotated[uint16_t, 4]
# Generic BOS device-capability header; the zero-length array at the tail
# models the C flexible array member holding capability-specific data.
@c.record
class struct_libusb_bos_dev_capability_descriptor(c.Struct):
  SIZE = 3
  bLength: Annotated[uint8_t, 0]
  bDescriptorType: Annotated[uint8_t, 1]
  bDevCapabilityType: Annotated[uint8_t, 2]
  dev_capability_data: Annotated[c.Array[uint8_t, Literal[0]], 3]
# Binary Object Store descriptor header (capability pointers follow at offset 8).
@c.record
class struct_libusb_bos_descriptor(c.Struct):
  SIZE = 8
  bLength: Annotated[uint8_t, 0]
  bDescriptorType: Annotated[uint8_t, 1]
  wTotalLength: Annotated[uint16_t, 2]
  bNumDeviceCaps: Annotated[uint8_t, 4]
  dev_capability: Annotated[c.Array[c.POINTER[struct_libusb_bos_dev_capability_descriptor], Literal[0]], 8]
# USB 2.0 extension capability (Link Power Management support flag etc.).
@c.record
class struct_libusb_usb_2_0_extension_descriptor(c.Struct):
  SIZE = 8
  bLength: Annotated[uint8_t, 0]
  bDescriptorType: Annotated[uint8_t, 1]
  bDevCapabilityType: Annotated[uint8_t, 2]
  bmAttributes: Annotated[uint32_t, 4]
uint32_t: TypeAlias = Annotated[int, ctypes.c_uint32]
# SuperSpeed device capability descriptor.
@c.record
class struct_libusb_ss_usb_device_capability_descriptor(c.Struct):
  SIZE = 10
  bLength: Annotated[uint8_t, 0]
  bDescriptorType: Annotated[uint8_t, 1]
  bDevCapabilityType: Annotated[uint8_t, 2]
  bmAttributes: Annotated[uint8_t, 3]
  wSpeedSupported: Annotated[uint16_t, 4]
  bFunctionalitySupport: Annotated[uint8_t, 6]
  bU1DevExitLat: Annotated[uint8_t, 7]
  bU2DevExitLat: Annotated[uint16_t, 8]
# Container ID capability descriptor (16-byte UUID identifying the device).
@c.record
class struct_libusb_container_id_descriptor(c.Struct):
  SIZE = 20
  bLength: Annotated[uint8_t, 0]
  bDescriptorType: Annotated[uint8_t, 1]
  bDevCapabilityType: Annotated[uint8_t, 2]
  bReserved: Annotated[uint8_t, 3]
  ContainerID: Annotated[c.Array[uint8_t, Literal[16]], 4]
# Platform capability descriptor (UUID plus variable-length capability data).
@c.record
class struct_libusb_platform_descriptor(c.Struct):
  SIZE = 20
  bLength: Annotated[uint8_t, 0]
  bDescriptorType: Annotated[uint8_t, 1]
  bDevCapabilityType: Annotated[uint8_t, 2]
  bReserved: Annotated[uint8_t, 3]
  PlatformCapabilityUUID: Annotated[c.Array[uint8_t, Literal[16]], 4]
  CapabilityData: Annotated[c.Array[uint8_t, Literal[0]], 20]
# The 8-byte setup packet that prefixes every USB control transfer.
@c.record
class struct_libusb_control_setup(c.Struct):
  SIZE = 8
  bmRequestType: Annotated[uint8_t, 0]
  bRequest: Annotated[uint8_t, 1]
  wValue: Annotated[uint16_t, 2]
  wIndex: Annotated[uint16_t, 4]
  wLength: Annotated[uint16_t, 6]
# Opaque handle types: libusb never exposes their layout, so they are declared
# as empty ctypes.Structure subclasses and only used behind pointers.
class struct_libusb_context(ctypes.Structure): pass
class struct_libusb_device(ctypes.Structure): pass
class struct_libusb_device_handle(ctypes.Structure): pass
# Library version record returned by libusb_get_version().
@c.record
class struct_libusb_version(c.Struct):
  SIZE = 24
  major: Annotated[uint16_t, 0]
  minor: Annotated[uint16_t, 2]
  micro: Annotated[uint16_t, 4]
  nano: Annotated[uint16_t, 6]
  rc: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 8]
  describe: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 16]
# Convenience aliases matching the C typedef names.
libusb_context: TypeAlias = struct_libusb_context
libusb_device: TypeAlias = struct_libusb_device
libusb_device_handle: TypeAlias = struct_libusb_device_handle
# Negotiated connection speed, as reported by libusb_get_device_speed().
class enum_libusb_speed(Annotated[int, ctypes.c_uint32], c.Enum): pass
LIBUSB_SPEED_UNKNOWN = enum_libusb_speed.define('LIBUSB_SPEED_UNKNOWN', 0)
LIBUSB_SPEED_LOW = enum_libusb_speed.define('LIBUSB_SPEED_LOW', 1)
LIBUSB_SPEED_FULL = enum_libusb_speed.define('LIBUSB_SPEED_FULL', 2)
LIBUSB_SPEED_HIGH = enum_libusb_speed.define('LIBUSB_SPEED_HIGH', 3)
LIBUSB_SPEED_SUPER = enum_libusb_speed.define('LIBUSB_SPEED_SUPER', 4)
LIBUSB_SPEED_SUPER_PLUS = enum_libusb_speed.define('LIBUSB_SPEED_SUPER_PLUS', 5)
# Error codes: 0 on success, negative on failure (signed int32 on purpose).
class enum_libusb_error(Annotated[int, ctypes.c_int32], c.Enum): pass
LIBUSB_SUCCESS = enum_libusb_error.define('LIBUSB_SUCCESS', 0)
LIBUSB_ERROR_IO = enum_libusb_error.define('LIBUSB_ERROR_IO', -1)
LIBUSB_ERROR_INVALID_PARAM = enum_libusb_error.define('LIBUSB_ERROR_INVALID_PARAM', -2)
LIBUSB_ERROR_ACCESS = enum_libusb_error.define('LIBUSB_ERROR_ACCESS', -3)
LIBUSB_ERROR_NO_DEVICE = enum_libusb_error.define('LIBUSB_ERROR_NO_DEVICE', -4)
LIBUSB_ERROR_NOT_FOUND = enum_libusb_error.define('LIBUSB_ERROR_NOT_FOUND', -5)
LIBUSB_ERROR_BUSY = enum_libusb_error.define('LIBUSB_ERROR_BUSY', -6)
LIBUSB_ERROR_TIMEOUT = enum_libusb_error.define('LIBUSB_ERROR_TIMEOUT', -7)
LIBUSB_ERROR_OVERFLOW = enum_libusb_error.define('LIBUSB_ERROR_OVERFLOW', -8)
LIBUSB_ERROR_PIPE = enum_libusb_error.define('LIBUSB_ERROR_PIPE', -9)
LIBUSB_ERROR_INTERRUPTED = enum_libusb_error.define('LIBUSB_ERROR_INTERRUPTED', -10)
LIBUSB_ERROR_NO_MEM = enum_libusb_error.define('LIBUSB_ERROR_NO_MEM', -11)
LIBUSB_ERROR_NOT_SUPPORTED = enum_libusb_error.define('LIBUSB_ERROR_NOT_SUPPORTED', -12)
LIBUSB_ERROR_OTHER = enum_libusb_error.define('LIBUSB_ERROR_OTHER', -99)
# Transfer endpoint type codes.
class enum_libusb_transfer_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
LIBUSB_TRANSFER_TYPE_CONTROL = enum_libusb_transfer_type.define('LIBUSB_TRANSFER_TYPE_CONTROL', 0)
LIBUSB_TRANSFER_TYPE_ISOCHRONOUS = enum_libusb_transfer_type.define('LIBUSB_TRANSFER_TYPE_ISOCHRONOUS', 1)
LIBUSB_TRANSFER_TYPE_BULK = enum_libusb_transfer_type.define('LIBUSB_TRANSFER_TYPE_BULK', 2)
LIBUSB_TRANSFER_TYPE_INTERRUPT = enum_libusb_transfer_type.define('LIBUSB_TRANSFER_TYPE_INTERRUPT', 3)
LIBUSB_TRANSFER_TYPE_BULK_STREAM = enum_libusb_transfer_type.define('LIBUSB_TRANSFER_TYPE_BULK_STREAM', 4)
# Completion status of an asynchronous transfer.
class enum_libusb_transfer_status(Annotated[int, ctypes.c_uint32], c.Enum): pass
LIBUSB_TRANSFER_COMPLETED = enum_libusb_transfer_status.define('LIBUSB_TRANSFER_COMPLETED', 0)
LIBUSB_TRANSFER_ERROR = enum_libusb_transfer_status.define('LIBUSB_TRANSFER_ERROR', 1)
LIBUSB_TRANSFER_TIMED_OUT = enum_libusb_transfer_status.define('LIBUSB_TRANSFER_TIMED_OUT', 2)
LIBUSB_TRANSFER_CANCELLED = enum_libusb_transfer_status.define('LIBUSB_TRANSFER_CANCELLED', 3)
LIBUSB_TRANSFER_STALL = enum_libusb_transfer_status.define('LIBUSB_TRANSFER_STALL', 4)
LIBUSB_TRANSFER_NO_DEVICE = enum_libusb_transfer_status.define('LIBUSB_TRANSFER_NO_DEVICE', 5)
LIBUSB_TRANSFER_OVERFLOW = enum_libusb_transfer_status.define('LIBUSB_TRANSFER_OVERFLOW', 6)
# Bit flags for struct_libusb_transfer.flags.
class enum_libusb_transfer_flags(Annotated[int, ctypes.c_uint32], c.Enum): pass
LIBUSB_TRANSFER_SHORT_NOT_OK = enum_libusb_transfer_flags.define('LIBUSB_TRANSFER_SHORT_NOT_OK', 1)
LIBUSB_TRANSFER_FREE_BUFFER = enum_libusb_transfer_flags.define('LIBUSB_TRANSFER_FREE_BUFFER', 2)
LIBUSB_TRANSFER_FREE_TRANSFER = enum_libusb_transfer_flags.define('LIBUSB_TRANSFER_FREE_TRANSFER', 4)
LIBUSB_TRANSFER_ADD_ZERO_PACKET = enum_libusb_transfer_flags.define('LIBUSB_TRANSFER_ADD_ZERO_PACKET', 8)
# Per-packet status/length for isochronous transfers.
@c.record
class struct_libusb_iso_packet_descriptor(c.Struct):
  SIZE = 12
  length: Annotated[Annotated[int, ctypes.c_uint32], 0]
  actual_length: Annotated[Annotated[int, ctypes.c_uint32], 4]
  status: Annotated[enum_libusb_transfer_status, 8]
# Asynchronous transfer descriptor; the zero-length iso_packet_desc array
# models the C flexible array member. NOTE(review): `callback` references
# libusb_transfer_cb_fn, which is only defined just below — presumably
# resolved lazily by c.init_records(); confirm.
@c.record
class struct_libusb_transfer(c.Struct):
  SIZE = 64
  dev_handle: Annotated[c.POINTER[libusb_device_handle], 0]
  flags: Annotated[uint8_t, 8]
  endpoint: Annotated[Annotated[int, ctypes.c_ubyte], 9]
  type: Annotated[Annotated[int, ctypes.c_ubyte], 10]
  timeout: Annotated[Annotated[int, ctypes.c_uint32], 12]
  status: Annotated[enum_libusb_transfer_status, 16]
  length: Annotated[Annotated[int, ctypes.c_int32], 20]
  actual_length: Annotated[Annotated[int, ctypes.c_int32], 24]
  callback: Annotated[libusb_transfer_cb_fn, 32]
  user_data: Annotated[ctypes.c_void_p, 40]
  buffer: Annotated[c.POINTER[Annotated[int, ctypes.c_ubyte]], 48]
  num_iso_packets: Annotated[Annotated[int, ctypes.c_int32], 56]
  iso_packet_desc: Annotated[c.Array[struct_libusb_iso_packet_descriptor, Literal[0]], 60]
# Completion callback: void (*)(struct libusb_transfer *).
libusb_transfer_cb_fn: TypeAlias = c.CFUNCTYPE[None, [c.POINTER[struct_libusb_transfer]]]
# Runtime capability queries for libusb_has_capability().
class enum_libusb_capability(Annotated[int, ctypes.c_uint32], c.Enum): pass
LIBUSB_CAP_HAS_CAPABILITY = enum_libusb_capability.define('LIBUSB_CAP_HAS_CAPABILITY', 0)
LIBUSB_CAP_HAS_HOTPLUG = enum_libusb_capability.define('LIBUSB_CAP_HAS_HOTPLUG', 1)
LIBUSB_CAP_HAS_HID_ACCESS = enum_libusb_capability.define('LIBUSB_CAP_HAS_HID_ACCESS', 256)
LIBUSB_CAP_SUPPORTS_DETACH_KERNEL_DRIVER = enum_libusb_capability.define('LIBUSB_CAP_SUPPORTS_DETACH_KERNEL_DRIVER', 257)
# Log verbosity levels and log-callback scope.
class enum_libusb_log_level(Annotated[int, ctypes.c_uint32], c.Enum): pass
LIBUSB_LOG_LEVEL_NONE = enum_libusb_log_level.define('LIBUSB_LOG_LEVEL_NONE', 0)
LIBUSB_LOG_LEVEL_ERROR = enum_libusb_log_level.define('LIBUSB_LOG_LEVEL_ERROR', 1)
LIBUSB_LOG_LEVEL_WARNING = enum_libusb_log_level.define('LIBUSB_LOG_LEVEL_WARNING', 2)
LIBUSB_LOG_LEVEL_INFO = enum_libusb_log_level.define('LIBUSB_LOG_LEVEL_INFO', 3)
LIBUSB_LOG_LEVEL_DEBUG = enum_libusb_log_level.define('LIBUSB_LOG_LEVEL_DEBUG', 4)
class enum_libusb_log_cb_mode(Annotated[int, ctypes.c_uint32], c.Enum): pass
LIBUSB_LOG_CB_GLOBAL = enum_libusb_log_cb_mode.define('LIBUSB_LOG_CB_GLOBAL', 1)
LIBUSB_LOG_CB_CONTEXT = enum_libusb_log_cb_mode.define('LIBUSB_LOG_CB_CONTEXT', 2)
# Options accepted by libusb_set_option()/libusb_init_context().
class enum_libusb_option(Annotated[int, ctypes.c_uint32], c.Enum): pass
LIBUSB_OPTION_LOG_LEVEL = enum_libusb_option.define('LIBUSB_OPTION_LOG_LEVEL', 0)
LIBUSB_OPTION_USE_USBDK = enum_libusb_option.define('LIBUSB_OPTION_USE_USBDK', 1)
LIBUSB_OPTION_NO_DEVICE_DISCOVERY = enum_libusb_option.define('LIBUSB_OPTION_NO_DEVICE_DISCOVERY', 2)
LIBUSB_OPTION_LOG_CB = enum_libusb_option.define('LIBUSB_OPTION_LOG_CB', 3)
LIBUSB_OPTION_MAX = enum_libusb_option.define('LIBUSB_OPTION_MAX', 4)
libusb_log_cb: TypeAlias = c.CFUNCTYPE[None, [c.POINTER[struct_libusb_context], enum_libusb_log_level, c.POINTER[Annotated[bytes, ctypes.c_char]]]]
# Option record passed to libusb_init_context().
@c.record
class struct_libusb_init_option(c.Struct):
  SIZE = 16
  option: Annotated[enum_libusb_option, 0]
  value: Annotated[struct_libusb_init_option_value, 8]
# Both fields sit at offset 0: this record models the C union of the option value.
@c.record
class struct_libusb_init_option_value(c.Struct):
  SIZE = 8
  ival: Annotated[Annotated[int, ctypes.c_int32], 0]
  log_cbval: Annotated[libusb_log_cb, 0]
# --- Foreign-function bindings. @dll.bind generates the ctypes wrapper from
# --- the annotated Python signature; bodies are intentionally `...` stubs.
# Library init / teardown / metadata.
@dll.bind
def libusb_init(ctx:c.POINTER[c.POINTER[libusb_context]]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def libusb_init_context(ctx:c.POINTER[c.POINTER[libusb_context]], options:c.Array[struct_libusb_init_option, Literal[0]], num_options:Annotated[int, ctypes.c_int32]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def libusb_exit(ctx:c.POINTER[libusb_context]) -> None: ...
@dll.bind
def libusb_set_debug(ctx:c.POINTER[libusb_context], level:Annotated[int, ctypes.c_int32]) -> None: ...
@dll.bind
def libusb_set_log_cb(ctx:c.POINTER[libusb_context], cb:libusb_log_cb, mode:Annotated[int, ctypes.c_int32]) -> None: ...
@dll.bind
def libusb_get_version() -> c.POINTER[struct_libusb_version]: ...
@dll.bind
def libusb_has_capability(capability:uint32_t) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def libusb_error_name(errcode:Annotated[int, ctypes.c_int32]) -> c.POINTER[Annotated[bytes, ctypes.c_char]]: ...
@dll.bind
def libusb_setlocale(locale:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def libusb_strerror(errcode:Annotated[int, ctypes.c_int32]) -> c.POINTER[Annotated[bytes, ctypes.c_char]]: ...
ssize_t: TypeAlias = Annotated[int, ctypes.c_int64]
# Device enumeration and reference counting.
@dll.bind
def libusb_get_device_list(ctx:c.POINTER[libusb_context], list:c.POINTER[c.POINTER[c.POINTER[libusb_device]]]) -> ssize_t: ...
@dll.bind
def libusb_free_device_list(list:c.POINTER[c.POINTER[libusb_device]], unref_devices:Annotated[int, ctypes.c_int32]) -> None: ...
@dll.bind
def libusb_ref_device(dev:c.POINTER[libusb_device]) -> c.POINTER[libusb_device]: ...
@dll.bind
def libusb_unref_device(dev:c.POINTER[libusb_device]) -> None: ...
# Descriptor retrieval; the free_* counterparts release what the get_* allocated.
@dll.bind
def libusb_get_configuration(dev:c.POINTER[libusb_device_handle], config:c.POINTER[Annotated[int, ctypes.c_int32]]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def libusb_get_device_descriptor(dev:c.POINTER[libusb_device], desc:c.POINTER[struct_libusb_device_descriptor]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def libusb_get_active_config_descriptor(dev:c.POINTER[libusb_device], config:c.POINTER[c.POINTER[struct_libusb_config_descriptor]]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def libusb_get_config_descriptor(dev:c.POINTER[libusb_device], config_index:uint8_t, config:c.POINTER[c.POINTER[struct_libusb_config_descriptor]]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def libusb_get_config_descriptor_by_value(dev:c.POINTER[libusb_device], bConfigurationValue:uint8_t, config:c.POINTER[c.POINTER[struct_libusb_config_descriptor]]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def libusb_free_config_descriptor(config:c.POINTER[struct_libusb_config_descriptor]) -> None: ...
@dll.bind
def libusb_get_ss_endpoint_companion_descriptor(ctx:c.POINTER[libusb_context], endpoint:c.POINTER[struct_libusb_endpoint_descriptor], ep_comp:c.POINTER[c.POINTER[struct_libusb_ss_endpoint_companion_descriptor]]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def libusb_free_ss_endpoint_companion_descriptor(ep_comp:c.POINTER[struct_libusb_ss_endpoint_companion_descriptor]) -> None: ...
@dll.bind
def libusb_get_bos_descriptor(dev_handle:c.POINTER[libusb_device_handle], bos:c.POINTER[c.POINTER[struct_libusb_bos_descriptor]]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def libusb_free_bos_descriptor(bos:c.POINTER[struct_libusb_bos_descriptor]) -> None: ...
@dll.bind
def libusb_get_usb_2_0_extension_descriptor(ctx:c.POINTER[libusb_context], dev_cap:c.POINTER[struct_libusb_bos_dev_capability_descriptor], usb_2_0_extension:c.POINTER[c.POINTER[struct_libusb_usb_2_0_extension_descriptor]]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def libusb_free_usb_2_0_extension_descriptor(usb_2_0_extension:c.POINTER[struct_libusb_usb_2_0_extension_descriptor]) -> None: ...
@dll.bind
def libusb_get_ss_usb_device_capability_descriptor(ctx:c.POINTER[libusb_context], dev_cap:c.POINTER[struct_libusb_bos_dev_capability_descriptor], ss_usb_device_cap:c.POINTER[c.POINTER[struct_libusb_ss_usb_device_capability_descriptor]]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def libusb_free_ss_usb_device_capability_descriptor(ss_usb_device_cap:c.POINTER[struct_libusb_ss_usb_device_capability_descriptor]) -> None: ...
@dll.bind
def libusb_get_container_id_descriptor(ctx:c.POINTER[libusb_context], dev_cap:c.POINTER[struct_libusb_bos_dev_capability_descriptor], container_id:c.POINTER[c.POINTER[struct_libusb_container_id_descriptor]]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def libusb_free_container_id_descriptor(container_id:c.POINTER[struct_libusb_container_id_descriptor]) -> None: ...
@dll.bind
def libusb_get_platform_descriptor(ctx:c.POINTER[libusb_context], dev_cap:c.POINTER[struct_libusb_bos_dev_capability_descriptor], platform_descriptor:c.POINTER[c.POINTER[struct_libusb_platform_descriptor]]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def libusb_free_platform_descriptor(platform_descriptor:c.POINTER[struct_libusb_platform_descriptor]) -> None: ...
# Topology and addressing queries.
@dll.bind
def libusb_get_bus_number(dev:c.POINTER[libusb_device]) -> uint8_t: ...
@dll.bind
def libusb_get_port_number(dev:c.POINTER[libusb_device]) -> uint8_t: ...
@dll.bind
def libusb_get_port_numbers(dev:c.POINTER[libusb_device], port_numbers:c.POINTER[uint8_t], port_numbers_len:Annotated[int, ctypes.c_int32]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def libusb_get_port_path(ctx:c.POINTER[libusb_context], dev:c.POINTER[libusb_device], path:c.POINTER[uint8_t], path_length:uint8_t) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def libusb_get_parent(dev:c.POINTER[libusb_device]) -> c.POINTER[libusb_device]: ...
@dll.bind
def libusb_get_device_address(dev:c.POINTER[libusb_device]) -> uint8_t: ...
@dll.bind
def libusb_get_device_speed(dev:c.POINTER[libusb_device]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def libusb_get_max_packet_size(dev:c.POINTER[libusb_device], endpoint:Annotated[int, ctypes.c_ubyte]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def libusb_get_max_iso_packet_size(dev:c.POINTER[libusb_device], endpoint:Annotated[int, ctypes.c_ubyte]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def libusb_get_max_alt_packet_size(dev:c.POINTER[libusb_device], interface_number:Annotated[int, ctypes.c_int32], alternate_setting:Annotated[int, ctypes.c_int32], endpoint:Annotated[int, ctypes.c_ubyte]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def libusb_get_interface_association_descriptors(dev:c.POINTER[libusb_device], config_index:uint8_t, iad_array:c.POINTER[c.POINTER[struct_libusb_interface_association_descriptor_array]]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def libusb_get_active_interface_association_descriptors(dev:c.POINTER[libusb_device], iad_array:c.POINTER[c.POINTER[struct_libusb_interface_association_descriptor_array]]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def libusb_free_interface_association_descriptors(iad_array:c.POINTER[struct_libusb_interface_association_descriptor_array]) -> None: ...
intptr_t: TypeAlias = Annotated[int, ctypes.c_int64]
# Device open/close and interface claiming.
@dll.bind
def libusb_wrap_sys_device(ctx:c.POINTER[libusb_context], sys_dev:intptr_t, dev_handle:c.POINTER[c.POINTER[libusb_device_handle]]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def libusb_open(dev:c.POINTER[libusb_device], dev_handle:c.POINTER[c.POINTER[libusb_device_handle]]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def libusb_close(dev_handle:c.POINTER[libusb_device_handle]) -> None: ...
@dll.bind
def libusb_get_device(dev_handle:c.POINTER[libusb_device_handle]) -> c.POINTER[libusb_device]: ...
@dll.bind
def libusb_set_configuration(dev_handle:c.POINTER[libusb_device_handle], configuration:Annotated[int, ctypes.c_int32]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def libusb_claim_interface(dev_handle:c.POINTER[libusb_device_handle], interface_number:Annotated[int, ctypes.c_int32]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def libusb_release_interface(dev_handle:c.POINTER[libusb_device_handle], interface_number:Annotated[int, ctypes.c_int32]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def libusb_open_device_with_vid_pid(ctx:c.POINTER[libusb_context], vendor_id:uint16_t, product_id:uint16_t) -> c.POINTER[libusb_device_handle]: ...
@dll.bind
def libusb_set_interface_alt_setting(dev_handle:c.POINTER[libusb_device_handle], interface_number:Annotated[int, ctypes.c_int32], alternate_setting:Annotated[int, ctypes.c_int32]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def libusb_clear_halt(dev_handle:c.POINTER[libusb_device_handle], endpoint:Annotated[int, ctypes.c_ubyte]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def libusb_reset_device(dev_handle:c.POINTER[libusb_device_handle]) -> Annotated[int, ctypes.c_int32]: ...
# Bulk streams and device-memory helpers.
@dll.bind
def libusb_alloc_streams(dev_handle:c.POINTER[libusb_device_handle], num_streams:uint32_t, endpoints:c.POINTER[Annotated[int, ctypes.c_ubyte]], num_endpoints:Annotated[int, ctypes.c_int32]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def libusb_free_streams(dev_handle:c.POINTER[libusb_device_handle], endpoints:c.POINTER[Annotated[int, ctypes.c_ubyte]], num_endpoints:Annotated[int, ctypes.c_int32]) -> Annotated[int, ctypes.c_int32]: ...
size_t: TypeAlias = Annotated[int, ctypes.c_uint64]
@dll.bind
def libusb_dev_mem_alloc(dev_handle:c.POINTER[libusb_device_handle], length:size_t) -> c.POINTER[Annotated[int, ctypes.c_ubyte]]: ...
@dll.bind
def libusb_dev_mem_free(dev_handle:c.POINTER[libusb_device_handle], buffer:c.POINTER[Annotated[int, ctypes.c_ubyte]], length:size_t) -> Annotated[int, ctypes.c_int32]: ...
# Kernel-driver management.
@dll.bind
def libusb_kernel_driver_active(dev_handle:c.POINTER[libusb_device_handle], interface_number:Annotated[int, ctypes.c_int32]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def libusb_detach_kernel_driver(dev_handle:c.POINTER[libusb_device_handle], interface_number:Annotated[int, ctypes.c_int32]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def libusb_attach_kernel_driver(dev_handle:c.POINTER[libusb_device_handle], interface_number:Annotated[int, ctypes.c_int32]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def libusb_set_auto_detach_kernel_driver(dev_handle:c.POINTER[libusb_device_handle], enable:Annotated[int, ctypes.c_int32]) -> Annotated[int, ctypes.c_int32]: ...
# Asynchronous transfer lifecycle.
@dll.bind
def libusb_alloc_transfer(iso_packets:Annotated[int, ctypes.c_int32]) -> c.POINTER[struct_libusb_transfer]: ...
@dll.bind
def libusb_submit_transfer(transfer:c.POINTER[struct_libusb_transfer]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def libusb_cancel_transfer(transfer:c.POINTER[struct_libusb_transfer]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def libusb_free_transfer(transfer:c.POINTER[struct_libusb_transfer]) -> None: ...
@dll.bind
def libusb_transfer_set_stream_id(transfer:c.POINTER[struct_libusb_transfer], stream_id:uint32_t) -> None: ...
@dll.bind
def libusb_transfer_get_stream_id(transfer:c.POINTER[struct_libusb_transfer]) -> uint32_t: ...
# Synchronous (blocking) transfer helpers.
@dll.bind
def libusb_control_transfer(dev_handle:c.POINTER[libusb_device_handle], request_type:uint8_t, bRequest:uint8_t, wValue:uint16_t, wIndex:uint16_t, data:c.POINTER[Annotated[int, ctypes.c_ubyte]], wLength:uint16_t, timeout:Annotated[int, ctypes.c_uint32]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def libusb_bulk_transfer(dev_handle:c.POINTER[libusb_device_handle], endpoint:Annotated[int, ctypes.c_ubyte], data:c.POINTER[Annotated[int, ctypes.c_ubyte]], length:Annotated[int, ctypes.c_int32], actual_length:c.POINTER[Annotated[int, ctypes.c_int32]], timeout:Annotated[int, ctypes.c_uint32]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def libusb_interrupt_transfer(dev_handle:c.POINTER[libusb_device_handle], endpoint:Annotated[int, ctypes.c_ubyte], data:c.POINTER[Annotated[int, ctypes.c_ubyte]], length:Annotated[int, ctypes.c_int32], actual_length:c.POINTER[Annotated[int, ctypes.c_int32]], timeout:Annotated[int, ctypes.c_uint32]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def libusb_get_string_descriptor_ascii(dev_handle:c.POINTER[libusb_device_handle], desc_index:uint8_t, data:c.POINTER[Annotated[int, ctypes.c_ubyte]], length:Annotated[int, ctypes.c_int32]) -> Annotated[int, ctypes.c_int32]: ...
# Event-lock primitives for multi-threaded event handling.
@dll.bind
def libusb_try_lock_events(ctx:c.POINTER[libusb_context]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def libusb_lock_events(ctx:c.POINTER[libusb_context]) -> None: ...
@dll.bind
def libusb_unlock_events(ctx:c.POINTER[libusb_context]) -> None: ...
@dll.bind
def libusb_event_handling_ok(ctx:c.POINTER[libusb_context]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def libusb_event_handler_active(ctx:c.POINTER[libusb_context]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def libusb_interrupt_event_handler(ctx:c.POINTER[libusb_context]) -> None: ...
@dll.bind
def libusb_lock_event_waiters(ctx:c.POINTER[libusb_context]) -> None: ...
@dll.bind
def libusb_unlock_event_waiters(ctx:c.POINTER[libusb_context]) -> None: ...
# POSIX struct timeval layout used by the timeout-taking event functions.
@c.record
class struct_timeval(c.Struct):
  SIZE = 16
  tv_sec: Annotated[Annotated[int, ctypes.c_int64], 0]
  tv_usec: Annotated[Annotated[int, ctypes.c_int64], 8]
__time_t: TypeAlias = Annotated[int, ctypes.c_int64]
__suseconds_t: TypeAlias = Annotated[int, ctypes.c_int64]
# Blocking event-handling entry points.
@dll.bind
def libusb_wait_for_event(ctx:c.POINTER[libusb_context], tv:c.POINTER[struct_timeval]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def libusb_handle_events_timeout(ctx:c.POINTER[libusb_context], tv:c.POINTER[struct_timeval]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def libusb_handle_events_timeout_completed(ctx:c.POINTER[libusb_context], tv:c.POINTER[struct_timeval], completed:c.POINTER[Annotated[int, ctypes.c_int32]]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def libusb_handle_events(ctx:c.POINTER[libusb_context]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def libusb_handle_events_completed(ctx:c.POINTER[libusb_context], completed:c.POINTER[Annotated[int, ctypes.c_int32]]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def libusb_handle_events_locked(ctx:c.POINTER[libusb_context], tv:c.POINTER[struct_timeval]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def libusb_pollfds_handle_timeouts(ctx:c.POINTER[libusb_context]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def libusb_get_next_timeout(ctx:c.POINTER[libusb_context], tv:c.POINTER[struct_timeval]) -> Annotated[int, ctypes.c_int32]: ...
# File-descriptor polling integration.
@c.record
class struct_libusb_pollfd(c.Struct):
  SIZE = 8
  fd: Annotated[Annotated[int, ctypes.c_int32], 0]
  events: Annotated[Annotated[int, ctypes.c_int16], 4]
libusb_pollfd_added_cb: TypeAlias = c.CFUNCTYPE[None, [Annotated[int, ctypes.c_int32], Annotated[int, ctypes.c_int16], ctypes.c_void_p]]
libusb_pollfd_removed_cb: TypeAlias = c.CFUNCTYPE[None, [Annotated[int, ctypes.c_int32], ctypes.c_void_p]]
@dll.bind
def libusb_get_pollfds(ctx:c.POINTER[libusb_context]) -> c.POINTER[c.POINTER[struct_libusb_pollfd]]: ...
@dll.bind
def libusb_free_pollfds(pollfds:c.POINTER[c.POINTER[struct_libusb_pollfd]]) -> None: ...
@dll.bind
def libusb_set_pollfd_notifiers(ctx:c.POINTER[libusb_context], added_cb:libusb_pollfd_added_cb, removed_cb:libusb_pollfd_removed_cb, user_data:ctypes.c_void_p) -> None: ...
# Hotplug (device arrival/removal) callback API.
libusb_hotplug_callback_handle: TypeAlias = Annotated[int, ctypes.c_int32]
class libusb_hotplug_event(Annotated[int, ctypes.c_uint32], c.Enum): pass
LIBUSB_HOTPLUG_EVENT_DEVICE_ARRIVED = libusb_hotplug_event.define('LIBUSB_HOTPLUG_EVENT_DEVICE_ARRIVED', 1)
LIBUSB_HOTPLUG_EVENT_DEVICE_LEFT = libusb_hotplug_event.define('LIBUSB_HOTPLUG_EVENT_DEVICE_LEFT', 2)
class libusb_hotplug_flag(Annotated[int, ctypes.c_uint32], c.Enum): pass
LIBUSB_HOTPLUG_ENUMERATE = libusb_hotplug_flag.define('LIBUSB_HOTPLUG_ENUMERATE', 1)
libusb_hotplug_callback_fn: TypeAlias = c.CFUNCTYPE[Annotated[int, ctypes.c_int32], [c.POINTER[struct_libusb_context], c.POINTER[struct_libusb_device], libusb_hotplug_event, ctypes.c_void_p]]
@dll.bind
def libusb_hotplug_register_callback(ctx:c.POINTER[libusb_context], events:Annotated[int, ctypes.c_int32], flags:Annotated[int, ctypes.c_int32], vendor_id:Annotated[int, ctypes.c_int32], product_id:Annotated[int, ctypes.c_int32], dev_class:Annotated[int, ctypes.c_int32], cb_fn:libusb_hotplug_callback_fn, user_data:ctypes.c_void_p, callback_handle:c.POINTER[libusb_hotplug_callback_handle]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def libusb_hotplug_deregister_callback(ctx:c.POINTER[libusb_context], callback_handle:libusb_hotplug_callback_handle) -> None: ...
@dll.bind
def libusb_hotplug_get_user_data(ctx:c.POINTER[libusb_context], callback_handle:libusb_hotplug_callback_handle) -> ctypes.c_void_p: ...
# NOTE(review): the C libusb_set_option is variadic; this binding only passes
# the option code itself — presumably callers here never need a value argument.
@dll.bind
def libusb_set_option(ctx:c.POINTER[libusb_context], option:enum_libusb_option) -> Annotated[int, ctypes.c_int32]: ...
c.init_records()
# Macro constants lifted from libusb.h. NOTE: LIBUSB_DEPRECATED_FOR mirrors a C
# attribute macro and has no Python meaning; calling it would raise NameError.
LIBUSB_DEPRECATED_FOR = lambda f: __attribute__ ((deprecated)) # type: ignore
LIBUSB_API_VERSION = 0x0100010A # type: ignore
LIBUSBX_API_VERSION = LIBUSB_API_VERSION # type: ignore
# Fixed sizes (in bytes) of the standard USB descriptors.
LIBUSB_DT_DEVICE_SIZE = 18 # type: ignore
LIBUSB_DT_CONFIG_SIZE = 9 # type: ignore
LIBUSB_DT_INTERFACE_SIZE = 9 # type: ignore
LIBUSB_DT_ENDPOINT_SIZE = 7 # type: ignore
LIBUSB_DT_ENDPOINT_AUDIO_SIZE = 9 # type: ignore
LIBUSB_DT_HUB_NONVAR_SIZE = 7 # type: ignore
LIBUSB_DT_SS_ENDPOINT_COMPANION_SIZE = 6 # type: ignore
LIBUSB_DT_BOS_SIZE = 5 # type: ignore
LIBUSB_DT_DEVICE_CAPABILITY_SIZE = 3 # type: ignore
LIBUSB_BT_USB_2_0_EXTENSION_SIZE = 7 # type: ignore
LIBUSB_BT_SS_USB_DEVICE_CAPABILITY_SIZE = 10 # type: ignore
LIBUSB_BT_CONTAINER_ID_SIZE = 20 # type: ignore
LIBUSB_BT_PLATFORM_DESCRIPTOR_MIN_SIZE = 20 # type: ignore
LIBUSB_DT_BOS_MAX_SIZE = (LIBUSB_DT_BOS_SIZE + LIBUSB_BT_USB_2_0_EXTENSION_SIZE + LIBUSB_BT_SS_USB_DEVICE_CAPABILITY_SIZE + LIBUSB_BT_CONTAINER_ID_SIZE) # type: ignore
# Bit masks for endpoint address / attribute fields.
LIBUSB_ENDPOINT_ADDRESS_MASK = 0x0f # type: ignore
LIBUSB_ENDPOINT_DIR_MASK = 0x80 # type: ignore
LIBUSB_TRANSFER_TYPE_MASK = 0x03 # type: ignore
LIBUSB_ISO_SYNC_TYPE_MASK = 0x0c # type: ignore
LIBUSB_ISO_USAGE_TYPE_MASK = 0x30 # type: ignore
LIBUSB_ERROR_COUNT = 14 # type: ignore
LIBUSB_OPTION_WEAK_AUTHORITY = LIBUSB_OPTION_NO_DEVICE_DISCOVERY # type: ignore
LIBUSB_HOTPLUG_NO_FLAGS = 0 # type: ignore
LIBUSB_HOTPLUG_MATCH_ANY = -1 # type: ignore
"repo_id": "tinygrad/tinygrad",
"file_path": "tinygrad/runtime/autogen/libusb.py",
"license": "MIT License",
"lines": 603,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
tinygrad/tinygrad:tinygrad/runtime/support/usb.py | import ctypes, struct, dataclasses, array, itertools
from typing import Sequence
from tinygrad.runtime.autogen import libusb
from tinygrad.helpers import DEBUG, to_mv, round_up, OSX, getenv
from tinygrad.runtime.support.hcq import MMIOInterface
class USB3:
  """Bulk transport to a USB3 bridge device via libusb.

  Two wire protocols are supported:
    * stream mode (default): command/status/data bulk endpoints with up to
      `max_streams` commands in flight, each tagged with its own stream id.
    * Bulk-Only Transport (`use_bot=True`): one synchronous CBW/DATA/CSW
      exchange at a time, for bridges without stream support.
  """
  def __init__(self, vendor:int, dev:int, ep_data_in:int, ep_stat_in:int, ep_data_out:int, ep_cmd_out:int, max_streams:int=31, use_bot=False):
    self.vendor, self.dev = vendor, dev
    self.ep_data_in, self.ep_stat_in, self.ep_data_out, self.ep_cmd_out = ep_data_in, ep_stat_in, ep_data_out, ep_cmd_out
    self.max_streams, self.use_bot = max_streams, use_bot

    self.ctx = ctypes.POINTER(libusb.struct_libusb_context)()
    if libusb.libusb_init(ctypes.byref(self.ctx)): raise RuntimeError("libusb_init failed")
    if DEBUG >= 6: libusb.libusb_set_option(self.ctx, libusb.LIBUSB_OPTION_LOG_LEVEL, 4)

    self.handle = libusb.libusb_open_device_with_vid_pid(self.ctx, self.vendor, self.dev)
    if not self.handle: raise RuntimeError(f"device {self.vendor:04x}:{self.dev:04x} not found. sudo required?")

    # Detach kernel driver if needed, then reset so we start from a clean state.
    if libusb.libusb_kernel_driver_active(self.handle, 0):
      libusb.libusb_detach_kernel_driver(self.handle, 0)
      libusb.libusb_reset_device(self.handle)

    # Set configuration and claim interface
    if libusb.libusb_set_configuration(self.handle, 1): raise RuntimeError("set_configuration failed")
    if libusb.libusb_claim_interface(self.handle, 0): raise RuntimeError("claim_interface failed. sudo required?")

    if use_bot:
      self._tag = 0  # CBW tag counter, incremented per BOT exchange
    else:
      # Alt setting 1 exposes the stream-capable endpoints.
      if libusb.libusb_set_interface_alt_setting(self.handle, 0, 1): raise RuntimeError("alt_setting failed")

      # Clear any stalled endpoints
      all_eps = (self.ep_data_out, self.ep_data_in, self.ep_stat_in, self.ep_cmd_out)
      for ep in all_eps: libusb.libusb_clear_halt(self.handle, ep)

      # Allocate streams
      stream_eps = (ctypes.c_uint8 * 3)(self.ep_data_out, self.ep_data_in, self.ep_stat_in)
      if (rc:=libusb.libusb_alloc_streams(self.handle, self.max_streams * len(stream_eps), stream_eps, len(stream_eps))) < 0:
        raise RuntimeError(f"alloc_streams failed: {rc}")

      # Base cmd. NOTE(review): vendor command header template -- field semantics unverified.
      cmd_template = bytes([0x01, 0x00, 0x00, 0x01, *([0] * 12), 0xE4, 0x24, 0x00, 0xB2, 0x1A, 0x00, 0x00, 0x00, *([0] * 8)])

      # Init pools: one pre-allocated transfer and buffer set per in-flight slot.
      self.tr = {ep: [libusb.libusb_alloc_transfer(0) for _ in range(self.max_streams)] for ep in all_eps}
      self.buf_cmd = [(ctypes.c_uint8 * len(cmd_template))(*cmd_template) for _ in range(self.max_streams)]
      self.buf_stat = [(ctypes.c_uint8 * 64)() for _ in range(self.max_streams)]
      self.buf_data_in = [(ctypes.c_uint8 * 0x1000)() for _ in range(self.max_streams)]
      self.buf_data_out = [(ctypes.c_uint8 * 0x80000)() for _ in range(self.max_streams)]
      self.buf_data_out_mvs = [to_mv(ctypes.addressof(self.buf_data_out[i]), 0x80000) for i in range(self.max_streams)]
      # Byte 3 of the command header carries the slot's (1-based) stream id.
      for slot in range(self.max_streams): struct.pack_into(">B", self.buf_cmd[slot], 3, slot + 1)

  def _prep_transfer(self, tr, ep, stream_id, buf, length):
    """Fill a pre-allocated libusb transfer in place. status=0xff marks 'not yet completed'."""
    tr.contents.dev_handle, tr.contents.endpoint, tr.contents.length, tr.contents.buffer = self.handle, ep, length, buf
    tr.contents.status, tr.contents.flags, tr.contents.timeout, tr.contents.num_iso_packets = 0xff, 0, 1000, 0
    tr.contents.type = (libusb.LIBUSB_TRANSFER_TYPE_BULK_STREAM if stream_id is not None else libusb.LIBUSB_TRANSFER_TYPE_BULK)
    if stream_id is not None: libusb.libusb_transfer_set_stream_id(tr, stream_id)
    return tr

  def _submit_and_wait(self, cmds):
    """Submit all transfers, then pump libusb events until every one completes.

    Raises RuntimeError on the first transfer that finishes with a non-success
    status (anything other than COMPLETED or the 0xff 'pending' sentinel).
    """
    for tr in cmds: libusb.libusb_submit_transfer(tr)

    running = len(cmds)
    while running:
      libusb.libusb_handle_events(self.ctx)

      running = len(cmds)
      for tr in cmds:
        if tr.contents.status == libusb.LIBUSB_TRANSFER_COMPLETED: running -= 1
        elif tr.contents.status != 0xFF: raise RuntimeError(f"EP 0x{tr.contents.endpoint:02X} error: {tr.contents.status}")

  def _bulk_out(self, ep: int, payload: bytes, timeout: int = 1000):
    """Synchronous bulk OUT of `payload`; asserts the full length was written."""
    transferred = ctypes.c_int(0)
    rc = libusb.libusb_bulk_transfer(
      self.handle,
      ep,
      (ctypes.c_ubyte * len(payload))(*payload),
      len(payload),
      ctypes.byref(transferred),
      timeout,
    )
    assert rc == 0, f"bulk OUT 0x{ep:02X} failed: {rc}"
    assert transferred.value == len(payload), f"bulk OUT short write on 0x{ep:02X}: {transferred.value}/{len(payload)} bytes"

  def _bulk_in(self, ep: int, length: int, timeout: int = 1000) -> bytes:
    """Synchronous bulk IN of up to `length` bytes; returns what was received."""
    buf, transferred = (ctypes.c_ubyte * length)(), ctypes.c_int(0)
    rc = libusb.libusb_bulk_transfer(
      self.handle,
      ep,
      buf,
      length,
      ctypes.byref(transferred),
      timeout,
    )
    assert rc == 0, f"bulk IN 0x{ep:02X} failed: {rc}"
    return bytes(buf[:transferred.value])

  def send_batch(self, cdbs:list[bytes], idata:list[int]|None=None, odata:list[bytes|None]|None=None) -> list[bytes|None]:
    """Execute a batch of SCSI CDBs.

    Args:
      cdbs: command descriptor blocks to send.
      idata: per-command expected read length (0 = no read).
      odata: per-command optional write payload (None = no write).
    Returns:
      one entry per command: the bytes read for read commands, else None.
    """
    idata, odata = idata or [0] * len(cdbs), odata or [None] * len(cdbs)

    results:list[bytes|None] = []
    tr_window, op_window = [], []
    for idx, (cdb, rlen, send_data) in enumerate(zip(cdbs, idata, odata)):
      if self.use_bot:
        dir_in = rlen > 0
        data_len = rlen if dir_in else (len(send_data) if send_data is not None else 0)
        assert (data_len == 0) if dir_in else (rlen == 0), "BOT mode only supports either read or write per command"

        # CBW
        self._tag += 1
        flags = 0x80 if dir_in else 0x00
        cbw = struct.pack("<IIIBBB", 0x43425355, self._tag, data_len, flags, 0, len(cdb)) + cdb + b"\x00" * (16 - len(cdb))
        self._bulk_out(self.ep_data_out, cbw)

        # DATA phase (at most one direction per command in BOT mode)
        if dir_in:
          results.append(self._bulk_in(self.ep_data_in, rlen))
        else:
          if send_data is not None:
            self._bulk_out(self.ep_data_out, send_data)
          results.append(None)

        # CSW: validate signature, tag echo, and command status
        sig, rtag, residue, status = struct.unpack("<IIIB", self._bulk_in(self.ep_data_in, 13, timeout=2000))
        assert sig == 0x53425355, f"Bad CSW signature 0x{sig:08X}, expected 0x53425355"
        assert rtag == self._tag, f"CSW tag mismatch: got {rtag}, expected {self._tag}"
        assert status == 0, f"SCSI command failed, CSW status=0x{status:02X}, residue={residue}"
      else:
        # allocate slot and stream. stream is 1-based
        slot, stream = idx % self.max_streams, (idx % self.max_streams) + 1

        # build cmd packet
        self.buf_cmd[slot][16:16+len(cdb)] = list(cdb)

        # cmd + stat transfers
        tr_window.append(self._prep_transfer(self.tr[self.ep_cmd_out][slot], self.ep_cmd_out, None, self.buf_cmd[slot], len(self.buf_cmd[slot])))
        tr_window.append(self._prep_transfer(self.tr[self.ep_stat_in][slot], self.ep_stat_in, stream, self.buf_stat[slot], 64))

        if rlen:
          if rlen > len(self.buf_data_in[slot]): self.buf_data_in[slot] = (ctypes.c_uint8 * round_up(rlen, 0x1000))()
          tr_window.append(self._prep_transfer(self.tr[self.ep_data_in][slot], self.ep_data_in, stream, self.buf_data_in[slot], rlen))

        if send_data is not None:
          if len(send_data) > len(self.buf_data_out[slot]):
            self.buf_data_out[slot] = (ctypes.c_uint8 * len(send_data))()
            self.buf_data_out_mvs[slot] = to_mv(ctypes.addressof(self.buf_data_out[slot]), len(send_data))
          self.buf_data_out_mvs[slot][:len(send_data)] = bytes(send_data)
          tr_window.append(self._prep_transfer(self.tr[self.ep_data_out][slot], self.ep_data_out, stream, self.buf_data_out[slot], len(send_data)))

        op_window.append((idx, slot, rlen))
        if (idx + 1 == len(cdbs)) or len(op_window) >= self.max_streams:
          self._submit_and_wait(tr_window)
          for _, slot, rlen in op_window: results.append(bytes(self.buf_data_in[slot][:rlen]) if rlen else None)
          # BUGFIX: op_window must be cleared together with tr_window, otherwise
          # every already-completed op is re-appended to results on each later
          # flush once the window fills (batches larger than max_streams).
          tr_window, op_window = [], []
    return results
@dataclasses.dataclass(frozen=True)
class WriteOp:
  """Immutable request to write `data` starting at internal register address `addr`."""
  addr: int
  data: bytes
  ignore_cache: bool = True  # when False, bytes already known to hold the value are skipped
@dataclasses.dataclass(frozen=True)
class ReadOp:
  """Immutable request to read `size` bytes from internal register address `addr`."""
  addr: int
  size: int
@dataclasses.dataclass(frozen=True)
class ScsiWriteOp:
  """Immutable request to write `data` via a SCSI WRITE command at logical block `lba`."""
  data: bytes
  lba: int = 0
class ASM24Controller:
  """Driver for a USB-to-PCIe bridge controller reached through USB3.

  Internal register writes/reads are issued as vendor SCSI CDBs (opcodes
  0xE5/0xE4); PCIe config/memory TLPs are built by poking the bridge's 0xB2xx
  register block.  Small software caches skip redundant identical writes.
  """
  def __init__(self):
    # Fixed VID:PID and endpoint layout for this bridge.
    self.usb = USB3(0xADD1, 0x0001, 0x81, 0x83, 0x02, 0x04)
    # Last value written per internal register byte (None = unknown/stale).
    self._cache: dict[int, int|None] = {}
    # Address ranges whose PCIe 32-bit writes may be skipped when unchanged.
    self._pci_cacheable: list[tuple[int, int]] = []
    self._pci_cache: dict[int, int|None] = {}

    # Init controller. NOTE(review): magic register values from the vendor init
    # sequence -- individual register semantics unverified.
    self.exec_ops([WriteOp(0x54b, b' '), WriteOp(0x54e, b'\x04'), WriteOp(0x5a8, b'\x02'), WriteOp(0x5f8, b'\x04'),
                   WriteOp(0x7ec, b'\x01\x00\x00\x00'), WriteOp(0xc422, b'\x02'), WriteOp(0x0, b'\x33')])

  def exec_ops(self, ops:Sequence[WriteOp|ReadOp|ScsiWriteOp]):
    """Translate ops into vendor CDBs and execute them as a single USB batch.

    Returns the per-command result list from USB3.send_batch (bytes for reads,
    None otherwise).
    """
    cdbs:list[bytes] = []
    idata:list[int] = []
    odata:list[bytes|None] = []

    def _add_req(cdb:bytes, i:int, o:bytes|None):
      # Append one (cdb, expected-read-length, write-payload) triple.
      nonlocal cdbs, idata, odata
      cdbs, idata, odata = cdbs + [cdb], idata + [i], odata + [o]

    for op in ops:
      if isinstance(op, WriteOp):
        # One CDB per byte; 0xE5 is the vendor "write register byte" opcode.
        # Addresses are masked to 17 bits and offset into the 0x500000 window.
        for off, value in enumerate(op.data):
          addr = ((op.addr + off) & 0x1FFFF) | 0x500000
          if not op.ignore_cache and self._cache.get(addr) == value: continue
          _add_req(struct.pack('>BBBHB', 0xE5, value, addr >> 16, addr & 0xFFFF, 0), 0, None)
          self._cache[addr] = value
      elif isinstance(op, ReadOp):
        assert op.size <= 0xff
        addr = (op.addr & 0x1FFFF) | 0x500000
        _add_req(struct.pack('>BBBHB', 0xE4, op.size, addr >> 16, addr & 0xFFFF, 0), op.size, None)
        # Hardware may change these bytes behind our back, so invalidate them.
        for i in range(op.size): self._cache[addr + i] = None
      elif isinstance(op, ScsiWriteOp):
        # 0x8A = SCSI WRITE(16); payload is zero-padded to whole 512-byte sectors.
        sectors = round_up(len(op.data), 512) // 512
        _add_req(struct.pack('>BBQIBB', 0x8A, 0, op.lba, sectors, 0, 0), 0, op.data+b'\x00'*((sectors*512)-len(op.data)))
    return self.usb.send_batch(cdbs, idata, odata)

  def write(self, base_addr:int, data:bytes, ignore_cache:bool=True): return self.exec_ops([WriteOp(base_addr, data, ignore_cache)])

  def scsi_write(self, buf:bytes, lba:int=0):
    """Write `buf` via SCSI WRITE in 64KiB chunks, with register pokes between chunks.

    NOTE(review): the 0x171 / 0xce6e / 0xce40 pokes follow the vendor flow --
    their exact meaning is unverified.
    """
    if len(buf) > 0x4000: buf += b'\x00' * (round_up(len(buf), 0x10000) - len(buf))
    for i in range(0, len(buf), 0x10000):
      self.exec_ops([ScsiWriteOp(buf[i:i+0x10000], lba), WriteOp(0x171, b'\xff\xff\xff', ignore_cache=True)])
      self.exec_ops([WriteOp(0xce6e, b'\x00\x00', ignore_cache=True)])

    if len(buf) > 0x4000:
      for i in range(4): self.exec_ops([WriteOp(0xce40 + i, b'\x00', ignore_cache=True)])

  def read(self, base_addr:int, length:int, stride:int=0xff) -> bytes:
    """Read `length` bytes of internal register space, split into <=stride chunks."""
    parts = self.exec_ops([ReadOp(base_addr + off, min(stride, length - off)) for off in range(0, length, stride)])
    return b''.join(p or b'' for p in parts)[:length]

  def _is_pci_cacheable(self, addr:int) -> bool: return any(x <= addr <= x + sz for x, sz in self._pci_cacheable)

  def pcie_prep_request(self, fmt_type:int, address:int, value:int|None=None, size:int=4) -> list[WriteOp]:
    """Build (but do not send) the register writes for one PCIe TLP.

    fmt_type selects the TLP kind (e.g. 0x60 memory write, 0x20 memory read,
    0x04/0x44 config).  `value` is the payload for writes; `size` is 1-4 bytes.
    Returns [] when a cacheable 32-bit memory write is already known to hold `value`.
    """
    if fmt_type == 0x60 and size == 4 and self._is_pci_cacheable(address) and self._pci_cache.get(address) == value: return []

    assert fmt_type >> 8 == 0 and size > 0 and size <= 4, f"Invalid fmt_type {fmt_type} or size {size}"
    if DEBUG >= 5: print("pcie_request", hex(fmt_type), hex(address), value, size)

    # TLPs are dword-aligned; sub-dword accesses use byte-enable bits at the offset.
    masked_address, offset = address & 0xFFFFFFFC, address & 0x3
    assert size + offset <= 4 and (value is None or value >> (8 * size) == 0)
    self._pci_cache[address] = value if size == 4 and fmt_type == 0x60 else None

    # 0xB220=data, 0xB218/0xB21c=address lo/hi, 0xB217=byte enables,
    # 0xB210=fmt_type, 0xB254/0xB296=doorbell/status kick.
    return ([WriteOp(0xB220, struct.pack('>I', value << (8 * offset)), ignore_cache=False)] if value is not None else []) + \
      [WriteOp(0xB218, struct.pack('>I', masked_address), ignore_cache=False), WriteOp(0xB21c, struct.pack('>I', address>>32), ignore_cache=False),
       WriteOp(0xB217, bytes([((1 << size) - 1) << offset]), ignore_cache=False), WriteOp(0xB210, bytes([fmt_type]), ignore_cache=False),
       WriteOp(0xB254, b"\x0f", ignore_cache=True), WriteOp(0xB296, b"\x04", ignore_cache=True)]

  def pcie_request(self, fmt_type, address, value=None, size=4, cnt=10):
    """Send one PCIe TLP and, for non-posted requests, wait for its completion.

    Returns the read value for read requests; retries up to `cnt` times on an
    error status before giving up.
    """
    self.exec_ops(self.pcie_prep_request(fmt_type, address, value, size))

    # Fast path for write requests
    if ((fmt_type & 0b11011111) == 0b01000000) or ((fmt_type & 0b10111000) == 0b00110000): return

    # Poll status at 0xB296: bit1 = completion ready, bit0 = error.
    while (stat:=self.read(0xB296, 1)[0]) & 2 == 0:
      if stat & 1:
        self.write(0xB296, bytes([0x01]))  # acknowledge the error bit
        if cnt > 0: return self.pcie_request(fmt_type, address, value, size, cnt=cnt-1)
        assert stat == 2, f"stat read 2 was {stat}"

    # Retrieve completion data from Link Status (0xB22A, 0xB22B)
    b284 = self.read(0xB284, 1)[0]
    completion = struct.unpack('>H', self.read(0xB22A, 2))

    # Validate completion status based on PCIe request type.
    # Completion TLPs for configuration requests always have a byte count of 4.
    assert completion[0] & 0xfff == (4 if (fmt_type & 0xbe == 0x04) else size)

    # Extract completion status field
    status = (completion[0] >> 13) & 0x7

    # Handle completion errors or inconsistencies
    if status or ((fmt_type & 0xbe == 0x04) and (((value is None) and (not (b284 & 0x01))) or ((value is not None) and (b284 & 0x01)))):
      status_map = {0b001: f"Unsupported Request: invalid address/function (target might not be reachable): {address:#x}",
                    0b100: "Completer Abort: abort due to internal error", 0b010: "Configuration Request Retry Status: configuration space busy"}
      raise RuntimeError(f"TLP status: {status_map.get(status, 'Reserved (0b{:03b})'.format(status))}")

    # Reads: extract the requested bytes from the dword completion payload.
    if value is None: return (struct.unpack('>I', self.read(0xB220, 4))[0] >> (8 * (address & 0x3))) & ((1 << (8 * size)) - 1)

  def pcie_cfg_req(self, byte_addr, bus=1, dev=0, fn=0, value=None, size=4):
    """Config-space read/write for bus:dev.fn at `byte_addr` (type 0 or type 1 chosen by bus)."""
    assert byte_addr >> 12 == 0 and bus >> 8 == 0 and dev >> 5 == 0 and fn >> 3 == 0, f"Invalid byte_addr {byte_addr}, bus {bus}, dev {dev}, fn {fn}"
    fmt_type = (0x44 if value is not None else 0x4) | int(bus > 0)
    address = (bus << 24) | (dev << 19) | (fn << 16) | (byte_addr & 0xfff)
    return self.pcie_request(fmt_type, address, value, size)

  def pcie_mem_req(self, address, value=None, size=4): return self.pcie_request(0x60 if value is not None else 0x20, address, value, size)

  def pcie_mem_write(self, address, values, size):
    """Write a sequence of `size`-byte values to consecutive PCIe addresses."""
    ops = [self.pcie_prep_request(0x60, address + i * size, value, size) for i, value in enumerate(values)]

    # Send in batches of 4 for OSX and 16 for Linux (benchmarked values)
    for i in range(0, len(ops), bs:=(4 if OSX else 16)): self.exec_ops(list(itertools.chain.from_iterable(ops[i:i+bs])))
class USBMMIOInterface(MMIOInterface):
  """MMIOInterface backed by the USB bridge.

  With pcimem=True accesses go through PCIe memory TLPs; with pcimem=False they
  target the controller's internal register space (with a SCSI fast path for
  the 0xf000 buffer).  `fmt` is a struct format code giving the element size.
  """
  def __init__(self, usb, addr, size, fmt, pcimem=True): # pylint: disable=super-init-not-called
    self.usb, self.addr, self.nbytes, self.fmt, self.pcimem, self.el_sz = usb, addr, size, fmt, pcimem, struct.calcsize(fmt)

  def __getitem__(self, index): return self._access_items(index)
  def __setitem__(self, index, val): self._access_items(index, val)

  def _access_items(self, index, val=None):
    # Element index/slice -> byte offset/length. NOTE(review): slice step is
    # ignored; presumably callers only use contiguous slices -- confirm.
    if isinstance(index, slice): return self._acc((index.start or 0) * self.el_sz, ((index.stop or len(self))-(index.start or 0)) * self.el_sz, val)
    return self._acc_one(index * self.el_sz, self.el_sz, val) if self.pcimem else self._acc(index * self.el_sz, self.el_sz, val)

  def view(self, offset:int=0, size:int|None=None, fmt=None):
    """Return a new interface over a sub-range of this one (no data is copied)."""
    return USBMMIOInterface(self.usb, self.addr+offset, size or (self.nbytes - offset), fmt=fmt or self.fmt, pcimem=self.pcimem)

  # Widest struct code ('I'/'H'/'B') whose size evenly divides sz.
  def _acc_size(self, sz): return next(x for x in [('I', 4), ('H', 2), ('B', 1)] if sz % x[1] == 0)

  def _acc_one(self, off, sz, val=None):
    # Single element access (up to 8 bytes) as one or two little-endian 32-bit PCIe requests.
    upper = 0 if sz < 8 else self.usb.pcie_mem_req(self.addr + off + 4, val if val is None else (val >> 32), 4)
    lower = self.usb.pcie_mem_req(self.addr + off, val if val is None else val & 0xffffffff, min(sz, 4))
    if val is None: return lower | (upper << 32)

  def _acc(self, off, sz, data=None):
    if data is None: # read op
      if not self.pcimem:
        # Internal space: single-element reads decode to int, larger reads return bytes.
        return int.from_bytes(self.usb.read(self.addr + off, sz), "little") if sz == self.el_sz else self.usb.read(self.addr + off, sz)
      acc, acc_size = self._acc_size(sz)
      return bytes(array.array(acc, [self._acc_one(off + i * acc_size, acc_size) for i in range(sz // acc_size)]))

    # write op
    data = struct.pack(self.fmt, data) if isinstance(data, int) else bytes(data)
    if not self.pcimem:
      # Fast path for writing into buffer 0xf000
      use_cache = 0xa800 <= self.addr <= 0xb000
      return self.usb.scsi_write(bytes(data)) if self.addr == 0xf000 else self.usb.write(self.addr + off, bytes(data), ignore_cache=not use_cache)
    _, acc_sz = self._acc_size(len(data) * struct.calcsize(self.fmt))
    self.usb.pcie_mem_write(self.addr+off, [int.from_bytes(data[i:i+acc_sz], "little") for i in range(0, len(data), acc_sz)], acc_sz)
if getenv("MOCKGPU"): from test.mockgpu.usb import MockUSB3 as USB3 # type: ignore # noqa: F811
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "tinygrad/runtime/support/usb.py",
"license": "MIT License",
"lines": 255,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
unclecode/crawl4ai:crawl4ai/cache_validator.py | """
Cache validation using HTTP conditional requests and head fingerprinting.
Uses httpx for fast, lightweight HTTP requests (no browser needed).
This module enables smart cache validation to avoid unnecessary full browser crawls
when content hasn't changed.
Validation Strategy:
1. Send HEAD request with If-None-Match / If-Modified-Since headers
2. If server returns 304 Not Modified β cache is FRESH
3. If server returns 200 β fetch <head> and compare fingerprint
4. If fingerprint matches β cache is FRESH (minor changes only)
5. Otherwise β cache is STALE, need full recrawl
"""
import httpx
from dataclasses import dataclass
from typing import Optional, Tuple
from enum import Enum
from .utils import compute_head_fingerprint
class CacheValidationResult(Enum):
    """Outcome of a cache-freshness check."""

    FRESH = "fresh"      # content unchanged; serve from cache
    STALE = "stale"      # content changed; full recrawl required
    UNKNOWN = "unknown"  # could not determine; recrawl to be safe
    ERROR = "error"      # validation request failed; fall back to cache
@dataclass
class ValidationResult:
    """Detailed result of a cache validation attempt."""
    status: CacheValidationResult  # fresh / stale / unknown / error verdict
    new_etag: Optional[str] = None  # refreshed ETag header, if the server sent one
    new_last_modified: Optional[str] = None  # refreshed Last-Modified header, if any
    new_head_fingerprint: Optional[str] = None  # recomputed <head> fingerprint, if any
    reason: str = ""  # human-readable explanation of the verdict
class CacheValidator:
    """
    Validates cache freshness using lightweight HTTP requests.

    This validator uses httpx to make fast HTTP requests without needing
    a full browser. It supports two validation methods:

    1. HTTP Conditional Requests (Layer 3):
       - Uses If-None-Match with stored ETag
       - Uses If-Modified-Since with stored Last-Modified
       - Server returns 304 if content unchanged

    2. Head Fingerprinting (Layer 4):
       - Fetches only the <head> section (~5KB)
       - Compares fingerprint of key meta tags
       - Catches changes even without server support for conditional requests
    """

    def __init__(self, timeout: float = 10.0, user_agent: Optional[str] = None):
        """
        Initialize the cache validator.

        Args:
            timeout: Request timeout in seconds
            user_agent: Custom User-Agent string (optional)
        """
        self.timeout = timeout
        self.user_agent = user_agent or "Mozilla/5.0 (compatible; Crawl4AI/1.0)"
        self._client: Optional[httpx.AsyncClient] = None

    async def _get_client(self) -> httpx.AsyncClient:
        """Lazily create and cache a shared httpx client (reused across calls)."""
        if self._client is None:
            self._client = httpx.AsyncClient(
                http2=True,
                timeout=self.timeout,
                follow_redirects=True,
                headers={"User-Agent": self.user_agent}
            )
        return self._client

    async def validate(
        self,
        url: str,
        stored_etag: Optional[str] = None,
        stored_last_modified: Optional[str] = None,
        stored_head_fingerprint: Optional[str] = None,
    ) -> ValidationResult:
        """
        Validate if cached content is still fresh.

        Args:
            url: The URL to validate
            stored_etag: Previously stored ETag header value
            stored_last_modified: Previously stored Last-Modified header value
            stored_head_fingerprint: Previously computed head fingerprint

        Returns:
            ValidationResult with status and any updated metadata
        """
        client = await self._get_client()

        # Build conditional request headers
        headers = {}
        if stored_etag:
            headers["If-None-Match"] = stored_etag
        if stored_last_modified:
            headers["If-Modified-Since"] = stored_last_modified

        try:
            # Step 1: Try HEAD request with conditional headers
            if headers:
                response = await client.head(url, headers=headers)

                if response.status_code == 304:
                    return ValidationResult(
                        status=CacheValidationResult.FRESH,
                        reason="Server returned 304 Not Modified"
                    )

                # Got 200, extract new headers for potential update
                new_etag = response.headers.get("etag")
                new_last_modified = response.headers.get("last-modified")

                # If we have fingerprint, compare it before declaring stale:
                # many servers return 200 for cosmetic-only changes.
                if stored_head_fingerprint:
                    head_html, _, _ = await self._fetch_head(url)
                    if head_html:
                        new_fingerprint = compute_head_fingerprint(head_html)
                        if new_fingerprint and new_fingerprint == stored_head_fingerprint:
                            return ValidationResult(
                                status=CacheValidationResult.FRESH,
                                new_etag=new_etag,
                                new_last_modified=new_last_modified,
                                new_head_fingerprint=new_fingerprint,
                                reason="Head fingerprint matches"
                            )
                        elif new_fingerprint:
                            return ValidationResult(
                                status=CacheValidationResult.STALE,
                                new_etag=new_etag,
                                new_last_modified=new_last_modified,
                                new_head_fingerprint=new_fingerprint,
                                reason="Head fingerprint changed"
                            )

                # Headers changed and no fingerprint match
                return ValidationResult(
                    status=CacheValidationResult.STALE,
                    new_etag=new_etag,
                    new_last_modified=new_last_modified,
                    reason="Server returned 200, content may have changed"
                )

            # Step 2: No conditional headers available, try fingerprint only
            if stored_head_fingerprint:
                head_html, new_etag, new_last_modified = await self._fetch_head(url)
                if head_html:
                    new_fingerprint = compute_head_fingerprint(head_html)
                    if new_fingerprint and new_fingerprint == stored_head_fingerprint:
                        return ValidationResult(
                            status=CacheValidationResult.FRESH,
                            new_etag=new_etag,
                            new_last_modified=new_last_modified,
                            new_head_fingerprint=new_fingerprint,
                            reason="Head fingerprint matches"
                        )
                    elif new_fingerprint:
                        return ValidationResult(
                            status=CacheValidationResult.STALE,
                            new_etag=new_etag,
                            new_last_modified=new_last_modified,
                            new_head_fingerprint=new_fingerprint,
                            reason="Head fingerprint changed"
                        )

            # Step 3: No validation data available
            return ValidationResult(
                status=CacheValidationResult.UNKNOWN,
                reason="No validation data available (no etag, last-modified, or fingerprint)"
            )

        except httpx.TimeoutException:
            return ValidationResult(
                status=CacheValidationResult.ERROR,
                reason="Validation request timed out"
            )
        except httpx.RequestError as e:
            return ValidationResult(
                status=CacheValidationResult.ERROR,
                reason=f"Validation request failed: {type(e).__name__}"
            )
        except Exception as e:
            # On unexpected error, prefer using cache over failing
            return ValidationResult(
                status=CacheValidationResult.ERROR,
                reason=f"Validation error: {str(e)}"
            )

    async def _fetch_head(self, url: str) -> Tuple[Optional[str], Optional[str], Optional[str]]:
        """
        Fetch only the <head> section of a page.

        Uses streaming to stop reading after </head> is found,
        minimizing bandwidth usage.

        Args:
            url: The URL to fetch

        Returns:
            Tuple of (head_html, etag, last_modified)
        """
        client = await self._get_client()

        try:
            async with client.stream(
                "GET",
                url,
                headers={"Accept-Encoding": "identity"}  # Disable compression for easier parsing
            ) as response:
                etag = response.headers.get("etag")
                last_modified = response.headers.get("last-modified")

                if response.status_code != 200:
                    return None, etag, last_modified

                # Read until </head> or max 64KB. Only the current chunk plus a
                # short carry-over tail is scanned per iteration, so detection is
                # O(total bytes) instead of re-joining and re-scanning the whole
                # accumulated buffer on every chunk (previously O(n^2)).
                chunks = []
                total_bytes = 0
                max_bytes = 65536
                tail = b''  # carry-over so a '</head>' split across chunk boundaries is found

                async for chunk in response.aiter_bytes(4096):
                    chunks.append(chunk)
                    total_bytes += len(chunk)

                    window = tail + chunk
                    # Case-insensitive check covers </head>, </HEAD>, </Head>, ...
                    if b'</head>' in window.lower():
                        break
                    tail = window[-6:]  # len('</head>') - 1 bytes of overlap

                    if total_bytes >= max_bytes:
                        break

                html = b''.join(chunks).decode('utf-8', errors='replace')

                # Extract just the head section
                head_end = html.lower().find('</head>')
                if head_end != -1:
                    html = html[:head_end + 7]

                return html, etag, last_modified

        except Exception:
            # Network/parse failure degrades to "no data"; caller treats it as unknown.
            return None, None, None

    async def close(self):
        """Close the HTTP client and release resources."""
        if self._client:
            await self._client.aclose()
            self._client = None

    async def __aenter__(self):
        """Async context manager entry."""
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """Async context manager exit: closes the shared client."""
        await self.close()
"repo_id": "unclecode/crawl4ai",
"file_path": "crawl4ai/cache_validator.py",
"license": "Apache License 2.0",
"lines": 224,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
unclecode/crawl4ai:deploy/docker/tests/run_security_tests.py | #!/usr/bin/env python3
"""
Security Integration Tests for Crawl4AI Docker API.
Tests that security fixes are working correctly against a running server.
Usage:
python run_security_tests.py [base_url]
Example:
python run_security_tests.py http://localhost:11235
"""
import subprocess
import sys
import re
# Colors for terminal output
GREEN = '\033[0;32m'
RED = '\033[0;31m'
YELLOW = '\033[1;33m'
NC = '\033[0m' # No Color
PASSED = 0
FAILED = 0
def run_curl(args: list) -> str:
    """Invoke `curl -s` with *args* and return combined stdout+stderr text.

    Returns "TIMEOUT" after 30s, or the exception text if curl cannot run.
    """
    try:
        proc = subprocess.run(
            ['curl', '-s'] + args,
            capture_output=True,
            text=True,
            timeout=30
        )
    except subprocess.TimeoutExpired:
        return "TIMEOUT"
    except Exception as exc:
        return str(exc)
    return proc.stdout + proc.stderr
def test_expect(name: str, expect_pattern: str, curl_args: list) -> bool:
    """Run a test and check if output matches expected pattern.

    Prints a colored pass/fail line, bumps the global counters, and returns
    whether the curl output matched `expect_pattern` (case-insensitive regex).
    """
    global PASSED, FAILED
    result = run_curl(curl_args)

    if re.search(expect_pattern, result, re.IGNORECASE):
        # NOTE(review): the status glyphs below appear mojibake-garbled in this
        # copy; presumably they are check/cross marks -- confirm against the
        # canonical file before editing.
        print(f"{GREEN}β{NC} {name}")
        PASSED += 1
        return True
    else:
        print(f"{RED}β{NC} {name}")
        print(f" Expected pattern: {expect_pattern}")
        print(f" Got: {result[:200]}")
        FAILED += 1
        return False
def main():
    """Run all security/functionality/edge-case probes against the target server.

    Usage: the first CLI argument overrides the default base URL.
    Exits 1 if the server is unreachable.
    """
    global PASSED, FAILED

    base_url = sys.argv[1] if len(sys.argv) > 1 else "http://localhost:11235"

    print("=" * 60)
    print("Crawl4AI Security Integration Tests")
    print(f"Target: {base_url}")
    print("=" * 60)
    print()

    # Check server availability
    print("Checking server availability...")
    result = run_curl(['-o', '/dev/null', '-w', '%{http_code}', f'{base_url}/health'])
    if '200' not in result:
        print(f"{RED}ERROR: Server not reachable at {base_url}{NC}")
        print("Please start the server first.")
        sys.exit(1)
    print(f"{GREEN}Server is running{NC}")
    print()

    # === Part A: Security Tests ===
    # Each probe sends a known-malicious request; the server must reject it.
    print("=== Part A: Security Tests ===")
    print("(Vulnerabilities must be BLOCKED)")
    print()

    test_expect(
        "A1: Hooks disabled by default (403)",
        r"403|disabled|Hooks are disabled",
        ['-X', 'POST', f'{base_url}/crawl',
         '-H', 'Content-Type: application/json',
         '-d', '{"urls":["https://example.com"],"hooks":{"code":{"on_page_context_created":"async def hook(page, context, **kwargs): return page"}}}']
    )

    test_expect(
        "A2: file:// blocked on /execute_js (400)",
        r"400|must start with",
        ['-X', 'POST', f'{base_url}/execute_js',
         '-H', 'Content-Type: application/json',
         '-d', '{"url":"file:///etc/passwd","scripts":["1"]}']
    )

    test_expect(
        "A3: file:// blocked on /screenshot (400)",
        r"400|must start with",
        ['-X', 'POST', f'{base_url}/screenshot',
         '-H', 'Content-Type: application/json',
         '-d', '{"url":"file:///etc/passwd"}']
    )

    test_expect(
        "A4: file:// blocked on /pdf (400)",
        r"400|must start with",
        ['-X', 'POST', f'{base_url}/pdf',
         '-H', 'Content-Type: application/json',
         '-d', '{"url":"file:///etc/passwd"}']
    )

    test_expect(
        "A5: file:// blocked on /html (400)",
        r"400|must start with",
        ['-X', 'POST', f'{base_url}/html',
         '-H', 'Content-Type: application/json',
         '-d', '{"url":"file:///etc/passwd"}']
    )

    print()

    # === Part B: Functionality Tests ===
    # Legitimate requests must still succeed after the security hardening.
    print("=== Part B: Functionality Tests ===")
    print("(Normal operations must WORK)")
    print()

    test_expect(
        "B1: Basic crawl works",
        r"success.*true|results",
        ['-X', 'POST', f'{base_url}/crawl',
         '-H', 'Content-Type: application/json',
         '-d', '{"urls":["https://example.com"]}']
    )

    test_expect(
        "B2: /md works with https://",
        r"success.*true|markdown",
        ['-X', 'POST', f'{base_url}/md',
         '-H', 'Content-Type: application/json',
         '-d', '{"url":"https://example.com"}']
    )

    test_expect(
        "B3: Health endpoint works",
        r"ok",
        [f'{base_url}/health']
    )

    print()

    # === Part C: Edge Cases ===
    print("=== Part C: Edge Cases ===")
    print("(Malformed input must be REJECTED)")
    print()

    test_expect(
        "C1: javascript: URL rejected (400)",
        r"400|must start with",
        ['-X', 'POST', f'{base_url}/execute_js',
         '-H', 'Content-Type: application/json',
         '-d', '{"url":"javascript:alert(1)","scripts":["1"]}']
    )

    test_expect(
        "C2: data: URL rejected (400)",
        r"400|must start with",
        ['-X', 'POST', f'{base_url}/execute_js',
         '-H', 'Content-Type: application/json',
         '-d', '{"url":"data:text/html,<h1>test</h1>","scripts":["1"]}']
    )

    print()
    print("=" * 60)
    print("Results")
    print("=" * 60)
    print(f"Passed: {GREEN}{PASSED}{NC}")
    print(f"Failed: {RED}{FAILED}{NC}")
    print()

    # Nonzero exit lets CI fail the job on any regression.
    if FAILED > 0:
        print(f"{RED}SOME TESTS FAILED{NC}")
        sys.exit(1)
    else:
        print(f"{GREEN}ALL TESTS PASSED{NC}")
        sys.exit(0)
# Script entry point.
if __name__ == '__main__':
    main()
| {
"repo_id": "unclecode/crawl4ai",
"file_path": "deploy/docker/tests/run_security_tests.py",
"license": "Apache License 2.0",
"lines": 161,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
unclecode/crawl4ai:deploy/docker/tests/test_security_fixes.py | #!/usr/bin/env python3
"""
Unit tests for security fixes.
These tests verify the security fixes at the code level without needing a running server.
"""
import sys
import os

# Add parent directory to path to import modules
# (lets these tests import the server code from deploy/docker without installing it)
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import unittest
class TestURLValidation(unittest.TestCase):
    """Exercise the URL scheme allow-list used by the server endpoints."""

    def setUp(self):
        """Mirror the scheme allow-lists from the server code."""
        self.ALLOWED_URL_SCHEMES = ("http://", "https://")
        self.ALLOWED_URL_SCHEMES_WITH_RAW = ("http://", "https://", "raw:", "raw://")

    def validate_url_scheme(self, url: str, allow_raw: bool = False) -> bool:
        """Local re-implementation of validate_url_scheme under test."""
        schemes = self.ALLOWED_URL_SCHEMES_WITH_RAW if allow_raw else self.ALLOWED_URL_SCHEMES
        return url.startswith(schemes)

    # --- security: these URLs must be REJECTED ---

    def test_file_url_blocked(self):
        """file:// URLs must be blocked (LFI vulnerability)."""
        self.assertFalse(self.validate_url_scheme("file:///etc/passwd"))
        self.assertFalse(self.validate_url_scheme("file:///etc/passwd", allow_raw=True))

    def test_file_url_blocked_windows(self):
        """file:// URLs with Windows paths must be blocked."""
        self.assertFalse(self.validate_url_scheme("file:///C:/Windows/System32/config/sam"))

    def test_javascript_url_blocked(self):
        """javascript: URLs must be blocked (XSS)."""
        self.assertFalse(self.validate_url_scheme("javascript:alert(1)"))

    def test_data_url_blocked(self):
        """data: URLs must be blocked."""
        self.assertFalse(self.validate_url_scheme("data:text/html,<script>alert(1)</script>"))

    def test_ftp_url_blocked(self):
        """ftp: URLs must be blocked."""
        self.assertFalse(self.validate_url_scheme("ftp://example.com/file"))

    def test_empty_url_blocked(self):
        """Empty URLs must be blocked."""
        self.assertFalse(self.validate_url_scheme(""))

    def test_relative_url_blocked(self):
        """Relative URLs must be blocked."""
        for url in ("/etc/passwd", "../../../etc/passwd"):
            self.assertFalse(self.validate_url_scheme(url))

    # --- functionality: these URLs must be ALLOWED ---

    def test_http_url_allowed(self):
        """http:// URLs must be allowed."""
        for url in ("http://example.com", "http://localhost:8080"):
            self.assertTrue(self.validate_url_scheme(url))

    def test_https_url_allowed(self):
        """https:// URLs must be allowed."""
        for url in ("https://example.com", "https://example.com/path?query=1"):
            self.assertTrue(self.validate_url_scheme(url))

    def test_raw_url_allowed_when_enabled(self):
        """raw: URLs must be allowed when allow_raw=True."""
        for url in ("raw:<html></html>", "raw://<html></html>"):
            self.assertTrue(self.validate_url_scheme(url, allow_raw=True))

    def test_raw_url_blocked_when_disabled(self):
        """raw: URLs must be blocked when allow_raw=False."""
        for url in ("raw:<html></html>", "raw://<html></html>"):
            self.assertFalse(self.validate_url_scheme(url, allow_raw=False))
class TestHookBuiltins(unittest.TestCase):
    """Test that dangerous builtins are removed from hooks."""

    # Single source of truth for the sandbox allow-list. Previously this
    # list was duplicated verbatim inside both test methods, so the two
    # copies could silently drift apart.
    ALLOWED_BUILTINS = [
        'print', 'len', 'str', 'int', 'float', 'bool',
        'list', 'dict', 'set', 'tuple', 'range', 'enumerate',
        'zip', 'map', 'filter', 'any', 'all', 'sum', 'min', 'max',
        'sorted', 'reversed', 'abs', 'round', 'isinstance', 'type',
        'getattr', 'hasattr', 'setattr', 'callable', 'iter', 'next',
        '__build_class__',  # Required for class definitions in exec
    ]

    def test_import_not_in_allowed_builtins(self):
        """__import__ and other escape hatches must NOT be in allowed_builtins."""
        # Each of these would let sandboxed hook code reach the filesystem
        # or execute arbitrary code.
        for dangerous in ('__import__', 'eval', 'exec', 'compile', 'open'):
            self.assertNotIn(dangerous, self.ALLOWED_BUILTINS)

    def test_build_class_in_allowed_builtins(self):
        """__build_class__ must be in allowed_builtins (needed for class definitions)."""
        self.assertIn('__build_class__', self.ALLOWED_BUILTINS)
class TestHooksEnabled(unittest.TestCase):
    """Test HOOKS_ENABLED environment variable logic."""

    @staticmethod
    def _hooks_enabled() -> bool:
        """Replicate the server's flag parsing: hooks are on only for literal 'true'."""
        # This expression was previously copy-pasted into every test.
        return os.environ.get("CRAWL4AI_HOOKS_ENABLED", "false").lower() == "true"

    def test_hooks_disabled_by_default(self):
        """Hooks must be disabled by default."""
        # Clear any existing env var so we really test the default.
        # (A dead pre-computation of the flag *before* the pop was removed:
        # it evaluated while the variable could still be set and its result
        # was immediately overwritten.)
        original = os.environ.pop("CRAWL4AI_HOOKS_ENABLED", None)
        try:
            self.assertFalse(self._hooks_enabled())
        finally:
            if original is not None:
                os.environ["CRAWL4AI_HOOKS_ENABLED"] = original

    def test_hooks_enabled_when_true(self):
        """Hooks must be enabled when CRAWL4AI_HOOKS_ENABLED=true."""
        original = os.environ.get("CRAWL4AI_HOOKS_ENABLED")
        try:
            os.environ["CRAWL4AI_HOOKS_ENABLED"] = "true"
            self.assertTrue(self._hooks_enabled())
        finally:
            # Restore the caller's environment exactly.
            if original is not None:
                os.environ["CRAWL4AI_HOOKS_ENABLED"] = original
            else:
                os.environ.pop("CRAWL4AI_HOOKS_ENABLED", None)

    def test_hooks_disabled_when_false(self):
        """Hooks must be disabled when CRAWL4AI_HOOKS_ENABLED=false."""
        original = os.environ.get("CRAWL4AI_HOOKS_ENABLED")
        try:
            os.environ["CRAWL4AI_HOOKS_ENABLED"] = "false"
            self.assertFalse(self._hooks_enabled())
        finally:
            if original is not None:
                os.environ["CRAWL4AI_HOOKS_ENABLED"] = original
            else:
                os.environ.pop("CRAWL4AI_HOOKS_ENABLED", None)
if __name__ == '__main__':
    # Print a banner, then hand control to unittest's CLI runner.
    banner = "=" * 60
    print(banner)
    print("Crawl4AI Security Fixes - Unit Tests")
    print(banner)
    print()
    # Run tests with verbosity
    unittest.main(verbosity=2)
| {
"repo_id": "unclecode/crawl4ai",
"file_path": "deploy/docker/tests/test_security_fixes.py",
"license": "Apache License 2.0",
"lines": 135,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
unclecode/crawl4ai:docs/examples/deep_crawl_crash_recovery.py | #!/usr/bin/env python3
"""
Deep Crawl Crash Recovery Example
This example demonstrates how to implement crash recovery for long-running
deep crawls. The feature is useful for:
- Cloud deployments with spot/preemptible instances
- Long-running crawls that may be interrupted
- Distributed crawling with state coordination
Key concepts:
- `on_state_change`: Callback fired after each URL is processed
- `resume_state`: Pass saved state to continue from a checkpoint
- `export_state()`: Get the last captured state manually
Works with all strategies: BFSDeepCrawlStrategy, DFSDeepCrawlStrategy,
BestFirstCrawlingStrategy
"""
import asyncio
import json
import os
from pathlib import Path
from typing import Dict, Any, List
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig
from crawl4ai.deep_crawling import BFSDeepCrawlStrategy
# File to store crawl state (in production, use Redis/database)
STATE_FILE = Path("crawl_state.json")

async def save_state_to_file(state: Dict[str, Any]) -> None:
    """
    Callback to save state after each URL is processed.

    The state is written atomically (temp file + os.replace) so that a
    crash in the middle of a write can never leave a truncated/corrupt
    checkpoint behind -- important, since this file IS the crash-recovery
    mechanism this example demonstrates.

    In production, you might save to:
    - Redis: await redis.set("crawl_state", json.dumps(state))
    - Database: await db.execute("UPDATE crawls SET state = ?", json.dumps(state))
    - S3: await s3.put_object(Bucket="crawls", Key="state.json", Body=json.dumps(state))
    """
    tmp_path = STATE_FILE.with_suffix(".json.tmp")
    with open(tmp_path, "w") as f:
        json.dump(state, f, indent=2)
    # os.replace is atomic on both POSIX and Windows.
    os.replace(tmp_path, STATE_FILE)
    print(f" [State saved] Pages: {state['pages_crawled']}, Pending: {len(state['pending'])}")
def load_state_from_file() -> Dict[str, Any] | None:
    """Return the previously saved checkpoint state, or None if no file exists."""
    if not STATE_FILE.exists():
        return None
    with open(STATE_FILE, "r") as f:
        return json.load(f)
async def example_basic_state_persistence():
    """
    Example 1: Basic state persistence with file storage.

    The on_state_change callback is called after each URL is processed,
    allowing you to save progress in real-time.
    """
    print("\n" + "=" * 60)
    print("Example 1: Basic State Persistence")
    print("=" * 60)
    # Clean up any previous state so this run starts from a fresh checkpoint
    if STATE_FILE.exists():
        STATE_FILE.unlink()
    strategy = BFSDeepCrawlStrategy(
        max_depth=2,
        max_pages=5,
        on_state_change=save_state_to_file,  # Save after each URL
    )
    config = CrawlerRunConfig(
        deep_crawl_strategy=strategy,
        verbose=False,
    )
    print("\nStarting crawl with state persistence...")
    async with AsyncWebCrawler(verbose=False) as crawler:
        results = await crawler.arun("https://books.toscrape.com", config=config)
    # Show final state (file exists only if at least one URL was processed)
    if STATE_FILE.exists():
        with open(STATE_FILE, "r") as f:
            final_state = json.load(f)
        print(f"\nFinal state saved to {STATE_FILE}:")
        print(f" - Strategy: {final_state['strategy_type']}")
        print(f" - Pages crawled: {final_state['pages_crawled']}")
        print(f" - URLs visited: {len(final_state['visited'])}")
        print(f" - URLs pending: {len(final_state['pending'])}")
    print(f"\nCrawled {len(results)} pages total")
async def example_crash_and_resume():
    """
    Example 2: Simulate a crash and resume from checkpoint.

    This demonstrates the full crash recovery workflow:
    1. Start crawling with state persistence
    2. "Crash" after N pages
    3. Resume from saved state
    4. Verify no duplicate work
    """
    print("\n" + "=" * 60)
    print("Example 2: Crash and Resume")
    print("=" * 60)
    # Clean up any previous state
    if STATE_FILE.exists():
        STATE_FILE.unlink()
    crash_after = 3
    crawled_urls_phase1: List[str] = []

    async def save_and_maybe_crash(state: Dict[str, Any]) -> None:
        """Save state, then simulate crash after N pages."""
        # Always save state first -- in a real deployment the crash can
        # happen at any moment, so persistence must precede the failure.
        await save_state_to_file(state)
        # Mirror the visited list into the outer scope; clear+extend keeps
        # the same list object the enclosing function already holds.
        crawled_urls_phase1.clear()
        crawled_urls_phase1.extend(state["visited"])
        # Simulate crash after reaching threshold
        if state["pages_crawled"] >= crash_after:
            raise Exception("Simulated crash! (This is intentional)")

    # Phase 1: Start crawl that will "crash"
    print(f"\n--- Phase 1: Crawl until 'crash' after {crash_after} pages ---")
    strategy1 = BFSDeepCrawlStrategy(
        max_depth=2,
        max_pages=10,
        on_state_change=save_and_maybe_crash,
    )
    config = CrawlerRunConfig(
        deep_crawl_strategy=strategy1,
        verbose=False,
    )
    try:
        async with AsyncWebCrawler(verbose=False) as crawler:
            await crawler.arun("https://books.toscrape.com", config=config)
    except Exception as e:
        # The simulated crash propagates out of arun; this is expected.
        print(f"\n Crash occurred: {e}")
        print(f" URLs crawled before crash: {len(crawled_urls_phase1)}")
    # Phase 2: Resume from checkpoint
    print("\n--- Phase 2: Resume from checkpoint ---")
    saved_state = load_state_from_file()
    if not saved_state:
        print(" ERROR: No saved state found!")
        return
    print(f" Loaded state: {saved_state['pages_crawled']} pages, {len(saved_state['pending'])} pending")
    crawled_urls_phase2: List[str] = []

    async def track_resumed_crawl(state: Dict[str, Any]) -> None:
        """Track new URLs crawled in phase 2."""
        await save_state_to_file(state)
        # Anything visited now but absent from the checkpoint is new work.
        new_urls = set(state["visited"]) - set(saved_state["visited"])
        for url in new_urls:
            if url not in crawled_urls_phase2:
                crawled_urls_phase2.append(url)

    strategy2 = BFSDeepCrawlStrategy(
        max_depth=2,
        max_pages=10,
        resume_state=saved_state,  # Resume from checkpoint!
        on_state_change=track_resumed_crawl,
    )
    config2 = CrawlerRunConfig(
        deep_crawl_strategy=strategy2,
        verbose=False,
    )
    async with AsyncWebCrawler(verbose=False) as crawler:
        results = await crawler.arun("https://books.toscrape.com", config=config2)
    # Verify no duplicates: resumed crawl must not revisit checkpointed URLs
    already_crawled = set(saved_state["visited"])
    duplicates = set(crawled_urls_phase2) & already_crawled
    print(f"\n--- Results ---")
    print(f" Phase 1 URLs: {len(crawled_urls_phase1)}")
    print(f" Phase 2 new URLs: {len(crawled_urls_phase2)}")
    print(f" Duplicate crawls: {len(duplicates)} (should be 0)")
    print(f" Total results: {len(results)}")
    if len(duplicates) == 0:
        print("\n SUCCESS: No duplicate work after resume!")
    else:
        print(f"\n WARNING: Found duplicates: {duplicates}")
async def example_export_state():
    """
    Example 3: Manual state export using export_state().

    If you don't need real-time persistence, you can export
    the state manually after the crawl completes.
    """
    # NOTE(review): despite the docstring, export_state() is never actually
    # invoked below -- the closing prints explain why. Confirm whether a
    # strategy.export_state() call should be demonstrated here.
    print("\n" + "=" * 60)
    print("Example 3: Manual State Export")
    print("=" * 60)
    strategy = BFSDeepCrawlStrategy(
        max_depth=1,
        max_pages=3,
        # No callback - state is still tracked internally
    )
    config = CrawlerRunConfig(
        deep_crawl_strategy=strategy,
        verbose=False,
    )
    print("\nCrawling without callback...")
    async with AsyncWebCrawler(verbose=False) as crawler:
        results = await crawler.arun("https://books.toscrape.com", config=config)
    # Export state after crawl completes
    # Note: This only works if on_state_change was set during crawl
    # For this example, we'd need to set on_state_change to get state
    print(f"\nCrawled {len(results)} pages")
    print("(For manual export, set on_state_change to capture state)")
async def example_state_structure():
    """
    Example 4: Understanding the state structure.

    Shows the complete state dictionary that gets saved.
    """
    print("\n" + "=" * 60)
    print("Example 4: State Structure")
    print("=" * 60)
    captured_state = None

    async def capture_state(state: Dict[str, Any]) -> None:
        # Keep only the most recent snapshot; each callback overwrites it.
        nonlocal captured_state
        captured_state = state

    strategy = BFSDeepCrawlStrategy(
        max_depth=1,
        max_pages=2,
        on_state_change=capture_state,
    )
    config = CrawlerRunConfig(
        deep_crawl_strategy=strategy,
        verbose=False,
    )
    async with AsyncWebCrawler(verbose=False) as crawler:
        await crawler.arun("https://books.toscrape.com", config=config)
    if captured_state:
        print("\nState structure:")
        # default=str guards against non-JSON-serializable values; output is
        # truncated to the first 1000 chars for display.
        print(json.dumps(captured_state, indent=2, default=str)[:1000] + "...")
        print("\n\nKey fields:")
        print(f" strategy_type: '{captured_state['strategy_type']}'")
        print(f" visited: List of {len(captured_state['visited'])} URLs")
        print(f" pending: List of {len(captured_state['pending'])} queued items")
        print(f" depths: Dict mapping URL -> depth level")
        print(f" pages_crawled: {captured_state['pages_crawled']}")
async def main():
    """Run all examples in order."""
    print("=" * 60)
    print("Deep Crawl Crash Recovery Examples")
    print("=" * 60)
    await example_basic_state_persistence()
    await example_crash_and_resume()
    # BUGFIX: example_export_state was defined but never invoked, even
    # though this function claims to run all examples.
    await example_export_state()
    await example_state_structure()
    # # Cleanup
    # if STATE_FILE.exists():
    #     STATE_FILE.unlink()
    #     print(f"\n[Cleaned up {STATE_FILE}]")

if __name__ == "__main__":
    asyncio.run(main())
| {
"repo_id": "unclecode/crawl4ai",
"file_path": "docs/examples/deep_crawl_crash_recovery.py",
"license": "Apache License 2.0",
"lines": 229,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
unclecode/crawl4ai:docs/examples/prefetch_two_phase_crawl.py | #!/usr/bin/env python3
"""
Prefetch Mode and Two-Phase Crawling Example
Prefetch mode is a fast path that skips heavy processing and returns
only HTML + links. This is ideal for:
- Site mapping: Quickly discover all URLs
- Selective crawling: Find URLs first, then process only what you need
- Link validation: Check which pages exist without full processing
- Crawl planning: Estimate size before committing resources
Key concept:
- `prefetch=True` in CrawlerRunConfig enables fast link-only extraction
- Skips: markdown generation, content scraping, media extraction, LLM extraction
- Returns: HTML and links dictionary
Performance benefit: ~5-10x faster than full processing
"""
import asyncio
import time
from typing import List, Dict
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig
async def example_basic_prefetch():
    """
    Example 1: Basic prefetch mode.

    Shows how prefetch returns HTML and links without heavy processing.
    """
    print("\n" + "=" * 60)
    print("Example 1: Basic Prefetch Mode")
    print("=" * 60)
    async with AsyncWebCrawler(verbose=False) as crawler:
        # Enable prefetch mode
        config = CrawlerRunConfig(prefetch=True)
        print("\nFetching with prefetch=True...")
        result = await crawler.arun("https://books.toscrape.com", config=config)
        print(f"\nResult summary:")
        print(f" Success: {result.success}")
        print(f" HTML length: {len(result.html) if result.html else 0} chars")
        print(f" Internal links: {len(result.links.get('internal', []))}")
        print(f" External links: {len(result.links.get('external', []))}")
        # These should be None/empty in prefetch mode (heavy processing skipped)
        print(f"\n Skipped processing:")
        print(f" Markdown: {result.markdown}")
        print(f" Cleaned HTML: {result.cleaned_html}")
        print(f" Extracted content: {result.extracted_content}")
        # Show some discovered links (first five only)
        internal_links = result.links.get("internal", [])
        if internal_links:
            print(f"\n Sample internal links:")
            for link in internal_links[:5]:
                print(f" - {link['href'][:60]}...")
async def example_performance_comparison():
    """
    Example 2: Compare prefetch vs full processing performance.
    """
    print("\n" + "=" * 60)
    print("Example 2: Performance Comparison")
    print("=" * 60)
    url = "https://books.toscrape.com"
    async with AsyncWebCrawler(verbose=False) as crawler:
        # Warm up - first request is slower due to browser startup
        await crawler.arun(url, config=CrawlerRunConfig())
        # Prefetch mode timing
        start = time.time()
        prefetch_result = await crawler.arun(url, config=CrawlerRunConfig(prefetch=True))
        prefetch_time = time.time() - start
        # Full processing timing
        start = time.time()
        full_result = await crawler.arun(url, config=CrawlerRunConfig())
        full_time = time.time() - start
        print(f"\nTiming comparison:")
        print(f" Prefetch mode: {prefetch_time:.3f}s")
        print(f" Full processing: {full_time:.3f}s")
        # NOTE(review): assumes prefetch_time > 0; an effectively instant
        # (e.g. cached) response would raise ZeroDivisionError here -- confirm.
        print(f" Speedup: {full_time / prefetch_time:.1f}x faster")
        print(f"\nOutput comparison:")
        print(f" Prefetch - Links found: {len(prefetch_result.links.get('internal', []))}")
        print(f" Full - Links found: {len(full_result.links.get('internal', []))}")
        print(f" Full - Markdown length: {len(full_result.markdown.raw_markdown) if full_result.markdown else 0}")
async def example_two_phase_crawl():
    """
    Example 3: Two-phase crawling pattern.

    Phase 1: Fast discovery with prefetch
    Phase 2: Full processing on selected URLs
    """
    print("\n" + "=" * 60)
    print("Example 3: Two-Phase Crawling")
    print("=" * 60)
    async with AsyncWebCrawler(verbose=False) as crawler:
        # -----------------------------------------------------------
        # Phase 1: Fast URL discovery
        # -----------------------------------------------------------
        print("\n--- Phase 1: Fast Discovery ---")
        prefetch_config = CrawlerRunConfig(prefetch=True)
        start = time.time()
        discovery = await crawler.arun("https://books.toscrape.com", config=prefetch_config)
        discovery_time = time.time() - start
        all_urls = [link["href"] for link in discovery.links.get("internal", [])]
        print(f" Discovered {len(all_urls)} URLs in {discovery_time:.2f}s")
        # Filter to URLs we care about (e.g., book detail pages)
        # On books.toscrape.com, book pages contain "catalogue/" but not "category/"
        book_urls = [
            url for url in all_urls
            if "catalogue/" in url and "category/" not in url
        ][:5]  # Limit to 5 for demo
        print(f" Filtered to {len(book_urls)} book pages")
        # -----------------------------------------------------------
        # Phase 2: Full processing on selected URLs
        # -----------------------------------------------------------
        print("\n--- Phase 2: Full Processing ---")
        full_config = CrawlerRunConfig(
            word_count_threshold=10,
            remove_overlay_elements=True,
        )
        results = []
        start = time.time()
        for url in book_urls:
            result = await crawler.arun(url, config=full_config)
            if result.success:
                results.append(result)
                # Derive a display title from the URL slug, truncated to 40 chars
                title = result.url.split("/")[-2].replace("-", " ").title()[:40]
                md_len = len(result.markdown.raw_markdown) if result.markdown else 0
                print(f" Processed: {title}... ({md_len} chars)")
        processing_time = time.time() - start
        print(f"\n Processed {len(results)} pages in {processing_time:.2f}s")
        # -----------------------------------------------------------
        # Summary
        # -----------------------------------------------------------
        print(f"\n--- Summary ---")
        print(f" Discovery phase: {discovery_time:.2f}s ({len(all_urls)} URLs)")
        print(f" Processing phase: {processing_time:.2f}s ({len(results)} pages)")
        print(f" Total time: {discovery_time + processing_time:.2f}s")
        print(f" URLs skipped: {len(all_urls) - len(book_urls)} (not matching filter)")
async def example_prefetch_with_deep_crawl():
    """
    Example 4: Combine prefetch with deep crawl strategy.

    Use prefetch mode during deep crawl for maximum speed.
    """
    print("\n" + "=" * 60)
    print("Example 4: Prefetch with Deep Crawl")
    print("=" * 60)
    from crawl4ai.deep_crawling import BFSDeepCrawlStrategy
    async with AsyncWebCrawler(verbose=False) as crawler:
        # Deep crawl with prefetch - maximum discovery speed
        config = CrawlerRunConfig(
            prefetch=True,  # Fast mode
            deep_crawl_strategy=BFSDeepCrawlStrategy(
                max_depth=1,
                max_pages=10,
            )
        )
        print("\nDeep crawling with prefetch mode...")
        start = time.time()
        result_container = await crawler.arun("https://books.toscrape.com", config=config)
        # Handle iterator result from deep crawl: a deep crawl may hand back
        # an iterable of results, a plain crawl a single result object.
        if hasattr(result_container, '__iter__'):
            results = list(result_container)
        else:
            results = [result_container]
        elapsed = time.time() - start
        # Collect all discovered links, de-duplicated via sets
        all_internal_links = set()
        all_external_links = set()
        for result in results:
            for link in result.links.get("internal", []):
                all_internal_links.add(link["href"])
            for link in result.links.get("external", []):
                all_external_links.add(link["href"])
        print(f"\nResults:")
        print(f" Pages crawled: {len(results)}")
        print(f" Total internal links discovered: {len(all_internal_links)}")
        print(f" Total external links discovered: {len(all_external_links)}")
        print(f" Time: {elapsed:.2f}s")
async def example_prefetch_with_raw_html():
    """
    Example 5: Prefetch with raw HTML input.

    You can also use prefetch mode with raw: URLs for cached content.
    """
    print("\n" + "=" * 60)
    print("Example 5: Prefetch with Raw HTML")
    print("=" * 60)
    sample_html = """
    <html>
    <head><title>Sample Page</title></head>
    <body>
    <h1>Hello World</h1>
    <nav>
    <a href="/page1">Internal Page 1</a>
    <a href="/page2">Internal Page 2</a>
    <a href="https://example.com/external">External Link</a>
    </nav>
    <main>
    <p>This is the main content with <a href="/page3">another link</a>.</p>
    </main>
    </body>
    </html>
    """
    async with AsyncWebCrawler(verbose=False) as crawler:
        config = CrawlerRunConfig(
            prefetch=True,
            base_url="https://mysite.com",  # For resolving relative links
        )
        # The "raw:" prefix tells the crawler to parse the string directly
        # instead of fetching a URL.
        result = await crawler.arun(f"raw:{sample_html}", config=config)
        print(f"\nExtracted from raw HTML:")
        print(f" Internal links: {len(result.links.get('internal', []))}")
        for link in result.links.get("internal", []):
            print(f" - {link['href']} ({link['text']})")
        print(f"\n External links: {len(result.links.get('external', []))}")
        for link in result.links.get("external", []):
            print(f" - {link['href']} ({link['text']})")
async def main():
    """Run all examples."""
    banner = "=" * 60
    print(banner)
    print("Prefetch Mode and Two-Phase Crawling Examples")
    print(banner)
    # Run every example in sequence.
    for example in (
        example_basic_prefetch,
        example_performance_comparison,
        example_two_phase_crawl,
        example_prefetch_with_deep_crawl,
        example_prefetch_with_raw_html,
    ):
        await example()

if __name__ == "__main__":
    asyncio.run(main())
| {
"repo_id": "unclecode/crawl4ai",
"file_path": "docs/examples/prefetch_two_phase_crawl.py",
"license": "Apache License 2.0",
"lines": 218,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
unclecode/crawl4ai:docs/releases_review/demo_v0.8.0.py | #!/usr/bin/env python3
"""
Crawl4AI v0.8.0 Release Demo - Feature Verification Tests
==========================================================
This demo ACTUALLY RUNS and VERIFIES the new features in v0.8.0.
Each test executes real code and validates the feature is working.
New Features Verified:
1. Crash Recovery - on_state_change callback for real-time state persistence
2. Crash Recovery - resume_state for resuming from checkpoint
3. Crash Recovery - State is JSON serializable
4. Prefetch Mode - Returns HTML and links only
5. Prefetch Mode - Skips heavy processing (markdown, extraction)
6. Prefetch Mode - Two-phase crawl pattern
7. Security - Hooks disabled by default (Docker API)
Breaking Changes in v0.8.0:
- Docker API hooks disabled by default (CRAWL4AI_HOOKS_ENABLED=false)
- file:// URLs blocked on Docker API endpoints
Usage:
python docs/releases_review/demo_v0.8.0.py
"""
import asyncio
import json
import sys
import time
from typing import Dict, Any, List, Optional
from dataclasses import dataclass
# Test results tracking
@dataclass
class TestResult:
    """Outcome of a single feature-verification test."""
    name: str               # short test name
    feature: str            # feature under test (e.g. "on_state_change")
    passed: bool            # True when the verification succeeded
    message: str            # human-readable pass/fail detail
    skipped: bool = False   # True when the test could not run

# Accumulated outcomes, summarized at the end of the demo run.
results: list[TestResult] = []
def print_header(title: str):
    """Print *title* framed above and below by 70-char '=' rules."""
    bar = "=" * 70
    print("\n" + bar)
    print(title)
    print(bar)
def print_test(name: str, feature: str):
    """Announce an individual test with a dashed underline."""
    header = f"\n[TEST] {name} ({feature})"
    print(header)
    print("-" * 50)
def record_result(name: str, feature: str, passed: bool, message: str, skipped: bool = False):
    """Store a TestResult in the global list and echo its status to stdout."""
    results.append(TestResult(name, feature, passed, message, skipped))
    # Skipped takes precedence over pass/fail.
    if skipped:
        status = "SKIPPED"
    elif passed:
        status = "PASSED"
    else:
        status = "FAILED"
    print(f" {status}: {message}")
# =============================================================================
# TEST 1: Crash Recovery - State Capture with on_state_change
# =============================================================================
async def test_crash_recovery_state_capture():
    """
    Verify on_state_change callback is called after each URL is processed.

    NEW in v0.8.0: Deep crawl strategies support on_state_change callback
    for real-time state persistence (useful for cloud deployments).
    """
    print_test("Crash Recovery - State Capture", "on_state_change")
    try:
        from crawl4ai import AsyncWebCrawler, CrawlerRunConfig
        from crawl4ai.deep_crawling import BFSDeepCrawlStrategy
        captured_states: List[Dict[str, Any]] = []

        async def capture_state(state: Dict[str, Any]):
            """Callback that fires after each URL is processed."""
            # Copy the snapshot: the strategy may mutate the dict afterwards.
            captured_states.append(state.copy())

        strategy = BFSDeepCrawlStrategy(
            max_depth=1,
            max_pages=3,
            on_state_change=capture_state,
        )
        config = CrawlerRunConfig(
            deep_crawl_strategy=strategy,
            verbose=False,
        )
        async with AsyncWebCrawler(verbose=False) as crawler:
            await crawler.arun("https://books.toscrape.com", config=config)
        # Verify states were captured
        if len(captured_states) == 0:
            record_result("State Capture", "on_state_change", False,
                          "No states captured - callback not called")
            return
        # Verify callback was called exactly once per crawled page
        pages_crawled = captured_states[-1].get("pages_crawled", 0)
        if pages_crawled != len(captured_states):
            record_result("State Capture", "on_state_change", False,
                          f"Callback count {len(captured_states)} != pages_crawled {pages_crawled}")
            return
        record_result("State Capture", "on_state_change", True,
                      f"Callback fired {len(captured_states)} times (once per URL)")
    except Exception as e:
        record_result("State Capture", "on_state_change", False, f"Exception: {e}")
# =============================================================================
# TEST 2: Crash Recovery - Resume from Checkpoint
# =============================================================================
async def test_crash_recovery_resume():
    """
    Verify crawl can resume from a saved checkpoint without re-crawling visited URLs.

    NEW in v0.8.0: BFSDeepCrawlStrategy accepts resume_state parameter
    to continue from a previously saved checkpoint.
    """
    print_test("Crash Recovery - Resume from Checkpoint", "resume_state")
    try:
        from crawl4ai import AsyncWebCrawler, CrawlerRunConfig
        from crawl4ai.deep_crawling import BFSDeepCrawlStrategy
        # Phase 1: Start crawl and capture state after 2 pages
        crash_after = 2
        captured_states: List[Dict] = []
        phase1_urls: List[str] = []

        async def capture_until_crash(state: Dict[str, Any]):
            # Snapshot state, mirror the visited list, then "crash" once the
            # threshold is reached.
            captured_states.append(state.copy())
            phase1_urls.clear()
            phase1_urls.extend(state["visited"])
            if state["pages_crawled"] >= crash_after:
                raise Exception("Simulated crash")

        strategy1 = BFSDeepCrawlStrategy(
            max_depth=1,
            max_pages=5,
            on_state_change=capture_until_crash,
        )
        config1 = CrawlerRunConfig(
            deep_crawl_strategy=strategy1,
            verbose=False,
        )
        # Run until "crash"
        try:
            async with AsyncWebCrawler(verbose=False) as crawler:
                await crawler.arun("https://books.toscrape.com", config=config1)
        except Exception:
            pass  # Expected crash
        if not captured_states:
            record_result("Resume from Checkpoint", "resume_state", False,
                          "No state captured before crash")
            return
        saved_state = captured_states[-1]
        print(f" Phase 1: Crawled {len(phase1_urls)} URLs before crash")
        # Phase 2: Resume from checkpoint
        phase2_urls: List[str] = []

        async def track_phase2(state: Dict[str, Any]):
            # URLs visited now but absent from the checkpoint are new work.
            new_urls = set(state["visited"]) - set(saved_state["visited"])
            for url in new_urls:
                if url not in phase2_urls:
                    phase2_urls.append(url)

        strategy2 = BFSDeepCrawlStrategy(
            max_depth=1,
            max_pages=5,
            resume_state=saved_state,  # Resume from checkpoint!
            on_state_change=track_phase2,
        )
        config2 = CrawlerRunConfig(
            deep_crawl_strategy=strategy2,
            verbose=False,
        )
        async with AsyncWebCrawler(verbose=False) as crawler:
            await crawler.arun("https://books.toscrape.com", config=config2)
        print(f" Phase 2: Crawled {len(phase2_urls)} new URLs after resume")
        # Verify no duplicates between the two phases
        duplicates = set(phase2_urls) & set(phase1_urls)
        if duplicates:
            record_result("Resume from Checkpoint", "resume_state", False,
                          f"Re-crawled {len(duplicates)} URLs: {list(duplicates)[:2]}")
            return
        record_result("Resume from Checkpoint", "resume_state", True,
                      f"Resumed successfully, no duplicate crawls")
    except Exception as e:
        record_result("Resume from Checkpoint", "resume_state", False, f"Exception: {e}")
# =============================================================================
# TEST 3: Crash Recovery - State is JSON Serializable
# =============================================================================
async def test_crash_recovery_json_serializable():
    """
    Verify the state dictionary can be serialized to JSON (for Redis/DB storage).

    NEW in v0.8.0: State dictionary is designed to be JSON-serializable
    for easy storage in Redis, databases, or files.
    """
    print_test("Crash Recovery - JSON Serializable", "State Structure")
    try:
        from crawl4ai import AsyncWebCrawler, CrawlerRunConfig
        from crawl4ai.deep_crawling import BFSDeepCrawlStrategy
        captured_state: Optional[Dict] = None

        async def capture_state(state: Dict[str, Any]):
            # Keep only the latest snapshot; each callback overwrites it.
            nonlocal captured_state
            captured_state = state

        strategy = BFSDeepCrawlStrategy(
            max_depth=1,
            max_pages=2,
            on_state_change=capture_state,
        )
        config = CrawlerRunConfig(
            deep_crawl_strategy=strategy,
            verbose=False,
        )
        async with AsyncWebCrawler(verbose=False) as crawler:
            await crawler.arun("https://books.toscrape.com", config=config)
        if not captured_state:
            record_result("JSON Serializable", "State Structure", False,
                          "No state captured")
            return
        # Test JSON serialization round-trip
        try:
            json_str = json.dumps(captured_state)
            restored = json.loads(json_str)
        except (TypeError, json.JSONDecodeError) as e:
            record_result("JSON Serializable", "State Structure", False,
                          f"JSON serialization failed: {e}")
            return
        # Verify state structure contains every documented field
        required_fields = ["strategy_type", "visited", "pending", "depths", "pages_crawled"]
        missing = [f for f in required_fields if f not in restored]
        if missing:
            record_result("JSON Serializable", "State Structure", False,
                          f"Missing fields: {missing}")
            return
        # Verify types survived the round-trip
        if not isinstance(restored["visited"], list):
            record_result("JSON Serializable", "State Structure", False,
                          "visited is not a list")
            return
        if not isinstance(restored["pages_crawled"], int):
            record_result("JSON Serializable", "State Structure", False,
                          "pages_crawled is not an int")
            return
        record_result("JSON Serializable", "State Structure", True,
                      f"State serializes to {len(json_str)} bytes, all fields present")
    except Exception as e:
        record_result("JSON Serializable", "State Structure", False, f"Exception: {e}")
# =============================================================================
# TEST 4: Prefetch Mode - Returns HTML and Links Only
# =============================================================================
async def test_prefetch_returns_html_links():
    """
    Verify prefetch mode returns HTML and links but skips markdown generation.

    NEW in v0.8.0: CrawlerRunConfig accepts prefetch=True for fast
    URL discovery without heavy processing.
    """
    print_test("Prefetch Mode - HTML and Links", "prefetch=True")
    try:
        from crawl4ai import AsyncWebCrawler, CrawlerRunConfig
        config = CrawlerRunConfig(prefetch=True)
        async with AsyncWebCrawler(verbose=False) as crawler:
            result = await crawler.arun("https://books.toscrape.com", config=config)
        # Verify HTML is present (100 chars is a sanity floor, not an exact size)
        if not result.html or len(result.html) < 100:
            record_result("Prefetch HTML/Links", "prefetch=True", False,
                          "HTML not returned or too short")
            return
        # Verify links are present
        if not result.links:
            record_result("Prefetch HTML/Links", "prefetch=True", False,
                          "Links not returned")
            return
        internal_count = len(result.links.get("internal", []))
        external_count = len(result.links.get("external", []))
        if internal_count == 0:
            record_result("Prefetch HTML/Links", "prefetch=True", False,
                          "No internal links extracted")
            return
        record_result("Prefetch HTML/Links", "prefetch=True", True,
                      f"HTML: {len(result.html)} chars, Links: {internal_count} internal, {external_count} external")
    except Exception as e:
        record_result("Prefetch HTML/Links", "prefetch=True", False, f"Exception: {e}")
# =============================================================================
# TEST 5: Prefetch Mode - Skips Heavy Processing
# =============================================================================
async def test_prefetch_skips_processing():
    """
    Verify prefetch mode skips markdown generation and content extraction.

    NEW in v0.8.0: prefetch=True skips markdown generation, content scraping,
    media extraction, and LLM extraction for maximum speed.
    """
    print_test("Prefetch Mode - Skips Processing", "prefetch=True")
    try:
        from crawl4ai import AsyncWebCrawler, CrawlerRunConfig
        config = CrawlerRunConfig(prefetch=True)
        async with AsyncWebCrawler(verbose=False) as crawler:
            result = await crawler.arun("https://books.toscrape.com", config=config)
            # Collect evidence that each heavy-processing stage was skipped.
            checks = []
            # Markdown should be None (or a container whose raw_markdown is None).
            if result.markdown is None:
                checks.append("markdown=None")
            elif hasattr(result.markdown, 'raw_markdown') and result.markdown.raw_markdown is None:
                checks.append("raw_markdown=None")
            else:
                record_result("Prefetch Skips Processing", "prefetch=True", False,
                              "Markdown was generated (should be skipped)")
                return
            # cleaned_html should be None
            if result.cleaned_html is None:
                checks.append("cleaned_html=None")
            else:
                record_result("Prefetch Skips Processing", "prefetch=True", False,
                              "cleaned_html was generated (should be skipped)")
                return
            # extracted_content should be None
            if result.extracted_content is None:
                checks.append("extracted_content=None")
            else:
                # Bug fix: previously a non-None extracted_content was silently
                # ignored and the test still reported success.
                record_result("Prefetch Skips Processing", "prefetch=True", False,
                              "extracted_content was generated (should be skipped)")
                return
            record_result("Prefetch Skips Processing", "prefetch=True", True,
                          f"Heavy processing skipped: {', '.join(checks)}")
    except Exception as e:
        record_result("Prefetch Skips Processing", "prefetch=True", False, f"Exception: {e}")
# =============================================================================
# TEST 6: Prefetch Mode - Two-Phase Crawl Pattern
# =============================================================================
async def test_prefetch_two_phase():
    """
    Verify the two-phase crawl pattern: prefetch for discovery, then full processing.
    NEW in v0.8.0: Prefetch mode enables efficient two-phase crawling where
    you discover URLs quickly, then selectively process important ones.
    """
    print_test("Prefetch Mode - Two-Phase Crawl", "Two-Phase Pattern")
    try:
        from crawl4ai import AsyncWebCrawler, CrawlerRunConfig
        async with AsyncWebCrawler(verbose=False) as crawler:
            # Phase 1: Fast discovery with prefetch
            prefetch_config = CrawlerRunConfig(prefetch=True)
            start = time.time()
            discovery = await crawler.arun("https://books.toscrape.com", config=prefetch_config)
            prefetch_time = time.time() - start
            # Every internal link found on the landing page.
            all_urls = [link["href"] for link in discovery.links.get("internal", [])]
            # Filter to specific pages (e.g., book detail pages)
            book_urls = [
                url for url in all_urls
                if "catalogue/" in url and "category/" not in url
            ][:2]  # Just 2 for demo
            print(f" Phase 1: Found {len(all_urls)} URLs in {prefetch_time:.2f}s")
            print(f" Filtered to {len(book_urls)} book pages for full processing")
            if len(book_urls) == 0:
                record_result("Two-Phase Crawl", "Two-Phase Pattern", False,
                              "No book URLs found to process")
                return
            # Phase 2: Full processing on selected URLs
            full_config = CrawlerRunConfig()  # Normal mode
            start = time.time()
            processed = []
            for url in book_urls:
                result = await crawler.arun(url, config=full_config)
                # Keep only pages where both the crawl and markdown generation succeeded.
                if result.success and result.markdown:
                    processed.append(result)
            full_time = time.time() - start
            print(f" Phase 2: Processed {len(processed)} pages in {full_time:.2f}s")
            if len(processed) == 0:
                record_result("Two-Phase Crawl", "Two-Phase Pattern", False,
                              "No pages successfully processed in phase 2")
                return
            # Verify full processing includes markdown
            if not processed[0].markdown or not processed[0].markdown.raw_markdown:
                record_result("Two-Phase Crawl", "Two-Phase Pattern", False,
                              "Full processing did not generate markdown")
                return
            record_result("Two-Phase Crawl", "Two-Phase Pattern", True,
                          f"Discovered {len(all_urls)} URLs (prefetch), processed {len(processed)} (full)")
    except Exception as e:
        record_result("Two-Phase Crawl", "Two-Phase Pattern", False, f"Exception: {e}")
# =============================================================================
# TEST 7: Security - Hooks Disabled by Default
# =============================================================================
async def test_security_hooks_disabled():
    """
    Verify hooks are disabled by default in Docker API for security.
    NEW in v0.8.0: Docker API hooks are disabled by default to prevent
    Remote Code Execution. Set CRAWL4AI_HOOKS_ENABLED=true to enable.
    """
    print_test("Security - Hooks Disabled", "CRAWL4AI_HOOKS_ENABLED")
    try:
        import os

        # Read the opt-in flag; anything other than "true" keeps hooks off.
        hooks_enabled = os.environ.get("CRAWL4AI_HOOKS_ENABLED", "false").lower()
        if hooks_enabled == "true":
            # User explicitly opted in — record as skipped, not a failure.
            record_result("Hooks Disabled Default", "Security", True,
                          "CRAWL4AI_HOOKS_ENABLED is explicitly set to 'true' (user override)",
                          skipped=True)
        elif hooks_enabled == "false":
            record_result("Hooks Disabled Default", "Security", True,
                          "Hooks disabled by default (CRAWL4AI_HOOKS_ENABLED=false)")
        else:
            record_result("Hooks Disabled Default", "Security", True,
                          f"CRAWL4AI_HOOKS_ENABLED='{hooks_enabled}' (not 'true', hooks disabled)")
    except Exception as e:
        record_result("Hooks Disabled Default", "Security", False, f"Exception: {e}")
# =============================================================================
# TEST 8: Comprehensive Crawl Test
# =============================================================================
async def test_comprehensive_crawl():
    """
    Run a comprehensive crawl to verify overall stability with new features.

    Exercises the default (non-prefetch) pipeline end to end and records which
    artifacts (html/markdown/links) were produced.
    """
    print_test("Comprehensive Crawl Test", "Overall")
    try:
        from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, BrowserConfig
        async with AsyncWebCrawler(config=BrowserConfig(headless=True), verbose=False) as crawler:
            result = await crawler.arun(
                url="https://httpbin.org/html",
                config=CrawlerRunConfig()
            )
            checks = []
            if result.success:
                checks.append("success=True")
            else:
                # A failed crawl short-circuits the remaining artifact checks.
                record_result("Comprehensive Crawl", "Overall", False,
                              f"Crawl failed: {result.error_message}")
                return
            # The artifact checks below are informational: absence is not a failure.
            if result.html and len(result.html) > 100:
                checks.append(f"html={len(result.html)} chars")
            if result.markdown and result.markdown.raw_markdown:
                checks.append(f"markdown={len(result.markdown.raw_markdown)} chars")
            if result.links:
                total_links = len(result.links.get("internal", [])) + len(result.links.get("external", []))
                checks.append(f"links={total_links}")
            record_result("Comprehensive Crawl", "Overall", True,
                          f"All checks passed: {', '.join(checks)}")
    except Exception as e:
        record_result("Comprehensive Crawl", "Overall", False, f"Exception: {e}")
# =============================================================================
# MAIN
# =============================================================================
def print_summary():
    """Print pass/fail/skip counts, list problem tests, and return overall success."""
    print_header("TEST RESULTS SUMMARY")

    skipped = sum(1 for r in results if r.skipped)
    passed = sum(1 for r in results if r.passed and not r.skipped)
    failed = len(results) - passed - skipped

    print(f"\nTotal: {len(results)} tests")
    print(f" Passed: {passed}")
    print(f" Failed: {failed}")
    print(f" Skipped: {skipped}")

    failures = [r for r in results if not r.passed and not r.skipped]
    if failures:
        print("\nFailed Tests:")
        for r in failures:
            print(f" - {r.name} ({r.feature}): {r.message}")

    skips = [r for r in results if r.skipped]
    if skips:
        print("\nSkipped Tests:")
        for r in skips:
            print(f" - {r.name} ({r.feature}): {r.message}")

    print("\n" + "=" * 70)
    if failed == 0:
        print("All tests passed! v0.8.0 features verified.")
    else:
        print(f"WARNING: {failed} test(s) failed!")
    print("=" * 70)
    return failed == 0
async def main():
    """Run every verification test, then print a summary and return an exit code."""
    print_header("Crawl4AI v0.8.0 - Feature Verification Tests")
    print("Running actual tests to verify new features...")
    print("\nKey Features in v0.8.0:")
    print(" - Crash Recovery for Deep Crawl (resume_state, on_state_change)")
    print(" - Prefetch Mode for Fast URL Discovery (prefetch=True)")
    print(" - Security: Hooks disabled by default on Docker API")

    # Ordered suite of test coroutines; each covers one v0.8.0 feature area.
    suite = (
        test_crash_recovery_state_capture,      # on_state_change
        test_crash_recovery_resume,             # resume_state
        test_crash_recovery_json_serializable,  # State structure
        test_prefetch_returns_html_links,       # prefetch=True basics
        test_prefetch_skips_processing,         # prefetch skips heavy work
        test_prefetch_two_phase,                # Two-phase pattern
        test_security_hooks_disabled,           # Security check
        test_comprehensive_crawl,               # Overall stability
    )
    for test_func in suite:
        try:
            await test_func()
        except Exception as e:
            # A crash in one test must not abort the rest of the suite.
            print(f"\nTest {test_func.__name__} crashed: {e}")
            results.append(TestResult(test_func.__name__, "Unknown", False, f"Crashed: {e}"))

    # print_summary() returns True when no test failed.
    return 0 if print_summary() else 1
if __name__ == "__main__":
    try:
        # Run the async suite; main() returns 0 on success, 1 on any failure.
        exit_code = asyncio.run(main())
        sys.exit(exit_code)
    except KeyboardInterrupt:
        # Ctrl-C: treat as a failed run but exit without a traceback.
        print("\n\nTests interrupted by user.")
        sys.exit(1)
    except Exception as e:
        # Unexpected crash anywhere in the suite: show the traceback for debugging.
        print(f"\n\nTest suite failed: {e}")
        import traceback
        traceback.print_exc()
        sys.exit(1)
| {
"repo_id": "unclecode/crawl4ai",
"file_path": "docs/releases_review/demo_v0.8.0.py",
"license": "Apache License 2.0",
"lines": 491,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
unclecode/crawl4ai:tests/browser/test_browser_context_id.py | """Test for browser_context_id and target_id parameters.
These tests verify that Crawl4AI can connect to and use pre-created
browser contexts, which is essential for cloud browser services that
pre-create isolated contexts for each user.
The flow being tested:
1. Start a browser with CDP
2. Create a context via raw CDP commands (simulating cloud service)
3. Create a page/target in that context
4. Have Crawl4AI connect using browser_context_id and target_id
5. Verify Crawl4AI uses the existing context/page instead of creating new ones
"""
import asyncio
import json
import os
import sys
import websockets
# Add the project root to Python path if running directly
if __name__ == "__main__":
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')))
from crawl4ai.browser_manager import BrowserManager, ManagedBrowser
from crawl4ai.async_configs import BrowserConfig, CrawlerRunConfig
from crawl4ai.async_logger import AsyncLogger
# Create a logger for clear terminal output
logger = AsyncLogger(verbose=True, log_file=None)
class CDPContextCreator:
    """
    Helper class to create browser contexts via raw CDP commands.
    This simulates what a cloud browser service would do.
    """
    def __init__(self, cdp_url: str):
        # cdp_url may be an http(s):// service URL or a ws(s):// debugger URL.
        self.cdp_url = cdp_url
        self._message_id = 0          # monotonically increasing CDP message id
        self._ws = None               # active WebSocket connection (set in connect())
        self._pending_responses = {}  # message id -> Future awaiting the response
        self._receiver_task = None    # background task draining incoming messages

    async def connect(self):
        """Establish WebSocket connection to browser."""
        # Convert HTTP URL to WebSocket URL if needed
        ws_url = self.cdp_url.replace("http://", "ws://").replace("https://", "wss://")
        if not ws_url.endswith("/devtools/browser"):
            # Get the browser websocket URL from /json/version
            import aiohttp
            async with aiohttp.ClientSession() as session:
                async with session.get(f"{self.cdp_url}/json/version") as response:
                    data = await response.json()
                    ws_url = data.get("webSocketDebuggerUrl", ws_url)
        self._ws = await websockets.connect(ws_url, max_size=None, ping_interval=None)
        self._receiver_task = asyncio.create_task(self._receive_messages())
        logger.info(f"Connected to CDP at {ws_url}", tag="CDP")

    async def disconnect(self):
        """Close WebSocket connection."""
        if self._receiver_task:
            self._receiver_task.cancel()
            try:
                await self._receiver_task
            except asyncio.CancelledError:
                pass
        if self._ws:
            await self._ws.close()
            self._ws = None

    async def _receive_messages(self):
        """Background task to receive CDP messages and resolve pending futures."""
        try:
            async for message in self._ws:
                data = json.loads(message)
                msg_id = data.get('id')
                # Only command responses carry an `id`; events are ignored here.
                if msg_id is not None and msg_id in self._pending_responses:
                    self._pending_responses[msg_id].set_result(data)
        except asyncio.CancelledError:
            pass
        except Exception as e:
            logger.error(f"CDP receiver error: {e}", tag="CDP")

    async def _send_command(self, method: str, params: dict = None) -> dict:
        """Send CDP command and wait for response.

        Raises:
            Exception: if the browser returns a CDP-level error.
            asyncio.TimeoutError: if no response arrives within 30 seconds.
        """
        self._message_id += 1
        msg_id = self._message_id
        message = {
            "id": msg_id,
            "method": method,
            "params": params or {}
        }
        # Bug fix: use get_running_loop() instead of the deprecated
        # get_event_loop() — inside a coroutine the running loop is the only
        # correct one, and get_event_loop() warns (and may misbehave) on
        # modern Python.
        future = asyncio.get_running_loop().create_future()
        self._pending_responses[msg_id] = future
        try:
            await self._ws.send(json.dumps(message))
            response = await asyncio.wait_for(future, timeout=30.0)
            if 'error' in response:
                raise Exception(f"CDP error: {response['error']}")
            return response.get('result', {})
        finally:
            # Always drop the pending entry so the map cannot leak on timeout.
            self._pending_responses.pop(msg_id, None)

    async def create_context(self) -> dict:
        """
        Create an isolated browser context with a blank page.
        Returns:
            dict with browser_context_id, target_id, and cdp_session_id
        """
        await self.connect()
        # 1. Create isolated browser context
        result = await self._send_command("Target.createBrowserContext", {
            "disposeOnDetach": False  # Keep context alive
        })
        browser_context_id = result["browserContextId"]
        logger.info(f"Created browser context: {browser_context_id}", tag="CDP")
        # 2. Create a new page (target) in the context
        result = await self._send_command("Target.createTarget", {
            "url": "about:blank",
            "browserContextId": browser_context_id
        })
        target_id = result["targetId"]
        logger.info(f"Created target: {target_id}", tag="CDP")
        # 3. Attach to the target to get a session ID
        result = await self._send_command("Target.attachToTarget", {
            "targetId": target_id,
            "flatten": True
        })
        cdp_session_id = result["sessionId"]
        logger.info(f"Attached to target, sessionId: {cdp_session_id}", tag="CDP")
        return {
            "browser_context_id": browser_context_id,
            "target_id": target_id,
            "cdp_session_id": cdp_session_id
        }

    async def get_targets(self) -> list:
        """Get list of all targets in the browser."""
        result = await self._send_command("Target.getTargets")
        return result.get("targetInfos", [])

    async def dispose_context(self, browser_context_id: str):
        """Dispose of a browser context (best-effort; errors are logged)."""
        try:
            await self._send_command("Target.disposeBrowserContext", {
                "browserContextId": browser_context_id
            })
            logger.info(f"Disposed browser context: {browser_context_id}", tag="CDP")
        except Exception as e:
            logger.warning(f"Error disposing context: {e}", tag="CDP")
async def test_browser_context_id_basic():
    """
    Test that BrowserConfig accepts browser_context_id and target_id parameters.
    """
    logger.info("Testing BrowserConfig browser_context_id parameter", tag="TEST")
    try:
        # Direct constructor arguments.
        cfg = BrowserConfig(
            cdp_url="http://localhost:9222",
            browser_context_id="test-context-id",
            target_id="test-target-id",
            headless=True
        )
        assert cfg.browser_context_id == "test-context-id", "browser_context_id not set"
        assert cfg.target_id == "test-target-id", "target_id not set"

        # Construction via from_kwargs.
        kwargs_cfg = BrowserConfig.from_kwargs({
            "cdp_url": "http://localhost:9222",
            "browser_context_id": "test-context-id-2",
            "target_id": "test-target-id-2"
        })
        assert kwargs_cfg.browser_context_id == "test-context-id-2", "browser_context_id not set via from_kwargs"
        assert kwargs_cfg.target_id == "test-target-id-2", "target_id not set via from_kwargs"

        # Round-trip through to_dict.
        as_dict = cfg.to_dict()
        assert as_dict.get("browser_context_id") == "test-context-id", "browser_context_id not in to_dict"
        assert as_dict.get("target_id") == "test-target-id", "target_id not in to_dict"

        logger.success("BrowserConfig browser_context_id test passed", tag="TEST")
        return True
    except Exception as e:
        logger.error(f"Test failed: {str(e)}", tag="TEST")
        return False
async def test_pre_created_context_usage():
    """
    Test that Crawl4AI uses a pre-created browser context instead of creating a new one.
    This simulates the cloud browser service flow:
    1. Start browser with CDP
    2. Create context via raw CDP (simulating cloud service)
    3. Have Crawl4AI connect with browser_context_id
    4. Verify it uses existing context
    """
    logger.info("Testing pre-created context usage", tag="TEST")
    # Start a managed browser first
    browser_config_initial = BrowserConfig(
        use_managed_browser=True,
        headless=True,
        debugging_port=9226,  # Use unique port
        verbose=True
    )
    managed_browser = ManagedBrowser(browser_config=browser_config_initial, logger=logger)
    # Pre-declare so the finally block can safely test each resource.
    cdp_creator = None
    manager = None
    context_info = None
    try:
        # Start the browser
        cdp_url = await managed_browser.start()
        logger.info(f"Browser started at {cdp_url}", tag="TEST")
        # Create a context via raw CDP (simulating cloud service)
        cdp_creator = CDPContextCreator(cdp_url)
        context_info = await cdp_creator.create_context()
        logger.info(f"Pre-created context: {context_info['browser_context_id']}", tag="TEST")
        logger.info(f"Pre-created target: {context_info['target_id']}", tag="TEST")
        # Get initial target count (baseline used to detect extra page creation)
        targets_before = await cdp_creator.get_targets()
        initial_target_count = len(targets_before)
        logger.info(f"Initial target count: {initial_target_count}", tag="TEST")
        # Now create BrowserManager with browser_context_id and target_id
        browser_config = BrowserConfig(
            cdp_url=cdp_url,
            browser_context_id=context_info['browser_context_id'],
            target_id=context_info['target_id'],
            headless=True,
            verbose=True
        )
        manager = BrowserManager(browser_config=browser_config, logger=logger)
        await manager.start()
        logger.info("BrowserManager started with pre-created context", tag="TEST")
        # Get a page
        crawler_config = CrawlerRunConfig()
        page, context = await manager.get_page(crawler_config)
        # Navigate to a test page
        await page.goto("https://example.com", wait_until="domcontentloaded")
        title = await page.title()
        logger.info(f"Page title: {title}", tag="TEST")
        # Get target count after
        targets_after = await cdp_creator.get_targets()
        final_target_count = len(targets_after)
        logger.info(f"Final target count: {final_target_count}", tag="TEST")
        # Verify: target count should not have increased significantly
        # (allow for 1 extra target for internal use, but not many more)
        target_diff = final_target_count - initial_target_count
        logger.info(f"Target count difference: {target_diff}", tag="TEST")
        # Success criteria:
        # 1. Page navigation worked
        # 2. Target count didn't explode (reused existing context)
        success = title == "Example Domain" and target_diff <= 1
        if success:
            logger.success("Pre-created context usage test passed", tag="TEST")
        else:
            logger.error(f"Test failed - Title: {title}, Target diff: {target_diff}", tag="TEST")
        return success
    except Exception as e:
        logger.error(f"Test failed: {str(e)}", tag="TEST")
        import traceback
        traceback.print_exc()
        return False
    finally:
        # Cleanup (best-effort: each step swallows errors so later steps still run)
        if manager:
            try:
                await manager.close()
            except:
                pass
        if cdp_creator and context_info:
            try:
                await cdp_creator.dispose_context(context_info['browser_context_id'])
                await cdp_creator.disconnect()
            except:
                pass
        if managed_browser:
            try:
                await managed_browser.cleanup()
            except:
                pass
async def test_context_isolation():
    """
    Test that using browser_context_id actually provides isolation.
    Create two contexts and verify they don't share state.
    """
    logger.info("Testing context isolation with browser_context_id", tag="TEST")
    browser_config_initial = BrowserConfig(
        use_managed_browser=True,
        headless=True,
        debugging_port=9227,
        verbose=True
    )
    managed_browser = ManagedBrowser(browser_config=browser_config_initial, logger=logger)
    # Pre-declare every resource so cleanup can reference them unconditionally.
    # Bug fix: cdp_creator2 was previously only bound mid-try, forcing the
    # finally block to probe `'cdp_creator2' in dir()` — fragile and unidiomatic.
    cdp_creator = None
    cdp_creator2 = None
    manager1 = None
    manager2 = None
    context_info_1 = None
    context_info_2 = None
    try:
        # Start the browser
        cdp_url = await managed_browser.start()
        logger.info(f"Browser started at {cdp_url}", tag="TEST")
        # Create two separate contexts
        cdp_creator = CDPContextCreator(cdp_url)
        context_info_1 = await cdp_creator.create_context()
        logger.info(f"Context 1: {context_info_1['browser_context_id']}", tag="TEST")
        # Need to reconnect for second context (or use same connection)
        await cdp_creator.disconnect()
        cdp_creator2 = CDPContextCreator(cdp_url)
        context_info_2 = await cdp_creator2.create_context()
        logger.info(f"Context 2: {context_info_2['browser_context_id']}", tag="TEST")
        # Verify contexts are different
        assert context_info_1['browser_context_id'] != context_info_2['browser_context_id'], \
            "Contexts should have different IDs"
        # Connect with first context
        browser_config_1 = BrowserConfig(
            cdp_url=cdp_url,
            browser_context_id=context_info_1['browser_context_id'],
            target_id=context_info_1['target_id'],
            headless=True
        )
        manager1 = BrowserManager(browser_config=browser_config_1, logger=logger)
        await manager1.start()
        # Set a cookie in context 1
        page1, ctx1 = await manager1.get_page(CrawlerRunConfig())
        await page1.goto("https://example.com", wait_until="domcontentloaded")
        await ctx1.add_cookies([{
            "name": "test_isolation",
            "value": "context_1_value",
            "domain": "example.com",
            "path": "/"
        }])
        cookies1 = await ctx1.cookies(["https://example.com"])
        cookie1_value = next((c["value"] for c in cookies1 if c["name"] == "test_isolation"), None)
        logger.info(f"Cookie in context 1: {cookie1_value}", tag="TEST")
        # Connect with second context
        browser_config_2 = BrowserConfig(
            cdp_url=cdp_url,
            browser_context_id=context_info_2['browser_context_id'],
            target_id=context_info_2['target_id'],
            headless=True
        )
        manager2 = BrowserManager(browser_config=browser_config_2, logger=logger)
        await manager2.start()
        # Check cookies in context 2 - should not have the cookie from context 1
        page2, ctx2 = await manager2.get_page(CrawlerRunConfig())
        await page2.goto("https://example.com", wait_until="domcontentloaded")
        cookies2 = await ctx2.cookies(["https://example.com"])
        cookie2_value = next((c["value"] for c in cookies2 if c["name"] == "test_isolation"), None)
        logger.info(f"Cookie in context 2: {cookie2_value}", tag="TEST")
        # Verify isolation: cookie visible in context 1, absent in context 2
        isolation_works = cookie1_value == "context_1_value" and cookie2_value is None
        if isolation_works:
            logger.success("Context isolation test passed", tag="TEST")
        else:
            logger.error(f"Isolation failed - Cookie1: {cookie1_value}, Cookie2: {cookie2_value}", tag="TEST")
        return isolation_works
    except Exception as e:
        logger.error(f"Test failed: {str(e)}", tag="TEST")
        import traceback
        traceback.print_exc()
        return False
    finally:
        # Cleanup (best-effort; narrowed from bare `except:` so that
        # KeyboardInterrupt/SystemExit are not swallowed).
        for mgr in [manager1, manager2]:
            if mgr:
                try:
                    await mgr.close()
                except Exception:
                    pass
        for ctx_info, creator in [(context_info_1, cdp_creator), (context_info_2, cdp_creator2)]:
            if ctx_info and creator:
                try:
                    await creator.dispose_context(ctx_info['browser_context_id'])
                    await creator.disconnect()
                except Exception:
                    pass
        if managed_browser:
            try:
                await managed_browser.cleanup()
            except Exception:
                pass
async def run_tests():
    """Execute every browser_context_id test and log a pass/fail summary."""
    logger.info("Running browser_context_id tests", tag="SUITE")

    results = [
        ("browser_context_id_basic", await test_browser_context_id_basic()),
        ("pre_created_context_usage", await test_pre_created_context_usage()),
    ]
    # Note: Context isolation test is commented out because isolation is enforced
    # at the CDP level by the cloud browser service, not at the Playwright level.
    # When multiple BrowserManagers connect to the same browser, Playwright sees
    # all contexts. In production, each worker gets exactly one pre-created context.
    # results.append(("context_isolation", await test_context_isolation()))

    total = len(results)
    passed = sum(1 for _, ok in results if ok)
    logger.info("=" * 50, tag="SUMMARY")
    logger.info(f"Test Results: {passed}/{total} passed", tag="SUMMARY")
    logger.info("=" * 50, tag="SUMMARY")
    for name, result in results:
        status = "PASSED" if result else "FAILED"
        logger.info(f" {name}: {status}", tag="SUMMARY")

    if passed != total:
        logger.error(f"{total - passed} tests failed", tag="SUMMARY")
        return False
    logger.success("All tests passed!", tag="SUMMARY")
    return True
if __name__ == "__main__":
    # Run the suite directly (outside pytest); exit code reflects overall success.
    success = asyncio.run(run_tests())
    sys.exit(0 if success else 1)
| {
"repo_id": "unclecode/crawl4ai",
"file_path": "tests/browser/test_browser_context_id.py",
"license": "Apache License 2.0",
"lines": 394,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
unclecode/crawl4ai:tests/browser/test_cdp_cleanup_reuse.py | #!/usr/bin/env python3
"""
Tests for CDP connection cleanup and browser reuse.
These tests verify that:
1. WebSocket URLs are properly handled (skip HTTP verification)
2. cdp_cleanup_on_close properly disconnects without terminating the browser
3. The same browser can be reused by multiple sequential connections
Requirements:
- A CDP-compatible browser pool service running (e.g., chromepoold)
- Service should be accessible at CDP_SERVICE_URL (default: http://localhost:11235)
Usage:
pytest tests/browser/test_cdp_cleanup_reuse.py -v
Or run directly:
python tests/browser/test_cdp_cleanup_reuse.py
"""
import asyncio
import os
import pytest
import requests
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig
# Configuration
# Base URL of the CDP-compatible browser pool service; override via env var.
CDP_SERVICE_URL = os.getenv("CDP_SERVICE_URL", "http://localhost:11235")
def is_cdp_service_available():
    """Return True if the CDP pool service responds OK to its health endpoint.

    Any connection/timeout problem counts as "not available" rather than an
    error, since this only gates whether the test module runs.
    """
    try:
        resp = requests.get(f"{CDP_SERVICE_URL}/health", timeout=2)
        return resp.status_code == 200
    except requests.RequestException:
        # Narrowed from a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit and genuine programming errors.
        return False
def create_browser():
    """Ask the CDP service to launch a fresh headless browser; return its record."""
    response = requests.post(
        f"{CDP_SERVICE_URL}/v1/browsers",
        json={"headless": True},
        timeout=10,
    )
    response.raise_for_status()
    return response.json()
def get_browser_info(browser_id):
    """Return the service record for `browser_id`, or None if it is not listed."""
    resp = requests.get(f"{CDP_SERVICE_URL}/v1/browsers", timeout=5)
    return next((b for b in resp.json() if b["id"] == browser_id), None)
def delete_browser(browser_id):
    """Best-effort delete of a browser via the CDP service API.

    Failures are deliberately ignored: this runs during cleanup and the
    browser may already be gone.
    """
    try:
        requests.delete(f"{CDP_SERVICE_URL}/v1/browsers/{browser_id}", timeout=5)
    except requests.RequestException:
        # Narrowed from a bare `except:` so real bugs (NameError,
        # KeyboardInterrupt, ...) are no longer silently swallowed.
        pass
# Skip all tests if CDP service is not available.
# Evaluated once at import time; pytest applies this marker to every test in
# the module via the `pytestmark` convention.
pytestmark = pytest.mark.skipif(
    not is_cdp_service_available(),
    reason=f"CDP service not available at {CDP_SERVICE_URL}"
)
class TestCDPWebSocketURL:
    """Tests for WebSocket URL handling."""

    @pytest.mark.asyncio
    async def test_websocket_url_skips_http_verification(self):
        """WebSocket URLs should skip HTTP /json/version verification."""
        # Provision a browser through the pool service; it hands back a ws:// URL.
        browser = create_browser()
        try:
            ws_url = browser["ws_url"]
            assert ws_url.startswith("ws://") or ws_url.startswith("wss://")
            async with AsyncWebCrawler(
                config=BrowserConfig(
                    browser_mode="cdp",
                    cdp_url=ws_url,
                    headless=True,
                    cdp_cleanup_on_close=True,
                )
            ) as crawler:
                result = await crawler.arun(
                    url="https://example.com",
                    config=CrawlerRunConfig(verbose=False),
                )
                assert result.success
                assert "Example Domain" in result.metadata.get("title", "")
        finally:
            # Always release the pooled browser, even on assertion failure.
            delete_browser(browser["browser_id"])
class TestCDPCleanupOnClose:
    """Tests for cdp_cleanup_on_close behavior."""

    @pytest.mark.asyncio
    async def test_browser_survives_after_cleanup_close(self):
        """Browser should remain alive after close with cdp_cleanup_on_close=True."""
        browser = create_browser()
        browser_id = browser["browser_id"]
        ws_url = browser["ws_url"]
        try:
            # Verify browser exists
            info_before = get_browser_info(browser_id)
            assert info_before is not None
            pid_before = info_before["pid"]
            # Connect, crawl, and close with cleanup
            async with AsyncWebCrawler(
                config=BrowserConfig(
                    browser_mode="cdp",
                    cdp_url=ws_url,
                    headless=True,
                    cdp_cleanup_on_close=True,
                )
            ) as crawler:
                result = await crawler.arun(
                    url="https://example.com",
                    config=CrawlerRunConfig(verbose=False),
                )
                assert result.success
            # Browser should still exist with same PID — closing the crawler
            # must only disconnect, not terminate the pooled process.
            info_after = get_browser_info(browser_id)
            assert info_after is not None, "Browser was terminated but should only disconnect"
            assert info_after["pid"] == pid_before, "Browser PID changed unexpectedly"
        finally:
            delete_browser(browser_id)
class TestCDPBrowserReuse:
    """Tests for reusing the same browser with multiple connections."""

    @pytest.mark.asyncio
    async def test_sequential_connections_same_browser(self):
        """Multiple sequential connections to the same browser should work."""
        browser = create_browser()
        browser_id = browser["browser_id"]
        ws_url = browser["ws_url"]
        try:
            urls = [
                "https://example.com",
                "https://httpbin.org/ip",
                "https://httpbin.org/headers",
            ]
            for i, url in enumerate(urls, 1):
                # Each connection uses cdp_cleanup_on_close=True
                async with AsyncWebCrawler(
                    config=BrowserConfig(
                        browser_mode="cdp",
                        cdp_url=ws_url,
                        headless=True,
                        cdp_cleanup_on_close=True,
                    )
                ) as crawler:
                    result = await crawler.arun(
                        url=url,
                        config=CrawlerRunConfig(verbose=False),
                    )
                    assert result.success, f"Connection {i} failed for {url}"
                # Verify browser is still healthy after this crawler disconnected
                info = get_browser_info(browser_id)
                assert info is not None, f"Browser died after connection {i}"
        finally:
            delete_browser(browser_id)

    @pytest.mark.asyncio
    async def test_no_user_wait_needed_between_connections(self):
        """With cdp_cleanup_on_close=True, no user wait should be needed."""
        browser = create_browser()
        browser_id = browser["browser_id"]
        ws_url = browser["ws_url"]
        try:
            # Rapid-fire connections with NO sleep between them
            for i in range(3):
                async with AsyncWebCrawler(
                    config=BrowserConfig(
                        browser_mode="cdp",
                        cdp_url=ws_url,
                        headless=True,
                        cdp_cleanup_on_close=True,
                    )
                ) as crawler:
                    result = await crawler.arun(
                        url="https://example.com",
                        config=CrawlerRunConfig(verbose=False),
                    )
                    assert result.success, f"Rapid connection {i+1} failed"
                # NO asyncio.sleep() here - internal delay should be sufficient
        finally:
            delete_browser(browser_id)
class TestCDPBackwardCompatibility:
    """Tests for backward compatibility with existing CDP usage."""

    @pytest.mark.asyncio
    async def test_http_url_with_browser_id_works(self):
        """HTTP URL with browser_id query param should work (backward compatibility)."""
        browser = create_browser()
        browser_id = browser["browser_id"]
        try:
            # Use HTTP URL with browser_id query parameter (legacy addressing
            # style, as opposed to the direct ws:// debugger URL)
            http_url = f"{CDP_SERVICE_URL}?browser_id={browser_id}"
            async with AsyncWebCrawler(
                config=BrowserConfig(
                    browser_mode="cdp",
                    cdp_url=http_url,
                    headless=True,
                    cdp_cleanup_on_close=True,
                )
            ) as crawler:
                result = await crawler.arun(
                    url="https://example.com",
                    config=CrawlerRunConfig(verbose=False),
                )
                assert result.success
        finally:
            delete_browser(browser_id)
# Allow running directly
if __name__ == "__main__":
    if not is_cdp_service_available():
        print(f"CDP service not available at {CDP_SERVICE_URL}")
        print("Please start a CDP-compatible browser pool service first.")
        # Fix: `exit()` is a site.py convenience that is not guaranteed to
        # exist (e.g. under `python -S`); raising SystemExit always works.
        raise SystemExit(1)

    async def run_tests():
        """Run every test sequentially and print a PASS/FAIL summary."""
        print("=" * 60)
        print("CDP Cleanup and Browser Reuse Tests")
        print("=" * 60)
        tests = [
            ("WebSocket URL handling", TestCDPWebSocketURL().test_websocket_url_skips_http_verification),
            ("Browser survives after cleanup", TestCDPCleanupOnClose().test_browser_survives_after_cleanup_close),
            ("Sequential connections", TestCDPBrowserReuse().test_sequential_connections_same_browser),
            ("No user wait needed", TestCDPBrowserReuse().test_no_user_wait_needed_between_connections),
            ("HTTP URL with browser_id", TestCDPBackwardCompatibility().test_http_url_with_browser_id_works),
        ]
        results = []
        for name, test_func in tests:
            print(f"\n--- {name} ---")
            try:
                await test_func()
                # Fix: dropped pointless f-prefix on a placeholder-free literal.
                print("PASS")
                results.append((name, True))
            except Exception as e:
                print(f"FAIL: {e}")
                results.append((name, False))
        print("\n" + "=" * 60)
        print("SUMMARY")
        print("=" * 60)
        for name, passed in results:
            print(f" {name}: {'PASS' if passed else 'FAIL'}")
        all_passed = all(r[1] for r in results)
        print(f"\nOverall: {'ALL TESTS PASSED' if all_passed else 'SOME TESTS FAILED'}")
        return 0 if all_passed else 1

    raise SystemExit(asyncio.run(run_tests()))
| {
"repo_id": "unclecode/crawl4ai",
"file_path": "tests/browser/test_cdp_cleanup_reuse.py",
"license": "Apache License 2.0",
"lines": 234,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
unclecode/crawl4ai:tests/cache_validation/test_end_to_end.py | """
End-to-end tests for Smart Cache validation.
Tests the full flow:
1. Fresh crawl (browser launch) - SLOW
2. Cached crawl without validation (check_cache_freshness=False) - FAST
3. Cached crawl with validation (check_cache_freshness=True) - FAST (304/fingerprint)
Verifies all layers:
- Database storage of etag, last_modified, head_fingerprint, cached_at
- Cache validation logic
- HTTP conditional requests (304 Not Modified)
- Performance improvements
"""
import pytest
import time
import asyncio
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode
from crawl4ai.async_database import async_db_manager
class TestEndToEndCacheValidation:
    """End-to-end tests for the complete cache validation flow."""

    @pytest.mark.asyncio
    async def test_full_cache_flow_docs_python(self):
        """
        Test complete cache flow with docs.python.org:
        1. Fresh crawl (slow - browser) - using BYPASS to force fresh
        2. Cache hit without validation (fast)
        3. Cache hit with validation (fast - 304)
        """
        url = "https://docs.python.org/3/"
        browser_config = BrowserConfig(headless=True, verbose=False)

        # ========== CRAWL 1: Fresh crawl (force with WRITE_ONLY to skip cache read) ==========
        config1 = CrawlerRunConfig(
            cache_mode=CacheMode.WRITE_ONLY,  # Skip reading, write new data
            check_cache_freshness=False,
        )
        async with AsyncWebCrawler(config=browser_config) as crawler:
            start1 = time.perf_counter()
            result1 = await crawler.arun(url, config=config1)
            time1 = time.perf_counter() - start1

            assert result1.success, f"First crawl failed: {result1.error_message}"
            # WRITE_ONLY means we did a fresh crawl and wrote to cache
            assert result1.cache_status == "miss", f"Expected 'miss', got '{result1.cache_status}'"
            print(f"\n[CRAWL 1] Fresh crawl: {time1:.2f}s (cache_status: {result1.cache_status})")

            # Verify data is stored in database
            metadata = await async_db_manager.aget_cache_metadata(url)
            assert metadata is not None, "Metadata should be stored in database"
            assert metadata.get("etag") or metadata.get("last_modified"), "Should have ETag or Last-Modified"
            # BUGFIX: only one of etag/last_modified is guaranteed above, and the
            # key can be present with a None value — dict.get's default does not
            # cover that, so slicing could raise TypeError on None[:30].
            # Coalesce with `or` before slicing.
            print(f"  - Stored ETag: {(metadata.get('etag') or 'N/A')[:30]}...")
            print(f"  - Stored Last-Modified: {metadata.get('last_modified', 'N/A')}")
            print(f"  - Stored head_fingerprint: {metadata.get('head_fingerprint', 'N/A')}")
            print(f"  - Stored cached_at: {metadata.get('cached_at', 'N/A')}")

        # ========== CRAWL 2: Cache hit WITHOUT validation ==========
        config2 = CrawlerRunConfig(
            cache_mode=CacheMode.ENABLED,
            check_cache_freshness=False,  # Skip validation - pure cache hit
        )
        async with AsyncWebCrawler(config=browser_config) as crawler:
            start2 = time.perf_counter()
            result2 = await crawler.arun(url, config=config2)
            time2 = time.perf_counter() - start2

            assert result2.success, f"Second crawl failed: {result2.error_message}"
            assert result2.cache_status == "hit", f"Expected 'hit', got '{result2.cache_status}'"
            print(f"\n[CRAWL 2] Cache hit (no validation): {time2:.2f}s (cache_status: {result2.cache_status})")
            print(f"  - Speedup: {time1/time2:.1f}x faster than fresh crawl")

            # Should be MUCH faster - no browser, no HTTP request
            assert time2 < time1 / 2, f"Cache hit should be at least 2x faster (was {time1/time2:.1f}x)"

        # ========== CRAWL 3: Cache hit WITH validation (304) ==========
        config3 = CrawlerRunConfig(
            cache_mode=CacheMode.ENABLED,
            check_cache_freshness=True,  # Validate cache freshness
        )
        async with AsyncWebCrawler(config=browser_config) as crawler:
            start3 = time.perf_counter()
            result3 = await crawler.arun(url, config=config3)
            time3 = time.perf_counter() - start3

            assert result3.success, f"Third crawl failed: {result3.error_message}"
            # Should be "hit_validated" (304) or "hit_fallback" (error during validation)
            assert result3.cache_status in ["hit_validated", "hit_fallback"], \
                f"Expected validated cache hit, got '{result3.cache_status}'"
            print(f"\n[CRAWL 3] Cache hit (with validation): {time3:.2f}s (cache_status: {result3.cache_status})")
            print(f"  - Speedup: {time1/time3:.1f}x faster than fresh crawl")

            # Should still be fast - just a HEAD request, no browser.
            # (fixed: assert message was a placeholder-free f-string)
            assert time3 < time1 / 2, "Validated cache hit should be faster than fresh crawl"

        # ========== SUMMARY ==========
        print(f"\n{'='*60}")
        print(f"PERFORMANCE SUMMARY for {url}")
        print(f"{'='*60}")
        print(f"  Fresh crawl (browser): {time1:.2f}s")
        print(f"  Cache hit (no validation): {time2:.2f}s ({time1/time2:.1f}x faster)")
        print(f"  Cache hit (with validation): {time3:.2f}s ({time1/time3:.1f}x faster)")
        print(f"{'='*60}")

    @pytest.mark.asyncio
    async def test_full_cache_flow_crawl4ai_docs(self):
        """Test with docs.crawl4ai.com."""
        url = "https://docs.crawl4ai.com/"
        browser_config = BrowserConfig(headless=True, verbose=False)

        # Fresh crawl - use WRITE_ONLY to ensure we get fresh data
        config1 = CrawlerRunConfig(cache_mode=CacheMode.WRITE_ONLY, check_cache_freshness=False)
        async with AsyncWebCrawler(config=browser_config) as crawler:
            start1 = time.perf_counter()
            result1 = await crawler.arun(url, config=config1)
            time1 = time.perf_counter() - start1
            assert result1.success
            assert result1.cache_status == "miss"
            print(f"\n[docs.crawl4ai.com] Fresh: {time1:.2f}s")

        # Cache hit with validation
        config2 = CrawlerRunConfig(cache_mode=CacheMode.ENABLED, check_cache_freshness=True)
        async with AsyncWebCrawler(config=browser_config) as crawler:
            start2 = time.perf_counter()
            result2 = await crawler.arun(url, config=config2)
            time2 = time.perf_counter() - start2
            assert result2.success
            assert result2.cache_status in ["hit_validated", "hit_fallback"]
            print(f"[docs.crawl4ai.com] Validated: {time2:.2f}s ({time1/time2:.1f}x faster)")

    @pytest.mark.asyncio
    async def test_verify_database_storage(self):
        """Verify all validation metadata is properly stored in database."""
        url = "https://docs.python.org/3/library/asyncio.html"
        browser_config = BrowserConfig(headless=True, verbose=False)

        config = CrawlerRunConfig(cache_mode=CacheMode.ENABLED, check_cache_freshness=False)
        async with AsyncWebCrawler(config=browser_config) as crawler:
            result = await crawler.arun(url, config=config)
            assert result.success

        # Verify all fields in database
        metadata = await async_db_manager.aget_cache_metadata(url)
        assert metadata is not None, "Metadata must be stored"
        assert "url" in metadata
        assert "etag" in metadata
        assert "last_modified" in metadata
        assert "head_fingerprint" in metadata
        assert "cached_at" in metadata
        assert "response_headers" in metadata

        print(f"\nDatabase storage verification for {url}:")
        print(f"  - etag: {metadata['etag'][:40] if metadata['etag'] else 'None'}...")
        print(f"  - last_modified: {metadata['last_modified']}")
        print(f"  - head_fingerprint: {metadata['head_fingerprint']}")
        print(f"  - cached_at: {metadata['cached_at']}")
        print(f"  - response_headers keys: {list(metadata['response_headers'].keys())[:5]}...")

        # At least one validation field should be populated
        has_validation_data = (
            metadata["etag"] or
            metadata["last_modified"] or
            metadata["head_fingerprint"]
        )
        assert has_validation_data, "Should have at least one validation field"

    @pytest.mark.asyncio
    async def test_head_fingerprint_stored_and_used(self):
        """Verify head fingerprint is computed, stored, and used for validation."""
        url = "https://example.com/"
        browser_config = BrowserConfig(headless=True, verbose=False)

        # Fresh crawl
        config1 = CrawlerRunConfig(cache_mode=CacheMode.ENABLED, check_cache_freshness=False)
        async with AsyncWebCrawler(config=browser_config) as crawler:
            result1 = await crawler.arun(url, config=config1)
            assert result1.success
            assert result1.head_fingerprint, "head_fingerprint should be set on CrawlResult"

        # Verify in database
        metadata = await async_db_manager.aget_cache_metadata(url)
        assert metadata["head_fingerprint"], "head_fingerprint should be stored in database"
        assert metadata["head_fingerprint"] == result1.head_fingerprint
        print(f"\nHead fingerprint for {url}:")
        print(f"  - CrawlResult.head_fingerprint: {result1.head_fingerprint}")
        print(f"  - Database head_fingerprint: {metadata['head_fingerprint']}")

        # Validate using fingerprint
        config2 = CrawlerRunConfig(cache_mode=CacheMode.ENABLED, check_cache_freshness=True)
        async with AsyncWebCrawler(config=browser_config) as crawler:
            result2 = await crawler.arun(url, config=config2)
            assert result2.success
            assert result2.cache_status in ["hit_validated", "hit_fallback"]
            print(f"  - Validation result: {result2.cache_status}")
class TestCacheValidationPerformance:
    """Performance benchmarks for cache validation."""

    @pytest.mark.asyncio
    async def test_multiple_urls_performance(self):
        """Test cache performance across multiple URLs."""
        urls = [
            "https://docs.python.org/3/",
            "https://docs.python.org/3/library/asyncio.html",
            "https://en.wikipedia.org/wiki/Python_(programming_language)",
        ]
        browser_cfg = BrowserConfig(headless=True, verbose=False)
        fresh_times, cached_times = [], []

        print(f"\n{'='*70}")
        print("MULTI-URL PERFORMANCE TEST")
        print(f"{'='*70}")

        # Pass 1: WRITE_ONLY forces a real crawl and populates the cache.
        for url in urls:
            run_cfg = CrawlerRunConfig(cache_mode=CacheMode.WRITE_ONLY, check_cache_freshness=False)
            async with AsyncWebCrawler(config=browser_cfg) as crawler:
                t0 = time.perf_counter()
                outcome = await crawler.arun(url, config=run_cfg)
                dt = time.perf_counter() - t0
                fresh_times.append(dt)
                print(f"Fresh:  {url[:50]:50} {dt:.2f}s ({outcome.cache_status})")

        # Pass 2: cached crawls, this time validating freshness first.
        for url in urls:
            run_cfg = CrawlerRunConfig(cache_mode=CacheMode.ENABLED, check_cache_freshness=True)
            async with AsyncWebCrawler(config=browser_cfg) as crawler:
                t0 = time.perf_counter()
                outcome = await crawler.arun(url, config=run_cfg)
                dt = time.perf_counter() - t0
                cached_times.append(dt)
                print(f"Cached: {url[:50]:50} {dt:.2f}s ({outcome.cache_status})")

        total_fresh = sum(fresh_times)
        total_cached = sum(cached_times)
        avg_fresh = total_fresh / len(fresh_times)
        avg_cached = total_cached / len(cached_times)

        print(f"\n{'='*70}")
        print(f"RESULTS:")
        print(f"  Total fresh crawl time: {total_fresh:.2f}s")
        print(f"  Total cached time: {total_cached:.2f}s")
        print(f"  Average speedup: {avg_fresh/avg_cached:.1f}x")
        print(f"  Time saved: {total_fresh - total_cached:.2f}s")
        print(f"{'='*70}")

        # Cached should be significantly faster
        assert avg_cached < avg_fresh / 2, "Cached crawls should be at least 2x faster"

    @pytest.mark.asyncio
    async def test_repeated_access_same_url(self):
        """Test repeated access to the same URL shows consistent cache hits."""
        url = "https://docs.python.org/3/"
        num_accesses = 5
        browser_cfg = BrowserConfig(headless=True, verbose=False)

        print(f"\n{'='*60}")
        print(f"REPEATED ACCESS TEST: {url}")
        print(f"{'='*60}")

        # First access - fresh crawl
        warm_cfg = CrawlerRunConfig(cache_mode=CacheMode.ENABLED, check_cache_freshness=False)
        async with AsyncWebCrawler(config=browser_cfg) as crawler:
            t0 = time.perf_counter()
            outcome = await crawler.arun(url, config=warm_cfg)
            fresh_time = time.perf_counter() - t0
            print(f"Access 1 (fresh): {fresh_time:.2f}s - {outcome.cache_status}")

        # Repeated accesses - should all be cache hits
        cached_times = []
        for attempt in range(2, num_accesses + 1):
            check_cfg = CrawlerRunConfig(cache_mode=CacheMode.ENABLED, check_cache_freshness=True)
            async with AsyncWebCrawler(config=browser_cfg) as crawler:
                t0 = time.perf_counter()
                outcome = await crawler.arun(url, config=check_cfg)
                dt = time.perf_counter() - t0
                cached_times.append(dt)
                print(f"Access {attempt} (cached): {dt:.2f}s - {outcome.cache_status}")
                assert outcome.cache_status in ["hit", "hit_validated", "hit_fallback"]

        avg_cached = sum(cached_times) / len(cached_times)
        print(f"\nAverage cached time: {avg_cached:.2f}s")
        print(f"Speedup over fresh: {fresh_time/avg_cached:.1f}x")
class TestCacheValidationModes:
    """Test different cache modes and their behavior."""

    @pytest.mark.asyncio
    async def test_cache_bypass_always_fresh(self):
        """CacheMode.BYPASS should always do fresh crawl."""
        # Use a unique URL path to avoid cache from other tests
        url = "https://example.com/test-bypass"
        browser_cfg = BrowserConfig(headless=True, verbose=False)

        # First crawl with WRITE_ONLY to populate cache (always fresh)
        seed_cfg = CrawlerRunConfig(cache_mode=CacheMode.WRITE_ONLY, check_cache_freshness=False)
        async with AsyncWebCrawler(config=browser_cfg) as crawler:
            seeded = await crawler.arun(url, config=seed_cfg)
            assert seeded.cache_status == "miss"

        # Second crawl with BYPASS - should NOT use cache
        bypass_cfg = CrawlerRunConfig(cache_mode=CacheMode.BYPASS, check_cache_freshness=False)
        async with AsyncWebCrawler(config=browser_cfg) as crawler:
            bypassed = await crawler.arun(url, config=bypass_cfg)
            # BYPASS mode means no cache interaction
            assert bypassed.cache_status in (None, "miss")
            print(f"\nCacheMode.BYPASS result: {bypassed.cache_status}")

    @pytest.mark.asyncio
    async def test_validation_disabled_uses_cache_directly(self):
        """With check_cache_freshness=False, should use cache without HTTP validation."""
        url = "https://docs.python.org/3/tutorial/"
        browser_cfg = BrowserConfig(headless=True, verbose=False)

        # Fresh crawl - use WRITE_ONLY to force fresh
        seed_cfg = CrawlerRunConfig(cache_mode=CacheMode.WRITE_ONLY, check_cache_freshness=False)
        async with AsyncWebCrawler(config=browser_cfg) as crawler:
            seeded = await crawler.arun(url, config=seed_cfg)
            assert seeded.cache_status == "miss"

        # Cached with validation DISABLED - should be "hit" (not "hit_validated")
        cached_cfg = CrawlerRunConfig(cache_mode=CacheMode.ENABLED, check_cache_freshness=False)
        async with AsyncWebCrawler(config=browser_cfg) as crawler:
            t0 = time.perf_counter()
            cached = await crawler.arun(url, config=cached_cfg)
            dt = time.perf_counter() - t0
            assert cached.cache_status == "hit", f"Expected 'hit', got '{cached.cache_status}'"
            print(f"\nValidation disabled: {dt:.3f}s (cache_status: {cached.cache_status})")
            # Should be very fast - no HTTP request at all
            assert dt < 1.0, "Cache hit without validation should be < 1 second"

    @pytest.mark.asyncio
    async def test_validation_enabled_checks_freshness(self):
        """With check_cache_freshness=True, should validate before using cache."""
        url = "https://docs.python.org/3/reference/"
        browser_cfg = BrowserConfig(headless=True, verbose=False)

        # Fresh crawl
        seed_cfg = CrawlerRunConfig(cache_mode=CacheMode.ENABLED, check_cache_freshness=False)
        async with AsyncWebCrawler(config=browser_cfg) as crawler:
            await crawler.arun(url, config=seed_cfg)

        # Cached with validation ENABLED - should be "hit_validated"
        check_cfg = CrawlerRunConfig(cache_mode=CacheMode.ENABLED, check_cache_freshness=True)
        async with AsyncWebCrawler(config=browser_cfg) as crawler:
            t0 = time.perf_counter()
            validated = await crawler.arun(url, config=check_cfg)
            dt = time.perf_counter() - t0
            assert validated.cache_status in ["hit_validated", "hit_fallback"]
            print(f"\nValidation enabled: {dt:.3f}s (cache_status: {validated.cache_status})")
class TestCacheValidationResponseHeaders:
    """Test that response headers are properly stored and retrieved."""

    @pytest.mark.asyncio
    async def test_response_headers_stored(self):
        """Verify response headers including ETag and Last-Modified are stored."""
        url = "https://docs.python.org/3/"
        browser_cfg = BrowserConfig(headless=True, verbose=False)
        run_cfg = CrawlerRunConfig(cache_mode=CacheMode.ENABLED, check_cache_freshness=False)

        async with AsyncWebCrawler(config=browser_cfg) as crawler:
            outcome = await crawler.arun(url, config=run_cfg)
            assert outcome.success
            assert outcome.response_headers is not None

            # Check that cache-relevant headers are captured
            hdrs = outcome.response_headers
            print(f"\nResponse headers for {url}:")

            # Look for ETag (case-insensitive)
            etag_val = hdrs.get("etag") or hdrs.get("ETag")
            print(f"  - ETag: {etag_val}")
            # Look for Last-Modified
            last_mod_val = hdrs.get("last-modified") or hdrs.get("Last-Modified")
            print(f"  - Last-Modified: {last_mod_val}")
            # Look for Cache-Control
            cache_ctl_val = hdrs.get("cache-control") or hdrs.get("Cache-Control")
            print(f"  - Cache-Control: {cache_ctl_val}")

            # At least one should be present for docs.python.org
            assert etag_val or last_mod_val, "Should have ETag or Last-Modified header"

    @pytest.mark.asyncio
    async def test_headers_used_for_validation(self):
        """Verify stored headers are used for conditional requests."""
        url = "https://docs.crawl4ai.com/"
        browser_cfg = BrowserConfig(headless=True, verbose=False)

        # Fresh crawl to store headers
        seed_cfg = CrawlerRunConfig(cache_mode=CacheMode.ENABLED, check_cache_freshness=False)
        async with AsyncWebCrawler(config=browser_cfg) as crawler:
            await crawler.arun(url, config=seed_cfg)

        # Get stored metadata
        metadata = await async_db_manager.aget_cache_metadata(url)
        stored_etag = metadata.get("etag")
        stored_last_modified = metadata.get("last_modified")
        print(f"\nStored validation data for {url}:")
        print(f"  - etag: {stored_etag}")
        print(f"  - last_modified: {stored_last_modified}")

        # Validate - should use stored headers
        check_cfg = CrawlerRunConfig(cache_mode=CacheMode.ENABLED, check_cache_freshness=True)
        async with AsyncWebCrawler(config=browser_cfg) as crawler:
            validated = await crawler.arun(url, config=check_cfg)
            # Should get validated hit (304 response)
            assert validated.cache_status in ["hit_validated", "hit_fallback"]
            print(f"  - Validation result: {validated.cache_status}")
| {
"repo_id": "unclecode/crawl4ai",
"file_path": "tests/cache_validation/test_end_to_end.py",
"license": "Apache License 2.0",
"lines": 352,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
unclecode/crawl4ai:tests/cache_validation/test_head_fingerprint.py | """Unit tests for head fingerprinting."""
import pytest
from crawl4ai.utils import compute_head_fingerprint
class TestHeadFingerprint:
    """Tests for the compute_head_fingerprint function."""

    def test_same_content_same_fingerprint(self):
        """Identical <head> content produces same fingerprint."""
        markup = "<head><title>Test Page</title></head>"
        first = compute_head_fingerprint(markup)
        second = compute_head_fingerprint(markup)
        assert first == second
        assert first != ""

    def test_different_title_different_fingerprint(self):
        """Different title produces different fingerprint."""
        a = "<head><title>Title A</title></head>"
        b = "<head><title>Title B</title></head>"
        assert compute_head_fingerprint(a) != compute_head_fingerprint(b)

    def test_empty_head_returns_empty_string(self):
        """Empty or None head should return empty fingerprint."""
        assert compute_head_fingerprint("") == ""
        assert compute_head_fingerprint(None) == ""

    def test_head_without_signals_returns_empty(self):
        """Head without title or key meta tags returns empty."""
        markup = "<head><link rel='stylesheet' href='style.css'></head>"
        assert compute_head_fingerprint(markup) == ""

    def test_extracts_title(self):
        """Title is extracted and included in fingerprint."""
        bare = "<head><title>My Title</title></head>"
        with_link = "<head><title>My Title</title><link href='x'></head>"
        # Same title should produce same fingerprint
        assert compute_head_fingerprint(bare) == compute_head_fingerprint(with_link)

    def test_extracts_meta_description(self):
        """Meta description is extracted."""
        a = '<head><meta name="description" content="Test description"></head>'
        b = '<head><meta name="description" content="Different description"></head>'
        assert compute_head_fingerprint(a) != compute_head_fingerprint(b)

    def test_extracts_og_tags(self):
        """Open Graph tags are extracted."""
        a = '<head><meta property="og:title" content="OG Title"></head>'
        b = '<head><meta property="og:title" content="Different OG Title"></head>'
        assert compute_head_fingerprint(a) != compute_head_fingerprint(b)

    def test_extracts_og_image(self):
        """og:image is extracted and affects fingerprint."""
        a = '<head><meta property="og:image" content="https://example.com/img1.jpg"></head>'
        b = '<head><meta property="og:image" content="https://example.com/img2.jpg"></head>'
        assert compute_head_fingerprint(a) != compute_head_fingerprint(b)

    def test_extracts_article_modified_time(self):
        """article:modified_time is extracted."""
        a = '<head><meta property="article:modified_time" content="2024-01-01T00:00:00Z"></head>'
        b = '<head><meta property="article:modified_time" content="2024-12-01T00:00:00Z"></head>'
        assert compute_head_fingerprint(a) != compute_head_fingerprint(b)

    def test_case_insensitive(self):
        """Fingerprinting is case-insensitive for tags."""
        upper = "<head><TITLE>Test</TITLE></head>"
        lower = "<head><title>test</title></head>"
        # Both should extract title (case insensitive); only non-emptiness is
        # asserted here since the title *text* differs between the two.
        assert compute_head_fingerprint(upper) != ""
        assert compute_head_fingerprint(lower) != ""

    def test_handles_attribute_order(self):
        """Handles different attribute orders in meta tags."""
        name_first = '<head><meta name="description" content="Test"></head>'
        content_first = '<head><meta content="Test" name="description"></head>'
        assert compute_head_fingerprint(name_first) == compute_head_fingerprint(content_first)

    def test_real_world_head(self):
        """Test with a realistic head section."""
        markup = '''
        <head>
            <meta charset="utf-8">
            <title>Python Documentation</title>
            <meta name="description" content="Official Python documentation">
            <meta property="og:title" content="Python Docs">
            <meta property="og:description" content="Learn Python">
            <meta property="og:image" content="https://python.org/logo.png">
            <link rel="stylesheet" href="styles.css">
        </head>
        '''
        digest = compute_head_fingerprint(markup)
        assert digest != ""
        # Should be deterministic
        assert digest == compute_head_fingerprint(markup)
| {
"repo_id": "unclecode/crawl4ai",
"file_path": "tests/cache_validation/test_head_fingerprint.py",
"license": "Apache License 2.0",
"lines": 82,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
unclecode/crawl4ai:tests/cache_validation/test_real_domains.py | """
Real-world tests for cache validation using actual HTTP requests.
No mocks - all tests hit real servers.
"""
import pytest
from crawl4ai.cache_validator import CacheValidator, CacheValidationResult
from crawl4ai.utils import compute_head_fingerprint
class TestRealDomainsConditionalSupport:
    """Test domains that support HTTP conditional requests (ETag/Last-Modified)."""

    @pytest.mark.asyncio
    async def test_docs_python_org_etag(self):
        """docs.python.org supports ETag - should return 304."""
        url = "https://docs.python.org/3/"
        async with CacheValidator(timeout=15.0) as validator:
            # First fetch to get ETag.
            # NOTE(review): _fetch_head is a private API of CacheValidator;
            # these tests deliberately exercise it directly.
            head_html, etag, last_modified = await validator._fetch_head(url)
            assert head_html is not None, "Should fetch head content"
            assert etag is not None, "docs.python.org should return ETag"
            # Validate with the ETag we just got - server should answer 304
            result = await validator.validate(url=url, stored_etag=etag)
            assert result.status == CacheValidationResult.FRESH, f"Expected FRESH, got {result.status}: {result.reason}"
            assert "304" in result.reason

    @pytest.mark.asyncio
    async def test_docs_crawl4ai_etag(self):
        """docs.crawl4ai.com supports ETag - should return 304."""
        url = "https://docs.crawl4ai.com/"
        async with CacheValidator(timeout=15.0) as validator:
            head_html, etag, last_modified = await validator._fetch_head(url)
            assert etag is not None, "docs.crawl4ai.com should return ETag"
            result = await validator.validate(url=url, stored_etag=etag)
            assert result.status == CacheValidationResult.FRESH, f"Expected FRESH, got {result.status}: {result.reason}"

    @pytest.mark.asyncio
    async def test_wikipedia_last_modified(self):
        """Wikipedia supports Last-Modified - should return 304."""
        url = "https://en.wikipedia.org/wiki/Web_crawler"
        async with CacheValidator(timeout=15.0) as validator:
            head_html, etag, last_modified = await validator._fetch_head(url)
            assert last_modified is not None, "Wikipedia should return Last-Modified"
            # Validate via If-Modified-Since only (no ETag supplied)
            result = await validator.validate(url=url, stored_last_modified=last_modified)
            assert result.status == CacheValidationResult.FRESH, f"Expected FRESH, got {result.status}: {result.reason}"

    @pytest.mark.asyncio
    async def test_github_pages(self):
        """GitHub Pages supports conditional requests."""
        url = "https://pages.github.com/"
        async with CacheValidator(timeout=15.0) as validator:
            head_html, etag, last_modified = await validator._fetch_head(url)
            # GitHub Pages typically has at least one validator header
            has_conditional = etag is not None or last_modified is not None
            assert has_conditional, "GitHub Pages should support conditional requests"
            # Pass along whichever validators the server gave us
            result = await validator.validate(
                url=url,
                stored_etag=etag,
                stored_last_modified=last_modified,
            )
            assert result.status == CacheValidationResult.FRESH

    @pytest.mark.asyncio
    async def test_httpbin_etag(self):
        """httpbin.org/etag endpoint for testing ETag."""
        # The endpoint echoes the ETag from the URL path, so a matching
        # If-None-Match should always yield 304.
        url = "https://httpbin.org/etag/test-etag-value"
        async with CacheValidator(timeout=15.0) as validator:
            result = await validator.validate(url=url, stored_etag='"test-etag-value"')
            # httpbin should return 304 for matching ETag
            assert result.status == CacheValidationResult.FRESH, f"Expected FRESH, got {result.status}: {result.reason}"
class TestRealDomainsNoConditionalSupport:
    """Test domains that may NOT support HTTP conditional requests."""

    @pytest.mark.asyncio
    async def test_dynamic_site_fingerprint_fallback(self):
        """Test fingerprint-based validation for sites without conditional support."""
        # Use a site that changes frequently but has stable head
        url = "https://example.com/"
        async with CacheValidator(timeout=15.0) as validator:
            # Get head and compute fingerprint
            head_html, etag, last_modified = await validator._fetch_head(url)
            assert head_html is not None
            fingerprint = compute_head_fingerprint(head_html)

            # Validate using fingerprint (not etag/last-modified)
            result = await validator.validate(
                url=url,
                stored_head_fingerprint=fingerprint,
            )
            # Should be FRESH since fingerprint should match, and the reason
            # string should say the decision came from the fingerprint path
            assert result.status == CacheValidationResult.FRESH, f"Expected FRESH, got {result.status}: {result.reason}"
            assert "fingerprint" in result.reason.lower()

    @pytest.mark.asyncio
    async def test_news_site_changes_frequently(self):
        """News sites change frequently - test that we can detect changes."""
        url = "https://www.bbc.com/news"
        async with CacheValidator(timeout=15.0) as validator:
            head_html, etag, last_modified = await validator._fetch_head(url)
            # BBC News has ETag but it changes with content
            assert head_html is not None

            # Using a fake old ETag should return STALE (200 with different content)
            result = await validator.validate(
                url=url,
                stored_etag='"fake-old-etag-12345"',
            )
            # Should be STALE because the ETag doesn't match
            assert result.status == CacheValidationResult.STALE, f"Expected STALE, got {result.status}: {result.reason}"
class TestRealDomainsEdgeCases:
    """Edge cases with real domains."""

    @pytest.mark.asyncio
    async def test_nonexistent_domain(self):
        """Non-existent domain should return ERROR."""
        url = "https://this-domain-definitely-does-not-exist-xyz123.com/"
        async with CacheValidator(timeout=5.0) as validator:
            result = await validator.validate(url=url, stored_etag='"test"')
            # DNS failure is surfaced as ERROR rather than raising
            assert result.status == CacheValidationResult.ERROR

    @pytest.mark.asyncio
    async def test_timeout_slow_server(self):
        """Test timeout handling with a slow endpoint."""
        # httpbin delay endpoint: responds after 10s, far beyond our timeout
        url = "https://httpbin.org/delay/10"
        async with CacheValidator(timeout=2.0) as validator:  # 2 second timeout
            result = await validator.validate(url=url, stored_etag='"test"')
            # Should timeout and return ERROR with a timeout-ish reason string
            assert result.status == CacheValidationResult.ERROR
            assert "timeout" in result.reason.lower() or "timed out" in result.reason.lower()

    @pytest.mark.asyncio
    async def test_redirect_handling(self):
        """Test that redirects are followed."""
        # httpbin redirect
        url = "https://httpbin.org/redirect/1"
        async with CacheValidator(timeout=15.0) as validator:
            head_html, etag, last_modified = await validator._fetch_head(url)
            # Should follow redirect and get content
            # The final page might not have useful head content, but shouldn't error
            # This tests that redirects are handled
            # NOTE(review): intentionally assertion-free - passing means
            # _fetch_head did not raise on a redirect.

    @pytest.mark.asyncio
    async def test_https_only(self):
        """Test HTTPS connection."""
        url = "https://www.google.com/"
        async with CacheValidator(timeout=15.0) as validator:
            head_html, etag, last_modified = await validator._fetch_head(url)
            assert head_html is not None
            # Loose check: any <title...> tag in the fetched head section
            assert "<title" in head_html.lower()
class TestRealDomainsHeadFingerprint:
    """Test head fingerprint extraction with real domains."""

    @pytest.mark.asyncio
    async def test_python_docs_fingerprint(self):
        """Python docs has title and meta tags."""
        url = "https://docs.python.org/3/"
        async with CacheValidator(timeout=15.0) as validator:
            head_html, _, _ = await validator._fetch_head(url)
            assert head_html is not None
            digest = compute_head_fingerprint(head_html)
            assert digest != "", "Should extract fingerprint from Python docs"
            # Fingerprint should be consistent
            assert compute_head_fingerprint(head_html) == digest

    @pytest.mark.asyncio
    async def test_github_fingerprint(self):
        """GitHub has og: tags."""
        url = "https://github.com/"
        async with CacheValidator(timeout=15.0) as validator:
            head_html, _, _ = await validator._fetch_head(url)
            assert head_html is not None
            lowered = head_html.lower()
            assert "og:" in lowered or "title" in lowered
            assert compute_head_fingerprint(head_html) != ""

    @pytest.mark.asyncio
    async def test_crawl4ai_docs_fingerprint(self):
        """Crawl4AI docs should have title and description."""
        url = "https://docs.crawl4ai.com/"
        async with CacheValidator(timeout=15.0) as validator:
            head_html, _, _ = await validator._fetch_head(url)
            assert head_html is not None
            digest = compute_head_fingerprint(head_html)
            assert digest != "", "Should extract fingerprint from Crawl4AI docs"
class TestRealDomainsFetchHead:
    """Test _fetch_head functionality with real domains."""

    @pytest.mark.asyncio
    async def test_fetch_stops_at_head_close(self):
        """Verify we stop reading after </head>."""
        url = "https://docs.python.org/3/"
        async with CacheValidator(timeout=15.0) as validator:
            head_html, _, _ = await validator._fetch_head(url)
            assert head_html is not None
            assert "</head>" in head_html.lower()
            # Should NOT contain body content - or if a <body> fragment did
            # sneak in, </head> must appear before it
            assert "<body" not in head_html.lower() or head_html.lower().index("</head>") < head_html.lower().find("<body")

    @pytest.mark.asyncio
    async def test_extracts_both_headers(self):
        """Test extraction of both ETag and Last-Modified."""
        url = "https://docs.python.org/3/"
        async with CacheValidator(timeout=15.0) as validator:
            head_html, etag, last_modified = await validator._fetch_head(url)
            # Python docs should have both validator headers
            assert etag is not None, "Should have ETag"
            assert last_modified is not None, "Should have Last-Modified"

    @pytest.mark.asyncio
    async def test_handles_missing_head_tag(self):
        """Handle pages that might not have proper head structure."""
        # API endpoint that returns JSON (no HTML head)
        url = "https://httpbin.org/json"
        async with CacheValidator(timeout=15.0) as validator:
            head_html, etag, last_modified = await validator._fetch_head(url)
            # Should not crash, may return partial content or None
            # The important thing is it doesn't error
            # NOTE(review): intentionally assertion-free - passing means
            # _fetch_head did not raise on non-HTML content.
class TestRealDomainsValidationCombinations:
    """Test various combinations of validation data."""

    @pytest.mark.asyncio
    async def test_etag_only(self):
        """Validate with only ETag."""
        url = "https://docs.python.org/3/"
        async with CacheValidator(timeout=15.0) as validator:
            _, etag, _ = await validator._fetch_head(url)
            result = await validator.validate(url=url, stored_etag=etag)
            assert result.status == CacheValidationResult.FRESH

    @pytest.mark.asyncio
    async def test_last_modified_only(self):
        """Validate with only Last-Modified."""
        url = "https://en.wikipedia.org/wiki/Python_(programming_language)"
        async with CacheValidator(timeout=15.0) as validator:
            _, _, last_modified = await validator._fetch_head(url)
            # Guarded: only assert when the server actually sent the header
            if last_modified:
                result = await validator.validate(url=url, stored_last_modified=last_modified)
                assert result.status == CacheValidationResult.FRESH

    @pytest.mark.asyncio
    async def test_fingerprint_only(self):
        """Validate with only fingerprint."""
        url = "https://example.com/"
        async with CacheValidator(timeout=15.0) as validator:
            head_html, _, _ = await validator._fetch_head(url)
            fingerprint = compute_head_fingerprint(head_html)
            # Guarded: fingerprint can be empty if the head had no signals
            if fingerprint:
                result = await validator.validate(url=url, stored_head_fingerprint=fingerprint)
                assert result.status == CacheValidationResult.FRESH

    @pytest.mark.asyncio
    async def test_all_validation_data(self):
        """Validate with all available data."""
        url = "https://docs.python.org/3/"
        async with CacheValidator(timeout=15.0) as validator:
            head_html, etag, last_modified = await validator._fetch_head(url)
            fingerprint = compute_head_fingerprint(head_html)
            # Supply every validator we have; any one matching should suffice
            result = await validator.validate(
                url=url,
                stored_etag=etag,
                stored_last_modified=last_modified,
                stored_head_fingerprint=fingerprint,
            )
            assert result.status == CacheValidationResult.FRESH

    @pytest.mark.asyncio
    async def test_stale_etag_fresh_fingerprint(self):
        """When ETag is stale but fingerprint matches, should be FRESH."""
        url = "https://docs.python.org/3/"
        async with CacheValidator(timeout=15.0) as validator:
            head_html, _, _ = await validator._fetch_head(url)
            fingerprint = compute_head_fingerprint(head_html)

            # Use fake ETag but real fingerprint
            result = await validator.validate(
                url=url,
                stored_etag='"fake-stale-etag"',
                stored_head_fingerprint=fingerprint,
            )
            # Fingerprint should save us, and the reason should say so
            assert result.status == CacheValidationResult.FRESH
            assert "fingerprint" in result.reason.lower()
| {
"repo_id": "unclecode/crawl4ai",
"file_path": "tests/cache_validation/test_real_domains.py",
"license": "Apache License 2.0",
"lines": 257,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
unclecode/crawl4ai:tests/deep_crawling/test_deep_crawl_resume.py | """
Test Suite: Deep Crawl Resume/Crash Recovery Tests
Tests that verify:
1. State export produces valid JSON-serializable data
2. Resume from checkpoint continues without duplicates
3. Simulated crash at various points recovers correctly
4. State callback fires at expected intervals
5. No damage to existing system behavior (regression tests)
"""
import pytest
import asyncio
import json
from typing import Dict, Any, List
from unittest.mock import AsyncMock, MagicMock
from crawl4ai.deep_crawling import (
BFSDeepCrawlStrategy,
DFSDeepCrawlStrategy,
BestFirstCrawlingStrategy,
FilterChain,
URLPatternFilter,
DomainFilter,
)
from crawl4ai.deep_crawling.scorers import KeywordRelevanceScorer
# ============================================================================
# Helper Functions for Mock Crawler
# ============================================================================
def create_mock_config(stream=False):
    """Build a MagicMock standing in for CrawlerRunConfig.

    ``clone()`` returns the mock itself so strategies that clone the config
    keep working with the same object; ``stream`` selects batch vs.
    streaming behavior in the mock crawlers below.
    """
    cfg = MagicMock()
    cfg.stream = stream
    cfg.clone = MagicMock(return_value=cfg)
    return cfg
def create_mock_crawler_with_links(num_links: int = 3, include_keyword: bool = False):
    """Build a mock crawler whose results each expose ``num_links`` children.

    Every URL yields a successful MagicMock result. Child hrefs embed a
    monotonically increasing per-crawler call counter, so links stay unique
    across calls; with ``include_keyword`` the hrefs contain 'important' so
    keyword scorers rank them highly. Honors ``config.stream`` by returning
    an async generator instead of a list.
    """
    counter = 0
    child_prefix = "important-child" if include_keyword else "child"

    async def fake_arun_many(urls, config):
        nonlocal counter
        batch = []
        for target in urls:
            counter += 1
            res = MagicMock()
            res.url = target
            res.success = True
            res.metadata = {}
            internal = [{"href": f"{target}/{child_prefix}{counter}_{idx}"} for idx in range(num_links)]
            res.links = {"internal": internal, "external": []}
            batch.append(res)
        if config.stream:
            # Streaming mode: hand back an async generator over the batch.
            async def stream_results():
                for item in batch:
                    yield item
            return stream_results()
        return batch

    crawler = MagicMock()
    crawler.arun_many = fake_arun_many
    return crawler
def create_mock_crawler_tracking(crawl_order: List[str], return_no_links: bool = False):
    """Build a mock crawler that appends every crawled URL to ``crawl_order``.

    Each result is a successful MagicMock carrying one '<url>/child' link,
    or no links at all when ``return_no_links`` is set (useful to stop the
    frontier from growing). Honors ``config.stream``.
    """
    async def fake_arun_many(urls, config):
        batch = []
        for target in urls:
            crawl_order.append(target)
            res = MagicMock()
            res.url = target
            res.success = True
            res.metadata = {}
            internal = [] if return_no_links else [{"href": f"{target}/child"}]
            res.links = {"internal": internal, "external": []}
            batch.append(res)
        if config.stream:
            # Streaming mode: hand back an async generator over the batch.
            async def stream_results():
                for item in batch:
                    yield item
            return stream_results()
        return batch

    crawler = MagicMock()
    crawler.arun_many = fake_arun_many
    return crawler
def create_simple_mock_crawler():
    """Basic mock crawler: every URL succeeds with exactly two child links.

    Child hrefs are '<url>/child1' and '<url>/child2'. Honors
    ``config.stream`` by returning an async generator instead of a list.
    """
    call_count = 0

    async def fake_arun_many(urls, config):
        nonlocal call_count
        batch = []
        for target in urls:
            call_count += 1
            res = MagicMock()
            res.url = target
            res.success = True
            res.metadata = {}
            res.links = {
                "internal": [{"href": f"{target}/child{n}"} for n in (1, 2)],
                "external": [],
            }
            batch.append(res)
        if config.stream:
            async def stream_results():
                for item in batch:
                    yield item
            return stream_results()
        return batch

    crawler = MagicMock()
    crawler.arun_many = fake_arun_many
    return crawler
def create_mock_crawler_unlimited_links():
    """Mock crawler that always returns 10 fresh links per crawled URL.

    Because the link frontier never runs dry, this is the fixture of choice
    for exercising max_pages / max_depth limits. Honors ``config.stream``.
    """
    async def fake_arun_many(urls, config):
        batch = []
        for target in urls:
            res = MagicMock()
            res.url = target
            res.success = True
            res.metadata = {}
            res.links = {
                "internal": [{"href": f"{target}/link{n}"} for n in range(10)],
                "external": [],
            }
            batch.append(res)
        if config.stream:
            async def stream_results():
                for item in batch:
                    yield item
            return stream_results()
        return batch

    crawler = MagicMock()
    crawler.arun_many = fake_arun_many
    return crawler
# ============================================================================
# TEST SUITE 1: Crash Recovery Tests
# ============================================================================
class TestBFSResume:
    """BFS strategy resume tests.

    All tests drive the strategy's private batch entry point
    (``_arun_batch``) directly with mocked crawlers/configs, so no network
    is involved; state is observed via the ``on_state_change`` callback.
    """
    @pytest.mark.asyncio
    async def test_state_export_json_serializable(self):
        """Verify exported state can be JSON serialized."""
        captured_states: List[Dict] = []
        async def capture_state(state: Dict[str, Any]):
            # Verify JSON serializable
            json_str = json.dumps(state)
            parsed = json.loads(json_str)
            captured_states.append(parsed)
        strategy = BFSDeepCrawlStrategy(
            max_depth=2,
            max_pages=10,
            on_state_change=capture_state,
        )
        # Create mock crawler that returns predictable results
        mock_crawler = create_mock_crawler_with_links(num_links=3)
        mock_config = create_mock_config()
        # Return value unused: assertions run on states the callback captured.
        results = await strategy._arun_batch("https://example.com", mock_crawler, mock_config)
        # Verify states were captured
        assert len(captured_states) > 0
        # Verify state structure
        for state in captured_states:
            assert state["strategy_type"] == "bfs"
            assert "visited" in state
            assert "pending" in state
            assert "depths" in state
            assert "pages_crawled" in state
            assert isinstance(state["visited"], list)
            assert isinstance(state["pending"], list)
            assert isinstance(state["depths"], dict)
            assert isinstance(state["pages_crawled"], int)
    @pytest.mark.asyncio
    async def test_resume_continues_from_checkpoint(self):
        """Verify resume starts from saved state, not beginning."""
        # Simulate state from previous crawl (visited 5 URLs, 3 pending)
        saved_state = {
            "strategy_type": "bfs",
            "visited": [
                "https://example.com",
                "https://example.com/page1",
                "https://example.com/page2",
                "https://example.com/page3",
                "https://example.com/page4",
            ],
            "pending": [
                {"url": "https://example.com/page5", "parent_url": "https://example.com/page2"},
                {"url": "https://example.com/page6", "parent_url": "https://example.com/page3"},
                {"url": "https://example.com/page7", "parent_url": "https://example.com/page3"},
            ],
            "depths": {
                "https://example.com": 0,
                "https://example.com/page1": 1,
                "https://example.com/page2": 1,
                "https://example.com/page3": 1,
                "https://example.com/page4": 1,
                "https://example.com/page5": 2,
                "https://example.com/page6": 2,
                "https://example.com/page7": 2,
            },
            "pages_crawled": 5,
        }
        crawled_urls: List[str] = []
        strategy = BFSDeepCrawlStrategy(
            max_depth=2,
            max_pages=20,
            resume_state=saved_state,
        )
        # Verify internal state was restored
        assert strategy._resume_state == saved_state
        # return_no_links=True keeps the frontier from growing beyond the
        # pending URLs listed in the checkpoint.
        mock_crawler = create_mock_crawler_tracking(crawled_urls, return_no_links=True)
        mock_config = create_mock_config()
        await strategy._arun_batch("https://example.com", mock_crawler, mock_config)
        # Should NOT re-crawl already visited URLs
        for visited_url in saved_state["visited"]:
            assert visited_url not in crawled_urls, f"Re-crawled already visited: {visited_url}"
        # Should crawl pending URLs
        for pending in saved_state["pending"]:
            assert pending["url"] in crawled_urls, f"Did not crawl pending: {pending['url']}"
    @pytest.mark.asyncio
    async def test_simulated_crash_mid_crawl(self):
        """Simulate crash at URL N, verify resume continues from pending URLs."""
        crash_after = 3
        states_before_crash: List[Dict] = []
        async def capture_until_crash(state: Dict[str, Any]):
            # Raising from the state callback simulates a hard crash mid-crawl;
            # the state appended just before the raise is the checkpoint.
            states_before_crash.append(state)
            if state["pages_crawled"] >= crash_after:
                raise Exception("Simulated crash!")
        strategy1 = BFSDeepCrawlStrategy(
            max_depth=2,
            max_pages=10,
            on_state_change=capture_until_crash,
        )
        mock_crawler = create_mock_crawler_with_links(num_links=5)
        mock_config = create_mock_config()
        # First crawl - crashes
        with pytest.raises(Exception, match="Simulated crash"):
            await strategy1._arun_batch("https://example.com", mock_crawler, mock_config)
        # Get last state before crash
        last_state = states_before_crash[-1]
        assert last_state["pages_crawled"] >= crash_after
        # Calculate which URLs were already crawled vs pending
        pending_urls = {item["url"] for item in last_state["pending"]}
        visited_urls = set(last_state["visited"])
        already_crawled_urls = visited_urls - pending_urls
        # Resume from checkpoint
        crawled_in_resume: List[str] = []
        strategy2 = BFSDeepCrawlStrategy(
            max_depth=2,
            max_pages=10,
            resume_state=last_state,
        )
        mock_crawler2 = create_mock_crawler_tracking(crawled_in_resume, return_no_links=True)
        await strategy2._arun_batch("https://example.com", mock_crawler2, mock_config)
        # Verify already-crawled URLs are not re-crawled
        for crawled_url in already_crawled_urls:
            assert crawled_url not in crawled_in_resume, f"Re-crawled already visited: {crawled_url}"
        # Verify pending URLs are crawled
        for pending_url in pending_urls:
            assert pending_url in crawled_in_resume, f"Did not crawl pending: {pending_url}"
    @pytest.mark.asyncio
    async def test_callback_fires_per_url(self):
        """Verify callback fires after each URL for maximum granularity."""
        callback_count = 0
        pages_crawled_sequence: List[int] = []
        async def count_callbacks(state: Dict[str, Any]):
            nonlocal callback_count
            callback_count += 1
            pages_crawled_sequence.append(state["pages_crawled"])
        strategy = BFSDeepCrawlStrategy(
            max_depth=1,
            max_pages=5,
            on_state_change=count_callbacks,
        )
        mock_crawler = create_mock_crawler_with_links(num_links=2)
        mock_config = create_mock_config()
        await strategy._arun_batch("https://example.com", mock_crawler, mock_config)
        # Callback should fire once per successful URL
        assert callback_count == strategy._pages_crawled, \
            f"Callback fired {callback_count} times, expected {strategy._pages_crawled} (per URL)"
        # pages_crawled should increment by 1 each callback
        for i, count in enumerate(pages_crawled_sequence):
            assert count == i + 1, f"Expected pages_crawled={i+1} at callback {i}, got {count}"
    @pytest.mark.asyncio
    async def test_export_state_returns_last_captured(self):
        """Verify export_state() returns last captured state."""
        last_state = None
        async def capture(state):
            nonlocal last_state
            last_state = state
        strategy = BFSDeepCrawlStrategy(max_depth=2, max_pages=5, on_state_change=capture)
        mock_crawler = create_mock_crawler_with_links(num_links=2)
        mock_config = create_mock_config()
        await strategy._arun_batch("https://example.com", mock_crawler, mock_config)
        exported = strategy.export_state()
        assert exported == last_state
class TestDFSResume:
    """DFS strategy resume tests.

    Mirrors TestBFSResume but checks the DFS-specific state shape: a LIFO
    ``stack`` (each item carries its own depth) plus a ``dfs_seen`` list.
    """
    @pytest.mark.asyncio
    async def test_state_export_includes_stack_and_dfs_seen(self):
        """Verify DFS state includes stack structure and _dfs_seen."""
        captured_states: List[Dict] = []
        async def capture_state(state: Dict[str, Any]):
            captured_states.append(state)
        strategy = DFSDeepCrawlStrategy(
            max_depth=3,
            max_pages=10,
            on_state_change=capture_state,
        )
        mock_crawler = create_mock_crawler_with_links(num_links=2)
        mock_config = create_mock_config()
        await strategy._arun_batch("https://example.com", mock_crawler, mock_config)
        assert len(captured_states) > 0
        for state in captured_states:
            assert state["strategy_type"] == "dfs"
            assert "stack" in state
            assert "dfs_seen" in state
            # Stack items should have depth
            for item in state["stack"]:
                assert "url" in item
                assert "parent_url" in item
                assert "depth" in item
    @pytest.mark.asyncio
    async def test_resume_restores_stack_order(self):
        """Verify DFS stack order is preserved on resume."""
        # Hand-built checkpoint: stack stored bottom-to-top, so the LAST
        # entry (page1) is the first one popped after resume.
        saved_state = {
            "strategy_type": "dfs",
            "visited": ["https://example.com"],
            "stack": [
                {"url": "https://example.com/deep3", "parent_url": "https://example.com/deep2", "depth": 3},
                {"url": "https://example.com/deep2", "parent_url": "https://example.com/deep1", "depth": 2},
                {"url": "https://example.com/page1", "parent_url": "https://example.com", "depth": 1},
            ],
            "depths": {"https://example.com": 0},
            "pages_crawled": 1,
            "dfs_seen": ["https://example.com", "https://example.com/deep3", "https://example.com/deep2", "https://example.com/page1"],
        }
        crawl_order: List[str] = []
        strategy = DFSDeepCrawlStrategy(
            max_depth=3,
            max_pages=10,
            resume_state=saved_state,
        )
        mock_crawler = create_mock_crawler_tracking(crawl_order, return_no_links=True)
        mock_config = create_mock_config()
        await strategy._arun_batch("https://example.com", mock_crawler, mock_config)
        # DFS pops from end of stack, so order should be: page1, deep2, deep3
        assert crawl_order[0] == "https://example.com/page1"
        assert crawl_order[1] == "https://example.com/deep2"
        assert crawl_order[2] == "https://example.com/deep3"
class TestBestFirstResume:
    """Best-First strategy resume tests.

    Best-First is exercised through its streaming entry point
    (``_arun_stream``) with ``config.stream=True``; its state carries a
    scored priority queue (``queue_items``) instead of a BFS/DFS frontier.
    """
    @pytest.mark.asyncio
    async def test_state_export_includes_scored_queue(self):
        """Verify Best-First state includes queue with scores."""
        captured_states: List[Dict] = []
        async def capture_state(state: Dict[str, Any]):
            captured_states.append(state)
        scorer = KeywordRelevanceScorer(keywords=["important"], weight=1.0)
        strategy = BestFirstCrawlingStrategy(
            max_depth=2,
            max_pages=10,
            url_scorer=scorer,
            on_state_change=capture_state,
        )
        # include_keyword=True makes the mock's child hrefs contain
        # 'important', so the scorer produces non-trivial scores.
        mock_crawler = create_mock_crawler_with_links(num_links=3, include_keyword=True)
        mock_config = create_mock_config(stream=True)
        async for _ in strategy._arun_stream("https://example.com", mock_crawler, mock_config):
            pass
        assert len(captured_states) > 0
        for state in captured_states:
            assert state["strategy_type"] == "best_first"
            assert "queue_items" in state
            for item in state["queue_items"]:
                assert "score" in item
                assert "depth" in item
                assert "url" in item
                assert "parent_url" in item
    @pytest.mark.asyncio
    async def test_resume_maintains_priority_order(self):
        """Verify priority queue order is maintained on resume."""
        # Scores are stored negated for the min-heap: -0.9 sorts first.
        saved_state = {
            "strategy_type": "best_first",
            "visited": ["https://example.com"],
            "queue_items": [
                {"score": -0.9, "depth": 1, "url": "https://example.com/high-priority", "parent_url": "https://example.com"},
                {"score": -0.5, "depth": 1, "url": "https://example.com/medium-priority", "parent_url": "https://example.com"},
                {"score": -0.1, "depth": 1, "url": "https://example.com/low-priority", "parent_url": "https://example.com"},
            ],
            "depths": {"https://example.com": 0},
            "pages_crawled": 1,
        }
        crawl_order: List[str] = []
        strategy = BestFirstCrawlingStrategy(
            max_depth=2,
            max_pages=10,
            resume_state=saved_state,
        )
        mock_crawler = create_mock_crawler_tracking(crawl_order, return_no_links=True)
        mock_config = create_mock_config(stream=True)
        async for _ in strategy._arun_stream("https://example.com", mock_crawler, mock_config):
            pass
        # Higher negative score = higher priority (min-heap)
        # So -0.9 should be crawled first
        assert crawl_order[0] == "https://example.com/high-priority"
class TestCrossStrategyResume:
    """Tests that apply to all strategies.

    Parametrized over BFS / DFS / Best-First; Best-First is driven through
    its streaming entry point, the others through batch.
    """
    @pytest.mark.asyncio
    @pytest.mark.parametrize("strategy_class,strategy_type", [
        (BFSDeepCrawlStrategy, "bfs"),
        (DFSDeepCrawlStrategy, "dfs"),
        (BestFirstCrawlingStrategy, "best_first"),
    ])
    async def test_no_callback_means_no_overhead(self, strategy_class, strategy_type):
        """Verify no state tracking when callback is None."""
        # strategy_type is unused here; it is part of the shared parametrize
        # signature above.
        strategy = strategy_class(max_depth=2, max_pages=5)
        # _queue_shadow should be None for Best-First when no callback
        if strategy_class == BestFirstCrawlingStrategy:
            assert strategy._queue_shadow is None
        # _last_state should be None initially
        assert strategy._last_state is None
    @pytest.mark.asyncio
    @pytest.mark.parametrize("strategy_class", [
        BFSDeepCrawlStrategy,
        DFSDeepCrawlStrategy,
        BestFirstCrawlingStrategy,
    ])
    async def test_export_state_returns_last_captured(self, strategy_class):
        """Verify export_state() returns last captured state."""
        last_state = None
        async def capture(state):
            nonlocal last_state
            last_state = state
        strategy = strategy_class(max_depth=2, max_pages=5, on_state_change=capture)
        mock_crawler = create_mock_crawler_with_links(num_links=2)
        if strategy_class == BestFirstCrawlingStrategy:
            mock_config = create_mock_config(stream=True)
            async for _ in strategy._arun_stream("https://example.com", mock_crawler, mock_config):
                pass
        else:
            mock_config = create_mock_config()
            await strategy._arun_batch("https://example.com", mock_crawler, mock_config)
        exported = strategy.export_state()
        assert exported == last_state
# ============================================================================
# TEST SUITE 2: Regression Tests (No Damage to Current System)
# ============================================================================
class TestBFSRegressions:
    """Ensure BFS works identically when new params not used.

    Regression guard for the resume feature: constructing BFS with only the
    original parameters must leave the new internals (_resume_state,
    _on_state_change) at None and preserve all pre-existing behavior.
    """
    @pytest.mark.asyncio
    async def test_default_params_unchanged(self):
        """Constructor with only original params works."""
        strategy = BFSDeepCrawlStrategy(
            max_depth=2,
            include_external=False,
            max_pages=10,
        )
        assert strategy.max_depth == 2
        assert strategy.include_external == False
        assert strategy.max_pages == 10
        assert strategy._resume_state is None
        assert strategy._on_state_change is None
    @pytest.mark.asyncio
    async def test_filter_chain_still_works(self):
        """FilterChain integration unchanged."""
        filter_chain = FilterChain([
            URLPatternFilter(patterns=["*/blog/*"]),
            DomainFilter(allowed_domains=["example.com"]),
        ])
        strategy = BFSDeepCrawlStrategy(
            max_depth=2,
            filter_chain=filter_chain,
        )
        # Test filter still applies
        assert await strategy.can_process_url("https://example.com/blog/post1", 1) == True
        assert await strategy.can_process_url("https://other.com/blog/post1", 1) == False
    @pytest.mark.asyncio
    async def test_url_scorer_still_works(self):
        """URL scoring integration unchanged."""
        scorer = KeywordRelevanceScorer(keywords=["python", "tutorial"], weight=1.0)
        strategy = BFSDeepCrawlStrategy(
            max_depth=2,
            url_scorer=scorer,
            score_threshold=0.5,
        )
        assert strategy.url_scorer is not None
        assert strategy.score_threshold == 0.5
        # Scorer should work
        score = scorer.score("https://example.com/python-tutorial")
        assert score > 0
    @pytest.mark.asyncio
    async def test_batch_mode_returns_list(self):
        """Batch mode still returns List[CrawlResult]."""
        strategy = BFSDeepCrawlStrategy(max_depth=1, max_pages=5)
        mock_crawler = create_simple_mock_crawler()
        mock_config = create_mock_config(stream=False)
        results = await strategy._arun_batch("https://example.com", mock_crawler, mock_config)
        assert isinstance(results, list)
        assert len(results) > 0
    @pytest.mark.asyncio
    async def test_max_pages_limit_respected(self):
        """max_pages limit still enforced."""
        # Unlimited-links mock means only max_pages can stop the crawl.
        strategy = BFSDeepCrawlStrategy(max_depth=10, max_pages=3)
        mock_crawler = create_mock_crawler_unlimited_links()
        mock_config = create_mock_config()
        results = await strategy._arun_batch("https://example.com", mock_crawler, mock_config)
        # Should stop at max_pages
        assert strategy._pages_crawled <= 3
    @pytest.mark.asyncio
    async def test_max_depth_limit_respected(self):
        """max_depth limit still enforced."""
        strategy = BFSDeepCrawlStrategy(max_depth=2, max_pages=100)
        mock_crawler = create_mock_crawler_unlimited_links()
        mock_config = create_mock_config()
        results = await strategy._arun_batch("https://example.com", mock_crawler, mock_config)
        # All results should have depth <= max_depth
        for result in results:
            assert result.metadata.get("depth", 0) <= 2
    @pytest.mark.asyncio
    async def test_metadata_depth_still_set(self):
        """Result metadata still includes depth."""
        strategy = BFSDeepCrawlStrategy(max_depth=2, max_pages=5)
        mock_crawler = create_simple_mock_crawler()
        mock_config = create_mock_config()
        results = await strategy._arun_batch("https://example.com", mock_crawler, mock_config)
        for result in results:
            assert "depth" in result.metadata
            assert isinstance(result.metadata["depth"], int)
    @pytest.mark.asyncio
    async def test_metadata_parent_url_still_set(self):
        """Result metadata still includes parent_url."""
        strategy = BFSDeepCrawlStrategy(max_depth=2, max_pages=5)
        mock_crawler = create_simple_mock_crawler()
        mock_config = create_mock_config()
        results = await strategy._arun_batch("https://example.com", mock_crawler, mock_config)
        # First result (start URL) should have parent_url = None
        assert results[0].metadata.get("parent_url") is None
        # Child results should have parent_url set
        for result in results[1:]:
            assert "parent_url" in result.metadata
class TestDFSRegressions:
    """Regression guard: DFS is unchanged when the new resume params are unused."""
    @pytest.mark.asyncio
    async def test_inherits_bfs_params(self):
        """DFS still inherits all BFS constructor parameters."""
        ctor_kwargs = dict(max_depth=3, include_external=True, max_pages=20, score_threshold=0.5)
        strategy = DFSDeepCrawlStrategy(**ctor_kwargs)
        # Every constructor argument must land on the attribute of the same name.
        assert strategy.max_depth == 3
        assert strategy.include_external == True
        assert strategy.max_pages == 20
        assert strategy.score_threshold == 0.5
    @pytest.mark.asyncio
    async def test_dfs_seen_initialized(self):
        """The DFS-specific _dfs_seen set is still created on construction."""
        strategy = DFSDeepCrawlStrategy(max_depth=2)
        # getattr default of None fails isinstance, covering the missing-attr case too.
        assert isinstance(getattr(strategy, '_dfs_seen', None), set)
class TestBestFirstRegressions:
    """Regression guard: Best-First is unchanged when the new resume params are unused."""
    @pytest.mark.asyncio
    async def test_default_params_unchanged(self):
        """Constructor with only original params works."""
        strategy = BestFirstCrawlingStrategy(max_depth=2, include_external=False, max_pages=10)
        # Original attributes keep their values...
        assert strategy.max_depth == 2
        assert strategy.include_external == False
        assert strategy.max_pages == 10
        # ...and the resume-related internals stay dormant. The queue shadow
        # is only set up when a state-change callback is registered.
        assert strategy._resume_state is None
        assert strategy._on_state_change is None
        assert strategy._queue_shadow is None
    @pytest.mark.asyncio
    async def test_scorer_integration(self):
        """URL scorer still affects crawl priority."""
        scorer = KeywordRelevanceScorer(keywords=["important"], weight=1.0)
        strategy = BestFirstCrawlingStrategy(max_depth=2, max_pages=10, url_scorer=scorer)
        # The exact scorer instance must be wired through, not a copy.
        assert strategy.url_scorer is scorer
class TestAPICompatibility:
    """Ensure API/serialization compatibility with pre-resume call sites."""
    def test_strategy_signature_backward_compatible(self):
        """Old code calling with positional/keyword args still works."""
        # Single positional argument (old style).
        positional = BFSDeepCrawlStrategy(2)
        assert positional.max_depth == 2
        # Keyword arguments (old style).
        keyword = BFSDeepCrawlStrategy(max_depth=3, max_pages=10)
        assert keyword.max_depth == 3
        # Full positional form: depth, filter chain, scorer, include_external,
        # score_threshold, max_pages (old style).
        mixed = BFSDeepCrawlStrategy(2, FilterChain(), None, False, float('-inf'), 100)
        assert mixed.max_depth == 2
        assert mixed.max_pages == 100
    def test_no_required_new_params(self):
        """New params are optional, not required."""
        # Construction alone is the assertion: none of these may raise.
        BFSDeepCrawlStrategy(max_depth=2)
        DFSDeepCrawlStrategy(max_depth=2)
        BestFirstCrawlingStrategy(max_depth=2)
| {
"repo_id": "unclecode/crawl4ai",
"file_path": "tests/deep_crawling/test_deep_crawl_resume.py",
"license": "Apache License 2.0",
"lines": 608,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
unclecode/crawl4ai:tests/deep_crawling/test_deep_crawl_resume_integration.py | """
Integration Test: Deep Crawl Resume with Real URLs
Tests the crash recovery feature using books.toscrape.com - a site
designed for scraping practice with a clear hierarchy:
- Home page β Category pages β Book detail pages
"""
import pytest
import asyncio
import json
from typing import Dict, Any, List
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig
from crawl4ai.deep_crawling import BFSDeepCrawlStrategy
class TestBFSResumeIntegration:
    """Integration tests for BFS resume with real crawling.

    NOTE(review): these tests hit the live network and require
    books.toscrape.com to be reachable; they are not hermetic.
    """
    @pytest.mark.asyncio
    async def test_real_crawl_state_capture_and_resume(self):
        """
        Test crash recovery with real URLs from books.toscrape.com.
        Flow:
        1. Start crawl with state callback
        2. Stop after N pages (simulated crash)
        3. Resume from saved state
        4. Verify no duplicate crawls
        """
        # Phase 1: Initial crawl that "crashes" after 3 pages
        crash_after = 3
        captured_states: List[Dict[str, Any]] = []
        crawled_urls_phase1: List[str] = []
        async def capture_state_until_crash(state: Dict[str, Any]):
            # Keep only the most recent visited list; raising here simulates
            # a hard crash mid-crawl.
            captured_states.append(state)
            crawled_urls_phase1.clear()
            crawled_urls_phase1.extend(state["visited"])
            if state["pages_crawled"] >= crash_after:
                raise Exception("Simulated crash!")
        strategy1 = BFSDeepCrawlStrategy(
            max_depth=2,
            max_pages=10,
            on_state_change=capture_state_until_crash,
        )
        config = CrawlerRunConfig(
            deep_crawl_strategy=strategy1,
            stream=False,
            verbose=False,
        )
        async with AsyncWebCrawler(verbose=False) as crawler:
            # First crawl - will crash after 3 pages
            with pytest.raises(Exception, match="Simulated crash"):
                await crawler.arun("https://books.toscrape.com", config=config)
        # Verify we captured state before crash
        assert len(captured_states) > 0, "No states captured before crash"
        last_state = captured_states[-1]
        print(f"\n=== Phase 1: Crashed after {last_state['pages_crawled']} pages ===")
        print(f"Visited URLs: {len(last_state['visited'])}")
        print(f"Pending URLs: {len(last_state['pending'])}")
        # Verify state structure
        assert last_state["strategy_type"] == "bfs"
        assert last_state["pages_crawled"] >= crash_after
        assert len(last_state["visited"]) > 0
        assert "pending" in last_state
        assert "depths" in last_state
        # Verify state is JSON serializable (important for Redis/DB storage)
        json_str = json.dumps(last_state)
        restored_state = json.loads(json_str)
        assert restored_state == last_state, "State not JSON round-trip safe"
        # Phase 2: Resume from checkpoint
        crawled_urls_phase2: List[str] = []
        async def track_resumed_crawl(state: Dict[str, Any]):
            # Track what's being crawled in phase 2
            new_visited = set(state["visited"]) - set(last_state["visited"])
            for url in new_visited:
                if url not in crawled_urls_phase2:
                    crawled_urls_phase2.append(url)
        # Resume from the JSON round-tripped state, exactly as a real
        # Redis/DB-backed restart would.
        strategy2 = BFSDeepCrawlStrategy(
            max_depth=2,
            max_pages=10,
            resume_state=restored_state,
            on_state_change=track_resumed_crawl,
        )
        config2 = CrawlerRunConfig(
            deep_crawl_strategy=strategy2,
            stream=False,
            verbose=False,
        )
        async with AsyncWebCrawler(verbose=False) as crawler:
            results = await crawler.arun("https://books.toscrape.com", config=config2)
        print(f"\n=== Phase 2: Resumed crawl ===")
        print(f"New URLs crawled: {len(crawled_urls_phase2)}")
        print(f"Final pages_crawled: {strategy2._pages_crawled}")
        # Verify no duplicates - URLs from phase 1 should not be re-crawled
        already_crawled = set(last_state["visited"]) - {item["url"] for item in last_state["pending"]}
        duplicates = set(crawled_urls_phase2) & already_crawled
        assert len(duplicates) == 0, f"Duplicate crawls detected: {duplicates}"
        # Verify we made progress (crawled some of the pending URLs)
        pending_urls = {item["url"] for item in last_state["pending"]}
        crawled_pending = set(crawled_urls_phase2) & pending_urls
        print(f"Pending URLs crawled in phase 2: {len(crawled_pending)}")
        # Final state should show more pages crawled than before crash
        final_state = strategy2.export_state()
        if final_state:
            assert final_state["pages_crawled"] >= last_state["pages_crawled"], \
                "Resume did not make progress"
        print("\n=== Integration test PASSED ===")
    @pytest.mark.asyncio
    async def test_state_export_method(self):
        """Test that export_state() returns valid state during crawl."""
        states_from_callback: List[Dict] = []
        async def capture(state):
            states_from_callback.append(state)
        strategy = BFSDeepCrawlStrategy(
            max_depth=1,
            max_pages=3,
            on_state_change=capture,
        )
        config = CrawlerRunConfig(
            deep_crawl_strategy=strategy,
            stream=False,
            verbose=False,
        )
        async with AsyncWebCrawler(verbose=False) as crawler:
            await crawler.arun("https://books.toscrape.com", config=config)
        # export_state should return the last captured state
        exported = strategy.export_state()
        assert exported is not None, "export_state() returned None"
        assert exported == states_from_callback[-1], "export_state() doesn't match last callback"
        print(f"\n=== export_state() test PASSED ===")
        print(f"Final state: {exported['pages_crawled']} pages, {len(exported['visited'])} visited")
| {
"repo_id": "unclecode/crawl4ai",
"file_path": "tests/deep_crawling/test_deep_crawl_resume_integration.py",
"license": "Apache License 2.0",
"lines": 126,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
unclecode/crawl4ai:tests/proxy/test_sticky_sessions.py | """
Comprehensive test suite for Sticky Proxy Sessions functionality.
Tests cover:
1. Basic sticky session - same proxy for same session_id
2. Different sessions get different proxies
3. Session release
4. TTL expiration
5. Thread safety / concurrent access
6. Integration tests with AsyncWebCrawler
"""
import asyncio
import os
import time
import pytest
from unittest.mock import patch
from crawl4ai import AsyncWebCrawler, BrowserConfig
from crawl4ai.async_configs import CrawlerRunConfig, ProxyConfig
from crawl4ai.proxy_strategy import RoundRobinProxyStrategy
from crawl4ai.cache_context import CacheMode
class TestRoundRobinProxyStrategySession:
"""Test suite for RoundRobinProxyStrategy session methods."""
    def setup_method(self):
        """Setup for each test method."""
        # Fresh pool of five fake proxies per test; the .test TLD is
        # reserved, so these hosts can never resolve for real.
        self.proxies = [
            ProxyConfig(server=f"http://proxy{i}.test:8080")
            for i in range(5)
        ]
# ==================== BASIC STICKY SESSION TESTS ====================
    @pytest.mark.asyncio
    async def test_sticky_session_same_proxy(self):
        """Verify same proxy is returned for same session_id."""
        strategy = RoundRobinProxyStrategy(self.proxies)
        # First call - acquires proxy
        proxy1 = await strategy.get_proxy_for_session("session-1")
        # Second call - should return same proxy
        proxy2 = await strategy.get_proxy_for_session("session-1")
        # Third call - should return same proxy
        proxy3 = await strategy.get_proxy_for_session("session-1")
        # All three acquisitions for one session id must map to one server.
        assert proxy1 is not None
        assert proxy1.server == proxy2.server == proxy3.server
@pytest.mark.asyncio
async def test_different_sessions_different_proxies(self):
"""Verify different session_ids can get different proxies."""
strategy = RoundRobinProxyStrategy(self.proxies)
proxy_a = await strategy.get_proxy_for_session("session-a")
proxy_b = await strategy.get_proxy_for_session("session-b")
proxy_c = await strategy.get_proxy_for_session("session-c")
# All should be different (round-robin)
servers = {proxy_a.server, proxy_b.server, proxy_c.server}
assert len(servers) == 3
    @pytest.mark.asyncio
    async def test_sticky_session_with_regular_rotation(self):
        """Verify sticky sessions don't interfere with regular rotation."""
        strategy = RoundRobinProxyStrategy(self.proxies)
        # Acquire a sticky session
        session_proxy = await strategy.get_proxy_for_session("sticky-session")
        # Regular rotation should continue independently
        regular_proxy1 = await strategy.get_next_proxy()
        regular_proxy2 = await strategy.get_next_proxy()
        # Sticky session should still return same proxy, even after the
        # rotation cursor has moved on.
        session_proxy_again = await strategy.get_proxy_for_session("sticky-session")
        assert session_proxy.server == session_proxy_again.server
        # Regular proxies should rotate
        assert regular_proxy1.server != regular_proxy2.server
# ==================== SESSION RELEASE TESTS ====================
    @pytest.mark.asyncio
    async def test_session_release(self):
        """Verify session can be released and reacquired."""
        strategy = RoundRobinProxyStrategy(self.proxies)
        # Acquire session (proxy1 only proves the acquire succeeded)
        proxy1 = await strategy.get_proxy_for_session("session-1")
        assert strategy.get_session_proxy("session-1") is not None
        # Release session
        await strategy.release_session("session-1")
        assert strategy.get_session_proxy("session-1") is None
        # Reacquire - should get a new proxy (next in round-robin)
        proxy2 = await strategy.get_proxy_for_session("session-1")
        assert proxy2 is not None
        # After release, next call gets the next proxy in rotation
        # (not necessarily the same as before)
@pytest.mark.asyncio
async def test_release_nonexistent_session(self):
    """Releasing an unknown session id is a harmless no-op."""
    strategy = RoundRobinProxyStrategy(self.proxies)
    await strategy.release_session("nonexistent-session")  # must not raise
@pytest.mark.asyncio
async def test_release_twice(self):
    """A double release of the same session id must not raise."""
    strategy = RoundRobinProxyStrategy(self.proxies)
    await strategy.get_proxy_for_session("session-1")
    for _ in range(2):  # the second pass hits an already-released session
        await strategy.release_session("session-1")
# ==================== GET SESSION PROXY TESTS ====================
@pytest.mark.asyncio
async def test_get_session_proxy_existing(self):
    """get_session_proxy returns the proxy bound to a live session."""
    strategy = RoundRobinProxyStrategy(self.proxies)
    bound = await strategy.get_proxy_for_session("session-1")
    looked_up = strategy.get_session_proxy("session-1")
    # The lookup must find the session and report the very same server.
    assert looked_up is not None
    assert bound.server == looked_up.server
def test_get_session_proxy_nonexistent(self):
    """Unknown session ids resolve to None."""
    strategy = RoundRobinProxyStrategy(self.proxies)
    assert strategy.get_session_proxy("nonexistent-session") is None
# ==================== TTL EXPIRATION TESTS ====================
@pytest.mark.asyncio
async def test_session_ttl_not_expired(self):
    """Within its TTL a session keeps resolving to the same proxy."""
    strategy = RoundRobinProxyStrategy(self.proxies)
    # Two back-to-back lookups under a generous 10-second TTL.
    first = await strategy.get_proxy_for_session("session-1", ttl=10)
    second = await strategy.get_proxy_for_session("session-1", ttl=10)
    assert first.server == second.server
@pytest.mark.asyncio
async def test_session_ttl_expired(self):
    """After the TTL lapses the session is re-created with a fresh proxy."""
    strategy = RoundRobinProxyStrategy(self.proxies)
    await strategy.get_proxy_for_session("session-1", ttl=1)
    # Outlive the 1-second TTL.
    await asyncio.sleep(1.1)
    # The rotation state decides which server comes back, so we only assert
    # that the expired session was successfully re-established.
    renewed = await strategy.get_proxy_for_session("session-1", ttl=1)
    assert renewed is not None
@pytest.mark.asyncio
async def test_get_session_proxy_ttl_expired(self):
    """get_session_proxy yields None once the session's TTL has lapsed."""
    strategy = RoundRobinProxyStrategy(self.proxies)
    await strategy.get_proxy_for_session("session-1", ttl=1)
    await asyncio.sleep(1.1)  # let the 1-second TTL run out
    assert strategy.get_session_proxy("session-1") is None
@pytest.mark.asyncio
async def test_cleanup_expired_sessions(self):
    """cleanup_expired_sessions drops only the sessions whose TTL lapsed."""
    strategy = RoundRobinProxyStrategy(self.proxies)
    # Two short-lived sessions plus one with no TTL at all.
    await strategy.get_proxy_for_session("short-ttl-1", ttl=1)
    await strategy.get_proxy_for_session("short-ttl-2", ttl=1)
    await strategy.get_proxy_for_session("no-ttl")
    await asyncio.sleep(1.1)  # outlive the 1-second TTLs
    # Exactly the two expired sessions are swept.
    assert await strategy.cleanup_expired_sessions() == 2
    for dead in ("short-ttl-1", "short-ttl-2"):
        assert strategy.get_session_proxy(dead) is None
    # The TTL-less session survives the sweep.
    assert strategy.get_session_proxy("no-ttl") is not None
# ==================== GET ACTIVE SESSIONS TESTS ====================
@pytest.mark.asyncio
async def test_get_active_sessions(self):
    """get_active_sessions lists every currently tracked session id."""
    strategy = RoundRobinProxyStrategy(self.proxies)
    session_ids = ("session-a", "session-b", "session-c")
    for sid in session_ids:
        await strategy.get_proxy_for_session(sid)
    active = strategy.get_active_sessions()
    assert len(active) == 3
    for sid in session_ids:
        assert sid in active
@pytest.mark.asyncio
async def test_get_active_sessions_excludes_expired(self):
    """Expired sessions are filtered out of get_active_sessions."""
    strategy = RoundRobinProxyStrategy(self.proxies)
    await strategy.get_proxy_for_session("short-ttl", ttl=1)
    await strategy.get_proxy_for_session("no-ttl")
    # Both are visible while the short TTL is still alive.
    assert len(strategy.get_active_sessions()) == 2
    await asyncio.sleep(1.1)  # let the 1-second TTL lapse
    # Only the TTL-less session remains listed.
    remaining = strategy.get_active_sessions()
    assert len(remaining) == 1
    assert "no-ttl" in remaining
    assert "short-ttl" not in remaining
# ==================== THREAD SAFETY TESTS ====================
@pytest.mark.asyncio
async def test_concurrent_session_access(self):
    """Concurrent lookups of one session must all observe the same proxy."""
    strategy = RoundRobinProxyStrategy(self.proxies)

    async def grab(session_id: str):
        # Look the session up, then yield so other coroutines interleave.
        proxy = await strategy.get_proxy_for_session(session_id)
        await asyncio.sleep(0.01)  # simulate work
        return proxy.server

    servers = await asyncio.gather(*(grab("shared-session") for _ in range(10)))
    # Ten racing coroutines, one shared session => exactly one unique server.
    assert len(set(servers)) == 1
@pytest.mark.asyncio
async def test_concurrent_different_sessions(self):
    """Parallel acquisition of distinct sessions stays internally consistent."""
    strategy = RoundRobinProxyStrategy(self.proxies)

    async def grab(session_id: str):
        proxy = await strategy.get_proxy_for_session(session_id)
        await asyncio.sleep(0.01)
        return session_id, proxy.server

    pairs = await asyncio.gather(*(grab(f"session-{i}") for i in range(5)))
    by_session = dict(pairs)
    assert len(by_session) == 5
    # Later lookups must reproduce the server recorded at acquisition time.
    for session_id, recorded_server in by_session.items():
        current = await strategy.get_proxy_for_session(session_id)
        assert current.server == recorded_server
@pytest.mark.asyncio
async def test_concurrent_session_acquire_and_release(self):
    """Interleaved acquire/release cycles leave no session behind."""
    strategy = RoundRobinProxyStrategy(self.proxies)

    async def cycle(session_id: str):
        proxy = await strategy.get_proxy_for_session(session_id)
        await asyncio.sleep(0.01)
        await strategy.release_session(session_id)
        return proxy.server

    await asyncio.gather(*(cycle(f"session-{i}") for i in range(10)))
    # Every coroutine released its own session, so none may remain active.
    assert len(strategy.get_active_sessions()) == 0
# ==================== EMPTY PROXY POOL TESTS ====================
@pytest.mark.asyncio
async def test_empty_proxy_pool_session(self):
    """With no proxies configured, a session request resolves to None."""
    strategy = RoundRobinProxyStrategy()  # empty pool
    assert await strategy.get_proxy_for_session("session-1") is None
@pytest.mark.asyncio
async def test_add_proxies_after_session(self):
    """Proxies added after a failed lookup are usable for later sessions."""
    strategy = RoundRobinProxyStrategy()
    # Empty pool: nothing to hand out yet.
    assert await strategy.get_proxy_for_session("session-1") is None
    # Populate the pool after the fact.
    strategy.add_proxies(self.proxies)
    # A subsequent session now gets a proxy.
    assert await strategy.get_proxy_for_session("session-2") is not None
class TestCrawlerRunConfigSession:
    """CrawlerRunConfig must expose the sticky-session knobs."""

    def test_config_has_session_fields(self):
        """Explicitly-set session fields round-trip through the config."""
        cfg = CrawlerRunConfig(
            proxy_session_id="test-session",
            proxy_session_ttl=300,
            proxy_session_auto_release=True
        )
        assert cfg.proxy_session_id == "test-session"
        assert cfg.proxy_session_ttl == 300
        assert cfg.proxy_session_auto_release is True

    def test_config_session_defaults(self):
        """Without arguments, the session feature is disabled by default."""
        cfg = CrawlerRunConfig()
        assert cfg.proxy_session_id is None
        assert cfg.proxy_session_ttl is None
        assert cfg.proxy_session_auto_release is False
class TestCrawlerStickySessionIntegration:
"""Integration tests for AsyncWebCrawler with sticky sessions."""
# NOTE(review): the proxy hosts below are deliberately unroutable; the crawl
# itself may fail, and the assertions target the strategy's session
# bookkeeping only.
def setup_method(self):
"""Setup for each test method."""
# Pool of three fake proxies plus a fixed echo-IP target URL.
self.proxies = [
ProxyConfig(server=f"http://proxy{i}.test:8080")
for i in range(3)
]
self.test_url = "https://httpbin.org/ip"
@pytest.mark.asyncio
async def test_crawler_sticky_session_without_proxy(self):
"""Test that crawler works when proxy_session_id set but no strategy."""
browser_config = BrowserConfig(headless=True)
config = CrawlerRunConfig(
cache_mode=CacheMode.BYPASS,
proxy_session_id="test-session",
page_timeout=15000
)
async with AsyncWebCrawler(config=browser_config) as crawler:
result = await crawler.arun(url=self.test_url, config=config)
# Should work without errors (no proxy strategy means no proxy)
assert result is not None
@pytest.mark.asyncio
async def test_crawler_sticky_session_basic(self):
"""Test basic sticky session with crawler."""
strategy = RoundRobinProxyStrategy(self.proxies)
config = CrawlerRunConfig(
cache_mode=CacheMode.BYPASS,
proxy_rotation_strategy=strategy,
proxy_session_id="integration-test",
page_timeout=10000
)
browser_config = BrowserConfig(headless=True)
async with AsyncWebCrawler(config=browser_config) as crawler:
# First request
try:
# NOTE(review): result1 is intentionally unused -- the arun call is
# made only for its side effect of registering the session.
result1 = await crawler.arun(url=self.test_url, config=config)
except Exception:
pass  # Proxy connection may fail, but session should be tracked
# Verify session was created
session_proxy = strategy.get_session_proxy("integration-test")
assert session_proxy is not None
# Cleanup
await strategy.release_session("integration-test")
@pytest.mark.asyncio
async def test_crawler_rotating_vs_sticky(self):
"""Compare rotating behavior vs sticky session behavior."""
strategy = RoundRobinProxyStrategy(self.proxies)
# Config WITHOUT sticky session - should rotate
rotating_config = CrawlerRunConfig(
cache_mode=CacheMode.BYPASS,
proxy_rotation_strategy=strategy,
page_timeout=5000
)
# Config WITH sticky session - should use same proxy
sticky_config = CrawlerRunConfig(
cache_mode=CacheMode.BYPASS,
proxy_rotation_strategy=strategy,
proxy_session_id="sticky-test",
page_timeout=5000
)
browser_config = BrowserConfig(headless=True)
async with AsyncWebCrawler(config=browser_config) as crawler:
# Track proxy configs used
rotating_proxies = []
sticky_proxies = []
# Try rotating requests (may fail due to test proxies, but config should be set)
for _ in range(3):
try:
await crawler.arun(url=self.test_url, config=rotating_config)
except Exception:
pass
# NOTE(review): presumably arun() assigns config.proxy_config from the
# rotation strategy on each request -- confirm against the crawler code.
rotating_proxies.append(rotating_config.proxy_config.server if rotating_config.proxy_config else None)
# Try sticky requests
for _ in range(3):
try:
await crawler.arun(url=self.test_url, config=sticky_config)
except Exception:
pass
sticky_proxies.append(sticky_config.proxy_config.server if sticky_config.proxy_config else None)
# Rotating should have different proxies (or cycle through them)
# Sticky should have same proxy for all requests
if all(sticky_proxies):
assert len(set(sticky_proxies)) == 1, "Sticky session should use same proxy"
await strategy.release_session("sticky-test")
class TestStickySessionRealWorld:
"""Real-world scenario tests for sticky sessions.
Note: These tests require actual proxy servers to verify IP consistency.
They are marked to be skipped if no proxy is configured.
"""
@pytest.mark.asyncio
@pytest.mark.skipif(
not os.environ.get('TEST_PROXY_1'),
reason="Requires TEST_PROXY_1 environment variable"
)
async def test_verify_ip_consistency(self):
"""Verify that sticky session actually uses same IP.
This test requires real proxies set in environment variables:
TEST_PROXY_1=ip:port:user:pass
TEST_PROXY_2=ip:port:user:pass
"""
import re
# Load proxies from environment
proxy_strs = [
os.environ.get('TEST_PROXY_1', ''),
os.environ.get('TEST_PROXY_2', '')
]
# Empty env values are dropped by the `if p` filter.
proxies = [ProxyConfig.from_string(p) for p in proxy_strs if p]
if len(proxies) < 2:
pytest.skip("Need at least 2 proxies for this test")
strategy = RoundRobinProxyStrategy(proxies)
# Config WITH sticky session
config = CrawlerRunConfig(
cache_mode=CacheMode.BYPASS,
proxy_rotation_strategy=strategy,
proxy_session_id="ip-verify-session",
page_timeout=30000
)
browser_config = BrowserConfig(headless=True)
async with AsyncWebCrawler(config=browser_config) as crawler:
ips = []
for i in range(3):
result = await crawler.arun(
url="https://httpbin.org/ip",
config=config
)
if result and result.success and result.html:
# Extract IP from response
# httpbin returns {"origin": "<ip>"}; pull the quoted value.
ip_match = re.search(r'"origin":\s*"([^"]+)"', result.html)
if ip_match:
ips.append(ip_match.group(1))
await strategy.release_session("ip-verify-session")
# All IPs should be same for sticky session
# (fewer than 2 captured IPs means the crawl failed -- nothing to compare)
if len(ips) >= 2:
assert len(set(ips)) == 1, f"Expected same IP, got: {ips}"
# ==================== STANDALONE TEST FUNCTIONS ====================
@pytest.mark.asyncio
async def test_sticky_session_simple():
    """Smoke test: one session id keeps mapping to one proxy."""
    pool = [
        ProxyConfig(server=f"http://proxy{i}.test:8080")
        for i in range(3)
    ]
    strategy = RoundRobinProxyStrategy(pool)
    # Three lookups against the same session id must all agree.
    results = [await strategy.get_proxy_for_session("test") for _ in range(3)]
    assert results[0].server == results[1].server == results[2].server
    print(f"Sticky session works! All requests use: {results[0].server}")
    # Cleanup so the session is not left dangling.
    await strategy.release_session("test")
if __name__ == "__main__":
    # Run only the lightweight smoke test when executed directly; the full
    # suite is meant to be driven by pytest.
    banner = "=" * 50
    print("Running Sticky Session tests...")
    print(banner)
    asyncio.run(test_sticky_session_simple())
    print("\n" + banner)
    print("To run the full pytest suite, use: pytest " + __file__)
    print(banner)
| {
"repo_id": "unclecode/crawl4ai",
"file_path": "tests/proxy/test_sticky_sessions.py",
"license": "Apache License 2.0",
"lines": 432,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
unclecode/crawl4ai:tests/test_prefetch_integration.py | """Integration tests for prefetch mode with the crawler."""
import pytest
import asyncio
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, BrowserConfig
# Use crawl4ai docs as test domain
TEST_DOMAIN = "https://docs.crawl4ai.com"
class TestPrefetchModeIntegration:
    """Integration tests for prefetch mode.

    Prefetch mode (``CrawlerRunConfig(prefetch=True)``) fetches raw HTML and
    extracts links while deliberately skipping the processing pipeline
    (cleaned_html, markdown, extraction strategies).
    """

    @pytest.mark.asyncio
    async def test_prefetch_returns_html_and_links(self):
        """Test that prefetch mode returns HTML and links only."""
        async with AsyncWebCrawler() as crawler:
            config = CrawlerRunConfig(prefetch=True)
            result = await crawler.arun(TEST_DOMAIN, config=config)
            # Raw HTML must be present and look like a real document.
            assert result.html is not None
            assert len(result.html) > 0
            assert "<html" in result.html.lower() or "<!doctype" in result.html.lower()
            # Link extraction still runs in prefetch mode.
            assert result.links is not None
            assert "internal" in result.links
            assert "external" in result.links
            # Processed artifacts must be absent; markdown may also be a
            # container object whose raw_markdown is None.
            assert result.markdown is None or (
                hasattr(result.markdown, 'raw_markdown') and
                result.markdown.raw_markdown is None
            )
            assert result.cleaned_html is None
            assert result.extracted_content is None

    @pytest.mark.asyncio
    async def test_prefetch_preserves_metadata(self):
        """Test that prefetch mode preserves essential metadata."""
        async with AsyncWebCrawler() as crawler:
            config = CrawlerRunConfig(prefetch=True)
            result = await crawler.arun(TEST_DOMAIN, config=config)
            # Should have success flag and URL.
            assert result.success is True
            assert result.url is not None
            # Status code should be present.  (Fixed: the previous
            # `x is not None or x == 200` had a dead second operand -- when x
            # is None, `None == 200` is always False -- so the expression was
            # already equivalent to this simpler assertion.)
            assert result.status_code is not None

    @pytest.mark.asyncio
    async def test_prefetch_with_deep_crawl(self):
        """Test prefetch mode with deep crawl strategy."""
        from crawl4ai import BFSDeepCrawlStrategy
        async with AsyncWebCrawler() as crawler:
            config = CrawlerRunConfig(
                prefetch=True,
                deep_crawl_strategy=BFSDeepCrawlStrategy(
                    max_depth=1,
                    max_pages=3
                )
            )
            result_container = await crawler.arun(TEST_DOMAIN, config=config)
            # Normalize: deep crawl may hand back an async iterator, a sync
            # iterable, or a single result depending on configuration.
            if hasattr(result_container, '__aiter__'):
                results = [r async for r in result_container]
            else:
                results = list(result_container) if hasattr(result_container, '__iter__') else [result_container]
            # Each crawled page still gets HTML and links in prefetch mode.
            for result in results:
                assert result.html is not None
                assert result.links is not None
            # Should have crawled at least one page.
            assert len(results) >= 1

    @pytest.mark.asyncio
    async def test_prefetch_then_process_with_raw(self):
        """Test the full two-phase workflow: prefetch then process."""
        async with AsyncWebCrawler() as crawler:
            # Phase 1: prefetch only -- capture the raw HTML.
            prefetch_config = CrawlerRunConfig(prefetch=True)
            prefetch_result = await crawler.arun(TEST_DOMAIN, config=prefetch_config)
            stored_html = prefetch_result.html
            assert stored_html is not None
            assert len(stored_html) > 0
            # Phase 2: full processing of the stored HTML via the raw: scheme.
            process_config = CrawlerRunConfig(
                base_url=TEST_DOMAIN  # needed so relative links resolve
            )
            processed_result = await crawler.arun(
                f"raw:{stored_html}",
                config=process_config
            )
            # Should now have full processing.
            assert processed_result.html is not None
            assert processed_result.success is True
            # Note: cleaned_html/markdown contents depend on the page, so
            # they are not asserted here.

    @pytest.mark.asyncio
    async def test_prefetch_links_structure(self):
        """Test that links have the expected structure."""
        async with AsyncWebCrawler() as crawler:
            config = CrawlerRunConfig(prefetch=True)
            result = await crawler.arun(TEST_DOMAIN, config=config)
            assert result.links is not None
            # Either bucket may be empty; validate the record shape only
            # when a bucket is populated.
            for bucket in ("internal", "external"):
                if result.links[bucket]:
                    link = result.links[bucket][0]
                    assert "href" in link
                    assert "text" in link
                    assert link["href"].startswith("http")

    @pytest.mark.asyncio
    async def test_prefetch_config_clone(self):
        """Test that config.clone() preserves the prefetch setting."""
        config = CrawlerRunConfig(prefetch=True)
        cloned = config.clone()
        # `is True/False` instead of `== True/False`: idiomatic, and also
        # rejects truthy non-bool values slipping through clone().
        assert cloned.prefetch is True
        # Clone with override.
        cloned_false = config.clone(prefetch=False)
        assert cloned_false.prefetch is False

    @pytest.mark.asyncio
    async def test_prefetch_to_dict(self):
        """Test that to_dict() includes prefetch."""
        config = CrawlerRunConfig(prefetch=True)
        config_dict = config.to_dict()
        assert "prefetch" in config_dict
        assert config_dict["prefetch"] is True

    @pytest.mark.asyncio
    async def test_prefetch_default_false(self):
        """Test that prefetch defaults to False."""
        config = CrawlerRunConfig()
        assert config.prefetch is False

    @pytest.mark.asyncio
    async def test_prefetch_explicit_false(self):
        """Test explicit prefetch=False works like the default (full mode)."""
        async with AsyncWebCrawler() as crawler:
            config = CrawlerRunConfig(prefetch=False)
            result = await crawler.arun(TEST_DOMAIN, config=config)
            assert result.html is not None
            # The full pipeline populates cleaned_html.
            assert result.cleaned_html is not None
class TestPrefetchPerformance:
    """Performance-related tests for prefetch mode."""

    @pytest.mark.asyncio
    async def test_prefetch_returns_quickly(self):
        """Compare prefetch vs full-mode wall time (soft check, logged only)."""
        import time
        async with AsyncWebCrawler() as crawler:

            async def timed_run(cfg):
                # Wall-clock a single crawl with the given config.
                started = time.time()
                await crawler.arun(TEST_DOMAIN, config=cfg)
                return time.time() - started

            prefetch_time = await timed_run(CrawlerRunConfig(prefetch=True))
            full_time = await timed_run(CrawlerRunConfig())
            # No hard assertion on purpose: timings vary with content and
            # network; the numbers are printed for debugging.
            print(f"\nPrefetch: {prefetch_time:.3f}s, Full: {full_time:.3f}s")
class TestPrefetchWithRawHTML:
"""Test prefetch mode with raw HTML input."""
@pytest.mark.asyncio
async def test_prefetch_with_raw_html(self):
"""Test prefetch mode works with raw: URL scheme."""
# Fixture: two relative links and one absolute off-site link.
sample_html = """
<html>
<head><title>Test Page</title></head>
<body>
<h1>Hello World</h1>
<a href="/link1">Link 1</a>
<a href="/link2">Link 2</a>
<a href="https://external.com/page">External</a>
</body>
</html>
"""
async with AsyncWebCrawler() as crawler:
config = CrawlerRunConfig(
prefetch=True,
# base_url anchors the relative hrefs for link resolution.
base_url="https://example.com"
)
result = await crawler.arun(f"raw:{sample_html}", config=config)
assert result.success is True
assert result.html is not None
assert result.links is not None
# Should have extracted links
# (the two relative hrefs resolve under base_url => internal;
# the external.com href lands in the external bucket)
assert len(result.links["internal"]) >= 2
assert len(result.links["external"]) >= 1
| {
"repo_id": "unclecode/crawl4ai",
"file_path": "tests/test_prefetch_integration.py",
"license": "Apache License 2.0",
"lines": 188,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
unclecode/crawl4ai:tests/test_prefetch_mode.py | """Unit tests for the quick_extract_links function used in prefetch mode."""
import pytest
from crawl4ai.utils import quick_extract_links
class TestQuickExtractLinks:
"""Unit tests for the quick_extract_links function."""
# Each test feeds a small HTML fixture through quick_extract_links with a
# base URL and checks the {"internal": [...], "external": [...]} result.
def test_basic_internal_links(self):
"""Test extraction of internal links."""
html = '''
<html>
<body>
<a href="/page1">Page 1</a>
<a href="/page2">Page 2</a>
<a href="https://example.com/page3">Page 3</a>
</body>
</html>
'''
result = quick_extract_links(html, "https://example.com")
# Relative hrefs are resolved against the base URL.
assert len(result["internal"]) == 3
assert result["internal"][0]["href"] == "https://example.com/page1"
assert result["internal"][0]["text"] == "Page 1"
def test_external_links(self):
"""Test extraction and classification of external links."""
html = '''
<html>
<body>
<a href="https://other.com/page">External</a>
<a href="/internal">Internal</a>
</body>
</html>
'''
result = quick_extract_links(html, "https://example.com")
assert len(result["internal"]) == 1
assert len(result["external"]) == 1
assert result["external"][0]["href"] == "https://other.com/page"
def test_ignores_javascript_and_mailto(self):
"""Test that javascript: and mailto: links are ignored."""
html = '''
<html>
<body>
<a href="javascript:void(0)">Click</a>
<a href="mailto:test@example.com">Email</a>
<a href="tel:+1234567890">Call</a>
<a href="/valid">Valid</a>
</body>
</html>
'''
result = quick_extract_links(html, "https://example.com")
# Only the plain href survives the scheme filter.
assert len(result["internal"]) == 1
assert result["internal"][0]["href"] == "https://example.com/valid"
def test_ignores_anchor_only_links(self):
"""Test that anchor-only links (#section) are ignored."""
html = '''
<html>
<body>
<a href="#section1">Section 1</a>
<a href="#section2">Section 2</a>
<a href="/page#section">Page with anchor</a>
</body>
</html>
'''
result = quick_extract_links(html, "https://example.com")
# Only the page link should be included, anchor-only links are skipped
assert len(result["internal"]) == 1
assert "/page" in result["internal"][0]["href"]
def test_deduplication(self):
"""Test that duplicate URLs are deduplicated."""
html = '''
<html>
<body>
<a href="/page">Link 1</a>
<a href="/page">Link 2</a>
<a href="/page">Link 3</a>
</body>
</html>
'''
result = quick_extract_links(html, "https://example.com")
# Three anchors, one URL => one entry.
assert len(result["internal"]) == 1
def test_handles_malformed_html(self):
"""Test graceful handling of malformed HTML."""
html = "not valid html at all <><><"
result = quick_extract_links(html, "https://example.com")
# Should not raise, should return empty
assert result["internal"] == []
assert result["external"] == []
def test_empty_html(self):
"""Test handling of empty HTML."""
result = quick_extract_links("", "https://example.com")
assert result == {"internal": [], "external": []}
def test_relative_url_resolution(self):
"""Test that relative URLs are resolved correctly."""
html = '''
<html>
<body>
<a href="page1.html">Relative</a>
<a href="./page2.html">Dot Relative</a>
<a href="../page3.html">Parent Relative</a>
</body>
</html>
'''
result = quick_extract_links(html, "https://example.com/docs/")
assert len(result["internal"]) >= 1
# All should be internal and properly resolved
for link in result["internal"]:
assert link["href"].startswith("https://example.com")
def test_text_truncation(self):
"""Test that long link text is truncated to 200 chars."""
# 300-char anchor text must come back capped at exactly 200.
long_text = "A" * 300
html = f'''
<html>
<body>
<a href="/page">{long_text}</a>
</body>
</html>
'''
result = quick_extract_links(html, "https://example.com")
assert len(result["internal"]) == 1
assert len(result["internal"][0]["text"]) == 200
def test_empty_href_ignored(self):
"""Test that empty href attributes are ignored."""
html = '''
<html>
<body>
<a href="">Empty</a>
<a href=" ">Whitespace</a>
<a href="/valid">Valid</a>
</body>
</html>
'''
result = quick_extract_links(html, "https://example.com")
assert len(result["internal"]) == 1
assert result["internal"][0]["href"] == "https://example.com/valid"
def test_mixed_internal_external(self):
"""Test correct classification of mixed internal and external links."""
html = '''
<html>
<body>
<a href="/internal1">Internal 1</a>
<a href="https://example.com/internal2">Internal 2</a>
<a href="https://google.com">Google</a>
<a href="https://github.com/repo">GitHub</a>
<a href="/internal3">Internal 3</a>
</body>
</html>
'''
result = quick_extract_links(html, "https://example.com")
assert len(result["internal"]) == 3
assert len(result["external"]) == 2
def test_subdomain_handling(self):
"""Test that subdomains are handled correctly."""
html = '''
<html>
<body>
<a href="https://docs.example.com/page">Docs subdomain</a>
<a href="https://api.example.com/v1">API subdomain</a>
<a href="https://example.com/main">Main domain</a>
</body>
</html>
'''
result = quick_extract_links(html, "https://example.com")
# All should be internal (same base domain)
# NOTE(review): the assertion below only checks the combined count, which
# is weaker than the comment above -- it passes regardless of which bucket
# the subdomain links land in.
total_links = len(result["internal"]) + len(result["external"])
assert total_links == 3
class TestQuickExtractLinksEdgeCases:
"""Edge case tests for quick_extract_links."""
def test_no_links_in_page(self):
"""Test page with no links."""
html = '''
<html>
<body>
<h1>No Links Here</h1>
<p>Just some text content.</p>
</body>
</html>
'''
result = quick_extract_links(html, "https://example.com")
# No anchors => both buckets empty.
assert result["internal"] == []
assert result["external"] == []
def test_links_in_nested_elements(self):
"""Test links nested in various elements."""
html = '''
<html>
<body>
<nav>
<ul>
<li><a href="/home">Home</a></li>
<li><a href="/about">About</a></li>
</ul>
</nav>
<div class="content">
<p>Check out <a href="/products">our products</a>.</p>
</div>
</body>
</html>
'''
result = quick_extract_links(html, "https://example.com")
# Anchors are found regardless of nesting depth.
assert len(result["internal"]) == 3
def test_link_with_nested_elements(self):
"""Test links containing nested elements."""
html = '''
<html>
<body>
<a href="/page"><span>Nested</span> <strong>Text</strong></a>
</body>
</html>
'''
result = quick_extract_links(html, "https://example.com")
assert len(result["internal"]) == 1
# Text from child elements is folded into the link's text.
assert "Nested" in result["internal"][0]["text"]
assert "Text" in result["internal"][0]["text"]
def test_protocol_relative_urls(self):
"""Test handling of protocol-relative URLs (//example.com)."""
html = '''
<html>
<body>
<a href="//cdn.example.com/asset">CDN Link</a>
</body>
</html>
'''
result = quick_extract_links(html, "https://example.com")
# Should be resolved with https:
# NOTE(review): classification (internal vs external) is deliberately not
# pinned down here -- only that the link is extracted at all.
total = len(result["internal"]) + len(result["external"])
assert total >= 1
def test_whitespace_in_href(self):
"""Test handling of whitespace around href values."""
html = '''
<html>
<body>
<a href=" /page1 ">Padded</a>
<a href="
/page2
">Multiline</a>
</body>
</html>
'''
result = quick_extract_links(html, "https://example.com")
# Both should be extracted and normalized
assert len(result["internal"]) >= 1
| {
"repo_id": "unclecode/crawl4ai",
"file_path": "tests/test_prefetch_mode.py",
"license": "Apache License 2.0",
"lines": 237,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
unclecode/crawl4ai:tests/test_prefetch_regression.py | """Regression tests to ensure prefetch mode doesn't break existing functionality."""
import pytest
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig
TEST_URL = "https://docs.crawl4ai.com"
class TestNoRegressions:
"""Ensure prefetch mode doesn't break existing functionality."""
@pytest.mark.asyncio
async def test_default_mode_unchanged(self):
"""Test that default mode (prefetch=False) works exactly as before."""
async with AsyncWebCrawler() as crawler:
config = CrawlerRunConfig() # Default config
result = await crawler.arun(TEST_URL, config=config)
# All standard fields should be populated
assert result.html is not None
assert result.cleaned_html is not None
assert result.links is not None
assert result.success is True
@pytest.mark.asyncio
async def test_explicit_prefetch_false(self):
"""Test explicit prefetch=False works like default."""
async with AsyncWebCrawler() as crawler:
config = CrawlerRunConfig(prefetch=False)
result = await crawler.arun(TEST_URL, config=config)
assert result.cleaned_html is not None
@pytest.mark.asyncio
async def test_config_clone_preserves_prefetch(self):
"""Test that config.clone() preserves prefetch setting."""
config = CrawlerRunConfig(prefetch=True)
cloned = config.clone()
assert cloned.prefetch == True
# Clone with override
cloned_false = config.clone(prefetch=False)
assert cloned_false.prefetch == False
@pytest.mark.asyncio
async def test_config_to_dict_includes_prefetch(self):
"""Test that to_dict() includes prefetch."""
config_true = CrawlerRunConfig(prefetch=True)
config_false = CrawlerRunConfig(prefetch=False)
assert config_true.to_dict()["prefetch"] == True
assert config_false.to_dict()["prefetch"] == False
@pytest.mark.asyncio
async def test_existing_extraction_still_works(self):
"""Test that extraction strategies still work in normal mode."""
from crawl4ai import JsonCssExtractionStrategy
schema = {
"name": "Links",
"baseSelector": "a",
"fields": [
{"name": "href", "selector": "", "type": "attribute", "attribute": "href"},
{"name": "text", "selector": "", "type": "text"}
]
}
async with AsyncWebCrawler() as crawler:
config = CrawlerRunConfig(
extraction_strategy=JsonCssExtractionStrategy(schema=schema)
)
result = await crawler.arun(TEST_URL, config=config)
assert result.extracted_content is not None
@pytest.mark.asyncio
async def test_existing_deep_crawl_still_works(self):
"""Test that deep crawl without prefetch still does full processing."""
from crawl4ai import BFSDeepCrawlStrategy
async with AsyncWebCrawler() as crawler:
config = CrawlerRunConfig(
deep_crawl_strategy=BFSDeepCrawlStrategy(
max_depth=1,
max_pages=2
)
# No prefetch - should do full processing
)
result_container = await crawler.arun(TEST_URL, config=config)
# Handle both list and iterator results
if hasattr(result_container, '__aiter__'):
results = [r async for r in result_container]
else:
results = list(result_container) if hasattr(result_container, '__iter__') else [result_container]
# Each result should have full processing
for result in results:
assert result.cleaned_html is not None
assert len(results) >= 1
@pytest.mark.asyncio
async def test_raw_url_scheme_still_works(self):
"""Test that raw: URL scheme works for processing stored HTML."""
sample_html = """
<html>
<head><title>Test Page</title></head>
<body>
<h1>Hello World</h1>
<p>This is a test paragraph.</p>
<a href="/link1">Link 1</a>
</body>
</html>
"""
async with AsyncWebCrawler() as crawler:
config = CrawlerRunConfig()
result = await crawler.arun(f"raw:{sample_html}", config=config)
assert result.success is True
assert result.html is not None
assert "Hello World" in result.html
assert result.cleaned_html is not None
@pytest.mark.asyncio
async def test_screenshot_still_works(self):
"""Test that screenshot option still works in normal mode."""
async with AsyncWebCrawler() as crawler:
config = CrawlerRunConfig(screenshot=True)
result = await crawler.arun(TEST_URL, config=config)
assert result.success is True
# Screenshot data should be present
assert result.screenshot is not None or result.screenshot_data is not None
@pytest.mark.asyncio
async def test_js_execution_still_works(self):
    """JavaScript execution keeps working in normal mode."""
    async with AsyncWebCrawler() as wc:
        cfg = CrawlerRunConfig(
            js_code="document.querySelector('h1')?.textContent"
        )
        res = await wc.arun(TEST_URL, config=cfg)
        assert res.success is True
        assert res.html is not None
class TestPrefetchDoesNotAffectOtherModes:
    """Prefetch must not interfere with other crawler configurations."""

    @pytest.mark.asyncio
    async def test_prefetch_with_other_options_ignored(self):
        """Options irrelevant to prefetch are properly ignored in prefetch mode."""
        async with AsyncWebCrawler() as wc:
            cfg = CrawlerRunConfig(
                prefetch=True,
                # These should be ignored in prefetch mode
                screenshot=True,
                pdf=True,
                only_text=True,
                word_count_threshold=100,
            )
            res = await wc.arun(TEST_URL, config=cfg)
            # Raw HTML and links are still returned...
            assert res.html is not None
            assert res.links is not None
            # ...but no processed artifacts are produced
            assert res.cleaned_html is None
            assert res.extracted_content is None

    @pytest.mark.asyncio
    async def test_stream_mode_still_works(self):
        """Stream mode still works normally."""
        async with AsyncWebCrawler() as wc:
            res = await wc.arun(TEST_URL, config=CrawlerRunConfig(stream=True))
            assert res.success is True
            assert res.html is not None

    @pytest.mark.asyncio
    async def test_cache_mode_still_works(self):
        """Cache modes still work normally."""
        from crawl4ai import CacheMode
        async with AsyncWebCrawler() as wc:
            # First request - bypass cache
            first = await wc.arun(TEST_URL, config=CrawlerRunConfig(cache_mode=CacheMode.BYPASS))
            assert first.success is True
            # Second request - should work with caching enabled
            second = await wc.arun(TEST_URL, config=CrawlerRunConfig(cache_mode=CacheMode.ENABLED))
            assert second.success is True
class TestBackwardsCompatibility:
    """Test backwards compatibility with existing code patterns."""

    @pytest.mark.asyncio
    async def test_config_without_prefetch_works(self):
        """Test that configs created without prefetch parameter work."""
        # Simulating old code that doesn't know about prefetch
        config = CrawlerRunConfig(
            word_count_threshold=50,
            css_selector="body"
        )
        # Should default to prefetch=False.
        # FIX: compare with `is False` rather than `== False` (PEP 8 / E712);
        # this also pins the default to an actual bool, not a falsy value.
        assert config.prefetch is False
        async with AsyncWebCrawler() as crawler:
            result = await crawler.arun(TEST_URL, config=config)
            assert result.success is True
            assert result.cleaned_html is not None

    @pytest.mark.asyncio
    async def test_from_kwargs_without_prefetch(self):
        """Test CrawlerRunConfig.from_kwargs works without prefetch."""
        config = CrawlerRunConfig.from_kwargs({
            "word_count_threshold": 50,
            "verbose": False
        })
        # FIX: identity comparison for the boolean default (E712).
        assert config.prefetch is False
| {
"repo_id": "unclecode/crawl4ai",
"file_path": "tests/test_prefetch_regression.py",
"license": "Apache License 2.0",
"lines": 185,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
unclecode/crawl4ai:tests/test_raw_html_browser.py | """
Tests for raw:/file:// URL browser pipeline support.
Tests the new feature that allows js_code, wait_for, and other browser operations
to work with raw: and file:// URLs by routing them through _crawl_web() with
set_content() instead of goto().
"""
import pytest
import asyncio
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig
@pytest.mark.asyncio
async def test_raw_html_fast_path():
    """raw: input with no browser params is returned verbatim (fast path)."""
    snippet = "<html><body><div id='test'>Original Content</div></body></html>"
    async with AsyncWebCrawler() as wc:
        # No browser params -> fast path
        res = await wc.arun(f"raw:{snippet}", config=CrawlerRunConfig())
        assert res.success
        assert "Original Content" in res.html
        # Fast path must hand back the input HTML unmodified
        assert res.html == snippet
@pytest.mark.asyncio
async def test_js_code_on_raw_html():
    """Test that js_code executes on raw: HTML and modifies the DOM."""
    html = "<html><body><div id='test'>Original</div></body></html>"
    async with AsyncWebCrawler() as crawler:
        config = CrawlerRunConfig(
            js_code="document.getElementById('test').innerText = 'Modified by JS'"
        )
        result = await crawler.arun(f"raw:{html}", config=config)
        assert result.success
        assert "Modified by JS" in result.html
        # FIX: the original final assertion (`"Original" not in result.html
        # or "Modified by JS" in result.html`) was a tautology given the
        # line above and verified nothing.  innerText assignment replaces
        # the node's only text, so the old content must actually be gone.
        assert "Original" not in result.html
@pytest.mark.asyncio
async def test_js_code_adds_element_to_raw_html():
    """js_code can inject brand-new elements into raw: HTML."""
    markup = "<html><body><div id='container'></div></body></html>"
    async with AsyncWebCrawler() as wc:
        cfg = CrawlerRunConfig(
            js_code='document.getElementById("container").innerHTML = "<span id=\'injected\'>Custom Content</span>"'
        )
        res = await wc.arun(f"raw:{markup}", config=cfg)
        assert res.success
        assert "injected" in res.html
        assert "Custom Content" in res.html
@pytest.mark.asyncio
async def test_screenshot_on_raw_html():
    """Screenshots can be captured from raw: HTML."""
    markup = "<html><body><h1 style='color:red;font-size:48px;'>Screenshot Test</h1></body></html>"
    async with AsyncWebCrawler() as wc:
        res = await wc.arun(f"raw:{markup}", config=CrawlerRunConfig(screenshot=True))
        assert res.success
        assert res.screenshot is not None
        # Should have substantial screenshot data, not a stub
        assert len(res.screenshot) > 100
@pytest.mark.asyncio
async def test_process_in_browser_flag():
    """process_in_browser=True forces the browser path even with no other params."""
    markup = "<html><body><div>Test</div></body></html>"
    async with AsyncWebCrawler() as wc:
        res = await wc.arun(f"raw:{markup}", config=CrawlerRunConfig(process_in_browser=True))
        assert res.success
        # Browser path normalizes HTML, so only check the content survived
        assert "Test" in res.html
@pytest.mark.asyncio
async def test_raw_prefix_variations():
    """Both the raw: and raw:// prefix formats are accepted."""
    markup = "<html><body>Content</body></html>"
    async with AsyncWebCrawler() as wc:
        cfg = CrawlerRunConfig(
            js_code='document.body.innerHTML += "<div id=\'added\'>Added</div>"'
        )
        # Exercise raw: first, then raw:// — same behavior expected
        for prefix in ("raw:", "raw://"):
            res = await wc.arun(f"{prefix}{markup}", config=cfg)
            assert res.success
            assert "Added" in res.html
@pytest.mark.asyncio
async def test_wait_for_on_raw_html():
    """Test that wait_for works with raw: HTML after js_code modifies DOM."""
    html = "<html><body><div id='container'></div></body></html>"
    async with AsyncWebCrawler() as crawler:
        config = CrawlerRunConfig(
            # The script inserts the target node asynchronously (100ms delay),
            # so wait_for must poll until #delayed appears or 5s elapses.
            js_code='''
setTimeout(() => {
document.getElementById('container').innerHTML = '<div id="delayed">Delayed Content</div>';
}, 100);
''',
            wait_for="#delayed",
            wait_for_timeout=5000
        )
        result = await crawler.arun(f"raw:{html}", config=config)
        assert result.success
        assert "Delayed Content" in result.html
@pytest.mark.asyncio
async def test_multiple_js_code_scripts():
    """Multiple js_code snippets run sequentially, in list order."""
    markup = "<html><body><div id='counter'>0</div></body></html>"
    scripts = [
        "document.getElementById('counter').innerText = '1'",
        "document.getElementById('counter').innerText = parseInt(document.getElementById('counter').innerText) + 1",
        "document.getElementById('counter').innerText = parseInt(document.getElementById('counter').innerText) + 1",
    ]
    async with AsyncWebCrawler() as wc:
        res = await wc.arun(f"raw:{markup}", config=CrawlerRunConfig(js_code=scripts))
        assert res.success
        # Counter should be 3 after all scripts run
        assert ">3<" in res.html
if __name__ == "__main__":
    # Quick manual smoke test for running this file directly (outside pytest).
    async def quick_test():
        html = "<html><body><div id='test'>Original</div></body></html>"
        async with AsyncWebCrawler(verbose=True) as crawler:
            # Test 1: Fast path (no browser params -> HTML returned directly)
            print("\n=== Test 1: Fast path (no browser params) ===")
            result1 = await crawler.arun(f"raw:{html}")
            print(f"Success: {result1.success}")
            print(f"HTML contains 'Original': {'Original' in result1.html}")
            # Test 2: js_code modifies DOM (forces browser path)
            print("\n=== Test 2: js_code modifies DOM ===")
            config = CrawlerRunConfig(
                js_code="document.getElementById('test').innerText = 'Modified by JS'"
            )
            result2 = await crawler.arun(f"raw:{html}", config=config)
            print(f"Success: {result2.success}")
            print(f"HTML contains 'Modified by JS': {'Modified by JS' in result2.html}")
            print(f"HTML snippet: {result2.html[:500]}...")
    asyncio.run(quick_test())
| {
"repo_id": "unclecode/crawl4ai",
"file_path": "tests/test_raw_html_browser.py",
"license": "Apache License 2.0",
"lines": 132,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
unclecode/crawl4ai:tests/test_raw_html_edge_cases.py | """
BRUTAL edge case tests for raw:/file:// URL browser pipeline.
These tests try to break the system with tricky inputs, edge cases,
and compatibility checks to ensure we didn't break existing functionality.
"""
import pytest
import asyncio
import tempfile
import os
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig
# ============================================================================
# EDGE CASE: Hash characters in HTML (previously broke urlparse - Issue #283)
# ============================================================================
@pytest.mark.asyncio
async def test_raw_html_with_hash_in_css():
    """Test that # in CSS colors doesn't break HTML parsing (regression for #283)."""
    # '#' previously confused urlparse into treating the rest as a fragment.
    html = """
<html>
<head>
<style>
body { background-color: #ff5733; color: #333333; }
.highlight { border: 1px solid #000; }
</style>
</head>
<body>
<div class="highlight" style="color: #ffffff;">Content with hash colors</div>
</body>
</html>
"""
    async with AsyncWebCrawler() as crawler:
        config = CrawlerRunConfig(js_code="document.body.innerHTML += '<div id=\"added\">Added</div>'")
        result = await crawler.arun(f"raw:{html}", config=config)
        assert result.success
        assert "#ff5733" in result.html or "ff5733" in result.html  # Color should be preserved
        assert "Added" in result.html  # JS executed
        assert "Content with hash colors" in result.html  # Original content preserved
@pytest.mark.asyncio
async def test_raw_html_with_fragment_links():
    """HTML containing #fragment links must not break raw: handling."""
    markup = """
<html><body>
<a href="#section1">Go to section 1</a>
<a href="#section2">Go to section 2</a>
<div id="section1">Section 1</div>
<div id="section2">Section 2</div>
</body></html>
"""
    async with AsyncWebCrawler() as wc:
        cfg = CrawlerRunConfig(js_code="document.getElementById('section1').innerText = 'Modified Section 1'")
        res = await wc.arun(f"raw:{markup}", config=cfg)
        assert res.success
        assert "Modified Section 1" in res.html
        # Fragment link must survive untouched
        assert "#section2" in res.html
# ============================================================================
# EDGE CASE: Special characters and unicode
# ============================================================================
@pytest.mark.asyncio
async def test_raw_html_with_unicode():
    """Test raw HTML with various unicode characters."""
    # NOTE(review): the non-ASCII text below appears mojibake-encoded in the
    # checked-in file; the assertions use the same byte sequences, so the
    # test is internally consistent either way — confirm intended encoding.
    html = """
<html><body>
<div id="unicode">ζ₯ζ¬θͺ δΈζ νκ΅μ΄ Ψ§ΩΨΉΨ±Ψ¨ΩΨ© π π» π</div>
<div id="special">& < > " '</div>
</body></html>
"""
    async with AsyncWebCrawler() as crawler:
        # FIX: the js_code and assertion string literals contained a raw
        # line break inside a single-line string (a SyntaxError in Python);
        # the literals are rejoined onto one line here.
        config = CrawlerRunConfig(js_code="document.getElementById('unicode').innerText += ' β Modified'")
        result = await crawler.arun(f"raw:{html}", config=config)
        assert result.success
        assert "β Modified" in result.html or "Modified" in result.html
        # Check unicode is preserved
        assert "ζ₯ζ¬θͺ" in result.html or "&#" in result.html  # Either preserved or encoded
@pytest.mark.asyncio
async def test_raw_html_with_script_tags():
    """Test raw HTML with existing script tags doesn't interfere with js_code."""
    html = """
<html><body>
<div id="counter">0</div>
<script>
// This script runs on page load
document.getElementById('counter').innerText = '10';
</script>
</body></html>
"""
    async with AsyncWebCrawler() as crawler:
        # Our js_code runs AFTER the page scripts
        config = CrawlerRunConfig(
            js_code="document.getElementById('counter').innerText = parseInt(document.getElementById('counter').innerText) + 5"
        )
        result = await crawler.arun(f"raw:{html}", config=config)
        assert result.success
        # The embedded script sets it to 10, then our js_code adds 5
        assert ">15<" in result.html or "15" in result.html
# ============================================================================
# EDGE CASE: Empty and malformed HTML
# ============================================================================
@pytest.mark.asyncio
async def test_raw_html_empty():
    """An empty raw: payload is still processable by the browser path."""
    blank = ""
    async with AsyncWebCrawler() as wc:
        cfg = CrawlerRunConfig(js_code="document.body.innerHTML = '<div>Added to empty</div>'")
        res = await wc.arun(f"raw:{blank}", config=cfg)
        assert res.success
        assert "Added to empty" in res.html
@pytest.mark.asyncio
async def test_raw_html_minimal():
    """Bare text with no tags gets wrapped into a proper document."""
    text = "Just plain text, no HTML tags"
    async with AsyncWebCrawler() as wc:
        cfg = CrawlerRunConfig(js_code="document.body.innerHTML += '<div id=\"injected\">Injected</div>'")
        res = await wc.arun(f"raw:{text}", config=cfg)
        assert res.success
        # Browser should wrap the bare text in proper HTML
        assert "Injected" in res.html
@pytest.mark.asyncio
async def test_raw_html_malformed():
    """Unclosed tags are repaired by the browser before JS runs."""
    broken = "<html><body><div><span>Unclosed tags<div>More content"
    async with AsyncWebCrawler() as wc:
        cfg = CrawlerRunConfig(js_code="document.body.innerHTML += '<div id=\"valid\">Valid Added</div>'")
        res = await wc.arun(f"raw:{broken}", config=cfg)
        assert res.success
        # Browser should have fixed the malformed HTML and run our script
        assert "Valid Added" in res.html
# ============================================================================
# EDGE CASE: Very large HTML
# ============================================================================
@pytest.mark.asyncio
async def test_raw_html_large():
    """A 100KB+ raw document is handled and JS can target any node."""
    # Generate roughly 100KB of repetitive markup
    rows = "".join([f'<div class="item" id="item-{i}">Item {i} content here with some text</div>\n' for i in range(2000)])
    doc = f"<html><body>{rows}</body></html>"
    # Verify the fixture is actually large before crawling
    assert len(doc) > 100000
    async with AsyncWebCrawler() as wc:
        cfg = CrawlerRunConfig(
            js_code="document.getElementById('item-999').innerText = 'MODIFIED ITEM 999'"
        )
        res = await wc.arun(f"raw:{doc}", config=cfg)
        assert res.success
        assert "MODIFIED ITEM 999" in res.html
        # Last item should still exist
        assert "item-1999" in res.html
# ============================================================================
# EDGE CASE: JavaScript errors and timeouts
# ============================================================================
@pytest.mark.asyncio
async def test_raw_html_js_error_doesnt_crash():
    """A throwing script must not abort the crawl."""
    markup = "<html><body><div id='test'>Original</div></body></html>"
    scripts = [
        "nonExistentFunction();",  # This will throw an error
        "document.getElementById('test').innerText = 'Still works'",  # This should still run
    ]
    async with AsyncWebCrawler() as wc:
        res = await wc.arun(f"raw:{markup}", config=CrawlerRunConfig(js_code=scripts))
        # Crawl should succeed even with JS errors
        assert res.success
@pytest.mark.asyncio
async def test_raw_html_wait_for_timeout():
    """wait_for on a selector that never appears must time out gracefully."""
    markup = "<html><body><div id='test'>Original</div></body></html>"
    async with AsyncWebCrawler() as wc:
        cfg = CrawlerRunConfig(
            wait_for="#never-exists",
            wait_for_timeout=1000,  # 1 second timeout
        )
        res = await wc.arun(f"raw:{markup}", config=cfg)
        # The timeout may surface as success=False or success=True with
        # partial content; either way the call must return rather than
        # hang or crash.
        assert res is not None
# ============================================================================
# COMPATIBILITY: Normal HTTP URLs still work
# ============================================================================
@pytest.mark.asyncio
async def test_http_urls_still_work():
    """Regression guard: ordinary HTTP crawling is unaffected by raw: support."""
    async with AsyncWebCrawler() as wc:
        res = await wc.arun("https://example.com")
        assert res.success
        assert "Example Domain" in res.html
@pytest.mark.asyncio
async def test_http_with_js_code_still_works():
    """Regression guard: js_code on HTTP URLs is unaffected by raw: support."""
    async with AsyncWebCrawler() as wc:
        cfg = CrawlerRunConfig(
            js_code="document.body.innerHTML += '<div id=\"injected\">Injected via JS</div>'"
        )
        res = await wc.arun("https://example.com", config=cfg)
        assert res.success
        assert "Injected via JS" in res.html
# ============================================================================
# COMPATIBILITY: File URLs
# ============================================================================
@pytest.mark.asyncio
async def test_file_url_with_js_code():
    """js_code executes against documents loaded via file:// URLs."""
    # Create a temp file that survives close (delete=False) so the browser can open it
    with tempfile.NamedTemporaryFile(mode='w', suffix='.html', delete=False) as fh:
        fh.write("<html><body><div id='file-content'>File Content</div></body></html>")
        path = fh.name
    try:
        async with AsyncWebCrawler() as wc:
            cfg = CrawlerRunConfig(
                js_code="document.getElementById('file-content').innerText = 'Modified File Content'"
            )
            res = await wc.arun(f"file://{path}", config=cfg)
            assert res.success
            assert "Modified File Content" in res.html
    finally:
        os.unlink(path)
@pytest.mark.asyncio
async def test_file_url_fast_path():
    """file:// URLs with no browser params take the direct-read fast path."""
    with tempfile.NamedTemporaryFile(mode='w', suffix='.html', delete=False) as fh:
        fh.write("<html><body>Fast path file content</body></html>")
        path = fh.name
    try:
        async with AsyncWebCrawler() as wc:
            res = await wc.arun(f"file://{path}")
            assert res.success
            assert "Fast path file content" in res.html
    finally:
        os.unlink(path)
# ============================================================================
# COMPATIBILITY: Extraction strategies with raw HTML
# ============================================================================
@pytest.mark.asyncio
async def test_raw_html_with_css_extraction():
    """Test CSS extraction on raw HTML after js_code modifies it."""
    from crawl4ai.extraction_strategy import JsonCssExtractionStrategy
    html = """
<html><body>
<div class="products">
<div class="product"><span class="name">Original Product</span></div>
</div>
</body></html>
"""
    schema = {
        "name": "Products",
        "baseSelector": ".product",
        "fields": [
            {"name": "name", "selector": ".name", "type": "text"}
        ]
    }
    async with AsyncWebCrawler() as crawler:
        config = CrawlerRunConfig(
            # Extraction must see the DOM *after* this script appends a product.
            js_code="""
document.querySelector('.products').innerHTML +=
'<div class="product"><span class="name">JS Added Product</span></div>';
""",
            extraction_strategy=JsonCssExtractionStrategy(schema)
        )
        result = await crawler.arun(f"raw:{html}", config=config)
        assert result.success
        # Check that extraction found both products (original + JS-added)
        import json
        extracted = json.loads(result.extracted_content)
        names = [p.get('name', '') for p in extracted]
        assert any("JS Added Product" in name for name in names)
# ============================================================================
# EDGE CASE: Concurrent raw: requests
# ============================================================================
@pytest.mark.asyncio
async def test_concurrent_raw_requests():
    """Parallel raw: crawls must not leak state between each other."""
    pages = [
        f"<html><body><div id='test'>Request {i}</div></body></html>"
        for i in range(5)
    ]
    async with AsyncWebCrawler() as wc:
        cfgs = [
            CrawlerRunConfig(
                js_code=f"document.getElementById('test').innerText += ' Modified {i}'"
            )
            for i in range(5)
        ]
        # Fire all five crawls concurrently
        outs = await asyncio.gather(*(
            wc.arun(f"raw:{page}", config=cfg)
            for page, cfg in zip(pages, cfgs)
        ))
        # Each result must carry its own request's content and modification
        for i, res in enumerate(outs):
            assert res.success
            assert f"Request {i}" in res.html
            assert f"Modified {i}" in res.html
# ============================================================================
# EDGE CASE: raw: with base_url for link resolution
# ============================================================================
@pytest.mark.asyncio
async def test_raw_html_with_base_url():
    """Test that base_url is used for link resolution in markdown."""
    html = """
<html><body>
<a href="/page1">Page 1</a>
<a href="/page2">Page 2</a>
<img src="/images/logo.png" alt="Logo">
</body></html>
"""
    async with AsyncWebCrawler() as crawler:
        config = CrawlerRunConfig(
            base_url="https://example.com",
            process_in_browser=True  # Force browser to test base_url handling
        )
        result = await crawler.arun(f"raw:{html}", config=config)
        assert result.success
        # Check markdown has absolute URLs
        if result.markdown:
            # result.markdown may be a plain string or an object exposing
            # .raw_markdown depending on the installed crawl4ai version.
            md = result.markdown.raw_markdown if hasattr(result.markdown, 'raw_markdown') else str(result.markdown)
            # Links should be absolute
            assert "example.com" in md or "/page1" in md
# ============================================================================
# EDGE CASE: raw: with screenshot of complex page
# ============================================================================
@pytest.mark.asyncio
async def test_raw_html_screenshot_complex_page():
    """Test screenshot of complex raw HTML with CSS and JS modifications."""
    html = """
<html>
<head>
<style>
body { font-family: Arial; background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); padding: 40px; }
.card { background: white; padding: 20px; border-radius: 10px; box-shadow: 0 4px 6px rgba(0,0,0,0.1); }
h1 { color: #333; }
</style>
</head>
<body>
<div class="card">
<h1 id="title">Original Title</h1>
<p>This is a test card with styling.</p>
</div>
</body>
</html>
"""
    async with AsyncWebCrawler() as crawler:
        # js_code runs before the screenshot, so the capture must show the
        # modified title, not the original one.
        config = CrawlerRunConfig(
            js_code="document.getElementById('title').innerText = 'Modified Title'",
            screenshot=True
        )
        result = await crawler.arun(f"raw:{html}", config=config)
        assert result.success
        assert result.screenshot is not None
        assert len(result.screenshot) > 1000  # Should be substantial
        assert "Modified Title" in result.html
# ============================================================================
# EDGE CASE: JavaScript that tries to navigate away
# ============================================================================
@pytest.mark.asyncio
async def test_raw_html_js_navigation_blocked():
    """Test that JS trying to navigate doesn't break the crawl."""
    html = """
<html><body>
<div id="content">Original Content</div>
<script>
// Try to navigate away (should be blocked or handled)
// window.location.href = 'https://example.com';
</script>
</body></html>
"""
    async with AsyncWebCrawler() as crawler:
        config = CrawlerRunConfig(
            # Try to navigate via js_code
            js_code=[
                "document.getElementById('content').innerText = 'Before navigation attempt'",
                # Actual navigation attempt commented - would cause issues
                # "window.location.href = 'https://example.com'",
            ]
        )
        result = await crawler.arun(f"raw:{html}", config=config)
        assert result.success
        assert "Before navigation attempt" in result.html
# ============================================================================
# EDGE CASE: Raw HTML with iframes
# ============================================================================
@pytest.mark.asyncio
async def test_raw_html_with_iframes():
    """raw: documents containing iframes are processed without errors."""
    markup = """
<html><body>
<div id="main">Main content</div>
<iframe id="frame1" srcdoc="<html><body><div id='iframe-content'>Iframe Content</div></body></html>"></iframe>
</body></html>
"""
    async with AsyncWebCrawler() as wc:
        cfg = CrawlerRunConfig(
            js_code="document.getElementById('main').innerText = 'Modified main'",
            process_iframes=True,
        )
        res = await wc.arun(f"raw:{markup}", config=cfg)
        assert res.success
        assert "Modified main" in res.html
# ============================================================================
# TRICKY: Protocol inside raw content
# ============================================================================
@pytest.mark.asyncio
async def test_raw_html_with_urls_inside():
    """http(s):// URLs inside raw: content must not confuse URL parsing."""
    markup = """
<html><body>
<a href="http://example.com">Example</a>
<a href="https://google.com">Google</a>
<img src="https://placekitten.com/200/300" alt="Cat">
<div id="test">Test content with URL: https://test.com</div>
</body></html>
"""
    async with AsyncWebCrawler() as wc:
        cfg = CrawlerRunConfig(
            js_code="document.getElementById('test').innerText += ' - Modified'"
        )
        res = await wc.arun(f"raw:{markup}", config=cfg)
        assert res.success
        assert "Modified" in res.html
        assert "http://example.com" in res.html or "example.com" in res.html
# ============================================================================
# TRICKY: Double raw: prefix
# ============================================================================
@pytest.mark.asyncio
async def test_double_raw_prefix():
    """A doubled raw: prefix must not crash the crawler."""
    markup = "<html><body>Content</body></html>"
    async with AsyncWebCrawler() as wc:
        # raw:raw:<html>... - the second raw: becomes part of the content
        res = await wc.arun(f"raw:raw:{markup}")
        # Should either handle gracefully or return "raw:<html>..." as content
        assert res is not None
if __name__ == "__main__":
    # Manual runner: executes a representative subset of the suite directly.
    # FIX: removed the unused `import sys` that the original declared here.
    async def run_tests():
        # Run a few key tests manually
        tests = [
            ("Hash in CSS", test_raw_html_with_hash_in_css),
            ("Unicode", test_raw_html_with_unicode),
            ("Large HTML", test_raw_html_large),
            ("HTTP still works", test_http_urls_still_work),
            ("Concurrent requests", test_concurrent_raw_requests),
            ("Complex screenshot", test_raw_html_screenshot_complex_page),
        ]
        for name, test_fn in tests:
            print(f"\n=== Running: {name} ===")
            try:
                await test_fn()
                # FIX: this f-string literal contained a raw line break
                # (a SyntaxError in Python); rejoined onto one line.
                print(f"β {name} PASSED")
            except Exception as e:
                print(f"β {name} FAILED: {e}")
                import traceback
                traceback.print_exc()
    asyncio.run(run_tests())
| {
"repo_id": "unclecode/crawl4ai",
"file_path": "tests/test_raw_html_edge_cases.py",
"license": "Apache License 2.0",
"lines": 451,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
unclecode/crawl4ai:docs/releases_review/demo_v0.7.8.py | #!/usr/bin/env python3
"""
Crawl4AI v0.7.8 Release Demo - Verification Tests
==================================================
This demo ACTUALLY RUNS and VERIFIES the bug fixes in v0.7.8.
Each test executes real code and validates the fix is working.
Bug Fixes Verified:
1. ProxyConfig JSON serialization (#1629)
2. Configurable backoff parameters (#1269)
3. LLM Strategy input_format support (#1178)
4. Raw HTML URL variable (#1116)
5. Relative URLs after redirects (#1268)
6. pypdf migration (#1412)
7. Pydantic v2 ConfigDict (#678)
8. Docker ContentRelevanceFilter (#1642) - requires Docker
9. Docker .cache permissions (#1638) - requires Docker
10. AdaptiveCrawler query expansion (#1621) - requires LLM API key
11. Import statement formatting (#1181)
Usage:
python docs/releases_review/demo_v0.7.8.py
For Docker tests:
docker run -d -p 11235:11235 --shm-size=1g unclecode/crawl4ai:0.7.8
python docs/releases_review/demo_v0.7.8.py
"""
import asyncio
import json
import sys
import warnings
import os
import tempfile
from typing import Tuple, Optional
from dataclasses import dataclass
# Test results tracking
@dataclass
class TestResult:
    # One row of the final verification summary.
    name: str    # human-readable check name
    issue: str   # GitHub issue reference, e.g. "#1629"
    passed: bool # True when the verification succeeded
    message: str # detail line printed next to the status
    skipped: bool = False  # True when prerequisites (Docker, API key) were missing
# Global accumulator filled by record_result() and summarized at exit.
results: list[TestResult] = []
def print_header(title: str):
    """Print *title* framed by 70-character '=' rules, preceded by a blank line."""
    banner = "=" * 70
    print(f"\n{banner}")
    print(title)
    print(banner)
def print_test(name: str, issue: str):
    """Announce an individual check: a [TEST] line plus a 50-char dashed rule."""
    header = f"\n[TEST] {name} ({issue})"
    print(header)
    print("-" * 50)
def record_result(name: str, issue: str, passed: bool, message: str, skipped: bool = False):
    """Append a TestResult to the module-level `results` list and echo its status."""
    results.append(TestResult(name, issue, passed, message, skipped))
    # Skipped takes precedence over pass/fail in the printed status.
    if skipped:
        status = f" SKIPPED: {message}"
    elif passed:
        status = f" PASSED: {message}"
    else:
        status = f" FAILED: {message}"
    print(status)
# =============================================================================
# TEST 1: ProxyConfig JSON Serialization (#1629)
# =============================================================================
async def test_proxy_config_serialization():
    """
    Verify BrowserConfig.to_dict() properly serializes ProxyConfig to JSON.
    BEFORE: ProxyConfig was included as object, causing JSON serialization to fail
    AFTER: ProxyConfig.to_dict() is called, producing valid JSON
    """
    print_test("ProxyConfig JSON Serialization", "#1629")
    try:
        from crawl4ai import BrowserConfig
        from crawl4ai.async_configs import ProxyConfig
        # Create config with ProxyConfig
        proxy = ProxyConfig(
            server="http://proxy.example.com:8080",
            username="testuser",
            password="testpass"
        )
        browser_config = BrowserConfig(headless=True, proxy_config=proxy)
        # Test 1: to_dict() should return dict for proxy_config
        # (each failed check records a result and returns early)
        config_dict = browser_config.to_dict()
        proxy_dict = config_dict.get('proxy_config')
        if not isinstance(proxy_dict, dict):
            record_result("ProxyConfig Serialization", "#1629", False,
                          f"proxy_config is {type(proxy_dict)}, expected dict")
            return
        # Test 2: Should be JSON serializable (round-trip dumps/loads)
        try:
            json_str = json.dumps(config_dict)
            json.loads(json_str)  # Verify valid JSON
        except (TypeError, json.JSONDecodeError) as e:
            record_result("ProxyConfig Serialization", "#1629", False,
                          f"JSON serialization failed: {e}")
            return
        # Test 3: Verify proxy data is preserved
        if proxy_dict.get('server') != "http://proxy.example.com:8080":
            record_result("ProxyConfig Serialization", "#1629", False,
                          "Proxy server not preserved in serialization")
            return
        record_result("ProxyConfig Serialization", "#1629", True,
                      "BrowserConfig with ProxyConfig serializes to valid JSON")
    except Exception as e:
        record_result("ProxyConfig Serialization", "#1629", False, f"Exception: {e}")
# =============================================================================
# TEST 2: Configurable Backoff Parameters (#1269)
# =============================================================================
async def test_configurable_backoff():
    """
    Verify LLMConfig accepts and stores backoff configuration parameters.
    BEFORE: Backoff was hardcoded (delay=2, attempts=3, factor=2)
    AFTER: LLMConfig accepts backoff_base_delay, backoff_max_attempts, backoff_exponential_factor
    """
    print_test("Configurable Backoff Parameters", "#1269")
    try:
        from crawl4ai import LLMConfig
        # Test 1: Default values must match the previously hardcoded ones
        default_config = LLMConfig(provider="openai/gpt-4o-mini")
        if default_config.backoff_base_delay != 2:
            record_result("Configurable Backoff", "#1269", False,
                          f"Default base_delay is {default_config.backoff_base_delay}, expected 2")
            return
        if default_config.backoff_max_attempts != 3:
            record_result("Configurable Backoff", "#1269", False,
                          f"Default max_attempts is {default_config.backoff_max_attempts}, expected 3")
            return
        if default_config.backoff_exponential_factor != 2:
            record_result("Configurable Backoff", "#1269", False,
                          f"Default exponential_factor is {default_config.backoff_exponential_factor}, expected 2")
            return
        # Test 2: Custom values are stored verbatim
        custom_config = LLMConfig(
            provider="openai/gpt-4o-mini",
            backoff_base_delay=5,
            backoff_max_attempts=10,
            backoff_exponential_factor=3
        )
        if custom_config.backoff_base_delay != 5:
            record_result("Configurable Backoff", "#1269", False,
                          f"Custom base_delay is {custom_config.backoff_base_delay}, expected 5")
            return
        if custom_config.backoff_max_attempts != 10:
            record_result("Configurable Backoff", "#1269", False,
                          f"Custom max_attempts is {custom_config.backoff_max_attempts}, expected 10")
            return
        if custom_config.backoff_exponential_factor != 3:
            record_result("Configurable Backoff", "#1269", False,
                          f"Custom exponential_factor is {custom_config.backoff_exponential_factor}, expected 3")
            return
        # Test 3: to_dict() includes backoff params
        config_dict = custom_config.to_dict()
        if 'backoff_base_delay' not in config_dict:
            record_result("Configurable Backoff", "#1269", False,
                          "backoff_base_delay missing from to_dict()")
            return
        record_result("Configurable Backoff", "#1269", True,
                      "LLMConfig accepts and stores custom backoff parameters")
    except Exception as e:
        record_result("Configurable Backoff", "#1269", False, f"Exception: {e}")
# =============================================================================
# TEST 3: LLM Strategy Input Format (#1178)
# =============================================================================
async def test_llm_input_format():
    """
    Verify LLMExtractionStrategy accepts input_format parameter.

    BEFORE: Always used markdown input
    AFTER: Supports "markdown", "html", "fit_markdown", "cleaned_html", "fit_html"
    """
    print_test("LLM Strategy Input Format", "#1178")
    try:
        from crawl4ai import LLMExtractionStrategy, LLMConfig

        llm_config = LLMConfig(provider="openai/gpt-4o-mini")

        # Test 1: Default is markdown
        default_strategy = LLMExtractionStrategy(
            llm_config=llm_config,
            instruction="Extract data"
        )
        if default_strategy.input_format != "markdown":
            record_result("LLM Input Format", "#1178", False,
                          f"Default input_format is '{default_strategy.input_format}', expected 'markdown'")
            return

        # Test 2: Can set to html
        html_strategy = LLMExtractionStrategy(
            llm_config=llm_config,
            instruction="Extract data",
            input_format="html"
        )
        if html_strategy.input_format != "html":
            record_result("LLM Input Format", "#1178", False,
                          f"HTML input_format is '{html_strategy.input_format}', expected 'html'")
            return

        # Test 3: Can set to fit_markdown
        fit_strategy = LLMExtractionStrategy(
            llm_config=llm_config,
            instruction="Extract data",
            input_format="fit_markdown"
        )
        if fit_strategy.input_format != "fit_markdown":
            record_result("LLM Input Format", "#1178", False,
                          f"fit_markdown input_format is '{fit_strategy.input_format}'")
            return

        record_result("LLM Input Format", "#1178", True,
                      "LLMExtractionStrategy accepts all input_format options")
    except Exception as e:
        # Any unexpected failure (import error, constructor change) fails the test
        record_result("LLM Input Format", "#1178", False, f"Exception: {e}")
# =============================================================================
# TEST 4: Raw HTML URL Variable (#1116)
# =============================================================================
async def test_raw_html_url_variable():
    """
    Verify that raw: prefix URLs pass "Raw HTML" to extraction strategy.

    BEFORE: Entire HTML blob was passed as URL parameter
    AFTER: "Raw HTML" string is passed as URL parameter
    """
    print_test("Raw HTML URL Variable", "#1116")
    try:
        from crawl4ai import AsyncWebCrawler, CrawlerRunConfig
        from crawl4ai.extraction_strategy import ExtractionStrategy

        # Custom strategy to capture what URL is passed.
        # captured_url is class-level so it survives after the crawl.
        class URLCapturingStrategy(ExtractionStrategy):
            captured_url = None

            def extract(self, url: str, html: str, *args, **kwargs):
                URLCapturingStrategy.captured_url = url
                return [{"content": "test"}]

        html_content = "<html><body><h1>Test</h1></body></html>"
        strategy = URLCapturingStrategy()

        async with AsyncWebCrawler() as crawler:
            # NOTE(review): result itself is unused; the strategy's side
            # effect (captured_url) is what this test inspects.
            result = await crawler.arun(
                url=f"raw:{html_content}",
                config=CrawlerRunConfig(
                    extraction_strategy=strategy
                )
            )

        captured = URLCapturingStrategy.captured_url
        if captured is None:
            record_result("Raw HTML URL Variable", "#1116", False,
                          "Extraction strategy was not called")
            return
        if captured == html_content or captured.startswith("<html"):
            record_result("Raw HTML URL Variable", "#1116", False,
                          f"URL contains HTML content instead of 'Raw HTML': {captured[:50]}...")
            return
        if captured != "Raw HTML":
            record_result("Raw HTML URL Variable", "#1116", False,
                          f"URL is '{captured}', expected 'Raw HTML'")
            return
        record_result("Raw HTML URL Variable", "#1116", True,
                      "Extraction strategy receives 'Raw HTML' as URL for raw: prefix")
    except Exception as e:
        record_result("Raw HTML URL Variable", "#1116", False, f"Exception: {e}")
# =============================================================================
# TEST 5: Relative URLs After Redirects (#1268)
# =============================================================================
async def test_redirect_url_handling():
    """
    Verify that redirected_url reflects the final URL after JS navigation.

    BEFORE: redirected_url was the original URL, not the final URL
    AFTER: redirected_url is captured after JS execution completes
    """
    print_test("Relative URLs After Redirects", "#1268")
    try:
        from crawl4ai import AsyncWebCrawler, CrawlerRunConfig

        # Test with a URL that we know the final state of.
        # We'll use httpbin which doesn't redirect, but verify the mechanism works.
        test_url = "https://httpbin.org/html"
        async with AsyncWebCrawler() as crawler:
            result = await crawler.arun(
                url=test_url,
                config=CrawlerRunConfig()
            )

            # Verify redirected_url is populated
            if not result.redirected_url:
                record_result("Redirect URL Handling", "#1268", False,
                              "redirected_url is empty")
                return
            # For non-redirecting URL, should match original or be the final URL
            if not result.redirected_url.startswith("https://httpbin.org"):
                record_result("Redirect URL Handling", "#1268", False,
                              f"redirected_url is unexpected: {result.redirected_url}")
                return

            # Verify links are present and resolved against the final URL
            if result.links:
                # Check that internal links have full URLs
                internal_links = result.links.get('internal', [])
                external_links = result.links.get('external', [])
                all_links = internal_links + external_links
                for link in all_links[:5]:  # Check first 5 links
                    href = link.get('href', '')
                    # A relative href here would mean base-URL resolution
                    # used the pre-redirect URL.
                    if href and not href.startswith(('http://', 'https://', 'mailto:', 'tel:', '#', 'javascript:')):
                        record_result("Redirect URL Handling", "#1268", False,
                                      f"Link not resolved to absolute URL: {href}")
                        return

            record_result("Redirect URL Handling", "#1268", True,
                          f"redirected_url correctly captured: {result.redirected_url}")
    except Exception as e:
        record_result("Redirect URL Handling", "#1268", False, f"Exception: {e}")
# =============================================================================
# TEST 6: pypdf Migration (#1412)
# =============================================================================
async def test_pypdf_migration():
    """
    Verify pypdf is used instead of deprecated PyPDF2.

    BEFORE: Used PyPDF2 (deprecated since 2022)
    AFTER: Uses pypdf (actively maintained)
    """
    print_test("pypdf Migration", "#1412")
    try:
        # Test 1: pypdf should be importable (if pdf extra is installed)
        try:
            import pypdf
            pypdf_available = True
            pypdf_version = pypdf.__version__
        except ImportError:
            pypdf_available = False
            pypdf_version = None

        # Test 2: PyPDF2 should NOT be imported by crawl4ai.
        # Check the PDF processor's source for which library it imports.
        try:
            from crawl4ai.processors.pdf import processor
            # FIX: the original used a bare open(...).read(), leaking the
            # file handle; read through a context manager instead.
            with open(processor.__file__, encoding="utf-8") as src:
                processor_source = src.read()
            uses_pypdf = 'from pypdf' in processor_source or 'import pypdf' in processor_source
            uses_pypdf2 = 'from PyPDF2' in processor_source or 'import PyPDF2' in processor_source
            if uses_pypdf2 and not uses_pypdf:
                record_result("pypdf Migration", "#1412", False,
                              "PDF processor still uses PyPDF2")
                return
            if uses_pypdf:
                record_result("pypdf Migration", "#1412", True,
                              f"PDF processor uses pypdf{' v' + pypdf_version if pypdf_version else ''}")
                return
            else:
                record_result("pypdf Migration", "#1412", True,
                              "PDF processor found, pypdf dependency updated", skipped=not pypdf_available)
                return
        except ImportError:
            # PDF processor not available (optional extra not installed)
            if pypdf_available:
                record_result("pypdf Migration", "#1412", True,
                              f"pypdf v{pypdf_version} is installed (PDF processor not loaded)")
            else:
                record_result("pypdf Migration", "#1412", True,
                              "PDF support not installed (optional feature)", skipped=True)
            return
    except Exception as e:
        record_result("pypdf Migration", "#1412", False, f"Exception: {e}")
# =============================================================================
# TEST 7: Pydantic v2 ConfigDict (#678)
# =============================================================================
async def test_pydantic_configdict():
    """
    Verify no Pydantic deprecation warnings for Config class.

    BEFORE: Used deprecated 'class Config' syntax
    AFTER: Uses ConfigDict for Pydantic v2 compatibility
    """
    print_test("Pydantic v2 ConfigDict", "#678")
    try:
        # FIX: dropped the unused `import pydantic`; only the version
        # string is actually referenced.
        from pydantic import __version__ as pydantic_version

        # Capture warnings emitted while importing the models under test
        with warnings.catch_warnings(record=True) as caught:
            warnings.simplefilter("always", DeprecationWarning)
            # Import models that might have Config classes
            from crawl4ai.models import CrawlResult, MarkdownGenerationResult
            from crawl4ai.async_configs import CrawlerRunConfig, BrowserConfig

        # Filter for Pydantic-related deprecation warnings.
        # FIX: renamed the loop variables; the original comprehension
        # shadowed the caught-warnings list `w`.
        pydantic_warnings = [
            warning for warning in caught
            if 'pydantic' in str(warning.message).lower()
            or 'config' in str(warning.message).lower()
        ]
        if pydantic_warnings:
            warning_msgs = [str(pw.message) for pw in pydantic_warnings[:3]]
            record_result("Pydantic ConfigDict", "#678", False,
                          f"Deprecation warnings: {warning_msgs}")
            return

        # Verify models work correctly
        try:
            # Instantiation itself must not raise; results are unused.
            _config = CrawlerRunConfig()
            _browser = BrowserConfig()
            record_result("Pydantic ConfigDict", "#678", True,
                          f"No deprecation warnings with Pydantic v{pydantic_version}")
        except Exception as e:
            record_result("Pydantic ConfigDict", "#678", False,
                          f"Model instantiation failed: {e}")
    except Exception as e:
        record_result("Pydantic ConfigDict", "#678", False, f"Exception: {e}")
# =============================================================================
# TEST 8: Docker ContentRelevanceFilter (#1642)
# =============================================================================
async def test_docker_content_filter():
    """
    Verify ContentRelevanceFilter deserializes correctly in Docker API.

    BEFORE: Docker API failed to import/instantiate ContentRelevanceFilter
    AFTER: Filter is properly exported and deserializable
    """
    print_test("Docker ContentRelevanceFilter", "#1642")

    # First verify the fix in local code
    try:
        # Test 1: ContentRelevanceFilter should be importable from crawl4ai
        from crawl4ai import ContentRelevanceFilter
        # Test 2: Should be instantiable
        filter_instance = ContentRelevanceFilter(
            query="test query",
            threshold=0.3
        )
        if not hasattr(filter_instance, 'query'):
            record_result("Docker ContentRelevanceFilter", "#1642", False,
                          "ContentRelevanceFilter missing query attribute")
            return
    except ImportError as e:
        record_result("Docker ContentRelevanceFilter", "#1642", False,
                      f"ContentRelevanceFilter not exported: {e}")
        return
    except Exception as e:
        record_result("Docker ContentRelevanceFilter", "#1642", False,
                      f"ContentRelevanceFilter instantiation failed: {e}")
        return

    # Test Docker API if available
    try:
        import httpx
        # Health probe: any failure here is treated as "Docker not running"
        # and skips the API half of the test.
        async with httpx.AsyncClient(timeout=5.0) as client:
            response = await client.get("http://localhost:11235/health")
            if response.status_code != 200:
                raise Exception("Docker not available")
        # Docker is running, test the API with a filter_chain payload
        async with httpx.AsyncClient(timeout=30.0) as client:
            request = {
                "urls": ["https://httpbin.org/html"],
                "crawler_config": {
                    "deep_crawl_strategy": {
                        "type": "BFSDeepCrawlStrategy",
                        "max_depth": 1,
                        "filter_chain": [
                            {
                                "type": "ContentTypeFilter",
                                "allowed_types": ["text/html"]
                            }
                        ]
                    }
                }
            }
            response = await client.post(
                "http://localhost:11235/crawl",
                json=request
            )
            if response.status_code == 200:
                record_result("Docker ContentRelevanceFilter", "#1642", True,
                              "Filter deserializes correctly in Docker API")
            else:
                record_result("Docker ContentRelevanceFilter", "#1642", False,
                              f"Docker API returned {response.status_code}: {response.text[:100]}")
    except ImportError:
        # httpx missing: the local-code checks above already passed
        record_result("Docker ContentRelevanceFilter", "#1642", True,
                      "ContentRelevanceFilter exportable (Docker test skipped - httpx not installed)",
                      skipped=True)
    except Exception as e:
        # Docker unreachable: still a pass for the local-code part
        record_result("Docker ContentRelevanceFilter", "#1642", True,
                      f"ContentRelevanceFilter exportable (Docker test skipped: {e})",
                      skipped=True)
# =============================================================================
# TEST 9: Docker Cache Permissions (#1638)
# =============================================================================
async def test_docker_cache_permissions():
    """
    Verify Docker image has correct .cache folder permissions.

    This test requires Docker container to be running; otherwise it is
    recorded as skipped.
    """
    print_test("Docker Cache Permissions", "#1638")
    try:
        import httpx
        # Health probe; raises (and skips the test) if Docker is down
        async with httpx.AsyncClient(timeout=5.0) as client:
            response = await client.get("http://localhost:11235/health")
            if response.status_code != 200:
                raise Exception("Docker not available")

        # Test by making a crawl request with caching enabled; a cache
        # directory the container cannot write to surfaces as a
        # permission-denied error in the response.
        async with httpx.AsyncClient(timeout=60.0) as client:
            request = {
                "urls": ["https://httpbin.org/html"],
                "crawler_config": {
                    "cache_mode": "enabled"
                }
            }
            response = await client.post(
                "http://localhost:11235/crawl",
                json=request
            )
            if response.status_code == 200:
                result = response.json()
                # Check if there were permission errors in the payload
                if "permission" in str(result).lower() and "denied" in str(result).lower():
                    record_result("Docker Cache Permissions", "#1638", False,
                                  "Permission denied error in response")
                else:
                    record_result("Docker Cache Permissions", "#1638", True,
                                  "Crawl with caching succeeded in Docker")
            else:
                error_text = response.text[:200]
                if "permission" in error_text.lower():
                    record_result("Docker Cache Permissions", "#1638", False,
                                  f"Permission error: {error_text}")
                else:
                    record_result("Docker Cache Permissions", "#1638", False,
                                  f"Request failed: {response.status_code}")
    except ImportError:
        record_result("Docker Cache Permissions", "#1638", True,
                      "Skipped - httpx not installed", skipped=True)
    except Exception as e:
        record_result("Docker Cache Permissions", "#1638", True,
                      f"Skipped - Docker not available: {e}", skipped=True)
# =============================================================================
# TEST 10: AdaptiveCrawler Query Expansion (#1621)
# =============================================================================
async def test_adaptive_crawler_embedding():
    """
    Verify EmbeddingStrategy LLM code is uncommented and functional.

    BEFORE: LLM call was commented out, using hardcoded mock data
    AFTER: Actually calls LLM for query expansion
    """
    print_test("AdaptiveCrawler Query Expansion", "#1621")
    try:
        # Read the source file to verify the fix
        import crawl4ai.adaptive_crawler as adaptive_module
        source_file = adaptive_module.__file__
        with open(source_file, 'r') as f:
            source_code = f.read()

        # Find the EmbeddingStrategy section
        if 'class EmbeddingStrategy' not in source_code:
            record_result("AdaptiveCrawler Query Expansion", "#1621", True,
                          "EmbeddingStrategy not in adaptive_crawler (may have moved)",
                          skipped=True)
            return

        # Scan the lines inside EmbeddingStrategy for an uncommented
        # perform_completion_with_backoff call.
        # FIX: removed the dead `mock_data_commented` tracking and the
        # unused enumerate index - neither influenced the verdict.
        in_embedding_strategy = False
        found_llm_call = False
        for line in source_code.split('\n'):
            if 'class EmbeddingStrategy' in line:
                in_embedding_strategy = True
            elif in_embedding_strategy and line.strip().startswith('class '):
                # Next class definition ends the EmbeddingStrategy section
                in_embedding_strategy = False
            if in_embedding_strategy:
                if 'perform_completion_with_backoff' in line and not line.strip().startswith('#'):
                    found_llm_call = True

        if found_llm_call:
            record_result("AdaptiveCrawler Query Expansion", "#1621", True,
                          "LLM call is active in EmbeddingStrategy")
        else:
            # The strategy might be structured differently; accept the call
            # anywhere in the module as evidence of the fix.
            if 'perform_completion_with_backoff' in source_code:
                record_result("AdaptiveCrawler Query Expansion", "#1621", True,
                              "perform_completion_with_backoff found in module")
            else:
                record_result("AdaptiveCrawler Query Expansion", "#1621", False,
                              "LLM call not found or still commented out")
    except Exception as e:
        record_result("AdaptiveCrawler Query Expansion", "#1621", False, f"Exception: {e}")
# =============================================================================
# TEST 11: Import Statement Formatting (#1181)
# =============================================================================
async def test_import_formatting():
    """
    Verify code extraction properly formats import statements.

    BEFORE: Import statements were concatenated without newlines
    AFTER: Import statements have proper newline separation
    """
    print_test("Import Statement Formatting", "#1181")
    try:
        from crawl4ai import AsyncWebCrawler, CrawlerRunConfig

        # Create HTML with code containing imports
        html_with_code = """
<html>
<body>
<pre><code>
import os
import sys
from pathlib import Path
from typing import List, Dict
def main():
pass
</code></pre>
</body>
</html>
"""
        async with AsyncWebCrawler() as crawler:
            result = await crawler.arun(
                url=f"raw:{html_with_code}",
                config=CrawlerRunConfig()
            )

        markdown = result.markdown.raw_markdown if result.markdown else ""

        # Check that imports are not concatenated on the same line.
        # Bad: "import osimport sys" (no newline between statements)
        # This is the actual bug - statements getting merged on same line.
        bad_patterns = [
            "import os import sys",  # Space but no newline
            "import osimport sys",  # No space or newline
            "import os from pathlib",  # Space but no newline
            "import osfrom pathlib",  # No space or newline
        ]
        markdown_single_line = markdown.replace('\n', ' ')  # Convert newlines to spaces
        for pattern in bad_patterns:
            # Check if pattern exists without proper line separation.
            # NOTE(review): with whitespace removed, correctly-separated
            # imports also match this substring test, so the per-line check
            # below is what actually decides pass/fail - confirm intended.
            if pattern.replace(' ', '') in markdown_single_line.replace(' ', ''):
                # Verify it's actually on same line (not just adjacent after newline removal)
                lines = markdown.split('\n')
                for line in lines:
                    if 'import' in line.lower():
                        # Count import statements on this line; more than one
                        # means statements were merged.
                        import_count = line.lower().count('import ')
                        if import_count > 1:
                            record_result("Import Formatting", "#1181", False,
                                          f"Multiple imports on same line: {line[:60]}...")
                            return

        # Verify imports are present at all
        if "import" in markdown.lower():
            record_result("Import Formatting", "#1181", True,
                          "Import statements are properly line-separated")
        else:
            record_result("Import Formatting", "#1181", True,
                          "No import statements found to verify (test HTML may have changed)")
    except Exception as e:
        record_result("Import Formatting", "#1181", False, f"Exception: {e}")
# =============================================================================
# COMPREHENSIVE CRAWL TEST
# =============================================================================
async def test_comprehensive_crawl():
    """
    Run a comprehensive crawl to verify overall stability.

    Performs one headless crawl and checks that success flag, HTML,
    markdown, and redirected_url are all populated.
    """
    print_test("Comprehensive Crawl Test", "Overall")
    try:
        from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, BrowserConfig

        async with AsyncWebCrawler(config=BrowserConfig(headless=True)) as crawler:
            result = await crawler.arun(
                url="https://httpbin.org/html",
                config=CrawlerRunConfig()
            )

            # Verify result: accumulate human-readable evidence of each check
            checks = []
            if result.success:
                checks.append("success=True")
            else:
                record_result("Comprehensive Crawl", "Overall", False,
                              f"Crawl failed: {result.error_message}")
                return
            if result.html and len(result.html) > 100:
                checks.append(f"html={len(result.html)} chars")
            if result.markdown and result.markdown.raw_markdown:
                checks.append(f"markdown={len(result.markdown.raw_markdown)} chars")
            if result.redirected_url:
                checks.append("redirected_url present")

            record_result("Comprehensive Crawl", "Overall", True,
                          f"All checks passed: {', '.join(checks)}")
    except Exception as e:
        record_result("Comprehensive Crawl", "Overall", False, f"Exception: {e}")
# =============================================================================
# MAIN
# =============================================================================
def print_summary():
    """Print a summary of all recorded results; True when nothing failed."""
    print_header("TEST RESULTS SUMMARY")

    # Tally outcomes in a single pass over the global results list.
    passed = failed = skipped = 0
    for outcome in results:
        if outcome.skipped:
            skipped += 1
        elif outcome.passed:
            passed += 1
        else:
            failed += 1

    print(f"\nTotal: {len(results)} tests")
    print(f" Passed: {passed}")
    print(f" Failed: {failed}")
    print(f" Skipped: {skipped}")

    if failed > 0:
        print("\nFailed Tests:")
        for outcome in results:
            if not outcome.passed and not outcome.skipped:
                print(f" - {outcome.name} ({outcome.issue}): {outcome.message}")

    if skipped > 0:
        print("\nSkipped Tests:")
        for outcome in results:
            if outcome.skipped:
                print(f" - {outcome.name} ({outcome.issue}): {outcome.message}")

    print("\n" + "=" * 70)
    if failed == 0:
        print("All tests passed! v0.7.8 bug fixes verified.")
    else:
        print(f"WARNING: {failed} test(s) failed!")
    print("=" * 70)
    return failed == 0
async def main():
    """Run all verification tests and return a process exit code (0 = pass)."""
    print_header("Crawl4AI v0.7.8 - Bug Fix Verification Tests")
    print("Running actual tests to verify bug fixes...")

    # Run all tests in a fixed order; each entry maps to a GitHub issue.
    tests = [
        test_proxy_config_serialization,  # #1629
        test_configurable_backoff,  # #1269
        test_llm_input_format,  # #1178
        test_raw_html_url_variable,  # #1116
        test_redirect_url_handling,  # #1268
        test_pypdf_migration,  # #1412
        test_pydantic_configdict,  # #678
        test_docker_content_filter,  # #1642
        test_docker_cache_permissions,  # #1638
        test_adaptive_crawler_embedding,  # #1621
        test_import_formatting,  # #1181
        test_comprehensive_crawl,  # Overall
    ]
    for test_func in tests:
        try:
            await test_func()
        except Exception as e:
            # A crashed test is recorded as a failure instead of aborting
            # the whole suite.
            print(f"\nTest {test_func.__name__} crashed: {e}")
            results.append(TestResult(
                test_func.__name__,
                "Unknown",
                False,
                f"Crashed: {e}"
            ))

    # Print summary
    all_passed = print_summary()
    return 0 if all_passed else 1
if __name__ == "__main__":
    try:
        exit_code = asyncio.run(main())
        sys.exit(exit_code)
    except KeyboardInterrupt:
        print("\n\nTests interrupted by user.")
        sys.exit(1)
    except Exception as e:
        # Catch-all so a crash in the harness itself still prints a traceback
        print(f"\n\nTest suite failed: {e}")
        import traceback
        traceback.print_exc()
        sys.exit(1)
| {
"repo_id": "unclecode/crawl4ai",
"file_path": "docs/releases_review/demo_v0.7.8.py",
"license": "Apache License 2.0",
"lines": 735,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
unclecode/crawl4ai:tests/async/test_redirect_url_resolution.py | """Test delayed redirect WITH wait_for - does link resolution use correct URL?"""
import asyncio
import threading
from http.server import HTTPServer, SimpleHTTPRequestHandler
class RedirectTestHandler(SimpleHTTPRequestHandler):
    """Tiny test server: /page-a JS-redirects to /redirect-target/ after 200ms."""

    def log_message(self, format, *args):
        # Silence per-request logging so test output stays readable
        pass

    def do_GET(self):
        if self.path == "/page-a":
            # Page that redirects via JavaScript after a 200ms delay
            self.send_response(200)
            self.send_header("Content-type", "text/html")
            self.end_headers()
            content = """
<!DOCTYPE html>
<html>
<head><title>Page A</title></head>
<body>
<h1>Page A - Will redirect after 200ms</h1>
<script>
setTimeout(function() {
window.location.href = '/redirect-target/';
}, 200);
</script>
</body>
</html>
"""
            self.wfile.write(content.encode())
        elif self.path.startswith("/redirect-target"):
            # Redirect destination with relative links (subpage-1/2) whose
            # resolution against the final URL is what the test checks
            self.send_response(200)
            self.send_header("Content-type", "text/html")
            self.end_headers()
            content = """
<!DOCTYPE html>
<html>
<head><title>Redirect Target</title></head>
<body>
<h1>Redirect Target</h1>
<nav id="target-nav">
<a href="subpage-1">Subpage 1</a>
<a href="subpage-2">Subpage 2</a>
</nav>
</body>
</html>
"""
            self.wfile.write(content.encode())
        else:
            # Anything else is a 404
            self.send_response(404)
            self.end_headers()
async def main():
    """Start the local redirecting server, crawl it, and report URL handling bugs."""
    # allow_reuse_address lets repeated test runs rebind the port quickly
    class ReuseAddrHTTPServer(HTTPServer):
        allow_reuse_address = True

    server = ReuseAddrHTTPServer(("localhost", 8769), RedirectTestHandler)
    thread = threading.Thread(target=server.serve_forever)
    thread.daemon = True
    thread.start()

    try:
        # FIX: removed the hard-coded developer-machine sys.path.insert
        # ('/Users/nasrin/...') and the unused `import socket`; crawl4ai
        # must be importable from the environment like any other test.
        from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig

        print("=" * 60)
        print("TEST: Delayed JS redirect WITH wait_for='css:#target-nav'")
        print("This waits for the redirect to complete")
        print("=" * 60)

        browser_config = BrowserConfig(headless=True, verbose=False)
        crawl_config = CrawlerRunConfig(
            cache_mode="bypass",
            wait_for="css:#target-nav"  # Wait for element on redirect target
        )
        async with AsyncWebCrawler(config=browser_config) as crawler:
            result = await crawler.arun(
                url="http://localhost:8769/page-a",
                config=crawl_config
            )

            print("Original URL: http://localhost:8769/page-a")
            print(f"Redirected URL returned: {result.redirected_url}")
            print(f"HTML contains 'Redirect Target': {'Redirect Target' in result.html}")
            print()
            if "/redirect-target" in (result.redirected_url or ""):
                print("β redirected_url is CORRECT")
            else:
                print("β BUG #1: redirected_url is WRONG - still shows original URL!")

            # Check links: relative hrefs must resolve against the
            # post-redirect URL, not the original page
            all_links = []
            if isinstance(result.links, dict):
                all_links = result.links.get("internal", []) + result.links.get("external", [])

            print(f"\nLinks found ({len(all_links)} total):")
            bug_found = False
            for link in all_links:
                href = link.get("href", "") if isinstance(link, dict) else getattr(link, 'href', "")
                if "subpage" in href:
                    print(f" {href}")
                    if "/page-a/" in href:
                        print(" ^^^ BUG #2: Link resolved with WRONG base URL!")
                        bug_found = True
                    elif "/redirect-target/" in href:
                        print(" ^^^ CORRECT")

            if not bug_found and all_links:
                print("\nβ Link resolution is CORRECT")
    finally:
        server.shutdown()
if __name__ == "__main__":
    # Drive the async entry point when run as a script
    asyncio.run(main())
| {
"repo_id": "unclecode/crawl4ai",
"file_path": "tests/async/test_redirect_url_resolution.py",
"license": "Apache License 2.0",
"lines": 103,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
unclecode/crawl4ai:docs/examples/cloud_browser/scrapeless_browser.py | import json
import asyncio
from urllib.parse import quote, urlencode
from crawl4ai import CrawlerRunConfig, BrowserConfig, AsyncWebCrawler
# Scrapeless provides a free anti-detection fingerprint browser client and cloud browsers:
# https://www.scrapeless.com/en/blog/scrapeless-nstbrowser-strategic-integration
async def main():
    """Crawl scrapeless.com through a Scrapeless cloud browser over CDP."""
    # customize browser fingerprint
    fingerprint = {
        "userAgent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.1.2.3 Safari/537.36",
        "platform": "Windows",
        "screen": {
            "width": 1280, "height": 1024
        },
        "localization": {
            "languages": ["zh-HK", "en-US", "en"], "timezone": "Asia/Hong_Kong",
        }
    }
    # The fingerprint travels as a URL-encoded JSON query parameter
    fingerprint_json = json.dumps(fingerprint)
    encoded_fingerprint = quote(fingerprint_json)

    scrapeless_params = {
        "token": "your token",
        "sessionTTL": 1000,
        "sessionName": "Demo",
        "fingerprint": encoded_fingerprint,
        # Sets the target country/region for the proxy, sending requests via an IP address from that region. You can specify a country code (e.g., US for the United States, GB for the United Kingdom, ANY for any country). See country codes for all supported options.
        # "proxyCountry": "ANY",
        # create profile on scrapeless
        # "profileId": "your profileId",
        # For more usage details, please refer to https://docs.scrapeless.com/en/scraping-browser/quickstart/getting-started
    }
    query_string = urlencode(scrapeless_params)
    scrapeless_connection_url = f"wss://browser.scrapeless.com/api/v2/browser?{query_string}"

    # Connect Crawl4AI to the remote browser via CDP instead of launching locally
    async with AsyncWebCrawler(
        config=BrowserConfig(
            headless=False,
            browser_mode="cdp",
            cdp_url=scrapeless_connection_url,
        )
    ) as crawler:
        result = await crawler.arun(
            url="https://www.scrapeless.com/en",
            config=CrawlerRunConfig(
                wait_for="css:.content",
                scan_full_page=True,
            ),
        )
        print("-" * 20)
        print(f'Status Code: {result.status_code}')
        print("-" * 20)
        print(f'Title: {result.metadata["title"]}')
        print(f'Description: {result.metadata["description"]}')
        print("-" * 20)
if __name__ == "__main__":
    # Drive the async entry point when run as a script
    asyncio.run(main())
| {
"repo_id": "unclecode/crawl4ai",
"file_path": "docs/examples/cloud_browser/scrapeless_browser.py",
"license": "Apache License 2.0",
"lines": 55,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
unclecode/crawl4ai:deploy/docker/monitor.py | # monitor.py - Real-time monitoring stats with Redis persistence
import time
import json
import asyncio
from typing import Dict, List, Optional
from datetime import datetime, timezone
from collections import deque
from redis import asyncio as aioredis
from utils import get_container_memory_percent
import psutil
import logging
logger = logging.getLogger(__name__)
class MonitorStats:
"""Tracks real-time server stats with Redis persistence."""
def __init__(self, redis: aioredis.Redis):
    """Initialize in-memory stat stores; *redis* is used for persistence only."""
    self.redis = redis
    self.start_time = time.time()  # process start, for uptime reporting

    # In-memory queues (fast reads, Redis backup)
    self.active_requests: Dict[str, Dict] = {}  # id -> request info
    self.completed_requests: deque = deque(maxlen=100)  # Last 100
    self.janitor_events: deque = deque(maxlen=100)
    self.errors: deque = deque(maxlen=100)

    # Endpoint stats (persisted in Redis)
    self.endpoint_stats: Dict[str, Dict] = {}  # endpoint -> {count, total_time, errors, ...}

    # Background persistence queue (max 10 pending persist requests)
    self._persist_queue: asyncio.Queue = asyncio.Queue(maxsize=10)
    self._persist_worker_task: Optional[asyncio.Task] = None

    # Timeline data (5min window, 5s resolution = 60 points)
    self.memory_timeline: deque = deque(maxlen=60)
    self.requests_timeline: deque = deque(maxlen=60)
    self.browser_timeline: deque = deque(maxlen=60)
async def track_request_start(self, request_id: str, endpoint: str, url: str, config: Dict = None):
    """Register a newly started request and bump its endpoint counter."""
    # Snapshot everything needed to compute elapsed time / memory delta later.
    self.active_requests[request_id] = {
        "id": request_id,
        "endpoint": endpoint,
        "url": url[:100],  # Truncate long URLs
        "start_time": time.time(),
        "config_sig": (config or {}).get("sig", "default"),
        "mem_start": psutil.Process().memory_info().rss / (1024 * 1024),
    }

    # Lazily create the per-endpoint aggregate record, then count the hit.
    stats = self.endpoint_stats.setdefault(endpoint, {
        "count": 0, "total_time": 0, "errors": 0,
        "pool_hits": 0, "success": 0,
    })
    stats["count"] += 1

    # Queue persistence (handled by background worker)
    try:
        self._persist_queue.put_nowait(True)
    except asyncio.QueueFull:
        logger.warning("Persistence queue full, skipping")
async def track_request_end(self, request_id: str, success: bool, error: str = None,
                            pool_hit: bool = True, status_code: int = 200):
    """Track request completion."""
    # Unknown id: start was never tracked (or already completed) - ignore
    if request_id not in self.active_requests:
        return

    req_info = self.active_requests.pop(request_id)
    end_time = time.time()
    elapsed = end_time - req_info["start_time"]
    mem_end = psutil.Process().memory_info().rss / (1024 * 1024)
    mem_delta = mem_end - req_info["mem_start"]  # MB grown during this request

    # Update per-endpoint aggregates
    endpoint = req_info["endpoint"]
    if endpoint in self.endpoint_stats:
        self.endpoint_stats[endpoint]["total_time"] += elapsed
        if success:
            self.endpoint_stats[endpoint]["success"] += 1
        else:
            self.endpoint_stats[endpoint]["errors"] += 1
        if pool_hit:
            self.endpoint_stats[endpoint]["pool_hits"] += 1

    # Add to completed queue (bounded deque; oldest entries drop off)
    completed = {
        **req_info,
        "end_time": end_time,
        "elapsed": round(elapsed, 2),
        "mem_delta": round(mem_delta, 1),
        "success": success,
        "error": error,
        "status_code": status_code,
        "pool_hit": pool_hit
    }
    self.completed_requests.append(completed)

    # Track errors in their own rolling log
    if not success and error:
        self.errors.append({
            "timestamp": end_time,
            "endpoint": endpoint,
            "url": req_info["url"],
            "error": error,
            "request_id": request_id
        })

    # NOTE(review): persists synchronously here while track_request_start
    # goes through the background queue - confirm the asymmetry is intended.
    await self._persist_endpoint_stats()
async def track_janitor_event(self, event_type: str, sig: str, details: Dict):
    """Record a janitor (pool cleanup) event in the rolling event log."""
    event = {
        "timestamp": time.time(),
        "type": event_type,  # "close_cold", "close_hot", "promote"
        "sig": sig[:8],  # short signature prefix is enough for display
        "details": details,
    }
    self.janitor_events.append(event)
def _cleanup_old_entries(self, max_age_seconds: int = 300):
    """Drop rolling-log entries older than *max_age_seconds* (default 5min)."""
    cutoff = time.time() - max_age_seconds

    # Each log is a deque ordered oldest-first, so popping from the left
    # until the head is fresh enough removes exactly the stale entries.
    logs = (
        (self.completed_requests, "end_time"),
        (self.janitor_events, "timestamp"),
        (self.errors, "timestamp"),
    )
    for log, ts_key in logs:
        while log and log[0].get(ts_key, 0) < cutoff:
            log.popleft()
async def update_timeline(self):
    """Update timeline data points (called every 5s)."""
    now = time.time()
    mem_pct = get_container_memory_percent()

    # Clean old entries (keep last 5 minutes)
    self._cleanup_old_entries(max_age_seconds=300)

    # Count requests completed within the last 5s window
    recent_reqs = sum(1 for req in self.completed_requests
                      if now - req.get("end_time", 0) < 5)

    # Browser counts (acquire lock to prevent race conditions with the pool).
    # Imported locally - presumably to avoid a circular import at module
    # load time; confirm before moving to the top of the file.
    from crawler_pool import PERMANENT, HOT_POOL, COLD_POOL, LOCK
    async with LOCK:
        browser_count = {
            "permanent": 1 if PERMANENT else 0,
            "hot": len(HOT_POOL),
            "cold": len(COLD_POOL)
        }

    self.memory_timeline.append({"time": now, "value": mem_pct})
    self.requests_timeline.append({"time": now, "value": recent_reqs})
    self.browser_timeline.append({"time": now, "browsers": browser_count})
async def _persist_endpoint_stats(self):
"""Persist endpoint stats to Redis."""
try:
await self.redis.set(
"monitor:endpoint_stats",
json.dumps(self.endpoint_stats),
ex=86400 # 24h TTL
)
except Exception as e:
logger.warning(f"Failed to persist endpoint stats: {e}")
async def _persistence_worker(self):
    """Background loop: flush endpoint stats to Redis whenever the queue signals."""
    running = True
    while running:
        try:
            await self._persist_queue.get()
            await self._persist_endpoint_stats()
            self._persist_queue.task_done()
        except asyncio.CancelledError:
            running = False  # normal shutdown path
        except Exception as e:
            logger.error(f"Persistence worker error: {e}")
def start_persistence_worker(self):
    """Start the background persistence worker (idempotent)."""
    if self._persist_worker_task:
        return  # already running
    self._persist_worker_task = asyncio.create_task(self._persistence_worker())
    logger.info("Started persistence worker")
async def stop_persistence_worker(self):
    """Cancel the background persistence worker and wait for it to exit."""
    task = self._persist_worker_task
    if not task:
        return
    task.cancel()
    try:
        await task
    except asyncio.CancelledError:
        pass  # expected on cancellation
    self._persist_worker_task = None
    logger.info("Stopped persistence worker")
async def cleanup(self):
    """Shutdown hook: flush final stats to Redis, then stop the worker."""
    logger.info("Monitor cleanup starting...")
    try:
        await self._persist_endpoint_stats()   # final flush before exit
        await self.stop_persistence_worker()
        logger.info("Monitor cleanup completed")
    except Exception as e:
        # Swallow so shutdown of the rest of the app proceeds regardless.
        logger.error(f"Monitor cleanup error: {e}")
async def load_from_redis(self):
    """Restore endpoint stats persisted by a previous process, if present."""
    try:
        raw = await self.redis.get("monitor:endpoint_stats")
        if not raw:
            return  # nothing persisted yet; keep current stats
        self.endpoint_stats = json.loads(raw)
        logger.info("Loaded endpoint stats from Redis")
    except Exception as e:
        logger.warning(f"Failed to load from Redis: {e}")
async def get_health_summary(self) -> Dict:
    """Build a point-in-time health snapshot: container, pool, and janitor."""
    mem_pct = get_container_memory_percent()
    cpu_pct = psutil.cpu_percent(interval=0.1)
    net = psutil.net_io_counters()  # cumulative counters since boot
    from crawler_pool import PERMANENT, HOT_POOL, COLD_POOL, LOCK
    # Snapshot pool state under the lock to avoid races with the janitor.
    async with LOCK:
        # TODO: Track actual browser process memory instead of estimates
        # These are conservative estimates based on typical Chromium usage
        permanent_active = PERMANENT is not None
        permanent_mem = 270 if PERMANENT else 0  # ~270MB for permanent browser
        hot_count = len(HOT_POOL)
        cold_count = len(COLD_POOL)
        hot_mem = hot_count * 180    # ~180MB per hot pool browser
        cold_mem = cold_count * 180  # ~180MB per cold pool browser
    if mem_pct < 60:
        pressure = "LOW"
    elif mem_pct < 80:
        pressure = "MEDIUM"
    else:
        pressure = "HIGH"
    mb = 1024 ** 2
    return {
        "container": {
            "memory_percent": round(mem_pct, 1),
            "cpu_percent": round(cpu_pct, 1),
            "network_sent_mb": round(net.bytes_sent / mb, 2),
            "network_recv_mb": round(net.bytes_recv / mb, 2),
            "uptime_seconds": int(time.time() - self.start_time),
        },
        "pool": {
            "permanent": {"active": permanent_active, "memory_mb": permanent_mem},
            "hot": {"count": hot_count, "memory_mb": hot_mem},
            "cold": {"count": cold_count, "memory_mb": cold_mem},
            "total_memory_mb": permanent_mem + hot_mem + cold_mem,
        },
        "janitor": {
            "next_cleanup_estimate": "adaptive",  # Would need janitor state
            "memory_pressure": pressure,
        },
    }
def get_active_requests(self) -> List[Dict]:
    """Return a copy of each in-flight request, annotated with elapsed time."""
    now = time.time()
    active = []
    for req in self.active_requests.values():
        entry = dict(req)  # copy so callers cannot mutate tracked state
        entry["elapsed"] = round(now - req["start_time"], 1)
        entry["status"] = "running"
        active.append(entry)
    return active
def get_completed_requests(self, limit: int = 50, filter_status: str = "all") -> List[Dict]:
    """Return up to `limit` most-recent completed requests, optionally filtered.

    Note: the limit is applied BEFORE the success/error filter, so a
    filtered result may contain fewer than `limit` items by design.
    """
    recent = list(self.completed_requests)[-limit:]
    if filter_status == "success":
        return [r for r in recent if r.get("success")]
    if filter_status == "error":
        return [r for r in recent if not r.get("success")]
    return recent
async def get_browser_list(self) -> List[Dict]:
    """Return one descriptor per pooled browser (permanent, hot, cold).

    Memory figures are static estimates (see get_health_summary); age is
    approximated from monitor start time because browser creation times
    are not tracked by the pool.

    Fixes: the original iterated `HOT_POOL.items()` / `COLD_POOL.items()`
    but never used the values, and repeated the same dict literal three
    times; a shared helper removes the duplication.
    """
    from crawler_pool import PERMANENT, HOT_POOL, COLD_POOL, LAST_USED, USAGE_COUNT, DEFAULT_CONFIG_SIG, LOCK
    now = time.time()

    def _entry(btype: str, sig, mem_mb: int, killable: bool) -> Dict:
        # Common shape for every pool type; sig may be missing from LAST_USED.
        return {
            "type": btype,
            "sig": sig[:8] if sig else "unknown",
            "age_seconds": int(now - self.start_time),  # approximation
            "last_used_seconds": int(now - LAST_USED.get(sig, now)),
            "memory_mb": mem_mb,
            "hits": USAGE_COUNT.get(sig, 0),
            "killable": killable,
        }

    browsers: List[Dict] = []
    # Acquire lock to prevent race conditions during iteration
    async with LOCK:
        if PERMANENT:
            browsers.append(_entry("permanent", DEFAULT_CONFIG_SIG, 270, False))
        for sig in HOT_POOL:   # values unused; iterate keys only
            browsers.append(_entry("hot", sig, 180, True))
        for sig in COLD_POOL:
            browsers.append(_entry("cold", sig, 180, True))
    return browsers
def get_endpoint_stats_summary(self) -> Dict[str, Dict]:
    """Aggregate raw per-endpoint counters into latency and rate figures."""
    summary: Dict[str, Dict] = {}
    for endpoint, stats in self.endpoint_stats.items():
        n = stats["count"]
        if n > 0:
            avg_ms = round(stats["total_time"] / n * 1000, 1)
            ok_rate = round(stats["success"] / n * 100, 1)
            hit_rate = round(stats["pool_hits"] / n * 100, 1)
        else:
            # No traffic yet: report zeros rather than dividing by zero.
            avg_ms = ok_rate = hit_rate = 0
        summary[endpoint] = {
            "count": n,
            "avg_latency_ms": avg_ms,
            "success_rate_percent": ok_rate,
            "pool_hit_rate_percent": hit_rate,
            "errors": stats["errors"],
        }
    return summary
def get_timeline_data(self, metric: str, window: str = "5m") -> Dict:
    """Return chart-ready {timestamps, values} for one metric.

    Only the 5-minute window is currently kept, so `window` is accepted
    but unused. Unknown metrics yield empty series.
    """
    attr = {
        "memory": "memory_timeline",
        "requests": "requests_timeline",
        "browsers": "browser_timeline",
    }.get(metric)
    if attr is None:
        return {"timestamps": [], "values": []}
    points = list(getattr(self, attr))
    return {
        "timestamps": [int(p["time"]) for p in points],
        # Browser samples store a dict under "browsers" instead of "value".
        "values": [p.get("value", p.get("browsers")) for p in points],
    }
def get_janitor_log(self, limit: int = 100) -> List[Dict]:
    """Return the most recent janitor events (oldest first, at most `limit`)."""
    events = list(self.janitor_events)
    return events[-limit:]
def get_errors_log(self, limit: int = 100) -> List[Dict]:
    """Return the most recent tracked errors (oldest first, at most `limit`)."""
    entries = list(self.errors)
    return entries[-limit:]
# Global instance (initialized in server.py)
monitor_stats: Optional[MonitorStats] = None


def get_monitor() -> MonitorStats:
    """Return the process-wide MonitorStats, failing fast if uninitialized."""
    instance = monitor_stats
    if instance is None:
        raise RuntimeError("Monitor not initialized")
    return instance
| {
"repo_id": "unclecode/crawl4ai",
"file_path": "deploy/docker/monitor.py",
"license": "Apache License 2.0",
"lines": 330,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
unclecode/crawl4ai:deploy/docker/monitor_routes.py | # monitor_routes.py - Monitor API endpoints
from fastapi import APIRouter, HTTPException, WebSocket, WebSocketDisconnect
from pydantic import BaseModel
from typing import Optional
from monitor import get_monitor
import logging
import asyncio
import json
logger = logging.getLogger(__name__)
# All monitoring routes below are served under the /monitor prefix.
router = APIRouter(prefix="/monitor", tags=["monitor"])
@router.get("/health")
async def get_health():
    """Get current system health snapshot."""
    try:
        return await get_monitor().get_health_summary()
    except Exception as e:
        logger.error(f"Error getting health: {e}")
        raise HTTPException(500, str(e))
@router.get("/requests")
async def get_requests(status: str = "all", limit: int = 50):
    """Get active and completed requests.

    Args:
        status: Filter by 'active', 'completed', 'success', 'error', or 'all'
        limit: Max number of completed requests to return (default 50)
    """
    # Validate before touching the monitor so bad input yields a clean 400.
    if status not in ("all", "active", "completed", "success", "error"):
        raise HTTPException(400, f"Invalid status: {status}. Must be one of: all, active, completed, success, error")
    if not 1 <= limit <= 1000:
        raise HTTPException(400, f"Invalid limit: {limit}. Must be between 1 and 1000")
    try:
        monitor = get_monitor()
        active = monitor.get_active_requests() if status in ("all", "active") else []
        if status == "active":
            completed = []
        elif status in ("success", "error"):
            completed = monitor.get_completed_requests(limit, status)
        else:  # "all" or "completed"
            completed = monitor.get_completed_requests(limit)
        return {"active": active, "completed": completed}
    except Exception as e:
        logger.error(f"Error getting requests: {e}")
        raise HTTPException(500, str(e))
@router.get("/browsers")
async def get_browsers():
    """Get detailed browser pool information plus summary stats."""
    try:
        monitor = get_monitor()
        browsers = await monitor.get_browser_list()
        # Reuse rate is derived from the last 100 completed requests.
        recent = monitor.get_completed_requests(100)
        hits = sum(1 for r in recent if r.get("pool_hit", False))
        reuse_rate = (hits / len(recent) * 100) if recent else 0
        summary = {
            "total_count": len(browsers),
            "total_memory_mb": sum(b["memory_mb"] for b in browsers),
            "reuse_rate_percent": round(reuse_rate, 1),
        }
        return {"browsers": browsers, "summary": summary}
    except Exception as e:
        logger.error(f"Error getting browsers: {e}")
        raise HTTPException(500, str(e))
@router.get("/endpoints/stats")
async def get_endpoint_stats():
    """Get aggregated endpoint statistics."""
    try:
        return get_monitor().get_endpoint_stats_summary()
    except Exception as e:
        logger.error(f"Error getting endpoint stats: {e}")
        raise HTTPException(500, str(e))
@router.get("/timeline")
async def get_timeline(metric: str = "memory", window: str = "5m"):
    """Get timeline data for charts.

    Args:
        metric: 'memory', 'requests', or 'browsers'
        window: Time window (only '5m' supported for now)
    """
    if metric not in ("memory", "requests", "browsers"):
        raise HTTPException(400, f"Invalid metric: {metric}. Must be one of: memory, requests, browsers")
    if window != "5m":
        raise HTTPException(400, f"Invalid window: {window}. Only '5m' is currently supported")
    try:
        return get_monitor().get_timeline_data(metric, window)
    except Exception as e:
        logger.error(f"Error getting timeline: {e}")
        raise HTTPException(500, str(e))
@router.get("/logs/janitor")
async def get_janitor_log(limit: int = 100):
    """Get recent janitor cleanup events."""
    if not 1 <= limit <= 1000:
        raise HTTPException(400, f"Invalid limit: {limit}. Must be between 1 and 1000")
    try:
        return {"events": get_monitor().get_janitor_log(limit)}
    except Exception as e:
        logger.error(f"Error getting janitor log: {e}")
        raise HTTPException(500, str(e))
@router.get("/logs/errors")
async def get_errors_log(limit: int = 100):
    """Get recent errors."""
    if not 1 <= limit <= 1000:
        raise HTTPException(400, f"Invalid limit: {limit}. Must be between 1 and 1000")
    try:
        return {"errors": get_monitor().get_errors_log(limit)}
    except Exception as e:
        logger.error(f"Error getting errors log: {e}")
        raise HTTPException(500, str(e))
# ========== Control Actions ==========
class KillBrowserRequest(BaseModel):
    """Request body for kill/restart actions.

    `sig` is a browser-config signature prefix (typically the first 8 chars),
    or the literal string "permanent" for the restart endpoint.
    """
    sig: str
@router.post("/actions/cleanup")
async def force_cleanup():
    """Force immediate janitor cleanup (kills idle cold pool browsers).

    Returns the number of cold-pool browsers closed. Fix: the original
    assigned `now = time.time()` (and imported `time`) but never used it;
    the dead code is removed.
    """
    try:
        from crawler_pool import COLD_POOL, LAST_USED, USAGE_COUNT, LOCK
        from contextlib import suppress
        killed_count = 0
        async with LOCK:
            # Close every cold-pool browser immediately, ignoring close errors.
            for sig in list(COLD_POOL.keys()):
                logger.info(f"π§Ή Force cleanup: closing cold browser (sig={sig[:8]})")
                with suppress(Exception):
                    await COLD_POOL[sig].close()
                COLD_POOL.pop(sig, None)
                LAST_USED.pop(sig, None)
                USAGE_COUNT.pop(sig, None)
                killed_count += 1
        monitor = get_monitor()
        await monitor.track_janitor_event("force_cleanup", "manual", {"killed": killed_count})
        return {"success": True, "killed_browsers": killed_count}
    except Exception as e:
        logger.error(f"Error during force cleanup: {e}")
        raise HTTPException(500, str(e))
@router.post("/actions/kill_browser")
async def kill_browser(req: KillBrowserRequest):
    """Kill a specific browser by signature (hot or cold only).

    Args:
        sig: Browser config signature (first 8 chars)

    Raises (as HTTP responses): 403 if the prefix matches the permanent
    browser, 404 if no pooled browser matches, 500 on unexpected errors.
    """
    try:
        from crawler_pool import HOT_POOL, COLD_POOL, LAST_USED, USAGE_COUNT, LOCK, DEFAULT_CONFIG_SIG
        from contextlib import suppress
        # Find full signature matching prefix
        target_sig = None
        pool_type = None
        # Hold the pool lock for the whole lookup-and-kill sequence so the
        # janitor cannot mutate the pools between the search and the pop.
        async with LOCK:
            # Check hot pool
            for sig in HOT_POOL.keys():
                if sig.startswith(req.sig):
                    target_sig = sig
                    pool_type = "hot"
                    break
            # Check cold pool
            if not target_sig:
                for sig in COLD_POOL.keys():
                    if sig.startswith(req.sig):
                        target_sig = sig
                        pool_type = "cold"
                        break
            # Check if trying to kill permanent
            if DEFAULT_CONFIG_SIG and DEFAULT_CONFIG_SIG.startswith(req.sig):
                raise HTTPException(403, "Cannot kill permanent browser. Use restart instead.")
            if not target_sig:
                raise HTTPException(404, f"Browser with sig={req.sig} not found")
            # Warn if there are active requests (browser might be in use)
            monitor = get_monitor()
            active_count = len(monitor.get_active_requests())
            if active_count > 0:
                logger.warning(f"Killing browser {target_sig[:8]} while {active_count} requests are active - may cause failures")
            # Kill the browser
            if pool_type == "hot":
                browser = HOT_POOL.pop(target_sig)
            else:
                browser = COLD_POOL.pop(target_sig)
            # close() failures are ignored: the pool entry is gone either way.
            with suppress(Exception):
                await browser.close()
            LAST_USED.pop(target_sig, None)
            USAGE_COUNT.pop(target_sig, None)
            logger.info(f"πͺ Killed {pool_type} browser (sig={target_sig[:8]})")
        monitor = get_monitor()
        await monitor.track_janitor_event("kill_browser", target_sig, {"pool": pool_type, "manual": True})
        return {"success": True, "killed_sig": target_sig[:8], "pool_type": pool_type}
    except HTTPException:
        raise  # pass validation errors (403/404) through unchanged
    except Exception as e:
        logger.error(f"Error killing browser: {e}")
        raise HTTPException(500, str(e))
@router.post("/actions/restart_browser")
async def restart_browser(req: KillBrowserRequest):
    """Restart a browser (kill + recreate). Works for permanent too.

    Args:
        sig: Browser config signature (first 8 chars), or "permanent"

    Only the permanent browser is actually recreated here; hot/cold
    browsers are merely killed and recreated lazily on the next request,
    because their BrowserConfig is not stored alongside the pool entry.
    """
    try:
        from crawler_pool import (PERMANENT, HOT_POOL, COLD_POOL, LAST_USED,
                                  USAGE_COUNT, LOCK, DEFAULT_CONFIG_SIG, init_permanent)
        from crawl4ai import AsyncWebCrawler, BrowserConfig
        from contextlib import suppress
        import time
        # Handle permanent browser restart
        if req.sig == "permanent" or (DEFAULT_CONFIG_SIG and DEFAULT_CONFIG_SIG.startswith(req.sig)):
            async with LOCK:
                if PERMANENT:
                    with suppress(Exception):
                        await PERMANENT.close()
                # Reinitialize permanent from the server config on disk.
                from utils import load_config
                config = load_config()
                await init_permanent(BrowserConfig(
                    extra_args=config["crawler"]["browser"].get("extra_args", []),
                    **config["crawler"]["browser"].get("kwargs", {}),
                ))
            logger.info("π Restarted permanent browser")
            return {"success": True, "restarted": "permanent"}
        # Handle hot/cold browser restart
        target_sig = None
        pool_type = None
        browser_config = None
        async with LOCK:
            # Find browser
            for sig in HOT_POOL.keys():
                if sig.startswith(req.sig):
                    target_sig = sig
                    pool_type = "hot"
                    # Would need to reconstruct config (not stored currently)
                    break
            if not target_sig:
                for sig in COLD_POOL.keys():
                    if sig.startswith(req.sig):
                        target_sig = sig
                        pool_type = "cold"
                        break
            if not target_sig:
                raise HTTPException(404, f"Browser with sig={req.sig} not found")
            # Kill existing
            if pool_type == "hot":
                browser = HOT_POOL.pop(target_sig)
            else:
                browser = COLD_POOL.pop(target_sig)
            with suppress(Exception):
                await browser.close()
            # Note: We can't easily recreate with same config without storing it
            # For now, just kill and let new requests create fresh ones
            LAST_USED.pop(target_sig, None)
            USAGE_COUNT.pop(target_sig, None)
        logger.info(f"π Restarted {pool_type} browser (sig={target_sig[:8]})")
        monitor = get_monitor()
        await monitor.track_janitor_event("restart_browser", target_sig, {"pool": pool_type})
        return {"success": True, "restarted_sig": target_sig[:8], "note": "Browser will be recreated on next request"}
    except HTTPException:
        raise  # pass the 404 through unchanged
    except Exception as e:
        logger.error(f"Error restarting browser: {e}")
        raise HTTPException(500, str(e))
@router.post("/stats/reset")
async def reset_stats():
    """Reset today's endpoint counters."""
    try:
        mon = get_monitor()
        mon.endpoint_stats.clear()
        # Persist the cleared state so a restart doesn't resurrect old stats.
        await mon._persist_endpoint_stats()
        return {"success": True, "message": "Endpoint stats reset"}
    except Exception as e:
        logger.error(f"Error resetting stats: {e}")
        raise HTTPException(500, str(e))
@router.websocket("/ws")
async def websocket_endpoint(websocket: WebSocket):
    """WebSocket endpoint for real-time monitoring updates.

    Sends updates every 2 seconds with:
    - Health stats
    - Active/completed requests
    - Browser pool status
    - Timeline data
    """
    await websocket.accept()
    logger.info("WebSocket client connected")
    try:
        while True:
            try:
                # Gather all monitoring data
                monitor = get_monitor()
                data = {
                    # NOTE(review): asyncio.get_event_loop() is deprecated in
                    # favor of get_running_loop() — consider updating.
                    "timestamp": asyncio.get_event_loop().time(),
                    "health": await monitor.get_health_summary(),
                    "requests": {
                        "active": monitor.get_active_requests(),
                        "completed": monitor.get_completed_requests(limit=10)
                    },
                    "browsers": await monitor.get_browser_list(),
                    "timeline": {
                        "memory": monitor.get_timeline_data("memory", "5m"),
                        "requests": monitor.get_timeline_data("requests", "5m"),
                        "browsers": monitor.get_timeline_data("browsers", "5m")
                    },
                    "janitor": monitor.get_janitor_log(limit=10),
                    "errors": monitor.get_errors_log(limit=10)
                }
                # Send update to client
                await websocket.send_json(data)
                # Wait 2 seconds before next update
                await asyncio.sleep(2)
            except WebSocketDisconnect:
                logger.info("WebSocket client disconnected")
                break
            except Exception as e:
                # Keep the connection alive through transient gather/send errors.
                logger.error(f"WebSocket error: {e}", exc_info=True)
                await asyncio.sleep(2)  # Continue trying
    except Exception as e:
        logger.error(f"WebSocket connection error: {e}", exc_info=True)
    finally:
        logger.info("WebSocket connection closed")
| {
"repo_id": "unclecode/crawl4ai",
"file_path": "deploy/docker/monitor_routes.py",
"license": "Apache License 2.0",
"lines": 328,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
unclecode/crawl4ai:deploy/docker/test-websocket.py | #!/usr/bin/env python3
"""
Quick WebSocket test - Connect to monitor WebSocket and print updates
"""
import asyncio
import websockets
import json
async def test_websocket():
    """Connect to the monitor WebSocket and sanity-check five updates.

    Returns 0 on success, 1 on any connection/parse error (used as the
    process exit code). Fix: the emoji string literals were mojibake'd and
    split across physical lines in the extracted source (unterminated
    strings); restored to plausible single-line literals.
    """
    uri = "ws://localhost:11235/monitor/ws"
    print(f"Connecting to {uri}...")
    try:
        async with websockets.connect(uri) as websocket:
            print("\u2705 Connected!")
            # Receive and print 5 updates
            for i in range(5):
                message = await websocket.recv()
                data = json.loads(message)
                print(f"\n\U0001F4CA Update #{i+1}:")
                print(f"  - Health: CPU {data['health']['container']['cpu_percent']}%, Memory {data['health']['container']['memory_percent']}%")
                print(f"  - Active Requests: {len(data['requests']['active'])}")
                print(f"  - Browsers: {len(data['browsers'])}")
    except Exception as e:
        print(f"\u274C Error: {e}")
        return 1
    print("\n\u2705 WebSocket test passed!")
    return 0
if __name__ == "__main__":
    # Propagate the test's 0/1 result as the process exit code.
    exit(asyncio.run(test_websocket()))
| {
"repo_id": "unclecode/crawl4ai",
"file_path": "deploy/docker/test-websocket.py",
"license": "Apache License 2.0",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
unclecode/crawl4ai:deploy/docker/tests/demo_monitor_dashboard.py | #!/usr/bin/env python3
"""
Monitor Dashboard Demo Script
Generates varied activity to showcase all monitoring features for video recording.
"""
import httpx
import asyncio
import time
from datetime import datetime
# Base URL of the locally running crawl4ai server under demo.
BASE_URL = "http://localhost:11235"
async def demo_dashboard():
    """Drive varied traffic through the server to populate the dashboard.

    Seven phases: permanent-pool hits, cold->hot promotion, a concurrent
    burst, multi-endpoint coverage, a deliberate error, idle time for the
    janitor, and a final stats readout. Pure I/O against BASE_URL.

    NOTE(review): emoji in the print literals are mojibake'd in this copy
    (some split across lines); reproduced verbatim here.
    """
    print("π¬ Monitor Dashboard Demo - Starting...\n")
    print(f"π Dashboard: {BASE_URL}/dashboard")
    print("=" * 60)
    async with httpx.AsyncClient(timeout=60.0) as client:
        # Phase 1: Simple requests (permanent browser)
        print("\nπ· Phase 1: Testing permanent browser pool")
        print("-" * 60)
        for i in range(5):
            print(f" {i+1}/5 Request to /crawl (default config)...")
            try:
                r = await client.post(
                    f"{BASE_URL}/crawl",
                    json={"urls": [f"https://httpbin.org/html?req={i}"], "crawler_config": {}}
                )
                print(f" β
Status: {r.status_code}, Time: {r.elapsed.total_seconds():.2f}s")
            except Exception as e:
                print(f" β Error: {e}")
            await asyncio.sleep(1)  # Small delay between requests
        # Phase 2: Create variant browsers (different configs)
        print("\nπΆ Phase 2: Testing coldβhot pool promotion")
        print("-" * 60)
        viewports = [
            {"width": 1920, "height": 1080},
            {"width": 1280, "height": 720},
            {"width": 800, "height": 600}
        ]
        for idx, viewport in enumerate(viewports):
            print(f" Viewport {viewport['width']}x{viewport['height']}:")
            for i in range(4):  # 4 requests each to trigger promotion at 3
                try:
                    r = await client.post(
                        f"{BASE_URL}/crawl",
                        json={
                            "urls": [f"https://httpbin.org/json?v={idx}&r={i}"],
                            "browser_config": {"viewport": viewport},
                            "crawler_config": {}
                        }
                    )
                    print(f" {i+1}/4 β
{r.status_code} - Should see coldβhot after 3 uses")
                except Exception as e:
                    print(f" {i+1}/4 β {e}")
                await asyncio.sleep(0.5)
        # Phase 3: Concurrent burst (stress pool)
        print("\nπ· Phase 3: Concurrent burst (10 parallel)")
        print("-" * 60)
        tasks = []
        for i in range(10):
            tasks.append(
                client.post(
                    f"{BASE_URL}/crawl",
                    json={"urls": [f"https://httpbin.org/delay/2?burst={i}"], "crawler_config": {}}
                )
            )
        print(" Sending 10 concurrent requests...")
        start = time.time()
        results = await asyncio.gather(*tasks, return_exceptions=True)
        elapsed = time.time() - start
        successes = sum(1 for r in results if not isinstance(r, Exception) and r.status_code == 200)
        print(f" β
{successes}/10 succeeded in {elapsed:.2f}s")
        # Phase 4: Multi-endpoint coverage
        print("\nπΆ Phase 4: Testing multiple endpoints")
        print("-" * 60)
        endpoints = [
            ("/md", {"url": "https://httpbin.org/html", "f": "fit", "c": "0"}),
            ("/screenshot", {"url": "https://httpbin.org/html"}),
            ("/pdf", {"url": "https://httpbin.org/html"}),
        ]
        for endpoint, payload in endpoints:
            print(f" Testing {endpoint}...")
            try:
                # NOTE(review): both branches are identical; the if is redundant.
                if endpoint == "/md":
                    r = await client.post(f"{BASE_URL}{endpoint}", json=payload)
                else:
                    r = await client.post(f"{BASE_URL}{endpoint}", json=payload)
                print(f" β
{r.status_code}")
            except Exception as e:
                print(f" β {e}")
            await asyncio.sleep(1)
        # Phase 5: Intentional error (to populate errors tab)
        print("\nπ· Phase 5: Generating error examples")
        print("-" * 60)
        print(" Triggering invalid URL error...")
        try:
            r = await client.post(
                f"{BASE_URL}/crawl",
                json={"urls": ["invalid://bad-url"], "crawler_config": {}}
            )
            print(f" Response: {r.status_code}")
        except Exception as e:
            print(f" β
Error captured: {type(e).__name__}")
        # Phase 6: Wait for janitor activity
        print("\nπΆ Phase 6: Waiting for janitor cleanup...")
        print("-" * 60)
        print(" Idle for 40s to allow janitor to clean cold pool browsers...")
        for i in range(40, 0, -10):
            print(f" {i}s remaining... (Check dashboard for cleanup events)")
            await asyncio.sleep(10)
        # Phase 7: Final stats check
        print("\nπ· Phase 7: Final dashboard state")
        print("-" * 60)
        r = await client.get(f"{BASE_URL}/monitor/health")
        health = r.json()
        print(f" Memory: {health['container']['memory_percent']:.1f}%")
        print(f" Browsers: Perm={health['pool']['permanent']['active']}, "
              f"Hot={health['pool']['hot']['count']}, Cold={health['pool']['cold']['count']}")
        r = await client.get(f"{BASE_URL}/monitor/endpoints/stats")
        stats = r.json()
        print(f"\n Endpoint Stats:")
        for endpoint, data in stats.items():
            print(f" {endpoint}: {data['count']} req, "
                  f"{data['avg_latency_ms']:.0f}ms avg, "
                  f"{data['success_rate_percent']:.1f}% success")
        r = await client.get(f"{BASE_URL}/monitor/browsers")
        browsers = r.json()
        print(f"\n Pool Efficiency:")
        print(f" Total browsers: {browsers['summary']['total_count']}")
        print(f" Memory usage: {browsers['summary']['total_memory_mb']} MB")
        print(f" Reuse rate: {browsers['summary']['reuse_rate_percent']:.1f}%")
        print("\n" + "=" * 60)
        print("β
Demo complete! Dashboard is now populated with rich data.")
        print(f"\nπΉ Recording tip: Refresh {BASE_URL}/dashboard")
        print(" You should see:")
        print(" β’ Active & completed requests")
        print(" β’ Browser pool (permanent + hot/cold)")
        print(" β’ Janitor cleanup events")
        print(" β’ Endpoint analytics")
        print(" β’ Memory timeline")
if __name__ == "__main__":
    # Run the demo; Ctrl-C is an expected way to stop a recording session.
    try:
        asyncio.run(demo_dashboard())
    except KeyboardInterrupt:
        print("\n\nβ οΈ Demo interrupted by user")
    except Exception as e:
        print(f"\n\nβ Demo failed: {e}")
| {
"repo_id": "unclecode/crawl4ai",
"file_path": "deploy/docker/tests/demo_monitor_dashboard.py",
"license": "Apache License 2.0",
"lines": 145,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
unclecode/crawl4ai:deploy/docker/tests/test_1_basic.py | #!/usr/bin/env python3
"""
Test 1: Basic Container Health + Single Endpoint
- Starts container
- Hits /health endpoint 10 times
- Reports success rate and basic latency
"""
import asyncio
import time
import docker
import httpx
# Config
IMAGE = "crawl4ai-local:latest"   # locally built image under test
CONTAINER_NAME = "crawl4ai-test"  # throwaway test container name
PORT = 11235                      # host port mapped to the server
REQUESTS = 10                     # number of /health probes to send
async def test_endpoint(url: str, count: int):
    """Hit `url` `count` times; return one result dict per request.

    Successful requests record success flag, latency (ms) and status code;
    failed ones record the error string with a None latency.
    """
    results = []
    async with httpx.AsyncClient(timeout=30.0) as client:
        for i in range(count):
            start = time.time()
            try:
                resp = await client.get(url)
                elapsed = (time.time() - start) * 1000  # ms
                outcome = {
                    "success": resp.status_code == 200,
                    "latency_ms": elapsed,
                    "status": resp.status_code,
                }
                print(f" [{i+1}/{count}] β {resp.status_code} - {elapsed:.0f}ms")
            except Exception as e:
                outcome = {
                    "success": False,
                    "latency_ms": None,
                    "error": str(e),
                }
                print(f" [{i+1}/{count}] β Error: {e}")
            results.append(outcome)
    return results
def start_container(client, image: str, name: str, port: int):
    """Start container, return container object.

    Removes any stale container with the same name first, then polls
    /health for up to ~30s before giving up.
    """
    # Clean up existing
    try:
        old = client.containers.get(name)
        print(f"π§Ή Stopping existing container '{name}'...")
        old.stop()
        old.remove()
    except docker.errors.NotFound:
        pass
    print(f"π Starting container '{name}' from image '{image}'...")
    container = client.containers.run(
        image,
        name=name,
        ports={f"{port}/tcp": port},
        detach=True,
        shm_size="1g",  # Chromium needs a larger /dev/shm than the default
        environment={"PYTHON_ENV": "production"}
    )
    # Wait for health
    print(f"β³ Waiting for container to be healthy...")
    for _ in range(30):  # 30s timeout
        time.sleep(1)
        container.reload()
        if container.status == "running":
            try:
                # Quick health check
                import requests
                resp = requests.get(f"http://localhost:{port}/health", timeout=2)
                if resp.status_code == 200:
                    print(f"β
Container healthy!")
                    return container
            # bare except: any probe failure just means "not ready yet"
            except:
                pass
    raise TimeoutError("Container failed to start")
def stop_container(container):
    """Stop and remove container."""
    print(f"π Stopping container...")
    container.stop()
    container.remove()
    print(f"β
Container removed")
async def main():
    """Run Test 1: start the container, probe /health, report, tear down.

    Returns 0 when every request succeeded, 1 otherwise (process exit code).
    """
    print("="*60)
    print("TEST 1: Basic Container Health + Single Endpoint")
    print("="*60)
    client = docker.from_env()
    container = None
    try:
        # Start container
        container = start_container(client, IMAGE, CONTAINER_NAME, PORT)
        # Test /health endpoint
        print(f"\nπ Testing /health endpoint ({REQUESTS} requests)...")
        url = f"http://localhost:{PORT}/health"
        results = await test_endpoint(url, REQUESTS)
        # Calculate stats
        successes = sum(1 for r in results if r["success"])
        success_rate = (successes / len(results)) * 100
        latencies = [r["latency_ms"] for r in results if r["latency_ms"] is not None]
        avg_latency = sum(latencies) / len(latencies) if latencies else 0
        # Print results
        print(f"\n{'='*60}")
        print(f"RESULTS:")
        print(f" Success Rate: {success_rate:.1f}% ({successes}/{len(results)})")
        print(f" Avg Latency: {avg_latency:.0f}ms")
        if latencies:
            print(f" Min Latency: {min(latencies):.0f}ms")
            print(f" Max Latency: {max(latencies):.0f}ms")
        print(f"{'='*60}")
        # Pass/Fail: every single request must have succeeded.
        if success_rate >= 100:
            print(f"β
TEST PASSED")
            return 0
        else:
            print(f"β TEST FAILED (expected 100% success rate)")
            return 1
    except Exception as e:
        print(f"\nβ TEST ERROR: {e}")
        return 1
    finally:
        # Always clean up the container, pass or fail.
        if container:
            stop_container(container)
if __name__ == "__main__":
    # Surface the async test's 0/1 result as the process exit code.
    exit_code = asyncio.run(main())
    exit(exit_code)
| {
"repo_id": "unclecode/crawl4ai",
"file_path": "deploy/docker/tests/test_1_basic.py",
"license": "Apache License 2.0",
"lines": 123,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
unclecode/crawl4ai:deploy/docker/tests/test_2_memory.py | #!/usr/bin/env python3
"""
Test 2: Docker Stats Monitoring
- Extends Test 1 with real-time container stats
- Monitors memory % and CPU during requests
- Reports baseline, peak, and final memory
"""
import asyncio
import time
import docker
import httpx
from threading import Thread, Event
# Config
IMAGE = "crawl4ai-local:latest"   # locally built image under test
CONTAINER_NAME = "crawl4ai-test"  # throwaway test container name
PORT = 11235                      # host port mapped to the server
REQUESTS = 20  # More requests to see memory usage
# Stats tracking
stats_history = []        # samples appended by the monitor thread
stop_monitoring = Event()  # set to tell the monitor thread to exit
def monitor_stats(container):
    """Background thread to collect container stats.

    Streams docker stats and appends one sample dict per reading to the
    module-level `stats_history` until `stop_monitoring` is set.
    """
    for stat in container.stats(decode=True, stream=True):
        if stop_monitoring.is_set():
            break
        try:
            # Extract memory stats
            mem_usage = stat['memory_stats'].get('usage', 0) / (1024 * 1024)  # MB
            mem_limit = stat['memory_stats'].get('limit', 1) / (1024 * 1024)
            mem_percent = (mem_usage / mem_limit * 100) if mem_limit > 0 else 0
            # Extract CPU stats (handle missing fields on Mac)
            cpu_percent = 0
            try:
                cpu_delta = stat['cpu_stats']['cpu_usage']['total_usage'] - \
                    stat['precpu_stats']['cpu_usage']['total_usage']
                system_delta = stat['cpu_stats'].get('system_cpu_usage', 0) - \
                    stat['precpu_stats'].get('system_cpu_usage', 0)
                if system_delta > 0:
                    num_cpus = stat['cpu_stats'].get('online_cpus', 1)
                    cpu_percent = (cpu_delta / system_delta * num_cpus * 100.0)
            except (KeyError, ZeroDivisionError):
                pass
            stats_history.append({
                'timestamp': time.time(),
                'memory_mb': mem_usage,
                'memory_percent': mem_percent,
                'cpu_percent': cpu_percent
            })
        except Exception as e:
            # Skip malformed stats
            pass
        # NOTE(review): placed inside the loop to match the "sample every
        # 500ms" comment — the streaming API already paces readings, so
        # this mainly adds back-pressure; confirm against original layout.
        time.sleep(0.5)  # Sample every 500ms
async def test_endpoint(url: str, count: int):
    """Hit `url` `count` times; return per-request success/latency dicts.

    Logs only every fifth request to keep output readable for long runs.
    """
    results = []
    async with httpx.AsyncClient(timeout=30.0) as client:
        for i in range(count):
            start = time.time()
            try:
                resp = await client.get(url)
                elapsed = (time.time() - start) * 1000
                results.append({
                    "success": resp.status_code == 200,
                    "latency_ms": elapsed,
                })
                if (i + 1) % 5 == 0:  # Print every 5 requests
                    print(f" [{i+1}/{count}] β {resp.status_code} - {elapsed:.0f}ms")
            except Exception as e:
                results.append({"success": False, "error": str(e)})
                print(f" [{i+1}/{count}] β Error: {e}")
    return results
def start_container(client, image: str, name: str, port: int):
    """Start container.

    Like test_1's helper but with an explicit 4g memory limit so the
    memory-delta measurements have a stable ceiling. Polls /health for
    up to ~30s.
    """
    try:
        old = client.containers.get(name)
        print(f"π§Ή Stopping existing container '{name}'...")
        old.stop()
        old.remove()
    except docker.errors.NotFound:
        pass
    print(f"π Starting container '{name}'...")
    container = client.containers.run(
        image,
        name=name,
        ports={f"{port}/tcp": port},
        detach=True,
        shm_size="1g",
        mem_limit="4g",  # Set explicit memory limit
    )
    print(f"β³ Waiting for health...")
    for _ in range(30):
        time.sleep(1)
        container.reload()
        if container.status == "running":
            try:
                import requests
                resp = requests.get(f"http://localhost:{port}/health", timeout=2)
                if resp.status_code == 200:
                    print(f"β
Container healthy!")
                    return container
            # bare except: any probe failure just means "not ready yet"
            except:
                pass
    raise TimeoutError("Container failed to start")
def stop_container(container):
    """Stop and remove the test container (no output beyond the banner)."""
    print(f"π Stopping container...")
    container.stop()
    container.remove()
async def main():
    """Run the docker-stats memory test end to end.

    Starts the container, samples docker stats in a background thread,
    issues REQUESTS GETs against /health, then reports success rate,
    latency and memory growth. Returns a process exit code (0 pass, 1 fail).
    """
    print("="*60)
    print("TEST 2: Docker Stats Monitoring")
    print("="*60)
    client = docker.from_env()
    container = None
    monitor_thread = None
    try:
        # Start container
        container = start_container(client, IMAGE, CONTAINER_NAME, PORT)
        # Start stats monitoring in background
        print(f"\nπ Starting stats monitor...")
        stop_monitoring.clear()
        stats_history.clear()
        monitor_thread = Thread(target=monitor_stats, args=(container,), daemon=True)
        monitor_thread.start()
        # Wait a bit for baseline
        await asyncio.sleep(2)
        baseline_mem = stats_history[-1]['memory_mb'] if stats_history else 0
        print(f"π Baseline memory: {baseline_mem:.1f} MB")
        # Test /health endpoint
        print(f"\nπ Running {REQUESTS} requests to /health...")
        url = f"http://localhost:{PORT}/health"
        results = await test_endpoint(url, REQUESTS)
        # Wait a bit to capture peak
        await asyncio.sleep(1)
        # Stop monitoring
        stop_monitoring.set()
        if monitor_thread:
            monitor_thread.join(timeout=2)
        # Calculate stats
        successes = sum(1 for r in results if r.get("success"))
        success_rate = (successes / len(results)) * 100
        latencies = [r["latency_ms"] for r in results if "latency_ms" in r]
        avg_latency = sum(latencies) / len(latencies) if latencies else 0
        # Memory stats
        memory_samples = [s['memory_mb'] for s in stats_history]
        peak_mem = max(memory_samples) if memory_samples else 0
        final_mem = memory_samples[-1] if memory_samples else 0
        mem_delta = final_mem - baseline_mem
        # Print results
        print(f"\n{'='*60}")
        print(f"RESULTS:")
        print(f"  Success Rate: {success_rate:.1f}% ({successes}/{len(results)})")
        print(f"  Avg Latency: {avg_latency:.0f}ms")
        print(f"\n  Memory Stats:")
        print(f"    Baseline: {baseline_mem:.1f} MB")
        print(f"    Peak: {peak_mem:.1f} MB")
        print(f"    Final: {final_mem:.1f} MB")
        print(f"    Delta: {mem_delta:+.1f} MB")
        print(f"{'='*60}")
        # Pass/Fail
        if success_rate >= 100 and mem_delta < 100:  # No significant memory growth
            print(f"β TEST PASSED")
            return 0
        else:
            if success_rate < 100:
                print(f"β TEST FAILED (success rate < 100%)")
            if mem_delta >= 100:
                print(f"β οΈ WARNING: Memory grew by {mem_delta:.1f} MB")
            return 1
    except Exception as e:
        print(f"\nβ TEST ERROR: {e}")
        return 1
    finally:
        # Always stop the monitor thread and tear down the container.
        stop_monitoring.set()
        if container:
            stop_container(container)
if __name__ == "__main__":
    # Propagate main()'s return value as the process exit status.
    raise SystemExit(asyncio.run(main()))
| {
"repo_id": "unclecode/crawl4ai",
"file_path": "deploy/docker/tests/test_2_memory.py",
"license": "Apache License 2.0",
"lines": 179,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
unclecode/crawl4ai:deploy/docker/tests/test_3_pool.py | #!/usr/bin/env python3
"""
Test 3: Pool Validation - Permanent Browser Reuse
- Tests /html endpoint (should use permanent browser)
- Monitors container logs for pool hit markers
- Validates browser reuse rate
- Checks memory after browser creation
"""
import asyncio
import time
import docker
import httpx
from threading import Thread, Event
# Config
IMAGE = "crawl4ai-local:latest"   # locally-built image under test
CONTAINER_NAME = "crawl4ai-test"  # stale containers with this name are removed on start
PORT = 11235                      # host port mapped to the server
REQUESTS = 30                     # number of /html requests to issue
# Stats tracking
stats_history = []         # memory samples appended by the monitor thread
stop_monitoring = Event()  # set to tell the monitor thread to stop
def monitor_stats(container):
    """Stream docker stats into `stats_history` until `stop_monitoring` is set.

    Runs in a daemon thread; each sample records a timestamp and the
    container's memory usage converted from bytes to MB.
    """
    for stat in container.stats(decode=True, stream=True):
        if stop_monitoring.is_set():
            break
        try:
            mem_usage = stat['memory_stats'].get('usage', 0) / (1024 * 1024)
            stats_history.append({
                'timestamp': time.time(),
                'memory_mb': mem_usage,
            })
        except Exception:
            # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt can
            # still stop the thread. Malformed stats frames are simply skipped.
            pass
        time.sleep(0.5)
def count_log_markers(container):
    """Tally pool-usage markers emitted in the container's logs.

    Returns a dict with per-tier hit counts plus `total_hits`
    (permanent + hot + cold).
    """
    logs = container.logs().decode('utf-8')
    counts = {
        'permanent_hits': logs.count("π₯ Using permanent browser"),
        'hot_hits': logs.count("β¨οΈ Using hot pool browser"),
        'cold_hits': logs.count("βοΈ Using cold pool browser"),
        'new_created': logs.count("π Creating new browser"),
    }
    counts['total_hits'] = counts['permanent_hits'] + counts['hot_hits'] + counts['cold_hits']
    return counts
async def test_endpoint(url: str, count: int):
    """POST `count` sequential crawl requests to `url` and return per-request stats.

    Each result dict contains `success` plus either `latency_ms` (ms) or
    `error` (str) when the request raised.
    """
    results = []
    async with httpx.AsyncClient(timeout=60.0) as client:
        for i in range(count):
            start = time.time()
            try:
                resp = await client.post(url, json={"url": "https://httpbin.org/html"})
                elapsed = (time.time() - start) * 1000  # latency in ms
                results.append({
                    "success": resp.status_code == 200,
                    "latency_ms": elapsed,
                })
                if (i + 1) % 10 == 0:  # progress line every 10 requests
                    print(f"  [{i+1}/{count}] β {resp.status_code} - {elapsed:.0f}ms")
            except Exception as e:
                # Record failures and keep going so one bad request doesn't
                # abort the whole run.
                results.append({"success": False, "error": str(e)})
                print(f"  [{i+1}/{count}] β Error: {e}")
    return results
def start_container(client, image: str, name: str, port: int):
    """Start the test container and wait until /health responds.

    Removes any same-named leftover container first, runs a fresh one with
    a 4g memory cap, then polls /health once per second for up to ~30s.

    Returns:
        The running, healthy container.

    Raises:
        TimeoutError: if the service never reports healthy.
    """
    try:
        old = client.containers.get(name)
        print(f"π§Ή Stopping existing container...")
        old.stop()
        old.remove()
    except docker.errors.NotFound:
        pass
    print(f"π Starting container...")
    container = client.containers.run(
        image,
        name=name,
        ports={f"{port}/tcp": port},
        detach=True,
        shm_size="1g",
        mem_limit="4g",
    )
    print(f"β³ Waiting for health...")
    for _ in range(30):
        time.sleep(1)
        container.reload()
        if container.status == "running":
            try:
                import requests
                resp = requests.get(f"http://localhost:{port}/health", timeout=2)
                if resp.status_code == 200:
                    # Fix: message was split mid-f-string in the source
                    # (mangled emoji); rejoined into one valid print call.
                    print(f"β Container healthy!")
                    return container
            except Exception:
                # Narrowed from a bare `except:`; request failures just mean
                # the service is not ready yet.
                pass
    raise TimeoutError("Container failed to start")
def stop_container(container):
    """Halt the test container and delete it."""
    print(f"π Stopping container...")
    # Stop, then remove — mirrors `docker stop && docker rm`.
    for teardown in (container.stop, container.remove):
        teardown()
async def main():
    """Drive the pool-validation test; returns process exit code (0 pass, 1 fail).

    Hammers /html (served by the permanent browser for the default config),
    then checks success rate, pool reuse rate from the log markers, and
    memory growth.
    """
    print("="*60)
    print("TEST 3: Pool Validation - Permanent Browser Reuse")
    print("="*60)
    client = docker.from_env()
    container = None
    monitor_thread = None
    try:
        # Start container
        container = start_container(client, IMAGE, CONTAINER_NAME, PORT)
        # Wait for permanent browser initialization
        print(f"\nβ³ Waiting for permanent browser init (3s)...")
        await asyncio.sleep(3)
        # Start stats monitoring
        print(f"π Starting stats monitor...")
        stop_monitoring.clear()
        stats_history.clear()
        monitor_thread = Thread(target=monitor_stats, args=(container,), daemon=True)
        monitor_thread.start()
        await asyncio.sleep(1)
        baseline_mem = stats_history[-1]['memory_mb'] if stats_history else 0
        print(f"π Baseline (with permanent browser): {baseline_mem:.1f} MB")
        # Test /html endpoint (uses permanent browser for default config)
        print(f"\nπ Running {REQUESTS} requests to /html...")
        url = f"http://localhost:{PORT}/html"
        results = await test_endpoint(url, REQUESTS)
        # Wait a bit
        await asyncio.sleep(1)
        # Stop monitoring
        stop_monitoring.set()
        if monitor_thread:
            monitor_thread.join(timeout=2)
        # Analyze logs for pool markers
        print(f"\nπ Analyzing pool usage...")
        pool_stats = count_log_markers(container)
        # Calculate request stats
        successes = sum(1 for r in results if r.get("success"))
        success_rate = (successes / len(results)) * 100
        latencies = [r["latency_ms"] for r in results if "latency_ms" in r]
        avg_latency = sum(latencies) / len(latencies) if latencies else 0
        # Memory stats
        memory_samples = [s['memory_mb'] for s in stats_history]
        peak_mem = max(memory_samples) if memory_samples else 0
        final_mem = memory_samples[-1] if memory_samples else 0
        mem_delta = final_mem - baseline_mem
        # Calculate reuse rate
        total_requests = len(results)
        total_pool_hits = pool_stats['total_hits']
        reuse_rate = (total_pool_hits / total_requests * 100) if total_requests > 0 else 0
        # Print results
        print(f"\n{'='*60}")
        print(f"RESULTS:")
        print(f"  Success Rate: {success_rate:.1f}% ({successes}/{len(results)})")
        print(f"  Avg Latency: {avg_latency:.0f}ms")
        print(f"\n  Pool Stats:")
        print(f"    π₯ Permanent Hits: {pool_stats['permanent_hits']}")
        print(f"    β¨οΈ Hot Pool Hits: {pool_stats['hot_hits']}")
        print(f"    βοΈ Cold Pool Hits: {pool_stats['cold_hits']}")
        print(f"    π New Created: {pool_stats['new_created']}")
        print(f"    π Reuse Rate: {reuse_rate:.1f}%")
        print(f"\n  Memory Stats:")
        print(f"    Baseline: {baseline_mem:.1f} MB")
        print(f"    Peak: {peak_mem:.1f} MB")
        print(f"    Final: {final_mem:.1f} MB")
        print(f"    Delta: {mem_delta:+.1f} MB")
        print(f"{'='*60}")
        # Pass/Fail
        passed = True
        if success_rate < 100:
            print(f"β FAIL: Success rate {success_rate:.1f}% < 100%")
            passed = False
        if reuse_rate < 80:
            print(f"β FAIL: Reuse rate {reuse_rate:.1f}% < 80% (expected high permanent browser usage)")
            passed = False
        if pool_stats['permanent_hits'] < (total_requests * 0.8):
            print(f"β οΈ WARNING: Only {pool_stats['permanent_hits']} permanent hits out of {total_requests} requests")
        if mem_delta > 200:
            print(f"β οΈ WARNING: Memory grew by {mem_delta:.1f} MB (possible browser leak)")
        if passed:
            print(f"β TEST PASSED")
            return 0
        else:
            return 1
    except Exception as e:
        print(f"\nβ TEST ERROR: {e}")
        import traceback
        traceback.print_exc()
        return 1
    finally:
        # Always stop the monitor thread and tear down the container.
        stop_monitoring.set()
        if container:
            stop_container(container)
if __name__ == "__main__":
    # Propagate main()'s return value as the process exit status.
    raise SystemExit(asyncio.run(main()))
| {
"repo_id": "unclecode/crawl4ai",
"file_path": "deploy/docker/tests/test_3_pool.py",
"license": "Apache License 2.0",
"lines": 200,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
unclecode/crawl4ai:deploy/docker/tests/test_4_concurrent.py | #!/usr/bin/env python3
"""
Test 4: Concurrent Load Testing
- Tests pool under concurrent load
- Escalates: 10 β 50 β 100 concurrent requests
- Validates latency distribution (P50, P95, P99)
- Monitors memory stability
"""
import asyncio
import time
import docker
import httpx
from threading import Thread, Event
from collections import defaultdict
# Config
IMAGE = "crawl4ai-local:latest"   # locally-built image under test
CONTAINER_NAME = "crawl4ai-test"
PORT = 11235
# Escalating load levels: concurrency cap and total request count per level.
LOAD_LEVELS = [
    {"name": "Light", "concurrent": 10, "requests": 20},
    {"name": "Medium", "concurrent": 50, "requests": 100},
    {"name": "Heavy", "concurrent": 100, "requests": 200},
]
# Stats
stats_history = []         # memory samples appended by the monitor thread
stop_monitoring = Event()  # set to tell the monitor thread to stop
def monitor_stats(container):
    """Stream docker stats into `stats_history` until `stop_monitoring` is set.

    Runs in a daemon thread; each sample records a timestamp and the
    container's memory usage converted from bytes to MB.
    """
    for stat in container.stats(decode=True, stream=True):
        if stop_monitoring.is_set():
            break
        try:
            mem_usage = stat['memory_stats'].get('usage', 0) / (1024 * 1024)
            stats_history.append({'timestamp': time.time(), 'memory_mb': mem_usage})
        except Exception:
            # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt can
            # still stop the thread. Malformed stats frames are simply skipped.
            pass
        time.sleep(0.5)
def count_log_markers(container):
    """Count per-tier pool markers found in the container's logs."""
    logs = container.logs().decode('utf-8')
    # Marker string -> result key; counted in one comprehension pass.
    markers = {
        'permanent': "π₯ Using permanent browser",
        'hot': "β¨οΈ Using hot pool browser",
        'cold': "βοΈ Using cold pool browser",
        'new': "π Creating new browser",
    }
    return {tier: logs.count(needle) for tier, needle in markers.items()}
async def hit_endpoint(client, url, payload, semaphore):
    """Fire one POST while holding a slot on `semaphore` (bounds concurrency).

    Returns {"success", "latency_ms"} on completion or {"success", "error"}
    when the request raises.
    """
    async with semaphore:
        t0 = time.time()
        try:
            resp = await client.post(url, json=payload, timeout=60.0)
        except Exception as exc:
            return {"success": False, "error": str(exc)}
        return {"success": resp.status_code == 200, "latency_ms": (time.time() - t0) * 1000}
async def run_concurrent_test(url, payload, concurrent, total_requests):
    """Issue `total_requests` POSTs with at most `concurrent` in flight."""
    gate = asyncio.Semaphore(concurrent)
    async with httpx.AsyncClient() as client:
        pending = [hit_endpoint(client, url, payload, gate) for _ in range(total_requests)]
        return list(await asyncio.gather(*pending))
def calculate_percentiles(latencies):
    """Return (P50, P95, P99) of `latencies` via nearest-rank on the sorted data.

    Empty input yields (0, 0, 0).
    """
    if not latencies:
        return 0, 0, 0
    ordered = sorted(latencies)
    size = len(ordered)

    def rank(q):
        # int(size * q) is always < size for q < 1, so no clamping is needed.
        return ordered[int(size * q)]

    return rank(0.50), rank(0.95), rank(0.99)
def start_container(client, image, name, port):
    """Start the test container and wait until /health responds.

    Removes any same-named leftover container first, then polls /health
    once per second for up to ~30s.

    Returns:
        The running, healthy container.

    Raises:
        TimeoutError: if the service never reports healthy.
    """
    try:
        old = client.containers.get(name)
        print(f"π§Ή Stopping existing container...")
        old.stop()
        old.remove()
    except docker.errors.NotFound:
        pass
    print(f"π Starting container...")
    container = client.containers.run(
        image, name=name, ports={f"{port}/tcp": port},
        detach=True, shm_size="1g", mem_limit="4g",
    )
    print(f"β³ Waiting for health...")
    for _ in range(30):
        time.sleep(1)
        container.reload()
        if container.status == "running":
            try:
                import requests
                if requests.get(f"http://localhost:{port}/health", timeout=2).status_code == 200:
                    # Fix: message was split mid-f-string in the source
                    # (mangled emoji); rejoined into one valid print call.
                    print(f"β Container healthy!")
                    return container
            except Exception:
                # Narrowed from a bare `except:`; failure means "not ready yet".
                pass
    raise TimeoutError("Container failed to start")
async def main():
    """Run escalating concurrent-load levels and report latency/memory; 0=pass, 1=fail."""
    print("="*60)
    print("TEST 4: Concurrent Load Testing")
    print("="*60)
    client = docker.from_env()
    container = None
    monitor_thread = None
    try:
        container = start_container(client, IMAGE, CONTAINER_NAME, PORT)
        print(f"\nβ³ Waiting for permanent browser init (3s)...")
        await asyncio.sleep(3)
        # Start monitoring
        stop_monitoring.clear()
        stats_history.clear()
        monitor_thread = Thread(target=monitor_stats, args=(container,), daemon=True)
        monitor_thread.start()
        await asyncio.sleep(1)
        baseline_mem = stats_history[-1]['memory_mb'] if stats_history else 0
        print(f"π Baseline: {baseline_mem:.1f} MB\n")
        url = f"http://localhost:{PORT}/html"
        payload = {"url": "https://httpbin.org/html"}
        all_results = []
        level_stats = []
        # Run load levels (Light -> Medium -> Heavy)
        for level in LOAD_LEVELS:
            print(f"{'='*60}")
            print(f"π {level['name']} Load: {level['concurrent']} concurrent, {level['requests']} total")
            print(f"{'='*60}")
            start_time = time.time()
            results = await run_concurrent_test(url, payload, level['concurrent'], level['requests'])
            duration = time.time() - start_time
            successes = sum(1 for r in results if r.get("success"))
            success_rate = (successes / len(results)) * 100
            latencies = [r["latency_ms"] for r in results if "latency_ms" in r]
            p50, p95, p99 = calculate_percentiles(latencies)
            avg_lat = sum(latencies) / len(latencies) if latencies else 0
            print(f"  Duration: {duration:.1f}s")
            print(f"  Success: {success_rate:.1f}% ({successes}/{len(results)})")
            print(f"  Avg Latency: {avg_lat:.0f}ms")
            print(f"  P50/P95/P99: {p50:.0f}ms / {p95:.0f}ms / {p99:.0f}ms")
            level_stats.append({
                'name': level['name'],
                'concurrent': level['concurrent'],
                'success_rate': success_rate,
                'avg_latency': avg_lat,
                'p50': p50, 'p95': p95, 'p99': p99,
            })
            all_results.extend(results)
            await asyncio.sleep(2)  # Cool down between levels
        # Stop monitoring
        await asyncio.sleep(1)
        stop_monitoring.set()
        if monitor_thread:
            monitor_thread.join(timeout=2)
        # Final stats
        pool_stats = count_log_markers(container)
        memory_samples = [s['memory_mb'] for s in stats_history]
        peak_mem = max(memory_samples) if memory_samples else 0
        final_mem = memory_samples[-1] if memory_samples else 0
        print(f"\n{'='*60}")
        print(f"FINAL RESULTS:")
        print(f"{'='*60}")
        print(f"  Total Requests: {len(all_results)}")
        print(f"\n  Pool Utilization:")
        print(f"    π₯ Permanent: {pool_stats['permanent']}")
        print(f"    β¨οΈ Hot: {pool_stats['hot']}")
        print(f"    βοΈ Cold: {pool_stats['cold']}")
        print(f"    π New: {pool_stats['new']}")
        print(f"\n  Memory:")
        print(f"    Baseline: {baseline_mem:.1f} MB")
        print(f"    Peak: {peak_mem:.1f} MB")
        print(f"    Final: {final_mem:.1f} MB")
        print(f"    Delta: {final_mem - baseline_mem:+.1f} MB")
        print(f"{'='*60}")
        # Pass/Fail
        passed = True
        for ls in level_stats:
            if ls['success_rate'] < 99:
                print(f"β FAIL: {ls['name']} success rate {ls['success_rate']:.1f}% < 99%")
                passed = False
            if ls['p99'] > 10000:  # 10s threshold
                print(f"β οΈ WARNING: {ls['name']} P99 latency {ls['p99']:.0f}ms very high")
        if final_mem - baseline_mem > 300:
            print(f"β οΈ WARNING: Memory grew {final_mem - baseline_mem:.1f} MB")
        if passed:
            print(f"β TEST PASSED")
            return 0
        else:
            return 1
    except Exception as e:
        print(f"\nβ TEST ERROR: {e}")
        import traceback
        traceback.print_exc()
        return 1
    finally:
        # Always stop the monitor thread and tear down the container.
        stop_monitoring.set()
        if container:
            print(f"π Stopping container...")
            container.stop()
            container.remove()
if __name__ == "__main__":
    # Propagate main()'s return value as the process exit status.
    raise SystemExit(asyncio.run(main()))
| {
"repo_id": "unclecode/crawl4ai",
"file_path": "deploy/docker/tests/test_4_concurrent.py",
"license": "Apache License 2.0",
"lines": 204,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
unclecode/crawl4ai:deploy/docker/tests/test_5_pool_stress.py | #!/usr/bin/env python3
"""
Test 5: Pool Stress - Mixed Configs
- Tests hot/cold pool with different browser configs
- Uses different viewports to create config variants
- Validates cold β hot promotion after 3 uses
- Monitors pool tier distribution
"""
import asyncio
import time
import docker
import httpx
from threading import Thread, Event
import random
# Config
IMAGE = "crawl4ai-local:latest"   # locally-built image under test
CONTAINER_NAME = "crawl4ai-test"
PORT = 11235
REQUESTS_PER_CONFIG = 5  # 5 requests per config variant
# Different viewport configs to test pool tiers
VIEWPORT_CONFIGS = [
    None,  # Default (permanent browser)
    {"width": 1920, "height": 1080},  # Desktop
    {"width": 1024, "height": 768},  # Tablet
    {"width": 375, "height": 667},  # Mobile
]
# Stats
stats_history = []         # memory samples appended by the monitor thread
stop_monitoring = Event()  # set to tell the monitor thread to stop
def monitor_stats(container):
    """Stream docker stats into `stats_history` until `stop_monitoring` is set.

    Runs in a daemon thread; each sample records a timestamp and the
    container's memory usage converted from bytes to MB.
    """
    for stat in container.stats(decode=True, stream=True):
        if stop_monitoring.is_set():
            break
        try:
            mem_usage = stat['memory_stats'].get('usage', 0) / (1024 * 1024)
            stats_history.append({'timestamp': time.time(), 'memory_mb': mem_usage})
        except Exception:
            # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt can
            # still stop the thread. Malformed stats frames are simply skipped.
            pass
        time.sleep(0.5)
def analyze_pool_logs(container):
    """Tally detailed pool-tier markers from the container's logs.

    Returns per-tier counts, promotion count, and `total`
    (permanent + hot + cold reuse hits).
    """
    logs = container.logs().decode('utf-8')
    stats = {
        'permanent': logs.count("π₯ Using permanent browser"),
        'hot': logs.count("β¨οΈ Using hot pool browser"),
        'cold': logs.count("βοΈ Using cold pool browser"),
        'new': logs.count("π Creating new browser"),
        'promotions': logs.count("β¬οΈ Promoting to hot pool"),
    }
    stats['total'] = stats['permanent'] + stats['hot'] + stats['cold']
    return stats
async def crawl_with_viewport(client, url, viewport):
    """Issue one /crawl request, optionally pinning a specific viewport.

    A non-None viewport makes the browser config differ from the default,
    which per this module's docstring exercises the hot/cold pool tiers.
    """
    request_body = {
        "urls": ["https://httpbin.org/html"],
        "browser_config": {},
        "crawler_config": {},
    }
    if viewport:
        request_body["browser_config"] = {
            "type": "BrowserConfig",
            "params": {
                "viewport": {"type": "dict", "value": viewport},
                "headless": True,
                "text_mode": True,
                "extra_args": [
                    "--no-sandbox",
                    "--disable-dev-shm-usage",
                    "--disable-gpu",
                    "--disable-software-rasterizer",
                    "--disable-web-security",
                    "--allow-insecure-localhost",
                    "--ignore-certificate-errors",
                ],
            },
        }
    t0 = time.time()
    try:
        resp = await client.post(url, json=request_body, timeout=60.0)
    except Exception as exc:
        return {"success": False, "error": str(exc), "viewport": viewport}
    return {
        "success": resp.status_code == 200,
        "latency_ms": (time.time() - t0) * 1000,
        "viewport": viewport,
    }
def start_container(client, image, name, port):
    """Start the test container and wait until /health responds.

    Removes any same-named leftover container first, then polls /health
    once per second for up to ~30s.

    Returns:
        The running, healthy container.

    Raises:
        TimeoutError: if the service never reports healthy.
    """
    try:
        old = client.containers.get(name)
        print(f"π§Ή Stopping existing container...")
        old.stop()
        old.remove()
    except docker.errors.NotFound:
        pass
    print(f"π Starting container...")
    container = client.containers.run(
        image, name=name, ports={f"{port}/tcp": port},
        detach=True, shm_size="1g", mem_limit="4g",
    )
    print(f"β³ Waiting for health...")
    for _ in range(30):
        time.sleep(1)
        container.reload()
        if container.status == "running":
            try:
                import requests
                if requests.get(f"http://localhost:{port}/health", timeout=2).status_code == 200:
                    # Fix: message was split mid-f-string in the source
                    # (mangled emoji); rejoined into one valid print call.
                    print(f"β Container healthy!")
                    return container
            except Exception:
                # Narrowed from a bare `except:`; failure means "not ready yet".
                pass
    raise TimeoutError("Container failed to start")
async def main():
    """Mix browser configs against /crawl and verify pool tiering; 0=pass, 1=fail."""
    print("="*60)
    print("TEST 5: Pool Stress - Mixed Configs")
    print("="*60)
    client = docker.from_env()
    container = None
    monitor_thread = None
    try:
        container = start_container(client, IMAGE, CONTAINER_NAME, PORT)
        print(f"\nβ³ Waiting for permanent browser init (3s)...")
        await asyncio.sleep(3)
        # Start monitoring
        stop_monitoring.clear()
        stats_history.clear()
        monitor_thread = Thread(target=monitor_stats, args=(container,), daemon=True)
        monitor_thread.start()
        await asyncio.sleep(1)
        baseline_mem = stats_history[-1]['memory_mb'] if stats_history else 0
        print(f"π Baseline: {baseline_mem:.1f} MB\n")
        url = f"http://localhost:{PORT}/crawl"
        print(f"Testing {len(VIEWPORT_CONFIGS)} different configs:")
        for i, vp in enumerate(VIEWPORT_CONFIGS):
            vp_str = "Default" if vp is None else f"{vp['width']}x{vp['height']}"
            print(f"  {i+1}. {vp_str}")
        print()
        # Run requests: repeat each config REQUESTS_PER_CONFIG times
        all_results = []
        config_sequence = []
        for _ in range(REQUESTS_PER_CONFIG):
            for viewport in VIEWPORT_CONFIGS:
                config_sequence.append(viewport)
        # Shuffle to mix configs
        random.shuffle(config_sequence)
        print(f"π Running {len(config_sequence)} requests with mixed configs...")
        async with httpx.AsyncClient() as http_client:
            for i, viewport in enumerate(config_sequence):
                result = await crawl_with_viewport(http_client, url, viewport)
                all_results.append(result)
                if (i + 1) % 5 == 0:  # progress line every 5 requests
                    vp_str = "default" if result['viewport'] is None else f"{result['viewport']['width']}x{result['viewport']['height']}"
                    status = "β" if result.get('success') else "β"
                    lat = f"{result.get('latency_ms', 0):.0f}ms" if 'latency_ms' in result else "error"
                    print(f"  [{i+1}/{len(config_sequence)}] {status} {vp_str} - {lat}")
        # Stop monitoring
        await asyncio.sleep(2)
        stop_monitoring.set()
        if monitor_thread:
            monitor_thread.join(timeout=2)
        # Analyze results
        pool_stats = analyze_pool_logs(container)
        successes = sum(1 for r in all_results if r.get("success"))
        success_rate = (successes / len(all_results)) * 100
        latencies = [r["latency_ms"] for r in all_results if "latency_ms" in r]
        avg_lat = sum(latencies) / len(latencies) if latencies else 0
        memory_samples = [s['memory_mb'] for s in stats_history]
        peak_mem = max(memory_samples) if memory_samples else 0
        final_mem = memory_samples[-1] if memory_samples else 0
        print(f"\n{'='*60}")
        print(f"RESULTS:")
        print(f"{'='*60}")
        print(f"  Requests: {len(all_results)}")
        print(f"  Success Rate: {success_rate:.1f}% ({successes}/{len(all_results)})")
        print(f"  Avg Latency: {avg_lat:.0f}ms")
        print(f"\n  Pool Statistics:")
        print(f"    π₯ Permanent: {pool_stats['permanent']}")
        print(f"    β¨οΈ Hot: {pool_stats['hot']}")
        print(f"    βοΈ Cold: {pool_stats['cold']}")
        print(f"    π New: {pool_stats['new']}")
        print(f"    β¬οΈ Promotions: {pool_stats['promotions']}")
        print(f"    π Reuse: {(pool_stats['total'] / len(all_results) * 100):.1f}%")
        print(f"\n  Memory:")
        print(f"    Baseline: {baseline_mem:.1f} MB")
        print(f"    Peak: {peak_mem:.1f} MB")
        print(f"    Final: {final_mem:.1f} MB")
        print(f"    Delta: {final_mem - baseline_mem:+.1f} MB")
        print(f"{'='*60}")
        # Pass/Fail
        passed = True
        if success_rate < 99:
            print(f"β FAIL: Success rate {success_rate:.1f}% < 99%")
            passed = False
        # Should see promotions since we repeat each config 5 times
        if pool_stats['promotions'] < (len(VIEWPORT_CONFIGS) - 1):  # -1 for default
            print(f"β οΈ WARNING: Only {pool_stats['promotions']} promotions (expected ~{len(VIEWPORT_CONFIGS)-1})")
        # Should have created some browsers for different configs
        if pool_stats['new'] == 0:
            print(f"β οΈ NOTE: No new browsers created (all used default?)")
        if pool_stats['permanent'] == len(all_results):
            print(f"β οΈ NOTE: All requests used permanent browser (configs not varying enough?)")
        if final_mem - baseline_mem > 500:
            print(f"β οΈ WARNING: Memory grew {final_mem - baseline_mem:.1f} MB")
        if passed:
            print(f"β TEST PASSED")
            return 0
        else:
            return 1
    except Exception as e:
        print(f"\nβ TEST ERROR: {e}")
        import traceback
        traceback.print_exc()
        return 1
    finally:
        # Always stop the monitor thread and tear down the container.
        stop_monitoring.set()
        if container:
            print(f"π Stopping container...")
            container.stop()
            container.remove()
if __name__ == "__main__":
    # Propagate main()'s return value as the process exit status.
    raise SystemExit(asyncio.run(main()))
| {
"repo_id": "unclecode/crawl4ai",
"file_path": "deploy/docker/tests/test_5_pool_stress.py",
"license": "Apache License 2.0",
"lines": 226,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
unclecode/crawl4ai:deploy/docker/tests/test_6_multi_endpoint.py | #!/usr/bin/env python3
"""
Test 6: Multi-Endpoint Testing
- Tests multiple endpoints together: /html, /screenshot, /pdf, /crawl
- Validates each endpoint works correctly
- Monitors success rates per endpoint
"""
import asyncio
import time
import docker
import httpx
from threading import Thread, Event
# Config
IMAGE = "crawl4ai-local:latest"   # locally-built image under test
CONTAINER_NAME = "crawl4ai-test"
PORT = 11235
REQUESTS_PER_ENDPOINT = 10        # requests issued against each endpoint
# Stats
stats_history = []         # memory samples appended by the monitor thread
stop_monitoring = Event()  # set to tell the monitor thread to stop
def monitor_stats(container):
    """Stream docker stats into `stats_history` until `stop_monitoring` is set.

    Runs in a daemon thread; each sample records a timestamp and the
    container's memory usage converted from bytes to MB.
    """
    for stat in container.stats(decode=True, stream=True):
        if stop_monitoring.is_set():
            break
        try:
            mem_usage = stat['memory_stats'].get('usage', 0) / (1024 * 1024)
            stats_history.append({'timestamp': time.time(), 'memory_mb': mem_usage})
        except Exception:
            # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt can
            # still stop the thread. Malformed stats frames are simply skipped.
            pass
        time.sleep(0.5)
async def test_html(client, base_url, count):
"""Test /html endpoint."""
url = f"{base_url}/html"
results = []
for _ in range(count):
start = time.time()
try:
resp = await client.post(url, json={"url": "https://httpbin.org/html"}, timeout=30.0)
elapsed = (time.time() - start) * 1000
results.append({"success": resp.status_code == 200, "latency_ms": elapsed})
except Exception as e:
results.append({"success": False, "error": str(e)})
return results
async def test_screenshot(client, base_url, count):
"""Test /screenshot endpoint."""
url = f"{base_url}/screenshot"
results = []
for _ in range(count):
start = time.time()
try:
resp = await client.post(url, json={"url": "https://httpbin.org/html"}, timeout=30.0)
elapsed = (time.time() - start) * 1000
results.append({"success": resp.status_code == 200, "latency_ms": elapsed})
except Exception as e:
results.append({"success": False, "error": str(e)})
return results
async def test_pdf(client, base_url, count):
"""Test /pdf endpoint."""
url = f"{base_url}/pdf"
results = []
for _ in range(count):
start = time.time()
try:
resp = await client.post(url, json={"url": "https://httpbin.org/html"}, timeout=30.0)
elapsed = (time.time() - start) * 1000
results.append({"success": resp.status_code == 200, "latency_ms": elapsed})
except Exception as e:
results.append({"success": False, "error": str(e)})
return results
async def test_crawl(client, base_url, count):
"""Test /crawl endpoint."""
url = f"{base_url}/crawl"
results = []
payload = {
"urls": ["https://httpbin.org/html"],
"browser_config": {},
"crawler_config": {}
}
for _ in range(count):
start = time.time()
try:
resp = await client.post(url, json=payload, timeout=30.0)
elapsed = (time.time() - start) * 1000
results.append({"success": resp.status_code == 200, "latency_ms": elapsed})
except Exception as e:
results.append({"success": False, "error": str(e)})
return results
def start_container(client, image, name, port):
    """Start the test container and wait until /health responds.

    Removes any same-named leftover container first, then polls /health
    once per second for up to ~30s.

    Returns:
        The running, healthy container.

    Raises:
        TimeoutError: if the service never reports healthy.
    """
    try:
        old = client.containers.get(name)
        print(f"π§Ή Stopping existing container...")
        old.stop()
        old.remove()
    except docker.errors.NotFound:
        pass
    print(f"π Starting container...")
    container = client.containers.run(
        image, name=name, ports={f"{port}/tcp": port},
        detach=True, shm_size="1g", mem_limit="4g",
    )
    print(f"β³ Waiting for health...")
    for _ in range(30):
        time.sleep(1)
        container.reload()
        if container.status == "running":
            try:
                import requests
                if requests.get(f"http://localhost:{port}/health", timeout=2).status_code == 200:
                    # Fix: message was split mid-f-string in the source
                    # (mangled emoji); rejoined into one valid print call.
                    print(f"β Container healthy!")
                    return container
            except Exception:
                # Narrowed from a bare `except:`; failure means "not ready yet".
                pass
    raise TimeoutError("Container failed to start")
async def main():
    """Exercise /html, /screenshot, /pdf and /crawl; report per-endpoint stats (0=pass, 1=fail)."""
    print("="*60)
    print("TEST 6: Multi-Endpoint Testing")
    print("="*60)
    client = docker.from_env()
    container = None
    monitor_thread = None
    try:
        container = start_container(client, IMAGE, CONTAINER_NAME, PORT)
        print(f"\nβ³ Waiting for permanent browser init (3s)...")
        await asyncio.sleep(3)
        # Start monitoring
        stop_monitoring.clear()
        stats_history.clear()
        monitor_thread = Thread(target=monitor_stats, args=(container,), daemon=True)
        monitor_thread.start()
        await asyncio.sleep(1)
        baseline_mem = stats_history[-1]['memory_mb'] if stats_history else 0
        print(f"π Baseline: {baseline_mem:.1f} MB\n")
        base_url = f"http://localhost:{PORT}"
        # Test each endpoint
        endpoints = {
            "/html": test_html,
            "/screenshot": test_screenshot,
            "/pdf": test_pdf,
            "/crawl": test_crawl,
        }
        all_endpoint_stats = {}
        async with httpx.AsyncClient() as http_client:
            for endpoint_name, test_func in endpoints.items():
                print(f"π Testing {endpoint_name} ({REQUESTS_PER_ENDPOINT} requests)...")
                results = await test_func(http_client, base_url, REQUESTS_PER_ENDPOINT)
                successes = sum(1 for r in results if r.get("success"))
                success_rate = (successes / len(results)) * 100
                latencies = [r["latency_ms"] for r in results if "latency_ms" in r]
                avg_lat = sum(latencies) / len(latencies) if latencies else 0
                all_endpoint_stats[endpoint_name] = {
                    'success_rate': success_rate,
                    'avg_latency': avg_lat,
                    'total': len(results),
                    'successes': successes
                }
                print(f"  β Success: {success_rate:.1f}% ({successes}/{len(results)}), Avg: {avg_lat:.0f}ms")
        # Stop monitoring
        await asyncio.sleep(1)
        stop_monitoring.set()
        if monitor_thread:
            monitor_thread.join(timeout=2)
        # Final stats
        memory_samples = [s['memory_mb'] for s in stats_history]
        peak_mem = max(memory_samples) if memory_samples else 0
        final_mem = memory_samples[-1] if memory_samples else 0
        print(f"\n{'='*60}")
        print(f"RESULTS:")
        print(f"{'='*60}")
        for endpoint, stats in all_endpoint_stats.items():
            print(f"  {endpoint:12} Success: {stats['success_rate']:5.1f}% Avg: {stats['avg_latency']:6.0f}ms")
        print(f"\n  Memory:")
        print(f"    Baseline: {baseline_mem:.1f} MB")
        print(f"    Peak: {peak_mem:.1f} MB")
        print(f"    Final: {final_mem:.1f} MB")
        print(f"    Delta: {final_mem - baseline_mem:+.1f} MB")
        print(f"{'='*60}")
        # Pass/Fail
        passed = True
        for endpoint, stats in all_endpoint_stats.items():
            if stats['success_rate'] < 100:
                print(f"β FAIL: {endpoint} success rate {stats['success_rate']:.1f}% < 100%")
                passed = False
        if passed:
            print(f"β TEST PASSED")
            return 0
        else:
            return 1
    except Exception as e:
        print(f"\nβ TEST ERROR: {e}")
        import traceback
        traceback.print_exc()
        return 1
    finally:
        # Always stop the monitor thread and tear down the container.
        stop_monitoring.set()
        if container:
            print(f"π Stopping container...")
            container.stop()
            container.remove()
if __name__ == "__main__":
    # Propagate main()'s return value as the process exit status.
    raise SystemExit(asyncio.run(main()))
| {
"repo_id": "unclecode/crawl4ai",
"file_path": "deploy/docker/tests/test_6_multi_endpoint.py",
"license": "Apache License 2.0",
"lines": 203,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
unclecode/crawl4ai:deploy/docker/tests/test_7_cleanup.py | #!/usr/bin/env python3
"""
Test 7: Cleanup Verification (Janitor)
- Creates load spike then goes idle
- Verifies memory returns to near baseline
- Tests janitor cleanup of idle browsers
- Monitors memory recovery time
"""
import asyncio
import time
import docker
import httpx
from threading import Thread, Event
# Config
IMAGE = "crawl4ai-local:latest"   # locally-built image under test
CONTAINER_NAME = "crawl4ai-test"
PORT = 11235
SPIKE_REQUESTS = 20  # Create some browsers
IDLE_TIME = 90  # Wait 90s for janitor (runs every 60s)
# Stats
stats_history = []         # memory samples appended by the monitor thread
stop_monitoring = Event()  # set to tell the monitor thread to stop
def monitor_stats(container):
    """Stream docker stats into `stats_history` until `stop_monitoring` is set.

    Runs in a daemon thread; samples at ~1s intervals (coarser than the
    other tests because this test idles for 90s).
    """
    for stat in container.stats(decode=True, stream=True):
        if stop_monitoring.is_set():
            break
        try:
            mem_usage = stat['memory_stats'].get('usage', 0) / (1024 * 1024)
            stats_history.append({'timestamp': time.time(), 'memory_mb': mem_usage})
        except Exception:
            # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt can
            # still stop the thread. Malformed stats frames are simply skipped.
            pass
        time.sleep(1)  # Sample every 1s for this test
def start_container(client, image, name, port):
    """Start the test container and wait until /health responds.

    Removes any same-named leftover container first, then polls /health
    once per second for up to ~30s.

    Returns:
        The running, healthy container.

    Raises:
        TimeoutError: if the service never reports healthy.
    """
    try:
        old = client.containers.get(name)
        print(f"π§Ή Stopping existing container...")
        old.stop()
        old.remove()
    except docker.errors.NotFound:
        pass
    print(f"π Starting container...")
    container = client.containers.run(
        image, name=name, ports={f"{port}/tcp": port},
        detach=True, shm_size="1g", mem_limit="4g",
    )
    print(f"β³ Waiting for health...")
    for _ in range(30):
        time.sleep(1)
        container.reload()
        if container.status == "running":
            try:
                import requests
                if requests.get(f"http://localhost:{port}/health", timeout=2).status_code == 200:
                    # Fix: message was split mid-f-string in the source
                    # (mangled emoji); rejoined into one valid print call.
                    print(f"β Container healthy!")
                    return container
            except Exception:
                # Narrowed from a bare `except:`; failure means "not ready yet".
                pass
    raise TimeoutError("Container failed to start")
async def main():
    """Test 7: verify the janitor reclaims browser memory after a load spike.

    Flow: start the container, record a memory baseline, fire
    ``SPIKE_REQUESTS`` concurrent /crawl requests with varied browser
    configs to populate the pool, then sit idle for ``IDLE_TIME`` seconds
    and check how much memory the janitor reclaims.

    Returns 0 on completion (recovery shortfalls are reported as warnings
    only), 1 on unexpected error.
    """
    print("="*60)
    print("TEST 7: Cleanup Verification (Janitor)")
    print("="*60)
    client = docker.from_env()
    container = None
    monitor_thread = None
    try:
        container = start_container(client, IMAGE, CONTAINER_NAME, PORT)
        print(f"\nβ³ Waiting for permanent browser init (3s)...")
        await asyncio.sleep(3)
        # Start the background memory monitor with a clean history.
        stop_monitoring.clear()
        stats_history.clear()
        monitor_thread = Thread(target=monitor_stats, args=(container,), daemon=True)
        monitor_thread.start()
        await asyncio.sleep(2)
        baseline_mem = stats_history[-1]['memory_mb'] if stats_history else 0
        print(f"π Baseline: {baseline_mem:.1f} MB\n")
        # Create load spike with different viewport configs so the pool
        # spawns distinct browser instances rather than reusing one.
        print(f"π₯ Creating load spike ({SPIKE_REQUESTS} requests with varied configs)...")
        url = f"http://localhost:{PORT}/crawl"
        viewports = [
            {"width": 1920, "height": 1080},
            {"width": 1024, "height": 768},
            {"width": 375, "height": 667},
        ]
        async with httpx.AsyncClient(timeout=60.0) as http_client:
            tasks = []
            for i in range(SPIKE_REQUESTS):
                vp = viewports[i % len(viewports)]
                payload = {
                    "urls": ["https://httpbin.org/html"],
                    "browser_config": {
                        "type": "BrowserConfig",
                        "params": {
                            "viewport": {"type": "dict", "value": vp},
                            "headless": True,
                            "text_mode": True,
                            "extra_args": [
                                "--no-sandbox", "--disable-dev-shm-usage",
                                "--disable-gpu", "--disable-software-rasterizer",
                                "--disable-web-security", "--allow-insecure-localhost",
                                "--ignore-certificate-errors"
                            ]
                        }
                    },
                    "crawler_config": {}
                }
                tasks.append(http_client.post(url, json=payload))
            # Gather inside the client context so in-flight requests stay valid.
            results = await asyncio.gather(*tasks, return_exceptions=True)
        successes = sum(1 for r in results if hasattr(r, 'status_code') and r.status_code == 200)
        print(f"   β Spike completed: {successes}/{len(results)} successful")
        # Measure peak memory reached during the spike.
        await asyncio.sleep(2)
        peak_mem = max([s['memory_mb'] for s in stats_history]) if stats_history else baseline_mem
        print(f"   π Peak memory: {peak_mem:.1f} MB (+{peak_mem - baseline_mem:.1f} MB)")
        # Now go idle and wait for the janitor to reclaim idle browsers.
        print(f"\nβΈοΈ Going idle for {IDLE_TIME}s (janitor cleanup)...")
        print(f"   (Janitor runs every 60s, checking for idle browsers)")
        for elapsed in range(0, IDLE_TIME, 10):
            await asyncio.sleep(10)
            current_mem = stats_history[-1]['memory_mb'] if stats_history else 0
            print(f"   [{elapsed+10:3d}s] Memory: {current_mem:.1f} MB")
        # Stop the background monitor before reading the final sample.
        stop_monitoring.set()
        if monitor_thread:
            monitor_thread.join(timeout=2)
        # Analyze memory recovery relative to the spike.
        final_mem = stats_history[-1]['memory_mb'] if stats_history else 0
        recovery_mb = peak_mem - final_mem
        recovery_pct = (recovery_mb / (peak_mem - baseline_mem) * 100) if (peak_mem - baseline_mem) > 0 else 0
        print(f"\n{'='*60}")
        print(f"RESULTS:")
        print(f"{'='*60}")
        print(f"  Memory Journey:")
        print(f"    Baseline:  {baseline_mem:.1f} MB")
        print(f"    Peak:      {peak_mem:.1f} MB (+{peak_mem - baseline_mem:.1f} MB)")
        print(f"    Final:     {final_mem:.1f} MB (+{final_mem - baseline_mem:.1f} MB)")
        print(f"    Recovered: {recovery_mb:.1f} MB ({recovery_pct:.1f}%)")
        print(f"{'='*60}")
        # NOTE(review): the checks below are informational only — the test
        # returns 0 regardless of the warnings. (Removed a dead `passed = True`
        # local that was assigned but never read.)
        # Should have created some memory pressure.
        if peak_mem - baseline_mem < 100:
            print(f"β οΈ WARNING: Peak increase only {peak_mem - baseline_mem:.1f} MB (expected more browsers)")
        # Should recover most memory (within 100MB of baseline).
        if final_mem - baseline_mem > 100:
            print(f"β οΈ WARNING: Memory didn't recover well (still +{final_mem - baseline_mem:.1f} MB above baseline)")
        else:
            # Repaired extraction-garbled strings below: the ✅ emoji's
            # trailing 0x85 byte had split these f-strings across lines.
            print(f"✅ Good memory recovery!")
        # Baseline + 50MB tolerance.
        if final_mem - baseline_mem < 50:
            print(f"✅ Excellent cleanup (within 50MB of baseline)")
        print(f"✅ TEST PASSED")
        return 0
    except Exception as e:
        print(f"\nβ TEST ERROR: {e}")
        import traceback
        traceback.print_exc()
        return 1
    finally:
        # Always stop the monitor thread and tear the container down.
        stop_monitoring.set()
        if container:
            print(f"π Stopping container...")
            container.stop()
            container.remove()
if __name__ == "__main__":
    # Run the async test and propagate its result as the process exit code
    # (0 = passed/completed, 1 = unexpected error).
    exit_code = asyncio.run(main())
    exit(exit_code)
| {
"repo_id": "unclecode/crawl4ai",
"file_path": "deploy/docker/tests/test_7_cleanup.py",
"license": "Apache License 2.0",
"lines": 170,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
unclecode/crawl4ai:deploy/docker/tests/test_monitor_demo.py | #!/usr/bin/env python3
"""Quick test to generate monitor dashboard activity"""
import httpx
import asyncio
async def test_dashboard():
    """Generate activity against a local crawl4ai server so the monitor
    dashboard has data to display.

    Fires two /crawl requests, then reads the monitor health, request-log,
    and endpoint-stats APIs and prints a short summary. Assumes the server
    is already running on localhost:11235.
    """
    async with httpx.AsyncClient(timeout=30.0) as client:
        print("π Generating dashboard activity...")
        # Test 1: Simple crawl
        print("\n1οΈβ£ Running simple crawl...")
        r1 = await client.post(
            "http://localhost:11235/crawl",
            json={"urls": ["https://httpbin.org/html"], "crawler_config": {}}
        )
        print(f"   Status: {r1.status_code}")
        # Test 2: Multiple URLs
        print("\n2οΈβ£ Running multi-URL crawl...")
        r2 = await client.post(
            "http://localhost:11235/crawl",
            json={
                "urls": [
                    "https://httpbin.org/html",
                    "https://httpbin.org/json"
                ],
                "crawler_config": {}
            }
        )
        print(f"   Status: {r2.status_code}")
        # Test 3: Check monitor health
        print("\n3οΈβ£ Checking monitor health...")
        r3 = await client.get("http://localhost:11235/monitor/health")
        health = r3.json()
        print(f"   Memory: {health['container']['memory_percent']}%")
        print(f"   Browsers: {health['pool']['permanent']['active']}")
        # Test 4: Check requests
        print("\n4οΈβ£ Checking request log...")
        r4 = await client.get("http://localhost:11235/monitor/requests")
        reqs = r4.json()
        print(f"   Active: {len(reqs['active'])}")
        print(f"   Completed: {len(reqs['completed'])}")
        # Test 5: Check endpoint stats
        print("\n5οΈβ£ Checking endpoint stats...")
        r5 = await client.get("http://localhost:11235/monitor/endpoints/stats")
        stats = r5.json()
        for endpoint, data in stats.items():
            print(f"   {endpoint}: {data['count']} requests, {data['avg_latency_ms']}ms avg")
        # Repaired extraction-garbled string: the ✅ emoji's trailing 0x85
        # byte had split this string literal across lines.
        print("\n✅ Dashboard should now show activity!")
        print(f"\nπ Open: http://localhost:11235/dashboard")
if __name__ == "__main__":
    # Entry point: run the dashboard activity generator once.
    asyncio.run(test_dashboard())
| {
"repo_id": "unclecode/crawl4ai",
"file_path": "deploy/docker/tests/test_monitor_demo.py",
"license": "Apache License 2.0",
"lines": 49,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
unclecode/crawl4ai:docs/examples/dfs_crawl_demo.py | """
Simple demonstration of the DFS deep crawler visiting multiple pages.
Run with: python docs/examples/dfs_crawl_demo.py
"""
import asyncio
from crawl4ai.async_configs import BrowserConfig, CrawlerRunConfig
from crawl4ai.async_webcrawler import AsyncWebCrawler
from crawl4ai.cache_context import CacheMode
from crawl4ai.deep_crawling.dfs_strategy import DFSDeepCrawlStrategy
from crawl4ai.markdown_generation_strategy import DefaultMarkdownGenerator
async def main() -> None:
    """Depth-first crawl of the Python docs, printing one status line per page."""
    # Limit the walk to 50 internal pages, at most 3 levels deep.
    strategy = DFSDeepCrawlStrategy(max_depth=3, max_pages=50, include_external=False)
    run_cfg = CrawlerRunConfig(
        deep_crawl_strategy=strategy,
        cache_mode=CacheMode.BYPASS,
        markdown_generator=DefaultMarkdownGenerator(),
        stream=True,  # yield results as they arrive instead of collecting
    )
    seed_url = "https://docs.python.org/3/"  # Plenty of internal links
    async with AsyncWebCrawler(config=BrowserConfig(headless=True)) as crawler:
        async for result in await crawler.arun(url=seed_url, config=run_cfg):
            if result.success:
                status = "SUCCESS"
            else:
                status = "FAILED"
            depth = result.metadata.get("depth")
            print(f"[{status}] depth={depth} url={result.url}")
if __name__ == "__main__":
    # Entry point: run the DFS crawl demo once.
    asyncio.run(main())
| {
"repo_id": "unclecode/crawl4ai",
"file_path": "docs/examples/dfs_crawl_demo.py",
"license": "Apache License 2.0",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.