sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
streamlit/streamlit:e2e_playwright/st_altair_chart_title.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import altair as alt
import pandas as pd
import streamlit as st
df = pd.DataFrame({"x": [1, 2, 3, 4, 5], "y": [10, 20, 30, 40, 50]})
chart = (
alt.Chart(
data=df,
title="Lorem ipsum dolor sit amet, consectetur adipiscing elit. " * 5,
)
.mark_line()
.encode(x="x", y="y")
)
st.altair_chart(chart)
st.altair_chart(chart, width="content")
| {
"repo_id": "streamlit/streamlit",
"file_path": "e2e_playwright/st_altair_chart_title.py",
"license": "Apache License 2.0",
"lines": 27,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
streamlit/streamlit:e2e_playwright/st_altair_chart_title_test.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from playwright.sync_api import Page, expect
from e2e_playwright.conftest import ImageCompareFunction
def test_altair_chart_title_displays_correctly(
app: Page, assert_snapshot: ImageCompareFunction
):
expect(
app.get_by_test_id("stVegaLiteChart").locator("[role='graphics-document']")
).to_have_count(2)
charts = app.get_by_test_id("stVegaLiteChart")
expect(charts).to_have_count(2)
snapshot_names = [
"st_altair_chart_title-long_title_rendering_use_container_width_true",
"st_altair_chart_title-long_title_rendering_use_container_width_false",
]
for i, name in enumerate(snapshot_names):
# We use a higher threshold here to prevent some flakiness
# We should probably remove this once we have refactored the
# altair frontend component.
assert_snapshot(charts.nth(i), name=name, image_threshold=0.6)
| {
"repo_id": "streamlit/streamlit",
"file_path": "e2e_playwright/st_altair_chart_title_test.py",
"license": "Apache License 2.0",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
streamlit/streamlit:e2e_playwright/multipage_apps_v2/mpa_v2_anchors.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import streamlit as st
def page1():
st.title("Page1")
st.write("""
Instructions:
* Click the button below.
* A new tab will open (that's OK)
* The app in that tab should scroll down to 'My title 2'
""")
st.link_button("Open new tab", "/page2#my-title-2")
def page2():
st.title("Page2")
st.header("My title 1")
for _ in range(30):
st.text("blah " * 100)
st.header("My title 2")
for _ in range(30):
st.text("blah " * 100)
page = st.navigation(
[
st.Page(page1, title="Page1"),
st.Page(page2, title="Page2"),
]
)
page.run()
| {
"repo_id": "streamlit/streamlit",
"file_path": "e2e_playwright/multipage_apps_v2/mpa_v2_anchors.py",
"license": "Apache License 2.0",
"lines": 38,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
streamlit/streamlit:e2e_playwright/multipage_apps_v2/mpa_v2_anchors_test.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from playwright.sync_api import Page, expect
def test_anchor_scrolling(app: Page):
"""Test that anchor scrolling works correctly in multipage apps in a new
tab.
"""
# The app opens in a new tab, so we need to wait for that new page
# to be created.
with app.context.expect_page() as new_page_info:
app.get_by_text("Open new tab").click()
new_page = new_page_info.value
new_page.wait_for_load_state()
# Assert that the app in the new tab scrolls to the header `My title 2`
expect(new_page.get_by_text("My title 2")).to_be_in_viewport()
| {
"repo_id": "streamlit/streamlit",
"file_path": "e2e_playwright/multipage_apps_v2/mpa_v2_anchors_test.py",
"license": "Apache License 2.0",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
streamlit/streamlit:e2e_playwright/config/script_level_config.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import streamlit as st
st.markdown(f"Secret value: {st.secrets.get('FAKE_SECRET')}")
| {
"repo_id": "streamlit/streamlit",
"file_path": "e2e_playwright/config/script_level_config.py",
"license": "Apache License 2.0",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
streamlit/streamlit:e2e_playwright/config/script_level_config_test.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from playwright.sync_api import Page, expect
from e2e_playwright.shared.app_utils import get_markdown
def test_secret_value_display_and_styling(app: Page):
"""Test that the script-level config and secrets are loaded correctly."""
secret_element = get_markdown(app, "Secret value: fake")
# Check the secrets value:
expect(secret_element).to_be_visible()
# Check that its using the monospace font family (theme.font=monospace):
# This needs to be a regex because the actual font-family can be a list of fonts.
expect(secret_element).to_have_css("font-family", re.compile(r".*monospace.*"))
# Check that the app is in dark mode (theme.base=dark):
app_container = app.get_by_test_id("stApp")
expect(app_container).to_have_css("color-scheme", "dark")
# Check that the main menu is not visible (toolbarMode=minimal):
expect(app.get_by_test_id("stMainMenu")).not_to_be_attached()
| {
"repo_id": "streamlit/streamlit",
"file_path": "e2e_playwright/config/script_level_config_test.py",
"license": "Apache License 2.0",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
streamlit/streamlit:e2e_playwright/st_logo.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import streamlit as st
# Set random seed to always get the same results in the plotting demo
STATIC_ASSETS_DIR = Path(__file__).parent / "static"
def logo_no_sidebar_subtest():
st.logo(
STATIC_ASSETS_DIR / "streamlit-logo.png",
size="small",
icon_image=STATIC_ASSETS_DIR / "streamlit-mark.png",
)
def small_logo_w_sidebar_subtest():
st.logo(
STATIC_ASSETS_DIR / "streamlit-logo.png",
size="small",
icon_image=STATIC_ASSETS_DIR / "streamlit-mark.png",
)
st.sidebar.write("Hi")
def medium_logo_w_sidebar_subtest():
st.logo(
STATIC_ASSETS_DIR / "streamlit-logo.png",
size="medium",
icon_image=STATIC_ASSETS_DIR / "streamlit-mark.png",
)
st.sidebar.write("Hi")
def large_logo_w_sidebar_subtest():
st.logo(
STATIC_ASSETS_DIR / "streamlit-logo.png",
size="large",
icon_image=STATIC_ASSETS_DIR / "streamlit-mark.png",
)
st.sidebar.write("Hi")
def material_icon_logo_subtest():
st.logo(":material/rocket_launch:", size="medium")
st.sidebar.write("Hi")
def emoji_logo_subtest():
st.logo("🚀", size="medium")
st.sidebar.write("Hi")
# NOTE: Must be run last, since st.navigation will linger in all other tests.
def logo_w_sidebar_and_nav_subtest():
st.logo(
STATIC_ASSETS_DIR / "streamlit-logo.png",
size="small",
icon_image=STATIC_ASSETS_DIR / "streamlit-mark.png",
)
st.sidebar.write("Hi")
st.navigation(
[
st.Page("multipage_apps_v2/page_2.py"),
st.Page("multipage_apps_v2/page_3.py"),
]
)
SUBTESTS = {k: v for k, v in globals().items() if k.endswith("_subtest")}
subtest = SUBTESTS[st.selectbox("Test to run", SUBTESTS.keys())]
subtest()
| {
"repo_id": "streamlit/streamlit",
"file_path": "e2e_playwright/st_logo.py",
"license": "Apache License 2.0",
"lines": 67,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
streamlit/streamlit:e2e_playwright/st_logo_test.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from playwright.sync_api import Page, expect
from e2e_playwright.conftest import ImageCompareFunction
from e2e_playwright.shared.app_utils import select_selectbox_option
def test_logo_no_sidebar(
themed_app: Page, assert_snapshot: ImageCompareFunction
) -> None:
select_subtest(themed_app, "logo_no_sidebar_subtest")
expect(themed_app.get_by_test_id("stHeader")).to_be_visible()
expect(themed_app.get_by_test_id("stHeaderLogo")).to_be_visible()
assert_snapshot(themed_app.get_by_test_id("stHeader"), name="logo-no_sidebar")
def test_small_logo_w_sidebar(
themed_app: Page, assert_snapshot: ImageCompareFunction
) -> None:
select_subtest(themed_app, "small_logo_w_sidebar_subtest")
expect(themed_app.get_by_test_id("stSidebar")).to_be_visible()
expect(themed_app.get_by_test_id("stSidebarHeader")).to_be_visible()
expect(themed_app.get_by_test_id("stSidebarLogo")).to_be_visible()
# Ensure collapse button is shown:
themed_app.get_by_test_id("stSidebar").hover()
expect(themed_app.get_by_test_id("stSidebarCollapseButton")).to_be_visible()
assert_snapshot(
themed_app.get_by_test_id("stSidebarHeader"),
name="logo-small_w_sidebar_expanded",
)
themed_app.get_by_test_id("stSidebar").hover()
themed_app.get_by_test_id("stSidebarCollapseButton").locator("button").click()
expect(themed_app.get_by_test_id("stHeaderLogo")).to_be_visible()
expect(themed_app.get_by_test_id("stHeader")).to_be_visible()
assert_snapshot(
themed_app.get_by_test_id("stHeader"),
name="logo-small_w_sidebar_collapsed",
)
def test_medium_logo_w_sidebar(
themed_app: Page, assert_snapshot: ImageCompareFunction
) -> None:
select_subtest(themed_app, "medium_logo_w_sidebar_subtest")
expect(themed_app.get_by_test_id("stSidebar")).to_be_visible()
expect(themed_app.get_by_test_id("stSidebarLogo")).to_be_visible()
expect(themed_app.get_by_test_id("stSidebarHeader")).to_be_visible()
# Ensure collapse button is shown:
themed_app.get_by_test_id("stSidebar").hover()
expect(themed_app.get_by_test_id("stSidebarCollapseButton")).to_be_visible()
assert_snapshot(
themed_app.get_by_test_id("stSidebarHeader"),
name="logo-medium_w_sidebar_expanded",
)
themed_app.get_by_test_id("stSidebar").hover()
themed_app.get_by_test_id("stSidebarCollapseButton").locator("button").click()
expect(themed_app.get_by_test_id("stHeader")).to_be_visible()
expect(themed_app.get_by_test_id("stHeaderLogo")).to_be_visible()
assert_snapshot(
themed_app.get_by_test_id("stHeader"),
name="logo-medium_w_sidebar_collapsed",
)
def test_large_logo_w_sidebar(
themed_app: Page, assert_snapshot: ImageCompareFunction
) -> None:
select_subtest(themed_app, "large_logo_w_sidebar_subtest")
expect(themed_app.get_by_test_id("stSidebar")).to_be_visible()
expect(themed_app.get_by_test_id("stSidebarHeader")).to_be_visible()
expect(themed_app.get_by_test_id("stSidebarLogo")).to_be_visible()
# Ensure collapse button is shown:
themed_app.get_by_test_id("stSidebar").hover()
expect(themed_app.get_by_test_id("stSidebarCollapseButton")).to_be_visible()
assert_snapshot(
themed_app.get_by_test_id("stSidebarHeader"),
name="logo-large_w_sidebar_expanded",
)
themed_app.get_by_test_id("stSidebar").hover()
themed_app.get_by_test_id("stSidebarCollapseButton").locator("button").click()
expect(themed_app.get_by_test_id("stHeader")).to_be_visible()
expect(themed_app.get_by_test_id("stHeaderLogo")).to_be_visible()
assert_snapshot(
themed_app.get_by_test_id("stHeader"),
name="logo-large_w_sidebar_collapsed",
)
def test_logo_w_sidebar_and_nav(
themed_app: Page, assert_snapshot: ImageCompareFunction
) -> None:
select_subtest(themed_app, "logo_w_sidebar_and_nav_subtest")
expect(themed_app.get_by_test_id("stSidebar")).to_be_visible()
expect(themed_app.get_by_test_id("stSidebarHeader")).to_be_visible()
expect(themed_app.get_by_test_id("stSidebarLogo")).to_be_visible()
# Ensure collapse button is shown:
themed_app.get_by_test_id("stSidebar").hover()
expect(themed_app.get_by_test_id("stSidebarCollapseButton")).to_be_visible()
assert_snapshot(themed_app.get_by_test_id("stSidebarHeader"), name="logo-navbar")
def test_material_icon_logo(
themed_app: Page, assert_snapshot: ImageCompareFunction
) -> None:
"""Test that material icons render correctly as logos."""
select_subtest(themed_app, "material_icon_logo_subtest")
expect(themed_app.get_by_test_id("stSidebar")).to_be_visible()
expect(themed_app.get_by_test_id("stSidebarHeader")).to_be_visible()
expect(themed_app.get_by_test_id("stSidebarLogo")).to_be_visible()
# Material icon logos should NOT render as <img> elements
logo_element = themed_app.get_by_test_id("stSidebarLogo")
expect(logo_element.locator("img")).not_to_be_attached()
assert_snapshot(
themed_app.get_by_test_id("stSidebarHeader"),
name="logo-material_icon",
)
def test_emoji_logo(themed_app: Page, assert_snapshot: ImageCompareFunction) -> None:
"""Test that emojis render correctly as logos."""
select_subtest(themed_app, "emoji_logo_subtest")
expect(themed_app.get_by_test_id("stSidebar")).to_be_visible()
expect(themed_app.get_by_test_id("stSidebarHeader")).to_be_visible()
expect(themed_app.get_by_test_id("stSidebarLogo")).to_be_visible()
# Emoji logos should NOT render as <img> elements
logo_element = themed_app.get_by_test_id("stSidebarLogo")
expect(logo_element.locator("img")).not_to_be_attached()
assert_snapshot(
themed_app.get_by_test_id("stSidebarHeader"),
name="logo-emoji",
)
def select_subtest(app: Page, name: str) -> None:
select_selectbox_option(app, "Test to run", name)
| {
"repo_id": "streamlit/streamlit",
"file_path": "e2e_playwright/st_logo_test.py",
"license": "Apache License 2.0",
"lines": 130,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
streamlit/streamlit:e2e_playwright/mega_tester_app.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import importlib.util
import re
import sys
from datetime import date, datetime, time
from pathlib import Path
from typing import TYPE_CHECKING, Literal, cast
import numpy as np
import pandas as pd
import streamlit as st
import streamlit.components.v1 as components
if TYPE_CHECKING:
from collections.abc import Generator
from streamlit.elements.widgets.chat import ChatInputValue
from streamlit.navigation.page import StreamlitPage
_DUMMY_PDF = (
"%PDF-1.4\n1 0 obj\n<<\n/Type /Catalog\n/Pages 2 0 R\n>>\nendobj\n"
"2 0 obj\n<<\n/Type /Pages\n/Kids [3 0 R]\n/Count 1\n>>\nendobj\n"
"3 0 obj\n<<\n/Type /Page\n/Parent 2 0 R\n/MediaBox [0 0 612 792]\n"
"/Contents 4 0 R\n/Resources <<\n/Font <<\n/F1 5 0 R\n>>\n>>\n>>\nendobj\n"
"4 0 obj\n<<\n/Length 44\n>>\nstream\nBT\n/F1 12 Tf\n100 700 Td\n"
"(Hello PDF World!) Tj\nET\nendstream\nendobj\n"
"5 0 obj\n<<\n/Type /Font\n/Subtype /Type1\n/BaseFont /Helvetica\n>>\nendobj\n"
"xref\n0 6\n0000000000 65535 f\n0000000009 00000 n\n0000000058 00000 n\n"
"0000000115 00000 n\n0000000274 00000 n\n0000000373 00000 n\ntrailer\n"
"<<\n/Size 6\n/Root 1 0 R\n>>\nstartxref\n446\n%%EOF"
).encode("latin-1")
_MAGIC_COW = """\
______________
< Abracowdabra! >
--------------
\\ ^__^
\\ (oo)\\_______
(__)\\ )\\/\\
||----w |
|| ||
"""
_STATIC_DIR = Path(__file__).resolve().parent / "static"
def _minor_version() -> int:
match = re.match(r"^\d+\.(\d+)", st.__version__)
if match is None:
raise RuntimeError(f"Unable to parse Streamlit version: {st.__version__}")
return int(match.group(1))
def _module_available(module_name: str) -> bool:
try:
return importlib.util.find_spec(module_name) is not None
except ModuleNotFoundError:
return False
def _stream_chunks() -> Generator[str | pd.DataFrame, None, None]:
yield "lorem "
yield "ipsum "
yield pd.DataFrame({"a": [1, 2], "b": [3, 4]})
yield "dolor sit amet"
def _generate_sparkline_data(
length: int = 15, drift: float = 0.05, volatility: float = 10
) -> list[float]:
random_changes = np.random.default_rng(31).normal(
loc=drift, scale=volatility, size=length
)
initial_value = np.random.default_rng(32).normal(loc=50, scale=5)
data = initial_value + np.cumsum(random_changes)
return cast("list[float]", data.tolist())
def _render_sidebar_controls() -> tuple[str | None, bool]:
with st.sidebar:
st.subheader("Mega tester controls")
show_tooltips = st.toggle("Show tooltips", True, key="show_tooltips")
disabled = st.toggle("Disable widgets", False, key="disable_widgets")
st.toggle("Wide mode", True, key="wide_mode")
st.toggle("Navigation sections", True, key="nav_sections")
st.toggle("Many pages", False, key="many_pages")
st.toggle("Show more chart lines", False, key="more_lines")
st.toggle("Horizontal bars", False, key="horizontal_bars")
st.toggle("Range sliders", False, key="range_sliders")
st.toggle("Show chat input at bottom", False, key="chat_input_bottom")
st.divider()
st.write("Sidebar widgets")
st.selectbox(
"Sidebar animal",
["cat", "dog", "bird"],
key="sidebar_selectbox",
disabled=disabled,
)
st.button("Sidebar button", key="sidebar_button", disabled=disabled)
st.checkbox(
"Sidebar choice",
key="sidebar_checkbox",
disabled=disabled,
)
with st.expander("Sidebar expander"):
st.write("Sidebar expander content")
st.info("Sidebar info")
st.sidebar.write("Sidebar write API")
help_text = "Tooltip text" if show_tooltips else None
return help_text, disabled
def _render_packages_and_magic() -> None:
st.header("Packages and magic")
python_match = re.match(r"^\d+\.\d+\.\d+", sys.version)
if python_match is None:
raise RuntimeError(f"Unable to parse Python version: {sys.version}")
st.dataframe(
pd.DataFrame(
{
"package": ["Python", "Streamlit"],
"version": [python_match.group(0), st.__version__],
}
),
width="stretch",
)
optional_deps = [
"altair",
"graphviz",
"matplotlib",
"plotly",
"pydeck",
"seaborn",
"snowflake.snowpark",
"streamlit_pdf",
]
st.dataframe(
pd.DataFrame(
{
"module": optional_deps,
"status": [
"available" if _module_available(module_name) else "missing"
for module_name in optional_deps
],
}
),
width="stretch",
)
st.code(_MAGIC_COW)
st.write("Write API call")
"Magic bare expression"
def _render_map_and_media(minor_version: int) -> None:
st.header("Map and media elements")
rng = np.random.default_rng(7)
map_df = pd.DataFrame(rng.standard_normal((1000, 2)) / [50, 50] + [37.76, -122.4])
map_df.columns = ["lat", "lon"]
st.map(map_df)
st.image(
np.arange(10000, dtype=np.uint8).reshape(100, 100), caption="Generated image"
)
if minor_version >= 35:
logo = np.tile(np.array([[0, 255], [255, 0]], dtype=np.uint8), (40, 40))
if minor_version >= 39:
st.logo(logo, link="https://streamlit.io", size="large")
else:
st.logo(logo, link="https://streamlit.io")
t = np.linspace(0.0, 0.25, 2000, endpoint=False)
audio = 0.25 * np.sin(2 * np.pi * 440 * t)
st.audio(audio, sample_rate=8000)
example_video_path = _STATIC_DIR / "sintel-short.mp4"
if example_video_path.is_file():
st.video(example_video_path)
def _render_data_display(
minor_version: int, help_text: str | None, disabled: bool
) -> None:
st.header("Data display elements")
rng = np.random.default_rng(11)
chart_data = pd.DataFrame(rng.standard_normal((20, 3)), columns=["a", "b", "c"])
st.dataframe(chart_data, width="stretch")
selection_df = pd.DataFrame(rng.standard_normal((12, 5)), columns=list("abcde"))
st.dataframe(
selection_df,
key="selection_df",
on_select="rerun",
selection_mode=["multi-row", "multi-column", "multi-cell"],
width="stretch",
)
if minor_version >= 52:
st.dataframe(
pd.DataFrame({"col1": [1, None, 3], "col2": [None, "b", "c"]}),
placeholder="N/A",
width="stretch",
)
edited_df = st.data_editor(
pd.DataFrame(
[
{"command": "st.selectbox", "rating": 4, "is_widget": True},
{"command": "st.balloons", "rating": 5, "is_widget": False},
{"command": "st.time_input", "rating": 3, "is_widget": True},
]
),
num_rows="dynamic",
disabled=disabled,
key="data_editor",
)
st.download_button(
"Download data as CSV",
edited_df.to_csv(index=False).encode("utf-8"),
"df.csv",
"text/csv",
)
if hasattr(st, "column_config"):
st.subheader("Column config matrix")
column_config_df = pd.DataFrame(
{
"column": ["foo", "bar", "baz"],
"text": ["foo", "bar", "baz"],
"number": [1, 2, 3],
"checkbox": [True, False, True],
"selectbox": ["foo", "bar", "foo"],
"datetime": pd.to_datetime(
[
"2021-01-01 00:00:00",
"2021-01-02 00:00:00",
"2021-01-03 00:00:00",
]
),
"date": pd.to_datetime(["2021-01-01", "2021-01-02", "2021-01-03"]),
"time": [time(0, 0), time(1, 0), time(2, 0)],
"list": [[1, 2, 3], [4, 5, 6], [7, 8, 9]],
"link": [
"https://streamlit.io",
"https://streamlit.io",
"https://streamlit.io",
],
"image": [
"./app/static/test-streamlit-logo.png",
"./app/static/test-streamlit-logo.png",
"./app/static/test-streamlit-logo.png",
],
"area_chart": [[1, 2, 1], [2, 3, 1], [3, 1, 2]],
"line_chart": [[1, 2, 1], [2, 3, 1], [3, 1, 2]],
"bar_chart": [[1, 2, 1], [2, 3, 1], [3, 1, 2]],
"progress": [0.1, 0.2, 0.3],
"json": [
{"foo": "bar"},
{"numbers": [123, 4.56]},
{"level1": {"level2": {"level3": {"a": "b"}}}},
],
}
)
st.data_editor(
column_config_df,
key="column_config_editor",
column_config={
"column": st.column_config.Column(
"Column", help="A column tooltip", pinned=True
),
"text": st.column_config.TextColumn("TextColumn"),
"number": st.column_config.NumberColumn("NumberColumn"),
"checkbox": st.column_config.CheckboxColumn("CheckboxColumn"),
"selectbox": st.column_config.SelectboxColumn(
"SelectboxColumn", options=["foo", "bar", "baz"]
),
"datetime": st.column_config.DatetimeColumn("DatetimeColumn"),
"date": st.column_config.DateColumn("DateColumn"),
"time": st.column_config.TimeColumn("TimeColumn"),
"list": st.column_config.ListColumn("ListColumn"),
"link": st.column_config.LinkColumn("LinkColumn"),
"image": st.column_config.ImageColumn("ImageColumn"),
"area_chart": st.column_config.AreaChartColumn("AreaChartColumn"),
"line_chart": st.column_config.LineChartColumn("LineChartColumn"),
"bar_chart": st.column_config.BarChartColumn("BarChartColumn"),
"progress": st.column_config.ProgressColumn("ProgressColumn"),
"json": st.column_config.JsonColumn("JSONColumn"),
},
)
st.table(chart_data.head())
st.metric("Metric", 42, 2, help=help_text)
if minor_version >= 52:
st.metric("Metric without arrow", 100, -5, delta_arrow="off")
pos_col, neg_col, neutral_col = st.columns(3)
pos_col.metric("Metric positive", 1234, 123, help=help_text)
neg_col.metric("Metric negative", 1234, -123, help=help_text)
neutral_col.metric("Metric neutral", 1234, 123, delta_color="off", help=help_text)
left, middle, right = st.columns(3)
left.metric(
"Metric sparkline line",
1234,
123,
border=True,
chart_data=_generate_sparkline_data(),
chart_type="line",
help=help_text,
)
middle.metric(
"Metric sparkline area",
1234,
-123,
border=True,
chart_data=_generate_sparkline_data(),
chart_type="area",
help=help_text,
)
right.metric(
"Metric sparkline bar",
1234,
123,
border=True,
chart_data=_generate_sparkline_data(),
chart_type="bar",
delta_color="off",
help=help_text,
)
st.json(chart_data.head().to_dict(), expanded=2)
def _render_charts(minor_version: int) -> None:
st.header("Chart elements")
if _module_available("matplotlib"):
import matplotlib.pyplot as plt
arr = np.random.default_rng(17).normal(1, 1, size=100)
fig, ax = plt.subplots()
ax.hist(arr, bins=20)
ax.set_title("Histogram")
st.pyplot(fig)
if _module_available("matplotlib") and _module_available("seaborn"):
import matplotlib.pyplot as plt
import seaborn as sns # type: ignore[import-untyped]
fig, _ = plt.subplots()
sns.heatmap(pd.DataFrame([[1, 2], [3, 4]]), cmap="plasma")
st.pyplot(fig)
column_count = 10 if st.session_state.get("more_lines", False) else 3
columns = (
["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"]
if column_count == 10
else ["a", "b", "c"]
)
chart_data = pd.DataFrame(
np.random.default_rng(19).standard_normal((20, column_count)),
columns=columns,
)
st.line_chart(chart_data, x_label="x label", y_label="y label")
area_stack = cast(
"Literal['normalize', 'center'] | bool",
st.segmented_control(
"Area chart stack",
[True, False, "normalize", "center"],
key="area_stack",
),
)
st.area_chart(chart_data, x_label="x label", y_label="y label", stack=area_stack)
bar_stack = cast(
"Literal['normalize', 'center'] | bool",
st.segmented_control(
"Bar chart stack",
[True, False, "normalize", "center"],
key="bar_stack",
),
)
st.bar_chart(
chart_data,
x_label="x label",
y_label="y label",
horizontal=st.session_state.get("horizontal_bars", False),
stack=bar_stack,
)
if _module_available("altair"):
import altair as alt
st.altair_chart(
alt.Chart(chart_data)
.mark_circle()
.encode(x="a", y="b", size="c", color="c", tooltip=["a", "b", "c"]),
use_container_width=True,
)
if minor_version >= 27:
st.scatter_chart(chart_data, x_label="x label", y_label="y label")
st.vega_lite_chart(
chart_data,
{
"mark": {"type": "circle", "tooltip": True},
"encoding": {
"x": {"field": "a", "type": "quantitative"},
"y": {"field": "b", "type": "quantitative"},
"size": {"field": "c", "type": "quantitative"},
"color": {"field": "c", "type": "quantitative"},
},
},
)
if _module_available("plotly"):
import plotly.graph_objects as go
fig = go.Figure()
fig.add_scatter(y=[1, 3, 2, 4], mode="lines", name="Demo")
st.plotly_chart(fig, use_container_width=True)
if _module_available("pydeck"):
import pydeck as pdk
points = pd.DataFrame(
np.random.default_rng(23).standard_normal((1000, 2)) / [50, 50]
+ [37.76, -122.4],
columns=["lat", "lon"],
)
st.pydeck_chart(
pdk.Deck(
map_style=None,
initial_view_state=pdk.ViewState(
latitude=37.76,
longitude=-122.4,
zoom=11,
),
layers=[
pdk.Layer(
"ScatterplotLayer",
data=points,
get_position="[lon, lat]",
get_radius=200,
)
],
)
)
if _module_available("graphviz"):
import graphviz
graph = graphviz.Digraph()
graph.edge("run", "kernel")
graph.edge("kernel", "sleep")
st.graphviz_chart(graph)
st.graphviz_chart("""
digraph {
start -> process
process -> decision
decision -> finish
}
""")
def _render_custom_ui(minor_version: int) -> None:
st.header("Custom UI elements")
components.html("<b style='color: green'>Bold green HTML text</b>", height=50)
st.markdown(
"<b style='color: green'>Unsafe markdown HTML</b>", unsafe_allow_html=True
)
components.html("<button>Click me</button>", height=50)
if (
minor_version >= 51
and hasattr(st, "components")
and hasattr(st.components, "v2")
and hasattr(st.components.v2, "component")
):
inline_component = st.components.v2.component(
"inline_links",
js="""
export default function(component) {
const { setTriggerValue } = component;
const links = document.querySelectorAll('a[href="#"]');
links.forEach((link) => {
link.onclick = () => {
setTriggerValue("clicked", link.innerHTML);
};
});
}
""",
)
click_result = inline_component(on_clicked_change=lambda: None)
st.markdown("Click [one](#) or [two](#) inline link.")
clicked_link = click_result.get("clicked")
if clicked_link:
st.write(f"Clicked link: {clicked_link}")
@st.dialog(
"Test dialog",
width=cast(
"Literal['small', 'medium', 'large']",
st.session_state.get("dialog_width", "small"),
),
dismissible=st.session_state.get("dialog_dismissible", True),
)
def _dialog(item: str) -> None:
reason = st.text_input("Dialog reason", key="dialog_reason")
if st.button("Submit dialog"):
st.session_state.vote = {"item": item, "reason": reason}
st.rerun()
def _render_inputs(minor_version: int, help_text: str | None, disabled: bool) -> None:
    """Render every input widget the app exercises, in a fixed order.

    The widget keys and the rendering order are load-bearing: the Playwright
    e2e tests look elements up by key/label and by position, so do not
    reorder or rename anything here without updating the tests.

    Args:
        minor_version: Running Streamlit minor version; gates widgets and
            parameters that only exist on newer versions.
        help_text: Optional tooltip text applied to widgets that accept it.
        disabled: Whether widgets should render in their disabled state.
    """
    st.header("Input widgets")
    # --- basic text/number/slider inputs ---
    st.text_input(
        "Textbox",
        key="textbox",
        help=help_text,
        disabled=disabled,
    )
    st.number_input(
        "Number",
        key="number",
        help=help_text,
        disabled=disabled,
    )
    # Sidebar toggle (set elsewhere) switches sliders between single value
    # and range mode.
    range_slider = st.session_state.get("range_sliders", False)
    st.slider(
        "Slider",
        value=(30, 60) if range_slider else 30,
        key="slider",
        help=help_text,
        disabled=disabled,
    )
    if minor_version >= 46:
        # width= is only passed on versions where the guard allows it.
        st.slider(
            "Slider narrow",
            width=200,
            key="slider_narrow",
            help=help_text,
            disabled=disabled,
        )
    # --- buttons in all supported variants ---
    if st.button(
        "Button",
        key="button",
        icon=":material/home:",
        help=help_text,
        disabled=disabled,
    ):
        st.write("You pressed the default button")
    if st.button(
        "Button primary",
        key="button_primary",
        type="primary",
        icon=":material/home:",
        help=help_text,
        disabled=disabled,
    ):
        st.write("You pressed the primary button")
    if st.button(
        "Button tertiary",
        key="button_tertiary",
        type="tertiary",
        icon=":material/home:",
        help=help_text,
        disabled=disabled,
    ):
        st.write("You pressed the tertiary button")
    if minor_version >= 52:
        st.button(
            "Shortcut button",
            shortcut="k",
            key="shortcut_button",
            disabled=disabled,
        )
    st.download_button(
        "Download hello",
        data="Hello!",
        icon=":material/home:",
        help=help_text,
        disabled=disabled,
    )
    st.link_button(
        "Link button in inputs",
        "https://streamlit.io",
        icon=":material/home:",
        help=help_text,
        disabled=disabled,
    )
    if hasattr(st, "page_link"):
        st.page_link(
            "https://streamlit.io",
            label="Page link",
            icon=":material/home:",
            help=help_text,
            disabled=disabled,
        )
    # --- boolean and choice widgets ---
    st.checkbox("Checkbox", key="checkbox", help=help_text, disabled=disabled)
    toggle_value = st.toggle("Toggle", key="toggle", help=help_text, disabled=disabled)
    st.write(f"Toggle state is {toggle_value}")
    st.radio(
        "Radio",
        ["cat", "dog"],
        key="radio",
        help=help_text,
        disabled=disabled,
    )
    st.radio(
        "Horizontal option",
        ["cat", "dog"],
        key="radio_horizontal",
        horizontal=True,
        help=help_text,
        disabled=disabled,
    )
    accept_new_options = st.toggle(
        "Accept new options",
        key="accept_new_options",
    )
    # accept_new_options= is only supported on the newer branch; the else
    # branch renders the same widgets without that parameter.
    if minor_version >= 45:
        st.selectbox(
            "Selectbox",
            ["cat", "dog"],
            accept_new_options=accept_new_options,
            key="selectbox",
            help=help_text,
            disabled=disabled,
        )
        st.multiselect(
            "Multiselect",
            ["cat", "dog"],
            accept_new_options=accept_new_options,
            key="multiselect",
            help=help_text,
            disabled=disabled,
        )
    else:
        st.selectbox(
            "Selectbox",
            ["cat", "dog"],
            key="selectbox",
            help=help_text,
            disabled=disabled,
        )
        st.multiselect(
            "Multiselect",
            ["cat", "dog"],
            key="multiselect",
            help=help_text,
            disabled=disabled,
        )
    st.select_slider(
        "Select slider",
        ["xsmall", "small", "medium", "large", "xlarge"],
        value=("small", "large") if range_slider else "small",
        key="select_slider",
        help=help_text,
        disabled=disabled,
    )
    st.text_area(
        "Text area",
        key="text_area",
        help=help_text,
        disabled=disabled,
    )
    # --- date/time widgets with fixed values for reproducible screenshots ---
    st.date_input(
        "Date input",
        value=date(2024, 1, 1),
        key="date_input",
        help=help_text,
        disabled=disabled,
    )
    st.time_input(
        "Time input",
        value=time(8, 30),
        key="time_input",
        help=help_text,
        disabled=disabled,
    )
    if minor_version >= 52:
        st.datetime_input(
            "Datetime input",
            value=datetime(2024, 1, 1, 8, 30),
            key="datetime_input",
            help=help_text,
            disabled=disabled,
        )
    # --- file uploader; newer versions expose single/multiple/directory mode ---
    if minor_version >= 49:
        upload_mode = cast(
            "Literal['directory'] | bool",
            st.segmented_control(
                "File uploader mode",
                [False, True, "directory"],
                default=False,
                key="file_uploader_mode",
            ),
        )
        if upload_mode is False:
            st.file_uploader("File input", key="file_input", disabled=disabled)
        else:
            # upload_mode is True (multiple files) or "directory" here.
            st.file_uploader(
                "File input",
                accept_multiple_files=upload_mode,
                key="file_input",
                disabled=disabled,
            )
    else:
        st.file_uploader("File input", key="file_input", disabled=disabled)
    st.color_picker(
        "Color picker",
        key="color_picker",
        help=help_text,
        disabled=disabled,
    )
    # --- form with a single text input and submit button ---
    with st.form("form"):
        st.text_input(
            "Form text",
            key="form_text",
            help=help_text,
            disabled=disabled,
        )
        submitted = st.form_submit_button(
            "Submit form", help=help_text, disabled=disabled
        )
        if submitted:
            st.write("Form submitted")
    if st.button("Trigger rerun", key="rerun"):
        st.rerun()
    # These two widgets feed the st.dialog decorator arguments (read from
    # session state when the module executes).
    st.segmented_control(
        "Dialog width",
        ["small", "medium", "large"],
        default="small",
        key="dialog_width",
    )
    st.toggle(
        "Dialog dismissible",
        True,
        key="dialog_dismissible",
    )
    # --- chat input: either pinned to the bottom or inside a container,
    # with feature parameters gated per version ---
    show_chat_input_bottom = st.session_state.get("chat_input_bottom", False)
    prompt: str | ChatInputValue | None = None
    if show_chat_input_bottom:
        if minor_version >= 52:
            prompt = st.chat_input(
                "Chat input",
                key="chat_input",
                accept_file="multiple",
                accept_audio=True,
                disabled=disabled,
            )
        elif minor_version >= 43:
            prompt = st.chat_input(
                "Chat input",
                key="chat_input",
                accept_file="multiple",
                disabled=disabled,
            )
        else:
            prompt = st.chat_input(
                "Chat input",
                key="chat_input",
                disabled=disabled,
            )
    else:
        chat_input_target = st.container()
        if minor_version >= 52:
            prompt = chat_input_target.chat_input(
                "Chat input",
                key="chat_input",
                accept_file="multiple",
                accept_audio=True,
                disabled=disabled,
            )
        elif minor_version >= 43:
            prompt = chat_input_target.chat_input(
                "Chat input",
                key="chat_input",
                accept_file="multiple",
                disabled=disabled,
            )
        else:
            prompt = chat_input_target.chat_input(
                "Chat input",
                key="chat_input",
                disabled=disabled,
            )
    if prompt:
        # prompt may be a plain string or a ChatInputValue with a .text field.
        st.chat_message("user").write(getattr(prompt, "text", str(prompt)))
        st.chat_message("assistant").write("Assistant response")
    if st.button("Write stream", key="write_stream"):
        st.write_stream(_stream_chunks)
    # Dialog open button is replaced by the result text once a vote exists.
    if "vote" not in st.session_state:
        if st.button("Open dialog item 1", key="dialog_open"):
            _dialog("1")
    else:
        st.write(
            f"Dialog result item={st.session_state.vote['item']} "
            f"reason={st.session_state.vote['reason']}"
        )
    # --- newer selection/feedback/media input widgets, version-gated ---
    if minor_version >= 37:
        st.feedback("thumbs", key="feedback_thumbs", disabled=disabled)
        st.feedback("faces", key="feedback_faces", disabled=disabled)
        st.feedback("stars", key="feedback_stars", disabled=disabled)
    if minor_version >= 40:
        pills_value = st.pills(
            "Pills",
            ["North", "East", "South", "West"],
            selection_mode="multi",
            key="pills",
            disabled=disabled,
            help=help_text,
        )
        st.write(f"Pills value is {pills_value}")
        segmented_value = st.segmented_control(
            "Segmented",
            ["North", "East", "South", "West"],
            selection_mode="multi",
            key="segmented",
            disabled=disabled,
            help=help_text,
        )
        st.write(f"Segmented value is {segmented_value}")
        audio_value = st.audio_input(
            "Audio input",
            key="audio_input",
            disabled=disabled,
        )
        st.write(f"Audio input is {audio_value}")
    # Camera input is opt-in via a toggle because it requests device access.
    if hasattr(st, "camera_input") and st.toggle(
        "Show camera input", False, key="show_camera_input"
    ):
        camera_value = st.camera_input(
            "Camera input",
            key="camera_input",
            disabled=disabled,
        )
        st.write(f"Camera input is {camera_value}")
def _render_text_elements(minor_version: int, help_text: str | None) -> None:
    """Render all text/status elements: headers, markdown features, code,
    alerts, effects, and version-gated extras (link button, badge,
    text alignment).

    Args:
        minor_version: Running Streamlit minor version; gates newer APIs.
        help_text: Optional tooltip applied to elements that accept ``help``.
    """
    st.header("Text elements")
    st.title("Title with tooltip", help=help_text)
    st.markdown("Markdown", help=help_text)
    # One line exercising most inline markdown features at once.
    st.markdown(
        "Markdown features: **bold** *italic* ~strikethrough~ [link](https://streamlit.io) "
        "`code` $a=b$ 🐶 :cat: :material/home: :streamlit: <- -> <-> -- >= <= ~= :small[small] $$a = b$$"
    )
    st.markdown("""
Text colors:
:blue[blue] :green[green] :orange[orange] :red[red] :violet[violet] :gray[gray] :rainbow[rainbow] :primary[primary]
:blue-background[blue] :green-background[green] :orange-background[orange] :red-background[red]
:violet-background[violet] :gray-background[gray] :rainbow-background[rainbow] :primary-background[primary]
:blue-badge[blue] :green-badge[green] :orange-badge[orange] :red-badge[red] :violet-badge[violet]
:gray-badge[gray] :primary-badge[primary]
""")
    st.header("Header")
    # One header per supported divider color.
    for color in [
        "blue",
        "green",
        "yellow",
        "orange",
        "red",
        "violet",
        "gray",
        "rainbow",
    ]:
        st.header(f"Header with {color} divider", divider=color, help=help_text)
    st.subheader("Subheader")
    st.caption("Caption", help=help_text)
    st.code("a = 1234")
    st.code("a = 1234", line_numbers=True)
    st.code(
        'a = "This is a very very very very very very very very very very very very long string"',
        wrap_lines=True,
    )
    st.text("Text", help=help_text)
    if minor_version >= 52:
        st.markdown("Centered markdown", text_alignment="center")
        st.caption("Centered caption", text_alignment="center")
        st.text("Centered text", text_alignment="center", width="stretch")
    st.latex(r"\int a x^2 \,dx", help=help_text)
    st.divider()
    # Alert elements, each with and without an icon.
    st.warning("Warning")
    st.warning("Warning with icon", icon=":material/home:")
    st.error("Error")
    st.error("Error with icon", icon=":material/home:")
    st.info("Info")
    st.info("Info with icon", icon=":material/home:")
    st.success("Success")
    st.success("Success with icon", icon=":material/home:")
    # The single intentional exception the e2e tests expect to find.
    st.exception(RuntimeError("Example exception"))
    if st.button("Run balloons", key="balloons"):
        st.balloons()
    if st.button("Run snow", key="snow"):
        st.snow()
    if minor_version >= 27:
        st.link_button("Link button", "https://streamlit.io")
    if minor_version >= 44:
        st.badge("Badge", icon=":material/check:", color="green")
def _render_blocks(minor_version: int, help_text: str | None, disabled: bool) -> None:
    """Render layout/container elements: columns, tabs, expander, containers,
    popover, empty, and version-gated fragment/horizontal/space/content-width
    features.

    Args:
        minor_version: Running Streamlit minor version; gates newer layouts.
        help_text: Optional tooltip for elements that accept ``help``.
        disabled: Disabled state for the popover trigger.
    """
    st.header("Block elements")
    left, right = st.columns(2)
    left.write("Left column")
    right.write("Right column")
    bordered_left, bordered_right = st.columns(2, border=True)
    bordered_left.write("Bordered left column")
    bordered_right.write("Bordered right column")
    tab_a, tab_b = st.tabs(["Tab A", "Tab B"])
    tab_a.write("Tab A content")
    tab_b.write("Tab B content")
    with st.expander("Expander"):
        st.write("Expander content")
    st.container().write("Container content")
    st.container(border=True).write("Bordered container content")
    if minor_version >= 32:
        with st.popover("Popover", help=help_text, disabled=disabled):
            st.write("Popover content")
    st.empty().write("Empty content")
    if minor_version >= 37:
        # Fragment defined inline so it's only registered on supporting versions.
        @st.fragment
        def _fragment() -> None:
            st.button("Fragment button", key="fragment_button")
            st.write("Fragment content")
        _fragment()
    if minor_version >= 48:
        horizontal = st.container(horizontal=True, horizontal_alignment="right")
        for idx in range(3):
            horizontal.button(
                f"Horizontal button {idx + 1}", key=f"horizontal_button_{idx}"
            )
    if minor_version >= 51:
        with st.container(horizontal=True):
            st.button("Left", key="space_left")
            # Flexible spacer pushes the two buttons to opposite edges.
            st.space("stretch")
            st.button("Right", key="space_right")
    if minor_version >= 52:
        with st.container(width="content"):
            st.write("Content-width container")
def _render_navigation(minor_version: int) -> None:
    """Render st.navigation with a configurable position and page set.

    Session-state toggles (set in the sidebar) pick between a small and a
    large page set ("many_pages") and between sectioned and flat navigation
    ("nav_sections"). The "top" position is only offered on versions that
    support it.

    Args:
        minor_version: Running Streamlit minor version; gates top navigation.
    """
    st.header("Navigation elements")
    nav_positions: list[Literal["hidden", "sidebar", "top"]] = ["hidden", "sidebar"]
    if minor_version >= 46:
        nav_positions.append("top")
    position = st.selectbox(
        "Navigation position",
        nav_positions,
        index=nav_positions.index("sidebar"),
        key="navigation_position",
    )
    # Trivial page bodies; each page only writes its own name so tests can
    # assert which page is active.
    def _nav_home() -> None:
        st.write("Home page")
    def _nav_about() -> None:
        st.write("About page")
    def _nav_contact() -> None:
        st.write("Contact page")
    def _nav_logs() -> None:
        st.write("Logs page")
    def _nav_data_visualizations() -> None:
        st.write("Data visualizations page")
    def _nav_analytics() -> None:
        st.write("Analytics page")
    def _nav_calculator() -> None:
        st.write("Calculator page")
    def _nav_editor() -> None:
        st.write("Editor page")
    def _nav_viewer() -> None:
        st.write("Viewer page")
    def _nav_converter() -> None:
        st.write("Converter page")
    def _nav_import() -> None:
        st.write("Import page")
    def _nav_export() -> None:
        st.write("Export page")
    def _nav_transform() -> None:
        st.write("Transform page")
    def _nav_settings() -> None:
        st.write("Settings page")
    def _nav_users() -> None:
        st.write("Users page")
    many_pages = st.session_state.get("many_pages", False)
    nav_sections = st.session_state.get("nav_sections", True)
    pages: dict[str, list[StreamlitPage]]
    if many_pages:
        pages = {
            "General": [
                st.Page(_nav_home, title="Home", icon=":material/home:"),
                st.Page(
                    _nav_data_visualizations,
                    title="Data visualizations",
                    icon=":material/monitoring:",
                ),
                st.Page(_nav_analytics, title="Analytics", icon=":material/analytics:"),
            ],
            "Tools": [
                st.Page(
                    _nav_calculator, title="Calculator", icon=":material/calculate:"
                ),
                st.Page(_nav_editor, title="Editor", icon=":material/edit:"),
                st.Page(_nav_viewer, title="Viewer", icon=":material/visibility:"),
                st.Page(
                    _nav_converter, title="Converter", icon=":material/swap_horiz:"
                ),
            ],
            "Data": [
                st.Page(_nav_import, title="Import", icon=":material/file_upload:"),
                st.Page(_nav_export, title="Export", icon=":material/file_download:"),
                st.Page(_nav_transform, title="Transform", icon=":material/transform:"),
            ],
            "Admin": [
                st.Page(_nav_settings, title="Settings", icon=":material/settings:"),
                st.Page(_nav_users, title="Users", icon=":material/people:"),
                st.Page(_nav_logs, title="Logs", icon=":material/history:"),
            ],
        }
    else:
        pages = {
            "General": [
                st.Page(_nav_home, title="Home", icon=":material/home:"),
                st.Page(_nav_about, title="About", icon=":material/info:"),
            ],
            "Admin": [
                st.Page(_nav_contact, title="Contact", icon=":material/contact_mail:")
            ],
        }
    navigation_pages: list[StreamlitPage] | dict[str, list[StreamlitPage]]
    if nav_sections:
        navigation_pages = pages
    else:
        # Flatten section dict into a single list to disable section headers.
        navigation_pages = [page for section in pages.values() for page in section]
    nav = st.navigation(navigation_pages, position=position)
    nav.run()
def _render_context() -> None:
    """Dump every public ``st.context`` attribute as a JSON object."""
    st.header("Streamlit context")

    def _describe(name: str) -> str:
        # Some context attributes can raise depending on the environment;
        # show the error text instead of failing the whole app run.
        try:
            return str(getattr(st.context, name))
        except Exception as ex:
            return f"Error: {ex}"

    public_names = [name for name in dir(st.context) if not name.startswith("_")]
    st.json({name: _describe(name) for name in public_names})
def _render_authentication(minor_version: int) -> None:
    """Render the login button on versions that ship authentication.

    Args:
        minor_version: Running Streamlit minor version; anything below 42
            skips this section entirely.
    """
    if minor_version < 42:
        return
    st.header("Authentication")
    # Button check runs first; hasattr guards against builds without st.login.
    if st.button("Login", key="login_button") and hasattr(st, "login"):
        try:
            st.login()
        except Exception as ex:
            # Login needs configured auth; degrade to an info message in tests.
            st.info(f"Login unavailable in this environment: {ex}")
def _render_utilities() -> None:
    """Render utility elements: echo, help, toast, status, progress, spinner,
    query params, and session-state round-trips. Most are button-triggered so
    the e2e tests can drive them on demand."""
    st.header("Utility elements")
    with st.echo():
        st.write("Echo")
    st.help(st.write)
    duration = cast(
        "Literal['short', 'long', 'infinite']",
        st.segmented_control(
            "Toast duration",
            ["short", "long", "infinite"],
            default="short",
            key="toast_duration",
        ),
    )
    if st.button("Show toast", key="toast_button"):
        st.toast("Hello there!", icon="🎈", duration=duration)
    if st.button("Run status", key="status_button"):
        with st.status("Working...", expanded=True) as status:
            st.write("Status step 1")
            st.write("Status step 2")
            status.update(label="Done", state="complete")
    if st.button("Run progress", key="progress_button"):
        progress_bar = st.progress(0)
        # Steps 0, 25, 50, 75, 100.
        for percent_complete in range(0, 101, 25):
            progress_bar.progress(percent_complete)
    if st.button("Run spinner", key="spinner_button"):
        with st.spinner("Wait!", show_time=True):
            st.write("Spinner finished")
    if st.button("Add query params", key="query_params_button"):
        st.query_params["test"] = "1"
        st.query_params["bvt"] = "true"
    st.write("Query params", dict(st.query_params))
    # Session-state round-trip: default once, then read back.
    st.session_state.setdefault("key", True)
    st.write(st.session_state["key"])
def _render_pdf(minor_version: int) -> None:
    """Render the st.pdf element (1.49+), opt-in via a toggle.

    Shows an uploaded PDF when one is provided, otherwise falls back to the
    module-level ``_DUMMY_PDF`` fixture.
    """
    if minor_version < 49 or not hasattr(st, "pdf"):
        return
    # PDF rendering is heavy; keep it off unless explicitly enabled.
    show_pdf = st.toggle("Show PDF", False, key="show_pdf")
    if not show_pdf:
        return
    st.header("PDF element")
    uploaded = st.file_uploader("Upload a PDF", type="pdf", key="pdf_upload")
    st.pdf(uploaded if uploaded is not None else _DUMMY_PDF, height=240)
def _render_stop_section() -> None:
    """Render the st.stop demo: when the button is pressed, the script halts
    here, so "Text after stop" disappears on that rerun."""
    st.header("Stop behavior")
    st.text("Text before stop")
    if st.button("Run st.stop", key="stop_button"):
        st.stop()
    # Only reached when the stop button was not pressed this run.
    st.text("Text after stop")
# --- main script body: configure the page, then render every section in a
# fixed order. The e2e tests rely on this ordering; do not reorder calls. ---
# Layout follows the sidebar "wide_mode" toggle (default wide).
layout_mode: Literal["wide", "centered"] = (
    "wide" if st.session_state.get("wide_mode", True) else "centered"
)
st.set_page_config(
    page_title="Mega tester app",
    page_icon="🎈",
    layout=layout_mode,
    initial_sidebar_state="expanded",
)
st.title("🎈 Mega tester app")
# Version gates and shared widget options (help tooltip / disabled state)
# are computed once and threaded through the section renderers.
minor = _minor_version()
help_text, disabled = _render_sidebar_controls()
_render_packages_and_magic()
_render_map_and_media(minor)
_render_data_display(minor, help_text, disabled)
_render_charts(minor)
_render_custom_ui(minor)
_render_inputs(minor, help_text, disabled)
_render_text_elements(minor, help_text)
_render_blocks(minor, help_text, disabled)
_render_navigation(minor)
_render_context()
_render_authentication(minor)
_render_utilities()
_render_pdf(minor)
# Must come last: pressing its button stops script execution here.
_render_stop_section()
| {
"repo_id": "streamlit/streamlit",
"file_path": "e2e_playwright/mega_tester_app.py",
"license": "Apache License 2.0",
"lines": 1082,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
streamlit/streamlit:e2e_playwright/mega_tester_app_test.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import re
from typing import TYPE_CHECKING
import pytest
from playwright.sync_api import Page, expect
from e2e_playwright.conftest import IframedPage, rerun_app, wait_for_app_run, wait_until
from e2e_playwright.shared.app_utils import (
click_button,
click_checkbox,
click_toggle,
expect_no_skeletons,
fill_number_input,
get_checkbox,
get_number_input,
get_text_input,
open_popover,
select_radio_option,
select_selectbox_option,
)
if TYPE_CHECKING:
from playwright.sync_api import ConsoleMessage, FrameLocator
from e2e_playwright.shared.app_target import AppTarget
def is_expected_error(
    msg: ConsoleMessage, browser_name: str, *, uses_csp: bool
) -> bool:
    """Return True for known-benign console errors that tests should ignore."""
    text = msg.text
    url = msg.location["url"]
    is_firefox = browser_name == "firefox"

    # Mapbox telemetry endpoint is unreachable in the test environment.
    if (
        text == "Failed to load resource: net::ERR_CONNECTION_REFUSED"
        and "events.mapbox.com" in url
    ):
        return True

    # Firefox + pydeck WebGL rendering quirk under playwright.
    if is_firefox and re.search(r"deck:.*is null undefined", text):
        return True

    # Firefox reports a blocked script-src eval when CSP is active.
    # TODO(lukasmasuch): Investigate why firefox is running into this eval issue.
    blocked_eval = (
        "settings blocked a JavaScript eval (script-src) from being executed" in text
    )
    if blocked_eval and is_firefox and uses_csp:
        return True

    # Webkit fails to load blob: resources when CSP is active.
    # TODO(lukasmasuch): Investigate why webkit is running into this blob: issue.
    return (
        browser_name == "webkit"
        and uses_csp
        and text == "Failed to load resource"
        and re.match(r"blob:https?://", url) is not None
    )
@pytest.mark.external_test(upload_test_assets=True)
def test_no_console_errors(app_target: AppTarget, browser_name: str) -> None:
    """Test that the app does not log any console errors."""
    unexpected_errors: list[dict[str, object]] = []

    def record_error(msg: ConsoleMessage) -> None:
        # Possible message types: "log", "debug", "info", "error", "warning", ...
        # Only "error" messages matter, and known-benign ones are filtered out.
        if msg.type != "error" or is_expected_error(msg, browser_name, uses_csp=False):
            return
        unexpected_errors.append(
            {
                "message": msg.text,
                "url": msg.location["url"],
                "line": msg.location["lineNumber"],
                "column": msg.location["columnNumber"],
            }
        )

    app_target.page.on("console", record_error)
    # Make sure that all elements are rendered and no skeletons are shown:
    expect_no_skeletons(app_target.locator_context, timeout=25000)
    # There should be only one exception in the app:
    expect(app_target.get_by_test_id("stException")).to_have_count(1)
    # Check that title is visible:
    expect(app_target.get_by_text("🎈 Mega tester app", exact=True)).to_be_visible()
    # There should be no unexpected console errors:
    assert not unexpected_errors, "Console errors were logged " + str(unexpected_errors)
def test_mega_tester_app_in_iframe(iframed_app: IframedPage, browser_name: str) -> None:
    """Test that the mega tester app can be loaded within an iframe with CSP."""
    console_errors = []
    def on_console_message(msg: ConsoleMessage) -> None:
        # Possible message types: "log", "debug", "info", "error", "warning", ...
        # Collect only unexpected errors; uses_csp=True widens the allow-list.
        if msg.type == "error" and not is_expected_error(
            msg, browser_name, uses_csp=True
        ):
            # Each console message has text, location, etc.
            console_errors.append(
                {
                    "message": msg.text,
                    "url": msg.location["url"],
                    "line": msg.location["lineNumber"],
                    "column": msg.location["columnNumber"],
                }
            )
    page: Page = iframed_app.page
    # Register the listener before loading the app so no errors are missed.
    page.on("console", on_console_message)
    frame_locator: FrameLocator = iframed_app.open_app(None)
    wait_for_app_run(frame_locator)
    page.wait_for_load_state()
    # Make sure that all elements are rendered and no skeletons are shown:
    expect_no_skeletons(frame_locator, timeout=25000)
    # Check that title is visible:
    expect(frame_locator.get_by_text("🎈 Mega tester app", exact=True)).to_be_visible()
    expect(frame_locator.get_by_test_id("stException")).to_have_count(1)
    # Check that there are no dialogs (e.g. with errors) visible:
    expect(frame_locator.get_by_test_id("stDialog")).to_have_count(0)
    # There should be no unexpected console errors:
    assert not console_errors, "Console errors were logged " + str(console_errors)
@pytest.mark.performance
@pytest.mark.repeat(5)  # only repeat 5 times since otherwise it would take too long
def test_mega_tester_app_rendering_performance(app: Page) -> None:
    """Test the performance of the mega tester app rendering."""
    # NOTE(review): timing collection is presumably handled by the
    # "performance" marker's harness, not in this body — confirm.
    # Rerun the app 5 times:
    for _ in range(5):
        rerun_app(app)
@pytest.mark.external_test(upload_test_assets=True)
def test_mega_tester_app_renders_expected_content(app_target: AppTarget) -> None:
    """Smoke-check that every section of the mega tester app renders its
    expected content on a fresh load: section headings, representative
    elements per section, and negative assertions for the initial state.

    Several checks are conditional (``count() in {0, 1}``) because some
    sections only render on newer Streamlit versions or when optional
    packages are installed.
    """
    expect_no_skeletons(app_target.locator_context, timeout=25000)
    expect(app_target.get_by_text("🎈 Mega tester app", exact=True)).to_be_visible()
    mandatory_headings = [
        "Packages and magic",
        "Map and media elements",
        "Data display elements",
        "Chart elements",
        "Custom UI elements",
        "Input widgets",
        "Text elements",
        "Block elements",
        "Navigation elements",
        "Streamlit context",
        "Utility elements",
        "Stop behavior",
    ]
    for heading in mandatory_headings:
        expect(
            app_target.get_by_role("heading", name=heading, exact=True)
        ).to_be_visible()
    # Authentication section is version-gated, so allow 0 or 1 occurrences.
    authentication_heading = app_target.get_by_role(
        "heading", name="Authentication", exact=True
    )
    wait_until(
        app_target.page,
        lambda: authentication_heading.count() in {0, 1},
        timeout=10000,
    )
    if authentication_heading.count() == 1:
        expect(authentication_heading).to_be_visible()
    # Packages + magic: verify semantic content, not just section presence.
    expect(app_target.get_by_test_id("stDataFrame").first).to_contain_text("Python")
    expect(app_target.get_by_test_id("stDataFrame").first).to_contain_text("Streamlit")
    expect(
        app_target.get_by_test_id("stCode").filter(has_text="Abracowdabra")
    ).to_be_visible()
    expect(app_target.get_by_text("Magic bare expression", exact=True)).to_be_visible()
    # Media and data display.
    expect(app_target.get_by_text("Generated image", exact=True)).to_be_visible()
    expect(app_target.get_by_test_id("stImage").first).to_be_visible()
    expect(app_target.get_by_test_id("stAudio").first).to_be_visible()
    expect(
        app_target.get_by_role("button", name="Download data as CSV", exact=True)
    ).to_be_visible()
    column_config_heading = app_target.get_by_role(
        "heading", name="Column config matrix", exact=True
    )
    if column_config_heading.count() == 1:
        expect(column_config_heading).to_be_visible()
        # Column headers can be virtualized/off-screen; assert presence rather than visibility.
        expect(app_target.get_by_text("TextColumn", exact=True)).to_have_count(1)
    expect(app_target.get_by_test_id("stTable")).to_be_visible()
    metric_cards = app_target.get_by_test_id("stMetric")
    expect(metric_cards.first).to_contain_text("Metric")
    expect(metric_cards.first).to_contain_text("42")
    expect(metric_cards.first).to_contain_text("2")
    expect(metric_cards.filter(has_text="Metric positive")).to_have_count(1)
    expect(metric_cards.filter(has_text="Metric negative")).to_have_count(1)
    expect(metric_cards.filter(has_text="Metric neutral")).to_have_count(1)
    expect(
        app_target.get_by_test_id("stJson").filter(has_text="timezone").first
    ).to_be_visible()
    # Charts: ensure multiple concrete chart outputs rendered.
    # The mega tester app always renders at least four Vega charts.
    vega_charts = app_target.get_by_test_id("stVegaLiteChart")
    expect(vega_charts.nth(3)).to_be_visible()
    expect(vega_charts.first).to_be_visible()
    # Plotly is an optional package, so 0 or 1 charts are acceptable.
    plotly_charts = app_target.get_by_test_id("stPlotlyChart")
    wait_until(
        app_target.page,
        lambda: plotly_charts.count() in {0, 1},
        timeout=10000,
    )
    if plotly_charts.count() == 1:
        expect(plotly_charts.first).to_be_visible()
    else:
        expect(plotly_charts).to_have_count(0)
    expect(app_target.get_by_test_id("stGraphVizChart").first).to_be_visible()
    # Custom UI: verify HTML component iframe and unsafe markdown output.
    custom_html_iframe = app_target.locator(
        "iframe[srcdoc*='Bold green HTML text']"
    ).first
    expect(custom_html_iframe).to_be_visible()
    expect(custom_html_iframe).to_have_attribute(
        "srcDoc", re.compile(r"Bold green HTML text|Click me")
    )
    expect(app_target.get_by_text("Unsafe markdown HTML", exact=True)).to_be_visible()
    # Text elements should render concrete messages.
    expect(
        app_target.get_by_role("heading", name="Title with tooltip", exact=True)
    ).to_be_visible()
    expect(app_target.get_by_text("Warning", exact=True)).to_be_visible()
    expect(app_target.get_by_text("Warning with icon", exact=True)).to_be_visible()
    expect(app_target.get_by_text("Error", exact=True)).to_be_visible()
    expect(app_target.get_by_text("Error with icon", exact=True)).to_be_visible()
    expect(app_target.get_by_text("Info", exact=True)).to_be_visible()
    expect(app_target.get_by_text("Info with icon", exact=True)).to_be_visible()
    expect(app_target.get_by_text("Success", exact=True)).to_be_visible()
    expect(app_target.get_by_text("Success with icon", exact=True)).to_be_visible()
    expect(
        app_target.get_by_role("heading", name="Header with blue divider", exact=True)
    ).to_be_visible()
    expect(
        app_target.get_by_role(
            "heading", name="Header with rainbow divider", exact=True
        )
    ).to_be_visible()
    expect(app_target.get_by_test_id("stException")).to_contain_text(
        "Example exception"
    )
    expect(
        app_target.get_by_role("link", name="Link button", exact=True)
    ).to_be_visible()
    # Inputs + blocks + utility content.
    expect(app_target.get_by_text("Textbox", exact=True)).to_be_visible()
    expect(app_target.get_by_text("Number", exact=True)).to_be_visible()
    expect(
        app_target.get_by_role("button", name=re.compile(r"Button primary"))
    ).to_be_visible()
    expect(
        app_target.get_by_role("button", name=re.compile(r"Button tertiary"))
    ).to_be_visible()
    expect(app_target.get_by_text("Accept new options", exact=True)).to_be_visible()
    # Version-gated widgets: assert visibility only when they rendered.
    file_uploader_mode = app_target.get_by_text("File uploader mode", exact=True)
    if file_uploader_mode.count() == 1:
        expect(file_uploader_mode).to_be_visible()
    expect(app_target.get_by_text("Dialog width", exact=True)).to_be_visible()
    expect(app_target.get_by_text("Dialog dismissible", exact=True)).to_be_visible()
    show_camera_input = app_target.get_by_text("Show camera input", exact=True)
    if show_camera_input.count() == 1:
        expect(show_camera_input).to_be_visible()
    wide_mode = app_target.get_by_text("Wide mode", exact=True)
    if wide_mode.count() == 1:
        expect(wide_mode).to_be_visible()
    expect(app_target.get_by_text("Sidebar widgets", exact=True)).to_be_visible()
    expect(app_target.get_by_text("Sidebar expander", exact=True)).to_be_visible()
    expect(app_target.get_by_text("Sidebar write API", exact=True)).to_be_visible()
    expect(app_target.get_by_text("Selectbox", exact=True)).to_be_visible()
    expect(app_target.get_by_role("tab", name="Tab A", exact=True)).to_be_visible()
    expect(app_target.get_by_role("tab", name="Tab B", exact=True)).to_be_visible()
    expect(app_target.get_by_text("Expander", exact=True)).to_be_visible()
    expect(app_target.get_by_text("Bordered left column", exact=True)).to_be_visible()
    expect(
        app_target.get_by_text("Bordered container content", exact=True)
    ).to_be_visible()
    expect(app_target.get_by_text("Echo", exact=True)).to_be_visible()
    expect(app_target.get_by_text("Text before stop", exact=True)).to_be_visible()
    expect(app_target.get_by_text("Text after stop", exact=True)).to_be_visible()
    # Negatives for initial state.
    expect(app_target.get_by_test_id("stDialog")).to_have_count(0)
    expect(app_target.get_by_text("Form submitted", exact=True)).to_have_count(0)
    # PDF section is opt-in/version-gated; only check when its heading exists.
    if app_target.get_by_role("heading", name="PDF element", exact=True).count() == 1:
        pdf_container = app_target.get_by_test_id("pdf-container").first
        expect(pdf_container).to_be_visible(timeout=30000)
        expect(pdf_container.get_by_test_id("pdf-loading")).to_be_hidden(timeout=30000)
        expect(pdf_container.locator('[data-index="0"]').first).to_be_visible(
            timeout=30000
        )
def test_mega_tester_app_interactions_validate_behavior(app: Page) -> None:
    """Drive the app's widgets end-to-end and validate the resulting output.

    The interaction order matters: later steps depend on state created by
    earlier ones (e.g. "Accept new options" before the selectbox, dialog
    submission before reading the vote result, navigation toggles before
    clicking section links).
    """
    # Text/number/checkbox/radio round-trips.
    textbox = get_text_input(app, "Textbox")
    textbox.locator("input").first.fill("Ada")
    textbox.locator("input").first.press("Enter")
    wait_for_app_run(app)
    expect(textbox.locator("input").first).to_have_value("Ada")
    fill_number_input(app, "Number", 7)
    number_input = get_number_input(app, "Number")
    expect(number_input.locator("input").first).to_have_value(re.compile(r"7"))
    click_checkbox(app, "Checkbox")
    expect(get_checkbox(app, "Checkbox").locator("input").first).to_be_checked()
    select_radio_option(app, option="dog", label="Radio")
    expect(
        app.get_by_test_id("stRadio")
        .filter(has_text="Radio")
        .get_by_role("radio", name="dog", exact=True)
    ).to_be_checked()
    click_toggle(app, "Accept new options")
    wait_for_app_run(app)
    select_selectbox_option(app, "Selectbox", "dog")
    expect(
        app.get_by_test_id("stSelectbox").filter(has_text="Selectbox")
    ).to_contain_text("dog")
    # Form submission renders its confirmation text.
    form_text = get_text_input(app, "Form text")
    form_text.locator("input").first.fill("hello")
    app.get_by_role("button", name="Submit form").first.click()
    expect(app.get_by_text("Form submitted", exact=True)).to_be_visible()
    click_button(app, "Write stream")
    expect(app.get_by_text("lorem ipsum", exact=False)).to_be_visible()
    expect(app.get_by_text("dolor sit amet", exact=False)).to_be_visible()
    app.get_by_role("button", name=re.compile(r"Button primary")).first.click()
    wait_for_app_run(app)
    expect(
        app.get_by_text("You pressed the primary button", exact=True)
    ).to_be_visible()
    app.get_by_role("button", name=re.compile(r"Button tertiary")).first.click()
    wait_for_app_run(app)
    expect(
        app.get_by_text("You pressed the tertiary button", exact=True)
    ).to_be_visible()
    # Expander should hide content until opened.
    expect(app.get_by_text("Expander content", exact=True)).to_be_hidden()
    app.get_by_text("Expander", exact=True).click()
    expect(app.get_by_text("Expander content", exact=True)).to_be_visible()
    expect(app.get_by_text("Sidebar expander content", exact=True)).to_be_hidden()
    app.get_by_text("Sidebar expander", exact=True).click()
    expect(app.get_by_text("Sidebar expander content", exact=True)).to_be_visible()
    # Popover content should only appear when opened.
    expect(app.get_by_test_id("stPopoverBody")).to_have_count(0)
    popover_container = open_popover(app, "Popover")
    expect(popover_container).to_contain_text("Popover content")
    # Tab content should switch when selecting tabs.
    expect(app.get_by_text("Tab A content", exact=True)).to_be_visible()
    app.get_by_role("tab", name="Tab B", exact=True).click()
    expect(app.get_by_text("Tab B content", exact=True)).to_be_visible()
    # Dialog: open, submit, and verify the vote result replaces the button.
    click_button(app, "Open dialog item 1")
    expect(app.get_by_test_id("stDialog")).to_have_count(1)
    dialog_reason = get_text_input(app, "Dialog reason")
    dialog_reason.locator("input").first.fill("because")
    click_button(app, "Submit dialog")
    expect(
        app.get_by_text("Dialog result item=1 reason=because", exact=True)
    ).to_be_visible()
    expect(app.get_by_test_id("stDialog")).to_have_count(0)
    # Navigation: cycle through hidden/sidebar/top positions and page sets.
    select_selectbox_option(app, "Navigation position", "hidden")
    expect(app.get_by_text("Home page", exact=True)).to_be_visible()
    expect(app.get_by_test_id("stSidebarNav")).not_to_be_visible()
    expect(app.get_by_test_id("stTopNavLink")).to_have_count(0)
    select_selectbox_option(app, "Navigation position", "sidebar")
    expect(app.get_by_test_id("stSidebarNav")).to_be_visible()
    app.get_by_test_id("stSidebarNavLink").filter(has_text="About").first.click()
    wait_for_app_run(app)
    expect(app.get_by_text("About page", exact=True)).to_be_visible()
    app.get_by_test_id("stSidebarNavLink").filter(has_text="Contact").first.click()
    wait_for_app_run(app)
    expect(app.get_by_text("Contact page", exact=True)).to_be_visible()
    # "Many pages" swaps the page set, so "About" disappears.
    click_toggle(app, "Many pages")
    expect(
        app.get_by_test_id("stSidebarNavLink").filter(has_text="About")
    ).to_have_count(0)
    # Flatten sections so Logs is always directly clickable.
    click_toggle(app, "Navigation sections")
    app.get_by_test_id("stSidebarNavLink").filter(
        has_text="Data visualizations"
    ).first.click()
    wait_for_app_run(app)
    expect(app.get_by_text("Data visualizations page", exact=True)).to_be_visible()
    select_selectbox_option(app, "Navigation position", "top")
    expect(app.get_by_test_id("stSidebarNav")).not_to_be_visible()
    top_nav_items = app.locator(
        '[data-testid="stTopNavLink"], [data-testid="stTopNavSection"]'
    )
    expect(top_nav_items.first).to_be_visible()
    # Chat input exists exactly once regardless of its position.
    expect(app.get_by_test_id("stChatInput")).to_have_count(1)
    click_toggle(app, "Show chat input at bottom")
    wait_for_app_run(app)
    expect(app.get_by_test_id("stChatInput")).to_have_count(1)
    # st.stop: content after the stop call disappears once triggered.
    expect(app.get_by_text("Text after stop", exact=True)).to_be_visible()
    click_button(app, "Run st.stop")
    expect(app.get_by_text("Text before stop", exact=True)).to_be_visible()
    # Negative: content after st.stop should not be rendered after button click.
    expect(app.get_by_text("Text after stop", exact=True)).to_have_count(0)
    # Disabling widgets propagates to rendered inputs.
    click_toggle(app, "Disable widgets")
    expect(get_text_input(app, "Textbox").locator("input").first).to_be_disabled()
| {
"repo_id": "streamlit/streamlit",
"file_path": "e2e_playwright/mega_tester_app_test.py",
"license": "Apache License 2.0",
"lines": 395,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:test/external/external_fuzz_sdma_warm_start.py | import subprocess, sys, os, random
CHILD_SCRIPT = """
import os, random
import numpy as np
from tinygrad import Tensor, Device
from tinygrad.runtime.ops_amd import AMDDevice
dev = Device["AMD"]
for i in range({N}):
sz = random.randint(1, {MAX_SZ})
data = np.random.randint(0, 256, sz, dtype=np.uint8)
t = Tensor(data, device="AMD").contiguous().realize()
dev.synchronize()
result = t.numpy()
assert (result == data).all(), f"Data mismatch at iter {{i}}"
""".strip()
def run_child(n_ops, max_sz, timeout):
  """Run one warm-start child doing n_ops random copies; return (status, stderr).

  status is "ok", "fail" (nonzero exit), or "timeout" (child killed after
  `timeout` seconds, which usually means the SDMA ring is stuck).
  """
  child_env = os.environ.copy()
  # keep the SDMA ring small so it wraps often, unless the caller overrides it
  child_env.setdefault("SDMA_RING_SIZE", "4096")
  code = CHILD_SCRIPT.format(N=n_ops, MAX_SZ=max_sz)
  proc = subprocess.Popen([sys.executable, "-c", code], stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=child_env)
  try:
    _, err = proc.communicate(timeout=timeout)
  except subprocess.TimeoutExpired:
    # kill the stuck child and reap it before reporting
    proc.kill()
    proc.communicate()
    return "timeout", "TIMEOUT: SDMA ring likely stuck"
  status = "ok" if proc.returncode == 0 else "fail"
  return status, err.decode(errors='replace')
if __name__ == "__main__":
  # knobs, all overridable from the environment
  n_iters = int(os.environ.get("FUZZ_ITERS", "10000"))
  timeout = int(os.environ.get("FUZZ_TIMEOUT", "10"))
  max_sz = int(os.environ.get("FUZZ_MAX_SZ", "65536"))
  timeouts = 0
  failures = 0
  for i in range(n_iters):
    # Run child with many ops to stress the small sdma ring buffer across warm starts
    n_ops = random.randint(20, 100)
    status, stderr = run_child(n_ops=n_ops, max_sz=max_sz, timeout=timeout)
    if status != "ok":
      if status == "timeout": timeouts += 1
      else: failures += 1
      # fix: failing iterations previously printed only stderr with no way to
      # tell which iteration (or status) produced it; mirror the ok branch
      print(f"iter {i}: {status} (n_ops={n_ops})")
      print(f"\tstderr: {stderr[:500]}")
    else:
      print(f"iter {i}: ok (n_ops={n_ops})")
  print(f"\n=== Results: {n_iters} iterations, {timeouts} timeouts, {failures} failures ===")
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/external/external_fuzz_sdma_warm_start.py",
"license": "MIT License",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:examples/openpilot/load_pickle.py | import sys, pickle
from extra.bench_log import WallTimeEvent, BenchEvent
from tinygrad.helpers import getenv
# Benchmark: time 10 loads of an openpilot pickle, optionally asserting the
# fastest load stays under ASSERT_MIN_LOAD_TIME seconds.
PKL = sys.argv[1] if len(sys.argv) > 1 else "/tmp/openpilot.pkl"
load_times = []
for _ in range(10):
  # fix: the file handle was previously opened without ever being closed;
  # keep the open() inside the timed region so the measurement is unchanged
  with WallTimeEvent(BenchEvent.STEP) as wte:
    with open(PKL, 'rb') as f: pickle.load(f)
  load_times.append(wte.time)
  print(f"pickle load: {wte.time:6.2f} s")
if (assert_time:=getenv("ASSERT_MIN_LOAD_TIME")):
  # compare against the best of the 10 runs to reduce noise
  min_time = min(load_times)
  assert min_time < assert_time, f"Speed regression, expected min load time of < {assert_time} s but took: {min_time} s"
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "examples/openpilot/load_pickle.py",
"license": "MIT License",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
tinygrad/tinygrad:test/external/external_fuzz_beam_timeout_recovery.py | #!/usr/bin/env python3
"""
Stress test for beam timeout + device recovery on AM devices.
Usage:
  AMD=1 python test/external/external_fuzz_beam_timeout_recovery.py
"""
from tinygrad import Tensor, Device
from tinygrad.helpers import Context
from tinygrad.runtime.ops_amd import AMDDevice
if __name__ == "__main__":
  dev = Device["AMD"]
  # only meaningful on an AM (driverless) AMD device
  assert isinstance(dev, AMDDevice) and dev.is_am(), "not am"
  N = 10000
  for i in range(N):
    # 1) launch a large matmul that a 1-second synchronize likely won't finish,
    #    forcing the timeout/recovery path
    with Context(DEBUG=0, BEAM=0):
      a = Tensor.rand(4096, 4096, device="AMD").contiguous().realize()
      b = Tensor.rand(4096, 4096, device="AMD").contiguous().realize()
      c = a.matmul(b)
      c.realize()
    # timeout is expected to fire; the error is printed, not fatal
    try: dev.synchronize(timeout=1)
    except RuntimeError as e: print(e)
    # 2) after recovery, verify the device still computes correct results
    with Context(DEBUG=0, BEAM=0):
      a = Tensor.ones(512, 512, device="AMD").contiguous().realize()
      b = Tensor.ones(512, 512, device="AMD").contiguous().realize()
      result = a.matmul(b).realize()[0, 0].item()
      assert result == 512.0, f"iter {i}: got {result}"
    print(f" iter {i+1}/{N}: ok")
  print(f"=== All {N} iterations passed ===")
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/external/external_fuzz_beam_timeout_recovery.py",
"license": "MIT License",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:extra/sqtt/examples/discover_ops.py | #!/usr/bin/env python3
# Run all ALU and memory instructions in the ISA
import functools, inspect
from enum import Enum
from tinygrad import Tensor, Device, dtypes
from tinygrad.uop.ops import UOp, Ops, KernelInfo, AddrSpace
from tinygrad.renderer.amd.dsl import Inst, Reg, OPERANDS, SrcField, VGPRField, SGPRField, SSrcField, SBaseField, AlignedSGPRField, BitField
from tinygrad.renderer.amd.dsl import FixedBitField, EnumBitField, s, v, NULL, VCC_LO
from extra.gemm.amd_asm_matmul import Kernel
# skip instructions that mutate wave state (PC, EXEC, allocations, signals); matched by exact opcode name
SKIP = {"S_SETPC_B64", "S_SWAPPC_B64", "S_RFE_B64", "S_BARRIER_SIGNAL_ISFIRST", "S_GET_BARRIER_STATE", "S_ALLOC_VGPR", "S_SLEEP_VAR", "S_GETPC_B64",
        "S_SENDMSG_RTN_B32", "S_SENDMSG_RTN_B64"}
# skip barriers, s_waits, wrap level atomics, and ray tracing (bvh); matched as substrings of the opcode name
SKIP_SUBSTR = ["SAVEEXEC", "CMPX", "WREXEC", "MOVREL", "ATOMIC", "S_BUFFER_", "S_ATC_PROBE", "BARRIER", "S_WAITCNT", "BVH",
               "DS_CMPSTORE_RTN", "DS_WRAP_RTN_B32", "DS_ORDERED_COUNT", "DS_GWS", "GS_REG", "GLOBAL_LOAD_LDS", "GLOBAL_STORE_BLOCK"]
# encoding formats handled by create_alu_inst (register-only instructions)
ALU_FORMATS = {"VOP1", "VOP1_LIT", "VOP1_SDST", "VOP2", "VOP2_LIT", "VOP3", "VOP3_SDST", "VOP3SD", "VOP3P", "VOP3P_MFMA", "VOP3PX2",
               "VOPC", "SOP1", "SOP1_LIT", "SOP2", "SOP2_LIT", "SOPC", "SOPC_LIT", "SOPK", "SOPK_LIT", "VINTERP"}
# memory formats handled by create_mem_inst; intentionally not testing scratch memory ops
MEM_FORMATS = {"VGLOBAL", "GLOBAL", "SMEM", "DS"}
def should_skip(op:Enum) -> bool:
  """True if op is on the explicit skip list or matches any skip substring."""
  name = op.name
  if name in SKIP: return True
  return any(sub in name for sub in SKIP_SUBSTR)
# ** named register assignments
# ALU operands
ALU_VGPR_STRIDE = 16 # v[0], v[16], v[32], ... per ALU operand slot
ALU_SGPR_STRIDE = 4 # s[0], s[4], s[8], ... per ALU operand slot
# memory address registers (set up by the exec_insts prologue)
S_KERNARG_PTR = (0, 1)  # s[0:1]: kernarg pointer (sbase of the prologue s_load_b64)
S_BUF_PTR = (2, 3)      # s[2:3]: buffer base loaded from kernargs by the prologue
V_VADDR = (0, 1)        # v[0:1]: 64-bit vaddr, zeroed in the prologue
V_DS_ADDR = 0           # v[0]: 32-bit LDS / offset address
# memory data registers
MEM_VGPR_BASE = 32 # v[32], v[48], ... for vdst/vdata/vsrc
MEM_VGPR_STRIDE = 16 # spacing between memory data vgpr slots
MEM_SGPR_BASE = 8 # s[8], s[10], ... for SMEM sdata
MEM_SGPR_STRIDE = 2 # spacing between memory data sgpr slots
# ** create an ALU instruction based on the operands
def create_alu_inst(op:Enum, builder:functools.partial[Inst]) -> Inst:
  """Instantiate op with distinct register operands so operand slots don't alias.

  Each operand slot gets its own vgpr/sgpr range (ALU_*_STRIDE apart); sdst and
  scalar-typed ("SREG") sources fall back to VCC_LO.
  """
  inst_cls, operands, slot = builder.func, OPERANDS[op], 0
  kwargs:dict[str, Reg|int] = {}
  for name, field in inst_cls._fields:
    # fixed/enum fields are part of the encoding, not fillable operands
    if isinstance(field, (FixedBitField, EnumBitField)): continue
    # number of 32-bit registers this operand occupies (operand width // 32)
    nregs = max(1, operands[name][1] // 32) if name in operands else 1
    is_sreg = name in operands and "SREG" in str(operands[name][2])
    base_v, base_s = slot * ALU_VGPR_STRIDE, slot * ALU_SGPR_STRIDE
    if name == "sdst" and isinstance(field, SGPRField): reg = VCC_LO
    elif is_sreg and not isinstance(field, VGPRField): reg = VCC_LO
    elif isinstance(field, VGPRField): reg = v[base_v:base_v+nregs-1] if nregs > 1 else v[base_v]
    elif isinstance(field, SSrcField): reg = VCC_LO if nregs <= 2 else s[base_s:base_s+nregs-1] if nregs > 1 else s[base_s]
    elif isinstance(field, SGPRField): reg = s[base_s:base_s+nregs-1] if nregs > 1 else s[base_s]
    elif isinstance(field, SrcField): reg = v[base_v:base_v+nregs-1] if nregs > 1 else v[base_v]
    else: reg = None
    # only fields that consumed a register advance the slot counter
    if reg is not None: kwargs[name] = reg; slot += 1
    elif isinstance(field, BitField): kwargs[name] = field.default
  return builder(**kwargs)
# ** create a memory instruction with pre set address registers
# per-format address operands; the registers here are initialized by the exec_insts prologue
MEM_PRESET_REGS:dict[str, dict[str, Reg]] = {
  "VGLOBAL":{"saddr":s[S_BUF_PTR[0]:S_BUF_PTR[1]], "vaddr":v[V_VADDR[0]:V_VADDR[1]]},
  "GLOBAL":{"saddr":s[S_BUF_PTR[0]:S_BUF_PTR[1]], "addr":v[V_DS_ADDR]}, # addr is 32-bit offset when saddr is valid SGPR
  "DS":{"addr":v[V_DS_ADDR]},
  "SMEM":{"sbase":s[S_KERNARG_PTR[0]:S_KERNARG_PTR[1]], "soffset":NULL},
}
def create_mem_inst(op:Enum, builder:functools.partial[Inst]) -> Inst:
  """Instantiate a memory op using the preset address registers for its format
  and fresh, non-overlapping data registers for everything else."""
  inst_cls, operands, field_map = builder.func, OPERANDS.get(op, {}), MEM_PRESET_REGS.get(builder.func.__name__, {})
  kwargs:dict[str, Reg|int] = {}
  vslot, sslot = 0, 0
  for name, field in inst_cls._fields:
    # fixed/enum fields are part of the encoding, not fillable operands
    if isinstance(field, (FixedBitField, EnumBitField)): continue
    if name in field_map:
      # address operands come from the per-format presets
      kwargs[name] = field_map[name]
      continue
    # number of 32-bit registers this operand occupies
    nregs = max(1, operands[name][1] // 32) if name in operands else 1
    if isinstance(field, VGPRField):
      vi = MEM_VGPR_BASE + vslot * MEM_VGPR_STRIDE
      kwargs[name] = v[vi:vi+nregs-1] if nregs > 1 else v[vi]
      vslot += 1
    elif isinstance(field, (SGPRField, AlignedSGPRField, SBaseField)):
      si = MEM_SGPR_BASE + sslot * MEM_SGPR_STRIDE
      kwargs[name] = s[si:si+nregs-1] if nregs > 1 else s[si]
      sslot += 1
    elif isinstance(field, BitField): kwargs[name] = field.default
  return builder(**kwargs)
# ** collect all memory and ALU instructions from the ISA autogen
def collect_instructions() -> tuple[list[Inst], list[Inst], list[str]]:
  """Scan the `all_insts` autogen module and build one instance of every
  testable instruction. Returns (alu_insts, mem_insts, skipped_names)."""
  op_map:dict[Enum, functools.partial[Inst]] = {}
  # autogen exposes each opcode as functools.partial(InstClass, op_enum)
  for name, obj in inspect.getmembers(all_insts):
    if isinstance(obj, functools.partial) and len(obj.args) == 1: op_map[obj.args[0]] = obj
  alu_insts:list[Inst] = []
  mem_insts:list[Inst] = []
  skipped:list[str] = []
  for op_enum, builder in op_map.items():
    # skip anything on the skip lists or without operand metadata
    if should_skip(op_enum) or op_enum not in OPERANDS: skipped.append(op_enum.name); continue
    fmt = builder.func.__name__
    if fmt in ALU_FORMATS: alu_insts.append(create_alu_inst(op_enum, builder))
    elif fmt in MEM_FORMATS: mem_insts.append(create_mem_inst(op_enum, builder))
  return alu_insts, mem_insts, skipped
def exec_insts(insts:list):
  """Assemble insts into a single kernel (with a prologue that sets up global
  memory addressing) and run it once on the AMD device."""
  k = Kernel(arch)
  # ** prologue for global memory: load the buffer base from kernargs into s[2:3], zero the 64-bit vaddr
  k.emit(s_load_b64(sdata=s[S_BUF_PTR[0]:S_BUF_PTR[1]], sbase=s[S_KERNARG_PTR[0]:S_KERNARG_PTR[1]], soffset=NULL))
  k.waitcnt(lgkm=0)
  k.emit(v_mov_b32_e32(v[V_VADDR[0]], 0))
  k.emit(v_mov_b32_e32(v[V_VADDR[1]], 0))
  # ** emit all instructions under test back to back
  for inst in insts: k.emit(inst)
  k.emit(s_endpgm())
  # ** run as a custom kernel over three scratch buffers
  NUM_THREADS, NUM_GRIDS, BUF_SIZE = 32, 1, 1024*1024
  def fxn(A:UOp, B:UOp, C:UOp) -> UOp:
    # wrap the hand-assembled code (k.finalize()) into a PROGRAM uop
    lidx, gidx = UOp.special(NUM_THREADS, "lidx0"), UOp.special(NUM_GRIDS, "gidx0")
    lds = UOp(Ops.DEFINE_LOCAL, dtypes.uint8.ptr(size=BUF_SIZE, addrspace=AddrSpace.LOCAL), (), "lds")
    sink = UOp.sink(A.base, B.base, C.base, lds, lidx, gidx, arg=KernelInfo(name="discover_ops"))
    return UOp(Ops.PROGRAM, src=(sink, UOp(Ops.DEVICE, arg="AMD"), UOp(Ops.LINEAR, src=tuple(UOp(Ops.INS, arg=x) for x in k.finalize()))))
  A = Tensor.empty(BUF_SIZE, dtype=dtypes.uint8)
  B = Tensor.empty(1, dtype=dtypes.uint8)
  C = Tensor.empty(1, dtype=dtypes.uint8)
  Tensor.custom_kernel(A, B, C, fxn=fxn)[0].realize()
if __name__ == "__main__":
  import sys
  # pick the ISA autogen matching the current GPU architecture
  arch = Device[Device.DEFAULT].renderer.arch
  if arch.startswith("gfx12"):
    from tinygrad.runtime.autogen.amd.rdna4.ins import *
    import tinygrad.runtime.autogen.amd.rdna4.ins as all_insts
  elif arch.startswith("gfx11"):
    from tinygrad.runtime.autogen.amd.rdna3.ins import *
    import tinygrad.runtime.autogen.amd.rdna3.ins as all_insts
    # these don't exist in RDNA3, only RDNA3.5 and above
    SKIP.update(["S_FMAAK_F32", "S_FMAMK_F32"])
  else:
    print(f"{arch} not supported yet")
    sys.exit(0)
  alu_insts, mem_insts, skipped = collect_instructions()
  print(f"collected {len(alu_insts)} ALU + {len(mem_insts)} memory instructions ({len(skipped)} skipped)")
  exec_insts(mem_insts+alu_insts)
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "extra/sqtt/examples/discover_ops.py",
"license": "MIT License",
"lines": 132,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
tinygrad/tinygrad:test/unit/test_function.py | import numpy as np
import unittest
from tinygrad.function import function
from tinygrad import Tensor
from tinygrad.uop.ops import UOp
class TestFunction(unittest.TestCase):
  """Tests for the @function decorator: explicit args, implicitly captured
  tensors, gradients through calls, symbolic indices/shapes, naming, and
  assignment semantics."""
  def test_simple(self):
    @function
    def f(a:Tensor, b:Tensor) -> Tensor: return a+b
    a = Tensor([1,2,3])
    b = Tensor([4,5,6])
    np.testing.assert_equal(f(a,b).numpy(), [5,7,9])
  def test_simple_same(self):
    # same tensor passed for both parameters
    @function
    def f(a:Tensor, b:Tensor) -> Tensor: return a+b
    a = Tensor([1,2,3])
    np.testing.assert_equal(f(a,a).numpy(), [2,4,6])
  def test_implicit(self):
    # `inp` is captured from the enclosing scope, not passed as an argument
    inp = Tensor([7,8,9])
    @function
    def f(a:Tensor, b:Tensor) -> Tensor: return a+b+inp
    a = Tensor([1,2,3])
    b = Tensor([4,5,6])
    np.testing.assert_equal(f(a,b).numpy(), [12,15,18])
  def test_implicit_same_as_input(self):
    # the captured tensor is also passed explicitly as an argument
    inp = Tensor([7,8,9])
    @function
    def f(a:Tensor, b:Tensor) -> Tensor: return a+b+inp
    a = Tensor([1,2,3])
    np.testing.assert_equal(f(a, inp).numpy(), [15,18,21])
  def test_implicit_2(self):
    # two functions with different captures, realized together
    inp = Tensor([7,8,9])
    @function
    def f(a:Tensor, b:Tensor) -> Tensor:
      return a+b+inp
    inp2 = Tensor([7,8,10])
    @function
    def g(a:Tensor, b:Tensor) -> Tensor:
      return a+b+inp2
    a = Tensor([1,2,3])
    b = Tensor([4,5,6])
    c = f(a,b)
    d = g(a,b)
    c.realize(d)
    np.testing.assert_equal(c.numpy(), [12,15,18])
    np.testing.assert_equal(d.numpy(), [12,15,19])
  def test_implicit_unrealized(self):
    # the captured tensor is itself an unrealized computation
    inp = Tensor([1,2,3]) + Tensor([4,5,6])
    @function
    def f(a:Tensor) -> Tensor: return a + inp
    np.testing.assert_equal(f(Tensor([10,20,30])).numpy(), [15,27,39])
  def test_detach(self):
    @function
    def f(a:Tensor, b:Tensor) -> Tensor: return a.detach() + b
    a = Tensor([1,2,3])
    b = Tensor([4,5,6])
    np.testing.assert_equal(f(a, b).numpy(), [5,7,9])
  def test_contiguous_backward(self):
    @function
    def f(a:Tensor, b:Tensor) -> Tensor: return (a + b).contiguous_backward()
    a = Tensor([1,2,3])
    b = Tensor([4,5,6])
    np.testing.assert_equal(f(a, b).numpy(), [5,7,9])
  def test_method(self):
    # @function applied to a bound method with instance state
    class Foo:
      def __init__(self): self.w = Tensor([10,20,30])
      @function
      def __call__(self, x:Tensor) -> Tensor: return x + self.w
    foo = Foo()
    np.testing.assert_equal(foo(Tensor([1,2,3])).numpy(), [11,22,33])
  def test_grad_gemm(self):
    @function
    def f(a:Tensor, b:Tensor) -> Tensor: return a @ b
    a = Tensor([[1.,2.],[3.,4.]], requires_grad=True)
    b = Tensor([[5.,6.],[7.,8.]], requires_grad=True)
    (f(a, b).contiguous() * b).sum().backward()
    Tensor.realize(a, b, a.grad, b.grad)
    # L = sum((a@b) * b), dL/d(a@b) = b, dL/da = b @ b^T, dL/db = a^T @ b + (a@b)
    na, nb = a.numpy(), b.numpy()
    np.testing.assert_allclose(a.grad.numpy(), nb @ nb.T)
    np.testing.assert_allclose(b.grad.numpy(), na.T @ nb + na @ nb)
  def test_grad_implicit(self):
    # gradients must flow to a tensor captured by the function, not passed in
    w = Tensor([1., 2., 3.], requires_grad=True)
    w.realize() # TODO: this is required
    @function
    def f(x:Tensor) -> Tensor: return x * w
    x = Tensor([4., 5., 6.])
    f(x).sum().backward()
    np.testing.assert_allclose(w.grad.numpy(), [4., 5., 6.])
  def test_symbolic_index(self):
    table = Tensor([10,20,30,40]).contiguous().realize()
    @function
    def f(x:Tensor, start_pos:int|UOp) -> Tensor:
      return x + table[start_pos]
    v = UOp.variable("start_pos", 0, 3)
    # bound variable value 0 selects table[0] == 10
    np.testing.assert_equal(f(Tensor([1,2,3]), v.bind(0)).numpy(), [11,12,13])
  def test_symbolic_shape_input(self):
    table = Tensor([10,20,30,40]).contiguous().realize()
    @function
    def f(x:Tensor) -> Tensor: return x * 2
    sz = UOp.variable("sz", 1, 3)
    # the input's length is symbolic (bound to 2 here)
    slic = table[:sz.bind(2)]
    np.testing.assert_equal(f(slic)[:2].numpy(), [20,40])
  def test_nested_calls(self):
    w = Tensor([10., 20., 30.])
    @function
    def f(a:Tensor) -> Tensor: return a + w
    @function
    def g(a:Tensor) -> Tensor: return a * w
    a = Tensor([1., 2., 3.])
    np.testing.assert_allclose(g(f(a)).numpy(), [110., 440., 990.])
  def test_nested_calls_backward(self):
    # one @function calling another, with gradients through both
    w = Tensor([[1., 2.], [3., 4.]]).contiguous().realize()
    @function
    def inner(x:Tensor) -> Tensor: return x + w
    @function
    def outer(a:Tensor, b:Tensor) -> Tensor: return inner(a.reshape(1,2) + b.reshape(1,2))
    a = Tensor([1., 2.], requires_grad=True)
    b = Tensor([3., 4.], requires_grad=True)
    outer(a, b).sum().backward()
    np.testing.assert_allclose(a.grad.numpy(), [2., 2.])
    np.testing.assert_allclose(b.grad.numpy(), [2., 2.])
  def test_unused_param_backward(self):
    # an unused parameter should receive a zero gradient, not None/error
    @function
    def f(a:Tensor, b:Tensor, c:Tensor) -> Tensor: return a + c # b is unused
    a = Tensor([1., 2., 3.], requires_grad=True)
    b = Tensor([4., 5., 6.], requires_grad=True)
    c = Tensor([7., 8., 9.], requires_grad=True)
    f(a, b, c).sum().backward()
    np.testing.assert_allclose(a.grad.numpy(), [1., 1., 1.])
    np.testing.assert_allclose(b.grad.numpy(), [0., 0., 0.])
    np.testing.assert_allclose(c.grad.numpy(), [1., 1., 1.])
  def test_name(self):
    @function
    def f(a:Tensor) -> Tensor: return a + 1
    assert f(Tensor([1])).uop.arg.name.endswith("f")
  def test_method_name(self):
    # method functions are named Class.method
    class Foo:
      @function
      def __call__(self, x:Tensor) -> Tensor: return x + 1
    assert Foo()(Tensor([1])).uop.arg.name.endswith("Foo.__call__")
  def test_callable_instance(self):
    # function() applied directly to a callable object (not via decorator)
    class Foo:
      def __init__(self): self.w = Tensor([10,20,30])
      def __call__(self, x:Tensor) -> Tensor: return x + self.w
    foo = Foo()
    f = function(foo)
    np.testing.assert_equal(f(Tensor([1,2,3])).numpy(), [11,22,33])
    assert f(Tensor([1,2,3])).uop.arg.name.endswith("Foo")
  def test_iadd(self):
    @function
    def f(x:Tensor) -> Tensor:
      x += 1
      return x
    a = Tensor([1,2,3]).realize()
    np.testing.assert_equal(f(a).numpy(), [2,3,4])
    np.testing.assert_equal(a.numpy(), [3,4,5]) # TODO: should be [1,2,3]
  def test_implicit_assign(self):
    # captured tensor carries a pending += before the function is defined
    a = Tensor([1,2,3])
    a += 1
    c = Tensor([2,2,2]).contiguous()
    @function
    def f(b:Tensor) -> Tensor: return a+b+c
    b = Tensor([10,20,30]).realize()
    np.testing.assert_equal(f(b).numpy(), [14,25,36])
  def test_assign_input(self):
    @function
    def f(a:Tensor, b:Tensor) -> Tensor:
      a.assign(b+1)
      return a
    a = Tensor([1,2,3]).realize()
    b = Tensor([10,20,30]).realize()
    np.testing.assert_equal(f(a,b).numpy(), [11,21,31])
    np.testing.assert_equal(a.numpy(), [11,21,31]) # TODO: should be [1,2,3]
    np.testing.assert_equal(b.numpy(), [10,20,30])
  @unittest.expectedFailure
  def test_assign_slice(self):
    # slice assignment inside a @function is not supported yet
    @function
    def f(a:Tensor, b:Tensor) -> Tensor:
      a[1:] = b[1:]+1
      return a
    a = Tensor([1,2,3]).realize()
    b = Tensor([10,20,30]).realize()
    np.testing.assert_equal(f(a,b).numpy(), [1,21,31])
    np.testing.assert_equal(a.numpy(), [1,2,3])
    np.testing.assert_equal(b.numpy(), [10,20,30])
class TestFunctionMulti(unittest.TestCase):
  """@function with tensors sharded across multiple CPU devices: values,
  gradient flow, and axis propagation through CALL."""
  devices_2 = ("CPU:0", "CPU:1") # two-way shard used by most tests
  def test_simple_multi(self):
    @function
    def f(a:Tensor, b:Tensor) -> Tensor: return a+b
    a = Tensor([1,2,3,4]).shard(self.devices_2, axis=None)
    b = Tensor([10,20,30,40]).shard(self.devices_2, axis=None)
    np.testing.assert_equal(f(a,b).numpy(), [11,22,33,44])
  def test_simple_multi_sharded(self):
    @function
    def f(a:Tensor, b:Tensor) -> Tensor: return a+b
    a = Tensor([1,2,3,4]).shard(self.devices_2, axis=0)
    b = Tensor([10,20,30,40]).shard(self.devices_2, axis=0)
    np.testing.assert_equal(f(a,b).numpy(), [11,22,33,44])
  def test_data_parallel_multi(self):
    # data sharded on axis 0, weights replicated (axis=None)
    @function
    def f(x:Tensor, w:Tensor) -> Tensor: return x @ w
    x = Tensor([[1.,2.],[3.,4.],[5.,6.],[7.,8.]]).shard(self.devices_2, axis=0)
    w = Tensor([[1.,0.],[0.,1.]]).shard(self.devices_2, axis=None)
    np.testing.assert_allclose(f(x, w).numpy(), [[1.,2.],[3.,4.],[5.,6.],[7.,8.]])
  def test_grad_implicit_multi(self):
    w = Tensor([1., 2., 3., 4.], requires_grad=True).shard(self.devices_2, axis=None)
    w.realize()
    @function
    def f(x:Tensor) -> Tensor: return x * w
    x = Tensor([4., 5., 6., 7.]).shard(self.devices_2, axis=None)
    f(x).sum().backward()
    np.testing.assert_allclose(w.grad.numpy(), [4., 5., 6., 7.])
  def test_call_axis(self):
    @function
    def f(x:Tensor, w:Tensor) -> Tensor: return x @ w
    x = Tensor([[1.,0.],[0.,1.],[1.,1.],[0.,0.]]).shard(self.devices_2, axis=0)
    w = Tensor([[1.,2.],[3.,4.]]).shard(self.devices_2, axis=None)
    result = f(x, w)
    # CALL output should inherit axis=0 from the sharded input
    self.assertEqual(result.uop.axis, 0)
    # reduce on the sharded axis should remove it
    self.assertIsNone(result.sum().uop.axis)
  def test_call_axis_shard_inside(self):
    # sharding happens inside the function body instead of at the call site
    @function
    def f(x:Tensor, w:Tensor) -> Tensor:
      return x.shard(self.devices_2, axis=0) @ w.shard(self.devices_2, axis=None)
    x = Tensor([[1.,0.],[0.,1.],[1.,1.],[0.,0.]])
    w = Tensor([[1.,2.],[3.,4.]])
    result = f(x, w)
    self.assertEqual(result.uop.axis, 0)
    np.testing.assert_allclose(result.numpy(), x.numpy() @ w.numpy())
  def test_data_parallel_backward(self):
    @function
    def f(x:Tensor, w:Tensor) -> Tensor: return x @ w
    x = Tensor([[1.,0.],[0.,1.],[1.,1.],[0.,0.]], requires_grad=True).shard(self.devices_2, axis=0)
    w = Tensor([[1.,2.],[3.,4.]], requires_grad=True).shard(self.devices_2, axis=None)
    w.realize()
    f(x, w).sum().backward()
    # L = sum(x @ w): dL/d(x@w) = ones(4,2), so dL/dx = ones(4,2) @ w^T
    np.testing.assert_allclose(x.grad.numpy(), np.ones((4,2)) @ np.array([[1,3],[2,4]]))
  def test_data_parallel_backward_4(self):
    devices_4 = tuple(f"CPU:{i}" for i in range(4))
    @function
    def f(x:Tensor, w:Tensor) -> Tensor: return x @ w
    x = Tensor(np.arange(16).reshape(8,2).astype(np.float32), requires_grad=True).shard(devices_4, axis=0)
    w = Tensor([[1.,2.],[3.,4.]], requires_grad=True).shard(devices_4, axis=None)
    w.realize()
    f(x, w).sum().backward()
    np.testing.assert_allclose(x.grad.numpy(), np.ones((8,2)) @ np.array([[1,3],[2,4]]))
  def test_data_parallel_backward_implicit(self):
    # same as above, but the replicated weight is captured, not passed
    devices_4 = tuple(f"CPU:{i}" for i in range(4))
    w = Tensor([[1.,2.],[3.,4.]], requires_grad=True).shard(devices_4, axis=None)
    w.realize()
    @function
    def f(x:Tensor) -> Tensor: return x @ w
    x = Tensor(np.arange(16).reshape(8,2).astype(np.float32), requires_grad=True).shard(devices_4, axis=0)
    f(x).sum().backward()
    np.testing.assert_allclose(x.grad.numpy(), np.ones((8,2)) @ np.array([[1,3],[2,4]]))
  def test_data_parallel_backward_twice(self):
    devices_4 = tuple(f"CPU:{i}" for i in range(4))
    w = Tensor([[1.,2.],[3.,4.]], requires_grad=True).shard(devices_4, axis=None)
    w.realize()
    # pre-init grads like the training loop does
    w.grad = w.zeros_like().contiguous().realize()
    @function
    def f(x:Tensor) -> Tensor: return x @ w
    expected = np.ones((8,2)) @ np.array([[1,3],[2,4]])
    # calling backward twice through the same function must stay correct
    for _ in range(2):
      x = Tensor(np.arange(16).reshape(8,2).astype(np.float32), requires_grad=True).shard(devices_4, axis=0)
      f(x).sum().backward()
      np.testing.assert_allclose(x.grad.numpy(), expected)
# allow running this test file directly
if __name__ == '__main__':
  unittest.main()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/unit/test_function.py",
"license": "MIT License",
"lines": 276,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:test/unit/test_callify.py | import unittest
from tinygrad import Tensor, dtypes
class TestCallify(unittest.TestCase):
  """Tests for Tensor.callify: results must match eager evaluation for single
  and multi-output graphs, repeated/chained calls, and different dtypes."""
  def test_basic(self):
    a = Tensor([1.,2,3])
    b = Tensor([4.,5,6])
    out = a + b
    out.callify()
    self.assertListEqual(out.tolist(), [5.0, 7.0, 9.0])
  def test_const(self):
    out = Tensor(2.0) + Tensor(3.0)
    out.callify()
    self.assertEqual(out.item(), 5.0)
  def test_sum(self):
    out = Tensor.ones(16).contiguous().sum()
    out.callify()
    self.assertEqual(out.item(), 16.0)
  def test_multi_output(self):
    # two outputs sharing inputs, callified together
    a = Tensor([1.,2,3])
    b = Tensor([4.,5,6])
    c = a + b
    d = a * b
    c.callify(d)
    self.assertListEqual(c.tolist(), [5.0, 7.0, 9.0])
    self.assertListEqual(d.tolist(), [4.0, 10.0, 18.0])
  def test_two_callify_independent(self):
    # two separate callify calls on unrelated graphs
    a = Tensor([1.,2,3])
    b = Tensor([4.,5,6])
    c = a + b
    c.callify()
    d = Tensor([10.,20,30])
    e = Tensor([1.,1,1])
    f = d - e
    f.callify()
    self.assertListEqual(c.tolist(), [5.0, 7.0, 9.0])
    self.assertListEqual(f.tolist(), [9.0, 19.0, 29.0])
  def test_two_callify_shared_input(self):
    # two callify calls whose graphs share a realized input
    a = Tensor([1.,2,3]).contiguous().realize()
    b = a + 1
    b.callify()
    c = a * 2
    c.callify()
    self.assertListEqual(b.tolist(), [2.0, 3.0, 4.0])
    self.assertListEqual(c.tolist(), [2.0, 4.0, 6.0])
  def test_chained_callify(self):
    # callify a graph built on top of an already-callified, realized tensor
    a = Tensor([1.,2,3])
    b = a + 1
    b.callify()
    b.realize()
    c = b + 1
    c.callify()
    self.assertListEqual(c.tolist(), [3.0, 4.0, 5.0])
  def test_gemm(self):
    a = Tensor.ones(8, 8).contiguous()
    b = Tensor.eye(8).contiguous()
    out = a @ b
    out.callify()
    lst = out.tolist()
    # ones @ identity is still all ones
    for y in range(8):
      for x in range(8):
        self.assertEqual(lst[y][x], 1.0)
  def test_int_dtype(self):
    a = Tensor([1,2,3], dtype=dtypes.int)
    b = Tensor([4,5,6], dtype=dtypes.int)
    out = a + b
    out.callify()
    self.assertListEqual(out.tolist(), [5, 7, 9])
  def test_reduce(self):
    out = Tensor([1.,2,3,4]).sum()
    out.callify()
    self.assertEqual(out.item(), 10.0)
  def test_multiple_ops(self):
    a = Tensor([1.,2,3])
    b = Tensor([4.,5,6])
    out = (a + b) * (a - b)
    out.callify()
    self.assertListEqual(out.tolist(), [-15.0, -21.0, -27.0])
  def test_double_callify(self):
    # callify must be idempotent
    a = Tensor([1.,2,3])
    b = Tensor([4.,5,6])
    out = a + b
    out.callify()
    out.callify()
    self.assertListEqual(out.tolist(), [5.0, 7.0, 9.0])
  def test_double_callify_multi_output(self):
    a = Tensor([1.,2,3])
    b = Tensor([4.,5,6])
    c = a + b
    d = a * b
    c.callify(d)
    c.callify(d)
    self.assertListEqual(c.tolist(), [5.0, 7.0, 9.0])
    self.assertListEqual(d.tolist(), [4.0, 10.0, 18.0])
# allow running this test file directly
if __name__ == "__main__":
  unittest.main()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/unit/test_callify.py",
"license": "MIT License",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:test/unit/test_tinyfs.py | import json, math, os, socketserver, threading, unittest
import numpy as np
from tinygrad import Tensor, dtypes
from extra.tinyfs.fetch_file import hash_file, _python_hash_1mb
# in-memory chunk store shared by all _Handler threads (16-byte hash -> chunk bytes)
_chunks: dict[bytes, bytes] = {}
class _Handler(socketserver.StreamRequestHandler):
  """Minimal in-process fake of the tinyfs chunk server.

  Speaks a line-based protocol: INFO (node table), STORE_IN (store chunks,
  reply with 16-byte hashes), LOAD_IN (map hashes to nodes), CHUNK_OUT
  (return chunk bytes by hash). Chunks live in the module-level _chunks dict.
  """
  def handle(self):
    while line := self.rfile.readline():
      cmd = line.decode().strip()
      if cmd == "INFO":
        # advertise a single-node cluster pointing back at this server
        self.wfile.write(json.dumps({"node0": ["node0", f"127.0.0.1:{self.server.server_address[1]}"]}).encode() + b"\r\n")
      elif cmd.startswith("STORE_IN"):
        data = self.rfile.read(int(cmd.split()[1]))
        hashes = bytearray()
        # split the payload into CHUNK_SIZE pieces, zero-padding the last one
        for i in range(math.ceil(len(data) / Tensor.CHUNK_SIZE)):
          chunk = data[i*Tensor.CHUNK_SIZE:(i+1)*Tensor.CHUNK_SIZE].ljust(Tensor.CHUNK_SIZE, b'\0')
          h = _python_hash_1mb(chunk)
          _chunks[h] = chunk
          hashes.extend(h)
        self.wfile.write(hashes)
      elif cmd.startswith("LOAD_IN"):
        hashes = self.rfile.read(int(cmd.split()[1]))
        # every 16-byte hash is reported as living on node0
        self.wfile.write(json.dumps(["node0"] * (len(hashes) // 16)).encode() + b"\r\n")
      elif cmd.startswith("CHUNK_OUT"):
        size = int(cmd.split()[1])
        # unknown hashes read back as zeros
        self.wfile.write(_chunks.get(self.rfile.read(16), bytes(size))[:size])
      self.wfile.flush()
# regressed in 55d3a5def "preallocate all realized buffers"
class TestTinyFS(unittest.TestCase):
  """Round-trip fs_store/fs_load against the in-process fake tinyfs server."""
  @classmethod
  def setUpClass(cls):
    # start the fake chunk server on an ephemeral port and point tinyfs at it
    _chunks.clear()
    cls._server = socketserver.ThreadingTCPServer(('127.0.0.1', 0), _Handler)
    cls._server.daemon_threads = True
    threading.Thread(target=cls._server.serve_forever, daemon=True).start()
    os.environ["TINYFS_ENDPOINT"] = f"127.0.0.1:{cls._server.server_address[1]}"
  @classmethod
  def tearDownClass(cls):
    _chunks.clear()
    os.environ.pop("TINYFS_ENDPOINT", None)
    cls._server.shutdown()
    cls._server.server_close()
  def test_store(self):
    # fs_store returns a 16-byte uint8 hash tensor
    h = Tensor([1.0, 2.0, 3.0, 4.0]).fs_store().realize()
    self.assertEqual(h.shape, (16,))
    self.assertEqual(h.dtype, dtypes.uint8)
  def test_store_deterministic(self):
    a = Tensor([1.0, 2.0, 3.0, 4.0]).fs_store().realize()
    b = Tensor([1.0, 2.0, 3.0, 4.0]).fs_store().realize()
    np.testing.assert_array_equal(a.numpy(), b.numpy())
  def test_store_different_data(self):
    a = Tensor([1.0, 2.0, 3.0, 4.0]).fs_store().realize()
    b = Tensor([5.0, 6.0, 7.0, 8.0]).fs_store().realize()
    self.assertNotEqual(a.tolist(), b.tolist())
  def test_roundtrip_uint8(self):
    arr = np.arange(256, dtype=np.uint8)
    loaded = Tensor(arr).fs_store().realize().fs_load(len(arr)).to("CPU")
    np.testing.assert_array_equal(loaded.numpy(), arr)
  def test_roundtrip_multichunk_uint8(self):
    # just over one chunk, exercising the multi-chunk path
    arr = np.random.default_rng(42).integers(0, 256, size=Tensor.CHUNK_SIZE + 1024, dtype=np.uint8)
    loaded = Tensor(arr).fs_store().realize().fs_load(len(arr)).to("CPU")
    np.testing.assert_array_equal(loaded.numpy(), arr)
  def test_hash_matches_python_impl(self):
    arr = np.arange(256, dtype=np.uint8)
    h = Tensor(arr).fs_store().realize()
    # the hash from fs_store should match the pure-Python hash_file reference
    padded = arr.tobytes().ljust(Tensor.CHUNK_SIZE, b'\0')
    self.assertEqual(h.data().tobytes(), hash_file(padded))
# allow running this test file directly
if __name__ == "__main__":
  unittest.main()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/unit/test_tinyfs.py",
"license": "MIT License",
"lines": 70,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:test/amd/test_sqtt_profiler.py | import unittest, contextlib
from tinygrad import Device, Tensor, Context, TinyJit
from tinygrad.device import Compiled, ProfileProgramEvent, ProfileDeviceEvent
from tinygrad.viz.serve import load_amd_counters
@contextlib.contextmanager
def save_sqtt():
  """Yield a list that, on exit, holds the 'Exec *' AMD counter events
  recorded while the with-body ran."""
  yield (ret:=[])
  # finalize profiling so pending SQTT data lands in Compiled.profile_events
  Device[Device.DEFAULT].synchronize()
  Device[Device.DEFAULT]._at_profile_finalize()
  load_amd_counters(ret, Compiled.profile_events)
  # keep only kernel-execution rows
  ret[:] = [r for r in ret if r["name"].startswith("Exec")]
@unittest.skipUnless(Device.DEFAULT == "AMD", "only runs on AMD")
class TestSQTTProfiler(unittest.TestCase):
  """One SQTT 'Exec' event per kernel launch; repeated launches of the same
  kernel get an ' n<count>' suffix."""
  # TODO: can we enable SQTT profiling in context?
  @classmethod
  def setUpClass(cls):
    if not Device[Device.DEFAULT].sqtt_enabled: raise unittest.SkipTest("device must be in SQTT profiling mode")
  def setUp(self):
    # keep program/device events, drop exec events left over from earlier tests
    Device[Device.DEFAULT].synchronize()
    Compiled.profile_events[:] = [e for e in Compiled.profile_events if isinstance(e, (ProfileProgramEvent, ProfileDeviceEvent))]
  def test_simple(self):
    t = Tensor.empty(1) + 1
    with save_sqtt() as sqtt:
      ei = t.schedule()[0].lower()
      ei.run()
    self.assertEqual(len(sqtt), 1)
    self.assertEqual(sqtt[0]["name"], f"Exec {ei.prg.p.function_name}")
  def test_multiple_runs(self):
    t = Tensor.empty(1) + 1
    with save_sqtt() as sqtt:
      ei = t.schedule()[0].lower()
      for _ in range(N:=3):
        ei.run()
    self.assertEqual(len(sqtt), N)
    # runs after the first are suffixed with the run count
    for i in range(1, N):
      self.assertEqual(sqtt[i]["name"], f"Exec {ei.prg.p.function_name} n{i+1}")
  def test_multiple_kernels(self):
    t = ((Tensor.empty(1) + 1).contiguous() + 2)
    sched = t.schedule()
    with save_sqtt() as sqtt:
      for si in sched: si.lower().run()
    self.assertEqual(len(sqtt), len(sched))
    for i,k in enumerate(sched):
      self.assertEqual(sqtt[i]["name"], f"Exec {k.lower().prg.p.function_name}")
  def test_multiple_kernels_lower(self):
    # same as above but all kernels are lowered before any of them run
    t = ((Tensor.empty(1) + 1).contiguous() + 2)
    sched = t.schedule()
    with save_sqtt() as sqtt:
      prgs = [si.lower() for si in sched]
      for p in prgs: p.run()
    self.assertEqual(len(sqtt), len(sched))
    for i,ei in enumerate(prgs):
      self.assertEqual(sqtt[i]["name"], f"Exec {ei.prg.p.function_name}")
  def test_jit(self):
    @TinyJit
    def f(a): return a + 1
    t = Tensor.empty(1)
    with save_sqtt() as sqtt:
      for _ in range(N:=5):
        f(t).realize()
    self.assertEqual(len(sqtt), N)
    kernel_name = sqtt[0]["name"]
    for i,s in enumerate(sqtt[1:], start=1): self.assertEqual(s["name"], f"{kernel_name} n{i+1}")
  # TODO: can we trace SQTT for graphed kernels?
  def test_jit_graph(self, kernel_count=3*2):
    @TinyJit
    def f(a): return ((a + 1).contiguous() + 2).contiguous().sum()
    t = Tensor.empty(32)
    with save_sqtt() as sqtt:
      for _ in range(5):
        f(t).realize()
    names = [s["name"] for s in sqtt]
    k0, k1, k2 = names[:3]
    # events come in groups of three (one per kernel); each group bumps the n-suffix
    for i in range(3, len(sqtt), 3):
      n = (i // 3)+1
      self.assertEqual(names[i], f"{k0} n{n}")
      self.assertEqual(names[i+1], f"{k1} n{n}")
      self.assertEqual(names[i+2], f"{k2} n{n}")
    self.assertEqual(len(sqtt), kernel_count)
  @Context(JIT=2)
  def test_jit_multiple_kernels(self): self.test_jit_graph(kernel_count=3*5)
# allow running this test file directly
if __name__ == "__main__":
  unittest.main()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/amd/test_sqtt_profiler.py",
"license": "MIT License",
"lines": 83,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:tinygrad/engine/allocations.py | from dataclasses import dataclass, field
from tinygrad.uop.ops import UOp, UPat, PatternMatcher, Ops, GroupOp, graph_rewrite, identity_element, track_rewrites
from tinygrad.dtype import dtypes, ImageDType
from tinygrad.helpers import prod, DEBUG, argsort, VIZ, pluralize, FLOAT16
@dataclass
class AllocCtx:
  """Mutable state threaded through the buffer-allocation graph rewrites."""
  uop_list: list[UOp] = field(default_factory=list)  # uops tagged so far; a uop's tag is its index in this list
  buffer_map: dict[UOp, UOp] = field(default_factory=dict)  # uop -> buffer (or base uop) it resolves to
  bases: set[UOp] = field(default_factory=set)  # extra uops that must also be tagged (see add_tags)
  assigns: list[UOp] = field(default_factory=list)  # presumably collected ASSIGN nodes — populated later in the file, confirm
  replacements: list[UOp] = field(default_factory=list)  # presumably collected replacement uops — populated later in the file, confirm
def tag_uop(ctx:AllocCtx, x:UOp):
  """Number x: append it to ctx.uop_list and tag it with its index. No-op (None) if already tagged."""
  if x.tag is not None: return None
  new_tag = (len(ctx.uop_list),)  # index it will occupy after the append below
  ctx.uop_list.append(x)
  return x.replace(tag=new_tag)
def disk_copy_is_buffer(ctx:AllocCtx, u:UOp):
  """Handle COPY endpoints on disk-like devices: map copies-to-disk to a fresh disk buffer, tag copies-from-creation devices."""
  # copies to disk are replaced with the disk buffer
  to_disk = isinstance(u._device, str) and u._device.startswith(("DISK", "TINYFS"))
  if to_disk: ctx.buffer_map[u] = UOp.new_buffer(u.device, u.shard_size, u.dtype).reshape(u.max_shard_shape)
  # all copies from disk/numpy are realized into a real buffer
  from_creation = isinstance(u.src[0]._device, str) and any(u.src[0]._device.startswith(x) for x in ["NPY", "DISK", "PYTHON", "TINYFS"])
  if from_creation: return tag_uop(ctx, u)
def apply_after(ctx:AllocCtx, u:UOp):
  """Map an AFTER node to the underlying (non-AFTER) UOp it orders after."""
  base = u.src[0]
  # peel chained AFTERs down to the real producer
  while base.op is Ops.AFTER: base = base.src[0]
  ctx.buffer_map[u] = base
# CONTIGUOUS and ASSIGN + parents are the only nodes that get updated
# "read-only" numbering pass: assigns tags and fills buffer_map without restructuring the graph.
add_tags = PatternMatcher([
  (UPat(Ops.COPY, name="u"), disk_copy_is_buffer),
  # no tag on copies that are assigned
  (UPat(Ops.ASSIGN, src=(UPat(), UPat(Ops.COPY, name="c")), name="a"),
   lambda a,c: a.replace(src=(a.src[0], c.rtag(())), tag=a.tag+c.tag) if a.tag and c.tag else None),
  (UPat(Ops.AFTER, name="u"), apply_after),
  (UPat({Ops.CONTIGUOUS, Ops.ASSIGN}, name="x"), tag_uop),
  # any other op only gets tagged if it is one of the sink's realization bases
  (UPat(GroupOp.All, name="x"), lambda ctx,x: tag_uop(ctx,x) if x in ctx.bases else None),
])
def _buffer_like(u:UOp) -> UOp:
  """Create a fresh BUFFER matching u's device/size/dtype/shape (sharded like u if u is multi-device)."""
  dtype = u.dtype
  if isinstance(dtype, ImageDType):
    # an Image buffer is only valid if sizes match and the innermost non-1 dim is a multiple of 4; otherwise fall back to the base dtype
    if prod(dtype.shape) != prod(u.max_shard_shape) or ([x for x in u.max_shard_shape if x != 1] or [1])[-1] % 4 != 0:
      if DEBUG >= 1: print(f"demoting Image {dtype} with shape {u.max_shard_shape}")
      dtype = dtype.base
  buffer = UOp.new_buffer(u.device, u.shard_size, dtype).reshape(u.max_shard_shape)
  if isinstance(u.device, tuple) and u.axis is not None: buffer = buffer.multi(u.axis)
  return buffer
def replace_contig_with_assign(u:UOp):
  """Lower CONTIGUOUS into `new_buffer.assign(src)`, keeping u's tag."""
  # if size is 0, remove the contig
  if u.size == 0: return u.src[0]
  # no real contig for DISK/TINYFS tensors, they are left alone
  if isinstance(u._device, str) and u._device.startswith(("DISK", "TINYFS")): return u.rtag(None)
  return _buffer_like(u).assign(u.src[0]).rtag(u.tag)
def replace_assign_with_contig(u:UOp):
  """If an ASSIGN does not ultimately target a real BUFFER, demote it to a CONTIGUOUS of its value."""
  assigned_to = u
  # follow ASSIGN/BITCAST/AFTER chains to find what is actually being written
  while assigned_to.op in {Ops.ASSIGN, Ops.BITCAST, Ops.AFTER}: assigned_to = assigned_to.src[0].base
  if assigned_to.op is not Ops.BUFFER:
    return u.src[1].contiguous(tag=u.tag)
def found_contiguous(ctx:dict[UOp, UOp], contig:UOp, src:UOp):
  """Record that `src`'s movement-op base has a contiguous version, so later ALU uses can be swapped to it.

  Walks PERMUTE/RESHAPE (inverting them on `contig`) down to the base; bails on any other movement op.
  """
  # FLOAT16 special case: peel a half CAST and re-cast the contiguous replacement to float
  if (x:=src).op is Ops.CAST and x.dtype == dtypes.half and FLOAT16: x, contig = x.src[0], contig.cast(dtypes.float)
  while x is not x.base:
    if x.op is Ops.PERMUTE: contig = contig.permute(argsort(x.marg))  # apply the inverse permutation
    elif x.op is Ops.RESHAPE: contig = contig.reshape(x.src[0].shape)
    else: return None
    x = x.src[0]
  ctx[x] = contig
def contiguous_mops_to_view(c:UOp):
  """CONTIGUOUS(MOPS(BUFFER)) → CONTIGUOUS(BUFFER_VIEW) when movement ops collapse to a contiguous range."""
  src = c.src[0]
  buf = src.base
  if buf.op not in {Ops.BUFFER, Ops.BUFFER_VIEW}: return None
  # a bare RESHAPE of a buffer is already handled elsewhere (has_buffer_identity); skip
  if src.op is Ops.RESHAPE and src.src[0].op in {Ops.BUFFER, Ops.BUFFER_VIEW}: return None
  # no symbolic shape
  if not all(isinstance(x, int) for x in c.shape): return None
  # check if view is supported
  if not isinstance(c.device, str): return None
  from tinygrad.device import Device
  # only allocators exposing _offset can serve zero-copy sub-buffer views
  if not hasattr(Device[c.device].allocator, "_offset"): return None
  # see if this can be a view
  offset = src.contiguous_view_offset()
  if offset is None: return None
  # merge BUFFER_VIEWs
  if buf.op is Ops.BUFFER_VIEW: offset, buf = offset + buf.arg[1], buf.src[0]
  # NOTE: this contiguous is removed because this BUFFER_VIEW/RESHAPE has_buffer_identity
  return UOp(Ops.BUFFER_VIEW, src.dtype, (buf,), (src.size, offset)).reshape(src.shape).contiguous(tag=c.tag)
def transform_precompiled_call(c:UOp) -> UOp|None:
  """Rewrite a precompiled CALL into out.after(CALL(fxn, contiguous args..., out)) so it has an explicit output buffer."""
  if not c.arg.precompile: return None
  if c.src[0].op is Ops.SINK: return None  # already transformed (src[0] becomes a SINK below)
  out = _buffer_like(c)
  # the function expression becomes a SINK of out-param.assign(body)
  fxn = out.param_like(len(c.src)-1).assign(c.src[0]).sink()
  # args are forced contiguous (AFTERs are kept as-is for ordering); the CALL itself becomes void and feeds `out` via AFTER
  return out.after(c.replace(src=(fxn,)+tuple(x.contiguous() if x.op is not Ops.AFTER else x for x in c.src[1:])+(out,), dtype=dtypes.void, tag=None))
# Second pass: restructures the tensor graph. May break tensor-graph invariants; the numbered tags must be preserved.
pm_early_transform_tensor_graph = PatternMatcher([
  # transform precompiled CALLs
  (UPat(Ops.CALL, name="c"), transform_precompiled_call),
  # CONTIGUOUS(MOPS(BUFFER/BUFFER_VIEW)) → CONTIGUOUS(BUFFER_VIEW) when movement ops collapse to contiguous range
  (UPat(Ops.CONTIGUOUS, src=(UPat(GroupOp.Movement),), name="c"), contiguous_mops_to_view),
  # *** CONTIGUOUS replacement hack for openpilot ***
  (UPat(Ops.CONTIGUOUS, src=(UPat((*GroupOp.Movement, Ops.CAST), name="src"),), name="contig"), found_contiguous),
  # replace ALU sources with contiguous versions found above
  (UPat(GroupOp.ALU, name="alu"), lambda ctx,alu: alu.replace(src=new_src) if (new_src:=tuple(ctx.get(s, s) for s in alu.src)) != alu.src else None),
  # add CONTIGUOUS to tagged UOps
  (UPat(GroupOp.All-{Ops.CONTIGUOUS, Ops.ASSIGN}, name="x"), lambda x: x.rtag(None).contiguous(tag=x.tag) if x.tag else x.replace(tag=None)),
  # remove extra CONTIGUOUS on ASSIGN (only when assign target is contiguous)
  (UPat(Ops.CONTIGUOUS, src=(UPat(Ops.ASSIGN, name="a"),), name="c"),
   lambda a,c: a.replace(tag=(a.tag or ())+(c.tag or ())) if a.src[0].has_buffer_identity() else None),
  # replace ASSIGN with CONTIGUOUS
  (UPat(Ops.ASSIGN, name="u"), replace_assign_with_contig),
  # replace CONTIGUOUS with ASSIGNs
  (UPat(Ops.CONTIGUOUS, name="u"), replace_contig_with_assign),
  # remove DETACH/CONTIGUOUS_BACKWARD
  (UPat((Ops.DETACH, Ops.CONTIGUOUS_BACKWARD), name="x"), lambda x: x.src[0]),
  # reduce of size 0 is the identity element
  (UPat(Ops.REDUCE_AXIS, name="reduce", src=(UPat.var("x"),)),
   lambda reduce,x: reduce.const_like(identity_element(reduce.arg[0], reduce.dtype)) if x.size == 0 and reduce.size != 0 else None),
  # handle size 0
  (UPat(GroupOp.All-{Ops.SINK}, name="x"), lambda x: x.const_like(0).rtag(x.tag) if x._shape is not None and x.size == 0 else None),
  # early fixup const copy (TODO: is this wrong if there's a pad?)
  (UPat(Ops.COPY, src=(UPat.var("s"), UPat()), name="c"), lambda c,s: c.const_like(ss.arg) if (ss:=s.base).op is Ops.CONST else None),
])
def untag_and_append(ctx:AllocCtx, x:UOp):
  """Strip x's tags, mapping each tag's original UOp to the assigned buffer, and collect x for the final CALL."""
  if x.tag is None: return None
  ret = x.replace(tag=None)
  for t in x.tag:
    original_uop: UOp = ctx.uop_list[t]  # tag t is the index assigned by tag_uop
    replace_uop = ret
    # unwrap ASSIGN chains to get the underlying buffer expression
    while replace_uop.op is Ops.ASSIGN: replace_uop = replace_uop.src[0]
    # shrink back to the original (pre-padding/shard) shape
    ctx.buffer_map[original_uop] = replace_uop.shrink_to(original_uop.shape)
  ctx.assigns.append(ret)
  return ret
def append_after(ctx:AllocCtx, x:UOp):
  """Collect an ordering/side-effect node for the final CALL sink (no rewrite)."""
  ctx.assigns.append(x)
def replace_input_buffer(ctx:AllocCtx, b:UOp):
  """Replace an input BUFFER/BUFFER_VIEW/BIND with a numbered PARAM; the original becomes a CALL argument."""
  ctx.replacements.append(b)
  # BIND additionally carries its min/max range and variable name through to the PARAM
  return UOp.param(len(ctx.replacements)-1, b.dtype, b.shape, b._device,
                   b._min_max if b.op is Ops.BIND else None, b.src[0].arg[0] if b.op is Ops.BIND else None)
# Third pass: builds the final buffer_map and collects the ops that will form the CALL sink.
pm_finalize_call = PatternMatcher([
  (UPat(Ops.ASSIGN, name="x"), untag_and_append),
  (UPat(Ops.AFTER, name="x"), append_after),
  # disk/TINYFS copies have side effects, keep them in the sink
  (UPat(Ops.COPY, name="x"), lambda ctx,x: append_after(ctx,x) if isinstance(x.device, str) and x.device.startswith(("DISK", "TINYFS")) else None),
  # replace UNIQUE with LUNIQUE for CONST cache key normalization
  (UPat(Ops.CONST, src=(UPat(Ops.UNIQUE), UPat(Ops.DEVICE, name="d")), name="b"), lambda b,d: b.replace(src=(d,))),
])
# Final pass (bottom-up): normalizes the CALL's cache key by parameterizing its inputs.
pm_replace_buf = PatternMatcher([
  # replace BUFFER with PARAM for cache key normalization
  (UPat(Ops.BUFFER, src=(UPat(Ops.UNIQUE), UPat(Ops.DEVICE)), name="b"), replace_input_buffer),
  # replace BUFFER_VIEW with PARAM. this rewrite is bottom up so BUFFERs we don't need won't be in the input
  (UPat(Ops.BUFFER_VIEW, src=(UPat(Ops.BUFFER),), name="b"), replace_input_buffer),
  # strip value from BIND for cache key normalization, so different values hit same cache
  (UPat(Ops.BIND, src=(UPat(Ops.DEFINE_VAR), UPat(Ops.CONST)), name="b"), replace_input_buffer),
])
@track_rewrites(lambda _,ret: f"Process {pluralize('Buffer', len(ret[1]))}")
def transform_to_call(big_sink:UOp) -> tuple[UOp, dict[UOp, UOp]]:
  """Lower the tensor graph under big_sink into a CALL UOp plus a buffer map.

  Returns (call, buffer_map): `call` is the parameterized CALL over the collected
  assigns, `buffer_map` maps original tensor-graph UOps to their realized buffers.
  """
  if VIZ: graph_rewrite(big_sink, PatternMatcher([]), name="View Tensor Graph")  # no-op rewrite, only for the visualizer
  # uop list is a list in the original_sink graph and we can map to the tags later
  # here we build buffer map
  dont_realize = {Ops.CONST, Ops.BUFFER, Ops.BIND, Ops.DEFINE_VAR, Ops.AFTER}
  ctx = AllocCtx(bases=set([x.multibase for x in big_sink.src if x.base.op not in dont_realize]))
  # this rewrite is "read-only", it adds simple things to buffer_map and may sink things on big_sink, bottom_up
  # this is the only one where we have to be careful to not break the tensor graph
  big_sink = graph_rewrite(big_sink, add_tags, ctx=ctx, bottom_up=True, name="number the uops")
  # here we can break the tensor graph. this is the only place you need to maintain numbered tags
  big_sink = graph_rewrite(big_sink, pm_early_transform_tensor_graph, ctx={}, name="early transform tensor graph")
  # here we construct the final buffer_map. this is everything that will go into the tensor map
  graph_rewrite(big_sink, pm_finalize_call, ctx=ctx, name="finalize call")
  ret = graph_rewrite(UOp.sink(*ctx.assigns), pm_replace_buf, ctx=ctx, bottom_up=True, name="replace bufs").call(*ctx.replacements)
  if VIZ: graph_rewrite(ret, PatternMatcher([]), name="View Call")
  return ret, ctx.buffer_map
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "tinygrad/engine/allocations.py",
"license": "MIT License",
"lines": 165,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
tinygrad/tinygrad:test/mockgpu/am/amgpu.py | # mypy: ignore-errors
from __future__ import annotations
import ctypes, ctypes.util, struct, functools, os, mmap
from tinygrad.runtime.autogen.am import am
from tinygrad.runtime.support.amd import AMDReg, import_asic_regs
from test.mockgpu.amd.amdgpu import AMDGPU
# host libc mmap, used to mirror mock VRAM into this process's address space
libc = ctypes.CDLL(ctypes.util.find_library("c"))
libc.mmap.argtypes = [ctypes.c_void_p, ctypes.c_size_t, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_long]
libc.mmap.restype = ctypes.c_void_p
# size of the mocked VRAM backing file (512 MiB)
VRAM_SIZE = 512 << 20
# (major, minor, revision) advertised for each mocked IP block in the discovery table
IP_VERSIONS = {
  am.GC_HWIP: (12, 0, 0), am.SDMA0_HWIP: (7, 0, 0), am.MMHUB_HWIP: (4, 1, 0), am.NBIO_HWIP: (6, 3, 1),
  am.MP0_HWIP: (14, 0, 2), am.MP1_HWIP: (14, 0, 2), am.HDP_HWIP: (7, 0, 0), am.OSSSYS_HWIP: (7, 0, 0),
}
def _pad(t, n=10): return t + (0,) * (n - len(t))
# per-IP register aperture base addresses, zero-padded to 10 entries (the discovery-table field width)
IP_BASES = {
  am.GC_HWIP: _pad((0x00001260, 0x0000A000, 0x0001C000, 0x02402C00)),
  am.SDMA0_HWIP: _pad((0x00001260, 0x0000A000, 0x0001C000, 0x02402C00)),
  am.MMHUB_HWIP: _pad((0x0001A000, 0x02408800)),
  am.NBIO_HWIP: _pad((0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0x0241B000, 0x04040000)),
  am.MP0_HWIP: _pad((0x00016000, 0x00DC0000, 0x00E00000, 0x00E40000, 0x0243FC00)),
  am.MP1_HWIP: _pad((0x00016000, 0x00DC0000, 0x00E00000, 0x00E40000, 0x0243FC00)),
  am.HDP_HWIP: _pad((0x00000F20, 0x0240A400)),
  am.OSSSYS_HWIP: _pad((0x000010A0, 0x0240A000)),
}
# hardware ids for each mocked IP, as written into the discovery table
IP_HWIDS = {hwip: am.hw_id_map[hwip] for hwip in IP_VERSIONS}
# GC (graphics core) properties reported via the gc_info table
GC_INFO = dict(gc_num_se=2, gc_num_cu_per_sh=8, gc_num_sh_per_se=2, gc_num_rb_per_se=4,
               gc_num_tccs=8, gc_wave_size=32, gc_max_waves_per_simd=16, gc_max_scratch_slots_per_cu=32, gc_lds_size=64)
def _build_ip_regs(prefix, hwip) -> dict[str, AMDReg]:
  """Import the ASIC register definitions for one IP block; empty dict if the headers for that version are unavailable."""
  try: return import_asic_regs(prefix, IP_VERSIONS[hwip], cls=functools.partial(AMDReg, bases={0: IP_BASES[hwip]}))
  except Exception: return {}
class MockMMU:
  """Software model of the GPU MMU: walks the 4-level page table in mock VRAM and keeps a flat TLB of va -> (pa, size, is_system)."""
  def __init__(self, gpu:MockAMGPU):
    self.gpu = gpu
    self.tlb: dict[int, tuple[int, int, bool]] = {}  # va -> (paddr, mapping size, backed by system memory?)
  def invalidate(self, pt_base:int, va_base:int):
    """Rebuild the TLB from the page table at pt_base and mirror new/moved VRAM mappings into the host address space."""
    new_tlb: dict[int, tuple[int, int, bool]] = {}
    self._walk(pt_base, 0, 0, new_tlb, va_base)
    for va, (pa, sz, is_sys) in new_tlb.items():
      old = self.tlb.get(va)
      # VRAM mapping that is new or whose physical address changed: remap it into the host process
      if not is_sys and (old is None or old[0] != pa): self.gpu.map_vram_at(va, pa, sz)
      if old is None: self.gpu.map_range(va, sz)
    self.tlb = new_tlb
  def _walk(self, pt_paddr:int, level:int, va_acc:int, out:dict, va_base:int):
    # 4-level walk with 512 entries per level; shifts correspond to 4KiB pages and 9-bit indices
    shift = [39, 30, 21, 12][level]
    for i in range(512):
      pte = struct.unpack_from('<Q', self.gpu.vram, pt_paddr + i * 8)[0]
      if not (pte & am.AMDGPU_PTE_VALID): continue
      va, pa = va_acc | (i << shift), pte & 0x0000FFFFFFFFF000
      if level == 3 or (pte & am.AMDGPU_PDE_PTE_GFX12):
        # leaf PTE (or a PDE marked as PTE, i.e. a huge page): record the mapping
        out[va_base + va] = (pa, 1 << shift, bool(pte & am.AMDGPU_PTE_SYSTEM))
      else:
        self._walk(pa, level + 1, va, out, va_base)
  def paddr_to_host(self, paddr:int) -> int:
    """Translate a GPU physical address to a host pointer: system-memory page map first, then the VRAM mirror."""
    page, off = paddr & ~0xFFF, paddr & 0xFFF
    if page in self.gpu._sysmem_map: return self.gpu._sysmem_map[page] + off
    if paddr < VRAM_SIZE: return self.gpu.vram_addr + paddr
    raise ValueError(f"paddr {paddr:#x} not found in sysmem_map or VRAM")
  def addr_to_host(self, addr:int) -> int:
    """Translate a GPU virtual (or system-aperture) address to a host pointer via the TLB."""
    gmc = self.gpu.mmio.gmc
    # the system aperture registers hold addresses in 256KiB units (hence << 18)
    sys_lo = self.gpu.mmio.regs.get(gmc.reg('regMMMC_VM_SYSTEM_APERTURE_LOW_ADDR') or 0, 0) << 18
    sys_hi = self.gpu.mmio.regs.get(gmc.reg('regMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR') or 0, 0) << 18
    if sys_lo <= addr < sys_hi: return self.paddr_to_host(addr - self.gpu.mc_base)
    for tva, (pa, sz, is_sys) in self.tlb.items():
      if tva <= addr < tva + sz:
        paddr = pa + (addr - tva)
        if not is_sys: return self.gpu.vram_addr + paddr
        return self.paddr_to_host(paddr)
    raise ValueError(f"addr {addr:#x} not mapped (sys_aperture=[{sys_lo:#x}, {sys_hi:#x}])")
class MockIPBlock:
  """Base class for mocked IP blocks: register name<->address maps plus default reads/writes against the shared mmio.regs dict."""
  def __init__(self, gpu:MockAMGPU, mmio:MockMMIOInterface, regs:dict[str, AMDReg]):
    self.gpu, self.mmio, self._regs = gpu, mmio, regs
    self._n2a = {n: r.addr[0] for n, r in regs.items()}  # register name -> dword address
    self._a2n = {a: n for n, a in self._n2a.items()}     # reverse map, used by subclasses' write hooks
    self.addrs = set(self._n2a.values())                 # addresses this block claims for dispatch
  def reg(self, name) -> int|None: return self._n2a.get(name)
  def decode(self, name) -> dict: return self._regs[name].decode(self.mmio.regs.get(self._n2a[name], 0))
  def read(self, reg:int) -> int: return self.mmio.regs.get(reg, 0)
  def write(self, reg:int, val:int): self.mmio.regs[reg] = val
  def _read_pair(self, pair) -> int:
    # read a (lo, hi) register pair as a single 64-bit value; 0 if the pair is unresolved
    if pair[0] is None: return 0
    return self.mmio.regs.get(pair[0], 0) | (self.mmio.regs.get(pair[1], 0) << 32)
class MockPSP(MockIPBlock):
  """Mock PSP (Platform Security Processor): fakes bootloader/sOS status and services the PSP command ring via C2PMSG registers."""
  def __init__(self, gpu, mmio):
    super().__init__(gpu, mmio, _build_ip_regs('mp', am.MP0_HWIP))
    self._sos_alive, self._ring_wptr = False, 0
    # register prefix differs between MP 14.x and older IPs
    pref = "regMPASP_SMN_C2PMSG" if IP_VERSIONS[am.MP0_HWIP] >= (14,0,0) else "regMP0_SMN_C2PMSG"
    def r(n): return self.reg(f"{pref}_{n}")
    self._c2pmsg_35, self._c2pmsg_64, self._c2pmsg_67 = r(35), r(64), r(67)
    self._c2pmsg_69, self._c2pmsg_70, self._c2pmsg_81 = r(69), r(70), r(81)
  def read(self, reg:int) -> int:
    if reg == self._c2pmsg_35: return 0x80000000          # bootloader always reports ready
    if reg == self._c2pmsg_81: return 0x1 if self._sos_alive else 0x0
    if reg == self._c2pmsg_64: return 0x80000000 if self._sos_alive else 0x0
    if reg == self._c2pmsg_67: return self._ring_wptr     # ring write pointer readback
    return super().read(reg)
  def write(self, reg:int, val:int):
    super().write(reg, val)
    # loading sOS marks the secure OS as alive; writing the wptr submits new ring frames
    if reg == self._c2pmsg_35 and val == am.PSP_BL__LOAD_SOSDRV: self._sos_alive = True
    if reg == self._c2pmsg_67: self._ring_submit(val)
  def _ring_submit(self, new_wptr:int):
    """Process the ring frame at the previous wptr: write its fence value and zero a status field in the command buffer."""
    old_wptr = self._ring_wptr
    self._ring_wptr = new_wptr
    lo, hi = self._c2pmsg_69, self._c2pmsg_70  # ring base address registers
    if lo is None or hi is None: return
    ring_mc = self.mmio.regs.get(lo, 0) | (self.mmio.regs.get(hi, 0) << 32)
    ring_paddr = ring_mc - self.gpu.mc_base
    frame_off = ring_paddr + old_wptr * 4  # wptr counts dwords
    frame = am.struct_psp_gfx_rb_frame.from_buffer_copy(bytes(self.gpu.vram[frame_off:frame_off + ctypes.sizeof(am.struct_psp_gfx_rb_frame)]))
    fence_paddr = ((frame.fence_addr_hi << 32) | frame.fence_addr_lo) - self.gpu.mc_base
    if 0 <= fence_paddr < len(self.gpu.vram):
      struct.pack_into('<I', self.gpu.vram, fence_paddr, frame.fence_value)
    cmd_paddr = ((frame.cmd_buf_addr_hi << 32) | frame.cmd_buf_addr_lo) - self.gpu.mc_base
    if 0 <= cmd_paddr < len(self.gpu.vram):
      # zero the dword at offset 864 in the command buffer — presumably the resp/status field; confirm against psp_gfx_cmd_resp layout
      struct.pack_into('<I', self.gpu.vram, cmd_paddr + 864, 0)
class MockSMU(MockIPBlock):
  """Mock SMU message interface: tracks a single pending-message flag over the MP1 C2PMSG mailbox registers."""
  def __init__(self, gpu, mmio):
    # SMU registers come from the MP 11.0 headers regardless of the MP1 IP version above
    try: regs = import_asic_regs('mp', (11, 0), cls=functools.partial(AMDReg, bases={0: IP_BASES[am.MP1_HWIP]}))
    except Exception: regs = {}
    super().__init__(gpu, mmio, regs)
    self._msg_pending = False
    def r(n): return self.reg(f"mmMP1_SMN_C2PMSG_{n}")
    self._c2pmsg_53, self._c2pmsg_54, self._c2pmsg_66 = r(53), r(54), r(66)
    self._c2pmsg_75, self._c2pmsg_82, self._c2pmsg_90 = r(75), r(82), r(90)
  def read(self, reg:int) -> int:
    # response registers read 1 while a message is pending
    if reg == self._c2pmsg_90 or reg == self._c2pmsg_54: return 0x1 if self._msg_pending else super().read(reg)
    if reg == self._c2pmsg_82: return self.mmio.regs.get(reg, 3)  # defaults to 3 — presumably an argument/version readback; confirm
    return super().read(reg)
  def write(self, reg:int, val:int):
    super().write(reg, val)
    # writing a message register raises the pending flag; clearing the response register (val 0) drops it
    if reg == self._c2pmsg_66 or reg == self._c2pmsg_75: self._msg_pending = True
    if (reg == self._c2pmsg_90 or reg == self._c2pmsg_54) and val == 0: self._msg_pending = False
class MockSDMA(MockIPBlock):
  """Mock SDMA engine: watches *_RB_CNTL writes and registers the ring with the GPU when rb_enable is set."""
  def __init__(self, gpu, mmio):
    # SDMA registers live in the GC register headers; filter them out by name
    all_gc = _build_ip_regs('gc', am.GC_HWIP)
    super().__init__(gpu, mmio, {n: r for n, r in all_gc.items() if 'SDMA' in n})
  def write(self, reg:int, val:int):
    super().write(reg, val)
    name = self._a2n.get(reg, '')
    if name.endswith('_RB_CNTL') and self._regs[name].decode(val).get('rb_enable', 0):
      self._activate_queue(name.rsplit('_RB_CNTL', 1)[0])
  def _activate_queue(self, prefix:str):
    """Read the ring base/rptr/wptr registers for this queue and hand the host pointers to the GPU's SDMA executor."""
    ring_addr = self._read_pair((self.reg(f'{prefix}_RB_BASE'), self.reg(f'{prefix}_RB_BASE_HI'))) << 8  # base stored in 256B units
    rptr_addr = self._read_pair((self.reg(f'{prefix}_RB_RPTR_ADDR_LO'), self.reg(f'{prefix}_RB_RPTR_ADDR_HI')))
    wptr_addr = self._read_pair((self.reg(f'{prefix}_RB_WPTR_POLL_ADDR_LO'), self.reg(f'{prefix}_RB_WPTR_POLL_ADDR_HI')))
    rb_size = self.decode(f'{prefix}_RB_CNTL')['rb_size']  # log2-encoded ring size
    self.gpu.add_sdma_queue(self.gpu.mmu.addr_to_host(ring_addr), 4 << rb_size,
                            self.gpu.mmu.addr_to_host(rptr_addr), self.gpu.mmu.addr_to_host(wptr_addr))
class MockGFX(MockIPBlock):
  """Mock GFX/compute block: fakes CP/RLC status reads, activates PM4 HQD queues, and forwards GC VM invalidations to the MMU."""
  def __init__(self, gpu, mmio):
    super().__init__(gpu, mmio, _build_ip_regs('gc', am.GC_HWIP))
    # context-0 page table base/start registers, used to locate the VM page table on invalidate
    self._pt_base = (self.reg('regGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32'), self.reg('regGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32'))
    self._pt_start = (self.reg('regGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32'), self.reg('regGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32'))
    self._gc_inv_ack = self.reg('regGCVM_INVALIDATE_ENG17_ACK')
    self._gc_inv_req = self.reg('regGCVM_INVALIDATE_ENG17_REQ')
    self._hqd_active = self.reg('regCP_HQD_ACTIVE')
  def read(self, reg:int) -> int:
    if reg == self.reg('regCP_STAT') or reg == self.reg('regRLC_SAFE_MODE'): return 0  # CP idle, not in safe mode
    if reg == self.reg('regRLC_RLCS_BOOTLOAD_STATUS'): return 0x2                      # RLC reports bootload complete
    if reg == self._gc_inv_ack: return 0x1                                             # invalidations ack immediately
    return super().read(reg)
  def write(self, reg:int, val:int):
    super().write(reg, val)
    if reg == self.reg('regCP_HQD_DEQUEUE_REQUEST'):
      # a dequeue request deactivates the HQD
      if self._hqd_active is not None: self.mmio.regs[self._hqd_active] = 0
    if reg == self._hqd_active and val == 1: self._activate_pm4_queue()
    if reg == self._gc_inv_req: self.gpu.mmu.invalidate(self.get_pt_base(), self.get_va_base())
  def _activate_pm4_queue(self):
    """Read the HQD ring base/rptr/wptr registers and hand the host pointers to the GPU's PM4 executor."""
    ring_addr = self._read_pair((self.reg('regCP_HQD_PQ_BASE'), self.reg('regCP_HQD_PQ_BASE_HI'))) << 8  # base stored in 256B units
    rptr_addr = self._read_pair((self.reg('regCP_HQD_PQ_RPTR_REPORT_ADDR'), self.reg('regCP_HQD_PQ_RPTR_REPORT_ADDR_HI')))
    wptr_addr = self._read_pair((self.reg('regCP_HQD_PQ_WPTR_POLL_ADDR'), self.reg('regCP_HQD_PQ_WPTR_POLL_ADDR_HI')))
    queue_size = self.decode('regCP_HQD_PQ_CONTROL')['queue_size']  # log2-encoded
    self.gpu.add_pm4_queue(self.gpu.mmu.addr_to_host(ring_addr), 4 << (queue_size + 1),
                           self.gpu.mmu.addr_to_host(rptr_addr), self.gpu.mmu.addr_to_host(wptr_addr))
  def get_pt_base(self) -> int: return self._read_pair(self._pt_base) & 0x0000FFFFFFFFF000
  def get_va_base(self) -> int: return self._read_pair(self._pt_start) << 12  # start address stored in 4KiB pages
class MockGMC(MockIPBlock):
  """Mock MMHUB/GMC: reports VRAM size, acks VM invalidations immediately, and triggers MMU TLB rebuilds on invalidate requests."""
  def __init__(self, gpu, mmio, gfx:MockGFX):
    super().__init__(gpu, mmio, _build_ip_regs('mmhub', am.MMHUB_HWIP))
    self._gfx = gfx  # the page table base/start come from the GFX block's context-0 registers
    self._inv_ack = self.reg('regMMVM_INVALIDATE_ENG17_ACK')
    self._inv_sem = self.reg('regMMVM_INVALIDATE_ENG17_SEM')
    self._inv_req = self.reg('regMMVM_INVALIDATE_ENG17_REQ')
    self._fb_loc_top = self.reg('regMMMC_VM_FB_LOCATION_TOP')
  def read(self, reg:int) -> int:
    if reg == self._inv_ack or reg == self._inv_sem: return 0x1  # semaphore always available, invalidations ack instantly
    if reg == self._fb_loc_top: return VRAM_SIZE >> 24           # framebuffer top, in 16MiB units
    return super().read(reg)
  def write(self, reg:int, val:int):
    super().write(reg, val)
    if reg == self._inv_req: self.gpu.mmu.invalidate(self._gfx.get_pt_base(), self._gfx.get_va_base())
class MockNBIO(MockIPBlock):
  """Mock NBIO + HDP: answers the HDP flush remap register with the byte offset of the HDP flush control register."""
  def __init__(self, gpu, mmio):
    regs = _build_ip_regs('nbif', am.NBIO_HWIP)
    regs.update(_build_ip_regs('hdp', am.HDP_HWIP))
    super().__init__(gpu, mmio, regs)
    self._remap_hdp = self.reg('regBIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL')
    self._hdp_flush = self.reg('regHDP_MEM_FLUSH_CNTL')
  def read(self, reg:int) -> int:
    # the remap register reports where HDP_MEM_FLUSH_CNTL lives, as a byte offset (dword address * 4)
    if reg == self._remap_hdp and self._hdp_flush is not None: return self._hdp_flush * 4
    return super().read(reg)
class MockMMIOInterface:
  """Dispatches MMIO register reads/writes to the mocked IP blocks; unclaimed addresses fall back to a plain dict."""
  def __init__(self, gpu:MockAMGPU):
    self.gpu = gpu
    self.regs: dict[int, int] = {}  # backing store shared by all blocks
    gfx = MockGFX(gpu, self)
    self.gmc = MockGMC(gpu, self, gfx)
    self.blocks = [MockPSP(gpu, self), MockSMU(gpu, self), MockSDMA(gpu, self), gfx, self.gmc, MockNBIO(gpu, self)]
    self._addr_block: dict[int, MockIPBlock] = {}
    # first block to claim an address wins (setdefault), matching the list order above
    for block in self.blocks:
      for addr in block.addrs: self._addr_block.setdefault(addr, block)
  def __getitem__(self, index:int|slice) -> int|list[int]:
    if isinstance(index, slice): return [self[i] for i in range(index.start or 0, index.stop or 0, index.step or 1)] # type: ignore[misc]
    if index == 0xde3: return VRAM_SIZE >> 20  # reports VRAM size in MiB — presumably a VRAM-size scratch register; confirm
    if block := self._addr_block.get(index): return block.read(index)
    return self.regs.get(index, 0)
  def __setitem__(self, index:int|slice, val:int|list[int]|tuple[int, ...]):
    if isinstance(index, slice):
      # scalar values are broadcast across the slice
      vals = val if isinstance(val, (list, tuple)) else [val] * ((index.stop - index.start) // (index.step or 1)) # type: ignore[operator]
      for i, v in zip(range(index.start or 0, index.stop or 0, index.step or 1), vals): self[i] = v
      return
    assert isinstance(val, int)
    # store first so a block's write hook observes the new value
    self.regs[index] = val
    if block := self._addr_block.get(index): block.write(index, val)
  def __len__(self): return 0x10000000
class MockAMGPU(AMDGPU):
  """Fully software-mocked AM GPU: memfd-backed VRAM/doorbells, a mock MMU and MMIO space, and a prebuilt IP discovery table."""
  def __init__(self, gpuid:int=0):
    super().__init__(gpuid)
    # VRAM lives in a memfd so the same pages can be mapped at multiple host addresses (see map_vram_at)
    self.vram_fd = os.memfd_create("vram")
    os.ftruncate(self.vram_fd, VRAM_SIZE)
    self.vram_addr = libc.mmap(0, VRAM_SIZE, mmap.PROT_READ | mmap.PROT_WRITE, mmap.MAP_SHARED, self.vram_fd, 0)
    self.vram = (ctypes.c_ubyte * VRAM_SIZE).from_address(self.vram_addr)
    self.doorbell_fd = os.memfd_create("doorbell")
    os.ftruncate(self.doorbell_fd, 0x2000)
    self.arch = "rdna4"
    self._sysmem_map:dict[int,int] = {}          # GPU sysmem page paddr -> host pointer
    self._next_sysmem_paddr = 0x100000000        # fake sysmem physical addresses start above VRAM
    self.mmu = MockMMU(self)
    self.mmio = MockMMIOInterface(self)
    self._preboot()
  def translate_addr(self, addr:int) -> int: return self.mmu.addr_to_host(addr)
  def map_vram_at(self, va:int, paddr:int, size:int):
    # MAP_FIXED (0x10) mapping of the VRAM memfd at the GPU virtual address, so host and "GPU" share pages
    libc.mmap(va, size, mmap.PROT_READ | mmap.PROT_WRITE, mmap.MAP_SHARED | 0x10, self.vram_fd, paddr)
  def _preboot(self):
    """Write the IP discovery binary (die/IP tables + gc_info) into the top of VRAM, where the driver expects to find it."""
    ip_data = bytearray()
    for hwip, (major, minor, rev) in IP_VERSIONS.items():
      ip = am.struct_ip_v4(hw_id=IP_HWIDS[hwip], num_base_address=len(IP_BASES[hwip]), major=major, minor=minor, revision=rev)
      ip_data += bytes(ip) + b'\x00'
      for b in IP_BASES[hwip]: ip_data += struct.pack('<I', b)
    dhdr = am.struct_die_header(num_ips=len(IP_VERSIONS))
    ihdr = am.struct_ip_discovery_header(signature=am.DISCOVERY_TABLE_SIGNATURE, version=4, num_dies=1)
    ip_disc_off = ctypes.sizeof(am.struct_binary_header)
    ihdr.die_info[0].die_offset = ip_disc_off + ctypes.sizeof(am.struct_ip_discovery_header)
    gc = am.struct_gc_info_v2_1()
    gc.header.table_id, gc.header.version_major, gc.header.version_minor = am.GC, 2, 1
    gc.header.size = ctypes.sizeof(am.struct_gc_info_v2_1)
    for field, val in GC_INFO.items(): setattr(gc, field, val)
    gc_off = ip_disc_off + ctypes.sizeof(am.struct_ip_discovery_header) + ctypes.sizeof(am.struct_die_header) + len(ip_data)
    bhdr = am.struct_binary_header(binary_signature=am.BINARY_SIGNATURE)
    bhdr.table_list[am.IP_DISCOVERY].offset = ip_disc_off
    bhdr.table_list[am.GC].offset = gc_off
    tbl = bytes(bhdr) + bytes(ihdr) + bytes(dhdr) + ip_data + bytes(gc)
    tbl_offset = VRAM_SIZE - (64 << 10)  # discovery table sits in the last 64KiB of VRAM
    self.vram[tbl_offset:tbl_offset + len(tbl)] = list(tbl)
  @property
  def mc_base(self) -> int:
    # framebuffer MC base from the GMC register, stored in 16MiB units
    fb_loc_base = self.mmio.gmc.reg('regMMMC_VM_FB_LOCATION_BASE') or 0
    return (self.mmio.regs.get(fb_loc_base, 0) & 0xFFFFFF) << 24
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/mockgpu/am/amgpu.py",
"license": "MIT License",
"lines": 268,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:test/null/test_gpudims.py | import unittest, math
import z3
from tinygrad.codegen.gpudims import get_grouped_dims
from tinygrad.uop.ops import UOp, Ops
from tinygrad.uop.validate import uops_to_z3
from tinygrad.dtype import dtypes
from tinygrad.helpers import flatten, dedup
class TestGroupedDims(unittest.TestCase):
  """Tests for get_grouped_dims: splitting/merging launch dims under per-axis limits, with z3 proving the index mapping is a bijection."""
  def _check_grouped_dims(self, prefix, dims, max_sizes, reverse, expected_sizes, assert_same_length=True):
    """Run get_grouped_dims and assert the resulting SPECIAL sizes match expected_sizes; then z3-verify the index mapping."""
    idxs = get_grouped_dims(prefix, dims, max_sizes, reverse)
    # collect the SPECIAL (launch index) uops actually referenced by the returned index expressions
    loop_idxs = dedup(flatten([[y for y in x.toposort() if y.op is Ops.SPECIAL] for x in idxs]))
    loop_idxs = sorted(loop_idxs, key=lambda uop: uop.arg)
    sizes = [x.src[0].arg for x in loop_idxs]
    assert len(idxs) == len(dims), f"expected idxs to have same length as dims {len(dims)}, got {len(idxs)}"
    if assert_same_length:
      assert len(loop_idxs) == min(len(sizes), len(dims)), f"expected idxs to have length {min(len(sizes), len(dims))}, got {len(loop_idxs)}"
    assert sizes == expected_sizes, f"expected sizes={expected_sizes}, got {sizes=}"
    self._verify_indices_z3(idxs, dims)
  def _verify_indices_z3(self, idxs, dims):
    """Use z3 to prove bijectivity: bounds (0 <= flat < total) + injectivity (different inputs => different flat)."""
    total = math.prod(dims)
    specials = sorted(dedup(flatten([[y for y in x.toposort() if y.op is Ops.SPECIAL] for x in idxs])), key=lambda u: u.arg)
    # build flat index and primed flat (same expression with renamed SPECIALs)
    flat = UOp.const(dtypes.index, 0)
    for i, idx in enumerate(idxs):
      flat = flat + idx * int(math.prod(dims[i+1:]))  # row-major flattening of the recovered per-dim indices
    flat_p = flat.substitute({s: UOp(Ops.SPECIAL, s.dtype, s.src, s.arg+"_p") for s in specials})
    solver = z3.Solver()
    [z3_flat, z3_flat_p] = uops_to_z3(solver, flat, flat_p)
    # bounds
    self.assertEqual(solver.check(z3_flat < 0), z3.unsat, f"flat can be negative: {dims=}")
    self.assertEqual(solver.check(z3_flat >= total), z3.unsat, f"flat can be >= {total}: {dims=}")
    # injectivity: flat == flat' but inputs differ => unsat
    inputs_differ = z3.Or(*[z3.Int(s.arg) != z3.Int(s.arg+"_p") for s in specials])
    self.assertEqual(solver.check(z3.And(z3_flat == z3_flat_p, inputs_differ)), z3.unsat, f"not injective: {dims=}")
  def test_grouped_dims(self):
    # no-op
    self._check_grouped_dims("gidx", (2,), (16,16,16), False, [2])
    self._check_grouped_dims("gidx", (2,3), (16,16,16), False, [2,3])
    # check reverse dims
    self._check_grouped_dims("gidx", (2,3), (16,16,16), True, [3,2])
    self._check_grouped_dims("gidx", (2,3,4), (16,16,16), False, [2,3,4])
    # test splitting globals: len(dims) == len(max)
    self._check_grouped_dims("gidx", (64,3,4), (16,16,16), False, [16,12,4])
    self._check_grouped_dims("gidx", (64,3,4), (16,4,16), False, [16,3,16])
    self._check_grouped_dims("gidx", (64,3,4), (16,16,16), True, [16,3,16])
    self._check_grouped_dims("gidx", (128,3,4), (16,4,256), False, [16,3,32])
    self._check_grouped_dims("gidx", (4,4,512), (16,4,256), False, [8,4,256])
    self._check_grouped_dims("gidx", (5,12,7), (8,4,16), False, [10,3,14])
    # prefer group_dim strategy when possible
    self._check_grouped_dims("gidx", (512,4,2), (8192,2,2), False, [2048,2])
    # test splitting globals: len(dims) < len(max)
    # len(dim) -> len(limited)
    # 1 -> 2
    self._check_grouped_dims("gidx", (128,), (16,16,256), False, [16,8], False)
    # 1 -> 3
    self._check_grouped_dims("gidx", (65536,), (16,16,256), False, [16,16,256], False)
    # 2 -> 2
    self._check_grouped_dims("gidx", (65536,2), (65535,65535,65535), False, [32768,4], False)
    # test when the only divisor is the square root of dim
    self._check_grouped_dims("gidx", (121,), (12,12,12), False, [11,11], False)
    # 2 -> 3
    self._check_grouped_dims("gidx", (128,128), (16,16,256), False, [16,16,64], False)
    # collapse on onto the left most axis
    self._check_grouped_dims("gidx", (2,3,4,5), (16,16,16), False, [6,4,5])
    self._check_grouped_dims("gidx", (2,3,4,5), (32,16,16), True, [20,3,2])
    # collapse on left-most available axis (the left most is too small)
    self._check_grouped_dims("gidx", (2,3,4,5), (4,16,16), False, [2,12,5])
    self._check_grouped_dims("gidx", (2,3,4,5), (16,16,16), True, [5,12,2])
    # dim too large and not factorable
    with self.assertRaises(RuntimeError):
      get_grouped_dims("gidx", (23,), (16,16,16), False,)
    with self.assertRaises(RuntimeError):
      get_grouped_dims("gidx", (128,3,4), (16,2,2), False,)
    # too large for sizes
    with self.assertRaises(RuntimeError):
      get_grouped_dims("gidx", (2,3,4,5,6), (16,16,16))
  def test_grouped_direct_dims_are_special(self):
    # when (2,3) are merged into 6, the unmerged dims (4,5) should map directly to SPECIAL ops (no div/mod)
    idxs = get_grouped_dims("gidx", (2,3,4,5), (16,16,16), False)
    assert idxs[2].op is Ops.SPECIAL, f"expected SPECIAL for direct-mapped dim, got {idxs[2].op}"
    assert idxs[3].op is Ops.SPECIAL, f"expected SPECIAL for direct-mapped dim, got {idxs[3].op}"
  def test_max_sizes_none(self):
    # no limits: dims pass through unchanged
    self._check_grouped_dims("gidx", (2,3,4), None, False, [2,3,4])
    self._check_grouped_dims("gidx", (100,), None, False, [100])
# allow running this test file directly
if __name__ == '__main__':
  unittest.main()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/null/test_gpudims.py",
"license": "MIT License",
"lines": 87,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:test/unit/test_system_pci_scan_bus.py | import sys
import pytest
@pytest.mark.skipif(sys.platform != "linux", reason="uses linux sysfs layout")
def test_pci_scan_bus_filters_vendor(monkeypatch):
  """pci_scan_bus returns only devices whose sysfs vendor id matches the requested vendor (device id matches on both here)."""
  import tinygrad.runtime.support.system as system
  # fake sysfs: two devices with the same device id but different vendors
  fake = {
    "/sys/bus/pci/devices/0000:00:01.0/vendor": "0x1234",
    "/sys/bus/pci/devices/0000:00:01.0/device": "0x1111",
    "/sys/bus/pci/devices/0000:00:02.0/vendor": "0xabcd",
    "/sys/bus/pci/devices/0000:00:02.0/device": "0x1111",
  }
  class FakeFileIOInterface:
    # stands in for system.FileIOInterface: serves the fake sysfs tree above
    def __init__(self, path, *args, **kwargs):
      self.path = path
    def listdir(self):
      assert self.path == "/sys/bus/pci/devices"
      return ["0000:00:01.0", "0000:00:02.0"]
    def read(self, *args, **kwargs):
      return fake[self.path]
  monkeypatch.setattr(system, "FileIOInterface", FakeFileIOInterface)
  # 0xffff in the devices filter presumably acts as a device-id mask/wildcard — confirm against pci_scan_bus
  assert system.System.pci_scan_bus(0x1234, devices=[(0xffff, [0x1111])]) == ["0000:00:01.0"]
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/unit/test_system_pci_scan_bus.py",
"license": "MIT License",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:examples/mlperf/optim.py | from tinygrad.tensor import Tensor
from tinygrad.dtype import dtypes
from tinygrad.nn.optim import Optimizer
from tinygrad.helpers import FUSE_OPTIM
class GradAccClipAdamW(Optimizer):
  """AdamW with built-in gradient accumulation scaling and global-norm gradient clipping.

  Incoming grads are divided by grad_acc, clipped to clip_norm by global L2 norm, then
  stepped with decoupled weight decay (applied in _apply_update, not in the moment math).
  With fused=True all params live in one flat tensor (see Optimizer's pos_params slicing).
  """
  def __init__(self, params:list[Tensor], lr=0.001, b1=0.9, b2=0.999, eps=1e-6, weight_decay=0.0, grad_acc=1, clip_norm=1.0, device=None, fused=FUSE_OPTIM):
    super().__init__(params, lr, device, fused)
    self.b1, self.b2, self.eps, self.wd = b1, b2, eps, weight_decay
    # running beta powers for bias correction; start at 1 and are multiplied each step
    self.b1_t, self.b2_t = (Tensor.ones((1,), dtype=dtypes.float32, device=self.device, requires_grad=False) for _ in [b1, b2])
    self.m = self._new_optim_param()  # first-moment estimates
    self.v = self._new_optim_param()  # second-moment estimates
    self.grad_acc, self.clip_norm = grad_acc, clip_norm
  def fstep(self, grads:list[Tensor]):
    """Apply one optimizer step for `grads`, realize all state, and return the (realized) pre-clip gradient norm tensor."""
    if self.fused:
      # single flat update tensor: slice out each parameter's region
      out, extra = self._step([], grads)
      updates = [out[0][self.pos_params[i]:self.pos_params[i+1]].reshape(tt.shape) for i, tt in enumerate(self.params)]
    else:
      updates, extra = self._step([], grads)
    for i, tt in enumerate(self.params): tt.assign(self._apply_update(tt, updates[i]))
    to_realize = extra+self.params+self.buffers
    Tensor.realize(*to_realize)
    return extra[-1]  # total_norm is the last entry appended by _step
  def _step(self, params:list[Tensor], grads:list[Tensor]) -> tuple[list[Tensor], list[Tensor]]:
    """Scale, clip, and compute AdamW updates; returns (updates, state tensors to realize — total_norm last)."""
    for i in range(len(grads)):
      if grads[i].device != self.m[i].device: grads[i].assign(grads[i].to(self.m[i].device))
    if self.fused:
      # one flat grad tensor: scale by 1/grad_acc, then clip by global L2 norm
      grads[0].assign(grads[0] / self.grad_acc)
      total_norm = grads[0].float().square().sum().sqrt()
      grads[0].assign((grads[0] * (self.clip_norm / (total_norm + 1e-6)).clamp(max_=1.0)).cast(grads[0].dtype))
    else:
      # per-tensor grads: realize between phases so the norm sees the scaled values
      for i in range(len(grads)):
        grads[i].assign(grads[i] / self.grad_acc).realize()
      total_norm = Tensor.stack(*[g.float().square().sum() for g in grads]).sum().sqrt().contiguous().realize()
      for i in range(len(grads)):
        grads[i].assign((grads[i] * (self.clip_norm / (total_norm + 1e-6)).clamp(max_=1.0)).cast(grads[i].dtype)).realize()
    ret = []
    self.b1_t *= self.b1
    self.b2_t *= self.b2
    for i, g in enumerate(grads):
      # standard Adam moment updates with bias correction
      self.m[i].assign((self.b1 * self.m[i] + (1.0 - self.b1) * g).cast(self.m[i].dtype))
      self.v[i].assign((self.b2 * self.v[i] + (1.0 - self.b2) * (g * g)).cast(self.v[i].dtype))
      m_hat = self.m[i] / (1.0 - self.b1_t)
      v_hat = self.v[i] / (1.0 - self.b2_t)
      up = m_hat / (v_hat.sqrt() + self.eps)
      ret.append((self.lr * up).cast(g.dtype))
    return ret, [self.b1_t, self.b2_t] + self.m + self.v + [total_norm]
  def _apply_update(self, t:Tensor, up:Tensor) -> Tensor:
    """Apply decoupled weight decay (AdamW-style: added to the update, not the gradient) and subtract from the param."""
    up = up.shard_like(t) + self.lr.to(t.device) * self.wd * t.detach()
    return t.detach() - up.cast(t.dtype)
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "examples/mlperf/optim.py",
"license": "MIT License",
"lines": 49,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
tinygrad/tinygrad:test/unit/test_realize_is_realize.py | import tempfile, unittest
import numpy as np
from tinygrad import Tensor, Device, dtypes, Variable
class TestRealizeIsRealized(unittest.TestCase):
  """After .realize(), check whether the tensor's backing UOp reports is_realized.

  Per these expectations: tensors backed by actual device buffers are realized;
  const/symbolic/None-backed tensors (and bare empty/disk buffers) are not.
  """
  def test_list(self):
    out = Tensor([1, 2, 3]).realize()
    assert out.uop.is_realized
  def test_rand(self):
    out = Tensor.rand(4, 4).realize()
    assert out.uop.is_realized
  def test_contiguous(self):
    out = Tensor.zeros(10).contiguous().realize()
    assert out.uop.is_realized
  def test_bytes(self):
    out = Tensor(b'\x01\x02\x03').realize()
    assert out.uop.is_realized
  def test_numpy(self):
    out = Tensor(np.array([1, 2, 3])).realize()
    assert out.uop.is_realized
  def test_multi(self):
    dev = Device.DEFAULT
    sharded = Tensor.ones(8).contiguous().shard((dev, dev), axis=0).realize()
    for shard in sharded.uop.src: assert shard.is_realized
  def test_empty(self):
    out = Tensor.empty(4, 4).realize()
    assert not out.uop.is_realized
  def test_disk(self):
    with tempfile.NamedTemporaryFile() as tmp:
      tmp.write(b'\x00' * 16)
      tmp.flush()
      out = Tensor.empty(4, dtype=dtypes.float32, device=f"disk:{tmp.name}").realize()
      assert not out.uop.is_realized
  def test_assign(self):
    out = Tensor([1, 2, 3])
    out += 1
    out.realize()
    assert out.uop.is_realized
  # TODO: these are not realized after .realize()
  def test_const_not_realized(self):
    out = Tensor(3.14).realize()
    assert not out.uop.is_realized
  def test_ones_not_realized(self):
    out = Tensor.ones(4, 4).realize()
    assert not out.uop.is_realized
  def test_none_not_realized(self):
    out = Tensor(None).realize()
    assert not out.uop.is_realized
  def test_variable_not_realized(self):
    out = Tensor(Variable("v", 1, 10).bind(3)).realize()
    assert not out.uop.is_realized
if __name__ == "__main__":
unittest.main()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/unit/test_realize_is_realize.py",
"license": "MIT License",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:test/unit/test_hcq_graph.py | import unittest
from tinygrad import Device, Tensor
from tinygrad.engine.jit import TinyJit
from tinygrad.engine.realize import CompiledRunner
from tinygrad.runtime.graph.hcq import HCQGraph
from tinygrad.runtime.support.hcq import HCQCompiled
from tinygrad.runtime.support.usb import USBMMIOInterface
from test.mockgpu.usb import MockUSB
@unittest.skipUnless(issubclass(type(Device[Device.DEFAULT]), HCQCompiled), "HCQ device required to run")
class TestHCQUnit(unittest.TestCase):
  @unittest.skipIf(Device.DEFAULT == "CPU", "requires non-CPU HCQ device")
  def test_supports_exec_item(self):
    # HCQGraph.supports_exec_item decides whether a jitted exec item can be batched into an
    # HCQ graph for a given device set.  Build a jit that touches both the default (GPU)
    # device and CPU so we get one compiled exec item of each kind to probe with.
    d0, cpu_dev = Device[Device.DEFAULT], Device["CPU"]
    @TinyJit
    def f(inp, inp_cpu):
      return (inp + 1.0).contiguous().realize(), (inp_cpu + 1.0).contiguous().realize()
    inp, inp_cpu = Tensor.randn(10, 10, device=Device.DEFAULT).realize(), Tensor.randn(10, 10, device="CPU").realize()
    # run enough iterations for TinyJit to capture the kernels
    for _ in range(5): f(inp, inp_cpu)
    # pull one CompiledRunner exec item per device class out of the captured jit cache
    gpu_ei, cpu_ei, gpu_devs = None, None, []
    for ji in f.captured.jit_cache:
      if isinstance(ji.prg, CompiledRunner):
        if ji.prg.dev._is_cpu(): cpu_ei = ji
        else:
          gpu_ei = ji
          if ji.prg.dev not in gpu_devs: gpu_devs.append(ji.prg.dev)
    assert gpu_ei is not None and cpu_ei is not None and len(gpu_devs) > 0
    # local MMIO: GPU works alone and with CPU in batch (cpu_support=True)
    assert HCQGraph.supports_exec_item(gpu_devs, gpu_ei) is True
    assert HCQGraph.supports_exec_item(gpu_devs, cpu_ei) is True
    assert HCQGraph.supports_exec_item(gpu_devs + [cpu_dev], gpu_ei) is True
    # USB MMIO: GPU-only still works, but CPU batching must be rejected (cpu_support=False).
    # Swap the timeline signal's MMIO view for a mock USB-backed one; restore it afterwards
    # so the shared device state isn't corrupted for later tests.
    orig_view = d0.timeline_signal.base_buf.view
    try:
      d0.timeline_signal.base_buf.view = USBMMIOInterface(MockUSB(bytearray(256)), 0, 16, fmt='B')
      assert HCQGraph.supports_exec_item(gpu_devs, gpu_ei) is True
      assert HCQGraph.supports_exec_item(gpu_devs, cpu_ei) is False
      assert HCQGraph.supports_exec_item(gpu_devs + [cpu_dev], gpu_ei) is False
    finally:
      d0.timeline_signal.base_buf.view = orig_view
if __name__ == "__main__":
unittest.main()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/unit/test_hcq_graph.py",
"license": "MIT License",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:test/external/external_test_llama3_layer.py | #!/usr/bin/env python3
from tinygrad import Tensor, TinyJit, nn, dtypes
from tinygrad.helpers import getenv
from extra.models.llama import TransformerBlock, precompute_freqs_cis
BS = getenv("BS", 1)  # batch size
SEQLEN = getenv("SEQLEN", 128)  # context length fed to the layer
# DEFAULT_FLOAT=bfloat16 SEQLEN=8192 ASM_GEMM=1 HK_FLASH_ATTENTION=1 EMULATE=AMD_CDNA4 NULL=1 DEBUG=2 VIZ=1 PYTHONPATH="."
# python test/external/external_test_llama3_layer.py
if __name__ == "__main__":
  # benchmark/repro harness: run one llama3-8B-sized TransformerBlock under TinyJit
  dim, hidden_dim, n_heads, n_kv_heads, norm_eps = 4096, 14336, 32, 8, 1e-5
  layer = TransformerBlock(dim, hidden_dim, n_heads, n_kv_heads, norm_eps, max_context=0)
  # cast all weights to the configured default float (e.g. bfloat16) before running
  for x in nn.state.get_parameters(layer): x.replace(x.cast(dtypes.default_float)).realize()
  freqs_cis = precompute_freqs_cis(dim // n_heads, SEQLEN, theta=500000.0).contiguous().requires_grad_(False).realize()
  @TinyJit
  def run(t): return layer(t, 0, freqs_cis, None)
  # several iterations so the jit captures and the later runs measure steady state
  for i in range(5):
    print(f"*** run {i}")
    run(Tensor.rand(BS, SEQLEN, dim, dtype=dtypes.default_float).realize())
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/external/external_test_llama3_layer.py",
"license": "MIT License",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:tinygrad/runtime/autogen/am/smu_v13_0_12.py | # mypy: disable-error-code="empty-body"
from __future__ import annotations
import ctypes
from typing import Annotated, Literal, TypeAlias
from tinygrad.runtime.support.c import _IO, _IOW, _IOR, _IOWR
from tinygrad.runtime.support import c
PPSMC_Result: TypeAlias = Annotated[int, ctypes.c_uint32]
PPSMC_MSG: TypeAlias = Annotated[int, ctypes.c_uint32]
class FEATURE_LIST_e(Annotated[int, ctypes.c_uint32], c.Enum): pass
FEATURE_DATA_CALCULATION = FEATURE_LIST_e.define('FEATURE_DATA_CALCULATION', 0)
FEATURE_DPM_FCLK = FEATURE_LIST_e.define('FEATURE_DPM_FCLK', 1)
FEATURE_DPM_GFXCLK = FEATURE_LIST_e.define('FEATURE_DPM_GFXCLK', 2)
FEATURE_DPM_LCLK = FEATURE_LIST_e.define('FEATURE_DPM_LCLK', 3)
FEATURE_DPM_SOCCLK = FEATURE_LIST_e.define('FEATURE_DPM_SOCCLK', 4)
FEATURE_DPM_UCLK = FEATURE_LIST_e.define('FEATURE_DPM_UCLK', 5)
FEATURE_DPM_VCN = FEATURE_LIST_e.define('FEATURE_DPM_VCN', 6)
FEATURE_DPM_XGMI = FEATURE_LIST_e.define('FEATURE_DPM_XGMI', 7)
FEATURE_DS_FCLK = FEATURE_LIST_e.define('FEATURE_DS_FCLK', 8)
FEATURE_DS_GFXCLK = FEATURE_LIST_e.define('FEATURE_DS_GFXCLK', 9)
FEATURE_DS_LCLK = FEATURE_LIST_e.define('FEATURE_DS_LCLK', 10)
FEATURE_DS_MP0CLK = FEATURE_LIST_e.define('FEATURE_DS_MP0CLK', 11)
FEATURE_DS_MP1CLK = FEATURE_LIST_e.define('FEATURE_DS_MP1CLK', 12)
FEATURE_DS_MPIOCLK = FEATURE_LIST_e.define('FEATURE_DS_MPIOCLK', 13)
FEATURE_DS_SOCCLK = FEATURE_LIST_e.define('FEATURE_DS_SOCCLK', 14)
FEATURE_DS_VCN = FEATURE_LIST_e.define('FEATURE_DS_VCN', 15)
FEATURE_APCC_DFLL = FEATURE_LIST_e.define('FEATURE_APCC_DFLL', 16)
FEATURE_APCC_PLUS = FEATURE_LIST_e.define('FEATURE_APCC_PLUS', 17)
FEATURE_PPT = FEATURE_LIST_e.define('FEATURE_PPT', 18)
FEATURE_TDC = FEATURE_LIST_e.define('FEATURE_TDC', 19)
FEATURE_THERMAL = FEATURE_LIST_e.define('FEATURE_THERMAL', 20)
FEATURE_SOC_PCC = FEATURE_LIST_e.define('FEATURE_SOC_PCC', 21)
FEATURE_PROCHOT = FEATURE_LIST_e.define('FEATURE_PROCHOT', 22)
FEATURE_FDD_AID_HBM = FEATURE_LIST_e.define('FEATURE_FDD_AID_HBM', 23)
FEATURE_FDD_AID_SOC = FEATURE_LIST_e.define('FEATURE_FDD_AID_SOC', 24)
FEATURE_FDD_XCD_EDC = FEATURE_LIST_e.define('FEATURE_FDD_XCD_EDC', 25)
FEATURE_FDD_XCD_XVMIN = FEATURE_LIST_e.define('FEATURE_FDD_XCD_XVMIN', 26)
FEATURE_FW_CTF = FEATURE_LIST_e.define('FEATURE_FW_CTF', 27)
FEATURE_SMU_CG = FEATURE_LIST_e.define('FEATURE_SMU_CG', 28)
FEATURE_PSI7 = FEATURE_LIST_e.define('FEATURE_PSI7', 29)
FEATURE_XGMI_PER_LINK_PWR_DOWN = FEATURE_LIST_e.define('FEATURE_XGMI_PER_LINK_PWR_DOWN', 30)
FEATURE_SOC_DC_RTC = FEATURE_LIST_e.define('FEATURE_SOC_DC_RTC', 31)
FEATURE_GFX_DC_RTC = FEATURE_LIST_e.define('FEATURE_GFX_DC_RTC', 32)
FEATURE_DVM_MIN_PSM = FEATURE_LIST_e.define('FEATURE_DVM_MIN_PSM', 33)
FEATURE_PRC = FEATURE_LIST_e.define('FEATURE_PRC', 34)
FEATURE_PSM_SQ_THROTTLER = FEATURE_LIST_e.define('FEATURE_PSM_SQ_THROTTLER', 35)
FEATURE_PIT = FEATURE_LIST_e.define('FEATURE_PIT', 36)
FEATURE_DVO = FEATURE_LIST_e.define('FEATURE_DVO', 37)
FEATURE_XVMINORPSM_CLKSTOP_DS = FEATURE_LIST_e.define('FEATURE_XVMINORPSM_CLKSTOP_DS', 38)
FEATURE_GLOBAL_DPM = FEATURE_LIST_e.define('FEATURE_GLOBAL_DPM', 39)
FEATURE_HROM_EN = FEATURE_LIST_e.define('FEATURE_HROM_EN', 40)
NUM_FEATURES = FEATURE_LIST_e.define('NUM_FEATURES', 41)
class PCIE_LINK_SPEED_INDEX_TABLE_e(Annotated[int, ctypes.c_uint32], c.Enum): pass
PCIE_LINK_SPEED_INDEX_TABLE_RESERVED = PCIE_LINK_SPEED_INDEX_TABLE_e.define('PCIE_LINK_SPEED_INDEX_TABLE_RESERVED', 0)
PCIE_LINK_SPEED_INDEX_TABLE_GEN1 = PCIE_LINK_SPEED_INDEX_TABLE_e.define('PCIE_LINK_SPEED_INDEX_TABLE_GEN1', 1)
PCIE_LINK_SPEED_INDEX_TABLE_GEN2 = PCIE_LINK_SPEED_INDEX_TABLE_e.define('PCIE_LINK_SPEED_INDEX_TABLE_GEN2', 2)
PCIE_LINK_SPEED_INDEX_TABLE_GEN3 = PCIE_LINK_SPEED_INDEX_TABLE_e.define('PCIE_LINK_SPEED_INDEX_TABLE_GEN3', 3)
PCIE_LINK_SPEED_INDEX_TABLE_GEN4 = PCIE_LINK_SPEED_INDEX_TABLE_e.define('PCIE_LINK_SPEED_INDEX_TABLE_GEN4', 4)
PCIE_LINK_SPEED_INDEX_TABLE_GEN5 = PCIE_LINK_SPEED_INDEX_TABLE_e.define('PCIE_LINK_SPEED_INDEX_TABLE_GEN5', 5)
PCIE_LINK_SPEED_INDEX_TABLE_COUNT = PCIE_LINK_SPEED_INDEX_TABLE_e.define('PCIE_LINK_SPEED_INDEX_TABLE_COUNT', 6)
class GFX_GUARDBAND_OFFSET_e(Annotated[int, ctypes.c_uint32], c.Enum): pass
GFX_GUARDBAND_OFFSET_0 = GFX_GUARDBAND_OFFSET_e.define('GFX_GUARDBAND_OFFSET_0', 0)
GFX_GUARDBAND_OFFSET_1 = GFX_GUARDBAND_OFFSET_e.define('GFX_GUARDBAND_OFFSET_1', 1)
GFX_GUARDBAND_OFFSET_2 = GFX_GUARDBAND_OFFSET_e.define('GFX_GUARDBAND_OFFSET_2', 2)
GFX_GUARDBAND_OFFSET_3 = GFX_GUARDBAND_OFFSET_e.define('GFX_GUARDBAND_OFFSET_3', 3)
GFX_GUARDBAND_OFFSET_4 = GFX_GUARDBAND_OFFSET_e.define('GFX_GUARDBAND_OFFSET_4', 4)
GFX_GUARDBAND_OFFSET_5 = GFX_GUARDBAND_OFFSET_e.define('GFX_GUARDBAND_OFFSET_5', 5)
GFX_GUARDBAND_OFFSET_6 = GFX_GUARDBAND_OFFSET_e.define('GFX_GUARDBAND_OFFSET_6', 6)
GFX_GUARDBAND_OFFSET_7 = GFX_GUARDBAND_OFFSET_e.define('GFX_GUARDBAND_OFFSET_7', 7)
GFX_GUARDBAND_OFFSET_COUNT = GFX_GUARDBAND_OFFSET_e.define('GFX_GUARDBAND_OFFSET_COUNT', 8)
class GFX_DVM_MARGIN_e(Annotated[int, ctypes.c_uint32], c.Enum): pass
GFX_DVM_MARGINHI_0 = GFX_DVM_MARGIN_e.define('GFX_DVM_MARGINHI_0', 0)
GFX_DVM_MARGINHI_1 = GFX_DVM_MARGIN_e.define('GFX_DVM_MARGINHI_1', 1)
GFX_DVM_MARGINHI_2 = GFX_DVM_MARGIN_e.define('GFX_DVM_MARGINHI_2', 2)
GFX_DVM_MARGINHI_3 = GFX_DVM_MARGIN_e.define('GFX_DVM_MARGINHI_3', 3)
GFX_DVM_MARGINHI_4 = GFX_DVM_MARGIN_e.define('GFX_DVM_MARGINHI_4', 4)
GFX_DVM_MARGINHI_5 = GFX_DVM_MARGIN_e.define('GFX_DVM_MARGINHI_5', 5)
GFX_DVM_MARGINHI_6 = GFX_DVM_MARGIN_e.define('GFX_DVM_MARGINHI_6', 6)
GFX_DVM_MARGINHI_7 = GFX_DVM_MARGIN_e.define('GFX_DVM_MARGINHI_7', 7)
GFX_DVM_MARGINLO_0 = GFX_DVM_MARGIN_e.define('GFX_DVM_MARGINLO_0', 8)
GFX_DVM_MARGINLO_1 = GFX_DVM_MARGIN_e.define('GFX_DVM_MARGINLO_1', 9)
GFX_DVM_MARGINLO_2 = GFX_DVM_MARGIN_e.define('GFX_DVM_MARGINLO_2', 10)
GFX_DVM_MARGINLO_3 = GFX_DVM_MARGIN_e.define('GFX_DVM_MARGINLO_3', 11)
GFX_DVM_MARGINLO_4 = GFX_DVM_MARGIN_e.define('GFX_DVM_MARGINLO_4', 12)
GFX_DVM_MARGINLO_5 = GFX_DVM_MARGIN_e.define('GFX_DVM_MARGINLO_5', 13)
GFX_DVM_MARGINLO_6 = GFX_DVM_MARGIN_e.define('GFX_DVM_MARGINLO_6', 14)
GFX_DVM_MARGINLO_7 = GFX_DVM_MARGIN_e.define('GFX_DVM_MARGINLO_7', 15)
GFX_DVM_MARGIN_COUNT = GFX_DVM_MARGIN_e.define('GFX_DVM_MARGIN_COUNT', 16)
class SYSTEM_TEMP_e(Annotated[int, ctypes.c_uint32], c.Enum): pass
SYSTEM_TEMP_UBB_FPGA = SYSTEM_TEMP_e.define('SYSTEM_TEMP_UBB_FPGA', 0)
SYSTEM_TEMP_UBB_FRONT = SYSTEM_TEMP_e.define('SYSTEM_TEMP_UBB_FRONT', 1)
SYSTEM_TEMP_UBB_BACK = SYSTEM_TEMP_e.define('SYSTEM_TEMP_UBB_BACK', 2)
SYSTEM_TEMP_UBB_OAM7 = SYSTEM_TEMP_e.define('SYSTEM_TEMP_UBB_OAM7', 3)
SYSTEM_TEMP_UBB_IBC = SYSTEM_TEMP_e.define('SYSTEM_TEMP_UBB_IBC', 4)
SYSTEM_TEMP_UBB_UFPGA = SYSTEM_TEMP_e.define('SYSTEM_TEMP_UBB_UFPGA', 5)
SYSTEM_TEMP_UBB_OAM1 = SYSTEM_TEMP_e.define('SYSTEM_TEMP_UBB_OAM1', 6)
SYSTEM_TEMP_OAM_0_1_HSC = SYSTEM_TEMP_e.define('SYSTEM_TEMP_OAM_0_1_HSC', 7)
SYSTEM_TEMP_OAM_2_3_HSC = SYSTEM_TEMP_e.define('SYSTEM_TEMP_OAM_2_3_HSC', 8)
SYSTEM_TEMP_OAM_4_5_HSC = SYSTEM_TEMP_e.define('SYSTEM_TEMP_OAM_4_5_HSC', 9)
SYSTEM_TEMP_OAM_6_7_HSC = SYSTEM_TEMP_e.define('SYSTEM_TEMP_OAM_6_7_HSC', 10)
SYSTEM_TEMP_UBB_FPGA_0V72_VR = SYSTEM_TEMP_e.define('SYSTEM_TEMP_UBB_FPGA_0V72_VR', 11)
SYSTEM_TEMP_UBB_FPGA_3V3_VR = SYSTEM_TEMP_e.define('SYSTEM_TEMP_UBB_FPGA_3V3_VR', 12)
SYSTEM_TEMP_RETIMER_0_1_2_3_1V2_VR = SYSTEM_TEMP_e.define('SYSTEM_TEMP_RETIMER_0_1_2_3_1V2_VR', 13)
SYSTEM_TEMP_RETIMER_4_5_6_7_1V2_VR = SYSTEM_TEMP_e.define('SYSTEM_TEMP_RETIMER_4_5_6_7_1V2_VR', 14)
SYSTEM_TEMP_RETIMER_0_1_0V9_VR = SYSTEM_TEMP_e.define('SYSTEM_TEMP_RETIMER_0_1_0V9_VR', 15)
SYSTEM_TEMP_RETIMER_4_5_0V9_VR = SYSTEM_TEMP_e.define('SYSTEM_TEMP_RETIMER_4_5_0V9_VR', 16)
SYSTEM_TEMP_RETIMER_2_3_0V9_VR = SYSTEM_TEMP_e.define('SYSTEM_TEMP_RETIMER_2_3_0V9_VR', 17)
SYSTEM_TEMP_RETIMER_6_7_0V9_VR = SYSTEM_TEMP_e.define('SYSTEM_TEMP_RETIMER_6_7_0V9_VR', 18)
SYSTEM_TEMP_OAM_0_1_2_3_3V3_VR = SYSTEM_TEMP_e.define('SYSTEM_TEMP_OAM_0_1_2_3_3V3_VR', 19)
SYSTEM_TEMP_OAM_4_5_6_7_3V3_VR = SYSTEM_TEMP_e.define('SYSTEM_TEMP_OAM_4_5_6_7_3V3_VR', 20)
SYSTEM_TEMP_IBC_HSC = SYSTEM_TEMP_e.define('SYSTEM_TEMP_IBC_HSC', 21)
SYSTEM_TEMP_IBC = SYSTEM_TEMP_e.define('SYSTEM_TEMP_IBC', 22)
SYSTEM_TEMP_MAX_ENTRIES = SYSTEM_TEMP_e.define('SYSTEM_TEMP_MAX_ENTRIES', 32)
class NODE_TEMP_e(Annotated[int, ctypes.c_uint32], c.Enum): pass
NODE_TEMP_RETIMER = NODE_TEMP_e.define('NODE_TEMP_RETIMER', 0)
NODE_TEMP_IBC_TEMP = NODE_TEMP_e.define('NODE_TEMP_IBC_TEMP', 1)
NODE_TEMP_IBC_2_TEMP = NODE_TEMP_e.define('NODE_TEMP_IBC_2_TEMP', 2)
NODE_TEMP_VDD18_VR_TEMP = NODE_TEMP_e.define('NODE_TEMP_VDD18_VR_TEMP', 3)
NODE_TEMP_04_HBM_B_VR_TEMP = NODE_TEMP_e.define('NODE_TEMP_04_HBM_B_VR_TEMP', 4)
NODE_TEMP_04_HBM_D_VR_TEMP = NODE_TEMP_e.define('NODE_TEMP_04_HBM_D_VR_TEMP', 5)
NODE_TEMP_MAX_TEMP_ENTRIES = NODE_TEMP_e.define('NODE_TEMP_MAX_TEMP_ENTRIES', 12)
class SVI_TEMP_e(Annotated[int, ctypes.c_uint32], c.Enum): pass
SVI_VDDCR_VDD0_TEMP = SVI_TEMP_e.define('SVI_VDDCR_VDD0_TEMP', 0)
SVI_VDDCR_VDD1_TEMP = SVI_TEMP_e.define('SVI_VDDCR_VDD1_TEMP', 1)
SVI_VDDCR_VDD2_TEMP = SVI_TEMP_e.define('SVI_VDDCR_VDD2_TEMP', 2)
SVI_VDDCR_VDD3_TEMP = SVI_TEMP_e.define('SVI_VDDCR_VDD3_TEMP', 3)
SVI_VDDCR_SOC_A_TEMP = SVI_TEMP_e.define('SVI_VDDCR_SOC_A_TEMP', 4)
SVI_VDDCR_SOC_C_TEMP = SVI_TEMP_e.define('SVI_VDDCR_SOC_C_TEMP', 5)
SVI_VDDCR_SOCIO_A_TEMP = SVI_TEMP_e.define('SVI_VDDCR_SOCIO_A_TEMP', 6)
SVI_VDDCR_SOCIO_C_TEMP = SVI_TEMP_e.define('SVI_VDDCR_SOCIO_C_TEMP', 7)
SVI_VDD_085_HBM_TEMP = SVI_TEMP_e.define('SVI_VDD_085_HBM_TEMP', 8)
SVI_VDDCR_11_HBM_B_TEMP = SVI_TEMP_e.define('SVI_VDDCR_11_HBM_B_TEMP', 9)
SVI_VDDCR_11_HBM_D_TEMP = SVI_TEMP_e.define('SVI_VDDCR_11_HBM_D_TEMP', 10)
SVI_VDD_USR_TEMP = SVI_TEMP_e.define('SVI_VDD_USR_TEMP', 11)
SVI_VDDIO_11_E32_TEMP = SVI_TEMP_e.define('SVI_VDDIO_11_E32_TEMP', 12)
SVI_MAX_TEMP_ENTRIES = SVI_TEMP_e.define('SVI_MAX_TEMP_ENTRIES', 13)
@c.record
class MetricsTable_t(c.Struct):
  """Dynamic SMU metrics table (generated layout; do not hand-edit field offsets).

  Each field is Annotated[ctype, byte offset] into a packed 1284-byte blob read from the
  SMU firmware.  Fields ending in `Acc` are presumably free-running accumulators sampled
  against AccumulationCounter -- NOTE(review): semantics come from the AMD SMU interface,
  not visible here.
  """
  SIZE = 1284
  AccumulationCounter: Annotated[uint64_t, 0]
  MaxSocketTemperature: Annotated[uint32_t, 8]
  MaxVrTemperature: Annotated[uint32_t, 12]
  MaxHbmTemperature: Annotated[uint32_t, 16]
  MaxSocketTemperatureAcc: Annotated[uint64_t, 20]
  MaxVrTemperatureAcc: Annotated[uint64_t, 28]
  MaxHbmTemperatureAcc: Annotated[uint64_t, 36]
  SocketPowerLimit: Annotated[uint32_t, 44]
  SocketPower: Annotated[uint32_t, 48]
  Timestamp: Annotated[uint64_t, 52]
  SocketEnergyAcc: Annotated[uint64_t, 60]
  XcdEnergyAcc: Annotated[uint64_t, 68]
  AidEnergyAcc: Annotated[uint64_t, 76]
  HbmEnergyAcc: Annotated[uint64_t, 84]
  GfxclkFrequencyLimit: Annotated[uint32_t, 92]
  FclkFrequency: Annotated[uint32_t, 96]
  UclkFrequency: Annotated[uint32_t, 100]
  SocclkFrequency: Annotated[c.Array[uint32_t, Literal[4]], 104]
  VclkFrequency: Annotated[c.Array[uint32_t, Literal[4]], 120]
  DclkFrequency: Annotated[c.Array[uint32_t, Literal[4]], 136]
  LclkFrequency: Annotated[c.Array[uint32_t, Literal[4]], 152]
  GfxclkFrequencyAcc: Annotated[c.Array[uint64_t, Literal[8]], 168]
  MaxLclkDpmRange: Annotated[uint32_t, 232]
  MinLclkDpmRange: Annotated[uint32_t, 236]
  XgmiWidth: Annotated[uint32_t, 240]
  XgmiBitrate: Annotated[uint32_t, 244]
  XgmiReadBandwidthAcc: Annotated[c.Array[uint64_t, Literal[8]], 248]
  XgmiWriteBandwidthAcc: Annotated[c.Array[uint64_t, Literal[8]], 312]
  SocketGfxBusy: Annotated[uint32_t, 376]
  DramBandwidthUtilization: Annotated[uint32_t, 380]
  SocketGfxBusyAcc: Annotated[uint64_t, 384]
  DramBandwidthAcc: Annotated[uint64_t, 392]
  MaxDramBandwidth: Annotated[uint32_t, 400]
  DramBandwidthUtilizationAcc: Annotated[uint64_t, 404]
  PcieBandwidthAcc: Annotated[c.Array[uint64_t, Literal[4]], 412]
  ProchotResidencyAcc: Annotated[uint32_t, 444]
  PptResidencyAcc: Annotated[uint32_t, 448]
  SocketThmResidencyAcc: Annotated[uint32_t, 452]
  VrThmResidencyAcc: Annotated[uint32_t, 456]
  HbmThmResidencyAcc: Annotated[uint32_t, 460]
  GfxLockXCDMak: Annotated[uint32_t, 464]
  GfxclkFrequency: Annotated[c.Array[uint32_t, Literal[8]], 468]
  XgmiReadDataSizeAcc: Annotated[c.Array[uint64_t, Literal[8]], 500]
  XgmiWriteDataSizeAcc: Annotated[c.Array[uint64_t, Literal[8]], 564]
  PcieBandwidth: Annotated[c.Array[uint32_t, Literal[4]], 628]
  PCIeL0ToRecoveryCountAcc: Annotated[uint32_t, 644]
  PCIenReplayAAcc: Annotated[uint32_t, 648]
  PCIenReplayARolloverCountAcc: Annotated[uint32_t, 652]
  PCIeNAKSentCountAcc: Annotated[uint32_t, 656]
  PCIeNAKReceivedCountAcc: Annotated[uint32_t, 660]
  VcnBusy: Annotated[c.Array[uint32_t, Literal[4]], 664]
  JpegBusy: Annotated[c.Array[uint32_t, Literal[40]], 680]
  PCIeLinkSpeed: Annotated[uint32_t, 840]
  PCIeLinkWidth: Annotated[uint32_t, 844]
  GfxBusy: Annotated[c.Array[uint32_t, Literal[8]], 848]
  GfxBusyAcc: Annotated[c.Array[uint64_t, Literal[8]], 880]
  PCIeOtherEndRecoveryAcc: Annotated[uint32_t, 944]
  GfxclkBelowHostLimitPptAcc: Annotated[c.Array[uint64_t, Literal[8]], 948]
  GfxclkBelowHostLimitThmAcc: Annotated[c.Array[uint64_t, Literal[8]], 1012]
  GfxclkBelowHostLimitTotalAcc: Annotated[c.Array[uint64_t, Literal[8]], 1076]
  GfxclkLowUtilizationAcc: Annotated[c.Array[uint64_t, Literal[8]], 1140]
  AidTemperature: Annotated[c.Array[uint32_t, Literal[4]], 1204]
  XcdTemperature: Annotated[c.Array[uint32_t, Literal[8]], 1220]
  HbmTemperature: Annotated[c.Array[uint32_t, Literal[8]], 1252]
uint64_t: TypeAlias = Annotated[int, ctypes.c_uint64]
uint32_t: TypeAlias = Annotated[int, ctypes.c_uint32]
@c.record
class SystemMetricsTable_t(c.Struct):
  """Board/system-level SMU metrics (temperatures, node power) -- generated layout.

  Fields are Annotated[ctype, byte offset] into a packed 152-byte blob.
  """
  SIZE = 152
  AccumulationCounter: Annotated[uint64_t, 0]
  LabelVersion: Annotated[uint16_t, 8]
  NodeIdentifier: Annotated[uint16_t, 10]
  SystemTemperatures: Annotated[c.Array[int16_t, Literal[32]], 12]
  NodeTemperatures: Annotated[c.Array[int16_t, Literal[12]], 76]
  VrTemperatures: Annotated[c.Array[int16_t, Literal[13]], 100]
  spare: Annotated[c.Array[int16_t, Literal[7]], 126]
  NodePowerLimit: Annotated[uint32_t, 140]
  NodePower: Annotated[uint32_t, 144]
  GlobalPPTResidencyAcc: Annotated[uint32_t, 148]
uint16_t: TypeAlias = Annotated[int, ctypes.c_uint16]
int16_t: TypeAlias = Annotated[int, ctypes.c_int16]
@c.record
class VfMetricsTable_t(c.Struct):
  """Per-VF (virtual function) gfx clock/busy accumulators -- generated layout."""
  SIZE = 56
  AccumulationCounter: Annotated[uint32_t, 0]
  InstGfxclk_TargFreq: Annotated[uint32_t, 4]
  AccGfxclk_TargFreq: Annotated[uint64_t, 8]
  AccGfxRsmuDpm_Busy: Annotated[uint64_t, 16]
  AccGfxclkBelowHostLimitPpt: Annotated[uint64_t, 24]
  AccGfxclkBelowHostLimitThm: Annotated[uint64_t, 32]
  AccGfxclkBelowHostLimitTotal: Annotated[uint64_t, 40]
  AccGfxclkLowUtilization: Annotated[uint64_t, 48]
@c.record
class FRUProductInfo_t(c.Struct):
  """FRU product identification strings as fixed-size byte arrays -- generated layout."""
  SIZE = 168
  ModelNumber: Annotated[c.Array[uint8_t, Literal[20]], 0]
  Name: Annotated[c.Array[uint8_t, Literal[64]], 20]
  Serial: Annotated[c.Array[uint8_t, Literal[20]], 84]
  ManufacturerName: Annotated[c.Array[uint8_t, Literal[32]], 104]
  FruId: Annotated[c.Array[uint8_t, Literal[32]], 136]
uint8_t: TypeAlias = Annotated[int, ctypes.c_ubyte]
@c.record
class StaticMetricsTable_t(c.Struct):
  """Static (boot-time) SMU metrics: product info, frequency tables, limits -- generated layout."""
  SIZE = 408
  ProductInfo: Annotated[FRUProductInfo_t, 0]
  MaxSocketPowerLimit: Annotated[uint32_t, 168]
  MaxGfxclkFrequency: Annotated[uint32_t, 172]
  MinGfxclkFrequency: Annotated[uint32_t, 176]
  FclkFrequencyTable: Annotated[c.Array[uint32_t, Literal[4]], 180]
  UclkFrequencyTable: Annotated[c.Array[uint32_t, Literal[4]], 196]
  SocclkFrequencyTable: Annotated[c.Array[uint32_t, Literal[4]], 212]
  VclkFrequencyTable: Annotated[c.Array[uint32_t, Literal[4]], 228]
  DclkFrequencyTable: Annotated[c.Array[uint32_t, Literal[4]], 244]
  LclkFrequencyTable: Annotated[c.Array[uint32_t, Literal[4]], 260]
  PublicSerialNumber_AID: Annotated[c.Array[uint64_t, Literal[4]], 276]
  PublicSerialNumber_XCD: Annotated[c.Array[uint64_t, Literal[8]], 308]
  MaxXgmiWidth: Annotated[uint32_t, 372]
  MaxXgmiBitrate: Annotated[uint32_t, 376]
  InputTelemetryVoltageInmV: Annotated[uint32_t, 380]
  pldmVersion: Annotated[c.Array[uint32_t, Literal[2]], 384]
  MaxNodePowerLimit: Annotated[uint32_t, 392]
  PPT1Max: Annotated[uint32_t, 396]
  PPT1Min: Annotated[uint32_t, 400]
  PPT1Default: Annotated[uint32_t, 404]
class I2cControllerPort_e(Annotated[int, ctypes.c_uint32], c.Enum): pass
I2C_CONTROLLER_PORT_0 = I2cControllerPort_e.define('I2C_CONTROLLER_PORT_0', 0)
I2C_CONTROLLER_PORT_1 = I2cControllerPort_e.define('I2C_CONTROLLER_PORT_1', 1)
I2C_CONTROLLER_PORT_COUNT = I2cControllerPort_e.define('I2C_CONTROLLER_PORT_COUNT', 2)
class I2cSpeed_e(Annotated[int, ctypes.c_uint32], c.Enum): pass
UNSUPPORTED_1 = I2cSpeed_e.define('UNSUPPORTED_1', 0)
I2C_SPEED_STANDARD_100K = I2cSpeed_e.define('I2C_SPEED_STANDARD_100K', 1)
I2C_SPEED_FAST_400K = I2cSpeed_e.define('I2C_SPEED_FAST_400K', 2)
I2C_SPEED_FAST_PLUS_1M = I2cSpeed_e.define('I2C_SPEED_FAST_PLUS_1M', 3)
UNSUPPORTED_2 = I2cSpeed_e.define('UNSUPPORTED_2', 4)
UNSUPPORTED_3 = I2cSpeed_e.define('UNSUPPORTED_3', 5)
I2C_SPEED_COUNT = I2cSpeed_e.define('I2C_SPEED_COUNT', 6)
class I2cCmdType_e(Annotated[int, ctypes.c_uint32], c.Enum): pass
I2C_CMD_READ = I2cCmdType_e.define('I2C_CMD_READ', 0)
I2C_CMD_WRITE = I2cCmdType_e.define('I2C_CMD_WRITE', 1)
I2C_CMD_COUNT = I2cCmdType_e.define('I2C_CMD_COUNT', 2)
class ERR_CODE_e(Annotated[int, ctypes.c_uint32], c.Enum): pass
CODE_DAGB0 = ERR_CODE_e.define('CODE_DAGB0', 0)
CODE_EA0 = ERR_CODE_e.define('CODE_EA0', 5)
CODE_UTCL2_ROUTER = ERR_CODE_e.define('CODE_UTCL2_ROUTER', 10)
CODE_VML2 = ERR_CODE_e.define('CODE_VML2', 11)
CODE_VML2_WALKER = ERR_CODE_e.define('CODE_VML2_WALKER', 12)
CODE_MMCANE = ERR_CODE_e.define('CODE_MMCANE', 13)
CODE_VIDD = ERR_CODE_e.define('CODE_VIDD', 14)
CODE_VIDV = ERR_CODE_e.define('CODE_VIDV', 15)
CODE_JPEG0S = ERR_CODE_e.define('CODE_JPEG0S', 16)
CODE_JPEG0D = ERR_CODE_e.define('CODE_JPEG0D', 17)
CODE_JPEG1S = ERR_CODE_e.define('CODE_JPEG1S', 18)
CODE_JPEG1D = ERR_CODE_e.define('CODE_JPEG1D', 19)
CODE_JPEG2S = ERR_CODE_e.define('CODE_JPEG2S', 20)
CODE_JPEG2D = ERR_CODE_e.define('CODE_JPEG2D', 21)
CODE_JPEG3S = ERR_CODE_e.define('CODE_JPEG3S', 22)
CODE_JPEG3D = ERR_CODE_e.define('CODE_JPEG3D', 23)
CODE_JPEG4S = ERR_CODE_e.define('CODE_JPEG4S', 24)
CODE_JPEG4D = ERR_CODE_e.define('CODE_JPEG4D', 25)
CODE_JPEG5S = ERR_CODE_e.define('CODE_JPEG5S', 26)
CODE_JPEG5D = ERR_CODE_e.define('CODE_JPEG5D', 27)
CODE_JPEG6S = ERR_CODE_e.define('CODE_JPEG6S', 28)
CODE_JPEG6D = ERR_CODE_e.define('CODE_JPEG6D', 29)
CODE_JPEG7S = ERR_CODE_e.define('CODE_JPEG7S', 30)
CODE_JPEG7D = ERR_CODE_e.define('CODE_JPEG7D', 31)
CODE_MMSCHD = ERR_CODE_e.define('CODE_MMSCHD', 32)
CODE_SDMA0 = ERR_CODE_e.define('CODE_SDMA0', 33)
CODE_SDMA1 = ERR_CODE_e.define('CODE_SDMA1', 34)
CODE_SDMA2 = ERR_CODE_e.define('CODE_SDMA2', 35)
CODE_SDMA3 = ERR_CODE_e.define('CODE_SDMA3', 36)
CODE_HDP = ERR_CODE_e.define('CODE_HDP', 37)
CODE_ATHUB = ERR_CODE_e.define('CODE_ATHUB', 38)
CODE_IH = ERR_CODE_e.define('CODE_IH', 39)
CODE_XHUB_POISON = ERR_CODE_e.define('CODE_XHUB_POISON', 40)
CODE_SMN_SLVERR = ERR_CODE_e.define('CODE_SMN_SLVERR', 40)
CODE_WDT = ERR_CODE_e.define('CODE_WDT', 41)
CODE_UNKNOWN = ERR_CODE_e.define('CODE_UNKNOWN', 42)
CODE_COUNT = ERR_CODE_e.define('CODE_COUNT', 43)
class GC_ERROR_CODE_e(Annotated[int, ctypes.c_uint32], c.Enum): pass
SH_FED_CODE = GC_ERROR_CODE_e.define('SH_FED_CODE', 0)
GCEA_CODE = GC_ERROR_CODE_e.define('GCEA_CODE', 1)
SQ_CODE = GC_ERROR_CODE_e.define('SQ_CODE', 2)
LDS_CODE = GC_ERROR_CODE_e.define('LDS_CODE', 3)
GDS_CODE = GC_ERROR_CODE_e.define('GDS_CODE', 4)
SP0_CODE = GC_ERROR_CODE_e.define('SP0_CODE', 5)
SP1_CODE = GC_ERROR_CODE_e.define('SP1_CODE', 6)
TCC_CODE = GC_ERROR_CODE_e.define('TCC_CODE', 7)
TCA_CODE = GC_ERROR_CODE_e.define('TCA_CODE', 8)
TCX_CODE = GC_ERROR_CODE_e.define('TCX_CODE', 9)
CPC_CODE = GC_ERROR_CODE_e.define('CPC_CODE', 10)
CPF_CODE = GC_ERROR_CODE_e.define('CPF_CODE', 11)
CPG_CODE = GC_ERROR_CODE_e.define('CPG_CODE', 12)
SPI_CODE = GC_ERROR_CODE_e.define('SPI_CODE', 13)
RLC_CODE = GC_ERROR_CODE_e.define('RLC_CODE', 14)
SQC_CODE = GC_ERROR_CODE_e.define('SQC_CODE', 15)
TA_CODE = GC_ERROR_CODE_e.define('TA_CODE', 16)
TD_CODE = GC_ERROR_CODE_e.define('TD_CODE', 17)
TCP_CODE = GC_ERROR_CODE_e.define('TCP_CODE', 18)
TCI_CODE = GC_ERROR_CODE_e.define('TCI_CODE', 19)
GC_ROUTER_CODE = GC_ERROR_CODE_e.define('GC_ROUTER_CODE', 20)
VML2_CODE = GC_ERROR_CODE_e.define('VML2_CODE', 21)
VML2_WALKER_CODE = GC_ERROR_CODE_e.define('VML2_WALKER_CODE', 22)
ATCL2_CODE = GC_ERROR_CODE_e.define('ATCL2_CODE', 23)
GC_CANE_CODE = GC_ERROR_CODE_e.define('GC_CANE_CODE', 24)
MP5_CODE_SMN_SLVERR = GC_ERROR_CODE_e.define('MP5_CODE_SMN_SLVERR', 40)
MP5_CODE_UNKNOWN = GC_ERROR_CODE_e.define('MP5_CODE_UNKNOWN', 42)
@c.record
class SwI2cCmd_t(c.Struct):
  """One software-I2C command: a data byte plus its config/flags byte -- generated layout."""
  SIZE = 2
  ReadWriteData: Annotated[uint8_t, 0]
  CmdConfig: Annotated[uint8_t, 1]
@c.record
class SwI2cRequest_t(c.Struct):
  """Software-I2C transaction: port/speed/address header plus up to 24 commands -- generated layout."""
  SIZE = 52
  I2CcontrollerPort: Annotated[uint8_t, 0]
  I2CSpeed: Annotated[uint8_t, 1]
  SlaveAddress: Annotated[uint8_t, 2]
  NumCmds: Annotated[uint8_t, 3]
  SwI2cCmds: Annotated[c.Array[SwI2cCmd_t, Literal[24]], 4]
@c.record
class SwI2cRequestExternal_t(c.Struct):
  """SwI2cRequest_t padded out to the external/driver-visible message size -- generated layout."""
  SIZE = 116
  SwI2cRequest: Annotated[SwI2cRequest_t, 0]
  Spare: Annotated[c.Array[uint32_t, Literal[8]], 52]
  MmHubPadding: Annotated[c.Array[uint32_t, Literal[8]], 84]
class PPCLK_e(Annotated[int, ctypes.c_uint32], c.Enum): pass
PPCLK_VCLK = PPCLK_e.define('PPCLK_VCLK', 0)
PPCLK_DCLK = PPCLK_e.define('PPCLK_DCLK', 1)
PPCLK_SOCCLK = PPCLK_e.define('PPCLK_SOCCLK', 2)
PPCLK_UCLK = PPCLK_e.define('PPCLK_UCLK', 3)
PPCLK_FCLK = PPCLK_e.define('PPCLK_FCLK', 4)
PPCLK_LCLK = PPCLK_e.define('PPCLK_LCLK', 5)
PPCLK_COUNT = PPCLK_e.define('PPCLK_COUNT', 6)
class GpioIntPolarity_e(Annotated[int, ctypes.c_uint32], c.Enum): pass
GPIO_INT_POLARITY_ACTIVE_LOW = GpioIntPolarity_e.define('GPIO_INT_POLARITY_ACTIVE_LOW', 0)
GPIO_INT_POLARITY_ACTIVE_HIGH = GpioIntPolarity_e.define('GPIO_INT_POLARITY_ACTIVE_HIGH', 1)
class UCLK_DPM_MODE_e(Annotated[int, ctypes.c_uint32], c.Enum): pass
UCLK_DPM_MODE_BANDWIDTH = UCLK_DPM_MODE_e.define('UCLK_DPM_MODE_BANDWIDTH', 0)
UCLK_DPM_MODE_LATENCY = UCLK_DPM_MODE_e.define('UCLK_DPM_MODE_LATENCY', 1)
@c.record
class AvfsDebugTableAid_t(c.Struct):
  """AVFS PSM debug samples for an AID: 30 avg/min count and voltage entries -- generated layout."""
  SIZE = 360
  avgPsmCount: Annotated[c.Array[uint16_t, Literal[30]], 0]
  minPsmCount: Annotated[c.Array[uint16_t, Literal[30]], 60]
  avgPsmVoltage: Annotated[c.Array[Annotated[float, ctypes.c_float], Literal[30]], 120]
  minPsmVoltage: Annotated[c.Array[Annotated[float, ctypes.c_float], Literal[30]], 240]
@c.record
class AvfsDebugTableXcd_t(c.Struct):
  """Same 360-byte layout as AvfsDebugTableAid_t, but for an XCD."""
  SIZE = 360
  avgPsmCount: Annotated[c.Array[uint16_t, Literal[30]], 0]
  minPsmCount: Annotated[c.Array[uint16_t, Literal[30]], 60]
  avgPsmVoltage: Annotated[c.Array[Annotated[float, ctypes.c_float], Literal[30]], 120]
  minPsmVoltage: Annotated[c.Array[Annotated[float, ctypes.c_float], Literal[30]], 240]
@c.record
class struct_smu_hw_power_state(c.Struct):
  """Mirror of the kernel's struct smu_hw_power_state: a single `magic` u32 -- generated layout."""
  SIZE = 4
  magic: Annotated[Annotated[int, ctypes.c_uint32], 0]
class struct_smu_power_state(ctypes.Structure): pass
class enum_smu_state_ui_label(Annotated[int, ctypes.c_uint32], c.Enum): pass
SMU_STATE_UI_LABEL_NONE = enum_smu_state_ui_label.define('SMU_STATE_UI_LABEL_NONE', 0)
SMU_STATE_UI_LABEL_BATTERY = enum_smu_state_ui_label.define('SMU_STATE_UI_LABEL_BATTERY', 1)
SMU_STATE_UI_TABEL_MIDDLE_LOW = enum_smu_state_ui_label.define('SMU_STATE_UI_TABEL_MIDDLE_LOW', 2)
SMU_STATE_UI_LABEL_BALLANCED = enum_smu_state_ui_label.define('SMU_STATE_UI_LABEL_BALLANCED', 3)
SMU_STATE_UI_LABEL_MIDDLE_HIGHT = enum_smu_state_ui_label.define('SMU_STATE_UI_LABEL_MIDDLE_HIGHT', 4)
SMU_STATE_UI_LABEL_PERFORMANCE = enum_smu_state_ui_label.define('SMU_STATE_UI_LABEL_PERFORMANCE', 5)
SMU_STATE_UI_LABEL_BACO = enum_smu_state_ui_label.define('SMU_STATE_UI_LABEL_BACO', 6)
# Power-state classification flags. Every value is a distinct power of two, so these are
# OR-able bitmask flags, not an ordinal enum. Names (incl. typos like "CLASSIFICATIN" and
# "OVERDIRVER") mirror the kernel header verbatim — do not "fix" them.
class enum_smu_state_classification_flag(Annotated[int, ctypes.c_uint32], c.Enum): pass
SMU_STATE_CLASSIFICATION_FLAG_BOOT = enum_smu_state_classification_flag.define('SMU_STATE_CLASSIFICATION_FLAG_BOOT', 1)
SMU_STATE_CLASSIFICATION_FLAG_THERMAL = enum_smu_state_classification_flag.define('SMU_STATE_CLASSIFICATION_FLAG_THERMAL', 2)
SMU_STATE_CLASSIFICATIN_FLAG_LIMITED_POWER_SOURCE = enum_smu_state_classification_flag.define('SMU_STATE_CLASSIFICATIN_FLAG_LIMITED_POWER_SOURCE', 4)
SMU_STATE_CLASSIFICATION_FLAG_RESET = enum_smu_state_classification_flag.define('SMU_STATE_CLASSIFICATION_FLAG_RESET', 8)
SMU_STATE_CLASSIFICATION_FLAG_FORCED = enum_smu_state_classification_flag.define('SMU_STATE_CLASSIFICATION_FLAG_FORCED', 16)
SMU_STATE_CLASSIFICATION_FLAG_USER_3D_PERFORMANCE = enum_smu_state_classification_flag.define('SMU_STATE_CLASSIFICATION_FLAG_USER_3D_PERFORMANCE', 32)
SMU_STATE_CLASSIFICATION_FLAG_USER_2D_PERFORMANCE = enum_smu_state_classification_flag.define('SMU_STATE_CLASSIFICATION_FLAG_USER_2D_PERFORMANCE', 64)
SMU_STATE_CLASSIFICATION_FLAG_3D_PERFORMANCE = enum_smu_state_classification_flag.define('SMU_STATE_CLASSIFICATION_FLAG_3D_PERFORMANCE', 128)
SMU_STATE_CLASSIFICATION_FLAG_AC_OVERDIRVER_TEMPLATE = enum_smu_state_classification_flag.define('SMU_STATE_CLASSIFICATION_FLAG_AC_OVERDIRVER_TEMPLATE', 256)
SMU_STATE_CLASSIFICATION_FLAG_UVD = enum_smu_state_classification_flag.define('SMU_STATE_CLASSIFICATION_FLAG_UVD', 512)
SMU_STATE_CLASSIFICATION_FLAG_3D_PERFORMANCE_LOW = enum_smu_state_classification_flag.define('SMU_STATE_CLASSIFICATION_FLAG_3D_PERFORMANCE_LOW', 1024)
SMU_STATE_CLASSIFICATION_FLAG_ACPI = enum_smu_state_classification_flag.define('SMU_STATE_CLASSIFICATION_FLAG_ACPI', 2048)
SMU_STATE_CLASSIFICATION_FLAG_HD2 = enum_smu_state_classification_flag.define('SMU_STATE_CLASSIFICATION_FLAG_HD2', 4096)
SMU_STATE_CLASSIFICATION_FLAG_UVD_HD = enum_smu_state_classification_flag.define('SMU_STATE_CLASSIFICATION_FLAG_UVD_HD', 8192)
SMU_STATE_CLASSIFICATION_FLAG_UVD_SD = enum_smu_state_classification_flag.define('SMU_STATE_CLASSIFICATION_FLAG_UVD_SD', 16384)
SMU_STATE_CLASSIFICATION_FLAG_USER_DC_PERFORMANCE = enum_smu_state_classification_flag.define('SMU_STATE_CLASSIFICATION_FLAG_USER_DC_PERFORMANCE', 32768)
SMU_STATE_CLASSIFICATION_FLAG_DC_OVERDIRVER_TEMPLATE = enum_smu_state_classification_flag.define('SMU_STATE_CLASSIFICATION_FLAG_DC_OVERDIRVER_TEMPLATE', 65536)
SMU_STATE_CLASSIFICATION_FLAG_BACO = enum_smu_state_classification_flag.define('SMU_STATE_CLASSIFICATION_FLAG_BACO', 131072)
SMU_STATE_CLASSIFICATIN_FLAG_LIMITED_POWER_SOURCE2 = enum_smu_state_classification_flag.define('SMU_STATE_CLASSIFICATIN_FLAG_LIMITED_POWER_SOURCE2', 262144)
SMU_STATE_CLASSIFICATION_FLAG_ULV = enum_smu_state_classification_flag.define('SMU_STATE_CLASSIFICATION_FLAG_ULV', 524288)
SMU_STATE_CLASSIFICATION_FLAG_UVD_MVC = enum_smu_state_classification_flag.define('SMU_STATE_CLASSIFICATION_FLAG_UVD_MVC', 1048576)
@c.record
class struct_smu_state_classification_block(c.Struct):
  # How a power state is classified: UI label, classification flags, originating BIOS index,
  # plus lifecycle booleans. Trailing ints in Annotated are byte offsets (16-byte record).
  SIZE = 16
  ui_label: Annotated[enum_smu_state_ui_label, 0]
  flags: Annotated[enum_smu_state_classification_flag, 4]
  bios_index: Annotated[Annotated[int, ctypes.c_int32], 8]
  temporary_state: Annotated[Annotated[bool, ctypes.c_bool], 12]
  to_be_deleted: Annotated[Annotated[bool, ctypes.c_bool], 13]
@c.record
class struct_smu_state_pcie_block(c.Struct):
  # PCIe portion of a power state: just the lane count (u32).
  SIZE = 4
  lanes: Annotated[Annotated[int, ctypes.c_uint32], 0]
# Where the refresh-rate limit comes from: the panel EDID or an explicit value.
class enum_smu_refreshrate_source(Annotated[int, ctypes.c_uint32], c.Enum): pass
SMU_REFRESHRATE_SOURCE_EDID = enum_smu_refreshrate_source.define('SMU_REFRESHRATE_SOURCE_EDID', 0)
SMU_REFRESHRATE_SOURCE_EXPLICIT = enum_smu_refreshrate_source.define('SMU_REFRESHRATE_SOURCE_EXPLICIT', 1)
@c.record
class struct_smu_state_display_block(c.Struct):
  # Display-related knobs of a power state (20-byte record; trailing ints are byte offsets).
  SIZE = 20
  disable_frame_modulation: Annotated[Annotated[bool, ctypes.c_bool], 0]
  limit_refreshrate: Annotated[Annotated[bool, ctypes.c_bool], 1]
  refreshrate_source: Annotated[enum_smu_refreshrate_source, 4]
  explicit_refreshrate: Annotated[Annotated[int, ctypes.c_int32], 8]
  edid_refreshrate_index: Annotated[Annotated[int, ctypes.c_int32], 12]
  enable_vari_bright: Annotated[Annotated[bool, ctypes.c_bool], 16]
@c.record
class struct_smu_state_memory_block(c.Struct):
  # Memory-related knobs of a power state; `unused` pads the record to 5 bytes.
  SIZE = 5
  dll_off: Annotated[Annotated[bool, ctypes.c_bool], 0]
  m3arb: Annotated[Annotated[int, ctypes.c_ubyte], 1]
  unused: Annotated[c.Array[Annotated[int, ctypes.c_ubyte], Literal[3]], 2]
@c.record
class struct_smu_state_software_algorithm_block(c.Struct):
  # Software-policy toggles of a power state (2 booleans).
  SIZE = 2
  disable_load_balancing: Annotated[Annotated[bool, ctypes.c_bool], 0]
  enable_sleep_for_timestamps: Annotated[Annotated[bool, ctypes.c_bool], 1]
@c.record
class struct_smu_temperature_range(c.Struct):
  # Thermal limits per sensor domain (edge / hotspot / memory) plus software shutdown
  # thresholds. Values are presumably in millidegrees C, matching
  # SMU_TEMPERATURE_UNITS_PER_CENTIGRADES = 1000 later in this file — TODO confirm.
  SIZE = 44
  min: Annotated[Annotated[int, ctypes.c_int32], 0]
  max: Annotated[Annotated[int, ctypes.c_int32], 4]
  edge_emergency_max: Annotated[Annotated[int, ctypes.c_int32], 8]
  hotspot_min: Annotated[Annotated[int, ctypes.c_int32], 12]
  hotspot_crit_max: Annotated[Annotated[int, ctypes.c_int32], 16]
  hotspot_emergency_max: Annotated[Annotated[int, ctypes.c_int32], 20]
  mem_min: Annotated[Annotated[int, ctypes.c_int32], 24]
  mem_crit_max: Annotated[Annotated[int, ctypes.c_int32], 28]
  mem_emergency_max: Annotated[Annotated[int, ctypes.c_int32], 32]
  software_shutdown_temp: Annotated[Annotated[int, ctypes.c_int32], 36]
  software_shutdown_temp_offset: Annotated[Annotated[int, ctypes.c_int32], 40]
@c.record
class struct_smu_state_validation_block(c.Struct):
  # Validity constraints for a power state (3-byte record).
  SIZE = 3
  single_display_only: Annotated[Annotated[bool, ctypes.c_bool], 0]
  disallow_on_dc: Annotated[Annotated[bool, ctypes.c_bool], 1]
  supported_power_levels: Annotated[Annotated[int, ctypes.c_ubyte], 2]
@c.record
class struct_smu_uvd_clocks(c.Struct):
  # UVD (video decode block) clock pair: vclk and dclk (u32 each).
  SIZE = 8
  vclk: Annotated[Annotated[int, ctypes.c_uint32], 0]
  dclk: Annotated[Annotated[int, ctypes.c_uint32], 4]
# Power source (AC wall power vs DC battery); COUNT is a sentinel, not a real source.
class enum_smu_power_src_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
SMU_POWER_SOURCE_AC = enum_smu_power_src_type.define('SMU_POWER_SOURCE_AC', 0)
SMU_POWER_SOURCE_DC = enum_smu_power_src_type.define('SMU_POWER_SOURCE_DC', 1)
SMU_POWER_SOURCE_COUNT = enum_smu_power_src_type.define('SMU_POWER_SOURCE_COUNT', 2)
# Which PPT (package power tracking) limit is being addressed.
class enum_smu_ppt_limit_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
SMU_DEFAULT_PPT_LIMIT = enum_smu_ppt_limit_type.define('SMU_DEFAULT_PPT_LIMIT', 0)
SMU_FAST_PPT_LIMIT = enum_smu_ppt_limit_type.define('SMU_FAST_PPT_LIMIT', 1)
# Which level of a PPT limit to query/set. Note: the only SIGNED (c_int32) enum in this
# group, because SMU_PPT_LIMIT_MIN is -1.
class enum_smu_ppt_limit_level(Annotated[int, ctypes.c_int32], c.Enum): pass
SMU_PPT_LIMIT_MIN = enum_smu_ppt_limit_level.define('SMU_PPT_LIMIT_MIN', -1)
SMU_PPT_LIMIT_CURRENT = enum_smu_ppt_limit_level.define('SMU_PPT_LIMIT_CURRENT', 0)
SMU_PPT_LIMIT_DEFAULT = enum_smu_ppt_limit_level.define('SMU_PPT_LIMIT_DEFAULT', 1)
SMU_PPT_LIMIT_MAX = enum_smu_ppt_limit_level.define('SMU_PPT_LIMIT_MAX', 2)
# Memory pool size choices; the enum values ARE the sizes in bytes (256MB..2GB).
class enum_smu_memory_pool_size(Annotated[int, ctypes.c_uint32], c.Enum): pass
SMU_MEMORY_POOL_SIZE_ZERO = enum_smu_memory_pool_size.define('SMU_MEMORY_POOL_SIZE_ZERO', 0)
SMU_MEMORY_POOL_SIZE_256_MB = enum_smu_memory_pool_size.define('SMU_MEMORY_POOL_SIZE_256_MB', 268435456)
SMU_MEMORY_POOL_SIZE_512_MB = enum_smu_memory_pool_size.define('SMU_MEMORY_POOL_SIZE_512_MB', 536870912)
SMU_MEMORY_POOL_SIZE_1_GB = enum_smu_memory_pool_size.define('SMU_MEMORY_POOL_SIZE_1_GB', 1073741824)
SMU_MEMORY_POOL_SIZE_2_GB = enum_smu_memory_pool_size.define('SMU_MEMORY_POOL_SIZE_2_GB', 2147483648)
# Clock domains addressable through the SMU, including the SMU_OD_* overdrive pseudo-clocks.
# SMU_CLK_COUNT is a sentinel. Values are ordinal and must stay in this exact order.
class enum_smu_clk_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
SMU_GFXCLK = enum_smu_clk_type.define('SMU_GFXCLK', 0)
SMU_VCLK = enum_smu_clk_type.define('SMU_VCLK', 1)
SMU_DCLK = enum_smu_clk_type.define('SMU_DCLK', 2)
SMU_VCLK1 = enum_smu_clk_type.define('SMU_VCLK1', 3)
SMU_DCLK1 = enum_smu_clk_type.define('SMU_DCLK1', 4)
SMU_ECLK = enum_smu_clk_type.define('SMU_ECLK', 5)
SMU_SOCCLK = enum_smu_clk_type.define('SMU_SOCCLK', 6)
SMU_UCLK = enum_smu_clk_type.define('SMU_UCLK', 7)
SMU_DCEFCLK = enum_smu_clk_type.define('SMU_DCEFCLK', 8)
SMU_DISPCLK = enum_smu_clk_type.define('SMU_DISPCLK', 9)
SMU_PIXCLK = enum_smu_clk_type.define('SMU_PIXCLK', 10)
SMU_PHYCLK = enum_smu_clk_type.define('SMU_PHYCLK', 11)
SMU_FCLK = enum_smu_clk_type.define('SMU_FCLK', 12)
SMU_SCLK = enum_smu_clk_type.define('SMU_SCLK', 13)
SMU_MCLK = enum_smu_clk_type.define('SMU_MCLK', 14)
SMU_PCIE = enum_smu_clk_type.define('SMU_PCIE', 15)
SMU_LCLK = enum_smu_clk_type.define('SMU_LCLK', 16)
SMU_OD_CCLK = enum_smu_clk_type.define('SMU_OD_CCLK', 17)
SMU_OD_SCLK = enum_smu_clk_type.define('SMU_OD_SCLK', 18)
SMU_OD_MCLK = enum_smu_clk_type.define('SMU_OD_MCLK', 19)
SMU_OD_VDDC_CURVE = enum_smu_clk_type.define('SMU_OD_VDDC_CURVE', 20)
SMU_OD_RANGE = enum_smu_clk_type.define('SMU_OD_RANGE', 21)
SMU_OD_VDDGFX_OFFSET = enum_smu_clk_type.define('SMU_OD_VDDGFX_OFFSET', 22)
SMU_OD_FAN_CURVE = enum_smu_clk_type.define('SMU_OD_FAN_CURVE', 23)
SMU_OD_ACOUSTIC_LIMIT = enum_smu_clk_type.define('SMU_OD_ACOUSTIC_LIMIT', 24)
SMU_OD_ACOUSTIC_TARGET = enum_smu_clk_type.define('SMU_OD_ACOUSTIC_TARGET', 25)
SMU_OD_FAN_TARGET_TEMPERATURE = enum_smu_clk_type.define('SMU_OD_FAN_TARGET_TEMPERATURE', 26)
SMU_OD_FAN_MINIMUM_PWM = enum_smu_clk_type.define('SMU_OD_FAN_MINIMUM_PWM', 27)
SMU_CLK_COUNT = enum_smu_clk_type.define('SMU_CLK_COUNT', 28)
@c.record
class struct_smu_user_dpm_profile(c.Struct):
  # User-chosen DPM settings that survive across mode switches. clk_mask has 28 entries,
  # one per enum_smu_clk_type value (SMU_CLK_COUNT above). 140-byte record.
  SIZE = 140
  fan_mode: Annotated[Annotated[int, ctypes.c_uint32], 0]
  power_limit: Annotated[Annotated[int, ctypes.c_uint32], 4]
  fan_speed_pwm: Annotated[Annotated[int, ctypes.c_uint32], 8]
  fan_speed_rpm: Annotated[Annotated[int, ctypes.c_uint32], 12]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 16]
  user_od: Annotated[Annotated[int, ctypes.c_uint32], 20]
  clk_mask: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[28]], 24]
  clk_dependency: Annotated[Annotated[int, ctypes.c_uint32], 136]
@c.record
class struct_smu_table(c.Struct):
  # A driver<->SMU shared table: size/alignment, GPU (mc_address) and CPU mappings, and the
  # backing buffer object (pointer to the forward-declared struct_amdgpu_bo below).
  SIZE = 48
  size: Annotated[Annotated[int, ctypes.c_uint64], 0]
  align: Annotated[Annotated[int, ctypes.c_uint32], 8]
  domain: Annotated[Annotated[int, ctypes.c_ubyte], 12]
  mc_address: Annotated[Annotated[int, ctypes.c_uint64], 16]
  cpu_addr: Annotated[ctypes.c_void_p, 24]
  bo: Annotated[c.POINTER[struct_amdgpu_bo], 32]
  version: Annotated[Annotated[int, ctypes.c_uint32], 40]
# Opaque forward declaration; only referenced via pointer in struct_smu_table.
class struct_amdgpu_bo(ctypes.Structure): pass
# What a reported performance level represents: measured activity or power containment.
class enum_smu_perf_level_designation(Annotated[int, ctypes.c_uint32], c.Enum): pass
PERF_LEVEL_ACTIVITY = enum_smu_perf_level_designation.define('PERF_LEVEL_ACTIVITY', 0)
PERF_LEVEL_POWER_CONTAINMENT = enum_smu_perf_level_designation.define('PERF_LEVEL_POWER_CONTAINMENT', 1)
@c.record
class struct_smu_performance_level(c.Struct):
  # Snapshot of one performance level: clocks, voltages, and non-local memory figures.
  SIZE = 24
  core_clock: Annotated[Annotated[int, ctypes.c_uint32], 0]
  memory_clock: Annotated[Annotated[int, ctypes.c_uint32], 4]
  vddc: Annotated[Annotated[int, ctypes.c_uint32], 8]
  vddci: Annotated[Annotated[int, ctypes.c_uint32], 12]
  non_local_mem_freq: Annotated[Annotated[int, ctypes.c_uint32], 16]
  non_local_mem_width: Annotated[Annotated[int, ctypes.c_uint32], 20]
@c.record
class struct_smu_clock_info(c.Struct):
  # Min/max envelope for memory clock, engine clock, and bus bandwidth.
  SIZE = 24
  min_mem_clk: Annotated[Annotated[int, ctypes.c_uint32], 0]
  max_mem_clk: Annotated[Annotated[int, ctypes.c_uint32], 4]
  min_eng_clk: Annotated[Annotated[int, ctypes.c_uint32], 8]
  max_eng_clk: Annotated[Annotated[int, ctypes.c_uint32], 12]
  min_bus_bandwidth: Annotated[Annotated[int, ctypes.c_uint32], 16]
  max_bus_bandwidth: Annotated[Annotated[int, ctypes.c_uint32], 20]
@c.record
class struct_smu_bios_boot_up_values(c.Struct):
  # Clock and voltage defaults read from the VBIOS at boot (68-byte record; trailing ints
  # in Annotated are byte offsets — note the 3-byte pad after cooling_id at offset 40).
  SIZE = 68
  revision: Annotated[Annotated[int, ctypes.c_uint32], 0]
  gfxclk: Annotated[Annotated[int, ctypes.c_uint32], 4]
  uclk: Annotated[Annotated[int, ctypes.c_uint32], 8]
  socclk: Annotated[Annotated[int, ctypes.c_uint32], 12]
  dcefclk: Annotated[Annotated[int, ctypes.c_uint32], 16]
  eclk: Annotated[Annotated[int, ctypes.c_uint32], 20]
  vclk: Annotated[Annotated[int, ctypes.c_uint32], 24]
  dclk: Annotated[Annotated[int, ctypes.c_uint32], 28]
  vddc: Annotated[Annotated[int, ctypes.c_uint16], 32]
  vddci: Annotated[Annotated[int, ctypes.c_uint16], 34]
  mvddc: Annotated[Annotated[int, ctypes.c_uint16], 36]
  vdd_gfx: Annotated[Annotated[int, ctypes.c_uint16], 38]
  cooling_id: Annotated[Annotated[int, ctypes.c_ubyte], 40]
  pp_table_id: Annotated[Annotated[int, ctypes.c_uint32], 44]
  format_revision: Annotated[Annotated[int, ctypes.c_uint32], 48]
  content_revision: Annotated[Annotated[int, ctypes.c_uint32], 52]
  fclk: Annotated[Annotated[int, ctypes.c_uint32], 56]
  lclk: Annotated[Annotated[int, ctypes.c_uint32], 60]
  firmware_caps: Annotated[Annotated[int, ctypes.c_uint32], 64]
# Identifiers for the driver<->SMU shared tables; SMU_TABLE_COUNT is a sentinel.
class enum_smu_table_id(Annotated[int, ctypes.c_uint32], c.Enum): pass
SMU_TABLE_PPTABLE = enum_smu_table_id.define('SMU_TABLE_PPTABLE', 0)
SMU_TABLE_WATERMARKS = enum_smu_table_id.define('SMU_TABLE_WATERMARKS', 1)
SMU_TABLE_CUSTOM_DPM = enum_smu_table_id.define('SMU_TABLE_CUSTOM_DPM', 2)
SMU_TABLE_DPMCLOCKS = enum_smu_table_id.define('SMU_TABLE_DPMCLOCKS', 3)
SMU_TABLE_AVFS = enum_smu_table_id.define('SMU_TABLE_AVFS', 4)
SMU_TABLE_AVFS_PSM_DEBUG = enum_smu_table_id.define('SMU_TABLE_AVFS_PSM_DEBUG', 5)
SMU_TABLE_AVFS_FUSE_OVERRIDE = enum_smu_table_id.define('SMU_TABLE_AVFS_FUSE_OVERRIDE', 6)
SMU_TABLE_PMSTATUSLOG = enum_smu_table_id.define('SMU_TABLE_PMSTATUSLOG', 7)
SMU_TABLE_SMU_METRICS = enum_smu_table_id.define('SMU_TABLE_SMU_METRICS', 8)
SMU_TABLE_DRIVER_SMU_CONFIG = enum_smu_table_id.define('SMU_TABLE_DRIVER_SMU_CONFIG', 9)
SMU_TABLE_ACTIVITY_MONITOR_COEFF = enum_smu_table_id.define('SMU_TABLE_ACTIVITY_MONITOR_COEFF', 10)
SMU_TABLE_OVERDRIVE = enum_smu_table_id.define('SMU_TABLE_OVERDRIVE', 11)
SMU_TABLE_I2C_COMMANDS = enum_smu_table_id.define('SMU_TABLE_I2C_COMMANDS', 12)
SMU_TABLE_PACE = enum_smu_table_id.define('SMU_TABLE_PACE', 13)
SMU_TABLE_ECCINFO = enum_smu_table_id.define('SMU_TABLE_ECCINFO', 14)
SMU_TABLE_COMBO_PPTABLE = enum_smu_table_id.define('SMU_TABLE_COMBO_PPTABLE', 15)
SMU_TABLE_WIFIBAND = enum_smu_table_id.define('SMU_TABLE_WIFIBAND', 16)
SMU_TABLE_COUNT = enum_smu_table_id.define('SMU_TABLE_COUNT', 17)
# Finalize all record classes declared above (resolves the forward-declared pointers).
c.init_records()
# --- PPSMC result/status codes returned for a PPSMC message. ---
PPSMC_Result_OK = 0x1 # type: ignore
PPSMC_Result_Failed = 0xFF # type: ignore
PPSMC_Result_UnknownCmd = 0xFE # type: ignore
PPSMC_Result_CmdRejectedPrereq = 0xFD # type: ignore
PPSMC_Result_CmdRejectedBusy = 0xFC # type: ignore
# --- PPSMC message opcodes. The IDs are NOT contiguous (gaps e.g. 0x3B->0x40, 0x4E->0x50);
# the gaps come from the vendor header — do not renumber. ---
PPSMC_MSG_TestMessage = 0x1 # type: ignore
PPSMC_MSG_GetSmuVersion = 0x2 # type: ignore
PPSMC_MSG_GfxDriverReset = 0x3 # type: ignore
PPSMC_MSG_GetDriverIfVersion = 0x4 # type: ignore
PPSMC_MSG_EnableAllSmuFeatures = 0x5 # type: ignore
PPSMC_MSG_DisableAllSmuFeatures = 0x6 # type: ignore
PPSMC_MSG_RequestI2cTransaction = 0x7 # type: ignore
PPSMC_MSG_GetMetricsVersion = 0x8 # type: ignore
PPSMC_MSG_GetMetricsTable = 0x9 # type: ignore
PPSMC_MSG_GetEccInfoTable = 0xA # type: ignore
PPSMC_MSG_GetEnabledSmuFeaturesLow = 0xB # type: ignore
PPSMC_MSG_GetEnabledSmuFeaturesHigh = 0xC # type: ignore
PPSMC_MSG_SetDriverDramAddrHigh = 0xD # type: ignore
PPSMC_MSG_SetDriverDramAddrLow = 0xE # type: ignore
PPSMC_MSG_SetToolsDramAddrHigh = 0xF # type: ignore
PPSMC_MSG_SetToolsDramAddrLow = 0x10 # type: ignore
PPSMC_MSG_SetSystemVirtualDramAddrHigh = 0x11 # type: ignore
PPSMC_MSG_SetSystemVirtualDramAddrLow = 0x12 # type: ignore
PPSMC_MSG_SetSoftMinByFreq = 0x13 # type: ignore
PPSMC_MSG_SetSoftMaxByFreq = 0x14 # type: ignore
PPSMC_MSG_GetMinDpmFreq = 0x15 # type: ignore
PPSMC_MSG_GetMaxDpmFreq = 0x16 # type: ignore
PPSMC_MSG_GetDpmFreqByIndex = 0x17 # type: ignore
PPSMC_MSG_SetPptLimit = 0x18 # type: ignore
PPSMC_MSG_GetPptLimit = 0x19 # type: ignore
PPSMC_MSG_DramLogSetDramAddrHigh = 0x1A # type: ignore
PPSMC_MSG_DramLogSetDramAddrLow = 0x1B # type: ignore
PPSMC_MSG_DramLogSetDramSize = 0x1C # type: ignore
PPSMC_MSG_GetDebugData = 0x1D # type: ignore
PPSMC_MSG_HeavySBR = 0x1E # type: ignore
PPSMC_MSG_SetNumBadHbmPagesRetired = 0x1F # type: ignore
PPSMC_MSG_DFCstateControl = 0x20 # type: ignore
PPSMC_MSG_GetGmiPwrDnHyst = 0x21 # type: ignore
PPSMC_MSG_SetGmiPwrDnHyst = 0x22 # type: ignore
PPSMC_MSG_GmiPwrDnControl = 0x23 # type: ignore
PPSMC_MSG_EnterGfxoff = 0x24 # type: ignore
PPSMC_MSG_ExitGfxoff = 0x25 # type: ignore
PPSMC_MSG_EnableDeterminism = 0x26 # type: ignore
PPSMC_MSG_DisableDeterminism = 0x27 # type: ignore
PPSMC_MSG_DumpSTBtoDram = 0x28 # type: ignore
PPSMC_MSG_STBtoDramLogSetDramAddrHigh = 0x29 # type: ignore
PPSMC_MSG_STBtoDramLogSetDramAddrLow = 0x2A # type: ignore
PPSMC_MSG_STBtoDramLogSetDramSize = 0x2B # type: ignore
PPSMC_MSG_SetSystemVirtualSTBtoDramAddrHigh = 0x2C # type: ignore
PPSMC_MSG_SetSystemVirtualSTBtoDramAddrLow = 0x2D # type: ignore
PPSMC_MSG_GfxDriverResetRecovery = 0x2E # type: ignore
PPSMC_MSG_TriggerVFFLR = 0x2F # type: ignore
PPSMC_MSG_SetSoftMinGfxClk = 0x30 # type: ignore
PPSMC_MSG_SetSoftMaxGfxClk = 0x31 # type: ignore
PPSMC_MSG_GetMinGfxDpmFreq = 0x32 # type: ignore
PPSMC_MSG_GetMaxGfxDpmFreq = 0x33 # type: ignore
PPSMC_MSG_PrepareForDriverUnload = 0x34 # type: ignore
PPSMC_MSG_ReadThrottlerLimit = 0x35 # type: ignore
PPSMC_MSG_QueryValidMcaCount = 0x36 # type: ignore
PPSMC_MSG_McaBankDumpDW = 0x37 # type: ignore
PPSMC_MSG_GetCTFLimit = 0x38 # type: ignore
PPSMC_MSG_ClearMcaOnRead = 0x39 # type: ignore
PPSMC_MSG_QueryValidMcaCeCount = 0x3A # type: ignore
PPSMC_MSG_McaBankCeDumpDW = 0x3B # type: ignore
PPSMC_MSG_SelectPLPDMode = 0x40 # type: ignore
PPSMC_MSG_PmLogReadSample = 0x41 # type: ignore
PPSMC_MSG_PmLogGetTableVersion = 0x42 # type: ignore
PPSMC_MSG_RmaDueToBadPageThreshold = 0x43 # type: ignore
PPSMC_MSG_SetThrottlingPolicy = 0x44 # type: ignore
PPSMC_MSG_SetPhaseDetectCSBWThreshold = 0x45 # type: ignore
PPSMC_MSG_SetPhaseDetectFreqHigh = 0x46 # type: ignore
PPSMC_MSG_SetPhaseDetectFreqLow = 0x47 # type: ignore
PPSMC_MSG_SetPhaseDetectDownHysterisis = 0x48 # type: ignore
PPSMC_MSG_SetPhaseDetectAlphaX1e6 = 0x49 # type: ignore
PPSMC_MSG_SetPhaseDetectOnOff = 0x4A # type: ignore
PPSMC_MSG_GetPhaseDetectResidency = 0x4B # type: ignore
PPSMC_MSG_UpdatePccWaitDecMaxStr = 0x4C # type: ignore
PPSMC_MSG_ResetSDMA = 0x4D # type: ignore
PPSMC_MSG_GetRasTableVersion = 0x4E # type: ignore
PPSMC_MSG_GetBadPageCount = 0x50 # type: ignore
PPSMC_MSG_GetBadPageMcaAddress = 0x51 # type: ignore
PPSMC_MSG_SetTimestamp = 0x53 # type: ignore
PPSMC_MSG_SetTimestampHi = 0x54 # type: ignore
PPSMC_MSG_GetTimestamp = 0x55 # type: ignore
PPSMC_MSG_GetBadPageIpIdLoHi = 0x57 # type: ignore
PPSMC_MSG_EraseRasTable = 0x58 # type: ignore
PPSMC_MSG_GetStaticMetricsTable = 0x59 # type: ignore
PPSMC_MSG_ResetVfArbitersByIndex = 0x5A # type: ignore
PPSMC_MSG_GetSystemMetricsTable = 0x5C # type: ignore
PPSMC_MSG_GetSystemMetricsVersion = 0x5D # type: ignore
PPSMC_MSG_ResetVCN = 0x5E # type: ignore
PPSMC_MSG_SetFastPptLimit = 0x5F # type: ignore
PPSMC_MSG_GetFastPptLimit = 0x60 # type: ignore
PPSMC_Message_Count = 0x61 # type: ignore
# --- Argument values for specific PPSMC messages (reset modes, limit types, thermal sensor
# domains for GetCTFLimit, PLPD modes). ---
PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET = 0x1 # type: ignore
PPSMC_RESET_TYPE_DRIVER_MODE_2_RESET = 0x2 # type: ignore
PPSMC_RESET_TYPE_DRIVER_MODE_3_RESET = 0x3 # type: ignore
PPSMC_THROTTLING_LIMIT_TYPE_SOCKET = 0x1 # type: ignore
PPSMC_THROTTLING_LIMIT_TYPE_HBM = 0x2 # type: ignore
PPSMC_AID_THM_TYPE = 0x1 # type: ignore
PPSMC_CCD_THM_TYPE = 0x2 # type: ignore
PPSMC_XCD_THM_TYPE = 0x3 # type: ignore
PPSMC_HBM_THM_TYPE = 0x4 # type: ignore
PPSMC_PLPD_MODE_DEFAULT = 0x1 # type: ignore
PPSMC_PLPD_MODE_OPTIMIZED = 0x2 # type: ignore
# --- DPM level counts and fixed table dimensions. ---
NUM_VCLK_DPM_LEVELS = 4 # type: ignore
NUM_DCLK_DPM_LEVELS = 4 # type: ignore
NUM_SOCCLK_DPM_LEVELS = 4 # type: ignore
NUM_LCLK_DPM_LEVELS = 4 # type: ignore
NUM_UCLK_DPM_LEVELS = 4 # type: ignore
NUM_FCLK_DPM_LEVELS = 4 # type: ignore
NUM_XGMI_DPM_LEVELS = 2 # type: ignore
NUM_CXL_BITRATES = 4 # type: ignore
NUM_PCIE_BITRATES = 4 # type: ignore
NUM_XGMI_BITRATES = 4 # type: ignore
NUM_XGMI_WIDTHS = 3 # type: ignore
NUM_TDP_GROUPS = 4 # type: ignore
NUM_SOC_P2S_TABLES = 6 # type: ignore
NUM_GFX_P2S_TABLES = 8 # type: ignore
NUM_PSM_DIDT_THRESHOLDS = 3 # type: ignore
NUM_XVMIN_VMIN_THRESHOLDS = 3 # type: ignore
# --- Fixed string-field lengths for product identification records. ---
PRODUCT_MODEL_NUMBER_LEN = 20 # type: ignore
PRODUCT_NAME_LEN = 64 # type: ignore
PRODUCT_SERIAL_LEN = 20 # type: ignore
PRODUCT_MANUFACTURER_NAME_LEN = 32 # type: ignore
PRODUCT_FRU_ID_LEN = 32 # type: ignore
# --- Metrics table versioning. The VF metrics version sets bit 31 as a VF marker. ---
SMU_METRICS_TABLE_VERSION = 0x15 # type: ignore
SMU_SYSTEM_METRICS_TABLE_VERSION = 0x1 # type: ignore
SMU_VF_METRICS_TABLE_MASK = (1 << 31) # type: ignore
SMU_VF_METRICS_TABLE_VERSION = (0x6 | SMU_VF_METRICS_TABLE_MASK) # type: ignore
SMU13_0_6_DRIVER_IF_VERSION = 0x08042024 # type: ignore
# --- SMU-side software I2C: controller count and per-command config bit positions plus the
# masks derived from them. ---
NUM_I2C_CONTROLLERS = 8 # type: ignore
I2C_CONTROLLER_ENABLED = 1 # type: ignore
I2C_CONTROLLER_DISABLED = 0 # type: ignore
MAX_SW_I2C_COMMANDS = 24 # type: ignore
CMDCONFIG_STOP_BIT = 0 # type: ignore
CMDCONFIG_RESTART_BIT = 1 # type: ignore
CMDCONFIG_READWRITE_BIT = 2 # type: ignore
CMDCONFIG_STOP_MASK = (1 << CMDCONFIG_STOP_BIT) # type: ignore
CMDCONFIG_RESTART_MASK = (1 << CMDCONFIG_RESTART_BIT) # type: ignore
CMDCONFIG_READWRITE_MASK = (1 << CMDCONFIG_READWRITE_BIT) # type: ignore
# --- Interrupt-handler IDs and firmware throttler status bit positions. ---
IH_INTERRUPT_ID_TO_DRIVER = 0xFE # type: ignore
IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING = 0x7 # type: ignore
THROTTLER_PROCHOT_BIT = 0 # type: ignore
THROTTLER_PPT_BIT = 1 # type: ignore
THROTTLER_THERMAL_SOCKET_BIT = 2 # type: ignore
THROTTLER_THERMAL_VR_BIT = 3 # type: ignore
THROTTLER_THERMAL_HBM_BIT = 4 # type: ignore
# Flag bits for the PPSMC_MSG_ClearMcaOnRead argument.
ClearMcaOnRead_UE_FLAG_MASK = 0x1 # type: ignore
ClearMcaOnRead_CE_POLL_MASK = 0x2 # type: ignore
# Plain-Python alias emitted by the generator for C's int32_t.
int32_t = int # type: ignore
# --- Generic SMU thermal/firmware limits. ---
SMU_THERMAL_MINIMUM_ALERT_TEMP = 0 # type: ignore
SMU_THERMAL_MAXIMUM_ALERT_TEMP = 255 # type: ignore
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES = 1000 # type: ignore
SMU_FW_NAME_LEN = 0x24 # type: ignore
# Flag bits for struct_smu_user_dpm_profile.flags.
SMU_DPM_USER_PROFILE_RESTORE = (1 << 0) # type: ignore
SMU_CUSTOM_FAN_SPEED_RPM = (1 << 1) # type: ignore
SMU_CUSTOM_FAN_SPEED_PWM = (1 << 2) # type: ignore
# --- Generic (ASIC-independent) throttler status bit positions. These are bit INDICES,
# not masks; note the deliberate gaps (7->16, 23->32, 47->56). ---
SMU_THROTTLER_PPT0_BIT = 0 # type: ignore
SMU_THROTTLER_PPT1_BIT = 1 # type: ignore
SMU_THROTTLER_PPT2_BIT = 2 # type: ignore
SMU_THROTTLER_PPT3_BIT = 3 # type: ignore
SMU_THROTTLER_SPL_BIT = 4 # type: ignore
SMU_THROTTLER_FPPT_BIT = 5 # type: ignore
SMU_THROTTLER_SPPT_BIT = 6 # type: ignore
SMU_THROTTLER_SPPT_APU_BIT = 7 # type: ignore
SMU_THROTTLER_TDC_GFX_BIT = 16 # type: ignore
SMU_THROTTLER_TDC_SOC_BIT = 17 # type: ignore
SMU_THROTTLER_TDC_MEM_BIT = 18 # type: ignore
SMU_THROTTLER_TDC_VDD_BIT = 19 # type: ignore
SMU_THROTTLER_TDC_CVIP_BIT = 20 # type: ignore
SMU_THROTTLER_EDC_CPU_BIT = 21 # type: ignore
SMU_THROTTLER_EDC_GFX_BIT = 22 # type: ignore
SMU_THROTTLER_APCC_BIT = 23 # type: ignore
SMU_THROTTLER_TEMP_GPU_BIT = 32 # type: ignore
SMU_THROTTLER_TEMP_CORE_BIT = 33 # type: ignore
SMU_THROTTLER_TEMP_MEM_BIT = 34 # type: ignore
SMU_THROTTLER_TEMP_EDGE_BIT = 35 # type: ignore
SMU_THROTTLER_TEMP_HOTSPOT_BIT = 36 # type: ignore
SMU_THROTTLER_TEMP_SOC_BIT = 37 # type: ignore
SMU_THROTTLER_TEMP_VR_GFX_BIT = 38 # type: ignore
SMU_THROTTLER_TEMP_VR_SOC_BIT = 39 # type: ignore
SMU_THROTTLER_TEMP_VR_MEM0_BIT = 40 # type: ignore
SMU_THROTTLER_TEMP_VR_MEM1_BIT = 41 # type: ignore
SMU_THROTTLER_TEMP_LIQUID0_BIT = 42 # type: ignore
SMU_THROTTLER_TEMP_LIQUID1_BIT = 43 # type: ignore
SMU_THROTTLER_VRHOT0_BIT = 44 # type: ignore
SMU_THROTTLER_VRHOT1_BIT = 45 # type: ignore
SMU_THROTTLER_PROCHOT_CPU_BIT = 46 # type: ignore
SMU_THROTTLER_PROCHOT_GFX_BIT = 47 # type: ignore
SMU_THROTTLER_PPM_BIT = 56 # type: ignore
SMU_THROTTLER_FIT_BIT = 57 # type: ignore
"repo_id": "tinygrad/tinygrad",
"file_path": "tinygrad/runtime/autogen/am/smu_v13_0_12.py",
"license": "MIT License",
"lines": 811,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
tinygrad/tinygrad:test/amd/test_sqtt_encoder.py | #!/usr/bin/env python3
"""Tests for SQTT encoder: verifies the emulator produces correct SQTT traces for known kernels.
Run with: AMD=1 MOCKGPU=1 python -m pytest test/amd/test_sqtt_encoder.py -v
"""
import ctypes, unittest
from tinygrad.helpers import Context
from tinygrad.renderer.amd.sqtt import decode, LAYOUT_HEADER, WAVESTART, WAVEEND, INST, IMMEDIATE, VALUINST, InstOp
from tinygrad.runtime.autogen.amd.rdna3.ins import *
def _run_kernel(instructions: list, lx=1, ly=1, lz=1, gx=1, gy=1, gz=1, args_ptr=0) -> bytes:
  """Assemble instructions, run on emulator with PROFILE=1, return the SQTT blob."""
  from test.mockgpu.amd.emu import run_asm, sqtt_traces
  # assemble the instruction list into raw machine code and pin it in a ctypes buffer
  machine_code = b''.join(ins.to_bytes() for ins in instructions)
  backing = (ctypes.c_char * len(machine_code))(*machine_code)
  sqtt_traces.clear()
  # PROFILE=1 makes the emulator record an SQTT trace for the dispatch
  with Context(PROFILE=1):
    run_asm(ctypes.addressof(backing), len(machine_code), gx, gy, gz, lx, ly, lz, args_ptr)
  assert len(sqtt_traces) == 1, f"expected 1 trace, got {len(sqtt_traces)}"
  return sqtt_traces.pop()
class TestSQTTEncoder(unittest.TestCase):
  """Decode SQTT blobs produced by the mock-GPU emulator and verify the packet stream."""
  def test_simple_salu(self):
    """A simple s_mov + s_endpgm kernel emits SALU INST packet."""
    blob = _run_kernel([s_mov_b32(s[0], 42), s_endpgm()])
    packets = list(decode(blob))
    inst_pkts = [p for p in packets if isinstance(p, INST)]
    self.assertEqual(len(inst_pkts), 1)
    self.assertEqual(inst_pkts[0].op, InstOp.SALU)
  def test_valu_emits_valuinst(self):
    """Regular VALU ops emit VALUINST packets."""
    blob = _run_kernel([v_mov_b32_e32(v[0], 0), v_add_f32_e32(v[1], v[0], v[0]), s_endpgm()])
    packets = list(decode(blob))
    valu_pkts = [p for p in packets if isinstance(p, VALUINST)]
    self.assertEqual(len(valu_pkts), 2)
    # no INST packets for regular VALU
    self.assertEqual(len([p for p in packets if isinstance(p, INST)]), 0)
  def test_waitcnt_emits_immediate(self):
    """s_waitcnt and s_nop emit IMMEDIATE packets."""
    blob = _run_kernel([s_nop(simm16=0), s_waitcnt(simm16=0), s_endpgm()])
    imm_pkts = [p for p in decode(blob) if isinstance(p, IMMEDIATE)]
    self.assertEqual(len(imm_pkts), 2)  # s_nop + s_waitcnt
  def test_endpgm_skipped(self):
    """s_endpgm does not emit any packet."""
    blob = _run_kernel([s_endpgm()])
    packets = list(decode(blob))
    # neither an INST nor an IMMEDIATE packet should appear for the bare s_endpgm
    self.assertEqual(len([p for p in packets if isinstance(p, INST)]), 0)
    self.assertEqual(len([p for p in packets if isinstance(p, IMMEDIATE)]), 0)
  def test_wave_lifecycle(self):
    """Every WAVESTART has a matching WAVEEND."""
    blob = _run_kernel([s_mov_b32(s[0], 0), s_endpgm()])
    packets = list(decode(blob))
    self.assertEqual(sum(1 for p in packets if isinstance(p, WAVESTART)), sum(1 for p in packets if isinstance(p, WAVEEND)))
  def test_layout_header(self):
    """First packet is LAYOUT_HEADER with layout=3."""
    blob = _run_kernel([s_endpgm()])
    packets = list(decode(blob))
    self.assertIsInstance(packets[0], LAYOUT_HEADER)
    self.assertEqual(packets[0].layout, 3)
  def test_blob_32byte_aligned(self):
    """SQTT blob is 32-byte aligned."""
    blob = _run_kernel([s_mov_b32(s[0], 0), s_mov_b32(s[1], 1), s_endpgm()])
    self.assertEqual(len(blob) % 32, 0)
  def test_multiple_waves(self):
    """Multiple wavefronts each get their own WAVESTART/WAVEEND."""
    blob = _run_kernel([s_mov_b32(s[0], 0), s_endpgm()], lx=64)  # 64 threads = 2 waves (WAVE_SIZE=32)
    packets = list(decode(blob))
    self.assertEqual(sum(1 for p in packets if isinstance(p, WAVESTART)), 2)
    self.assertEqual(sum(1 for p in packets if isinstance(p, WAVEEND)), 2)
  def test_branch_taken_and_not_taken(self):
    """A loop with s_cbranch_scc1 emits JUMP when taken, JUMP_NO on final iteration."""
    # s[0] = 2; loop: s[0] -= 1; cmp s[0] != 0 (SCC=1 if true); cbranch_scc1 loop; endpgm
    # iteration 1: s[0]=2→1, SCC=1 (1!=0), branch taken (JUMP)
    # iteration 2: s[0]=1→0, SCC=0 (0==0), branch not taken (JUMP_NO)
    blob = _run_kernel([s_mov_b32(s[0], 2), s_sub_u32(s[0], s[0], 1), s_cmp_lg_u32(s[0], 0), s_cbranch_scc1(simm16=-3), s_endpgm()])
    inst_pkts = [p for p in decode(blob) if isinstance(p, INST)]
    ops = [p.op for p in inst_pkts]
    self.assertIn(InstOp.JUMP, ops)
    self.assertIn(InstOp.JUMP_NO, ops)
  def test_timestamps_monotonic(self):
    """Timestamps are monotonically non-decreasing."""
    blob = _run_kernel([s_mov_b32(s[0], 0), s_mov_b32(s[1], 1), s_mov_b32(s[2], 2), s_endpgm()])
    times = [p._time for p in decode(blob)]
    self.assertEqual(times, sorted(times))
  def test_no_trace_without_profile(self):
    """No SQTT trace is emitted when PROFILE=0."""
    # drive the emulator directly (not via _run_kernel) since that helper asserts one trace
    from test.mockgpu.amd.emu import run_asm, sqtt_traces
    code = s_endpgm().to_bytes()
    buf = (ctypes.c_char * len(code))(*code)
    sqtt_traces.clear()
    with Context(PROFILE=0):
      run_asm(ctypes.addressof(buf), len(code), 1, 1, 1, 1, 1, 1, 0)
    self.assertEqual(len(sqtt_traces), 0)
# allow running this test file directly (outside the pytest runner)
if __name__ == "__main__":
  unittest.main()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/amd/test_sqtt_encoder.py",
"license": "MIT License",
"lines": 93,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:test/testextra/test_hk_fa.py | import unittest, time
from tinygrad import Tensor, Device, dtypes, Context
from tinygrad.engine.jit import TinyJit
import numpy as np
from extra.thunder.amd.fa import flash_attention
def assert_allclose(cmp:Tensor, ref:Tensor, **kwargs) -> None:
  """Numerically compare two tensors; on the NULL backend just realize them (no data to read)."""
  if Device.DEFAULT != "NULL":
    np.testing.assert_allclose(cmp.numpy(), ref.numpy(), **kwargs)
  else:
    Tensor.realize(cmp, ref)
class TestFA(unittest.TestCase):
def setUp(self):
arch = getattr(Device[Device.DEFAULT].renderer, "arch", "")
if not arch.startswith("gfx9"):
self.skipTest(f"arch {arch} not supported")
def test_fast_fa_causal(self):
B, N, H, H_KV, D = 1, 8192, 32, 8, 128
with Context(DEBUG=0):
q = Tensor.randn(B, N, H, D, dtype=dtypes.bfloat16).contiguous()
k = Tensor.randn(B, N, H_KV, D, dtype=dtypes.bfloat16).contiguous()
v = Tensor.randn(B, N, H_KV, D, dtype=dtypes.bfloat16).contiguous()
Tensor.realize(q, k, v)
q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)
fa_jitted = TinyJit(flash_attention)
for _ in range(10):
st = time.perf_counter()
out = fa_jitted(q, k, v, is_causal=True)
et = time.perf_counter() - st
attn_flops = 2 * B * H * N * N * D + \
4 * B * H * N * N + \
2 * B * H * N * N * D
print(f"{attn_flops/(et*1e9):2f} GFLOPS")
out = out.float().transpose(1, 2)
ref = q.scaled_dot_product_attention(k, v, is_causal=True, enable_gqa=True).float().transpose(1, 2)
assert_allclose(out, ref, atol=2e-2, rtol=2e-2)
  def test_fast_fa_bwd_causal(self):
    """Backward pass of causal flash attention: q/k/v grads match the SDPA reference."""
    Tensor.manual_seed(42)
    # B=batch, N=seq len, H=query heads, H_KV=key/value heads (GQA), D=head dim
    B, N, H, H_KV, D = 1, 8192, 32, 8, 128
    with Context(DEBUG=0):
      q = Tensor.randn(B, N, H, D, dtype=dtypes.bfloat16, requires_grad=True).contiguous()
      k = Tensor.randn(B, N, H_KV, D, dtype=dtypes.bfloat16, requires_grad=True).contiguous()
      v = Tensor.randn(B, N, H_KV, D, dtype=dtypes.bfloat16, requires_grad=True).contiguous()
      Tensor.realize(q, k, v)
    # upstream gradient of all-ones in fp32
    do = Tensor.ones(B, N, H, D, dtype=dtypes.float32).contiguous()
    Tensor.realize(do)
    # flash_attention takes (B, H, N, D); grads accumulate on the pre-transpose q/k/v leaves
    q_, k_, v_ = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)
    out = flash_attention(q_, k_, v_, is_causal=True)
    out = out.float().transpose(1, 2)
    out.backward(do)
    Tensor.realize(q.grad, k.grad, v.grad)
    with Context(DEBUG=0):
      # detached clones so the reference builds an independent autograd graph
      q_ref = q.detach().clone().requires_grad_(True)
      k_ref = k.detach().clone().requires_grad_(True)
      v_ref = v.detach().clone().requires_grad_(True)
      Tensor.realize(q_ref, k_ref, v_ref)
    q_ref_, k_ref_, v_ref_ = q_ref.transpose(1, 2), k_ref.transpose(1, 2), v_ref.transpose(1, 2)
    ref = q_ref_.scaled_dot_product_attention(k_ref_, v_ref_, is_causal=True, enable_gqa=True)
    ref = ref.float().transpose(1, 2)
    ref.backward(do)
    Tensor.realize(q_ref.grad, k_ref.grad, v_ref.grad)
    # k.grad gets a looser atol (6e-2 vs 2e-2) than q/v grads
    assert_allclose(q.grad, q_ref.grad, atol=2e-2, rtol=2e-2)
    assert_allclose(v.grad, v_ref.grad, atol=2e-2, rtol=2e-2)
    assert_allclose(k.grad, k_ref.grad, atol=6e-2, rtol=2e-2)
  def test_fast_fa_bwd_causal_jitted(self):
    """Backward pass through a TinyJit-wrapped fwd+bwd step gives consistent grads.

    NOTE(review): the reference below calls flash_attention itself (not
    scaled_dot_product_attention), so this checks JIT-replayed grads against a fresh
    un-jitted flash-attention run — confirm that is the intent.
    """
    Tensor.manual_seed(42)
    # B=batch, N=seq len, H=query heads, H_KV=key/value heads (GQA), D=head dim
    B, N, H, H_KV, D = 1, 8192, 32, 8, 128
    with Context(DEBUG=0):
      q = Tensor.randn(B, N, H, D, dtype=dtypes.bfloat16, requires_grad=True).contiguous()
      k = Tensor.randn(B, N, H_KV, D, dtype=dtypes.bfloat16, requires_grad=True).contiguous()
      v = Tensor.randn(B, N, H_KV, D, dtype=dtypes.bfloat16, requires_grad=True).contiguous()
      Tensor.realize(q, k, v)
      do = Tensor.ones(B, N, H, D, dtype=dtypes.float32).contiguous()
      Tensor.realize(do)
    def fn(q, k, v, do):
      # one fwd+bwd step: flash_attention takes (B, H, N, D) layout
      q_, k_, v_ = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)
      out = flash_attention(q_, k_, v_, is_causal=True)
      out = out.float().transpose(1, 2)
      out.backward(do)
      Tensor.realize(out, q.grad, k.grad, v.grad)
      return q.grad, k.grad, v.grad
    fn_jitted = TinyJit(fn)
    # run with fresh inputs each time so JIT capture/replay is exercised; only the last
    # iteration's q/k/v (and their grads) are checked below
    for _ in range(10):
      q = Tensor.randn(B, N, H, D, dtype=dtypes.bfloat16, requires_grad=True).contiguous()
      k = Tensor.randn(B, N, H_KV, D, dtype=dtypes.bfloat16, requires_grad=True).contiguous()
      v = Tensor.randn(B, N, H_KV, D, dtype=dtypes.bfloat16, requires_grad=True).contiguous()
      Tensor.realize(q, k, v)
      do = Tensor.ones(B, N, H, D, dtype=dtypes.float32).contiguous()
      Tensor.realize(do)
      q.grad, k.grad, v.grad = fn_jitted(q, k, v, do)
    with Context(DEBUG=0):
      q_ref = q.detach().clone().requires_grad_(True)
      k_ref = k.detach().clone().requires_grad_(True)
      v_ref = v.detach().clone().requires_grad_(True)
      Tensor.realize(q_ref, k_ref, v_ref)
    q_ref_, k_ref_, v_ref_ = q_ref.transpose(1, 2), k_ref.transpose(1, 2), v_ref.transpose(1, 2)
    ref = flash_attention(q_ref_, k_ref_, v_ref_, is_causal=True)
    ref = ref.float().transpose(1, 2)
    ref.backward(do)
    Tensor.realize(q_ref.grad, k_ref.grad, v_ref.grad)
    assert_allclose(q.grad, q_ref.grad, atol=3e-3, rtol=3e-3)
    assert_allclose(k.grad, k_ref.grad, atol=1e-5, rtol=1e-5)
    assert_allclose(v.grad, v_ref.grad, atol=1e-5, rtol=1e-5)
def test_fast_fa_bwd_dp(self):
  """Data-parallel backward: shard Q/K/V on the batch dim (axis=0) across GPUs and
  check the grads match a single-device flash_attention run on the same data."""
  Tensor.manual_seed(42)
  B, N, H, H_KV, D = 2, 1024, 32, 8, 128
  GPUS = tuple(f"AMD:{i}" for i in range(B))
  with Context(DEBUG=0):
    base_q = Tensor.randn(B, N, H, D, dtype=dtypes.bfloat16, requires_grad=True).contiguous()
    base_k = Tensor.randn(B, N, H_KV, D, dtype=dtypes.bfloat16, requires_grad=True).contiguous()
    base_v = Tensor.randn(B, N, H_KV, D, dtype=dtypes.bfloat16, requires_grad=True).contiguous()
    base_do = Tensor.ones(B, N, H, D, dtype=dtypes.float32).contiguous()
  # sharded run
  with Context(DEBUG=0):
    q, k, v = (t.clone().requires_grad_(True).shard(GPUS, axis=0) for t in (base_q, base_k, base_v))
    Tensor.realize(q, k, v)
    do = base_do.clone().shard(GPUS, axis=0)
    Tensor.realize(do)
    out = flash_attention(q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2), is_causal=True).float().transpose(1, 2)
    out.backward(do)
    Tensor.realize(q.grad, k.grad, v.grad)
  # single-device reference on copies of the same base data
  with Context(DEBUG=0):
    q_ref, k_ref, v_ref = (t.clone().requires_grad_(True) for t in (base_q, base_k, base_v))
    Tensor.realize(q_ref, k_ref, v_ref)
    do_ref = base_do.clone()
    Tensor.realize(do_ref)
    ref = flash_attention(q_ref.transpose(1, 2), k_ref.transpose(1, 2), v_ref.transpose(1, 2), is_causal=True).float().transpose(1, 2)
    ref.backward(do_ref)
    Tensor.realize(q_ref.grad, k_ref.grad, v_ref.grad)
  assert_allclose(q.grad, q_ref.grad, atol=1e-5, rtol=1e-5)
  assert_allclose(v.grad, v_ref.grad, atol=1e-5, rtol=1e-5)
  assert_allclose(k.grad, k_ref.grad, atol=1e-5, rtol=1e-5)
def test_fast_fa_bwd_mp(self):
  # Model-parallel backward: shard Q/K/V on the heads dim (axis=2) across GPUs and
  # compare grads against a single-device flash_attention run on the same data.
  Tensor.manual_seed(42)
  B, N, H, H_KV, D = 2, 1024, 32, 8, 128
  GPUS = tuple(f"AMD:{i}" for i in range(B))
  with Context(DEBUG=0):
    # base tensors are created once so sharded and reference runs see identical data
    base_q = Tensor.randn(B, N, H, D, dtype=dtypes.bfloat16, requires_grad=True).contiguous()
    base_k = Tensor.randn(B, N, H_KV, D, dtype=dtypes.bfloat16, requires_grad=True).contiguous()
    base_v = Tensor.randn(B, N, H_KV, D, dtype=dtypes.bfloat16, requires_grad=True).contiguous()
    base_do = Tensor.ones(B, N, H, D, dtype=dtypes.float32).contiguous()
  with Context(DEBUG=0):
    # sharded run (heads axis)
    q = base_q.clone().requires_grad_(True).shard(GPUS, axis=2)
    k = base_k.clone().requires_grad_(True).shard(GPUS, axis=2)
    v = base_v.clone().requires_grad_(True).shard(GPUS, axis=2)
    Tensor.realize(q, k, v)
    do = base_do.clone().shard(GPUS, axis=2)
    Tensor.realize(do)
    q_, k_, v_ = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)
    out = flash_attention(q_, k_, v_, is_causal=True)
    out = out.float().transpose(1, 2)
    out.backward(do)
    Tensor.realize(q.grad, k.grad, v.grad)
  with Context(DEBUG=0):
    # single-device reference
    q_ref = base_q.clone().requires_grad_(True)
    k_ref = base_k.clone().requires_grad_(True)
    v_ref = base_v.clone().requires_grad_(True)
    Tensor.realize(q_ref, k_ref, v_ref)
    do_ref = base_do.clone()
    Tensor.realize(do_ref)
    q_ref_, k_ref_, v_ref_ = q_ref.transpose(1, 2), k_ref.transpose(1, 2), v_ref.transpose(1, 2)
    ref = flash_attention(q_ref_, k_ref_, v_ref_, is_causal=True)
    ref = ref.float().transpose(1, 2)
    ref.backward(do_ref)
    Tensor.realize(q_ref.grad, k_ref.grad, v_ref.grad)
  assert_allclose(q.grad, q_ref.grad, atol=1e-5, rtol=1e-5)
  assert_allclose(v.grad, v_ref.grad, atol=1e-5, rtol=1e-5)
  assert_allclose(k.grad, k_ref.grad, atol=1e-5, rtol=1e-5)
# allow running this test file directly
if __name__ == "__main__":
  unittest.main()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/testextra/test_hk_fa.py",
"license": "MIT License",
"lines": 174,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:tinygrad/mixin/dtype.py | from typing import Self
from tinygrad.dtype import DType, dtypes
class DTypeMixin:
  """Mixin providing dtype-related convenience methods; the concrete class
  (e.g. Tensor) must supply `dtype` and `cast`."""
  # interface the concrete class must implement
  @property
  def dtype(self) -> DType: raise NotImplementedError
  def cast(self, dtype:DType) -> Self: raise NotImplementedError
  def element_size(self) -> int:
    """
    Returns the size in bytes of an individual element in the tensor.

    ```python exec="true" source="above" session="tensor" result="python"
    t = Tensor([5], dtype=dtypes.int16)
    print(t.element_size())
    ```
    """
    return self.dtype.itemsize
  def is_floating_point(self) -> bool:
    """
    Returns `True` if the tensor contains floating point types, i.e. is one of `dtypes.float64`, `dtypes.float32`,
    `dtypes.float16`, `dtypes.bfloat16`.

    ```python exec="true" source="above" session="tensor" result="python"
    t = Tensor([8, 9], dtype=dtypes.float32)
    print(t.is_floating_point())
    ```
    """
    # .base handles vectorized/pointer dtypes by checking the underlying scalar type
    return dtypes.is_float(self.dtype.base)
  def float(self) -> Self:
    """
    Convenience method to cast `self` to a `float32` Tensor.

    ```python exec="true" source="above" session="tensor" result="python"
    t = Tensor([-1, 2, 3], dtype=dtypes.int32)
    print(t.dtype, t.numpy())
    ```
    ```python exec="true" source="above" session="tensor" result="python"
    t = t.float()
    print(t.dtype, t.numpy())
    ```
    """
    return self.cast(dtypes.float32)
  def half(self) -> Self:
    """
    Convenience method to cast `self` to a `float16` Tensor.

    ```python exec="true" source="above" session="tensor" result="python"
    t = Tensor([-1, 2, 3], dtype=dtypes.int32)
    print(t.dtype, t.numpy())
    ```
    ```python exec="true" source="above" session="tensor" result="python"
    t = t.half()
    print(t.dtype, t.numpy())
    ```
    """
    return self.cast(dtypes.float16)
  def int(self) -> Self:
    """
    Convenience method to cast `self` to a `int32` Tensor.

    ```python exec="true" source="above" session="tensor" result="python"
    t = Tensor([-1.5, -0.5, 0.0, 0.5, 1.5])
    print(t.dtype, t.numpy())
    ```
    ```python exec="true" source="above" session="tensor" result="python"
    t = t.int()
    print(t.dtype, t.numpy())
    ```
    """
    return self.cast(dtypes.int32)
  def bool(self) -> Self:
    """
    Convenience method to cast `self` to a `bool` Tensor.

    ```python exec="true" source="above" session="tensor" result="python"
    t = Tensor([-1, 0, 1])
    print(t.dtype, t.numpy())
    ```
    ```python exec="true" source="above" session="tensor" result="python"
    t = t.bool()
    print(t.dtype, t.numpy())
    ```
    """
    return self.cast(dtypes.bool)
  # short cast aliases; these mirror float/half/int/bool above, just without doc examples
  def bfloat16(self) -> Self: return self.cast(dtypes.bfloat16)
  def double(self) -> Self: return self.cast(dtypes.double)
  def long(self) -> Self: return self.cast(dtypes.long)
  def short(self) -> Self: return self.cast(dtypes.short)
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "tinygrad/mixin/dtype.py",
"license": "MIT License",
"lines": 81,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
tinygrad/tinygrad:test/unit/test_setitem_schedule.py | import unittest
from tinygrad import Tensor, dtypes, GlobalCounters
class TestSetitemInto(unittest.TestCase):
  """Checks that __setitem__ stays lazy and fuses into the expected number of
  kernels / bytes of memory traffic at realize time (tracked via GlobalCounters)."""
  def test_setitem_into_unrealized(self):
    # setitem on an unrealized tensor should not launch anything until realize,
    # then the base computation + assignment fuse into one kernel
    GlobalCounters.reset()
    t = Tensor.arange(4, dtype=dtypes.int32).reshape(2, 2)
    self.assertEqual(GlobalCounters.kernel_count, 0)
    t[1] = 5
    self.assertEqual(GlobalCounters.kernel_count, 0)
    t.realize()
    self.assertEqual(GlobalCounters.kernel_count, 1)
    self.assertEqual(GlobalCounters.global_mem, 16)
    # re-realizing (whole or sliced) must not launch more kernels
    t[1].realize()
    t.realize()
    self.assertEqual(GlobalCounters.kernel_count, 1)
    self.assertListEqual(t.tolist(), [[0, 1], [5, 5]])
  def test_setitem_into_unrealized_sliced_compute(self):
    # base computation contains SHRINK from prior slicing (like QR decomposition pattern)
    GlobalCounters.reset()
    a = Tensor.arange(8, dtype=dtypes.int32).reshape(2, 4)
    w = a[0] + a[1] # unrealized ADD with SHRINK in graph: [4, 6, 8, 10]
    self.assertEqual(GlobalCounters.kernel_count, 0)
    w[1] = 99
    self.assertEqual(GlobalCounters.kernel_count, 0)
    w.realize()
    self.assertEqual(GlobalCounters.kernel_count, 1)
    self.assertEqual(GlobalCounters.global_mem, 4*4)
    self.assertListEqual(w.tolist(), [4, 99, 8, 10])
  def test_setitem_into_empty(self):
    # setitem into an uninitialized (empty) buffer
    GlobalCounters.reset()
    t = Tensor.empty(4, dtype=dtypes.int32)
    t[1] = 5
    self.assertEqual(GlobalCounters.kernel_count, 0)
    t.realize()
    self.assertEqual(GlobalCounters.kernel_count, 1)
    # TODO: this can be just 4 if empty goes through is_realized setitem path
    self.assertEqual(GlobalCounters.global_mem, 4*(3*2+1)) # 3 elements had +1, 1 is assigned directly
    t[1].realize()
    t.realize()
    self.assertEqual(GlobalCounters.kernel_count, 1)
    self.assertEqual(t[1].item(), 5)
  def test_setitem_into_empty_alu(self):
    # same as above but the target is an unrealized elementwise op on an empty buffer
    GlobalCounters.reset()
    t = Tensor.empty(4, dtype=dtypes.int32) + 1
    self.assertEqual(GlobalCounters.kernel_count, 0)
    t[1] = 5
    self.assertEqual(GlobalCounters.kernel_count, 0)
    t.realize()
    self.assertEqual(GlobalCounters.kernel_count, 1)
    self.assertEqual(GlobalCounters.global_mem, 4*(3*2+1)) # 3 elements had +1, 1 is assigned directly
    t[1].realize()
    t.realize()
    self.assertEqual(GlobalCounters.kernel_count, 1)
    self.assertEqual(t[1].item(), 5)
  def test_setitem_into_tensor(self):
    # setitem into an already-realized buffer only touches the assigned element
    t = Tensor([1, 2, 3, 4], dtype=dtypes.int32).realize()
    GlobalCounters.reset()
    t[1] = 5
    self.assertEqual(GlobalCounters.kernel_count, 0)
    t[1].realize()
    self.assertEqual(GlobalCounters.kernel_count, 1)
    self.assertEqual(GlobalCounters.global_mem, 4)
    t.realize()
    self.assertEqual(GlobalCounters.kernel_count, 1)
    self.assertListEqual(t.tolist(), [1, 5, 3, 4])
  def test_setitem_into_tensor_alu(self):
    # realized buffer with a pending +1: the whole expression realizes in one kernel
    t = Tensor([1, 2, 3, 4], dtype=dtypes.int32).realize() + 1
    GlobalCounters.reset()
    t[1] = 5
    self.assertEqual(GlobalCounters.kernel_count, 0)
    t[1].realize()
    self.assertEqual(GlobalCounters.kernel_count, 1)
    self.assertEqual(GlobalCounters.global_mem, 4*(3*2+1)) # 3 elements had +1, 1 is assigned directly
    t[1].realize()
    t.realize()
    self.assertEqual(GlobalCounters.kernel_count, 1)
    self.assertListEqual(t.tolist(), [2, 5, 4, 5])
  def test_setitem_into_cont(self):
    # setitem into a const-backed tensor (ones); whole buffer is materialized once
    GlobalCounters.reset()
    t = Tensor.ones(4, dtype=dtypes.int32)
    t[1] = 5
    self.assertEqual(GlobalCounters.kernel_count, 0)
    t.realize()
    self.assertEqual(GlobalCounters.kernel_count, 1)
    self.assertEqual(GlobalCounters.global_mem, 4*4)
    t[1].realize()
    t.realize()
    self.assertEqual(GlobalCounters.kernel_count, 1)
    self.assertListEqual(t.tolist(), [1, 5, 1, 1])
  def test_setitem_into_const_alu(self):
    # const-backed tensor with a pending elementwise op
    GlobalCounters.reset()
    t = Tensor.ones(4, dtype=dtypes.int32) + 1
    t[1] = 5
    self.assertEqual(GlobalCounters.kernel_count, 0)
    t.realize()
    self.assertEqual(GlobalCounters.kernel_count, 1)
    self.assertEqual(GlobalCounters.global_mem, 4*4)
    t[1].realize()
    t.realize()
    self.assertEqual(GlobalCounters.kernel_count, 1)
    self.assertListEqual(t.tolist(), [2, 5, 2, 2])
  def test_setitem_into_arange(self):
    # NOTE: arange has no real buffer, but assigning to it is fine
    GlobalCounters.reset()
    t = Tensor.arange(4, dtype=dtypes.int32)
    t[1] = 5
    self.assertEqual(GlobalCounters.kernel_count, 0)
    t.realize()
    self.assertEqual(GlobalCounters.kernel_count, 1)
    self.assertListEqual(t.tolist(), [0, 5, 2, 3])
  def test_setitem_slice_const(self):
    # slice assignment of a constant only writes the slice, not the whole buffer
    t = Tensor.zeros(100, dtype=dtypes.int32).contiguous().realize()
    GlobalCounters.reset()
    t[20:50] = 3
    self.assertEqual(GlobalCounters.kernel_count, 0)
    t.realize()
    self.assertEqual(GlobalCounters.kernel_count, 1)
    self.assertEqual(GlobalCounters.global_mem, 30*4) # 30 elements written
  def test_setitem_slice_tensor(self):
    # slice assignment from another tensor reads the source and writes the slice
    t = Tensor.zeros(100, dtype=dtypes.int32).contiguous().realize()
    v = Tensor.zeros(30, dtype=dtypes.int32).contiguous().realize()
    GlobalCounters.reset()
    t[20:50] = v
    self.assertEqual(GlobalCounters.kernel_count, 0)
    t.realize()
    self.assertEqual(GlobalCounters.kernel_count, 1)
    self.assertEqual(GlobalCounters.global_mem, 30*4*2) # 30 read + 30 written
  def test_setitem_full(self):
    # full-range assignment rewrites the whole buffer in one kernel
    t = Tensor.zeros(100, dtype=dtypes.int32).contiguous().realize()
    GlobalCounters.reset()
    t[:] = 3
    self.assertEqual(GlobalCounters.kernel_count, 0)
    t.realize()
    self.assertEqual(GlobalCounters.kernel_count, 1)
    self.assertEqual(GlobalCounters.global_mem, 100*4) # full buffer written

# allow running this test file directly
if __name__ == '__main__':
  unittest.main()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/unit/test_setitem_schedule.py",
"license": "MIT License",
"lines": 137,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:test/external/external_test_hive_reset.py | #!/usr/bin/env python3
import subprocess, sys
from tinygrad.helpers import getenv
# LOOPS: how many reset iterations to run.
# BROKEN: after each reset, launch a deliberately-faulting kernel in a subprocess.
# ONLY_RESET: skip the post-reset sanity test entirely.
LOOPS = getenv("LOOPS", 50)
BROKEN = getenv("BROKEN", 0)
ONLY_RESET = getenv("ONLY_RESET", 0)

# Python source executed in a subprocess with `python -c`: compiles a kernel that
# writes through a bogus pointer and expects the AM driver to surface a RuntimeError.
BROKEN_KERNEL_SCRIPT = """
from tinygrad.device import Device
from tinygrad.runtime.ops_amd import AMDProgram, AMDDevice
from tinygrad.runtime.support.compiler_amd import compile_hip

dev = Device["AMD"]
assert isinstance(dev, AMDDevice) and dev.is_am(), "Need AM driver (not KFD)"

broken_src = '''
extern "C" __attribute__((global)) void broken(int* dummy) {
  volatile int* bad_ptr = (volatile int*)0xDEAD00000000ULL;
  *bad_ptr = 0x42;
}
'''
broken_lib = compile_hip(broken_src, dev.arch)
broken_prg = AMDProgram(dev, "broken", broken_lib)
buf = dev.allocator.alloc(64)
try:
  broken_prg(buf, global_size=(1,1,1), local_size=(1,1,1), wait=True)
  print("  ERROR: Kernel did not fault!")
except RuntimeError as e:
  print(f"  Got expected error: {e}")
"""

# main stress loop: reset the GPU hive, then (optionally) fault or sanity-check it
for i in range(LOOPS):
  print(f"=== Running hive_reset.py ({i+1}/{LOOPS}) ===")
  subprocess.run([sys.executable, "extra/amdpci/hive_reset.py"], check=True)
  print("=== hive_reset complete ===")
  if BROKEN:
    print(f"=== Running broken kernel ({i+1}/{LOOPS}) ===")
    ret = subprocess.run([sys.executable, "-c", BROKEN_KERNEL_SCRIPT])
    print(f"=== broken kernel exited with code {ret.returncode} ===")
  elif not ONLY_RESET:
    print(f"=== Running test_tiny.py ({i+1}/{LOOPS}) ===")
    ret = subprocess.run([sys.executable, "test/test_tiny.py", "TestTiny.test_plus"])
    print(f"=== test_tiny.py exited with code {ret.returncode} ===")
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/external/external_test_hive_reset.py",
"license": "MIT License",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:extra/thunder/amd/fa.py | import math, pathlib, functools, struct
from tinygrad import Device, Tensor
from tinygrad.dtype import DTypeLike, dtypes
from tinygrad.helpers import DEBUG
from tinygrad.renderer import Estimates
from tinygrad.runtime.support.compiler_amd import HIPCCCompiler
from tinygrad.runtime.support.elf import elf_loader
from tinygrad.uop.ops import UOp, Ops, KernelInfo
def _sharded_empty(shape:tuple, ref:Tensor, axis:int|None, dtype:DTypeLike|None=None) -> Tensor:
  """Allocate an uninitialized Tensor of global `shape`, placed/sharded like `ref`.

  shape: global shape of the buffer to create (per-device shape is derived below).
  ref: tensor whose device placement (and shard axis, when `axis` is None) is copied.
  axis: axis to shard on; None reuses `ref`'s shard axis.
  dtype: dtype for the new tensor; None reuses `ref.dtype`.
  """
  # fixes: `shape` was annotated as Tensor (it is a shape tuple), and the
  # shard-axis expression was computed twice (once as `shard_axis`, once as `axis`)
  dtype = dtype or ref.dtype
  # single device: plain empty tensor, no sharding bookkeeping needed
  if not isinstance(ref.device, tuple): return Tensor.empty(*shape, dtype=dtype, device=ref.device)
  shard_axis = ref.uop.axis if axis is None else axis
  # each device holds 1/len(device) of the sharded axis
  shape = tuple(s // len(ref.device) if i == shard_axis else s for i, s in enumerate(shape))
  return Tensor(Tensor.empty(*shape, dtype=dtype, device=ref.device).uop.multi(shard_axis), dtype=dtype, device=ref.device)
def _sharded_empty_like(ref:Tensor, axis:int|None=None) -> Tensor:
  # shorthand: uninitialized tensor with ref's shape/dtype/device (shard axis from ref unless overridden)
  return _sharded_empty(ref.shape, ref, axis)
def flash_attention(xq, xk, xv, attn_mask:Tensor|None=None, is_causal:bool=False):
  """Causal flash attention via hand-written HIP kernels (ThunderKittens-style).

  xq/xk/xv: (B, H, N, D) query/key/value tensors; internally worked on as (B, N, H, D).
  Only is_causal=True with no attn_mask and D=128 is supported.
  Returns the attention output in the same (B, H, N, D) layout as the inputs.
  Supports single-device, data-parallel (shard axis 0), and model-parallel
  (shard axis 2 after transpose, i.e. heads) execution.
  """
  assert attn_mask is None, "attn_mask not supported"
  assert is_causal, "only causal attention supported"
  xq, xk, xv = xq.transpose(1, 2), xk.transpose(1, 2), xv.transpose(1, 2)
  B, N, H, D = xq.shape
  H_KV = xk.shape[2]
  assert D == 128, "only D=128 supported"
  num_devices = len(xq.device) if isinstance(xq.device, tuple) else 1
  # sharding mode is inferred from the multi-tensor axis: 0 = data parallel, 2 = model parallel
  is_dp = xq.uop.axis == 0
  is_mp = xq.uop.axis == 2
  # per-device sizes the kernels are compiled for
  B_local = B // num_devices if is_dp else B
  H_local = H // num_devices if is_mp else H
  H_KV_local = H_KV // num_devices if is_mp else H_KV
  # shard axis for (B, N, H, D) tensors vs. transposed (B, H, N, D) tensors
  shard_axis = 0 if is_dp else 2 if is_mp else None
  shard_axis_t = 0 if is_dp else 1 if is_mp else None
  if DEBUG >= 2: print(f"Flash Attention {B=} {B_local=} {N=} {H=} {H_local=} {H_KV=} {H_KV_local=} {D=} on {num_devices} devices, {'DP' if is_dp else 'MP' if is_mp else 'no sharding'}")
  single_device = xq.device[0] if isinstance(xq.device, tuple) else xq.device
  arch = Device[single_device].renderer.arch
  # forward outputs: attention result and the log-sum-exp row statistics needed by backward
  attn = _sharded_empty_like(xq, axis=shard_axis)
  l_vec = _sharded_empty((B, H, 1, N), xq, dtype=dtypes.float32, axis=shard_axis_t)
  def grad(dou:UOp, _) -> tuple[None, None, UOp, UOp, UOp]:
    # custom backward: pre-pass (delta + dq shuffle buffer), main pass, then dq unshuffle.
    # returns grads aligned with custom_kernel inputs (None for the two outputs).
    do = Tensor(dou, device=dou.device)
    dq_in = _sharded_empty((B, H, N, D), xq, axis=shard_axis_t)
    dq = _sharded_empty_like(xq, axis=shard_axis)
    dk = _sharded_empty_like(xk, axis=shard_axis)
    dv = _sharded_empty_like(xv, axis=shard_axis)
    # delta_vec = (do * attn).sum(-1, dtype=dtypes.float32).transpose(1, 2).unsqueeze(-2).detach()
    delta_vec = _sharded_empty((B, H, 1, N), xq, dtype=dtypes.float32, axis=shard_axis_t)
    delta_vec, dq_in = Tensor.custom_kernel(delta_vec, dq_in, attn, do, fxn=functools.partial(custom_fa_backward_pre, device=single_device, arch=arch, B=B_local, N=N, H=H_local, H_KV=H_KV_local, D=D))[:2]
    dq_in, dk, dv = Tensor.custom_kernel(dq_in, dk, dv, do, xq, xk, xv, l_vec, delta_vec, fxn=functools.partial(custom_fa_backward, device=single_device, arch=arch, B=B_local, N=N, H=H_local, H_KV=H_KV_local, D=D))[:3]
    # unshuffle dq
    dq = Tensor.custom_kernel(dq, dq_in, fxn=functools.partial(custom_fa_backward_post, device=single_device, arch=arch, B=B_local, N=N, H=H_local, H_KV=H_KV_local, D=D))[0]
    return None, None, dq.uop, dk.uop, dv.uop
  attn, l_vec = Tensor.custom_kernel(attn, l_vec, xq, xk, xv, fxn=functools.partial(custom_fa_forward, device=single_device, arch=arch, B=B_local, N=N, H=H_local, H_KV=H_KV_local, D=D), grad_fxn=grad)[:2]
  return attn.transpose(1, 2)
@functools.cache
def custom_fa_forward(o:UOp, l_vec:UOp, q:UOp, k:UOp, v:UOp, device:str, arch:str, B:int, N:int, H:int, H_KV:int, D:int):
  # Build (and cache) the PROGRAM UOp for the causal flash-attention forward
  # kernel, compiled from fa_fwd_causal.cpp with the problem sizes baked in as -D defines.
  code = (pathlib.Path(__file__).parent / "fa_fwd_causal.cpp").read_text()
  compile_args = [f"-I{(pathlib.Path(__file__).parent / 'include').as_posix()}", "-std=c++20", "-DKITTENS_CDNA4", "-DHIP_ENABLE_WARP_SYNC_BUILTINS", "-ffast-math",
                  f"-DATTN_B={B}", f"-DATTN_N={N}", f"-DATTN_H={H}", f"-DATTN_H_KV={H_KV}"]
  # launch geometry: one block per (head, chunk of query rows, batch)
  Q_BLOCK_SIZE = 32
  NUM_WARPS = 8
  NUM_THREADS = 64 * NUM_WARPS
  gsz = (H, (math.ceil((N // Q_BLOCK_SIZE) / NUM_WARPS)), B)
  lsz = (NUM_THREADS, 1, 1)
  threadIdx_x = UOp.special(lsz[0], "lidx0")
  blockIdx_x, blockIdx_y, blockIdx_z = UOp.special(gsz[0], "gidx0"), UOp.special(gsz[1], "gidx1"), UOp.special(gsz[2], "gidx2")
  el = q.dtype.itemsize
  # rough traffic/flop estimates for DEBUG output (q+o reads/writes, k+v reads, l_vec)
  mem = (2*B*N*H*D + 2*B*N*H_KV*D) * el + B*H*N * l_vec.dtype.itemsize
  estimates = Estimates(ops=2*B*H*N*N*D, lds=mem, mem=mem)
  sink = UOp.sink(o.base, l_vec.base, q.base, k.base, v.base,
                  threadIdx_x, blockIdx_x, blockIdx_y, blockIdx_z,
                  arg=KernelInfo(name="custom_fa_forward", estimates=estimates))
  lib = HIPCCCompiler(arch, compile_args).compile_cached(code)
  lib = bytearray(lib)
  # NOTE(review): patches the first dword of the kernel descriptor in .rodata to
  # 160000 — appears to be the group (LDS) segment size; confirm against the kernel source
  rodata_off = next(sh.header.sh_offset for sh in elf_loader(bytes(lib))[1] if sh.name == ".rodata")
  struct.pack_into('<I', lib, rodata_off, 160000)
  lib = bytes(lib)
  return UOp(Ops.PROGRAM,
             src=(sink, UOp(Ops.DEVICE, arg=device), UOp(Ops.LINEAR, src=(*sink.src, sink)), UOp(Ops.SOURCE, arg=code), UOp(Ops.BINARY, arg=lib)))
@functools.cache
def custom_fa_backward_pre(delta_vec:UOp, dq:UOp, o:UOp, do:UOp, device:str, arch:str, B:int, N:int, H:int, H_KV:int, D:int):
  # Build (and cache) the PROGRAM UOp for the backward pre-pass (fa_bwd_pre.cpp):
  # computes delta_vec from o/do and prepares the dq buffer for the main backward kernel.
  code = (pathlib.Path(__file__).parent / "fa_bwd_pre.cpp").read_text()
  compile_args = [f"-I{(pathlib.Path(__file__).parent / 'include').as_posix()}", "-std=c++20", "-DKITTENS_CDNA4", "-DHIP_ENABLE_WARP_SYNC_BUILTINS", "-ffast-math",
                  f"-DATTN_B={B}", f"-DATTN_N={N}", f"-DATTN_H={H}"]
  # launch geometry: one block per (batch, head, slice of query/output rows)
  DOT_SLICE_QO = 16
  NUM_WARPS = 4
  NUM_THREADS = 64 * NUM_WARPS
  gsz = (B, H, N // (DOT_SLICE_QO * NUM_WARPS))
  lsz = (NUM_THREADS, 1, 1)
  threadIdx_x = UOp.special(lsz[0], "lidx0")
  blockIdx_x, blockIdx_y, blockIdx_z = UOp.special(gsz[0], "gidx0"), UOp.special(gsz[1], "gidx1"), UOp.special(gsz[2], "gidx2")
  el = o.dtype.itemsize
  # rough traffic/flop estimates for DEBUG output
  mem = 3*B*H*N*D * el + B*H*N * delta_vec.dtype.itemsize
  estimates = Estimates(ops=2*B*H*N*D, lds=mem, mem=mem)
  sink = UOp.sink(delta_vec.base, dq.base, o.base, do.base,
                  threadIdx_x, blockIdx_x, blockIdx_y, blockIdx_z,
                  arg=KernelInfo(name="custom_fa_backward_pre", estimates=estimates))
  lib = HIPCCCompiler(arch, compile_args).compile_cached(code)
  lib = bytearray(lib)
  # NOTE(review): same .rodata/kernel-descriptor patch as custom_fa_forward (160000 bytes) — confirm
  rodata_off = next(sh.header.sh_offset for sh in elf_loader(bytes(lib))[1] if sh.name == ".rodata")
  struct.pack_into('<I', lib, rodata_off, 160000)
  lib = bytes(lib)
  return UOp(Ops.PROGRAM,
             src=(sink, UOp(Ops.DEVICE, arg=device), UOp(Ops.LINEAR, src=(*sink.src, sink)), UOp(Ops.SOURCE, arg=code), UOp(Ops.BINARY, arg=lib)))
@functools.cache
def custom_fa_backward(dq:UOp, dk:UOp, dv:UOp, do:UOp, q:UOp, k:UOp, v:UOp, l_vec:UOp, delta_vec:UOp, device:str, arch:str, B:int, N:int, H:int, H_KV:int, D:int):
  # Build (and cache) the PROGRAM UOp for the main backward kernel (fa_bwd_causal.cpp):
  # consumes forward stats (l_vec) and the pre-pass delta_vec, produces dq/dk/dv.
  code = (pathlib.Path(__file__).parent / "fa_bwd_causal.cpp").read_text()
  compile_args = [f"-I{(pathlib.Path(__file__).parent / 'include').as_posix()}", "-std=c++20", "-DKITTENS_CDNA4", "-DHIP_ENABLE_WARP_SYNC_BUILTINS", "-ffast-math",
                  f"-DATTN_B={B}", f"-DATTN_N={N}", f"-DATTN_H={H}", f"-DATTN_H_KV={H_KV}"]
  # launch geometry: one block per (kv head, chunk of kv rows, batch)
  BLOCK_SIZE_KV = 256
  NUM_WARPS = 4
  NUM_THREADS = 64 * NUM_WARPS
  gsz = (H_KV, N // BLOCK_SIZE_KV, B)
  lsz = (NUM_THREADS, 1, 1)
  threadIdx_x = UOp.special(lsz[0], "lidx0")
  blockIdx_x, blockIdx_y, blockIdx_z = UOp.special(gsz[0], "gidx0"), UOp.special(gsz[1], "gidx1"), UOp.special(gsz[2], "gidx2")
  el = q.dtype.itemsize
  # rough traffic/flop estimates for DEBUG output
  mem = (3*B*H*N*D + 4*B*H_KV*N*D) * el + 2*B*H*N * l_vec.dtype.itemsize
  estimates = Estimates(ops=5*B*H*N*N*D, lds=mem, mem=mem)
  sink = UOp.sink(dq.base, dk.base, dv.base, do.base, q.base, k.base, v.base, l_vec.base, delta_vec.base,
                  threadIdx_x, blockIdx_x, blockIdx_y, blockIdx_z,
                  arg=KernelInfo(name="custom_fa_backward", estimates=estimates))
  lib = HIPCCCompiler(arch, compile_args).compile_cached(code)
  lib = bytearray(lib)
  # NOTE(review): same .rodata/kernel-descriptor patch as custom_fa_forward (160000 bytes) — confirm
  rodata_off = next(sh.header.sh_offset for sh in elf_loader(bytes(lib))[1] if sh.name == ".rodata")
  struct.pack_into('<I', lib, rodata_off, 160000)
  lib = bytes(lib)
  return UOp(Ops.PROGRAM,
             src=(sink, UOp(Ops.DEVICE, arg=device), UOp(Ops.LINEAR, src=(*sink.src, sink)), UOp(Ops.SOURCE, arg=code), UOp(Ops.BINARY, arg=lib)))
@functools.cache
def custom_fa_backward_post(dq_out:UOp, dq_in:UOp, device:str, arch:str, B:int, N:int, H:int, H_KV:int, D:int):
  # Build (and cache) the PROGRAM UOp for the backward post-pass (fa_bwd_post.cpp):
  # unshuffles the intermediate dq buffer into the final dq layout. H_KV/D are
  # accepted for signature symmetry with the other custom_fa_* builders.
  code = (pathlib.Path(__file__).parent / "fa_bwd_post.cpp").read_text()
  compile_args = [f"-I{(pathlib.Path(__file__).parent / 'include').as_posix()}", "-std=c++20", "-DKITTENS_CDNA4", "-DHIP_ENABLE_WARP_SYNC_BUILTINS", "-ffast-math",
                  f"-DATTN_B={B}", f"-DATTN_N={N}", f"-DATTN_H={H}"]
  # launch geometry: one block per (batch, head, slice of rows); mirrors the pre-pass
  DOT_SLICE_QO = 16
  NUM_WARPS = 4
  NUM_THREADS = 64 * NUM_WARPS
  gsz = (B, H, N // (DOT_SLICE_QO * NUM_WARPS))
  lsz = (NUM_THREADS, 1, 1)
  threadIdx_x = UOp.special(lsz[0], "lidx0")
  blockIdx_x, blockIdx_y, blockIdx_z = UOp.special(gsz[0], "gidx0"), UOp.special(gsz[1], "gidx1"), UOp.special(gsz[2], "gidx2")
  el = dq_out.dtype.itemsize
  # pure copy/permute: memory traffic only, no flop estimate
  mem = 2*B*H*N*D * el
  estimates = Estimates(lds=mem, mem=mem)
  sink = UOp.sink(dq_out.base, dq_in.base,
                  threadIdx_x, blockIdx_x, blockIdx_y, blockIdx_z,
                  arg=KernelInfo(name="custom_fa_backward_post", estimates=estimates))
  lib = HIPCCCompiler(arch, compile_args).compile_cached(code)
  lib = bytearray(lib)
  # NOTE(review): same .rodata/kernel-descriptor patch as custom_fa_forward (160000 bytes) — confirm
  rodata_off = next(sh.header.sh_offset for sh in elf_loader(bytes(lib))[1] if sh.name == ".rodata")
  struct.pack_into('<I', lib, rodata_off, 160000)
  lib = bytes(lib)
  return UOp(Ops.PROGRAM,
             src=(sink, UOp(Ops.DEVICE, arg=device), UOp(Ops.LINEAR, src=(*sink.src, sink)), UOp(Ops.SOURCE, arg=code), UOp(Ops.BINARY, arg=lib)))
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "extra/thunder/amd/fa.py",
"license": "MIT License",
"lines": 152,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
tinygrad/tinygrad:tinygrad/renderer/amd/elf.py | # minimal amdgpu elf packer
import ctypes
from tinygrad.helpers import ceildiv, round_up
from tinygrad.uop.ops import UOp, Ops
from tinygrad.runtime.autogen import amdgpu_kd, hsa, libc
from tinygrad.renderer.amd.dsl import Reg, FixedBitField
from tinygrad.runtime.autogen.amd.common import OpType
# instructions used for padding
from tinygrad.runtime.autogen.amd.rdna3.ins import s_code_end # same encoding as RDNA4
from tinygrad.runtime.autogen.amd.cdna.ins import s_nop as s_nop_cdna
# map gfx prefix -> ISA generation used for instruction encodings
_arch_map = {"gfx9": "cdna", "gfx10": "rdna3", "gfx11": "rdna3", "gfx12": "rdna4"}

def do_assemble_amd(ctx, prg:UOp, lin:UOp) -> UOp:
  """Pack assembled AMD GPU instructions into a minimal HSA-loadable ELF.

  ctx: renderer context providing the target `arch` string (e.g. "gfx1100").
  prg: PROGRAM UOp; its sink is scanned for buffer/var counts, LDS usage and grid dims.
  lin: LINEAR UOp whose sources carry the instruction objects in `arg`.
  Returns `prg` with SOURCE (disassembly text) and BINARY (ELF bytes) sources attached.
  """
  insts = [u.arg for u in lin.src]
  # ** scan for max vgpr/sgpr/accvgpr
  max_vgpr, max_sgpr, max_accvgpr = 0, 0, 0
  _ACCVGPR_TYPES = {OpType.OPR_ACCVGPR, OpType.OPR_SRC_ACCVGPR}
  for inst in insts:
    # build set of field names that are AccVGPR for this instruction
    accvgpr_fields: set[str] = set()
    for opr_name, (_, _, opr_type) in inst.operands.items():
      if opr_type in _ACCVGPR_TYPES: accvgpr_fields.add(opr_name)
      elif opr_type in {OpType.OPR_VGPR_OR_ACCVGPR, OpType.OPR_SRC_VGPR_OR_ACCVGPR, OpType.OPR_SRC_VGPR_OR_ACCVGPR_OR_CONST}:
        # dual-use operand: acc_cd bit selects the AccVGPR file
        if getattr(inst, 'acc_cd', 0) == 1: accvgpr_fields.add(opr_name)
    for name, field in inst._fields:
      if isinstance(field, FixedBitField): continue
      val = getattr(inst, name)
      if not isinstance(val, Reg): continue
      # register encoding: offsets 256-511 are VGPRs/AccVGPRs, < 106 are SGPRs
      if 256 <= val.offset < 512:
        if name in accvgpr_fields: max_accvgpr = max(max_accvgpr, (val.offset - 256) + val.sz)
        else: max_vgpr = max(max_vgpr, (val.offset - 256) + val.sz)
      elif val.offset < 106: max_sgpr = max(max_sgpr, val.offset + val.sz)
  # ** scan sink for metadata
  sink, n_bufs, n_vars, lds_size, gids = prg.src[0], 0, 0, 0, set()
  for u in sink.toposort():
    if u.op is Ops.PARAM: n_bufs += 1
    elif u.op is Ops.DEFINE_VAR: n_vars += 1
    elif u.op is Ops.DEFINE_LOCAL: lds_size += u.ptrdtype.size * u.ptrdtype.base.itemsize
    elif u.op is Ops.SPECIAL and u.arg.startswith("gidx"): gids.add(int(u.arg[-1]))
  src = "\n".join(str(inst) for inst in insts)
  code_bytes = b"".join(inst.to_bytes() for inst in insts)
  arch = next(v for k, v in _arch_map.items() if ctx.arch.startswith(k))
  is_cdna, is_rdna4 = arch == "cdna", arch == "rdna4"
  # ** pad text to ISA alignment
  padding_inst = (s_nop_cdna(0) if is_cdna else s_code_end()).to_bytes()
  text = code_bytes + padding_inst * ((hsa.AMD_ISA_ALIGN_BYTES - len(code_bytes) % hsa.AMD_ISA_ALIGN_BYTES) % hsa.AMD_ISA_ALIGN_BYTES)
  text_offset = round_up(ctypes.sizeof(libc.Elf64_Ehdr), hsa.AMD_ISA_ALIGN_BYTES)
  # ** pack kernel descriptor (rodata)
  # CDNA: total VGPRs = regular VGPRs + AccVGPRs, each rounded to granularity of 4
  accum_offset = round_up(max_vgpr, 4) if max_accvgpr > 0 else 0
  next_free_vgpr = round_up(accum_offset + max_accvgpr, 8) if max_accvgpr > 0 else round_up(max_vgpr, 8)
  next_free_sgpr = round_up(max_sgpr, 8)
  vgpr_granule = max(0, (next_free_vgpr + 7) // 8 - 1)
  # CDNA: add 6 for VCC(2) + FLAT_SCRATCH(2) + XNACK_MASK(2), next_free_sgpr is unused in RDNA.
  sgpr_granule = max(0, ceildiv(next_free_sgpr + 6, 8) - 1) if is_cdna else 0
  desc = amdgpu_kd.llvm_amdhsa_kernel_descriptor_t()
  desc.group_segment_fixed_size = lds_size
  # kernarg layout: 8 bytes per buffer pointer, 4 per int variable
  desc.kernarg_size = n_bufs * 8 + n_vars * 4
  # entry point sits immediately before the descriptor in memory, hence negative offset
  desc.kernel_code_entry_byte_offset = -len(text)
  # https://llvm.org/docs/AMDGPUUsage.html#amdgpu-amdhsa-compute-pgm-rsrc1-gfx6-gfx12-table
  # NOTE: CU mode is the default
  desc.compute_pgm_rsrc1 = (vgpr_granule << amdgpu_kd.COMPUTE_PGM_RSRC1_GRANULATED_WORKITEM_VGPR_COUNT_SHIFT |
                            sgpr_granule << amdgpu_kd.COMPUTE_PGM_RSRC1_GRANULATED_WAVEFRONT_SGPR_COUNT_SHIFT |
                            3 << amdgpu_kd.COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64_SHIFT |
                            (0 if is_rdna4 else 1) << amdgpu_kd.COMPUTE_PGM_RSRC1_GFX6_GFX11_ENABLE_DX10_CLAMP_SHIFT |
                            (0 if is_rdna4 else 1) << amdgpu_kd.COMPUTE_PGM_RSRC1_GFX6_GFX11_ENABLE_IEEE_MODE_SHIFT |
                            (0 if is_cdna else 1) << amdgpu_kd.COMPUTE_PGM_RSRC1_GFX10_PLUS_MEM_ORDERED_SHIFT)
  desc.compute_pgm_rsrc2 = (2 << amdgpu_kd.COMPUTE_PGM_RSRC2_USER_SGPR_COUNT_SHIFT |
                            int(0 in gids) << amdgpu_kd.COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X_SHIFT |
                            int(1 in gids) << amdgpu_kd.COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Y_SHIFT |
                            int(2 in gids) << amdgpu_kd.COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Z_SHIFT)
  desc.kernel_code_properties = (1 << amdgpu_kd.KERNEL_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR_SHIFT |
                                 (0 if is_cdna else 1) << amdgpu_kd.KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32_SHIFT)
  if is_cdna and max_accvgpr > 0:
    desc.compute_pgm_rsrc3 = max(0, accum_offset // 4 - 1) << amdgpu_kd.COMPUTE_PGM_RSRC3_GFX90A_ACCUM_OFFSET_SHIFT
  rodata = bytes(desc)
  # ** pack ELF
  # build section name string table (.text, .rodata, .strtab)
  sh_names:list[int] = []
  strtab = bytearray(b"\x00")
  for name in [".text", ".rodata", ".strtab"]:
    sh_names.append(len(strtab))
    strtab += name.encode("ascii") + b"\x00"
  rodata_offset = round_up(text_offset + (text_size := len(text)), hsa.AMD_KERNEL_CODE_ALIGN_BYTES)
  strtab_offset = rodata_offset + (rodata_size := len(rodata))
  shdr_offset = strtab_offset + (strtab_size := len(strtab))
  # (sh_type, sh_flags, sh_addr, sh_offset, sh_size) per section
  sections = [(libc.SHT_PROGBITS, libc.SHF_ALLOC | libc.SHF_EXECINSTR, text_offset, text_offset, text_size),
              (libc.SHT_PROGBITS, libc.SHF_ALLOC, rodata_offset, rodata_offset, rodata_size),
              (libc.SHT_STRTAB, 0, 0, strtab_offset, strtab_size)]
  shdrs = (libc.Elf64_Shdr * len(sections))()
  for i, s in enumerate(sections): shdrs[i] = libc.Elf64_Shdr(sh_names[i], *s)
  ehdr = libc.Elf64_Ehdr()
  ehdr.e_shoff, ehdr.e_shnum, ehdr.e_shstrndx = shdr_offset, len(sections), 2
  # assemble the final image: ehdr | text | rodata | strtab | section headers
  elf = bytearray(shdr_offset + ctypes.sizeof(shdrs))
  elf[0:ctypes.sizeof(ehdr)] = bytes(ehdr)
  elf[text_offset:text_offset+text_size] = text
  elf[rodata_offset:rodata_offset+rodata_size] = rodata
  elf[strtab_offset:strtab_offset+strtab_size] = strtab
  elf[shdr_offset:shdr_offset+ctypes.sizeof(shdrs)] = bytes(shdrs)
  binary = bytes(elf)
  return prg.replace(src=prg.src[:3]+(UOp(Ops.SOURCE, arg=src), UOp(Ops.BINARY, arg=binary)))
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "tinygrad/renderer/amd/elf.py",
"license": "MIT License",
"lines": 99,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
tinygrad/tinygrad:test/external/external_test_schedule_scaling.py | import unittest, time
from tinygrad import Tensor
class TestScheduleScaling(unittest.TestCase):
"""Test that .schedule() scales linearly with graph size (no O(n^2) behavior)."""
def _assert_linear(self, fn, n_small=200, n_large=1000):
  """Assert schedule time scales at most ~linearly: time(n_large)/time(n_small) should be close to n_large/n_small."""
  # warmup so one-time costs (imports, caches) don't pollute the small measurement
  fn(n_small).schedule()
  # best of three runs each to dampen timer noise
  t_small = min(self._time_schedule(fn, n_small) for _ in range(3))
  t_large = min(self._time_schedule(fn, n_large) for _ in range(3))
  size_ratio = n_large / n_small  # 5.0 with the defaults
  time_ratio = t_large / t_small
  # O(n) -> time_ratio ~ 5, O(n^2) -> time_ratio ~ 25. threshold at 10 catches n^2 with margin.
  self.assertLess(time_ratio / size_ratio, 2.0,
      f"schedule appears superlinear: n={n_small} {t_small*1e3:.1f}ms, n={n_large} {t_large*1e3:.1f}ms "
      f"(time grew {time_ratio:.1f}x for {size_ratio:.0f}x size, per-node ratio {time_ratio/size_ratio:.2f})")
@staticmethod
def _time_schedule(fn, n) -> float:
  # wall-clock seconds for building an n-node graph with fn and scheduling it
  st = time.perf_counter()
  fn(n).schedule()
  return time.perf_counter() - st
# *** rangeify: ending_ranges accumulation and consumer merge ***
# ending_ranges accumulation via sum([], []) and nested scan in run_rangeify.
# this creates reduce ops whose ending_ranges lists grow with graph depth, causing O(n^2) list copies.
def test_multi_reduce_scaling(self):
def multi_reduce(n):
x = Tensor.empty(256, 256)
for _ in range(n):
s = x.sum(axis=-1, keepdim=True)
x = x + s + s
return x
self._assert_linear(multi_reduce)
# reduce+elementwise chain stresses ending_ranges propagation and post-rangeify rewrites
def test_wide_reduce_scaling(self):
def wide_reduce(n):
x = Tensor.empty(256, 256)
for _ in range(n):
x = x + x.sum(axis=-1, keepdim=True)
return x
self._assert_linear(wide_reduce)
# expand ops inject into ending_ranges via the EXPAND path in run_rangeify
def test_expand_reduce_scaling(self):
def expand_reduce(n):
x = Tensor.empty(256, 1)
for _ in range(n):
y = x.expand(256, 256)
x = (y + y).sum(axis=-1, keepdim=True)
return x
self._assert_linear(expand_reduce)
# *** graph_rewrite: multi-consumer DAG patterns ***
# multi-consumer diamond pattern (fan-out/fan-in) stresses consumer_rngs merge in run_rangeify
def test_diamond_scaling(self):
def diamond(n):
x = Tensor.empty(256, 256)
for _ in range(n):
a = x + 1
b = x + 2
x = a + b
return x
self._assert_linear(diamond)
# elementwise chain baseline — should be trivially O(n)
def test_chain_scaling(self):
def chain(n):
x = Tensor.empty(256, 256)
for _ in range(n): x = x + 1
return x
self._assert_linear(chain)
# softmax has multi-consumer structure (x used for max, exp, and sum), stresses graph_rewrite on DAGs
def test_softmax_scaling(self):
def softmax_chain(n):
x = Tensor.empty(64, 256)
for _ in range(n): x = x.softmax(axis=-1)
return x
self._assert_linear(softmax_chain)
# *** post-rangeify: symbolic rewrites, kernel splitting ***
# matmul chain stresses symbolic+reduce_collapse and split_store
def test_matmul_scaling(self):
def matmul_chain(n):
xs = [Tensor.empty(32, 32) for _ in range(n + 1)]
result = xs[0]
for i in range(n): result = result @ xs[i + 1]
return result
self._assert_linear(matmul_chain)
# contiguous chain stresses remove_bufferize callbacks (toposort per BUFFERIZE node)
def test_contiguous_scaling(self):
def contiguous_chain(n):
x = Tensor.empty(256, 256)
for _ in range(n): x = (x + 1).contiguous()
return x
self._assert_linear(contiguous_chain)
# *** schedule: AFTER handling, assign ***
# assign chain stresses AFTER cycle detection (toposort inside toposort loop in get_rangeify_map)
def test_assign_scaling(self):
def assign_chain(n):
x = Tensor.empty(256, 256).realize()
for _ in range(n): x.assign(x + 1)
return x
self._assert_linear(assign_chain)
# layernorm has multi-consumer reduces (mean reused in variance), stresses consumer_rngs merge and symbolic rewrites
def test_layernorm_scaling(self):
def layernorm_chain(n):
x = Tensor.empty(64, 256)
for _ in range(n):
mean = x.mean(axis=-1, keepdim=True)
var = ((x - mean) ** 2).mean(axis=-1, keepdim=True)
x = (x - mean) / (var + 1e-5).sqrt()
return x
self._assert_linear(layernorm_chain)
# concat chain stresses MSTACK/MSELECT handling and wide SINK construction
def test_concat_scaling(self):
def concat_chain(n):
parts = [Tensor.empty(4, 256) + i for i in range(n)]
return parts[0].cat(*parts[1:])
self._assert_linear(concat_chain)
if __name__ == '__main__':
unittest.main(verbosity=2)
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/external/external_test_schedule_scaling.py",
"license": "MIT License",
"lines": 115,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:test/null/test_process_replay.py | import unittest
from tinygrad import Tensor, Device
from tinygrad.engine.realize import get_program
from tinygrad.codegen.opt import Opt, OptOps
from test.external.process_replay.process_replay import replay_get_program
# matrix dimension used to build the single test kernel
N = 16
class TestProcessReplay(unittest.TestCase):
  """replay_get_program must reproduce get_program's output for each of the three opts modes."""
  @classmethod
  def setUpClass(cls):
    # one matmul AST and the default device renderer, shared by all tests
    cls.ast = (Tensor.empty(N, N) @ Tensor.empty(N, N)).schedule()[-1].ast
    cls.renderer = Device[Device.DEFAULT].renderer
  def test_replay_no_opts(self):
    # leaving opts unset takes the default heuristic path
    program = get_program(self.ast, self.renderer)
    expected, actual, _ = replay_get_program(program, self.ast, self.renderer)
    self.assertEqual(expected, actual)
  def test_replay_empty_opts(self):
    # an explicit empty list means "apply zero opts" (unoptimized kernel)
    program = get_program(self.ast, self.renderer, opts=[])
    expected, actual, _ = replay_get_program(program, self.ast, self.renderer, opts=[])
    self.assertEqual(expected, actual)
  def test_replay_with_opt(self):
    # a concrete opt list must round-trip through replay as well
    chosen = [Opt(OptOps.UPCAST, 0, 4)]
    program = get_program(self.ast, self.renderer, opts=chosen)
    expected, actual, _ = replay_get_program(program, self.ast, self.renderer, opts=chosen)
    self.assertEqual(expected, actual)
if __name__ == '__main__':
  unittest.main(verbosity=2)
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/null/test_process_replay.py",
"license": "MIT License",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:test/external/external_fuzz_qcom_cpu_cache.py | #!/usr/bin/env python3
from tinygrad.tensor import Tensor
import numpy as np

# Fuzz loop for QCOM reading CPU-owned memory in place via Tensor.from_blob.
# Each iteration allocates a fresh numpy buffer, prints the CPU-side result, then has the
# QCOM device read the same memory zero-copy and prints its result. Presumably the two
# printed lines should match on every iteration; a mismatch would indicate a CPU-cache
# coherency problem -- TODO confirm against the QCOM runtime. Intentionally infinite:
# run it manually and interrupt with Ctrl-C.
while True:
  arr = np.ones(1000000, dtype=np.uint8)
  print(f"numpy: {(arr + 1)[:10]}")
  # raw host pointer to the numpy data; arr must stay alive while the device reads it
  ptr = arr.ctypes.data
  tensor = Tensor.from_blob(ptr, arr.shape, dtype='uint8', device='QCOM').realize() + 1
  print(f"from_blob: {tensor.numpy()[:10]}")
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/external/external_fuzz_qcom_cpu_cache.py",
"license": "MIT License",
"lines": 9,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:test/null/test_attention.py | import unittest
from tinygrad import Tensor, dtypes, TinyJit, UOp
from tinygrad.apps.llm import apply_rope as apply_rope_new, precompute_freqs_cis
def apply_rope(x:Tensor, start_pos:int):
  """Legacy-style rope wrapper: rebuild freqs_cis for absolute positions [start_pos, start_pos+T) and delegate."""
  _bsz, _heads, seqlen, head_dim = x.shape
  # the cache is keyed on (dim, length); clear it so each call recomputes from scratch
  precompute_freqs_cis.cache_clear()
  freqs = precompute_freqs_cis(head_dim, start_pos + seqlen)[start_pos:start_pos + seqlen]
  return apply_rope_new(x, freqs)
class TestAttention(unittest.TestCase):
  def test_half_qkv_buffers(self):
    """Half-precision attention over realized q/k/v should lower to exactly 4 kernels."""
    batch, seqlen, dim = 10, 4, 100
    q, k, v = (Tensor.ones(batch, seqlen, dim, dtype=dtypes.half).contiguous().realize() for _ in range(3))
    out = q.scaled_dot_product_attention(k, v)
    # attention has 4 kernels now
    self.assertEqual(len(out.schedule()), 4)
  def test_apply_rope_jit_prune(self):
    """With prune=True the JIT should collapse the captured cache down to a single kernel."""
    def rope_fn(x_in, pos): return apply_rope(x_in, pos)
    jit_plain = TinyJit(rope_fn)
    jit_pruned = TinyJit(rope_fn, prune=True)
    start_pos = UOp.variable("start_pos", 0, 100)
    # three calls so both jits are fully captured
    for _ in range(3):
      jit_plain(Tensor.randn(1, 2, 4, 8, dtype=dtypes.float32), start_pos.bind(1))
      jit_pruned(Tensor.randn(1, 2, 4, 8, dtype=dtypes.float32), start_pos.bind(1))
    plain_kernels = len(jit_plain.captured.jit_cache)
    pruned_kernels = len(jit_pruned.captured.jit_cache)
    self.assertGreater(plain_kernels, pruned_kernels)
    self.assertGreaterEqual(plain_kernels, 2)
    self.assertEqual(pruned_kernels, 1)
if __name__ == '__main__':
  unittest.main()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/null/test_attention.py",
"license": "MIT License",
"lines": 33,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:test/null/test_const_folding.py | import unittest, itertools, math
from tinygrad import Tensor, dtypes, Context
from tinygrad.dtype import DType, ConstType
from tinygrad.uop.ops import Ops, UOp
from tinygrad.codegen import full_rewrite_to_sink
import numpy as np
def _check_ast_count(desired_count:int, t:Tensor):
  """Schedule t exactly once; desired_count is currently ignored.

  Kernel-count assertions are no longer supported, so desired_count is kept only
  as documentation at the call sites; the old check remains commented out below
  for reference. Calling this still matters: it forces scheduling, which has
  side effects (everything can be scheduled only once).
  """
  # NOTE: this has side effect because everything can be scheduled only once
  schedule = t.schedule()
  asts = [s for s in schedule if s.ast.op is Ops.SINK]  # kept for the reference assert below
  # NOT SUPPORTED ANYMORE
  #assert len(asts) == desired_count, f"{len(asts)} != {desired_count}"
class TestUnaryOpsConstFolding(unittest.TestCase):
  """Unary ops on all-constant tensors should fold away (desired count 0)."""
  def test_all_consts_ops(self):
    _check_ast_count(0, Tensor.ones(4).exp())
    _check_ast_count(0, Tensor.ones(4).sqrt())
    _check_ast_count(0, Tensor.ones(4) + Tensor.ones(4))
    _check_ast_count(0, Tensor.ones(4) / Tensor.ones(4))
  def test_cast(self):
    _check_ast_count(0, Tensor.ones(4).cast(dtypes.int16))
    # casting a negative constant to an unsigned dtype should still fold
    _check_ast_count(0, Tensor.full(4, fill_value=-1).cast(dtypes.uint16))
  def test_neg_folding(self):
    # double negation and neg/mul(-1) combinations all cancel out
    _check_ast_count(0, Tensor([1, 2, 3]).mul(-1).neg())
    _check_ast_count(0, Tensor([1, 2, 3]).neg().mul(-1))
    _check_ast_count(0, Tensor([1, 2, 3]).neg().neg())
  def test_neg_realized_no_fold(self):
    # once the input is realized into a buffer, neg cannot fold and needs one kernel
    x = Tensor.randn(32, 32)
    x = x.clip(0, 1).realize()
    _check_ast_count(1, x.neg())
class TestBinaryOpsConstFolding(unittest.TestCase):
  """Identity/annihilator operands (x+0, x*0, x*1, x/1, x**0, x**1, 1**x) should fold to constants.

  Each method checks one operator with the special operand as a Python literal or as a
  constant tensor, on both sides where the op is commutative.
  """
  def test_add_literal_zero(self):
    _check_ast_count(0, Tensor([1.0, 2, 3, 4]) + 0)
  def test_add_tensor_zero(self):
    _check_ast_count(0, Tensor([1.0, 2, 3, 4]) + Tensor.zeros(4))
  def test_literal_zero_add(self):
    _check_ast_count(0, 0 + Tensor([1.0, 2, 3, 4]))
  def test_tensor_zero_add(self):
    _check_ast_count(0, Tensor.zeros(4) + Tensor([1.0, 2, 3, 4]))
  def test_sub_literal_zero(self):
    _check_ast_count(0, Tensor([1.0, 2, 3, 4]) - 0)
  def test_sub_tensor_zero(self):
    _check_ast_count(0, Tensor([1.0, 2, 3, 4]) - Tensor.zeros(4))
  def test_mul_literal_zero(self):
    _check_ast_count(0, Tensor([1.0, 2, 3, 4]) * 0)
  def test_mul_tensor_zero(self):
    _check_ast_count(0, Tensor([1.0, 2, 3, 4]) * Tensor.zeros(4))
  def test_literal_zero_mul(self):
    # FIX: was `0 * Tensor([1.0, 2, 3, 4]) * 0` -- the trailing `* 0` was a copy-paste slip;
    # the method name and the symmetric test_literal_one_mul both call for a single left literal.
    _check_ast_count(0, 0 * Tensor([1.0, 2, 3, 4]))
  def test_tensor_zero_mul(self):
    _check_ast_count(0, Tensor.zeros(4) * Tensor([1.0, 2, 3, 4]))
  def test_mul_literal_one(self):
    _check_ast_count(0, Tensor([1.0, 2, 3, 4]) * 1)
  def test_mul_tensor_one(self):
    _check_ast_count(0, Tensor([1.0, 2, 3, 4]) * Tensor.ones(4))
  def test_literal_one_mul(self):
    _check_ast_count(0, 1 * Tensor([1.0, 2, 3, 4]))
  def test_tensor_one_mul(self):
    _check_ast_count(0, Tensor.ones(4) * Tensor([1.0, 2, 3, 4]))
  def test_bool_tensor_mul_bool(self):
    _check_ast_count(0, Tensor([True, False]) * True)
    _check_ast_count(0, Tensor([True, False]) * False)
  def test_bool_mul_bool_tensor(self):
    _check_ast_count(0, True * Tensor([True, False]))
    _check_ast_count(0, False * Tensor([True, False]))
  def test_div_literal_one(self):
    _check_ast_count(0, Tensor([1.0, 2, 3, 4]) / 1)
  def test_div_tensor_one(self):
    _check_ast_count(0, Tensor([1.0, 2, 3, 4]) / Tensor.ones(4))
  def test_idiv_literal_one(self):
    _check_ast_count(0, Tensor([1, 2, 3, 4]) // 1)
  def test_idiv_tensor_one(self):
    _check_ast_count(0, Tensor([1, 2, 3, 4]) // Tensor.ones(4, dtype=dtypes.int32))
  def test_pow_literal_zero(self):
    _check_ast_count(0, Tensor([1.0, 2, 3, 4]) ** 0)
  def test_pow_tensor_zero(self):
    _check_ast_count(0, Tensor([1.0, 2, 3, 4]) ** Tensor.zeros(4))
  def test_pow_literal_one(self):
    _check_ast_count(0, Tensor([1.0, 2, 3, 4]) ** 1)
  def test_pow_tensor_one(self):
    _check_ast_count(0, Tensor([1.0, 2, 3, 4]) ** Tensor.ones(4))
  def test_literal_one_pow(self):
    _check_ast_count(0, 1 ** Tensor([1.0, 2, 3, 4]))
  def test_tensor_one_pow(self):
    _check_ast_count(0, Tensor.ones(4) ** Tensor([1.0, 2, 3, 4]))
class TestBitcastConstFolding(unittest.TestCase):
  def test_scalar_bitcast(self):
    """Bitcasting a scalar CONST between same-width dtypes must fold to the tabulated CONST."""
    def check(cases: dict[DType, ConstType]):
      # every (source, destination) pair in one table encodes the same bit pattern
      for (from_dt, from_v), (to_dt, to_v) in itertools.product(cases.items(), repeat=2):
        if math.isnan(from_v): continue  # nan is only meaningful as an expected destination value
        folded = full_rewrite_to_sink(UOp.const(from_dt, from_v).bitcast(to_dt).sink()).src[0]
        msg = f"{from_dt} -> {to_dt} ({from_v} -> {to_v})"
        self.assertEqual(folded.op, Ops.CONST, msg)
        self.assertEqual(folded.dtype, to_dt, msg)
        np.testing.assert_equal(folded.arg, to_v, msg)
    check({dtypes.int8: 0, dtypes.uint8: 0, dtypes.bool: False})
    check({dtypes.int8: 1, dtypes.uint8: 1, dtypes.bool: True})
    check({dtypes.int8: -1, dtypes.uint8: 2**8-1})
    check({dtypes.int16: -1, dtypes.uint16: 2**16-1, dtypes.float16: float('nan')})
    check({dtypes.int32: -1, dtypes.uint32: 2**32-1, dtypes.float32: float('nan')})
    check({dtypes.int64: -1, dtypes.uint64: 2**64-1, dtypes.float64: float('nan')})
    check({dtypes.int8: -2**7, dtypes.uint8: 2**7})
    check({dtypes.int16: -2**15, dtypes.uint16: 2**15})
    check({dtypes.int32: -2**31, dtypes.uint32: 2**31})
    check({dtypes.int64: -2**63, dtypes.uint64: 2**63})
    check({dtypes.int16: 13496, dtypes.uint16: 13496, dtypes.float16: 0.294921875})
    check({dtypes.int32: 1050081145, dtypes.uint32: 1050081145, dtypes.float32: 0.29485681653022766})
    check({dtypes.int64: 4598983288165178391, dtypes.uint64: 4598983288165178391, dtypes.float64: 0.29485681936461233})
  def test_vec_bitcast(self):
    """A vectorized int32 CONST bitcast to uint32 folds elementwise into a VECTORIZE of CONSTs."""
    with Context(SPEC=0):
      folded = full_rewrite_to_sink(UOp.const(dtypes.int32.vec(3), (-1, -2**31, 75)).bitcast(dtypes.uint32.vec(3)).sink()).src[0]
      self.assertEqual(folded.op, Ops.VECTORIZE)
      self.assertEqual(folded.dtype, dtypes.uint32.vec(3))
      self.assertEqual(tuple(x.arg for x in folded.src), (2**32-1, 2**31, 75))
# folds advance indexing into basic indexing
class TestIndexingConstFolding(unittest.TestCase):
  def test_scalar_index(self):
    # scalar tensor indices fold into basic indexing: a single kernel
    t = Tensor.arange(16).float().reshape(1,1,4,4).realize()
    _check_ast_count(1, t[:,:,Tensor(1),:])
    _check_ast_count(1, t[:,:,Tensor(1)+2,:])
    _check_ast_count(1, t[:,:,Tensor(1),Tensor(0)])
  def test_const_tensor_index(self):
    # TODO: these can be 0, implement const tensor folded indexing
    t = Tensor.arange(16).float().reshape(1,1,4,4).realize()
    _check_ast_count(1, t[:,:,Tensor.ones(2,1,dtype=dtypes.int),:])
    _check_ast_count(1, t[:,:,Tensor.ones(1,2,dtype=dtypes.int)+2,:])
    _check_ast_count(1, t[:,:,Tensor.ones(1,1,dtype=dtypes.int),Tensor.zeros(2,1,2,dtype=dtypes.int)])
if __name__ == '__main__':
  unittest.main()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/null/test_const_folding.py",
"license": "MIT License",
"lines": 128,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:test/null/test_dtype_spec.py | import unittest, math, struct, operator
from tinygrad.tensor import Tensor, dtypes
from tinygrad.dtype import DTYPES_DICT, truncate, float_to_fp16, float_to_bf16, _to_np_dtype, least_upper_dtype, least_upper_float
from tinygrad.device import is_dtype_supported
from tinygrad.helpers import getenv
from hypothesis import given, settings, strategies as strat
import numpy as np
import torch
# hypothesis profile: cap example count, disable the deadline, and derandomize in CI for reproducibility
settings.register_profile("my_profile", max_examples=50, deadline=None, derandomize=getenv("DERANDOMIZE_CI", False))
settings.load_profile("my_profile")
# all scalar dtypes tinygrad defines, plus the int/float subsets the current device supports
core_dtypes = list(DTYPES_DICT.values())
dtype_ints = [dt for dt in core_dtypes if dtypes.is_int(dt) and is_dtype_supported(dt)]
dtype_floats = [dt for dt in core_dtypes if dtypes.is_float(dt) and is_dtype_supported(dt)]
# largest finite magnitudes representable in the two fp8 formats
FP8E4M3_MAX = 448.0
FP8E5M2_MAX = 57344.0
def u32_to_f32(u):
  """Bit-cast a 32-bit unsigned integer to the float32 value those bits encode."""
  (value,) = struct.unpack('f', struct.pack('I', u))
  return value
def f32_to_u32(f):
  """Bit-cast a float32 value to the 32-bit unsigned integer holding its bits."""
  (bits,) = struct.unpack('I', struct.pack('f', f))
  return bits
class TestHelpers(unittest.TestCase):
  """Tests for the dtype predicates (is_int/is_float/is_unsigned), ranges, and float truncation helpers."""
  # dtype groups used to parametrize the hypothesis tests below
  signed_ints = (dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64)
  uints = (dtypes.uint8, dtypes.uint16, dtypes.uint32, dtypes.uint64)
  floats = (dtypes.float16, dtypes.float32, dtypes.float64)
  # predicates must hold for both the scalar dtype and any vectorized form of it
  @given(strat.sampled_from(signed_ints+uints), strat.integers(min_value=1, max_value=8))
  def test_is_int(self, dtype, amt):
    assert dtypes.is_int(dtype.vec(amt) if amt > 1 else dtype)
    assert not dtypes.is_float(dtype.vec(amt) if amt > 1 else dtype)
  @given(strat.sampled_from(uints), strat.integers(min_value=1, max_value=8))
  def test_is_unsigned_uints(self, dtype, amt):
    assert dtypes.is_unsigned(dtype.vec(amt) if amt > 1 else dtype)
  @given(strat.sampled_from(signed_ints), strat.integers(min_value=1, max_value=8))
  def test_is_unsigned_signed_ints(self, dtype, amt):
    assert not dtypes.is_unsigned(dtype.vec(amt) if amt > 1 else dtype)
  @given(strat.sampled_from(floats), strat.integers(min_value=1, max_value=8))
  def test_is_float(self, dtype, amt):
    assert dtypes.is_float(dtype.vec(amt) if amt > 1 else dtype)
    assert not dtypes.is_int(dtype.vec(amt) if amt > 1 else dtype)
    assert not dtypes.is_unsigned(dtype.vec(amt) if amt > 1 else dtype)
  def test_bf16_is_float(self):
    assert dtypes.is_float(dtypes.bfloat16)
  def test_fp8s_are_float(self):
    assert dtypes.is_float(dtypes.fp8e4m3)
    assert dtypes.is_float(dtypes.fp8e5m2)
  @given(strat.sampled_from([d for d in DTYPES_DICT.values() if dtypes.is_float(d) or dtypes.is_int(d)]), strat.integers(min_value=2, max_value=8))
  def test_scalar(self, dtype, amt):
    # .scalar() on a vector dtype recovers the element dtype
    assert dtype.vec(amt).scalar() == dtype
  def test_from_py(self):
    # dtype inference from Python values: bool < int < float; lists take the widest element
    assert dtypes.from_py(True) == dtypes.bool
    assert dtypes.from_py(2) == dtypes.default_int
    assert dtypes.from_py(3.0) == dtypes.default_float
    # empty sequences default to float
    assert dtypes.from_py([]) == dtypes.default_float
    assert dtypes.from_py(()) == dtypes.default_float
    assert dtypes.from_py([True]) == dtypes.bool
    assert dtypes.from_py([True, 2]) == dtypes.default_int
    assert dtypes.from_py([True, 3.0]) == dtypes.default_float
    assert dtypes.from_py([2, 3.0]) == dtypes.default_float
    assert dtypes.from_py([True, 2, 3.0]) == dtypes.default_float
    # unsupported python types must raise, not silently pick a dtype
    with self.assertRaises(RuntimeError): dtypes.from_py(None)
    with self.assertRaises(RuntimeError): dtypes.from_py([None])
    with self.assertRaises(RuntimeError): dtypes.from_py({})
    with self.assertRaises(RuntimeError): dtypes.from_py(set())
  def test_dtype_range(self):
    # min/max agree with numpy's iinfo for ints, are +/-inf for floats, and False/True for bool
    for dt in core_dtypes:
      if dtypes.is_float(dt):
        np.testing.assert_equal(dtypes.min(dt), -math.inf)
        np.testing.assert_equal(dtypes.max(dt), math.inf)
        np.testing.assert_equal(dt.min, -math.inf)
        np.testing.assert_equal(dt.max, math.inf)
      elif dtypes.is_int(dt):
        info = np.iinfo(_to_np_dtype(dt))
        np.testing.assert_equal(dtypes.min(dt), info.min)
        np.testing.assert_equal(dtypes.max(dt), info.max)
        np.testing.assert_equal(dt.min, info.min)
        np.testing.assert_equal(dt.max, info.max)
      else:
        assert dt == dtypes.bool, dt
        np.testing.assert_equal(dtypes.min(dt), False)
        np.testing.assert_equal(dtypes.max(dt), True)
        np.testing.assert_equal(dt.min, False)
        np.testing.assert_equal(dt.max, True)
  def test_dtype_range_vec(self):
    # vector dtypes share the scalar element's range
    for dt in core_dtypes:
      self.assertEqual(dt.min, dt.vec(4).min)
      self.assertEqual(dt.max, dt.vec(4).max)
  def test_float_to_fp16(self):
    # values beyond the fp16 rounding boundary (65520) overflow to inf; tiny values flush to 0
    self.assertEqual(float_to_fp16(1), 1)
    self.assertEqual(float_to_fp16(65504), 65504)
    self.assertEqual(float_to_fp16(65519.999), 65504)
    self.assertEqual(float_to_fp16(65520), math.inf)
    self.assertEqual(float_to_fp16(1e-8), 0.0)
    self.assertEqual(float_to_fp16(-65504), -65504)
    self.assertEqual(float_to_fp16(-65519.999), -65504)
    self.assertEqual(float_to_fp16(-65520), -math.inf)
    self.assertTrue(math.isnan(float_to_fp16(math.nan)))
  def test_float_to_bf16(self):
    # torch's bfloat16 conversion is the reference implementation
    max_bf16 = torch.finfo(torch.bfloat16).max
    for a in [1, 1.1, 1234, 23456, -777.777, max_bf16, max_bf16 * 1.00001, -max_bf16, -max_bf16 * 1.00001, math.inf, -math.inf]:
      self.assertEqual(float_to_bf16(a), torch.tensor([a], dtype=torch.bfloat16).item())
    self.assertTrue(math.isnan(float_to_bf16(math.nan)))
  def test_float_to_bf16_nan(self):
    # every float32 nan bit pattern (quiet/signaling, either sign) must stay nan, matching torch
    patterns = [0x7FC00001, 0xFFC00001, 0x7F800001, 0xFF800001, 0x7FFFFFFF, 0xFFFFFFFF]
    for u in patterns:
      x = u32_to_f32(u)
      y = float_to_bf16(x)
      t = torch.tensor([x], dtype=torch.bfloat16).item()
      self.assertTrue(math.isnan(y))
      self.assertTrue(math.isnan(t))
  def test_float_to_bf16_round(self):
    # round-to-nearest-even on the truncated 16 mantissa bits, checked against torch:
    # <half rounds down, >half rounds up, exactly-half rounds toward the even upper half
    uppers = [0x3f800000, 0x41230000, 0xC1460000]
    for upper in uppers:
      base = upper & 0xFFFF0000
      base_f32 = u32_to_f32(base)
      base_f32_round_up = u32_to_f32(base + 0x00010000)
      x = u32_to_f32(base | 0x00007000)
      self.assertEqual(float_to_bf16(x), base_f32)
      self.assertEqual(torch.tensor([x], dtype=torch.bfloat16).item(), base_f32)
      x = u32_to_f32(base | 0x0000C000)
      self.assertEqual(float_to_bf16(x), base_f32_round_up)
      self.assertEqual(torch.tensor([x], dtype=torch.bfloat16).item(), base_f32_round_up)
      if ((upper >> 16) & 1) == 0:
        # tie with even low bit: stays at base
        x = u32_to_f32(base | 0x00008000)
        self.assertEqual(float_to_bf16(x), base_f32)
        self.assertEqual(torch.tensor([x], dtype=torch.bfloat16).item(), base_f32)
      else:
        # tie with odd low bit: rounds up to the even neighbor
        x = u32_to_f32(base | 0x00008000)
        self.assertEqual(float_to_bf16(x), base_f32_round_up)
        self.assertEqual(torch.tensor([x], dtype=torch.bfloat16).item(), base_f32_round_up)
  def test_float_to_bf16_boundary(self):
    # rounding at the largest finite bf16 (0x7F7F): round-down keeps it finite, round-up/tie overflow to inf
    base = 0x7F7F0000
    inf_u32 = 0x7F800000
    x = u32_to_f32(base | 0x00007FFF)
    self.assertEqual(f32_to_u32(float_to_bf16(x)), base)
    self.assertEqual(f32_to_u32(torch.tensor([x], dtype=torch.bfloat16).item()), base)
    x = u32_to_f32(base | 0x0000C000)
    self.assertEqual(f32_to_u32(float_to_bf16(x)), inf_u32)
    self.assertEqual(f32_to_u32(torch.tensor([x], dtype=torch.bfloat16).item()), inf_u32)
    x = u32_to_f32(base | 0x00008000)
    self.assertEqual(f32_to_u32(float_to_bf16(x)), inf_u32)
    self.assertEqual(f32_to_u32(torch.tensor([x], dtype=torch.bfloat16).item()), inf_u32)
  # fp8e4m3 truncation: saturates at +/-448, maps inf to nan (format has no inf), matches torch otherwise
  @given(strat.floats(width=32, allow_subnormal=True, allow_nan=True, allow_infinity=True))
  def test_truncate_fp8e4m3(self, x):
    if math.isnan(x): np.testing.assert_equal(truncate[dtypes.fp8e4m3](x), x)
    elif math.isinf(x): np.testing.assert_equal(truncate[dtypes.fp8e4m3](x), math.copysign(math.nan, x))
    elif x > FP8E4M3_MAX: np.testing.assert_equal(truncate[dtypes.fp8e4m3](x), FP8E4M3_MAX)
    elif x < -FP8E4M3_MAX: np.testing.assert_equal(truncate[dtypes.fp8e4m3](x), -FP8E4M3_MAX)
    else: np.testing.assert_equal(truncate[dtypes.fp8e4m3](x), torch.tensor(x, dtype=torch.float8_e4m3fn).float().item())
  # fp8e5m2 truncation: saturates at +/-57344, keeps inf (format has inf), matches torch otherwise
  @given(strat.floats(width=32, allow_subnormal=True, allow_nan=True, allow_infinity=True))
  def test_truncate_fp8e5m2(self, x):
    if math.isnan(x): np.testing.assert_equal(truncate[dtypes.fp8e5m2](x), x)
    elif math.isinf(x): np.testing.assert_equal(truncate[dtypes.fp8e5m2](x), x)
    elif x > FP8E5M2_MAX: np.testing.assert_equal(truncate[dtypes.fp8e5m2](x), FP8E5M2_MAX)
    elif x < -FP8E5M2_MAX: np.testing.assert_equal(truncate[dtypes.fp8e5m2](x), -FP8E5M2_MAX)
    else: np.testing.assert_equal(truncate[dtypes.fp8e5m2](x), torch.tensor(x, dtype=torch.float8_e5m2).float().item())
class TestTypePromotion(unittest.TestCase):
  """Tests for least_upper_dtype: idempotence, monotonicity, and the concrete promotion table."""
  @given(strat.sampled_from(core_dtypes))
  def test_self_promo_to_self(self, dtype):
    # promoting a dtype with itself (any arity) is the identity
    assert least_upper_dtype(dtype) == dtype
    assert least_upper_dtype(dtype, dtype) == dtype
    assert least_upper_dtype(dtype, dtype, dtype) == dtype
  @given(strat.sampled_from(core_dtypes), strat.sampled_from(core_dtypes))
  def test_promo_resulted_higher_than_inputs(self, dtype1, dtype2):
    # the result is never strictly below either input
    result = least_upper_dtype(dtype1, dtype2)
    assert not (result < dtype1) and not (result < dtype2)
  def test_dtype_promo(self):
    # signed/unsigned of the same width promote to the next wider signed int
    assert least_upper_dtype(dtypes.bool, dtypes.int8) == dtypes.int8
    assert least_upper_dtype(dtypes.int8, dtypes.uint8) == dtypes.int16
    assert least_upper_dtype(dtypes.uint8, dtypes.int16) == dtypes.int16
    assert least_upper_dtype(dtypes.int16, dtypes.uint16) == dtypes.int32
    assert least_upper_dtype(dtypes.uint16, dtypes.int32) == dtypes.int32
    assert least_upper_dtype(dtypes.int32, dtypes.uint32) == dtypes.int64
    assert least_upper_dtype(dtypes.uint32, dtypes.int64) == dtypes.int64
    # int64/uint64 is the one pair with no wider signed int; it resolves to uint64
    assert least_upper_dtype(dtypes.int64, dtypes.uint64) == dtypes.uint64
    assert least_upper_dtype(dtypes.float16, dtypes.float32) == dtypes.float32
    assert least_upper_dtype(dtypes.float32, dtypes.float64) == dtypes.float64
    assert least_upper_dtype(dtypes.bool, dtypes.float32) == dtypes.float32
    assert least_upper_dtype(dtypes.bool, dtypes.float64) == dtypes.float64
    # any float beats any int, even a narrower one
    assert least_upper_dtype(dtypes.float16, dtypes.int64) == dtypes.float16
    assert least_upper_dtype(dtypes.float16, dtypes.uint64) == dtypes.float16
    # the two fp8 formats are incomparable; together they promote to half
    assert least_upper_dtype(dtypes.fp8e4m3, dtypes.fp8e5m2) == dtypes.half
    assert least_upper_dtype(dtypes.fp8e4m3, dtypes.bfloat16) == dtypes.bfloat16
    assert least_upper_dtype(dtypes.fp8e5m2, dtypes.bfloat16) == dtypes.bfloat16
    assert least_upper_dtype(dtypes.fp8e4m3, dtypes.float16) == dtypes.float16
    assert least_upper_dtype(dtypes.fp8e5m2, dtypes.float16) == dtypes.float16
    assert least_upper_dtype(dtypes.fp8e4m3, dtypes.int64) == dtypes.fp8e4m3
    assert least_upper_dtype(dtypes.fp8e4m3, dtypes.uint64) == dtypes.fp8e4m3
    assert least_upper_dtype(dtypes.fp8e5m2, dtypes.int64) == dtypes.fp8e5m2
    assert least_upper_dtype(dtypes.fp8e5m2, dtypes.uint64) == dtypes.fp8e5m2
class TestTypeSpec(unittest.TestCase):
  """Tests for default-dtype configuration and the result dtypes of comparison/indexing/attention ops."""
  def setUp(self):
    # several tests mutate the global default dtypes; snapshot them here...
    self.old_default_int, self.old_default_float = dtypes.default_int, dtypes.default_float
  def tearDown(self):
    # ...and restore them so tests stay independent
    dtypes.default_int, dtypes.default_float = self.old_default_int, self.old_default_float
  def test_set_dtype_default(self):
    for default_int in [dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64]:
      dtypes.default_int = default_int
      assert dtypes.default_int == default_int
    for default_float in [*dtypes.fp8s, dtypes.float16, dtypes.bfloat16, dtypes.float32, dtypes.float64]:
      dtypes.default_float = default_float
      assert dtypes.default_float == default_float
  # comparisons always produce bool regardless of operand dtype
  @given(strat.sampled_from(core_dtypes), strat.sampled_from([operator.gt, operator.ge, operator.le, operator.lt, operator.eq, operator.ne]))
  def test_bool_ops(self, dtype, op):
    assert op(Tensor.ones(4, 4, dtype=dtype), Tensor.ones(4, 4, dtype=dtype)).dtype == dtypes.bool
  # index-producing ops return int32 regardless of the configured defaults
  @given(strat.sampled_from(core_dtypes), strat.sampled_from(dtype_ints), strat.sampled_from(dtype_floats))
  def test_functions_return_index(self, dtype, default_int, default_float):
    dtypes.default_int, dtypes.default_float = default_int, default_float
    assert Tensor([0, 1], dtype=dtype).argmax().dtype == dtypes.int32
    assert Tensor([0, 1], dtype=dtype).argmin().dtype == dtypes.int32
    assert Tensor([0, 1], dtype=dtype).multinomial().dtype == dtypes.int32
  # indexing/gather preserve the data tensor's dtype independent of the index dtype
  @given(strat.sampled_from(core_dtypes), strat.sampled_from(dtype_ints))
  def test_tensor_indexing_returns_same_dtype(self, data_dtype, indices_dtype):
    X_data = Tensor.ones(60000, 1, 28, 28, dtype=data_dtype)
    indices = Tensor.randint(512, high=X_data.shape[0]).cast(indices_dtype)
    assert X_data[indices].dtype == X_data.dtype
  @given(strat.sampled_from(core_dtypes), strat.sampled_from(dtype_ints))
  def test_gather_returns_same_dtype(self, data_dtype, indices_dtype):
    X_data = Tensor([[1, 0], [0, 1]], dtype=data_dtype)
    indices = Tensor([[0, 0], [1, 0]], dtype=indices_dtype)
    assert X_data.gather(0, indices).dtype == X_data.dtype
    assert X_data.gather(1, indices).dtype == X_data.dtype
  # attention keeps the q/k/v dtype for every mask/causal/dropout combination
  @given(strat.sampled_from(dtype_floats), strat.sampled_from(dtype_floats))
  def test_attention_returns_same_dtype(self, data_dtype, default_float):
    dtypes.default_float = default_float
    query = Tensor.rand(32, 8, 128, 64, dtype=data_dtype)
    key = Tensor.rand(32, 8, 128, 64, dtype=data_dtype)
    value = Tensor.rand(32, 8, 128, 64, dtype=data_dtype)
    mask = (Tensor.rand(32, 8, 128, 128) < 0.5)
    assert query.scaled_dot_product_attention(key, value, is_causal=True).dtype == data_dtype
    assert query.scaled_dot_product_attention(key, value, is_causal=True, dropout_p=0.3).dtype == data_dtype
    assert query.scaled_dot_product_attention(key, value, is_causal=False).dtype == data_dtype
    assert query.scaled_dot_product_attention(key, value, attn_mask=mask).dtype == data_dtype
class TestAutoCastType(unittest.TestCase):
def setUp(self):
self.old_default_int, self.old_default_float = dtypes.default_int, dtypes.default_float
def tearDown(self):
dtypes.default_int, dtypes.default_float = self.old_default_int, self.old_default_float
@given(strat.sampled_from(dtype_floats), strat.sampled_from(dtype_floats))
def test_least_upper_float_input_is_float(self, input_dtype, default_float):
dtypes.default_float = default_float
self.assertEqual(least_upper_float(input_dtype), input_dtype)
@given(strat.sampled_from(dtype_ints), strat.sampled_from(dtype_floats))
def test_least_upper_float_input_is_int(self, input_dtype, default_float):
dtypes.default_float = default_float
self.assertEqual(least_upper_float(input_dtype), default_float)
@given(strat.sampled_from(core_dtypes))
def test_broadcast_scalar(self, dt):
assert (Tensor.ones(4, 4, dtype=dt) + 2.3).dtype == (dt if dtypes.is_float(dt) else dtypes.default_float)
assert (Tensor.ones(4, 4, dtype=dt) + 2).dtype == (dt if dtypes.is_float(dt) or dtypes.is_int(dt) else dtypes.default_int)
assert (Tensor.ones(4, 4, dtype=dt) + True).dtype == dt
@given(strat.sampled_from(dtype_floats))
def test_int_div_int(self, default_float):
dtypes.default_float = default_float
self.assertEqual(Tensor([1]).div(Tensor([2])).dtype, default_float)
def test_sum(self):
assert (Tensor([0, 1], dtype=dtypes.bool)).sum().dtype == dtypes.int32
assert (Tensor([0, 1], dtype=dtypes.int8)).sum().dtype == dtypes.int32
assert (Tensor([0, 1], dtype=dtypes.int16)).sum().dtype == dtypes.int32
assert (Tensor([0, 1], dtype=dtypes.int32)).sum().dtype == dtypes.int32
assert (Tensor([0, 1], dtype=dtypes.int64)).sum().dtype == dtypes.int64
assert (Tensor([0, 1], dtype=dtypes.uint8)).sum().dtype == dtypes.uint32
assert (Tensor([0, 1], dtype=dtypes.uint16)).sum().dtype == dtypes.uint32
assert (Tensor([0, 1], dtype=dtypes.uint32)).sum().dtype == dtypes.uint32
assert (Tensor([0, 1], dtype=dtypes.uint64)).sum().dtype == dtypes.uint64
assert (Tensor([0, 1], dtype=dtypes.fp8e4m3)).sum().dtype == dtypes.fp8e4m3
assert (Tensor([0, 1], dtype=dtypes.fp8e5m2)).sum().dtype == dtypes.fp8e5m2
assert (Tensor([0, 1], dtype=dtypes.float16)).sum().dtype == dtypes.float16
assert (Tensor([0, 1], dtype=dtypes.bfloat16)).sum().dtype == dtypes.bfloat16
assert (Tensor([0, 1], dtype=dtypes.float32)).sum().dtype == dtypes.float32
assert (Tensor([0, 1], dtype=dtypes.float64)).sum().dtype == dtypes.float64
def test_mean(self):
assert (Tensor([0, 1], dtype=dtypes.bool)).mean().dtype == dtypes.float32
assert (Tensor([0, 1], dtype=dtypes.int8)).mean().dtype == dtypes.float32
assert (Tensor([0, 1], dtype=dtypes.int16)).mean().dtype == dtypes.float32
assert (Tensor([0, 1], dtype=dtypes.int32)).mean().dtype == dtypes.float32
assert (Tensor([0, 1], dtype=dtypes.int64)).mean().dtype == dtypes.float32
assert (Tensor([0, 1], dtype=dtypes.uint8)).mean().dtype == dtypes.float32
assert (Tensor([0, 1], dtype=dtypes.uint16)).mean().dtype == dtypes.float32
assert (Tensor([0, 1], dtype=dtypes.uint32)).mean().dtype == dtypes.float32
assert (Tensor([0, 1], dtype=dtypes.uint64)).mean().dtype == dtypes.float32
assert (Tensor([0, 1], dtype=dtypes.fp8e4m3)).mean().dtype == dtypes.fp8e4m3
assert (Tensor([0, 1], dtype=dtypes.fp8e5m2)).mean().dtype == dtypes.fp8e5m2
assert (Tensor([0, 1], dtype=dtypes.float16)).mean().dtype == dtypes.float16
assert (Tensor([0, 1], dtype=dtypes.bfloat16)).mean().dtype == dtypes.bfloat16
assert (Tensor([0, 1], dtype=dtypes.float32)).mean().dtype == dtypes.float32
assert (Tensor([0, 1], dtype=dtypes.float64)).mean().dtype == dtypes.float64
def test_cumsum(self):
assert (Tensor([0, 1], dtype=dtypes.bool)).cumsum(0).dtype == dtypes.int32
assert (Tensor([0, 1], dtype=dtypes.int8)).cumsum(0).dtype == dtypes.int32
assert (Tensor([0, 1], dtype=dtypes.int16)).cumsum(0).dtype == dtypes.int32
assert (Tensor([0, 1], dtype=dtypes.int32)).cumsum(0).dtype == dtypes.int32
assert (Tensor([0, 1], dtype=dtypes.int64)).cumsum(0).dtype == dtypes.int64
assert (Tensor([0, 1], dtype=dtypes.uint8)).cumsum(0).dtype == dtypes.uint32
assert (Tensor([0, 1], dtype=dtypes.uint16)).cumsum(0).dtype == dtypes.uint32
assert (Tensor([0, 1], dtype=dtypes.uint32)).cumsum(0).dtype == dtypes.uint32
assert (Tensor([0, 1], dtype=dtypes.uint64)).cumsum(0).dtype == dtypes.uint64
assert (Tensor([0, 1], dtype=dtypes.fp8e4m3)).cumsum(0).dtype == dtypes.fp8e4m3
assert (Tensor([0, 1], dtype=dtypes.fp8e5m2)).cumsum(0).dtype == dtypes.fp8e5m2
assert (Tensor([0, 1], dtype=dtypes.float16)).cumsum(0).dtype == dtypes.float16
assert (Tensor([0, 1], dtype=dtypes.bfloat16)).cumsum(0).dtype == dtypes.bfloat16
assert (Tensor([0, 1], dtype=dtypes.float32)).cumsum(0).dtype == dtypes.float32
assert (Tensor([0, 1], dtype=dtypes.float64)).cumsum(0).dtype == dtypes.float64
@given(strat.sampled_from(core_dtypes), strat.sampled_from(core_dtypes), strat.sampled_from(core_dtypes))
def test_matmul(self, dt1, dt2, acc_dt):
t1 = Tensor([0, 1], dtype=dt1)
t2 = Tensor([0, 1], dtype=dt2)
self.assertEqual(t1.matmul(t2).dtype, least_upper_dtype(t1.dtype, t2.dtype))
self.assertEqual(t1.matmul(t2, dtype=acc_dt).dtype, acc_dt)
@given(strat.sampled_from(core_dtypes), strat.sampled_from(core_dtypes), strat.sampled_from(core_dtypes), strat.sampled_from(core_dtypes))
def test_linear(self, dt1, dt2, dt3, acc_dt):
    """linear promotes across input, weight and (optional) bias dtypes."""
    x = Tensor([0, 1], dtype=dt1)
    w = Tensor([0, 1], dtype=dt2)
    b = Tensor([0, 1], dtype=dt3)
    self.assertEqual(x.linear(w).dtype, least_upper_dtype(x.dtype, w.dtype))
    # with a bias, the bias dtype also participates in promotion
    self.assertEqual(x.linear(w, b).dtype, least_upper_dtype(least_upper_dtype(x.dtype, w.dtype), b.dtype))
    # an explicit dtype= argument overrides promotion, with or without bias
    self.assertEqual(x.linear(w, dtype=acc_dt).dtype, acc_dt)
    self.assertEqual(x.linear(w, b, dtype=acc_dt).dtype, acc_dt)
@staticmethod
def check_where_alternate_input_other(input_, other, data_type):
    """Assert where()'s result dtype is `data_type` regardless of branch order."""
    cond = Tensor([True, False])
    for first, second in ((input_, other), (other, input_)):
        assert cond.where(first, second).dtype == data_type
@given(strat.sampled_from(core_dtypes), strat.sampled_from(core_dtypes))
def test_where_no_scalar(self, dt1, dt2):
    """With two tensor branches, where() promotes to the least upper dtype."""
    self.check_where_alternate_input_other(Tensor(2, dtype=dt1), Tensor(3, dtype=dt2), least_upper_dtype(dt1, dt2))
@given(strat.sampled_from(core_dtypes))
def test_where_one_scalar(self, dt):
    """A python scalar branch keeps the tensor dtype when its kind is compatible,
    otherwise the result falls back to the default dtype of the scalar's kind."""
    t = Tensor(2, dtype=dt)
    self.check_where_alternate_input_other(t, 3.2, (dt if dtypes.is_float(dt) else dtypes.default_float))
    self.check_where_alternate_input_other(t, 3, (dt if dtypes.is_float(dt) or dtypes.is_int(dt) else dtypes.default_int))
    # bool is the weakest kind: it never changes the tensor's dtype
    self.check_where_alternate_input_other(t, True, dt)
def test_where_two_scalars(self):
    """With two python scalars, the result uses the default dtype of the stronger kind."""
    cases = [
        (3.1, 3.2, dtypes.default_float),
        (3.1, 3, dtypes.default_float),
        (3.1, True, dtypes.default_float),
        (3, 2, dtypes.default_int),
        (3, True, dtypes.default_int),
        (False, True, dtypes.bool),
    ]
    for lhs, rhs, want in cases:
        self.check_where_alternate_input_other(lhs, rhs, want)
@given(strat.sampled_from(core_dtypes), strat.sampled_from(core_dtypes))
def test_maximum(self, dt1, dt2):
    """maximum of two tensors promotes to the least upper dtype."""
    assert Tensor([0, 1, 2], dtype=dt1).maximum(Tensor([2, 0, 5], dtype=dt2)).dtype == least_upper_dtype(dt1, dt2)
@given(strat.sampled_from(core_dtypes))
def test_maximum_const(self, dt):
    """maximum against a python scalar keeps the tensor dtype when compatible,
    otherwise promotes to the default dtype of the scalar's kind."""
    assert Tensor([1, 2], dtype=dt).maximum(3.1).dtype == (dt if dtypes.is_float(dt) else dtypes.default_float)
    assert Tensor([1, 2], dtype=dt).maximum(3).dtype == (dt if dtypes.is_float(dt) or dtypes.is_int(dt) else dtypes.default_int)
    # bool is the weakest kind: it never changes the tensor's dtype
    assert Tensor([1, 2], dtype=dt).maximum(True).dtype == dt
def test_div(self):
    """True division of two tensors always yields a float dtype, even for int inputs."""
    cases = [
        (dtypes.int32, dtypes.int32, dtypes.default_float),
        (dtypes.int16, dtypes.int32, dtypes.default_float),
        (dtypes.float32, dtypes.float16, dtypes.float32),
        (dtypes.int32, dtypes.float16, dtypes.float16),
    ]
    for lhs_dt, rhs_dt, want in cases:
        self.assertEqual((Tensor([1, 2], dtype=lhs_dt) / Tensor([2, 2], dtype=rhs_dt)).dtype, want)
def test_div_const(self):
    """Dividing by a python scalar: int tensors promote to the default float dtype,
    float tensors keep their own dtype (regardless of int vs float scalar)."""
    assert (Tensor([1, 2], dtype=dtypes.int32) / 2).dtype == dtypes.default_float
    assert (Tensor([1, 2], dtype=dtypes.int32) / 2.0).dtype == dtypes.default_float
    assert (Tensor([1, 2], dtype=dtypes.float16) / 2).dtype == dtypes.float16
    assert (Tensor([1, 2], dtype=dtypes.float16) / 2.0).dtype == dtypes.float16
# Allow running this test file directly.
if __name__ == '__main__':
    unittest.main()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/null/test_dtype_spec.py",
"license": "MIT License",
"lines": 353,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:test/null/test_gradient.py | from typing import Callable
import unittest, math
import torch
from tinygrad import Tensor
from tinygrad.dtype import dtypes
from tinygrad.uop.ops import UOp
from tinygrad.gradient import compute_gradient
class TestGradient(unittest.TestCase):
    """Check tinygrad's symbolic gradients (compute_gradient) against torch.autograd.

    Gradients are built once as symbolic UOp expressions over float Variables,
    then evaluated at several concrete points by substitution and compared to
    torch's numeric gradients at the same points.
    """

    def _cmp_nan_okay(self, x, y):
        # NaN != NaN, so treat two NaNs as a match before the numeric comparison
        if math.isnan(x) and math.isnan(y): return
        self.assertAlmostEqual(x, y, places=5)

    def _test_one_input_function(self, f:Callable, jf:Callable|None=None):
        """Compare d f(x)/dx against torch at a few points; jf is the torch
        reference implementation (defaults to f itself)."""
        if jf is None: jf = f
        x = UOp.variable('x', -math.inf, math.inf, dtype=dtypes.float)
        gx = compute_gradient(f(x), UOp.const(dtypes.float, 1.0), set([x]))[x]
        for val in [-5., -2.0, 0.0, 2.0, 5.]:
            # evaluate the symbolic gradient at the concrete point
            tg_out = gx.substitute({x: x.const_like(val)}).ssimplify()
            tx = torch.tensor([val], dtype=torch.float, requires_grad=True)
            torch_out = torch.autograd.grad(jf(tx), tx)[0].item()
            self._cmp_nan_okay(tg_out, torch_out)

    def _test_two_input_function(self, f:Callable, jf:Callable|None=None):
        """Same as _test_one_input_function, but over the cartesian product of
        sample points for a two-argument function."""
        if jf is None: jf = f
        x = UOp.variable('x', -math.inf, math.inf, dtype=dtypes.float)
        y = UOp.variable('y', -math.inf, math.inf, dtype=dtypes.float)
        grads = compute_gradient(f(x, y), UOp.const(dtypes.float, 1.0), set([x, y]))
        gx, gy = grads[x], grads[y]
        for valx in [-5., -2.0, 0.0, 2.0, 5.]:
            for valy in [-5., -2.0, 0.0, 2.0, 5.]:
                # Substitute the values into the gradient expressions
                substitutions = {x: x.const_like(valx), y: y.const_like(valy)}
                tg_out_x = gx.substitute(substitutions).ssimplify()
                tg_out_y = gy.substitute(substitutions).ssimplify()
                tx = torch.tensor([valx], dtype=torch.float, requires_grad=True)
                ty = torch.tensor([valy], dtype=torch.float, requires_grad=True)
                torch_grad = torch.autograd.grad(jf(tx, ty), [tx, ty])
                torch_out_x, torch_out_y = [x.item() for x in torch_grad]
                self._cmp_nan_okay(tg_out_x, torch_out_x)
                self._cmp_nan_okay(tg_out_y, torch_out_y)

    # unary ops unit
    def test_recip(self): self._test_one_input_function(lambda x: 1.0/x)
    def test_sin(self): self._test_one_input_function(lambda x: x.sin())
    def test_sqrt(self): self._test_one_input_function(lambda x: x.sqrt())
    def test_log2(self): self._test_one_input_function(lambda x: x.log2())
    def test_exp2(self): self._test_one_input_function(lambda x: x.exp2())

    # binary ops unit
    def test_add(self): self._test_two_input_function(lambda x,y: x+y)
    def test_mul(self): self._test_two_input_function(lambda x,y: x*y)

    # chain rule
    def test_chain(self): self._test_one_input_function(lambda x: x.sin().sqrt())
    def test_chain_binop(self): self._test_two_input_function(lambda x,y: (x*y)+x*y)
    def test_big_add_sin(self): self._test_two_input_function(lambda x,y: x.sin()+3.0/y)
    def test_big_chain(self): self._test_two_input_function(lambda x,y: (1.0/x*y)+x*y)
    # where needs an explicit torch reference since tinygrad spells it differently
    def test_where(self): self._test_two_input_function(lambda x,y: (x<y).where(x,y), lambda x,y: torch.where(x<y,x,y))
class TestRealizeMeansRealize(unittest.TestCase):
    """After .realize(), a randomly-initialized tensor must be backed by a realized base buffer."""

    def test_randn_realizes(self):
        t = Tensor.randn(2, 3, 64, 64, requires_grad=True).realize()
        self.assertIsNot(t.uop, t.uop.base)
        self.assertTrue(t.uop.is_realized)

    def test_uniform_realizes(self):
        t = Tensor.uniform(16, 3, 3, 3, requires_grad=True).realize()
        print(t.uop)
        self.assertIsNot(t.uop, t.uop.base)
        self.assertTrue(t.uop.is_realized)

    def test_uniform_gradient(self):
        # the gradient of a realized uniform tensor must itself be realizable
        t = Tensor.uniform(16, 3, 3, 3, requires_grad=True).realize()
        doubled = t * 2
        doubled.sum().gradient(t)[0].realize()
# Allow running this test file directly.
if __name__ == '__main__':
    unittest.main()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/null/test_gradient.py",
"license": "MIT License",
"lines": 69,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:test/null/test_helpers.py | import ctypes, gzip, unittest, timeit, pickle
from tinygrad import Variable
from tinygrad.helpers import Context, ContextVar, argfix, colored, word_wrap, is_numpy_ndarray, mv_address, get_contraction, count, all_same
from tinygrad.helpers import merge_dicts, strip_parens, prod, round_up, fetch, fully_flatten, from_mv, to_mv, polyN, time_to_str, cdiv, cmod, getbits
from tinygrad.helpers import ceildiv
from tinygrad.tensor import Tensor, get_shape
import numpy as np
# Module-level context variable exercised by the TestContextVars tests below.
VARIABLE = ContextVar("VARIABLE", 0)
class TestContextVars(unittest.TestCase):
    """Behavior of ContextVar registration and the Context override manager."""
    # Ensuring that the test does not modify variables outside the tests.
    ctx = Context()
    def setUp(self): TestContextVars.ctx.__enter__()
    def tearDown(self): TestContextVars.ctx.__exit__()

    def test_initial_value_is_set(self):
        _TMP = ContextVar("_TMP", 5)
        self.assertEqual(_TMP.value, 5)

    def test_cannot_recreate(self):
        # registering the same name twice must fail
        _TMP2 = ContextVar("_TMP2", 1)
        with self.assertRaises(RuntimeError):
            _TMP2 = ContextVar("_TMP2", 2)

    def test_new_var_inside_context(self):
        # creating a var inside a Context still registers it globally
        with Context(VARIABLE=1):
            _TMP3 = ContextVar("_TMP3", 1)
        with self.assertRaises(RuntimeError):
            _TMP3 = ContextVar("_TMP3", 2)

    def test_value_across_modules(self):
        # Mocking module import by invoking the code but not in our globals().
        exec('from tinygrad.helpers import ContextVar;C = ContextVar("C", 13)', {}) # pylint:disable=exec-used
        # It should not matter that the first creation was in another module.
        with self.assertRaises(RuntimeError):
            _C = ContextVar("C", 0)

    def test_assignment_across_modules(self):
        B = ContextVar("B", 1)
        # local assignment
        B.value = 2
        self.assertEqual(B.value, 2)
        with self.assertRaises(RuntimeError):
            # Assignment in another module.
            exec('from tinygrad.helpers import ContextVar;B = ContextVar("B", 0);B.value = 3;', {}) # pylint:disable=exec-used

    def test_context_assignment(self):
        # Context overrides the value only within the with-block
        with Context(VARIABLE=1):
            self.assertEqual(VARIABLE.value, 1)
        self.assertEqual(VARIABLE.value, 0)

    def test_unknown_param_to_context(self):
        # Context only accepts names of already-registered ContextVars
        with self.assertRaises(KeyError):
            with Context(SOMETHING_ELSE=1):
                pass

    def test_nested_context(self):
        # each nesting level restores the values of the level above on exit
        with Context(VARIABLE=1):
            with Context(VARIABLE=2):
                MORE = ContextVar("MORE", 2)
                with Context(VARIABLE=3, MORE=3):
                    self.assertEqual(VARIABLE.value, 3)
                    self.assertEqual(MORE.value, 3)
                self.assertEqual(VARIABLE.value, 2)
                self.assertEqual(MORE.value, 2)
            self.assertEqual(VARIABLE.value, 1)
            self.assertEqual(MORE.value, 2) # TODO: should this raise?
        self.assertEqual(VARIABLE.value, 0)

    def test_decorator(self):
        # Context can also be used as a decorator, scoped to the call
        @Context(VARIABLE=1, DEBUG=4)
        def test():
            self.assertEqual(VARIABLE.value, 1)
        self.assertEqual(VARIABLE.value, 0)
        test()
        self.assertEqual(VARIABLE.value, 0)

    def test_context_exit_reverts_updated_values(self):
        D = ContextVar("D", 1)
        D.value = 2
        with Context(D=3):
            ...
        assert D.value == 2, f"Expected D to be 2, but was {D.value}. Indicates that Context.__exit__ did not restore to the correct value."
class TestAllSame(unittest.TestCase):
    """all_same: True iff every element of the sequence is equal (vacuously True when empty)."""

    def test_empty(self):
        self.assertTrue(all_same([]))

    def test_single(self):
        self.assertTrue(all_same([1]))

    def test_same(self):
        self.assertTrue(all_same([1, 1, 1]))

    def test_different(self):
        self.assertFalse(all_same([1, 2, 1]))
class TestMergeDicts(unittest.TestCase):
    """merge_dicts unions dicts; duplicate keys are allowed only when the values agree."""

    def test_merge_dicts(self):
        base = {"a": 1, "b": 2}
        compatible = {"a": 1, "c": 3}
        empty = {}
        conflicting = {"a": 2, "b": 2}
        self.assertEqual(merge_dicts([base, compatible]), {"a": 1, "b": 2, "c": 3})
        self.assertEqual(merge_dicts([base, empty]), base)
        self.assertEqual(merge_dicts([base, compatible, empty]), {"a": 1, "b": 2, "c": 3})
        # a different value for an existing key must be rejected
        with self.assertRaises(RuntimeError):
            merge_dicts([base, conflicting])
class TestStripParens(unittest.TestCase):
    """strip_parens removes one outer parenthesis pair, but only when it wraps the whole string."""

    def test_simple(self):
        self.assertEqual(strip_parens("(1+2)"), "1+2")

    def test_nested(self):
        # only the outermost pair is stripped
        self.assertEqual(strip_parens("(1+(2+3))"), "1+(2+3)")

    def test_casted_no_strip(self):
        # the leading parens are a cast, not a wrapper of the whole expression
        self.assertEqual(strip_parens("(int)(1+2)"), "(int)(1+2)")

    def test_unmatched_parens(self):
        self.assertEqual(strip_parens("((c35+c39>>23&255)+-127).cast(dtypes.float)"),
                         "((c35+c39>>23&255)+-127).cast(dtypes.float)")

    def test_single_paren_left(self):
        self.assertEqual(strip_parens("(abc"), "(abc")

    def test_single_paren_right(self):
        self.assertEqual(strip_parens("abc)"), "abc)")

    def test_parens_at_different_depths(self):
        self.assertEqual(strip_parens("(a+(b))*(c)"), "(a+(b))*(c)")
class TestProd(unittest.TestCase):
    """prod multiplies a tuple of ints and/or symbolic Variables (empty product is 1)."""
    def test_empty(self): self.assertEqual(1, prod(tuple()))
    def test_ints(self): self.assertEqual(30, prod((2, 3, 5)))
    # with a symbolic Variable, the result is a symbolic expression; .render() shows it
    def test_variable(self): self.assertEqual("(a*12)", prod((Variable("a", 1, 5), 3, 4)).render())
    # operand order must not change the rendered expression
    def test_variable_order(self): self.assertEqual("(a*12)", prod((3, 4, Variable("a", 1, 5))).render())
class TestRoundUp(unittest.TestCase):
    """round_up(n, amt) rounds n toward +inf to the next multiple of amt."""

    def test_round_up(self):
        cases = [
            (-3, 4, 0),
            (-4, 4, -4),
            (6, 4, 8),
            (8, 4, 8),          # already a multiple: unchanged
            (232, 24984, 24984),
            (24984, 232, 25056),
        ]
        for n, amt, want in cases:
            self.assertEqual(round_up(n, amt), want)
class TestCeilDiv(unittest.TestCase):
    """ceildiv rounds the quotient toward +inf, for both ints and symbolic UOps."""

    def test_int(self):
        self.assertEqual(ceildiv(10, 3), 4)
        self.assertEqual(ceildiv(9, 3), 3)
        self.assertEqual(ceildiv(0, 5), 0)
        self.assertEqual(ceildiv(1, 5), 1)

    def test_symbolic(self):
        # tests that ceildiv with UOp uses (num + amt - 1) // amt formula for non-negative num
        v = Variable('v', 0, 100)
        result = ceildiv(v, 6)
        self.assertEqual(result.render(), "((v+5)//6)")

    def test_symbolic_negative_offset(self):
        # tests ceildiv(v-5, 6) which is used in conv2d output shape
        # old implementation incorrectly simplified -(x//-y) to ((v+1)//6-1) for v-5
        # new implementation uses (v-5+5)//6 = v//6 which is correct
        v = Variable('v', 11, 100)
        result = ceildiv(v - 5, 6)
        self.assertEqual(result.render(), "(v//6)")
class TestCount(unittest.TestCase):
    """helpers.count behaves like itertools.count but survives pickling."""

    def test_count_basic(self):
        counter = count(3)
        self.assertEqual(next(counter), 3)
        self.assertEqual(next(counter), 4)

    def test_count_step_pickle(self):
        counter = count(1, 2)
        self.assertEqual(next(counter), 1)
        # a pickle round-trip must preserve both the position and the step
        restored = pickle.loads(pickle.dumps(counter))
        self.assertEqual(next(restored), 3)
@unittest.skip("no fetch tests because they need internet")
class TestFetch(unittest.TestCase):
    """Network-backed tests for helpers.fetch (errors, caching, gunzip, range requests).

    Skipped by default because they require internet access.
    """

    def test_fetch_bad_http(self):
        # a 404 must surface as an exception, not a cached empty file
        self.assertRaises(Exception, fetch, 'http://www.google.com/404', allow_caching=False)

    def test_fetch_small(self):
        assert (len(fetch('https://google.com', allow_caching=False).read_bytes())>0)

    def test_fetch_img(self):
        from PIL import Image
        img = fetch("https://avatars.githubusercontent.com/u/132956020", allow_caching=False)
        with Image.open(img) as pimg:
            assert pimg.size == (77, 77), pimg.size

    def test_fetch_subdir(self):
        from PIL import Image
        img = fetch("https://avatars.githubusercontent.com/u/132956020", allow_caching=False, subdir="images")
        with Image.open(img) as pimg:
            assert pimg.size == (77, 77), pimg.size
        # subdir= must place the cached file under that directory
        assert img.parent.name == "images"

    def test_fetch_gunzip_valid(self):
        # compare fetch(gunzip=True) to fetch(gunzip=False) plus decompressing afterwards
        gzip_url: str = 'https://ftp.gnu.org/gnu/gzip/gzip-1.13.tar.gz'
        fp_gz = fetch(gzip_url, gunzip=True)
        fp_no_gz = fetch(gzip_url, gunzip=False)
        with open(fp_gz, 'rb') as f: content_gz = f.read()
        with open(fp_no_gz, 'rb') as f: content_no_gz = gzip.decompress(f.read())
        # the pre-decompressed download is larger on disk but identical in content
        assert fp_gz.stat().st_size > fp_no_gz.stat().st_size
        assert isinstance(content_gz, bytes) and isinstance(content_no_gz, bytes)
        assert len(content_gz) == len(content_no_gz)
        assert content_gz == content_no_gz

    def test_fetch_gunzip_invalid(self):
        # given a non-gzipped file, fetch(gunzip=True) fails
        no_gzip_url: str = 'https://ftp.gnu.org/gnu/gzip/gzip-1.13.zip'
        with self.assertRaises(gzip.BadGzipFile):
            fetch(no_gzip_url, gunzip=True)

    def test_fetch_user_agent(self):
        # this host rejects requests without a browser-like User-Agent; fetch must still succeed
        fetch("https://csrc.nist.gov/CSRC/media/Projects/lightweight-cryptography/documents/finalist-round/updated-submissions/sparkle.zip",
              allow_caching=False)

    def test_fetch_half_and_full_file(self):
        # FIX: the old assert messages were f"{len(x) != 11}", which renders the *comparison*
        # result ("True") instead of anything useful; report the actual byte count instead.
        x = fetch("https://csrc.nist.gov/CSRC/media/Projects/lightweight-cryptography/documents/finalist-round/updated-submissions/sparkle.zip",
                  headers={"Range": "bytes=0-10"}).read_bytes()
        assert len(x) == 11, f"expected 11 bytes, got {len(x)}"
        x = fetch("https://csrc.nist.gov/CSRC/media/Projects/lightweight-cryptography/documents/finalist-round/updated-submissions/sparkle.zip",
                  headers={"Range": "bytes=0-100"}).read_bytes()
        assert len(x) == 101, f"expected 101 bytes, got {len(x)}"
class TestFullyFlatten(unittest.TestCase):
    """fully_flatten recursively flattens nested lists/tuples (and numpy arrays) into one flat list."""

    def test_fully_flatten(self):
        self.assertEqual(fully_flatten([[1, 3], [1, 2]]), [1, 3, 1, 2])
        self.assertEqual(fully_flatten(((1, 3), (1, 2))), [1, 3, 1, 2])
        self.assertEqual(fully_flatten([[[1], [3]], [[1], [2]]]), [1, 3, 1, 2])
        self.assertEqual(fully_flatten([[[[1], 2], 3], 4]), [1, 2, 3, 4])
        self.assertEqual(fully_flatten([[1, 2, [3, 4]], [5, 6], 7]), [1, 2, 3, 4, 5, 6, 7])
        # mixed element types (including None) pass through unchanged
        self.assertEqual(fully_flatten([[1, "ab"], [True, None], [3.14, [5, "b"]]]), [1, "ab", True, None, 3.14, 5, "b"])

    def test_fully_flatten_numpy(self):
        # numpy arrays of any rank flatten to their scalar elements
        self.assertEqual(fully_flatten([np.array([])]), [])
        self.assertEqual(fully_flatten([np.array(3)]), [3])
        self.assertEqual(fully_flatten([np.array([3])]), [3])
        self.assertEqual(fully_flatten([np.array([[3]])]), [3])
        self.assertEqual(fully_flatten([np.array([1, 3]), np.array([1, 2])]), [1, 3, 1, 2])
        self.assertEqual(fully_flatten((np.array([1, 3]), np.array([1, 2]))), [1, 3, 1, 2])
        self.assertEqual(fully_flatten([np.array([[1], [3]]), np.array([[1], [2]])]), [1, 3, 1, 2])
        self.assertEqual(fully_flatten([[1, "ab"], [True, None], np.array([[3.14], [6.28]])]), [1, "ab", True, None, 3.14, 6.28])
class TestMemoryview(unittest.TestCase):
    """from_mv / to_mv / mv_address: ctypes round-trips over memoryviews."""

    def test_from_mv_to_mv(self):
        # a view rebuilt from the ctypes address must alias the original memory
        base = memoryview(bytearray(b"\x11\x22\x33"*40))
        ct = from_mv(base)
        mv = to_mv(ctypes.addressof(ct), len(base))
        mv[0] = 2
        assert base[0] == 2

    @unittest.skip("allocates tons of memory")
    def test_to_mv(self):
        # micro-benchmark of to_mv across buffer sizes; skipped by default
        sizes = [
            (16, "16 B"),
            (64, "64 B"),
            (256, "256 B"),
            (1024, "1 KB"),
            (4 * 1024, "4 KB"),
            (16 * 1024, "16 KB"),
            (64 * 1024, "64 KB"),
            (256 * 1024, "256 KB"),
            (1 * 1024 * 1024, "1 MB"),
            (10 * 1024 * 1024, "10 MB"),
            (200 * 1024 * 1024, "200 MB"),
        ]
        for sz, label in sizes:
            buf = np.random.randint(0, 256, sz, dtype=np.uint8)
            ptr = buf.ctypes.data
            iters = 100_000
            t_us = timeit.timeit(lambda: to_mv(ptr, sz), number=iters) * 1e6 / iters
            print(f"Size {label:>9} | Time: {t_us:8.3f} µs")

    def test_speed_from_mv_vs_mv_address(self):
        # prints comparative timings; has no assertion by design
        x = memoryview(bytearray(1))
        iters = 100000
        fmv_us = timeit.timeit(lambda: from_mv(x), number=iters) * 1e6 / iters
        mva_us = timeit.timeit(lambda: mv_address(x), number=iters) * 1e6 / iters
        print(f"from_mv vs mv_address: {fmv_us:8.3f} µs vs {mva_us:8.3f} µs")
class TestGetContraction(unittest.TestCase):
    """get_contraction(old_shape, new_shape) returns, per new axis, the list of old
    axes it absorbs — or None when no such grouping exists."""

    def test_contraction(self):
        r = get_contraction((1,2,3,4), (2,3,4))
        self.assertEqual(r, [[0, 1], [2], [3]])
        r = get_contraction((2,1,3,4), (2,3,4))
        self.assertEqual(r, [[0], [1, 2], [3]])
        r = get_contraction((1,2,3,1,4), (1,2,3,4))
        self.assertEqual(r, [[], [0, 1], [2], [3, 4]])
        r = get_contraction((1,2,3,1,4,1,1), (2,3,4))
        self.assertEqual(r, [[0, 1], [2], [3, 4, 5, 6]])
        r = get_contraction((1,2,3,4), (1,2,3*4))
        self.assertEqual(r, [[], [0, 1], [2, 3]])
        r = get_contraction((1,2,3,4), (2,1,3,4))
        self.assertEqual(r, [[0, 1], [], [2], [3]])
        r = get_contraction((1,2,3,4), (1,1,2*3*4,1))
        self.assertEqual(r, [[], [], [0,1,2,3], []])
        r = get_contraction((2,1,3,4), (1,2,3,4))
        self.assertEqual(r, [[], [0], [1, 2], [3]])
        r = get_contraction((1,2,3,4), (2*3*4,1,1,1))
        self.assertEqual(r, [[0, 1, 2, 3], [], [], []])
        r = get_contraction((4,4,4,4), (16,1,16))
        self.assertEqual(r, [[0, 1], [], [2, 3]])
        r = get_contraction((1,2,3,4,1,1,1), (2,3,4))
        self.assertEqual(r, [[0, 1], [2], [3, 4, 5, 6]])
        r = get_contraction((1,2,3,4), (1,2,3,4,1))
        self.assertEqual(r, [[], [0, 1], [2], [3], []])
        r = get_contraction((14,1,384,14,1,1,1,1), (1,14,384,14))
        self.assertEqual(r, [[], [0], [1,2], [3,4,5,6,7]])
        r = get_contraction((14,1,384,1,14,1,1,1,1), (1,14,384,14))
        self.assertEqual(r, [[], [0], [1,2], [3,4,5,6,7,8]])
        r = get_contraction((512, 512), (1, 1, 512, 1, 1, 1, 1, 512))
        self.assertEqual(r, [[], [], [0], [], [], [], [], [1]])
        # incompatible shapes: no contraction exists
        r = get_contraction((1,2,3,4), (1,2,6,2))
        self.assertEqual(r, None)

    def test_contraction_ones(self):
        # pure size-1 shapes: trailing new axes absorb all old ones
        r = get_contraction((1,), (1,1,1))
        self.assertEqual(r, [[], [], [0]])
        r = get_contraction((1,1), (1,1,1))
        self.assertEqual(r, [[], [], [0, 1]])
        r = get_contraction((1,1,1,1), (1,))
        self.assertEqual(r, [[0,1,2,3]])
        r = get_contraction((1,1,1,1), (1,1))
        self.assertEqual(r, [[], [0,1,2,3]])
        r = get_contraction((1,1,1,1), (1,1,1))
        self.assertEqual(r, [[], [], [0,1,2,3]])
        r = get_contraction((1,1,1,1), (1,1,1,1))
        self.assertEqual(r, [[], [], [], [0,1,2,3]])
class TestGetShape(unittest.TestCase):
    """get_shape infers the rectangular shape of nested python data, rejecting ragged input."""

    def test_get_shape(self):
        self.assertEqual(get_shape(2), ())
        self.assertEqual(get_shape([]), (0,))
        self.assertEqual(get_shape([[]]), (1, 0))
        self.assertEqual(get_shape([[1, 2]]), (1, 2))
        # lists and tuples may be mixed at the same level
        self.assertEqual(get_shape([[1, 2], (3, 4)]), (2, 2))

    def test_inhomogeneous_shape(self):
        # ragged nesting has no rectangular shape and must raise
        for ragged in ([[], [1]], [[1, [2]], [1]]):
            with self.assertRaises(ValueError):
                get_shape(ragged)
class TestPolyN(unittest.TestCase):
    """polyN evaluates a polynomial from its coefficient list, highest power first.

    [1.0, -2.0, 1.0] encodes x**2 - 2*x + 1 == (x - 1)**2, hence the expected values below.
    """

    def test_float(self):
        np.testing.assert_allclose(polyN(1.0, [1.0, -2.0, 1.0]), 0.0)
        np.testing.assert_allclose(polyN(2.0, [1.0, -2.0, 1.0]), 1.0)
        np.testing.assert_allclose(polyN(3.0, [1.0, -2.0, 1.0]), 4.0)
        np.testing.assert_allclose(polyN(4.0, [1.0, -2.0, 1.0]), 9.0)

    def test_uop(self):
        # the same polynomial built over UOp constants must evaluate identically
        from tinygrad.dtype import dtypes
        from tinygrad.uop.ops import UOp
        from test.helpers import eval_uop
        np.testing.assert_allclose(eval_uop(polyN(UOp.const(dtypes.float, 1.0), [1.0, -2.0, 1.0])), 0.0)
        np.testing.assert_allclose(eval_uop(polyN(UOp.const(dtypes.float, 2.0), [1.0, -2.0, 1.0])), 1.0)
        np.testing.assert_allclose(eval_uop(polyN(UOp.const(dtypes.float, 3.0), [1.0, -2.0, 1.0])), 4.0)
        np.testing.assert_allclose(eval_uop(polyN(UOp.const(dtypes.float, 4.0), [1.0, -2.0, 1.0])), 9.0)
class TestTimeToStr(unittest.TestCase):
    """time_to_str formats a duration in seconds, choosing s/ms/us units and padding to width w."""
    def test_seconds(self): self.assertEqual(" 10.01s ", time_to_str(10.01))
    # exactly 10s is still formatted in milliseconds
    def test_boundary_sec_ms(self): self.assertEqual("10000.00ms", time_to_str(10))
    def test_milliseconds(self): self.assertEqual(" 500.00ms", time_to_str(0.5))
    # exactly 10ms is still formatted in microseconds
    def test_boundary_ms_us(self): self.assertEqual("10000.00us", time_to_str(0.01))
    def test_microseconds(self): self.assertEqual(" 100.00us", time_to_str(0.0001))
    def test_zero(self): self.assertEqual(" 0.00us", time_to_str(0))
    def test_width_formatting(self): self.assertEqual(" 10.01s ", time_to_str(10.01, w=6))
class TestCStyleDivMod(unittest.TestCase):
    """cdiv/cmod follow C semantics: division truncates toward zero and the
    remainder takes the sign of the dividend."""

    def _check(self, fn, cases):
        # each case is (dividend, divisor, expected result)
        for a, b, want in cases:
            self.assertEqual(fn(a, b), want)

    def test_div_pos(self):
        self._check(cdiv, [(-9, 5, -1), (-4, 5, 0), (0, 5, 0), (4, 5, 0), (9, 5, 1)])

    def test_div_neg(self):
        self._check(cdiv, [(-9, -5, 1), (-4, -5, 0), (0, -5, 0), (4, -5, 0), (9, -5, -1)])

    def test_mod_pos(self):
        self._check(cmod, [(-9, 5, -4), (-4, 5, -4), (0, 5, 0), (4, 5, 4), (9, 5, 4)])

    def test_mod_neg(self):
        self._check(cmod, [(-9, -5, -4), (-4, -5, -4), (0, -5, 0), (4, -5, 4), (9, -5, 4)])
class TestGetBits(unittest.TestCase):
    """getbits(value, lo, hi) extracts the inclusive bit range [lo, hi], shifted down to bit 0."""
    WORD = 0b11010110

    def test_low_bits(self):
        self.assertEqual(getbits(self.WORD, 0, 3), 0b0110)

    def test_high_bits(self):
        self.assertEqual(getbits(self.WORD, 4, 7), 0b1101)

    def test_middle_bits(self):
        self.assertEqual(getbits(self.WORD, 3, 5), 0b010)

    def test_full_range(self):
        self.assertEqual(getbits(self.WORD, 0, 7), self.WORD)

    def test_single_bit(self):
        self.assertEqual(getbits(0b100000000, 8, 8), 1)
class TestArgFix(unittest.TestCase):
    """argfix lets APIs accept either varargs or a single tuple/list of values."""

    def test_none(self):
        # None is passed through as a value, not treated as "no argument"
        self.assertEqual(argfix(None), (None, ))
        self.assertEqual(argfix(None, None), (None, None))

    def test_positional_arguments(self):
        self.assertEqual(argfix(1, 2, 3), (1, 2, 3))

    def test_tuple(self):
        # a single tuple argument is unpacked into the result tuple
        self.assertEqual(argfix((1., 2., 3.)), (1., 2., 3.))

    def test_list(self):
        # a single list argument is likewise unpacked
        self.assertEqual(argfix([True, False]), (True, False))
class TestWordWrap(unittest.TestCase):
    """word_wrap breaks long lines at `wrap` visible characters; color escape
    sequences do not count toward the width and explicit newlines are kept."""

    def test_wrap_simple(self):
        wrap = 10
        st = "x"*wrap*2
        st2 = word_wrap(st, wrap)
        self.assertEqual(len(st2.splitlines()), 2)

    def test_wrap_colored(self):
        # ANSI color codes must not count toward the wrap width
        wrap = 10
        st = colored("x"*wrap*2, "red")
        st2 = word_wrap(st, wrap=wrap)
        self.assertEqual(len(st2.splitlines()), 2)

    def test_wrap_explicit_newline(self):
        wrap = 10
        # exactly-fitting lines: no extra breaks added
        st = "\n".join(["x"*wrap, "x"*wrap, "x"*wrap])
        st2 = word_wrap(st, wrap=wrap)
        self.assertEqual(len(st2.splitlines()), len(st.splitlines()))
        # one overlong line adds exactly one extra break
        st = "\n".join(["x"*(wrap+1), "x"*wrap, "x"*wrap])
        st2 = word_wrap(st, wrap=wrap)
        self.assertEqual(len(st2.splitlines()), len(st.splitlines())+1)
        # every overlong line adds its own break
        st = "\n".join(["x"*(wrap+1), "x"*(wrap+1), "x"*(wrap+1)])
        st2 = word_wrap(st, wrap=wrap)
        self.assertEqual(len(st2.splitlines()), len(st.splitlines())+3)
class TestIsNumpyNdarray(unittest.TestCase):
    """is_numpy_ndarray must detect np.ndarray instances and nothing else."""

    def test_ndarray(self):
        self.assertTrue(is_numpy_ndarray(np.array([1, 2, 3])))

    def test_ndarray_tolist(self):
        # tolist() yields a plain python list, which must not be detected
        self.assertFalse(is_numpy_ndarray(np.array([1, 2, 3]).tolist()))

    def test_list(self):
        self.assertFalse(is_numpy_ndarray([1, 2, 3]))

    def test_tensor(self):
        # Tensors are not ndarrays, even when constructed from one
        for t in (Tensor([1, 2, 3]), Tensor(np.array([1, 2, 3]))):
            self.assertFalse(is_numpy_ndarray(t))
# Allow running this test file directly.
if __name__ == '__main__':
    unittest.main()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/null/test_helpers.py",
"license": "MIT License",
"lines": 391,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:test/null/test_indexing.py | # test cases are modified from pytorch test_indexing.py
import unittest
from tinygrad import Tensor
class TestIndexing(unittest.TestCase):
    """Shape and error behavior of Tensor.__getitem__ (cases adapted from pytorch)."""

    def test_single_int(self):
        v = Tensor.randn(5, 7, 3)
        self.assertEqual(v[4].shape, (7, 3))

    def test_multiple_int(self):
        v = Tensor.randn(5, 7, 3)
        self.assertEqual(v[4].shape, (7, 3))
        self.assertEqual(v[4, :, 1].shape, (7,))

    def test_none(self):
        # None (newaxis) inserts a size-1 dimension at its position
        v = Tensor.randn(5, 7, 3)
        self.assertEqual(v[None].shape, (1, 5, 7, 3))
        self.assertEqual(v[:, None].shape, (5, 1, 7, 3))
        self.assertEqual(v[:, None, None].shape, (5, 1, 1, 7, 3))
        self.assertEqual(v[..., None].shape, (5, 7, 3, 1))

    def test_int_indices(self):
        # fancy (list) indexing: the index's shape replaces the indexed dimension
        v = Tensor.randn(5, 7, 3)
        self.assertEqual(v[[0, 4, 2]].shape, (3, 7, 3))
        self.assertEqual(v[:, [0, 4, 2]].shape, (5, 3, 3))
        self.assertEqual(v[:, [[0, 1], [4, 3]]].shape, (5, 2, 2, 3))

    def test_index_src_datatype(self):
        src = Tensor.ones(3, 2, 4)
        # test index
        res = src[[0, 2, 1], :, :]
        self.assertEqual(res.shape, src.shape)

    def test_empty_slice(self):
        # a zero-length slice yields a zero-sized dimension, not an error
        x = Tensor.randn(2, 3, 4, 5)
        y = x[:, :, :, 1]
        z = y[:, 1:1, :]
        self.assertEqual((2, 0, 4), z.shape)

    def test_invalid_index(self):
        x = Tensor.arange(0, 16).reshape(4, 4)
        # string slice bounds are a TypeError, not an IndexError
        self.assertRaises(TypeError, lambda: x["0":"1"])

    def test_out_of_bound_index(self):
        x = Tensor.arange(0, 100).reshape(2, 5, 10)
        self.assertRaises(IndexError, lambda: x[0, 5])
        self.assertRaises(IndexError, lambda: x[4, 5])
        self.assertRaises(IndexError, lambda: x[0, 1, 15])
        self.assertRaises(IndexError, lambda: x[:, :, 12])
class TestNumpy(unittest.TestCase):
    """Numpy-compatibility indexing cases (adapted from numpy's own tests)."""

    def test_index_no_floats(self):
        # float indices are always an IndexError, in every position
        a = Tensor([[[5.]]])
        self.assertRaises(IndexError, lambda: a[0.0])
        self.assertRaises(IndexError, lambda: a[0, 0.0])
        self.assertRaises(IndexError, lambda: a[0.0, 0])
        self.assertRaises(IndexError, lambda: a[0.0, :])
        self.assertRaises(IndexError, lambda: a[:, 0.0])
        self.assertRaises(IndexError, lambda: a[:, 0.0, :])
        self.assertRaises(IndexError, lambda: a[0.0, :, :])
        self.assertRaises(IndexError, lambda: a[0, 0, 0.0])
        self.assertRaises(IndexError, lambda: a[0.0, 0, 0])
        self.assertRaises(IndexError, lambda: a[0, 0.0, 0])
        self.assertRaises(IndexError, lambda: a[-1.4])
        self.assertRaises(IndexError, lambda: a[0, -1.4])
        self.assertRaises(IndexError, lambda: a[-1.4, 0])
        self.assertRaises(IndexError, lambda: a[-1.4, :])
        self.assertRaises(IndexError, lambda: a[:, -1.4])
        self.assertRaises(IndexError, lambda: a[:, -1.4, :])
        self.assertRaises(IndexError, lambda: a[-1.4, :, :])
        self.assertRaises(IndexError, lambda: a[0, 0, -1.4])
        self.assertRaises(IndexError, lambda: a[-1.4, 0, 0])
        self.assertRaises(IndexError, lambda: a[0, -1.4, 0])
        # these two trigger slice internal type verification first
        self.assertRaises(TypeError, lambda: a[0.0:, 0.0])
        self.assertRaises(TypeError, lambda: a[0.0:, 0.0,:])

    def test_none_index(self):
        # `None` index adds newaxis
        a = Tensor([1, 2, 3])
        self.assertEqual(a[None].ndim, a.ndim+1)

    def test_everything_returns_views(self):
        # Before `...` would return a itself.
        a = Tensor([5])
        self.assertIs(a, a[()])
        self.assertIs(a, a[...])
        self.assertIs(a, a[:])

    def test_broaderrors_indexing(self):
        # mismatched fancy-index lengths cannot broadcast: IndexError for get and set
        a = Tensor.zeros(5, 5)
        self.assertRaises(IndexError, a.__getitem__, ([0, 1], [0, 1, 2]))
        self.assertRaises(IndexError, a.contiguous().__setitem__, ([0, 1], [0, 1, 2]), 0)
# Allow running this test file directly.
if __name__ == '__main__':
    unittest.main()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/null/test_indexing.py",
"license": "MIT License",
"lines": 83,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:test/null/test_llm_server.py | import unittest, threading, time
from unittest.mock import Mock
class TestLLMServer(unittest.TestCase):
    """Integration tests using the real OpenAI client."""

    @classmethod
    def setUpClass(cls):
        # Replace the model and tokenizer inside tinygrad.apps.llm with mocks that
        # produce a fixed token stream, then start the HTTP handler on an ephemeral port.
        cls.mock_tok = Mock()
        cls.mock_tok.role = Mock(return_value=[100, 101])
        cls.mock_tok.encode = Mock(return_value=[200, 201, 202])
        cls.mock_tok.decode = Mock(return_value="Hello")
        cls.mock_tok.end_turn = Mock(return_value=[998])
        cls.mock_model = Mock()
        # generate yields two content tokens followed by the eos token (999)
        cls.mock_model.generate = Mock(side_effect=lambda ids, **kwargs: iter([300, 301, 999]))
        cls.bos_id = 1
        cls.eos_id = 999
        import tinygrad.apps.llm as llm_module
        llm_module.model = cls.mock_model
        llm_module.tok = cls.mock_tok
        llm_module.bos_id = cls.bos_id
        llm_module.eos_id = cls.eos_id
        from tinygrad.apps.llm import Handler
        from tinygrad.viz.serve import TCPServerWithReuse
        # port 0 lets the OS pick a free port; read it back from server_address
        cls.server = TCPServerWithReuse(('127.0.0.1', 0), Handler)
        cls.port = cls.server.server_address[1]
        cls.server_thread = threading.Thread(target=cls.server.serve_forever, daemon=True)
        cls.server_thread.start()
        # give the server thread a moment to start accepting connections
        time.sleep(0.1)
        from openai import OpenAI
        cls.client = OpenAI(base_url=f"http://127.0.0.1:{cls.port}/v1", api_key="test")

    @classmethod
    def tearDownClass(cls):
        cls.server.shutdown()
        cls.server.server_close()

    def test_chat_completion_stream(self):
        stream = self.client.chat.completions.create(
            model="test",
            messages=[{"role": "user", "content": "Hello"}],
            stream=True
        )
        chunks = list(stream)
        self.assertGreater(len(chunks), 0)
        # first chunk announces the assistant role, last one carries the stop reason
        self.assertEqual(chunks[0].choices[0].delta.role, "assistant")
        self.assertEqual(chunks[-1].choices[0].finish_reason, "stop")

    def test_openai_response_structure(self):
        # every chunk must match the OpenAI chat.completion.chunk schema
        stream = self.client.chat.completions.create(
            model="test-model",
            messages=[{"role": "user", "content": "Test"}],
            stream=True
        )
        for chunk in stream:
            self.assertTrue(chunk.id.startswith("chatcmpl-"))
            self.assertEqual(chunk.object, "chat.completion.chunk")
            self.assertIsNotNone(chunk.choices)
            self.assertIsNotNone(chunk.created)
            self.assertIsInstance(chunk.created, int)
            self.assertEqual(chunk.model, "test-model")

    def test_stream_with_usage(self):
        # with include_usage, the final chunk must carry token accounting
        stream = self.client.chat.completions.create(
            model="test",
            messages=[{"role": "user", "content": "Hello"}],
            stream=True,
            stream_options={"include_usage": True}
        )
        chunks = list(stream)
        last_chunk = chunks[-1]
        self.assertIsNotNone(last_chunk.usage)
        self.assertIsNotNone(last_chunk.usage.prompt_tokens)
        self.assertIsNotNone(last_chunk.usage.completion_tokens)
        self.assertIsNotNone(last_chunk.usage.total_tokens)

    def test_multi_turn_conversation(self):
        # a full system/user/assistant/user history must stream to completion
        stream = self.client.chat.completions.create(
            model="test",
            messages=[
                {"role": "system", "content": "You are helpful."},
                {"role": "user", "content": "Hello"},
                {"role": "assistant", "content": "Hi!"},
                {"role": "user", "content": "How are you?"}
            ],
            stream=True
        )
        chunks = list(stream)
        self.assertGreater(len(chunks), 0)
        self.assertEqual(chunks[-1].choices[0].finish_reason, "stop")

    def test_content_is_streamed(self):
        # at least one chunk must carry non-empty delta content
        stream = self.client.chat.completions.create(
            model="test",
            messages=[{"role": "user", "content": "Hello"}],
            stream=True
        )
        contents = []
        for chunk in stream:
            if chunk.choices and chunk.choices[0].delta.content:
                contents.append(chunk.choices[0].delta.content)
        self.assertGreater(len(contents), 0)

    def test_non_streaming(self):
        # stream=False returns one complete chat.completion object with usage
        resp = self.client.chat.completions.create(
            model="test-model",
            messages=[{"role": "user", "content": "Hello"}],
            stream=False
        )
        self.assertTrue(resp.id.startswith("chatcmpl-"))
        self.assertEqual(resp.object, "chat.completion")
        self.assertEqual(resp.model, "test-model")
        self.assertIsNotNone(resp.created)
        self.assertEqual(len(resp.choices), 1)
        self.assertEqual(resp.choices[0].message.role, "assistant")
        self.assertIsNotNone(resp.choices[0].message.content)
        self.assertEqual(resp.choices[0].finish_reason, "stop")
        self.assertIsNotNone(resp.usage)
        self.assertIsNotNone(resp.usage.prompt_tokens)
        self.assertIsNotNone(resp.usage.completion_tokens)
# Allow running this test file directly.
if __name__ == '__main__':
    unittest.main()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/null/test_llm_server.py",
"license": "MIT License",
"lines": 113,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:test/null/test_multitensor.py | import gc, unittest
from tinygrad import Tensor, GlobalCounters, dtypes
class TestMultiRamUsage(unittest.TestCase):
  """Memory accounting when copying/sharding tensors across NULL devices.

  Every test measures GlobalCounters relative to a baseline captured in
  setUp, so assertions only see memory allocated by the test itself.
  Statement order matters: allocation, `del`, and gc.collect() all change
  the counters being asserted on.
  """
  def setUp(self):
    # collect first so garbage left by earlier tests doesn't inflate the baseline
    gc.collect()
    self.baseline = GlobalCounters.mem_used
    self.baseline_per_device = dict(GlobalCounters.mem_used_per_device)
    # tensors below are N x N float32, i.e. N*N*4 bytes each
    self.N = 100
  def assertUsed(self, amt, strict=True):
    # collect so buffers released by `del` are actually freed before measuring
    gc.collect()
    used = GlobalCounters.mem_used - self.baseline
    print(f"used {used} bytes")
    if strict: self.assertEqual(used, amt)
    else: self.assertLessEqual(used, amt)
  def assertDeviceUsed(self, expected:dict[str, int]):
    # per-device variant of assertUsed: each listed device is checked individually
    gc.collect()
    for dev, amt in expected.items():
      used = GlobalCounters.mem_used_per_device[dev] - self.baseline_per_device.get(dev, 0)
      self.assertEqual(used, amt, f"device {dev}: expected {amt} bytes used, got {used}")
  def test_zeros(self):
    _ = Tensor.zeros(self.N, self.N).contiguous().realize()
    self.assertUsed(self.N*self.N*4)
  def test_zeros_del(self):
    _ = Tensor.zeros(self.N, self.N).contiguous().realize()
    del _
    self.assertUsed(0)
  def test_zeros_copy(self):
    devices_2 = ("NULL:1", "NULL:2")
    _ = Tensor.zeros(self.N, self.N).contiguous().to(devices_2).realize()
    # NOTE: the first one on the DEFAULT device should be freed
    self.assertUsed(self.N*self.N*4*2)
  def test_zeros_shard(self, devices=("NULL:1", "NULL:2")):
    _ = Tensor.zeros(self.N, self.N).contiguous().shard(devices, axis=0).realize()
    self.assertUsed(self.N*self.N*4) # sharding should not increase total ram usage
  def test_zeros_shard_self(self): self.test_zeros_shard(("NULL:0", "NULL:1"))
  def test_zeros_contiguous_shard(self):
    devices_2 = ("NULL:1", "NULL:2")
    _ = Tensor.zeros(self.N, self.N).contiguous().shard(devices_2, axis=0).contiguous().realize()
    self.assertUsed(self.N*self.N*4) # sharding should not increase total ram usage
  def test_sharded_memory_replicated(self):
    devices_4 = tuple(f"NULL:{i+1}" for i in range(4))
    X = Tensor.ones(256).contiguous().realize()
    self.assertUsed(256 * 4)
    # replicating (axis=None) keeps a full copy on every device
    X.shard_(devices_4).realize()
    self.assertUsed(256 * 4 * 4)
  def test_sharded_memory_replicated_const(self):
    devices_4 = tuple(f"NULL:{i+1}" for i in range(4))
    X = Tensor.ones(256).realize()
    # a non-contiguous const uses no buffer memory until sharded
    self.assertUsed(0)
    X.shard_(devices_4).realize()
    self.assertUsed(256 * 4 * 4) # TODO: can be zero
  def test_sharded_memory_axis_const(self):
    devices_4 = tuple(f"NULL:{i+1}" for i in range(4))
    X = Tensor.ones(256).realize()
    self.assertUsed(0)
    X.shard_(devices_4, axis=0).realize()
    self.assertUsed(256 * 4) # TODO: can be zero
  def test_zeros_per_device(self):
    _ = Tensor.zeros(self.N, self.N, device="NULL").contiguous().realize()
    self.assertDeviceUsed({"NULL": self.N*self.N*4})
  def test_zeros_del_per_device(self):
    _ = Tensor.zeros(self.N, self.N, device="NULL").contiguous().realize()
    del _
    self.assertDeviceUsed({"NULL": 0})
  def test_zeros_copy_per_device(self):
    devices_2 = ("NULL:1", "NULL:2")
    _ = Tensor.zeros(self.N, self.N).contiguous().to(devices_2).realize()
    self.assertDeviceUsed({"NULL:1": self.N*self.N*4, "NULL:2": self.N*self.N*4})
  def test_zeros_shard_per_device(self):
    devices_2 = ("NULL:1", "NULL:2")
    _ = Tensor.zeros(self.N, self.N).contiguous().shard(devices_2, axis=0).realize()
    # split along axis 0: each device holds half the rows
    self.assertDeviceUsed({"NULL:1": self.N*(self.N//2)*4, "NULL:2": self.N*(self.N//2)*4})
  def test_sharded_memory_replicated_per_device(self):
    devices_4 = tuple(f"NULL:{i+1}" for i in range(4))
    X = Tensor.ones(256, device="NULL").contiguous().realize()
    self.assertDeviceUsed({"NULL": 256*4})
    X.shard_(devices_4).realize()
    for d in devices_4:
      self.assertDeviceUsed({d: 256*4})
  def _test_matmul_half(self, dev_count:int):
    # half-precision matmul should move exactly half the bytes of the float one
    N = 32
    total_mem = {}
    devs = tuple(f"NULL:{i}" for i in range(dev_count))
    for dtype in {dtypes.float, dtypes.half}:
      GlobalCounters.reset()
      a = Tensor.empty((N, N), dtype=dtype, device=devs[0]).shard(devs, axis=0)
      b = Tensor.empty((N, N), dtype=dtype, device=devs[0]).shard(devs, axis=None)
      (a @ b).realize()
      total_mem[dtype] = GlobalCounters.global_mem
    self.assertEqual(total_mem[dtypes.half], total_mem[dtypes.float] // 2)
  def test_matmul_half(self): self._test_matmul_half(dev_count=2)
  def test_matmul_half_alt(self): self._test_matmul_half(dev_count=4)
class TestMultiAxis(unittest.TestCase):
  """Behavior of the shard axis under reshape and empty_like."""
  DEVICES = ("NULL:0", "NULL:1")
  def test_reshape_shard_invalid(self):
    # a reshape that would move elements across shard boundaries must fail
    sharded = Tensor.ones(4, 3).shard(self.DEVICES, axis=0)
    with self.assertRaises(RuntimeError, msg="reshape cannot move items between shards"):
      sharded.reshape(3, 4).uop.axis
  def test_reshape_shard_valid(self):
    # reshapes that keep shard boundaries intact preserve axis 0
    sharded = Tensor.ones(4, 8).shard(self.DEVICES, axis=0)
    self.assertEqual(sharded.reshape(2, 16).uop.axis, 0)
    self.assertEqual(sharded.reshape(2, 2, 8).uop.axis, 0)
  def test_empty_like_sharded(self):
    # empty_like of a sharded tensor keeps shape, device, and shard axis
    src = Tensor.ones(4, 8).shard(("NULL:0", "NULL:1"), axis=0)
    clone = src.empty_like()
    self.assertEqual(clone.shape, src.shape)
    self.assertEqual(clone.device, src.device)
    self.assertEqual(clone.uop.axis, 0)
    self.assertTrue(clone.uop.has_buffer_identity())
# Allow running this test file directly as a script.
if __name__ == '__main__':
  unittest.main()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/null/test_multitensor.py",
"license": "MIT License",
"lines": 112,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:test/null/test_rearrange_einops.py | # modified from
# https://github.com/arogozhnikov/einops/blob/master/tests/test_examples.py
# https://github.com/arogozhnikov/einops/blob/master/tests/test_ops.py
# https://github.com/arogozhnikov/einops/blob/master/tests/test_parsing.py
import numpy as np
import unittest
from tinygrad import Tensor
class test_rearrange_examples(unittest.TestCase):
  """Worked rearrange examples ported from the einops test suite."""
  @staticmethod
  def _arange4d(*dims):
    """An int32 tensor of the given shape filled with 0..prod(dims)-1."""
    return Tensor(np.arange(np.prod(dims), dtype=np.int32).reshape(list(dims)))
  def test1(self):
    # plain transpose
    y = self._arange4d(10, 20, 30, 40).rearrange("b c h w -> b h w c")
    assert tuple(y.shape) == (10, 30, 40, 20)
  def test2(self):
    # flatten everything but the batch dim (view / reshape)
    y = self._arange4d(10, 20, 30, 40).rearrange("b c h w -> b (c h w)")
    assert tuple(y.shape) == (10, 20 * 30 * 40)
  def test3(self):
    # depth-to-space
    y = self._arange4d(10, 20, 30, 40).rearrange("b (c h1 w1) h w -> b c (h h1) (w w1)", h1=2, w1=2)
    assert tuple(y.shape) == (10, 5, 30 * 2, 40 * 2)
  def test4(self):
    # space-to-depth
    y = self._arange4d(10, 20, 30, 40).rearrange("b c (h h1) (w w1) -> b (h1 w1 c) h w", h1=2, w1=2)
    assert tuple(y.shape) == (10, 20 * 4, 30 // 2, 40 // 2)
  def test5(self):
    # simple transposition with multi-letter axis names
    y = self._arange4d(10, 20, 30, 40).rearrange("b1 sound b2 letter -> b1 b2 sound letter")
    assert tuple(y.shape) == (10, 30, 20, 40)
  def test6(self):
    # flatten, then slice the channel axis (stand-in for a dot product)
    t = self._arange4d(10, 20, 30, 40).rearrange("b c h w -> (b h w) c")
    t = t[:, ::2]
    assert tuple(t.shape) == (10 * 30 * 40, 10)
  def test7(self):
    # split the channel axis into two groups and unpack them
    y1, y2 = self._arange4d(10, 20, 30, 40).rearrange("b (c g) h w -> g b c h w", g=2)
    assert tuple(y1.shape) == (10, 10, 30, 40)
    assert tuple(y2.shape) == (10, 10, 30, 40)
  def test8(self):
    # squeeze then unsqueeze, using () for unit axes
    y = self._arange4d(10, 20, 1, 1).rearrange("b c () () -> b c")
    assert tuple(y.shape) == (10, 20)
    y = y.rearrange("b c -> c b () ()")
    assert tuple(y.shape) == (20, 10, 1, 1)
  def test9(self):
    # squeeze then unsqueeze, using literal 1 for unit axes
    y = self._arange4d(10, 20, 1, 1).rearrange("b c 1 1 -> b c")
    assert tuple(y.shape) == (10, 20)
    y = y.rearrange("b1 c -> c b1 1 1")
    assert tuple(y.shape) == (20, 10, 1, 1)
class test_rearrange_ops(unittest.TestCase):
  """Error handling for malformed rearrange patterns and axis size arguments."""
  def test_rearrange_errors(self):
    t = Tensor.zeros([1, 1, 1, 1, 1])
    # a well-formed pattern with an ellipsis is accepted
    t.rearrange("a b c d ... -> a b c ... d")
    malformed = (
      "a b c d (...) -> a b c ... d",  # collapsed ellipsis on input
      "a b (c d ... -> a b c ... d",  # unbalanced brackets
      "a b* c d ... -> a b c ... d",  # not alphanumeric
      "a b c d -> a b c d -> a b c d",  # two "->"
      "a ... c ... -> ... a ... c",  # two "..."
      "a b c d e -> f b c d e",  # name mismatch
    )
    for bad in malformed:
      with self.assertRaises(AssertionError):
        t.rearrange(bad)
    # collapsing an ellipsis is legal on the output side only
    t.rearrange("... -> (...)")
    with self.assertRaises(AssertionError):
      t.rearrange("(...) -> (...)")
    t2 = Tensor.zeros([8, 1])
    # a fully specified split works
    t2.rearrange("(a1 a2 a3) b -> b a3 a2 a1", a1=2, a2=2)
    with self.assertRaises(RuntimeError):
      # not enough split sizes given for the composed axis
      t2.rearrange("(a1 a2 a3) b -> b a3 a2 a1", a1=2)
    with self.assertRaises(ValueError):
      # 3*2 does not divide the axis of size 8
      t2.rearrange("(a1 a2 a3) b -> b a3 a2 a1", a1=3, a2=2)
    with self.assertRaises(AssertionError):
      # size given for an axis that is only permuted
      t2.rearrange("(a1 a2 a3) b -> b a3 a2 a1", a1=2, a2=2, b=2)
    with self.assertRaises(AssertionError):
      # size given for an axis that does not appear in the pattern
      t2.rearrange("(a b c) d -> a b c d", b=2, c=2, e=2)
class test_rearrange_parsing(unittest.TestCase):
  """Parsing rules: identifier syntax, ellipsis placement, and parentheses."""
  def test_elementary_axis_name(self):
    valid = ("a", "b", "h", "dx", "h1", "zz", "i9123", "somelongname", "Alex",
             "camelCase", "u_n_d_e_r_score", "unreasonablyLongAxisName")
    for name in valid:
      Tensor.ones((1,)).rearrange(f"{name} -> {name}")
    # names may not start with a digit nor start/end with an underscore
    for name in ("2b", "12", "_startWithUnderscore", "endWithUnderscore_", "_"):
      with self.assertRaises(AssertionError):
        Tensor.ones((1,)).rearrange(f"{name} -> {name}")
    # an empty pattern is rejected outright
    with self.assertRaises(RuntimeError):
      Tensor.ones((1,)).rearrange(" -> ")
  def test_invalid_expressions(self):
    def check(expression: str):
      Tensor.ones((2, 3, 4, 5, 6)).rearrange(f"{expression} -> {expression}")
    # exactly one ellipsis is allowed, never inside parentheses on the input side
    check("... a b c d")
    for bad in ("... a b c d ...", "... a b c (d ...)", "(... a) b c (d ...)"):
      with self.assertRaises(AssertionError):
        check(bad)
    # parentheses must be balanced and not nested
    Tensor.ones((2, 3, 4, 5, 6)).rearrange("a b c d ... -> (a) b c (d ...)")
    for bad in ("(a)) b c (d ...)", "(a b c (d ...)", "(a) (()) b c (d ...)", "(a) ((b c) (d ...))"):
      with self.assertRaises(AssertionError):
        check(bad)
    # identifiers: unicode letters are fine, leading digits/underscores and attached ellipses are not
    check("camelCase under_scored cApiTaLs ß ...")
    for bad in ("1a", "_pre", "...pre", "pre..."):
      with self.assertRaises(AssertionError):
        check(bad)
# Allow running this test file directly as a script.
if __name__ == "__main__":
  unittest.main()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/null/test_rearrange_einops.py",
"license": "MIT License",
"lines": 146,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:test/null/test_schedule.py | # schedule tests that pass on NULL backend (no copyout needed)
import gc, unittest, time
from tinygrad import nn, dtypes, Device, Tensor
from tinygrad.device import is_dtype_supported
from tinygrad.uop.ops import UOp, Ops, GroupOp, UPat
from tinygrad.helpers import DEBUG, GlobalCounters, Context
from tinygrad.engine.realize import CompiledRunner, run_schedule
class KernelCountException(Exception): pass
def check_schedule(t:Tensor|list[Tensor]|UOp, allowed:int, to_prerealize:list[Tensor]|None=None, filter_sink=True):
  """Schedule `t` and assert it lowers to exactly `allowed` kernels.

  Args:
    t: a Tensor, a non-empty list of Tensors, or a UOp to schedule.
    allowed: expected kernel count (only CompiledRunner items when
      filter_sink is True, otherwise every schedule item).
    to_prerealize: tensors realized quietly first, so their kernels don't
      count against `allowed`.
    filter_sink: when True, count only items backed by a CompiledRunner.
  Returns: the schedule, after lowering every item.
  Raises: KernelCountException if the kernel count differs from `allowed`.
  """
  if to_prerealize:
    with Context(DEBUG=0, TRACK_MATCH_STATS=0): Tensor.realize(*to_prerealize)
  if isinstance(t, Tensor): sched = t.schedule()
  # the `t and` guard: an empty list used to raise IndexError on t[0] instead of
  # falling through to the clear "can't schedule" assertion below
  elif isinstance(t, list) and t and isinstance(t[0], Tensor): sched = Tensor.schedule(*t)
  else:
    assert isinstance(t, UOp), f"can't schedule {t}"
    sched = Tensor(t).schedule()
  # lowering every ExecItem verifies each schedule item can actually be compiled
  for si in sched: si.lower()
  kernel_cnt = len([si for si in sched if isinstance(si.prg, CompiledRunner) or not filter_sink])
  if kernel_cnt != allowed:
    print(f"SCHEDULE ISSUE, expecting {allowed} got {kernel_cnt}")
    if DEBUG >= 3:
      for i,s in enumerate(sched):
        print("kernel", i+1)
        print(s.ast)
    raise KernelCountException(f"{kernel_cnt} != {allowed}")
  return sched
def _realize_weights(m):
  """Realize every parameter tensor of module (or list of modules) `m`."""
  for param in nn.state.get_parameters(m):
    param.realize()
class TestBufferUOp(unittest.TestCase):
  """Tests of the BUFFER UOp: allocation timing, identity, and view access.

  These assertions encode the lazy-realization contract: device Buffers stay
  unallocated until the schedule runs, and consts/vars/views never allocate.
  The order of schedule()/realize() calls is part of what is being tested.
  """
  # BUFFER has a ShapeTracker of shape=(n,) and stride=(1,)
  def test_buffer_has_buffer(self):
    buf = Tensor.empty(10)
    self.assertIsNotNone(buf.uop.buffer)
    self.assertEqual(buf.uop.shape, (10,))
    # the device Buffer remains unallocated until it's we run the schedule
    self.assertFalse(buf.uop.buffer.is_allocated())
    add = buf+1
    sched = add.schedule()
    # scheduling alone still doesn't allocate; running the schedule does
    self.assertFalse(buf.uop.buffer.is_allocated())
    run_schedule(sched)
    self.assertTrue(buf.uop.buffer.is_allocated())
  def test_buffer_has_unique_buffer(self):
    # repeated .buffer access returns the same object, not a fresh wrapper
    buf = Tensor.empty(10)
    buf1 = buf.uop.buffer
    buf2 = buf.uop.buffer
    self.assertIs(buf1, buf2)
  # we also allow VIEW(BUFFER) to access the underlying device Buffer, as long as it's contiguous
  def test_buffer_view_allowed(self):
    add = Tensor.empty(1, 1)+Tensor.empty(1, 1)
    add.realize()
    self.assertIsNotNone(add.uop.buffer)
    self.assertEqual(add.uop.shape, (1, 1))
  def test_buffer_view_not_allowed(self):
    permuted_view = Tensor.empty(1, 2, 3).permute(0, 2, 1)
    with self.assertRaises(RuntimeError):
      permuted_view.uop.buffer # cannot access Buffer of a non contiguous VIEW
  def test_buffer_only_after_realize(self):
    a = Tensor([1])+Tensor([2])
    # accessing realized will return None
    self.assertIsNone(a.uop.realized)
    # accessing Buffer will assert
    with self.assertRaisesRegex(AssertionError, "must be BUFFER"):
      a.uop.buffer # there is no BUFFER on an unrealized ADD
    # Buffer only exists once we realize it
    a.realize()
    self.assertIsNotNone(a.uop.buffer)
  def test_const_does_not_realize(self):
    # a scalar const needs no kernel and no device buffer
    a = Tensor(1)
    run_schedule(check_schedule(a, 0))
    self.assertIsNone(a.uop.base.realized)
  def test_var_does_not_realize(self):
    # a bound symbolic variable likewise never allocates
    a = Tensor(UOp.variable("a", 0, 10).bind(1))
    run_schedule(check_schedule(a, 0))
    self.assertIsNone(a.uop.base.realized)
  def test_unused_var_not_in_var_vals(self):
    # unused variable should not appear in var_vals even when there's other work
    a = Tensor(UOp.variable("unused", 0, 10).bind(1))
    b = Tensor.empty(3) + 1
    _, var_vals = Tensor.schedule_with_vars(a, b)
    self.assertEqual(var_vals, {})
    self.assertIsNone(a.uop.base.realized)
  def test_view_does_not_realize(self):
    # an expand stays a view: only the 4-element base is materialized
    a = Tensor.randn(1, 4).expand(4, 4)
    a.realize()
    self.assertEqual(a.uop.base.realized.size, 4)
    # contiguous() forces the full 16-element materialization
    a2 = a.contiguous().realize()
    self.assertEqual(a2.uop.base.realized.size, 16)
class TestContiguous(unittest.TestCase):
  """Kernel counts for .contiguous() applied to various views of a buffer."""
  def test_contiguous_buffer(self):
    # contiguous on an already-contiguous buffer needs no kernel
    check_schedule(Tensor.empty(4).contiguous(), 0)
  def test_contiguous_buffer_view(self):
    # a reshape keeps the buffer contiguous, so still no kernel
    check_schedule(Tensor.empty(4).reshape((2, 2)).contiguous(), 0)
  def test_non_contiguous_buffer_view(self):
    # an expand has to be materialized by one kernel
    check_schedule(Tensor.empty(4, 1).expand((4, 4)).contiguous(), 1)
  def test_size_change_buffer_view(self):
    # contiguous shrink of a realized buffer is a zero-copy BUFFER_VIEW
    check_schedule(Tensor.empty(4).reshape((1, 1, 4)).shrink(((0, 1), (0, 1), (0, 3))).contiguous(), 0)
  def test_double_contiguous_realizes_once(self):
    # the second contiguous folds into the first; one kernel total
    check_schedule(Tensor.empty(4, 1).expand((4, 4)).contiguous().contiguous(), 1)
  def test_view_does_not_realize(self):
    expanded = Tensor.empty(4).expand((4, 4))
    check_schedule(expanded, 0)
    # the base buffer keeps its original 4-element size
    self.assertEqual(expanded.uop.base.buffer.size, 4)
  def test_contiguous_view_realizes(self):
    materialized = Tensor.empty(4).expand((4, 4)).contiguous()
    check_schedule(materialized, 1)
    # after materialization the buffer holds all 16 elements
    self.assertEqual(materialized.uop.base.buffer.size, 16)
class TestSimpleSchedule(unittest.TestCase):
  """Basic scheduling invariants."""
  def test_reduce_doesnt_split(self):
    # two different reshapes of the same reduction share a single kernel
    reduced = Tensor.empty(16, 16).sum(axis=1)
    view_a = reduced.reshape(4, 4)
    view_b = reduced.reshape(16, 1, 1)
    self.assertEqual(len(Tensor.schedule(view_a, view_b)), 1)
class TestSchedule(unittest.TestCase):
@unittest.skipIf(Device.DEFAULT == "CPU", "devices must mismatch")
def test_error_on_device_mismatch(self):
a = Tensor.empty(10)
b = Tensor.empty(10, device="CPU")
c = a+b
with self.assertRaisesRegex(RuntimeError, "all buffers must be on the same device"): check_schedule(c, 1)
@unittest.skipIf(Device.DEFAULT == "CPU", "devices must mismatch")
def test_error_on_device_mismatch_alt(self):
a = Tensor.empty(10)
b = Tensor.empty((1,), device="CPU").expand(10).contiguous()
c = a+b
with self.assertRaisesRegex(RuntimeError, "all buffers must be on the same device"): check_schedule(c, 2)
def test_rand(self):
x = Tensor.rand(32)
check_schedule(x, 1, [Tensor._device_rng_counters[x.device]])
def test_rand_recompute_arange(self):
x = Tensor.rand(32)
check_schedule(x, 1, [Tensor._device_rng_counters[x.device]])
def test_empty_is_not_realized(self):
a = Tensor.empty(10)
child = a+2
assert not a.uop.is_realized
child.realize()
assert a.uop.is_realized
def test_realize_view_of_realized_has_empty_schedule(self):
# views of realized buffers produce an empty schedule
t = Tensor.zeros((3, 3)).contiguous().realize()
v = t[1] # view - is_realized but not has_buffer_identity
assert v.uop.is_realized
sched, _ = Tensor.schedule_with_vars(v)
self.assertEqual(len(sched), 0)
# NOTE: because empty does not have a lowered ExecItem if realize is called on a childless empty, it never gets allocated.
def test_childless_empty_never_allocates(self):
a = Tensor.empty(10)
a.realize()
assert not a.uop.is_realized
def test_simplify_padded_const(self):
a, _ = Tensor.empty(1022).cummax(axis=0)
check_schedule(a, 3)
@unittest.skip("should this pass?")
def test_contiguous_assign(self):
a = Tensor.ones(10) * 2
b = Tensor.empty(10)
c = b.assign(a.contiguous())
check_schedule(c, 1)
def test_basic_binop_fusion(self):
a = Tensor.empty(10)
b = Tensor.empty(10)
c = Tensor.empty(10)
d = a+b+c
check_schedule(d, 1)
def test_basic_binop_fusion_assign(self):
a = Tensor.empty(10)
b = Tensor.empty(10)
c = Tensor.empty(10)
d = a+b+c
e = Tensor.empty(10).assign(d)
check_schedule(e, 1)
def test_basic_binop_fusion_deep(self):
a = Tensor.empty(10)
b = Tensor.empty(10)
c = Tensor.empty(10)
d = Tensor.empty(10)
e = a+b+c+d
check_schedule(e, 1)
def test_mulacc_fusion(self):
a = Tensor.empty(10)
b = Tensor.empty(10)
c = (a*b).sum()
check_schedule(c, 1)
def test_mulacc_fusion_assign(self):
a = Tensor.empty(10)
b = Tensor.empty(10)
c = (a*b).sum()
d = Tensor.empty(1).assign(c)
check_schedule(d, 1)
def test_detach_assign(self):
a = Tensor.ones(4, 4).contiguous().realize()
buf1, buf2 = Tensor.empty(4, 4).contiguous(), Tensor.empty(4, 4).contiguous()
r = buf2.assign(buf1.assign(a + 1.0) * 2.0)
check_schedule(r.detach().contiguous(), 2)
def test_contiguous_backward_assign(self):
a = Tensor.ones(4, 4).contiguous().realize()
buf1, buf2 = Tensor.empty(4, 4).contiguous(), Tensor.empty(4, 4).contiguous()
r = buf2.assign(buf1.assign(a + 1.0) * 2.0)
check_schedule(r.contiguous_backward().contiguous(), 2)
def test_mulacc_relu_fusion(self):
a = Tensor.empty(10)
b = Tensor.empty(10)
c = (a*b).sum().relu()
check_schedule(c, 1)
def test_binop_reshape_fusion(self):
a = Tensor.empty(10)
b = Tensor.empty(10)
c = Tensor.empty(5,2)
d = (a+b).reshape(5,2)+c
check_schedule(d, 1)
def test_binop_permute_fusion(self):
a = Tensor.empty(2,5)
b = Tensor.empty(2,5)
c = Tensor.empty(5,2)
d = (a+b).permute(1,0)+c
check_schedule(d, 1)
def test_constants_are_embedded(self):
a = Tensor.empty(3,3) * 2
check_schedule(a, 1, filter_sink=False)
def tests_constants_are_folded(self):
a = Tensor(2)
check_schedule(a, 0)
def test_binop_elu_fusion(self):
a = Tensor.empty(10)
b = a.elu()
check_schedule(b, 1)
def test_binop_reshape_reduce_fusion(self):
a = Tensor.empty(100)
b = Tensor.empty(100)
c = (a+b).reshape(10, 10).sum(axis=0, keepdim=True)
check_schedule(c, 1)
def test_reduce_reshape_binop_fusion(self):
a = Tensor.empty(10,10)
b = Tensor.empty(10)
c = a.sum(axis=0) + b
check_schedule(c, 1)
def test_reduce_permute_binop_fusion(self):
a = Tensor.empty(10,10,10)
b = Tensor.empty(10,10,1)
c = a.sum(axis=0, keepdim=True).permute(2,1,0) + b
check_schedule(c, 1)
def test_binop_early_reshape_reduce_fusion(self):
a = Tensor.empty(100)
b = Tensor.empty(100)
c = Tensor.empty(10,10)
d = ((a+b).reshape(10,10) + c).sum(axis=0)
check_schedule(d, 1)
def test_diamond_folded(self):
a = Tensor.empty(10)
b = Tensor.empty(10)
c = Tensor.empty(10)
d = Tensor.empty(10)
ab = a+b
e = (ab+c) + (ab+d)
check_schedule(e, 1)
def test_cache_binaryop(self):
a = Tensor.empty(10)
b = Tensor.empty(10)
c = a+b
d = a+b
check_schedule(d, 0, [c])
# failing in new lazy
def test_cache_binaryop_reshaped(self):
a = Tensor.empty(10)
b = Tensor.empty(10)
c = a+b
d = a.reshape(10,1)+b.reshape(10,1)
check_schedule(d, 1, [c])
# failing in new lazy
def test_cache_binaryop_transpose(self):
a = Tensor.empty(10,10)
b = Tensor.empty(10,10)
c = (a.T*b.T).T #.contiguous()
d = a*b
check_schedule(d, 1, [c])
def test_cache_two_reduceops(self):
a = Tensor.empty(10)
b = a.sum()
c = a.sum()
bc = b+c
check_schedule(bc, 1)
def test_cache_reduce_parent(self):
x = Tensor.empty(32)
r0 = x.mean(axis=0, keepdim=True)
r1 = (x - r0).sum(axis=0).div(2)
out = r0 + r1
schedule = check_schedule(out, 2)
reduceops = [x for si in schedule for x in si.ast.toposort() if x.op in {Ops.REDUCE_AXIS, Ops.REDUCE}]
assert len(reduceops) == 2
def test_cache_reduce_multiple_children(self):
x = Tensor.empty(32)
y = Tensor.empty(4, 4)
r0 = x.mean(axis=0, keepdim=True)
r1 = (x - r0).sum(axis=0).div(2)
out0 = r0 + y
out1 = r1 + y
schedule = check_schedule([out0, out1], 3)
reduceops = [x for si in schedule for x in si.ast.toposort() if x.op in {Ops.REDUCE_AXIS, Ops.REDUCE}]
self.assertEqual(len(reduceops), 2) # why is RANGEIFY different?
def test_dedup_assign(self):
a = Tensor.ones(4).contiguous().realize()
b = Tensor.full((4,), 2.).contiguous()
first = a.assign(b)
second = a.assign(b)
check_schedule([first, second], 2) # TODO: 1?
def test_no_dedup_empty(self):
a = Tensor.empty((4,))
b = Tensor.empty((4,))
# NOTE: empty does not have any schedule
check_schedule([a, b], 0, filter_sink=False)
self.assertIsNot(a.uop.buffer, b.uop.buffer)
def test_dedup_outputs(self):
a = Tensor.full((4, 4), 1.).contiguous().realize()
b = Tensor.full((4, 4), 1.).contiguous().realize()
check_schedule([a+b, a+b], 1)
def test_const_realize(self):
t = Tensor.ones(2)
check_schedule(t[0], 0)
check_schedule(t[1], 0)
def test_fold_double_unary(self):
y = Tensor.empty(2)
out = y.sum(keepdim=True).sqrt().neg()
check_schedule(out, 1)
#@unittest.skip("may want to reconsider this")
def test_fold_batchnorm(self):
with Tensor.train():
img = Tensor.empty(1,32,4,4)
bn = nn.BatchNorm2d(32, track_running_stats=False)
out = bn(img)
check_schedule(out, 3)
def test_fold_conv_batchnorm_notrain(self):
with Tensor.train(False):
img = Tensor.empty(1,3,8,8)
c1 = nn.Conv2d(3,32,3)
bn = nn.BatchNorm2d(32, track_running_stats=True)
out = bn(c1(img)).relu()
check_schedule(out, 1, [c1.weight, c1.bias])
def test_fold_conv_batchnorm_notrain_no_running_stats(self):
with Tensor.train(False):
img = Tensor.empty(1,3,8,8)
c1 = nn.Conv2d(3,32,3)
bn = nn.BatchNorm2d(32, track_running_stats=False)
out = bn(c1(img)).relu()
check_schedule(out, 4, [c1.weight, c1.bias])
def test_fold_conv_batchnorm(self):
with Tensor.train():
img = Tensor.empty(1,3,8,8)
c1 = nn.Conv2d(3,32,3)
bn = nn.BatchNorm2d(32, track_running_stats=False)
out = bn(c1(img)).relu()
check_schedule(out, 4, [c1.weight, c1.bias])
def test_fold_conv_batchnorm_optim(self, adam=False):
# 2 is too low?
optim, cnt = (nn.optim.Adam, 16) if adam else (nn.optim.SGD, 2)
with Tensor.train():
img = Tensor.ones(1,3,4,4)
c1 = nn.Conv2d(3,32,3)
bn = nn.BatchNorm2d(32, track_running_stats=False)
_realize_weights([c1, bn])
opt = optim(nn.state.get_parameters([c1, bn]))
img_bn = bn(c1(img)).elu().sum()
opt.zero_grad()
img_bn.backward()
check_schedule(opt.schedule_step(), cnt)
def test_fold_conv_batchnorm_optim_adam(self): self.test_fold_conv_batchnorm_optim(True)
def test_fold_batchnorm_backward(self):
with Tensor.train():
x = Tensor.empty((2, 16, 8, 8)).contiguous()
bn = nn.BatchNorm2d(16)
bn.weight.requires_grad = bn.bias.requires_grad = x.requires_grad = True
fw = bn(x).contiguous_backward().relu().contiguous()
fw.sum().backward()
# TODO: this is too many
check_schedule([x.grad, bn.weight.grad, bn.bias.grad, fw], 9)
def test_fold_conv_relu(self):
c1 = nn.Conv2d(3,16,3)
# run
img = Tensor.ones(2,3,64,64)
out = c1(img).relu()
check_schedule(out, 1, [c1.weight, c1.bias])
def test_fold_conv_relu_alt(self):
img = Tensor.ones(1,4,8,8)
c1 = nn.Conv2d(4, 4, kernel_size=3)
c2 = nn.Conv2d(4, 4, kernel_size=3)
img_conv = img.sequential([c1, Tensor.relu, c2, Tensor.relu])
check_schedule(img_conv, 2, [*nn.state.get_parameters(c1), *nn.state.get_parameters(c2), img])
def test_fold_conv_relu_nobias(self):
img = Tensor.ones(1,4,8,8)
c1 = nn.Conv2d(4, 4, kernel_size=3, bias=False)
c2 = nn.Conv2d(4, 4, kernel_size=3, bias=False)
out = img.sequential([c1, Tensor.relu, c2, Tensor.relu])
check_schedule(out, 2, [c1.weight, c2.weight, img])
def test_fold_conv_elu(self):
c1 = nn.Conv2d(3,16,3)
# run
img = Tensor.rand(2,3,64,64)
out = c1(img).elu()
check_schedule(out, 1, [c1.weight, c1.bias, img])
def test_fold_conv_elu_alt(self):
img = Tensor.ones(1,4,8,8).contiguous()
c1 = nn.Conv2d(4, 4, kernel_size=3)
c2 = nn.Conv2d(4, 4, kernel_size=3)
img_conv = img.sequential([c1, Tensor.elu, c2, Tensor.elu])
check_schedule(img_conv, 2, [*nn.state.get_parameters(c1), *nn.state.get_parameters(c2), img])
def test_two_sum(self):
img = Tensor.empty(64,64)
x = (img.sum(0) + img.sum(1))
out = x.relu()
check_schedule(out, 1)
def test_push_permute_through_reshape(self):
a = Tensor.empty(16,16)
b = Tensor.empty(16,16)
c = (a+b).reshape(4,4,4,4).permute(2,3,0,1).contiguous()
check_schedule(c, 1)
#@unittest.skip("failing in old lazy")
def test_push_permute_through_reshape_alt(self):
a = Tensor.empty(4,4,4,4)
b = Tensor.empty(4,4,4,4)
c = (a+b).reshape(16,16).permute(1,0).contiguous()
check_schedule(c, 1)
def test_no_binop_rerun(self):
a = Tensor.empty(16)
b = Tensor.empty(16)
c = a+b
d = (a+b).reshape(16,1)
check_schedule(d, 0, [c])
@unittest.skipUnless(is_dtype_supported(dtypes.half), "need half")
def test_multi_permute_should_collapse(self):
a = Tensor.empty(4,4,4,4)
b = Tensor.empty(16)
c = a.sum((0,1)).cast(dtypes.float16).permute(1,0).reshape(4,4,1).permute(1,0,2).reshape(16) + b
check_schedule(c, 1)
def test_fancy_reshape_fusion(self):
a = Tensor.empty(10)
b = Tensor.empty(10)
c = a+b
d = a.reshape(10,1)+b.reshape(10,1)
out = c.sum() + d.sum()
check_schedule(out, 1)
def test_children_dont_push(self):
a = Tensor.empty(10, 10, 1)
b = Tensor.empty(10, 10, 1)
d = (a+b).expand(10, 10, 10)
e = (a+b).permute(2,1,0)
f = d+e
check_schedule(f, 1)
# failing in new lazy
@unittest.skip("always fusing elementwise")
def test_dont_fuse_binops_with_children(self):
a = Tensor.empty(10)
b = Tensor.empty(10)
c = Tensor.empty(10)
keep_me = a+b
e = keep_me.sum() # noqa: F841 give keep_me a child (NOTE: BinaryOps won't be a child since it will instant fuse)
d = keep_me+c
check_schedule(d, 2)
check_schedule(keep_me, 0, [d])
#@unittest.skip("failing in old lazy")
def test_permute_breaks_fusion(self):
a = Tensor.empty(10, 10, 10)
b = Tensor.empty(10, 10)
c = (a.sum(axis=2) + b).permute(1,0)
d = c.permute(1,0)
check_schedule(d, 1)
def test_some_permute_fusion(self):
a = Tensor.empty(8192, 16)
b = Tensor.empty(1, 16)
d = (a.T + b.expand(8192, 16).T)
c = a + b.expand(8192, 16)
e = d.T
check_schedule(c, 1)
check_schedule(e, 1)
def test_shrink_fuse(self):
a = Tensor.empty(8192, 16)
b = Tensor.empty(8192, 16)
c = a * b
d = Tensor.empty(1, 16)
e = c[0] * d
check_schedule(e, 1)
def test_expand_fuse(self):
a = Tensor.empty(1, 16)
b = Tensor.empty(1, 16)
c = a * b
d = Tensor.empty(8192, 16)
e = c * d
check_schedule(e, 1)
# this is the failing case in openpilot...it's very simple like this
def test_image_conv_fusion(self):
w1 = Tensor.empty(16, 16, 1, 1)
b1 = Tensor.empty(16)
w2 = Tensor.empty(16, 16, 1, 1)
b2 = Tensor.empty(16)
w3 = Tensor.empty(16, 16, 1, 1)
b3 = Tensor.empty(16)
x = Tensor.empty(1, 16, 32, 32)
x = base = x.image_conv2d(w1, b1)
x = x.image_conv2d(w2, b2) + base
x = x.image_conv2d(w3, b3)
# NOOP, 3 convs, contiguous
#check_schedule(x, 5)
check_schedule(x, 7)
def test_image_conv_fusion_minimal(self):
b1 = Tensor.empty(16)
b2 = Tensor.empty(16)
def p(x): return x.permute(1,0).contiguous().reshape(32,16,1).expand(32,16,16).sum(axis=2).permute(1,0)
x = Tensor.empty(16, 32)
x = base = p(x) + b1.reshape(16,1)
x = p(x)
x = x + b2.reshape(16,1)
x = x + base
del base
x = p(x)
check_schedule(x, 4)
def test_image_conv_fusion_more_minimal(self):
b1 = Tensor.empty(16)
def p(x): return x.permute(1,0).contiguous().reshape(32,16,1).expand(32,16,16).sum(axis=2).permute(1,0)
x = Tensor.empty(16, 32)
x = base = p(x) + b1.reshape(16,1)
x = p(x)
del base
check_schedule(x, 3)
def test_contiguous_while_contiguous(self):
x = Tensor.empty(1, 64, 32, 32)
out = x.contiguous()
check_schedule(out, 0, filter_sink=False)
def test_contiguous_while_not_contiguous(self):
x = Tensor.empty(1, 64, 32, 32)
out = x.permute(0,2,3,1).contiguous()
check_schedule(out, 1, filter_sink=False)
def test_fold_with_contiguous(self):
a = Tensor.randn(16, 16, 16).realize()
b = Tensor.randn(16, 16).realize()
c = (a.sum(2).contiguous() + b).contiguous()
check_schedule(c, 2)
def _alu_from_tensor(self, t:Tensor):
s = [s for s in t.schedule() if s.ast.op is Ops.SINK]
self.assertEqual(len(s), 1)
return [u.op for u in s[0].ast.toposort() if u.op in GroupOp.ALU]
def test_2_pow_is_exp2(self):
t = 2.0 ** Tensor([1.0, 2.0, 3.0])
self.assertEqual(self._alu_from_tensor(t), [Ops.EXP2])
def test_pow_05_is_sqrt(self):
t = Tensor([1.0, 2.0, 3.0]) ** 0.5
self.assertEqual(self._alu_from_tensor(t), [Ops.SQRT])
def test_pow_neg_05_is_rsqrt(self):
t = Tensor([1.0, 2.0, 3.0]) ** -0.5
self.assertEqual(self._alu_from_tensor(t), [Ops.RECIPROCAL, Ops.SQRT])
def test_pow_2_has_1_mul(self):
t = Tensor([1.0, 2.0, 3.0]) ** Tensor(2.0)
self.assertEqual(self._alu_from_tensor(t), [Ops.MUL])
def test_pow_8_has_3_muls(self):
t = Tensor([1.0, 2.0, 3.0]) ** 8
self.assertEqual(self._alu_from_tensor(t), [Ops.MUL, Ops.MUL, Ops.MUL])
@unittest.skip("const folding is removed")
def test_pow_const_tensor_to_zero(self):
x = Tensor([1,2,3,4])
out = x ** Tensor(0.0)
# NOTE: this is UOp.const(0) + UOp.const(1)
check_schedule(out, 0)
def test_zero_size(self):
x = Tensor.empty(2, 3, 0)
out = x + 1
check_schedule(out, 0, filter_sink=False)
def test_reduce_permute_nofuse(self):
x = Tensor.empty(32, 32, 32)
y = Tensor.empty(32, 32)
out = x.sum(axis=2).T+y
check_schedule(out, 1)
def test_two_elus_sum(self):
x = Tensor.empty(32, 32)
y = Tensor.empty(32, 32)
out = x.sum(1).relu().elu() + y.sum(1).relu().elu()
check_schedule(out, 1)
def test_multistage_reduce(self):
x = Tensor.empty(32, 32, 32)
out = x.sum(2).relu().sum(1)
check_schedule(out, 1)
def test_multistage_reduce_fork(self):
x = Tensor.empty(32, 32, 32)
x = x.sum(2)
out2 = x + 1
out = x.relu().sum(1) + out2[0]
check_schedule(out, 2)
def test_contiguous_add(self):
x = Tensor.empty(32)
y = Tensor.empty(32)
z = Tensor.empty(32)
out = (x+y).contiguous()+z
check_schedule(out, 2)
def test_double_sum_ref(self):
x = Tensor.empty(32, 32, 32)
x = x.sum(2)
out = x + x[:, 4]
check_schedule(out, 2)
def test_reduce_shrink(self):
x = Tensor.empty(32, 32)
y = Tensor.empty(16)
x = x.sum(1)
x = x[:16]
out = x + y
check_schedule(out, 1)
def test_const_no_recompute(self):
x = Tensor(2) + Tensor(2)
y = Tensor(2) + Tensor(2)
out = x.contiguous() + y.contiguous()
check_schedule(out, 2, filter_sink=False)
def test_reduce_shrink_child(self):
a = Tensor.empty(100, 100)
b = Tensor.empty(10,)
c = a.sum() + b[0]
d = a.sum() + 2
check_schedule([c, d], 2) # TODO: 1?
  def test_reduce_multiple_paths_midshrink(self):
    a = Tensor.empty(4, 4)
    r = a.sum(axis=1)
    out0 = r.exp2()
    out1 = out0[0] + out0
    check_schedule([r, out0, out1], 3)
  def test_reduce_shrink_output(self):
    a = Tensor.empty(4, 4)
    r = a.sum(keepdim=True)
    out0 = r.exp2()
    out1 = out0[0] + Tensor.empty(1, )
    check_schedule([r, out0, out1], 3)
  @unittest.skipUnless(is_dtype_supported(dtypes.half), "need half")
  def test_softmax_upcast(self):
    # the intermediate softmax buffers must carry the requested float dtype,
    # regardless of whether the input is half or float
    # input half, softmax in float
    Tensor.manual_seed(0)
    x = Tensor.randn(4, 12, 64, 64, dtype=dtypes.half).realize()
    out = x.softmax(dtype=dtypes.float)
    sched = out.schedule()
    self.assertEqual(len(sched), 3)
    self.assertEqual(sched[0].bufs[0].dtype, dtypes.float)
    # input float, softmax in float
    Tensor.manual_seed(0)
    x = Tensor.randn(4, 12, 64, 64, dtype=dtypes.float).realize()
    out = x.softmax(dtype=dtypes.float)
    sched = out.schedule()
    self.assertEqual(len(sched), 3)
    self.assertEqual(sched[0].bufs[0].dtype, dtypes.float)
  def test_softmax_backward(self):
    Tensor.manual_seed(0)
    x = Tensor.randn(4, 12, 64, 64, requires_grad=True).realize()
    x.softmax().sum().backward()
    run_schedule(check_schedule(x.grad, 4))
  def test_scaled_dot_product_attention_fusion(self):
    x, y, z, m = (Tensor.empty(32, 8, 16, 16) for _ in range(4))
    out = Tensor.scaled_dot_product_attention(x, y, z, attn_mask=m)
    check_schedule(out, 4)
  def test_scaled_dot_product_attention_causal_fusion(self):
    x, y, z = (Tensor.empty(32, 8, 16, 16) for _ in range(3))
    out = Tensor.scaled_dot_product_attention(x, y, z, is_causal=True)
    check_schedule(out, 4)
  def test_adam_step_fusion(self):
    # a full forward+backward+Adam step for a single Linear layer
    with Tensor.train():
      x = Tensor.empty(4, 64, 32)
      layer = nn.Linear(32, 32*4)
      _realize_weights(layer)
      opt = nn.optim.Adam(nn.state.get_parameters(layer), lr=1e-4)
      layer(x).relu().sum().backward()
      check_schedule(opt.schedule_step(), 13)
  # the optimizer-fusion tests below pin the kernel count of one full
  # forward+backward+optimizer step for small conv stacks
  def test_adam_conv_fuse(self):
    with Tensor.train():
      img = Tensor.empty(2,3,4,4)
      c1 = nn.Conv2d(3,32,3)
      _realize_weights(c1)
      opt = nn.optim.Adam(nn.state.get_parameters(c1), lr=1e-4)
      opt.zero_grad()
      c1(img).relu().sum().backward()
      check_schedule(opt.schedule_step(), 13)
  def test_adam_2convs_fuse(self):
    with Tensor.train():
      img = Tensor.empty(2,3,4,4)
      c1 = nn.Conv2d(3,16,3,bias=False)
      c2 = nn.Conv2d(16,32,2,bias=False)
      _realize_weights([c1, c2])
      opt = nn.optim.Adam(nn.state.get_parameters([c1, c2]), lr=1e-4)
      opt.zero_grad()
      c2(c1(img).relu()).relu().sum().backward()
      check_schedule(opt.schedule_step(), 15)
  def test_sgd_conv_fuse(self):
    with Tensor.train():
      img = Tensor.empty(2,3,4,4)
      c1 = nn.Conv2d(3,32,3)
      _realize_weights(c1)
      opt = nn.optim.SGD(nn.state.get_parameters(c1))
      opt.zero_grad()
      c1(img).relu().sum().backward()
      check_schedule(opt.schedule_step(), 5) # TODO: 3?
  def test_sgd_2convs_fuse(self):
    with Tensor.train():
      img = Tensor.empty(2,3,4,4)
      c1 = nn.Conv2d(3,16,3,bias=False)
      c2 = nn.Conv2d(16,32,2,bias=False)
      _realize_weights([c1, c2])
      opt = nn.optim.SGD(nn.state.get_parameters([c1, c2]))
      opt.zero_grad()
      c2(c1(img).relu()).relu().sum().backward()
      check_schedule(opt.schedule_step(), 7)
  def test_fold_2convs_sgd_nesterov_momentum_wd(self):
    # momentum + weight decay add update buffers, raising the kernel count
    with Tensor.train():
      img = Tensor.empty(2,3,4,4)
      c1 = nn.Conv2d(3,16,3,bias=False)
      c2 = nn.Conv2d(16,32,2,bias=False)
      _realize_weights([c1, c2])
      opt = nn.optim.SGD(nn.state.get_parameters([c1, c2]), nesterov=True, momentum=0.9, weight_decay=0.1)
      opt.zero_grad()
      c2(c1(img).relu()).relu().sum().backward()
      check_schedule(opt.schedule_step(), 11)
  def test_sgd_4convs_fuse(self):
    with Tensor.train():
      img = Tensor.empty(2,3,16,16)
      c1 = nn.Conv2d(3,4,3,bias=False)
      c2 = nn.Conv2d(4,8,3,bias=False)
      c3 = nn.Conv2d(8,16,3,bias=False)
      c4 = nn.Conv2d(16,32,3,bias=False)
      _realize_weights([c1, c2, c3, c4])
      opt = nn.optim.SGD(nn.state.get_parameters([c1, c2, c3, c4]))
      opt.zero_grad()
      c4(c3(c2(c1(img).relu()).relu()).relu()).relu().sum().backward()
      check_schedule(opt.schedule_step(), 15)
  def test_sgd_4convs_fuse_conv_bw(self):
    # same graph as test_sgd_4convs_fuse; kept as a separate case
    with Tensor.train():
      img = Tensor.empty(2,3,16,16)
      c1 = nn.Conv2d(3,4,3,bias=False)
      c2 = nn.Conv2d(4,8,3,bias=False)
      c3 = nn.Conv2d(8,16,3,bias=False)
      c4 = nn.Conv2d(16,32,3,bias=False)
      _realize_weights([c1, c2, c3, c4])
      opt = nn.optim.SGD(nn.state.get_parameters([c1, c2, c3, c4]))
      opt.zero_grad()
      c4(c3(c2(c1(img).relu()).relu()).relu()).relu().sum().backward()
      check_schedule(opt.schedule_step(), 15)
  def test_reduce_simple_chase(self):
    # one shared reduce feeding two further reduces -> three kernels
    a = Tensor.empty(4, 4, 4)
    r = a.sum(0) + 6
    b = r.sum(0) * 4
    c = r.sum(1) * 2
    check_schedule([b, c], 3)
  def test_push_permute_chase(self):
    a = Tensor.empty(4, 4, 4)
    b = Tensor.empty(4, 4)
    r = a.sum(2) + b
    d = r.T * 4
    e = r * d
    check_schedule([d, e], 3)
  def test_push_shrink_chase(self):
    a = Tensor.empty(16, 16)
    b = Tensor.empty(4)
    c = Tensor.empty(16, )
    r = a.sum(1) + c
    d = r[:4] * b
    check_schedule(d, 1)
  def test_midreduce_nochase(self):
    # two different reductions of the same input combine into one kernel
    a = Tensor.empty(16, 16)
    b = (a.sum(0) + a.max(1)) + 2
    check_schedule(b, 1)
  def test_bitcast_fuses(self):
    x = Tensor.empty(1, dtype=dtypes.float32)
    a = x.exp2().bitcast(dtypes.int32)
    b = x.bitcast(dtypes.int32)
    check_schedule(a+b, 1) # this should fuse when it makes sense
  def test_reduceop_reshape_dont_push(self):
    Tensor.manual_seed(0)
    x = Tensor.randn(10, 20).realize()
    out = x.argmax(1)
    run_schedule(check_schedule(out, 2))
  def test_resnet_conv2d(self):
    x = Tensor.empty(1, 8, 32, 32)
    w1 = Tensor.empty(8, 8, 3, 3)
    w2 = Tensor.empty(8, 8, 1, 1)
    out = x.conv2d(w1).conv2d(w2)
    check_schedule(out, 2)
  def test_schedule_mem_used(self):
    # scheduling alone (without realize) must not leave allocated memory behind
    gc.collect()
    base = GlobalCounters.mem_used
    Tensor.ones(256).contiguous().realize()
    Tensor.ones(5, 5).contiguous().schedule()
    gc.collect()
    self.assertEqual(GlobalCounters.mem_used-base, 0)
  def test_const_schedule(self):
    constv = Tensor.empty(2, 2).uop.const_like(10)
    check_schedule(constv, 0)
  def test_const_schedule_contig(self):
    constv = Tensor.empty(2, 2).uop.const_like(10).contiguous()
    check_schedule(constv, 1)
  def test_advanced_simple_indexing_combined(self):
    # basic slice combined with fancy (list) indexing in one kernel
    X = Tensor.arange(16).reshape(4, 4)
    xt = X[1:2, [-1, 2]]
    check_schedule(xt, 1)
  def test_arange_index_shrink(self):
    Tensor.manual_seed(0)
    with Context(TRACK_MATCH_STATS=0):
      x = Tensor.randn(11).realize()
      a = Tensor.arange(22)
    out = (x + a[:11]).sum()
    check_schedule(out, 1)
  def test_fuse_arange_avg_pool2d_ceil_mode(self):
    x = Tensor.avg_pool2d(Tensor.empty(1,1,6,6), kernel_size=(3,3), padding=1, stride=3, ceil_mode=True)
    sched = check_schedule(x, 1)
    self.assertEqual(len([x for x in sched[0].ast.backward_slice_with_self if x.op is Ops.REDUCE]), 1)
  def test_fuse_arange_pad_circular_mode_bw(self):
    x = Tensor.empty(1,1,5,5,5)
    out = x.pad((1,2,3,5,1,2), mode="circular")
    g = out.sum().gradient(x)[0]
    sched = check_schedule(g, 1)
    self.assertEqual(len([x for x in sched[0].ast.backward_slice_with_self if x.op is Ops.REDUCE]), 0)
  def test_resnet_block(self):
    # inference-mode resnet basic block: two conv+bn kernels total
    with Tensor.train(False):
      in_planes, planes = 64, 64
      conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
      bn1 = nn.BatchNorm2d(planes)
      conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1, stride=1, bias=False)
      bn2 = nn.BatchNorm2d(planes)
      x = Tensor.empty(1, 64, 32, 32)
      out = bn1(conv1(x)).relu()
      out = bn2(conv2(out))
      out = (out + x).relu()
      run_schedule(check_schedule(out, 2, [conv1.weight, conv2.weight]))
class TestSwizzle(unittest.TestCase):
  """Kernel-count checks for softmax/argmax schedules on realized random inputs."""
  def test_softmax_one_kernel(self):
    # seed so randn is reproducible; silence debug output and match-stat tracking
    Tensor.manual_seed(0)
    with Context(DEBUG=0, TRACK_MATCH_STATS=0):
      src = Tensor.randn(32, 32).realize()
    out = src.softmax()
    check_schedule(out, 3)  # TODO: 1?
  def test_argmax_one_kernel(self):
    Tensor.manual_seed(0)
    with Context(DEBUG=0, TRACK_MATCH_STATS=0):
      src = Tensor.randn(10, 20).realize()
    out = src.argmax(0)
    check_schedule(out, 2)  # TODO: 1?
class TestView(unittest.TestCase):
  """A pad of a zero-element tensor still has zero elements and needs no kernels."""
  def test_zero_size_alt(self):
    empty_src = Tensor.empty(135, 0, 9)
    padded = empty_src.pad(((0, 0), (0, 0), (18, 0)))
    check_schedule(padded, 0)
class TestUOpBecome(unittest.TestCase):
  # Tests for what a Tensor's uop "becomes" after scheduling/realization.
  # the simplest case, if we create a new BUFFER for this tensor UOp
  def test_new_buffer(self):
    a = Tensor.empty(4, 4)
    b = Tensor.empty(4, 4)
    add = a+b
    check_schedule(add, 1)
    # NOTE: realized base is always a flat buffer
    assert UPat(Ops.BUFFER).match(add.uop.base, {})
    # the Tensor UOp can optionally stack a VIEW on top of the BUFFER, in this case to preserve the (4, 4) shape of the tensor
    assert add.uop is not add.uop.base
    self.assertEqual(add.uop.size, 16)
    self.assertEqual(add.uop.shape, (4, 4))
  def test_new_buffer_view(self):
    a = Tensor.empty(4, 4)
    b = Tensor.empty(4, 4)
    add = (a+b).reshape(8, 2)
    check_schedule(add, 1)
    assert UPat(Ops.BUFFER).match(add.uop.base, {})
    # the shape is preserved in the becomes_map.
    self.assertEqual(add.uop.shape, (8, 2))
    assert add.uop is not add.uop.base
  def test_new_flat_buffer(self):
    a = Tensor.empty(4,)
    b = Tensor.empty(4,)
    add = a+b
    check_schedule(add, 1)
    # BUFFER already has a shape (4,), this tensor just becomes a contiguous BUFFER
    assert UPat(Ops.BUFFER).match(add.uop.base, {})
  # sometimes we prefer to perform an op before movement ops, in this case we should stack the mops on top of the new buffer
  @unittest.skip("no longer supported")
  def test_reorder_expand(self):
    a = Tensor.empty(4, 1)
    b = a.expand(4, 4).reciprocal()
    check_schedule(b, 1)
    self.assertEqual(b.uop.base.buffer.size, 4)
    self.assertEqual(b.uop.shape, (4, 4))
  def test_reorder_expand_alt(self):
    x = Tensor.empty(4, 1)
    y = Tensor.empty(4, 1)
    img = Tensor.empty(4, 4)
    z = (img*x) / y
    check_schedule(z, 1)
  # TODO: rangeify doesn't yet cleanup this kind of re-indexing
  @unittest.expectedFailure
  def test_become_existing_buffer(self):
    a = Tensor.empty(4, 4)
    b = a*1
    assert UPat(Ops.MUL).match(b.uop, {}) # before scheduling it's a mul
    check_schedule(b, 0)
    self.assertIs(a.uop.base.buffer, b.uop.base.buffer)
  def test_become_buf_with_mops(self):
    a = Tensor.empty(2, 4, 2)
    noop = a.shrink(((1, 2), (0, 4), (0, 2))).reshape(4, 2)*1+0
    # before realizing, this tensor is base
    assert noop.uop is noop.uop.base
    noop.realize()
    # it becomes a realized view after realize
    assert noop.uop is not noop.uop.base
    assert noop.uop.base.op is Ops.BUFFER
    # the realized view can still be used in later compute
    late_add = noop+2
    late_add.realize()
  @unittest.skip("const folding is removed")
  def test_become_const_in_base(self):
    a = Tensor.empty(4)
    b = a*0
    assert UPat(Ops.MUL).match(b.uop, {}) # before scheduling it's a mul
    check_schedule(b, 0)
    assert UPat(Ops.CONST, arg=0).match(b.uop.base, {}) # scheduling replaces the tensor uop with a VIEW(BUFFER)
  @unittest.skip("const folding is removed")
  def test_become_const_from_const(self):
    const_add = Tensor(1)+Tensor(2)
    assert UPat(Ops.ADD).match(const_add.uop, {})
    check_schedule(const_add, 0)
    assert UPat(Ops.CONST, arg=3).match(const_add.uop.base, {})
  # tensors can become another realized tensor source
  @unittest.expectedFailure
  def test_become_existing_buf_simple(self):
    a = Tensor.empty(4, 4)
    b = a+0
    check_schedule(b, 0)
    assert b.uop.base.op is Ops.BUFFER
    self.assertIs(a.uop, b.uop)
  # they can also chain other movement ops on top of the tensor source
  @unittest.expectedFailure
  def test_become_existing_buf_view(self):
    a = Tensor.empty(4, 4)
    b = a.permute((1, 0))+0
    check_schedule(b, 0)
    self.assertEqual(b.uop.st, a.uop.permute((1, 0)).st)
  @unittest.expectedFailure
  def test_become_existing_buf_view_alt(self):
    a = Tensor.empty(4, 4)
    b = a.permute((1, 0)).reshape((8, 2))+0
    check_schedule(b, 0)
    self.assertEqual(b.uop.st, a.uop.permute((1, 0)).reshape((8, 2)).st)
  # they can also have other base parents that simplified, in that case we just backtrack to the chained mops
  @unittest.expectedFailure
  def test_become_existing_buf_complex(self):
    a = Tensor.empty(4, 4)
    b = (a.permute((1, 0))+0).reshape((8, 2))+0
    check_schedule(b, 0)
    self.assertEqual(b.uop.st, a.uop.permute((1, 0)).reshape((8, 2)).st)
    assert b.uop.base.op is Ops.BUFFER
  @unittest.expectedFailure
  def test_become_multiple_choices(self):
    # two different mop chains over the same buffer should collapse to one realized base
    a = Tensor.empty(16)
    b = (a.reshape(1, 1, 4, 1, 4)+0).reshape(1, 1, 4, 4).shrink(((0, 1), (0, 1), (0, 3), (0, 3)))+0
    c = (a.reshape(1, 1, 4, 4)+0).shrink(((0, 1), (0, 1), (0, 3), (0, 3)))+0
    check_schedule([b, c], 0)
    from tinygrad.helpers import all_same
    assert all_same([x.uop.base.realized for x in [a,b,c]])
  @unittest.skip("not clear if we want this")
  def test_setitem_becomes_subbuffer(self):
    a = Tensor.full((4,), 2.).contiguous().realize()
    b = a.shrink(((0, 2),)).assign(Tensor.full((2,), 1.0))
    b.realize()
    assert a.uop.is_realized
    assert a.uop.buffer._base is None
    assert b.uop.op_in_backward_slice_with_self(Ops.SHRINK)
    assert b.uop.base is a.uop.base
class TestFusionOp(unittest.TestCase):
  # Deeply recursive graphs must schedule quickly and produce bounded output.
  def test_recursive_add(self):
    # 24 doublings build a deep expression tree; scheduling + lowering must
    # finish in under 2s and the rendered program must stay small
    st = time.perf_counter()
    a = Tensor([1,2,3,4])
    for _ in range(24): a = a + a
    sched = a.schedule()
    sched[-1].lower()
    self.assertLess(time.perf_counter()-st, 2.0)
    assert len(sched[-1].prg.p.src.splitlines()) < 250
  def test_recursive_add_cmp(self):
    # structurally identical graphs yield identical ASTs; a different depth does not
    st = time.perf_counter()
    a = Tensor([1,2,3,4])
    for _ in range(24): a = a + a
    sched1 = a.schedule()
    b = Tensor([1,2,3,4])
    for _ in range(24): b = b + b
    sched2 = b.schedule()
    c = Tensor([1,2,3,4])
    for _ in range(23): c = c + c
    sched3 = c.schedule()
    self.assertEqual(sched1[-1].ast, sched2[-1].ast)
    with self.assertRaises(AssertionError): self.assertEqual(sched1[-1].ast, sched3[-1].ast)
    self.assertLess(time.perf_counter()-st, 2.0)
  def test_recursive_pad(self):
    st = time.perf_counter()
    val = 1.0
    a = Tensor(val)
    for _ in range(24): a = Tensor.stack(a, a)[0]
    sched = a.schedule()
    self.assertLessEqual(len(sched), 1)
    self.assertLess(time.perf_counter()-st, 2.0)
  def test_recursive_reshape(self):
    st = time.perf_counter()
    a = Tensor.empty(32, 32).realize()
    b = Tensor.empty(16, 2).realize()
    r = a.sum(1)
    for _ in range(24): r = r.reshape(16, 2) + b
    sched = r.schedule()
    self.assertEqual(len(sched), 1)
    self.assertLess(time.perf_counter()-st, 2.0)
# NOTE: the NULL backend supports BUFFER_VIEW
class TestBufferView(unittest.TestCase):
  """Contiguous shrinks of already-realized buffers schedule zero kernels."""
  def test_shrink_contiguous_is_buffer_view(self):
    # simple 1D shrink of a realized buffer should be BUFFER_VIEW, not a copy kernel
    base = Tensor.arange(100).contiguous().realize()
    view = base.shrink(((10, 50),)).contiguous()
    run_schedule(check_schedule(view, 0))
  def test_shrink_2d_contiguous_is_buffer_view(self):
    # shrinking only the outer dim keeps the data contiguous
    base = Tensor.arange(100).reshape(10, 10).contiguous().realize()
    view = base.shrink(((1, 5), None)).contiguous()
    run_schedule(check_schedule(view, 0))
  def test_chained_shrink_is_buffer_view(self):
    # multiple shrinks/reshapes that remain contiguous still need no kernel
    base = Tensor.arange(1000).contiguous().realize()
    view = base.shrink(((200, 800),)).shrink(((0, 300),)).reshape((30, 10)).shrink(((20, 25), (0, 10))).contiguous()
    run_schedule(check_schedule(view, 0))
# run this test file directly with verbose output
if __name__ == '__main__':
  unittest.main(verbosity=2)
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/null/test_schedule.py",
"license": "MIT License",
"lines": 1028,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:test/null/test_schedule_cache.py | import unittest
from tinygrad import Tensor, Variable, Context
from tinygrad.helpers import cpu_events
from tinygrad.engine.schedule import schedule_cache
def schedule_one():
  # minimal scheduling workload used by the cache tests below
  Tensor([1]).schedule()
class TestScheduleCache(unittest.TestCase):
  def test_bound_variable_var_vals(self):
    # schedule_with_vars must surface the bound value of a symbolic tensor
    v = Variable('pos', 1, 100)
    x = Tensor.ones(10).contiguous().realize()
    t = x + Tensor(v.bind(42))
    _, var_vals = t.schedule_with_vars()
    self.assertEqual(var_vals, {'pos': 42})
  def test_disable_schedule_cache(self):
    schedule_cache.clear()
    # test write
    with Context(SCACHE=0): schedule_one()
    self.assertEqual(len(schedule_cache), 0)  # SCACHE=0 must not populate the cache
    with Context(SCACHE=1):
      schedule_one()
      schedule_one()
    self.assertEqual(len(schedule_cache), 1)  # identical schedules share one entry
    # test read
    with Context(PROFILE=1):
      cpu_events.clear()
      with Context(SCACHE=0): schedule_one()
      num_events_no_cache = len(cpu_events)
      cpu_events.clear()
      with Context(SCACHE=1): schedule_one()
      num_events_cache = len(cpu_events)
      # a cache hit should do strictly less scheduling work (fewer profiled events)
      self.assertLess(num_events_cache, num_events_no_cache)
if __name__ == "__main__":
unittest.main()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/null/test_schedule_cache.py",
"license": "MIT License",
"lines": 33,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:test/null/test_symbolic_tensor.py | import unittest
from tinygrad import Variable
from tinygrad.tensor import Tensor
class TestSymbolic(unittest.TestCase):
  """Shape propagation when concatenating tensors sliced by bound Variables."""
  def assert_tuple_equal(self, x, y):
    # element-wise not-unequal check; works for symbolic shape entries too
    for got, want in zip(x, y):
      self.assertFalse(got != want)
  def test_cat_dim0_is_expanded(self):
    vi = Variable("i", 1, 5).bind(3)
    vj = Variable("j", 1, 5).bind(3)
    vk = Variable("k", 1, 5).bind(3)
    cat3 = Tensor.rand(5, 4)[:vi].cat(Tensor.rand(5, 4)[:vj], dim=0).cat(Tensor.rand(5, 4)[:vk], dim=0)
    self.assert_tuple_equal(cat3.shape, (vi+vj+vk, 4))
    # mixing symbolic and concrete lengths along dim 0
    cat_mixed = Tensor.rand(5, 3)[:vi].cat(Tensor.rand(5, 3)[:vi], dim=0).cat(Tensor.rand(3, 3), dim=0)
    self.assert_tuple_equal(cat_mixed.shape, (2*vi+3, 3))
  def test_cat_dim1_strides(self):
    vi = Variable("i", 1, 5).bind(4)
    vj = Variable("j", 1, 5).bind(4)
    vk = Variable("k", 1, 5).bind(4)
    cat3 = Tensor.rand(3, 5)[:, :vi].cat(Tensor.rand(3, 5)[:, :vj], dim=1).cat(Tensor.rand(3, 5)[:, :vk], dim=1)
    self.assert_tuple_equal(cat3.shape, (3, vi+vj+vk))
class TestSymbolicVarVals(unittest.TestCase):
  # not-unequal scalar compare; mirrors TestSymbolic.assert_tuple_equal
  def assert_equal(self, x, y): self.assertFalse(x != y)
  def test_shrink_unbind(self):
    # shrinking with a bound variable: unbind_all recovers the {var: value} mapping
    v = Variable("v", 1, 100)
    bv = Variable("v", 1, 100).bind(2)
    t = Tensor.rand(3, 4).shrink(((0,bv),(0,4)))
    unbound_st, var_val = t.uop.unbind_all()
    assert var_val == {v: 2}
    # the bound variable can appear in both ends of the shrink range
    t = Tensor.rand(3, 4).shrink(((bv, bv+1), (0, 4)))
    unbound_st, var_val = t.uop.unbind_all()
    assert var_val == {v: 2}
class TestSymbolicReshape(unittest.TestCase):
  """Reshapes whose dimensions include bound Variables keep the symbols in .shape."""
  def test_reshape(self):
    t1 = Tensor.rand(5, 4)
    t2 = Tensor.rand(5, 6)
    for n in range(1, 6):
      vn = Variable("i", 1, 5).bind(n)
      # slice with the symbol, then reshape around it
      r1 = t1[:vn].reshape((vn, 4))
      assert r1.shape == (vn, 4)
      r2 = t2[:vn].reshape((vn, 2, 3))
      assert r2.shape == (vn, 2, 3)
  def test_two_symbol_reshape(self):
    t = Tensor.rand(5, 5)
    for a in range(1, 6):
      for b in range(1, 6):
        va = Variable("i", 1, 5).bind(a)
        vb = Variable("j", 1, 5).bind(b)
        # swap the two symbolic dims, swap back, then flatten
        r = t[:va, :vb].reshape(vb, va)
        assert r.shape == (vb, va)
        r = r.reshape(va, vb)
        assert r.shape == (va, vb)
        r = r.reshape(1, va*vb)
        assert r.shape == (1, va*vb)
class TestSymbolicExpand(unittest.TestCase):
  def test_expand_into_symbols(self):
    # expanding a concrete tensor to symbolic target dims keeps the symbols in .shape
    vi = Variable("i", 1, 5).bind(3)
    vj = Variable("j", 1, 5).bind(3)
    a = Tensor([[1], [2], [3]]).expand((3, vi))
    assert a.shape == (3, vi)
    a = a.reshape(3, vi, 1).expand((3, vi, vj))
    assert a.shape == (3, vi, vj)
  def test_plus_expands_constant(self):
    # elementwise + broadcasts over a symbolic dim for every bound value
    a = Tensor.rand(3, 5)
    for i in range(1, 6):
      vi = Variable("i", 1, 5).bind(i)
      ret = a[:, :vi]
      ret = ret + 1
      self.assertTupleEqual(ret.shape, (3, vi))
  def test_pad_then_expand_into_symbols(self):
    vi = Variable("i", 1, 10).bind(3)
    a = Tensor(1).unsqueeze(0).pad((0, 24)).unsqueeze(0).expand((vi, 25))
    self.assertEqual(a.shape, (vi, 25))
    # 25*vi and vi*25 are the same symbolic product; both reshapes are accepted
    self.assertEqual(a.reshape(25*vi).shape, (vi*25,))
    self.assertEqual(a.reshape(vi*25).shape, (vi*25,))
class TestSymbolicShrink(unittest.TestCase):
  """Shrinks whose bounds are (unbound) Variables."""
  def test_shrink_symbols_simple(self):
    v = Variable("i", 1, 5)
    out = Tensor.rand(5, 5).shrink(((0, 5), (0, v)))
    assert out.shape == (5, v)
  def test_shrink_symbols(self):
    # a one-wide symbolic window (v, v+1) resolves to a concrete size of 1
    v = Variable("i", 1, 5)
    out = Tensor.rand(3, 5).shrink(((0, 2), (v, v+1)))
    assert out.shape == (2, 1)
# run this test file directly
if __name__ == '__main__':
  unittest.main()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/null/test_symbolic_tensor.py",
"license": "MIT License",
"lines": 88,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:test/null/test_tensor.py | # tensor tests that pass on NULL backend (no copyout needed)
import numpy as np
import unittest
from tinygrad import Tensor, Device, dtypes
from tinygrad.device import is_dtype_supported
from tinygrad.uop.ops import Ops, UOp
from tinygrad.renderer.ptx import PTXRenderer
from tinygrad.renderer.nir import NIRRenderer
from tinygrad.engine.realize import get_program
from tinygrad.dtype import DType
# shared random fixtures (float32): 1x3 input x, 3x3 weight W, 1x3 multiplier m
x_init = np.random.randn(1,3).astype(np.float32)
W_init = np.random.randn(3,3).astype(np.float32)
m_init = np.random.randn(1,3).astype(np.float32)
class TestTrainMode(unittest.TestCase):
  """Tensor.train() as a decorator toggles Tensor.training only during the call."""
  def test_train_mode(self):
    assert not Tensor.training
    @Tensor.train()
    def run_in_train_mode():
      # the flag is set while the decorated function executes
      assert Tensor.training
    run_in_train_mode()
    # and restored afterwards
    assert not Tensor.training
class TestInferenceMode(unittest.TestCase):
  def test_inference(self):
    # backward() is never called, so no .grad is populated anywhere in the graph
    x = Tensor(x_init, requires_grad=True)
    m = Tensor(m_init, requires_grad=True)
    W = Tensor(W_init, requires_grad=True)
    tmp = x.mul(m)
    mm = tmp.matmul(W)
    out = mm.relu()
    out = out.sum()
    #out.backward()
    assert x.grad is None
    assert m.grad is None
    assert tmp.grad is None
    assert mm.grad is None
    assert W.grad is None
    assert W.requires_grad  # requires_grad itself is untouched
  def test_no_grad_mode_context_manager(self):
    x = Tensor(x_init, requires_grad=True)
    m = Tensor(m_init, requires_grad=True)
    W = Tensor(W_init, requires_grad=True)
    def f(x, m, W):
      # same graph built inside a helper; still no gradients without backward()
      tmp = x.mul(m)
      mm = tmp.matmul(W)
      out = mm.relu()
      out = out.sum()
      #out.backward()
      assert x.grad is None
      assert m.grad is None
      assert tmp.grad is None
      assert mm.grad is None
      assert W.grad is None
    f(x, m, W)
class TestIdxUpcast(unittest.TestCase):
  # When the flat element count exceeds int32 range, INDEX values must be int64.
  def _find_op(self, ast: UOp, op: Ops):
    # depth-first search for the first UOp with the given op
    if ast.op is op: return ast
    for src in ast.src:
      if (ret:=self._find_op(src, op)) is not None: return ret
  def _schedule_render(self, a: Tensor):
    # schedule the tensor and render the first SINK kernel down to uops
    schedule, _ = a.schedule_with_vars()
    for s in schedule:
      if s.ast.op is Ops.SINK:
        renderer = Device[s.bufs[0].device].renderer
        prg = get_program(s.ast, renderer)
        return prg.uops
  def _assert(self, dtype: DType, a: Tensor):
    uops = self._schedule_render(a)
    # assert the dtype of the INDEX value; this will need to be updated if the UOp spec changes
    store = next(uop for uop in uops if uop.op is Ops.STORE)
    assert store.op is Ops.STORE
    idx = self._find_op(store, Ops.INDEX)
    # PTX and NIR turn Ops.INDEX into pointer arithmetic earlier than cstyle, plus it's already cast to int64
    if not isinstance(Device[Device.DEFAULT].renderer, (PTXRenderer, NIRRenderer)):
      assert idx.op is Ops.INDEX
      idx_val = idx.src[1]
      assert idx_val.dtype is dtype
  # use expand to generate kernel that uses large idx
  def do_op_then_assert(self, dtype: DType, dim1, dim2, dim3):
    self._assert(dtype, Tensor.empty(dim1, dim2, 1).expand(-1, -1, dim3).contiguous())
  @unittest.skipUnless(is_dtype_supported(dtypes.long), "int64 is supported")
  def test_overflow(self):
    # 2**11, 2**11, 2**11 -> 2**33 will overflow when indexed
    self.do_op_then_assert(dtypes.long, 2048, 2048, 2048)
  @unittest.skipUnless(is_dtype_supported(dtypes.long), "int64 is supported")
  def test_overflow_sym(self):
    # symbolic dim with an upper bound large enough to overflow int32
    self.do_op_then_assert(dtypes.long, 2048, 2048, UOp.variable("dim3", 1, 2048).bind(32))
  def test_regular(self):
    self.do_op_then_assert(dtypes.int, 64, 64, 64)
  def test_regular_sym(self):
    # small symbolic bound keeps indexing in int32
    self.do_op_then_assert(dtypes.int, 2048, 2048, UOp.variable("dim3", 1, 64).bind(32))
  @unittest.skipIf(isinstance(Device[Device.DEFAULT].renderer, (PTXRenderer, NIRRenderer)), "PTX and NIR always converts Ops.INDEX to int64")
  def test_symfold(self):
    # This would cause an overflow, but after sym fold it's within int32
    a = Tensor.arange(65535)
    uops = self._schedule_render(a)
    assert all(uop.dtype is not dtypes.long for uop in uops)
  def test_arange_raise_overflow(self):
    # an int32 arange that cannot represent its own length must raise
    with self.assertRaises(ValueError):
      self._schedule_render(Tensor.arange(2**33, dtype=dtypes.int))
  @unittest.skipIf(is_dtype_supported(dtypes.long), "int64 is supported")
  def test_int64_unsupported_overflow_sym(self):
    with self.assertRaises((KeyError, RuntimeError)):
      self.do_op_then_assert(dtypes.long, 2048, 2048, UOp.variable("dim3", 1, 2048).bind(32))
  @unittest.skipIf(is_dtype_supported(dtypes.long), "int64 is supported")
  @unittest.expectedFailure # bug in gpu dims limiting
  def test_int64_unsupported_overflow(self):
    with self.assertRaises((KeyError, RuntimeError)):
      self.do_op_then_assert(dtypes.long, 2048, 2048, 2048)
  @unittest.skip("This is kept for reference, it requires large memory to run")
  def test_overflow_kernel_run(self):
    # This creates a total of 2**31+10 elements, requiring at least 2147 MB memory to run
    # Modified example from issue 3271
    a = Tensor.empty(2**11, 2**11, 1, dtype=dtypes.int8).permute((2, 0, 1)).expand((2**9+10, -1, -1)).contiguous()
    a.realize()
class TestTensorUnique(unittest.TestCase):
  """Which realized tensors must (or must not) share an underlying buffer."""
  def test_empty_bufs_unique(self):
    t1 = Tensor.empty(10, 10).contiguous()
    t2 = Tensor.empty(10, 10).contiguous()
    Tensor.realize(t1, t2)
    self.assertIsNot(t1.uop.buffer, t2.uop.buffer)
  def test_zeros_bufs_unique_sep(self):
    # realize the two tensors in separate calls
    t1 = Tensor.zeros(10, 10).contiguous()
    Tensor.realize(t1)
    t2 = Tensor.zeros(10, 10).contiguous()
    Tensor.realize(t2)
    self.assertIsNot(t1.uop.buffer, t2.uop.buffer)
  def test_zeros_bufs_unique(self):
    t1 = Tensor.zeros(10, 10).contiguous()
    t2 = Tensor.zeros(10, 10).contiguous()
    Tensor.realize(t1, t2)
    self.assertIsNot(t1.uop.buffer, t2.uop.buffer)
  def test_eye_bufs_unique(self):
    t1 = Tensor.eye(10).contiguous()
    t2 = Tensor.eye(10).contiguous()
    Tensor.realize(t1, t2)
    self.assertIsNot(t1.uop.buffer, t2.uop.buffer)
  def test_times_2_not_unique(self):
    # identical expressions over the same source tensor share one buffer
    src = Tensor.zeros(10, 10).contiguous()
    dbl_a = src * 2
    dbl_b = src * 2
    Tensor.realize(dbl_a, dbl_b)
    self.assertIs(dbl_a.uop.buffer, dbl_b.uop.buffer)
# run this test file directly
if __name__ == '__main__':
  unittest.main()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/null/test_tensor.py",
"license": "MIT License",
"lines": 144,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:test/null/test_tensor_io.py | import unittest
from tinygrad import Tensor, dtypes
from tinygrad.nn.state import TensorIO
class TestTensorIO(unittest.TestCase):
  """File-like construction and seek/tell behavior of TensorIO."""
  def test_create(self):
    # both invalid inputs raise ValueError at construction
    with self.assertRaises(ValueError):
      TensorIO(Tensor(b"Hello World").reshape(1, -1))
    with self.assertRaises(ValueError):
      TensorIO(Tensor([], dtype=dtypes.int64).reshape(1, -1))
  def test_seek(self):
    data = Tensor(b"Hello World!")
    fh = TensorIO(data)
    size = len(data)
    self.assertEqual(fh.tell(), 0)               # starts at offset 0
    self.assertEqual(fh.seek(1), 1)              # absolute seek (whence=0 default)
    self.assertEqual(fh.seek(-2, 2), size - 2)   # relative to end
    self.assertEqual(fh.seek(1, 1), size - 1)    # relative to current position
    self.assertEqual(fh.seek(10, 1), size)       # seeking past the end clamps to size
    self.assertEqual(fh.seek(10, 2), size)
    self.assertEqual(fh.seek(-10, 0), 0)         # seeking before the start clamps to 0
# run this test file directly
if __name__ == '__main__':
  unittest.main()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/null/test_tensor_io.py",
"license": "MIT License",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:test/null/test_uops.py | # uops tests that pass on NULL backend (no copyout needed)
import unittest
import numpy as np
from tinygrad.tensor import Tensor
from tinygrad.helpers import Timing, Context
from tinygrad.dtype import dtypes, ConstFloat # noqa: F401
from tinygrad.device import Device
from tinygrad.uop.ops import Ops, UOp, UPat, exec_alu
from tinygrad.uop.spec import shared_spec
from tinygrad.uop.symbolic import sym
from test.helpers import to_uops_list
class TestSafeCast(unittest.TestCase):
  # Simplification of chained casts on bounded variables.
  def test_cast_folds(self):
    # widening then narrowing back to the original dtype is a no-op
    a = UOp.variable("a", 1, 10, dtype=dtypes.int32)
    self.assertEqual(a.cast(dtypes.int64).cast(dtypes.int32).simplify(), a)
    self.assertEqual(a.cast(dtypes.double).cast(dtypes.int32).simplify(), a)
    a = UOp.variable("a", 1, 10, dtype=dtypes.uint8)
    self.assertEqual(a.cast(dtypes.int64).cast(dtypes.uint8).simplify(), a)
    self.assertEqual(a.cast(dtypes.uint32).cast(dtypes.uint8).simplify(), a)
  def test_remove_intermediate_cast(self):
    # the intermediate cast can be dropped when it loses no information
    a = UOp.variable("a", 0., 100., dtype=dtypes.half)
    self.assertEqual(a.cast(dtypes.double).cast(dtypes.float).simplify(), a.cast(dtypes.float))
    a = UOp.variable("a", 1, 10, dtype=dtypes.int32)
    # TODO: double preserves certain int dtypes
    self.assertEqual(a.cast(dtypes.double).cast(dtypes.float).simplify(), a.cast(dtypes.float))
    self.assertEqual(a.cast(dtypes.int64).cast(dtypes.int16).simplify(), a.cast(dtypes.int16))
    a = UOp.variable("a", 1, 10, dtype=dtypes.uint8)
    self.assertEqual(a.cast(dtypes.int64).cast(dtypes.int32).simplify(), a.cast(dtypes.int32))
  def test_safe_cast_using_bounds(self):
    # value-range information makes otherwise-lossy intermediate casts removable
    a = UOp.variable("a", 1, 10, dtype=dtypes.uint64)
    self.assertEqual(a.cast(dtypes.int16).cast(dtypes.int).simplify(), a.cast(dtypes.int))
    a = UOp.variable("a", -10, 10, dtype=dtypes.int32)
    self.assertEqual(a.cast(dtypes.int8).cast(dtypes.int64).simplify(), a.cast(dtypes.int64))
    self.assertEqual(a.cast(dtypes.int8).cast(dtypes.float).simplify(), a.cast(dtypes.float))
class TestExecALU(unittest.TestCase):
  # Scalar semantics of exec_alu for individual ops.
  def test_sqrt(self):
    self.assertEqual(exec_alu(Ops.SQRT, dtypes.float, (0.0,)), 0.0)
  def test_div(self):
    # integer division truncates toward zero (7//-3 -> -2, -50//6 -> -8)
    self.assertEqual(exec_alu(Ops.IDIV, dtypes.int8, (8, 2)), 4)
    self.assertEqual(exec_alu(Ops.IDIV, dtypes.int8, (7, 3)), 2)
    self.assertEqual(exec_alu(Ops.IDIV, dtypes.int8, (7, -3)), -2)
    self.assertEqual(exec_alu(Ops.IDIV, dtypes.int8, (-50, 6)), -8)
    # float "division" is expressed as multiply-by-reciprocal
    np.testing.assert_allclose(exec_alu(Ops.MUL, dtypes.float32, (7.0, exec_alu(Ops.RECIPROCAL, dtypes.float32, (3.0,)))), 2+(1.0/3.0))
    np.testing.assert_allclose(exec_alu(Ops.MUL, dtypes.float32, (7.0, exec_alu(Ops.RECIPROCAL, dtypes.float32, (-3.0,)))), -2-(1.0/3.0))
  def test_recip(self):
    np.testing.assert_allclose(exec_alu(Ops.RECIPROCAL, dtypes.float32, (8,)), 1/8)
    np.testing.assert_allclose(exec_alu(Ops.RECIPROCAL, dtypes.float32, (7,)), 1/7)
    np.testing.assert_allclose(exec_alu(Ops.RECIPROCAL, dtypes.float32, (-3,)), 1/-3)
    np.testing.assert_allclose(exec_alu(Ops.RECIPROCAL, dtypes.float32, (-50,)), 1/-50)
    np.testing.assert_allclose(exec_alu(Ops.RECIPROCAL, dtypes.float32, ((32+521+3),)), 1/(32+521+3))
    np.testing.assert_allclose(exec_alu(Ops.RECIPROCAL, dtypes.float32, ((34**2),)), 1/(34**2))
    np.testing.assert_allclose(exec_alu(Ops.RECIPROCAL, dtypes.float32, (10,)), 1/10)
  def test_bool_cmplt(self):
    # boolean ordering: False < True only
    self.assertEqual(exec_alu(Ops.CMPLT, dtypes.bool, (False, False)), False)
    self.assertEqual(exec_alu(Ops.CMPLT, dtypes.bool, (False, True)), True)
    self.assertEqual(exec_alu(Ops.CMPLT, dtypes.bool, (True, False)), False)
    self.assertEqual(exec_alu(Ops.CMPLT, dtypes.bool, (True, True)), False)
  def test_bool_cmpne(self):
    self.assertEqual(exec_alu(Ops.CMPNE, dtypes.bool, (False, False)), False)
    self.assertEqual(exec_alu(Ops.CMPNE, dtypes.bool, (False, True)), True)
    self.assertEqual(exec_alu(Ops.CMPNE, dtypes.bool, (True, False)), True)
    self.assertEqual(exec_alu(Ops.CMPNE, dtypes.bool, (True, True)), False)
  def test_bool_where(self):
    self.assertEqual(exec_alu(Ops.WHERE, dtypes.bool, (False, False, False)), False)
    self.assertEqual(exec_alu(Ops.WHERE, dtypes.int, (False, 2, 4)), 4)
    np.testing.assert_allclose(exec_alu(Ops.WHERE, dtypes.float, (False, 2.2, 4.5)), 4.5)
  def test_overflow(self):
    # by default results wrap to the dtype's range
    self.assertEqual(exec_alu(Ops.ADD, dtypes.uint8, (250, 250)), 244)
    self.assertEqual(exec_alu(Ops.ADD, dtypes.uint8, (256, 0)), 0)
    self.assertEqual(exec_alu(Ops.ADD, dtypes.uint8, (0, -1)), 255)
    self.assertEqual(exec_alu(Ops.ADD, dtypes.uint8, (0, -1000)), 24)
    self.assertEqual(exec_alu(Ops.ADD, dtypes.int8, (127, 0)), 127)
    self.assertEqual(exec_alu(Ops.ADD, dtypes.int8, (-128, 0)), -128)
    self.assertEqual(exec_alu(Ops.ADD, dtypes.int8, (-100, -100)), 56)
    self.assertEqual(exec_alu(Ops.ADD, dtypes.int8, (-1000, -0)), 24)
    self.assertEqual(exec_alu(Ops.ADD, dtypes.int8, (-130, -0)), 126)
    self.assertEqual(exec_alu(Ops.ADD, dtypes.int8, (1, 1)), 2)
    self.assertEqual(exec_alu(Ops.ADD, dtypes.int8, (-128, 0)), -128)
    # test no truncate
    self.assertEqual(exec_alu(Ops.ADD, dtypes.uint8, (250, 250), truncate_output=False), 500)
class TestGatedStoreRewrite(unittest.TestCase):
  # Stores whose index carries a valid() gate must be wrapped in IF/ENDIF.
  def test_tiny_gate_store(self):
    gmem = UOp(Ops.PARAM, dtypes.float.ptr(), (), 0)
    gidx0 = UOp(Ops.SPECIAL, dtypes.int, (UOp.const(dtypes.int, 4),), 'gidx0')
    gate = gidx0<UOp.const(dtypes.int, 1)
    idx = UOp(Ops.INDEX, dtypes.float.ptr(), (gmem, (gidx0 * UOp.const(dtypes.int, 2)).valid(gate)))
    val = UOp.const(dtypes.float, 42.0)
    store = UOp(Ops.STORE, dtypes.void, (idx, val))
    uops = to_uops_list([store])
    if_uop = next(u for u in uops if u.op is Ops.IF)
    endif = next(u for u in uops if u.op is Ops.ENDIF)
    assert endif.src[0] is if_uop
    # only the gated store sits between IF and ENDIF
    gated_uops = tuple(uops[uops.index(if_uop)+1:uops.index(endif)])
    self.assertEqual(len(gated_uops), 1)
    self.assertIs(gated_uops[-1].op, Ops.STORE)
  def test_gate_some_stores(self):
    # only the store with a gated index moves inside the IF; the ungated one stays out
    gmem0 = UOp(Ops.PARAM, dtypes.float.ptr(), (), 0)
    gmem1 = UOp(Ops.PARAM, dtypes.float.ptr(), (), 1)
    gidx0 = UOp(Ops.SPECIAL, dtypes.int, (UOp.const(dtypes.int, 4),), 'gidx0')
    idx = gidx0 * UOp.const(dtypes.int, 2)
    idx0 = UOp(Ops.INDEX, dtypes.float.ptr(), (gmem0, idx.valid(gidx0<UOp.const(dtypes.int, 1))))
    idx1 = UOp(Ops.INDEX, dtypes.float.ptr(), (gmem1, idx))
    val = UOp.const(dtypes.float, 42.0)
    stores = [UOp.store(idx0, val), UOp.store(idx1, val)]
    uops = to_uops_list(stores)
    if_uop = next(u for u in uops if u.op is Ops.IF)
    endif = next(u for u in uops if u.op is Ops.ENDIF)
    assert endif.src[0] is if_uop
    gated_uops = tuple(uops[uops.index(if_uop)+1:uops.index(endif)])
    self.assertEqual(len(gated_uops), 1)
    self.assertIs(gated_uops[-1].op, Ops.STORE)
  # scaled down version of TestLinearizerDumb.test_unmerged_ifs
  @unittest.skip("we don't merge ifs anymore")
  def test_merge_ifs_alt(self):
    gmem0 = UOp(Ops.PARAM, dtypes.float.ptr(), (), 0)
    gmem1 = UOp(Ops.PARAM, dtypes.float.ptr(), (), 1)
    gidx0 = UOp(Ops.SPECIAL, dtypes.int, (UOp.const(dtypes.int, 4),), 'gidx0')
    idx = gidx0*UOp.const(dtypes.int, 2)
    gate = gidx0<UOp.const(dtypes.int, 1)
    idx0 = UOp(Ops.INDEX, dtypes.float.ptr(), (gmem0, idx, gate))
    idx1 = UOp(Ops.INDEX, dtypes.float.ptr(), (gmem1, idx, gate))
    val = UOp.const(dtypes.float, 42.0)
    stores = [UOp.store(idx0, val), UOp.store(idx1, val)]
    uops = to_uops_list(stores)
    ifs = [u for u in uops if u.op is Ops.IF]
    endifs = [u for u in uops if u.op is Ops.ENDIF]
    self.assertEqual(len(ifs), 1)
    self.assertEqual(len(endifs), 1)
    gated_uops = tuple(uops[uops.index(ifs[0])+1:uops.index(endifs[0])])
    self.assertEqual(len(gated_uops), 2)
    for x in gated_uops: self.assertIs(x.op, Ops.STORE)
@unittest.skipIf(Device.DEFAULT == "METAL", "compiler bug")
@unittest.skipUnless(Ops.SHR in Device[Device.DEFAULT].renderer.code_for_op, "fast_idiv requires SHR")
class TestFastIdiv(unittest.TestCase):
def test_division_power_of_two(self):
for dt in (dtypes.int32, dtypes.uint32):
g = UOp(Ops.PARAM, dt.ptr(), (), 0)
c = UOp.const(dt, 2)
l = g.index(c)
a = UOp(Ops.IDIV, dt, (l, c))
uops = to_uops_list([a], ren=Device[Device.DEFAULT].renderer)
Device[Device.DEFAULT].renderer.render(uops)
ops = [x.op for x in uops]
self.assertIn(Ops.SHR, ops, f"For dtype={dt} divison by power of two did not simplify to shift")
self.assertNotIn(Ops.IDIV, ops, f"For dtype={dt} divison by power of two did not simplify to shift")
@unittest.skipIf(Device.DEFAULT == "WEBGPU", "WEBGPU doesn't support long")
def test_fast_idiv_and_mod(self):
g = UOp(Ops.PARAM, dtypes.uint32.ptr(), (), 0)
c = UOp.const(dtypes.uint, 3)
l = g.index(c)
a = UOp(Ops.IDIV, dtypes.uint, (l, c))
uops = to_uops_list([a], ren=Device[Device.DEFAULT].renderer)
Device[Device.DEFAULT].renderer.render(uops)
ops = [x.op for x in uops]
self.assertIn(Ops.SHR, ops)
self.assertNotIn(Ops.IDIV, ops)
b = UOp(Ops.MOD, dtypes.uint, (l, c))
uops = to_uops_list([b], ren=Device[Device.DEFAULT].renderer)
Device[Device.DEFAULT].renderer.render(uops)
ops = [x.op for x in uops]
self.assertIn(Ops.SHR, ops)
self.assertNotIn(Ops.MOD, ops)
def test_fast_idiv_remove_powers_of_two(self):
ridx = UOp.range(2**20, 0)
uops = to_uops_list([ridx//(7*64)], ren=Device[Device.DEFAULT].renderer)
ops = [x.op for x in uops]
# this requires shifting out the powers of two before doing fast_idiv
# (((ridx0>>6)*18725)>>17) instead of (int)((((long)(ridx0)*1198373)>>29))
self.assertNotIn(Ops.CAST, ops)
@unittest.expectedFailure
def test_fast_idiv_overflow(self):
# This will be possible with a slightly different method for fast_idiv
g = UOp(Ops.PARAM, dtypes.uint32.ptr(), (), 0)
c = UOp.const(dtypes.uint, 7)
l = UOp(Ops.LOAD, dtypes.uint, (g.index(c),))
a = UOp(Ops.IDIV, dtypes.uint, (l, c))
uops = to_uops_list([a], ren=Device[Device.DEFAULT].renderer)
Device[Device.DEFAULT].renderer.render(uops)
ops = [x.op for x in uops]
self.assertIn(Ops.SHR, ops)
self.assertNotIn(Ops.IDIV, ops)
def test_disable_fast_idiv(self):
g = UOp(Ops.PARAM, dtypes.uint32.ptr(), (), 0)
c = UOp.const(dtypes.uint, 3)
l = g.index(c)
a = UOp(Ops.IDIV, dtypes.uint, (l, c))
with Context(DISABLE_FAST_IDIV=1):
uops = to_uops_list([a], ren=Device[Device.DEFAULT].renderer)
ops = [x.op for x in uops]
self.assertNotIn(Ops.SHR, ops)
self.assertIn(Ops.IDIV, ops)
class TestUOpMethod(unittest.TestCase):
@unittest.skip("uops lt no longer ordered")
def test_compare_alu_same_src_different_arg(self):
a = UOp.const(dtypes.float, 2.0)
b = UOp.const(dtypes.float, 3.0)
add = UOp(Ops.ADD, dtypes.float, (a, b))
mul = UOp(Ops.MUL, dtypes.float, (a, b))
assert (add < mul) or (mul < add), "add and mul with same src should have an order"
def test_uop_variables(self):
a = UOp.variable("a", 1, 10)
uop_var = Tensor(a.bind(1))
st_var = Tensor.empty((2, 10))[:, :a.bind(1)]
_, var_vals = (uop_var+st_var).schedule_with_vars()
self.assertEqual(len(var_vals), 1)
self.assertEqual(list(var_vals)[0], a.expr)
def test_const_factor(self):
gidx0 = UOp(Ops.SPECIAL, dtypes.int, (UOp.const(dtypes.int, 8),), 'gidx0')
self.assertEqual(UOp.const(dtypes.int, 17).const_factor(), 17)
self.assertEqual(gidx0.const_factor(), 1)
self.assertEqual((gidx0*3).const_factor(), 3)
self.assertEqual((gidx0*3+6).const_factor(), 3)
self.assertEqual((gidx0*3+1).const_factor(), 1)
def test_replace(self):
x = UOp(Ops.PARAM, dtypes.int.ptr(), (), 0)
self.assertIs(x.replace(arg=None).arg, None)
with self.assertRaises(AssertionError): x.replace(field="a")
def test_const_zero_neg_zero_different(self):
# -0.0 and 0.0 must be different UOps (for IEEE754 correctness, e.g. 1/-0.0 = -inf)
pos_zero = UOp.const(dtypes.float, 0.0)
neg_zero = UOp.const(dtypes.float, -0.0)
self.assertIsNot(pos_zero, neg_zero)
self.assertNotEqual(hash(pos_zero.arg), hash(neg_zero.arg))
def test_const_nan_same(self):
# nan constants should be deduplicated
nan1 = UOp.const(dtypes.float, float('nan'))
nan2 = UOp.const(dtypes.float, float('nan'))
self.assertIs(nan1, nan2)
class TestUOpStr(unittest.TestCase):
def test_uop_str(self):
a = UOp.const(dtypes.float, 2.0) + UOp.const(dtypes.float, 3.0)
for _ in range(20): a = a + a
assert len(str(a)) < 10_000, "exponential string growth"
assert str(eval(str(a))) == str(a)
def test_vectorized_str(self):
vec = UOp(Ops.VECTORIZE, dtypes.int.vec(4), tuple(UOp.const(dtypes.int, x) for x in range(4)))
assert str(eval(str(vec))) == str(vec)
def test_device_arg(self):
device = UOp(Ops.DEVICE, arg="CL")
assert str(eval(str(device))) == str(device)
def test_reduceop_arg(self):
sum_uop = Tensor.empty(32, 32).sum().uop
assert str(eval(str(sum_uop))) == str(sum_uop)
class TestUPatHelpers(unittest.TestCase):
def test_location(self):
self.assertEqual(sym.patterns[-1][0].location[0].replace("\\", "/").split("/")[-1], "symbolic.py")
self.assertEqual(shared_spec.patterns[0][0].location[0].replace("\\", "/").split("/")[-1], "spec.py")
test_upat = UPat(Ops.CONST, dtypes.bool)
self.assertEqual(test_upat.location[0].replace("\\", "/").split("/")[-1], __file__.replace("\\", "/").split("/")[-1])
test_upat_named = test_upat.named("test_name")
self.assertEqual(test_upat.location[0], test_upat_named.location[0])
self.assertNotEqual(test_upat.location[1], test_upat_named.location[1])
class TestUopsObject(unittest.TestCase):
def test_timing(self):
with Timing("create 10k uops:"): ret = [UOp(Ops.CONST, dtypes.int, arg=10000000+i) for i in range(10000)]
assert len(ret) == 10000
def test_nested(self):
a = UOp.new_buffer(Device.DEFAULT, 1, dtypes.char)
for _ in range(10_000): a = a+a
self.assertEqual(a.device, Device.DEFAULT)
class TestUOpRender(unittest.TestCase):
def test_render_vectorize_empty(self):
u = UOp(Ops.VECTORIZE, dtype=dtypes.int.vec(0), src=())
self.assertEqual(u.render(simplify=False), "{}")
def test_render_vectorize_empty_simplified(self):
u = UOp(Ops.VECTORIZE, dtype=dtypes.int.vec(0), src=())
self.assertEqual(u.render(), "{}")
def test_render_vectorize_same(self):
u = UOp(Ops.VECTORIZE, dtype=dtypes.int.vec(3), src=(UOp.const(dtypes.int, 0), UOp.const(dtypes.int, 0), UOp.const(dtypes.int, 0)))
self.assertEqual(u.render(simplify=False), "{0, ...}")
def test_render_vectorize_different(self):
u = UOp(Ops.VECTORIZE, dtype=dtypes.int.vec(3), src=(UOp.const(dtypes.int, 0), UOp.const(dtypes.int, 1), UOp.const(dtypes.int, 2)))
self.assertEqual(u.render(simplify=False), "{0,1,2}")
def test_render_vectorize_same_simplified(self):
u = UOp(Ops.VECTORIZE, dtype=dtypes.int.vec(3), src=(UOp.const(dtypes.int, 0), UOp.const(dtypes.int, 0), UOp.const(dtypes.int, 0)))
self.assertEqual(u.render(), "0")
def test_render_vectorize_different_simplified(self):
u = UOp(Ops.VECTORIZE, dtype=dtypes.int.vec(3), src=(UOp.const(dtypes.int, 0), UOp.const(dtypes.int, 1), UOp.const(dtypes.int, 2)))
self.assertEqual(u.render(), "(0, 1, 2)")
if __name__ == '__main__':
unittest.main()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/null/test_uops.py",
"license": "MIT License",
"lines": 281,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:test/null/test_winograd.py | import unittest, sys
from tinygrad import Tensor, GlobalCounters, dtypes, Context
from tinygrad.helpers import CI, Profiling, WINO
@unittest.skipIf(sys.platform.startswith("win"), "flaky on Windows")
class TestWinograd(unittest.TestCase):
def setUp(self):
self.old = WINO.value
WINO.value = 1
def tearDown(self):
WINO.value = self.old
def test_profile(self):
x,w = Tensor.rand(1,4,9,9).realize(), Tensor.rand(4,4,3,3).realize()
with Profiling(enabled=not CI, sort='time'):
Tensor.conv2d(x,w).realize()
def test_forward_kernels(self):
x,w = Tensor.rand(1,4,9,9).realize(), Tensor.rand(4,4,3,3).realize()
out = Tensor.conv2d(x,w)
self.assertEqual(len(out.schedule()), 2)
def test_backward_kernels(self):
x,w = Tensor.empty(1,4,9,9,requires_grad=True).realize(), Tensor.empty(4,4,3,3,requires_grad=True).realize()
out = Tensor.conv2d(x,w, padding=1)
out.mean().backward()
backward_schedule = Tensor.schedule(x.grad, w.grad)
self.assertEqual(len(backward_schedule), 4)
def test_counters(self):
IC, OC, X, Y = 4,4,9,9
x,w = Tensor.rand(1,IC,Y,X).realize(), Tensor.rand(OC,IC,3,3).realize()
GlobalCounters.reset()
with Context(WINO=1):
Tensor.conv2d(x,w).realize()
ops_wino, mem_wino = GlobalCounters.global_ops, GlobalCounters.global_mem
GlobalCounters.reset()
with Context(WINO=0):
Tensor.conv2d(x,w).realize()
ops_normal, mem_normal = GlobalCounters.global_ops, GlobalCounters.global_mem
ops_ratio, mem_ratio = ops_wino/ops_normal, mem_wino/mem_normal
print(f"ops: normal {ops_normal:9d} wino {ops_wino:9d} ratio {ops_ratio:.2f}")
print(f"mem: normal {mem_normal:9d} wino {mem_wino:9d} ratio {mem_ratio:.2f}")
# TODO: what's optimal on this?
self.assertLess(ops_ratio, 4.3)
self.assertLess(mem_ratio, 4)
def test_dtype(self):
IC, OC, X, Y = 4,4,9,9
x,w = Tensor.empty(1,IC,Y,X), Tensor.empty(OC,IC,3,3)
self.assertEqual(Tensor.conv2d(x,w).dtype, dtypes.default_float)
x,w = Tensor.empty(1,IC,Y,X,dtype=dtypes.half), Tensor.empty(OC,IC,3,3,dtype=dtypes.half)
self.assertEqual(Tensor.conv2d(x,w).dtype, dtypes.half)
if __name__ == '__main__':
unittest.main(verbosity=2)
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/null/test_winograd.py",
"license": "MIT License",
"lines": 49,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:test/external/external_test_gpu_crash.py | # ruff: noqa: F405
"""Tests for GPU crash scenarios using AMD assembly to trigger invalid operations.
These tests intentionally cause GPU faults to verify error handling.
Run with: AMD=1 python -m pytest test/external/external_test_gpu_crash.py -v
"""
import unittest, re, importlib
from tinygrad.device import Device
from tinygrad.renderer.amd.dsl import s, v, Inst, NULL
RDNA3_CDNA3_MAP = {"v_mov_b32_e32": "v_mov_b32_e32", "s_mov_b32": "s_mov_b32", "s_waitcnt": "s_waitcnt", "s_endpgm": "s_endpgm",
"global_load_b32": "global_load_dword", "global_store_b32": "global_store_dword",
"global_atomic_add_u32": "global_atomic_add", "flat_load_b32": "flat_load_dword",
"flat_store_b32": "flat_store_dword", "flat_atomic_add_u32": "flat_atomic_add", "s_load_b32": "s_load_dword"}
def assemble(code:str, name:str="test", is_cdna:bool=False) -> str:
kd = {"next_free_vgpr": 8, "next_free_sgpr": 8, "user_sgpr_kernarg_segment_ptr": 1, "kernarg_size": 8}
if is_cdna: kd["accum_offset"] = 8
else: kd["wavefront_size32"] = 1
return f".text\n.globl {name}\n.p2align 8\n.type {name},@function\n{name}:\n{code}\n.rodata\n.p2align 6\n.amdhsa_kernel {name}\n" + \
"\n".join(f".amdhsa_{k} {v}" for k,v in kd.items()) + "\n.end_amdhsa_kernel"
@unittest.skipIf(Device.DEFAULT != "AMD", "AMD required")
class TestGPUCrash(unittest.TestCase):
@classmethod
def setUpClass(cls):
from tinygrad.runtime.support.compiler_amd import HIPCompiler
cls.dev = Device["AMD"]
cls.compiler = HIPCompiler(cls.dev.arch)
cls.is_cdna = cls.dev.target[0] < 10
ins = importlib.import_module('tinygrad.runtime.autogen.amd.' + ('cdna' if cls.is_cdna else 'rdna3') + '.ins')
for rdna3_name, cdna3_name in RDNA3_CDNA3_MAP.items():
setattr(cls, rdna3_name, getattr(ins, cdna3_name if cls.is_cdna else rdna3_name))
def setUp(self):
# Verify device works before each test
from tinygrad import Tensor
try:
t = Tensor([1.0, 2.0], device="AMD").realize()
assert (t + 1).numpy().tolist() == [2.0, 3.0]
except Exception:
self.fail("Device not working before test")
def _run(self, code: str):
from tinygrad.runtime.ops_amd import AMDProgram
prg = AMDProgram(self.dev, "test", self.compiler.compile(assemble(code, is_cdna=self.is_cdna)))
prg(self.dev.allocator.alloc(64), global_size=(1,1,1), local_size=(1,1,1), wait=True)
def _run_insts(self, insts: list[Inst]):
from test.amd.disasm import disasm
self._run("\n".join(disasm(i) for i in insts))
def _assert_gpu_fault(self, func):
"""Assert that func raises a RuntimeError indicating a GPU fault (not a setup error)."""
with self.assertRaises(RuntimeError) as cm:
func()
err_msg = str(cm.exception).lower()
# Verify it's a GPU fault, not a setup/device initialization error
self.assertTrue(
re.search(r'fault|hang|timeout|illegal|memviol', err_msg),
f"Expected GPU fault error, got: {cm.exception}"
)
class TestOutOfBoundsMemoryAccess(TestGPUCrash):
"""Tests for out-of-bounds memory accesses."""
def test_global_load_null_ptr(self):
"""Global load from NULL pointer."""
insts = [self.v_mov_b32_e32(v[0], 0), self.v_mov_b32_e32(v[1], 0),
self.global_load_b32(v[2], addr=v[0:1], saddr=NULL, offset=0), self.s_waitcnt(0), self.s_endpgm()]
self._assert_gpu_fault(lambda: self._run_insts(insts))
def test_global_store_null_ptr(self):
"""Global store to NULL pointer."""
insts = [self.v_mov_b32_e32(v[0], 0), self.v_mov_b32_e32(v[1], 0), self.v_mov_b32_e32(v[2], 0xDEADBEEF),
self.global_store_b32(addr=v[0:1], data=v[2], saddr=NULL, offset=0), self.s_waitcnt(0), self.s_endpgm()]
self._assert_gpu_fault(lambda: self._run_insts(insts))
def test_global_load_unmapped_high_address(self):
"""Global load from high unmapped address (0xDEAD00000000)."""
insts = [self.v_mov_b32_e32(v[0], 0x00000000), self.v_mov_b32_e32(v[1], 0xDEAD),
self.global_load_b32(v[2], addr=v[0:1], saddr=NULL, offset=0), self.s_waitcnt(0), self.s_endpgm()]
self._assert_gpu_fault(lambda: self._run_insts(insts))
def test_global_store_unmapped_high_address(self):
"""Global store to high unmapped address."""
insts = [self.v_mov_b32_e32(v[0], 0x00000000), self.v_mov_b32_e32(v[1], 0xDEAD), self.v_mov_b32_e32(v[2], 0x12345678),
self.global_store_b32(addr=v[0:1], data=v[2], saddr=NULL, offset=0), self.s_waitcnt(0), self.s_endpgm()]
self._assert_gpu_fault(lambda: self._run_insts(insts))
def test_global_atomic_unmapped(self):
"""Atomic operation on unmapped memory."""
insts = [self.v_mov_b32_e32(v[0], 0xBEEF0000), self.v_mov_b32_e32(v[1], 0xDEAD), self.v_mov_b32_e32(v[2], 1),
self.global_atomic_add_u32(addr=v[0:1], data=v[2], saddr=NULL, offset=0), self.s_waitcnt(0), self.s_endpgm()]
self._assert_gpu_fault(lambda: self._run_insts(insts))
class TestSMEMFaults(TestGPUCrash):
"""Tests for scalar memory (SMEM) faults."""
def test_smem_load_null(self):
"""SMEM load from NULL base."""
insts = [self.s_mov_b32(s[2], 0), self.s_mov_b32(s[3], 0),
self.s_load_b32(s[4], s[2:3], 0, soffset=NULL), self.s_waitcnt(0), self.s_endpgm()]
self._assert_gpu_fault(lambda: self._run_insts(insts))
def test_smem_load_unmapped(self):
"""SMEM load from unmapped address."""
insts = [self.s_mov_b32(s[2], 0xBEEF0000), self.s_mov_b32(s[3], 0xDEAD),
self.s_load_b32(s[4], s[2:3], 0, soffset=NULL), self.s_waitcnt(0), self.s_endpgm()]
self._assert_gpu_fault(lambda: self._run_insts(insts))
class TestFlatMemoryFaults(TestGPUCrash):
"""Tests for FLAT memory instruction faults."""
def test_flat_load_null(self):
"""FLAT load from NULL address."""
insts = [self.v_mov_b32_e32(v[0], 0), self.v_mov_b32_e32(v[1], 0),
self.flat_load_b32(v[2], addr=v[0:1], saddr=NULL, offset=0), self.s_waitcnt(0), self.s_endpgm()]
self._assert_gpu_fault(lambda: self._run_insts(insts))
def test_flat_store_null(self):
"""FLAT store to NULL address."""
insts = [self.v_mov_b32_e32(v[0], 0), self.v_mov_b32_e32(v[1], 0), self.v_mov_b32_e32(v[2], 0xDEADBEEF),
self.flat_store_b32(addr=v[0:1], data=v[2], saddr=NULL, offset=0), self.s_waitcnt(0), self.s_endpgm()]
self._assert_gpu_fault(lambda: self._run_insts(insts))
def test_flat_atomic_null(self):
"""FLAT atomic on NULL address."""
insts = [self.v_mov_b32_e32(v[0], 0), self.v_mov_b32_e32(v[1], 0), self.v_mov_b32_e32(v[2], 1),
self.flat_atomic_add_u32(addr=v[0:1], data=v[2], saddr=NULL, offset=0), self.s_waitcnt(0), self.s_endpgm()]
self._assert_gpu_fault(lambda: self._run_insts(insts))
if __name__ == "__main__":
unittest.main()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/external/external_test_gpu_crash.py",
"license": "MIT License",
"lines": 112,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:extra/gemm/asm/cdna/gemm.py | import atexit, functools
from tinygrad import Tensor, Device, dtypes
from tinygrad.dtype import AddrSpace
from tinygrad.uop.ops import UOp, Ops, KernelInfo, AxisType
from tinygrad.renderer import Estimates
from tinygrad.helpers import getenv, all_same, DEBUG
from extra.gemm.asm.cdna.asm import build_kernel, TILE_M, TILE_N, TILE_K, NUM_WG
# ** CDNA4 assembly gemm
WORKGROUP_SIZE = 256
@functools.cache
def custom_asm_gemm(C:UOp, A:UOp, B:UOp, dname:str) -> UOp:
batch, M, K = A.shape
K2, N = B.shape[(1 if B.ndim == 3 else 0):]
assert K == K2
lidx = UOp.special(WORKGROUP_SIZE, "lidx0")
gidx = UOp.special(NUM_WG, "gidx0")
insts = build_kernel(batch, M, N, K, A.dtype.base)
lds = UOp(Ops.DEFINE_LOCAL, dtypes.uint8.ptr(size=133_120, addrspace=AddrSpace.LOCAL), (), 'lds')
sink = UOp.sink(C.base, A.base, B.base, lds, lidx, gidx,
arg=KernelInfo(name=f"gemm_{batch}_{M}_{N}_{K}", estimates=Estimates(ops=2*batch*M*N*K, mem=(batch*M*K + K*N + batch*M*N)*2)))
return UOp(Ops.PROGRAM, src=(sink, UOp(Ops.DEVICE, arg=dname),
UOp(Ops.LINEAR, src=tuple([UOp(Ops.INS, arg=x) for x in insts]))))
counters = {"used":0, "todos":[]}
def todo(msg:str) -> bool: counters["todos"].append(msg); return False
def _asm_gemm_report():
print(f'asm_gemm: {counters["used"]} used, {len(counters["todos"])} not used')
if DEBUG >= 2 and counters["todos"]:
from collections import Counter
for msg, cnt in Counter(counters["todos"]).most_common(): print(f' {cnt:3d}x {msg}')
atexit.register(_asm_gemm_report)
def can_use_asm_gemm(a:Tensor, b:Tensor) -> bool:
if a.dtype != b.dtype: return todo(f"dtypes must match {a.dtype} != {b.dtype}")
if a.dtype not in {dtypes.bfloat16, dtypes.float16}: return todo(f"only bfloat16/float16, got {a.dtype}")
batch, M, K = (1, *a.shape) if a.ndim == 2 else a.shape
N = b.shape[1]
if isinstance(a.device, tuple):
if a.ndim == 2 and a.uop.axis == 0 and b.uop.axis is None: M //= len(a.device)
elif a.ndim == 2 and a.uop.axis == 1 and b.uop.axis == 0: K //= len(a.device)
elif a.ndim == 2 and a.uop.axis is None and b.uop.axis == 1: N //= len(a.device)
elif a.ndim == 3 and a.uop.axis == 0 and b.uop.axis is None: batch //= len(a.device)
elif a.ndim == 3 and a.uop.axis is None and b.uop.axis == 1: N //= len(a.device)
elif a.ndim == 3 and a.uop.axis == 2 and b.uop.axis == 0: K //= len(a.device)
else: return todo(f"sharding mismatch a.ndim={a.ndim} a.uop.axis={a.uop.axis} b.uop.axis={b.uop.axis}")
dname = a.device[0]
else: dname = a.device
arch = getattr(Device[dname].renderer, "arch", "")
if batch not in {1, 2}: return todo(f"GEMM batch size {batch}")
if (M % TILE_M != 0 or N % TILE_N != 0 or K % TILE_K != 0) and arch == "gfx950":
return todo(f"GEMM shape ({M},{N},{K}) not a multiple of ({TILE_M},{TILE_N},{TILE_K})")
return True
# ** UOp gemm to test Tensor.custom_kernel multi and backward correctness on non cdna4
# note: this can be removed after we have GEMM on mixins
def custom_uop_gemm(C:UOp, A:UOp, B:UOp) -> UOp:
M, K = A.shape[0]*A.shape[1], A.shape[2]
K2, N = B.shape[(1 if B.ndim == 3 else 0):]
assert K == K2
m = UOp.range(M, 1, AxisType.LOOP)
n = UOp.range(N, 2, AxisType.LOOP)
k = UOp.range(K, 0, AxisType.REDUCE)
mul = (A.index((m*UOp.const(dtypes.index, K)+k))*B.index((k*UOp.const(dtypes.index, N)+n))).cast(dtypes.float32)
red = mul.reduce(k, arg=Ops.ADD, dtype=dtypes.float32).cast(C.dtype.base)
store = C.index((m*UOp.const(dtypes.index, N)+n), ptr=True).store(red).end(m, n)
return store.sink(arg=KernelInfo(name=f'uop_gemm_{M}_{N}_{K}'))
# ** backward gemm, might use the asm gemm
def custom_gemm_bw(gradient:UOp, kernel:UOp):
out, a, b = kernel.src[1:]
assert all_same([gradient.device, a.device, b.device, out.device])
a_t, b_t, g_t = Tensor(a, device=a.device), Tensor(b, device=a.device), Tensor(gradient, device=a.device)
# TODO: this needs to be cleaned up and done properly, the batch dim of grad and a multi need to align
g_t = g_t[:a.shape[0]]
grad_a = (g_t @ b_t.T).uop
grad_b = (a_t.permute(2, 0, 1).reshape(a_t.shape[2], -1) @ g_t.reshape(-1, g_t.shape[-1])).uop
return (None, grad_a, grad_b)
# ** main gemm function
def asm_gemm(a:Tensor, b:Tensor) -> Tensor:
assert can_use_asm_gemm(a, b), f"{counters['todos'][-1]}"
counters["used"] += 1
unfold_batch = a.ndim == 3 and isinstance(a.device, tuple) and a.uop.axis == 2 and b.uop.axis == 0
if unfold_batch:
orig_batch = a.shape[0]
a = a.reshape(a.shape[0]*a.shape[1], a.shape[2])
squeeze = a.ndim == 2
if squeeze: a = a.unsqueeze(0)
batch, M, K = a.shape
N = b.shape[1]
is_multi = isinstance(a.device, tuple)
if (k_sharded:=is_multi and a.uop.axis == 2): K //= len(a.device)
if (m_sharded:=is_multi and a.uop.axis == 1): M //= len(a.device)
n_sharded = is_multi and b.uop.axis == 1
if is_multi:
if n_sharded:
out = Tensor(Tensor.empty(batch, M, N//len(a.device), dtype=a.dtype, device=a.device).uop.multi(2), device=a.device)
elif m_sharded:
out = Tensor(Tensor.empty(batch, M, N, dtype=a.dtype, device=a.device).uop.multi(1), device=a.device)
else:
out = Tensor(Tensor.empty(batch//len(a.device) if a.uop.axis==0 else batch, M, N, dtype=a.dtype, device=a.device).uop.multi(0), device=a.device)
else:
out = Tensor.empty(batch, M, N, dtype=a.dtype, device=a.device)
renderer = Device[a.device[0] if is_multi else a.device].renderer
dname, arch = renderer.device, getattr(renderer, "arch", "")
if arch.startswith("gfx950") and getenv("USE_ASM", 1):
out = Tensor.custom_kernel(out, a, b, fxn=functools.partial(custom_asm_gemm, dname=dname), grad_fxn=custom_gemm_bw)[0]
else:
out = Tensor.custom_kernel(out, a, b, fxn=custom_uop_gemm, grad_fxn=custom_gemm_bw)[0]
if k_sharded: out = out.sum(0)
out = out.squeeze(0) if squeeze else out
if unfold_batch: out = out.reshape(orig_batch, -1, out.shape[-1])
return out
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "extra/gemm/asm/cdna/gemm.py",
"license": "MIT License",
"lines": 108,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
tinygrad/tinygrad:test/unit/test_call.py | import unittest
import numpy as np
from tinygrad import Tensor, function
from tinygrad.dtype import dtypes
from tinygrad.uop.ops import UOp
class TestCall(unittest.TestCase):
def test_call_plus(self):
a = Tensor.randn(10, 10)
b = Tensor.randn(10, 10)
Tensor.realize(a,b)
# we define a plus function
plus_fxn = UOp.param(0, dtypes.float, (10,10)) + UOp.param(1, dtypes.float, (10,10))
c = Tensor.call(a, b, fxn=plus_fxn)
np.testing.assert_equal(c.numpy(), (a+b).numpy())
def test_call_plus_backward(self):
a = Tensor.ones(10, 10, requires_grad=True)
b = Tensor.ones(10, 10, requires_grad=True)
(a+b).mean().backward()
gt_a_grad = a.grad.numpy()
gt_b_grad = b.grad.numpy()
a.grad, b.grad = None, None
# this is the gradient for +
def grad_fxn(grad:UOp, call:UOp): return (grad, grad)
# we define a plus function
plus_fxn = UOp.param(0, dtypes.float, (10,10)) + UOp.param(1, dtypes.float, (10,10))
c = Tensor.call(a, b, fxn=plus_fxn, grad_fxn=grad_fxn)
c.mean().backward()
np.testing.assert_allclose(a.grad.numpy(), gt_a_grad, rtol=1e-5)
np.testing.assert_allclose(b.grad.numpy(), gt_b_grad, rtol=1e-5)
def test_call_plus_backward_auto(self):
a = Tensor.ones(10, 10, requires_grad=True)
b = Tensor.ones(10, 10, requires_grad=True)
(a+b).mean().backward()
gt_a_grad = a.grad.numpy()
gt_b_grad = b.grad.numpy()
a.grad, b.grad = None, None
plus_fxn = UOp.param(0, dtypes.float, (10,10)) + UOp.param(1, dtypes.float, (10,10))
c = Tensor.call(a, b, fxn=plus_fxn)
c.mean().backward()
np.testing.assert_allclose(a.grad.numpy(), gt_a_grad, rtol=1e-5)
np.testing.assert_allclose(b.grad.numpy(), gt_b_grad, rtol=1e-5)
def test_call_gemm(self):
M, K, N = 4, 8, 4
a = Tensor.randn(M, K)
b = Tensor.randn(K, N)
Tensor.realize(a, b)
c = Tensor.call(a, b, fxn=a.as_param(0) @ b.as_param(1))
np.testing.assert_allclose(c.numpy(), a.numpy() @ b.numpy(), rtol=1e-5, atol=1e-6)
@unittest.skip("needs GEMM on mixins")
def test_call_gemm_uop(self):
M, K, N = 4, 8, 4
a = Tensor.randn(M, K)
b = Tensor.randn(K, N)
Tensor.realize(a, b)
# we define a gemm function
x = UOp.param(0, dtypes.float, shape=(M, K))
y = UOp.param(1, dtypes.float, shape=(K, N))
c = Tensor.call(a, b, fxn=x@y)
np.testing.assert_allclose(c.numpy(), a.numpy() @ b.numpy(), rtol=1e-5, atol=1e-6)
def test_call_complex_backward_auto(self):
# complex chain: (a*b + a).exp2() * b.reciprocal() - tests mul, add, exp2, reciprocal, param reuse
a = Tensor.randn(10, 10, requires_grad=True)
b = Tensor.randn(10, 10, requires_grad=True) + 2 # avoid div by zero
Tensor.realize(a, b)
((a*b + a).exp2() * b.reciprocal()).mean().backward()
gt_a_grad, gt_b_grad = a.grad.numpy(), b.grad.numpy()
a.grad, b.grad = None, None
p0, p1 = UOp.param(0, dtypes.float, (10,10)), UOp.param(1, dtypes.float, (10,10))
complex_fxn = (p0*p1 + p0).exp2() * p1.reciprocal()
c = Tensor.call(a, b, fxn=complex_fxn)
c.mean().backward()
np.testing.assert_allclose(a.grad.numpy(), gt_a_grad, rtol=1e-5)
np.testing.assert_allclose(b.grad.numpy(), gt_b_grad, rtol=1e-5)
def test_call_plus_sharded(self):
devs = ("CPU:0", "CPU:1")
a = Tensor.ones(10, 10).shard(devs, axis=0)
b = Tensor.ones(10, 10).shard(devs, axis=0)
Tensor.realize(a, b)
c = Tensor.call(a, b, fxn=a.as_param(0) + b.as_param(1))
np.testing.assert_equal(c.numpy(), 2 * np.ones((10, 10)))
class TestCallSchedule(unittest.TestCase):
def test_reshape_precompile(self):
a = Tensor.empty(4, 8).realize()
a = a.reshape(4,4,2).assign(Tensor.empty(4,4,2)).reshape(8,4)
@function(precompile=True)
def s(x): return x.sum(axis=0)
(s(a)*3).realize()
def test_call_precompiled(self):
a = Tensor.empty(4, 8)
@function(precompile=True)
def s(x): return x*2
(s(a)*3).realize()
def test_double_call(self):
a = Tensor.empty(4, 8)
@function(precompile=True)
def s(x): return x*2
s(s(a)).realize()
def test_double_call_contiguous(self):
a = Tensor.empty(4, 8)
@function(precompile=True)
def s(x): return x*2
s(s(a).contiguous()).realize()
def test_call_double_gemm(self):
a = Tensor.randn(4, 8, requires_grad=True)
b = Tensor.randn(8, 12, requires_grad=True)
c = Tensor.randn(12, 16, requires_grad=True)
ref = Tensor.randn(4, 16)
Tensor.realize(a,b,c,ref)
@function(precompile=True)
def gemm(a:Tensor, b:Tensor, c:Tensor) -> Tensor: return (a@b)@c
out = gemm(a,b,c)
(out-ref).square().mean().backward()
out.realize(a.grad, b.grad, c.grad)
if __name__ == '__main__':
unittest.main()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/unit/test_call.py",
"license": "MIT License",
"lines": 115,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:test/external/external_test_am_fault_recovery.py | # ruff: noqa: F405
import unittest, subprocess, os
from tinygrad.runtime.autogen.amd.rdna3.ins import * # noqa: F403
from tinygrad.renderer.amd.dsl import s, v, Inst, NULL
def assemble_kernel(insts:list[Inst], name:str="test") -> str:
kd = {"next_free_vgpr": 8, "next_free_sgpr": 8, "wavefront_size32": 1, "user_sgpr_kernarg_segment_ptr": 1, "kernarg_size": 8}
from test.amd.disasm import disasm as _disasm
disasm = "\n".join(_disasm(inst) for inst in insts)
hsasrc = f".text\n.globl {name}\n.p2align 8\n.type {name},@function\n{name}:\n{disasm}\n"
return hsasrc + f".rodata\n.p2align 6\n.amdhsa_kernel {name}\n" + "\n".join(f".amdhsa_{k} {v}" for k, v in kd.items()) + "\n.end_amdhsa_kernel"
def _run(code:str, timeout:float=15.0) -> subprocess.CompletedProcess:
# TODO: AM_RESET is required for now, so subprocesses
return subprocess.run(["python", "-c", code], env={**os.environ, "AMD": "1"}, capture_output=True, text=True, timeout=timeout)
def _run_asm(asm_src:str) -> subprocess.CompletedProcess:
return _run('from tinygrad.device import Device; from tinygrad.runtime.ops_amd import AMDProgram; '
'from tinygrad.runtime.support.compiler_amd import HIPCompiler; dev = Device["AMD"]; '
f'AMDProgram(dev, "test", HIPCompiler(dev.arch).compile("""{asm_src}"""))('
'dev.allocator.alloc(64), global_size=(1,1,1), local_size=(1,1,1), wait=True)')
def _verify_recovery() -> subprocess.CompletedProcess:
return _run('from tinygrad import Tensor; t = Tensor([1.0, 2.0], device="AMD").realize(); assert (t + 1).numpy().tolist() == [2.0, 3.0]')
_ILLEGAL_INST_ASM = ".text\n.globl test\n.p2align 8\n.type test,@function\ntest:\n.byte 0xff,0xff,0xff,0xff\ns_endpgm\n" \
".rodata\n.p2align 6\n.amdhsa_kernel test\n.amdhsa_next_free_vgpr 8\n.amdhsa_next_free_sgpr 8\n" \
".amdhsa_wavefront_size32 1\n.amdhsa_user_sgpr_kernarg_segment_ptr 1\n.amdhsa_kernarg_size 8\n.end_amdhsa_kernel"
@unittest.skipIf(os.environ.get("AMD") != "1" or os.environ.get("MOCKGPU") == "1", "AMD with AM driver required")
class TestAMFaultRecovery(unittest.TestCase):
def _run_kernel(self, insts: list[Inst]) -> subprocess.CompletedProcess: return _run_asm(assemble_kernel(insts))
def _assert_fault_and_recovery(self, result:subprocess.CompletedProcess):
if result.stdout.strip(): print(f"\nstdout: {result.stdout.strip()}")
if result.stderr.strip(): print(f"\nstderr: {result.stderr.strip()}")
self.assertNotEqual(result.returncode, 0, f"Expected fault but succeeded: {result.stdout}")
self.assertEqual(_verify_recovery().returncode, 0)
class TestGlobalMemoryFaults(TestAMFaultRecovery):
  """Faults from global_* vector-memory instructions: unmapped, NULL, and misaligned addresses."""
  def test_global_load_unmapped(self):
    # 64-bit address 0xDEAD_BEEF0000 (v[0]=low, v[1]=high) is never mapped
    insts = [v_mov_b32_e32(v[0], 0xBEEF0000), v_mov_b32_e32(v[1], 0xDEAD),
             global_load_b32(v[2], addr=v[0:1], saddr=NULL, offset=0), s_waitcnt(vmcnt=0), s_endpgm()]
    self._assert_fault_and_recovery(self._run_kernel(insts))
  def test_global_store_unmapped(self):
    # store 0x12345678 to the same unmapped address
    insts = [v_mov_b32_e32(v[0], 0xBEEF0000), v_mov_b32_e32(v[1], 0xDEAD), v_mov_b32_e32(v[2], 0x12345678),
             global_store_b32(addr=v[0:1], data=v[2], saddr=NULL, offset=0), s_waitcnt(vmcnt=0), s_endpgm()]
    self._assert_fault_and_recovery(self._run_kernel(insts))
  def test_global_null_ptr(self):
    # load from address 0
    insts = [v_mov_b32_e32(v[0], 0), v_mov_b32_e32(v[1], 0),
             global_load_b32(v[2], addr=v[0:1], saddr=NULL, offset=0), s_waitcnt(vmcnt=0), s_endpgm()]
    self._assert_fault_and_recovery(self._run_kernel(insts))
  def test_global_misaligned_b64(self):
    # 64-bit load from an odd (not 8-byte-aligned) address
    insts = [v_mov_b32_e32(v[0], 0xBEEF0001), v_mov_b32_e32(v[1], 0xDEAD),
             global_load_b64(v[2:3], addr=v[0:1], saddr=NULL, offset=0), s_waitcnt(vmcnt=0), s_endpgm()]
    self._assert_fault_and_recovery(self._run_kernel(insts))
  def test_global_misaligned_b128(self):
    # 128-bit load from an address aligned only to 4 bytes
    insts = [v_mov_b32_e32(v[0], 0xBEEF0004), v_mov_b32_e32(v[1], 0xDEAD),
             global_load_b128(v[2:5], addr=v[0:1], saddr=NULL, offset=0), s_waitcnt(vmcnt=0), s_endpgm()]
    self._assert_fault_and_recovery(self._run_kernel(insts))
class TestSMEMFaults(TestAMFaultRecovery):
  """Faults from scalar-memory (s_load_*) instructions."""
  def test_smem_null_base(self):
    # scalar load with a zero base address in s[2:3]
    insts = [s_mov_b32(s[2], 0), s_mov_b32(s[3], 0),
             s_load_b32(s[4], s[2:3], 0, soffset=NULL), s_waitcnt(lgkmcnt=0), s_endpgm()]
    self._assert_fault_and_recovery(self._run_kernel(insts))
  def test_smem_unmapped_address(self):
    # scalar load from unmapped 0xDEAD_BEEF0000
    insts = [s_mov_b32(s[2], 0xBEEF0000), s_mov_b32(s[3], 0xDEAD),
             s_load_b32(s[4], s[2:3], 0, soffset=NULL), s_waitcnt(lgkmcnt=0), s_endpgm()]
    self._assert_fault_and_recovery(self._run_kernel(insts))
  def test_smem_misaligned_b64(self):
    # 64-bit scalar load from a 4-byte-aligned address
    insts = [s_mov_b32(s[2], 0xBEEF0004), s_mov_b32(s[3], 0xDEAD),
             s_load_b64(s[4:5], s[2:3], 0, soffset=NULL), s_waitcnt(lgkmcnt=0), s_endpgm()]
    self._assert_fault_and_recovery(self._run_kernel(insts))
  def test_smem_misaligned_b128(self):
    # 128-bit scalar load from a 4-byte-aligned address
    insts = [s_mov_b32(s[2], 0xBEEF0004), s_mov_b32(s[3], 0xDEAD),
             s_load_b128(s[4:7], s[2:3], 0, soffset=NULL), s_waitcnt(lgkmcnt=0), s_endpgm()]
    self._assert_fault_and_recovery(self._run_kernel(insts))
class TestIllegalInstruction(TestAMFaultRecovery):
  """Fault from executing a malformed (all-0xff) instruction encoding."""
  def test_malformed_encoding(self):
    self._assert_fault_and_recovery(_run_asm(_ILLEGAL_INST_ASM))
class TestFlatFaults(TestAMFaultRecovery):
  """Faults from flat_* (generic address space) memory instructions."""
  def test_flat_load_unmapped(self):
    # flat load from unmapped 0xDEAD_BEEF0000
    insts = [v_mov_b32_e32(v[0], 0xBEEF0000), v_mov_b32_e32(v[1], 0xDEAD),
             flat_load_b32(v[2], addr=v[0:1], saddr=NULL, offset=0), s_waitcnt(vmcnt=0, lgkmcnt=0), s_endpgm()]
    self._assert_fault_and_recovery(self._run_kernel(insts))
  def test_flat_store_unmapped(self):
    # flat store to the same unmapped address
    insts = [v_mov_b32_e32(v[0], 0xBEEF0000), v_mov_b32_e32(v[1], 0xDEAD), v_mov_b32_e32(v[2], 0x12345678),
             flat_store_b32(addr=v[0:1], data=v[2], saddr=NULL, offset=0), s_waitcnt(vmcnt=0, lgkmcnt=0), s_endpgm()]
    self._assert_fault_and_recovery(self._run_kernel(insts))
class TestAtomicFaults(TestAMFaultRecovery):
  """Faults from atomic add instructions targeting unmapped memory."""
  def test_global_atomic_unmapped(self):
    insts = [v_mov_b32_e32(v[0], 0xBEEF0000), v_mov_b32_e32(v[1], 0xDEAD), v_mov_b32_e32(v[2], 1),
             global_atomic_add_u32(addr=v[0:1], data=v[2], saddr=NULL, offset=0), s_waitcnt(vmcnt=0), s_endpgm()]
    self._assert_fault_and_recovery(self._run_kernel(insts))
  def test_flat_atomic_unmapped(self):
    insts = [v_mov_b32_e32(v[0], 0xBEEF0000), v_mov_b32_e32(v[1], 0xDEAD), v_mov_b32_e32(v[2], 1),
             flat_atomic_add_u32(addr=v[0:1], data=v[2], saddr=NULL, offset=0), s_waitcnt(vmcnt=0, lgkmcnt=0), s_endpgm()]
    self._assert_fault_and_recovery(self._run_kernel(insts))
class TestRecovery(TestAMFaultRecovery):
  """Recovery-focused checks: trigger fault(s), then verify the device still computes correctly."""
  def test_recovery_after_memviol(self):
    insts = [v_mov_b32_e32(v[0], 0xBEEF0000), v_mov_b32_e32(v[1], 0xDEAD),
             global_load_b32(v[2], addr=v[0:1], saddr=NULL, offset=0), s_waitcnt(vmcnt=0), s_endpgm()]
    self.assertNotEqual(self._run_kernel(insts).returncode, 0)
    self.assertEqual(_verify_recovery().returncode, 0)
  def test_recovery_after_illegal_inst(self):
    self.assertNotEqual(_run_asm(_ILLEGAL_INST_ASM).returncode, 0)
    self.assertEqual(_verify_recovery().returncode, 0)
  def test_multiple_faults_recovery(self):
    # fault and recover repeatedly: recovery must not be a one-shot mechanism
    insts = [v_mov_b32_e32(v[0], 0xBEEF0000), v_mov_b32_e32(v[1], 0xDEAD),
             global_load_b32(v[2], addr=v[0:1], saddr=NULL, offset=0), s_waitcnt(vmcnt=0), s_endpgm()]
    for _ in range(3):
      self.assertNotEqual(self._run_kernel(insts).returncode, 0)
      self.assertEqual(_verify_recovery().returncode, 0)
if __name__ == "__main__":
unittest.main()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/external/external_test_am_fault_recovery.py",
"license": "MIT License",
"lines": 107,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:extra/nv_pma/decode.py | #!/usr/bin/env python3
from __future__ import annotations
import enum, collections
from typing import Iterator
from tinygrad.helpers import colored
from tinygrad.renderer.amd.sqtt import PacketType, bits
# ═══════════════════════════════════════════════════════════════════════════════
# STALL REASONS
# ═══════════════════════════════════════════════════════════════════════════════
class StallReason(enum.IntEnum):
  """Normalized warp-stall categories; values mirror CUpti_ActivityPCSamplingStallReason."""
  # Based on CUpti_ActivityPCSamplingStallReason
  INVALID = 0
  NONE = 1 # selected, selected_not_issued
  INST_FETCH = 2 # branch_resolving, no_instructions
  EXEC_DEPENDENCY = 3 # short_scoreboard, wait
  MEMORY_DEPENDENCY = 4 # long_scoreboard
  TEXTURE = 5 # tex_throttle
  SYNC = 6 # barrier, membar
  CONSTANT_MEMORY = 7 # imc_miss
  PIPE_BUSY = 8 # mio_throttle, math_pipe_throttle
  MEMORY_THROTTLE = 9 # drain, lg_throttle
  NOT_SELECTED = 10 # not_selected
  OTHER = 11 # misc, dispatch_stall
  SLEEPING = 12 # sleeping
# Raw hardware stall-key -> normalized StallReason for the 8-byte (Ampere/Ada/Hopper) format.
# Keys absent from the map fall back to StallReason.OTHER in the sample classes' stall_reason.
STALL_KEY_MAP_AMPERE: dict[int, StallReason] = {
  1: StallReason.MEMORY_THROTTLE, 15: StallReason.MEMORY_THROTTLE,
  2: StallReason.CONSTANT_MEMORY,
  3: StallReason.SYNC,
  6: StallReason.INST_FETCH, 11: StallReason.INST_FETCH,
  7: StallReason.EXEC_DEPENDENCY, 10: StallReason.EXEC_DEPENDENCY,
  9: StallReason.MEMORY_DEPENDENCY,
  12: StallReason.PIPE_BUSY,
  17: StallReason.OTHER, 20: StallReason.OTHER,
  18: StallReason.NONE,
}
# Same idea for the 9-byte (Blackwell+) format, whose raw key encoding differs.
STALL_KEY_MAP_BLACKWELL: dict[int, StallReason] = {
  0x01: StallReason.MEMORY_THROTTLE, 0x0e: StallReason.MEMORY_THROTTLE,
  0x02: StallReason.SYNC,
  0x05: StallReason.INST_FETCH, 0x0a: StallReason.INST_FETCH,
  0x06: StallReason.EXEC_DEPENDENCY, 0x09: StallReason.EXEC_DEPENDENCY,
  0x08: StallReason.MEMORY_DEPENDENCY,
  0x0b: StallReason.PIPE_BUSY, 0x0f: StallReason.PIPE_BUSY,
  0x10: StallReason.OTHER, 0x13: StallReason.OTHER,
  0x11: StallReason.NONE,
}
# Lookup table for extracting sample bytes from 32-byte packet (bytes 0-3, 8-31, skipping header at 4-7)
LOOKUP_28B = [*range(4), *range(8, 32)]
# ═══════════════════════════════════════════════════════════════════════════════
# PACKET HEADER
# ═══════════════════════════════════════════════════════════════════════════════
class PMAHeader(PacketType):
  """Header word of each 32-byte PMA packet (decoded from bytes 4-7, little-endian)."""
  num_bytes = bits[4:0] # number of sample bytes in this packet
  tpc_id_lo = bits[15:8] # TPC identifier low 8 bits
  tpc_id_hi = bits[27:25] # TPC identifier high 3 bits
  dropped = bits[28:28] # dropped flag (resets byte accumulator)
  @property
  def tpc_id(self) -> int: return self.tpc_id_lo | (self.tpc_id_hi << 8) # full 11-bit TPC id
# ═══════════════════════════════════════════════════════════════════════════════
# 8-BYTE SAMPLE FORMAT (Ampere/Ada/Hopper)
# ═══════════════════════════════════════════════════════════════════════════════
class PMASampleAmpere8B(PacketType):
  """8-byte PC-sample record layout (Ampere/Ada/Hopper)."""
  pc_raw = bits[44:0] # raw PC value (pc_offset = pc_raw << 4)
  stall_key = bits[49:45] # stall reason key
  wave_id = bits[55:50] # warp/wave identifier
  active = bits[62:62] # 1 if warp was executing, 0 if scheduled but not issued
  @property
  def pc_offset(self) -> int: return self.pc_raw << 4
  @property
  def stall_reason(self) -> StallReason: return STALL_KEY_MAP_AMPERE.get(self.stall_key, StallReason.OTHER)
# ═══════════════════════════════════════════════════════════════════════════════
# 9-BYTE SAMPLE FORMAT (Blackwell+)
# ═══════════════════════════════════════════════════════════════════════════════
class PMASampleBlackwell9B(PacketType):
  """9-byte PC-sample record layout (Blackwell and newer); wave_id is split across two fields."""
  stall_key = bits[5:0] # stall reason key
  pc_raw = bits[60:8] # raw PC value (pc_offset = pc_raw << 4)
  wave_hi = bits[7:6] # wave_id high 2 bits
  wave_lo = bits[71:68] # wave_id low 4 bits
  active = bits[67:67] # 1 if warp was executing, 0 if scheduled but not issued
  @property
  def pc_offset(self) -> int: return self.pc_raw << 4
  @property
  def stall_reason(self) -> StallReason: return STALL_KEY_MAP_BLACKWELL.get(self.stall_key, StallReason.OTHER)
  @property
  def wave_id(self) -> int: return (self.wave_hi << 4) | self.wave_lo
# Union alias over both architecture-specific sample layouts (used in type annotations)
PMASample = PMASampleAmpere8B|PMASampleBlackwell9B
def decode(data: bytes, sm_version: int = 0x800) -> Iterator[tuple[PMASample, int]]:
  """Yield (sample, tpc_id) pairs from a raw PMA dump.

  The dump is a stream of 32-byte packets; each packet contributes up to 28
  payload bytes (header lives at bytes 4-7) to a per-TPC accumulator, from
  which complete 8- or 9-byte records are peeled off as they become available.
  """
  if sm_version >= 0xa04:  # Blackwell+ uses the 9-byte record format
    sample_cls, record_size = PMASampleBlackwell9B, 9
  else:
    sample_cls, record_size = PMASampleAmpere8B, 8
  pending: dict[int, list[int]] = collections.defaultdict(list)
  for off in range(0, len(data) - 31, 32):
    pkt = data[off:off + 32]
    hdr = PMAHeader.from_raw(int.from_bytes(pkt[4:8], 'little'))
    buf = pending[hdr.tpc_id]
    if hdr.dropped: buf.clear()  # dropped packet invalidates the partial record
    buf.extend(pkt[LOOKUP_28B[i]] for i in range(hdr.num_bytes))
    while len(buf) >= record_size:
      yield sample_cls.from_raw(int.from_bytes(bytes(buf[:record_size]), 'little')), hdr.tpc_id
      del buf[:record_size]
# ═══════════════════════════════════════════════════════════════════════════════
# CLI
# ═══════════════════════════════════════════════════════════════════════════════
# Terminal color (passed to tinygrad's colored()) used for each stall reason in CLI output.
STALL_COLORS = {
  StallReason.NONE: "green", StallReason.INST_FETCH: "yellow", StallReason.EXEC_DEPENDENCY: "cyan",
  StallReason.MEMORY_DEPENDENCY: "red", StallReason.SYNC: "magenta", StallReason.CONSTANT_MEMORY: "blue",
  StallReason.PIPE_BUSY: "yellow", StallReason.MEMORY_THROTTLE: "RED", StallReason.OTHER: "white",
}
def decode_tpc_id(tpc_id:int) -> tuple[int, int, int]:
  """Split a TPC identifier into its (gpc, tpc, sm) fields: bits [5+], [4:1], [0]."""
  # NOTE: valid only for ops_nv, cuda encoding is different
  gpc, low5 = divmod(tpc_id, 1 << 5)
  tpc, sm = divmod(low5, 2)
  return (gpc, tpc, sm)
def print_packets(data:bytes, sm_version:int=0x800) -> None:
  """Debug CLI: print every 32-byte PMA packet and the complete records it yields.

  Mirrors decode()'s per-TPC byte accumulation so that the record boundaries
  shown here line up with what the decoder would actually produce.
  """
  # Keep the 8-vs-9-byte record threshold consistent with decode(): the 9-byte
  # sample format is Blackwell+ (sm_version >= 0xa04); Ampere/Ada/Hopper use the
  # 8-byte format. The previous 0x890 cutoff wrongly treated Ada/Hopper dumps as
  # 9-byte records, misaligning every record printed.
  record_size = 9 if sm_version >= 0xa04 else 8
  tpc_state: dict[int, list[int]] = collections.defaultdict(list)
  for i in range(len(data) // 32):
    pkt = data[i * 32:(i + 1) * 32]
    hdr = PMAHeader.from_raw(int.from_bytes(pkt[4:8], 'little'))
    if hdr.dropped: tpc_state[hdr.tpc_id].clear()  # dropped packet resets the accumulator
    for j in range(hdr.num_bytes): tpc_state[hdr.tpc_id].append(pkt[LOOKUP_28B[j]])
    # Show complete records extracted from this packet
    records = []
    while len(tpc_state[hdr.tpc_id]) >= record_size:
      records.append(bytes(tpc_state[hdr.tpc_id][:record_size]).hex())
      del tpc_state[hdr.tpc_id][:record_size]
    leftover = len(tpc_state[hdr.tpc_id])
    print(f"Pkt {i:3d}: tpc={hdr.tpc_id:4d} n={hdr.num_bytes:2d} drop={hdr.dropped} left={leftover} | {' '.join(records)}")
def print_aggregated(samples:list[tuple[PMASample, int]]) -> None:
  """Print per-PC sample counts grouped by stall reason, with PCs shown relative to the lowest PC seen."""
  if not samples: return
  base_pc = min(sample.pc_offset for sample, _ in samples)
  hist: collections.Counter[tuple[int, StallReason]] = collections.Counter()
  for sample, _ in samples:
    hist[(sample.pc_offset, sample.stall_reason)] += 1
  print(f"\nAggregated samples (base_pc=0x{base_pc:x}):")
  for (pc, reason), cnt in sorted(hist.items()):
    stall_str = colored(f"{reason.name:17}", STALL_COLORS.get(reason, "white"))
    print(f"  pc=0x{pc - base_pc:06x} {stall_str} samples={cnt:4d}")
if __name__ == "__main__":
  import sys, pickle
  if len(sys.argv) < 2:
    print("Usage: python decode.py <pkl_file> [--raw] [--sm=0xNNN]")
    sys.exit(1)
  with open(sys.argv[1], "rb") as f:
    data = pickle.load(f)
  # Two accepted pickle formats: a collect.py example dict (has "pma_raw_dumps"),
  # or a list of tinygrad profile events (ProfileDeviceEvent / ProfilePMAEvent).
  if isinstance(data, dict):
    sm_version = 0x800 # default to Ampere
    for arg in sys.argv:
      if arg.startswith("--sm="): sm_version = int(arg[5:], 0)
    dumps = [(i, x, sm_version) for i, x in enumerate(data["pma_raw_dumps"])]
  else:
    # profile-event path: resolve each PMA event's sm_version via its device event
    devs = {e.device: e for e in data if type(e).__name__ == "ProfileDeviceEvent"}
    dumps = []
    for i, e in enumerate(e for e in data if type(e).__name__ == "ProfilePMAEvent"):
      dumps.append((i, e.blob, devs[e.device].props.get('sm_version', 0x800)))
  for dump_idx, raw, sm_ver in dumps:
    print(f"\n{'='*60}\nDump {dump_idx} ({len(raw)} bytes, {len(raw)//32} packets)\n{'='*60}")
    if "--raw" in sys.argv: print_packets(raw, sm_ver)
    else:
      samples = []
      for s, tpc_id in decode(raw, sm_ver):
        gpc, tpc, sm = decode_tpc_id(tpc_id)
        stall_str = colored(f"{s.stall_reason.name:17}", STALL_COLORS.get(s.stall_reason, "white"))
        print(f"pc=0x{s.pc_offset:06x} {stall_str} ev={s.stall_key:2d} active={s.active} wave={s.wave_id:2d} gpc={gpc} tpc={tpc} sm={sm}")
        samples.append((s, tpc_id))
      print(f"\nDecoded {len(samples)} samples:")
      print_aggregated(samples)
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "extra/nv_pma/decode.py",
"license": "MIT License",
"lines": 162,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
tinygrad/tinygrad:extra/nv_pma/test/test_nvprof.py | import pickle, unittest
from collections import Counter
from pathlib import Path
from extra.nv_pma.decode import decode
from tinygrad.helpers import DEBUG
EXAMPLES_DIR = Path(__file__).parent.parent / "examples"
EXAMPLES_5090_DIR = Path(__file__).parent.parent / "examples_5090"
def decode_and_aggregate(raw_dumps: list[bytes], sm_version: int = 0x800) -> Counter[tuple[int, int]]:
  """Decode every PMA buffer and aggregate by (relative_pc, stall_reason).

  Each dump is normalized independently: its PCs are made relative to the
  minimum PC seen within that dump before being merged into the total.
  """
  total: Counter[tuple[int, int]] = Counter()
  for raw in raw_dumps:
    decoded = [sample for sample, _tpc in decode(raw, sm_version)]
    if not decoded: continue
    origin = min(sample.pc_offset for sample in decoded)
    for sample in decoded:
      total[(sample.pc_offset - origin, int(sample.stall_reason))] += 1
  return total
def cupti_to_counter(cupti_records: list[dict]) -> Counter[tuple[int, int]]:
  """Aggregate CUPTI PC-sampling records into Counter[(pcOffset, stallReason)] -> samples."""
  agg: Counter[tuple[int, int]] = Counter()
  for rec in cupti_records:
    agg.update({(rec['pcOffset'], rec['stallReason']): rec['samples']})
  return agg
class TestNVProf(unittest.TestCase):
  """Verify our PMA decoder reproduces CUPTI's own PC-sampling aggregation exactly.

  Each example .pkl (produced by collect.py) stores both the raw PMA dumps and
  the CUPTI-reported samples for one workload; the test decodes the former and
  asserts equality with the latter.
  """
  def _test_example(self, name: str, sm_version: int = 0x800, examples_dir: Path = EXAMPLES_DIR):
    # Skip (not fail) when the example data hasn't been collected on this machine
    pkl_file = examples_dir / f"{name}.pkl"
    if not pkl_file.exists():
      self.skipTest(f"Example data not found: {pkl_file}. Run collect.py first.")
    with open(pkl_file, "rb") as f:
      data = pickle.load(f)
    self.assertEqual(data["test_name"], name)
    pma_agg = decode_and_aggregate(data["pma_raw_dumps"], sm_version)
    cupti_agg = cupti_to_counter(data["cupti_pc_samples"])
    if DEBUG >= 2:
      # report the symmetric-difference sample count before the full-diff assert
      total = sum(cupti_agg.values())
      mismatched = sum(abs(pma_agg.get(k, 0) - v) for k, v in cupti_agg.items())
      mismatched += sum(v for k, v in pma_agg.items() if k not in cupti_agg)
      mismatched //= 2
      print(f"\n=== Test: {name} ===")
      print(f"Total samples: {total}, Mismatched: {mismatched} ({mismatched/total*100 if total else 0:.1f}%)")
    self.assertEqual(pma_agg, cupti_agg, f"PMA: {dict(pma_agg)}\nCUPTI: {dict(cupti_agg)}")
  # Ampere tests (8-byte format)
  def test_decode_test_plus(self): self._test_example("test_plus")
  def test_decode_test_reduce_sum(self): self._test_example("test_reduce_sum")
  def test_decode_test_broadcast(self): self._test_example("test_broadcast")
  def test_decode_test_matmul(self): self._test_example("test_matmul")
  def test_decode_test_plus_big(self): self._test_example("test_plus_big")
  def test_decode_test_elementwise_chain(self): self._test_example("test_elementwise_chain")
  def test_decode_test_conv2d(self): self._test_example("test_conv2d")
  def test_decode_test_large_matmul(self): self._test_example("test_large_matmul")
  # Blackwell/5090 tests (9-byte format)
  def test_5090_test_plus(self): self._test_example("test_plus", 0xa04, EXAMPLES_5090_DIR)
  def test_5090_test_plus_big(self): self._test_example("test_plus_big", 0xa04, EXAMPLES_5090_DIR)
  def test_5090_test_broadcast(self): self._test_example("test_broadcast", 0xa04, EXAMPLES_5090_DIR)
  def test_5090_test_matmul(self): self._test_example("test_matmul", 0xa04, EXAMPLES_5090_DIR)
  def test_5090_test_large_matmul(self): self._test_example("test_large_matmul", 0xa04, EXAMPLES_5090_DIR)
  def test_5090_test_reduce_sum(self): self._test_example("test_reduce_sum", 0xa04, EXAMPLES_5090_DIR)
  def test_5090_test_reduce_max(self): self._test_example("test_reduce_max", 0xa04, EXAMPLES_5090_DIR)
  def test_5090_test_elementwise_chain(self): self._test_example("test_elementwise_chain", 0xa04, EXAMPLES_5090_DIR)
  def test_5090_test_conv2d(self): self._test_example("test_conv2d", 0xa04, EXAMPLES_5090_DIR)
  def test_5090_test_exp(self): self._test_example("test_exp", 0xa04, EXAMPLES_5090_DIR)
  def test_5090_test_softmax(self): self._test_example("test_softmax", 0xa04, EXAMPLES_5090_DIR)
if __name__ == "__main__":
unittest.main()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "extra/nv_pma/test/test_nvprof.py",
"license": "MIT License",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:extra/nv_pma/collect.py | import pickle, os, sys, functools, numpy as np
from pathlib import Path
os.environ["DEV"] = "CUDA"
os.environ["PROFILE"] = os.environ.get("PROFILE", "2")
from extra.nv_pma.cupti import cu_prof_ext
cu_prof_ext.enable_auto()
from tinygrad import Tensor, Device
if not os.environ.get("IOCTL") or not os.environ.get("GRAB_PMA"):
print("Usage: GRAB_PMA=1 IOCTL=1 IOCTL_PRINT=0 python3 extra/nv_pma/collect.py")
sys.exit(1)
assert Device.DEFAULT == "CUDA", "only works with CUDA"
EXAMPLES_DIR = Path(__file__).parent / "examples"
# (name, collector) pairs registered by the decorator below
_collectors: list[tuple[str, callable]] = []
def pcsampling_test(name: str):
  """Decorator: register *fn* as a named PC-sampling collection workload.

  The generated wrapper clears previously captured PMA/CUPTI state, runs the
  workload, synchronizes the device so all activity records are flushed, and
  returns the captured raw dumps and CUPTI samples as a picklable dict.
  """
  def decorator(fn):
    @functools.wraps(fn)
    def wrapper():
      cu_prof_ext.clear_pma_raw_dumps()
      cu_prof_ext.clear_cupti_pc_samples()
      fn()
      # synchronize so the CUPTI flush hook runs and all records are captured
      Device["CUDA"].synchronize()
      # (removed an unused `dumps = ...` local and leftover hexdump debug code)
      return {"test_name": name, "pma_raw_dumps": list(cu_prof_ext.get_pma_raw_dumps()), "cupti_pc_samples": list(cu_prof_ext.get_cupti_pc_samples())}
    _collectors.append((name, wrapper))
    return wrapper
  return decorator
# Reference workloads: each realizes a small tensor expression while PC sampling is active.
@pcsampling_test("test_plus")
def test_plus():
  a = Tensor([1, 2, 3, 4])
  b = Tensor([5, 6, 7, 8])
  (a + b).realize()
@pcsampling_test("test_matmul")
def test_matmul():
  a = Tensor(np.random.rand(12, 12).astype(np.float32))
  b = Tensor(np.random.rand(12, 12).astype(np.float32))
  (a @ b).realize()
@pcsampling_test("test_reduce_sum")
def test_reduce_sum():
  a = Tensor(np.random.rand(1024).astype(np.float32))
  a.sum().realize()
@pcsampling_test("test_reduce_max")
def test_reduce_max():
  a = Tensor(np.random.rand(1024).astype(np.float32))
  a.max().realize()
@pcsampling_test("test_exp")
def test_exp():
  a = Tensor(np.random.rand(256).astype(np.float32))
  a.exp().realize()
@pcsampling_test("test_softmax")
def test_softmax():
  a = Tensor(np.random.rand(64, 64).astype(np.float32))
  a.softmax().realize()
@pcsampling_test("test_conv2d")
def test_conv2d():
  x = Tensor(np.random.rand(1, 3, 32, 32).astype(np.float32))
  w = Tensor(np.random.rand(8, 3, 3, 3).astype(np.float32))
  x.conv2d(w).realize()
@pcsampling_test("test_large_matmul")
def test_large_matmul():
  a = Tensor(np.random.rand(128, 128).astype(np.float32))
  b = Tensor(np.random.rand(128, 128).astype(np.float32))
  (a @ b).realize()
@pcsampling_test("test_elementwise_chain")
def test_elementwise_chain():
  a = Tensor(np.random.rand(512).astype(np.float32))
  ((a + 1) * 2 - 0.5).relu().realize()
@pcsampling_test("test_broadcast")
def test_broadcast():
  a = Tensor(np.random.rand(64, 1).astype(np.float32))
  b = Tensor(np.random.rand(1, 64).astype(np.float32))
  (a + b).realize()
@pcsampling_test("test_plus_big")
def test_plus_big():
  a = Tensor(np.random.rand(64, 32).astype(np.float32))
  b = Tensor(np.random.rand(64, 32).astype(np.float32))
  (a + b).realize()
def save_example(name: str, data: dict):
  """Pickle one collected example to examples/<name>.pkl and print a short summary."""
  pma_bytes = sum(map(len, data['pma_raw_dumps']))
  cupti_samples = sum(r['samples'] for r in data['cupti_pc_samples'])
  print(f"  PMA: {len(data['pma_raw_dumps'])} buffers, {pma_bytes} bytes")
  print(f"  CUPTI: {len(data['cupti_pc_samples'])} records, {cupti_samples} samples")
  outfile = EXAMPLES_DIR / f"{name}.pkl"
  with outfile.open("wb") as f:
    pickle.dump(data, f)
  print(f"  Saved to {outfile}")
if __name__ == "__main__":
  EXAMPLES_DIR.mkdir(exist_ok=True)
  # Run specific tests if provided as arguments, otherwise run all
  if len(sys.argv) > 1:
    test_names = sys.argv[1:]
    collectors = [(name, fn) for name, fn in _collectors if name in test_names]
    if not collectors:
      print(f"Unknown tests: {test_names}")
      print(f"Available: {[name for name, _ in _collectors]}")
      sys.exit(1)
  else:
    collectors = _collectors
  for name, collect_fn in collectors:
    print(f"\nCollecting {name}...")
    try:
      data = collect_fn()
      save_example(name, data)
    except Exception as e:
      # best-effort: one failing workload shouldn't stop the rest of the run
      print(f"  ERROR: {e}")
      import traceback
      traceback.print_exc()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "extra/nv_pma/collect.py",
"license": "MIT License",
"lines": 109,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
tinygrad/tinygrad:extra/nv_pma/cupti/cu_prof_ext.py | from __future__ import annotations
import ctypes
from tinygrad.helpers import DEBUG, getenv
from extra.nv_pma.cupti import cupti
def stall_reason_name(reason: int) -> str:
  """Short lowercase name for a CUPTI stall-reason code, or the raw number if unknown."""
  full_name = cupti.CUpti_ActivityPCSamplingStallReason.get(reason)
  if not full_name: return str(reason)
  return full_name.replace("CUPTI_ACTIVITY_PC_SAMPLING_STALL_", "").lower()
class CUPTIProfiler:
  """Collects kernel timing (PROFILE=1) or PC-sampling stall data (PROFILE>=2) via the CUPTI Activity API."""
  def __init__(self):
    self.initialized = False
    self.pc_sampling_enabled = False
    self.buffers: list[ctypes.Array] = [] # keep-alive refs for buffers handed to CUPTI
    self.kernel_stalls: dict[int, dict[int, int]] = {} # correlationId -> {stallReason: sample count}
    self.raw_buffers: list[bytes] = []
    self.pc_samples: list[dict] = [] # parsed PC-sampling records (see _buffer_completed)
  def _check_cupti(self, status, soft=False):
    """Raise RuntimeError on a CUPTI error, or return False instead when soft=True."""
    if status != cupti.CUPTI_SUCCESS:
      if soft: return False
      raise RuntimeError(f"CUPTI Error {status}")
    return True
  def init(self, ctx, device_id: int = 0, profile_level: int = 2):
    """One-time CUPTI setup for a CUDA context; falls back to kernel timing if PC sampling is unavailable."""
    if self.initialized: return
    # Initialize profiler API
    init_params = cupti.CUpti_Profiler_Initialize_Params()
    init_params.structSize = 16
    cupti.cuptiProfilerInitialize(ctypes.byref(init_params))
    # Register buffer callbacks for Activity API
    self._buf_req_cb = cupti.CUpti_BuffersCallbackRequestFunc(self._buffer_requested)
    self._buf_comp_cb = cupti.CUpti_BuffersCallbackCompleteFunc(self._buffer_completed)
    self._check_cupti(cupti.cuptiActivityRegisterCallbacks(self._buf_req_cb, self._buf_comp_cb))
    # PROFILE=1: kernel timing, PROFILE=2: PC sampling with stall reasons
    if profile_level >= 2:
      # PC sampling for stall analysis (requires elevated privileges)
      if DEBUG >= 1: print("  CUPTI: PC sampling mode (before)")
      pc_status = cupti.cuptiActivityEnable(cupti.CUPTI_ACTIVITY_KIND_PC_SAMPLING)
      if pc_status == cupti.CUPTI_SUCCESS:
        config = cupti.CUpti_ActivityPCSamplingConfig()
        config.size, config.samplingPeriod = 16, cupti.CUPTI_ACTIVITY_PC_SAMPLING_PERIOD_MIN
        cfg_status = cupti.dll.cuptiActivityConfigurePCSampling(ctx, ctypes.byref(config))
        if cfg_status == cupti.CUPTI_SUCCESS:
          if DEBUG >= 1: print("  CUPTI: PC sampling mode (before stall analysis)")
          cupti.cuptiActivityEnable(cupti.CUPTI_ACTIVITY_KIND_PC_SAMPLING_RECORD_INFO)
          self.pc_sampling_enabled = True
          if DEBUG >= 1: print("  CUPTI: PC sampling mode (stall analysis)")
        elif cfg_status == 35: # CUPTI_ERROR_INSUFFICIENT_PRIVILEGES
          if DEBUG >= 1: print("  CUPTI: PC sampling needs: echo 'options nvidia NVreg_RestrictProfilingToAdminUsers=0'|sudo tee /etc/modprobe.d/nvidia.conf && sudo reboot")
      # Fall back to kernel timing if PC sampling setup failed
      if not self.pc_sampling_enabled:
        self._check_cupti(cupti.cuptiActivityEnable(cupti.CUPTI_ACTIVITY_KIND_KERNEL))
    else:
      # Kernel activity tracing for timing
      self._check_cupti(cupti.cuptiActivityEnable(cupti.CUPTI_ACTIVITY_KIND_KERNEL))
    self.initialized = True
  def _buffer_requested(self, buffer, size, max_num_records):
    """CUPTI callback: hand out a fresh activity buffer (kept alive via self.buffers)."""
    buf = (ctypes.c_uint8 * 1024 * 1024)() # 1MB buffer
    self.buffers.append(buf)
    buffer[0] = ctypes.cast(buf, ctypes.POINTER(ctypes.c_uint8))
    size[0] = ctypes.sizeof(buf)
    max_num_records[0] = 0
  def _buffer_completed(self, ctx, stream_id, buffer, size, valid_size):
    """CUPTI callback: walk all records in a completed activity buffer and dispatch by kind."""
    if valid_size > 0:
      record = ctypes.POINTER(cupti.CUpti_Activity)()
      while cupti.cuptiActivityGetNextRecord(buffer, valid_size, ctypes.byref(record)) == cupti.CUPTI_SUCCESS:
        kind = record.contents.kind
        if kind == cupti.CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL:
          # kernel timing record: print name, duration, launch geometry, resources
          kernel = ctypes.cast(record, ctypes.POINTER(cupti.CUpti_ActivityKernel9)).contents
          name = ctypes.string_at(kernel.name).decode() if kernel.name else "unknown"
          duration_us = (kernel.end - kernel.start) / 1000.0
          grid, block = (kernel.gridX, kernel.gridY, kernel.gridZ), (kernel.blockX, kernel.blockY, kernel.blockZ)
          print(f"  CUPTI: {name[:40]:40s} | {duration_us:10.2f} us | grid={grid} block={block} | regs={kernel.registersPerThread:3d} smem={kernel.staticSharedMemory + kernel.dynamicSharedMemory:6d}B")
        elif kind == cupti.CUPTI_ACTIVITY_KIND_PC_SAMPLING:
          # per-PC sample record: accumulate per-kernel stall histogram and store the raw fields
          pc = ctypes.cast(record, ctypes.POINTER(cupti.CUpti_ActivityPCSampling3)).contents
          cid = pc.correlationId
          if cid not in self.kernel_stalls: self.kernel_stalls[cid] = {}
          self.kernel_stalls[cid][pc.stallReason] = self.kernel_stalls[cid].get(pc.stallReason, 0) + pc.samples
          self.pc_samples.append({
            'correlationId': pc.correlationId, 'pcOffset': pc.pcOffset, 'stallReason': pc.stallReason,
            'samples': pc.samples, 'latencySamples': pc.latencySamples, 'functionId': pc.functionId, 'sourceLocatorId': pc.sourceLocatorId
          })
          if DEBUG >= 3:
            print(f"    PC {pc.pcOffset:#x} stall={stall_reason_name(pc.stallReason)} samples={pc.samples} latency={pc.latencySamples} func={pc.functionId} src={pc.sourceLocatorId}")
        elif kind == cupti.CUPTI_ACTIVITY_KIND_PC_SAMPLING_RECORD_INFO:
          # end-of-kernel summary record: print top-5 stall reasons and free the histogram
          info = ctypes.cast(record, ctypes.POINTER(cupti.CUpti_ActivityPCSamplingRecordInfo)).contents
          cid = info.correlationId
          if cid in self.kernel_stalls:
            stalls = self.kernel_stalls[cid]
            total = sum(stalls.values())
            if total > 0:
              top = sorted(stalls.items(), key=lambda x: -x[1])[:5]
              stall_str = " ".join(f"{stall_reason_name(r)}:{100*c//total}%" for r,c in top if c > 0)
              print(f"  CUPTI stalls (corr={cid}): {total} samples | {stall_str}")
            del self.kernel_stalls[cid]
        else: print(f"  CUPTI: Unhandled activity kind {kind}")
  def flush(self):
    """Force CUPTI to deliver all buffered activity records via _buffer_completed."""
    if not self.initialized: return
    self._check_cupti(cupti.cuptiActivityFlushAll(0))
# Module-level profiler instance
_profiler: CUPTIProfiler | None = None

def get_profiler() -> CUPTIProfiler | None:
  """Return the active profiler, or None if enable() was never called."""
  return _profiler

def get_cupti_raw_buffers() -> list[bytes]:
  """Raw CUPTI activity buffers captured so far ([] when profiling is disabled)."""
  if _profiler is None: return []
  return _profiler.raw_buffers

def clear_cupti_raw_buffers():
  """Drop captured CUPTI activity buffers, if any."""
  if _profiler is not None: _profiler.raw_buffers.clear()

def get_cupti_pc_samples() -> list[dict]:
  """PC-sampling records parsed out of CUPTI activity buffers ([] when disabled)."""
  if _profiler is None: return []
  return _profiler.pc_samples

def clear_cupti_pc_samples():
  """Drop captured PC-sampling records, if any."""
  if _profiler is not None: _profiler.pc_samples.clear()

# Raw PMA buffer access (from ioctl interception)
def get_pma_raw_dumps() -> list[bytes]:
  """Raw PMA buffers grabbed by the nv_ioctl interception layer ([] if it isn't loaded)."""
  try:
    from extra.nv_gpu_driver.nv_ioctl import get_pma_raw_dumps as _impl
    return _impl()
  except ImportError:
    return []

def clear_pma_raw_dumps():
  """Clear the nv_ioctl PMA dump list, if the interception layer is loaded."""
  try:
    from extra.nv_gpu_driver.nv_ioctl import clear_pma_raw_dumps as _impl
    _impl()
  except ImportError:
    pass
def enable(profile_level:int=2):
  """Turn on CUPTI profiling by monkey-patching CUDADevice.

  __init__ gains per-context CUPTI initialization; synchronize() additionally
  flushes buffered activity records so the callbacks fire promptly.
  Idempotent: a second call is a no-op.
  """
  global _profiler
  if _profiler is not None: return
  _profiler = CUPTIProfiler()
  # Patch CUDADevice to initialize CUPTI profiler
  from tinygrad.runtime.ops_cuda import CUDADevice
  _orig_init = CUDADevice.__init__
  _orig_sync = CUDADevice.synchronize
  def _patched_init(self, device: str):
    _orig_init(self, device)
    device_id = int(device.split(":")[1]) if ":" in device else 0 # "CUDA:1" -> 1, bare "CUDA" -> 0
    _profiler.init(self.context, device_id, profile_level)
  def _patched_sync(self):
    _orig_sync(self)
    if _profiler: _profiler.flush()
  CUDADevice.__init__ = _patched_init
  CUDADevice.synchronize = _patched_sync
def enable_auto():
  """Enable profiling iff the PROFILE environment variable is set to a positive level."""
  profile_level = getenv("PROFILE", 0)
  if profile_level > 0:
    enable(profile_level)
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "extra/nv_pma/cupti/cu_prof_ext.py",
"license": "MIT License",
"lines": 138,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
tinygrad/tinygrad:extra/nv_pma/cupti/cupti.py | # mypy: disable-error-code="empty-body"
from __future__ import annotations
import ctypes
from typing import Annotated, Literal, TypeAlias
from tinygrad.runtime.support.c import _IO, _IOW, _IOR, _IOWR
from tinygrad.runtime.support import c
dll = c.DLL('cupti', '/usr/local/cuda/targets/x86_64-linux/lib/libcupti.so')
class CUptiResult(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_SUCCESS = CUptiResult.define('CUPTI_SUCCESS', 0)
CUPTI_ERROR_INVALID_PARAMETER = CUptiResult.define('CUPTI_ERROR_INVALID_PARAMETER', 1)
CUPTI_ERROR_INVALID_DEVICE = CUptiResult.define('CUPTI_ERROR_INVALID_DEVICE', 2)
CUPTI_ERROR_INVALID_CONTEXT = CUptiResult.define('CUPTI_ERROR_INVALID_CONTEXT', 3)
CUPTI_ERROR_INVALID_EVENT_DOMAIN_ID = CUptiResult.define('CUPTI_ERROR_INVALID_EVENT_DOMAIN_ID', 4)
CUPTI_ERROR_INVALID_EVENT_ID = CUptiResult.define('CUPTI_ERROR_INVALID_EVENT_ID', 5)
CUPTI_ERROR_INVALID_EVENT_NAME = CUptiResult.define('CUPTI_ERROR_INVALID_EVENT_NAME', 6)
CUPTI_ERROR_INVALID_OPERATION = CUptiResult.define('CUPTI_ERROR_INVALID_OPERATION', 7)
CUPTI_ERROR_OUT_OF_MEMORY = CUptiResult.define('CUPTI_ERROR_OUT_OF_MEMORY', 8)
CUPTI_ERROR_HARDWARE = CUptiResult.define('CUPTI_ERROR_HARDWARE', 9)
CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT = CUptiResult.define('CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT', 10)
CUPTI_ERROR_API_NOT_IMPLEMENTED = CUptiResult.define('CUPTI_ERROR_API_NOT_IMPLEMENTED', 11)
CUPTI_ERROR_MAX_LIMIT_REACHED = CUptiResult.define('CUPTI_ERROR_MAX_LIMIT_REACHED', 12)
CUPTI_ERROR_NOT_READY = CUptiResult.define('CUPTI_ERROR_NOT_READY', 13)
CUPTI_ERROR_NOT_COMPATIBLE = CUptiResult.define('CUPTI_ERROR_NOT_COMPATIBLE', 14)
CUPTI_ERROR_NOT_INITIALIZED = CUptiResult.define('CUPTI_ERROR_NOT_INITIALIZED', 15)
CUPTI_ERROR_INVALID_METRIC_ID = CUptiResult.define('CUPTI_ERROR_INVALID_METRIC_ID', 16)
CUPTI_ERROR_INVALID_METRIC_NAME = CUptiResult.define('CUPTI_ERROR_INVALID_METRIC_NAME', 17)
CUPTI_ERROR_QUEUE_EMPTY = CUptiResult.define('CUPTI_ERROR_QUEUE_EMPTY', 18)
CUPTI_ERROR_INVALID_HANDLE = CUptiResult.define('CUPTI_ERROR_INVALID_HANDLE', 19)
CUPTI_ERROR_INVALID_STREAM = CUptiResult.define('CUPTI_ERROR_INVALID_STREAM', 20)
CUPTI_ERROR_INVALID_KIND = CUptiResult.define('CUPTI_ERROR_INVALID_KIND', 21)
CUPTI_ERROR_INVALID_EVENT_VALUE = CUptiResult.define('CUPTI_ERROR_INVALID_EVENT_VALUE', 22)
CUPTI_ERROR_DISABLED = CUptiResult.define('CUPTI_ERROR_DISABLED', 23)
CUPTI_ERROR_INVALID_MODULE = CUptiResult.define('CUPTI_ERROR_INVALID_MODULE', 24)
CUPTI_ERROR_INVALID_METRIC_VALUE = CUptiResult.define('CUPTI_ERROR_INVALID_METRIC_VALUE', 25)
CUPTI_ERROR_HARDWARE_BUSY = CUptiResult.define('CUPTI_ERROR_HARDWARE_BUSY', 26)
CUPTI_ERROR_NOT_SUPPORTED = CUptiResult.define('CUPTI_ERROR_NOT_SUPPORTED', 27)
CUPTI_ERROR_UM_PROFILING_NOT_SUPPORTED = CUptiResult.define('CUPTI_ERROR_UM_PROFILING_NOT_SUPPORTED', 28)
# --- Auto-generated CUPTI bindings: do not edit by hand. ---
# Continuation of the CUptiResult status-code enum (the class statement is
# earlier in the file). Values mirror the C `CUptiResult` enum from cupti.h.
CUPTI_ERROR_UM_PROFILING_NOT_SUPPORTED_ON_DEVICE = CUptiResult.define('CUPTI_ERROR_UM_PROFILING_NOT_SUPPORTED_ON_DEVICE', 29)
CUPTI_ERROR_UM_PROFILING_NOT_SUPPORTED_ON_NON_P2P_DEVICES = CUptiResult.define('CUPTI_ERROR_UM_PROFILING_NOT_SUPPORTED_ON_NON_P2P_DEVICES', 30)
CUPTI_ERROR_UM_PROFILING_NOT_SUPPORTED_WITH_MPS = CUptiResult.define('CUPTI_ERROR_UM_PROFILING_NOT_SUPPORTED_WITH_MPS', 31)
CUPTI_ERROR_CDP_TRACING_NOT_SUPPORTED = CUptiResult.define('CUPTI_ERROR_CDP_TRACING_NOT_SUPPORTED', 32)
CUPTI_ERROR_VIRTUALIZED_DEVICE_NOT_SUPPORTED = CUptiResult.define('CUPTI_ERROR_VIRTUALIZED_DEVICE_NOT_SUPPORTED', 33)
CUPTI_ERROR_CUDA_COMPILER_NOT_COMPATIBLE = CUptiResult.define('CUPTI_ERROR_CUDA_COMPILER_NOT_COMPATIBLE', 34)
CUPTI_ERROR_INSUFFICIENT_PRIVILEGES = CUptiResult.define('CUPTI_ERROR_INSUFFICIENT_PRIVILEGES', 35)
CUPTI_ERROR_OLD_PROFILER_API_INITIALIZED = CUptiResult.define('CUPTI_ERROR_OLD_PROFILER_API_INITIALIZED', 36)
CUPTI_ERROR_OPENACC_UNDEFINED_ROUTINE = CUptiResult.define('CUPTI_ERROR_OPENACC_UNDEFINED_ROUTINE', 37)
CUPTI_ERROR_LEGACY_PROFILER_NOT_SUPPORTED = CUptiResult.define('CUPTI_ERROR_LEGACY_PROFILER_NOT_SUPPORTED', 38)
CUPTI_ERROR_MULTIPLE_SUBSCRIBERS_NOT_SUPPORTED = CUptiResult.define('CUPTI_ERROR_MULTIPLE_SUBSCRIBERS_NOT_SUPPORTED', 39)
CUPTI_ERROR_VIRTUALIZED_DEVICE_INSUFFICIENT_PRIVILEGES = CUptiResult.define('CUPTI_ERROR_VIRTUALIZED_DEVICE_INSUFFICIENT_PRIVILEGES', 40)
CUPTI_ERROR_CONFIDENTIAL_COMPUTING_NOT_SUPPORTED = CUptiResult.define('CUPTI_ERROR_CONFIDENTIAL_COMPUTING_NOT_SUPPORTED', 41)
CUPTI_ERROR_CMP_DEVICE_NOT_SUPPORTED = CUptiResult.define('CUPTI_ERROR_CMP_DEVICE_NOT_SUPPORTED', 42)
CUPTI_ERROR_MIG_DEVICE_NOT_SUPPORTED = CUptiResult.define('CUPTI_ERROR_MIG_DEVICE_NOT_SUPPORTED', 43)
CUPTI_ERROR_SLI_DEVICE_NOT_SUPPORTED = CUptiResult.define('CUPTI_ERROR_SLI_DEVICE_NOT_SUPPORTED', 44)
CUPTI_ERROR_WSL_DEVICE_NOT_SUPPORTED = CUptiResult.define('CUPTI_ERROR_WSL_DEVICE_NOT_SUPPORTED', 45)
CUPTI_ERROR_UNKNOWN = CUptiResult.define('CUPTI_ERROR_UNKNOWN', 999)
# Sentinel (0x7fffffff) that pins the C enum's storage to a full 32-bit int.
CUPTI_ERROR_FORCE_INT = CUptiResult.define('CUPTI_ERROR_FORCE_INT', 2147483647)
# Bound library entry points. Each takes a status code and an out-parameter
# through which a char* message pointer is written; the call itself returns a
# CUptiResult status. (`str` shadows the builtin — generator artifact, kept as
# emitted so the binding matches the C prototype's parameter name.)
@dll.bind
def cuptiGetResultString(result:CUptiResult, str:c.POINTER[c.POINTER[Annotated[bytes, ctypes.c_char]]]) -> CUptiResult: ...
@dll.bind
def cuptiGetErrorMessage(result:CUptiResult, str:c.POINTER[c.POINTER[Annotated[bytes, ctypes.c_char]]]) -> CUptiResult: ...
# CUpti_ActivityKind: tags identifying the concrete record type in a CUPTI
# activity buffer (memcpy, kernel, driver/runtime API call, marker, ...).
# Auto-generated from the C enum; the numeric values are ABI and must not change.
class CUpti_ActivityKind(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_ACTIVITY_KIND_INVALID = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_INVALID', 0)
CUPTI_ACTIVITY_KIND_MEMCPY = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_MEMCPY', 1)
CUPTI_ACTIVITY_KIND_MEMSET = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_MEMSET', 2)
CUPTI_ACTIVITY_KIND_KERNEL = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_KERNEL', 3)
CUPTI_ACTIVITY_KIND_DRIVER = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_DRIVER', 4)
CUPTI_ACTIVITY_KIND_RUNTIME = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_RUNTIME', 5)
CUPTI_ACTIVITY_KIND_EVENT = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_EVENT', 6)
CUPTI_ACTIVITY_KIND_METRIC = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_METRIC', 7)
CUPTI_ACTIVITY_KIND_DEVICE = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_DEVICE', 8)
CUPTI_ACTIVITY_KIND_CONTEXT = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_CONTEXT', 9)
CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL', 10)
CUPTI_ACTIVITY_KIND_NAME = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_NAME', 11)
CUPTI_ACTIVITY_KIND_MARKER = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_MARKER', 12)
CUPTI_ACTIVITY_KIND_MARKER_DATA = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_MARKER_DATA', 13)
CUPTI_ACTIVITY_KIND_SOURCE_LOCATOR = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_SOURCE_LOCATOR', 14)
CUPTI_ACTIVITY_KIND_GLOBAL_ACCESS = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_GLOBAL_ACCESS', 15)
CUPTI_ACTIVITY_KIND_BRANCH = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_BRANCH', 16)
CUPTI_ACTIVITY_KIND_OVERHEAD = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_OVERHEAD', 17)
CUPTI_ACTIVITY_KIND_CDP_KERNEL = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_CDP_KERNEL', 18)
CUPTI_ACTIVITY_KIND_PREEMPTION = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_PREEMPTION', 19)
CUPTI_ACTIVITY_KIND_ENVIRONMENT = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_ENVIRONMENT', 20)
CUPTI_ACTIVITY_KIND_EVENT_INSTANCE = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_EVENT_INSTANCE', 21)
CUPTI_ACTIVITY_KIND_MEMCPY2 = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_MEMCPY2', 22)
CUPTI_ACTIVITY_KIND_METRIC_INSTANCE = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_METRIC_INSTANCE', 23)
CUPTI_ACTIVITY_KIND_INSTRUCTION_EXECUTION = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_INSTRUCTION_EXECUTION', 24)
CUPTI_ACTIVITY_KIND_UNIFIED_MEMORY_COUNTER = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_UNIFIED_MEMORY_COUNTER', 25)
CUPTI_ACTIVITY_KIND_FUNCTION = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_FUNCTION', 26)
CUPTI_ACTIVITY_KIND_MODULE = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_MODULE', 27)
CUPTI_ACTIVITY_KIND_DEVICE_ATTRIBUTE = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_DEVICE_ATTRIBUTE', 28)
CUPTI_ACTIVITY_KIND_SHARED_ACCESS = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_SHARED_ACCESS', 29)
CUPTI_ACTIVITY_KIND_PC_SAMPLING = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_PC_SAMPLING', 30)
CUPTI_ACTIVITY_KIND_PC_SAMPLING_RECORD_INFO = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_PC_SAMPLING_RECORD_INFO', 31)
CUPTI_ACTIVITY_KIND_INSTRUCTION_CORRELATION = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_INSTRUCTION_CORRELATION', 32)
CUPTI_ACTIVITY_KIND_OPENACC_DATA = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_OPENACC_DATA', 33)
CUPTI_ACTIVITY_KIND_OPENACC_LAUNCH = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_OPENACC_LAUNCH', 34)
CUPTI_ACTIVITY_KIND_OPENACC_OTHER = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_OPENACC_OTHER', 35)
CUPTI_ACTIVITY_KIND_CUDA_EVENT = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_CUDA_EVENT', 36)
CUPTI_ACTIVITY_KIND_STREAM = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_STREAM', 37)
CUPTI_ACTIVITY_KIND_SYNCHRONIZATION = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_SYNCHRONIZATION', 38)
CUPTI_ACTIVITY_KIND_EXTERNAL_CORRELATION = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_EXTERNAL_CORRELATION', 39)
CUPTI_ACTIVITY_KIND_NVLINK = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_NVLINK', 40)
CUPTI_ACTIVITY_KIND_INSTANTANEOUS_EVENT = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_INSTANTANEOUS_EVENT', 41)
CUPTI_ACTIVITY_KIND_INSTANTANEOUS_EVENT_INSTANCE = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_INSTANTANEOUS_EVENT_INSTANCE', 42)
CUPTI_ACTIVITY_KIND_INSTANTANEOUS_METRIC = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_INSTANTANEOUS_METRIC', 43)
CUPTI_ACTIVITY_KIND_INSTANTANEOUS_METRIC_INSTANCE = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_INSTANTANEOUS_METRIC_INSTANCE', 44)
CUPTI_ACTIVITY_KIND_MEMORY = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_MEMORY', 45)
CUPTI_ACTIVITY_KIND_PCIE = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_PCIE', 46)
CUPTI_ACTIVITY_KIND_OPENMP = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_OPENMP', 47)
CUPTI_ACTIVITY_KIND_INTERNAL_LAUNCH_API = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_INTERNAL_LAUNCH_API', 48)
CUPTI_ACTIVITY_KIND_MEMORY2 = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_MEMORY2', 49)
CUPTI_ACTIVITY_KIND_MEMORY_POOL = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_MEMORY_POOL', 50)
CUPTI_ACTIVITY_KIND_GRAPH_TRACE = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_GRAPH_TRACE', 51)
CUPTI_ACTIVITY_KIND_JIT = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_JIT', 52)
CUPTI_ACTIVITY_KIND_DEVICE_GRAPH_TRACE = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_DEVICE_GRAPH_TRACE', 53)
CUPTI_ACTIVITY_KIND_MEM_DECOMPRESS = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_MEM_DECOMPRESS', 54)
# COUNT is one past the last real kind; FORCE_INT pins 32-bit storage.
CUPTI_ACTIVITY_KIND_COUNT = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_COUNT', 55)
CUPTI_ACTIVITY_KIND_FORCE_INT = CUpti_ActivityKind.define('CUPTI_ACTIVITY_KIND_FORCE_INT', 2147483647)
# CUpti_ActivityObjectKind: which kind of entity (process/thread/device/
# context/stream) an activity object id refers to; selects which member of
# CUpti_ActivityObjectKindId below is meaningful.
class CUpti_ActivityObjectKind(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_ACTIVITY_OBJECT_UNKNOWN = CUpti_ActivityObjectKind.define('CUPTI_ACTIVITY_OBJECT_UNKNOWN', 0)
CUPTI_ACTIVITY_OBJECT_PROCESS = CUpti_ActivityObjectKind.define('CUPTI_ACTIVITY_OBJECT_PROCESS', 1)
CUPTI_ACTIVITY_OBJECT_THREAD = CUpti_ActivityObjectKind.define('CUPTI_ACTIVITY_OBJECT_THREAD', 2)
CUPTI_ACTIVITY_OBJECT_DEVICE = CUpti_ActivityObjectKind.define('CUPTI_ACTIVITY_OBJECT_DEVICE', 3)
CUPTI_ACTIVITY_OBJECT_CONTEXT = CUpti_ActivityObjectKind.define('CUPTI_ACTIVITY_OBJECT_CONTEXT', 4)
CUPTI_ACTIVITY_OBJECT_STREAM = CUpti_ActivityObjectKind.define('CUPTI_ACTIVITY_OBJECT_STREAM', 5)
CUPTI_ACTIVITY_OBJECT_FORCE_INT = CUpti_ActivityObjectKind.define('CUPTI_ACTIVITY_OBJECT_FORCE_INT', 2147483647)
# Object-id record. Both members start at byte offset 0 and SIZE is the larger
# of the two (12 bytes), i.e. this mirrors a C union: `pt` (process/thread)
# and `dcs` (device/context/stream) overlay the same storage.
@c.record
class CUpti_ActivityObjectKindId(c.Struct):
  SIZE = 12
  pt: Annotated[CUpti_ActivityObjectKindId_pt, 0]
  dcs: Annotated[CUpti_ActivityObjectKindId_dcs, 0]
# Process/thread variant: two consecutive 32-bit ids, 8 bytes total.
@c.record
class CUpti_ActivityObjectKindId_pt(c.Struct):
  SIZE = 8
  processId: Annotated[uint32_t, 0]
  threadId: Annotated[uint32_t, 4]
# Alias for a C uint32_t field. NOTE(review): the generator emits this after
# the record above already references it in annotations — assumes postponed
# annotation evaluation (`from __future__ import annotations` at file top);
# confirm if touching annotation handling.
uint32_t: TypeAlias = Annotated[int, ctypes.c_uint32]
# Device/context/stream variant: three consecutive 32-bit ids, 12 bytes total.
@c.record
class CUpti_ActivityObjectKindId_dcs(c.Struct):
  SIZE = 12
  deviceId: Annotated[uint32_t, 0]
  contextId: Annotated[uint32_t, 4]
  streamId: Annotated[uint32_t, 8]
# Payload attached to COMMAND_BUFFER_FULL overhead records: the command
# buffer length plus the channel id/type it filled up on.
@c.record
class CUpti_ActivityOverheadCommandBufferFullData(c.Struct):
  SIZE = 12
  commandBufferLength: Annotated[uint32_t, 0]
  channelID: Annotated[uint32_t, 4]
  channelType: Annotated[uint32_t, 8]
# CUpti_ActivityOverheadKind: sources of profiling overhead reported via
# OVERHEAD activity records. Note the non-contiguous values (multiples of
# 0x10000 for the CUPTI-internal kinds) — they come straight from the C enum.
class CUpti_ActivityOverheadKind(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_ACTIVITY_OVERHEAD_UNKNOWN = CUpti_ActivityOverheadKind.define('CUPTI_ACTIVITY_OVERHEAD_UNKNOWN', 0)
CUPTI_ACTIVITY_OVERHEAD_DRIVER_COMPILER = CUpti_ActivityOverheadKind.define('CUPTI_ACTIVITY_OVERHEAD_DRIVER_COMPILER', 1)
CUPTI_ACTIVITY_OVERHEAD_CUPTI_BUFFER_FLUSH = CUpti_ActivityOverheadKind.define('CUPTI_ACTIVITY_OVERHEAD_CUPTI_BUFFER_FLUSH', 65536)
CUPTI_ACTIVITY_OVERHEAD_CUPTI_INSTRUMENTATION = CUpti_ActivityOverheadKind.define('CUPTI_ACTIVITY_OVERHEAD_CUPTI_INSTRUMENTATION', 131072)
CUPTI_ACTIVITY_OVERHEAD_CUPTI_RESOURCE = CUpti_ActivityOverheadKind.define('CUPTI_ACTIVITY_OVERHEAD_CUPTI_RESOURCE', 196608)
CUPTI_ACTIVITY_OVERHEAD_RUNTIME_TRIGGERED_MODULE_LOADING = CUpti_ActivityOverheadKind.define('CUPTI_ACTIVITY_OVERHEAD_RUNTIME_TRIGGERED_MODULE_LOADING', 262144)
CUPTI_ACTIVITY_OVERHEAD_LAZY_FUNCTION_LOADING = CUpti_ActivityOverheadKind.define('CUPTI_ACTIVITY_OVERHEAD_LAZY_FUNCTION_LOADING', 327680)
CUPTI_ACTIVITY_OVERHEAD_COMMAND_BUFFER_FULL = CUpti_ActivityOverheadKind.define('CUPTI_ACTIVITY_OVERHEAD_COMMAND_BUFFER_FULL', 393216)
CUPTI_ACTIVITY_OVERHEAD_ACTIVITY_BUFFER_REQUEST = CUpti_ActivityOverheadKind.define('CUPTI_ACTIVITY_OVERHEAD_ACTIVITY_BUFFER_REQUEST', 458752)
CUPTI_ACTIVITY_OVERHEAD_UVM_ACTIVITY_INIT = CUpti_ActivityOverheadKind.define('CUPTI_ACTIVITY_OVERHEAD_UVM_ACTIVITY_INIT', 524288)
CUPTI_ACTIVITY_OVERHEAD_FORCE_INT = CUpti_ActivityOverheadKind.define('CUPTI_ACTIVITY_OVERHEAD_FORCE_INT', 2147483647)
# CUpti_ActivityComputeApiKind: which compute API a context belongs to.
class CUpti_ActivityComputeApiKind(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_ACTIVITY_COMPUTE_API_UNKNOWN = CUpti_ActivityComputeApiKind.define('CUPTI_ACTIVITY_COMPUTE_API_UNKNOWN', 0)
CUPTI_ACTIVITY_COMPUTE_API_CUDA = CUpti_ActivityComputeApiKind.define('CUPTI_ACTIVITY_COMPUTE_API_CUDA', 1)
CUPTI_ACTIVITY_COMPUTE_API_CUDA_MPS = CUpti_ActivityComputeApiKind.define('CUPTI_ACTIVITY_COMPUTE_API_CUDA_MPS', 2)
CUPTI_ACTIVITY_COMPUTE_API_FORCE_INT = CUpti_ActivityComputeApiKind.define('CUPTI_ACTIVITY_COMPUTE_API_FORCE_INT', 2147483647)
# CUpti_ActivityFlag: per-record bit flags. Several names deliberately share
# the same numeric bit (e.g. many distinct flags are 1) — judging by the
# names, each applies to a different activity record kind, so the bits never
# collide within one record. Values are copied verbatim from the C enum.
class CUpti_ActivityFlag(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_ACTIVITY_FLAG_NONE = CUpti_ActivityFlag.define('CUPTI_ACTIVITY_FLAG_NONE', 0)
CUPTI_ACTIVITY_FLAG_DEVICE_CONCURRENT_KERNELS = CUpti_ActivityFlag.define('CUPTI_ACTIVITY_FLAG_DEVICE_CONCURRENT_KERNELS', 1)
CUPTI_ACTIVITY_FLAG_DEVICE_ATTRIBUTE_CUDEVICE = CUpti_ActivityFlag.define('CUPTI_ACTIVITY_FLAG_DEVICE_ATTRIBUTE_CUDEVICE', 1)
CUPTI_ACTIVITY_FLAG_MEMCPY_ASYNC = CUpti_ActivityFlag.define('CUPTI_ACTIVITY_FLAG_MEMCPY_ASYNC', 1)
CUPTI_ACTIVITY_FLAG_MARKER_INSTANTANEOUS = CUpti_ActivityFlag.define('CUPTI_ACTIVITY_FLAG_MARKER_INSTANTANEOUS', 1)
CUPTI_ACTIVITY_FLAG_MARKER_START = CUpti_ActivityFlag.define('CUPTI_ACTIVITY_FLAG_MARKER_START', 2)
CUPTI_ACTIVITY_FLAG_MARKER_END = CUpti_ActivityFlag.define('CUPTI_ACTIVITY_FLAG_MARKER_END', 4)
CUPTI_ACTIVITY_FLAG_MARKER_SYNC_ACQUIRE = CUpti_ActivityFlag.define('CUPTI_ACTIVITY_FLAG_MARKER_SYNC_ACQUIRE', 8)
CUPTI_ACTIVITY_FLAG_MARKER_SYNC_ACQUIRE_SUCCESS = CUpti_ActivityFlag.define('CUPTI_ACTIVITY_FLAG_MARKER_SYNC_ACQUIRE_SUCCESS', 16)
CUPTI_ACTIVITY_FLAG_MARKER_SYNC_ACQUIRE_FAILED = CUpti_ActivityFlag.define('CUPTI_ACTIVITY_FLAG_MARKER_SYNC_ACQUIRE_FAILED', 32)
CUPTI_ACTIVITY_FLAG_MARKER_SYNC_RELEASE = CUpti_ActivityFlag.define('CUPTI_ACTIVITY_FLAG_MARKER_SYNC_RELEASE', 64)
CUPTI_ACTIVITY_FLAG_MARKER_COLOR_NONE = CUpti_ActivityFlag.define('CUPTI_ACTIVITY_FLAG_MARKER_COLOR_NONE', 1)
CUPTI_ACTIVITY_FLAG_MARKER_COLOR_ARGB = CUpti_ActivityFlag.define('CUPTI_ACTIVITY_FLAG_MARKER_COLOR_ARGB', 2)
# 0xff masks below carry an access size; the adjacent bits qualify the access.
CUPTI_ACTIVITY_FLAG_GLOBAL_ACCESS_KIND_SIZE_MASK = CUpti_ActivityFlag.define('CUPTI_ACTIVITY_FLAG_GLOBAL_ACCESS_KIND_SIZE_MASK', 255)
CUPTI_ACTIVITY_FLAG_GLOBAL_ACCESS_KIND_LOAD = CUpti_ActivityFlag.define('CUPTI_ACTIVITY_FLAG_GLOBAL_ACCESS_KIND_LOAD', 256)
CUPTI_ACTIVITY_FLAG_GLOBAL_ACCESS_KIND_CACHED = CUpti_ActivityFlag.define('CUPTI_ACTIVITY_FLAG_GLOBAL_ACCESS_KIND_CACHED', 512)
CUPTI_ACTIVITY_FLAG_METRIC_OVERFLOWED = CUpti_ActivityFlag.define('CUPTI_ACTIVITY_FLAG_METRIC_OVERFLOWED', 1)
CUPTI_ACTIVITY_FLAG_METRIC_VALUE_INVALID = CUpti_ActivityFlag.define('CUPTI_ACTIVITY_FLAG_METRIC_VALUE_INVALID', 2)
CUPTI_ACTIVITY_FLAG_INSTRUCTION_VALUE_INVALID = CUpti_ActivityFlag.define('CUPTI_ACTIVITY_FLAG_INSTRUCTION_VALUE_INVALID', 1)
CUPTI_ACTIVITY_FLAG_INSTRUCTION_CLASS_MASK = CUpti_ActivityFlag.define('CUPTI_ACTIVITY_FLAG_INSTRUCTION_CLASS_MASK', 510)
CUPTI_ACTIVITY_FLAG_FLUSH_FORCED = CUpti_ActivityFlag.define('CUPTI_ACTIVITY_FLAG_FLUSH_FORCED', 1)
CUPTI_ACTIVITY_FLAG_SHARED_ACCESS_KIND_SIZE_MASK = CUpti_ActivityFlag.define('CUPTI_ACTIVITY_FLAG_SHARED_ACCESS_KIND_SIZE_MASK', 255)
CUPTI_ACTIVITY_FLAG_SHARED_ACCESS_KIND_LOAD = CUpti_ActivityFlag.define('CUPTI_ACTIVITY_FLAG_SHARED_ACCESS_KIND_LOAD', 256)
CUPTI_ACTIVITY_FLAG_MEMSET_ASYNC = CUpti_ActivityFlag.define('CUPTI_ACTIVITY_FLAG_MEMSET_ASYNC', 1)
CUPTI_ACTIVITY_FLAG_THRASHING_IN_CPU = CUpti_ActivityFlag.define('CUPTI_ACTIVITY_FLAG_THRASHING_IN_CPU', 1)
CUPTI_ACTIVITY_FLAG_THROTTLING_IN_CPU = CUpti_ActivityFlag.define('CUPTI_ACTIVITY_FLAG_THROTTLING_IN_CPU', 1)
CUPTI_ACTIVITY_FLAG_FORCE_INT = CUpti_ActivityFlag.define('CUPTI_ACTIVITY_FLAG_FORCE_INT', 2147483647)
# CUpti_ActivityPCSamplingStallReason: why a warp was stalled at a sampled PC.
class CUpti_ActivityPCSamplingStallReason(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_ACTIVITY_PC_SAMPLING_STALL_INVALID = CUpti_ActivityPCSamplingStallReason.define('CUPTI_ACTIVITY_PC_SAMPLING_STALL_INVALID', 0)
CUPTI_ACTIVITY_PC_SAMPLING_STALL_NONE = CUpti_ActivityPCSamplingStallReason.define('CUPTI_ACTIVITY_PC_SAMPLING_STALL_NONE', 1)
CUPTI_ACTIVITY_PC_SAMPLING_STALL_INST_FETCH = CUpti_ActivityPCSamplingStallReason.define('CUPTI_ACTIVITY_PC_SAMPLING_STALL_INST_FETCH', 2)
CUPTI_ACTIVITY_PC_SAMPLING_STALL_EXEC_DEPENDENCY = CUpti_ActivityPCSamplingStallReason.define('CUPTI_ACTIVITY_PC_SAMPLING_STALL_EXEC_DEPENDENCY', 3)
CUPTI_ACTIVITY_PC_SAMPLING_STALL_MEMORY_DEPENDENCY = CUpti_ActivityPCSamplingStallReason.define('CUPTI_ACTIVITY_PC_SAMPLING_STALL_MEMORY_DEPENDENCY', 4)
CUPTI_ACTIVITY_PC_SAMPLING_STALL_TEXTURE = CUpti_ActivityPCSamplingStallReason.define('CUPTI_ACTIVITY_PC_SAMPLING_STALL_TEXTURE', 5)
CUPTI_ACTIVITY_PC_SAMPLING_STALL_SYNC = CUpti_ActivityPCSamplingStallReason.define('CUPTI_ACTIVITY_PC_SAMPLING_STALL_SYNC', 6)
CUPTI_ACTIVITY_PC_SAMPLING_STALL_CONSTANT_MEMORY_DEPENDENCY = CUpti_ActivityPCSamplingStallReason.define('CUPTI_ACTIVITY_PC_SAMPLING_STALL_CONSTANT_MEMORY_DEPENDENCY', 7)
CUPTI_ACTIVITY_PC_SAMPLING_STALL_PIPE_BUSY = CUpti_ActivityPCSamplingStallReason.define('CUPTI_ACTIVITY_PC_SAMPLING_STALL_PIPE_BUSY', 8)
CUPTI_ACTIVITY_PC_SAMPLING_STALL_MEMORY_THROTTLE = CUpti_ActivityPCSamplingStallReason.define('CUPTI_ACTIVITY_PC_SAMPLING_STALL_MEMORY_THROTTLE', 9)
CUPTI_ACTIVITY_PC_SAMPLING_STALL_NOT_SELECTED = CUpti_ActivityPCSamplingStallReason.define('CUPTI_ACTIVITY_PC_SAMPLING_STALL_NOT_SELECTED', 10)
CUPTI_ACTIVITY_PC_SAMPLING_STALL_OTHER = CUpti_ActivityPCSamplingStallReason.define('CUPTI_ACTIVITY_PC_SAMPLING_STALL_OTHER', 11)
CUPTI_ACTIVITY_PC_SAMPLING_STALL_SLEEPING = CUpti_ActivityPCSamplingStallReason.define('CUPTI_ACTIVITY_PC_SAMPLING_STALL_SLEEPING', 12)
CUPTI_ACTIVITY_PC_SAMPLING_STALL_FORCE_INT = CUpti_ActivityPCSamplingStallReason.define('CUPTI_ACTIVITY_PC_SAMPLING_STALL_FORCE_INT', 2147483647)
# CUpti_ActivityPCSamplingPeriod: coarse sampling-period selector (MIN..MAX).
class CUpti_ActivityPCSamplingPeriod(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_ACTIVITY_PC_SAMPLING_PERIOD_INVALID = CUpti_ActivityPCSamplingPeriod.define('CUPTI_ACTIVITY_PC_SAMPLING_PERIOD_INVALID', 0)
CUPTI_ACTIVITY_PC_SAMPLING_PERIOD_MIN = CUpti_ActivityPCSamplingPeriod.define('CUPTI_ACTIVITY_PC_SAMPLING_PERIOD_MIN', 1)
CUPTI_ACTIVITY_PC_SAMPLING_PERIOD_LOW = CUpti_ActivityPCSamplingPeriod.define('CUPTI_ACTIVITY_PC_SAMPLING_PERIOD_LOW', 2)
CUPTI_ACTIVITY_PC_SAMPLING_PERIOD_MID = CUpti_ActivityPCSamplingPeriod.define('CUPTI_ACTIVITY_PC_SAMPLING_PERIOD_MID', 3)
CUPTI_ACTIVITY_PC_SAMPLING_PERIOD_HIGH = CUpti_ActivityPCSamplingPeriod.define('CUPTI_ACTIVITY_PC_SAMPLING_PERIOD_HIGH', 4)
CUPTI_ACTIVITY_PC_SAMPLING_PERIOD_MAX = CUpti_ActivityPCSamplingPeriod.define('CUPTI_ACTIVITY_PC_SAMPLING_PERIOD_MAX', 5)
CUPTI_ACTIVITY_PC_SAMPLING_PERIOD_FORCE_INT = CUpti_ActivityPCSamplingPeriod.define('CUPTI_ACTIVITY_PC_SAMPLING_PERIOD_FORCE_INT', 2147483647)
# CUpti_ActivityMemcpyKind: copy direction (H=host, D=device, A=array,
# P=peer), e.g. HTOD = host-to-device.
class CUpti_ActivityMemcpyKind(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_ACTIVITY_MEMCPY_KIND_UNKNOWN = CUpti_ActivityMemcpyKind.define('CUPTI_ACTIVITY_MEMCPY_KIND_UNKNOWN', 0)
CUPTI_ACTIVITY_MEMCPY_KIND_HTOD = CUpti_ActivityMemcpyKind.define('CUPTI_ACTIVITY_MEMCPY_KIND_HTOD', 1)
CUPTI_ACTIVITY_MEMCPY_KIND_DTOH = CUpti_ActivityMemcpyKind.define('CUPTI_ACTIVITY_MEMCPY_KIND_DTOH', 2)
CUPTI_ACTIVITY_MEMCPY_KIND_HTOA = CUpti_ActivityMemcpyKind.define('CUPTI_ACTIVITY_MEMCPY_KIND_HTOA', 3)
CUPTI_ACTIVITY_MEMCPY_KIND_ATOH = CUpti_ActivityMemcpyKind.define('CUPTI_ACTIVITY_MEMCPY_KIND_ATOH', 4)
CUPTI_ACTIVITY_MEMCPY_KIND_ATOA = CUpti_ActivityMemcpyKind.define('CUPTI_ACTIVITY_MEMCPY_KIND_ATOA', 5)
CUPTI_ACTIVITY_MEMCPY_KIND_ATOD = CUpti_ActivityMemcpyKind.define('CUPTI_ACTIVITY_MEMCPY_KIND_ATOD', 6)
CUPTI_ACTIVITY_MEMCPY_KIND_DTOA = CUpti_ActivityMemcpyKind.define('CUPTI_ACTIVITY_MEMCPY_KIND_DTOA', 7)
CUPTI_ACTIVITY_MEMCPY_KIND_DTOD = CUpti_ActivityMemcpyKind.define('CUPTI_ACTIVITY_MEMCPY_KIND_DTOD', 8)
CUPTI_ACTIVITY_MEMCPY_KIND_HTOH = CUpti_ActivityMemcpyKind.define('CUPTI_ACTIVITY_MEMCPY_KIND_HTOH', 9)
CUPTI_ACTIVITY_MEMCPY_KIND_PTOP = CUpti_ActivityMemcpyKind.define('CUPTI_ACTIVITY_MEMCPY_KIND_PTOP', 10)
CUPTI_ACTIVITY_MEMCPY_KIND_FORCE_INT = CUpti_ActivityMemcpyKind.define('CUPTI_ACTIVITY_MEMCPY_KIND_FORCE_INT', 2147483647)
# CUpti_ActivityMemoryKind: classification of the memory touched by an
# activity (pageable/pinned host, device, CUDA array, managed, statics).
class CUpti_ActivityMemoryKind(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_ACTIVITY_MEMORY_KIND_UNKNOWN = CUpti_ActivityMemoryKind.define('CUPTI_ACTIVITY_MEMORY_KIND_UNKNOWN', 0)
CUPTI_ACTIVITY_MEMORY_KIND_PAGEABLE = CUpti_ActivityMemoryKind.define('CUPTI_ACTIVITY_MEMORY_KIND_PAGEABLE', 1)
CUPTI_ACTIVITY_MEMORY_KIND_PINNED = CUpti_ActivityMemoryKind.define('CUPTI_ACTIVITY_MEMORY_KIND_PINNED', 2)
CUPTI_ACTIVITY_MEMORY_KIND_DEVICE = CUpti_ActivityMemoryKind.define('CUPTI_ACTIVITY_MEMORY_KIND_DEVICE', 3)
CUPTI_ACTIVITY_MEMORY_KIND_ARRAY = CUpti_ActivityMemoryKind.define('CUPTI_ACTIVITY_MEMORY_KIND_ARRAY', 4)
CUPTI_ACTIVITY_MEMORY_KIND_MANAGED = CUpti_ActivityMemoryKind.define('CUPTI_ACTIVITY_MEMORY_KIND_MANAGED', 5)
CUPTI_ACTIVITY_MEMORY_KIND_DEVICE_STATIC = CUpti_ActivityMemoryKind.define('CUPTI_ACTIVITY_MEMORY_KIND_DEVICE_STATIC', 6)
CUPTI_ACTIVITY_MEMORY_KIND_MANAGED_STATIC = CUpti_ActivityMemoryKind.define('CUPTI_ACTIVITY_MEMORY_KIND_MANAGED_STATIC', 7)
CUPTI_ACTIVITY_MEMORY_KIND_FORCE_INT = CUpti_ActivityMemoryKind.define('CUPTI_ACTIVITY_MEMORY_KIND_FORCE_INT', 2147483647)
# CUpti_ActivityPreemptionKind: save vs. restore phase of a preemption.
class CUpti_ActivityPreemptionKind(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_ACTIVITY_PREEMPTION_KIND_UNKNOWN = CUpti_ActivityPreemptionKind.define('CUPTI_ACTIVITY_PREEMPTION_KIND_UNKNOWN', 0)
CUPTI_ACTIVITY_PREEMPTION_KIND_SAVE = CUpti_ActivityPreemptionKind.define('CUPTI_ACTIVITY_PREEMPTION_KIND_SAVE', 1)
CUPTI_ACTIVITY_PREEMPTION_KIND_RESTORE = CUpti_ActivityPreemptionKind.define('CUPTI_ACTIVITY_PREEMPTION_KIND_RESTORE', 2)
CUPTI_ACTIVITY_PREEMPTION_KIND_FORCE_INT = CUpti_ActivityPreemptionKind.define('CUPTI_ACTIVITY_PREEMPTION_KIND_FORCE_INT', 2147483647)
# CUpti_ActivityEnvironmentKind: which environment metric an ENVIRONMENT
# record carries (clock speed, temperature, power, cooling).
class CUpti_ActivityEnvironmentKind(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_ACTIVITY_ENVIRONMENT_UNKNOWN = CUpti_ActivityEnvironmentKind.define('CUPTI_ACTIVITY_ENVIRONMENT_UNKNOWN', 0)
CUPTI_ACTIVITY_ENVIRONMENT_SPEED = CUpti_ActivityEnvironmentKind.define('CUPTI_ACTIVITY_ENVIRONMENT_SPEED', 1)
CUPTI_ACTIVITY_ENVIRONMENT_TEMPERATURE = CUpti_ActivityEnvironmentKind.define('CUPTI_ACTIVITY_ENVIRONMENT_TEMPERATURE', 2)
CUPTI_ACTIVITY_ENVIRONMENT_POWER = CUpti_ActivityEnvironmentKind.define('CUPTI_ACTIVITY_ENVIRONMENT_POWER', 3)
CUPTI_ACTIVITY_ENVIRONMENT_COOLING = CUpti_ActivityEnvironmentKind.define('CUPTI_ACTIVITY_ENVIRONMENT_COOLING', 4)
CUPTI_ACTIVITY_ENVIRONMENT_COUNT = CUpti_ActivityEnvironmentKind.define('CUPTI_ACTIVITY_ENVIRONMENT_COUNT', 5)
CUPTI_ACTIVITY_ENVIRONMENT_KIND_FORCE_INT = CUpti_ActivityEnvironmentKind.define('CUPTI_ACTIVITY_ENVIRONMENT_KIND_FORCE_INT', 2147483647)
# CUpti_EnvironmentClocksThrottleReason: bitmask of clock-throttle causes.
# Note UNKNOWN is 0x80000000 (2147483648) — above int32 max but valid for the
# declared c_uint32 storage; UNSUPPORTED is 0x40000000.
class CUpti_EnvironmentClocksThrottleReason(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_CLOCKS_THROTTLE_REASON_GPU_IDLE = CUpti_EnvironmentClocksThrottleReason.define('CUPTI_CLOCKS_THROTTLE_REASON_GPU_IDLE', 1)
CUPTI_CLOCKS_THROTTLE_REASON_USER_DEFINED_CLOCKS = CUpti_EnvironmentClocksThrottleReason.define('CUPTI_CLOCKS_THROTTLE_REASON_USER_DEFINED_CLOCKS', 2)
CUPTI_CLOCKS_THROTTLE_REASON_SW_POWER_CAP = CUpti_EnvironmentClocksThrottleReason.define('CUPTI_CLOCKS_THROTTLE_REASON_SW_POWER_CAP', 4)
CUPTI_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN = CUpti_EnvironmentClocksThrottleReason.define('CUPTI_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN', 8)
CUPTI_CLOCKS_THROTTLE_REASON_UNKNOWN = CUpti_EnvironmentClocksThrottleReason.define('CUPTI_CLOCKS_THROTTLE_REASON_UNKNOWN', 2147483648)
CUPTI_CLOCKS_THROTTLE_REASON_UNSUPPORTED = CUpti_EnvironmentClocksThrottleReason.define('CUPTI_CLOCKS_THROTTLE_REASON_UNSUPPORTED', 1073741824)
CUPTI_CLOCKS_THROTTLE_REASON_NONE = CUpti_EnvironmentClocksThrottleReason.define('CUPTI_CLOCKS_THROTTLE_REASON_NONE', 0)
CUPTI_CLOCKS_THROTTLE_REASON_FORCE_INT = CUpti_EnvironmentClocksThrottleReason.define('CUPTI_CLOCKS_THROTTLE_REASON_FORCE_INT', 2147483647)
# Unified-memory (UVM) activity enums. All values mirror the C headers.
# Scope: whether a UVM counter covers one device or all devices of a process.
class CUpti_ActivityUnifiedMemoryCounterScope(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_SCOPE_UNKNOWN = CUpti_ActivityUnifiedMemoryCounterScope.define('CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_SCOPE_UNKNOWN', 0)
CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_SCOPE_PROCESS_SINGLE_DEVICE = CUpti_ActivityUnifiedMemoryCounterScope.define('CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_SCOPE_PROCESS_SINGLE_DEVICE', 1)
CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_SCOPE_PROCESS_ALL_DEVICES = CUpti_ActivityUnifiedMemoryCounterScope.define('CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_SCOPE_PROCESS_ALL_DEVICES', 2)
CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_SCOPE_COUNT = CUpti_ActivityUnifiedMemoryCounterScope.define('CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_SCOPE_COUNT', 3)
CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_SCOPE_FORCE_INT = CUpti_ActivityUnifiedMemoryCounterScope.define('CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_SCOPE_FORCE_INT', 2147483647)
# Counter kind: which UVM event is being counted (transfers, page faults,
# thrashing, throttling, remote maps).
class CUpti_ActivityUnifiedMemoryCounterKind(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_UNKNOWN = CUpti_ActivityUnifiedMemoryCounterKind.define('CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_UNKNOWN', 0)
CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_HTOD = CUpti_ActivityUnifiedMemoryCounterKind.define('CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_HTOD', 1)
CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_DTOH = CUpti_ActivityUnifiedMemoryCounterKind.define('CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_DTOH', 2)
CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_CPU_PAGE_FAULT_COUNT = CUpti_ActivityUnifiedMemoryCounterKind.define('CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_CPU_PAGE_FAULT_COUNT', 3)
CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_GPU_PAGE_FAULT = CUpti_ActivityUnifiedMemoryCounterKind.define('CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_GPU_PAGE_FAULT', 4)
CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_THRASHING = CUpti_ActivityUnifiedMemoryCounterKind.define('CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_THRASHING', 5)
CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_THROTTLING = CUpti_ActivityUnifiedMemoryCounterKind.define('CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_THROTTLING', 6)
CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_REMOTE_MAP = CUpti_ActivityUnifiedMemoryCounterKind.define('CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_REMOTE_MAP', 7)
CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_DTOD = CUpti_ActivityUnifiedMemoryCounterKind.define('CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_DTOD', 8)
CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_COUNT = CUpti_ActivityUnifiedMemoryCounterKind.define('CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_COUNT', 9)
CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_FORCE_INT = CUpti_ActivityUnifiedMemoryCounterKind.define('CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_FORCE_INT', 2147483647)
# Access type behind a UVM GPU page fault. (No FORCE_INT sentinel in the C
# header for this and the next two enums — generator copies that as-is.)
class CUpti_ActivityUnifiedMemoryAccessType(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_ACTIVITY_UNIFIED_MEMORY_ACCESS_TYPE_UNKNOWN = CUpti_ActivityUnifiedMemoryAccessType.define('CUPTI_ACTIVITY_UNIFIED_MEMORY_ACCESS_TYPE_UNKNOWN', 0)
CUPTI_ACTIVITY_UNIFIED_MEMORY_ACCESS_TYPE_READ = CUpti_ActivityUnifiedMemoryAccessType.define('CUPTI_ACTIVITY_UNIFIED_MEMORY_ACCESS_TYPE_READ', 1)
CUPTI_ACTIVITY_UNIFIED_MEMORY_ACCESS_TYPE_WRITE = CUpti_ActivityUnifiedMemoryAccessType.define('CUPTI_ACTIVITY_UNIFIED_MEMORY_ACCESS_TYPE_WRITE', 2)
CUPTI_ACTIVITY_UNIFIED_MEMORY_ACCESS_TYPE_ATOMIC = CUpti_ActivityUnifiedMemoryAccessType.define('CUPTI_ACTIVITY_UNIFIED_MEMORY_ACCESS_TYPE_ATOMIC', 3)
CUPTI_ACTIVITY_UNIFIED_MEMORY_ACCESS_TYPE_PREFETCH = CUpti_ActivityUnifiedMemoryAccessType.define('CUPTI_ACTIVITY_UNIFIED_MEMORY_ACCESS_TYPE_PREFETCH', 4)
# Why a UVM page migration happened.
class CUpti_ActivityUnifiedMemoryMigrationCause(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_ACTIVITY_UNIFIED_MEMORY_MIGRATION_CAUSE_UNKNOWN = CUpti_ActivityUnifiedMemoryMigrationCause.define('CUPTI_ACTIVITY_UNIFIED_MEMORY_MIGRATION_CAUSE_UNKNOWN', 0)
CUPTI_ACTIVITY_UNIFIED_MEMORY_MIGRATION_CAUSE_USER = CUpti_ActivityUnifiedMemoryMigrationCause.define('CUPTI_ACTIVITY_UNIFIED_MEMORY_MIGRATION_CAUSE_USER', 1)
CUPTI_ACTIVITY_UNIFIED_MEMORY_MIGRATION_CAUSE_COHERENCE = CUpti_ActivityUnifiedMemoryMigrationCause.define('CUPTI_ACTIVITY_UNIFIED_MEMORY_MIGRATION_CAUSE_COHERENCE', 2)
CUPTI_ACTIVITY_UNIFIED_MEMORY_MIGRATION_CAUSE_PREFETCH = CUpti_ActivityUnifiedMemoryMigrationCause.define('CUPTI_ACTIVITY_UNIFIED_MEMORY_MIGRATION_CAUSE_PREFETCH', 3)
CUPTI_ACTIVITY_UNIFIED_MEMORY_MIGRATION_CAUSE_EVICTION = CUpti_ActivityUnifiedMemoryMigrationCause.define('CUPTI_ACTIVITY_UNIFIED_MEMORY_MIGRATION_CAUSE_EVICTION', 4)
CUPTI_ACTIVITY_UNIFIED_MEMORY_MIGRATION_CAUSE_ACCESS_COUNTERS = CUpti_ActivityUnifiedMemoryMigrationCause.define('CUPTI_ACTIVITY_UNIFIED_MEMORY_MIGRATION_CAUSE_ACCESS_COUNTERS', 5)
# Why a UVM page was remote-mapped instead of migrated.
class CUpti_ActivityUnifiedMemoryRemoteMapCause(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_ACTIVITY_UNIFIED_MEMORY_REMOTE_MAP_CAUSE_UNKNOWN = CUpti_ActivityUnifiedMemoryRemoteMapCause.define('CUPTI_ACTIVITY_UNIFIED_MEMORY_REMOTE_MAP_CAUSE_UNKNOWN', 0)
CUPTI_ACTIVITY_UNIFIED_MEMORY_REMOTE_MAP_CAUSE_COHERENCE = CUpti_ActivityUnifiedMemoryRemoteMapCause.define('CUPTI_ACTIVITY_UNIFIED_MEMORY_REMOTE_MAP_CAUSE_COHERENCE', 1)
CUPTI_ACTIVITY_UNIFIED_MEMORY_REMOTE_MAP_CAUSE_THRASHING = CUpti_ActivityUnifiedMemoryRemoteMapCause.define('CUPTI_ACTIVITY_UNIFIED_MEMORY_REMOTE_MAP_CAUSE_THRASHING', 2)
CUPTI_ACTIVITY_UNIFIED_MEMORY_REMOTE_MAP_CAUSE_POLICY = CUpti_ActivityUnifiedMemoryRemoteMapCause.define('CUPTI_ACTIVITY_UNIFIED_MEMORY_REMOTE_MAP_CAUSE_POLICY', 3)
CUPTI_ACTIVITY_UNIFIED_MEMORY_REMOTE_MAP_CAUSE_OUT_OF_MEMORY = CUpti_ActivityUnifiedMemoryRemoteMapCause.define('CUPTI_ACTIVITY_UNIFIED_MEMORY_REMOTE_MAP_CAUSE_OUT_OF_MEMORY', 4)
CUPTI_ACTIVITY_UNIFIED_MEMORY_REMOTE_MAP_CAUSE_EVICTION = CUpti_ActivityUnifiedMemoryRemoteMapCause.define('CUPTI_ACTIVITY_UNIFIED_MEMORY_REMOTE_MAP_CAUSE_EVICTION', 5)
# CUpti_ActivityInstructionClass: coarse classification of a sampled/profiled
# GPU instruction (FP width, memory space accessed, atomics, barriers, ...).
class CUpti_ActivityInstructionClass(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_ACTIVITY_INSTRUCTION_CLASS_UNKNOWN = CUpti_ActivityInstructionClass.define('CUPTI_ACTIVITY_INSTRUCTION_CLASS_UNKNOWN', 0)
CUPTI_ACTIVITY_INSTRUCTION_CLASS_FP_32 = CUpti_ActivityInstructionClass.define('CUPTI_ACTIVITY_INSTRUCTION_CLASS_FP_32', 1)
CUPTI_ACTIVITY_INSTRUCTION_CLASS_FP_64 = CUpti_ActivityInstructionClass.define('CUPTI_ACTIVITY_INSTRUCTION_CLASS_FP_64', 2)
CUPTI_ACTIVITY_INSTRUCTION_CLASS_INTEGER = CUpti_ActivityInstructionClass.define('CUPTI_ACTIVITY_INSTRUCTION_CLASS_INTEGER', 3)
CUPTI_ACTIVITY_INSTRUCTION_CLASS_BIT_CONVERSION = CUpti_ActivityInstructionClass.define('CUPTI_ACTIVITY_INSTRUCTION_CLASS_BIT_CONVERSION', 4)
CUPTI_ACTIVITY_INSTRUCTION_CLASS_CONTROL_FLOW = CUpti_ActivityInstructionClass.define('CUPTI_ACTIVITY_INSTRUCTION_CLASS_CONTROL_FLOW', 5)
CUPTI_ACTIVITY_INSTRUCTION_CLASS_GLOBAL = CUpti_ActivityInstructionClass.define('CUPTI_ACTIVITY_INSTRUCTION_CLASS_GLOBAL', 6)
CUPTI_ACTIVITY_INSTRUCTION_CLASS_SHARED = CUpti_ActivityInstructionClass.define('CUPTI_ACTIVITY_INSTRUCTION_CLASS_SHARED', 7)
CUPTI_ACTIVITY_INSTRUCTION_CLASS_LOCAL = CUpti_ActivityInstructionClass.define('CUPTI_ACTIVITY_INSTRUCTION_CLASS_LOCAL', 8)
CUPTI_ACTIVITY_INSTRUCTION_CLASS_GENERIC = CUpti_ActivityInstructionClass.define('CUPTI_ACTIVITY_INSTRUCTION_CLASS_GENERIC', 9)
CUPTI_ACTIVITY_INSTRUCTION_CLASS_SURFACE = CUpti_ActivityInstructionClass.define('CUPTI_ACTIVITY_INSTRUCTION_CLASS_SURFACE', 10)
CUPTI_ACTIVITY_INSTRUCTION_CLASS_CONSTANT = CUpti_ActivityInstructionClass.define('CUPTI_ACTIVITY_INSTRUCTION_CLASS_CONSTANT', 11)
CUPTI_ACTIVITY_INSTRUCTION_CLASS_TEXTURE = CUpti_ActivityInstructionClass.define('CUPTI_ACTIVITY_INSTRUCTION_CLASS_TEXTURE', 12)
CUPTI_ACTIVITY_INSTRUCTION_CLASS_GLOBAL_ATOMIC = CUpti_ActivityInstructionClass.define('CUPTI_ACTIVITY_INSTRUCTION_CLASS_GLOBAL_ATOMIC', 13)
CUPTI_ACTIVITY_INSTRUCTION_CLASS_SHARED_ATOMIC = CUpti_ActivityInstructionClass.define('CUPTI_ACTIVITY_INSTRUCTION_CLASS_SHARED_ATOMIC', 14)
CUPTI_ACTIVITY_INSTRUCTION_CLASS_SURFACE_ATOMIC = CUpti_ActivityInstructionClass.define('CUPTI_ACTIVITY_INSTRUCTION_CLASS_SURFACE_ATOMIC', 15)
CUPTI_ACTIVITY_INSTRUCTION_CLASS_INTER_THREAD_COMMUNICATION = CUpti_ActivityInstructionClass.define('CUPTI_ACTIVITY_INSTRUCTION_CLASS_INTER_THREAD_COMMUNICATION', 16)
CUPTI_ACTIVITY_INSTRUCTION_CLASS_BARRIER = CUpti_ActivityInstructionClass.define('CUPTI_ACTIVITY_INSTRUCTION_CLASS_BARRIER', 17)
CUPTI_ACTIVITY_INSTRUCTION_CLASS_MISCELLANEOUS = CUpti_ActivityInstructionClass.define('CUPTI_ACTIVITY_INSTRUCTION_CLASS_MISCELLANEOUS', 18)
CUPTI_ACTIVITY_INSTRUCTION_CLASS_FP_16 = CUpti_ActivityInstructionClass.define('CUPTI_ACTIVITY_INSTRUCTION_CLASS_FP_16', 19)
CUPTI_ACTIVITY_INSTRUCTION_CLASS_UNIFORM = CUpti_ActivityInstructionClass.define('CUPTI_ACTIVITY_INSTRUCTION_CLASS_UNIFORM', 20)
CUPTI_ACTIVITY_INSTRUCTION_CLASS_KIND_FORCE_INT = CUpti_ActivityInstructionClass.define('CUPTI_ACTIVITY_INSTRUCTION_CLASS_KIND_FORCE_INT', 2147483647)
# CUpti_ActivityPartitionedGlobalCacheConfig: partitioned global-cache state
# reported on kernel records.
class CUpti_ActivityPartitionedGlobalCacheConfig(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_ACTIVITY_PARTITIONED_GLOBAL_CACHE_CONFIG_UNKNOWN = CUpti_ActivityPartitionedGlobalCacheConfig.define('CUPTI_ACTIVITY_PARTITIONED_GLOBAL_CACHE_CONFIG_UNKNOWN', 0)
CUPTI_ACTIVITY_PARTITIONED_GLOBAL_CACHE_CONFIG_NOT_SUPPORTED = CUpti_ActivityPartitionedGlobalCacheConfig.define('CUPTI_ACTIVITY_PARTITIONED_GLOBAL_CACHE_CONFIG_NOT_SUPPORTED', 1)
CUPTI_ACTIVITY_PARTITIONED_GLOBAL_CACHE_CONFIG_OFF = CUpti_ActivityPartitionedGlobalCacheConfig.define('CUPTI_ACTIVITY_PARTITIONED_GLOBAL_CACHE_CONFIG_OFF', 2)
CUPTI_ACTIVITY_PARTITIONED_GLOBAL_CACHE_CONFIG_ON = CUpti_ActivityPartitionedGlobalCacheConfig.define('CUPTI_ACTIVITY_PARTITIONED_GLOBAL_CACHE_CONFIG_ON', 3)
CUPTI_ACTIVITY_PARTITIONED_GLOBAL_CACHE_CONFIG_FORCE_INT = CUpti_ActivityPartitionedGlobalCacheConfig.define('CUPTI_ACTIVITY_PARTITIONED_GLOBAL_CACHE_CONFIG_FORCE_INT', 2147483647)
# CUpti_ActivitySynchronizationType: which CUDA synchronization call a
# SYNCHRONIZATION record describes.
class CUpti_ActivitySynchronizationType(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_ACTIVITY_SYNCHRONIZATION_TYPE_UNKNOWN = CUpti_ActivitySynchronizationType.define('CUPTI_ACTIVITY_SYNCHRONIZATION_TYPE_UNKNOWN', 0)
CUPTI_ACTIVITY_SYNCHRONIZATION_TYPE_EVENT_SYNCHRONIZE = CUpti_ActivitySynchronizationType.define('CUPTI_ACTIVITY_SYNCHRONIZATION_TYPE_EVENT_SYNCHRONIZE', 1)
CUPTI_ACTIVITY_SYNCHRONIZATION_TYPE_STREAM_WAIT_EVENT = CUpti_ActivitySynchronizationType.define('CUPTI_ACTIVITY_SYNCHRONIZATION_TYPE_STREAM_WAIT_EVENT', 2)
CUPTI_ACTIVITY_SYNCHRONIZATION_TYPE_STREAM_SYNCHRONIZE = CUpti_ActivitySynchronizationType.define('CUPTI_ACTIVITY_SYNCHRONIZATION_TYPE_STREAM_SYNCHRONIZE', 3)
CUPTI_ACTIVITY_SYNCHRONIZATION_TYPE_CONTEXT_SYNCHRONIZE = CUpti_ActivitySynchronizationType.define('CUPTI_ACTIVITY_SYNCHRONIZATION_TYPE_CONTEXT_SYNCHRONIZE', 4)
CUPTI_ACTIVITY_SYNCHRONIZATION_TYPE_FORCE_INT = CUpti_ActivitySynchronizationType.define('CUPTI_ACTIVITY_SYNCHRONIZATION_TYPE_FORCE_INT', 2147483647)
# CUpti_ActivityStreamFlag: how a stream was created; CREATE_MASK (0xffff)
# selects the creation-flag bits of the field.
class CUpti_ActivityStreamFlag(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_ACTIVITY_STREAM_CREATE_FLAG_UNKNOWN = CUpti_ActivityStreamFlag.define('CUPTI_ACTIVITY_STREAM_CREATE_FLAG_UNKNOWN', 0)
CUPTI_ACTIVITY_STREAM_CREATE_FLAG_DEFAULT = CUpti_ActivityStreamFlag.define('CUPTI_ACTIVITY_STREAM_CREATE_FLAG_DEFAULT', 1)
CUPTI_ACTIVITY_STREAM_CREATE_FLAG_NON_BLOCKING = CUpti_ActivityStreamFlag.define('CUPTI_ACTIVITY_STREAM_CREATE_FLAG_NON_BLOCKING', 2)
CUPTI_ACTIVITY_STREAM_CREATE_FLAG_NULL = CUpti_ActivityStreamFlag.define('CUPTI_ACTIVITY_STREAM_CREATE_FLAG_NULL', 3)
CUPTI_ACTIVITY_STREAM_CREATE_MASK = CUpti_ActivityStreamFlag.define('CUPTI_ACTIVITY_STREAM_CREATE_MASK', 65535)
CUPTI_ACTIVITY_STREAM_CREATE_FLAG_FORCE_INT = CUpti_ActivityStreamFlag.define('CUPTI_ACTIVITY_STREAM_CREATE_FLAG_FORCE_INT', 2147483647)
# CUpti_LinkFlag: bitmask of NVLink capability flags.
class CUpti_LinkFlag(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_LINK_FLAG_INVALID = CUpti_LinkFlag.define('CUPTI_LINK_FLAG_INVALID', 0)
CUPTI_LINK_FLAG_PEER_ACCESS = CUpti_LinkFlag.define('CUPTI_LINK_FLAG_PEER_ACCESS', 2)
CUPTI_LINK_FLAG_SYSMEM_ACCESS = CUpti_LinkFlag.define('CUPTI_LINK_FLAG_SYSMEM_ACCESS', 4)
CUPTI_LINK_FLAG_PEER_ATOMICS = CUpti_LinkFlag.define('CUPTI_LINK_FLAG_PEER_ATOMICS', 8)
CUPTI_LINK_FLAG_SYSMEM_ATOMICS = CUpti_LinkFlag.define('CUPTI_LINK_FLAG_SYSMEM_ATOMICS', 16)
CUPTI_LINK_FLAG_FORCE_INT = CUpti_LinkFlag.define('CUPTI_LINK_FLAG_FORCE_INT', 2147483647)
# CUpti_ActivityMemoryOperationType: allocation vs. release on MEMORY2 records.
class CUpti_ActivityMemoryOperationType(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_ACTIVITY_MEMORY_OPERATION_TYPE_INVALID = CUpti_ActivityMemoryOperationType.define('CUPTI_ACTIVITY_MEMORY_OPERATION_TYPE_INVALID', 0)
CUPTI_ACTIVITY_MEMORY_OPERATION_TYPE_ALLOCATION = CUpti_ActivityMemoryOperationType.define('CUPTI_ACTIVITY_MEMORY_OPERATION_TYPE_ALLOCATION', 1)
CUPTI_ACTIVITY_MEMORY_OPERATION_TYPE_RELEASE = CUpti_ActivityMemoryOperationType.define('CUPTI_ACTIVITY_MEMORY_OPERATION_TYPE_RELEASE', 2)
CUPTI_ACTIVITY_MEMORY_OPERATION_TYPE_FORCE_INT = CUpti_ActivityMemoryOperationType.define('CUPTI_ACTIVITY_MEMORY_OPERATION_TYPE_FORCE_INT', 2147483647)
class CUpti_ActivityMemoryPoolType(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_ACTIVITY_MEMORY_POOL_TYPE_INVALID = CUpti_ActivityMemoryPoolType.define('CUPTI_ACTIVITY_MEMORY_POOL_TYPE_INVALID', 0)
CUPTI_ACTIVITY_MEMORY_POOL_TYPE_LOCAL = CUpti_ActivityMemoryPoolType.define('CUPTI_ACTIVITY_MEMORY_POOL_TYPE_LOCAL', 1)
CUPTI_ACTIVITY_MEMORY_POOL_TYPE_IMPORTED = CUpti_ActivityMemoryPoolType.define('CUPTI_ACTIVITY_MEMORY_POOL_TYPE_IMPORTED', 2)
CUPTI_ACTIVITY_MEMORY_POOL_TYPE_FORCE_INT = CUpti_ActivityMemoryPoolType.define('CUPTI_ACTIVITY_MEMORY_POOL_TYPE_FORCE_INT', 2147483647)
class CUpti_ActivityMemoryPoolOperationType(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_ACTIVITY_MEMORY_POOL_OPERATION_TYPE_INVALID = CUpti_ActivityMemoryPoolOperationType.define('CUPTI_ACTIVITY_MEMORY_POOL_OPERATION_TYPE_INVALID', 0)
CUPTI_ACTIVITY_MEMORY_POOL_OPERATION_TYPE_CREATED = CUpti_ActivityMemoryPoolOperationType.define('CUPTI_ACTIVITY_MEMORY_POOL_OPERATION_TYPE_CREATED', 1)
CUPTI_ACTIVITY_MEMORY_POOL_OPERATION_TYPE_DESTROYED = CUpti_ActivityMemoryPoolOperationType.define('CUPTI_ACTIVITY_MEMORY_POOL_OPERATION_TYPE_DESTROYED', 2)
CUPTI_ACTIVITY_MEMORY_POOL_OPERATION_TYPE_TRIMMED = CUpti_ActivityMemoryPoolOperationType.define('CUPTI_ACTIVITY_MEMORY_POOL_OPERATION_TYPE_TRIMMED', 3)
CUPTI_ACTIVITY_MEMORY_POOL_OPERATION_TYPE_FORCE_INT = CUpti_ActivityMemoryPoolOperationType.define('CUPTI_ACTIVITY_MEMORY_POOL_OPERATION_TYPE_FORCE_INT', 2147483647)
class CUpti_ChannelType(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_CHANNEL_TYPE_INVALID = CUpti_ChannelType.define('CUPTI_CHANNEL_TYPE_INVALID', 0)
CUPTI_CHANNEL_TYPE_COMPUTE = CUpti_ChannelType.define('CUPTI_CHANNEL_TYPE_COMPUTE', 1)
CUPTI_CHANNEL_TYPE_ASYNC_MEMCPY = CUpti_ChannelType.define('CUPTI_CHANNEL_TYPE_ASYNC_MEMCPY', 2)
CUPTI_CHANNEL_TYPE_DECOMP = CUpti_ChannelType.define('CUPTI_CHANNEL_TYPE_DECOMP', 3)
CUPTI_CHANNEL_TYPE_FORCE_INT = CUpti_ChannelType.define('CUPTI_CHANNEL_TYPE_FORCE_INT', 2147483647)
class CUpti_ContextCigMode(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_CONTEXT_CIG_MODE_NONE = CUpti_ContextCigMode.define('CUPTI_CONTEXT_CIG_MODE_NONE', 0)
CUPTI_CONTEXT_CIG_MODE_CIG = CUpti_ContextCigMode.define('CUPTI_CONTEXT_CIG_MODE_CIG', 1)
CUPTI_CONTEXT_CIG_MODE_CIG_FALLBACK = CUpti_ContextCigMode.define('CUPTI_CONTEXT_CIG_MODE_CIG_FALLBACK', 2)
CUPTI_CONTEXT_CIG_MODE_FORCE_INT = CUpti_ContextCigMode.define('CUPTI_CONTEXT_CIG_MODE_FORCE_INT', 2147483647)
# --- Fixed-layout struct mirrors ---
# Field pattern throughout: `name: Annotated[<py type>, <byte offset>]`;
# SIZE is sizeof() of the C struct.
# Configuration entry selecting one unified-memory counter (scope+kind) on a device.
@c.record
class CUpti_ActivityUnifiedMemoryCounterConfig(c.Struct):
  SIZE = 16
  scope: Annotated[CUpti_ActivityUnifiedMemoryCounterScope, 0]
  kind: Annotated[CUpti_ActivityUnifiedMemoryCounterKind, 4]
  deviceId: Annotated[uint32_t, 8]
  # Nonzero enables collection of this counter — TODO confirm against CUPTI docs.
  enable: Annotated[uint32_t, 12]
# Auto-boost state (enabled flag + owning pid) snapshot.
@c.record
class CUpti_ActivityAutoBoostState(c.Struct):
  SIZE = 8
  enabled: Annotated[uint32_t, 0]
  pid: Annotated[uint32_t, 4]
# PC-sampling configuration passed to the activity API.
@c.record
class CUpti_ActivityPCSamplingConfig(c.Struct):
  SIZE = 16
  # `size` is the struct-size/version field typical of CUPTI config structs — verify.
  size: Annotated[uint32_t, 0]
  samplingPeriod: Annotated[CUpti_ActivityPCSamplingPeriod, 4]
  samplingPeriod2: Annotated[uint32_t, 8]
# Base activity record: every record starts with a 4-byte `kind` discriminator
# (SIZE = 8 implies trailing padding/alignment to 8 bytes).
@c.record
class CUpti_Activity(c.Struct):
  SIZE = 8
  kind: Annotated[CUpti_ActivityKind, 0]
# Memcpy activity record (v6): one host<->device or device<->device copy.
# Timestamps (start/end) are uint64 counters; IDs tie the copy to its
# device/context/stream and to the API call via correlationId.
@c.record
class CUpti_ActivityMemcpy6(c.Struct):
  SIZE = 96
  kind: Annotated[CUpti_ActivityKind, 0]
  copyKind: Annotated[uint8_t, 4]
  srcKind: Annotated[uint8_t, 5]
  dstKind: Annotated[uint8_t, 6]
  flags: Annotated[uint8_t, 7]
  bytes: Annotated[uint64_t, 8]
  start: Annotated[uint64_t, 16]
  end: Annotated[uint64_t, 24]
  deviceId: Annotated[uint32_t, 32]
  contextId: Annotated[uint32_t, 36]
  streamId: Annotated[uint32_t, 40]
  correlationId: Annotated[uint32_t, 44]
  runtimeCorrelationId: Annotated[uint32_t, 48]
  # Explicit padding fields mirror the C layout; do not remove.
  pad: Annotated[uint32_t, 52]
  reserved0: Annotated[ctypes.c_void_p, 56]
  graphNodeId: Annotated[uint64_t, 64]
  graphId: Annotated[uint32_t, 72]
  channelID: Annotated[uint32_t, 76]
  channelType: Annotated[CUpti_ChannelType, 80]
  pad2: Annotated[uint32_t, 84]
  copyCount: Annotated[uint64_t, 88]
# NOTE(review): these aliases are defined after their first use above; this
# relies on deferred evaluation of annotations — confirm `from __future__
# import annotations` (or equivalent lazy resolution) at the top of the file.
uint8_t: TypeAlias = Annotated[int, ctypes.c_ubyte]
uint64_t: TypeAlias = Annotated[int, ctypes.c_uint64]
# Peer-to-peer memcpy record (v4): like CUpti_ActivityMemcpy6 but carries the
# source and destination device/context IDs of the P2P transfer.
@c.record
class CUpti_ActivityMemcpyPtoP4(c.Struct):
  SIZE = 96
  kind: Annotated[CUpti_ActivityKind, 0]
  copyKind: Annotated[uint8_t, 4]
  srcKind: Annotated[uint8_t, 5]
  dstKind: Annotated[uint8_t, 6]
  flags: Annotated[uint8_t, 7]
  bytes: Annotated[uint64_t, 8]
  start: Annotated[uint64_t, 16]
  end: Annotated[uint64_t, 24]
  deviceId: Annotated[uint32_t, 32]
  contextId: Annotated[uint32_t, 36]
  streamId: Annotated[uint32_t, 40]
  srcDeviceId: Annotated[uint32_t, 44]
  srcContextId: Annotated[uint32_t, 48]
  dstDeviceId: Annotated[uint32_t, 52]
  dstContextId: Annotated[uint32_t, 56]
  correlationId: Annotated[uint32_t, 60]
  reserved0: Annotated[ctypes.c_void_p, 64]
  graphNodeId: Annotated[uint64_t, 72]
  graphId: Annotated[uint32_t, 80]
  channelID: Annotated[uint32_t, 84]
  channelType: Annotated[CUpti_ChannelType, 88]
# Memset activity record (v4): `value` is the fill value, `bytes` the extent.
@c.record
class CUpti_ActivityMemset4(c.Struct):
  SIZE = 88
  kind: Annotated[CUpti_ActivityKind, 0]
  value: Annotated[uint32_t, 4]
  bytes: Annotated[uint64_t, 8]
  start: Annotated[uint64_t, 16]
  end: Annotated[uint64_t, 24]
  deviceId: Annotated[uint32_t, 32]
  contextId: Annotated[uint32_t, 36]
  streamId: Annotated[uint32_t, 40]
  correlationId: Annotated[uint32_t, 44]
  # flags/memoryKind are packed as two 16-bit fields in one 32-bit slot.
  flags: Annotated[uint16_t, 48]
  memoryKind: Annotated[uint16_t, 50]
  pad: Annotated[uint32_t, 52]
  reserved0: Annotated[ctypes.c_void_p, 56]
  graphNodeId: Annotated[uint64_t, 64]
  graphId: Annotated[uint32_t, 72]
  channelID: Annotated[uint32_t, 76]
  channelType: Annotated[CUpti_ChannelType, 80]
  pad2: Annotated[uint32_t, 84]
# NOTE(review): alias defined after first use (see note on uint8_t/uint64_t).
uint16_t: TypeAlias = Annotated[int, ctypes.c_uint16]
# Legacy memory activity record: one allocation's lifetime (alloc PC, free PC,
# start/end timestamps) in a single record.
@c.record
class CUpti_ActivityMemory(c.Struct):
  SIZE = 80
  kind: Annotated[CUpti_ActivityKind, 0]
  memoryKind: Annotated[CUpti_ActivityMemoryKind, 4]
  address: Annotated[uint64_t, 8]
  bytes: Annotated[uint64_t, 16]
  start: Annotated[uint64_t, 24]
  end: Annotated[uint64_t, 32]
  allocPC: Annotated[uint64_t, 40]
  freePC: Annotated[uint64_t, 48]
  processId: Annotated[uint32_t, 56]
  deviceId: Annotated[uint32_t, 60]
  contextId: Annotated[uint32_t, 64]
  pad: Annotated[uint32_t, 68]
  # NUL-terminated C string owned by CUPTI.
  name: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 72]
# Memory activity record (v4): one record per operation (alloc OR release,
# see memoryOperationType), with embedded memory-pool configuration.
@c.record
class CUpti_ActivityMemory4(c.Struct):
  SIZE = 128
  kind: Annotated[CUpti_ActivityKind, 0]
  memoryOperationType: Annotated[CUpti_ActivityMemoryOperationType, 4]
  memoryKind: Annotated[CUpti_ActivityMemoryKind, 8]
  correlationId: Annotated[uint32_t, 12]
  address: Annotated[uint64_t, 16]
  bytes: Annotated[uint64_t, 24]
  timestamp: Annotated[uint64_t, 32]
  PC: Annotated[uint64_t, 40]
  processId: Annotated[uint32_t, 48]
  deviceId: Annotated[uint32_t, 52]
  contextId: Annotated[uint32_t, 56]
  streamId: Annotated[uint32_t, 60]
  name: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 64]
  isAsync: Annotated[uint32_t, 72]
  pad1: Annotated[uint32_t, 76]
  # Embedded 40-byte sub-struct (defined below).
  memoryPoolConfig: Annotated[CUpti_ActivityMemory4_memoryPoolConfig, 80]
  source: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 120]
# Pool details embedded in CUpti_ActivityMemory4.
@c.record
class CUpti_ActivityMemory4_memoryPoolConfig(c.Struct):
  SIZE = 40
  memoryPoolType: Annotated[CUpti_ActivityMemoryPoolType, 0]
  pad2: Annotated[uint32_t, 4]
  address: Annotated[uint64_t, 8]
  releaseThreshold: Annotated[uint64_t, 16]
  pool: Annotated[CUpti_ActivityMemory4_memoryPoolConfig_pool, 24]
  utilizedSize: Annotated[uint64_t, 32]
# Both members sit at offset 0: this mirrors a C union — `size` for local
# pools, `processId` (presumably) for imported ones; interpret via memoryPoolType.
@c.record
class CUpti_ActivityMemory4_memoryPoolConfig_pool(c.Struct):
  SIZE = 8
  size: Annotated[uint64_t, 0]
  processId: Annotated[uint64_t, 0]
# Memory-pool activity record (v2): create/destroy/trim of a pool.
@c.record
class CUpti_ActivityMemoryPool2(c.Struct):
  SIZE = 72
  kind: Annotated[CUpti_ActivityKind, 0]
  memoryPoolOperationType: Annotated[CUpti_ActivityMemoryPoolOperationType, 4]
  memoryPoolType: Annotated[CUpti_ActivityMemoryPoolType, 8]
  correlationId: Annotated[uint32_t, 12]
  processId: Annotated[uint32_t, 16]
  deviceId: Annotated[uint32_t, 20]
  minBytesToKeep: Annotated[size_t, 24]
  address: Annotated[uint64_t, 32]
  size: Annotated[uint64_t, 40]
  releaseThreshold: Annotated[uint64_t, 48]
  timestamp: Annotated[uint64_t, 56]
  utilizedSize: Annotated[uint64_t, 64]
# NOTE(review): 64-bit size_t assumed; defined after first use (lazy annotations).
size_t: TypeAlias = Annotated[int, ctypes.c_uint64]
# How a kernel was launched (regular vs cooperative vs command-list).
class CUpti_ActivityLaunchType(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_ACTIVITY_LAUNCH_TYPE_REGULAR = CUpti_ActivityLaunchType.define('CUPTI_ACTIVITY_LAUNCH_TYPE_REGULAR', 0)
CUPTI_ACTIVITY_LAUNCH_TYPE_COOPERATIVE_SINGLE_DEVICE = CUpti_ActivityLaunchType.define('CUPTI_ACTIVITY_LAUNCH_TYPE_COOPERATIVE_SINGLE_DEVICE', 1)
CUPTI_ACTIVITY_LAUNCH_TYPE_COOPERATIVE_MULTI_DEVICE = CUpti_ActivityLaunchType.define('CUPTI_ACTIVITY_LAUNCH_TYPE_COOPERATIVE_MULTI_DEVICE', 2)
CUPTI_ACTIVITY_LAUNCH_TYPE_CBL_COMMANDLIST = CUpti_ActivityLaunchType.define('CUPTI_ACTIVITY_LAUNCH_TYPE_CBL_COMMANDLIST', 3)
# Per-function shared-memory limit configuration.
class CUpti_FuncShmemLimitConfig(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_FUNC_SHMEM_LIMIT_DEFAULT = CUpti_FuncShmemLimitConfig.define('CUPTI_FUNC_SHMEM_LIMIT_DEFAULT', 0)
CUPTI_FUNC_SHMEM_LIMIT_OPTIN = CUpti_FuncShmemLimitConfig.define('CUPTI_FUNC_SHMEM_LIMIT_OPTIN', 1)
CUPTI_FUNC_SHMEM_LIMIT_FORCE_INT = CUpti_FuncShmemLimitConfig.define('CUPTI_FUNC_SHMEM_LIMIT_FORCE_INT', 2147483647)
# Kernel execution record (v9): grid/block geometry, memory usage, timing
# (queued -> submitted -> start -> end -> completed) and graph/cluster info.
@c.record
class CUpti_ActivityKernel9(c.Struct):
  SIZE = 208
  kind: Annotated[CUpti_ActivityKind, 0]
  # 1-byte union, see CUpti_ActivityKernel9_cacheConfig below.
  cacheConfig: Annotated[CUpti_ActivityKernel9_cacheConfig, 4]
  sharedMemoryConfig: Annotated[uint8_t, 5]
  registersPerThread: Annotated[uint16_t, 6]
  partitionedGlobalCacheRequested: Annotated[CUpti_ActivityPartitionedGlobalCacheConfig, 8]
  partitionedGlobalCacheExecuted: Annotated[CUpti_ActivityPartitionedGlobalCacheConfig, 12]
  start: Annotated[uint64_t, 16]
  end: Annotated[uint64_t, 24]
  completed: Annotated[uint64_t, 32]
  deviceId: Annotated[uint32_t, 40]
  contextId: Annotated[uint32_t, 44]
  streamId: Annotated[uint32_t, 48]
  gridX: Annotated[int32_t, 52]
  gridY: Annotated[int32_t, 56]
  gridZ: Annotated[int32_t, 60]
  blockX: Annotated[int32_t, 64]
  blockY: Annotated[int32_t, 68]
  blockZ: Annotated[int32_t, 72]
  staticSharedMemory: Annotated[int32_t, 76]
  dynamicSharedMemory: Annotated[int32_t, 80]
  localMemoryPerThread: Annotated[uint32_t, 84]
  localMemoryTotal: Annotated[uint32_t, 88]
  correlationId: Annotated[uint32_t, 92]
  gridId: Annotated[int64_t, 96]
  name: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 104]
  reserved0: Annotated[ctypes.c_void_p, 112]
  queued: Annotated[uint64_t, 120]
  submitted: Annotated[uint64_t, 128]
  launchType: Annotated[uint8_t, 136]
  isSharedMemoryCarveoutRequested: Annotated[uint8_t, 137]
  sharedMemoryCarveoutRequested: Annotated[uint8_t, 138]
  padding: Annotated[uint8_t, 139]
  sharedMemoryExecuted: Annotated[uint32_t, 140]
  graphNodeId: Annotated[uint64_t, 144]
  shmemLimitConfig: Annotated[CUpti_FuncShmemLimitConfig, 152]
  graphId: Annotated[uint32_t, 156]
  pAccessPolicyWindow: Annotated[c.POINTER[CUaccessPolicyWindow], 160]
  channelID: Annotated[uint32_t, 168]
  channelType: Annotated[CUpti_ChannelType, 172]
  clusterX: Annotated[uint32_t, 176]
  clusterY: Annotated[uint32_t, 180]
  clusterZ: Annotated[uint32_t, 184]
  clusterSchedulingPolicy: Annotated[uint32_t, 188]
  # 64-bit replacement for the 32-bit localMemoryTotal above.
  localMemoryTotal_v2: Annotated[uint64_t, 192]
  maxPotentialClusterSize: Annotated[uint32_t, 200]
  maxActiveClusters: Annotated[uint32_t, 204]
# Both members at offset 0: C union of the raw byte and its bit-field view.
@c.record
class CUpti_ActivityKernel9_cacheConfig(c.Struct):
  SIZE = 1
  both: Annotated[uint8_t, 0]
  config: Annotated[CUpti_ActivityKernel9_cacheConfig_config, 0]
# Bit-field pair packed into one byte. The extra Annotated ints appear to
# encode (byte offset, bit width, bit offset) — TODO confirm against the
# `c.record` generator's convention.
@c.record
class CUpti_ActivityKernel9_cacheConfig_config(c.Struct):
  SIZE = 1
  requested: Annotated[uint8_t, 0, 4, 0]
  executed: Annotated[uint8_t, 0, 4, 4]
# NOTE(review): aliases defined after first use (lazy annotations assumed).
int32_t: TypeAlias = Annotated[int, ctypes.c_int32]
int64_t: TypeAlias = Annotated[int, ctypes.c_int64]
# CUDA driver-API access policy window (L2 persistence hint region):
# [base_ptr, base_ptr + num_bytes) with hit/miss access properties.
@c.record
class struct_CUaccessPolicyWindow_st(c.Struct):
  SIZE = 32
  base_ptr: Annotated[ctypes.c_void_p, 0]
  num_bytes: Annotated[size_t, 8]
  # Fraction of accesses to which hitProp applies (0.0-1.0 per CUDA docs — verify).
  hitRatio: Annotated[Annotated[float, ctypes.c_float], 16]
  hitProp: Annotated[CUaccessProperty, 20]
  missProp: Annotated[CUaccessProperty, 24]
CUaccessPolicyWindow: TypeAlias = struct_CUaccessPolicyWindow_st
# CUaccessProperty: cache residency hint for an access policy window.
class enum_CUaccessProperty_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_ACCESS_PROPERTY_NORMAL = enum_CUaccessProperty_enum.define('CU_ACCESS_PROPERTY_NORMAL', 0)
CU_ACCESS_PROPERTY_STREAMING = enum_CUaccessProperty_enum.define('CU_ACCESS_PROPERTY_STREAMING', 1)
CU_ACCESS_PROPERTY_PERSISTING = enum_CUaccessProperty_enum.define('CU_ACCESS_PROPERTY_PERSISTING', 2)
CUaccessProperty: TypeAlias = enum_CUaccessProperty_enum
# CDP (CUDA Dynamic Parallelism) kernel record: a device-launched kernel,
# linked to its parent grid via parentGridId/parentBlock{X,Y,Z}.
@c.record
class CUpti_ActivityCdpKernel(c.Struct):
  SIZE = 144
  kind: Annotated[CUpti_ActivityKind, 0]
  cacheConfig: Annotated[CUpti_ActivityCdpKernel_cacheConfig, 4]
  sharedMemoryConfig: Annotated[uint8_t, 5]
  registersPerThread: Annotated[uint16_t, 6]
  start: Annotated[uint64_t, 8]
  end: Annotated[uint64_t, 16]
  deviceId: Annotated[uint32_t, 24]
  contextId: Annotated[uint32_t, 28]
  streamId: Annotated[uint32_t, 32]
  gridX: Annotated[int32_t, 36]
  gridY: Annotated[int32_t, 40]
  gridZ: Annotated[int32_t, 44]
  blockX: Annotated[int32_t, 48]
  blockY: Annotated[int32_t, 52]
  blockZ: Annotated[int32_t, 56]
  staticSharedMemory: Annotated[int32_t, 60]
  dynamicSharedMemory: Annotated[int32_t, 64]
  localMemoryPerThread: Annotated[uint32_t, 68]
  localMemoryTotal: Annotated[uint32_t, 72]
  correlationId: Annotated[uint32_t, 76]
  gridId: Annotated[int64_t, 80]
  parentGridId: Annotated[int64_t, 88]
  queued: Annotated[uint64_t, 96]
  submitted: Annotated[uint64_t, 104]
  completed: Annotated[uint64_t, 112]
  parentBlockX: Annotated[uint32_t, 120]
  parentBlockY: Annotated[uint32_t, 124]
  parentBlockZ: Annotated[uint32_t, 128]
  pad: Annotated[uint32_t, 132]
  name: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 136]
# Both members at offset 0: C union of raw byte and bit-field view.
@c.record
class CUpti_ActivityCdpKernel_cacheConfig(c.Struct):
  SIZE = 1
  both: Annotated[uint8_t, 0]
  config: Annotated[CUpti_ActivityCdpKernel_cacheConfig_config, 0]
# 4-bit requested/executed cache configs packed into one byte (same bit-field
# encoding convention as CUpti_ActivityKernel9_cacheConfig_config).
@c.record
class CUpti_ActivityCdpKernel_cacheConfig_config(c.Struct):
  SIZE = 1
  requested: Annotated[uint8_t, 0, 4, 0]
  executed: Annotated[uint8_t, 0, 4, 4]
# Compute-preemption event for a block of grid `gridId`.
@c.record
class CUpti_ActivityPreemption(c.Struct):
  SIZE = 40
  kind: Annotated[CUpti_ActivityKind, 0]
  preemptionKind: Annotated[CUpti_ActivityPreemptionKind, 4]
  timestamp: Annotated[uint64_t, 8]
  gridId: Annotated[int64_t, 16]
  blockX: Annotated[uint32_t, 24]
  blockY: Annotated[uint32_t, 28]
  blockZ: Annotated[uint32_t, 32]
  pad: Annotated[uint32_t, 36]
# Driver/runtime API call record: callback id + timing + calling process/thread.
@c.record
class CUpti_ActivityAPI(c.Struct):
  SIZE = 40
  kind: Annotated[CUpti_ActivityKind, 0]
  cbid: Annotated[CUpti_CallbackId, 4]
  start: Annotated[uint64_t, 8]
  end: Annotated[uint64_t, 16]
  processId: Annotated[uint32_t, 24]
  threadId: Annotated[uint32_t, 28]
  correlationId: Annotated[uint32_t, 32]
  returnValue: Annotated[uint32_t, 36]
# Callback IDs are plain 32-bit integers (defined after first use; lazy annotations).
CUpti_CallbackId: TypeAlias = Annotated[int, ctypes.c_uint32]
# Hardware-event value record (aggregated across instances).
@c.record
class CUpti_ActivityEvent(c.Struct):
  SIZE = 24
  kind: Annotated[CUpti_ActivityKind, 0]
  id: Annotated[CUpti_EventID, 4]
  value: Annotated[uint64_t, 8]
  domain: Annotated[CUpti_EventDomainID, 16]
  correlationId: Annotated[uint32_t, 20]
# Event and event-domain IDs are 32-bit handles.
CUpti_EventID: TypeAlias = Annotated[int, ctypes.c_uint32]
CUpti_EventDomainID: TypeAlias = Annotated[int, ctypes.c_uint32]
# Per-instance variant of CUpti_ActivityEvent (`instance` selects the domain instance).
@c.record
class CUpti_ActivityEventInstance(c.Struct):
  SIZE = 32
  kind: Annotated[CUpti_ActivityKind, 0]
  id: Annotated[CUpti_EventID, 4]
  domain: Annotated[CUpti_EventDomainID, 8]
  instance: Annotated[uint32_t, 12]
  value: Annotated[uint64_t, 16]
  correlationId: Annotated[uint32_t, 24]
  pad: Annotated[uint32_t, 28]
# Metric value record; `value` is a tagged 8-byte union (see CUpti_MetricValue),
# interpreted according to the metric's value kind.
@c.record
class CUpti_ActivityMetric(c.Struct):
  SIZE = 24
  kind: Annotated[CUpti_ActivityKind, 0]
  id: Annotated[CUpti_MetricID, 4]
  value: Annotated[CUpti_MetricValue, 8]
  correlationId: Annotated[uint32_t, 16]
  flags: Annotated[uint8_t, 20]
  pad: Annotated[c.Array[uint8_t, Literal[3]], 21]
CUpti_MetricID: TypeAlias = Annotated[int, ctypes.c_uint32]
# All members at offset 0: mirrors the C union CUpti_MetricValue — one 8-byte
# payload read as double/uint64/int64/percent/throughput/utilization level.
@c.record
class CUpti_MetricValue(c.Struct):
  SIZE = 8
  metricValueDouble: Annotated[Annotated[float, ctypes.c_double], 0]
  metricValueUint64: Annotated[uint64_t, 0]
  metricValueInt64: Annotated[int64_t, 0]
  metricValuePercent: Annotated[Annotated[float, ctypes.c_double], 0]
  metricValueThroughput: Annotated[uint64_t, 0]
  metricValueUtilizationLevel: Annotated[CUpti_MetricValueUtilizationLevel, 0]
# Coarse 0-10 utilization scale used by utilization-kind metrics.
class CUpti_MetricValueUtilizationLevel(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_METRIC_VALUE_UTILIZATION_IDLE = CUpti_MetricValueUtilizationLevel.define('CUPTI_METRIC_VALUE_UTILIZATION_IDLE', 0)
CUPTI_METRIC_VALUE_UTILIZATION_LOW = CUpti_MetricValueUtilizationLevel.define('CUPTI_METRIC_VALUE_UTILIZATION_LOW', 2)
CUPTI_METRIC_VALUE_UTILIZATION_MID = CUpti_MetricValueUtilizationLevel.define('CUPTI_METRIC_VALUE_UTILIZATION_MID', 5)
CUPTI_METRIC_VALUE_UTILIZATION_HIGH = CUpti_MetricValueUtilizationLevel.define('CUPTI_METRIC_VALUE_UTILIZATION_HIGH', 8)
CUPTI_METRIC_VALUE_UTILIZATION_MAX = CUpti_MetricValueUtilizationLevel.define('CUPTI_METRIC_VALUE_UTILIZATION_MAX', 10)
CUPTI_METRIC_VALUE_UTILIZATION_FORCE_INT = CUpti_MetricValueUtilizationLevel.define('CUPTI_METRIC_VALUE_UTILIZATION_FORCE_INT', 2147483647)
# Per-instance metric value (same payload as CUpti_ActivityMetric plus `instance`).
@c.record
class CUpti_ActivityMetricInstance(c.Struct):
  SIZE = 32
  kind: Annotated[CUpti_ActivityKind, 0]
  id: Annotated[CUpti_MetricID, 4]
  value: Annotated[CUpti_MetricValue, 8]
  instance: Annotated[uint32_t, 16]
  correlationId: Annotated[uint32_t, 20]
  flags: Annotated[uint8_t, 24]
  pad: Annotated[c.Array[uint8_t, Literal[7]], 25]
# Maps a source-locator id (referenced by SASS/PC records) to file + line.
@c.record
class CUpti_ActivitySourceLocator(c.Struct):
  SIZE = 24
  kind: Annotated[CUpti_ActivityKind, 0]
  id: Annotated[uint32_t, 4]
  lineNumber: Annotated[uint32_t, 8]
  pad: Annotated[uint32_t, 12]
  fileName: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 16]
# SASS-level global memory access statistics for one instruction
# (located by functionId + pcOffset, source via sourceLocatorId).
@c.record
class CUpti_ActivityGlobalAccess3(c.Struct):
  SIZE = 56
  kind: Annotated[CUpti_ActivityKind, 0]
  flags: Annotated[CUpti_ActivityFlag, 4]
  sourceLocatorId: Annotated[uint32_t, 8]
  correlationId: Annotated[uint32_t, 12]
  functionId: Annotated[uint32_t, 16]
  executed: Annotated[uint32_t, 20]
  pcOffset: Annotated[uint64_t, 24]
  threadsExecuted: Annotated[uint64_t, 32]
  l2_transactions: Annotated[uint64_t, 40]
  theoreticalL2Transactions: Annotated[uint64_t, 48]
# SASS-level branch statistics (`diverged` counts divergent executions).
@c.record
class CUpti_ActivityBranch2(c.Struct):
  SIZE = 40
  kind: Annotated[CUpti_ActivityKind, 0]
  sourceLocatorId: Annotated[uint32_t, 4]
  correlationId: Annotated[uint32_t, 8]
  functionId: Annotated[uint32_t, 12]
  # Note: pcOffset is 32-bit here, unlike the 64-bit pcOffset in GlobalAccess3.
  pcOffset: Annotated[uint32_t, 16]
  diverged: Annotated[uint32_t, 20]
  threadsExecuted: Annotated[uint64_t, 24]
  executed: Annotated[uint32_t, 32]
  pad: Annotated[uint32_t, 36]
# Device description record (v5): static properties/limits of one GPU,
# including MIG instance and NUMA information.
@c.record
class CUpti_ActivityDevice5(c.Struct):
  SIZE = 184
  kind: Annotated[CUpti_ActivityKind, 0]
  flags: Annotated[CUpti_ActivityFlag, 4]
  globalMemoryBandwidth: Annotated[uint64_t, 8]
  globalMemorySize: Annotated[uint64_t, 16]
  constantMemorySize: Annotated[uint32_t, 24]
  l2CacheSize: Annotated[uint32_t, 28]
  numThreadsPerWarp: Annotated[uint32_t, 32]
  coreClockRate: Annotated[uint32_t, 36]
  numMemcpyEngines: Annotated[uint32_t, 40]
  numMultiprocessors: Annotated[uint32_t, 44]
  maxIPC: Annotated[uint32_t, 48]
  maxWarpsPerMultiprocessor: Annotated[uint32_t, 52]
  maxBlocksPerMultiprocessor: Annotated[uint32_t, 56]
  maxSharedMemoryPerMultiprocessor: Annotated[uint32_t, 60]
  maxRegistersPerMultiprocessor: Annotated[uint32_t, 64]
  maxRegistersPerBlock: Annotated[uint32_t, 68]
  maxSharedMemoryPerBlock: Annotated[uint32_t, 72]
  maxThreadsPerBlock: Annotated[uint32_t, 76]
  maxBlockDimX: Annotated[uint32_t, 80]
  maxBlockDimY: Annotated[uint32_t, 84]
  maxBlockDimZ: Annotated[uint32_t, 88]
  maxGridDimX: Annotated[uint32_t, 92]
  maxGridDimY: Annotated[uint32_t, 96]
  maxGridDimZ: Annotated[uint32_t, 100]
  computeCapabilityMajor: Annotated[uint32_t, 104]
  computeCapabilityMinor: Annotated[uint32_t, 108]
  id: Annotated[uint32_t, 112]
  eccEnabled: Annotated[uint32_t, 116]
  # 16-byte UUID (struct defined below; relies on lazy annotation resolution).
  uuid: Annotated[CUuuid, 120]
  name: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 136]
  isCudaVisible: Annotated[uint8_t, 144]
  isMigEnabled: Annotated[uint8_t, 145]
  reserved: Annotated[c.Array[uint8_t, Literal[6]], 146]
  gpuInstanceId: Annotated[uint32_t, 152]
  computeInstanceId: Annotated[uint32_t, 156]
  migUuid: Annotated[CUuuid, 160]
  isNumaNode: Annotated[uint32_t, 176]
  numaId: Annotated[uint32_t, 180]
# CUDA driver-API UUID: opaque 16-byte identifier.
@c.record
class struct_CUuuid_st(c.Struct):
  SIZE = 16
  bytes: Annotated[c.Array[Annotated[bytes, ctypes.c_char], Literal[16]], 0]
CUuuid: TypeAlias = struct_CUuuid_st
@c.record
class CUpti_ActivityDeviceAttribute(c.Struct):
SIZE = 24
kind: Annotated[CUpti_ActivityKind, 0]
flags: Annotated[CUpti_ActivityFlag, 4]
deviceId: Annotated[uint32_t, 8]
attribute: Annotated[CUpti_ActivityDeviceAttribute_attribute, 12]
value: Annotated[CUpti_ActivityDeviceAttribute_value, 16]
@c.record
class CUpti_ActivityDeviceAttribute_attribute(c.Struct):
SIZE = 4
cu: Annotated[CUdevice_attribute, 0]
cupti: Annotated[CUpti_DeviceAttribute, 0]
# CUDA driver `CUdevice_attribute` enum. Several names deliberately share one
# integer value — they are CUDA's legacy aliases (e.g. 8, 12, 27-29, 102), not bugs.
class enum_CUdevice_attribute_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK', 1)
CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X', 2)
CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y', 3)
CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z', 4)
CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X', 5)
CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y', 6)
CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z', 7)
# 8: SHARED_MEMORY_PER_BLOCK is the deprecated alias of MAX_SHARED_MEMORY_PER_BLOCK.
CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK', 8)
CU_DEVICE_ATTRIBUTE_SHARED_MEMORY_PER_BLOCK = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_SHARED_MEMORY_PER_BLOCK', 8)
CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY', 9)
CU_DEVICE_ATTRIBUTE_WARP_SIZE = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_WARP_SIZE', 10)
CU_DEVICE_ATTRIBUTE_MAX_PITCH = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAX_PITCH', 11)
# 12: REGISTERS_PER_BLOCK is the deprecated alias of MAX_REGISTERS_PER_BLOCK.
CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK', 12)
CU_DEVICE_ATTRIBUTE_REGISTERS_PER_BLOCK = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_REGISTERS_PER_BLOCK', 12)
CU_DEVICE_ATTRIBUTE_CLOCK_RATE = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_CLOCK_RATE', 13)
CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT', 14)
CU_DEVICE_ATTRIBUTE_GPU_OVERLAP = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_GPU_OVERLAP', 15)
CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT', 16)
CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT', 17)
CU_DEVICE_ATTRIBUTE_INTEGRATED = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_INTEGRATED', 18)
CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY', 19)
CU_DEVICE_ATTRIBUTE_COMPUTE_MODE = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_COMPUTE_MODE', 20)
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH', 21)
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_WIDTH = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_WIDTH', 22)
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_HEIGHT = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_HEIGHT', 23)
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH', 24)
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT', 25)
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH', 26)
# 27-29: *_ARRAY_* names are the deprecated aliases of the *_LAYERED_* names.
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH', 27)
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT', 28)
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS', 29)
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_WIDTH = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_WIDTH', 27)
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_HEIGHT = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_HEIGHT', 28)
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES', 29)
CU_DEVICE_ATTRIBUTE_SURFACE_ALIGNMENT = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_SURFACE_ALIGNMENT', 30)
CU_DEVICE_ATTRIBUTE_CONCURRENT_KERNELS = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_CONCURRENT_KERNELS', 31)
CU_DEVICE_ATTRIBUTE_ECC_ENABLED = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_ECC_ENABLED', 32)
CU_DEVICE_ATTRIBUTE_PCI_BUS_ID = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_PCI_BUS_ID', 33)
CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID', 34)
CU_DEVICE_ATTRIBUTE_TCC_DRIVER = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_TCC_DRIVER', 35)
CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE', 36)
CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH', 37)
CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE', 38)
CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR', 39)
CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT', 40)
CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING', 41)
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_WIDTH = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_WIDTH', 42)
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_LAYERS = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_LAYERS', 43)
CU_DEVICE_ATTRIBUTE_CAN_TEX2D_GATHER = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_CAN_TEX2D_GATHER', 44)
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_WIDTH = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_WIDTH', 45)
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_HEIGHT = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_HEIGHT', 46)
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE', 47)
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE', 48)
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE', 49)
CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID', 50)
CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT', 51)
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_WIDTH = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_WIDTH', 52)
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH', 53)
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS', 54)
CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_WIDTH = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_WIDTH', 55)
CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_WIDTH = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_WIDTH', 56)
CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_HEIGHT = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_HEIGHT', 57)
CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_WIDTH = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_WIDTH', 58)
CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_HEIGHT = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_HEIGHT', 59)
CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_DEPTH = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_DEPTH', 60)
CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_WIDTH = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_WIDTH', 61)
CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_LAYERS = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_LAYERS', 62)
CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_WIDTH = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_WIDTH', 63)
CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_HEIGHT = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_HEIGHT', 64)
CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_LAYERS = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_LAYERS', 65)
CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_WIDTH = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_WIDTH', 66)
CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH', 67)
CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS', 68)
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH', 69)
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH', 70)
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT', 71)
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH', 72)
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH', 73)
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT', 74)
CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR', 75)
CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR', 76)
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH', 77)
CU_DEVICE_ATTRIBUTE_STREAM_PRIORITIES_SUPPORTED = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_STREAM_PRIORITIES_SUPPORTED', 78)
CU_DEVICE_ATTRIBUTE_GLOBAL_L1_CACHE_SUPPORTED = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_GLOBAL_L1_CACHE_SUPPORTED', 79)
CU_DEVICE_ATTRIBUTE_LOCAL_L1_CACHE_SUPPORTED = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_LOCAL_L1_CACHE_SUPPORTED', 80)
CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR', 81)
CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_MULTIPROCESSOR = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_MULTIPROCESSOR', 82)
CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY', 83)
CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD', 84)
CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD_GROUP_ID = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD_GROUP_ID', 85)
CU_DEVICE_ATTRIBUTE_HOST_NATIVE_ATOMIC_SUPPORTED = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_HOST_NATIVE_ATOMIC_SUPPORTED', 86)
CU_DEVICE_ATTRIBUTE_SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO', 87)
CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS', 88)
CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS', 89)
CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED', 90)
CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM', 91)
CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_MEM_OPS_V1 = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_MEM_OPS_V1', 92)
CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS_V1 = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS_V1', 93)
CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR_V1 = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR_V1', 94)
CU_DEVICE_ATTRIBUTE_COOPERATIVE_LAUNCH = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_COOPERATIVE_LAUNCH', 95)
CU_DEVICE_ATTRIBUTE_COOPERATIVE_MULTI_DEVICE_LAUNCH = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_COOPERATIVE_MULTI_DEVICE_LAUNCH', 96)
CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN', 97)
CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES', 98)
CU_DEVICE_ATTRIBUTE_HOST_REGISTER_SUPPORTED = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_HOST_REGISTER_SUPPORTED', 99)
CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES', 100)
CU_DEVICE_ATTRIBUTE_DIRECT_MANAGED_MEM_ACCESS_FROM_HOST = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_DIRECT_MANAGED_MEM_ACCESS_FROM_HOST', 101)
# 102: VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED is the deprecated alias of VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED.
CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED', 102)
CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED', 102)
CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR_SUPPORTED = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR_SUPPORTED', 103)
CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_HANDLE_SUPPORTED = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_HANDLE_SUPPORTED', 104)
CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_KMT_HANDLE_SUPPORTED = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_KMT_HANDLE_SUPPORTED', 105)
CU_DEVICE_ATTRIBUTE_MAX_BLOCKS_PER_MULTIPROCESSOR = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAX_BLOCKS_PER_MULTIPROCESSOR', 106)
CU_DEVICE_ATTRIBUTE_GENERIC_COMPRESSION_SUPPORTED = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_GENERIC_COMPRESSION_SUPPORTED', 107)
CU_DEVICE_ATTRIBUTE_MAX_PERSISTING_L2_CACHE_SIZE = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAX_PERSISTING_L2_CACHE_SIZE', 108)
CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE', 109)
CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WITH_CUDA_VMM_SUPPORTED = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WITH_CUDA_VMM_SUPPORTED', 110)
CU_DEVICE_ATTRIBUTE_RESERVED_SHARED_MEMORY_PER_BLOCK = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_RESERVED_SHARED_MEMORY_PER_BLOCK', 111)
CU_DEVICE_ATTRIBUTE_SPARSE_CUDA_ARRAY_SUPPORTED = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_SPARSE_CUDA_ARRAY_SUPPORTED', 112)
CU_DEVICE_ATTRIBUTE_READ_ONLY_HOST_REGISTER_SUPPORTED = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_READ_ONLY_HOST_REGISTER_SUPPORTED', 113)
CU_DEVICE_ATTRIBUTE_TIMELINE_SEMAPHORE_INTEROP_SUPPORTED = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_TIMELINE_SEMAPHORE_INTEROP_SUPPORTED', 114)
CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED', 115)
CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_SUPPORTED = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_SUPPORTED', 116)
CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_FLUSH_WRITES_OPTIONS = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_FLUSH_WRITES_OPTIONS', 117)
CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WRITES_ORDERING = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WRITES_ORDERING', 118)
CU_DEVICE_ATTRIBUTE_MEMPOOL_SUPPORTED_HANDLE_TYPES = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MEMPOOL_SUPPORTED_HANDLE_TYPES', 119)
CU_DEVICE_ATTRIBUTE_CLUSTER_LAUNCH = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_CLUSTER_LAUNCH', 120)
CU_DEVICE_ATTRIBUTE_DEFERRED_MAPPING_CUDA_ARRAY_SUPPORTED = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_DEFERRED_MAPPING_CUDA_ARRAY_SUPPORTED', 121)
CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS', 122)
CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR', 123)
CU_DEVICE_ATTRIBUTE_DMA_BUF_SUPPORTED = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_DMA_BUF_SUPPORTED', 124)
CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED', 125)
CU_DEVICE_ATTRIBUTE_MEM_SYNC_DOMAIN_COUNT = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MEM_SYNC_DOMAIN_COUNT', 126)
CU_DEVICE_ATTRIBUTE_TENSOR_MAP_ACCESS_SUPPORTED = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_TENSOR_MAP_ACCESS_SUPPORTED', 127)
CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_FABRIC_SUPPORTED = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_FABRIC_SUPPORTED', 128)
CU_DEVICE_ATTRIBUTE_UNIFIED_FUNCTION_POINTERS = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_UNIFIED_FUNCTION_POINTERS', 129)
CU_DEVICE_ATTRIBUTE_NUMA_CONFIG = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_NUMA_CONFIG', 130)
CU_DEVICE_ATTRIBUTE_NUMA_ID = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_NUMA_ID', 131)
CU_DEVICE_ATTRIBUTE_MULTICAST_SUPPORTED = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MULTICAST_SUPPORTED', 132)
CU_DEVICE_ATTRIBUTE_MPS_ENABLED = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MPS_ENABLED', 133)
CU_DEVICE_ATTRIBUTE_HOST_NUMA_ID = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_HOST_NUMA_ID', 134)
CU_DEVICE_ATTRIBUTE_D3D12_CIG_SUPPORTED = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_D3D12_CIG_SUPPORTED', 135)
CU_DEVICE_ATTRIBUTE_MEM_DECOMPRESS_ALGORITHM_MASK = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MEM_DECOMPRESS_ALGORITHM_MASK', 136)
CU_DEVICE_ATTRIBUTE_MEM_DECOMPRESS_MAXIMUM_LENGTH = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MEM_DECOMPRESS_MAXIMUM_LENGTH', 137)
# Gaps in the numbering (138, 141-142) mirror values absent from this CUDA header version.
CU_DEVICE_ATTRIBUTE_GPU_PCI_DEVICE_ID = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_GPU_PCI_DEVICE_ID', 139)
CU_DEVICE_ATTRIBUTE_GPU_PCI_SUBSYSTEM_ID = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_GPU_PCI_SUBSYSTEM_ID', 140)
CU_DEVICE_ATTRIBUTE_HOST_NUMA_MULTINODE_IPC_SUPPORTED = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_HOST_NUMA_MULTINODE_IPC_SUPPORTED', 143)
CU_DEVICE_ATTRIBUTE_MAX = enum_CUdevice_attribute_enum.define('CU_DEVICE_ATTRIBUTE_MAX', 144)
CUdevice_attribute: TypeAlias = enum_CUdevice_attribute_enum
# CUPTI-specific device attributes (queried via cuptiDeviceGetAttribute).
# FORCE_INT = 0x7FFFFFFF pins the underlying C enum to 32-bit int width.
class CUpti_DeviceAttribute(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_DEVICE_ATTR_MAX_EVENT_ID = CUpti_DeviceAttribute.define('CUPTI_DEVICE_ATTR_MAX_EVENT_ID', 1)
CUPTI_DEVICE_ATTR_MAX_EVENT_DOMAIN_ID = CUpti_DeviceAttribute.define('CUPTI_DEVICE_ATTR_MAX_EVENT_DOMAIN_ID', 2)
CUPTI_DEVICE_ATTR_GLOBAL_MEMORY_BANDWIDTH = CUpti_DeviceAttribute.define('CUPTI_DEVICE_ATTR_GLOBAL_MEMORY_BANDWIDTH', 3)
CUPTI_DEVICE_ATTR_INSTRUCTION_PER_CYCLE = CUpti_DeviceAttribute.define('CUPTI_DEVICE_ATTR_INSTRUCTION_PER_CYCLE', 4)
CUPTI_DEVICE_ATTR_INSTRUCTION_THROUGHPUT_SINGLE_PRECISION = CUpti_DeviceAttribute.define('CUPTI_DEVICE_ATTR_INSTRUCTION_THROUGHPUT_SINGLE_PRECISION', 5)
CUPTI_DEVICE_ATTR_MAX_FRAME_BUFFERS = CUpti_DeviceAttribute.define('CUPTI_DEVICE_ATTR_MAX_FRAME_BUFFERS', 6)
CUPTI_DEVICE_ATTR_PCIE_LINK_RATE = CUpti_DeviceAttribute.define('CUPTI_DEVICE_ATTR_PCIE_LINK_RATE', 7)
CUPTI_DEVICE_ATTR_PCIE_LINK_WIDTH = CUpti_DeviceAttribute.define('CUPTI_DEVICE_ATTR_PCIE_LINK_WIDTH', 8)
CUPTI_DEVICE_ATTR_PCIE_GEN = CUpti_DeviceAttribute.define('CUPTI_DEVICE_ATTR_PCIE_GEN', 9)
CUPTI_DEVICE_ATTR_DEVICE_CLASS = CUpti_DeviceAttribute.define('CUPTI_DEVICE_ATTR_DEVICE_CLASS', 10)
CUPTI_DEVICE_ATTR_FLOP_SP_PER_CYCLE = CUpti_DeviceAttribute.define('CUPTI_DEVICE_ATTR_FLOP_SP_PER_CYCLE', 11)
CUPTI_DEVICE_ATTR_FLOP_DP_PER_CYCLE = CUpti_DeviceAttribute.define('CUPTI_DEVICE_ATTR_FLOP_DP_PER_CYCLE', 12)
CUPTI_DEVICE_ATTR_MAX_L2_UNITS = CUpti_DeviceAttribute.define('CUPTI_DEVICE_ATTR_MAX_L2_UNITS', 13)
CUPTI_DEVICE_ATTR_MAX_SHARED_MEMORY_CACHE_CONFIG_PREFER_SHARED = CUpti_DeviceAttribute.define('CUPTI_DEVICE_ATTR_MAX_SHARED_MEMORY_CACHE_CONFIG_PREFER_SHARED', 14)
CUPTI_DEVICE_ATTR_MAX_SHARED_MEMORY_CACHE_CONFIG_PREFER_L1 = CUpti_DeviceAttribute.define('CUPTI_DEVICE_ATTR_MAX_SHARED_MEMORY_CACHE_CONFIG_PREFER_L1', 15)
CUPTI_DEVICE_ATTR_MAX_SHARED_MEMORY_CACHE_CONFIG_PREFER_EQUAL = CUpti_DeviceAttribute.define('CUPTI_DEVICE_ATTR_MAX_SHARED_MEMORY_CACHE_CONFIG_PREFER_EQUAL', 16)
CUPTI_DEVICE_ATTR_FLOP_HP_PER_CYCLE = CUpti_DeviceAttribute.define('CUPTI_DEVICE_ATTR_FLOP_HP_PER_CYCLE', 17)
CUPTI_DEVICE_ATTR_NVLINK_PRESENT = CUpti_DeviceAttribute.define('CUPTI_DEVICE_ATTR_NVLINK_PRESENT', 18)
CUPTI_DEVICE_ATTR_GPU_CPU_NVLINK_BW = CUpti_DeviceAttribute.define('CUPTI_DEVICE_ATTR_GPU_CPU_NVLINK_BW', 19)
CUPTI_DEVICE_ATTR_NVSWITCH_PRESENT = CUpti_DeviceAttribute.define('CUPTI_DEVICE_ATTR_NVSWITCH_PRESENT', 20)
CUPTI_DEVICE_ATTR_FORCE_INT = CUpti_DeviceAttribute.define('CUPTI_DEVICE_ATTR_FORCE_INT', 2147483647)
@c.record
class CUpti_ActivityDeviceAttribute_value(c.Struct):
    """C union: an 8-byte attribute value; all members overlap at offset 0.

    Which member is valid depends on the attribute id in the enclosing record.
    """
    SIZE = 8
    vDouble: Annotated[Annotated[float, ctypes.c_double], 0]
    vUint32: Annotated[uint32_t, 0]
    vUint64: Annotated[uint64_t, 0]
    vInt32: Annotated[int32_t, 0]
    vInt64: Annotated[int64_t, 0]
@c.record
class CUpti_ActivityContext3(c.Struct):
    """CUPTI activity record describing a CUDA context (v3 layout, 32 bytes)."""
    SIZE = 32
    kind: Annotated[CUpti_ActivityKind, 0]
    contextId: Annotated[uint32_t, 4]
    deviceId: Annotated[uint32_t, 8]
    computeApiKind: Annotated[uint16_t, 12]
    nullStreamId: Annotated[uint16_t, 14]
    parentContextId: Annotated[uint32_t, 16]
    isGreenContext: Annotated[uint8_t, 20]
    # padding / padding2 are alignment fillers; contents are undefined.
    padding: Annotated[uint8_t, 21]
    numMultiprocessors: Annotated[uint16_t, 22]
    cigMode: Annotated[CUpti_ContextCigMode, 24]
    padding2: Annotated[uint32_t, 28]
@c.record
class CUpti_ActivityName(c.Struct):
    """CUPTI activity record attaching a name to an object (device/context/stream/...)."""
    SIZE = 32
    kind: Annotated[CUpti_ActivityKind, 0]
    objectKind: Annotated[CUpti_ActivityObjectKind, 4]
    objectId: Annotated[CUpti_ActivityObjectKindId, 8]
    # pad restores 8-byte alignment for the pointer that follows.
    pad: Annotated[uint32_t, 20]
    # NUL-terminated C string owned by CUPTI; valid only while the buffer is alive.
    name: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 24]
@c.record
class CUpti_ActivityMarker2(c.Struct):
    """CUPTI activity record for an instantaneous marker (e.g. NVTX mark), v2 layout."""
    SIZE = 56
    kind: Annotated[CUpti_ActivityKind, 0]
    flags: Annotated[CUpti_ActivityFlag, 4]
    timestamp: Annotated[uint64_t, 8]
    id: Annotated[uint32_t, 16]
    objectKind: Annotated[CUpti_ActivityObjectKind, 20]
    objectId: Annotated[CUpti_ActivityObjectKindId, 24]
    # pad restores 8-byte alignment for the pointers that follow.
    pad: Annotated[uint32_t, 36]
    # name/domain are NUL-terminated C strings owned by CUPTI.
    name: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 40]
    domain: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 48]
@c.record
class CUpti_ActivityMarkerData(c.Struct):
    """CUPTI activity record carrying extra data (payload, color, category) for a marker."""
    SIZE = 32
    kind: Annotated[CUpti_ActivityKind, 0]
    flags: Annotated[CUpti_ActivityFlag, 4]
    # id matches the id of the CUpti_ActivityMarker2 record this data belongs to.
    id: Annotated[uint32_t, 8]
    # payloadKind selects which member of the `payload` union is valid.
    payloadKind: Annotated[CUpti_MetricValueKind, 12]
    payload: Annotated[CUpti_MetricValue, 16]
    color: Annotated[uint32_t, 24]
    category: Annotated[uint32_t, 28]
# Discriminator for CUpti_MetricValue: tells which union member holds the value.
# FORCE_INT = 0x7FFFFFFF pins the underlying C enum to 32-bit int width.
class CUpti_MetricValueKind(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_METRIC_VALUE_KIND_DOUBLE = CUpti_MetricValueKind.define('CUPTI_METRIC_VALUE_KIND_DOUBLE', 0)
CUPTI_METRIC_VALUE_KIND_UINT64 = CUpti_MetricValueKind.define('CUPTI_METRIC_VALUE_KIND_UINT64', 1)
CUPTI_METRIC_VALUE_KIND_PERCENT = CUpti_MetricValueKind.define('CUPTI_METRIC_VALUE_KIND_PERCENT', 2)
CUPTI_METRIC_VALUE_KIND_THROUGHPUT = CUpti_MetricValueKind.define('CUPTI_METRIC_VALUE_KIND_THROUGHPUT', 3)
CUPTI_METRIC_VALUE_KIND_INT64 = CUpti_MetricValueKind.define('CUPTI_METRIC_VALUE_KIND_INT64', 4)
CUPTI_METRIC_VALUE_KIND_UTILIZATION_LEVEL = CUpti_MetricValueKind.define('CUPTI_METRIC_VALUE_KIND_UTILIZATION_LEVEL', 5)
CUPTI_METRIC_VALUE_KIND_FORCE_INT = CUpti_MetricValueKind.define('CUPTI_METRIC_VALUE_KIND_FORCE_INT', 2147483647)
@c.record
class CUpti_ActivityOverhead3(c.Struct):
    """CUPTI activity record measuring profiling-induced overhead (v3 layout)."""
    SIZE = 56
    kind: Annotated[CUpti_ActivityKind, 0]
    overheadKind: Annotated[CUpti_ActivityOverheadKind, 4]
    objectKind: Annotated[CUpti_ActivityObjectKind, 8]
    objectId: Annotated[CUpti_ActivityObjectKindId, 12]
    # start/end are timestamps bounding the overhead interval.
    start: Annotated[uint64_t, 24]
    end: Annotated[uint64_t, 32]
    correlationId: Annotated[uint32_t, 40]
    reserved0: Annotated[uint32_t, 44]
    # Opaque pointer; interpretation depends on overheadKind.
    overheadData: Annotated[ctypes.c_void_p, 48]
@c.record
class CUpti_ActivityEnvironment(c.Struct):
    """CUPTI activity record sampling device environment (clocks/temperature/power/cooling)."""
    SIZE = 40
    kind: Annotated[CUpti_ActivityKind, 0]
    deviceId: Annotated[uint32_t, 4]
    timestamp: Annotated[uint64_t, 8]
    # environmentKind selects which member of the `data` union is valid.
    environmentKind: Annotated[CUpti_ActivityEnvironmentKind, 16]
    data: Annotated[CUpti_ActivityEnvironment_data, 20]
@c.record
class CUpti_ActivityEnvironment_data(c.Struct):
    """C union of the per-kind environment payloads; all members overlap at offset 0."""
    SIZE = 20
    speed: Annotated[CUpti_ActivityEnvironment_data_speed, 0]
    temperature: Annotated[CUpti_ActivityEnvironment_data_temperature, 0]
    power: Annotated[CUpti_ActivityEnvironment_data_power, 0]
    cooling: Annotated[CUpti_ActivityEnvironment_data_cooling, 0]
@c.record
class CUpti_ActivityEnvironment_data_speed(c.Struct):
    """SPEED payload of the environment union: clocks, PCIe link state, throttle reasons."""
    SIZE = 20
    smClock: Annotated[uint32_t, 0]
    memoryClock: Annotated[uint32_t, 4]
    pcieLinkGen: Annotated[uint32_t, 8]
    pcieLinkWidth: Annotated[uint32_t, 12]
    clocksThrottleReasons: Annotated[CUpti_EnvironmentClocksThrottleReason, 16]
@c.record
class CUpti_ActivityEnvironment_data_temperature(c.Struct):
    """TEMPERATURE payload of the environment union."""
    SIZE = 4
    gpuTemperature: Annotated[uint32_t, 0]
@c.record
class CUpti_ActivityEnvironment_data_power(c.Struct):
    """POWER payload of the environment union: current draw and limit."""
    SIZE = 8
    power: Annotated[uint32_t, 0]
    powerLimit: Annotated[uint32_t, 4]
@c.record
class CUpti_ActivityEnvironment_data_cooling(c.Struct):
    """COOLING payload of the environment union."""
    SIZE = 4
    fanSpeed: Annotated[uint32_t, 0]
@c.record
class CUpti_ActivityInstructionExecution(c.Struct):
    """CUPTI source-level activity record: execution counts for one SASS instruction."""
    SIZE = 48
    kind: Annotated[CUpti_ActivityKind, 0]
    flags: Annotated[CUpti_ActivityFlag, 4]
    sourceLocatorId: Annotated[uint32_t, 8]
    correlationId: Annotated[uint32_t, 12]
    functionId: Annotated[uint32_t, 16]
    # Byte offset of the instruction from the function start.
    pcOffset: Annotated[uint32_t, 20]
    threadsExecuted: Annotated[uint64_t, 24]
    notPredOffThreadsExecuted: Annotated[uint64_t, 32]
    executed: Annotated[uint32_t, 40]
    # Trailing pad for 8-byte struct alignment.
    pad: Annotated[uint32_t, 44]
@c.record
class CUpti_ActivityPCSampling3(c.Struct):
    """CUPTI PC-sampling activity record (v3 layout): samples and stall reason for one PC."""
    SIZE = 40
    kind: Annotated[CUpti_ActivityKind, 0]
    flags: Annotated[CUpti_ActivityFlag, 4]
    sourceLocatorId: Annotated[uint32_t, 8]
    correlationId: Annotated[uint32_t, 12]
    functionId: Annotated[uint32_t, 16]
    latencySamples: Annotated[uint32_t, 20]
    samples: Annotated[uint32_t, 24]
    stallReason: Annotated[CUpti_ActivityPCSamplingStallReason, 28]
    # 64-bit in v3 (was 32-bit in earlier layouts).
    pcOffset: Annotated[uint64_t, 32]
@c.record
class CUpti_ActivityPCSamplingRecordInfo(c.Struct):
    """CUPTI summary record for a PC-sampling session: totals and sampling period."""
    SIZE = 32
    kind: Annotated[CUpti_ActivityKind, 0]
    correlationId: Annotated[uint32_t, 4]
    totalSamples: Annotated[uint64_t, 8]
    droppedSamples: Annotated[uint64_t, 16]
    samplingPeriodInCycles: Annotated[uint64_t, 24]
@c.record
class CUpti_ActivityUnifiedMemoryCounter3(c.Struct):
    """CUPTI unified-memory counter activity record (v3 layout)."""
    SIZE = 104
    kind: Annotated[CUpti_ActivityKind, 0]
    counterKind: Annotated[CUpti_ActivityUnifiedMemoryCounterKind, 4]
    value: Annotated[uint64_t, 8]
    start: Annotated[uint64_t, 16]
    end: Annotated[uint64_t, 24]
    address: Annotated[uint64_t, 32]
    # srcId/dstId identify the source/destination processors of the migration.
    srcId: Annotated[uint32_t, 40]
    dstId: Annotated[uint32_t, 44]
    streamId: Annotated[uint32_t, 48]
    processId: Annotated[uint32_t, 52]
    flags: Annotated[uint32_t, 56]
    pad: Annotated[uint32_t, 60]
    # 5 x 64-bit bitmask words identifying processors involved — TODO confirm semantics against CUPTI docs.
    processors: Annotated[c.Array[uint64_t, Literal[5]], 64]
@c.record
class CUpti_ActivityFunction(c.Struct):
    """CUPTI activity record describing a device function within a module."""
    SIZE = 32
    kind: Annotated[CUpti_ActivityKind, 0]
    id: Annotated[uint32_t, 4]
    contextId: Annotated[uint32_t, 8]
    moduleId: Annotated[uint32_t, 12]
    functionIndex: Annotated[uint32_t, 16]
    # pad restores 8-byte alignment for the name pointer.
    pad: Annotated[uint32_t, 20]
    # NUL-terminated C string owned by CUPTI.
    name: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 24]
@c.record
class CUpti_ActivityModule(c.Struct):
    """CUPTI activity record describing a loaded module and its cubin image."""
    SIZE = 24
    kind: Annotated[CUpti_ActivityKind, 0]
    contextId: Annotated[uint32_t, 4]
    id: Annotated[uint32_t, 8]
    # cubin points to `cubinSize` bytes of the module's binary image.
    cubinSize: Annotated[uint32_t, 12]
    cubin: Annotated[ctypes.c_void_p, 16]
@c.record
class CUpti_ActivitySharedAccess(c.Struct):
    """CUPTI source-level activity record: shared-memory access statistics for one instruction."""
    SIZE = 56
    kind: Annotated[CUpti_ActivityKind, 0]
    flags: Annotated[CUpti_ActivityFlag, 4]
    sourceLocatorId: Annotated[uint32_t, 8]
    correlationId: Annotated[uint32_t, 12]
    functionId: Annotated[uint32_t, 16]
    pcOffset: Annotated[uint32_t, 20]
    threadsExecuted: Annotated[uint64_t, 24]
    # Actual vs ideal transaction counts; the gap indicates bank conflicts.
    sharedTransactions: Annotated[uint64_t, 32]
    theoreticalSharedTransactions: Annotated[uint64_t, 40]
    executed: Annotated[uint32_t, 48]
    pad: Annotated[uint32_t, 52]
@c.record
class CUpti_ActivityCudaEvent2(c.Struct):
    """CUPTI activity record for a CUDA event (v2 layout)."""
    SIZE = 56
    kind: Annotated[CUpti_ActivityKind, 0]
    correlationId: Annotated[uint32_t, 4]
    contextId: Annotated[uint32_t, 8]
    streamId: Annotated[uint32_t, 12]
    eventId: Annotated[uint32_t, 16]
    # pad/pad2 are alignment fillers; contents are undefined.
    pad: Annotated[uint32_t, 20]
    deviceId: Annotated[uint32_t, 24]
    pad2: Annotated[uint32_t, 28]
    reserved0: Annotated[ctypes.c_void_p, 32]
    deviceTimestamp: Annotated[uint64_t, 40]
    cudaEventSyncId: Annotated[uint64_t, 48]
@c.record
class CUpti_ActivityStream(c.Struct):
    """CUPTI activity record describing a CUDA stream."""
    SIZE = 24
    kind: Annotated[CUpti_ActivityKind, 0]
    contextId: Annotated[uint32_t, 4]
    streamId: Annotated[uint32_t, 8]
    priority: Annotated[uint32_t, 12]
    flag: Annotated[CUpti_ActivityStreamFlag, 16]
    correlationId: Annotated[uint32_t, 20]
@c.record
class CUpti_ActivitySynchronization2(c.Struct):
    """CUPTI activity record for a synchronization call (v2 layout)."""
    SIZE = 56
    kind: Annotated[CUpti_ActivityKind, 0]
    # type discriminates which synchronization API produced the record.
    type: Annotated[CUpti_ActivitySynchronizationType, 4]
    start: Annotated[uint64_t, 8]
    end: Annotated[uint64_t, 16]
    correlationId: Annotated[uint32_t, 24]
    contextId: Annotated[uint32_t, 28]
    streamId: Annotated[uint32_t, 32]
    cudaEventId: Annotated[uint32_t, 36]
    cudaEventSyncId: Annotated[uint64_t, 40]
    returnValue: Annotated[uint32_t, 48]
    # Trailing pad for 8-byte struct alignment.
    pad: Annotated[uint32_t, 52]
@c.record
class CUpti_ActivityInstructionCorrelation(c.Struct):
    """CUPTI activity record mapping a SASS instruction (pcOffset) to a source location."""
    SIZE = 24
    kind: Annotated[CUpti_ActivityKind, 0]
    flags: Annotated[CUpti_ActivityFlag, 4]
    sourceLocatorId: Annotated[uint32_t, 8]
    functionId: Annotated[uint32_t, 12]
    pcOffset: Annotated[uint32_t, 16]
    # Trailing pad for 8-byte struct alignment.
    pad: Annotated[uint32_t, 20]
# --- OpenACC / OpenMP profiling enums -------------------------------------
# Enum constants are defined at module level via Enum.define(name, value),
# mirroring the C enumerators one-to-one.  The *_FORCE_INT members pin the
# underlying C enum to 32-bit width (value INT32_MAX).

# Kind of OpenACC event reported in an OpenACC activity record.
class CUpti_OpenAccEventKind(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_OPENACC_EVENT_KIND_INVALID = CUpti_OpenAccEventKind.define('CUPTI_OPENACC_EVENT_KIND_INVALID', 0)
CUPTI_OPENACC_EVENT_KIND_DEVICE_INIT = CUpti_OpenAccEventKind.define('CUPTI_OPENACC_EVENT_KIND_DEVICE_INIT', 1)
CUPTI_OPENACC_EVENT_KIND_DEVICE_SHUTDOWN = CUpti_OpenAccEventKind.define('CUPTI_OPENACC_EVENT_KIND_DEVICE_SHUTDOWN', 2)
CUPTI_OPENACC_EVENT_KIND_RUNTIME_SHUTDOWN = CUpti_OpenAccEventKind.define('CUPTI_OPENACC_EVENT_KIND_RUNTIME_SHUTDOWN', 3)
CUPTI_OPENACC_EVENT_KIND_ENQUEUE_LAUNCH = CUpti_OpenAccEventKind.define('CUPTI_OPENACC_EVENT_KIND_ENQUEUE_LAUNCH', 4)
CUPTI_OPENACC_EVENT_KIND_ENQUEUE_UPLOAD = CUpti_OpenAccEventKind.define('CUPTI_OPENACC_EVENT_KIND_ENQUEUE_UPLOAD', 5)
CUPTI_OPENACC_EVENT_KIND_ENQUEUE_DOWNLOAD = CUpti_OpenAccEventKind.define('CUPTI_OPENACC_EVENT_KIND_ENQUEUE_DOWNLOAD', 6)
CUPTI_OPENACC_EVENT_KIND_WAIT = CUpti_OpenAccEventKind.define('CUPTI_OPENACC_EVENT_KIND_WAIT', 7)
CUPTI_OPENACC_EVENT_KIND_IMPLICIT_WAIT = CUpti_OpenAccEventKind.define('CUPTI_OPENACC_EVENT_KIND_IMPLICIT_WAIT', 8)
CUPTI_OPENACC_EVENT_KIND_COMPUTE_CONSTRUCT = CUpti_OpenAccEventKind.define('CUPTI_OPENACC_EVENT_KIND_COMPUTE_CONSTRUCT', 9)
CUPTI_OPENACC_EVENT_KIND_UPDATE = CUpti_OpenAccEventKind.define('CUPTI_OPENACC_EVENT_KIND_UPDATE', 10)
CUPTI_OPENACC_EVENT_KIND_ENTER_DATA = CUpti_OpenAccEventKind.define('CUPTI_OPENACC_EVENT_KIND_ENTER_DATA', 11)
CUPTI_OPENACC_EVENT_KIND_EXIT_DATA = CUpti_OpenAccEventKind.define('CUPTI_OPENACC_EVENT_KIND_EXIT_DATA', 12)
CUPTI_OPENACC_EVENT_KIND_CREATE = CUpti_OpenAccEventKind.define('CUPTI_OPENACC_EVENT_KIND_CREATE', 13)
CUPTI_OPENACC_EVENT_KIND_DELETE = CUpti_OpenAccEventKind.define('CUPTI_OPENACC_EVENT_KIND_DELETE', 14)
CUPTI_OPENACC_EVENT_KIND_ALLOC = CUpti_OpenAccEventKind.define('CUPTI_OPENACC_EVENT_KIND_ALLOC', 15)
CUPTI_OPENACC_EVENT_KIND_FREE = CUpti_OpenAccEventKind.define('CUPTI_OPENACC_EVENT_KIND_FREE', 16)
CUPTI_OPENACC_EVENT_KIND_FORCE_INT = CUpti_OpenAccEventKind.define('CUPTI_OPENACC_EVENT_KIND_FORCE_INT', 2147483647)

# OpenACC construct kind (parent construct of an OpenACC event).
class CUpti_OpenAccConstructKind(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_OPENACC_CONSTRUCT_KIND_UNKNOWN = CUpti_OpenAccConstructKind.define('CUPTI_OPENACC_CONSTRUCT_KIND_UNKNOWN', 0)
CUPTI_OPENACC_CONSTRUCT_KIND_PARALLEL = CUpti_OpenAccConstructKind.define('CUPTI_OPENACC_CONSTRUCT_KIND_PARALLEL', 1)
CUPTI_OPENACC_CONSTRUCT_KIND_KERNELS = CUpti_OpenAccConstructKind.define('CUPTI_OPENACC_CONSTRUCT_KIND_KERNELS', 2)
CUPTI_OPENACC_CONSTRUCT_KIND_LOOP = CUpti_OpenAccConstructKind.define('CUPTI_OPENACC_CONSTRUCT_KIND_LOOP', 3)
CUPTI_OPENACC_CONSTRUCT_KIND_DATA = CUpti_OpenAccConstructKind.define('CUPTI_OPENACC_CONSTRUCT_KIND_DATA', 4)
CUPTI_OPENACC_CONSTRUCT_KIND_ENTER_DATA = CUpti_OpenAccConstructKind.define('CUPTI_OPENACC_CONSTRUCT_KIND_ENTER_DATA', 5)
CUPTI_OPENACC_CONSTRUCT_KIND_EXIT_DATA = CUpti_OpenAccConstructKind.define('CUPTI_OPENACC_CONSTRUCT_KIND_EXIT_DATA', 6)
CUPTI_OPENACC_CONSTRUCT_KIND_HOST_DATA = CUpti_OpenAccConstructKind.define('CUPTI_OPENACC_CONSTRUCT_KIND_HOST_DATA', 7)
CUPTI_OPENACC_CONSTRUCT_KIND_ATOMIC = CUpti_OpenAccConstructKind.define('CUPTI_OPENACC_CONSTRUCT_KIND_ATOMIC', 8)
CUPTI_OPENACC_CONSTRUCT_KIND_DECLARE = CUpti_OpenAccConstructKind.define('CUPTI_OPENACC_CONSTRUCT_KIND_DECLARE', 9)
CUPTI_OPENACC_CONSTRUCT_KIND_INIT = CUpti_OpenAccConstructKind.define('CUPTI_OPENACC_CONSTRUCT_KIND_INIT', 10)
CUPTI_OPENACC_CONSTRUCT_KIND_SHUTDOWN = CUpti_OpenAccConstructKind.define('CUPTI_OPENACC_CONSTRUCT_KIND_SHUTDOWN', 11)
CUPTI_OPENACC_CONSTRUCT_KIND_SET = CUpti_OpenAccConstructKind.define('CUPTI_OPENACC_CONSTRUCT_KIND_SET', 12)
CUPTI_OPENACC_CONSTRUCT_KIND_UPDATE = CUpti_OpenAccConstructKind.define('CUPTI_OPENACC_CONSTRUCT_KIND_UPDATE', 13)
CUPTI_OPENACC_CONSTRUCT_KIND_ROUTINE = CUpti_OpenAccConstructKind.define('CUPTI_OPENACC_CONSTRUCT_KIND_ROUTINE', 14)
CUPTI_OPENACC_CONSTRUCT_KIND_WAIT = CUpti_OpenAccConstructKind.define('CUPTI_OPENACC_CONSTRUCT_KIND_WAIT', 15)
CUPTI_OPENACC_CONSTRUCT_KIND_RUNTIME_API = CUpti_OpenAccConstructKind.define('CUPTI_OPENACC_CONSTRUCT_KIND_RUNTIME_API', 16)
CUPTI_OPENACC_CONSTRUCT_KIND_FORCE_INT = CUpti_OpenAccConstructKind.define('CUPTI_OPENACC_CONSTRUCT_KIND_FORCE_INT', 2147483647)

# Kind of OpenMP event reported in an OpenMP activity record.
class CUpti_OpenMpEventKind(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_OPENMP_EVENT_KIND_INVALID = CUpti_OpenMpEventKind.define('CUPTI_OPENMP_EVENT_KIND_INVALID', 0)
CUPTI_OPENMP_EVENT_KIND_PARALLEL = CUpti_OpenMpEventKind.define('CUPTI_OPENMP_EVENT_KIND_PARALLEL', 1)
CUPTI_OPENMP_EVENT_KIND_TASK = CUpti_OpenMpEventKind.define('CUPTI_OPENMP_EVENT_KIND_TASK', 2)
CUPTI_OPENMP_EVENT_KIND_THREAD = CUpti_OpenMpEventKind.define('CUPTI_OPENMP_EVENT_KIND_THREAD', 3)
CUPTI_OPENMP_EVENT_KIND_IDLE = CUpti_OpenMpEventKind.define('CUPTI_OPENMP_EVENT_KIND_IDLE', 4)
CUPTI_OPENMP_EVENT_KIND_WAIT_BARRIER = CUpti_OpenMpEventKind.define('CUPTI_OPENMP_EVENT_KIND_WAIT_BARRIER', 5)
CUPTI_OPENMP_EVENT_KIND_WAIT_TASKWAIT = CUpti_OpenMpEventKind.define('CUPTI_OPENMP_EVENT_KIND_WAIT_TASKWAIT', 6)
CUPTI_OPENMP_EVENT_KIND_FORCE_INT = CUpti_OpenMpEventKind.define('CUPTI_OPENMP_EVENT_KIND_FORCE_INT', 2147483647)
# --- OpenACC / OpenMP activity records ------------------------------------
# The four OpenACC records share a common 120-byte header (event kind,
# source location, timestamps, CUDA ids); Data/Launch extend it with
# event-specific payload fields starting at offset 120.
# `_async` is the C field `async`, renamed because `async` is a Python keyword.

# Base OpenACC activity record (common header only).
@c.record
class CUpti_ActivityOpenAcc(c.Struct):
    SIZE = 120
    kind: Annotated[CUpti_ActivityKind, 0]
    eventKind: Annotated[CUpti_OpenAccEventKind, 4]
    parentConstruct: Annotated[CUpti_OpenAccConstructKind, 8]
    version: Annotated[uint32_t, 12]
    implicit: Annotated[uint32_t, 16]
    deviceType: Annotated[uint32_t, 20]
    deviceNumber: Annotated[uint32_t, 24]
    threadId: Annotated[uint32_t, 28]
    _async: Annotated[uint64_t, 32]
    asyncMap: Annotated[uint64_t, 40]
    lineNo: Annotated[uint32_t, 48]
    endLineNo: Annotated[uint32_t, 52]
    funcLineNo: Annotated[uint32_t, 56]
    funcEndLineNo: Annotated[uint32_t, 60]
    start: Annotated[uint64_t, 64]
    end: Annotated[uint64_t, 72]
    cuDeviceId: Annotated[uint32_t, 80]
    cuContextId: Annotated[uint32_t, 84]
    cuStreamId: Annotated[uint32_t, 88]
    cuProcessId: Annotated[uint32_t, 92]
    cuThreadId: Annotated[uint32_t, 96]
    externalId: Annotated[uint32_t, 100]
    srcFile: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 104]
    funcName: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 112]

# OpenACC data-transfer record: header + bytes/host/device pointers/name.
@c.record
class CUpti_ActivityOpenAccData(c.Struct):
    SIZE = 152
    kind: Annotated[CUpti_ActivityKind, 0]
    eventKind: Annotated[CUpti_OpenAccEventKind, 4]
    parentConstruct: Annotated[CUpti_OpenAccConstructKind, 8]
    version: Annotated[uint32_t, 12]
    implicit: Annotated[uint32_t, 16]
    deviceType: Annotated[uint32_t, 20]
    deviceNumber: Annotated[uint32_t, 24]
    threadId: Annotated[uint32_t, 28]
    _async: Annotated[uint64_t, 32]
    asyncMap: Annotated[uint64_t, 40]
    lineNo: Annotated[uint32_t, 48]
    endLineNo: Annotated[uint32_t, 52]
    funcLineNo: Annotated[uint32_t, 56]
    funcEndLineNo: Annotated[uint32_t, 60]
    start: Annotated[uint64_t, 64]
    end: Annotated[uint64_t, 72]
    cuDeviceId: Annotated[uint32_t, 80]
    cuContextId: Annotated[uint32_t, 84]
    cuStreamId: Annotated[uint32_t, 88]
    cuProcessId: Annotated[uint32_t, 92]
    cuThreadId: Annotated[uint32_t, 96]
    externalId: Annotated[uint32_t, 100]
    srcFile: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 104]
    funcName: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 112]
    bytes: Annotated[uint64_t, 120]
    hostPtr: Annotated[uint64_t, 128]
    devicePtr: Annotated[uint64_t, 136]
    varName: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 144]

# OpenACC kernel-launch record: header + gang/worker/vector config + name.
@c.record
class CUpti_ActivityOpenAccLaunch(c.Struct):
    SIZE = 152
    kind: Annotated[CUpti_ActivityKind, 0]
    eventKind: Annotated[CUpti_OpenAccEventKind, 4]
    parentConstruct: Annotated[CUpti_OpenAccConstructKind, 8]
    version: Annotated[uint32_t, 12]
    implicit: Annotated[uint32_t, 16]
    deviceType: Annotated[uint32_t, 20]
    deviceNumber: Annotated[uint32_t, 24]
    threadId: Annotated[uint32_t, 28]
    _async: Annotated[uint64_t, 32]
    asyncMap: Annotated[uint64_t, 40]
    lineNo: Annotated[uint32_t, 48]
    endLineNo: Annotated[uint32_t, 52]
    funcLineNo: Annotated[uint32_t, 56]
    funcEndLineNo: Annotated[uint32_t, 60]
    start: Annotated[uint64_t, 64]
    end: Annotated[uint64_t, 72]
    cuDeviceId: Annotated[uint32_t, 80]
    cuContextId: Annotated[uint32_t, 84]
    cuStreamId: Annotated[uint32_t, 88]
    cuProcessId: Annotated[uint32_t, 92]
    cuThreadId: Annotated[uint32_t, 96]
    externalId: Annotated[uint32_t, 100]
    srcFile: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 104]
    funcName: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 112]
    numGangs: Annotated[uint64_t, 120]
    numWorkers: Annotated[uint64_t, 128]
    vectorLength: Annotated[uint64_t, 136]
    kernelName: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 144]

# OpenACC record for events with no extra payload (header-only variant).
@c.record
class CUpti_ActivityOpenAccOther(c.Struct):
    SIZE = 120
    kind: Annotated[CUpti_ActivityKind, 0]
    eventKind: Annotated[CUpti_OpenAccEventKind, 4]
    parentConstruct: Annotated[CUpti_OpenAccConstructKind, 8]
    version: Annotated[uint32_t, 12]
    implicit: Annotated[uint32_t, 16]
    deviceType: Annotated[uint32_t, 20]
    deviceNumber: Annotated[uint32_t, 24]
    threadId: Annotated[uint32_t, 28]
    _async: Annotated[uint64_t, 32]
    asyncMap: Annotated[uint64_t, 40]
    lineNo: Annotated[uint32_t, 48]
    endLineNo: Annotated[uint32_t, 52]
    funcLineNo: Annotated[uint32_t, 56]
    funcEndLineNo: Annotated[uint32_t, 60]
    start: Annotated[uint64_t, 64]
    end: Annotated[uint64_t, 72]
    cuDeviceId: Annotated[uint32_t, 80]
    cuContextId: Annotated[uint32_t, 84]
    cuStreamId: Annotated[uint32_t, 88]
    cuProcessId: Annotated[uint32_t, 92]
    cuThreadId: Annotated[uint32_t, 96]
    externalId: Annotated[uint32_t, 100]
    srcFile: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 104]
    funcName: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 112]

# OpenMP activity record: event kind plus timestamps and process/thread ids.
@c.record
class CUpti_ActivityOpenMp(c.Struct):
    SIZE = 40
    kind: Annotated[CUpti_ActivityKind, 0]
    eventKind: Annotated[CUpti_OpenMpEventKind, 4]
    version: Annotated[uint32_t, 8]
    threadId: Annotated[uint32_t, 12]
    start: Annotated[uint64_t, 16]
    end: Annotated[uint64_t, 24]
    cuProcessId: Annotated[uint32_t, 32]
    cuThreadId: Annotated[uint32_t, 36]
# Kind of external API an external-correlation id belongs to (used with
# cuptiActivityPushExternalCorrelationId / Pop... below).
class CUpti_ExternalCorrelationKind(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_EXTERNAL_CORRELATION_KIND_INVALID = CUpti_ExternalCorrelationKind.define('CUPTI_EXTERNAL_CORRELATION_KIND_INVALID', 0)
CUPTI_EXTERNAL_CORRELATION_KIND_UNKNOWN = CUpti_ExternalCorrelationKind.define('CUPTI_EXTERNAL_CORRELATION_KIND_UNKNOWN', 1)
CUPTI_EXTERNAL_CORRELATION_KIND_OPENACC = CUpti_ExternalCorrelationKind.define('CUPTI_EXTERNAL_CORRELATION_KIND_OPENACC', 2)
CUPTI_EXTERNAL_CORRELATION_KIND_CUSTOM0 = CUpti_ExternalCorrelationKind.define('CUPTI_EXTERNAL_CORRELATION_KIND_CUSTOM0', 3)
CUPTI_EXTERNAL_CORRELATION_KIND_CUSTOM1 = CUpti_ExternalCorrelationKind.define('CUPTI_EXTERNAL_CORRELATION_KIND_CUSTOM1', 4)
CUPTI_EXTERNAL_CORRELATION_KIND_CUSTOM2 = CUpti_ExternalCorrelationKind.define('CUPTI_EXTERNAL_CORRELATION_KIND_CUSTOM2', 5)
CUPTI_EXTERNAL_CORRELATION_KIND_SIZE = CUpti_ExternalCorrelationKind.define('CUPTI_EXTERNAL_CORRELATION_KIND_SIZE', 6)
CUPTI_EXTERNAL_CORRELATION_KIND_FORCE_INT = CUpti_ExternalCorrelationKind.define('CUPTI_EXTERNAL_CORRELATION_KIND_FORCE_INT', 2147483647)

# Record linking a CUPTI correlation id to an external (non-CUDA) id.
@c.record
class CUpti_ActivityExternalCorrelation(c.Struct):
    SIZE = 24
    kind: Annotated[CUpti_ActivityKind, 0]
    externalKind: Annotated[CUpti_ExternalCorrelationKind, 4]
    externalId: Annotated[uint64_t, 8]
    correlationId: Annotated[uint32_t, 16]
    reserved: Annotated[uint32_t, 20]

# Device type used in NvLink records below (GPU vs NPU endpoint).
class CUpti_DevType(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_DEV_TYPE_INVALID = CUpti_DevType.define('CUPTI_DEV_TYPE_INVALID', 0)
CUPTI_DEV_TYPE_GPU = CUpti_DevType.define('CUPTI_DEV_TYPE_GPU', 1)
CUPTI_DEV_TYPE_NPU = CUpti_DevType.define('CUPTI_DEV_TYPE_NPU', 2)
CUPTI_DEV_TYPE_FORCE_INT = CUpti_DevType.define('CUPTI_DEV_TYPE_FORCE_INT', 2147483647)
# NvLink topology record (version 4): describes the link between two
# endpoints (typeDev0/typeDev1), per-endpoint ids, port lists and bandwidth.
@c.record
class CUpti_ActivityNvLink4(c.Struct):
    SIZE = 136
    kind: Annotated[CUpti_ActivityKind, 0]
    nvlinkVersion: Annotated[uint32_t, 4]
    typeDev0: Annotated[CUpti_DevType, 8]
    typeDev1: Annotated[CUpti_DevType, 12]
    idDev0: Annotated[CUpti_ActivityNvLink4_idDev0, 16]
    idDev1: Annotated[CUpti_ActivityNvLink4_idDev1, 32]
    flag: Annotated[uint32_t, 48]
    physicalNvLinkCount: Annotated[uint32_t, 52]
    portDev0: Annotated[c.Array[int8_t, Literal[32]], 56]
    portDev1: Annotated[c.Array[int8_t, Literal[32]], 88]
    bandwidth: Annotated[uint64_t, 120]
    nvswitchConnected: Annotated[uint8_t, 128]
    pad: Annotated[c.Array[uint8_t, Literal[7]], 129]  # padding to SIZE = 136

# Endpoint-0 id.  `uuidDev` and `npu` both sit at offset 0: this mirrors a
# C union — which member is valid is selected by typeDev0 above.
@c.record
class CUpti_ActivityNvLink4_idDev0(c.Struct):
    SIZE = 16
    uuidDev: Annotated[CUuuid, 0]
    npu: Annotated[CUpti_ActivityNvLink4_idDev0_npu, 0]
@c.record
class CUpti_ActivityNvLink4_idDev0_npu(c.Struct):
    SIZE = 8
    index: Annotated[uint32_t, 0]
    domainId: Annotated[uint32_t, 4]

# Endpoint-1 id union, same layout as idDev0 (selected by typeDev1).
@c.record
class CUpti_ActivityNvLink4_idDev1(c.Struct):
    SIZE = 16
    uuidDev: Annotated[CUuuid, 0]
    npu: Annotated[CUpti_ActivityNvLink4_idDev1_npu, 0]
@c.record
class CUpti_ActivityNvLink4_idDev1_npu(c.Struct):
    SIZE = 8
    index: Annotated[uint32_t, 0]
    domainId: Annotated[uint32_t, 4]
# Signed 8-bit C integer alias (used by the NvLink port arrays above;
# annotations here are evaluated lazily, so the later definition is fine).
int8_t: TypeAlias = Annotated[int, ctypes.c_byte]

# Whether a PCIe record describes a GPU or a PCI bridge.
class CUpti_PcieDeviceType(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_PCIE_DEVICE_TYPE_GPU = CUpti_PcieDeviceType.define('CUPTI_PCIE_DEVICE_TYPE_GPU', 0)
CUPTI_PCIE_DEVICE_TYPE_BRIDGE = CUpti_PcieDeviceType.define('CUPTI_PCIE_DEVICE_TYPE_BRIDGE', 1)
CUPTI_PCIE_DEVICE_TYPE_FORCE_INT = CUpti_PcieDeviceType.define('CUPTI_PCIE_DEVICE_TYPE_FORCE_INT', 2147483647)

# PCIe topology record: one device (GPU or bridge) and its link parameters.
# `id` and `attr` are C unions discriminated by `type`.
@c.record
class CUpti_ActivityPcie(c.Struct):
    SIZE = 168
    kind: Annotated[CUpti_ActivityKind, 0]
    type: Annotated[CUpti_PcieDeviceType, 4]
    id: Annotated[CUpti_ActivityPcie_id, 8]
    domain: Annotated[uint32_t, 12]
    pcieGeneration: Annotated[uint16_t, 16]
    linkRate: Annotated[uint16_t, 18]
    linkWidth: Annotated[uint16_t, 20]
    upstreamBus: Annotated[uint16_t, 22]
    attr: Annotated[CUpti_ActivityPcie_attr, 24]

# id union: devId when type is GPU, bridgeId when type is BRIDGE
# (both at offset 0).
@c.record
class CUpti_ActivityPcie_id(c.Struct):
    SIZE = 4
    devId: Annotated[CUdevice, 0]
    bridgeId: Annotated[uint32_t, 0]
# CUDA device ordinal (C type CUdevice, a plain int).
CUdevice: TypeAlias = Annotated[int, ctypes.c_int32]

# attr union: gpuAttr when type is GPU, bridgeAttr when type is BRIDGE.
@c.record
class CUpti_ActivityPcie_attr(c.Struct):
    SIZE = 144
    gpuAttr: Annotated[CUpti_ActivityPcie_attr_gpuAttr, 0]
    bridgeAttr: Annotated[CUpti_ActivityPcie_attr_bridgeAttr, 0]
@c.record
class CUpti_ActivityPcie_attr_gpuAttr(c.Struct):
    SIZE = 144
    uuidDev: Annotated[CUuuid, 0]
    peerDev: Annotated[c.Array[CUdevice, Literal[32]], 16]
@c.record
class CUpti_ActivityPcie_attr_bridgeAttr(c.Struct):
    SIZE = 8
    secondaryBus: Annotated[uint16_t, 0]
    deviceId: Annotated[uint16_t, 2]
    vendorId: Annotated[uint16_t, 4]
    pad0: Annotated[uint16_t, 6]  # padding to SIZE = 8

# PCIe link generation values reported in pcieGeneration.
class CUpti_PcieGen(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_PCIE_GEN_GEN1 = CUpti_PcieGen.define('CUPTI_PCIE_GEN_GEN1', 1)
CUPTI_PCIE_GEN_GEN2 = CUpti_PcieGen.define('CUPTI_PCIE_GEN_GEN2', 2)
CUPTI_PCIE_GEN_GEN3 = CUpti_PcieGen.define('CUPTI_PCIE_GEN_GEN3', 3)
CUPTI_PCIE_GEN_GEN4 = CUpti_PcieGen.define('CUPTI_PCIE_GEN_GEN4', 4)
CUPTI_PCIE_GEN_GEN5 = CUpti_PcieGen.define('CUPTI_PCIE_GEN_GEN5', 5)
CUPTI_PCIE_GEN_GEN6 = CUpti_PcieGen.define('CUPTI_PCIE_GEN_GEN6', 6)
CUPTI_PCIE_GEN_FORCE_INT = CUpti_PcieGen.define('CUPTI_PCIE_GEN_FORCE_INT', 2147483647)
# --- Instantaneous event/metric sample records ----------------------------
# Four variants: event vs metric, whole-device vs per-instance; each pairs
# an id and sampled value with the timestamp and device it was taken on.

@c.record
class CUpti_ActivityInstantaneousEvent(c.Struct):
    SIZE = 32
    kind: Annotated[CUpti_ActivityKind, 0]
    id: Annotated[CUpti_EventID, 4]
    value: Annotated[uint64_t, 8]
    timestamp: Annotated[uint64_t, 16]
    deviceId: Annotated[uint32_t, 24]
    reserved: Annotated[uint32_t, 28]

# Same as above but scoped to one hardware instance (`instance`).
@c.record
class CUpti_ActivityInstantaneousEventInstance(c.Struct):
    SIZE = 32
    kind: Annotated[CUpti_ActivityKind, 0]
    id: Annotated[CUpti_EventID, 4]
    value: Annotated[uint64_t, 8]
    timestamp: Annotated[uint64_t, 16]
    deviceId: Annotated[uint32_t, 24]
    instance: Annotated[uint8_t, 28]
    pad: Annotated[c.Array[uint8_t, Literal[3]], 29]  # padding to SIZE = 32

@c.record
class CUpti_ActivityInstantaneousMetric(c.Struct):
    SIZE = 32
    kind: Annotated[CUpti_ActivityKind, 0]
    id: Annotated[CUpti_MetricID, 4]
    value: Annotated[CUpti_MetricValue, 8]
    timestamp: Annotated[uint64_t, 16]
    deviceId: Annotated[uint32_t, 24]
    flags: Annotated[uint8_t, 28]
    pad: Annotated[c.Array[uint8_t, Literal[3]], 29]  # padding to SIZE = 32

# Per-instance metric sample.
@c.record
class CUpti_ActivityInstantaneousMetricInstance(c.Struct):
    SIZE = 32
    kind: Annotated[CUpti_ActivityKind, 0]
    id: Annotated[CUpti_MetricID, 4]
    value: Annotated[CUpti_MetricValue, 8]
    timestamp: Annotated[uint64_t, 16]
    deviceId: Annotated[uint32_t, 24]
    flags: Annotated[uint8_t, 28]
    instance: Annotated[uint8_t, 29]
    pad: Annotated[c.Array[uint8_t, Literal[2]], 30]  # padding to SIZE = 32
# What kind of JIT translation a JIT record describes (PTX->cubin, NVVM->PTX).
class CUpti_ActivityJitEntryType(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_ACTIVITY_JIT_ENTRY_INVALID = CUpti_ActivityJitEntryType.define('CUPTI_ACTIVITY_JIT_ENTRY_INVALID', 0)
CUPTI_ACTIVITY_JIT_ENTRY_PTX_TO_CUBIN = CUpti_ActivityJitEntryType.define('CUPTI_ACTIVITY_JIT_ENTRY_PTX_TO_CUBIN', 1)
CUPTI_ACTIVITY_JIT_ENTRY_NVVM_IR_TO_PTX = CUpti_ActivityJitEntryType.define('CUPTI_ACTIVITY_JIT_ENTRY_NVVM_IR_TO_PTX', 2)
CUPTI_ACTIVITY_JIT_ENTRY_TYPE_FORCE_INT = CUpti_ActivityJitEntryType.define('CUPTI_ACTIVITY_JIT_ENTRY_TYPE_FORCE_INT', 2147483647)

# Which JIT operation occurred (compile vs compute-cache load/store).
class CUpti_ActivityJitOperationType(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_ACTIVITY_JIT_OPERATION_INVALID = CUpti_ActivityJitOperationType.define('CUPTI_ACTIVITY_JIT_OPERATION_INVALID', 0)
CUPTI_ACTIVITY_JIT_OPERATION_CACHE_LOAD = CUpti_ActivityJitOperationType.define('CUPTI_ACTIVITY_JIT_OPERATION_CACHE_LOAD', 1)
CUPTI_ACTIVITY_JIT_OPERATION_CACHE_STORE = CUpti_ActivityJitOperationType.define('CUPTI_ACTIVITY_JIT_OPERATION_CACHE_STORE', 2)
CUPTI_ACTIVITY_JIT_OPERATION_COMPILE = CUpti_ActivityJitOperationType.define('CUPTI_ACTIVITY_JIT_OPERATION_COMPILE', 3)
CUPTI_ACTIVITY_JIT_OPERATION_TYPE_FORCE_INT = CUpti_ActivityJitOperationType.define('CUPTI_ACTIVITY_JIT_OPERATION_TYPE_FORCE_INT', 2147483647)

# JIT compilation/cache activity record (version 2).
@c.record
class CUpti_ActivityJit2(c.Struct):
    SIZE = 72
    kind: Annotated[CUpti_ActivityKind, 0]
    jitEntryType: Annotated[CUpti_ActivityJitEntryType, 4]
    jitOperationType: Annotated[CUpti_ActivityJitOperationType, 8]
    deviceId: Annotated[uint32_t, 12]
    start: Annotated[uint64_t, 16]
    end: Annotated[uint64_t, 24]
    correlationId: Annotated[uint32_t, 32]
    padding: Annotated[uint32_t, 36]  # alignment padding before uint64 field
    jitOperationCorrelationId: Annotated[uint64_t, 40]
    cacheSize: Annotated[uint64_t, 48]
    cachePath: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 56]
    processId: Annotated[uint32_t, 64]
    threadId: Annotated[uint32_t, 68]
# Host-launched CUDA graph execution record (version 2).  Note the separate
# end-side device/context ids at offsets 48/52.
@c.record
class CUpti_ActivityGraphTrace2(c.Struct):
    SIZE = 56
    kind: Annotated[CUpti_ActivityKind, 0]
    correlationId: Annotated[uint32_t, 4]
    start: Annotated[uint64_t, 8]
    end: Annotated[uint64_t, 16]
    deviceId: Annotated[uint32_t, 24]
    graphId: Annotated[uint32_t, 28]
    contextId: Annotated[uint32_t, 32]
    streamId: Annotated[uint32_t, 36]
    reserved: Annotated[ctypes.c_void_p, 40]
    endDeviceId: Annotated[uint32_t, 48]
    endContextId: Annotated[uint32_t, 52]

# How a device-side graph launch was issued (fire-and-forget / tail / sibling).
class CUpti_DeviceGraphLaunchMode(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_DEVICE_GRAPH_LAUNCH_MODE_INVALID = CUpti_DeviceGraphLaunchMode.define('CUPTI_DEVICE_GRAPH_LAUNCH_MODE_INVALID', 0)
CUPTI_DEVICE_GRAPH_LAUNCH_MODE_FIRE_AND_FORGET = CUpti_DeviceGraphLaunchMode.define('CUPTI_DEVICE_GRAPH_LAUNCH_MODE_FIRE_AND_FORGET', 1)
CUPTI_DEVICE_GRAPH_LAUNCH_MODE_TAIL = CUpti_DeviceGraphLaunchMode.define('CUPTI_DEVICE_GRAPH_LAUNCH_MODE_TAIL', 2)
CUPTI_DEVICE_GRAPH_LAUNCH_MODE_FIRE_AND_FORGET_AS_SIBLING = CUpti_DeviceGraphLaunchMode.define('CUPTI_DEVICE_GRAPH_LAUNCH_MODE_FIRE_AND_FORGET_AS_SIBLING', 3)

# Device-launched CUDA graph execution record.  Unlike the host-side trace,
# streamId here is 64-bit.
@c.record
class CUpti_ActivityDeviceGraphTrace(c.Struct):
    SIZE = 56
    kind: Annotated[CUpti_ActivityKind, 0]
    deviceId: Annotated[uint32_t, 4]
    start: Annotated[uint64_t, 8]
    end: Annotated[uint64_t, 16]
    graphId: Annotated[uint32_t, 24]
    launcherGraphId: Annotated[uint32_t, 28]
    deviceLaunchMode: Annotated[uint32_t, 32]
    contextId: Annotated[uint32_t, 36]
    streamId: Annotated[uint64_t, 40]
    reserved: Annotated[ctypes.c_void_p, 48]

# Memory decompression operation record.
@c.record
class CUpti_ActivityMemDecompress(c.Struct):
    SIZE = 64
    kind: Annotated[CUpti_ActivityKind, 0]
    deviceId: Annotated[uint32_t, 4]
    contextId: Annotated[uint32_t, 8]
    streamId: Annotated[uint32_t, 12]
    channelID: Annotated[uint32_t, 16]
    channelType: Annotated[CUpti_ChannelType, 20]
    correlationId: Annotated[uint32_t, 24]
    numberOfOperations: Annotated[uint32_t, 28]
    sourceBytes: Annotated[uint64_t, 32]
    reserved0: Annotated[ctypes.c_void_p, 40]
    start: Annotated[uint64_t, 48]
    end: Annotated[uint64_t, 56]
# Attributes readable/writable via cuptiActivityGetAttribute /
# cuptiActivitySetAttribute (buffer sizes, pool limits, etc.).
class CUpti_ActivityAttribute(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_ACTIVITY_ATTR_DEVICE_BUFFER_SIZE = CUpti_ActivityAttribute.define('CUPTI_ACTIVITY_ATTR_DEVICE_BUFFER_SIZE', 0)
CUPTI_ACTIVITY_ATTR_DEVICE_BUFFER_SIZE_CDP = CUpti_ActivityAttribute.define('CUPTI_ACTIVITY_ATTR_DEVICE_BUFFER_SIZE_CDP', 1)
CUPTI_ACTIVITY_ATTR_DEVICE_BUFFER_POOL_LIMIT = CUpti_ActivityAttribute.define('CUPTI_ACTIVITY_ATTR_DEVICE_BUFFER_POOL_LIMIT', 2)
CUPTI_ACTIVITY_ATTR_PROFILING_SEMAPHORE_POOL_SIZE = CUpti_ActivityAttribute.define('CUPTI_ACTIVITY_ATTR_PROFILING_SEMAPHORE_POOL_SIZE', 3)
CUPTI_ACTIVITY_ATTR_PROFILING_SEMAPHORE_POOL_LIMIT = CUpti_ActivityAttribute.define('CUPTI_ACTIVITY_ATTR_PROFILING_SEMAPHORE_POOL_LIMIT', 4)
CUPTI_ACTIVITY_ATTR_ZEROED_OUT_ACTIVITY_BUFFER = CUpti_ActivityAttribute.define('CUPTI_ACTIVITY_ATTR_ZEROED_OUT_ACTIVITY_BUFFER', 5)
CUPTI_ACTIVITY_ATTR_DEVICE_BUFFER_PRE_ALLOCATE_VALUE = CUpti_ActivityAttribute.define('CUPTI_ACTIVITY_ATTR_DEVICE_BUFFER_PRE_ALLOCATE_VALUE', 6)
CUPTI_ACTIVITY_ATTR_PROFILING_SEMAPHORE_PRE_ALLOCATE_VALUE = CUpti_ActivityAttribute.define('CUPTI_ACTIVITY_ATTR_PROFILING_SEMAPHORE_PRE_ALLOCATE_VALUE', 7)
CUPTI_ACTIVITY_ATTR_MEM_ALLOCATION_TYPE_HOST_PINNED = CUpti_ActivityAttribute.define('CUPTI_ACTIVITY_ATTR_MEM_ALLOCATION_TYPE_HOST_PINNED', 8)
CUPTI_ACTIVITY_ATTR_PER_THREAD_ACTIVITY_BUFFER = CUpti_ActivityAttribute.define('CUPTI_ACTIVITY_ATTR_PER_THREAD_ACTIVITY_BUFFER', 9)
CUPTI_ACTIVITY_ATTR_DEVICE_BUFFER_FORCE_INT = CUpti_ActivityAttribute.define('CUPTI_ACTIVITY_ATTR_DEVICE_BUFFER_FORCE_INT', 2147483647)

# How thread ids are reported (default vs OS/system ids); see
# cuptiSetThreadIdType / cuptiGetThreadIdType below.
class CUpti_ActivityThreadIdType(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_ACTIVITY_THREAD_ID_TYPE_DEFAULT = CUpti_ActivityThreadIdType.define('CUPTI_ACTIVITY_THREAD_ID_TYPE_DEFAULT', 0)
CUPTI_ACTIVITY_THREAD_ID_TYPE_SYSTEM = CUpti_ActivityThreadIdType.define('CUPTI_ACTIVITY_THREAD_ID_TYPE_SYSTEM', 1)
CUPTI_ACTIVITY_THREAD_ID_TYPE_SIZE = CUpti_ActivityThreadIdType.define('CUPTI_ACTIVITY_THREAD_ID_TYPE_SIZE', 2)
CUPTI_ACTIVITY_THREAD_ID_TYPE_FORCE_INT = CUpti_ActivityThreadIdType.define('CUPTI_ACTIVITY_THREAD_ID_TYPE_FORCE_INT', 2147483647)
# --- CUPTI Activity API function bindings ---------------------------------
# Each @dll.bind declaration binds the like-named C function from the CUPTI
# shared library; all return a CUptiResult status code.  The struct_* stubs
# are opaque CUDA driver handles, only ever used behind a pointer.

@dll.bind
def cuptiGetTimestamp(timestamp:c.POINTER[uint64_t]) -> CUptiResult: ...
class struct_CUctx_st(ctypes.Structure): pass  # opaque CUDA context
CUcontext: TypeAlias = c.POINTER[struct_CUctx_st]
@dll.bind
def cuptiGetContextId(context:CUcontext, contextId:c.POINTER[uint32_t]) -> CUptiResult: ...
class struct_CUstream_st(ctypes.Structure): pass  # opaque CUDA stream
CUstream: TypeAlias = c.POINTER[struct_CUstream_st]
@dll.bind
def cuptiGetStreamId(context:CUcontext, stream:CUstream, streamId:c.POINTER[uint32_t]) -> CUptiResult: ...
@dll.bind
def cuptiGetStreamIdEx(context:CUcontext, stream:CUstream, perThreadStream:uint8_t, streamId:c.POINTER[uint32_t]) -> CUptiResult: ...
@dll.bind
def cuptiGetDeviceId(context:CUcontext, deviceId:c.POINTER[uint32_t]) -> CUptiResult: ...
class struct_CUgraphNode_st(ctypes.Structure): pass  # opaque CUDA graph node
CUgraphNode: TypeAlias = c.POINTER[struct_CUgraphNode_st]
@dll.bind
def cuptiGetGraphNodeId(node:CUgraphNode, nodeId:c.POINTER[uint64_t]) -> CUptiResult: ...
class struct_CUgraph_st(ctypes.Structure): pass  # opaque CUDA graph
CUgraph: TypeAlias = c.POINTER[struct_CUgraph_st]
@dll.bind
def cuptiGetGraphId(graph:CUgraph, pId:c.POINTER[uint32_t]) -> CUptiResult: ...
class struct_CUgraphExec_st(ctypes.Structure): pass  # opaque executable graph
CUgraphExec: TypeAlias = c.POINTER[struct_CUgraphExec_st]
@dll.bind
def cuptiGetGraphExecId(graphExec:CUgraphExec, pId:c.POINTER[uint32_t]) -> CUptiResult: ...

# Enable/disable collection of an activity kind, globally or per-context.
@dll.bind
def cuptiActivityEnable(kind:CUpti_ActivityKind) -> CUptiResult: ...
@dll.bind
def cuptiActivityEnableAndDump(kind:CUpti_ActivityKind) -> CUptiResult: ...
@dll.bind
def cuptiActivityDisable(kind:CUpti_ActivityKind) -> CUptiResult: ...
@dll.bind
def cuptiActivityEnableContext(context:CUcontext, kind:CUpti_ActivityKind) -> CUptiResult: ...
@dll.bind
def cuptiActivityDisableContext(context:CUcontext, kind:CUpti_ActivityKind) -> CUptiResult: ...

# Activity-buffer handling: record iteration, drop counts, flush.
@dll.bind
def cuptiActivityGetNumDroppedRecords(context:CUcontext, streamId:uint32_t, dropped:c.POINTER[size_t]) -> CUptiResult: ...
@dll.bind
def cuptiActivityGetNextRecord(buffer:c.POINTER[uint8_t], validBufferSizeBytes:size_t, record:c.POINTER[c.POINTER[CUpti_Activity]]) -> CUptiResult: ...
# Callback signatures for the buffer-request / buffer-complete pair used by
# cuptiActivityRegisterCallbacks.
CUpti_BuffersCallbackRequestFunc: TypeAlias = c.CFUNCTYPE[None, [c.POINTER[c.POINTER[Annotated[int, ctypes.c_ubyte]]], c.POINTER[Annotated[int, ctypes.c_uint64]], c.POINTER[Annotated[int, ctypes.c_uint64]]]]
CUpti_BuffersCallbackCompleteFunc: TypeAlias = c.CFUNCTYPE[None, [c.POINTER[struct_CUctx_st], Annotated[int, ctypes.c_uint32], c.POINTER[Annotated[int, ctypes.c_ubyte]], Annotated[int, ctypes.c_uint64], Annotated[int, ctypes.c_uint64]]]
@dll.bind
def cuptiActivityRegisterCallbacks(funcBufferRequested:CUpti_BuffersCallbackRequestFunc, funcBufferCompleted:CUpti_BuffersCallbackCompleteFunc) -> CUptiResult: ...
@dll.bind
def cuptiActivityFlush(context:CUcontext, streamId:uint32_t, flag:uint32_t) -> CUptiResult: ...
@dll.bind
def cuptiActivityFlushAll(flag:uint32_t) -> CUptiResult: ...

# Attribute get/set (see CUpti_ActivityAttribute above) and feature config.
@dll.bind
def cuptiActivityGetAttribute(attr:CUpti_ActivityAttribute, valueSize:c.POINTER[size_t], value:ctypes.c_void_p) -> CUptiResult: ...
@dll.bind
def cuptiActivitySetAttribute(attr:CUpti_ActivityAttribute, valueSize:c.POINTER[size_t], value:ctypes.c_void_p) -> CUptiResult: ...
@dll.bind
def cuptiActivityConfigureUnifiedMemoryCounter(config:c.POINTER[CUpti_ActivityUnifiedMemoryCounterConfig], count:uint32_t) -> CUptiResult: ...
@dll.bind
def cuptiGetAutoBoostState(context:CUcontext, state:c.POINTER[CUpti_ActivityAutoBoostState]) -> CUptiResult: ...
@dll.bind
def cuptiActivityConfigurePCSampling(ctx:CUcontext, config:c.POINTER[CUpti_ActivityPCSamplingConfig]) -> CUptiResult: ...
@dll.bind
def cuptiGetLastError() -> CUptiResult: ...
@dll.bind
def cuptiSetThreadIdType(type:CUpti_ActivityThreadIdType) -> CUptiResult: ...
@dll.bind
def cuptiGetThreadIdType(type:c.POINTER[CUpti_ActivityThreadIdType]) -> CUptiResult: ...
@dll.bind
def cuptiComputeCapabilitySupported(major:Annotated[int, ctypes.c_int32], minor:Annotated[int, ctypes.c_int32], support:c.POINTER[Annotated[int, ctypes.c_int32]]) -> CUptiResult: ...
@dll.bind
def cuptiDeviceSupported(dev:CUdevice, support:c.POINTER[Annotated[int, ctypes.c_int32]]) -> CUptiResult: ...

# GPU virtualization mode reported by cuptiDeviceVirtualizationMode.
class CUpti_DeviceVirtualizationMode(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_DEVICE_VIRTUALIZATION_MODE_NONE = CUpti_DeviceVirtualizationMode.define('CUPTI_DEVICE_VIRTUALIZATION_MODE_NONE', 0)
CUPTI_DEVICE_VIRTUALIZATION_MODE_PASS_THROUGH = CUpti_DeviceVirtualizationMode.define('CUPTI_DEVICE_VIRTUALIZATION_MODE_PASS_THROUGH', 1)
CUPTI_DEVICE_VIRTUALIZATION_MODE_VIRTUAL_GPU = CUpti_DeviceVirtualizationMode.define('CUPTI_DEVICE_VIRTUALIZATION_MODE_VIRTUAL_GPU', 2)
CUPTI_DEVICE_VIRTUALIZATION_MODE_FORCE_INT = CUpti_DeviceVirtualizationMode.define('CUPTI_DEVICE_VIRTUALIZATION_MODE_FORCE_INT', 2147483647)
@dll.bind
def cuptiDeviceVirtualizationMode(dev:CUdevice, mode:c.POINTER[CUpti_DeviceVirtualizationMode]) -> CUptiResult: ...
@dll.bind
def cuptiFinalize() -> CUptiResult: ...

# External correlation-id stack and per-feature enable toggles
# (enable parameters are 0/1 uint8 flags).
@dll.bind
def cuptiActivityPushExternalCorrelationId(kind:CUpti_ExternalCorrelationKind, id:uint64_t) -> CUptiResult: ...
@dll.bind
def cuptiActivityPopExternalCorrelationId(kind:CUpti_ExternalCorrelationKind, lastId:c.POINTER[uint64_t]) -> CUptiResult: ...
@dll.bind
def cuptiActivityEnableLatencyTimestamps(enable:uint8_t) -> CUptiResult: ...
@dll.bind
def cuptiActivityFlushPeriod(time:uint32_t) -> CUptiResult: ...
@dll.bind
def cuptiActivityEnableLaunchAttributes(enable:uint8_t) -> CUptiResult: ...
# User-supplied timestamp source: a no-argument callback returning uint64.
CUpti_TimestampCallbackFunc: TypeAlias = c.CFUNCTYPE[Annotated[int, ctypes.c_uint64], []]
@dll.bind
def cuptiActivityRegisterTimestampCallback(funcTimestamp:CUpti_TimestampCallbackFunc) -> CUptiResult: ...
@dll.bind
def cuptiActivityEnableDeviceGraph(enable:uint8_t) -> CUptiResult: ...
@dll.bind
def cuptiActivityEnableDriverApi(cbid:CUpti_CallbackId, enable:uint8_t) -> CUptiResult: ...
@dll.bind
def cuptiActivityEnableRuntimeApi(cbid:CUpti_CallbackId, enable:uint8_t) -> CUptiResult: ...
@dll.bind
def cuptiActivityEnableHWTrace(enable:uint8_t) -> CUptiResult: ...
@dll.bind
def cuptiActivityEnableAllocationSource(enable:uint8_t) -> CUptiResult: ...
@dll.bind
def cuptiActivityEnableAllSyncRecords(enable:uint8_t) -> CUptiResult: ...
class CUpti_ApiCallbackSite(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_API_ENTER = CUpti_ApiCallbackSite.define('CUPTI_API_ENTER', 0)
CUPTI_API_EXIT = CUpti_ApiCallbackSite.define('CUPTI_API_EXIT', 1)
CUPTI_API_CBSITE_FORCE_INT = CUpti_ApiCallbackSite.define('CUPTI_API_CBSITE_FORCE_INT', 2147483647)
class CUpti_CallbackDomain(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_CB_DOMAIN_INVALID = CUpti_CallbackDomain.define('CUPTI_CB_DOMAIN_INVALID', 0)
CUPTI_CB_DOMAIN_DRIVER_API = CUpti_CallbackDomain.define('CUPTI_CB_DOMAIN_DRIVER_API', 1)
CUPTI_CB_DOMAIN_RUNTIME_API = CUpti_CallbackDomain.define('CUPTI_CB_DOMAIN_RUNTIME_API', 2)
CUPTI_CB_DOMAIN_RESOURCE = CUpti_CallbackDomain.define('CUPTI_CB_DOMAIN_RESOURCE', 3)
CUPTI_CB_DOMAIN_SYNCHRONIZE = CUpti_CallbackDomain.define('CUPTI_CB_DOMAIN_SYNCHRONIZE', 4)
CUPTI_CB_DOMAIN_NVTX = CUpti_CallbackDomain.define('CUPTI_CB_DOMAIN_NVTX', 5)
CUPTI_CB_DOMAIN_STATE = CUpti_CallbackDomain.define('CUPTI_CB_DOMAIN_STATE', 6)
CUPTI_CB_DOMAIN_SIZE = CUpti_CallbackDomain.define('CUPTI_CB_DOMAIN_SIZE', 7)
CUPTI_CB_DOMAIN_FORCE_INT = CUpti_CallbackDomain.define('CUPTI_CB_DOMAIN_FORCE_INT', 2147483647)
class CUpti_CallbackIdResource(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_CBID_RESOURCE_INVALID = CUpti_CallbackIdResource.define('CUPTI_CBID_RESOURCE_INVALID', 0)
CUPTI_CBID_RESOURCE_CONTEXT_CREATED = CUpti_CallbackIdResource.define('CUPTI_CBID_RESOURCE_CONTEXT_CREATED', 1)
CUPTI_CBID_RESOURCE_CONTEXT_DESTROY_STARTING = CUpti_CallbackIdResource.define('CUPTI_CBID_RESOURCE_CONTEXT_DESTROY_STARTING', 2)
CUPTI_CBID_RESOURCE_STREAM_CREATED = CUpti_CallbackIdResource.define('CUPTI_CBID_RESOURCE_STREAM_CREATED', 3)
CUPTI_CBID_RESOURCE_STREAM_DESTROY_STARTING = CUpti_CallbackIdResource.define('CUPTI_CBID_RESOURCE_STREAM_DESTROY_STARTING', 4)
CUPTI_CBID_RESOURCE_CU_INIT_FINISHED = CUpti_CallbackIdResource.define('CUPTI_CBID_RESOURCE_CU_INIT_FINISHED', 5)
CUPTI_CBID_RESOURCE_MODULE_LOADED = CUpti_CallbackIdResource.define('CUPTI_CBID_RESOURCE_MODULE_LOADED', 6)
CUPTI_CBID_RESOURCE_MODULE_UNLOAD_STARTING = CUpti_CallbackIdResource.define('CUPTI_CBID_RESOURCE_MODULE_UNLOAD_STARTING', 7)
CUPTI_CBID_RESOURCE_MODULE_PROFILED = CUpti_CallbackIdResource.define('CUPTI_CBID_RESOURCE_MODULE_PROFILED', 8)
CUPTI_CBID_RESOURCE_GRAPH_CREATED = CUpti_CallbackIdResource.define('CUPTI_CBID_RESOURCE_GRAPH_CREATED', 9)
CUPTI_CBID_RESOURCE_GRAPH_DESTROY_STARTING = CUpti_CallbackIdResource.define('CUPTI_CBID_RESOURCE_GRAPH_DESTROY_STARTING', 10)
CUPTI_CBID_RESOURCE_GRAPH_CLONED = CUpti_CallbackIdResource.define('CUPTI_CBID_RESOURCE_GRAPH_CLONED', 11)
CUPTI_CBID_RESOURCE_GRAPHNODE_CREATE_STARTING = CUpti_CallbackIdResource.define('CUPTI_CBID_RESOURCE_GRAPHNODE_CREATE_STARTING', 12)
CUPTI_CBID_RESOURCE_GRAPHNODE_CREATED = CUpti_CallbackIdResource.define('CUPTI_CBID_RESOURCE_GRAPHNODE_CREATED', 13)
CUPTI_CBID_RESOURCE_GRAPHNODE_DESTROY_STARTING = CUpti_CallbackIdResource.define('CUPTI_CBID_RESOURCE_GRAPHNODE_DESTROY_STARTING', 14)
CUPTI_CBID_RESOURCE_GRAPHNODE_DEPENDENCY_CREATED = CUpti_CallbackIdResource.define('CUPTI_CBID_RESOURCE_GRAPHNODE_DEPENDENCY_CREATED', 15)
CUPTI_CBID_RESOURCE_GRAPHNODE_DEPENDENCY_DESTROY_STARTING = CUpti_CallbackIdResource.define('CUPTI_CBID_RESOURCE_GRAPHNODE_DEPENDENCY_DESTROY_STARTING', 16)
CUPTI_CBID_RESOURCE_GRAPHEXEC_CREATE_STARTING = CUpti_CallbackIdResource.define('CUPTI_CBID_RESOURCE_GRAPHEXEC_CREATE_STARTING', 17)
CUPTI_CBID_RESOURCE_GRAPHEXEC_CREATED = CUpti_CallbackIdResource.define('CUPTI_CBID_RESOURCE_GRAPHEXEC_CREATED', 18)
CUPTI_CBID_RESOURCE_GRAPHEXEC_DESTROY_STARTING = CUpti_CallbackIdResource.define('CUPTI_CBID_RESOURCE_GRAPHEXEC_DESTROY_STARTING', 19)
CUPTI_CBID_RESOURCE_GRAPHNODE_CLONED = CUpti_CallbackIdResource.define('CUPTI_CBID_RESOURCE_GRAPHNODE_CLONED', 20)
CUPTI_CBID_RESOURCE_STREAM_ATTRIBUTE_CHANGED = CUpti_CallbackIdResource.define('CUPTI_CBID_RESOURCE_STREAM_ATTRIBUTE_CHANGED', 21)
CUPTI_CBID_RESOURCE_SIZE = CUpti_CallbackIdResource.define('CUPTI_CBID_RESOURCE_SIZE', 22)
CUPTI_CBID_RESOURCE_FORCE_INT = CUpti_CallbackIdResource.define('CUPTI_CBID_RESOURCE_FORCE_INT', 2147483647)
# Callback ids for the SYNCHRONIZE domain (stream/context synchronization events).
class CUpti_CallbackIdSync(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_CBID_SYNCHRONIZE_INVALID = CUpti_CallbackIdSync.define('CUPTI_CBID_SYNCHRONIZE_INVALID', 0)
CUPTI_CBID_SYNCHRONIZE_STREAM_SYNCHRONIZED = CUpti_CallbackIdSync.define('CUPTI_CBID_SYNCHRONIZE_STREAM_SYNCHRONIZED', 1)
CUPTI_CBID_SYNCHRONIZE_CONTEXT_SYNCHRONIZED = CUpti_CallbackIdSync.define('CUPTI_CBID_SYNCHRONIZE_CONTEXT_SYNCHRONIZED', 2)
CUPTI_CBID_SYNCHRONIZE_SIZE = CUpti_CallbackIdSync.define('CUPTI_CBID_SYNCHRONIZE_SIZE', 3)
CUPTI_CBID_SYNCHRONIZE_FORCE_INT = CUpti_CallbackIdSync.define('CUPTI_CBID_SYNCHRONIZE_FORCE_INT', 2147483647)
# Callback ids for the STATE domain (CUPTI error/warning notifications).
class CUpti_CallbackIdState(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_CBID_STATE_INVALID = CUpti_CallbackIdState.define('CUPTI_CBID_STATE_INVALID', 0)
CUPTI_CBID_STATE_FATAL_ERROR = CUpti_CallbackIdState.define('CUPTI_CBID_STATE_FATAL_ERROR', 1)
CUPTI_CBID_STATE_ERROR = CUpti_CallbackIdState.define('CUPTI_CBID_STATE_ERROR', 2)
CUPTI_CBID_STATE_WARNING = CUpti_CallbackIdState.define('CUPTI_CBID_STATE_WARNING', 3)
CUPTI_CBID_STATE_SIZE = CUpti_CallbackIdState.define('CUPTI_CBID_STATE_SIZE', 4)
CUPTI_CBID_STATE_FORCE_INT = CUpti_CallbackIdState.define('CUPTI_CBID_STATE_FORCE_INT', 2147483647)
@c.record
class CUpti_CallbackData(c.Struct):
  """Payload delivered to callbacks for the driver/runtime API domains.

  Layout convention (used by every @c.record struct in this file): SIZE is
  sizeof() of the C struct in bytes, and the integer in each field's
  Annotated[...] is that field's byte offset within the struct.
  """
  SIZE = 72
  # Whether the callback fires on API entry or exit (CUpti_ApiCallbackSite).
  callbackSite: Annotated[CUpti_ApiCallbackSite, 0]
  # NUL-terminated name of the intercepted API function.
  functionName: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 8]
  # Untyped pointers to the API call's parameter struct and return value.
  functionParams: Annotated[ctypes.c_void_p, 16]
  functionReturnValue: Annotated[ctypes.c_void_p, 24]
  # Kernel symbol name where applicable (may be NULL — confirm against CUPTI docs).
  symbolName: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 32]
  context: Annotated[CUcontext, 40]
  contextUid: Annotated[uint32_t, 48]
  # Scratch slot CUPTI carries between the enter and exit callback of one call.
  correlationData: Annotated[c.POINTER[uint64_t], 56]
  correlationId: Annotated[uint32_t, 64]
@c.record
class CUpti_ResourceData(c.Struct):
  """Payload for RESOURCE-domain callbacks (context/stream creation etc.)."""
  SIZE = 24
  context: Annotated[CUcontext, 0]
  # Union member identifying the resource the callback refers to.
  resourceHandle: Annotated[CUpti_ResourceData_resourceHandle, 8]
  resourceDescriptor: Annotated[ctypes.c_void_p, 16]
@c.record
class CUpti_ResourceData_resourceHandle(c.Struct):
  """Single-member union inside CUpti_ResourceData (stream handle variant)."""
  SIZE = 8
  stream: Annotated[CUstream, 0]
@c.record
class CUpti_ModuleResourceData(c.Struct):
  """Payload for module load/unload RESOURCE callbacks: the module's cubin image."""
  SIZE = 24
  moduleId: Annotated[uint32_t, 0]
  # Size in bytes of the cubin pointed to by pCubin.
  cubinSize: Annotated[size_t, 8]
  pCubin: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 16]
@c.record
class CUpti_GraphData(c.Struct):
  """Payload for CUDA-graph RESOURCE callbacks (CUPTI_CBID_RESOURCE_GRAPH*_*).

  Which fields are populated depends on the specific callback id; e.g. the
  original* fields apply to clone events, dependency to dependency events.
  """
  SIZE = 56
  graph: Annotated[CUgraph, 0]
  originalGraph: Annotated[CUgraph, 8]
  node: Annotated[CUgraphNode, 16]
  originalNode: Annotated[CUgraphNode, 24]
  nodeType: Annotated[CUgraphNodeType, 32]
  dependency: Annotated[CUgraphNode, 40]
  graphExec: Annotated[CUgraphExec, 48]
# CUDA graph node kinds (mirrors CUgraphNodeType from the CUDA driver API).
class enum_CUgraphNodeType_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_GRAPH_NODE_TYPE_KERNEL = enum_CUgraphNodeType_enum.define('CU_GRAPH_NODE_TYPE_KERNEL', 0)
CU_GRAPH_NODE_TYPE_MEMCPY = enum_CUgraphNodeType_enum.define('CU_GRAPH_NODE_TYPE_MEMCPY', 1)
CU_GRAPH_NODE_TYPE_MEMSET = enum_CUgraphNodeType_enum.define('CU_GRAPH_NODE_TYPE_MEMSET', 2)
CU_GRAPH_NODE_TYPE_HOST = enum_CUgraphNodeType_enum.define('CU_GRAPH_NODE_TYPE_HOST', 3)
CU_GRAPH_NODE_TYPE_GRAPH = enum_CUgraphNodeType_enum.define('CU_GRAPH_NODE_TYPE_GRAPH', 4)
CU_GRAPH_NODE_TYPE_EMPTY = enum_CUgraphNodeType_enum.define('CU_GRAPH_NODE_TYPE_EMPTY', 5)
CU_GRAPH_NODE_TYPE_WAIT_EVENT = enum_CUgraphNodeType_enum.define('CU_GRAPH_NODE_TYPE_WAIT_EVENT', 6)
CU_GRAPH_NODE_TYPE_EVENT_RECORD = enum_CUgraphNodeType_enum.define('CU_GRAPH_NODE_TYPE_EVENT_RECORD', 7)
CU_GRAPH_NODE_TYPE_EXT_SEMAS_SIGNAL = enum_CUgraphNodeType_enum.define('CU_GRAPH_NODE_TYPE_EXT_SEMAS_SIGNAL', 8)
CU_GRAPH_NODE_TYPE_EXT_SEMAS_WAIT = enum_CUgraphNodeType_enum.define('CU_GRAPH_NODE_TYPE_EXT_SEMAS_WAIT', 9)
CU_GRAPH_NODE_TYPE_MEM_ALLOC = enum_CUgraphNodeType_enum.define('CU_GRAPH_NODE_TYPE_MEM_ALLOC', 10)
CU_GRAPH_NODE_TYPE_MEM_FREE = enum_CUgraphNodeType_enum.define('CU_GRAPH_NODE_TYPE_MEM_FREE', 11)
CU_GRAPH_NODE_TYPE_BATCH_MEM_OP = enum_CUgraphNodeType_enum.define('CU_GRAPH_NODE_TYPE_BATCH_MEM_OP', 12)
CU_GRAPH_NODE_TYPE_CONDITIONAL = enum_CUgraphNodeType_enum.define('CU_GRAPH_NODE_TYPE_CONDITIONAL', 13)
# Public typedef name from the CUDA header for the enum above.
CUgraphNodeType: TypeAlias = enum_CUgraphNodeType_enum
@c.record
class CUpti_SynchronizeData(c.Struct):
  """Payload for SYNCHRONIZE-domain callbacks: the context/stream synchronized."""
  SIZE = 16
  context: Annotated[CUcontext, 0]
  stream: Annotated[CUstream, 8]
@c.record
class CUpti_NvtxData(c.Struct):
  """Payload for NVTX-domain callbacks (intercepted NVTX API calls)."""
  SIZE = 24
  functionName: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 0]
  functionParams: Annotated[ctypes.c_void_p, 8]
  functionReturnValue: Annotated[ctypes.c_void_p, 16]
@c.record
class CUpti_StreamAttrData(c.Struct):
  """Payload for CUPTI_CBID_RESOURCE_STREAM_ATTRIBUTE_CHANGED callbacks."""
  SIZE = 24
  stream: Annotated[CUstream, 0]
  attr: Annotated[CUstreamAttrID, 8]
  value: Annotated[c.POINTER[CUstreamAttrValue], 16]
# Kernel-launch / stream attribute ids (mirrors CUlaunchAttributeID from the
# CUDA driver API; CUstreamAttrID below is the driver's alias for the same enum).
class enum_CUlaunchAttributeID_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_LAUNCH_ATTRIBUTE_IGNORE = enum_CUlaunchAttributeID_enum.define('CU_LAUNCH_ATTRIBUTE_IGNORE', 0)
CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW = enum_CUlaunchAttributeID_enum.define('CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW', 1)
CU_LAUNCH_ATTRIBUTE_COOPERATIVE = enum_CUlaunchAttributeID_enum.define('CU_LAUNCH_ATTRIBUTE_COOPERATIVE', 2)
CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY = enum_CUlaunchAttributeID_enum.define('CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY', 3)
CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION = enum_CUlaunchAttributeID_enum.define('CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION', 4)
CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE = enum_CUlaunchAttributeID_enum.define('CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE', 5)
CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION = enum_CUlaunchAttributeID_enum.define('CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION', 6)
CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT = enum_CUlaunchAttributeID_enum.define('CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT', 7)
CU_LAUNCH_ATTRIBUTE_PRIORITY = enum_CUlaunchAttributeID_enum.define('CU_LAUNCH_ATTRIBUTE_PRIORITY', 8)
CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP = enum_CUlaunchAttributeID_enum.define('CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP', 9)
CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN = enum_CUlaunchAttributeID_enum.define('CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN', 10)
CU_LAUNCH_ATTRIBUTE_PREFERRED_CLUSTER_DIMENSION = enum_CUlaunchAttributeID_enum.define('CU_LAUNCH_ATTRIBUTE_PREFERRED_CLUSTER_DIMENSION', 11)
CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT = enum_CUlaunchAttributeID_enum.define('CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT', 12)
CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE = enum_CUlaunchAttributeID_enum.define('CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE', 13)
CU_LAUNCH_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT = enum_CUlaunchAttributeID_enum.define('CU_LAUNCH_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT', 14)
CU_LAUNCH_ATTRIBUTE_MAX = enum_CUlaunchAttributeID_enum.define('CU_LAUNCH_ATTRIBUTE_MAX', 15)
CUstreamAttrID: TypeAlias = enum_CUlaunchAttributeID_enum
@c.record
class union_CUlaunchAttributeValue_union(c.Struct):
  """Value for one launch/stream attribute (mirrors CUlaunchAttributeValue).

  This is a C union: every member sits at byte offset 0 and `pad` fixes the
  overall size at 64 bytes. Which member is meaningful is selected by the
  accompanying CUlaunchAttributeID / CUstreamAttrID.
  """
  SIZE = 64
  pad: Annotated[c.Array[Annotated[bytes, ctypes.c_char], Literal[64]], 0]
  accessPolicyWindow: Annotated[CUaccessPolicyWindow, 0]
  cooperative: Annotated[Annotated[int, ctypes.c_int32], 0]
  syncPolicy: Annotated[CUsynchronizationPolicy, 0]
  clusterDim: Annotated[union_CUlaunchAttributeValue_union_clusterDim, 0]
  clusterSchedulingPolicyPreference: Annotated[CUclusterSchedulingPolicy, 0]
  programmaticStreamSerializationAllowed: Annotated[Annotated[int, ctypes.c_int32], 0]
  programmaticEvent: Annotated[union_CUlaunchAttributeValue_union_programmaticEvent, 0]
  launchCompletionEvent: Annotated[union_CUlaunchAttributeValue_union_launchCompletionEvent, 0]
  priority: Annotated[Annotated[int, ctypes.c_int32], 0]
  memSyncDomainMap: Annotated[CUlaunchMemSyncDomainMap, 0]
  memSyncDomain: Annotated[CUlaunchMemSyncDomain, 0]
  preferredClusterDim: Annotated[union_CUlaunchAttributeValue_union_preferredClusterDim, 0]
  deviceUpdatableKernelNode: Annotated[union_CUlaunchAttributeValue_union_deviceUpdatableKernelNode, 0]
  sharedMemCarveout: Annotated[Annotated[int, ctypes.c_uint32], 0]
# Driver-API alias: stream attribute values share the launch-attribute union.
CUstreamAttrValue: TypeAlias = union_CUlaunchAttributeValue_union
# --- Member types referenced by union_CUlaunchAttributeValue_union above. ---
# Synchronization policy selected by CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY.
class enum_CUsynchronizationPolicy_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_SYNC_POLICY_AUTO = enum_CUsynchronizationPolicy_enum.define('CU_SYNC_POLICY_AUTO', 1)
CU_SYNC_POLICY_SPIN = enum_CUsynchronizationPolicy_enum.define('CU_SYNC_POLICY_SPIN', 2)
CU_SYNC_POLICY_YIELD = enum_CUsynchronizationPolicy_enum.define('CU_SYNC_POLICY_YIELD', 3)
CU_SYNC_POLICY_BLOCKING_SYNC = enum_CUsynchronizationPolicy_enum.define('CU_SYNC_POLICY_BLOCKING_SYNC', 4)
CUsynchronizationPolicy: TypeAlias = enum_CUsynchronizationPolicy_enum
@c.record
class union_CUlaunchAttributeValue_union_clusterDim(c.Struct):
  """Thread-block cluster dimensions (x, y, z)."""
  SIZE = 12
  x: Annotated[Annotated[int, ctypes.c_uint32], 0]
  y: Annotated[Annotated[int, ctypes.c_uint32], 4]
  z: Annotated[Annotated[int, ctypes.c_uint32], 8]
# Cluster scheduling policy preference.
class enum_CUclusterSchedulingPolicy_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_CLUSTER_SCHEDULING_POLICY_DEFAULT = enum_CUclusterSchedulingPolicy_enum.define('CU_CLUSTER_SCHEDULING_POLICY_DEFAULT', 0)
CU_CLUSTER_SCHEDULING_POLICY_SPREAD = enum_CUclusterSchedulingPolicy_enum.define('CU_CLUSTER_SCHEDULING_POLICY_SPREAD', 1)
CU_CLUSTER_SCHEDULING_POLICY_LOAD_BALANCING = enum_CUclusterSchedulingPolicy_enum.define('CU_CLUSTER_SCHEDULING_POLICY_LOAD_BALANCING', 2)
CUclusterSchedulingPolicy: TypeAlias = enum_CUclusterSchedulingPolicy_enum
@c.record
class union_CUlaunchAttributeValue_union_programmaticEvent(c.Struct):
  """Event/flags for CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT."""
  SIZE = 16
  event: Annotated[CUevent, 0]
  flags: Annotated[Annotated[int, ctypes.c_int32], 8]
  triggerAtBlockStart: Annotated[Annotated[int, ctypes.c_int32], 12]
# Opaque driver handle: CUevent is a pointer to an incomplete struct.
class struct_CUevent_st(ctypes.Structure): pass
CUevent: TypeAlias = c.POINTER[struct_CUevent_st]
@c.record
class union_CUlaunchAttributeValue_union_launchCompletionEvent(c.Struct):
  """Event/flags for CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT."""
  SIZE = 16
  event: Annotated[CUevent, 0]
  flags: Annotated[Annotated[int, ctypes.c_int32], 8]
@c.record
class struct_CUlaunchMemSyncDomainMap_st(c.Struct):
  """Memory-synchronization domain map (two one-byte domain ids)."""
  SIZE = 2
  # `default_` carries a trailing underscore because `default` is not a valid
  # Python identifier in this position in the original C field name `default`.
  default_: Annotated[Annotated[int, ctypes.c_ubyte], 0]
  remote: Annotated[Annotated[int, ctypes.c_ubyte], 1]
CUlaunchMemSyncDomainMap: TypeAlias = struct_CUlaunchMemSyncDomainMap_st
# Memory-synchronization domain selector.
class enum_CUlaunchMemSyncDomain_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_LAUNCH_MEM_SYNC_DOMAIN_DEFAULT = enum_CUlaunchMemSyncDomain_enum.define('CU_LAUNCH_MEM_SYNC_DOMAIN_DEFAULT', 0)
CU_LAUNCH_MEM_SYNC_DOMAIN_REMOTE = enum_CUlaunchMemSyncDomain_enum.define('CU_LAUNCH_MEM_SYNC_DOMAIN_REMOTE', 1)
CUlaunchMemSyncDomain: TypeAlias = enum_CUlaunchMemSyncDomain_enum
@c.record
class union_CUlaunchAttributeValue_union_preferredClusterDim(c.Struct):
  """Preferred cluster dimensions (x, y, z)."""
  SIZE = 12
  x: Annotated[Annotated[int, ctypes.c_uint32], 0]
  y: Annotated[Annotated[int, ctypes.c_uint32], 4]
  z: Annotated[Annotated[int, ctypes.c_uint32], 8]
@c.record
class union_CUlaunchAttributeValue_union_deviceUpdatableKernelNode(c.Struct):
  """Flag + handle for CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE."""
  SIZE = 16
  deviceUpdatable: Annotated[Annotated[int, ctypes.c_int32], 0]
  devNode: Annotated[CUgraphDeviceNode, 8]
# Opaque driver handle for a device-updatable graph node.
class struct_CUgraphDeviceUpdatableNode_st(ctypes.Structure): pass
CUgraphDeviceNode: TypeAlias = c.POINTER[struct_CUgraphDeviceUpdatableNode_st]
@c.record
class CUpti_StateData(c.Struct):
  """Payload for STATE-domain callbacks (error/warning notifications)."""
  SIZE = 16
  notification: Annotated[CUpti_StateData_notification, 0]
@c.record
class CUpti_StateData_notification(c.Struct):
  """CUPTI result code plus a human-readable message string."""
  SIZE = 16
  result: Annotated[CUptiResult, 0]
  message: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 8]
# Signature of a user callback: (userdata, domain, callback id, callback data).
CUpti_CallbackFunc: TypeAlias = c.CFUNCTYPE[None, [ctypes.c_void_p, CUpti_CallbackDomain, Annotated[int, ctypes.c_uint32], ctypes.c_void_p]]
# Opaque subscriber handle returned by cuptiSubscribe.
class struct_CUpti_Subscriber_st(ctypes.Structure): pass
CUpti_SubscriberHandle: TypeAlias = c.POINTER[struct_CUpti_Subscriber_st]
# Array of supported callback domains, as returned by cuptiSupportedDomains.
CUpti_DomainTable: TypeAlias = c.POINTER[CUpti_CallbackDomain]
# --- CUPTI Callback API: subscribe a callback and enable it per domain/cbid. ---
# Each @dll.bind stub resolves the like-named symbol in the CUPTI shared
# library; the annotations define the ctypes argument/return types.
@dll.bind
def cuptiSupportedDomains(domainCount:c.POINTER[size_t], domainTable:c.POINTER[CUpti_DomainTable]) -> CUptiResult: ...
@dll.bind
def cuptiSubscribe(subscriber:c.POINTER[CUpti_SubscriberHandle], callback:CUpti_CallbackFunc, userdata:ctypes.c_void_p) -> CUptiResult: ...
@dll.bind
def cuptiUnsubscribe(subscriber:CUpti_SubscriberHandle) -> CUptiResult: ...
@dll.bind
def cuptiGetCallbackState(enable:c.POINTER[uint32_t], subscriber:CUpti_SubscriberHandle, domain:CUpti_CallbackDomain, cbid:CUpti_CallbackId) -> CUptiResult: ...
@dll.bind
def cuptiEnableCallback(enable:uint32_t, subscriber:CUpti_SubscriberHandle, domain:CUpti_CallbackDomain, cbid:CUpti_CallbackId) -> CUptiResult: ...
@dll.bind
def cuptiEnableDomain(enable:uint32_t, subscriber:CUpti_SubscriberHandle, domain:CUpti_CallbackDomain) -> CUptiResult: ...
@dll.bind
def cuptiEnableAllDomains(enable:uint32_t, subscriber:CUpti_SubscriberHandle) -> CUptiResult: ...
@dll.bind
def cuptiGetCallbackName(domain:CUpti_CallbackDomain, cbid:uint32_t, name:c.POINTER[c.POINTER[Annotated[bytes, ctypes.c_char]]]) -> CUptiResult: ...
# --- CUPTI Event API types and enums. ---
# Opaque handle to a group of events collected together.
CUpti_EventGroup: TypeAlias = ctypes.c_void_p
class CUpti_DeviceAttributeDeviceClass(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_DEVICE_ATTR_DEVICE_CLASS_TESLA = CUpti_DeviceAttributeDeviceClass.define('CUPTI_DEVICE_ATTR_DEVICE_CLASS_TESLA', 0)
CUPTI_DEVICE_ATTR_DEVICE_CLASS_QUADRO = CUpti_DeviceAttributeDeviceClass.define('CUPTI_DEVICE_ATTR_DEVICE_CLASS_QUADRO', 1)
CUPTI_DEVICE_ATTR_DEVICE_CLASS_GEFORCE = CUpti_DeviceAttributeDeviceClass.define('CUPTI_DEVICE_ATTR_DEVICE_CLASS_GEFORCE', 2)
CUPTI_DEVICE_ATTR_DEVICE_CLASS_TEGRA = CUpti_DeviceAttributeDeviceClass.define('CUPTI_DEVICE_ATTR_DEVICE_CLASS_TEGRA', 3)
# Queryable attributes of an event domain. NOTE: value 2 is absent in the C
# header as well, so the gap here is intentional, not a transcription error.
class CUpti_EventDomainAttribute(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_EVENT_DOMAIN_ATTR_NAME = CUpti_EventDomainAttribute.define('CUPTI_EVENT_DOMAIN_ATTR_NAME', 0)
CUPTI_EVENT_DOMAIN_ATTR_INSTANCE_COUNT = CUpti_EventDomainAttribute.define('CUPTI_EVENT_DOMAIN_ATTR_INSTANCE_COUNT', 1)
CUPTI_EVENT_DOMAIN_ATTR_TOTAL_INSTANCE_COUNT = CUpti_EventDomainAttribute.define('CUPTI_EVENT_DOMAIN_ATTR_TOTAL_INSTANCE_COUNT', 3)
CUPTI_EVENT_DOMAIN_ATTR_COLLECTION_METHOD = CUpti_EventDomainAttribute.define('CUPTI_EVENT_DOMAIN_ATTR_COLLECTION_METHOD', 4)
CUPTI_EVENT_DOMAIN_ATTR_FORCE_INT = CUpti_EventDomainAttribute.define('CUPTI_EVENT_DOMAIN_ATTR_FORCE_INT', 2147483647)
# How the hardware collects events in a domain.
class CUpti_EventCollectionMethod(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_EVENT_COLLECTION_METHOD_PM = CUpti_EventCollectionMethod.define('CUPTI_EVENT_COLLECTION_METHOD_PM', 0)
CUPTI_EVENT_COLLECTION_METHOD_SM = CUpti_EventCollectionMethod.define('CUPTI_EVENT_COLLECTION_METHOD_SM', 1)
CUPTI_EVENT_COLLECTION_METHOD_INSTRUMENTED = CUpti_EventCollectionMethod.define('CUPTI_EVENT_COLLECTION_METHOD_INSTRUMENTED', 2)
CUPTI_EVENT_COLLECTION_METHOD_NVLINK_TC = CUpti_EventCollectionMethod.define('CUPTI_EVENT_COLLECTION_METHOD_NVLINK_TC', 3)
CUPTI_EVENT_COLLECTION_METHOD_FORCE_INT = CUpti_EventCollectionMethod.define('CUPTI_EVENT_COLLECTION_METHOD_FORCE_INT', 2147483647)
# Attributes queryable/settable on an event group.
class CUpti_EventGroupAttribute(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_EVENT_GROUP_ATTR_EVENT_DOMAIN_ID = CUpti_EventGroupAttribute.define('CUPTI_EVENT_GROUP_ATTR_EVENT_DOMAIN_ID', 0)
CUPTI_EVENT_GROUP_ATTR_PROFILE_ALL_DOMAIN_INSTANCES = CUpti_EventGroupAttribute.define('CUPTI_EVENT_GROUP_ATTR_PROFILE_ALL_DOMAIN_INSTANCES', 1)
CUPTI_EVENT_GROUP_ATTR_USER_DATA = CUpti_EventGroupAttribute.define('CUPTI_EVENT_GROUP_ATTR_USER_DATA', 2)
CUPTI_EVENT_GROUP_ATTR_NUM_EVENTS = CUpti_EventGroupAttribute.define('CUPTI_EVENT_GROUP_ATTR_NUM_EVENTS', 3)
CUPTI_EVENT_GROUP_ATTR_EVENTS = CUpti_EventGroupAttribute.define('CUPTI_EVENT_GROUP_ATTR_EVENTS', 4)
CUPTI_EVENT_GROUP_ATTR_INSTANCE_COUNT = CUpti_EventGroupAttribute.define('CUPTI_EVENT_GROUP_ATTR_INSTANCE_COUNT', 5)
CUPTI_EVENT_GROUP_ATTR_PROFILING_SCOPE = CUpti_EventGroupAttribute.define('CUPTI_EVENT_GROUP_ATTR_PROFILING_SCOPE', 6)
CUPTI_EVENT_GROUP_ATTR_FORCE_INT = CUpti_EventGroupAttribute.define('CUPTI_EVENT_GROUP_ATTR_FORCE_INT', 2147483647)
# Scope at which an event can be profiled (per-context, per-device, or both).
class CUpti_EventProfilingScope(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_EVENT_PROFILING_SCOPE_CONTEXT = CUpti_EventProfilingScope.define('CUPTI_EVENT_PROFILING_SCOPE_CONTEXT', 0)
CUPTI_EVENT_PROFILING_SCOPE_DEVICE = CUpti_EventProfilingScope.define('CUPTI_EVENT_PROFILING_SCOPE_DEVICE', 1)
CUPTI_EVENT_PROFILING_SCOPE_BOTH = CUpti_EventProfilingScope.define('CUPTI_EVENT_PROFILING_SCOPE_BOTH', 2)
CUPTI_EVENT_PROFILING_SCOPE_FORCE_INT = CUpti_EventProfilingScope.define('CUPTI_EVENT_PROFILING_SCOPE_FORCE_INT', 2147483647)
# Queryable attributes of a single event. NOTE: value 4 is skipped in the C
# header too (reserved), so the gap is intentional.
class CUpti_EventAttribute(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_EVENT_ATTR_NAME = CUpti_EventAttribute.define('CUPTI_EVENT_ATTR_NAME', 0)
CUPTI_EVENT_ATTR_SHORT_DESCRIPTION = CUpti_EventAttribute.define('CUPTI_EVENT_ATTR_SHORT_DESCRIPTION', 1)
CUPTI_EVENT_ATTR_LONG_DESCRIPTION = CUpti_EventAttribute.define('CUPTI_EVENT_ATTR_LONG_DESCRIPTION', 2)
CUPTI_EVENT_ATTR_CATEGORY = CUpti_EventAttribute.define('CUPTI_EVENT_ATTR_CATEGORY', 3)
CUPTI_EVENT_ATTR_PROFILING_SCOPE = CUpti_EventAttribute.define('CUPTI_EVENT_ATTR_PROFILING_SCOPE', 5)
CUPTI_EVENT_ATTR_FORCE_INT = CUpti_EventAttribute.define('CUPTI_EVENT_ATTR_FORCE_INT', 2147483647)
# Continuous vs per-kernel event collection.
class CUpti_EventCollectionMode(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_EVENT_COLLECTION_MODE_CONTINUOUS = CUpti_EventCollectionMode.define('CUPTI_EVENT_COLLECTION_MODE_CONTINUOUS', 0)
CUPTI_EVENT_COLLECTION_MODE_KERNEL = CUpti_EventCollectionMode.define('CUPTI_EVENT_COLLECTION_MODE_KERNEL', 1)
CUPTI_EVENT_COLLECTION_MODE_FORCE_INT = CUpti_EventCollectionMode.define('CUPTI_EVENT_COLLECTION_MODE_FORCE_INT', 2147483647)
# Broad category an event belongs to.
class CUpti_EventCategory(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_EVENT_CATEGORY_INSTRUCTION = CUpti_EventCategory.define('CUPTI_EVENT_CATEGORY_INSTRUCTION', 0)
CUPTI_EVENT_CATEGORY_MEMORY = CUpti_EventCategory.define('CUPTI_EVENT_CATEGORY_MEMORY', 1)
CUPTI_EVENT_CATEGORY_CACHE = CUpti_EventCategory.define('CUPTI_EVENT_CATEGORY_CACHE', 2)
CUPTI_EVENT_CATEGORY_PROFILE_TRIGGER = CUpti_EventCategory.define('CUPTI_EVENT_CATEGORY_PROFILE_TRIGGER', 3)
CUPTI_EVENT_CATEGORY_SYSTEM = CUpti_EventCategory.define('CUPTI_EVENT_CATEGORY_SYSTEM', 4)
CUPTI_EVENT_CATEGORY_FORCE_INT = CUpti_EventCategory.define('CUPTI_EVENT_CATEGORY_FORCE_INT', 2147483647)
# Flags for event-read calls (currently only NONE).
class CUpti_ReadEventFlags(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_EVENT_READ_FLAG_NONE = CUpti_ReadEventFlags.define('CUPTI_EVENT_READ_FLAG_NONE', 0)
CUPTI_EVENT_READ_FLAG_FORCE_INT = CUpti_ReadEventFlags.define('CUPTI_EVENT_READ_FLAG_FORCE_INT', 2147483647)
@c.record
class CUpti_EventGroupSet(c.Struct):
  """A set of event groups that can be collected in a single pass."""
  SIZE = 16
  numEventGroups: Annotated[uint32_t, 0]
  eventGroups: Annotated[c.POINTER[CUpti_EventGroup], 8]
@c.record
class CUpti_EventGroupSets(c.Struct):
  """Array of CUpti_EventGroupSet, one per required collection pass."""
  SIZE = 16
  numSets: Annotated[uint32_t, 0]
  sets: Annotated[c.POINTER[CUpti_EventGroupSet], 8]
# --- CUPTI Event API bindings: enumerate domains/events, manage event groups,
# read counter values, and control kernel-replay mode. The common out-parameter
# pattern is (valueSize in/out, value buffer) for attribute getters and
# (arraySizeBytes in/out, array buffer) for enumerators.
@dll.bind
def cuptiSetEventCollectionMode(context:CUcontext, mode:CUpti_EventCollectionMode) -> CUptiResult: ...
@dll.bind
def cuptiDeviceGetAttribute(device:CUdevice, attrib:CUpti_DeviceAttribute, valueSize:c.POINTER[size_t], value:ctypes.c_void_p) -> CUptiResult: ...
@dll.bind
def cuptiDeviceGetNumEventDomains(device:CUdevice, numDomains:c.POINTER[uint32_t]) -> CUptiResult: ...
@dll.bind
def cuptiDeviceEnumEventDomains(device:CUdevice, arraySizeBytes:c.POINTER[size_t], domainArray:c.POINTER[CUpti_EventDomainID]) -> CUptiResult: ...
@dll.bind
def cuptiDeviceGetEventDomainAttribute(device:CUdevice, eventDomain:CUpti_EventDomainID, attrib:CUpti_EventDomainAttribute, valueSize:c.POINTER[size_t], value:ctypes.c_void_p) -> CUptiResult: ...
@dll.bind
def cuptiGetNumEventDomains(numDomains:c.POINTER[uint32_t]) -> CUptiResult: ...
@dll.bind
def cuptiEnumEventDomains(arraySizeBytes:c.POINTER[size_t], domainArray:c.POINTER[CUpti_EventDomainID]) -> CUptiResult: ...
@dll.bind
def cuptiEventDomainGetAttribute(eventDomain:CUpti_EventDomainID, attrib:CUpti_EventDomainAttribute, valueSize:c.POINTER[size_t], value:ctypes.c_void_p) -> CUptiResult: ...
@dll.bind
def cuptiEventDomainGetNumEvents(eventDomain:CUpti_EventDomainID, numEvents:c.POINTER[uint32_t]) -> CUptiResult: ...
@dll.bind
def cuptiEventDomainEnumEvents(eventDomain:CUpti_EventDomainID, arraySizeBytes:c.POINTER[size_t], eventArray:c.POINTER[CUpti_EventID]) -> CUptiResult: ...
@dll.bind
def cuptiEventGetAttribute(event:CUpti_EventID, attrib:CUpti_EventAttribute, valueSize:c.POINTER[size_t], value:ctypes.c_void_p) -> CUptiResult: ...
@dll.bind
def cuptiEventGetIdFromName(device:CUdevice, eventName:c.POINTER[Annotated[bytes, ctypes.c_char]], event:c.POINTER[CUpti_EventID]) -> CUptiResult: ...
# Event-group lifecycle and counter reads.
@dll.bind
def cuptiEventGroupCreate(context:CUcontext, eventGroup:c.POINTER[CUpti_EventGroup], flags:uint32_t) -> CUptiResult: ...
@dll.bind
def cuptiEventGroupDestroy(eventGroup:CUpti_EventGroup) -> CUptiResult: ...
@dll.bind
def cuptiEventGroupGetAttribute(eventGroup:CUpti_EventGroup, attrib:CUpti_EventGroupAttribute, valueSize:c.POINTER[size_t], value:ctypes.c_void_p) -> CUptiResult: ...
@dll.bind
def cuptiEventGroupSetAttribute(eventGroup:CUpti_EventGroup, attrib:CUpti_EventGroupAttribute, valueSize:size_t, value:ctypes.c_void_p) -> CUptiResult: ...
@dll.bind
def cuptiEventGroupAddEvent(eventGroup:CUpti_EventGroup, event:CUpti_EventID) -> CUptiResult: ...
@dll.bind
def cuptiEventGroupRemoveEvent(eventGroup:CUpti_EventGroup, event:CUpti_EventID) -> CUptiResult: ...
@dll.bind
def cuptiEventGroupRemoveAllEvents(eventGroup:CUpti_EventGroup) -> CUptiResult: ...
@dll.bind
def cuptiEventGroupResetAllEvents(eventGroup:CUpti_EventGroup) -> CUptiResult: ...
@dll.bind
def cuptiEventGroupEnable(eventGroup:CUpti_EventGroup) -> CUptiResult: ...
@dll.bind
def cuptiEventGroupDisable(eventGroup:CUpti_EventGroup) -> CUptiResult: ...
@dll.bind
def cuptiEventGroupReadEvent(eventGroup:CUpti_EventGroup, flags:CUpti_ReadEventFlags, event:CUpti_EventID, eventValueBufferSizeBytes:c.POINTER[size_t], eventValueBuffer:c.POINTER[uint64_t]) -> CUptiResult: ...
@dll.bind
def cuptiEventGroupReadAllEvents(eventGroup:CUpti_EventGroup, flags:CUpti_ReadEventFlags, eventValueBufferSizeBytes:c.POINTER[size_t], eventValueBuffer:c.POINTER[uint64_t], eventIdArraySizeBytes:c.POINTER[size_t], eventIdArray:c.POINTER[CUpti_EventID], numEventIdsRead:c.POINTER[size_t]) -> CUptiResult: ...
@dll.bind
def cuptiEventGroupSetsCreate(context:CUcontext, eventIdArraySizeBytes:size_t, eventIdArray:c.POINTER[CUpti_EventID], eventGroupPasses:c.POINTER[c.POINTER[CUpti_EventGroupSets]]) -> CUptiResult: ...
@dll.bind
def cuptiEventGroupSetsDestroy(eventGroupSets:c.POINTER[CUpti_EventGroupSets]) -> CUptiResult: ...
@dll.bind
def cuptiEventGroupSetEnable(eventGroupSet:c.POINTER[CUpti_EventGroupSet]) -> CUptiResult: ...
@dll.bind
def cuptiEventGroupSetDisable(eventGroupSet:c.POINTER[CUpti_EventGroupSet]) -> CUptiResult: ...
# Kernel replay: re-run kernels so multi-pass event sets can be collected.
@dll.bind
def cuptiEnableKernelReplayMode(context:CUcontext) -> CUptiResult: ...
@dll.bind
def cuptiDisableKernelReplayMode(context:CUcontext) -> CUptiResult: ...
# Callback invoked on each replay iteration: (kernel name, replay #, userdata).
CUpti_KernelReplayUpdateFunc: TypeAlias = c.CFUNCTYPE[None, [c.POINTER[Annotated[bytes, ctypes.c_char]], Annotated[int, ctypes.c_int32], ctypes.c_void_p]]
@dll.bind
def cuptiKernelReplaySubscribeUpdate(updateFunc:CUpti_KernelReplayUpdateFunc, customData:ctypes.c_void_p) -> CUptiResult: ...
# --- CUPTI Metric API enums. ---
# Broad category a metric belongs to.
class CUpti_MetricCategory(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_METRIC_CATEGORY_MEMORY = CUpti_MetricCategory.define('CUPTI_METRIC_CATEGORY_MEMORY', 0)
CUPTI_METRIC_CATEGORY_INSTRUCTION = CUpti_MetricCategory.define('CUPTI_METRIC_CATEGORY_INSTRUCTION', 1)
CUPTI_METRIC_CATEGORY_MULTIPROCESSOR = CUpti_MetricCategory.define('CUPTI_METRIC_CATEGORY_MULTIPROCESSOR', 2)
CUPTI_METRIC_CATEGORY_CACHE = CUpti_MetricCategory.define('CUPTI_METRIC_CATEGORY_CACHE', 3)
CUPTI_METRIC_CATEGORY_TEXTURE = CUpti_MetricCategory.define('CUPTI_METRIC_CATEGORY_TEXTURE', 4)
CUPTI_METRIC_CATEGORY_NVLINK = CUpti_MetricCategory.define('CUPTI_METRIC_CATEGORY_NVLINK', 5)
CUPTI_METRIC_CATEGORY_PCIE = CUpti_MetricCategory.define('CUPTI_METRIC_CATEGORY_PCIE', 6)
CUPTI_METRIC_CATEGORY_FORCE_INT = CUpti_MetricCategory.define('CUPTI_METRIC_CATEGORY_FORCE_INT', 2147483647)
# Bit flags (1|2) describing how a metric may be evaluated.
class CUpti_MetricEvaluationMode(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_METRIC_EVALUATION_MODE_PER_INSTANCE = CUpti_MetricEvaluationMode.define('CUPTI_METRIC_EVALUATION_MODE_PER_INSTANCE', 1)
CUPTI_METRIC_EVALUATION_MODE_AGGREGATE = CUpti_MetricEvaluationMode.define('CUPTI_METRIC_EVALUATION_MODE_AGGREGATE', 2)
CUPTI_METRIC_EVALUATION_MODE_FORCE_INT = CUpti_MetricEvaluationMode.define('CUPTI_METRIC_EVALUATION_MODE_FORCE_INT', 2147483647)
# Queryable attributes of a metric.
class CUpti_MetricAttribute(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_METRIC_ATTR_NAME = CUpti_MetricAttribute.define('CUPTI_METRIC_ATTR_NAME', 0)
CUPTI_METRIC_ATTR_SHORT_DESCRIPTION = CUpti_MetricAttribute.define('CUPTI_METRIC_ATTR_SHORT_DESCRIPTION', 1)
CUPTI_METRIC_ATTR_LONG_DESCRIPTION = CUpti_MetricAttribute.define('CUPTI_METRIC_ATTR_LONG_DESCRIPTION', 2)
CUPTI_METRIC_ATTR_CATEGORY = CUpti_MetricAttribute.define('CUPTI_METRIC_ATTR_CATEGORY', 3)
CUPTI_METRIC_ATTR_VALUE_KIND = CUpti_MetricAttribute.define('CUPTI_METRIC_ATTR_VALUE_KIND', 4)
CUPTI_METRIC_ATTR_EVALUATION_MODE = CUpti_MetricAttribute.define('CUPTI_METRIC_ATTR_EVALUATION_MODE', 5)
CUPTI_METRIC_ATTR_FORCE_INT = CUpti_MetricAttribute.define('CUPTI_METRIC_ATTR_FORCE_INT', 2147483647)
# Device-class values usable as the CUPTI_METRIC_PROPERTY_DEVICE_CLASS property.
class CUpti_MetricPropertyDeviceClass(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_METRIC_PROPERTY_DEVICE_CLASS_TESLA = CUpti_MetricPropertyDeviceClass.define('CUPTI_METRIC_PROPERTY_DEVICE_CLASS_TESLA', 0)
CUPTI_METRIC_PROPERTY_DEVICE_CLASS_QUADRO = CUpti_MetricPropertyDeviceClass.define('CUPTI_METRIC_PROPERTY_DEVICE_CLASS_QUADRO', 1)
CUPTI_METRIC_PROPERTY_DEVICE_CLASS_GEFORCE = CUpti_MetricPropertyDeviceClass.define('CUPTI_METRIC_PROPERTY_DEVICE_CLASS_GEFORCE', 2)
CUPTI_METRIC_PROPERTY_DEVICE_CLASS_TEGRA = CUpti_MetricPropertyDeviceClass.define('CUPTI_METRIC_PROPERTY_DEVICE_CLASS_TEGRA', 3)
# Device properties a metric formula may depend on (see cuptiMetricGetValue2).
class CUpti_MetricPropertyID(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_METRIC_PROPERTY_MULTIPROCESSOR_COUNT = CUpti_MetricPropertyID.define('CUPTI_METRIC_PROPERTY_MULTIPROCESSOR_COUNT', 0)
CUPTI_METRIC_PROPERTY_WARPS_PER_MULTIPROCESSOR = CUpti_MetricPropertyID.define('CUPTI_METRIC_PROPERTY_WARPS_PER_MULTIPROCESSOR', 1)
CUPTI_METRIC_PROPERTY_KERNEL_GPU_TIME = CUpti_MetricPropertyID.define('CUPTI_METRIC_PROPERTY_KERNEL_GPU_TIME', 2)
CUPTI_METRIC_PROPERTY_CLOCK_RATE = CUpti_MetricPropertyID.define('CUPTI_METRIC_PROPERTY_CLOCK_RATE', 3)
CUPTI_METRIC_PROPERTY_FRAME_BUFFER_COUNT = CUpti_MetricPropertyID.define('CUPTI_METRIC_PROPERTY_FRAME_BUFFER_COUNT', 4)
CUPTI_METRIC_PROPERTY_GLOBAL_MEMORY_BANDWIDTH = CUpti_MetricPropertyID.define('CUPTI_METRIC_PROPERTY_GLOBAL_MEMORY_BANDWIDTH', 5)
CUPTI_METRIC_PROPERTY_PCIE_LINK_RATE = CUpti_MetricPropertyID.define('CUPTI_METRIC_PROPERTY_PCIE_LINK_RATE', 6)
CUPTI_METRIC_PROPERTY_PCIE_LINK_WIDTH = CUpti_MetricPropertyID.define('CUPTI_METRIC_PROPERTY_PCIE_LINK_WIDTH', 7)
CUPTI_METRIC_PROPERTY_PCIE_GEN = CUpti_MetricPropertyID.define('CUPTI_METRIC_PROPERTY_PCIE_GEN', 8)
CUPTI_METRIC_PROPERTY_DEVICE_CLASS = CUpti_MetricPropertyID.define('CUPTI_METRIC_PROPERTY_DEVICE_CLASS', 9)
CUPTI_METRIC_PROPERTY_FLOP_SP_PER_CYCLE = CUpti_MetricPropertyID.define('CUPTI_METRIC_PROPERTY_FLOP_SP_PER_CYCLE', 10)
CUPTI_METRIC_PROPERTY_FLOP_DP_PER_CYCLE = CUpti_MetricPropertyID.define('CUPTI_METRIC_PROPERTY_FLOP_DP_PER_CYCLE', 11)
CUPTI_METRIC_PROPERTY_L2_UNITS = CUpti_MetricPropertyID.define('CUPTI_METRIC_PROPERTY_L2_UNITS', 12)
CUPTI_METRIC_PROPERTY_ECC_ENABLED = CUpti_MetricPropertyID.define('CUPTI_METRIC_PROPERTY_ECC_ENABLED', 13)
CUPTI_METRIC_PROPERTY_FLOP_HP_PER_CYCLE = CUpti_MetricPropertyID.define('CUPTI_METRIC_PROPERTY_FLOP_HP_PER_CYCLE', 14)
CUPTI_METRIC_PROPERTY_GPU_CPU_NVLINK_BANDWIDTH = CUpti_MetricPropertyID.define('CUPTI_METRIC_PROPERTY_GPU_CPU_NVLINK_BANDWIDTH', 15)
# --- CUPTI Metric API bindings: enumerate metrics, map them to the events
# they require, and compute metric values from collected event counts.
@dll.bind
def cuptiGetNumMetrics(numMetrics:c.POINTER[uint32_t]) -> CUptiResult: ...
@dll.bind
def cuptiEnumMetrics(arraySizeBytes:c.POINTER[size_t], metricArray:c.POINTER[CUpti_MetricID]) -> CUptiResult: ...
@dll.bind
def cuptiDeviceGetNumMetrics(device:CUdevice, numMetrics:c.POINTER[uint32_t]) -> CUptiResult: ...
@dll.bind
def cuptiDeviceEnumMetrics(device:CUdevice, arraySizeBytes:c.POINTER[size_t], metricArray:c.POINTER[CUpti_MetricID]) -> CUptiResult: ...
@dll.bind
def cuptiMetricGetAttribute(metric:CUpti_MetricID, attrib:CUpti_MetricAttribute, valueSize:c.POINTER[size_t], value:ctypes.c_void_p) -> CUptiResult: ...
@dll.bind
def cuptiMetricGetIdFromName(device:CUdevice, metricName:c.POINTER[Annotated[bytes, ctypes.c_char]], metric:c.POINTER[CUpti_MetricID]) -> CUptiResult: ...
@dll.bind
def cuptiMetricGetNumEvents(metric:CUpti_MetricID, numEvents:c.POINTER[uint32_t]) -> CUptiResult: ...
# NOTE(review): several pointer parameters below are generated as plain c_int32
# pointers where the C API uses CUpti_EventID* / CUpti_EventGroupSets** — this
# matches the generator's output, so callers must cast accordingly.
@dll.bind
def cuptiMetricEnumEvents(metric:CUpti_MetricID, eventIdArraySizeBytes:c.POINTER[size_t], eventIdArray:c.POINTER[Annotated[int, ctypes.c_int32]]) -> CUptiResult: ...
@dll.bind
def cuptiMetricGetNumProperties(metric:CUpti_MetricID, numProp:c.POINTER[uint32_t]) -> CUptiResult: ...
@dll.bind
def cuptiMetricEnumProperties(metric:CUpti_MetricID, propIdArraySizeBytes:c.POINTER[size_t], propIdArray:c.POINTER[CUpti_MetricPropertyID]) -> CUptiResult: ...
@dll.bind
def cuptiMetricGetRequiredEventGroupSets(context:CUcontext, metric:CUpti_MetricID, eventGroupSets:c.POINTER[c.POINTER[Annotated[int, ctypes.c_int32]]]) -> CUptiResult: ...
@dll.bind
def cuptiMetricCreateEventGroupSets(context:CUcontext, metricIdArraySizeBytes:size_t, metricIdArray:c.POINTER[CUpti_MetricID], eventGroupPasses:c.POINTER[c.POINTER[Annotated[int, ctypes.c_int32]]]) -> CUptiResult: ...
@dll.bind
def cuptiMetricGetValue(device:CUdevice, metric:CUpti_MetricID, eventIdArraySizeBytes:size_t, eventIdArray:c.POINTER[Annotated[int, ctypes.c_int32]], eventValueArraySizeBytes:size_t, eventValueArray:c.POINTER[uint64_t], timeDuration:uint64_t, metricValue:c.POINTER[CUpti_MetricValue]) -> CUptiResult: ...
@dll.bind
def cuptiMetricGetValue2(metric:CUpti_MetricID, eventIdArraySizeBytes:size_t, eventIdArray:c.POINTER[Annotated[int, ctypes.c_int32]], eventValueArraySizeBytes:size_t, eventValueArray:c.POINTER[uint64_t], propIdArraySizeBytes:size_t, propIdArray:c.POINTER[CUpti_MetricPropertyID], propValueArraySizeBytes:size_t, propValueArray:c.POINTER[uint64_t], metricValue:c.POINTER[CUpti_MetricValue]) -> CUptiResult: ...
# Auto-generated enum mirroring CUPTI's CUpti_driver_api_trace_cbid: one callback
# ID per traceable CUDA Driver API entry point. Backed by a 32-bit unsigned int.
# Members are registered on the class via .define(); the numeric values come from
# the CUPTI header and must never be renumbered or reordered. The constant list
# continues beyond this section. (NOTE(review): `cu64*` entries appear to be the
# legacy 64-bit variants of the corresponding `cu*` calls — confirm against the
# CUPTI header.)
class enum_CUpti_driver_api_trace_cbid_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_DRIVER_TRACE_CBID_INVALID = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_INVALID', 0)
# Initialization / version / device query callbacks.
CUPTI_DRIVER_TRACE_CBID_cuInit = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuInit', 1)
CUPTI_DRIVER_TRACE_CBID_cuDriverGetVersion = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuDriverGetVersion', 2)
CUPTI_DRIVER_TRACE_CBID_cuDeviceGet = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuDeviceGet', 3)
CUPTI_DRIVER_TRACE_CBID_cuDeviceGetCount = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuDeviceGetCount', 4)
CUPTI_DRIVER_TRACE_CBID_cuDeviceGetName = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuDeviceGetName', 5)
CUPTI_DRIVER_TRACE_CBID_cuDeviceComputeCapability = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuDeviceComputeCapability', 6)
CUPTI_DRIVER_TRACE_CBID_cuDeviceTotalMem = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuDeviceTotalMem', 7)
CUPTI_DRIVER_TRACE_CBID_cuDeviceGetProperties = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuDeviceGetProperties', 8)
CUPTI_DRIVER_TRACE_CBID_cuDeviceGetAttribute = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuDeviceGetAttribute', 9)
# Context management callbacks.
CUPTI_DRIVER_TRACE_CBID_cuCtxCreate = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuCtxCreate', 10)
CUPTI_DRIVER_TRACE_CBID_cuCtxDestroy = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuCtxDestroy', 11)
CUPTI_DRIVER_TRACE_CBID_cuCtxAttach = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuCtxAttach', 12)
CUPTI_DRIVER_TRACE_CBID_cuCtxDetach = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuCtxDetach', 13)
CUPTI_DRIVER_TRACE_CBID_cuCtxPushCurrent = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuCtxPushCurrent', 14)
CUPTI_DRIVER_TRACE_CBID_cuCtxPopCurrent = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuCtxPopCurrent', 15)
CUPTI_DRIVER_TRACE_CBID_cuCtxGetDevice = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuCtxGetDevice', 16)
CUPTI_DRIVER_TRACE_CBID_cuCtxSynchronize = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuCtxSynchronize', 17)
# Module management callbacks.
CUPTI_DRIVER_TRACE_CBID_cuModuleLoad = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuModuleLoad', 18)
CUPTI_DRIVER_TRACE_CBID_cuModuleLoadData = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuModuleLoadData', 19)
CUPTI_DRIVER_TRACE_CBID_cuModuleLoadDataEx = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuModuleLoadDataEx', 20)
CUPTI_DRIVER_TRACE_CBID_cuModuleLoadFatBinary = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuModuleLoadFatBinary', 21)
CUPTI_DRIVER_TRACE_CBID_cuModuleUnload = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuModuleUnload', 22)
CUPTI_DRIVER_TRACE_CBID_cuModuleGetFunction = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuModuleGetFunction', 23)
CUPTI_DRIVER_TRACE_CBID_cuModuleGetGlobal = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuModuleGetGlobal', 24)
CUPTI_DRIVER_TRACE_CBID_cu64ModuleGetGlobal = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64ModuleGetGlobal', 25)
CUPTI_DRIVER_TRACE_CBID_cuModuleGetTexRef = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuModuleGetTexRef', 26)
# Memory management callbacks (alloc/free/copy/set).
CUPTI_DRIVER_TRACE_CBID_cuMemGetInfo = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemGetInfo', 27)
CUPTI_DRIVER_TRACE_CBID_cu64MemGetInfo = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64MemGetInfo', 28)
CUPTI_DRIVER_TRACE_CBID_cuMemAlloc = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemAlloc', 29)
CUPTI_DRIVER_TRACE_CBID_cu64MemAlloc = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64MemAlloc', 30)
CUPTI_DRIVER_TRACE_CBID_cuMemAllocPitch = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemAllocPitch', 31)
CUPTI_DRIVER_TRACE_CBID_cu64MemAllocPitch = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64MemAllocPitch', 32)
CUPTI_DRIVER_TRACE_CBID_cuMemFree = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemFree', 33)
CUPTI_DRIVER_TRACE_CBID_cu64MemFree = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64MemFree', 34)
CUPTI_DRIVER_TRACE_CBID_cuMemGetAddressRange = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemGetAddressRange', 35)
CUPTI_DRIVER_TRACE_CBID_cu64MemGetAddressRange = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64MemGetAddressRange', 36)
CUPTI_DRIVER_TRACE_CBID_cuMemAllocHost = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemAllocHost', 37)
CUPTI_DRIVER_TRACE_CBID_cuMemFreeHost = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemFreeHost', 38)
CUPTI_DRIVER_TRACE_CBID_cuMemHostAlloc = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemHostAlloc', 39)
CUPTI_DRIVER_TRACE_CBID_cuMemHostGetDevicePointer = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemHostGetDevicePointer', 40)
CUPTI_DRIVER_TRACE_CBID_cu64MemHostGetDevicePointer = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64MemHostGetDevicePointer', 41)
CUPTI_DRIVER_TRACE_CBID_cuMemHostGetFlags = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemHostGetFlags', 42)
CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoD = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoD', 43)
CUPTI_DRIVER_TRACE_CBID_cu64MemcpyHtoD = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64MemcpyHtoD', 44)
CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoH = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoH', 45)
CUPTI_DRIVER_TRACE_CBID_cu64MemcpyDtoH = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64MemcpyDtoH', 46)
CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoD = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoD', 47)
CUPTI_DRIVER_TRACE_CBID_cu64MemcpyDtoD = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64MemcpyDtoD', 48)
CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoA = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoA', 49)
CUPTI_DRIVER_TRACE_CBID_cu64MemcpyDtoA = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64MemcpyDtoA', 50)
CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoD = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoD', 51)
CUPTI_DRIVER_TRACE_CBID_cu64MemcpyAtoD = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64MemcpyAtoD', 52)
CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoA = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoA', 53)
CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoH = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoH', 54)
CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoA = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoA', 55)
CUPTI_DRIVER_TRACE_CBID_cuMemcpy2D = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpy2D', 56)
CUPTI_DRIVER_TRACE_CBID_cuMemcpy2DUnaligned = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpy2DUnaligned', 57)
CUPTI_DRIVER_TRACE_CBID_cuMemcpy3D = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpy3D', 58)
CUPTI_DRIVER_TRACE_CBID_cu64Memcpy3D = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64Memcpy3D', 59)
CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoDAsync = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoDAsync', 60)
CUPTI_DRIVER_TRACE_CBID_cu64MemcpyHtoDAsync = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64MemcpyHtoDAsync', 61)
CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoHAsync = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoHAsync', 62)
CUPTI_DRIVER_TRACE_CBID_cu64MemcpyDtoHAsync = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64MemcpyDtoHAsync', 63)
CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoDAsync = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoDAsync', 64)
CUPTI_DRIVER_TRACE_CBID_cu64MemcpyDtoDAsync = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64MemcpyDtoDAsync', 65)
CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoAAsync = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoAAsync', 66)
CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoHAsync = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoHAsync', 67)
CUPTI_DRIVER_TRACE_CBID_cuMemcpy2DAsync = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpy2DAsync', 68)
CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DAsync = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DAsync', 69)
CUPTI_DRIVER_TRACE_CBID_cu64Memcpy3DAsync = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64Memcpy3DAsync', 70)
CUPTI_DRIVER_TRACE_CBID_cuMemsetD8 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemsetD8', 71)
CUPTI_DRIVER_TRACE_CBID_cu64MemsetD8 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64MemsetD8', 72)
CUPTI_DRIVER_TRACE_CBID_cuMemsetD16 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemsetD16', 73)
CUPTI_DRIVER_TRACE_CBID_cu64MemsetD16 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64MemsetD16', 74)
CUPTI_DRIVER_TRACE_CBID_cuMemsetD32 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemsetD32', 75)
CUPTI_DRIVER_TRACE_CBID_cu64MemsetD32 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64MemsetD32', 76)
CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D8 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D8', 77)
CUPTI_DRIVER_TRACE_CBID_cu64MemsetD2D8 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64MemsetD2D8', 78)
CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D16 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D16', 79)
CUPTI_DRIVER_TRACE_CBID_cu64MemsetD2D16 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64MemsetD2D16', 80)
CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D32 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D32', 81)
CUPTI_DRIVER_TRACE_CBID_cu64MemsetD2D32 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64MemsetD2D32', 82)
# Function / execution-control callbacks.
CUPTI_DRIVER_TRACE_CBID_cuFuncSetBlockShape = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuFuncSetBlockShape', 83)
CUPTI_DRIVER_TRACE_CBID_cuFuncSetSharedSize = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuFuncSetSharedSize', 84)
CUPTI_DRIVER_TRACE_CBID_cuFuncGetAttribute = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuFuncGetAttribute', 85)
CUPTI_DRIVER_TRACE_CBID_cuFuncSetCacheConfig = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuFuncSetCacheConfig', 86)
# Array and texture-reference callbacks.
CUPTI_DRIVER_TRACE_CBID_cuArrayCreate = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuArrayCreate', 87)
CUPTI_DRIVER_TRACE_CBID_cuArrayGetDescriptor = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuArrayGetDescriptor', 88)
CUPTI_DRIVER_TRACE_CBID_cuArrayDestroy = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuArrayDestroy', 89)
CUPTI_DRIVER_TRACE_CBID_cuArray3DCreate = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuArray3DCreate', 90)
CUPTI_DRIVER_TRACE_CBID_cuArray3DGetDescriptor = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuArray3DGetDescriptor', 91)
CUPTI_DRIVER_TRACE_CBID_cuTexRefCreate = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuTexRefCreate', 92)
CUPTI_DRIVER_TRACE_CBID_cuTexRefDestroy = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuTexRefDestroy', 93)
CUPTI_DRIVER_TRACE_CBID_cuTexRefSetArray = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuTexRefSetArray', 94)
CUPTI_DRIVER_TRACE_CBID_cuTexRefSetAddress = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuTexRefSetAddress', 95)
CUPTI_DRIVER_TRACE_CBID_cu64TexRefSetAddress = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64TexRefSetAddress', 96)
CUPTI_DRIVER_TRACE_CBID_cuTexRefSetAddress2D = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuTexRefSetAddress2D', 97)
CUPTI_DRIVER_TRACE_CBID_cu64TexRefSetAddress2D = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64TexRefSetAddress2D', 98)
CUPTI_DRIVER_TRACE_CBID_cuTexRefSetFormat = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuTexRefSetFormat', 99)
CUPTI_DRIVER_TRACE_CBID_cuTexRefSetAddressMode = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuTexRefSetAddressMode', 100)
CUPTI_DRIVER_TRACE_CBID_cuTexRefSetFilterMode = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuTexRefSetFilterMode', 101)
CUPTI_DRIVER_TRACE_CBID_cuTexRefSetFlags = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuTexRefSetFlags', 102)
CUPTI_DRIVER_TRACE_CBID_cuTexRefGetAddress = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuTexRefGetAddress', 103)
CUPTI_DRIVER_TRACE_CBID_cu64TexRefGetAddress = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64TexRefGetAddress', 104)
CUPTI_DRIVER_TRACE_CBID_cuTexRefGetArray = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuTexRefGetArray', 105)
CUPTI_DRIVER_TRACE_CBID_cuTexRefGetAddressMode = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuTexRefGetAddressMode', 106)
CUPTI_DRIVER_TRACE_CBID_cuTexRefGetFilterMode = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuTexRefGetFilterMode', 107)
CUPTI_DRIVER_TRACE_CBID_cuTexRefGetFormat = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuTexRefGetFormat', 108)
CUPTI_DRIVER_TRACE_CBID_cuTexRefGetFlags = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuTexRefGetFlags', 109)
# Kernel parameter and launch callbacks (legacy launch API).
CUPTI_DRIVER_TRACE_CBID_cuParamSetSize = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuParamSetSize', 110)
CUPTI_DRIVER_TRACE_CBID_cuParamSeti = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuParamSeti', 111)
CUPTI_DRIVER_TRACE_CBID_cuParamSetf = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuParamSetf', 112)
CUPTI_DRIVER_TRACE_CBID_cuParamSetv = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuParamSetv', 113)
CUPTI_DRIVER_TRACE_CBID_cuParamSetTexRef = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuParamSetTexRef', 114)
CUPTI_DRIVER_TRACE_CBID_cuLaunch = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuLaunch', 115)
CUPTI_DRIVER_TRACE_CBID_cuLaunchGrid = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuLaunchGrid', 116)
CUPTI_DRIVER_TRACE_CBID_cuLaunchGridAsync = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuLaunchGridAsync', 117)
# Event and stream callbacks.
CUPTI_DRIVER_TRACE_CBID_cuEventCreate = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuEventCreate', 118)
CUPTI_DRIVER_TRACE_CBID_cuEventRecord = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuEventRecord', 119)
CUPTI_DRIVER_TRACE_CBID_cuEventQuery = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuEventQuery', 120)
CUPTI_DRIVER_TRACE_CBID_cuEventSynchronize = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuEventSynchronize', 121)
CUPTI_DRIVER_TRACE_CBID_cuEventDestroy = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuEventDestroy', 122)
CUPTI_DRIVER_TRACE_CBID_cuEventElapsedTime = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuEventElapsedTime', 123)
CUPTI_DRIVER_TRACE_CBID_cuStreamCreate = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuStreamCreate', 124)
CUPTI_DRIVER_TRACE_CBID_cuStreamQuery = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuStreamQuery', 125)
CUPTI_DRIVER_TRACE_CBID_cuStreamSynchronize = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuStreamSynchronize', 126)
CUPTI_DRIVER_TRACE_CBID_cuStreamDestroy = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuStreamDestroy', 127)
# Graphics interop callbacks (generic + D3D9/10/11, OpenGL, VDPAU below).
CUPTI_DRIVER_TRACE_CBID_cuGraphicsUnregisterResource = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGraphicsUnregisterResource', 128)
CUPTI_DRIVER_TRACE_CBID_cuGraphicsSubResourceGetMappedArray = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGraphicsSubResourceGetMappedArray', 129)
CUPTI_DRIVER_TRACE_CBID_cuGraphicsResourceGetMappedPointer = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGraphicsResourceGetMappedPointer', 130)
CUPTI_DRIVER_TRACE_CBID_cu64GraphicsResourceGetMappedPointer = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64GraphicsResourceGetMappedPointer', 131)
CUPTI_DRIVER_TRACE_CBID_cuGraphicsResourceSetMapFlags = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGraphicsResourceSetMapFlags', 132)
CUPTI_DRIVER_TRACE_CBID_cuGraphicsMapResources = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGraphicsMapResources', 133)
CUPTI_DRIVER_TRACE_CBID_cuGraphicsUnmapResources = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGraphicsUnmapResources', 134)
CUPTI_DRIVER_TRACE_CBID_cuGetExportTable = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGetExportTable', 135)
CUPTI_DRIVER_TRACE_CBID_cuCtxSetLimit = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuCtxSetLimit', 136)
CUPTI_DRIVER_TRACE_CBID_cuCtxGetLimit = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuCtxGetLimit', 137)
CUPTI_DRIVER_TRACE_CBID_cuD3D10GetDevice = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D10GetDevice', 138)
CUPTI_DRIVER_TRACE_CBID_cuD3D10CtxCreate = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D10CtxCreate', 139)
CUPTI_DRIVER_TRACE_CBID_cuGraphicsD3D10RegisterResource = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGraphicsD3D10RegisterResource', 140)
CUPTI_DRIVER_TRACE_CBID_cuD3D10RegisterResource = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D10RegisterResource', 141)
CUPTI_DRIVER_TRACE_CBID_cuD3D10UnregisterResource = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D10UnregisterResource', 142)
CUPTI_DRIVER_TRACE_CBID_cuD3D10MapResources = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D10MapResources', 143)
CUPTI_DRIVER_TRACE_CBID_cuD3D10UnmapResources = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D10UnmapResources', 144)
CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceSetMapFlags = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceSetMapFlags', 145)
CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedArray = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedArray', 146)
CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedPointer = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedPointer', 147)
CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedSize = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedSize', 148)
CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedPitch = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedPitch', 149)
CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetSurfaceDimensions = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetSurfaceDimensions', 150)
CUPTI_DRIVER_TRACE_CBID_cuD3D11GetDevice = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D11GetDevice', 151)
CUPTI_DRIVER_TRACE_CBID_cuD3D11CtxCreate = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D11CtxCreate', 152)
CUPTI_DRIVER_TRACE_CBID_cuGraphicsD3D11RegisterResource = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGraphicsD3D11RegisterResource', 153)
CUPTI_DRIVER_TRACE_CBID_cuD3D9GetDevice = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D9GetDevice', 154)
CUPTI_DRIVER_TRACE_CBID_cuD3D9CtxCreate = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D9CtxCreate', 155)
CUPTI_DRIVER_TRACE_CBID_cuGraphicsD3D9RegisterResource = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGraphicsD3D9RegisterResource', 156)
CUPTI_DRIVER_TRACE_CBID_cuD3D9GetDirect3DDevice = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D9GetDirect3DDevice', 157)
CUPTI_DRIVER_TRACE_CBID_cuD3D9RegisterResource = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D9RegisterResource', 158)
CUPTI_DRIVER_TRACE_CBID_cuD3D9UnregisterResource = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D9UnregisterResource', 159)
CUPTI_DRIVER_TRACE_CBID_cuD3D9MapResources = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D9MapResources', 160)
CUPTI_DRIVER_TRACE_CBID_cuD3D9UnmapResources = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D9UnmapResources', 161)
CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceSetMapFlags = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceSetMapFlags', 162)
CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetSurfaceDimensions = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetSurfaceDimensions', 163)
CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedArray = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedArray', 164)
CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedPointer = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedPointer', 165)
CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedSize = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedSize', 166)
CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedPitch = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedPitch', 167)
CUPTI_DRIVER_TRACE_CBID_cuD3D9Begin = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D9Begin', 168)
CUPTI_DRIVER_TRACE_CBID_cuD3D9End = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D9End', 169)
CUPTI_DRIVER_TRACE_CBID_cuD3D9RegisterVertexBuffer = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D9RegisterVertexBuffer', 170)
CUPTI_DRIVER_TRACE_CBID_cuD3D9MapVertexBuffer = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D9MapVertexBuffer', 171)
CUPTI_DRIVER_TRACE_CBID_cuD3D9UnmapVertexBuffer = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D9UnmapVertexBuffer', 172)
CUPTI_DRIVER_TRACE_CBID_cuD3D9UnregisterVertexBuffer = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D9UnregisterVertexBuffer', 173)
# OpenGL interop callbacks.
CUPTI_DRIVER_TRACE_CBID_cuGLCtxCreate = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGLCtxCreate', 174)
CUPTI_DRIVER_TRACE_CBID_cuGraphicsGLRegisterBuffer = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGraphicsGLRegisterBuffer', 175)
CUPTI_DRIVER_TRACE_CBID_cuGraphicsGLRegisterImage = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGraphicsGLRegisterImage', 176)
CUPTI_DRIVER_TRACE_CBID_cuWGLGetDevice = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuWGLGetDevice', 177)
CUPTI_DRIVER_TRACE_CBID_cuGLInit = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGLInit', 178)
CUPTI_DRIVER_TRACE_CBID_cuGLRegisterBufferObject = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGLRegisterBufferObject', 179)
CUPTI_DRIVER_TRACE_CBID_cuGLMapBufferObject = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGLMapBufferObject', 180)
CUPTI_DRIVER_TRACE_CBID_cuGLUnmapBufferObject = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGLUnmapBufferObject', 181)
CUPTI_DRIVER_TRACE_CBID_cuGLUnregisterBufferObject = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGLUnregisterBufferObject', 182)
CUPTI_DRIVER_TRACE_CBID_cuGLSetBufferObjectMapFlags = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGLSetBufferObjectMapFlags', 183)
CUPTI_DRIVER_TRACE_CBID_cuGLMapBufferObjectAsync = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGLMapBufferObjectAsync', 184)
CUPTI_DRIVER_TRACE_CBID_cuGLUnmapBufferObjectAsync = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGLUnmapBufferObjectAsync', 185)
# VDPAU interop callbacks.
CUPTI_DRIVER_TRACE_CBID_cuVDPAUGetDevice = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuVDPAUGetDevice', 186)
CUPTI_DRIVER_TRACE_CBID_cuVDPAUCtxCreate = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuVDPAUCtxCreate', 187)
CUPTI_DRIVER_TRACE_CBID_cuGraphicsVDPAURegisterVideoSurface = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGraphicsVDPAURegisterVideoSurface', 188)
CUPTI_DRIVER_TRACE_CBID_cuGraphicsVDPAURegisterOutputSurface = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGraphicsVDPAURegisterOutputSurface', 189)
# Surface-reference callbacks.
CUPTI_DRIVER_TRACE_CBID_cuModuleGetSurfRef = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuModuleGetSurfRef', 190)
CUPTI_DRIVER_TRACE_CBID_cuSurfRefCreate = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuSurfRefCreate', 191)
CUPTI_DRIVER_TRACE_CBID_cuSurfRefDestroy = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuSurfRefDestroy', 192)
CUPTI_DRIVER_TRACE_CBID_cuSurfRefSetFormat = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuSurfRefSetFormat', 193)
CUPTI_DRIVER_TRACE_CBID_cuSurfRefSetArray = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuSurfRefSetArray', 194)
CUPTI_DRIVER_TRACE_CBID_cuSurfRefGetFormat = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuSurfRefGetFormat', 195)
CUPTI_DRIVER_TRACE_CBID_cuSurfRefGetArray = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuSurfRefGetArray', 196)
CUPTI_DRIVER_TRACE_CBID_cu64DeviceTotalMem = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64DeviceTotalMem', 197)
CUPTI_DRIVER_TRACE_CBID_cu64D3D10ResourceGetMappedPointer = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64D3D10ResourceGetMappedPointer', 198)
CUPTI_DRIVER_TRACE_CBID_cu64D3D10ResourceGetMappedSize = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64D3D10ResourceGetMappedSize', 199)
CUPTI_DRIVER_TRACE_CBID_cu64D3D10ResourceGetMappedPitch = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64D3D10ResourceGetMappedPitch', 200)
CUPTI_DRIVER_TRACE_CBID_cu64D3D10ResourceGetSurfaceDimensions = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64D3D10ResourceGetSurfaceDimensions', 201)
CUPTI_DRIVER_TRACE_CBID_cu64D3D9ResourceGetSurfaceDimensions = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64D3D9ResourceGetSurfaceDimensions', 202)
CUPTI_DRIVER_TRACE_CBID_cu64D3D9ResourceGetMappedPointer = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64D3D9ResourceGetMappedPointer', 203)
CUPTI_DRIVER_TRACE_CBID_cu64D3D9ResourceGetMappedSize = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64D3D9ResourceGetMappedSize', 204)
CUPTI_DRIVER_TRACE_CBID_cu64D3D9ResourceGetMappedPitch = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64D3D9ResourceGetMappedPitch', 205)
CUPTI_DRIVER_TRACE_CBID_cu64D3D9MapVertexBuffer = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64D3D9MapVertexBuffer', 206)
CUPTI_DRIVER_TRACE_CBID_cu64GLMapBufferObject = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64GLMapBufferObject', 207)
CUPTI_DRIVER_TRACE_CBID_cu64GLMapBufferObjectAsync = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64GLMapBufferObjectAsync', 208)
CUPTI_DRIVER_TRACE_CBID_cuD3D11GetDevices = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D11GetDevices', 209)
CUPTI_DRIVER_TRACE_CBID_cuD3D11CtxCreateOnDevice = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D11CtxCreateOnDevice', 210)
CUPTI_DRIVER_TRACE_CBID_cuD3D10GetDevices = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D10GetDevices', 211)
CUPTI_DRIVER_TRACE_CBID_cuD3D10CtxCreateOnDevice = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D10CtxCreateOnDevice', 212)
CUPTI_DRIVER_TRACE_CBID_cuD3D9GetDevices = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D9GetDevices', 213)
CUPTI_DRIVER_TRACE_CBID_cuD3D9CtxCreateOnDevice = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D9CtxCreateOnDevice', 214)
CUPTI_DRIVER_TRACE_CBID_cu64MemHostAlloc = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64MemHostAlloc', 215)
# Async memset callbacks.
CUPTI_DRIVER_TRACE_CBID_cuMemsetD8Async = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemsetD8Async', 216)
CUPTI_DRIVER_TRACE_CBID_cu64MemsetD8Async = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64MemsetD8Async', 217)
CUPTI_DRIVER_TRACE_CBID_cuMemsetD16Async = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemsetD16Async', 218)
CUPTI_DRIVER_TRACE_CBID_cu64MemsetD16Async = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64MemsetD16Async', 219)
CUPTI_DRIVER_TRACE_CBID_cuMemsetD32Async = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemsetD32Async', 220)
CUPTI_DRIVER_TRACE_CBID_cu64MemsetD32Async = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64MemsetD32Async', 221)
CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D8Async = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D8Async', 222)
CUPTI_DRIVER_TRACE_CBID_cu64MemsetD2D8Async = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64MemsetD2D8Async', 223)
CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D16Async = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D16Async', 224)
CUPTI_DRIVER_TRACE_CBID_cu64MemsetD2D16Async = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64MemsetD2D16Async', 225)
CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D32Async = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D32Async', 226)
CUPTI_DRIVER_TRACE_CBID_cu64MemsetD2D32Async = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64MemsetD2D32Async', 227)
CUPTI_DRIVER_TRACE_CBID_cu64ArrayCreate = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64ArrayCreate', 228)
CUPTI_DRIVER_TRACE_CBID_cu64ArrayGetDescriptor = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64ArrayGetDescriptor', 229)
CUPTI_DRIVER_TRACE_CBID_cu64Array3DCreate = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64Array3DCreate', 230)
CUPTI_DRIVER_TRACE_CBID_cu64Array3DGetDescriptor = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64Array3DGetDescriptor', 231)
CUPTI_DRIVER_TRACE_CBID_cu64Memcpy2D = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64Memcpy2D', 232)
CUPTI_DRIVER_TRACE_CBID_cu64Memcpy2DUnaligned = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64Memcpy2DUnaligned', 233)
CUPTI_DRIVER_TRACE_CBID_cu64Memcpy2DAsync = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cu64Memcpy2DAsync', 234)
# _v2 context-creation variants.
CUPTI_DRIVER_TRACE_CBID_cuCtxCreate_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuCtxCreate_v2', 235)
CUPTI_DRIVER_TRACE_CBID_cuD3D10CtxCreate_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D10CtxCreate_v2', 236)
CUPTI_DRIVER_TRACE_CBID_cuD3D11CtxCreate_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D11CtxCreate_v2', 237)
CUPTI_DRIVER_TRACE_CBID_cuD3D9CtxCreate_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D9CtxCreate_v2', 238)
CUPTI_DRIVER_TRACE_CBID_cuGLCtxCreate_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGLCtxCreate_v2', 239)
CUPTI_DRIVER_TRACE_CBID_cuVDPAUCtxCreate_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuVDPAUCtxCreate_v2', 240)
CUPTI_DRIVER_TRACE_CBID_cuModuleGetGlobal_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuModuleGetGlobal_v2', 241)
CUPTI_DRIVER_TRACE_CBID_cuMemGetInfo_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemGetInfo_v2', 242)
CUPTI_DRIVER_TRACE_CBID_cuMemAlloc_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemAlloc_v2', 243)
CUPTI_DRIVER_TRACE_CBID_cuMemAllocPitch_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemAllocPitch_v2', 244)
CUPTI_DRIVER_TRACE_CBID_cuMemFree_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemFree_v2', 245)
CUPTI_DRIVER_TRACE_CBID_cuMemGetAddressRange_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemGetAddressRange_v2', 246)
CUPTI_DRIVER_TRACE_CBID_cuMemHostGetDevicePointer_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemHostGetDevicePointer_v2', 247)
CUPTI_DRIVER_TRACE_CBID_cuMemcpy_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpy_v2', 248)
CUPTI_DRIVER_TRACE_CBID_cuMemsetD8_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemsetD8_v2', 249)
CUPTI_DRIVER_TRACE_CBID_cuMemsetD16_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemsetD16_v2', 250)
CUPTI_DRIVER_TRACE_CBID_cuMemsetD32_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemsetD32_v2', 251)
CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D8_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D8_v2', 252)
CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D16_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D16_v2', 253)
CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D32_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D32_v2', 254)
CUPTI_DRIVER_TRACE_CBID_cuTexRefSetAddress_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuTexRefSetAddress_v2', 255)
CUPTI_DRIVER_TRACE_CBID_cuTexRefSetAddress2D_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuTexRefSetAddress2D_v2', 256)
CUPTI_DRIVER_TRACE_CBID_cuTexRefGetAddress_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuTexRefGetAddress_v2', 257)
CUPTI_DRIVER_TRACE_CBID_cuGraphicsResourceGetMappedPointer_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGraphicsResourceGetMappedPointer_v2', 258)
CUPTI_DRIVER_TRACE_CBID_cuDeviceTotalMem_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuDeviceTotalMem_v2', 259)
CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedPointer_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedPointer_v2', 260)
CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedSize_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedSize_v2', 261)
CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedPitch_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedPitch_v2', 262)
CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetSurfaceDimensions_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetSurfaceDimensions_v2', 263)
CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetSurfaceDimensions_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetSurfaceDimensions_v2', 264)
CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedPointer_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedPointer_v2', 265)
CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedSize_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedSize_v2', 266)
CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedPitch_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedPitch_v2', 267)
CUPTI_DRIVER_TRACE_CBID_cuD3D9MapVertexBuffer_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D9MapVertexBuffer_v2', 268)
CUPTI_DRIVER_TRACE_CBID_cuGLMapBufferObject_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGLMapBufferObject_v2', 269)
CUPTI_DRIVER_TRACE_CBID_cuGLMapBufferObjectAsync_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGLMapBufferObjectAsync_v2', 270)
CUPTI_DRIVER_TRACE_CBID_cuMemHostAlloc_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemHostAlloc_v2', 271)
CUPTI_DRIVER_TRACE_CBID_cuArrayCreate_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuArrayCreate_v2', 272)
CUPTI_DRIVER_TRACE_CBID_cuArrayGetDescriptor_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuArrayGetDescriptor_v2', 273)
CUPTI_DRIVER_TRACE_CBID_cuArray3DCreate_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuArray3DCreate_v2', 274)
CUPTI_DRIVER_TRACE_CBID_cuArray3DGetDescriptor_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuArray3DGetDescriptor_v2', 275)
CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoD_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoD_v2', 276)
CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoDAsync_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoDAsync_v2', 277)
CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoH_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoH_v2', 278)
CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoHAsync_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoHAsync_v2', 279)
CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoD_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoD_v2', 280)
CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoDAsync_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoDAsync_v2', 281)
CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoH_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoH_v2', 282)
CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoHAsync_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoHAsync_v2', 283)
CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoD_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoD_v2', 284)
CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoA_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoA_v2', 285)
CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoA_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoA_v2', 286)
CUPTI_DRIVER_TRACE_CBID_cuMemcpy2D_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpy2D_v2', 287)
CUPTI_DRIVER_TRACE_CBID_cuMemcpy2DUnaligned_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpy2DUnaligned_v2', 288)
CUPTI_DRIVER_TRACE_CBID_cuMemcpy2DAsync_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpy2DAsync_v2', 289)
CUPTI_DRIVER_TRACE_CBID_cuMemcpy3D_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpy3D_v2', 290)
CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DAsync_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DAsync_v2', 291)
CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoA_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoA_v2', 292)
CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoAAsync_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoAAsync_v2', 293)
CUPTI_DRIVER_TRACE_CBID_cuMemAllocHost_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemAllocHost_v2', 294)
CUPTI_DRIVER_TRACE_CBID_cuStreamWaitEvent = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuStreamWaitEvent', 295)
CUPTI_DRIVER_TRACE_CBID_cuCtxGetApiVersion = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuCtxGetApiVersion', 296)
CUPTI_DRIVER_TRACE_CBID_cuD3D10GetDirect3DDevice = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D10GetDirect3DDevice', 297)
CUPTI_DRIVER_TRACE_CBID_cuD3D11GetDirect3DDevice = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuD3D11GetDirect3DDevice', 298)
CUPTI_DRIVER_TRACE_CBID_cuCtxGetCacheConfig = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuCtxGetCacheConfig', 299)
CUPTI_DRIVER_TRACE_CBID_cuCtxSetCacheConfig = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuCtxSetCacheConfig', 300)
CUPTI_DRIVER_TRACE_CBID_cuMemHostRegister = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemHostRegister', 301)
CUPTI_DRIVER_TRACE_CBID_cuMemHostUnregister = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemHostUnregister', 302)
CUPTI_DRIVER_TRACE_CBID_cuCtxSetCurrent = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuCtxSetCurrent', 303)
CUPTI_DRIVER_TRACE_CBID_cuCtxGetCurrent = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuCtxGetCurrent', 304)
CUPTI_DRIVER_TRACE_CBID_cuMemcpy = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpy', 305)
CUPTI_DRIVER_TRACE_CBID_cuMemcpyAsync = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpyAsync', 306)
CUPTI_DRIVER_TRACE_CBID_cuLaunchKernel = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuLaunchKernel', 307)
CUPTI_DRIVER_TRACE_CBID_cuProfilerStart = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuProfilerStart', 308)
CUPTI_DRIVER_TRACE_CBID_cuProfilerStop = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuProfilerStop', 309)
CUPTI_DRIVER_TRACE_CBID_cuPointerGetAttribute = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuPointerGetAttribute', 310)
CUPTI_DRIVER_TRACE_CBID_cuProfilerInitialize = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuProfilerInitialize', 311)
CUPTI_DRIVER_TRACE_CBID_cuDeviceCanAccessPeer = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuDeviceCanAccessPeer', 312)
CUPTI_DRIVER_TRACE_CBID_cuCtxEnablePeerAccess = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuCtxEnablePeerAccess', 313)
CUPTI_DRIVER_TRACE_CBID_cuCtxDisablePeerAccess = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuCtxDisablePeerAccess', 314)
CUPTI_DRIVER_TRACE_CBID_cuMemPeerRegister = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemPeerRegister', 315)
CUPTI_DRIVER_TRACE_CBID_cuMemPeerUnregister = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemPeerUnregister', 316)
CUPTI_DRIVER_TRACE_CBID_cuMemPeerGetDevicePointer = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemPeerGetDevicePointer', 317)
CUPTI_DRIVER_TRACE_CBID_cuMemcpyPeer = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpyPeer', 318)
CUPTI_DRIVER_TRACE_CBID_cuMemcpyPeerAsync = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpyPeerAsync', 319)
CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DPeer = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DPeer', 320)
CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DPeerAsync = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DPeerAsync', 321)
CUPTI_DRIVER_TRACE_CBID_cuCtxDestroy_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuCtxDestroy_v2', 322)
CUPTI_DRIVER_TRACE_CBID_cuCtxPushCurrent_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuCtxPushCurrent_v2', 323)
CUPTI_DRIVER_TRACE_CBID_cuCtxPopCurrent_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuCtxPopCurrent_v2', 324)
CUPTI_DRIVER_TRACE_CBID_cuEventDestroy_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuEventDestroy_v2', 325)
CUPTI_DRIVER_TRACE_CBID_cuStreamDestroy_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuStreamDestroy_v2', 326)
CUPTI_DRIVER_TRACE_CBID_cuTexRefSetAddress2D_v3 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuTexRefSetAddress2D_v3', 327)
CUPTI_DRIVER_TRACE_CBID_cuIpcGetMemHandle = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuIpcGetMemHandle', 328)
CUPTI_DRIVER_TRACE_CBID_cuIpcOpenMemHandle = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuIpcOpenMemHandle', 329)
CUPTI_DRIVER_TRACE_CBID_cuIpcCloseMemHandle = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuIpcCloseMemHandle', 330)
CUPTI_DRIVER_TRACE_CBID_cuDeviceGetByPCIBusId = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuDeviceGetByPCIBusId', 331)
CUPTI_DRIVER_TRACE_CBID_cuDeviceGetPCIBusId = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuDeviceGetPCIBusId', 332)
CUPTI_DRIVER_TRACE_CBID_cuGLGetDevices = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGLGetDevices', 333)
CUPTI_DRIVER_TRACE_CBID_cuIpcGetEventHandle = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuIpcGetEventHandle', 334)
CUPTI_DRIVER_TRACE_CBID_cuIpcOpenEventHandle = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuIpcOpenEventHandle', 335)
CUPTI_DRIVER_TRACE_CBID_cuCtxSetSharedMemConfig = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuCtxSetSharedMemConfig', 336)
CUPTI_DRIVER_TRACE_CBID_cuCtxGetSharedMemConfig = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuCtxGetSharedMemConfig', 337)
CUPTI_DRIVER_TRACE_CBID_cuFuncSetSharedMemConfig = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuFuncSetSharedMemConfig', 338)
CUPTI_DRIVER_TRACE_CBID_cuTexObjectCreate = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuTexObjectCreate', 339)
CUPTI_DRIVER_TRACE_CBID_cuTexObjectDestroy = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuTexObjectDestroy', 340)
CUPTI_DRIVER_TRACE_CBID_cuTexObjectGetResourceDesc = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuTexObjectGetResourceDesc', 341)
CUPTI_DRIVER_TRACE_CBID_cuTexObjectGetTextureDesc = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuTexObjectGetTextureDesc', 342)
CUPTI_DRIVER_TRACE_CBID_cuSurfObjectCreate = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuSurfObjectCreate', 343)
CUPTI_DRIVER_TRACE_CBID_cuSurfObjectDestroy = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuSurfObjectDestroy', 344)
CUPTI_DRIVER_TRACE_CBID_cuSurfObjectGetResourceDesc = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuSurfObjectGetResourceDesc', 345)
CUPTI_DRIVER_TRACE_CBID_cuStreamAddCallback = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuStreamAddCallback', 346)
CUPTI_DRIVER_TRACE_CBID_cuMipmappedArrayCreate = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMipmappedArrayCreate', 347)
CUPTI_DRIVER_TRACE_CBID_cuMipmappedArrayGetLevel = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMipmappedArrayGetLevel', 348)
CUPTI_DRIVER_TRACE_CBID_cuMipmappedArrayDestroy = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMipmappedArrayDestroy', 349)
CUPTI_DRIVER_TRACE_CBID_cuTexRefSetMipmappedArray = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuTexRefSetMipmappedArray', 350)
CUPTI_DRIVER_TRACE_CBID_cuTexRefSetMipmapFilterMode = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuTexRefSetMipmapFilterMode', 351)
CUPTI_DRIVER_TRACE_CBID_cuTexRefSetMipmapLevelBias = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuTexRefSetMipmapLevelBias', 352)
CUPTI_DRIVER_TRACE_CBID_cuTexRefSetMipmapLevelClamp = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuTexRefSetMipmapLevelClamp', 353)
CUPTI_DRIVER_TRACE_CBID_cuTexRefSetMaxAnisotropy = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuTexRefSetMaxAnisotropy', 354)
CUPTI_DRIVER_TRACE_CBID_cuTexRefGetMipmappedArray = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuTexRefGetMipmappedArray', 355)
CUPTI_DRIVER_TRACE_CBID_cuTexRefGetMipmapFilterMode = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuTexRefGetMipmapFilterMode', 356)
CUPTI_DRIVER_TRACE_CBID_cuTexRefGetMipmapLevelBias = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuTexRefGetMipmapLevelBias', 357)
CUPTI_DRIVER_TRACE_CBID_cuTexRefGetMipmapLevelClamp = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuTexRefGetMipmapLevelClamp', 358)
CUPTI_DRIVER_TRACE_CBID_cuTexRefGetMaxAnisotropy = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuTexRefGetMaxAnisotropy', 359)
CUPTI_DRIVER_TRACE_CBID_cuGraphicsResourceGetMappedMipmappedArray = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGraphicsResourceGetMappedMipmappedArray', 360)
CUPTI_DRIVER_TRACE_CBID_cuTexObjectGetResourceViewDesc = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuTexObjectGetResourceViewDesc', 361)
CUPTI_DRIVER_TRACE_CBID_cuLinkCreate = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuLinkCreate', 362)
CUPTI_DRIVER_TRACE_CBID_cuLinkAddData = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuLinkAddData', 363)
CUPTI_DRIVER_TRACE_CBID_cuLinkAddFile = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuLinkAddFile', 364)
CUPTI_DRIVER_TRACE_CBID_cuLinkComplete = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuLinkComplete', 365)
CUPTI_DRIVER_TRACE_CBID_cuLinkDestroy = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuLinkDestroy', 366)
CUPTI_DRIVER_TRACE_CBID_cuStreamCreateWithPriority = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuStreamCreateWithPriority', 367)
CUPTI_DRIVER_TRACE_CBID_cuStreamGetPriority = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuStreamGetPriority', 368)
CUPTI_DRIVER_TRACE_CBID_cuStreamGetFlags = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuStreamGetFlags', 369)
CUPTI_DRIVER_TRACE_CBID_cuCtxGetStreamPriorityRange = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuCtxGetStreamPriorityRange', 370)
CUPTI_DRIVER_TRACE_CBID_cuMemAllocManaged = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemAllocManaged', 371)
CUPTI_DRIVER_TRACE_CBID_cuGetErrorString = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGetErrorString', 372)
CUPTI_DRIVER_TRACE_CBID_cuGetErrorName = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGetErrorName', 373)
CUPTI_DRIVER_TRACE_CBID_cuOccupancyMaxActiveBlocksPerMultiprocessor = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuOccupancyMaxActiveBlocksPerMultiprocessor', 374)
CUPTI_DRIVER_TRACE_CBID_cuCompilePtx = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuCompilePtx', 375)
CUPTI_DRIVER_TRACE_CBID_cuBinaryFree = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuBinaryFree', 376)
CUPTI_DRIVER_TRACE_CBID_cuStreamAttachMemAsync = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuStreamAttachMemAsync', 377)
CUPTI_DRIVER_TRACE_CBID_cuPointerSetAttribute = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuPointerSetAttribute', 378)
CUPTI_DRIVER_TRACE_CBID_cuMemHostRegister_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemHostRegister_v2', 379)
CUPTI_DRIVER_TRACE_CBID_cuGraphicsResourceSetMapFlags_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGraphicsResourceSetMapFlags_v2', 380)
CUPTI_DRIVER_TRACE_CBID_cuLinkCreate_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuLinkCreate_v2', 381)
CUPTI_DRIVER_TRACE_CBID_cuLinkAddData_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuLinkAddData_v2', 382)
CUPTI_DRIVER_TRACE_CBID_cuLinkAddFile_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuLinkAddFile_v2', 383)
CUPTI_DRIVER_TRACE_CBID_cuOccupancyMaxPotentialBlockSize = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuOccupancyMaxPotentialBlockSize', 384)
CUPTI_DRIVER_TRACE_CBID_cuGLGetDevices_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGLGetDevices_v2', 385)
CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxRetain = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxRetain', 386)
CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxRelease = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxRelease', 387)
CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxSetFlags = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxSetFlags', 388)
CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxReset = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxReset', 389)
CUPTI_DRIVER_TRACE_CBID_cuGraphicsEGLRegisterImage = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGraphicsEGLRegisterImage', 390)
CUPTI_DRIVER_TRACE_CBID_cuCtxGetFlags = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuCtxGetFlags', 391)
CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxGetState = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxGetState', 392)
CUPTI_DRIVER_TRACE_CBID_cuEGLStreamConsumerConnect = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuEGLStreamConsumerConnect', 393)
CUPTI_DRIVER_TRACE_CBID_cuEGLStreamConsumerDisconnect = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuEGLStreamConsumerDisconnect', 394)
CUPTI_DRIVER_TRACE_CBID_cuEGLStreamConsumerAcquireFrame = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuEGLStreamConsumerAcquireFrame', 395)
CUPTI_DRIVER_TRACE_CBID_cuEGLStreamConsumerReleaseFrame = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuEGLStreamConsumerReleaseFrame', 396)
CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoD_v2_ptds = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoD_v2_ptds', 397)
CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoH_v2_ptds = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoH_v2_ptds', 398)
CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoD_v2_ptds = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoD_v2_ptds', 399)
CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoA_v2_ptds = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoA_v2_ptds', 400)
CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoD_v2_ptds = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoD_v2_ptds', 401)
CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoA_v2_ptds = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoA_v2_ptds', 402)
CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoH_v2_ptds = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoH_v2_ptds', 403)
CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoA_v2_ptds = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoA_v2_ptds', 404)
CUPTI_DRIVER_TRACE_CBID_cuMemcpy2D_v2_ptds = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpy2D_v2_ptds', 405)
CUPTI_DRIVER_TRACE_CBID_cuMemcpy2DUnaligned_v2_ptds = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpy2DUnaligned_v2_ptds', 406)
CUPTI_DRIVER_TRACE_CBID_cuMemcpy3D_v2_ptds = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpy3D_v2_ptds', 407)
CUPTI_DRIVER_TRACE_CBID_cuMemcpy_ptds = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpy_ptds', 408)
CUPTI_DRIVER_TRACE_CBID_cuMemcpyPeer_ptds = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpyPeer_ptds', 409)
CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DPeer_ptds = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DPeer_ptds', 410)
CUPTI_DRIVER_TRACE_CBID_cuMemsetD8_v2_ptds = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemsetD8_v2_ptds', 411)
CUPTI_DRIVER_TRACE_CBID_cuMemsetD16_v2_ptds = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemsetD16_v2_ptds', 412)
CUPTI_DRIVER_TRACE_CBID_cuMemsetD32_v2_ptds = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemsetD32_v2_ptds', 413)
CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D8_v2_ptds = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D8_v2_ptds', 414)
CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D16_v2_ptds = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D16_v2_ptds', 415)
CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D32_v2_ptds = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D32_v2_ptds', 416)
CUPTI_DRIVER_TRACE_CBID_cuGLMapBufferObject_v2_ptds = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGLMapBufferObject_v2_ptds', 417)
CUPTI_DRIVER_TRACE_CBID_cuMemcpyAsync_ptsz = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpyAsync_ptsz', 418)
CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoAAsync_v2_ptsz = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoAAsync_v2_ptsz', 419)
CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoHAsync_v2_ptsz = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoHAsync_v2_ptsz', 420)
CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoDAsync_v2_ptsz = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoDAsync_v2_ptsz', 421)
CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoHAsync_v2_ptsz = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoHAsync_v2_ptsz', 422)
CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoDAsync_v2_ptsz = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoDAsync_v2_ptsz', 423)
CUPTI_DRIVER_TRACE_CBID_cuMemcpy2DAsync_v2_ptsz = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpy2DAsync_v2_ptsz', 424)
CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DAsync_v2_ptsz = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DAsync_v2_ptsz', 425)
CUPTI_DRIVER_TRACE_CBID_cuMemcpyPeerAsync_ptsz = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpyPeerAsync_ptsz', 426)
CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DPeerAsync_ptsz = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DPeerAsync_ptsz', 427)
CUPTI_DRIVER_TRACE_CBID_cuMemsetD8Async_ptsz = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemsetD8Async_ptsz', 428)
CUPTI_DRIVER_TRACE_CBID_cuMemsetD16Async_ptsz = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemsetD16Async_ptsz', 429)
CUPTI_DRIVER_TRACE_CBID_cuMemsetD32Async_ptsz = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemsetD32Async_ptsz', 430)
# CUPTI driver-API trace callback IDs 431-666 (generated binding table).
# Each entry registers one CUDA driver entry point with
# enum_CUpti_driver_api_trace_cbid_enum and publishes the resulting constant
# under its full CUPTI_DRIVER_TRACE_CBID_* name at module level, exactly as
# the per-line generated form does.  The cbid values in this stretch are
# strictly consecutive, so they are derived by enumeration from 431 rather
# than spelled out once per line; registration order is unchanged.
for _cbid, _api in enumerate(
    (
        'cuMemsetD2D8Async_ptsz',
        'cuMemsetD2D16Async_ptsz',
        'cuMemsetD2D32Async_ptsz',
        'cuStreamGetPriority_ptsz',
        'cuStreamGetFlags_ptsz',
        'cuStreamWaitEvent_ptsz',
        'cuStreamAddCallback_ptsz',
        'cuStreamAttachMemAsync_ptsz',
        'cuStreamQuery_ptsz',
        'cuStreamSynchronize_ptsz',
        'cuEventRecord_ptsz',
        'cuLaunchKernel_ptsz',
        'cuGraphicsMapResources_ptsz',
        'cuGraphicsUnmapResources_ptsz',
        'cuGLMapBufferObjectAsync_v2_ptsz',
        'cuEGLStreamProducerConnect',
        'cuEGLStreamProducerDisconnect',
        'cuEGLStreamProducerPresentFrame',
        'cuGraphicsResourceGetMappedEglFrame',
        'cuPointerGetAttributes',
        'cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags',
        'cuOccupancyMaxPotentialBlockSizeWithFlags',
        'cuEGLStreamProducerReturnFrame',
        'cuDeviceGetP2PAttribute',
        'cuTexRefSetBorderColor',
        'cuTexRefGetBorderColor',
        'cuMemAdvise',
        'cuStreamWaitValue32',
        'cuStreamWaitValue32_ptsz',
        'cuStreamWriteValue32',
        'cuStreamWriteValue32_ptsz',
        'cuStreamBatchMemOp',
        'cuStreamBatchMemOp_ptsz',
        'cuNVNbufferGetPointer',
        'cuNVNtextureGetArray',
        'cuNNSetAllocator',
        'cuMemPrefetchAsync',
        'cuMemPrefetchAsync_ptsz',
        'cuEventCreateFromNVNSync',
        'cuEGLStreamConsumerConnectWithFlags',
        'cuMemRangeGetAttribute',
        'cuMemRangeGetAttributes',
        'cuStreamWaitValue64',
        'cuStreamWaitValue64_ptsz',
        'cuStreamWriteValue64',
        'cuStreamWriteValue64_ptsz',
        'cuLaunchCooperativeKernel',
        'cuLaunchCooperativeKernel_ptsz',
        'cuEventCreateFromEGLSync',
        'cuLaunchCooperativeKernelMultiDevice',
        'cuFuncSetAttribute',
        'cuDeviceGetUuid',
        'cuStreamGetCtx',
        'cuStreamGetCtx_ptsz',
        'cuImportExternalMemory',
        'cuExternalMemoryGetMappedBuffer',
        'cuExternalMemoryGetMappedMipmappedArray',
        'cuDestroyExternalMemory',
        'cuImportExternalSemaphore',
        'cuSignalExternalSemaphoresAsync',
        'cuSignalExternalSemaphoresAsync_ptsz',
        'cuWaitExternalSemaphoresAsync',
        'cuWaitExternalSemaphoresAsync_ptsz',
        'cuDestroyExternalSemaphore',
        'cuStreamBeginCapture',
        'cuStreamBeginCapture_ptsz',
        'cuStreamEndCapture',
        'cuStreamEndCapture_ptsz',
        'cuStreamIsCapturing',
        'cuStreamIsCapturing_ptsz',
        'cuGraphCreate',
        'cuGraphAddKernelNode',
        'cuGraphKernelNodeGetParams',
        'cuGraphAddMemcpyNode',
        'cuGraphMemcpyNodeGetParams',
        'cuGraphAddMemsetNode',
        'cuGraphMemsetNodeGetParams',
        'cuGraphMemsetNodeSetParams',
        'cuGraphNodeGetType',
        'cuGraphGetRootNodes',
        'cuGraphNodeGetDependencies',
        'cuGraphNodeGetDependentNodes',
        'cuGraphInstantiate',
        'cuGraphLaunch',
        'cuGraphLaunch_ptsz',
        'cuGraphExecDestroy',
        'cuGraphDestroy',
        'cuGraphAddDependencies',
        'cuGraphRemoveDependencies',
        'cuGraphMemcpyNodeSetParams',
        'cuGraphKernelNodeSetParams',
        'cuGraphDestroyNode',
        'cuGraphClone',
        'cuGraphNodeFindInClone',
        'cuGraphAddChildGraphNode',
        'cuGraphAddEmptyNode',
        'cuLaunchHostFunc',
        'cuLaunchHostFunc_ptsz',
        'cuGraphChildGraphNodeGetGraph',
        'cuGraphAddHostNode',
        'cuGraphHostNodeGetParams',
        'cuDeviceGetLuid',
        'cuGraphHostNodeSetParams',
        'cuGraphGetNodes',
        'cuGraphGetEdges',
        'cuStreamGetCaptureInfo',
        'cuStreamGetCaptureInfo_ptsz',
        'cuGraphExecKernelNodeSetParams',
        'cuStreamBeginCapture_v2',
        'cuStreamBeginCapture_v2_ptsz',
        'cuThreadExchangeStreamCaptureMode',
        'cuDeviceGetNvSciSyncAttributes',
        'cuOccupancyAvailableDynamicSMemPerBlock',
        'cuDevicePrimaryCtxRelease_v2',
        'cuDevicePrimaryCtxReset_v2',
        'cuDevicePrimaryCtxSetFlags_v2',
        'cuMemAddressReserve',
        'cuMemAddressFree',
        'cuMemCreate',
        'cuMemRelease',
        'cuMemMap',
        'cuMemUnmap',
        'cuMemSetAccess',
        'cuMemExportToShareableHandle',
        'cuMemImportFromShareableHandle',
        'cuMemGetAllocationGranularity',
        'cuMemGetAllocationPropertiesFromHandle',
        'cuMemGetAccess',
        'cuStreamSetFlags',
        'cuStreamSetFlags_ptsz',
        'cuGraphExecUpdate',
        'cuGraphExecMemcpyNodeSetParams',
        'cuGraphExecMemsetNodeSetParams',
        'cuGraphExecHostNodeSetParams',
        'cuMemRetainAllocationHandle',
        'cuFuncGetModule',
        'cuIpcOpenMemHandle_v2',
        'cuCtxResetPersistingL2Cache',
        'cuGraphKernelNodeCopyAttributes',
        'cuGraphKernelNodeGetAttribute',
        'cuGraphKernelNodeSetAttribute',
        'cuStreamCopyAttributes',
        'cuStreamCopyAttributes_ptsz',
        'cuStreamGetAttribute',
        'cuStreamGetAttribute_ptsz',
        'cuStreamSetAttribute',
        'cuStreamSetAttribute_ptsz',
        'cuGraphInstantiate_v2',
        'cuDeviceGetTexture1DLinearMaxWidth',
        'cuGraphUpload',
        'cuGraphUpload_ptsz',
        'cuArrayGetSparseProperties',
        'cuMipmappedArrayGetSparseProperties',
        'cuMemMapArrayAsync',
        'cuMemMapArrayAsync_ptsz',
        'cuGraphExecChildGraphNodeSetParams',
        'cuEventRecordWithFlags',
        'cuEventRecordWithFlags_ptsz',
        'cuGraphAddEventRecordNode',
        'cuGraphAddEventWaitNode',
        'cuGraphEventRecordNodeGetEvent',
        'cuGraphEventWaitNodeGetEvent',
        'cuGraphEventRecordNodeSetEvent',
        'cuGraphEventWaitNodeSetEvent',
        'cuGraphExecEventRecordNodeSetEvent',
        'cuGraphExecEventWaitNodeSetEvent',
        'cuArrayGetPlane',
        'cuMemAllocAsync',
        'cuMemAllocAsync_ptsz',
        'cuMemFreeAsync',
        'cuMemFreeAsync_ptsz',
        'cuMemPoolTrimTo',
        'cuMemPoolSetAttribute',
        'cuMemPoolGetAttribute',
        'cuMemPoolSetAccess',
        'cuDeviceGetDefaultMemPool',
        'cuMemPoolCreate',
        'cuMemPoolDestroy',
        'cuDeviceSetMemPool',
        'cuDeviceGetMemPool',
        'cuMemAllocFromPoolAsync',
        'cuMemAllocFromPoolAsync_ptsz',
        'cuMemPoolExportToShareableHandle',
        'cuMemPoolImportFromShareableHandle',
        'cuMemPoolExportPointer',
        'cuMemPoolImportPointer',
        'cuMemPoolGetAccess',
        'cuGraphAddExternalSemaphoresSignalNode',
        'cuGraphExternalSemaphoresSignalNodeGetParams',
        'cuGraphExternalSemaphoresSignalNodeSetParams',
        'cuGraphAddExternalSemaphoresWaitNode',
        'cuGraphExternalSemaphoresWaitNodeGetParams',
        'cuGraphExternalSemaphoresWaitNodeSetParams',
        'cuGraphExecExternalSemaphoresSignalNodeSetParams',
        'cuGraphExecExternalSemaphoresWaitNodeSetParams',
        'cuGetProcAddress',
        'cuFlushGPUDirectRDMAWrites',
        'cuGraphDebugDotPrint',
        'cuStreamGetCaptureInfo_v2',
        'cuStreamGetCaptureInfo_v2_ptsz',
        'cuStreamUpdateCaptureDependencies',
        'cuStreamUpdateCaptureDependencies_ptsz',
        'cuUserObjectCreate',
        'cuUserObjectRetain',
        'cuUserObjectRelease',
        'cuGraphRetainUserObject',
        'cuGraphReleaseUserObject',
        'cuGraphAddMemAllocNode',
        'cuGraphAddMemFreeNode',
        'cuDeviceGraphMemTrim',
        'cuDeviceGetGraphMemAttribute',
        'cuDeviceSetGraphMemAttribute',
        'cuGraphInstantiateWithFlags',
        'cuDeviceGetExecAffinitySupport',
        'cuCtxCreate_v3',
        'cuCtxGetExecAffinity',
        'cuDeviceGetUuid_v2',
        'cuGraphMemAllocNodeGetParams',
        'cuGraphMemFreeNodeGetParams',
        'cuGraphNodeSetEnabled',
        'cuGraphNodeGetEnabled',
        'cuLaunchKernelEx',
        'cuLaunchKernelEx_ptsz',
        'cuArrayGetMemoryRequirements',
        'cuMipmappedArrayGetMemoryRequirements',
        'cuGraphInstantiateWithParams',
        'cuGraphInstantiateWithParams_ptsz',
        'cuGraphExecGetFlags',
        'cuStreamWaitValue32_v2',
        'cuStreamWaitValue32_v2_ptsz',
        'cuStreamWaitValue64_v2',
        'cuStreamWaitValue64_v2_ptsz',
        'cuStreamWriteValue32_v2',
        'cuStreamWriteValue32_v2_ptsz',
        'cuStreamWriteValue64_v2',
        'cuStreamWriteValue64_v2_ptsz',
    ),
    start=431,
):
    _cbid_name = 'CUPTI_DRIVER_TRACE_CBID_' + _api
    globals()[_cbid_name] = enum_CUpti_driver_api_trace_cbid_enum.define(_cbid_name, _cbid)
# Drop the loop temporaries so the module namespace matches the fully
# expanded per-line form exactly.
del _cbid, _api, _cbid_name
CUPTI_DRIVER_TRACE_CBID_cuStreamBatchMemOp_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuStreamBatchMemOp_v2', 667)
CUPTI_DRIVER_TRACE_CBID_cuStreamBatchMemOp_v2_ptsz = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuStreamBatchMemOp_v2_ptsz', 668)
CUPTI_DRIVER_TRACE_CBID_cuGraphAddBatchMemOpNode = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGraphAddBatchMemOpNode', 669)
CUPTI_DRIVER_TRACE_CBID_cuGraphBatchMemOpNodeGetParams = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGraphBatchMemOpNodeGetParams', 670)
CUPTI_DRIVER_TRACE_CBID_cuGraphBatchMemOpNodeSetParams = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGraphBatchMemOpNodeSetParams', 671)
CUPTI_DRIVER_TRACE_CBID_cuGraphExecBatchMemOpNodeSetParams = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGraphExecBatchMemOpNodeSetParams', 672)
CUPTI_DRIVER_TRACE_CBID_cuModuleGetLoadingMode = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuModuleGetLoadingMode', 673)
CUPTI_DRIVER_TRACE_CBID_cuMemGetHandleForAddressRange = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemGetHandleForAddressRange', 674)
CUPTI_DRIVER_TRACE_CBID_cuOccupancyMaxPotentialClusterSize = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuOccupancyMaxPotentialClusterSize', 675)
CUPTI_DRIVER_TRACE_CBID_cuOccupancyMaxActiveClusters = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuOccupancyMaxActiveClusters', 676)
CUPTI_DRIVER_TRACE_CBID_cuGetProcAddress_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGetProcAddress_v2', 677)
CUPTI_DRIVER_TRACE_CBID_cuLibraryLoadData = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuLibraryLoadData', 678)
CUPTI_DRIVER_TRACE_CBID_cuLibraryLoadFromFile = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuLibraryLoadFromFile', 679)
CUPTI_DRIVER_TRACE_CBID_cuLibraryUnload = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuLibraryUnload', 680)
CUPTI_DRIVER_TRACE_CBID_cuLibraryGetKernel = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuLibraryGetKernel', 681)
CUPTI_DRIVER_TRACE_CBID_cuLibraryGetModule = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuLibraryGetModule', 682)
CUPTI_DRIVER_TRACE_CBID_cuKernelGetFunction = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuKernelGetFunction', 683)
CUPTI_DRIVER_TRACE_CBID_cuLibraryGetGlobal = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuLibraryGetGlobal', 684)
CUPTI_DRIVER_TRACE_CBID_cuLibraryGetManaged = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuLibraryGetManaged', 685)
CUPTI_DRIVER_TRACE_CBID_cuKernelGetAttribute = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuKernelGetAttribute', 686)
CUPTI_DRIVER_TRACE_CBID_cuKernelSetAttribute = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuKernelSetAttribute', 687)
CUPTI_DRIVER_TRACE_CBID_cuKernelSetCacheConfig = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuKernelSetCacheConfig', 688)
CUPTI_DRIVER_TRACE_CBID_cuGraphAddKernelNode_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGraphAddKernelNode_v2', 689)
CUPTI_DRIVER_TRACE_CBID_cuGraphKernelNodeGetParams_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGraphKernelNodeGetParams_v2', 690)
CUPTI_DRIVER_TRACE_CBID_cuGraphKernelNodeSetParams_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGraphKernelNodeSetParams_v2', 691)
CUPTI_DRIVER_TRACE_CBID_cuGraphExecKernelNodeSetParams_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGraphExecKernelNodeSetParams_v2', 692)
CUPTI_DRIVER_TRACE_CBID_cuStreamGetId = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuStreamGetId', 693)
CUPTI_DRIVER_TRACE_CBID_cuStreamGetId_ptsz = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuStreamGetId_ptsz', 694)
CUPTI_DRIVER_TRACE_CBID_cuCtxGetId = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuCtxGetId', 695)
CUPTI_DRIVER_TRACE_CBID_cuGraphExecUpdate_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGraphExecUpdate_v2', 696)
CUPTI_DRIVER_TRACE_CBID_cuTensorMapEncodeTiled = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuTensorMapEncodeTiled', 697)
CUPTI_DRIVER_TRACE_CBID_cuTensorMapEncodeIm2col = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuTensorMapEncodeIm2col', 698)
CUPTI_DRIVER_TRACE_CBID_cuTensorMapReplaceAddress = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuTensorMapReplaceAddress', 699)
CUPTI_DRIVER_TRACE_CBID_cuLibraryGetUnifiedFunction = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuLibraryGetUnifiedFunction', 700)
CUPTI_DRIVER_TRACE_CBID_cuCoredumpGetAttribute = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuCoredumpGetAttribute', 701)
CUPTI_DRIVER_TRACE_CBID_cuCoredumpGetAttributeGlobal = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuCoredumpGetAttributeGlobal', 702)
CUPTI_DRIVER_TRACE_CBID_cuCoredumpSetAttribute = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuCoredumpSetAttribute', 703)
CUPTI_DRIVER_TRACE_CBID_cuCoredumpSetAttributeGlobal = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuCoredumpSetAttributeGlobal', 704)
CUPTI_DRIVER_TRACE_CBID_cuCtxSetFlags = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuCtxSetFlags', 705)
CUPTI_DRIVER_TRACE_CBID_cuMulticastCreate = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMulticastCreate', 706)
CUPTI_DRIVER_TRACE_CBID_cuMulticastAddDevice = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMulticastAddDevice', 707)
CUPTI_DRIVER_TRACE_CBID_cuMulticastBindMem = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMulticastBindMem', 708)
CUPTI_DRIVER_TRACE_CBID_cuMulticastBindAddr = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMulticastBindAddr', 709)
CUPTI_DRIVER_TRACE_CBID_cuMulticastUnbind = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMulticastUnbind', 710)
CUPTI_DRIVER_TRACE_CBID_cuMulticastGetGranularity = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMulticastGetGranularity', 711)
CUPTI_DRIVER_TRACE_CBID_cuGraphAddNode = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGraphAddNode', 712)
CUPTI_DRIVER_TRACE_CBID_cuGraphNodeSetParams = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGraphNodeSetParams', 713)
CUPTI_DRIVER_TRACE_CBID_cuGraphExecNodeSetParams = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGraphExecNodeSetParams', 714)
CUPTI_DRIVER_TRACE_CBID_cuMemAdvise_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemAdvise_v2', 715)
CUPTI_DRIVER_TRACE_CBID_cuMemPrefetchAsync_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemPrefetchAsync_v2', 716)
CUPTI_DRIVER_TRACE_CBID_cuMemPrefetchAsync_v2_ptsz = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemPrefetchAsync_v2_ptsz', 717)
CUPTI_DRIVER_TRACE_CBID_cuFuncGetName = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuFuncGetName', 718)
CUPTI_DRIVER_TRACE_CBID_cuKernelGetName = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuKernelGetName', 719)
CUPTI_DRIVER_TRACE_CBID_cuStreamBeginCaptureToGraph = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuStreamBeginCaptureToGraph', 720)
CUPTI_DRIVER_TRACE_CBID_cuStreamBeginCaptureToGraph_ptsz = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuStreamBeginCaptureToGraph_ptsz', 721)
CUPTI_DRIVER_TRACE_CBID_cuGraphConditionalHandleCreate = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGraphConditionalHandleCreate', 722)
CUPTI_DRIVER_TRACE_CBID_cuGraphAddNode_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGraphAddNode_v2', 723)
CUPTI_DRIVER_TRACE_CBID_cuGraphGetEdges_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGraphGetEdges_v2', 724)
CUPTI_DRIVER_TRACE_CBID_cuGraphNodeGetDependencies_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGraphNodeGetDependencies_v2', 725)
CUPTI_DRIVER_TRACE_CBID_cuGraphNodeGetDependentNodes_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGraphNodeGetDependentNodes_v2', 726)
CUPTI_DRIVER_TRACE_CBID_cuGraphAddDependencies_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGraphAddDependencies_v2', 727)
CUPTI_DRIVER_TRACE_CBID_cuGraphRemoveDependencies_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGraphRemoveDependencies_v2', 728)
CUPTI_DRIVER_TRACE_CBID_cuStreamGetCaptureInfo_v3 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuStreamGetCaptureInfo_v3', 729)
CUPTI_DRIVER_TRACE_CBID_cuStreamGetCaptureInfo_v3_ptsz = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuStreamGetCaptureInfo_v3_ptsz', 730)
CUPTI_DRIVER_TRACE_CBID_cuStreamUpdateCaptureDependencies_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuStreamUpdateCaptureDependencies_v2', 731)
CUPTI_DRIVER_TRACE_CBID_cuStreamUpdateCaptureDependencies_v2_ptsz = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuStreamUpdateCaptureDependencies_v2_ptsz', 732)
CUPTI_DRIVER_TRACE_CBID_cuFuncGetParamInfo = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuFuncGetParamInfo', 733)
CUPTI_DRIVER_TRACE_CBID_cuKernelGetParamInfo = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuKernelGetParamInfo', 734)
CUPTI_DRIVER_TRACE_CBID_cuDeviceRegisterAsyncNotification = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuDeviceRegisterAsyncNotification', 735)
CUPTI_DRIVER_TRACE_CBID_cuDeviceUnregisterAsyncNotification = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuDeviceUnregisterAsyncNotification', 736)
CUPTI_DRIVER_TRACE_CBID_cuModuleGetFunctionCount = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuModuleGetFunctionCount', 737)
CUPTI_DRIVER_TRACE_CBID_cuModuleEnumerateFunctions = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuModuleEnumerateFunctions', 738)
CUPTI_DRIVER_TRACE_CBID_cuLibraryGetKernelCount = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuLibraryGetKernelCount', 739)
CUPTI_DRIVER_TRACE_CBID_cuLibraryEnumerateKernels = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuLibraryEnumerateKernels', 740)
CUPTI_DRIVER_TRACE_CBID_cuFuncIsLoaded = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuFuncIsLoaded', 741)
CUPTI_DRIVER_TRACE_CBID_cuFuncLoad = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuFuncLoad', 742)
CUPTI_DRIVER_TRACE_CBID_cuGreenCtxCreate = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGreenCtxCreate', 743)
CUPTI_DRIVER_TRACE_CBID_cuGreenCtxDestroy = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGreenCtxDestroy', 744)
CUPTI_DRIVER_TRACE_CBID_cuDeviceGetDevResource = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuDeviceGetDevResource', 745)
CUPTI_DRIVER_TRACE_CBID_cuCtxGetDevResource = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuCtxGetDevResource', 746)
CUPTI_DRIVER_TRACE_CBID_cuGreenCtxGetDevResource = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGreenCtxGetDevResource', 747)
CUPTI_DRIVER_TRACE_CBID_cuDevResourceGenerateDesc = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuDevResourceGenerateDesc', 748)
CUPTI_DRIVER_TRACE_CBID_cuGreenCtxRecordEvent = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGreenCtxRecordEvent', 749)
CUPTI_DRIVER_TRACE_CBID_cuGreenCtxWaitEvent = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGreenCtxWaitEvent', 750)
CUPTI_DRIVER_TRACE_CBID_cuDevSmResourceSplitByCount = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuDevSmResourceSplitByCount', 751)
CUPTI_DRIVER_TRACE_CBID_cuStreamGetGreenCtx = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuStreamGetGreenCtx', 752)
CUPTI_DRIVER_TRACE_CBID_cuCtxFromGreenCtx = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuCtxFromGreenCtx', 753)
CUPTI_DRIVER_TRACE_CBID_cuKernelGetLibrary = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuKernelGetLibrary', 754)
CUPTI_DRIVER_TRACE_CBID_cuCtxRecordEvent = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuCtxRecordEvent', 755)
CUPTI_DRIVER_TRACE_CBID_cuCtxWaitEvent = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuCtxWaitEvent', 756)
CUPTI_DRIVER_TRACE_CBID_cuCtxCreate_v4 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuCtxCreate_v4', 757)
CUPTI_DRIVER_TRACE_CBID_cuGreenCtxStreamCreate = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuGreenCtxStreamCreate', 758)
CUPTI_DRIVER_TRACE_CBID_cuStreamGetCtx_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuStreamGetCtx_v2', 759)
CUPTI_DRIVER_TRACE_CBID_cuStreamGetCtx_v2_ptsz = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuStreamGetCtx_v2_ptsz', 760)
CUPTI_DRIVER_TRACE_CBID_cuMemBatchDecompressAsync = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemBatchDecompressAsync', 761)
CUPTI_DRIVER_TRACE_CBID_cuMemBatchDecompressAsync_ptsz = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemBatchDecompressAsync_ptsz', 762)
CUPTI_DRIVER_TRACE_CBID_cuLogsRegisterCallback = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuLogsRegisterCallback', 763)
CUPTI_DRIVER_TRACE_CBID_cuLogsUnregisterCallback = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuLogsUnregisterCallback', 764)
CUPTI_DRIVER_TRACE_CBID_cuLogsCurrent = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuLogsCurrent', 765)
CUPTI_DRIVER_TRACE_CBID_cuLogsDumpToFile = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuLogsDumpToFile', 766)
CUPTI_DRIVER_TRACE_CBID_cuLogsDumpToMemory = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuLogsDumpToMemory', 767)
CUPTI_DRIVER_TRACE_CBID_cuCheckpointProcessGetRestoreThreadId = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuCheckpointProcessGetRestoreThreadId', 768)
CUPTI_DRIVER_TRACE_CBID_cuCheckpointProcessGetState = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuCheckpointProcessGetState', 769)
CUPTI_DRIVER_TRACE_CBID_cuCheckpointProcessLock = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuCheckpointProcessLock', 770)
CUPTI_DRIVER_TRACE_CBID_cuCheckpointProcessCheckpoint = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuCheckpointProcessCheckpoint', 771)
CUPTI_DRIVER_TRACE_CBID_cuCheckpointProcessRestore = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuCheckpointProcessRestore', 772)
CUPTI_DRIVER_TRACE_CBID_cuCheckpointProcessUnlock = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuCheckpointProcessUnlock', 773)
CUPTI_DRIVER_TRACE_CBID_cuStreamGetDevice = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuStreamGetDevice', 774)
CUPTI_DRIVER_TRACE_CBID_cuStreamGetDevice_ptsz = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuStreamGetDevice_ptsz', 775)
CUPTI_DRIVER_TRACE_CBID_cuMemcpyBatchAsync = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpyBatchAsync', 776)
CUPTI_DRIVER_TRACE_CBID_cuMemcpyBatchAsync_ptsz = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpyBatchAsync_ptsz', 777)
CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DBatchAsync = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DBatchAsync', 778)
CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DBatchAsync_ptsz = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DBatchAsync_ptsz', 779)
CUPTI_DRIVER_TRACE_CBID_cuEventElapsedTime_v2 = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuEventElapsedTime_v2', 780)
CUPTI_DRIVER_TRACE_CBID_cuTensorMapEncodeIm2colWide = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_cuTensorMapEncodeIm2colWide', 781)
CUPTI_DRIVER_TRACE_CBID_SIZE = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_SIZE', 782)
CUPTI_DRIVER_TRACE_CBID_FORCE_INT = enum_CUpti_driver_api_trace_cbid_enum.define('CUPTI_DRIVER_TRACE_CBID_FORCE_INT', 2147483647)
CUpti_driver_api_trace_cbid: TypeAlias = enum_CUpti_driver_api_trace_cbid_enum
class enum_CUpti_runtime_api_trace_cbid_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
# --- CUPTI runtime-API callback IDs (members 0-208 shown here; the list
# continues after this section). Auto-generated: each statement registers one
# member on enum_CUpti_runtime_api_trace_cbid_enum and binds it at module
# level. The `_vXXXX` suffix encodes the CUDA toolkit version (e.g. v3020 =
# CUDA 3.2) in which that runtime entry point's traced signature was
# introduced. Do not hand-edit — values must match NVIDIA's
# cupti_runtime_cbid.h exactly. ---
CUPTI_RUNTIME_TRACE_CBID_INVALID = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_INVALID', 0)
CUPTI_RUNTIME_TRACE_CBID_cudaDriverGetVersion_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaDriverGetVersion_v3020', 1)
CUPTI_RUNTIME_TRACE_CBID_cudaRuntimeGetVersion_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaRuntimeGetVersion_v3020', 2)
CUPTI_RUNTIME_TRACE_CBID_cudaGetDeviceCount_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGetDeviceCount_v3020', 3)
CUPTI_RUNTIME_TRACE_CBID_cudaGetDeviceProperties_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGetDeviceProperties_v3020', 4)
CUPTI_RUNTIME_TRACE_CBID_cudaChooseDevice_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaChooseDevice_v3020', 5)
CUPTI_RUNTIME_TRACE_CBID_cudaGetChannelDesc_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGetChannelDesc_v3020', 6)
CUPTI_RUNTIME_TRACE_CBID_cudaCreateChannelDesc_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaCreateChannelDesc_v3020', 7)
CUPTI_RUNTIME_TRACE_CBID_cudaConfigureCall_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaConfigureCall_v3020', 8)
CUPTI_RUNTIME_TRACE_CBID_cudaSetupArgument_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaSetupArgument_v3020', 9)
CUPTI_RUNTIME_TRACE_CBID_cudaGetLastError_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGetLastError_v3020', 10)
CUPTI_RUNTIME_TRACE_CBID_cudaPeekAtLastError_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaPeekAtLastError_v3020', 11)
CUPTI_RUNTIME_TRACE_CBID_cudaGetErrorString_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGetErrorString_v3020', 12)
CUPTI_RUNTIME_TRACE_CBID_cudaLaunch_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaLaunch_v3020', 13)
CUPTI_RUNTIME_TRACE_CBID_cudaFuncSetCacheConfig_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaFuncSetCacheConfig_v3020', 14)
CUPTI_RUNTIME_TRACE_CBID_cudaFuncGetAttributes_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaFuncGetAttributes_v3020', 15)
CUPTI_RUNTIME_TRACE_CBID_cudaSetDevice_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaSetDevice_v3020', 16)
CUPTI_RUNTIME_TRACE_CBID_cudaGetDevice_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGetDevice_v3020', 17)
CUPTI_RUNTIME_TRACE_CBID_cudaSetValidDevices_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaSetValidDevices_v3020', 18)
CUPTI_RUNTIME_TRACE_CBID_cudaSetDeviceFlags_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaSetDeviceFlags_v3020', 19)
# Memory allocation / transfer entry points (CUDA 3.2 signatures).
CUPTI_RUNTIME_TRACE_CBID_cudaMalloc_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMalloc_v3020', 20)
CUPTI_RUNTIME_TRACE_CBID_cudaMallocPitch_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMallocPitch_v3020', 21)
CUPTI_RUNTIME_TRACE_CBID_cudaFree_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaFree_v3020', 22)
CUPTI_RUNTIME_TRACE_CBID_cudaMallocArray_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMallocArray_v3020', 23)
CUPTI_RUNTIME_TRACE_CBID_cudaFreeArray_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaFreeArray_v3020', 24)
CUPTI_RUNTIME_TRACE_CBID_cudaMallocHost_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMallocHost_v3020', 25)
CUPTI_RUNTIME_TRACE_CBID_cudaFreeHost_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaFreeHost_v3020', 26)
CUPTI_RUNTIME_TRACE_CBID_cudaHostAlloc_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaHostAlloc_v3020', 27)
CUPTI_RUNTIME_TRACE_CBID_cudaHostGetDevicePointer_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaHostGetDevicePointer_v3020', 28)
CUPTI_RUNTIME_TRACE_CBID_cudaHostGetFlags_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaHostGetFlags_v3020', 29)
CUPTI_RUNTIME_TRACE_CBID_cudaMemGetInfo_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemGetInfo_v3020', 30)
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy_v3020', 31)
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2D_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2D_v3020', 32)
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyToArray_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyToArray_v3020', 33)
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DToArray_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DToArray_v3020', 34)
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyFromArray_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyFromArray_v3020', 35)
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DFromArray_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DFromArray_v3020', 36)
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyArrayToArray_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyArrayToArray_v3020', 37)
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DArrayToArray_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DArrayToArray_v3020', 38)
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyToSymbol_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyToSymbol_v3020', 39)
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyFromSymbol_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyFromSymbol_v3020', 40)
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyAsync_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyAsync_v3020', 41)
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyToArrayAsync_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyToArrayAsync_v3020', 42)
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyFromArrayAsync_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyFromArrayAsync_v3020', 43)
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DAsync_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DAsync_v3020', 44)
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DToArrayAsync_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DToArrayAsync_v3020', 45)
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DFromArrayAsync_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DFromArrayAsync_v3020', 46)
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyToSymbolAsync_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyToSymbolAsync_v3020', 47)
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyFromSymbolAsync_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyFromSymbolAsync_v3020', 48)
CUPTI_RUNTIME_TRACE_CBID_cudaMemset_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemset_v3020', 49)
CUPTI_RUNTIME_TRACE_CBID_cudaMemset2D_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemset2D_v3020', 50)
CUPTI_RUNTIME_TRACE_CBID_cudaMemsetAsync_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemsetAsync_v3020', 51)
CUPTI_RUNTIME_TRACE_CBID_cudaMemset2DAsync_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemset2DAsync_v3020', 52)
CUPTI_RUNTIME_TRACE_CBID_cudaGetSymbolAddress_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGetSymbolAddress_v3020', 53)
CUPTI_RUNTIME_TRACE_CBID_cudaGetSymbolSize_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGetSymbolSize_v3020', 54)
# Texture / surface reference APIs (legacy; removed in later CUDA versions,
# but the cbid slots remain reserved).
CUPTI_RUNTIME_TRACE_CBID_cudaBindTexture_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaBindTexture_v3020', 55)
CUPTI_RUNTIME_TRACE_CBID_cudaBindTexture2D_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaBindTexture2D_v3020', 56)
CUPTI_RUNTIME_TRACE_CBID_cudaBindTextureToArray_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaBindTextureToArray_v3020', 57)
CUPTI_RUNTIME_TRACE_CBID_cudaUnbindTexture_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaUnbindTexture_v3020', 58)
CUPTI_RUNTIME_TRACE_CBID_cudaGetTextureAlignmentOffset_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGetTextureAlignmentOffset_v3020', 59)
CUPTI_RUNTIME_TRACE_CBID_cudaGetTextureReference_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGetTextureReference_v3020', 60)
CUPTI_RUNTIME_TRACE_CBID_cudaBindSurfaceToArray_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaBindSurfaceToArray_v3020', 61)
CUPTI_RUNTIME_TRACE_CBID_cudaGetSurfaceReference_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGetSurfaceReference_v3020', 62)
# Graphics interop: OpenGL, VDPAU, Direct3D 11/10/9.
CUPTI_RUNTIME_TRACE_CBID_cudaGLSetGLDevice_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGLSetGLDevice_v3020', 63)
CUPTI_RUNTIME_TRACE_CBID_cudaGLRegisterBufferObject_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGLRegisterBufferObject_v3020', 64)
CUPTI_RUNTIME_TRACE_CBID_cudaGLMapBufferObject_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGLMapBufferObject_v3020', 65)
CUPTI_RUNTIME_TRACE_CBID_cudaGLUnmapBufferObject_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGLUnmapBufferObject_v3020', 66)
CUPTI_RUNTIME_TRACE_CBID_cudaGLUnregisterBufferObject_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGLUnregisterBufferObject_v3020', 67)
CUPTI_RUNTIME_TRACE_CBID_cudaGLSetBufferObjectMapFlags_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGLSetBufferObjectMapFlags_v3020', 68)
CUPTI_RUNTIME_TRACE_CBID_cudaGLMapBufferObjectAsync_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGLMapBufferObjectAsync_v3020', 69)
CUPTI_RUNTIME_TRACE_CBID_cudaGLUnmapBufferObjectAsync_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGLUnmapBufferObjectAsync_v3020', 70)
CUPTI_RUNTIME_TRACE_CBID_cudaWGLGetDevice_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaWGLGetDevice_v3020', 71)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsGLRegisterImage_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsGLRegisterImage_v3020', 72)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsGLRegisterBuffer_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsGLRegisterBuffer_v3020', 73)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsUnregisterResource_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsUnregisterResource_v3020', 74)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsResourceSetMapFlags_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsResourceSetMapFlags_v3020', 75)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsMapResources_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsMapResources_v3020', 76)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsUnmapResources_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsUnmapResources_v3020', 77)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsResourceGetMappedPointer_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsResourceGetMappedPointer_v3020', 78)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsSubResourceGetMappedArray_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsSubResourceGetMappedArray_v3020', 79)
CUPTI_RUNTIME_TRACE_CBID_cudaVDPAUGetDevice_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaVDPAUGetDevice_v3020', 80)
CUPTI_RUNTIME_TRACE_CBID_cudaVDPAUSetVDPAUDevice_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaVDPAUSetVDPAUDevice_v3020', 81)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsVDPAURegisterVideoSurface_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsVDPAURegisterVideoSurface_v3020', 82)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsVDPAURegisterOutputSurface_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsVDPAURegisterOutputSurface_v3020', 83)
CUPTI_RUNTIME_TRACE_CBID_cudaD3D11GetDevice_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaD3D11GetDevice_v3020', 84)
CUPTI_RUNTIME_TRACE_CBID_cudaD3D11GetDevices_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaD3D11GetDevices_v3020', 85)
CUPTI_RUNTIME_TRACE_CBID_cudaD3D11SetDirect3DDevice_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaD3D11SetDirect3DDevice_v3020', 86)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsD3D11RegisterResource_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsD3D11RegisterResource_v3020', 87)
CUPTI_RUNTIME_TRACE_CBID_cudaD3D10GetDevice_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaD3D10GetDevice_v3020', 88)
CUPTI_RUNTIME_TRACE_CBID_cudaD3D10GetDevices_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaD3D10GetDevices_v3020', 89)
CUPTI_RUNTIME_TRACE_CBID_cudaD3D10SetDirect3DDevice_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaD3D10SetDirect3DDevice_v3020', 90)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsD3D10RegisterResource_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsD3D10RegisterResource_v3020', 91)
CUPTI_RUNTIME_TRACE_CBID_cudaD3D10RegisterResource_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaD3D10RegisterResource_v3020', 92)
CUPTI_RUNTIME_TRACE_CBID_cudaD3D10UnregisterResource_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaD3D10UnregisterResource_v3020', 93)
CUPTI_RUNTIME_TRACE_CBID_cudaD3D10MapResources_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaD3D10MapResources_v3020', 94)
CUPTI_RUNTIME_TRACE_CBID_cudaD3D10UnmapResources_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaD3D10UnmapResources_v3020', 95)
CUPTI_RUNTIME_TRACE_CBID_cudaD3D10ResourceSetMapFlags_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaD3D10ResourceSetMapFlags_v3020', 96)
CUPTI_RUNTIME_TRACE_CBID_cudaD3D10ResourceGetSurfaceDimensions_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaD3D10ResourceGetSurfaceDimensions_v3020', 97)
CUPTI_RUNTIME_TRACE_CBID_cudaD3D10ResourceGetMappedArray_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaD3D10ResourceGetMappedArray_v3020', 98)
CUPTI_RUNTIME_TRACE_CBID_cudaD3D10ResourceGetMappedPointer_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaD3D10ResourceGetMappedPointer_v3020', 99)
CUPTI_RUNTIME_TRACE_CBID_cudaD3D10ResourceGetMappedSize_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaD3D10ResourceGetMappedSize_v3020', 100)
CUPTI_RUNTIME_TRACE_CBID_cudaD3D10ResourceGetMappedPitch_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaD3D10ResourceGetMappedPitch_v3020', 101)
CUPTI_RUNTIME_TRACE_CBID_cudaD3D9GetDevice_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaD3D9GetDevice_v3020', 102)
CUPTI_RUNTIME_TRACE_CBID_cudaD3D9GetDevices_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaD3D9GetDevices_v3020', 103)
CUPTI_RUNTIME_TRACE_CBID_cudaD3D9SetDirect3DDevice_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaD3D9SetDirect3DDevice_v3020', 104)
CUPTI_RUNTIME_TRACE_CBID_cudaD3D9GetDirect3DDevice_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaD3D9GetDirect3DDevice_v3020', 105)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsD3D9RegisterResource_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsD3D9RegisterResource_v3020', 106)
CUPTI_RUNTIME_TRACE_CBID_cudaD3D9RegisterResource_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaD3D9RegisterResource_v3020', 107)
CUPTI_RUNTIME_TRACE_CBID_cudaD3D9UnregisterResource_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaD3D9UnregisterResource_v3020', 108)
CUPTI_RUNTIME_TRACE_CBID_cudaD3D9MapResources_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaD3D9MapResources_v3020', 109)
CUPTI_RUNTIME_TRACE_CBID_cudaD3D9UnmapResources_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaD3D9UnmapResources_v3020', 110)
CUPTI_RUNTIME_TRACE_CBID_cudaD3D9ResourceSetMapFlags_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaD3D9ResourceSetMapFlags_v3020', 111)
CUPTI_RUNTIME_TRACE_CBID_cudaD3D9ResourceGetSurfaceDimensions_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaD3D9ResourceGetSurfaceDimensions_v3020', 112)
CUPTI_RUNTIME_TRACE_CBID_cudaD3D9ResourceGetMappedArray_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaD3D9ResourceGetMappedArray_v3020', 113)
CUPTI_RUNTIME_TRACE_CBID_cudaD3D9ResourceGetMappedPointer_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaD3D9ResourceGetMappedPointer_v3020', 114)
CUPTI_RUNTIME_TRACE_CBID_cudaD3D9ResourceGetMappedSize_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaD3D9ResourceGetMappedSize_v3020', 115)
CUPTI_RUNTIME_TRACE_CBID_cudaD3D9ResourceGetMappedPitch_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaD3D9ResourceGetMappedPitch_v3020', 116)
CUPTI_RUNTIME_TRACE_CBID_cudaD3D9Begin_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaD3D9Begin_v3020', 117)
CUPTI_RUNTIME_TRACE_CBID_cudaD3D9End_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaD3D9End_v3020', 118)
CUPTI_RUNTIME_TRACE_CBID_cudaD3D9RegisterVertexBuffer_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaD3D9RegisterVertexBuffer_v3020', 119)
CUPTI_RUNTIME_TRACE_CBID_cudaD3D9UnregisterVertexBuffer_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaD3D9UnregisterVertexBuffer_v3020', 120)
CUPTI_RUNTIME_TRACE_CBID_cudaD3D9MapVertexBuffer_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaD3D9MapVertexBuffer_v3020', 121)
CUPTI_RUNTIME_TRACE_CBID_cudaD3D9UnmapVertexBuffer_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaD3D9UnmapVertexBuffer_v3020', 122)
# Thread / stream / event management (CUDA 3.2 signatures).
CUPTI_RUNTIME_TRACE_CBID_cudaThreadExit_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaThreadExit_v3020', 123)
CUPTI_RUNTIME_TRACE_CBID_cudaSetDoubleForDevice_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaSetDoubleForDevice_v3020', 124)
CUPTI_RUNTIME_TRACE_CBID_cudaSetDoubleForHost_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaSetDoubleForHost_v3020', 125)
CUPTI_RUNTIME_TRACE_CBID_cudaThreadSynchronize_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaThreadSynchronize_v3020', 126)
CUPTI_RUNTIME_TRACE_CBID_cudaThreadGetLimit_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaThreadGetLimit_v3020', 127)
CUPTI_RUNTIME_TRACE_CBID_cudaThreadSetLimit_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaThreadSetLimit_v3020', 128)
CUPTI_RUNTIME_TRACE_CBID_cudaStreamCreate_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaStreamCreate_v3020', 129)
CUPTI_RUNTIME_TRACE_CBID_cudaStreamDestroy_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaStreamDestroy_v3020', 130)
CUPTI_RUNTIME_TRACE_CBID_cudaStreamSynchronize_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaStreamSynchronize_v3020', 131)
CUPTI_RUNTIME_TRACE_CBID_cudaStreamQuery_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaStreamQuery_v3020', 132)
CUPTI_RUNTIME_TRACE_CBID_cudaEventCreate_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaEventCreate_v3020', 133)
CUPTI_RUNTIME_TRACE_CBID_cudaEventCreateWithFlags_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaEventCreateWithFlags_v3020', 134)
CUPTI_RUNTIME_TRACE_CBID_cudaEventRecord_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaEventRecord_v3020', 135)
CUPTI_RUNTIME_TRACE_CBID_cudaEventDestroy_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaEventDestroy_v3020', 136)
CUPTI_RUNTIME_TRACE_CBID_cudaEventSynchronize_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaEventSynchronize_v3020', 137)
CUPTI_RUNTIME_TRACE_CBID_cudaEventQuery_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaEventQuery_v3020', 138)
CUPTI_RUNTIME_TRACE_CBID_cudaEventElapsedTime_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaEventElapsedTime_v3020', 139)
CUPTI_RUNTIME_TRACE_CBID_cudaMalloc3D_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMalloc3D_v3020', 140)
CUPTI_RUNTIME_TRACE_CBID_cudaMalloc3DArray_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMalloc3DArray_v3020', 141)
CUPTI_RUNTIME_TRACE_CBID_cudaMemset3D_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemset3D_v3020', 142)
CUPTI_RUNTIME_TRACE_CBID_cudaMemset3DAsync_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemset3DAsync_v3020', 143)
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3D_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3D_v3020', 144)
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3DAsync_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3DAsync_v3020', 145)
CUPTI_RUNTIME_TRACE_CBID_cudaThreadSetCacheConfig_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaThreadSetCacheConfig_v3020', 146)
CUPTI_RUNTIME_TRACE_CBID_cudaStreamWaitEvent_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaStreamWaitEvent_v3020', 147)
CUPTI_RUNTIME_TRACE_CBID_cudaD3D11GetDirect3DDevice_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaD3D11GetDirect3DDevice_v3020', 148)
CUPTI_RUNTIME_TRACE_CBID_cudaD3D10GetDirect3DDevice_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaD3D10GetDirect3DDevice_v3020', 149)
CUPTI_RUNTIME_TRACE_CBID_cudaThreadGetCacheConfig_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaThreadGetCacheConfig_v3020', 150)
# Entry points added in CUDA 4.0 (v4000): unified addressing, host
# registration, and peer-to-peer access/copies.
CUPTI_RUNTIME_TRACE_CBID_cudaPointerGetAttributes_v4000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaPointerGetAttributes_v4000', 151)
CUPTI_RUNTIME_TRACE_CBID_cudaHostRegister_v4000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaHostRegister_v4000', 152)
CUPTI_RUNTIME_TRACE_CBID_cudaHostUnregister_v4000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaHostUnregister_v4000', 153)
CUPTI_RUNTIME_TRACE_CBID_cudaDeviceCanAccessPeer_v4000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaDeviceCanAccessPeer_v4000', 154)
CUPTI_RUNTIME_TRACE_CBID_cudaDeviceEnablePeerAccess_v4000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaDeviceEnablePeerAccess_v4000', 155)
CUPTI_RUNTIME_TRACE_CBID_cudaDeviceDisablePeerAccess_v4000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaDeviceDisablePeerAccess_v4000', 156)
CUPTI_RUNTIME_TRACE_CBID_cudaPeerRegister_v4000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaPeerRegister_v4000', 157)
CUPTI_RUNTIME_TRACE_CBID_cudaPeerUnregister_v4000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaPeerUnregister_v4000', 158)
CUPTI_RUNTIME_TRACE_CBID_cudaPeerGetDevicePointer_v4000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaPeerGetDevicePointer_v4000', 159)
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyPeer_v4000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyPeer_v4000', 160)
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyPeerAsync_v4000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyPeerAsync_v4000', 161)
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3DPeer_v4000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3DPeer_v4000', 162)
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3DPeerAsync_v4000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3DPeerAsync_v4000', 163)
CUPTI_RUNTIME_TRACE_CBID_cudaDeviceReset_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaDeviceReset_v3020', 164)
CUPTI_RUNTIME_TRACE_CBID_cudaDeviceSynchronize_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaDeviceSynchronize_v3020', 165)
CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetLimit_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetLimit_v3020', 166)
CUPTI_RUNTIME_TRACE_CBID_cudaDeviceSetLimit_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaDeviceSetLimit_v3020', 167)
CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetCacheConfig_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetCacheConfig_v3020', 168)
CUPTI_RUNTIME_TRACE_CBID_cudaDeviceSetCacheConfig_v3020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaDeviceSetCacheConfig_v3020', 169)
CUPTI_RUNTIME_TRACE_CBID_cudaProfilerInitialize_v4000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaProfilerInitialize_v4000', 170)
CUPTI_RUNTIME_TRACE_CBID_cudaProfilerStart_v4000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaProfilerStart_v4000', 171)
CUPTI_RUNTIME_TRACE_CBID_cudaProfilerStop_v4000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaProfilerStop_v4000', 172)
# Entry points added in CUDA 4.1 (v4010): PCI bus-ID queries and IPC handles.
CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetByPCIBusId_v4010 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetByPCIBusId_v4010', 173)
CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetPCIBusId_v4010 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetPCIBusId_v4010', 174)
CUPTI_RUNTIME_TRACE_CBID_cudaGLGetDevices_v4010 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGLGetDevices_v4010', 175)
CUPTI_RUNTIME_TRACE_CBID_cudaIpcGetEventHandle_v4010 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaIpcGetEventHandle_v4010', 176)
CUPTI_RUNTIME_TRACE_CBID_cudaIpcOpenEventHandle_v4010 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaIpcOpenEventHandle_v4010', 177)
CUPTI_RUNTIME_TRACE_CBID_cudaIpcGetMemHandle_v4010 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaIpcGetMemHandle_v4010', 178)
CUPTI_RUNTIME_TRACE_CBID_cudaIpcOpenMemHandle_v4010 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaIpcOpenMemHandle_v4010', 179)
CUPTI_RUNTIME_TRACE_CBID_cudaIpcCloseMemHandle_v4010 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaIpcCloseMemHandle_v4010', 180)
CUPTI_RUNTIME_TRACE_CBID_cudaArrayGetInfo_v4010 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaArrayGetInfo_v4010', 181)
# CUDA 4.2 (v4020): shared-memory bank-size configuration.
CUPTI_RUNTIME_TRACE_CBID_cudaFuncSetSharedMemConfig_v4020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaFuncSetSharedMemConfig_v4020', 182)
CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetSharedMemConfig_v4020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetSharedMemConfig_v4020', 183)
CUPTI_RUNTIME_TRACE_CBID_cudaDeviceSetSharedMemConfig_v4020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaDeviceSetSharedMemConfig_v4020', 184)
# CUDA 5.0 (v5000): texture/surface objects, mipmapped arrays, stream callbacks.
CUPTI_RUNTIME_TRACE_CBID_cudaCreateTextureObject_v5000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaCreateTextureObject_v5000', 185)
CUPTI_RUNTIME_TRACE_CBID_cudaDestroyTextureObject_v5000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaDestroyTextureObject_v5000', 186)
CUPTI_RUNTIME_TRACE_CBID_cudaGetTextureObjectResourceDesc_v5000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGetTextureObjectResourceDesc_v5000', 187)
CUPTI_RUNTIME_TRACE_CBID_cudaGetTextureObjectTextureDesc_v5000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGetTextureObjectTextureDesc_v5000', 188)
CUPTI_RUNTIME_TRACE_CBID_cudaCreateSurfaceObject_v5000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaCreateSurfaceObject_v5000', 189)
CUPTI_RUNTIME_TRACE_CBID_cudaDestroySurfaceObject_v5000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaDestroySurfaceObject_v5000', 190)
CUPTI_RUNTIME_TRACE_CBID_cudaGetSurfaceObjectResourceDesc_v5000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGetSurfaceObjectResourceDesc_v5000', 191)
CUPTI_RUNTIME_TRACE_CBID_cudaMallocMipmappedArray_v5000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMallocMipmappedArray_v5000', 192)
CUPTI_RUNTIME_TRACE_CBID_cudaGetMipmappedArrayLevel_v5000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGetMipmappedArrayLevel_v5000', 193)
CUPTI_RUNTIME_TRACE_CBID_cudaFreeMipmappedArray_v5000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaFreeMipmappedArray_v5000', 194)
CUPTI_RUNTIME_TRACE_CBID_cudaBindTextureToMipmappedArray_v5000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaBindTextureToMipmappedArray_v5000', 195)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsResourceGetMappedMipmappedArray_v5000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsResourceGetMappedMipmappedArray_v5000', 196)
CUPTI_RUNTIME_TRACE_CBID_cudaStreamAddCallback_v5000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaStreamAddCallback_v5000', 197)
CUPTI_RUNTIME_TRACE_CBID_cudaStreamCreateWithFlags_v5000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaStreamCreateWithFlags_v5000', 198)
CUPTI_RUNTIME_TRACE_CBID_cudaGetTextureObjectResourceViewDesc_v5000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGetTextureObjectResourceViewDesc_v5000', 199)
CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetAttribute_v5000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetAttribute_v5000', 200)
# CUDA 5.5 (v5050): stream priorities.
CUPTI_RUNTIME_TRACE_CBID_cudaStreamDestroy_v5050 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaStreamDestroy_v5050', 201)
CUPTI_RUNTIME_TRACE_CBID_cudaStreamCreateWithPriority_v5050 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaStreamCreateWithPriority_v5050', 202)
CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetPriority_v5050 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetPriority_v5050', 203)
CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetFlags_v5050 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetFlags_v5050', 204)
CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetStreamPriorityRange_v5050 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetStreamPriorityRange_v5050', 205)
# CUDA 6.0 (v6000): managed (unified) memory and occupancy queries.
CUPTI_RUNTIME_TRACE_CBID_cudaMallocManaged_v6000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMallocManaged_v6000', 206)
CUPTI_RUNTIME_TRACE_CBID_cudaOccupancyMaxActiveBlocksPerMultiprocessor_v6000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaOccupancyMaxActiveBlocksPerMultiprocessor_v6000', 207)
CUPTI_RUNTIME_TRACE_CBID_cudaStreamAttachMemAsync_v6000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaStreamAttachMemAsync_v6000', 208)
CUPTI_RUNTIME_TRACE_CBID_cudaGetErrorName_v6050 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGetErrorName_v6050', 209)
CUPTI_RUNTIME_TRACE_CBID_cudaOccupancyMaxActiveBlocksPerMultiprocessor_v6050 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaOccupancyMaxActiveBlocksPerMultiprocessor_v6050', 210)
CUPTI_RUNTIME_TRACE_CBID_cudaLaunchKernel_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaLaunchKernel_v7000', 211)
CUPTI_RUNTIME_TRACE_CBID_cudaGetDeviceFlags_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGetDeviceFlags_v7000', 212)
CUPTI_RUNTIME_TRACE_CBID_cudaLaunch_ptsz_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaLaunch_ptsz_v7000', 213)
CUPTI_RUNTIME_TRACE_CBID_cudaLaunchKernel_ptsz_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaLaunchKernel_ptsz_v7000', 214)
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy_ptds_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy_ptds_v7000', 215)
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2D_ptds_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2D_ptds_v7000', 216)
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyToArray_ptds_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyToArray_ptds_v7000', 217)
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DToArray_ptds_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DToArray_ptds_v7000', 218)
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyFromArray_ptds_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyFromArray_ptds_v7000', 219)
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DFromArray_ptds_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DFromArray_ptds_v7000', 220)
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyArrayToArray_ptds_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyArrayToArray_ptds_v7000', 221)
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DArrayToArray_ptds_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DArrayToArray_ptds_v7000', 222)
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyToSymbol_ptds_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyToSymbol_ptds_v7000', 223)
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyFromSymbol_ptds_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyFromSymbol_ptds_v7000', 224)
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyAsync_ptsz_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyAsync_ptsz_v7000', 225)
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyToArrayAsync_ptsz_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyToArrayAsync_ptsz_v7000', 226)
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyFromArrayAsync_ptsz_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyFromArrayAsync_ptsz_v7000', 227)
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DAsync_ptsz_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DAsync_ptsz_v7000', 228)
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DToArrayAsync_ptsz_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DToArrayAsync_ptsz_v7000', 229)
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DFromArrayAsync_ptsz_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DFromArrayAsync_ptsz_v7000', 230)
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyToSymbolAsync_ptsz_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyToSymbolAsync_ptsz_v7000', 231)
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyFromSymbolAsync_ptsz_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyFromSymbolAsync_ptsz_v7000', 232)
CUPTI_RUNTIME_TRACE_CBID_cudaMemset_ptds_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemset_ptds_v7000', 233)
CUPTI_RUNTIME_TRACE_CBID_cudaMemset2D_ptds_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemset2D_ptds_v7000', 234)
CUPTI_RUNTIME_TRACE_CBID_cudaMemsetAsync_ptsz_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemsetAsync_ptsz_v7000', 235)
CUPTI_RUNTIME_TRACE_CBID_cudaMemset2DAsync_ptsz_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemset2DAsync_ptsz_v7000', 236)
CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetPriority_ptsz_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetPriority_ptsz_v7000', 237)
CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetFlags_ptsz_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetFlags_ptsz_v7000', 238)
CUPTI_RUNTIME_TRACE_CBID_cudaStreamSynchronize_ptsz_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaStreamSynchronize_ptsz_v7000', 239)
CUPTI_RUNTIME_TRACE_CBID_cudaStreamQuery_ptsz_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaStreamQuery_ptsz_v7000', 240)
CUPTI_RUNTIME_TRACE_CBID_cudaStreamAttachMemAsync_ptsz_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaStreamAttachMemAsync_ptsz_v7000', 241)
CUPTI_RUNTIME_TRACE_CBID_cudaEventRecord_ptsz_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaEventRecord_ptsz_v7000', 242)
CUPTI_RUNTIME_TRACE_CBID_cudaMemset3D_ptds_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemset3D_ptds_v7000', 243)
CUPTI_RUNTIME_TRACE_CBID_cudaMemset3DAsync_ptsz_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemset3DAsync_ptsz_v7000', 244)
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3D_ptds_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3D_ptds_v7000', 245)
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3DAsync_ptsz_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3DAsync_ptsz_v7000', 246)
CUPTI_RUNTIME_TRACE_CBID_cudaStreamWaitEvent_ptsz_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaStreamWaitEvent_ptsz_v7000', 247)
CUPTI_RUNTIME_TRACE_CBID_cudaStreamAddCallback_ptsz_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaStreamAddCallback_ptsz_v7000', 248)
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3DPeer_ptds_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3DPeer_ptds_v7000', 249)
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3DPeerAsync_ptsz_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3DPeerAsync_ptsz_v7000', 250)
CUPTI_RUNTIME_TRACE_CBID_cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags_v7000', 251)
CUPTI_RUNTIME_TRACE_CBID_cudaMemPrefetchAsync_v8000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemPrefetchAsync_v8000', 252)
CUPTI_RUNTIME_TRACE_CBID_cudaMemPrefetchAsync_ptsz_v8000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemPrefetchAsync_ptsz_v8000', 253)
CUPTI_RUNTIME_TRACE_CBID_cudaMemAdvise_v8000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemAdvise_v8000', 254)
CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetP2PAttribute_v8000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetP2PAttribute_v8000', 255)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsEGLRegisterImage_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsEGLRegisterImage_v7000', 256)
CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamConsumerConnect_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamConsumerConnect_v7000', 257)
CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamConsumerDisconnect_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamConsumerDisconnect_v7000', 258)
CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamConsumerAcquireFrame_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamConsumerAcquireFrame_v7000', 259)
CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamConsumerReleaseFrame_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamConsumerReleaseFrame_v7000', 260)
CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamProducerConnect_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamProducerConnect_v7000', 261)
CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamProducerDisconnect_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamProducerDisconnect_v7000', 262)
CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamProducerPresentFrame_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamProducerPresentFrame_v7000', 263)
CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamProducerReturnFrame_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamProducerReturnFrame_v7000', 264)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsResourceGetMappedEglFrame_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsResourceGetMappedEglFrame_v7000', 265)
CUPTI_RUNTIME_TRACE_CBID_cudaMemRangeGetAttribute_v8000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemRangeGetAttribute_v8000', 266)
CUPTI_RUNTIME_TRACE_CBID_cudaMemRangeGetAttributes_v8000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemRangeGetAttributes_v8000', 267)
CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamConsumerConnectWithFlags_v7000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamConsumerConnectWithFlags_v7000', 268)
CUPTI_RUNTIME_TRACE_CBID_cudaLaunchCooperativeKernel_v9000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaLaunchCooperativeKernel_v9000', 269)
CUPTI_RUNTIME_TRACE_CBID_cudaLaunchCooperativeKernel_ptsz_v9000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaLaunchCooperativeKernel_ptsz_v9000', 270)
CUPTI_RUNTIME_TRACE_CBID_cudaEventCreateFromEGLSync_v9000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaEventCreateFromEGLSync_v9000', 271)
CUPTI_RUNTIME_TRACE_CBID_cudaLaunchCooperativeKernelMultiDevice_v9000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaLaunchCooperativeKernelMultiDevice_v9000', 272)
CUPTI_RUNTIME_TRACE_CBID_cudaFuncSetAttribute_v9000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaFuncSetAttribute_v9000', 273)
CUPTI_RUNTIME_TRACE_CBID_cudaImportExternalMemory_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaImportExternalMemory_v10000', 274)
CUPTI_RUNTIME_TRACE_CBID_cudaExternalMemoryGetMappedBuffer_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaExternalMemoryGetMappedBuffer_v10000', 275)
CUPTI_RUNTIME_TRACE_CBID_cudaExternalMemoryGetMappedMipmappedArray_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaExternalMemoryGetMappedMipmappedArray_v10000', 276)
CUPTI_RUNTIME_TRACE_CBID_cudaDestroyExternalMemory_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaDestroyExternalMemory_v10000', 277)
CUPTI_RUNTIME_TRACE_CBID_cudaImportExternalSemaphore_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaImportExternalSemaphore_v10000', 278)
CUPTI_RUNTIME_TRACE_CBID_cudaSignalExternalSemaphoresAsync_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaSignalExternalSemaphoresAsync_v10000', 279)
CUPTI_RUNTIME_TRACE_CBID_cudaSignalExternalSemaphoresAsync_ptsz_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaSignalExternalSemaphoresAsync_ptsz_v10000', 280)
CUPTI_RUNTIME_TRACE_CBID_cudaWaitExternalSemaphoresAsync_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaWaitExternalSemaphoresAsync_v10000', 281)
CUPTI_RUNTIME_TRACE_CBID_cudaWaitExternalSemaphoresAsync_ptsz_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaWaitExternalSemaphoresAsync_ptsz_v10000', 282)
CUPTI_RUNTIME_TRACE_CBID_cudaDestroyExternalSemaphore_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaDestroyExternalSemaphore_v10000', 283)
CUPTI_RUNTIME_TRACE_CBID_cudaLaunchHostFunc_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaLaunchHostFunc_v10000', 284)
CUPTI_RUNTIME_TRACE_CBID_cudaLaunchHostFunc_ptsz_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaLaunchHostFunc_ptsz_v10000', 285)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphCreate_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphCreate_v10000', 286)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphKernelNodeGetParams_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphKernelNodeGetParams_v10000', 287)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphKernelNodeSetParams_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphKernelNodeSetParams_v10000', 288)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddKernelNode_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddKernelNode_v10000', 289)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddMemcpyNode_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddMemcpyNode_v10000', 290)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemcpyNodeGetParams_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemcpyNodeGetParams_v10000', 291)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemcpyNodeSetParams_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemcpyNodeSetParams_v10000', 292)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddMemsetNode_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddMemsetNode_v10000', 293)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemsetNodeGetParams_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemsetNodeGetParams_v10000', 294)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemsetNodeSetParams_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemsetNodeSetParams_v10000', 295)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddHostNode_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddHostNode_v10000', 296)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphHostNodeGetParams_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphHostNodeGetParams_v10000', 297)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddChildGraphNode_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddChildGraphNode_v10000', 298)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphChildGraphNodeGetGraph_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphChildGraphNodeGetGraph_v10000', 299)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddEmptyNode_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddEmptyNode_v10000', 300)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphClone_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphClone_v10000', 301)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphNodeFindInClone_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphNodeFindInClone_v10000', 302)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphNodeGetType_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphNodeGetType_v10000', 303)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphGetRootNodes_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphGetRootNodes_v10000', 304)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphNodeGetDependencies_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphNodeGetDependencies_v10000', 305)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphNodeGetDependentNodes_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphNodeGetDependentNodes_v10000', 306)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddDependencies_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddDependencies_v10000', 307)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphRemoveDependencies_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphRemoveDependencies_v10000', 308)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphDestroyNode_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphDestroyNode_v10000', 309)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphInstantiate_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphInstantiate_v10000', 310)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphLaunch_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphLaunch_v10000', 311)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphLaunch_ptsz_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphLaunch_ptsz_v10000', 312)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecDestroy_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecDestroy_v10000', 313)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphDestroy_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphDestroy_v10000', 314)
CUPTI_RUNTIME_TRACE_CBID_cudaStreamBeginCapture_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaStreamBeginCapture_v10000', 315)
CUPTI_RUNTIME_TRACE_CBID_cudaStreamBeginCapture_ptsz_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaStreamBeginCapture_ptsz_v10000', 316)
CUPTI_RUNTIME_TRACE_CBID_cudaStreamIsCapturing_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaStreamIsCapturing_v10000', 317)
CUPTI_RUNTIME_TRACE_CBID_cudaStreamIsCapturing_ptsz_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaStreamIsCapturing_ptsz_v10000', 318)
CUPTI_RUNTIME_TRACE_CBID_cudaStreamEndCapture_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaStreamEndCapture_v10000', 319)
CUPTI_RUNTIME_TRACE_CBID_cudaStreamEndCapture_ptsz_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaStreamEndCapture_ptsz_v10000', 320)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphHostNodeSetParams_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphHostNodeSetParams_v10000', 321)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphGetNodes_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphGetNodes_v10000', 322)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphGetEdges_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphGetEdges_v10000', 323)
CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetCaptureInfo_v10010 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetCaptureInfo_v10010', 324)
CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetCaptureInfo_ptsz_v10010 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetCaptureInfo_ptsz_v10010', 325)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecKernelNodeSetParams_v10010 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecKernelNodeSetParams_v10010', 326)
CUPTI_RUNTIME_TRACE_CBID_cudaThreadExchangeStreamCaptureMode_v10010 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaThreadExchangeStreamCaptureMode_v10010', 327)
CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetNvSciSyncAttributes_v10020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetNvSciSyncAttributes_v10020', 328)
CUPTI_RUNTIME_TRACE_CBID_cudaOccupancyAvailableDynamicSMemPerBlock_v10200 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaOccupancyAvailableDynamicSMemPerBlock_v10200', 329)
CUPTI_RUNTIME_TRACE_CBID_cudaStreamSetFlags_v10200 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaStreamSetFlags_v10200', 330)
CUPTI_RUNTIME_TRACE_CBID_cudaStreamSetFlags_ptsz_v10200 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaStreamSetFlags_ptsz_v10200', 331)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecMemcpyNodeSetParams_v10020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecMemcpyNodeSetParams_v10020', 332)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecMemsetNodeSetParams_v10020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecMemsetNodeSetParams_v10020', 333)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecHostNodeSetParams_v10020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecHostNodeSetParams_v10020', 334)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecUpdate_v10020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecUpdate_v10020', 335)
CUPTI_RUNTIME_TRACE_CBID_cudaGetFuncBySymbol_v11000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGetFuncBySymbol_v11000', 336)
CUPTI_RUNTIME_TRACE_CBID_cudaCtxResetPersistingL2Cache_v11000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaCtxResetPersistingL2Cache_v11000', 337)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphKernelNodeCopyAttributes_v11000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphKernelNodeCopyAttributes_v11000', 338)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphKernelNodeGetAttribute_v11000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphKernelNodeGetAttribute_v11000', 339)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphKernelNodeSetAttribute_v11000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphKernelNodeSetAttribute_v11000', 340)
CUPTI_RUNTIME_TRACE_CBID_cudaStreamCopyAttributes_v11000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaStreamCopyAttributes_v11000', 341)
CUPTI_RUNTIME_TRACE_CBID_cudaStreamCopyAttributes_ptsz_v11000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaStreamCopyAttributes_ptsz_v11000', 342)
CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetAttribute_v11000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetAttribute_v11000', 343)
CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetAttribute_ptsz_v11000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetAttribute_ptsz_v11000', 344)
CUPTI_RUNTIME_TRACE_CBID_cudaStreamSetAttribute_v11000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaStreamSetAttribute_v11000', 345)
CUPTI_RUNTIME_TRACE_CBID_cudaStreamSetAttribute_ptsz_v11000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaStreamSetAttribute_ptsz_v11000', 346)
CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetTexture1DLinearMaxWidth_v11010 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetTexture1DLinearMaxWidth_v11010', 347)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphUpload_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphUpload_v10000', 348)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphUpload_ptsz_v10000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphUpload_ptsz_v10000', 349)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddMemcpyNodeToSymbol_v11010 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddMemcpyNodeToSymbol_v11010', 350)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddMemcpyNodeFromSymbol_v11010 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddMemcpyNodeFromSymbol_v11010', 351)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddMemcpyNode1D_v11010 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddMemcpyNode1D_v11010', 352)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemcpyNodeSetParamsToSymbol_v11010 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemcpyNodeSetParamsToSymbol_v11010', 353)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemcpyNodeSetParamsFromSymbol_v11010 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemcpyNodeSetParamsFromSymbol_v11010', 354)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemcpyNodeSetParams1D_v11010 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemcpyNodeSetParams1D_v11010', 355)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecMemcpyNodeSetParamsToSymbol_v11010 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecMemcpyNodeSetParamsToSymbol_v11010', 356)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecMemcpyNodeSetParamsFromSymbol_v11010 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecMemcpyNodeSetParamsFromSymbol_v11010', 357)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecMemcpyNodeSetParams1D_v11010 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecMemcpyNodeSetParams1D_v11010', 358)
CUPTI_RUNTIME_TRACE_CBID_cudaArrayGetSparseProperties_v11010 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaArrayGetSparseProperties_v11010', 359)
CUPTI_RUNTIME_TRACE_CBID_cudaMipmappedArrayGetSparseProperties_v11010 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMipmappedArrayGetSparseProperties_v11010', 360)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecChildGraphNodeSetParams_v11010 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecChildGraphNodeSetParams_v11010', 361)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddEventRecordNode_v11010 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddEventRecordNode_v11010', 362)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphEventRecordNodeGetEvent_v11010 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphEventRecordNodeGetEvent_v11010', 363)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphEventRecordNodeSetEvent_v11010 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphEventRecordNodeSetEvent_v11010', 364)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddEventWaitNode_v11010 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddEventWaitNode_v11010', 365)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphEventWaitNodeGetEvent_v11010 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphEventWaitNodeGetEvent_v11010', 366)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphEventWaitNodeSetEvent_v11010 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphEventWaitNodeSetEvent_v11010', 367)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecEventRecordNodeSetEvent_v11010 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecEventRecordNodeSetEvent_v11010', 368)
# Continuation of the CUPTI runtime-API callback-ID enum (the enum class is
# declared earlier in this file).  Each constant identifies one CUDA runtime
# API entry point for CUPTI callback/tracing purposes; the `_vXXYZ0` suffix
# encodes the CUDA toolkit version that introduced the API (e.g. v11020 ==
# CUDA 11.2).  Values are generated from NVIDIA's cupti_runtime_cbid header
# and must not be changed.
CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecEventWaitNodeSetEvent_v11010 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecEventWaitNodeSetEvent_v11010', 369)
CUPTI_RUNTIME_TRACE_CBID_cudaEventRecordWithFlags_v11010 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaEventRecordWithFlags_v11010', 370)
CUPTI_RUNTIME_TRACE_CBID_cudaEventRecordWithFlags_ptsz_v11010 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaEventRecordWithFlags_ptsz_v11010', 371)
# --- APIs added in CUDA 11.2 (memory pools, external semaphores v2, ...) ---
CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetDefaultMemPool_v11020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetDefaultMemPool_v11020', 372)
CUPTI_RUNTIME_TRACE_CBID_cudaMallocAsync_v11020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMallocAsync_v11020', 373)
CUPTI_RUNTIME_TRACE_CBID_cudaMallocAsync_ptsz_v11020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMallocAsync_ptsz_v11020', 374)
CUPTI_RUNTIME_TRACE_CBID_cudaFreeAsync_v11020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaFreeAsync_v11020', 375)
CUPTI_RUNTIME_TRACE_CBID_cudaFreeAsync_ptsz_v11020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaFreeAsync_ptsz_v11020', 376)
CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolTrimTo_v11020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolTrimTo_v11020', 377)
CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolSetAttribute_v11020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolSetAttribute_v11020', 378)
CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolGetAttribute_v11020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolGetAttribute_v11020', 379)
CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolSetAccess_v11020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolSetAccess_v11020', 380)
CUPTI_RUNTIME_TRACE_CBID_cudaArrayGetPlane_v11020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaArrayGetPlane_v11020', 381)
CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolGetAccess_v11020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolGetAccess_v11020', 382)
CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolCreate_v11020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolCreate_v11020', 383)
CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolDestroy_v11020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolDestroy_v11020', 384)
CUPTI_RUNTIME_TRACE_CBID_cudaDeviceSetMemPool_v11020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaDeviceSetMemPool_v11020', 385)
CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetMemPool_v11020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetMemPool_v11020', 386)
CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolExportToShareableHandle_v11020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolExportToShareableHandle_v11020', 387)
CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolImportFromShareableHandle_v11020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolImportFromShareableHandle_v11020', 388)
CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolExportPointer_v11020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolExportPointer_v11020', 389)
CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolImportPointer_v11020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolImportPointer_v11020', 390)
CUPTI_RUNTIME_TRACE_CBID_cudaMallocFromPoolAsync_v11020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMallocFromPoolAsync_v11020', 391)
CUPTI_RUNTIME_TRACE_CBID_cudaMallocFromPoolAsync_ptsz_v11020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMallocFromPoolAsync_ptsz_v11020', 392)
CUPTI_RUNTIME_TRACE_CBID_cudaSignalExternalSemaphoresAsync_v2_v11020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaSignalExternalSemaphoresAsync_v2_v11020', 393)
CUPTI_RUNTIME_TRACE_CBID_cudaSignalExternalSemaphoresAsync_v2_ptsz_v11020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaSignalExternalSemaphoresAsync_v2_ptsz_v11020', 394)
CUPTI_RUNTIME_TRACE_CBID_cudaWaitExternalSemaphoresAsync_v2_v11020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaWaitExternalSemaphoresAsync_v2_v11020', 395)
CUPTI_RUNTIME_TRACE_CBID_cudaWaitExternalSemaphoresAsync_v2_ptsz_v11020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaWaitExternalSemaphoresAsync_v2_ptsz_v11020', 396)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddExternalSemaphoresSignalNode_v11020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddExternalSemaphoresSignalNode_v11020', 397)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphExternalSemaphoresSignalNodeGetParams_v11020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphExternalSemaphoresSignalNodeGetParams_v11020', 398)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphExternalSemaphoresSignalNodeSetParams_v11020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphExternalSemaphoresSignalNodeSetParams_v11020', 399)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddExternalSemaphoresWaitNode_v11020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddExternalSemaphoresWaitNode_v11020', 400)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphExternalSemaphoresWaitNodeGetParams_v11020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphExternalSemaphoresWaitNodeGetParams_v11020', 401)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphExternalSemaphoresWaitNodeSetParams_v11020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphExternalSemaphoresWaitNodeSetParams_v11020', 402)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecExternalSemaphoresSignalNodeSetParams_v11020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecExternalSemaphoresSignalNodeSetParams_v11020', 403)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecExternalSemaphoresWaitNodeSetParams_v11020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecExternalSemaphoresWaitNodeSetParams_v11020', 404)
# --- APIs added in CUDA 11.3 ---
CUPTI_RUNTIME_TRACE_CBID_cudaDeviceFlushGPUDirectRDMAWrites_v11030 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaDeviceFlushGPUDirectRDMAWrites_v11030', 405)
CUPTI_RUNTIME_TRACE_CBID_cudaGetDriverEntryPoint_v11030 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGetDriverEntryPoint_v11030', 406)
CUPTI_RUNTIME_TRACE_CBID_cudaGetDriverEntryPoint_ptsz_v11030 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGetDriverEntryPoint_ptsz_v11030', 407)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphDebugDotPrint_v11030 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphDebugDotPrint_v11030', 408)
CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetCaptureInfo_v2_v11030 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetCaptureInfo_v2_v11030', 409)
CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetCaptureInfo_v2_ptsz_v11030 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetCaptureInfo_v2_ptsz_v11030', 410)
CUPTI_RUNTIME_TRACE_CBID_cudaStreamUpdateCaptureDependencies_v11030 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaStreamUpdateCaptureDependencies_v11030', 411)
CUPTI_RUNTIME_TRACE_CBID_cudaStreamUpdateCaptureDependencies_ptsz_v11030 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaStreamUpdateCaptureDependencies_ptsz_v11030', 412)
CUPTI_RUNTIME_TRACE_CBID_cudaUserObjectCreate_v11030 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaUserObjectCreate_v11030', 413)
CUPTI_RUNTIME_TRACE_CBID_cudaUserObjectRetain_v11030 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaUserObjectRetain_v11030', 414)
CUPTI_RUNTIME_TRACE_CBID_cudaUserObjectRelease_v11030 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaUserObjectRelease_v11030', 415)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphRetainUserObject_v11030 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphRetainUserObject_v11030', 416)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphReleaseUserObject_v11030 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphReleaseUserObject_v11030', 417)
# --- APIs added in CUDA 11.4 ---
CUPTI_RUNTIME_TRACE_CBID_cudaGraphInstantiateWithFlags_v11040 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphInstantiateWithFlags_v11040', 418)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddMemAllocNode_v11040 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddMemAllocNode_v11040', 419)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemAllocNodeGetParams_v11040 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemAllocNodeGetParams_v11040', 420)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddMemFreeNode_v11040 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddMemFreeNode_v11040', 421)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemFreeNodeGetParams_v11040 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemFreeNodeGetParams_v11040', 422)
CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGraphMemTrim_v11040 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGraphMemTrim_v11040', 423)
CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetGraphMemAttribute_v11040 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetGraphMemAttribute_v11040', 424)
CUPTI_RUNTIME_TRACE_CBID_cudaDeviceSetGraphMemAttribute_v11040 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaDeviceSetGraphMemAttribute_v11040', 425)
# --- APIs added in CUDA 11.6 ---
CUPTI_RUNTIME_TRACE_CBID_cudaGraphNodeSetEnabled_v11060 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphNodeSetEnabled_v11060', 426)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphNodeGetEnabled_v11060 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphNodeGetEnabled_v11060', 427)
CUPTI_RUNTIME_TRACE_CBID_cudaArrayGetMemoryRequirements_v11060 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaArrayGetMemoryRequirements_v11060', 428)
CUPTI_RUNTIME_TRACE_CBID_cudaMipmappedArrayGetMemoryRequirements_v11060 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMipmappedArrayGetMemoryRequirements_v11060', 429)
CUPTI_RUNTIME_TRACE_CBID_cudaLaunchKernelExC_v11060 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaLaunchKernelExC_v11060', 430)
CUPTI_RUNTIME_TRACE_CBID_cudaLaunchKernelExC_ptsz_v11060 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaLaunchKernelExC_ptsz_v11060', 431)
# --- APIs added in CUDA 11.7 / 11.8 ---
CUPTI_RUNTIME_TRACE_CBID_cudaOccupancyMaxPotentialClusterSize_v11070 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaOccupancyMaxPotentialClusterSize_v11070', 432)
CUPTI_RUNTIME_TRACE_CBID_cudaOccupancyMaxActiveClusters_v11070 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaOccupancyMaxActiveClusters_v11070', 433)
CUPTI_RUNTIME_TRACE_CBID_cudaCreateTextureObject_v2_v11080 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaCreateTextureObject_v2_v11080', 434)
CUPTI_RUNTIME_TRACE_CBID_cudaGetTextureObjectTextureDesc_v2_v11080 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGetTextureObjectTextureDesc_v2_v11080', 435)
# --- APIs added in CUDA 12.0 ---
CUPTI_RUNTIME_TRACE_CBID_cudaGraphInstantiateWithParams_v12000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphInstantiateWithParams_v12000', 436)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphInstantiateWithParams_ptsz_v12000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphInstantiateWithParams_ptsz_v12000', 437)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecGetFlags_v12000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecGetFlags_v12000', 438)
CUPTI_RUNTIME_TRACE_CBID_cudaGetKernel_v12000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGetKernel_v12000', 439)
CUPTI_RUNTIME_TRACE_CBID_cudaGetDeviceProperties_v2_v12000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGetDeviceProperties_v2_v12000', 440)
CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetId_v12000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetId_v12000', 441)
CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetId_ptsz_v12000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetId_ptsz_v12000', 442)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphInstantiate_v12000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphInstantiate_v12000', 443)
CUPTI_RUNTIME_TRACE_CBID_cudaInitDevice_v12000 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaInitDevice_v12000', 444)
# --- APIs added in CUDA 12.2 ---
CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddNode_v12020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddNode_v12020', 445)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphNodeSetParams_v12020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphNodeSetParams_v12020', 446)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecNodeSetParams_v12020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecNodeSetParams_v12020', 447)
CUPTI_RUNTIME_TRACE_CBID_cudaMemAdvise_v2_v12020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemAdvise_v2_v12020', 448)
CUPTI_RUNTIME_TRACE_CBID_cudaMemPrefetchAsync_v2_v12020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemPrefetchAsync_v2_v12020', 449)
CUPTI_RUNTIME_TRACE_CBID_cudaMemPrefetchAsync_v2_ptsz_v12020 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemPrefetchAsync_v2_ptsz_v12020', 450)
# --- APIs added in CUDA 12.3 ---
CUPTI_RUNTIME_TRACE_CBID_cudaFuncGetName_v12030 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaFuncGetName_v12030', 451)
CUPTI_RUNTIME_TRACE_CBID_cudaStreamBeginCaptureToGraph_v12030 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaStreamBeginCaptureToGraph_v12030', 452)
CUPTI_RUNTIME_TRACE_CBID_cudaStreamBeginCaptureToGraph_ptsz_v12030 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaStreamBeginCaptureToGraph_ptsz_v12030', 453)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphConditionalHandleCreate_v12030 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphConditionalHandleCreate_v12030', 454)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphGetEdges_v2_v12030 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphGetEdges_v2_v12030', 455)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphNodeGetDependencies_v2_v12030 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphNodeGetDependencies_v2_v12030', 456)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphNodeGetDependentNodes_v2_v12030 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphNodeGetDependentNodes_v2_v12030', 457)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddDependencies_v2_v12030 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddDependencies_v2_v12030', 458)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphRemoveDependencies_v2_v12030 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphRemoveDependencies_v2_v12030', 459)
CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddNode_v2_v12030 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddNode_v2_v12030', 460)
CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetCaptureInfo_v3_v12030 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetCaptureInfo_v3_v12030', 461)
CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetCaptureInfo_v3_ptsz_v12030 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetCaptureInfo_v3_ptsz_v12030', 462)
CUPTI_RUNTIME_TRACE_CBID_cudaStreamUpdateCaptureDependencies_v2_v12030 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaStreamUpdateCaptureDependencies_v2_v12030', 463)
CUPTI_RUNTIME_TRACE_CBID_cudaStreamUpdateCaptureDependencies_v2_ptsz_v12030 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaStreamUpdateCaptureDependencies_v2_ptsz_v12030', 464)
# --- APIs added in CUDA 12.4 / 12.5 ---
CUPTI_RUNTIME_TRACE_CBID_cudaDeviceRegisterAsyncNotification_v12040 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaDeviceRegisterAsyncNotification_v12040', 465)
CUPTI_RUNTIME_TRACE_CBID_cudaDeviceUnregisterAsyncNotification_v12040 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaDeviceUnregisterAsyncNotification_v12040', 466)
CUPTI_RUNTIME_TRACE_CBID_cudaFuncGetParamInfo_v12040 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaFuncGetParamInfo_v12040', 467)
CUPTI_RUNTIME_TRACE_CBID_cudaGetDriverEntryPointByVersion_v12050 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGetDriverEntryPointByVersion_v12050', 468)
CUPTI_RUNTIME_TRACE_CBID_cudaGetDriverEntryPointByVersion_ptsz_v12050 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaGetDriverEntryPointByVersion_ptsz_v12050', 469)
# Placeholder/reserved IDs from CUDA 12.6 (no public API name in the header).
CUPTI_RUNTIME_TRACE_CBID_cuda470_v12060 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cuda470_v12060', 470)
CUPTI_RUNTIME_TRACE_CBID_cuda471_v12060 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cuda471_v12060', 471)
CUPTI_RUNTIME_TRACE_CBID_cuda472_v12060 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cuda472_v12060', 472)
CUPTI_RUNTIME_TRACE_CBID_cuda473_v12060 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cuda473_v12060', 473)
CUPTI_RUNTIME_TRACE_CBID_cuda474_v12060 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cuda474_v12060', 474)
CUPTI_RUNTIME_TRACE_CBID_cuda475_v12060 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cuda475_v12060', 475)
CUPTI_RUNTIME_TRACE_CBID_cuda476_v12060 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cuda476_v12060', 476)
CUPTI_RUNTIME_TRACE_CBID_cuda477_v12060 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cuda477_v12060', 477)
CUPTI_RUNTIME_TRACE_CBID_cuda478_v12060 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cuda478_v12060', 478)
CUPTI_RUNTIME_TRACE_CBID_cuda479_v12060 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cuda479_v12060', 479)
# --- APIs added in CUDA 12.8 ---
CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetDevice_v12080 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetDevice_v12080', 480)
CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetDevice_ptsz_v12080 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetDevice_ptsz_v12080', 481)
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyBatchAsync_v12080 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyBatchAsync_v12080', 482)
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyBatchAsync_ptsz_v12080 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyBatchAsync_ptsz_v12080', 483)
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3DBatchAsync_v12080 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3DBatchAsync_v12080', 484)
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3DBatchAsync_ptsz_v12080 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3DBatchAsync_ptsz_v12080', 485)
CUPTI_RUNTIME_TRACE_CBID_cudaEventElapsedTime_v2_v12080 = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_cudaEventElapsedTime_v2_v12080', 486)
# Sentinels: SIZE is one past the last valid ID; FORCE_INT pins the C enum to 32 bits.
CUPTI_RUNTIME_TRACE_CBID_SIZE = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_SIZE', 487)
CUPTI_RUNTIME_TRACE_CBID_FORCE_INT = enum_CUpti_runtime_api_trace_cbid_enum.define('CUPTI_RUNTIME_TRACE_CBID_FORCE_INT', 2147483647)
# Public alias matching the C typedef name.
CUpti_runtime_api_trace_cbid: TypeAlias = enum_CUpti_runtime_api_trace_cbid_enum
# CUpti_ProfilerRange: how profiling ranges are delimited (auto per-kernel vs
# user-defined push/pop ranges); used by struct_CUpti_Profiler_BeginSession_Params.
class CUpti_ProfilerRange(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_Range_INVALID = CUpti_ProfilerRange.define('CUPTI_Range_INVALID', 0)
CUPTI_AutoRange = CUpti_ProfilerRange.define('CUPTI_AutoRange', 1)
CUPTI_UserRange = CUpti_ProfilerRange.define('CUPTI_UserRange', 2)
CUPTI_Range_COUNT = CUpti_ProfilerRange.define('CUPTI_Range_COUNT', 3)  # count sentinel, not a valid mode
# CUpti_ProfilerReplayMode: who replays passes when a metric configuration
# needs multiple passes (the application, CUPTI per-kernel, or the user).
class CUpti_ProfilerReplayMode(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_Replay_INVALID = CUpti_ProfilerReplayMode.define('CUPTI_Replay_INVALID', 0)
CUPTI_ApplicationReplay = CUpti_ProfilerReplayMode.define('CUPTI_ApplicationReplay', 1)
CUPTI_KernelReplay = CUpti_ProfilerReplayMode.define('CUPTI_KernelReplay', 2)
CUPTI_UserReplay = CUpti_ProfilerReplayMode.define('CUPTI_UserReplay', 3)
CUPTI_Replay_COUNT = CUpti_ProfilerReplayMode.define('CUPTI_Replay_COUNT', 4)  # count sentinel, not a valid mode
# Parameter structs below mirror the C structs from CUPTI's profiling API.
# Convention for all of them: SIZE is the C sizeof in bytes, and each field is
# Annotated[python-type, byte-offset-within-the-struct].  `structSize` is the
# ABI-versioning size field the caller fills in, and `pPriv` is a reserved
# private pointer (presumably must be NULL -- see CUPTI docs).
@c.record
class struct_CUpti_Profiler_Initialize_Params(c.Struct):
  """Params for cuptiProfilerInitialize."""
  SIZE = 16
  structSize: Annotated[size_t, 0]
  pPriv: Annotated[ctypes.c_void_p, 8]
CUpti_Profiler_Initialize_Params: TypeAlias = struct_CUpti_Profiler_Initialize_Params
@c.record
class struct_CUpti_Profiler_DeInitialize_Params(c.Struct):
  """Params for cuptiProfilerDeInitialize."""
  SIZE = 16
  structSize: Annotated[size_t, 0]
  pPriv: Annotated[ctypes.c_void_p, 8]
CUpti_Profiler_DeInitialize_Params: TypeAlias = struct_CUpti_Profiler_DeInitialize_Params
# FFI stubs: bodies are supplied by dll.bind, which resolves the symbol in the
# CUPTI shared library; each returns a CUptiResult status code.
@dll.bind
def cuptiProfilerInitialize(pParams:c.POINTER[CUpti_Profiler_Initialize_Params]) -> CUptiResult: ...
@dll.bind
def cuptiProfilerDeInitialize(pParams:c.POINTER[CUpti_Profiler_DeInitialize_Params]) -> CUptiResult: ...
@c.record
class struct_CUpti_Profiler_CounterDataImageOptions(c.Struct):
  """Options describing the counter-data image to allocate (mirrors the CUPTI C struct).

  Fields are Annotated[type, byte-offset]; SIZE is the C sizeof in bytes.
  """
  SIZE = 48
  structSize: Annotated[size_t, 0]
  pPriv: Annotated[ctypes.c_void_p, 8]
  pCounterDataPrefix: Annotated[c.POINTER[uint8_t], 16]  # prefix blob produced by the host-side config
  counterDataPrefixSize: Annotated[size_t, 24]
  maxNumRanges: Annotated[uint32_t, 32]
  maxNumRangeTreeNodes: Annotated[uint32_t, 36]
  maxRangeNameLength: Annotated[uint32_t, 40]
CUpti_Profiler_CounterDataImageOptions: TypeAlias = struct_CUpti_Profiler_CounterDataImageOptions
@c.record
class struct_CUpti_Profiler_CounterDataImage_CalculateSize_Params(c.Struct):
  """Params for cuptiProfilerCounterDataImageCalculateSize; CUPTI writes counterDataImageSize."""
  SIZE = 40
  structSize: Annotated[size_t, 0]
  pPriv: Annotated[ctypes.c_void_p, 8]
  sizeofCounterDataImageOptions: Annotated[size_t, 16]
  pOptions: Annotated[c.POINTER[CUpti_Profiler_CounterDataImageOptions], 24]
  counterDataImageSize: Annotated[size_t, 32]  # out: required buffer size in bytes
CUpti_Profiler_CounterDataImage_CalculateSize_Params: TypeAlias = struct_CUpti_Profiler_CounterDataImage_CalculateSize_Params
@c.record
class struct_CUpti_Profiler_CounterDataImage_Initialize_Params(c.Struct):
  """Params for cuptiProfilerCounterDataImageInitialize; caller supplies pCounterDataImage."""
  SIZE = 48
  structSize: Annotated[size_t, 0]
  pPriv: Annotated[ctypes.c_void_p, 8]
  sizeofCounterDataImageOptions: Annotated[size_t, 16]
  pOptions: Annotated[c.POINTER[CUpti_Profiler_CounterDataImageOptions], 24]
  counterDataImageSize: Annotated[size_t, 32]
  pCounterDataImage: Annotated[c.POINTER[uint8_t], 40]
CUpti_Profiler_CounterDataImage_Initialize_Params: TypeAlias = struct_CUpti_Profiler_CounterDataImage_Initialize_Params
# FFI stubs resolved by dll.bind from the CUPTI shared library.
@dll.bind
def cuptiProfilerCounterDataImageCalculateSize(pParams:c.POINTER[CUpti_Profiler_CounterDataImage_CalculateSize_Params]) -> CUptiResult: ...
@dll.bind
def cuptiProfilerCounterDataImageInitialize(pParams:c.POINTER[CUpti_Profiler_CounterDataImage_Initialize_Params]) -> CUptiResult: ...
@c.record
class struct_CUpti_Profiler_CounterDataImage_CalculateScratchBufferSize_Params(c.Struct):
  """Params for cuptiProfilerCounterDataImageCalculateScratchBufferSize.

  Fields are Annotated[type, byte-offset]; CUPTI writes counterDataScratchBufferSize.
  """
  SIZE = 40
  structSize: Annotated[size_t, 0]
  pPriv: Annotated[ctypes.c_void_p, 8]
  counterDataImageSize: Annotated[size_t, 16]
  pCounterDataImage: Annotated[c.POINTER[uint8_t], 24]
  counterDataScratchBufferSize: Annotated[size_t, 32]  # out: required scratch size in bytes
CUpti_Profiler_CounterDataImage_CalculateScratchBufferSize_Params: TypeAlias = struct_CUpti_Profiler_CounterDataImage_CalculateScratchBufferSize_Params
@c.record
class struct_CUpti_Profiler_CounterDataImage_InitializeScratchBuffer_Params(c.Struct):
  """Params for cuptiProfilerCounterDataImageInitializeScratchBuffer; caller supplies both buffers."""
  SIZE = 48
  structSize: Annotated[size_t, 0]
  pPriv: Annotated[ctypes.c_void_p, 8]
  counterDataImageSize: Annotated[size_t, 16]
  pCounterDataImage: Annotated[c.POINTER[uint8_t], 24]
  counterDataScratchBufferSize: Annotated[size_t, 32]
  pCounterDataScratchBuffer: Annotated[c.POINTER[uint8_t], 40]
CUpti_Profiler_CounterDataImage_InitializeScratchBuffer_Params: TypeAlias = struct_CUpti_Profiler_CounterDataImage_InitializeScratchBuffer_Params
# FFI stubs resolved by dll.bind from the CUPTI shared library.
@dll.bind
def cuptiProfilerCounterDataImageCalculateScratchBufferSize(pParams:c.POINTER[CUpti_Profiler_CounterDataImage_CalculateScratchBufferSize_Params]) -> CUptiResult: ...
@dll.bind
def cuptiProfilerCounterDataImageInitializeScratchBuffer(pParams:c.POINTER[CUpti_Profiler_CounterDataImage_InitializeScratchBuffer_Params]) -> CUptiResult: ...
@c.record
class struct_CUpti_Profiler_BeginSession_Params(c.Struct):
  """Params for cuptiProfilerBeginSession (mirrors the CUPTI C struct).

  Fields are Annotated[type, byte-offset]; SIZE is the C sizeof. Note the gap
  57..63 after bDumpCounterDataInFile is C struct padding.
  """
  SIZE = 96
  structSize: Annotated[size_t, 0]
  pPriv: Annotated[ctypes.c_void_p, 8]
  ctx: Annotated[CUcontext, 16]  # CUDA context to profile (presumably NULL means current -- see CUPTI docs)
  counterDataImageSize: Annotated[size_t, 24]
  pCounterDataImage: Annotated[c.POINTER[uint8_t], 32]
  counterDataScratchBufferSize: Annotated[size_t, 40]
  pCounterDataScratchBuffer: Annotated[c.POINTER[uint8_t], 48]
  bDumpCounterDataInFile: Annotated[uint8_t, 56]  # C bool flag
  pCounterDataFilePath: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 64]  # const char*
  range: Annotated[CUpti_ProfilerRange, 72]  # auto vs user ranges
  replayMode: Annotated[CUpti_ProfilerReplayMode, 76]
  maxRangesPerPass: Annotated[size_t, 80]
  maxLaunchesPerPass: Annotated[size_t, 88]
CUpti_Profiler_BeginSession_Params: TypeAlias = struct_CUpti_Profiler_BeginSession_Params
@c.record
class struct_CUpti_Profiler_EndSession_Params(c.Struct):
  """Params for cuptiProfilerEndSession."""
  SIZE = 24
  structSize: Annotated[size_t, 0]
  pPriv: Annotated[ctypes.c_void_p, 8]
  ctx: Annotated[CUcontext, 16]
CUpti_Profiler_EndSession_Params: TypeAlias = struct_CUpti_Profiler_EndSession_Params
# FFI stubs resolved by dll.bind from the CUPTI shared library.
@dll.bind
def cuptiProfilerBeginSession(pParams:c.POINTER[CUpti_Profiler_BeginSession_Params]) -> CUptiResult: ...
@dll.bind
def cuptiProfilerEndSession(pParams:c.POINTER[CUpti_Profiler_EndSession_Params]) -> CUptiResult: ...
@c.record
class struct_CUpti_Profiler_SetConfig_Params(c.Struct):
  """Params for cuptiProfilerSetConfig (mirrors the CUPTI C struct).

  Fields are Annotated[type, byte-offset]; the 44..47 gap after
  numNestingLevels is C struct padding.
  """
  SIZE = 64
  structSize: Annotated[size_t, 0]
  pPriv: Annotated[ctypes.c_void_p, 8]
  ctx: Annotated[CUcontext, 16]
  pConfig: Annotated[c.POINTER[uint8_t], 24]  # config image produced by the host-side metric config
  configSize: Annotated[size_t, 32]
  minNestingLevel: Annotated[uint16_t, 40]
  numNestingLevels: Annotated[uint16_t, 42]
  passIndex: Annotated[size_t, 48]
  targetNestingLevel: Annotated[uint16_t, 56]
CUpti_Profiler_SetConfig_Params: TypeAlias = struct_CUpti_Profiler_SetConfig_Params
@c.record
class struct_CUpti_Profiler_UnsetConfig_Params(c.Struct):
  """Params for cuptiProfilerUnsetConfig."""
  SIZE = 24
  structSize: Annotated[size_t, 0]
  pPriv: Annotated[ctypes.c_void_p, 8]
  ctx: Annotated[CUcontext, 16]
CUpti_Profiler_UnsetConfig_Params: TypeAlias = struct_CUpti_Profiler_UnsetConfig_Params
# FFI stubs resolved by dll.bind from the CUPTI shared library.
@dll.bind
def cuptiProfilerSetConfig(pParams:c.POINTER[CUpti_Profiler_SetConfig_Params]) -> CUptiResult: ...
@dll.bind
def cuptiProfilerUnsetConfig(pParams:c.POINTER[CUpti_Profiler_UnsetConfig_Params]) -> CUptiResult: ...
@c.record
class struct_CUpti_Profiler_BeginPass_Params(c.Struct):
  """Params for cuptiProfilerBeginPass (one replay pass in user-replay mode)."""
  SIZE = 24
  structSize: Annotated[size_t, 0]
  pPriv: Annotated[ctypes.c_void_p, 8]
  ctx: Annotated[CUcontext, 16]
CUpti_Profiler_BeginPass_Params: TypeAlias = struct_CUpti_Profiler_BeginPass_Params
@c.record
class struct_CUpti_Profiler_EndPass_Params(c.Struct):
  """Params for cuptiProfilerEndPass.

  Fields are Annotated[type, byte-offset]; gaps at 26..31 and 41..47 are C
  struct padding.
  """
  SIZE = 48
  structSize: Annotated[size_t, 0]
  pPriv: Annotated[ctypes.c_void_p, 8]
  ctx: Annotated[CUcontext, 16]
  targetNestingLevel: Annotated[uint16_t, 24]
  passIndex: Annotated[size_t, 32]
  allPassesSubmitted: Annotated[uint8_t, 40]  # C bool flag
CUpti_Profiler_EndPass_Params: TypeAlias = struct_CUpti_Profiler_EndPass_Params
# FFI stubs resolved by dll.bind from the CUPTI shared library.
@dll.bind
def cuptiProfilerBeginPass(pParams:c.POINTER[CUpti_Profiler_BeginPass_Params]) -> CUptiResult: ...
@dll.bind
def cuptiProfilerEndPass(pParams:c.POINTER[CUpti_Profiler_EndPass_Params]) -> CUptiResult: ...
@c.record
class struct_CUpti_Profiler_EnableProfiling_Params(c.Struct):
  """Params for cuptiProfilerEnableProfiling (starts counter collection on ctx)."""
  SIZE = 24
  structSize: Annotated[size_t, 0]
  pPriv: Annotated[ctypes.c_void_p, 8]
  ctx: Annotated[CUcontext, 16]
CUpti_Profiler_EnableProfiling_Params: TypeAlias = struct_CUpti_Profiler_EnableProfiling_Params
@c.record
class struct_CUpti_Profiler_DisableProfiling_Params(c.Struct):
  """Params for cuptiProfilerDisableProfiling."""
  SIZE = 24
  structSize: Annotated[size_t, 0]
  pPriv: Annotated[ctypes.c_void_p, 8]
  ctx: Annotated[CUcontext, 16]
CUpti_Profiler_DisableProfiling_Params: TypeAlias = struct_CUpti_Profiler_DisableProfiling_Params
# FFI stubs resolved by dll.bind from the CUPTI shared library.
@dll.bind
def cuptiProfilerEnableProfiling(pParams:c.POINTER[CUpti_Profiler_EnableProfiling_Params]) -> CUptiResult: ...
@dll.bind
def cuptiProfilerDisableProfiling(pParams:c.POINTER[CUpti_Profiler_DisableProfiling_Params]) -> CUptiResult: ...
@c.record
class struct_CUpti_Profiler_IsPassCollected_Params(c.Struct):
  """Params for cuptiProfilerIsPassCollected (query pass completion + drop counts).

  Fields are Annotated[type, byte-offset]; onePassCollected/allPassesCollected
  are C bool out-flags.
  """
  SIZE = 48
  structSize: Annotated[size_t, 0]
  pPriv: Annotated[ctypes.c_void_p, 8]
  ctx: Annotated[CUcontext, 16]
  numRangesDropped: Annotated[size_t, 24]
  numTraceBytesDropped: Annotated[size_t, 32]
  onePassCollected: Annotated[uint8_t, 40]
  allPassesCollected: Annotated[uint8_t, 41]
CUpti_Profiler_IsPassCollected_Params: TypeAlias = struct_CUpti_Profiler_IsPassCollected_Params
# FFI stub resolved by dll.bind from the CUPTI shared library.
@dll.bind
def cuptiProfilerIsPassCollected(pParams:c.POINTER[CUpti_Profiler_IsPassCollected_Params]) -> CUptiResult: ...
@c.record
class struct_CUpti_Profiler_FlushCounterData_Params(c.Struct):
  """Params for cuptiProfilerFlushCounterData (flush collected counters to the image)."""
  SIZE = 40
  structSize: Annotated[size_t, 0]
  pPriv: Annotated[ctypes.c_void_p, 8]
  ctx: Annotated[CUcontext, 16]
  numRangesDropped: Annotated[size_t, 24]
  numTraceBytesDropped: Annotated[size_t, 32]
CUpti_Profiler_FlushCounterData_Params: TypeAlias = struct_CUpti_Profiler_FlushCounterData_Params
# FFI stub resolved by dll.bind from the CUPTI shared library.
@dll.bind
def cuptiProfilerFlushCounterData(pParams:c.POINTER[CUpti_Profiler_FlushCounterData_Params]) -> CUptiResult: ...
@c.record
class struct_CUpti_Profiler_PushRange_Params(c.Struct):
  """Params for cuptiProfilerPushRange (open a named user range; see CUPTI_UserRange)."""
  SIZE = 40
  structSize: Annotated[size_t, 0]
  pPriv: Annotated[ctypes.c_void_p, 8]
  ctx: Annotated[CUcontext, 16]
  pRangeName: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 24]  # const char*
  rangeNameLength: Annotated[size_t, 32]
CUpti_Profiler_PushRange_Params: TypeAlias = struct_CUpti_Profiler_PushRange_Params
@c.record
class struct_CUpti_Profiler_PopRange_Params(c.Struct):
  """Params for cuptiProfilerPopRange (close the innermost user range)."""
  SIZE = 24
  structSize: Annotated[size_t, 0]
  pPriv: Annotated[ctypes.c_void_p, 8]
  ctx: Annotated[CUcontext, 16]
CUpti_Profiler_PopRange_Params: TypeAlias = struct_CUpti_Profiler_PopRange_Params
# FFI stubs resolved by dll.bind from the CUPTI shared library.
@dll.bind
def cuptiProfilerPushRange(pParams:c.POINTER[CUpti_Profiler_PushRange_Params]) -> CUptiResult: ...
@dll.bind
def cuptiProfilerPopRange(pParams:c.POINTER[CUpti_Profiler_PopRange_Params]) -> CUptiResult: ...
@c.record
class struct_CUpti_Profiler_GetCounterAvailability_Params(c.Struct):
  """Params for cuptiProfilerGetCounterAvailability.

  Typical two-call pattern: query with a NULL image to get the size, then
  allocate and call again -- TODO confirm against CUPTI docs.
  """
  SIZE = 40
  structSize: Annotated[size_t, 0]
  pPriv: Annotated[ctypes.c_void_p, 8]
  ctx: Annotated[CUcontext, 16]
  counterAvailabilityImageSize: Annotated[size_t, 24]
  pCounterAvailabilityImage: Annotated[c.POINTER[uint8_t], 32]
CUpti_Profiler_GetCounterAvailability_Params: TypeAlias = struct_CUpti_Profiler_GetCounterAvailability_Params
# FFI stub resolved by dll.bind from the CUPTI shared library.
@dll.bind
def cuptiProfilerGetCounterAvailability(pParams:c.POINTER[CUpti_Profiler_GetCounterAvailability_Params]) -> CUptiResult: ...
# Per-configuration support level reported by cuptiProfilerDeviceSupported.
class CUpti_Profiler_Support_Level(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_PROFILER_CONFIGURATION_UNKNOWN = CUpti_Profiler_Support_Level.define('CUPTI_PROFILER_CONFIGURATION_UNKNOWN', 0)
CUPTI_PROFILER_CONFIGURATION_UNSUPPORTED = CUpti_Profiler_Support_Level.define('CUPTI_PROFILER_CONFIGURATION_UNSUPPORTED', 1)
CUPTI_PROFILER_CONFIGURATION_DISABLED = CUpti_Profiler_Support_Level.define('CUPTI_PROFILER_CONFIGURATION_DISABLED', 2)
CUPTI_PROFILER_CONFIGURATION_SUPPORTED = CUpti_Profiler_Support_Level.define('CUPTI_PROFILER_CONFIGURATION_SUPPORTED', 3)
# Which CUPTI profiling API family a support query refers to.
class CUpti_Profiler_API(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_PROFILER_RANGE_PROFILING = CUpti_Profiler_API.define('CUPTI_PROFILER_RANGE_PROFILING', 0)
CUPTI_PROFILER_PC_SAMPLING = CUpti_Profiler_API.define('CUPTI_PROFILER_PC_SAMPLING', 1)
CUPTI_PROFILER_SASS_METRICS = CUpti_Profiler_API.define('CUPTI_PROFILER_SASS_METRICS', 2)
CUPTI_PROFILER_PM_SAMPLING = CUpti_Profiler_API.define('CUPTI_PROFILER_PM_SAMPLING', 3)
CUPTI_PROFILER_UNKNOWN = CUpti_Profiler_API.define('CUPTI_PROFILER_UNKNOWN', 4)
@c.record
class CUpti_Profiler_DeviceSupported_Params(c.Struct):
  """Params for cuptiProfilerDeviceSupported: per-aspect support levels for cuDevice.

  Fields are Annotated[type, byte-offset]; each CUpti_Profiler_Support_Level
  field is an out-value filled in by CUPTI (overall, then per configuration:
  architecture, SLI, vGPU, confidential compute, CMP, WSL).
  """
  SIZE = 56
  structSize: Annotated[size_t, 0]
  pPriv: Annotated[ctypes.c_void_p, 8]
  cuDevice: Annotated[CUdevice, 16]
  isSupported: Annotated[CUpti_Profiler_Support_Level, 20]
  architecture: Annotated[CUpti_Profiler_Support_Level, 24]
  sli: Annotated[CUpti_Profiler_Support_Level, 28]
  vGpu: Annotated[CUpti_Profiler_Support_Level, 32]
  confidentialCompute: Annotated[CUpti_Profiler_Support_Level, 36]
  cmp: Annotated[CUpti_Profiler_Support_Level, 40]
  wsl: Annotated[CUpti_Profiler_Support_Level, 44]
  api: Annotated[CUpti_Profiler_API, 48]  # which API family is being queried
# FFI stub resolved by dll.bind from the CUPTI shared library.
@dll.bind
def cuptiProfilerDeviceSupported(pParams:c.POINTER[CUpti_Profiler_DeviceSupported_Params]) -> CUptiResult: ...
# Metric kind used by the host-side (profiler-host) metric evaluation API.
class enum_CUpti_MetricType(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_METRIC_TYPE_COUNTER = enum_CUpti_MetricType.define('CUPTI_METRIC_TYPE_COUNTER', 0)
CUPTI_METRIC_TYPE_RATIO = enum_CUpti_MetricType.define('CUPTI_METRIC_TYPE_RATIO', 1)
CUPTI_METRIC_TYPE_THROUGHPUT = enum_CUpti_MetricType.define('CUPTI_METRIC_TYPE_THROUGHPUT', 2)
CUPTI_METRIC_TYPE__COUNT = enum_CUpti_MetricType.define('CUPTI_METRIC_TYPE__COUNT', 3)  # count sentinel
CUpti_MetricType: TypeAlias = enum_CUpti_MetricType
# Which profiler backend a host object is configured for.
class enum_CUpti_ProfilerType(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_PROFILER_TYPE_RANGE_PROFILER = enum_CUpti_ProfilerType.define('CUPTI_PROFILER_TYPE_RANGE_PROFILER', 0)
CUPTI_PROFILER_TYPE_PM_SAMPLING = enum_CUpti_ProfilerType.define('CUPTI_PROFILER_TYPE_PM_SAMPLING', 1)
CUPTI_PROFILER_TYPE_PROFILER_INVALID = enum_CUpti_ProfilerType.define('CUPTI_PROFILER_TYPE_PROFILER_INVALID', 2)
CUpti_ProfilerType: TypeAlias = enum_CUpti_ProfilerType
# Opaque handle type: the struct body is never defined here, only pointed to.
class struct_CUpti_Profiler_Host_Object(ctypes.Structure): pass
CUpti_Profiler_Host_Object: TypeAlias = struct_CUpti_Profiler_Host_Object
@c.record
class struct_CUpti_Profiler_Host_Initialize_Params(c.Struct):
    # Parameter record for cuptiProfilerHostInitialize; Annotated offsets are
    # byte offsets matching the C struct layout.
    SIZE = 48
    structSize: Annotated[size_t, 0]
    pPriv: Annotated[ctypes.c_void_p, 8]
    profilerType: Annotated[CUpti_ProfilerType, 16]
    pChipName: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 24]
    pCounterAvailabilityImage: Annotated[c.POINTER[uint8_t], 32]
    pHostObject: Annotated[c.POINTER[CUpti_Profiler_Host_Object], 40]  # [out] session handle
CUpti_Profiler_Host_Initialize_Params: TypeAlias = struct_CUpti_Profiler_Host_Initialize_Params
@dll.bind
def cuptiProfilerHostInitialize(pParams:c.POINTER[CUpti_Profiler_Host_Initialize_Params]) -> CUptiResult: ...
@c.record
class struct_CUpti_Profiler_Host_Deinitialize_Params(c.Struct):
    # Parameter record for cuptiProfilerHostDeinitialize (releases a host session).
    SIZE = 24
    structSize: Annotated[size_t, 0]
    pPriv: Annotated[ctypes.c_void_p, 8]
    pHostObject: Annotated[c.POINTER[struct_CUpti_Profiler_Host_Object], 16]
CUpti_Profiler_Host_Deinitialize_Params: TypeAlias = struct_CUpti_Profiler_Host_Deinitialize_Params
@dll.bind
def cuptiProfilerHostDeinitialize(pParams:c.POINTER[CUpti_Profiler_Host_Deinitialize_Params]) -> CUptiResult: ...
@c.record
class struct_CUpti_Profiler_Host_GetSupportedChips_Params(c.Struct):
    # Parameter record for cuptiProfilerHostGetSupportedChips: enumerates chip
    # names via an array of C strings (ppChipNames / numChips).
    SIZE = 32
    structSize: Annotated[size_t, 0]
    pPriv: Annotated[ctypes.c_void_p, 8]
    numChips: Annotated[size_t, 16]
    ppChipNames: Annotated[c.POINTER[c.POINTER[Annotated[bytes, ctypes.c_char]]], 24]
CUpti_Profiler_Host_GetSupportedChips_Params: TypeAlias = struct_CUpti_Profiler_Host_GetSupportedChips_Params
@dll.bind
def cuptiProfilerHostGetSupportedChips(pParams:c.POINTER[CUpti_Profiler_Host_GetSupportedChips_Params]) -> CUptiResult: ...
@c.record
class struct_CUpti_Profiler_Host_GetBaseMetrics_Params(c.Struct):
    # Parameter record for cuptiProfilerHostGetBaseMetrics: lists the base
    # metric names of a given metric type for a host session.
    SIZE = 48
    structSize: Annotated[size_t, 0]
    pPriv: Annotated[ctypes.c_void_p, 8]
    pHostObject: Annotated[c.POINTER[struct_CUpti_Profiler_Host_Object], 16]
    metricType: Annotated[CUpti_MetricType, 24]
    ppMetricNames: Annotated[c.POINTER[c.POINTER[Annotated[bytes, ctypes.c_char]]], 32]
    numMetrics: Annotated[size_t, 40]
CUpti_Profiler_Host_GetBaseMetrics_Params: TypeAlias = struct_CUpti_Profiler_Host_GetBaseMetrics_Params
@dll.bind
def cuptiProfilerHostGetBaseMetrics(pParams:c.POINTER[CUpti_Profiler_Host_GetBaseMetrics_Params]) -> CUptiResult: ...
@c.record
class struct_CUpti_Profiler_Host_GetSubMetrics_Params(c.Struct):
    # Parameter record for cuptiProfilerHostGetSubMetrics: lists the
    # sub-metrics available under one base metric (pMetricName).
    SIZE = 56
    structSize: Annotated[size_t, 0]
    pPriv: Annotated[ctypes.c_void_p, 8]
    pHostObject: Annotated[c.POINTER[CUpti_Profiler_Host_Object], 16]
    metricType: Annotated[CUpti_MetricType, 24]
    pMetricName: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 32]
    numOfSubmetrics: Annotated[size_t, 40]
    ppSubMetrics: Annotated[c.POINTER[c.POINTER[Annotated[bytes, ctypes.c_char]]], 48]
CUpti_Profiler_Host_GetSubMetrics_Params: TypeAlias = struct_CUpti_Profiler_Host_GetSubMetrics_Params
@dll.bind
def cuptiProfilerHostGetSubMetrics(pParams:c.POINTER[CUpti_Profiler_Host_GetSubMetrics_Params]) -> CUptiResult: ...
@c.record
class struct_CUpti_Profiler_Host_GetMetricProperties_Params(c.Struct):
    # Parameter record for cuptiProfilerHostGetMetricProperties: description,
    # hardware unit and dimensional unit strings for one metric.
    SIZE = 64
    structSize: Annotated[size_t, 0]
    pPriv: Annotated[ctypes.c_void_p, 8]
    pHostObject: Annotated[c.POINTER[CUpti_Profiler_Host_Object], 16]
    pMetricName: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 24]
    pDescription: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 32]
    pHwUnit: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 40]
    pDimUnit: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 48]
    metricType: Annotated[CUpti_MetricType, 56]
CUpti_Profiler_Host_GetMetricProperties_Params: TypeAlias = struct_CUpti_Profiler_Host_GetMetricProperties_Params
@dll.bind
def cuptiProfilerHostGetMetricProperties(pParams:c.POINTER[CUpti_Profiler_Host_GetMetricProperties_Params]) -> CUptiResult: ...
@c.record
class struct_CUpti_Profiler_Host_GetRangeName_Params(c.Struct):
    # Parameter record for cuptiProfilerHostGetRangeName: resolves the name of
    # range `rangeIndex` inside a counter-data image, joined with `delimiter`.
    SIZE = 56
    structSize: Annotated[size_t, 0]
    pPriv: Annotated[ctypes.c_void_p, 8]
    pCounterDataImage: Annotated[c.POINTER[uint8_t], 16]
    counterDataImageSize: Annotated[size_t, 24]
    rangeIndex: Annotated[size_t, 32]
    delimiter: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 40]
    pRangeName: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 48]
CUpti_Profiler_Host_GetRangeName_Params: TypeAlias = struct_CUpti_Profiler_Host_GetRangeName_Params
@dll.bind
def cuptiProfilerHostGetRangeName(pParams:c.POINTER[CUpti_Profiler_Host_GetRangeName_Params]) -> CUptiResult: ...
@c.record
class struct_CUpti_Profiler_Host_EvaluateToGpuValues_Params(c.Struct):
    # Parameter record for cuptiProfilerHostEvaluateToGpuValues: evaluates the
    # named metrics for one range of a counter-data image into pMetricValues
    # (array of numMetrics doubles).
    SIZE = 72
    structSize: Annotated[size_t, 0]
    pPriv: Annotated[ctypes.c_void_p, 8]
    pHostObject: Annotated[c.POINTER[CUpti_Profiler_Host_Object], 16]
    pCounterDataImage: Annotated[c.POINTER[uint8_t], 24]
    counterDataImageSize: Annotated[size_t, 32]
    rangeIndex: Annotated[size_t, 40]
    ppMetricNames: Annotated[c.POINTER[c.POINTER[Annotated[bytes, ctypes.c_char]]], 48]
    numMetrics: Annotated[size_t, 56]
    pMetricValues: Annotated[c.POINTER[Annotated[float, ctypes.c_double]], 64]
CUpti_Profiler_Host_EvaluateToGpuValues_Params: TypeAlias = struct_CUpti_Profiler_Host_EvaluateToGpuValues_Params
@dll.bind
def cuptiProfilerHostEvaluateToGpuValues(pParams:c.POINTER[CUpti_Profiler_Host_EvaluateToGpuValues_Params]) -> CUptiResult: ...
@c.record
class struct_CUpti_Profiler_Host_ConfigAddMetrics_Params(c.Struct):
    # Parameter record for cuptiProfilerHostConfigAddMetrics: schedules the
    # named metrics into the session's config image.
    SIZE = 40
    structSize: Annotated[size_t, 0]
    pPriv: Annotated[ctypes.c_void_p, 8]
    pHostObject: Annotated[c.POINTER[struct_CUpti_Profiler_Host_Object], 16]
    ppMetricNames: Annotated[c.POINTER[c.POINTER[Annotated[bytes, ctypes.c_char]]], 24]
    numMetrics: Annotated[size_t, 32]
CUpti_Profiler_Host_ConfigAddMetrics_Params: TypeAlias = struct_CUpti_Profiler_Host_ConfigAddMetrics_Params
@dll.bind
def cuptiProfilerHostConfigAddMetrics(pParams:c.POINTER[CUpti_Profiler_Host_ConfigAddMetrics_Params]) -> CUptiResult: ...
@c.record
class struct_CUpti_Profiler_Host_GetConfigImageSize_Params(c.Struct):
    # Parameter record for cuptiProfilerHostGetConfigImageSize: queries how
    # many bytes the config image needs (typical size-then-fill pattern with
    # the GetConfigImage call below).
    SIZE = 32
    structSize: Annotated[size_t, 0]
    pPriv: Annotated[ctypes.c_void_p, 8]
    pHostObject: Annotated[c.POINTER[CUpti_Profiler_Host_Object], 16]
    configImageSize: Annotated[size_t, 24]
CUpti_Profiler_Host_GetConfigImageSize_Params: TypeAlias = struct_CUpti_Profiler_Host_GetConfigImageSize_Params
@dll.bind
def cuptiProfilerHostGetConfigImageSize(pParams:c.POINTER[CUpti_Profiler_Host_GetConfigImageSize_Params]) -> CUptiResult: ...
@c.record
class struct_CUpti_Profiler_Host_GetConfigImage_Params(c.Struct):
    # Parameter record for cuptiProfilerHostGetConfigImage: fills the
    # caller-allocated pConfigImage buffer of configImageSize bytes.
    SIZE = 40
    structSize: Annotated[size_t, 0]
    pPriv: Annotated[ctypes.c_void_p, 8]
    pHostObject: Annotated[c.POINTER[CUpti_Profiler_Host_Object], 16]
    configImageSize: Annotated[size_t, 24]
    pConfigImage: Annotated[c.POINTER[uint8_t], 32]
CUpti_Profiler_Host_GetConfigImage_Params: TypeAlias = struct_CUpti_Profiler_Host_GetConfigImage_Params
@dll.bind
def cuptiProfilerHostGetConfigImage(pParams:c.POINTER[CUpti_Profiler_Host_GetConfigImage_Params]) -> CUptiResult: ...
@c.record
class struct_CUpti_Profiler_Host_GetNumOfPasses_Params(c.Struct):
    # Parameter record for cuptiProfilerHostGetNumOfPasses: number of replay
    # passes a given config image requires.
    SIZE = 40
    structSize: Annotated[size_t, 0]
    pPriv: Annotated[ctypes.c_void_p, 8]
    configImageSize: Annotated[size_t, 16]
    pConfigImage: Annotated[c.POINTER[uint8_t], 24]
    numOfPasses: Annotated[size_t, 32]
CUpti_Profiler_Host_GetNumOfPasses_Params: TypeAlias = struct_CUpti_Profiler_Host_GetNumOfPasses_Params
@dll.bind
def cuptiProfilerHostGetNumOfPasses(pParams:c.POINTER[CUpti_Profiler_Host_GetNumOfPasses_Params]) -> CUptiResult: ...
@c.record
class struct_CUpti_Profiler_Host_GetMaxNumHardwareMetricsPerPass_Params(c.Struct):
    # Parameter record for cuptiProfilerHostGetMaxNumHardwareMetricsPerPass.
    SIZE = 48
    structSize: Annotated[size_t, 0]
    pPriv: Annotated[ctypes.c_void_p, 8]
    profilerType: Annotated[CUpti_ProfilerType, 16]
    pChipName: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 24]
    pCounterAvailabilityImage: Annotated[c.POINTER[uint8_t], 32]
    maxMetricsPerPass: Annotated[size_t, 40]
CUpti_Profiler_Host_GetMaxNumHardwareMetricsPerPass_Params: TypeAlias = struct_CUpti_Profiler_Host_GetMaxNumHardwareMetricsPerPass_Params
@dll.bind
def cuptiProfilerHostGetMaxNumHardwareMetricsPerPass(pParams:c.POINTER[CUpti_Profiler_Host_GetMaxNumHardwareMetricsPerPass_Params]) -> CUptiResult: ...
# Opaque handle to a CUPTI PM-sampling session; only ever used via pointer.
class struct_CUpti_PmSampling_Object(ctypes.Structure): pass
CUpti_PmSampling_Object: TypeAlias = struct_CUpti_PmSampling_Object
# How PM-sampling triggers are generated (GPU sysclk cycles vs. time interval).
class enum_CUpti_PmSampling_TriggerMode(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_PM_SAMPLING_TRIGGER_MODE_GPU_SYSCLK_INTERVAL = enum_CUpti_PmSampling_TriggerMode.define('CUPTI_PM_SAMPLING_TRIGGER_MODE_GPU_SYSCLK_INTERVAL', 0)
CUPTI_PM_SAMPLING_TRIGGER_MODE_GPU_TIME_INTERVAL = enum_CUpti_PmSampling_TriggerMode.define('CUPTI_PM_SAMPLING_TRIGGER_MODE_GPU_TIME_INTERVAL', 1)
CUPTI_PM_SAMPLING_TRIGGER_MODE_COUNT = enum_CUpti_PmSampling_TriggerMode.define('CUPTI_PM_SAMPLING_TRIGGER_MODE_COUNT', 2)
CUpti_PmSampling_TriggerMode: TypeAlias = enum_CUpti_PmSampling_TriggerMode
# Why cuptiPmSamplingDecodeData stopped decoding.
class enum_CUpti_PmSampling_DecodeStopReason(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_PM_SAMPLING_DECODE_STOP_REASON_OTHER = enum_CUpti_PmSampling_DecodeStopReason.define('CUPTI_PM_SAMPLING_DECODE_STOP_REASON_OTHER', 0)
CUPTI_PM_SAMPLING_DECODE_STOP_REASON_COUNTER_DATA_FULL = enum_CUpti_PmSampling_DecodeStopReason.define('CUPTI_PM_SAMPLING_DECODE_STOP_REASON_COUNTER_DATA_FULL', 1)
CUPTI_PM_SAMPLING_DECODE_STOP_REASON_END_OF_RECORDS = enum_CUpti_PmSampling_DecodeStopReason.define('CUPTI_PM_SAMPLING_DECODE_STOP_REASON_END_OF_RECORDS', 2)
CUPTI_PM_SAMPLING_DECODE_STOP_REASON_COUNT = enum_CUpti_PmSampling_DecodeStopReason.define('CUPTI_PM_SAMPLING_DECODE_STOP_REASON_COUNT', 3)
CUpti_PmSampling_DecodeStopReason: TypeAlias = enum_CUpti_PmSampling_DecodeStopReason
# Policy when the hardware sample buffer fills (keep oldest vs. latest records).
class enum_CUpti_PmSampling_HardwareBuffer_AppendMode(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUPTI_PM_SAMPLING_HARDWARE_BUFFER_APPEND_MODE_KEEP_OLDEST = enum_CUpti_PmSampling_HardwareBuffer_AppendMode.define('CUPTI_PM_SAMPLING_HARDWARE_BUFFER_APPEND_MODE_KEEP_OLDEST', 0)
CUPTI_PM_SAMPLING_HARDWARE_BUFFER_APPEND_MODE_KEEP_LATEST = enum_CUpti_PmSampling_HardwareBuffer_AppendMode.define('CUPTI_PM_SAMPLING_HARDWARE_BUFFER_APPEND_MODE_KEEP_LATEST', 1)
CUpti_PmSampling_HardwareBuffer_AppendMode: TypeAlias = enum_CUpti_PmSampling_HardwareBuffer_AppendMode
@c.record
class struct_CUpti_PmSampling_SetConfig_Params(c.Struct):
    # Parameter record for cuptiPmSamplingSetConfig: applies a config image,
    # hardware-buffer size, sampling interval and trigger/append modes to a
    # PM-sampling session.
    SIZE = 64
    structSize: Annotated[size_t, 0]
    pPriv: Annotated[ctypes.c_void_p, 8]
    pPmSamplingObject: Annotated[c.POINTER[CUpti_PmSampling_Object], 16]
    configSize: Annotated[size_t, 24]
    pConfig: Annotated[c.POINTER[uint8_t], 32]
    hardwareBufferSize: Annotated[size_t, 40]
    samplingInterval: Annotated[uint64_t, 48]
    triggerMode: Annotated[CUpti_PmSampling_TriggerMode, 56]
    hwBufferAppendMode: Annotated[CUpti_PmSampling_HardwareBuffer_AppendMode, 60]
CUpti_PmSampling_SetConfig_Params: TypeAlias = struct_CUpti_PmSampling_SetConfig_Params
@dll.bind
def cuptiPmSamplingSetConfig(pParams:c.POINTER[CUpti_PmSampling_SetConfig_Params]) -> CUptiResult: ...
@c.record
class struct_CUpti_PmSampling_Enable_Params(c.Struct):
    # Parameter record for cuptiPmSamplingEnable: creates/enables a sampling
    # object for the device at deviceIndex (pPmSamplingObject is the [out] handle).
    SIZE = 32
    structSize: Annotated[size_t, 0]
    pPriv: Annotated[ctypes.c_void_p, 8]
    deviceIndex: Annotated[size_t, 16]
    pPmSamplingObject: Annotated[c.POINTER[CUpti_PmSampling_Object], 24]
CUpti_PmSampling_Enable_Params: TypeAlias = struct_CUpti_PmSampling_Enable_Params
@dll.bind
def cuptiPmSamplingEnable(pParams:c.POINTER[CUpti_PmSampling_Enable_Params]) -> CUptiResult: ...
@c.record
class struct_CUpti_PmSampling_Disable_Params(c.Struct):
    # Parameter record for cuptiPmSamplingDisable.
    SIZE = 24
    structSize: Annotated[size_t, 0]
    pPriv: Annotated[ctypes.c_void_p, 8]
    pPmSamplingObject: Annotated[c.POINTER[CUpti_PmSampling_Object], 16]
CUpti_PmSampling_Disable_Params: TypeAlias = struct_CUpti_PmSampling_Disable_Params
@dll.bind
def cuptiPmSamplingDisable(pParams:c.POINTER[CUpti_PmSampling_Disable_Params]) -> CUptiResult: ...
@c.record
class struct_CUpti_PmSampling_Start_Params(c.Struct):
    # Parameter record for cuptiPmSamplingStart.
    SIZE = 24
    structSize: Annotated[size_t, 0]
    pPriv: Annotated[ctypes.c_void_p, 8]
    pPmSamplingObject: Annotated[c.POINTER[CUpti_PmSampling_Object], 16]
CUpti_PmSampling_Start_Params: TypeAlias = struct_CUpti_PmSampling_Start_Params
@dll.bind
def cuptiPmSamplingStart(pParams:c.POINTER[CUpti_PmSampling_Start_Params]) -> CUptiResult: ...
@c.record
class struct_CUpti_PmSampling_Stop_Params(c.Struct):
    # Parameter record for cuptiPmSamplingStop.
    SIZE = 24
    structSize: Annotated[size_t, 0]
    pPriv: Annotated[ctypes.c_void_p, 8]
    pPmSamplingObject: Annotated[c.POINTER[CUpti_PmSampling_Object], 16]
CUpti_PmSampling_Stop_Params: TypeAlias = struct_CUpti_PmSampling_Stop_Params
@dll.bind
def cuptiPmSamplingStop(pParams:c.POINTER[CUpti_PmSampling_Stop_Params]) -> CUptiResult: ...
@c.record
class struct_CUpti_PmSampling_DecodeData_Params(c.Struct):
    # Parameter record for cuptiPmSamplingDecodeData: decodes hardware samples
    # into a counter-data image; decodeStopReason/overflow report the outcome.
    SIZE = 48
    structSize: Annotated[size_t, 0]
    pPriv: Annotated[ctypes.c_void_p, 8]
    pPmSamplingObject: Annotated[c.POINTER[CUpti_PmSampling_Object], 16]
    pCounterDataImage: Annotated[c.POINTER[uint8_t], 24]
    counterDataImageSize: Annotated[size_t, 32]
    decodeStopReason: Annotated[CUpti_PmSampling_DecodeStopReason, 40]
    overflow: Annotated[uint8_t, 44]
CUpti_PmSampling_DecodeData_Params: TypeAlias = struct_CUpti_PmSampling_DecodeData_Params
@dll.bind
def cuptiPmSamplingDecodeData(pParams:c.POINTER[CUpti_PmSampling_DecodeData_Params]) -> CUptiResult: ...
@c.record
class struct_CUpti_PmSampling_GetCounterAvailability_Params(c.Struct):
    # Parameter record for cuptiPmSamplingGetCounterAvailability (size-query /
    # fill pattern on pCounterAvailabilityImage).
    SIZE = 40
    structSize: Annotated[size_t, 0]
    pPriv: Annotated[ctypes.c_void_p, 8]
    deviceIndex: Annotated[size_t, 16]
    counterAvailabilityImageSize: Annotated[size_t, 24]
    pCounterAvailabilityImage: Annotated[c.POINTER[uint8_t], 32]
CUpti_PmSampling_GetCounterAvailability_Params: TypeAlias = struct_CUpti_PmSampling_GetCounterAvailability_Params
@dll.bind
def cuptiPmSamplingGetCounterAvailability(pParams:c.POINTER[CUpti_PmSampling_GetCounterAvailability_Params]) -> CUptiResult: ...
@c.record
class struct_CUpti_PmSampling_GetCounterDataSize_Params(c.Struct):
    # Parameter record for cuptiPmSamplingGetCounterDataSize: required buffer
    # size for the given metrics and maximum sample count.
    SIZE = 56
    structSize: Annotated[size_t, 0]
    pPriv: Annotated[ctypes.c_void_p, 8]
    pPmSamplingObject: Annotated[c.POINTER[CUpti_PmSampling_Object], 16]
    pMetricNames: Annotated[c.POINTER[c.POINTER[Annotated[bytes, ctypes.c_char]]], 24]
    numMetrics: Annotated[size_t, 32]
    maxSamples: Annotated[uint32_t, 40]
    counterDataSize: Annotated[size_t, 48]
CUpti_PmSampling_GetCounterDataSize_Params: TypeAlias = struct_CUpti_PmSampling_GetCounterDataSize_Params
@dll.bind
def cuptiPmSamplingGetCounterDataSize(pParams:c.POINTER[CUpti_PmSampling_GetCounterDataSize_Params]) -> CUptiResult: ...
@c.record
class struct_CUpti_PmSampling_CounterDataImage_Initialize_Params(c.Struct):
    # Parameter record for cuptiPmSamplingCounterDataImageInitialize: prepares
    # a caller-allocated counter-data buffer for decoding.
    SIZE = 40
    structSize: Annotated[size_t, 0]
    pPriv: Annotated[ctypes.c_void_p, 8]
    pPmSamplingObject: Annotated[c.POINTER[CUpti_PmSampling_Object], 16]
    counterDataSize: Annotated[size_t, 24]
    pCounterData: Annotated[c.POINTER[uint8_t], 32]
CUpti_PmSampling_CounterDataImage_Initialize_Params: TypeAlias = struct_CUpti_PmSampling_CounterDataImage_Initialize_Params
@dll.bind
def cuptiPmSamplingCounterDataImageInitialize(pParams:c.POINTER[CUpti_PmSampling_CounterDataImage_Initialize_Params]) -> CUptiResult: ...
@c.record
class struct_CUpti_PmSampling_GetCounterDataInfo_Params(c.Struct):
    # Parameter record for cuptiPmSamplingGetCounterDataInfo: sample counts
    # (total / populated / completed) held in a counter-data image.
    SIZE = 56
    structSize: Annotated[size_t, 0]
    pPriv: Annotated[ctypes.c_void_p, 8]
    pCounterDataImage: Annotated[c.POINTER[uint8_t], 16]
    counterDataImageSize: Annotated[size_t, 24]
    numTotalSamples: Annotated[size_t, 32]
    numPopulatedSamples: Annotated[size_t, 40]
    numCompletedSamples: Annotated[size_t, 48]
CUpti_PmSampling_GetCounterDataInfo_Params: TypeAlias = struct_CUpti_PmSampling_GetCounterDataInfo_Params
@dll.bind
def cuptiPmSamplingGetCounterDataInfo(pParams:c.POINTER[CUpti_PmSampling_GetCounterDataInfo_Params]) -> CUptiResult: ...
@c.record
class struct_CUpti_PmSampling_CounterData_GetSampleInfo_Params(c.Struct):
    # Parameter record for cuptiPmSamplingCounterDataGetSampleInfo: start/end
    # timestamps of the sample at sampleIndex.
    SIZE = 64
    structSize: Annotated[size_t, 0]
    pPriv: Annotated[ctypes.c_void_p, 8]
    pPmSamplingObject: Annotated[c.POINTER[CUpti_PmSampling_Object], 16]
    pCounterDataImage: Annotated[c.POINTER[uint8_t], 24]
    counterDataImageSize: Annotated[size_t, 32]
    sampleIndex: Annotated[size_t, 40]
    startTimestamp: Annotated[uint64_t, 48]
    endTimestamp: Annotated[uint64_t, 56]
CUpti_PmSampling_CounterData_GetSampleInfo_Params: TypeAlias = struct_CUpti_PmSampling_CounterData_GetSampleInfo_Params
@dll.bind
def cuptiPmSamplingCounterDataGetSampleInfo(pParams:c.POINTER[CUpti_PmSampling_CounterData_GetSampleInfo_Params]) -> CUptiResult: ...
@c.record
class struct_cuGetErrorString_params_st(c.Struct):
    # Callback-parameter record for cuGetErrorString (CUPTI driver-API
    # callback data): the error code and the [out] C-string pointer.
    SIZE = 16
    error: Annotated[CUresult, 0]
    pStr: Annotated[c.POINTER[c.POINTER[Annotated[bytes, ctypes.c_char]]], 8]
# CUDA driver return codes (CUresult); values mirror cuda.h. Gaps in the
# numbering are intentional and match the C header.
class enum_cudaError_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUDA_SUCCESS = enum_cudaError_enum.define('CUDA_SUCCESS', 0)
CUDA_ERROR_INVALID_VALUE = enum_cudaError_enum.define('CUDA_ERROR_INVALID_VALUE', 1)
CUDA_ERROR_OUT_OF_MEMORY = enum_cudaError_enum.define('CUDA_ERROR_OUT_OF_MEMORY', 2)
CUDA_ERROR_NOT_INITIALIZED = enum_cudaError_enum.define('CUDA_ERROR_NOT_INITIALIZED', 3)
CUDA_ERROR_DEINITIALIZED = enum_cudaError_enum.define('CUDA_ERROR_DEINITIALIZED', 4)
CUDA_ERROR_PROFILER_DISABLED = enum_cudaError_enum.define('CUDA_ERROR_PROFILER_DISABLED', 5)
CUDA_ERROR_PROFILER_NOT_INITIALIZED = enum_cudaError_enum.define('CUDA_ERROR_PROFILER_NOT_INITIALIZED', 6)
CUDA_ERROR_PROFILER_ALREADY_STARTED = enum_cudaError_enum.define('CUDA_ERROR_PROFILER_ALREADY_STARTED', 7)
CUDA_ERROR_PROFILER_ALREADY_STOPPED = enum_cudaError_enum.define('CUDA_ERROR_PROFILER_ALREADY_STOPPED', 8)
CUDA_ERROR_STUB_LIBRARY = enum_cudaError_enum.define('CUDA_ERROR_STUB_LIBRARY', 34)
CUDA_ERROR_DEVICE_UNAVAILABLE = enum_cudaError_enum.define('CUDA_ERROR_DEVICE_UNAVAILABLE', 46)
CUDA_ERROR_NO_DEVICE = enum_cudaError_enum.define('CUDA_ERROR_NO_DEVICE', 100)
CUDA_ERROR_INVALID_DEVICE = enum_cudaError_enum.define('CUDA_ERROR_INVALID_DEVICE', 101)
CUDA_ERROR_DEVICE_NOT_LICENSED = enum_cudaError_enum.define('CUDA_ERROR_DEVICE_NOT_LICENSED', 102)
CUDA_ERROR_INVALID_IMAGE = enum_cudaError_enum.define('CUDA_ERROR_INVALID_IMAGE', 200)
CUDA_ERROR_INVALID_CONTEXT = enum_cudaError_enum.define('CUDA_ERROR_INVALID_CONTEXT', 201)
CUDA_ERROR_CONTEXT_ALREADY_CURRENT = enum_cudaError_enum.define('CUDA_ERROR_CONTEXT_ALREADY_CURRENT', 202)
CUDA_ERROR_MAP_FAILED = enum_cudaError_enum.define('CUDA_ERROR_MAP_FAILED', 205)
CUDA_ERROR_UNMAP_FAILED = enum_cudaError_enum.define('CUDA_ERROR_UNMAP_FAILED', 206)
CUDA_ERROR_ARRAY_IS_MAPPED = enum_cudaError_enum.define('CUDA_ERROR_ARRAY_IS_MAPPED', 207)
CUDA_ERROR_ALREADY_MAPPED = enum_cudaError_enum.define('CUDA_ERROR_ALREADY_MAPPED', 208)
CUDA_ERROR_NO_BINARY_FOR_GPU = enum_cudaError_enum.define('CUDA_ERROR_NO_BINARY_FOR_GPU', 209)
CUDA_ERROR_ALREADY_ACQUIRED = enum_cudaError_enum.define('CUDA_ERROR_ALREADY_ACQUIRED', 210)
CUDA_ERROR_NOT_MAPPED = enum_cudaError_enum.define('CUDA_ERROR_NOT_MAPPED', 211)
CUDA_ERROR_NOT_MAPPED_AS_ARRAY = enum_cudaError_enum.define('CUDA_ERROR_NOT_MAPPED_AS_ARRAY', 212)
CUDA_ERROR_NOT_MAPPED_AS_POINTER = enum_cudaError_enum.define('CUDA_ERROR_NOT_MAPPED_AS_POINTER', 213)
CUDA_ERROR_ECC_UNCORRECTABLE = enum_cudaError_enum.define('CUDA_ERROR_ECC_UNCORRECTABLE', 214)
CUDA_ERROR_UNSUPPORTED_LIMIT = enum_cudaError_enum.define('CUDA_ERROR_UNSUPPORTED_LIMIT', 215)
CUDA_ERROR_CONTEXT_ALREADY_IN_USE = enum_cudaError_enum.define('CUDA_ERROR_CONTEXT_ALREADY_IN_USE', 216)
CUDA_ERROR_PEER_ACCESS_UNSUPPORTED = enum_cudaError_enum.define('CUDA_ERROR_PEER_ACCESS_UNSUPPORTED', 217)
CUDA_ERROR_INVALID_PTX = enum_cudaError_enum.define('CUDA_ERROR_INVALID_PTX', 218)
CUDA_ERROR_INVALID_GRAPHICS_CONTEXT = enum_cudaError_enum.define('CUDA_ERROR_INVALID_GRAPHICS_CONTEXT', 219)
CUDA_ERROR_NVLINK_UNCORRECTABLE = enum_cudaError_enum.define('CUDA_ERROR_NVLINK_UNCORRECTABLE', 220)
CUDA_ERROR_JIT_COMPILER_NOT_FOUND = enum_cudaError_enum.define('CUDA_ERROR_JIT_COMPILER_NOT_FOUND', 221)
CUDA_ERROR_UNSUPPORTED_PTX_VERSION = enum_cudaError_enum.define('CUDA_ERROR_UNSUPPORTED_PTX_VERSION', 222)
CUDA_ERROR_JIT_COMPILATION_DISABLED = enum_cudaError_enum.define('CUDA_ERROR_JIT_COMPILATION_DISABLED', 223)
CUDA_ERROR_UNSUPPORTED_EXEC_AFFINITY = enum_cudaError_enum.define('CUDA_ERROR_UNSUPPORTED_EXEC_AFFINITY', 224)
CUDA_ERROR_UNSUPPORTED_DEVSIDE_SYNC = enum_cudaError_enum.define('CUDA_ERROR_UNSUPPORTED_DEVSIDE_SYNC', 225)
CUDA_ERROR_CONTAINED = enum_cudaError_enum.define('CUDA_ERROR_CONTAINED', 226)
CUDA_ERROR_INVALID_SOURCE = enum_cudaError_enum.define('CUDA_ERROR_INVALID_SOURCE', 300)
CUDA_ERROR_FILE_NOT_FOUND = enum_cudaError_enum.define('CUDA_ERROR_FILE_NOT_FOUND', 301)
CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND = enum_cudaError_enum.define('CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND', 302)
CUDA_ERROR_SHARED_OBJECT_INIT_FAILED = enum_cudaError_enum.define('CUDA_ERROR_SHARED_OBJECT_INIT_FAILED', 303)
CUDA_ERROR_OPERATING_SYSTEM = enum_cudaError_enum.define('CUDA_ERROR_OPERATING_SYSTEM', 304)
CUDA_ERROR_INVALID_HANDLE = enum_cudaError_enum.define('CUDA_ERROR_INVALID_HANDLE', 400)
CUDA_ERROR_ILLEGAL_STATE = enum_cudaError_enum.define('CUDA_ERROR_ILLEGAL_STATE', 401)
CUDA_ERROR_LOSSY_QUERY = enum_cudaError_enum.define('CUDA_ERROR_LOSSY_QUERY', 402)
CUDA_ERROR_NOT_FOUND = enum_cudaError_enum.define('CUDA_ERROR_NOT_FOUND', 500)
CUDA_ERROR_NOT_READY = enum_cudaError_enum.define('CUDA_ERROR_NOT_READY', 600)
CUDA_ERROR_ILLEGAL_ADDRESS = enum_cudaError_enum.define('CUDA_ERROR_ILLEGAL_ADDRESS', 700)
CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES = enum_cudaError_enum.define('CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES', 701)
CUDA_ERROR_LAUNCH_TIMEOUT = enum_cudaError_enum.define('CUDA_ERROR_LAUNCH_TIMEOUT', 702)
CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING = enum_cudaError_enum.define('CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING', 703)
CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED = enum_cudaError_enum.define('CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED', 704)
CUDA_ERROR_PEER_ACCESS_NOT_ENABLED = enum_cudaError_enum.define('CUDA_ERROR_PEER_ACCESS_NOT_ENABLED', 705)
CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE = enum_cudaError_enum.define('CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE', 708)
CUDA_ERROR_CONTEXT_IS_DESTROYED = enum_cudaError_enum.define('CUDA_ERROR_CONTEXT_IS_DESTROYED', 709)
CUDA_ERROR_ASSERT = enum_cudaError_enum.define('CUDA_ERROR_ASSERT', 710)
CUDA_ERROR_TOO_MANY_PEERS = enum_cudaError_enum.define('CUDA_ERROR_TOO_MANY_PEERS', 711)
CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED = enum_cudaError_enum.define('CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED', 712)
CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED = enum_cudaError_enum.define('CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED', 713)
CUDA_ERROR_HARDWARE_STACK_ERROR = enum_cudaError_enum.define('CUDA_ERROR_HARDWARE_STACK_ERROR', 714)
CUDA_ERROR_ILLEGAL_INSTRUCTION = enum_cudaError_enum.define('CUDA_ERROR_ILLEGAL_INSTRUCTION', 715)
CUDA_ERROR_MISALIGNED_ADDRESS = enum_cudaError_enum.define('CUDA_ERROR_MISALIGNED_ADDRESS', 716)
CUDA_ERROR_INVALID_ADDRESS_SPACE = enum_cudaError_enum.define('CUDA_ERROR_INVALID_ADDRESS_SPACE', 717)
CUDA_ERROR_INVALID_PC = enum_cudaError_enum.define('CUDA_ERROR_INVALID_PC', 718)
CUDA_ERROR_LAUNCH_FAILED = enum_cudaError_enum.define('CUDA_ERROR_LAUNCH_FAILED', 719)
CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE = enum_cudaError_enum.define('CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE', 720)
CUDA_ERROR_TENSOR_MEMORY_LEAK = enum_cudaError_enum.define('CUDA_ERROR_TENSOR_MEMORY_LEAK', 721)
CUDA_ERROR_NOT_PERMITTED = enum_cudaError_enum.define('CUDA_ERROR_NOT_PERMITTED', 800)
CUDA_ERROR_NOT_SUPPORTED = enum_cudaError_enum.define('CUDA_ERROR_NOT_SUPPORTED', 801)
CUDA_ERROR_SYSTEM_NOT_READY = enum_cudaError_enum.define('CUDA_ERROR_SYSTEM_NOT_READY', 802)
CUDA_ERROR_SYSTEM_DRIVER_MISMATCH = enum_cudaError_enum.define('CUDA_ERROR_SYSTEM_DRIVER_MISMATCH', 803)
CUDA_ERROR_COMPAT_NOT_SUPPORTED_ON_DEVICE = enum_cudaError_enum.define('CUDA_ERROR_COMPAT_NOT_SUPPORTED_ON_DEVICE', 804)
CUDA_ERROR_MPS_CONNECTION_FAILED = enum_cudaError_enum.define('CUDA_ERROR_MPS_CONNECTION_FAILED', 805)
CUDA_ERROR_MPS_RPC_FAILURE = enum_cudaError_enum.define('CUDA_ERROR_MPS_RPC_FAILURE', 806)
CUDA_ERROR_MPS_SERVER_NOT_READY = enum_cudaError_enum.define('CUDA_ERROR_MPS_SERVER_NOT_READY', 807)
CUDA_ERROR_MPS_MAX_CLIENTS_REACHED = enum_cudaError_enum.define('CUDA_ERROR_MPS_MAX_CLIENTS_REACHED', 808)
CUDA_ERROR_MPS_MAX_CONNECTIONS_REACHED = enum_cudaError_enum.define('CUDA_ERROR_MPS_MAX_CONNECTIONS_REACHED', 809)
CUDA_ERROR_MPS_CLIENT_TERMINATED = enum_cudaError_enum.define('CUDA_ERROR_MPS_CLIENT_TERMINATED', 810)
CUDA_ERROR_CDP_NOT_SUPPORTED = enum_cudaError_enum.define('CUDA_ERROR_CDP_NOT_SUPPORTED', 811)
CUDA_ERROR_CDP_VERSION_MISMATCH = enum_cudaError_enum.define('CUDA_ERROR_CDP_VERSION_MISMATCH', 812)
CUDA_ERROR_STREAM_CAPTURE_UNSUPPORTED = enum_cudaError_enum.define('CUDA_ERROR_STREAM_CAPTURE_UNSUPPORTED', 900)
CUDA_ERROR_STREAM_CAPTURE_INVALIDATED = enum_cudaError_enum.define('CUDA_ERROR_STREAM_CAPTURE_INVALIDATED', 901)
CUDA_ERROR_STREAM_CAPTURE_MERGE = enum_cudaError_enum.define('CUDA_ERROR_STREAM_CAPTURE_MERGE', 902)
CUDA_ERROR_STREAM_CAPTURE_UNMATCHED = enum_cudaError_enum.define('CUDA_ERROR_STREAM_CAPTURE_UNMATCHED', 903)
CUDA_ERROR_STREAM_CAPTURE_UNJOINED = enum_cudaError_enum.define('CUDA_ERROR_STREAM_CAPTURE_UNJOINED', 904)
CUDA_ERROR_STREAM_CAPTURE_ISOLATION = enum_cudaError_enum.define('CUDA_ERROR_STREAM_CAPTURE_ISOLATION', 905)
CUDA_ERROR_STREAM_CAPTURE_IMPLICIT = enum_cudaError_enum.define('CUDA_ERROR_STREAM_CAPTURE_IMPLICIT', 906)
CUDA_ERROR_CAPTURED_EVENT = enum_cudaError_enum.define('CUDA_ERROR_CAPTURED_EVENT', 907)
CUDA_ERROR_STREAM_CAPTURE_WRONG_THREAD = enum_cudaError_enum.define('CUDA_ERROR_STREAM_CAPTURE_WRONG_THREAD', 908)
CUDA_ERROR_TIMEOUT = enum_cudaError_enum.define('CUDA_ERROR_TIMEOUT', 909)
CUDA_ERROR_GRAPH_EXEC_UPDATE_FAILURE = enum_cudaError_enum.define('CUDA_ERROR_GRAPH_EXEC_UPDATE_FAILURE', 910)
CUDA_ERROR_EXTERNAL_DEVICE = enum_cudaError_enum.define('CUDA_ERROR_EXTERNAL_DEVICE', 911)
CUDA_ERROR_INVALID_CLUSTER_SIZE = enum_cudaError_enum.define('CUDA_ERROR_INVALID_CLUSTER_SIZE', 912)
CUDA_ERROR_FUNCTION_NOT_LOADED = enum_cudaError_enum.define('CUDA_ERROR_FUNCTION_NOT_LOADED', 913)
CUDA_ERROR_INVALID_RESOURCE_TYPE = enum_cudaError_enum.define('CUDA_ERROR_INVALID_RESOURCE_TYPE', 914)
CUDA_ERROR_INVALID_RESOURCE_CONFIGURATION = enum_cudaError_enum.define('CUDA_ERROR_INVALID_RESOURCE_CONFIGURATION', 915)
CUDA_ERROR_KEY_ROTATION = enum_cudaError_enum.define('CUDA_ERROR_KEY_ROTATION', 916)
CUDA_ERROR_UNKNOWN = enum_cudaError_enum.define('CUDA_ERROR_UNKNOWN', 999)
CUresult: TypeAlias = enum_cudaError_enum
cuGetErrorString_params: TypeAlias = struct_cuGetErrorString_params_st
@c.record
class struct_cuGetErrorName_params_st(c.Struct):
    # Callback-parameter record for cuGetErrorName.
    SIZE = 16
    error: Annotated[CUresult, 0]
    pStr: Annotated[c.POINTER[c.POINTER[Annotated[bytes, ctypes.c_char]]], 8]
cuGetErrorName_params: TypeAlias = struct_cuGetErrorName_params_st
@c.record
class struct_cuInit_params_st(c.Struct):
    # Callback-parameter record for cuInit.
    SIZE = 4
    Flags: Annotated[Annotated[int, ctypes.c_uint32], 0]
cuInit_params: TypeAlias = struct_cuInit_params_st
@c.record
class struct_cuDriverGetVersion_params_st(c.Struct):
    # Callback-parameter record for cuDriverGetVersion.
    SIZE = 8
    driverVersion: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 0]
cuDriverGetVersion_params: TypeAlias = struct_cuDriverGetVersion_params_st
@c.record
class struct_cuDeviceGet_params_st(c.Struct):
    # Callback-parameter record for cuDeviceGet.
    SIZE = 16
    device: Annotated[c.POINTER[CUdevice], 0]
    ordinal: Annotated[Annotated[int, ctypes.c_int32], 8]
cuDeviceGet_params: TypeAlias = struct_cuDeviceGet_params_st
@c.record
class struct_cuDeviceGetCount_params_st(c.Struct):
    # Callback-parameter record for cuDeviceGetCount.
    SIZE = 8
    count: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 0]
cuDeviceGetCount_params: TypeAlias = struct_cuDeviceGetCount_params_st
@c.record
class struct_cuDeviceGetName_params_st(c.Struct):
    # Callback-parameter record for cuDeviceGetName. Field names (including
    # `len`, which shadows the builtin) must match the C struct and are
    # preserved as generated.
    SIZE = 16
    name: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 0]
    len: Annotated[Annotated[int, ctypes.c_int32], 8]
    dev: Annotated[CUdevice, 12]
cuDeviceGetName_params: TypeAlias = struct_cuDeviceGetName_params_st
@c.record
class struct_cuDeviceGetUuid_params_st(c.Struct):
    # Callback-parameter record for cuDeviceGetUuid.
    SIZE = 16
    uuid: Annotated[c.POINTER[CUuuid], 0]
    dev: Annotated[CUdevice, 8]
cuDeviceGetUuid_params: TypeAlias = struct_cuDeviceGetUuid_params_st
@c.record
class struct_cuDeviceGetUuid_v2_params_st(c.Struct):
    # Callback-parameter record for cuDeviceGetUuid_v2 (same layout as v1).
    SIZE = 16
    uuid: Annotated[c.POINTER[CUuuid], 0]
    dev: Annotated[CUdevice, 8]
cuDeviceGetUuid_v2_params: TypeAlias = struct_cuDeviceGetUuid_v2_params_st
@c.record
class struct_cuDeviceGetLuid_params_st(c.Struct):
    # Callback-parameter record for cuDeviceGetLuid.
    SIZE = 24
    luid: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 0]
    deviceNodeMask: Annotated[c.POINTER[Annotated[int, ctypes.c_uint32]], 8]
    dev: Annotated[CUdevice, 16]
cuDeviceGetLuid_params: TypeAlias = struct_cuDeviceGetLuid_params_st
@c.record
class struct_cuDeviceTotalMem_v2_params_st(c.Struct):
    # Callback-parameter record for cuDeviceTotalMem_v2. The `bytes` field
    # name shadows the builtin but must match the C struct; kept as generated.
    SIZE = 16
    bytes: Annotated[c.POINTER[size_t], 0]
    dev: Annotated[CUdevice, 8]
cuDeviceTotalMem_v2_params: TypeAlias = struct_cuDeviceTotalMem_v2_params_st
@c.record
class struct_cuDeviceGetTexture1DLinearMaxWidth_params_st(c.Struct):
    # Callback-parameter record for cuDeviceGetTexture1DLinearMaxWidth.
    SIZE = 24
    maxWidthInElements: Annotated[c.POINTER[size_t], 0]
    format: Annotated[CUarray_format, 8]
    numChannels: Annotated[Annotated[int, ctypes.c_uint32], 12]
    dev: Annotated[CUdevice, 16]
# CUDA array element formats (CUarray_format); values mirror cuda.h and are
# intentionally non-contiguous.
class enum_CUarray_format_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_AD_FORMAT_UNSIGNED_INT8 = enum_CUarray_format_enum.define('CU_AD_FORMAT_UNSIGNED_INT8', 1)
CU_AD_FORMAT_UNSIGNED_INT16 = enum_CUarray_format_enum.define('CU_AD_FORMAT_UNSIGNED_INT16', 2)
CU_AD_FORMAT_UNSIGNED_INT32 = enum_CUarray_format_enum.define('CU_AD_FORMAT_UNSIGNED_INT32', 3)
CU_AD_FORMAT_SIGNED_INT8 = enum_CUarray_format_enum.define('CU_AD_FORMAT_SIGNED_INT8', 8)
CU_AD_FORMAT_SIGNED_INT16 = enum_CUarray_format_enum.define('CU_AD_FORMAT_SIGNED_INT16', 9)
CU_AD_FORMAT_SIGNED_INT32 = enum_CUarray_format_enum.define('CU_AD_FORMAT_SIGNED_INT32', 10)
CU_AD_FORMAT_HALF = enum_CUarray_format_enum.define('CU_AD_FORMAT_HALF', 16)
CU_AD_FORMAT_FLOAT = enum_CUarray_format_enum.define('CU_AD_FORMAT_FLOAT', 32)
CU_AD_FORMAT_NV12 = enum_CUarray_format_enum.define('CU_AD_FORMAT_NV12', 176)
CU_AD_FORMAT_UNORM_INT8X1 = enum_CUarray_format_enum.define('CU_AD_FORMAT_UNORM_INT8X1', 192)
CU_AD_FORMAT_UNORM_INT8X2 = enum_CUarray_format_enum.define('CU_AD_FORMAT_UNORM_INT8X2', 193)
CU_AD_FORMAT_UNORM_INT8X4 = enum_CUarray_format_enum.define('CU_AD_FORMAT_UNORM_INT8X4', 194)
CU_AD_FORMAT_UNORM_INT16X1 = enum_CUarray_format_enum.define('CU_AD_FORMAT_UNORM_INT16X1', 195)
CU_AD_FORMAT_UNORM_INT16X2 = enum_CUarray_format_enum.define('CU_AD_FORMAT_UNORM_INT16X2', 196)
CU_AD_FORMAT_UNORM_INT16X4 = enum_CUarray_format_enum.define('CU_AD_FORMAT_UNORM_INT16X4', 197)
CU_AD_FORMAT_SNORM_INT8X1 = enum_CUarray_format_enum.define('CU_AD_FORMAT_SNORM_INT8X1', 198)
CU_AD_FORMAT_SNORM_INT8X2 = enum_CUarray_format_enum.define('CU_AD_FORMAT_SNORM_INT8X2', 199)
CU_AD_FORMAT_SNORM_INT8X4 = enum_CUarray_format_enum.define('CU_AD_FORMAT_SNORM_INT8X4', 200)
CU_AD_FORMAT_SNORM_INT16X1 = enum_CUarray_format_enum.define('CU_AD_FORMAT_SNORM_INT16X1', 201)
CU_AD_FORMAT_SNORM_INT16X2 = enum_CUarray_format_enum.define('CU_AD_FORMAT_SNORM_INT16X2', 202)
CU_AD_FORMAT_SNORM_INT16X4 = enum_CUarray_format_enum.define('CU_AD_FORMAT_SNORM_INT16X4', 203)
CU_AD_FORMAT_BC1_UNORM = enum_CUarray_format_enum.define('CU_AD_FORMAT_BC1_UNORM', 145)
CU_AD_FORMAT_BC1_UNORM_SRGB = enum_CUarray_format_enum.define('CU_AD_FORMAT_BC1_UNORM_SRGB', 146)
CU_AD_FORMAT_BC2_UNORM = enum_CUarray_format_enum.define('CU_AD_FORMAT_BC2_UNORM', 147)
CU_AD_FORMAT_BC2_UNORM_SRGB = enum_CUarray_format_enum.define('CU_AD_FORMAT_BC2_UNORM_SRGB', 148)
CU_AD_FORMAT_BC3_UNORM = enum_CUarray_format_enum.define('CU_AD_FORMAT_BC3_UNORM', 149)
CU_AD_FORMAT_BC3_UNORM_SRGB = enum_CUarray_format_enum.define('CU_AD_FORMAT_BC3_UNORM_SRGB', 150)
CU_AD_FORMAT_BC4_UNORM = enum_CUarray_format_enum.define('CU_AD_FORMAT_BC4_UNORM', 151)
CU_AD_FORMAT_BC4_SNORM = enum_CUarray_format_enum.define('CU_AD_FORMAT_BC4_SNORM', 152)
CU_AD_FORMAT_BC5_UNORM = enum_CUarray_format_enum.define('CU_AD_FORMAT_BC5_UNORM', 153)
CU_AD_FORMAT_BC5_SNORM = enum_CUarray_format_enum.define('CU_AD_FORMAT_BC5_SNORM', 154)
CU_AD_FORMAT_BC6H_UF16 = enum_CUarray_format_enum.define('CU_AD_FORMAT_BC6H_UF16', 155)
CU_AD_FORMAT_BC6H_SF16 = enum_CUarray_format_enum.define('CU_AD_FORMAT_BC6H_SF16', 156)
CU_AD_FORMAT_BC7_UNORM = enum_CUarray_format_enum.define('CU_AD_FORMAT_BC7_UNORM', 157)
CU_AD_FORMAT_BC7_UNORM_SRGB = enum_CUarray_format_enum.define('CU_AD_FORMAT_BC7_UNORM_SRGB', 158)
CU_AD_FORMAT_P010 = enum_CUarray_format_enum.define('CU_AD_FORMAT_P010', 159)
CU_AD_FORMAT_P016 = enum_CUarray_format_enum.define('CU_AD_FORMAT_P016', 161)
CU_AD_FORMAT_NV16 = enum_CUarray_format_enum.define('CU_AD_FORMAT_NV16', 162)
CU_AD_FORMAT_P210 = enum_CUarray_format_enum.define('CU_AD_FORMAT_P210', 163)
CU_AD_FORMAT_P216 = enum_CUarray_format_enum.define('CU_AD_FORMAT_P216', 164)
CU_AD_FORMAT_YUY2 = enum_CUarray_format_enum.define('CU_AD_FORMAT_YUY2', 165)
CU_AD_FORMAT_Y210 = enum_CUarray_format_enum.define('CU_AD_FORMAT_Y210', 166)
CU_AD_FORMAT_Y216 = enum_CUarray_format_enum.define('CU_AD_FORMAT_Y216', 167)
CU_AD_FORMAT_AYUV = enum_CUarray_format_enum.define('CU_AD_FORMAT_AYUV', 168)
CU_AD_FORMAT_Y410 = enum_CUarray_format_enum.define('CU_AD_FORMAT_Y410', 169)
CU_AD_FORMAT_Y416 = enum_CUarray_format_enum.define('CU_AD_FORMAT_Y416', 177)
CU_AD_FORMAT_Y444_PLANAR8 = enum_CUarray_format_enum.define('CU_AD_FORMAT_Y444_PLANAR8', 178)
CU_AD_FORMAT_Y444_PLANAR10 = enum_CUarray_format_enum.define('CU_AD_FORMAT_Y444_PLANAR10', 179)
CU_AD_FORMAT_YUV444_8bit_SemiPlanar = enum_CUarray_format_enum.define('CU_AD_FORMAT_YUV444_8bit_SemiPlanar', 180)
CU_AD_FORMAT_YUV444_16bit_SemiPlanar = enum_CUarray_format_enum.define('CU_AD_FORMAT_YUV444_16bit_SemiPlanar', 181)
CU_AD_FORMAT_UNORM_INT_101010_2 = enum_CUarray_format_enum.define('CU_AD_FORMAT_UNORM_INT_101010_2', 80)
CU_AD_FORMAT_MAX = enum_CUarray_format_enum.define('CU_AD_FORMAT_MAX', 2147483647)
CUarray_format: TypeAlias = enum_CUarray_format_enum
cuDeviceGetTexture1DLinearMaxWidth_params: TypeAlias = struct_cuDeviceGetTexture1DLinearMaxWidth_params_st
@c.record
class struct_cuDeviceGetAttribute_params_st(c.Struct):
SIZE = 16
pi: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 0]
attrib: Annotated[CUdevice_attribute, 8]
dev: Annotated[CUdevice, 12]
cuDeviceGetAttribute_params: TypeAlias = struct_cuDeviceGetAttribute_params_st
@c.record
class struct_cuDeviceGetNvSciSyncAttributes_params_st(c.Struct):
SIZE = 16
nvSciSyncAttrList: Annotated[ctypes.c_void_p, 0]
dev: Annotated[CUdevice, 8]
flags: Annotated[Annotated[int, ctypes.c_int32], 12]
cuDeviceGetNvSciSyncAttributes_params: TypeAlias = struct_cuDeviceGetNvSciSyncAttributes_params_st
@c.record
class struct_cuDeviceSetMemPool_params_st(c.Struct):
SIZE = 16
dev: Annotated[CUdevice, 0]
pool: Annotated[CUmemoryPool, 8]
class struct_CUmemPoolHandle_st(ctypes.Structure): pass
CUmemoryPool: TypeAlias = c.POINTER[struct_CUmemPoolHandle_st]
cuDeviceSetMemPool_params: TypeAlias = struct_cuDeviceSetMemPool_params_st
@c.record
class struct_cuDeviceGetMemPool_params_st(c.Struct):
SIZE = 16
pool: Annotated[c.POINTER[CUmemoryPool], 0]
dev: Annotated[CUdevice, 8]
cuDeviceGetMemPool_params: TypeAlias = struct_cuDeviceGetMemPool_params_st
@c.record
class struct_cuDeviceGetDefaultMemPool_params_st(c.Struct):
SIZE = 16
pool_out: Annotated[c.POINTER[CUmemoryPool], 0]
dev: Annotated[CUdevice, 8]
cuDeviceGetDefaultMemPool_params: TypeAlias = struct_cuDeviceGetDefaultMemPool_params_st
@c.record
class struct_cuDeviceGetExecAffinitySupport_params_st(c.Struct):
SIZE = 16
pi: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 0]
type: Annotated[CUexecAffinityType, 8]
dev: Annotated[CUdevice, 12]
class enum_CUexecAffinityType_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_EXEC_AFFINITY_TYPE_SM_COUNT = enum_CUexecAffinityType_enum.define('CU_EXEC_AFFINITY_TYPE_SM_COUNT', 0)
CU_EXEC_AFFINITY_TYPE_MAX = enum_CUexecAffinityType_enum.define('CU_EXEC_AFFINITY_TYPE_MAX', 1)
CUexecAffinityType: TypeAlias = enum_CUexecAffinityType_enum
cuDeviceGetExecAffinitySupport_params: TypeAlias = struct_cuDeviceGetExecAffinitySupport_params_st
@c.record
class struct_cuFlushGPUDirectRDMAWrites_params_st(c.Struct):
SIZE = 8
target: Annotated[CUflushGPUDirectRDMAWritesTarget, 0]
scope: Annotated[CUflushGPUDirectRDMAWritesScope, 4]
class enum_CUflushGPUDirectRDMAWritesTarget_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TARGET_CURRENT_CTX = enum_CUflushGPUDirectRDMAWritesTarget_enum.define('CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TARGET_CURRENT_CTX', 0)
CUflushGPUDirectRDMAWritesTarget: TypeAlias = enum_CUflushGPUDirectRDMAWritesTarget_enum
class enum_CUflushGPUDirectRDMAWritesScope_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_OWNER = enum_CUflushGPUDirectRDMAWritesScope_enum.define('CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_OWNER', 100)
CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_ALL_DEVICES = enum_CUflushGPUDirectRDMAWritesScope_enum.define('CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_ALL_DEVICES', 200)
CUflushGPUDirectRDMAWritesScope: TypeAlias = enum_CUflushGPUDirectRDMAWritesScope_enum
cuFlushGPUDirectRDMAWrites_params: TypeAlias = struct_cuFlushGPUDirectRDMAWrites_params_st
# =====================================================================================
# Device-property and context-creation parameter records (cuDeviceGetProperties,
# cuDevicePrimaryCtx*, cuCtxCreate v2/v3/v4) plus the supporting value types
# (CUdevprop, CUexecAffinityParam, CUctxCreateParams, CUctxCigParam).
# Auto-generated; SIZE is total byte size, Annotated[..., N] is the field byte offset.
# =====================================================================================
@c.record
class struct_cuDeviceGetProperties_params_st(c.Struct):
    SIZE = 16
    prop: Annotated[c.POINTER[CUdevprop], 0]
    dev: Annotated[CUdevice, 8]
# Legacy device-properties record (mirrors the C CUdevprop struct of ten ints,
# including two int[3] arrays for max thread/grid dimensions).
@c.record
class struct_CUdevprop_st(c.Struct):
    SIZE = 56
    maxThreadsPerBlock: Annotated[Annotated[int, ctypes.c_int32], 0]
    maxThreadsDim: Annotated[c.Array[Annotated[int, ctypes.c_int32], Literal[3]], 4]
    maxGridSize: Annotated[c.Array[Annotated[int, ctypes.c_int32], Literal[3]], 16]
    sharedMemPerBlock: Annotated[Annotated[int, ctypes.c_int32], 28]
    totalConstantMemory: Annotated[Annotated[int, ctypes.c_int32], 32]
    SIMDWidth: Annotated[Annotated[int, ctypes.c_int32], 36]
    memPitch: Annotated[Annotated[int, ctypes.c_int32], 40]
    regsPerBlock: Annotated[Annotated[int, ctypes.c_int32], 44]
    clockRate: Annotated[Annotated[int, ctypes.c_int32], 48]
    textureAlign: Annotated[Annotated[int, ctypes.c_int32], 52]
CUdevprop: TypeAlias = struct_CUdevprop_st
cuDeviceGetProperties_params: TypeAlias = struct_cuDeviceGetProperties_params_st
@c.record
class struct_cuDeviceComputeCapability_params_st(c.Struct):
    SIZE = 24
    major: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 0]
    minor: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 8]
    dev: Annotated[CUdevice, 16]
cuDeviceComputeCapability_params: TypeAlias = struct_cuDeviceComputeCapability_params_st
@c.record
class struct_cuDevicePrimaryCtxRetain_params_st(c.Struct):
    SIZE = 16
    pctx: Annotated[c.POINTER[CUcontext], 0]
    dev: Annotated[CUdevice, 8]
cuDevicePrimaryCtxRetain_params: TypeAlias = struct_cuDevicePrimaryCtxRetain_params_st
@c.record
class struct_cuDevicePrimaryCtxRelease_v2_params_st(c.Struct):
    SIZE = 4
    dev: Annotated[CUdevice, 0]
cuDevicePrimaryCtxRelease_v2_params: TypeAlias = struct_cuDevicePrimaryCtxRelease_v2_params_st
@c.record
class struct_cuDevicePrimaryCtxSetFlags_v2_params_st(c.Struct):
    SIZE = 8
    dev: Annotated[CUdevice, 0]
    flags: Annotated[Annotated[int, ctypes.c_uint32], 4]
cuDevicePrimaryCtxSetFlags_v2_params: TypeAlias = struct_cuDevicePrimaryCtxSetFlags_v2_params_st
@c.record
class struct_cuDevicePrimaryCtxGetState_params_st(c.Struct):
    SIZE = 24
    dev: Annotated[CUdevice, 0]
    flags: Annotated[c.POINTER[Annotated[int, ctypes.c_uint32]], 8]
    active: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 16]
cuDevicePrimaryCtxGetState_params: TypeAlias = struct_cuDevicePrimaryCtxGetState_params_st
@c.record
class struct_cuDevicePrimaryCtxReset_v2_params_st(c.Struct):
    SIZE = 4
    dev: Annotated[CUdevice, 0]
cuDevicePrimaryCtxReset_v2_params: TypeAlias = struct_cuDevicePrimaryCtxReset_v2_params_st
@c.record
class struct_cuCtxCreate_v2_params_st(c.Struct):
    SIZE = 16
    pctx: Annotated[c.POINTER[CUcontext], 0]
    flags: Annotated[Annotated[int, ctypes.c_uint32], 8]
    dev: Annotated[CUdevice, 12]
cuCtxCreate_v2_params: TypeAlias = struct_cuCtxCreate_v2_params_st
@c.record
class struct_cuCtxCreate_v3_params_st(c.Struct):
    SIZE = 32
    pctx: Annotated[c.POINTER[CUcontext], 0]
    paramsArray: Annotated[c.POINTER[CUexecAffinityParam], 8]
    numParams: Annotated[Annotated[int, ctypes.c_int32], 16]
    flags: Annotated[Annotated[int, ctypes.c_uint32], 20]
    dev: Annotated[CUdevice, 24]
# One execution-affinity setting: a (type, value-union) pair.  Note the forward
# reference — struct_CUexecAffinityParam_st_param is defined just below; the c.record
# DSL evidently tolerates this ordering (generator output), so keep the order as-is.
@c.record
class struct_CUexecAffinityParam_st(c.Struct):
    SIZE = 8
    type: Annotated[CUexecAffinityType, 0]
    param: Annotated[struct_CUexecAffinityParam_st_param, 4]
CUexecAffinityParam: TypeAlias = struct_CUexecAffinityParam_st
# Anonymous-union member of CUexecAffinityParam (only one arm: smCount).
@c.record
class struct_CUexecAffinityParam_st_param(c.Struct):
    SIZE = 4
    smCount: Annotated[CUexecAffinitySmCount, 0]
@c.record
class struct_CUexecAffinitySmCount_st(c.Struct):
    SIZE = 4
    val: Annotated[Annotated[int, ctypes.c_uint32], 0]
CUexecAffinitySmCount: TypeAlias = struct_CUexecAffinitySmCount_st
cuCtxCreate_v3_params: TypeAlias = struct_cuCtxCreate_v3_params_st
@c.record
class struct_cuCtxCreate_v4_params_st(c.Struct):
    SIZE = 24
    pctx: Annotated[c.POINTER[CUcontext], 0]
    ctxCreateParams: Annotated[c.POINTER[CUctxCreateParams], 8]
    flags: Annotated[Annotated[int, ctypes.c_uint32], 16]
    dev: Annotated[CUdevice, 20]
# Aggregate creation parameters for cuCtxCreate_v4: affinity list + optional CIG params.
@c.record
class struct_CUctxCreateParams_st(c.Struct):
    SIZE = 24
    execAffinityParams: Annotated[c.POINTER[CUexecAffinityParam], 0]
    numExecAffinityParams: Annotated[Annotated[int, ctypes.c_int32], 8]
    cigParams: Annotated[c.POINTER[CUctxCigParam], 16]
CUctxCreateParams: TypeAlias = struct_CUctxCreateParams_st
@c.record
class struct_CUctxCigParam_st(c.Struct):
    SIZE = 16
    sharedDataType: Annotated[CUcigDataType, 0]
    sharedData: Annotated[ctypes.c_void_p, 8]
CUctxCigParam: TypeAlias = struct_CUctxCigParam_st
class enum_CUcigDataType_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CIG_DATA_TYPE_D3D12_COMMAND_QUEUE = enum_CUcigDataType_enum.define('CIG_DATA_TYPE_D3D12_COMMAND_QUEUE', 1)
CUcigDataType: TypeAlias = enum_CUcigDataType_enum
cuCtxCreate_v4_params: TypeAlias = struct_cuCtxCreate_v4_params_st
# =====================================================================================
# Context-management parameter records (cuCtxDestroy through cuCtxSetSharedMemConfig)
# plus the CUlimit / CUfunc_cache / CUsharedconfig enums they reference.
# Auto-generated; SIZE is total byte size, Annotated[..., N] is the field byte offset.
# =====================================================================================
@c.record
class struct_cuCtxDestroy_v2_params_st(c.Struct):
    SIZE = 8
    ctx: Annotated[CUcontext, 0]
cuCtxDestroy_v2_params: TypeAlias = struct_cuCtxDestroy_v2_params_st
@c.record
class struct_cuCtxPushCurrent_v2_params_st(c.Struct):
    SIZE = 8
    ctx: Annotated[CUcontext, 0]
cuCtxPushCurrent_v2_params: TypeAlias = struct_cuCtxPushCurrent_v2_params_st
@c.record
class struct_cuCtxPopCurrent_v2_params_st(c.Struct):
    SIZE = 8
    pctx: Annotated[c.POINTER[CUcontext], 0]
cuCtxPopCurrent_v2_params: TypeAlias = struct_cuCtxPopCurrent_v2_params_st
@c.record
class struct_cuCtxSetCurrent_params_st(c.Struct):
    SIZE = 8
    ctx: Annotated[CUcontext, 0]
cuCtxSetCurrent_params: TypeAlias = struct_cuCtxSetCurrent_params_st
@c.record
class struct_cuCtxGetCurrent_params_st(c.Struct):
    SIZE = 8
    pctx: Annotated[c.POINTER[CUcontext], 0]
cuCtxGetCurrent_params: TypeAlias = struct_cuCtxGetCurrent_params_st
@c.record
class struct_cuCtxGetDevice_params_st(c.Struct):
    SIZE = 8
    device: Annotated[c.POINTER[CUdevice], 0]
cuCtxGetDevice_params: TypeAlias = struct_cuCtxGetDevice_params_st
@c.record
class struct_cuCtxGetFlags_params_st(c.Struct):
    SIZE = 8
    flags: Annotated[c.POINTER[Annotated[int, ctypes.c_uint32]], 0]
cuCtxGetFlags_params: TypeAlias = struct_cuCtxGetFlags_params_st
@c.record
class struct_cuCtxSetFlags_params_st(c.Struct):
    SIZE = 4
    flags: Annotated[Annotated[int, ctypes.c_uint32], 0]
cuCtxSetFlags_params: TypeAlias = struct_cuCtxSetFlags_params_st
@c.record
class struct_cuCtxGetId_params_st(c.Struct):
    SIZE = 16
    ctx: Annotated[CUcontext, 0]
    ctxId: Annotated[c.POINTER[Annotated[int, ctypes.c_uint64]], 8]
cuCtxGetId_params: TypeAlias = struct_cuCtxGetId_params_st
@c.record
class struct_cuCtxSetLimit_params_st(c.Struct):
    SIZE = 16
    limit: Annotated[CUlimit, 0]
    value: Annotated[size_t, 8]
# Per-context resource limits selectable via cuCtxSetLimit/cuCtxGetLimit.
class enum_CUlimit_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_LIMIT_STACK_SIZE = enum_CUlimit_enum.define('CU_LIMIT_STACK_SIZE', 0)
CU_LIMIT_PRINTF_FIFO_SIZE = enum_CUlimit_enum.define('CU_LIMIT_PRINTF_FIFO_SIZE', 1)
CU_LIMIT_MALLOC_HEAP_SIZE = enum_CUlimit_enum.define('CU_LIMIT_MALLOC_HEAP_SIZE', 2)
CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH = enum_CUlimit_enum.define('CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH', 3)
CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT = enum_CUlimit_enum.define('CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT', 4)
CU_LIMIT_MAX_L2_FETCH_GRANULARITY = enum_CUlimit_enum.define('CU_LIMIT_MAX_L2_FETCH_GRANULARITY', 5)
CU_LIMIT_PERSISTING_L2_CACHE_SIZE = enum_CUlimit_enum.define('CU_LIMIT_PERSISTING_L2_CACHE_SIZE', 6)
CU_LIMIT_SHMEM_SIZE = enum_CUlimit_enum.define('CU_LIMIT_SHMEM_SIZE', 7)
CU_LIMIT_CIG_ENABLED = enum_CUlimit_enum.define('CU_LIMIT_CIG_ENABLED', 8)
CU_LIMIT_CIG_SHMEM_FALLBACK_ENABLED = enum_CUlimit_enum.define('CU_LIMIT_CIG_SHMEM_FALLBACK_ENABLED', 9)
CU_LIMIT_MAX = enum_CUlimit_enum.define('CU_LIMIT_MAX', 10)
CUlimit: TypeAlias = enum_CUlimit_enum
cuCtxSetLimit_params: TypeAlias = struct_cuCtxSetLimit_params_st
@c.record
class struct_cuCtxGetLimit_params_st(c.Struct):
    SIZE = 16
    pvalue: Annotated[c.POINTER[size_t], 0]
    limit: Annotated[CUlimit, 8]
cuCtxGetLimit_params: TypeAlias = struct_cuCtxGetLimit_params_st
@c.record
class struct_cuCtxGetCacheConfig_params_st(c.Struct):
    SIZE = 8
    pconfig: Annotated[c.POINTER[CUfunc_cache], 0]
# L1/shared-memory cache preference values.
class enum_CUfunc_cache_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_FUNC_CACHE_PREFER_NONE = enum_CUfunc_cache_enum.define('CU_FUNC_CACHE_PREFER_NONE', 0)
CU_FUNC_CACHE_PREFER_SHARED = enum_CUfunc_cache_enum.define('CU_FUNC_CACHE_PREFER_SHARED', 1)
CU_FUNC_CACHE_PREFER_L1 = enum_CUfunc_cache_enum.define('CU_FUNC_CACHE_PREFER_L1', 2)
CU_FUNC_CACHE_PREFER_EQUAL = enum_CUfunc_cache_enum.define('CU_FUNC_CACHE_PREFER_EQUAL', 3)
CUfunc_cache: TypeAlias = enum_CUfunc_cache_enum
cuCtxGetCacheConfig_params: TypeAlias = struct_cuCtxGetCacheConfig_params_st
@c.record
class struct_cuCtxSetCacheConfig_params_st(c.Struct):
    SIZE = 4
    config: Annotated[CUfunc_cache, 0]
cuCtxSetCacheConfig_params: TypeAlias = struct_cuCtxSetCacheConfig_params_st
@c.record
class struct_cuCtxGetApiVersion_params_st(c.Struct):
    SIZE = 16
    ctx: Annotated[CUcontext, 0]
    version: Annotated[c.POINTER[Annotated[int, ctypes.c_uint32]], 8]
cuCtxGetApiVersion_params: TypeAlias = struct_cuCtxGetApiVersion_params_st
@c.record
class struct_cuCtxGetStreamPriorityRange_params_st(c.Struct):
    SIZE = 16
    leastPriority: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 0]
    greatestPriority: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 8]
cuCtxGetStreamPriorityRange_params: TypeAlias = struct_cuCtxGetStreamPriorityRange_params_st
@c.record
class struct_cuCtxGetExecAffinity_params_st(c.Struct):
    SIZE = 16
    pExecAffinity: Annotated[c.POINTER[CUexecAffinityParam], 0]
    type: Annotated[CUexecAffinityType, 8]
cuCtxGetExecAffinity_params: TypeAlias = struct_cuCtxGetExecAffinity_params_st
@c.record
class struct_cuCtxRecordEvent_params_st(c.Struct):
    SIZE = 16
    hCtx: Annotated[CUcontext, 0]
    hEvent: Annotated[CUevent, 8]
cuCtxRecordEvent_params: TypeAlias = struct_cuCtxRecordEvent_params_st
@c.record
class struct_cuCtxWaitEvent_params_st(c.Struct):
    SIZE = 16
    hCtx: Annotated[CUcontext, 0]
    hEvent: Annotated[CUevent, 8]
cuCtxWaitEvent_params: TypeAlias = struct_cuCtxWaitEvent_params_st
# cuCtxAttach/cuCtxDetach are legacy (deprecated) driver entry points; records kept
# because the generator emits the full API surface.
@c.record
class struct_cuCtxAttach_params_st(c.Struct):
    SIZE = 16
    pctx: Annotated[c.POINTER[CUcontext], 0]
    flags: Annotated[Annotated[int, ctypes.c_uint32], 8]
cuCtxAttach_params: TypeAlias = struct_cuCtxAttach_params_st
@c.record
class struct_cuCtxDetach_params_st(c.Struct):
    SIZE = 8
    ctx: Annotated[CUcontext, 0]
cuCtxDetach_params: TypeAlias = struct_cuCtxDetach_params_st
@c.record
class struct_cuCtxGetSharedMemConfig_params_st(c.Struct):
    SIZE = 8
    pConfig: Annotated[c.POINTER[CUsharedconfig], 0]
# Shared-memory bank-size configuration values.
class enum_CUsharedconfig_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_SHARED_MEM_CONFIG_DEFAULT_BANK_SIZE = enum_CUsharedconfig_enum.define('CU_SHARED_MEM_CONFIG_DEFAULT_BANK_SIZE', 0)
CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE = enum_CUsharedconfig_enum.define('CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE', 1)
CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE = enum_CUsharedconfig_enum.define('CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE', 2)
CUsharedconfig: TypeAlias = enum_CUsharedconfig_enum
cuCtxGetSharedMemConfig_params: TypeAlias = struct_cuCtxGetSharedMemConfig_params_st
@c.record
class struct_cuCtxSetSharedMemConfig_params_st(c.Struct):
    SIZE = 4
    config: Annotated[CUsharedconfig, 0]
cuCtxSetSharedMemConfig_params: TypeAlias = struct_cuCtxSetSharedMemConfig_params_st
# =====================================================================================
# Module-loading / JIT / linker parameter records (cuModuleLoad* / cuLink* /
# cuModuleGet*) plus the CUjit_option and CUjitInputType enums.
# Auto-generated; SIZE is total byte size, Annotated[..., N] is the field byte offset.
# =====================================================================================
@c.record
class struct_cuModuleLoad_params_st(c.Struct):
    SIZE = 16
    module: Annotated[c.POINTER[CUmodule], 0]
    fname: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 8]
# Opaque driver handle: CUmodule is a pointer to an incomplete C struct.
class struct_CUmod_st(ctypes.Structure): pass
CUmodule: TypeAlias = c.POINTER[struct_CUmod_st]
cuModuleLoad_params: TypeAlias = struct_cuModuleLoad_params_st
@c.record
class struct_cuModuleLoadData_params_st(c.Struct):
    SIZE = 16
    module: Annotated[c.POINTER[CUmodule], 0]
    image: Annotated[ctypes.c_void_p, 8]
cuModuleLoadData_params: TypeAlias = struct_cuModuleLoadData_params_st
@c.record
class struct_cuModuleLoadDataEx_params_st(c.Struct):
    SIZE = 40
    module: Annotated[c.POINTER[CUmodule], 0]
    image: Annotated[ctypes.c_void_p, 8]
    numOptions: Annotated[Annotated[int, ctypes.c_uint32], 16]
    options: Annotated[c.POINTER[CUjit_option], 24]
    optionValues: Annotated[c.POINTER[ctypes.c_void_p], 32]
# JIT compile/link options (parallel options/optionValues arrays in the calls above).
class enum_CUjit_option_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_JIT_MAX_REGISTERS = enum_CUjit_option_enum.define('CU_JIT_MAX_REGISTERS', 0)
CU_JIT_THREADS_PER_BLOCK = enum_CUjit_option_enum.define('CU_JIT_THREADS_PER_BLOCK', 1)
CU_JIT_WALL_TIME = enum_CUjit_option_enum.define('CU_JIT_WALL_TIME', 2)
CU_JIT_INFO_LOG_BUFFER = enum_CUjit_option_enum.define('CU_JIT_INFO_LOG_BUFFER', 3)
CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES = enum_CUjit_option_enum.define('CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES', 4)
CU_JIT_ERROR_LOG_BUFFER = enum_CUjit_option_enum.define('CU_JIT_ERROR_LOG_BUFFER', 5)
CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES = enum_CUjit_option_enum.define('CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES', 6)
CU_JIT_OPTIMIZATION_LEVEL = enum_CUjit_option_enum.define('CU_JIT_OPTIMIZATION_LEVEL', 7)
CU_JIT_TARGET_FROM_CUCONTEXT = enum_CUjit_option_enum.define('CU_JIT_TARGET_FROM_CUCONTEXT', 8)
CU_JIT_TARGET = enum_CUjit_option_enum.define('CU_JIT_TARGET', 9)
CU_JIT_FALLBACK_STRATEGY = enum_CUjit_option_enum.define('CU_JIT_FALLBACK_STRATEGY', 10)
CU_JIT_GENERATE_DEBUG_INFO = enum_CUjit_option_enum.define('CU_JIT_GENERATE_DEBUG_INFO', 11)
CU_JIT_LOG_VERBOSE = enum_CUjit_option_enum.define('CU_JIT_LOG_VERBOSE', 12)
CU_JIT_GENERATE_LINE_INFO = enum_CUjit_option_enum.define('CU_JIT_GENERATE_LINE_INFO', 13)
CU_JIT_CACHE_MODE = enum_CUjit_option_enum.define('CU_JIT_CACHE_MODE', 14)
CU_JIT_NEW_SM3X_OPT = enum_CUjit_option_enum.define('CU_JIT_NEW_SM3X_OPT', 15)
CU_JIT_FAST_COMPILE = enum_CUjit_option_enum.define('CU_JIT_FAST_COMPILE', 16)
CU_JIT_GLOBAL_SYMBOL_NAMES = enum_CUjit_option_enum.define('CU_JIT_GLOBAL_SYMBOL_NAMES', 17)
CU_JIT_GLOBAL_SYMBOL_ADDRESSES = enum_CUjit_option_enum.define('CU_JIT_GLOBAL_SYMBOL_ADDRESSES', 18)
CU_JIT_GLOBAL_SYMBOL_COUNT = enum_CUjit_option_enum.define('CU_JIT_GLOBAL_SYMBOL_COUNT', 19)
CU_JIT_LTO = enum_CUjit_option_enum.define('CU_JIT_LTO', 20)
CU_JIT_FTZ = enum_CUjit_option_enum.define('CU_JIT_FTZ', 21)
CU_JIT_PREC_DIV = enum_CUjit_option_enum.define('CU_JIT_PREC_DIV', 22)
CU_JIT_PREC_SQRT = enum_CUjit_option_enum.define('CU_JIT_PREC_SQRT', 23)
CU_JIT_FMA = enum_CUjit_option_enum.define('CU_JIT_FMA', 24)
CU_JIT_REFERENCED_KERNEL_NAMES = enum_CUjit_option_enum.define('CU_JIT_REFERENCED_KERNEL_NAMES', 25)
CU_JIT_REFERENCED_KERNEL_COUNT = enum_CUjit_option_enum.define('CU_JIT_REFERENCED_KERNEL_COUNT', 26)
CU_JIT_REFERENCED_VARIABLE_NAMES = enum_CUjit_option_enum.define('CU_JIT_REFERENCED_VARIABLE_NAMES', 27)
CU_JIT_REFERENCED_VARIABLE_COUNT = enum_CUjit_option_enum.define('CU_JIT_REFERENCED_VARIABLE_COUNT', 28)
CU_JIT_OPTIMIZE_UNUSED_DEVICE_VARIABLES = enum_CUjit_option_enum.define('CU_JIT_OPTIMIZE_UNUSED_DEVICE_VARIABLES', 29)
CU_JIT_POSITION_INDEPENDENT_CODE = enum_CUjit_option_enum.define('CU_JIT_POSITION_INDEPENDENT_CODE', 30)
CU_JIT_MIN_CTA_PER_SM = enum_CUjit_option_enum.define('CU_JIT_MIN_CTA_PER_SM', 31)
CU_JIT_MAX_THREADS_PER_BLOCK = enum_CUjit_option_enum.define('CU_JIT_MAX_THREADS_PER_BLOCK', 32)
CU_JIT_OVERRIDE_DIRECTIVE_VALUES = enum_CUjit_option_enum.define('CU_JIT_OVERRIDE_DIRECTIVE_VALUES', 33)
CU_JIT_NUM_OPTIONS = enum_CUjit_option_enum.define('CU_JIT_NUM_OPTIONS', 34)
CUjit_option: TypeAlias = enum_CUjit_option_enum
cuModuleLoadDataEx_params: TypeAlias = struct_cuModuleLoadDataEx_params_st
@c.record
class struct_cuModuleLoadFatBinary_params_st(c.Struct):
    SIZE = 16
    module: Annotated[c.POINTER[CUmodule], 0]
    fatCubin: Annotated[ctypes.c_void_p, 8]
cuModuleLoadFatBinary_params: TypeAlias = struct_cuModuleLoadFatBinary_params_st
@c.record
class struct_cuModuleUnload_params_st(c.Struct):
    SIZE = 8
    hmod: Annotated[CUmodule, 0]
cuModuleUnload_params: TypeAlias = struct_cuModuleUnload_params_st
@c.record
class struct_cuModuleGetLoadingMode_params_st(c.Struct):
    SIZE = 8
    mode: Annotated[c.POINTER[CUmoduleLoadingMode], 0]
class enum_CUmoduleLoadingMode_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_MODULE_EAGER_LOADING = enum_CUmoduleLoadingMode_enum.define('CU_MODULE_EAGER_LOADING', 1)
CU_MODULE_LAZY_LOADING = enum_CUmoduleLoadingMode_enum.define('CU_MODULE_LAZY_LOADING', 2)
CUmoduleLoadingMode: TypeAlias = enum_CUmoduleLoadingMode_enum
cuModuleGetLoadingMode_params: TypeAlias = struct_cuModuleGetLoadingMode_params_st
@c.record
class struct_cuModuleGetFunction_params_st(c.Struct):
    SIZE = 24
    hfunc: Annotated[c.POINTER[CUfunction], 0]
    hmod: Annotated[CUmodule, 8]
    name: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 16]
# Opaque driver handle: CUfunction is a pointer to an incomplete C struct.
class struct_CUfunc_st(ctypes.Structure): pass
CUfunction: TypeAlias = c.POINTER[struct_CUfunc_st]
cuModuleGetFunction_params: TypeAlias = struct_cuModuleGetFunction_params_st
@c.record
class struct_cuModuleGetFunctionCount_params_st(c.Struct):
    SIZE = 16
    count: Annotated[c.POINTER[Annotated[int, ctypes.c_uint32]], 0]
    mod: Annotated[CUmodule, 8]
cuModuleGetFunctionCount_params: TypeAlias = struct_cuModuleGetFunctionCount_params_st
@c.record
class struct_cuModuleEnumerateFunctions_params_st(c.Struct):
    SIZE = 24
    functions: Annotated[c.POINTER[CUfunction], 0]
    numFunctions: Annotated[Annotated[int, ctypes.c_uint32], 8]
    mod: Annotated[CUmodule, 16]
cuModuleEnumerateFunctions_params: TypeAlias = struct_cuModuleEnumerateFunctions_params_st
@c.record
class struct_cuModuleGetGlobal_v2_params_st(c.Struct):
    SIZE = 32
    dptr: Annotated[c.POINTER[CUdeviceptr], 0]
    bytes: Annotated[c.POINTER[size_t], 8]
    hmod: Annotated[CUmodule, 16]
    name: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 24]
# Device pointer is a plain 64-bit unsigned integer, not a Python pointer type.
CUdeviceptr: TypeAlias = Annotated[int, ctypes.c_uint64]
cuModuleGetGlobal_v2_params: TypeAlias = struct_cuModuleGetGlobal_v2_params_st
@c.record
class struct_cuLinkCreate_v2_params_st(c.Struct):
    SIZE = 32
    numOptions: Annotated[Annotated[int, ctypes.c_uint32], 0]
    options: Annotated[c.POINTER[CUjit_option], 8]
    optionValues: Annotated[c.POINTER[ctypes.c_void_p], 16]
    stateOut: Annotated[c.POINTER[CUlinkState], 24]
class struct_CUlinkState_st(ctypes.Structure): pass
CUlinkState: TypeAlias = c.POINTER[struct_CUlinkState_st]
cuLinkCreate_v2_params: TypeAlias = struct_cuLinkCreate_v2_params_st
@c.record
class struct_cuLinkAddData_v2_params_st(c.Struct):
    SIZE = 64
    state: Annotated[CUlinkState, 0]
    type: Annotated[CUjitInputType, 8]
    data: Annotated[ctypes.c_void_p, 16]
    size: Annotated[size_t, 24]
    name: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 32]
    numOptions: Annotated[Annotated[int, ctypes.c_uint32], 40]
    options: Annotated[c.POINTER[CUjit_option], 48]
    optionValues: Annotated[c.POINTER[ctypes.c_void_p], 56]
# Kind of input artifact handed to the linker.
class enum_CUjitInputType_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_JIT_INPUT_CUBIN = enum_CUjitInputType_enum.define('CU_JIT_INPUT_CUBIN', 0)
CU_JIT_INPUT_PTX = enum_CUjitInputType_enum.define('CU_JIT_INPUT_PTX', 1)
CU_JIT_INPUT_FATBINARY = enum_CUjitInputType_enum.define('CU_JIT_INPUT_FATBINARY', 2)
CU_JIT_INPUT_OBJECT = enum_CUjitInputType_enum.define('CU_JIT_INPUT_OBJECT', 3)
CU_JIT_INPUT_LIBRARY = enum_CUjitInputType_enum.define('CU_JIT_INPUT_LIBRARY', 4)
CU_JIT_INPUT_NVVM = enum_CUjitInputType_enum.define('CU_JIT_INPUT_NVVM', 5)
CU_JIT_NUM_INPUT_TYPES = enum_CUjitInputType_enum.define('CU_JIT_NUM_INPUT_TYPES', 6)
CUjitInputType: TypeAlias = enum_CUjitInputType_enum
cuLinkAddData_v2_params: TypeAlias = struct_cuLinkAddData_v2_params_st
@c.record
class struct_cuLinkAddFile_v2_params_st(c.Struct):
    SIZE = 48
    state: Annotated[CUlinkState, 0]
    type: Annotated[CUjitInputType, 8]
    path: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 16]
    numOptions: Annotated[Annotated[int, ctypes.c_uint32], 24]
    options: Annotated[c.POINTER[CUjit_option], 32]
    optionValues: Annotated[c.POINTER[ctypes.c_void_p], 40]
cuLinkAddFile_v2_params: TypeAlias = struct_cuLinkAddFile_v2_params_st
@c.record
class struct_cuLinkComplete_params_st(c.Struct):
    SIZE = 24
    state: Annotated[CUlinkState, 0]
    cubinOut: Annotated[c.POINTER[ctypes.c_void_p], 8]
    sizeOut: Annotated[c.POINTER[size_t], 16]
cuLinkComplete_params: TypeAlias = struct_cuLinkComplete_params_st
@c.record
class struct_cuLinkDestroy_params_st(c.Struct):
    SIZE = 8
    state: Annotated[CUlinkState, 0]
cuLinkDestroy_params: TypeAlias = struct_cuLinkDestroy_params_st
# Texture/surface reference lookups are legacy (deprecated) API; kept by the generator.
@c.record
class struct_cuModuleGetTexRef_params_st(c.Struct):
    SIZE = 24
    pTexRef: Annotated[c.POINTER[CUtexref], 0]
    hmod: Annotated[CUmodule, 8]
    name: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 16]
class struct_CUtexref_st(ctypes.Structure): pass
CUtexref: TypeAlias = c.POINTER[struct_CUtexref_st]
cuModuleGetTexRef_params: TypeAlias = struct_cuModuleGetTexRef_params_st
@c.record
class struct_cuModuleGetSurfRef_params_st(c.Struct):
    SIZE = 24
    pSurfRef: Annotated[c.POINTER[CUsurfref], 0]
    hmod: Annotated[CUmodule, 8]
    name: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 16]
class struct_CUsurfref_st(ctypes.Structure): pass
CUsurfref: TypeAlias = c.POINTER[struct_CUsurfref_st]
cuModuleGetSurfRef_params: TypeAlias = struct_cuModuleGetSurfRef_params_st
# =====================================================================================
# Library-management parameter records (cuLibraryLoadData through
# cuLibraryGetUnifiedFunction) and their opaque handles / option enum.
# Auto-generated; SIZE is total byte size, Annotated[..., N] is the field byte offset.
# =====================================================================================
@c.record
class struct_cuLibraryLoadData_params_st(c.Struct):
    SIZE = 64
    library: Annotated[c.POINTER[CUlibrary], 0]
    code: Annotated[ctypes.c_void_p, 8]
    jitOptions: Annotated[c.POINTER[CUjit_option], 16]
    jitOptionsValues: Annotated[c.POINTER[ctypes.c_void_p], 24]
    numJitOptions: Annotated[Annotated[int, ctypes.c_uint32], 32]
    libraryOptions: Annotated[c.POINTER[CUlibraryOption], 40]
    libraryOptionValues: Annotated[c.POINTER[ctypes.c_void_p], 48]
    numLibraryOptions: Annotated[Annotated[int, ctypes.c_uint32], 56]
# Opaque driver handle: CUlibrary is a pointer to an incomplete C struct.
class struct_CUlib_st(ctypes.Structure): pass
CUlibrary: TypeAlias = c.POINTER[struct_CUlib_st]
class enum_CUlibraryOption_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_LIBRARY_HOST_UNIVERSAL_FUNCTION_AND_DATA_TABLE = enum_CUlibraryOption_enum.define('CU_LIBRARY_HOST_UNIVERSAL_FUNCTION_AND_DATA_TABLE', 0)
CU_LIBRARY_BINARY_IS_PRESERVED = enum_CUlibraryOption_enum.define('CU_LIBRARY_BINARY_IS_PRESERVED', 1)
CU_LIBRARY_NUM_OPTIONS = enum_CUlibraryOption_enum.define('CU_LIBRARY_NUM_OPTIONS', 2)
CUlibraryOption: TypeAlias = enum_CUlibraryOption_enum
cuLibraryLoadData_params: TypeAlias = struct_cuLibraryLoadData_params_st
@c.record
class struct_cuLibraryLoadFromFile_params_st(c.Struct):
    SIZE = 64
    library: Annotated[c.POINTER[CUlibrary], 0]
    fileName: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 8]
    jitOptions: Annotated[c.POINTER[CUjit_option], 16]
    jitOptionsValues: Annotated[c.POINTER[ctypes.c_void_p], 24]
    numJitOptions: Annotated[Annotated[int, ctypes.c_uint32], 32]
    libraryOptions: Annotated[c.POINTER[CUlibraryOption], 40]
    libraryOptionValues: Annotated[c.POINTER[ctypes.c_void_p], 48]
    numLibraryOptions: Annotated[Annotated[int, ctypes.c_uint32], 56]
cuLibraryLoadFromFile_params: TypeAlias = struct_cuLibraryLoadFromFile_params_st
@c.record
class struct_cuLibraryUnload_params_st(c.Struct):
    SIZE = 8
    library: Annotated[CUlibrary, 0]
cuLibraryUnload_params: TypeAlias = struct_cuLibraryUnload_params_st
@c.record
class struct_cuLibraryGetKernel_params_st(c.Struct):
    SIZE = 24
    pKernel: Annotated[c.POINTER[CUkernel], 0]
    library: Annotated[CUlibrary, 8]
    name: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 16]
# Opaque driver handle: CUkernel is a pointer to an incomplete C struct.
class struct_CUkern_st(ctypes.Structure): pass
CUkernel: TypeAlias = c.POINTER[struct_CUkern_st]
cuLibraryGetKernel_params: TypeAlias = struct_cuLibraryGetKernel_params_st
@c.record
class struct_cuLibraryGetKernelCount_params_st(c.Struct):
    SIZE = 16
    count: Annotated[c.POINTER[Annotated[int, ctypes.c_uint32]], 0]
    lib: Annotated[CUlibrary, 8]
cuLibraryGetKernelCount_params: TypeAlias = struct_cuLibraryGetKernelCount_params_st
@c.record
class struct_cuLibraryEnumerateKernels_params_st(c.Struct):
    SIZE = 24
    kernels: Annotated[c.POINTER[CUkernel], 0]
    numKernels: Annotated[Annotated[int, ctypes.c_uint32], 8]
    lib: Annotated[CUlibrary, 16]
cuLibraryEnumerateKernels_params: TypeAlias = struct_cuLibraryEnumerateKernels_params_st
@c.record
class struct_cuLibraryGetModule_params_st(c.Struct):
    SIZE = 16
    pMod: Annotated[c.POINTER[CUmodule], 0]
    library: Annotated[CUlibrary, 8]
cuLibraryGetModule_params: TypeAlias = struct_cuLibraryGetModule_params_st
@c.record
class struct_cuKernelGetFunction_params_st(c.Struct):
    SIZE = 16
    pFunc: Annotated[c.POINTER[CUfunction], 0]
    kernel: Annotated[CUkernel, 8]
cuKernelGetFunction_params: TypeAlias = struct_cuKernelGetFunction_params_st
@c.record
class struct_cuKernelGetLibrary_params_st(c.Struct):
    SIZE = 16
    pLib: Annotated[c.POINTER[CUlibrary], 0]
    kernel: Annotated[CUkernel, 8]
cuKernelGetLibrary_params: TypeAlias = struct_cuKernelGetLibrary_params_st
# NOTE(review): the 'bytes' field below shadows the builtin; it mirrors the C
# parameter name from the header, so the generator keeps it — do not rename.
@c.record
class struct_cuLibraryGetGlobal_params_st(c.Struct):
    SIZE = 32
    dptr: Annotated[c.POINTER[CUdeviceptr], 0]
    bytes: Annotated[c.POINTER[size_t], 8]
    library: Annotated[CUlibrary, 16]
    name: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 24]
cuLibraryGetGlobal_params: TypeAlias = struct_cuLibraryGetGlobal_params_st
@c.record
class struct_cuLibraryGetManaged_params_st(c.Struct):
    SIZE = 32
    dptr: Annotated[c.POINTER[CUdeviceptr], 0]
    bytes: Annotated[c.POINTER[size_t], 8]
    library: Annotated[CUlibrary, 16]
    name: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 24]
cuLibraryGetManaged_params: TypeAlias = struct_cuLibraryGetManaged_params_st
@c.record
class struct_cuLibraryGetUnifiedFunction_params_st(c.Struct):
    SIZE = 24
    fptr: Annotated[c.POINTER[ctypes.c_void_p], 0]
    library: Annotated[CUlibrary, 8]
    symbol: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 16]
cuLibraryGetUnifiedFunction_params: TypeAlias = struct_cuLibraryGetUnifiedFunction_params_st
# =====================================================================================
# Kernel-attribute parameter records (cuKernelGetAttribute through
# cuKernelGetParamInfo) plus the CUfunction_attribute enum.
# Auto-generated; SIZE is total byte size, Annotated[..., N] is the field byte offset.
# =====================================================================================
@c.record
class struct_cuKernelGetAttribute_params_st(c.Struct):
    SIZE = 32
    pi: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 0]
    attrib: Annotated[CUfunction_attribute, 8]
    kernel: Annotated[CUkernel, 16]
    dev: Annotated[CUdevice, 24]
# Queryable/settable per-function attributes (values mirror cuda.h ordinals).
class enum_CUfunction_attribute_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK = enum_CUfunction_attribute_enum.define('CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK', 0)
CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES = enum_CUfunction_attribute_enum.define('CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES', 1)
CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES = enum_CUfunction_attribute_enum.define('CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES', 2)
CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES = enum_CUfunction_attribute_enum.define('CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES', 3)
CU_FUNC_ATTRIBUTE_NUM_REGS = enum_CUfunction_attribute_enum.define('CU_FUNC_ATTRIBUTE_NUM_REGS', 4)
CU_FUNC_ATTRIBUTE_PTX_VERSION = enum_CUfunction_attribute_enum.define('CU_FUNC_ATTRIBUTE_PTX_VERSION', 5)
CU_FUNC_ATTRIBUTE_BINARY_VERSION = enum_CUfunction_attribute_enum.define('CU_FUNC_ATTRIBUTE_BINARY_VERSION', 6)
CU_FUNC_ATTRIBUTE_CACHE_MODE_CA = enum_CUfunction_attribute_enum.define('CU_FUNC_ATTRIBUTE_CACHE_MODE_CA', 7)
CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES = enum_CUfunction_attribute_enum.define('CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES', 8)
CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT = enum_CUfunction_attribute_enum.define('CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT', 9)
CU_FUNC_ATTRIBUTE_CLUSTER_SIZE_MUST_BE_SET = enum_CUfunction_attribute_enum.define('CU_FUNC_ATTRIBUTE_CLUSTER_SIZE_MUST_BE_SET', 10)
CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_WIDTH = enum_CUfunction_attribute_enum.define('CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_WIDTH', 11)
CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_HEIGHT = enum_CUfunction_attribute_enum.define('CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_HEIGHT', 12)
CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_DEPTH = enum_CUfunction_attribute_enum.define('CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_DEPTH', 13)
CU_FUNC_ATTRIBUTE_NON_PORTABLE_CLUSTER_SIZE_ALLOWED = enum_CUfunction_attribute_enum.define('CU_FUNC_ATTRIBUTE_NON_PORTABLE_CLUSTER_SIZE_ALLOWED', 14)
CU_FUNC_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE = enum_CUfunction_attribute_enum.define('CU_FUNC_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE', 15)
CU_FUNC_ATTRIBUTE_MAX = enum_CUfunction_attribute_enum.define('CU_FUNC_ATTRIBUTE_MAX', 16)
CUfunction_attribute: TypeAlias = enum_CUfunction_attribute_enum
cuKernelGetAttribute_params: TypeAlias = struct_cuKernelGetAttribute_params_st
@c.record
class struct_cuKernelSetAttribute_params_st(c.Struct):
    SIZE = 24
    attrib: Annotated[CUfunction_attribute, 0]
    val: Annotated[Annotated[int, ctypes.c_int32], 4]
    kernel: Annotated[CUkernel, 8]
    dev: Annotated[CUdevice, 16]
cuKernelSetAttribute_params: TypeAlias = struct_cuKernelSetAttribute_params_st
@c.record
class struct_cuKernelSetCacheConfig_params_st(c.Struct):
    SIZE = 16
    kernel: Annotated[CUkernel, 0]
    config: Annotated[CUfunc_cache, 8]
    dev: Annotated[CUdevice, 12]
cuKernelSetCacheConfig_params: TypeAlias = struct_cuKernelSetCacheConfig_params_st
@c.record
class struct_cuKernelGetName_params_st(c.Struct):
    SIZE = 16
    name: Annotated[c.POINTER[c.POINTER[Annotated[bytes, ctypes.c_char]]], 0]
    hfunc: Annotated[CUkernel, 8]
cuKernelGetName_params: TypeAlias = struct_cuKernelGetName_params_st
@c.record
class struct_cuKernelGetParamInfo_params_st(c.Struct):
    SIZE = 32
    kernel: Annotated[CUkernel, 0]
    paramIndex: Annotated[size_t, 8]
    paramOffset: Annotated[c.POINTER[size_t], 16]
    paramSize: Annotated[c.POINTER[size_t], 24]
cuKernelGetParamInfo_params: TypeAlias = struct_cuKernelGetParamInfo_params_st
# Parameter records for the basic memory-management entry points
# (cuMemGetInfo / cuMemAlloc / cuMemFree / host allocation and pinning
# queries). Same generated pattern as elsewhere in this file: SIZE is the
# struct size in bytes, Annotated offsets are byte offsets into the C struct.
@c.record
class struct_cuMemGetInfo_v2_params_st(c.Struct):
  SIZE = 16
  free: Annotated[c.POINTER[size_t], 0]
  total: Annotated[c.POINTER[size_t], 8]
cuMemGetInfo_v2_params: TypeAlias = struct_cuMemGetInfo_v2_params_st
@c.record
class struct_cuMemAlloc_v2_params_st(c.Struct):
  SIZE = 16
  dptr: Annotated[c.POINTER[CUdeviceptr], 0]
  bytesize: Annotated[size_t, 8]
cuMemAlloc_v2_params: TypeAlias = struct_cuMemAlloc_v2_params_st
@c.record
class struct_cuMemAllocPitch_v2_params_st(c.Struct):
  SIZE = 40
  dptr: Annotated[c.POINTER[CUdeviceptr], 0]
  pPitch: Annotated[c.POINTER[size_t], 8]
  WidthInBytes: Annotated[size_t, 16]
  Height: Annotated[size_t, 24]
  ElementSizeBytes: Annotated[Annotated[int, ctypes.c_uint32], 32]
cuMemAllocPitch_v2_params: TypeAlias = struct_cuMemAllocPitch_v2_params_st
@c.record
class struct_cuMemFree_v2_params_st(c.Struct):
  SIZE = 8
  dptr: Annotated[CUdeviceptr, 0]
cuMemFree_v2_params: TypeAlias = struct_cuMemFree_v2_params_st
@c.record
class struct_cuMemGetAddressRange_v2_params_st(c.Struct):
  SIZE = 24
  pbase: Annotated[c.POINTER[CUdeviceptr], 0]
  psize: Annotated[c.POINTER[size_t], 8]
  dptr: Annotated[CUdeviceptr, 16]
cuMemGetAddressRange_v2_params: TypeAlias = struct_cuMemGetAddressRange_v2_params_st
@c.record
class struct_cuMemAllocHost_v2_params_st(c.Struct):
  SIZE = 16
  pp: Annotated[c.POINTER[ctypes.c_void_p], 0]
  bytesize: Annotated[size_t, 8]
cuMemAllocHost_v2_params: TypeAlias = struct_cuMemAllocHost_v2_params_st
@c.record
class struct_cuMemFreeHost_params_st(c.Struct):
  SIZE = 8
  p: Annotated[ctypes.c_void_p, 0]
cuMemFreeHost_params: TypeAlias = struct_cuMemFreeHost_params_st
@c.record
class struct_cuMemHostAlloc_params_st(c.Struct):
  SIZE = 24
  pp: Annotated[c.POINTER[ctypes.c_void_p], 0]
  bytesize: Annotated[size_t, 8]
  Flags: Annotated[Annotated[int, ctypes.c_uint32], 16]
cuMemHostAlloc_params: TypeAlias = struct_cuMemHostAlloc_params_st
@c.record
class struct_cuMemHostGetDevicePointer_v2_params_st(c.Struct):
  SIZE = 24
  pdptr: Annotated[c.POINTER[CUdeviceptr], 0]
  p: Annotated[ctypes.c_void_p, 8]
  Flags: Annotated[Annotated[int, ctypes.c_uint32], 16]
cuMemHostGetDevicePointer_v2_params: TypeAlias = struct_cuMemHostGetDevicePointer_v2_params_st
@c.record
class struct_cuMemHostGetFlags_params_st(c.Struct):
  SIZE = 16
  pFlags: Annotated[c.POINTER[Annotated[int, ctypes.c_uint32]], 0]
  p: Annotated[ctypes.c_void_p, 8]
cuMemHostGetFlags_params: TypeAlias = struct_cuMemHostGetFlags_params_st
@c.record
class struct_cuMemAllocManaged_params_st(c.Struct):
  SIZE = 24
  dptr: Annotated[c.POINTER[CUdeviceptr], 0]
  bytesize: Annotated[size_t, 8]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 16]
cuMemAllocManaged_params: TypeAlias = struct_cuMemAllocManaged_params_st
# Records and types for the device async-notification API
# (cuDeviceRegisterAsyncNotification / cuDeviceUnregisterAsyncNotification),
# including the notification-info struct delivered to the callback.
@c.record
class struct_cuDeviceRegisterAsyncNotification_params_st(c.Struct):
  SIZE = 32
  device: Annotated[CUdevice, 0]
  callbackFunc: Annotated[CUasyncCallback, 8]
  userData: Annotated[ctypes.c_void_p, 16]
  # Out-parameter: receives the handle used later to unregister.
  callback: Annotated[c.POINTER[CUasyncCallbackHandle], 24]
@c.record
class struct_CUasyncNotificationInfo_st(c.Struct):
  SIZE = 16
  type: Annotated[CUasyncNotificationType, 0]
  info: Annotated[struct_CUasyncNotificationInfo_st_info, 8]
class enum_CUasyncNotificationType_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_ASYNC_NOTIFICATION_TYPE_OVER_BUDGET = enum_CUasyncNotificationType_enum.define('CU_ASYNC_NOTIFICATION_TYPE_OVER_BUDGET', 1)
CUasyncNotificationType: TypeAlias = enum_CUasyncNotificationType_enum
# `info` mirrors a C union: both members share offset 0.
@c.record
class struct_CUasyncNotificationInfo_st_info(c.Struct):
  SIZE = 8
  overBudget: Annotated[struct_CUasyncNotificationInfo_st_info_overBudget, 0]
@c.record
class struct_CUasyncNotificationInfo_st_info_overBudget(c.Struct):
  SIZE = 8
  bytesOverBudget: Annotated[Annotated[int, ctypes.c_uint64], 0]
# Opaque handle type; only ever used behind a pointer.
class struct_CUasyncCallbackEntry_st(ctypes.Structure): pass
# C function-pointer type: void (*)(CUasyncNotificationInfo*, void*, CUasyncCallbackHandle).
CUasyncCallback: TypeAlias = c.CFUNCTYPE[None, [c.POINTER[struct_CUasyncNotificationInfo_st], ctypes.c_void_p, c.POINTER[struct_CUasyncCallbackEntry_st]]]
CUasyncCallbackHandle: TypeAlias = c.POINTER[struct_CUasyncCallbackEntry_st]
cuDeviceRegisterAsyncNotification_params: TypeAlias = struct_cuDeviceRegisterAsyncNotification_params_st
@c.record
class struct_cuDeviceUnregisterAsyncNotification_params_st(c.Struct):
  SIZE = 16
  device: Annotated[CUdevice, 0]
  callback: Annotated[CUasyncCallbackHandle, 8]
cuDeviceUnregisterAsyncNotification_params: TypeAlias = struct_cuDeviceUnregisterAsyncNotification_params_st
# Parameter records for PCI-bus-id lookup, IPC event/memory handles, and
# host-memory registration. The 64-byte `reserved` char arrays mirror the
# opaque CUipc*Handle blobs from cuda.h.
@c.record
class struct_cuDeviceGetByPCIBusId_params_st(c.Struct):
  SIZE = 16
  dev: Annotated[c.POINTER[CUdevice], 0]
  pciBusId: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 8]
cuDeviceGetByPCIBusId_params: TypeAlias = struct_cuDeviceGetByPCIBusId_params_st
@c.record
class struct_cuDeviceGetPCIBusId_params_st(c.Struct):
  SIZE = 16
  pciBusId: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 0]
  len: Annotated[Annotated[int, ctypes.c_int32], 8]
  dev: Annotated[CUdevice, 12]
cuDeviceGetPCIBusId_params: TypeAlias = struct_cuDeviceGetPCIBusId_params_st
@c.record
class struct_cuIpcGetEventHandle_params_st(c.Struct):
  SIZE = 16
  pHandle: Annotated[c.POINTER[CUipcEventHandle], 0]
  event: Annotated[CUevent, 8]
@c.record
class struct_CUipcEventHandle_st(c.Struct):
  SIZE = 64
  # Opaque 64-byte IPC handle payload.
  reserved: Annotated[c.Array[Annotated[bytes, ctypes.c_char], Literal[64]], 0]
CUipcEventHandle: TypeAlias = struct_CUipcEventHandle_st
cuIpcGetEventHandle_params: TypeAlias = struct_cuIpcGetEventHandle_params_st
@c.record
class struct_cuIpcOpenEventHandle_params_st(c.Struct):
  SIZE = 72
  phEvent: Annotated[c.POINTER[CUevent], 0]
  # Handle is passed by value, hence the embedded 64-byte struct.
  handle: Annotated[CUipcEventHandle, 8]
cuIpcOpenEventHandle_params: TypeAlias = struct_cuIpcOpenEventHandle_params_st
@c.record
class struct_cuIpcGetMemHandle_params_st(c.Struct):
  SIZE = 16
  pHandle: Annotated[c.POINTER[CUipcMemHandle], 0]
  dptr: Annotated[CUdeviceptr, 8]
@c.record
class struct_CUipcMemHandle_st(c.Struct):
  SIZE = 64
  # Opaque 64-byte IPC handle payload.
  reserved: Annotated[c.Array[Annotated[bytes, ctypes.c_char], Literal[64]], 0]
CUipcMemHandle: TypeAlias = struct_CUipcMemHandle_st
cuIpcGetMemHandle_params: TypeAlias = struct_cuIpcGetMemHandle_params_st
@c.record
class struct_cuIpcOpenMemHandle_v2_params_st(c.Struct):
  SIZE = 80
  pdptr: Annotated[c.POINTER[CUdeviceptr], 0]
  handle: Annotated[CUipcMemHandle, 8]
  Flags: Annotated[Annotated[int, ctypes.c_uint32], 72]
cuIpcOpenMemHandle_v2_params: TypeAlias = struct_cuIpcOpenMemHandle_v2_params_st
@c.record
class struct_cuIpcCloseMemHandle_params_st(c.Struct):
  SIZE = 8
  dptr: Annotated[CUdeviceptr, 0]
cuIpcCloseMemHandle_params: TypeAlias = struct_cuIpcCloseMemHandle_params_st
@c.record
class struct_cuMemHostRegister_v2_params_st(c.Struct):
  SIZE = 24
  p: Annotated[ctypes.c_void_p, 0]
  bytesize: Annotated[size_t, 8]
  Flags: Annotated[Annotated[int, ctypes.c_uint32], 16]
cuMemHostRegister_v2_params: TypeAlias = struct_cuMemHostRegister_v2_params_st
@c.record
class struct_cuMemHostUnregister_params_st(c.Struct):
  SIZE = 8
  p: Annotated[ctypes.c_void_p, 0]
cuMemHostUnregister_params: TypeAlias = struct_cuMemHostUnregister_params_st
# Parameter records for the synchronous memcpy entry points. The `_ptds`
# suffix on the names mirrors the "per-thread default stream" variants of
# these symbols in the driver's callback table. Naming convention from
# cuda.h: H = host, D = device, A = array (e.g. DtoH = device-to-host).
@c.record
class struct_cuMemcpy_ptds_params_st(c.Struct):
  SIZE = 24
  dst: Annotated[CUdeviceptr, 0]
  src: Annotated[CUdeviceptr, 8]
  ByteCount: Annotated[size_t, 16]
cuMemcpy_ptds_params: TypeAlias = struct_cuMemcpy_ptds_params_st
@c.record
class struct_cuMemcpyPeer_ptds_params_st(c.Struct):
  SIZE = 40
  dstDevice: Annotated[CUdeviceptr, 0]
  dstContext: Annotated[CUcontext, 8]
  srcDevice: Annotated[CUdeviceptr, 16]
  srcContext: Annotated[CUcontext, 24]
  ByteCount: Annotated[size_t, 32]
cuMemcpyPeer_ptds_params: TypeAlias = struct_cuMemcpyPeer_ptds_params_st
@c.record
class struct_cuMemcpyHtoD_v2_ptds_params_st(c.Struct):
  SIZE = 24
  dstDevice: Annotated[CUdeviceptr, 0]
  srcHost: Annotated[ctypes.c_void_p, 8]
  ByteCount: Annotated[size_t, 16]
cuMemcpyHtoD_v2_ptds_params: TypeAlias = struct_cuMemcpyHtoD_v2_ptds_params_st
@c.record
class struct_cuMemcpyDtoH_v2_ptds_params_st(c.Struct):
  SIZE = 24
  dstHost: Annotated[ctypes.c_void_p, 0]
  srcDevice: Annotated[CUdeviceptr, 8]
  ByteCount: Annotated[size_t, 16]
cuMemcpyDtoH_v2_ptds_params: TypeAlias = struct_cuMemcpyDtoH_v2_ptds_params_st
@c.record
class struct_cuMemcpyDtoD_v2_ptds_params_st(c.Struct):
  SIZE = 24
  dstDevice: Annotated[CUdeviceptr, 0]
  srcDevice: Annotated[CUdeviceptr, 8]
  ByteCount: Annotated[size_t, 16]
cuMemcpyDtoD_v2_ptds_params: TypeAlias = struct_cuMemcpyDtoD_v2_ptds_params_st
@c.record
class struct_cuMemcpyDtoA_v2_ptds_params_st(c.Struct):
  SIZE = 32
  dstArray: Annotated[CUarray, 0]
  dstOffset: Annotated[size_t, 8]
  srcDevice: Annotated[CUdeviceptr, 16]
  ByteCount: Annotated[size_t, 24]
# Opaque CUDA array handle; only used behind a pointer.
class struct_CUarray_st(ctypes.Structure): pass
CUarray: TypeAlias = c.POINTER[struct_CUarray_st]
cuMemcpyDtoA_v2_ptds_params: TypeAlias = struct_cuMemcpyDtoA_v2_ptds_params_st
@c.record
class struct_cuMemcpyAtoD_v2_ptds_params_st(c.Struct):
  SIZE = 32
  dstDevice: Annotated[CUdeviceptr, 0]
  srcArray: Annotated[CUarray, 8]
  srcOffset: Annotated[size_t, 16]
  ByteCount: Annotated[size_t, 24]
cuMemcpyAtoD_v2_ptds_params: TypeAlias = struct_cuMemcpyAtoD_v2_ptds_params_st
@c.record
class struct_cuMemcpyHtoA_v2_ptds_params_st(c.Struct):
  SIZE = 32
  dstArray: Annotated[CUarray, 0]
  dstOffset: Annotated[size_t, 8]
  srcHost: Annotated[ctypes.c_void_p, 16]
  ByteCount: Annotated[size_t, 24]
cuMemcpyHtoA_v2_ptds_params: TypeAlias = struct_cuMemcpyHtoA_v2_ptds_params_st
@c.record
class struct_cuMemcpyAtoH_v2_ptds_params_st(c.Struct):
  SIZE = 32
  dstHost: Annotated[ctypes.c_void_p, 0]
  srcArray: Annotated[CUarray, 8]
  srcOffset: Annotated[size_t, 16]
  ByteCount: Annotated[size_t, 24]
cuMemcpyAtoH_v2_ptds_params: TypeAlias = struct_cuMemcpyAtoH_v2_ptds_params_st
@c.record
class struct_cuMemcpyAtoA_v2_ptds_params_st(c.Struct):
  SIZE = 40
  dstArray: Annotated[CUarray, 0]
  dstOffset: Annotated[size_t, 8]
  srcArray: Annotated[CUarray, 16]
  srcOffset: Annotated[size_t, 24]
  ByteCount: Annotated[size_t, 32]
cuMemcpyAtoA_v2_ptds_params: TypeAlias = struct_cuMemcpyAtoA_v2_ptds_params_st
# 2D/3D memcpy descriptor structs (CUDA_MEMCPY2D / CUDA_MEMCPY3D /
# CUDA_MEMCPY3D_PEER) and the param records that carry a pointer to them,
# plus the CUmemorytype enum used by the descriptors' src/dstMemoryType
# fields. All sizes/offsets mirror the C structs in cuda.h.
@c.record
class struct_cuMemcpy2D_v2_ptds_params_st(c.Struct):
  SIZE = 8
  pCopy: Annotated[c.POINTER[CUDA_MEMCPY2D], 0]
@c.record
class struct_CUDA_MEMCPY2D_st(c.Struct):
  SIZE = 128
  srcXInBytes: Annotated[size_t, 0]
  srcY: Annotated[size_t, 8]
  srcMemoryType: Annotated[CUmemorytype, 16]
  srcHost: Annotated[ctypes.c_void_p, 24]
  srcDevice: Annotated[CUdeviceptr, 32]
  srcArray: Annotated[CUarray, 40]
  srcPitch: Annotated[size_t, 48]
  dstXInBytes: Annotated[size_t, 56]
  dstY: Annotated[size_t, 64]
  dstMemoryType: Annotated[CUmemorytype, 72]
  dstHost: Annotated[ctypes.c_void_p, 80]
  dstDevice: Annotated[CUdeviceptr, 88]
  dstArray: Annotated[CUarray, 96]
  dstPitch: Annotated[size_t, 104]
  WidthInBytes: Annotated[size_t, 112]
  Height: Annotated[size_t, 120]
CUDA_MEMCPY2D: TypeAlias = struct_CUDA_MEMCPY2D_st
# Selects which of the src*/dst* fields above is authoritative.
class enum_CUmemorytype_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_MEMORYTYPE_HOST = enum_CUmemorytype_enum.define('CU_MEMORYTYPE_HOST', 1)
CU_MEMORYTYPE_DEVICE = enum_CUmemorytype_enum.define('CU_MEMORYTYPE_DEVICE', 2)
CU_MEMORYTYPE_ARRAY = enum_CUmemorytype_enum.define('CU_MEMORYTYPE_ARRAY', 3)
CU_MEMORYTYPE_UNIFIED = enum_CUmemorytype_enum.define('CU_MEMORYTYPE_UNIFIED', 4)
CUmemorytype: TypeAlias = enum_CUmemorytype_enum
cuMemcpy2D_v2_ptds_params: TypeAlias = struct_cuMemcpy2D_v2_ptds_params_st
@c.record
class struct_cuMemcpy2DUnaligned_v2_ptds_params_st(c.Struct):
  SIZE = 8
  pCopy: Annotated[c.POINTER[CUDA_MEMCPY2D], 0]
cuMemcpy2DUnaligned_v2_ptds_params: TypeAlias = struct_cuMemcpy2DUnaligned_v2_ptds_params_st
@c.record
class struct_cuMemcpy3D_v2_ptds_params_st(c.Struct):
  SIZE = 8
  pCopy: Annotated[c.POINTER[CUDA_MEMCPY3D], 0]
@c.record
class struct_CUDA_MEMCPY3D_st(c.Struct):
  SIZE = 200
  srcXInBytes: Annotated[size_t, 0]
  srcY: Annotated[size_t, 8]
  srcZ: Annotated[size_t, 16]
  srcLOD: Annotated[size_t, 24]
  srcMemoryType: Annotated[CUmemorytype, 32]
  srcHost: Annotated[ctypes.c_void_p, 40]
  srcDevice: Annotated[CUdeviceptr, 48]
  srcArray: Annotated[CUarray, 56]
  reserved0: Annotated[ctypes.c_void_p, 64]
  srcPitch: Annotated[size_t, 72]
  srcHeight: Annotated[size_t, 80]
  dstXInBytes: Annotated[size_t, 88]
  dstY: Annotated[size_t, 96]
  dstZ: Annotated[size_t, 104]
  dstLOD: Annotated[size_t, 112]
  dstMemoryType: Annotated[CUmemorytype, 120]
  dstHost: Annotated[ctypes.c_void_p, 128]
  dstDevice: Annotated[CUdeviceptr, 136]
  dstArray: Annotated[CUarray, 144]
  reserved1: Annotated[ctypes.c_void_p, 152]
  dstPitch: Annotated[size_t, 160]
  dstHeight: Annotated[size_t, 168]
  WidthInBytes: Annotated[size_t, 176]
  Height: Annotated[size_t, 184]
  Depth: Annotated[size_t, 192]
CUDA_MEMCPY3D: TypeAlias = struct_CUDA_MEMCPY3D_st
cuMemcpy3D_v2_ptds_params: TypeAlias = struct_cuMemcpy3D_v2_ptds_params_st
@c.record
class struct_cuMemcpy3DPeer_ptds_params_st(c.Struct):
  SIZE = 8
  pCopy: Annotated[c.POINTER[CUDA_MEMCPY3D_PEER], 0]
# Same layout as CUDA_MEMCPY3D, but the reserved slots carry the
# src/dst contexts for cross-context (peer) copies.
@c.record
class struct_CUDA_MEMCPY3D_PEER_st(c.Struct):
  SIZE = 200
  srcXInBytes: Annotated[size_t, 0]
  srcY: Annotated[size_t, 8]
  srcZ: Annotated[size_t, 16]
  srcLOD: Annotated[size_t, 24]
  srcMemoryType: Annotated[CUmemorytype, 32]
  srcHost: Annotated[ctypes.c_void_p, 40]
  srcDevice: Annotated[CUdeviceptr, 48]
  srcArray: Annotated[CUarray, 56]
  srcContext: Annotated[CUcontext, 64]
  srcPitch: Annotated[size_t, 72]
  srcHeight: Annotated[size_t, 80]
  dstXInBytes: Annotated[size_t, 88]
  dstY: Annotated[size_t, 96]
  dstZ: Annotated[size_t, 104]
  dstLOD: Annotated[size_t, 112]
  dstMemoryType: Annotated[CUmemorytype, 120]
  dstHost: Annotated[ctypes.c_void_p, 128]
  dstDevice: Annotated[CUdeviceptr, 136]
  dstArray: Annotated[CUarray, 144]
  dstContext: Annotated[CUcontext, 152]
  dstPitch: Annotated[size_t, 160]
  dstHeight: Annotated[size_t, 168]
  WidthInBytes: Annotated[size_t, 176]
  Height: Annotated[size_t, 184]
  Depth: Annotated[size_t, 192]
CUDA_MEMCPY3D_PEER: TypeAlias = struct_CUDA_MEMCPY3D_PEER_st
cuMemcpy3DPeer_ptds_params: TypeAlias = struct_cuMemcpy3DPeer_ptds_params_st
# Parameter records for the asynchronous memcpy entry points. The `_ptsz`
# suffix mirrors the "per-thread stream" variants of these symbols; each
# record is its synchronous counterpart plus a trailing CUstream handle.
@c.record
class struct_cuMemcpyAsync_ptsz_params_st(c.Struct):
  SIZE = 32
  dst: Annotated[CUdeviceptr, 0]
  src: Annotated[CUdeviceptr, 8]
  ByteCount: Annotated[size_t, 16]
  hStream: Annotated[CUstream, 24]
cuMemcpyAsync_ptsz_params: TypeAlias = struct_cuMemcpyAsync_ptsz_params_st
@c.record
class struct_cuMemcpyPeerAsync_ptsz_params_st(c.Struct):
  SIZE = 48
  dstDevice: Annotated[CUdeviceptr, 0]
  dstContext: Annotated[CUcontext, 8]
  srcDevice: Annotated[CUdeviceptr, 16]
  srcContext: Annotated[CUcontext, 24]
  ByteCount: Annotated[size_t, 32]
  hStream: Annotated[CUstream, 40]
cuMemcpyPeerAsync_ptsz_params: TypeAlias = struct_cuMemcpyPeerAsync_ptsz_params_st
@c.record
class struct_cuMemcpyHtoDAsync_v2_ptsz_params_st(c.Struct):
  SIZE = 32
  dstDevice: Annotated[CUdeviceptr, 0]
  srcHost: Annotated[ctypes.c_void_p, 8]
  ByteCount: Annotated[size_t, 16]
  hStream: Annotated[CUstream, 24]
cuMemcpyHtoDAsync_v2_ptsz_params: TypeAlias = struct_cuMemcpyHtoDAsync_v2_ptsz_params_st
@c.record
class struct_cuMemcpyDtoHAsync_v2_ptsz_params_st(c.Struct):
  SIZE = 32
  dstHost: Annotated[ctypes.c_void_p, 0]
  srcDevice: Annotated[CUdeviceptr, 8]
  ByteCount: Annotated[size_t, 16]
  hStream: Annotated[CUstream, 24]
cuMemcpyDtoHAsync_v2_ptsz_params: TypeAlias = struct_cuMemcpyDtoHAsync_v2_ptsz_params_st
@c.record
class struct_cuMemcpyDtoDAsync_v2_ptsz_params_st(c.Struct):
  SIZE = 32
  dstDevice: Annotated[CUdeviceptr, 0]
  srcDevice: Annotated[CUdeviceptr, 8]
  ByteCount: Annotated[size_t, 16]
  hStream: Annotated[CUstream, 24]
cuMemcpyDtoDAsync_v2_ptsz_params: TypeAlias = struct_cuMemcpyDtoDAsync_v2_ptsz_params_st
@c.record
class struct_cuMemcpyHtoAAsync_v2_ptsz_params_st(c.Struct):
  SIZE = 40
  dstArray: Annotated[CUarray, 0]
  dstOffset: Annotated[size_t, 8]
  srcHost: Annotated[ctypes.c_void_p, 16]
  ByteCount: Annotated[size_t, 24]
  hStream: Annotated[CUstream, 32]
cuMemcpyHtoAAsync_v2_ptsz_params: TypeAlias = struct_cuMemcpyHtoAAsync_v2_ptsz_params_st
@c.record
class struct_cuMemcpyAtoHAsync_v2_ptsz_params_st(c.Struct):
  SIZE = 40
  dstHost: Annotated[ctypes.c_void_p, 0]
  srcArray: Annotated[CUarray, 8]
  srcOffset: Annotated[size_t, 16]
  ByteCount: Annotated[size_t, 24]
  hStream: Annotated[CUstream, 32]
cuMemcpyAtoHAsync_v2_ptsz_params: TypeAlias = struct_cuMemcpyAtoHAsync_v2_ptsz_params_st
@c.record
class struct_cuMemcpy2DAsync_v2_ptsz_params_st(c.Struct):
  SIZE = 16
  pCopy: Annotated[c.POINTER[CUDA_MEMCPY2D], 0]
  hStream: Annotated[CUstream, 8]
cuMemcpy2DAsync_v2_ptsz_params: TypeAlias = struct_cuMemcpy2DAsync_v2_ptsz_params_st
@c.record
class struct_cuMemcpy3DAsync_v2_ptsz_params_st(c.Struct):
  SIZE = 16
  pCopy: Annotated[c.POINTER[CUDA_MEMCPY3D], 0]
  hStream: Annotated[CUstream, 8]
cuMemcpy3DAsync_v2_ptsz_params: TypeAlias = struct_cuMemcpy3DAsync_v2_ptsz_params_st
@c.record
class struct_cuMemcpy3DPeerAsync_ptsz_params_st(c.Struct):
  SIZE = 16
  pCopy: Annotated[c.POINTER[CUDA_MEMCPY3D_PEER], 0]
  hStream: Annotated[CUstream, 8]
cuMemcpy3DPeerAsync_ptsz_params: TypeAlias = struct_cuMemcpy3DPeerAsync_ptsz_params_st
# Batched memcpy API records (cuMemcpyBatchAsync / cuMemcpy3DBatchAsync)
# plus their supporting value types: per-copy attributes, memory-location
# hints, 3D batch operations, operands, and offset/extent helpers.
@c.record
class struct_cuMemcpyBatchAsync_ptsz_params_st(c.Struct):
  SIZE = 72
  dsts: Annotated[c.POINTER[CUdeviceptr], 0]
  srcs: Annotated[c.POINTER[CUdeviceptr], 8]
  sizes: Annotated[c.POINTER[size_t], 16]
  count: Annotated[size_t, 24]
  attrs: Annotated[c.POINTER[CUmemcpyAttributes], 32]
  attrsIdxs: Annotated[c.POINTER[size_t], 40]
  numAttrs: Annotated[size_t, 48]
  # Out-parameter: index of the first copy that failed, if any.
  failIdx: Annotated[c.POINTER[size_t], 56]
  hStream: Annotated[CUstream, 64]
@c.record
class struct_CUmemcpyAttributes_st(c.Struct):
  SIZE = 24
  srcAccessOrder: Annotated[CUmemcpySrcAccessOrder, 0]
  srcLocHint: Annotated[CUmemLocation, 4]
  dstLocHint: Annotated[CUmemLocation, 12]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 20]
CUmemcpyAttributes: TypeAlias = struct_CUmemcpyAttributes_st
class enum_CUmemcpySrcAccessOrder_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_MEMCPY_SRC_ACCESS_ORDER_INVALID = enum_CUmemcpySrcAccessOrder_enum.define('CU_MEMCPY_SRC_ACCESS_ORDER_INVALID', 0)
CU_MEMCPY_SRC_ACCESS_ORDER_STREAM = enum_CUmemcpySrcAccessOrder_enum.define('CU_MEMCPY_SRC_ACCESS_ORDER_STREAM', 1)
CU_MEMCPY_SRC_ACCESS_ORDER_DURING_API_CALL = enum_CUmemcpySrcAccessOrder_enum.define('CU_MEMCPY_SRC_ACCESS_ORDER_DURING_API_CALL', 2)
CU_MEMCPY_SRC_ACCESS_ORDER_ANY = enum_CUmemcpySrcAccessOrder_enum.define('CU_MEMCPY_SRC_ACCESS_ORDER_ANY', 3)
# MAX sentinels in this header use INT32_MAX to pin the enum width.
CU_MEMCPY_SRC_ACCESS_ORDER_MAX = enum_CUmemcpySrcAccessOrder_enum.define('CU_MEMCPY_SRC_ACCESS_ORDER_MAX', 2147483647)
CUmemcpySrcAccessOrder: TypeAlias = enum_CUmemcpySrcAccessOrder_enum
@c.record
class struct_CUmemLocation_st(c.Struct):
  SIZE = 8
  type: Annotated[CUmemLocationType, 0]
  id: Annotated[Annotated[int, ctypes.c_int32], 4]
CUmemLocation: TypeAlias = struct_CUmemLocation_st
class enum_CUmemLocationType_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_MEM_LOCATION_TYPE_INVALID = enum_CUmemLocationType_enum.define('CU_MEM_LOCATION_TYPE_INVALID', 0)
CU_MEM_LOCATION_TYPE_DEVICE = enum_CUmemLocationType_enum.define('CU_MEM_LOCATION_TYPE_DEVICE', 1)
CU_MEM_LOCATION_TYPE_HOST = enum_CUmemLocationType_enum.define('CU_MEM_LOCATION_TYPE_HOST', 2)
CU_MEM_LOCATION_TYPE_HOST_NUMA = enum_CUmemLocationType_enum.define('CU_MEM_LOCATION_TYPE_HOST_NUMA', 3)
CU_MEM_LOCATION_TYPE_HOST_NUMA_CURRENT = enum_CUmemLocationType_enum.define('CU_MEM_LOCATION_TYPE_HOST_NUMA_CURRENT', 4)
CU_MEM_LOCATION_TYPE_MAX = enum_CUmemLocationType_enum.define('CU_MEM_LOCATION_TYPE_MAX', 2147483647)
CUmemLocationType: TypeAlias = enum_CUmemLocationType_enum
cuMemcpyBatchAsync_ptsz_params: TypeAlias = struct_cuMemcpyBatchAsync_ptsz_params_st
@c.record
class struct_cuMemcpy3DBatchAsync_ptsz_params_st(c.Struct):
  SIZE = 40
  numOps: Annotated[size_t, 0]
  opList: Annotated[c.POINTER[CUDA_MEMCPY3D_BATCH_OP], 8]
  failIdx: Annotated[c.POINTER[size_t], 16]
  flags: Annotated[Annotated[int, ctypes.c_uint64], 24]
  hStream: Annotated[CUstream, 32]
@c.record
class struct_CUDA_MEMCPY3D_BATCH_OP_st(c.Struct):
  SIZE = 112
  src: Annotated[CUmemcpy3DOperand, 0]
  dst: Annotated[CUmemcpy3DOperand, 40]
  extent: Annotated[CUextent3D, 80]
  srcAccessOrder: Annotated[CUmemcpySrcAccessOrder, 104]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 108]
CUDA_MEMCPY3D_BATCH_OP: TypeAlias = struct_CUDA_MEMCPY3D_BATCH_OP_st
@c.record
class struct_CUmemcpy3DOperand_st(c.Struct):
  SIZE = 40
  # `type` selects which member of the `op` union below is active.
  type: Annotated[CUmemcpy3DOperandType, 0]
  op: Annotated[struct_CUmemcpy3DOperand_st_op, 8]
CUmemcpy3DOperand: TypeAlias = struct_CUmemcpy3DOperand_st
class enum_CUmemcpy3DOperandType_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_MEMCPY_OPERAND_TYPE_POINTER = enum_CUmemcpy3DOperandType_enum.define('CU_MEMCPY_OPERAND_TYPE_POINTER', 1)
CU_MEMCPY_OPERAND_TYPE_ARRAY = enum_CUmemcpy3DOperandType_enum.define('CU_MEMCPY_OPERAND_TYPE_ARRAY', 2)
CU_MEMCPY_OPERAND_TYPE_MAX = enum_CUmemcpy3DOperandType_enum.define('CU_MEMCPY_OPERAND_TYPE_MAX', 2147483647)
CUmemcpy3DOperandType: TypeAlias = enum_CUmemcpy3DOperandType_enum
# Mirrors a C union: `ptr` and `array` share offset 0.
@c.record
class struct_CUmemcpy3DOperand_st_op(c.Struct):
  SIZE = 32
  ptr: Annotated[struct_CUmemcpy3DOperand_st_op_ptr, 0]
  array: Annotated[struct_CUmemcpy3DOperand_st_op_array, 0]
@c.record
class struct_CUmemcpy3DOperand_st_op_ptr(c.Struct):
  SIZE = 32
  ptr: Annotated[CUdeviceptr, 0]
  rowLength: Annotated[size_t, 8]
  layerHeight: Annotated[size_t, 16]
  locHint: Annotated[CUmemLocation, 24]
@c.record
class struct_CUmemcpy3DOperand_st_op_array(c.Struct):
  SIZE = 32
  array: Annotated[CUarray, 0]
  offset: Annotated[CUoffset3D, 8]
@c.record
class struct_CUoffset3D_st(c.Struct):
  SIZE = 24
  x: Annotated[size_t, 0]
  y: Annotated[size_t, 8]
  z: Annotated[size_t, 16]
CUoffset3D: TypeAlias = struct_CUoffset3D_st
@c.record
class struct_CUextent3D_st(c.Struct):
  SIZE = 24
  width: Annotated[size_t, 0]
  height: Annotated[size_t, 8]
  depth: Annotated[size_t, 16]
CUextent3D: TypeAlias = struct_CUextent3D_st
cuMemcpy3DBatchAsync_ptsz_params: TypeAlias = struct_cuMemcpy3DBatchAsync_ptsz_params_st
# Parameter records for the memset entry points: D8/D16/D32 fill an
# elementwise value of 1/2/4 bytes; the D2D* variants are pitched 2D fills.
# `_ptds`/`_ptsz` = per-thread default-stream / per-thread-stream variants;
# the Async records append a trailing CUstream.
@c.record
class struct_cuMemsetD8_v2_ptds_params_st(c.Struct):
  SIZE = 24
  dstDevice: Annotated[CUdeviceptr, 0]
  uc: Annotated[Annotated[int, ctypes.c_ubyte], 8]
  N: Annotated[size_t, 16]
cuMemsetD8_v2_ptds_params: TypeAlias = struct_cuMemsetD8_v2_ptds_params_st
@c.record
class struct_cuMemsetD16_v2_ptds_params_st(c.Struct):
  SIZE = 24
  dstDevice: Annotated[CUdeviceptr, 0]
  us: Annotated[Annotated[int, ctypes.c_uint16], 8]
  N: Annotated[size_t, 16]
cuMemsetD16_v2_ptds_params: TypeAlias = struct_cuMemsetD16_v2_ptds_params_st
@c.record
class struct_cuMemsetD32_v2_ptds_params_st(c.Struct):
  SIZE = 24
  dstDevice: Annotated[CUdeviceptr, 0]
  ui: Annotated[Annotated[int, ctypes.c_uint32], 8]
  N: Annotated[size_t, 16]
cuMemsetD32_v2_ptds_params: TypeAlias = struct_cuMemsetD32_v2_ptds_params_st
@c.record
class struct_cuMemsetD2D8_v2_ptds_params_st(c.Struct):
  SIZE = 40
  dstDevice: Annotated[CUdeviceptr, 0]
  dstPitch: Annotated[size_t, 8]
  uc: Annotated[Annotated[int, ctypes.c_ubyte], 16]
  Width: Annotated[size_t, 24]
  Height: Annotated[size_t, 32]
cuMemsetD2D8_v2_ptds_params: TypeAlias = struct_cuMemsetD2D8_v2_ptds_params_st
@c.record
class struct_cuMemsetD2D16_v2_ptds_params_st(c.Struct):
  SIZE = 40
  dstDevice: Annotated[CUdeviceptr, 0]
  dstPitch: Annotated[size_t, 8]
  us: Annotated[Annotated[int, ctypes.c_uint16], 16]
  Width: Annotated[size_t, 24]
  Height: Annotated[size_t, 32]
cuMemsetD2D16_v2_ptds_params: TypeAlias = struct_cuMemsetD2D16_v2_ptds_params_st
@c.record
class struct_cuMemsetD2D32_v2_ptds_params_st(c.Struct):
  SIZE = 40
  dstDevice: Annotated[CUdeviceptr, 0]
  dstPitch: Annotated[size_t, 8]
  ui: Annotated[Annotated[int, ctypes.c_uint32], 16]
  Width: Annotated[size_t, 24]
  Height: Annotated[size_t, 32]
cuMemsetD2D32_v2_ptds_params: TypeAlias = struct_cuMemsetD2D32_v2_ptds_params_st
@c.record
class struct_cuMemsetD8Async_ptsz_params_st(c.Struct):
  SIZE = 32
  dstDevice: Annotated[CUdeviceptr, 0]
  uc: Annotated[Annotated[int, ctypes.c_ubyte], 8]
  N: Annotated[size_t, 16]
  hStream: Annotated[CUstream, 24]
cuMemsetD8Async_ptsz_params: TypeAlias = struct_cuMemsetD8Async_ptsz_params_st
@c.record
class struct_cuMemsetD16Async_ptsz_params_st(c.Struct):
  SIZE = 32
  dstDevice: Annotated[CUdeviceptr, 0]
  us: Annotated[Annotated[int, ctypes.c_uint16], 8]
  N: Annotated[size_t, 16]
  hStream: Annotated[CUstream, 24]
cuMemsetD16Async_ptsz_params: TypeAlias = struct_cuMemsetD16Async_ptsz_params_st
@c.record
class struct_cuMemsetD32Async_ptsz_params_st(c.Struct):
  SIZE = 32
  dstDevice: Annotated[CUdeviceptr, 0]
  ui: Annotated[Annotated[int, ctypes.c_uint32], 8]
  N: Annotated[size_t, 16]
  hStream: Annotated[CUstream, 24]
cuMemsetD32Async_ptsz_params: TypeAlias = struct_cuMemsetD32Async_ptsz_params_st
@c.record
class struct_cuMemsetD2D8Async_ptsz_params_st(c.Struct):
  SIZE = 48
  dstDevice: Annotated[CUdeviceptr, 0]
  dstPitch: Annotated[size_t, 8]
  uc: Annotated[Annotated[int, ctypes.c_ubyte], 16]
  Width: Annotated[size_t, 24]
  Height: Annotated[size_t, 32]
  hStream: Annotated[CUstream, 40]
cuMemsetD2D8Async_ptsz_params: TypeAlias = struct_cuMemsetD2D8Async_ptsz_params_st
@c.record
class struct_cuMemsetD2D16Async_ptsz_params_st(c.Struct):
  SIZE = 48
  dstDevice: Annotated[CUdeviceptr, 0]
  dstPitch: Annotated[size_t, 8]
  us: Annotated[Annotated[int, ctypes.c_uint16], 16]
  Width: Annotated[size_t, 24]
  Height: Annotated[size_t, 32]
  hStream: Annotated[CUstream, 40]
cuMemsetD2D16Async_ptsz_params: TypeAlias = struct_cuMemsetD2D16Async_ptsz_params_st
@c.record
class struct_cuMemsetD2D32Async_ptsz_params_st(c.Struct):
  SIZE = 48
  dstDevice: Annotated[CUdeviceptr, 0]
  dstPitch: Annotated[size_t, 8]
  ui: Annotated[Annotated[int, ctypes.c_uint32], 16]
  Width: Annotated[size_t, 24]
  Height: Annotated[size_t, 32]
  hStream: Annotated[CUstream, 40]
cuMemsetD2D32Async_ptsz_params: TypeAlias = struct_cuMemsetD2D32Async_ptsz_params_st
# CUDA array and mipmapped-array records: creation descriptors
# (CUDA_ARRAY_DESCRIPTOR / CUDA_ARRAY3D_DESCRIPTOR), sparse/memory-
# requirement property structs, and the param records for the cuArray* /
# cuMipmappedArray* entry points.
@c.record
class struct_cuArrayCreate_v2_params_st(c.Struct):
  SIZE = 16
  pHandle: Annotated[c.POINTER[CUarray], 0]
  pAllocateArray: Annotated[c.POINTER[CUDA_ARRAY_DESCRIPTOR], 8]
@c.record
class struct_CUDA_ARRAY_DESCRIPTOR_st(c.Struct):
  SIZE = 24
  Width: Annotated[size_t, 0]
  Height: Annotated[size_t, 8]
  Format: Annotated[CUarray_format, 16]
  NumChannels: Annotated[Annotated[int, ctypes.c_uint32], 20]
CUDA_ARRAY_DESCRIPTOR: TypeAlias = struct_CUDA_ARRAY_DESCRIPTOR_st
cuArrayCreate_v2_params: TypeAlias = struct_cuArrayCreate_v2_params_st
@c.record
class struct_cuArrayGetDescriptor_v2_params_st(c.Struct):
  SIZE = 16
  pArrayDescriptor: Annotated[c.POINTER[CUDA_ARRAY_DESCRIPTOR], 0]
  hArray: Annotated[CUarray, 8]
cuArrayGetDescriptor_v2_params: TypeAlias = struct_cuArrayGetDescriptor_v2_params_st
@c.record
class struct_cuArrayGetSparseProperties_params_st(c.Struct):
  SIZE = 16
  sparseProperties: Annotated[c.POINTER[CUDA_ARRAY_SPARSE_PROPERTIES], 0]
  array: Annotated[CUarray, 8]
@c.record
class struct_CUDA_ARRAY_SPARSE_PROPERTIES_st(c.Struct):
  SIZE = 48
  tileExtent: Annotated[struct_CUDA_ARRAY_SPARSE_PROPERTIES_st_tileExtent, 0]
  miptailFirstLevel: Annotated[Annotated[int, ctypes.c_uint32], 12]
  miptailSize: Annotated[Annotated[int, ctypes.c_uint64], 16]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 24]
  reserved: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[4]], 28]
CUDA_ARRAY_SPARSE_PROPERTIES: TypeAlias = struct_CUDA_ARRAY_SPARSE_PROPERTIES_st
@c.record
class struct_CUDA_ARRAY_SPARSE_PROPERTIES_st_tileExtent(c.Struct):
  SIZE = 12
  width: Annotated[Annotated[int, ctypes.c_uint32], 0]
  height: Annotated[Annotated[int, ctypes.c_uint32], 4]
  depth: Annotated[Annotated[int, ctypes.c_uint32], 8]
cuArrayGetSparseProperties_params: TypeAlias = struct_cuArrayGetSparseProperties_params_st
@c.record
class struct_cuMipmappedArrayGetSparseProperties_params_st(c.Struct):
  SIZE = 16
  sparseProperties: Annotated[c.POINTER[CUDA_ARRAY_SPARSE_PROPERTIES], 0]
  mipmap: Annotated[CUmipmappedArray, 8]
# Opaque mipmapped-array handle; only used behind a pointer.
class struct_CUmipmappedArray_st(ctypes.Structure): pass
CUmipmappedArray: TypeAlias = c.POINTER[struct_CUmipmappedArray_st]
cuMipmappedArrayGetSparseProperties_params: TypeAlias = struct_cuMipmappedArrayGetSparseProperties_params_st
@c.record
class struct_cuArrayGetMemoryRequirements_params_st(c.Struct):
  SIZE = 24
  memoryRequirements: Annotated[c.POINTER[CUDA_ARRAY_MEMORY_REQUIREMENTS], 0]
  array: Annotated[CUarray, 8]
  device: Annotated[CUdevice, 16]
@c.record
class struct_CUDA_ARRAY_MEMORY_REQUIREMENTS_st(c.Struct):
  SIZE = 32
  size: Annotated[size_t, 0]
  alignment: Annotated[size_t, 8]
  reserved: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[4]], 16]
CUDA_ARRAY_MEMORY_REQUIREMENTS: TypeAlias = struct_CUDA_ARRAY_MEMORY_REQUIREMENTS_st
cuArrayGetMemoryRequirements_params: TypeAlias = struct_cuArrayGetMemoryRequirements_params_st
@c.record
class struct_cuMipmappedArrayGetMemoryRequirements_params_st(c.Struct):
  SIZE = 24
  memoryRequirements: Annotated[c.POINTER[CUDA_ARRAY_MEMORY_REQUIREMENTS], 0]
  mipmap: Annotated[CUmipmappedArray, 8]
  device: Annotated[CUdevice, 16]
cuMipmappedArrayGetMemoryRequirements_params: TypeAlias = struct_cuMipmappedArrayGetMemoryRequirements_params_st
@c.record
class struct_cuArrayGetPlane_params_st(c.Struct):
  SIZE = 24
  pPlaneArray: Annotated[c.POINTER[CUarray], 0]
  hArray: Annotated[CUarray, 8]
  planeIdx: Annotated[Annotated[int, ctypes.c_uint32], 16]
cuArrayGetPlane_params: TypeAlias = struct_cuArrayGetPlane_params_st
@c.record
class struct_cuArrayDestroy_params_st(c.Struct):
  SIZE = 8
  hArray: Annotated[CUarray, 0]
cuArrayDestroy_params: TypeAlias = struct_cuArrayDestroy_params_st
@c.record
class struct_cuArray3DCreate_v2_params_st(c.Struct):
  SIZE = 16
  pHandle: Annotated[c.POINTER[CUarray], 0]
  pAllocateArray: Annotated[c.POINTER[CUDA_ARRAY3D_DESCRIPTOR], 8]
@c.record
class struct_CUDA_ARRAY3D_DESCRIPTOR_st(c.Struct):
  SIZE = 40
  Width: Annotated[size_t, 0]
  Height: Annotated[size_t, 8]
  Depth: Annotated[size_t, 16]
  Format: Annotated[CUarray_format, 24]
  NumChannels: Annotated[Annotated[int, ctypes.c_uint32], 28]
  Flags: Annotated[Annotated[int, ctypes.c_uint32], 32]
CUDA_ARRAY3D_DESCRIPTOR: TypeAlias = struct_CUDA_ARRAY3D_DESCRIPTOR_st
cuArray3DCreate_v2_params: TypeAlias = struct_cuArray3DCreate_v2_params_st
@c.record
class struct_cuArray3DGetDescriptor_v2_params_st(c.Struct):
  SIZE = 16
  pArrayDescriptor: Annotated[c.POINTER[CUDA_ARRAY3D_DESCRIPTOR], 0]
  hArray: Annotated[CUarray, 8]
cuArray3DGetDescriptor_v2_params: TypeAlias = struct_cuArray3DGetDescriptor_v2_params_st
@c.record
class struct_cuMipmappedArrayCreate_params_st(c.Struct):
  SIZE = 24
  pHandle: Annotated[c.POINTER[CUmipmappedArray], 0]
  pMipmappedArrayDesc: Annotated[c.POINTER[CUDA_ARRAY3D_DESCRIPTOR], 8]
  numMipmapLevels: Annotated[Annotated[int, ctypes.c_uint32], 16]
cuMipmappedArrayCreate_params: TypeAlias = struct_cuMipmappedArrayCreate_params_st
@c.record
class struct_cuMipmappedArrayGetLevel_params_st(c.Struct):
  SIZE = 24
  pLevelArray: Annotated[c.POINTER[CUarray], 0]
  hMipmappedArray: Annotated[CUmipmappedArray, 8]
  level: Annotated[Annotated[int, ctypes.c_uint32], 16]
cuMipmappedArrayGetLevel_params: TypeAlias = struct_cuMipmappedArrayGetLevel_params_st
@c.record
class struct_cuMipmappedArrayDestroy_params_st(c.Struct):
  SIZE = 8
  hMipmappedArray: Annotated[CUmipmappedArray, 0]
cuMipmappedArrayDestroy_params: TypeAlias = struct_cuMipmappedArrayDestroy_params_st
@c.record
class struct_cuMemGetHandleForAddressRange_params_st(c.Struct):
SIZE = 40
handle: Annotated[ctypes.c_void_p, 0]
dptr: Annotated[CUdeviceptr, 8]
size: Annotated[size_t, 16]
handleType: Annotated[CUmemRangeHandleType, 24]
flags: Annotated[Annotated[int, ctypes.c_uint64], 32]
class enum_CUmemRangeHandleType_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_MEM_RANGE_HANDLE_TYPE_DMA_BUF_FD = enum_CUmemRangeHandleType_enum.define('CU_MEM_RANGE_HANDLE_TYPE_DMA_BUF_FD', 1)
CU_MEM_RANGE_HANDLE_TYPE_MAX = enum_CUmemRangeHandleType_enum.define('CU_MEM_RANGE_HANDLE_TYPE_MAX', 2147483647)
CUmemRangeHandleType: TypeAlias = enum_CUmemRangeHandleType_enum
cuMemGetHandleForAddressRange_params: TypeAlias = struct_cuMemGetHandleForAddressRange_params_st
# Argument record for cuMemBatchDecompressAsync (_ptsz = per-thread default
# stream variant) and the per-operation CUmemDecompressParams descriptor it
# points at. Annotated offsets are byte offsets; SIZE is sizeof().
@c.record
class struct_cuMemBatchDecompressAsync_ptsz_params_st(c.Struct):
  SIZE = 40
  paramsArray: Annotated[c.POINTER[CUmemDecompressParams], 0]
  count: Annotated[size_t, 8]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 16]
  errorIndex: Annotated[c.POINTER[size_t], 24]
  stream: Annotated[CUstream, 32]
# One decompression work item: source/destination buffers, sizes and algorithm.
@c.record
class struct_CUmemDecompressParams_st(c.Struct):
  SIZE = 64
  srcNumBytes: Annotated[size_t, 0]
  dstNumBytes: Annotated[size_t, 8]
  dstActBytes: Annotated[c.POINTER[cuuint32_t], 16]
  src: Annotated[ctypes.c_void_p, 24]
  dst: Annotated[ctypes.c_void_p, 32]
  algo: Annotated[CUmemDecompressAlgorithm, 40]
  padding: Annotated[c.Array[Annotated[int, ctypes.c_ubyte], Literal[20]], 44]
CUmemDecompressParams: TypeAlias = struct_CUmemDecompressParams_st
cuuint32_t: TypeAlias = Annotated[int, ctypes.c_uint32]
class enum_CUmemDecompressAlgorithm_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_MEM_DECOMPRESS_UNSUPPORTED = enum_CUmemDecompressAlgorithm_enum.define('CU_MEM_DECOMPRESS_UNSUPPORTED', 0)
CU_MEM_DECOMPRESS_ALGORITHM_DEFLATE = enum_CUmemDecompressAlgorithm_enum.define('CU_MEM_DECOMPRESS_ALGORITHM_DEFLATE', 1)
CU_MEM_DECOMPRESS_ALGORITHM_SNAPPY = enum_CUmemDecompressAlgorithm_enum.define('CU_MEM_DECOMPRESS_ALGORITHM_SNAPPY', 2)
CUmemDecompressAlgorithm: TypeAlias = enum_CUmemDecompressAlgorithm_enum
cuMemBatchDecompressAsync_ptsz_params: TypeAlias = struct_cuMemBatchDecompressAsync_ptsz_params_st
# Argument records for the virtual-address reservation APIs
# (cuMemAddressReserve / cuMemAddressFree). Offsets are byte offsets.
@c.record
class struct_cuMemAddressReserve_params_st(c.Struct):
  SIZE = 40
  ptr: Annotated[c.POINTER[CUdeviceptr], 0]
  size: Annotated[size_t, 8]
  alignment: Annotated[size_t, 16]
  addr: Annotated[CUdeviceptr, 24]
  flags: Annotated[Annotated[int, ctypes.c_uint64], 32]
cuMemAddressReserve_params: TypeAlias = struct_cuMemAddressReserve_params_st
@c.record
class struct_cuMemAddressFree_params_st(c.Struct):
  SIZE = 16
  ptr: Annotated[CUdeviceptr, 0]
  size: Annotated[size_t, 8]
cuMemAddressFree_params: TypeAlias = struct_cuMemAddressFree_params_st
# cuMemCreate argument record plus the CUmemAllocationProp descriptor and the
# enums it references. Annotated offsets are byte offsets; SIZE is sizeof().
@c.record
class struct_cuMemCreate_params_st(c.Struct):
  SIZE = 32
  handle: Annotated[c.POINTER[CUmemGenericAllocationHandle], 0]
  size: Annotated[size_t, 8]
  prop: Annotated[c.POINTER[CUmemAllocationProp], 16]
  flags: Annotated[Annotated[int, ctypes.c_uint64], 24]
# Opaque 64-bit handle returned by the VMM allocation APIs.
CUmemGenericAllocationHandle: TypeAlias = Annotated[int, ctypes.c_uint64]
# Properties describing a physical memory allocation request.
@c.record
class struct_CUmemAllocationProp_st(c.Struct):
  SIZE = 32
  type: Annotated[CUmemAllocationType, 0]
  requestedHandleTypes: Annotated[CUmemAllocationHandleType, 4]
  location: Annotated[CUmemLocation, 8]
  win32HandleMetaData: Annotated[ctypes.c_void_p, 16]
  allocFlags: Annotated[struct_CUmemAllocationProp_st_allocFlags, 24]
CUmemAllocationProp: TypeAlias = struct_CUmemAllocationProp_st
class enum_CUmemAllocationType_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_MEM_ALLOCATION_TYPE_INVALID = enum_CUmemAllocationType_enum.define('CU_MEM_ALLOCATION_TYPE_INVALID', 0)
CU_MEM_ALLOCATION_TYPE_PINNED = enum_CUmemAllocationType_enum.define('CU_MEM_ALLOCATION_TYPE_PINNED', 1)
CU_MEM_ALLOCATION_TYPE_MAX = enum_CUmemAllocationType_enum.define('CU_MEM_ALLOCATION_TYPE_MAX', 2147483647)
CUmemAllocationType: TypeAlias = enum_CUmemAllocationType_enum
# CUmemAllocationHandleType values are bit flags (1, 2, 4, 8) and may be OR'd.
class enum_CUmemAllocationHandleType_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_MEM_HANDLE_TYPE_NONE = enum_CUmemAllocationHandleType_enum.define('CU_MEM_HANDLE_TYPE_NONE', 0)
CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR = enum_CUmemAllocationHandleType_enum.define('CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR', 1)
CU_MEM_HANDLE_TYPE_WIN32 = enum_CUmemAllocationHandleType_enum.define('CU_MEM_HANDLE_TYPE_WIN32', 2)
CU_MEM_HANDLE_TYPE_WIN32_KMT = enum_CUmemAllocationHandleType_enum.define('CU_MEM_HANDLE_TYPE_WIN32_KMT', 4)
CU_MEM_HANDLE_TYPE_FABRIC = enum_CUmemAllocationHandleType_enum.define('CU_MEM_HANDLE_TYPE_FABRIC', 8)
CU_MEM_HANDLE_TYPE_MAX = enum_CUmemAllocationHandleType_enum.define('CU_MEM_HANDLE_TYPE_MAX', 2147483647)
CUmemAllocationHandleType: TypeAlias = enum_CUmemAllocationHandleType_enum
# Nested allocFlags member of CUmemAllocationProp (packed byte/short fields).
@c.record
class struct_CUmemAllocationProp_st_allocFlags(c.Struct):
  SIZE = 8
  compressionType: Annotated[Annotated[int, ctypes.c_ubyte], 0]
  gpuDirectRDMACapable: Annotated[Annotated[int, ctypes.c_ubyte], 1]
  usage: Annotated[Annotated[int, ctypes.c_uint16], 2]
  reserved: Annotated[c.Array[Annotated[int, ctypes.c_ubyte], Literal[4]], 4]
cuMemCreate_params: TypeAlias = struct_cuMemCreate_params_st
# Argument records for cuMemRelease and cuMemMap. Offsets are byte offsets.
@c.record
class struct_cuMemRelease_params_st(c.Struct):
  SIZE = 8
  handle: Annotated[CUmemGenericAllocationHandle, 0]
cuMemRelease_params: TypeAlias = struct_cuMemRelease_params_st
@c.record
class struct_cuMemMap_params_st(c.Struct):
  SIZE = 40
  ptr: Annotated[CUdeviceptr, 0]
  size: Annotated[size_t, 8]
  offset: Annotated[size_t, 16]
  handle: Annotated[CUmemGenericAllocationHandle, 24]
  flags: Annotated[Annotated[int, ctypes.c_uint64], 32]
cuMemMap_params: TypeAlias = struct_cuMemMap_params_st
# cuMemMapArrayAsync argument record plus the CUarrayMapInfo descriptor and
# all of its nested members/enums. Annotated offsets are byte offsets; SIZE is
# sizeof(). Nested records whose fields share offset 0 mirror C unions.
@c.record
class struct_cuMemMapArrayAsync_ptsz_params_st(c.Struct):
  SIZE = 24
  mapInfoList: Annotated[c.POINTER[CUarrayMapInfo], 0]
  count: Annotated[Annotated[int, ctypes.c_uint32], 8]
  hStream: Annotated[CUstream, 16]
# Describes one sparse-array map/unmap operation.
@c.record
class struct_CUarrayMapInfo_st(c.Struct):
  SIZE = 96
  resourceType: Annotated[CUresourcetype, 0]
  resource: Annotated[struct_CUarrayMapInfo_st_resource, 8]
  subresourceType: Annotated[CUarraySparseSubresourceType, 16]
  subresource: Annotated[struct_CUarrayMapInfo_st_subresource, 24]
  memOperationType: Annotated[CUmemOperationType, 56]
  memHandleType: Annotated[CUmemHandleType, 60]
  memHandle: Annotated[struct_CUarrayMapInfo_st_memHandle, 64]
  offset: Annotated[Annotated[int, ctypes.c_uint64], 72]
  deviceBitMask: Annotated[Annotated[int, ctypes.c_uint32], 80]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 84]
  reserved: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[2]], 88]
CUarrayMapInfo: TypeAlias = struct_CUarrayMapInfo_st
class enum_CUresourcetype_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_RESOURCE_TYPE_ARRAY = enum_CUresourcetype_enum.define('CU_RESOURCE_TYPE_ARRAY', 0)
CU_RESOURCE_TYPE_MIPMAPPED_ARRAY = enum_CUresourcetype_enum.define('CU_RESOURCE_TYPE_MIPMAPPED_ARRAY', 1)
CU_RESOURCE_TYPE_LINEAR = enum_CUresourcetype_enum.define('CU_RESOURCE_TYPE_LINEAR', 2)
CU_RESOURCE_TYPE_PITCH2D = enum_CUresourcetype_enum.define('CU_RESOURCE_TYPE_PITCH2D', 3)
CUresourcetype: TypeAlias = enum_CUresourcetype_enum
# C union: both members start at offset 0; which one is valid is selected by
# CUarrayMapInfo.resourceType.
@c.record
class struct_CUarrayMapInfo_st_resource(c.Struct):
  SIZE = 8
  mipmap: Annotated[CUmipmappedArray, 0]
  array: Annotated[CUarray, 0]
class enum_CUarraySparseSubresourceType_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_SPARSE_LEVEL = enum_CUarraySparseSubresourceType_enum.define('CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_SPARSE_LEVEL', 0)
CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_MIPTAIL = enum_CUarraySparseSubresourceType_enum.define('CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_MIPTAIL', 1)
CUarraySparseSubresourceType: TypeAlias = enum_CUarraySparseSubresourceType_enum
# C union: selected by CUarrayMapInfo.subresourceType.
@c.record
class struct_CUarrayMapInfo_st_subresource(c.Struct):
  SIZE = 32
  sparseLevel: Annotated[struct_CUarrayMapInfo_st_subresource_sparseLevel, 0]
  miptail: Annotated[struct_CUarrayMapInfo_st_subresource_miptail, 0]
@c.record
class struct_CUarrayMapInfo_st_subresource_sparseLevel(c.Struct):
  SIZE = 32
  level: Annotated[Annotated[int, ctypes.c_uint32], 0]
  layer: Annotated[Annotated[int, ctypes.c_uint32], 4]
  offsetX: Annotated[Annotated[int, ctypes.c_uint32], 8]
  offsetY: Annotated[Annotated[int, ctypes.c_uint32], 12]
  offsetZ: Annotated[Annotated[int, ctypes.c_uint32], 16]
  extentWidth: Annotated[Annotated[int, ctypes.c_uint32], 20]
  extentHeight: Annotated[Annotated[int, ctypes.c_uint32], 24]
  extentDepth: Annotated[Annotated[int, ctypes.c_uint32], 28]
@c.record
class struct_CUarrayMapInfo_st_subresource_miptail(c.Struct):
  SIZE = 24
  layer: Annotated[Annotated[int, ctypes.c_uint32], 0]
  offset: Annotated[Annotated[int, ctypes.c_uint64], 8]
  size: Annotated[Annotated[int, ctypes.c_uint64], 16]
class enum_CUmemOperationType_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_MEM_OPERATION_TYPE_MAP = enum_CUmemOperationType_enum.define('CU_MEM_OPERATION_TYPE_MAP', 1)
CU_MEM_OPERATION_TYPE_UNMAP = enum_CUmemOperationType_enum.define('CU_MEM_OPERATION_TYPE_UNMAP', 2)
CUmemOperationType: TypeAlias = enum_CUmemOperationType_enum
class enum_CUmemHandleType_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_MEM_HANDLE_TYPE_GENERIC = enum_CUmemHandleType_enum.define('CU_MEM_HANDLE_TYPE_GENERIC', 0)
CUmemHandleType: TypeAlias = enum_CUmemHandleType_enum
@c.record
class struct_CUarrayMapInfo_st_memHandle(c.Struct):
  SIZE = 8
  memHandle: Annotated[CUmemGenericAllocationHandle, 0]
cuMemMapArrayAsync_ptsz_params: TypeAlias = struct_cuMemMapArrayAsync_ptsz_params_st
# Argument records for cuMemUnmap / cuMemSetAccess / cuMemGetAccess plus the
# access-descriptor struct and flag enum they use. Offsets are byte offsets.
@c.record
class struct_cuMemUnmap_params_st(c.Struct):
  SIZE = 16
  ptr: Annotated[CUdeviceptr, 0]
  size: Annotated[size_t, 8]
cuMemUnmap_params: TypeAlias = struct_cuMemUnmap_params_st
@c.record
class struct_cuMemSetAccess_params_st(c.Struct):
  SIZE = 32
  ptr: Annotated[CUdeviceptr, 0]
  size: Annotated[size_t, 8]
  desc: Annotated[c.POINTER[CUmemAccessDesc], 16]
  count: Annotated[size_t, 24]
# (location, flags) pair granting access to a mapped range from one location.
@c.record
class struct_CUmemAccessDesc_st(c.Struct):
  SIZE = 12
  location: Annotated[CUmemLocation, 0]
  flags: Annotated[CUmemAccess_flags, 8]
CUmemAccessDesc: TypeAlias = struct_CUmemAccessDesc_st
class enum_CUmemAccess_flags_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_MEM_ACCESS_FLAGS_PROT_NONE = enum_CUmemAccess_flags_enum.define('CU_MEM_ACCESS_FLAGS_PROT_NONE', 0)
CU_MEM_ACCESS_FLAGS_PROT_READ = enum_CUmemAccess_flags_enum.define('CU_MEM_ACCESS_FLAGS_PROT_READ', 1)
CU_MEM_ACCESS_FLAGS_PROT_READWRITE = enum_CUmemAccess_flags_enum.define('CU_MEM_ACCESS_FLAGS_PROT_READWRITE', 3)
CU_MEM_ACCESS_FLAGS_PROT_MAX = enum_CUmemAccess_flags_enum.define('CU_MEM_ACCESS_FLAGS_PROT_MAX', 2147483647)
CUmemAccess_flags: TypeAlias = enum_CUmemAccess_flags_enum
cuMemSetAccess_params: TypeAlias = struct_cuMemSetAccess_params_st
@c.record
class struct_cuMemGetAccess_params_st(c.Struct):
  SIZE = 24
  flags: Annotated[c.POINTER[Annotated[int, ctypes.c_uint64]], 0]
  location: Annotated[c.POINTER[CUmemLocation], 8]
  ptr: Annotated[CUdeviceptr, 16]
cuMemGetAccess_params: TypeAlias = struct_cuMemGetAccess_params_st
# Argument records for the shareable-handle and allocation-introspection APIs:
# cuMemExportToShareableHandle, cuMemImportFromShareableHandle,
# cuMemGetAllocationGranularity, cuMemGetAllocationPropertiesFromHandle and
# cuMemRetainAllocationHandle. Offsets are byte offsets; SIZE is sizeof().
@c.record
class struct_cuMemExportToShareableHandle_params_st(c.Struct):
  SIZE = 32
  shareableHandle: Annotated[ctypes.c_void_p, 0]
  handle: Annotated[CUmemGenericAllocationHandle, 8]
  handleType: Annotated[CUmemAllocationHandleType, 16]
  flags: Annotated[Annotated[int, ctypes.c_uint64], 24]
cuMemExportToShareableHandle_params: TypeAlias = struct_cuMemExportToShareableHandle_params_st
@c.record
class struct_cuMemImportFromShareableHandle_params_st(c.Struct):
  SIZE = 24
  handle: Annotated[c.POINTER[CUmemGenericAllocationHandle], 0]
  osHandle: Annotated[ctypes.c_void_p, 8]
  shHandleType: Annotated[CUmemAllocationHandleType, 16]
cuMemImportFromShareableHandle_params: TypeAlias = struct_cuMemImportFromShareableHandle_params_st
@c.record
class struct_cuMemGetAllocationGranularity_params_st(c.Struct):
  SIZE = 24
  granularity: Annotated[c.POINTER[size_t], 0]
  prop: Annotated[c.POINTER[CUmemAllocationProp], 8]
  option: Annotated[CUmemAllocationGranularity_flags, 16]
class enum_CUmemAllocationGranularity_flags_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_MEM_ALLOC_GRANULARITY_MINIMUM = enum_CUmemAllocationGranularity_flags_enum.define('CU_MEM_ALLOC_GRANULARITY_MINIMUM', 0)
CU_MEM_ALLOC_GRANULARITY_RECOMMENDED = enum_CUmemAllocationGranularity_flags_enum.define('CU_MEM_ALLOC_GRANULARITY_RECOMMENDED', 1)
CUmemAllocationGranularity_flags: TypeAlias = enum_CUmemAllocationGranularity_flags_enum
cuMemGetAllocationGranularity_params: TypeAlias = struct_cuMemGetAllocationGranularity_params_st
@c.record
class struct_cuMemGetAllocationPropertiesFromHandle_params_st(c.Struct):
  SIZE = 16
  prop: Annotated[c.POINTER[CUmemAllocationProp], 0]
  handle: Annotated[CUmemGenericAllocationHandle, 8]
cuMemGetAllocationPropertiesFromHandle_params: TypeAlias = struct_cuMemGetAllocationPropertiesFromHandle_params_st
@c.record
class struct_cuMemRetainAllocationHandle_params_st(c.Struct):
  SIZE = 16
  handle: Annotated[c.POINTER[CUmemGenericAllocationHandle], 0]
  addr: Annotated[ctypes.c_void_p, 8]
cuMemRetainAllocationHandle_params: TypeAlias = struct_cuMemRetainAllocationHandle_params_st
# Argument records for the stream-ordered allocator entry points
# cuMemFreeAsync / cuMemAllocAsync (_ptsz variants) and cuMemPoolTrimTo.
@c.record
class struct_cuMemFreeAsync_ptsz_params_st(c.Struct):
  SIZE = 16
  dptr: Annotated[CUdeviceptr, 0]
  hStream: Annotated[CUstream, 8]
cuMemFreeAsync_ptsz_params: TypeAlias = struct_cuMemFreeAsync_ptsz_params_st
@c.record
class struct_cuMemAllocAsync_ptsz_params_st(c.Struct):
  SIZE = 24
  dptr: Annotated[c.POINTER[CUdeviceptr], 0]
  bytesize: Annotated[size_t, 8]
  hStream: Annotated[CUstream, 16]
cuMemAllocAsync_ptsz_params: TypeAlias = struct_cuMemAllocAsync_ptsz_params_st
@c.record
class struct_cuMemPoolTrimTo_params_st(c.Struct):
  SIZE = 16
  pool: Annotated[CUmemoryPool, 0]
  minBytesToKeep: Annotated[size_t, 8]
cuMemPoolTrimTo_params: TypeAlias = struct_cuMemPoolTrimTo_params_st
# Argument records for memory-pool attribute and access control:
# cuMemPoolSetAttribute / GetAttribute / SetAccess / GetAccess, plus the
# CUmemPool_attribute enum. Offsets are byte offsets; SIZE is sizeof().
@c.record
class struct_cuMemPoolSetAttribute_params_st(c.Struct):
  SIZE = 24
  pool: Annotated[CUmemoryPool, 0]
  attr: Annotated[CUmemPool_attribute, 8]
  value: Annotated[ctypes.c_void_p, 16]
class enum_CUmemPool_attribute_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_MEMPOOL_ATTR_REUSE_FOLLOW_EVENT_DEPENDENCIES = enum_CUmemPool_attribute_enum.define('CU_MEMPOOL_ATTR_REUSE_FOLLOW_EVENT_DEPENDENCIES', 1)
CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC = enum_CUmemPool_attribute_enum.define('CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC', 2)
CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES = enum_CUmemPool_attribute_enum.define('CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES', 3)
CU_MEMPOOL_ATTR_RELEASE_THRESHOLD = enum_CUmemPool_attribute_enum.define('CU_MEMPOOL_ATTR_RELEASE_THRESHOLD', 4)
CU_MEMPOOL_ATTR_RESERVED_MEM_CURRENT = enum_CUmemPool_attribute_enum.define('CU_MEMPOOL_ATTR_RESERVED_MEM_CURRENT', 5)
CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH = enum_CUmemPool_attribute_enum.define('CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH', 6)
CU_MEMPOOL_ATTR_USED_MEM_CURRENT = enum_CUmemPool_attribute_enum.define('CU_MEMPOOL_ATTR_USED_MEM_CURRENT', 7)
CU_MEMPOOL_ATTR_USED_MEM_HIGH = enum_CUmemPool_attribute_enum.define('CU_MEMPOOL_ATTR_USED_MEM_HIGH', 8)
CUmemPool_attribute: TypeAlias = enum_CUmemPool_attribute_enum
cuMemPoolSetAttribute_params: TypeAlias = struct_cuMemPoolSetAttribute_params_st
@c.record
class struct_cuMemPoolGetAttribute_params_st(c.Struct):
  SIZE = 24
  pool: Annotated[CUmemoryPool, 0]
  attr: Annotated[CUmemPool_attribute, 8]
  value: Annotated[ctypes.c_void_p, 16]
cuMemPoolGetAttribute_params: TypeAlias = struct_cuMemPoolGetAttribute_params_st
@c.record
class struct_cuMemPoolSetAccess_params_st(c.Struct):
  SIZE = 24
  pool: Annotated[CUmemoryPool, 0]
  map: Annotated[c.POINTER[CUmemAccessDesc], 8]
  count: Annotated[size_t, 16]
cuMemPoolSetAccess_params: TypeAlias = struct_cuMemPoolSetAccess_params_st
@c.record
class struct_cuMemPoolGetAccess_params_st(c.Struct):
  SIZE = 24
  flags: Annotated[c.POINTER[CUmemAccess_flags], 0]
  memPool: Annotated[CUmemoryPool, 8]
  location: Annotated[c.POINTER[CUmemLocation], 16]
cuMemPoolGetAccess_params: TypeAlias = struct_cuMemPoolGetAccess_params_st
# Argument records for cuMemPoolCreate / cuMemPoolDestroy plus the
# CUmemPoolProps descriptor. Offsets are byte offsets; SIZE is sizeof().
@c.record
class struct_cuMemPoolCreate_params_st(c.Struct):
  SIZE = 16
  pool: Annotated[c.POINTER[CUmemoryPool], 0]
  poolProps: Annotated[c.POINTER[CUmemPoolProps], 8]
# Creation properties for a memory pool.
@c.record
class struct_CUmemPoolProps_st(c.Struct):
  SIZE = 88
  allocType: Annotated[CUmemAllocationType, 0]
  handleTypes: Annotated[CUmemAllocationHandleType, 4]
  location: Annotated[CUmemLocation, 8]
  win32SecurityAttributes: Annotated[ctypes.c_void_p, 16]
  maxSize: Annotated[size_t, 24]
  usage: Annotated[Annotated[int, ctypes.c_uint16], 32]
  reserved: Annotated[c.Array[Annotated[int, ctypes.c_ubyte], Literal[54]], 34]
CUmemPoolProps: TypeAlias = struct_CUmemPoolProps_st
cuMemPoolCreate_params: TypeAlias = struct_cuMemPoolCreate_params_st
@c.record
class struct_cuMemPoolDestroy_params_st(c.Struct):
  SIZE = 8
  pool: Annotated[CUmemoryPool, 0]
cuMemPoolDestroy_params: TypeAlias = struct_cuMemPoolDestroy_params_st
# Argument records for pool-based allocation and IPC of pool memory:
# cuMemAllocFromPoolAsync, cuMemPoolExport/ImportFromShareableHandle and
# cuMemPoolExport/ImportPointer. Offsets are byte offsets; SIZE is sizeof().
@c.record
class struct_cuMemAllocFromPoolAsync_ptsz_params_st(c.Struct):
  SIZE = 32
  dptr: Annotated[c.POINTER[CUdeviceptr], 0]
  bytesize: Annotated[size_t, 8]
  pool: Annotated[CUmemoryPool, 16]
  hStream: Annotated[CUstream, 24]
cuMemAllocFromPoolAsync_ptsz_params: TypeAlias = struct_cuMemAllocFromPoolAsync_ptsz_params_st
@c.record
class struct_cuMemPoolExportToShareableHandle_params_st(c.Struct):
  SIZE = 32
  handle_out: Annotated[ctypes.c_void_p, 0]
  pool: Annotated[CUmemoryPool, 8]
  handleType: Annotated[CUmemAllocationHandleType, 16]
  flags: Annotated[Annotated[int, ctypes.c_uint64], 24]
cuMemPoolExportToShareableHandle_params: TypeAlias = struct_cuMemPoolExportToShareableHandle_params_st
@c.record
class struct_cuMemPoolImportFromShareableHandle_params_st(c.Struct):
  SIZE = 32
  pool_out: Annotated[c.POINTER[CUmemoryPool], 0]
  handle: Annotated[ctypes.c_void_p, 8]
  handleType: Annotated[CUmemAllocationHandleType, 16]
  flags: Annotated[Annotated[int, ctypes.c_uint64], 24]
cuMemPoolImportFromShareableHandle_params: TypeAlias = struct_cuMemPoolImportFromShareableHandle_params_st
@c.record
class struct_cuMemPoolExportPointer_params_st(c.Struct):
  SIZE = 16
  shareData_out: Annotated[c.POINTER[CUmemPoolPtrExportData], 0]
  ptr: Annotated[CUdeviceptr, 8]
# Opaque 64-byte blob used to hand a pool pointer to another process.
@c.record
class struct_CUmemPoolPtrExportData_st(c.Struct):
  SIZE = 64
  reserved: Annotated[c.Array[Annotated[int, ctypes.c_ubyte], Literal[64]], 0]
CUmemPoolPtrExportData: TypeAlias = struct_CUmemPoolPtrExportData_st
cuMemPoolExportPointer_params: TypeAlias = struct_cuMemPoolExportPointer_params_st
@c.record
class struct_cuMemPoolImportPointer_params_st(c.Struct):
  SIZE = 24
  ptr_out: Annotated[c.POINTER[CUdeviceptr], 0]
  pool: Annotated[CUmemoryPool, 8]
  shareData: Annotated[c.POINTER[CUmemPoolPtrExportData], 16]
cuMemPoolImportPointer_params: TypeAlias = struct_cuMemPoolImportPointer_params_st
# Argument records for the multicast-object APIs (cuMulticastCreate /
# AddDevice / BindMem / BindAddr / Unbind / GetGranularity) plus the object
# property struct and granularity enum. Offsets are byte offsets.
@c.record
class struct_cuMulticastCreate_params_st(c.Struct):
  SIZE = 16
  mcHandle: Annotated[c.POINTER[CUmemGenericAllocationHandle], 0]
  prop: Annotated[c.POINTER[CUmulticastObjectProp], 8]
@c.record
class struct_CUmulticastObjectProp_st(c.Struct):
  SIZE = 32
  numDevices: Annotated[Annotated[int, ctypes.c_uint32], 0]
  size: Annotated[size_t, 8]
  handleTypes: Annotated[Annotated[int, ctypes.c_uint64], 16]
  flags: Annotated[Annotated[int, ctypes.c_uint64], 24]
CUmulticastObjectProp: TypeAlias = struct_CUmulticastObjectProp_st
cuMulticastCreate_params: TypeAlias = struct_cuMulticastCreate_params_st
@c.record
class struct_cuMulticastAddDevice_params_st(c.Struct):
  SIZE = 16
  mcHandle: Annotated[CUmemGenericAllocationHandle, 0]
  dev: Annotated[CUdevice, 8]
cuMulticastAddDevice_params: TypeAlias = struct_cuMulticastAddDevice_params_st
@c.record
class struct_cuMulticastBindMem_params_st(c.Struct):
  SIZE = 48
  mcHandle: Annotated[CUmemGenericAllocationHandle, 0]
  mcOffset: Annotated[size_t, 8]
  memHandle: Annotated[CUmemGenericAllocationHandle, 16]
  memOffset: Annotated[size_t, 24]
  size: Annotated[size_t, 32]
  flags: Annotated[Annotated[int, ctypes.c_uint64], 40]
cuMulticastBindMem_params: TypeAlias = struct_cuMulticastBindMem_params_st
@c.record
class struct_cuMulticastBindAddr_params_st(c.Struct):
  SIZE = 40
  mcHandle: Annotated[CUmemGenericAllocationHandle, 0]
  mcOffset: Annotated[size_t, 8]
  memptr: Annotated[CUdeviceptr, 16]
  size: Annotated[size_t, 24]
  flags: Annotated[Annotated[int, ctypes.c_uint64], 32]
cuMulticastBindAddr_params: TypeAlias = struct_cuMulticastBindAddr_params_st
@c.record
class struct_cuMulticastUnbind_params_st(c.Struct):
  SIZE = 32
  mcHandle: Annotated[CUmemGenericAllocationHandle, 0]
  dev: Annotated[CUdevice, 8]
  mcOffset: Annotated[size_t, 16]
  size: Annotated[size_t, 24]
cuMulticastUnbind_params: TypeAlias = struct_cuMulticastUnbind_params_st
@c.record
class struct_cuMulticastGetGranularity_params_st(c.Struct):
  SIZE = 24
  granularity: Annotated[c.POINTER[size_t], 0]
  prop: Annotated[c.POINTER[CUmulticastObjectProp], 8]
  option: Annotated[CUmulticastGranularity_flags, 16]
class enum_CUmulticastGranularity_flags_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_MULTICAST_GRANULARITY_MINIMUM = enum_CUmulticastGranularity_flags_enum.define('CU_MULTICAST_GRANULARITY_MINIMUM', 0)
CU_MULTICAST_GRANULARITY_RECOMMENDED = enum_CUmulticastGranularity_flags_enum.define('CU_MULTICAST_GRANULARITY_RECOMMENDED', 1)
CUmulticastGranularity_flags: TypeAlias = enum_CUmulticastGranularity_flags_enum
cuMulticastGetGranularity_params: TypeAlias = struct_cuMulticastGetGranularity_params_st
# Argument record for cuPointerGetAttribute and the full CUpointer_attribute
# enumeration it consumes. Offsets are byte offsets; SIZE is sizeof().
@c.record
class struct_cuPointerGetAttribute_params_st(c.Struct):
  SIZE = 24
  data: Annotated[ctypes.c_void_p, 0]
  attribute: Annotated[CUpointer_attribute, 8]
  ptr: Annotated[CUdeviceptr, 16]
class enum_CUpointer_attribute_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_POINTER_ATTRIBUTE_CONTEXT = enum_CUpointer_attribute_enum.define('CU_POINTER_ATTRIBUTE_CONTEXT', 1)
CU_POINTER_ATTRIBUTE_MEMORY_TYPE = enum_CUpointer_attribute_enum.define('CU_POINTER_ATTRIBUTE_MEMORY_TYPE', 2)
CU_POINTER_ATTRIBUTE_DEVICE_POINTER = enum_CUpointer_attribute_enum.define('CU_POINTER_ATTRIBUTE_DEVICE_POINTER', 3)
CU_POINTER_ATTRIBUTE_HOST_POINTER = enum_CUpointer_attribute_enum.define('CU_POINTER_ATTRIBUTE_HOST_POINTER', 4)
CU_POINTER_ATTRIBUTE_P2P_TOKENS = enum_CUpointer_attribute_enum.define('CU_POINTER_ATTRIBUTE_P2P_TOKENS', 5)
CU_POINTER_ATTRIBUTE_SYNC_MEMOPS = enum_CUpointer_attribute_enum.define('CU_POINTER_ATTRIBUTE_SYNC_MEMOPS', 6)
CU_POINTER_ATTRIBUTE_BUFFER_ID = enum_CUpointer_attribute_enum.define('CU_POINTER_ATTRIBUTE_BUFFER_ID', 7)
CU_POINTER_ATTRIBUTE_IS_MANAGED = enum_CUpointer_attribute_enum.define('CU_POINTER_ATTRIBUTE_IS_MANAGED', 8)
CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL = enum_CUpointer_attribute_enum.define('CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL', 9)
CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE = enum_CUpointer_attribute_enum.define('CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE', 10)
CU_POINTER_ATTRIBUTE_RANGE_START_ADDR = enum_CUpointer_attribute_enum.define('CU_POINTER_ATTRIBUTE_RANGE_START_ADDR', 11)
CU_POINTER_ATTRIBUTE_RANGE_SIZE = enum_CUpointer_attribute_enum.define('CU_POINTER_ATTRIBUTE_RANGE_SIZE', 12)
CU_POINTER_ATTRIBUTE_MAPPED = enum_CUpointer_attribute_enum.define('CU_POINTER_ATTRIBUTE_MAPPED', 13)
CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES = enum_CUpointer_attribute_enum.define('CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES', 14)
CU_POINTER_ATTRIBUTE_IS_GPU_DIRECT_RDMA_CAPABLE = enum_CUpointer_attribute_enum.define('CU_POINTER_ATTRIBUTE_IS_GPU_DIRECT_RDMA_CAPABLE', 15)
CU_POINTER_ATTRIBUTE_ACCESS_FLAGS = enum_CUpointer_attribute_enum.define('CU_POINTER_ATTRIBUTE_ACCESS_FLAGS', 16)
CU_POINTER_ATTRIBUTE_MEMPOOL_HANDLE = enum_CUpointer_attribute_enum.define('CU_POINTER_ATTRIBUTE_MEMPOOL_HANDLE', 17)
CU_POINTER_ATTRIBUTE_MAPPING_SIZE = enum_CUpointer_attribute_enum.define('CU_POINTER_ATTRIBUTE_MAPPING_SIZE', 18)
CU_POINTER_ATTRIBUTE_MAPPING_BASE_ADDR = enum_CUpointer_attribute_enum.define('CU_POINTER_ATTRIBUTE_MAPPING_BASE_ADDR', 19)
CU_POINTER_ATTRIBUTE_MEMORY_BLOCK_ID = enum_CUpointer_attribute_enum.define('CU_POINTER_ATTRIBUTE_MEMORY_BLOCK_ID', 20)
CU_POINTER_ATTRIBUTE_IS_HW_DECOMPRESS_CAPABLE = enum_CUpointer_attribute_enum.define('CU_POINTER_ATTRIBUTE_IS_HW_DECOMPRESS_CAPABLE', 21)
CUpointer_attribute: TypeAlias = enum_CUpointer_attribute_enum
cuPointerGetAttribute_params: TypeAlias = struct_cuPointerGetAttribute_params_st
# Argument records for unified-memory hints: cuMemPrefetchAsync (v1 and v2)
# and cuMemAdvise (v1 and v2), plus the CUmem_advise enum. The v2 variants
# take a CUmemLocation instead of a bare CUdevice. Offsets are byte offsets.
@c.record
class struct_cuMemPrefetchAsync_ptsz_params_st(c.Struct):
  SIZE = 32
  devPtr: Annotated[CUdeviceptr, 0]
  count: Annotated[size_t, 8]
  dstDevice: Annotated[CUdevice, 16]
  hStream: Annotated[CUstream, 24]
cuMemPrefetchAsync_ptsz_params: TypeAlias = struct_cuMemPrefetchAsync_ptsz_params_st
@c.record
class struct_cuMemPrefetchAsync_v2_ptsz_params_st(c.Struct):
  SIZE = 40
  devPtr: Annotated[CUdeviceptr, 0]
  count: Annotated[size_t, 8]
  location: Annotated[CUmemLocation, 16]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 24]
  hStream: Annotated[CUstream, 32]
cuMemPrefetchAsync_v2_ptsz_params: TypeAlias = struct_cuMemPrefetchAsync_v2_ptsz_params_st
@c.record
class struct_cuMemAdvise_params_st(c.Struct):
  SIZE = 24
  devPtr: Annotated[CUdeviceptr, 0]
  count: Annotated[size_t, 8]
  advice: Annotated[CUmem_advise, 16]
  device: Annotated[CUdevice, 20]
class enum_CUmem_advise_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_MEM_ADVISE_SET_READ_MOSTLY = enum_CUmem_advise_enum.define('CU_MEM_ADVISE_SET_READ_MOSTLY', 1)
CU_MEM_ADVISE_UNSET_READ_MOSTLY = enum_CUmem_advise_enum.define('CU_MEM_ADVISE_UNSET_READ_MOSTLY', 2)
CU_MEM_ADVISE_SET_PREFERRED_LOCATION = enum_CUmem_advise_enum.define('CU_MEM_ADVISE_SET_PREFERRED_LOCATION', 3)
CU_MEM_ADVISE_UNSET_PREFERRED_LOCATION = enum_CUmem_advise_enum.define('CU_MEM_ADVISE_UNSET_PREFERRED_LOCATION', 4)
CU_MEM_ADVISE_SET_ACCESSED_BY = enum_CUmem_advise_enum.define('CU_MEM_ADVISE_SET_ACCESSED_BY', 5)
CU_MEM_ADVISE_UNSET_ACCESSED_BY = enum_CUmem_advise_enum.define('CU_MEM_ADVISE_UNSET_ACCESSED_BY', 6)
CUmem_advise: TypeAlias = enum_CUmem_advise_enum
cuMemAdvise_params: TypeAlias = struct_cuMemAdvise_params_st
@c.record
class struct_cuMemAdvise_v2_params_st(c.Struct):
  SIZE = 32
  devPtr: Annotated[CUdeviceptr, 0]
  count: Annotated[size_t, 8]
  advice: Annotated[CUmem_advise, 16]
  location: Annotated[CUmemLocation, 20]
cuMemAdvise_v2_params: TypeAlias = struct_cuMemAdvise_v2_params_st
# Argument records for range/pointer attribute queries: cuMemRangeGetAttribute
# / GetAttributes (with the CUmem_range_attribute enum) and
# cuPointerSetAttribute / cuPointerGetAttributes. Offsets are byte offsets.
@c.record
class struct_cuMemRangeGetAttribute_params_st(c.Struct):
  SIZE = 40
  data: Annotated[ctypes.c_void_p, 0]
  dataSize: Annotated[size_t, 8]
  attribute: Annotated[CUmem_range_attribute, 16]
  devPtr: Annotated[CUdeviceptr, 24]
  count: Annotated[size_t, 32]
class enum_CUmem_range_attribute_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY = enum_CUmem_range_attribute_enum.define('CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY', 1)
CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION = enum_CUmem_range_attribute_enum.define('CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION', 2)
CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY = enum_CUmem_range_attribute_enum.define('CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY', 3)
CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION = enum_CUmem_range_attribute_enum.define('CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION', 4)
CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_TYPE = enum_CUmem_range_attribute_enum.define('CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_TYPE', 5)
CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_ID = enum_CUmem_range_attribute_enum.define('CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_ID', 6)
CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_TYPE = enum_CUmem_range_attribute_enum.define('CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_TYPE', 7)
CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_ID = enum_CUmem_range_attribute_enum.define('CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_ID', 8)
CUmem_range_attribute: TypeAlias = enum_CUmem_range_attribute_enum
cuMemRangeGetAttribute_params: TypeAlias = struct_cuMemRangeGetAttribute_params_st
@c.record
class struct_cuMemRangeGetAttributes_params_st(c.Struct):
  SIZE = 48
  data: Annotated[c.POINTER[ctypes.c_void_p], 0]
  dataSizes: Annotated[c.POINTER[size_t], 8]
  attributes: Annotated[c.POINTER[CUmem_range_attribute], 16]
  numAttributes: Annotated[size_t, 24]
  devPtr: Annotated[CUdeviceptr, 32]
  count: Annotated[size_t, 40]
cuMemRangeGetAttributes_params: TypeAlias = struct_cuMemRangeGetAttributes_params_st
@c.record
class struct_cuPointerSetAttribute_params_st(c.Struct):
  SIZE = 24
  value: Annotated[ctypes.c_void_p, 0]
  attribute: Annotated[CUpointer_attribute, 8]
  ptr: Annotated[CUdeviceptr, 16]
cuPointerSetAttribute_params: TypeAlias = struct_cuPointerSetAttribute_params_st
@c.record
class struct_cuPointerGetAttributes_params_st(c.Struct):
  SIZE = 32
  numAttributes: Annotated[Annotated[int, ctypes.c_uint32], 0]
  attributes: Annotated[c.POINTER[CUpointer_attribute], 8]
  data: Annotated[c.POINTER[ctypes.c_void_p], 16]
  ptr: Annotated[CUdeviceptr, 24]
cuPointerGetAttributes_params: TypeAlias = struct_cuPointerGetAttributes_params_st
# Argument records for stream creation and simple stream getters:
# cuStreamCreate / CreateWithPriority and cuStreamGetPriority / GetDevice /
# GetFlags / GetId / GetCtx (v1 and v2; _ptsz = per-thread default stream
# variants). Offsets are byte offsets; SIZE is sizeof().
@c.record
class struct_cuStreamCreate_params_st(c.Struct):
  SIZE = 16
  phStream: Annotated[c.POINTER[CUstream], 0]
  Flags: Annotated[Annotated[int, ctypes.c_uint32], 8]
cuStreamCreate_params: TypeAlias = struct_cuStreamCreate_params_st
@c.record
class struct_cuStreamCreateWithPriority_params_st(c.Struct):
  SIZE = 16
  phStream: Annotated[c.POINTER[CUstream], 0]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 8]
  priority: Annotated[Annotated[int, ctypes.c_int32], 12]
cuStreamCreateWithPriority_params: TypeAlias = struct_cuStreamCreateWithPriority_params_st
@c.record
class struct_cuStreamGetPriority_ptsz_params_st(c.Struct):
  SIZE = 16
  hStream: Annotated[CUstream, 0]
  priority: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 8]
cuStreamGetPriority_ptsz_params: TypeAlias = struct_cuStreamGetPriority_ptsz_params_st
@c.record
class struct_cuStreamGetDevice_ptsz_params_st(c.Struct):
  SIZE = 16
  hStream: Annotated[CUstream, 0]
  device: Annotated[c.POINTER[CUdevice], 8]
cuStreamGetDevice_ptsz_params: TypeAlias = struct_cuStreamGetDevice_ptsz_params_st
@c.record
class struct_cuStreamGetFlags_ptsz_params_st(c.Struct):
  SIZE = 16
  hStream: Annotated[CUstream, 0]
  flags: Annotated[c.POINTER[Annotated[int, ctypes.c_uint32]], 8]
cuStreamGetFlags_ptsz_params: TypeAlias = struct_cuStreamGetFlags_ptsz_params_st
@c.record
class struct_cuStreamGetId_ptsz_params_st(c.Struct):
  SIZE = 16
  hStream: Annotated[CUstream, 0]
  streamId: Annotated[c.POINTER[Annotated[int, ctypes.c_uint64]], 8]
cuStreamGetId_ptsz_params: TypeAlias = struct_cuStreamGetId_ptsz_params_st
@c.record
class struct_cuStreamGetCtx_ptsz_params_st(c.Struct):
  SIZE = 16
  hStream: Annotated[CUstream, 0]
  pctx: Annotated[c.POINTER[CUcontext], 8]
cuStreamGetCtx_ptsz_params: TypeAlias = struct_cuStreamGetCtx_ptsz_params_st
@c.record
class struct_cuStreamGetCtx_v2_ptsz_params_st(c.Struct):
  SIZE = 24
  hStream: Annotated[CUstream, 0]
  pCtx: Annotated[c.POINTER[CUcontext], 8]
  pGreenCtx: Annotated[c.POINTER[CUgreenCtx], 16]
# Opaque handle type: CUgreenCtx is a pointer to an incomplete C struct.
class struct_CUgreenCtx_st(ctypes.Structure): pass
CUgreenCtx: TypeAlias = c.POINTER[struct_CUgreenCtx_st]
cuStreamGetCtx_v2_ptsz_params: TypeAlias = struct_cuStreamGetCtx_v2_ptsz_params_st
# Argument records for cuStreamWaitEvent, cuStreamAddCallback (with its
# callback function-pointer type) and cuStreamBeginCapture_v2, plus the
# CUstreamCaptureMode enum. Offsets are byte offsets; SIZE is sizeof().
@c.record
class struct_cuStreamWaitEvent_ptsz_params_st(c.Struct):
  SIZE = 24
  hStream: Annotated[CUstream, 0]
  hEvent: Annotated[CUevent, 8]
  Flags: Annotated[Annotated[int, ctypes.c_uint32], 16]
cuStreamWaitEvent_ptsz_params: TypeAlias = struct_cuStreamWaitEvent_ptsz_params_st
@c.record
class struct_cuStreamAddCallback_ptsz_params_st(c.Struct):
  SIZE = 32
  hStream: Annotated[CUstream, 0]
  callback: Annotated[CUstreamCallback, 8]
  userData: Annotated[ctypes.c_void_p, 16]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 24]
# void (*CUstreamCallback)(CUstream, CUresult, void*)
CUstreamCallback: TypeAlias = c.CFUNCTYPE[None, [c.POINTER[struct_CUstream_st], enum_cudaError_enum, ctypes.c_void_p]]
cuStreamAddCallback_ptsz_params: TypeAlias = struct_cuStreamAddCallback_ptsz_params_st
@c.record
class struct_cuStreamBeginCapture_v2_ptsz_params_st(c.Struct):
  SIZE = 16
  hStream: Annotated[CUstream, 0]
  mode: Annotated[CUstreamCaptureMode, 8]
class enum_CUstreamCaptureMode_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_STREAM_CAPTURE_MODE_GLOBAL = enum_CUstreamCaptureMode_enum.define('CU_STREAM_CAPTURE_MODE_GLOBAL', 0)
CU_STREAM_CAPTURE_MODE_THREAD_LOCAL = enum_CUstreamCaptureMode_enum.define('CU_STREAM_CAPTURE_MODE_THREAD_LOCAL', 1)
CU_STREAM_CAPTURE_MODE_RELAXED = enum_CUstreamCaptureMode_enum.define('CU_STREAM_CAPTURE_MODE_RELAXED', 2)
CUstreamCaptureMode: TypeAlias = enum_CUstreamCaptureMode_enum
cuStreamBeginCapture_v2_ptsz_params: TypeAlias = struct_cuStreamBeginCapture_v2_ptsz_params_st
# Argument records for the remaining stream-capture entry points:
# cuStreamBeginCaptureToGraph (with the CUgraphEdgeData struct),
# cuThreadExchangeStreamCaptureMode, cuStreamEndCapture and
# cuStreamIsCapturing (with the capture-status enum). Offsets are byte
# offsets; SIZE is sizeof().
@c.record
class struct_cuStreamBeginCaptureToGraph_ptsz_params_st(c.Struct):
  SIZE = 48
  hStream: Annotated[CUstream, 0]
  hGraph: Annotated[CUgraph, 8]
  dependencies: Annotated[c.POINTER[CUgraphNode], 16]
  dependencyData: Annotated[c.POINTER[CUgraphEdgeData], 24]
  numDependencies: Annotated[size_t, 32]
  mode: Annotated[CUstreamCaptureMode, 40]
# Per-edge metadata for graph dependencies (from/to ports and edge type).
@c.record
class struct_CUgraphEdgeData_st(c.Struct):
  SIZE = 8
  from_port: Annotated[Annotated[int, ctypes.c_ubyte], 0]
  to_port: Annotated[Annotated[int, ctypes.c_ubyte], 1]
  type: Annotated[Annotated[int, ctypes.c_ubyte], 2]
  reserved: Annotated[c.Array[Annotated[int, ctypes.c_ubyte], Literal[5]], 3]
CUgraphEdgeData: TypeAlias = struct_CUgraphEdgeData_st
cuStreamBeginCaptureToGraph_ptsz_params: TypeAlias = struct_cuStreamBeginCaptureToGraph_ptsz_params_st
@c.record
class struct_cuThreadExchangeStreamCaptureMode_params_st(c.Struct):
  SIZE = 8
  mode: Annotated[c.POINTER[CUstreamCaptureMode], 0]
cuThreadExchangeStreamCaptureMode_params: TypeAlias = struct_cuThreadExchangeStreamCaptureMode_params_st
@c.record
class struct_cuStreamEndCapture_ptsz_params_st(c.Struct):
  SIZE = 16
  hStream: Annotated[CUstream, 0]
  phGraph: Annotated[c.POINTER[CUgraph], 8]
cuStreamEndCapture_ptsz_params: TypeAlias = struct_cuStreamEndCapture_ptsz_params_st
@c.record
class struct_cuStreamIsCapturing_ptsz_params_st(c.Struct):
  SIZE = 16
  hStream: Annotated[CUstream, 0]
  captureStatus: Annotated[c.POINTER[CUstreamCaptureStatus], 8]
class enum_CUstreamCaptureStatus_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_STREAM_CAPTURE_STATUS_NONE = enum_CUstreamCaptureStatus_enum.define('CU_STREAM_CAPTURE_STATUS_NONE', 0)
CU_STREAM_CAPTURE_STATUS_ACTIVE = enum_CUstreamCaptureStatus_enum.define('CU_STREAM_CAPTURE_STATUS_ACTIVE', 1)
CU_STREAM_CAPTURE_STATUS_INVALIDATED = enum_CUstreamCaptureStatus_enum.define('CU_STREAM_CAPTURE_STATUS_INVALIDATED', 2)
CUstreamCaptureStatus: TypeAlias = enum_CUstreamCaptureStatus_enum
cuStreamIsCapturing_ptsz_params: TypeAlias = struct_cuStreamIsCapturing_ptsz_params_st
class struct_cuStreamGetCaptureInfo_v2_ptsz_params_st(c.Struct):
SIZE = 48
hStream: Annotated[CUstream, 0]
captureStatus_out: Annotated[c.POINTER[CUstreamCaptureStatus], 8]
id_out: Annotated[c.POINTER[cuuint64_t], 16]
graph_out: Annotated[c.POINTER[CUgraph], 24]
dependencies_out: Annotated[c.POINTER[c.POINTER[CUgraphNode]], 32]
numDependencies_out: Annotated[c.POINTER[size_t], 40]
cuuint64_t: TypeAlias = Annotated[int, ctypes.c_uint64]
cuStreamGetCaptureInfo_v2_ptsz_params: TypeAlias = struct_cuStreamGetCaptureInfo_v2_ptsz_params_st
@c.record
class struct_cuStreamGetCaptureInfo_v3_ptsz_params_st(c.Struct):
SIZE = 56
hStream: Annotated[CUstream, 0]
captureStatus_out: Annotated[c.POINTER[CUstreamCaptureStatus], 8]
id_out: Annotated[c.POINTER[cuuint64_t], 16]
graph_out: Annotated[c.POINTER[CUgraph], 24]
dependencies_out: Annotated[c.POINTER[c.POINTER[CUgraphNode]], 32]
edgeData_out: Annotated[c.POINTER[c.POINTER[CUgraphEdgeData]], 40]
numDependencies_out: Annotated[c.POINTER[size_t], 48]
cuStreamGetCaptureInfo_v3_ptsz_params: TypeAlias = struct_cuStreamGetCaptureInfo_v3_ptsz_params_st
@c.record
class struct_cuStreamUpdateCaptureDependencies_ptsz_params_st(c.Struct):
SIZE = 32
hStream: Annotated[CUstream, 0]
dependencies: Annotated[c.POINTER[CUgraphNode], 8]
numDependencies: Annotated[size_t, 16]
flags: Annotated[Annotated[int, ctypes.c_uint32], 24]
cuStreamUpdateCaptureDependencies_ptsz_params: TypeAlias = struct_cuStreamUpdateCaptureDependencies_ptsz_params_st
@c.record
class struct_cuStreamUpdateCaptureDependencies_v2_ptsz_params_st(c.Struct):
SIZE = 40
hStream: Annotated[CUstream, 0]
dependencies: Annotated[c.POINTER[CUgraphNode], 8]
dependencyData: Annotated[c.POINTER[CUgraphEdgeData], 16]
numDependencies: Annotated[size_t, 24]
flags: Annotated[Annotated[int, ctypes.c_uint32], 32]
cuStreamUpdateCaptureDependencies_v2_ptsz_params: TypeAlias = struct_cuStreamUpdateCaptureDependencies_v2_ptsz_params_st
@c.record
class struct_cuStreamAttachMemAsync_ptsz_params_st(c.Struct):
SIZE = 32
hStream: Annotated[CUstream, 0]
dptr: Annotated[CUdeviceptr, 8]
length: Annotated[size_t, 16]
flags: Annotated[Annotated[int, ctypes.c_uint32], 24]
cuStreamAttachMemAsync_ptsz_params: TypeAlias = struct_cuStreamAttachMemAsync_ptsz_params_st
@c.record
class struct_cuStreamQuery_ptsz_params_st(c.Struct):
SIZE = 8
hStream: Annotated[CUstream, 0]
cuStreamQuery_ptsz_params: TypeAlias = struct_cuStreamQuery_ptsz_params_st
@c.record
class struct_cuStreamSynchronize_ptsz_params_st(c.Struct):
SIZE = 8
hStream: Annotated[CUstream, 0]
cuStreamSynchronize_ptsz_params: TypeAlias = struct_cuStreamSynchronize_ptsz_params_st
@c.record
class struct_cuStreamDestroy_v2_params_st(c.Struct):
SIZE = 8
hStream: Annotated[CUstream, 0]
cuStreamDestroy_v2_params: TypeAlias = struct_cuStreamDestroy_v2_params_st
# Stream-attribute, cuEvent*, and external-memory (cuImportExternalMemory /
# cuExternalMemoryGetMapped*) parameter records. Auto-generated layout data:
# SIZE and Annotated[..., offset] values mirror the C structs exactly.
@c.record
class struct_cuStreamCopyAttributes_ptsz_params_st(c.Struct):
  SIZE = 16
  dst: Annotated[CUstream, 0]
  src: Annotated[CUstream, 8]
cuStreamCopyAttributes_ptsz_params: TypeAlias = struct_cuStreamCopyAttributes_ptsz_params_st
@c.record
class struct_cuStreamGetAttribute_ptsz_params_st(c.Struct):
  SIZE = 24
  hStream: Annotated[CUstream, 0]
  attr: Annotated[CUstreamAttrID, 8]
  value_out: Annotated[c.POINTER[CUstreamAttrValue], 16]
cuStreamGetAttribute_ptsz_params: TypeAlias = struct_cuStreamGetAttribute_ptsz_params_st
@c.record
class struct_cuStreamSetAttribute_ptsz_params_st(c.Struct):
  SIZE = 24
  hStream: Annotated[CUstream, 0]
  attr: Annotated[CUstreamAttrID, 8]
  value: Annotated[c.POINTER[CUstreamAttrValue], 16]
cuStreamSetAttribute_ptsz_params: TypeAlias = struct_cuStreamSetAttribute_ptsz_params_st
@c.record
class struct_cuEventCreate_params_st(c.Struct):
  SIZE = 16
  phEvent: Annotated[c.POINTER[CUevent], 0]
  Flags: Annotated[Annotated[int, ctypes.c_uint32], 8]
cuEventCreate_params: TypeAlias = struct_cuEventCreate_params_st
@c.record
class struct_cuEventRecord_ptsz_params_st(c.Struct):
  SIZE = 16
  hEvent: Annotated[CUevent, 0]
  hStream: Annotated[CUstream, 8]
cuEventRecord_ptsz_params: TypeAlias = struct_cuEventRecord_ptsz_params_st
@c.record
class struct_cuEventRecordWithFlags_ptsz_params_st(c.Struct):
  SIZE = 24
  hEvent: Annotated[CUevent, 0]
  hStream: Annotated[CUstream, 8]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 16]
cuEventRecordWithFlags_ptsz_params: TypeAlias = struct_cuEventRecordWithFlags_ptsz_params_st
@c.record
class struct_cuEventQuery_params_st(c.Struct):
  SIZE = 8
  hEvent: Annotated[CUevent, 0]
cuEventQuery_params: TypeAlias = struct_cuEventQuery_params_st
@c.record
class struct_cuEventSynchronize_params_st(c.Struct):
  SIZE = 8
  hEvent: Annotated[CUevent, 0]
cuEventSynchronize_params: TypeAlias = struct_cuEventSynchronize_params_st
@c.record
class struct_cuEventDestroy_v2_params_st(c.Struct):
  SIZE = 8
  hEvent: Annotated[CUevent, 0]
cuEventDestroy_v2_params: TypeAlias = struct_cuEventDestroy_v2_params_st
# v1 and v2 of cuEventElapsedTime share an identical parameter layout.
@c.record
class struct_cuEventElapsedTime_params_st(c.Struct):
  SIZE = 24
  pMilliseconds: Annotated[c.POINTER[Annotated[float, ctypes.c_float]], 0]
  hStart: Annotated[CUevent, 8]
  hEnd: Annotated[CUevent, 16]
cuEventElapsedTime_params: TypeAlias = struct_cuEventElapsedTime_params_st
@c.record
class struct_cuEventElapsedTime_v2_params_st(c.Struct):
  SIZE = 24
  pMilliseconds: Annotated[c.POINTER[Annotated[float, ctypes.c_float]], 0]
  hStart: Annotated[CUevent, 8]
  hEnd: Annotated[CUevent, 16]
cuEventElapsedTime_v2_params: TypeAlias = struct_cuEventElapsedTime_v2_params_st
@c.record
class struct_cuImportExternalMemory_params_st(c.Struct):
  SIZE = 16
  extMem_out: Annotated[c.POINTER[CUexternalMemory], 0]
  memHandleDesc: Annotated[c.POINTER[CUDA_EXTERNAL_MEMORY_HANDLE_DESC], 8]
# CUexternalMemory is an opaque driver handle (pointer to an incomplete struct).
class struct_CUextMemory_st(ctypes.Structure): pass
CUexternalMemory: TypeAlias = c.POINTER[struct_CUextMemory_st]
@c.record
class struct_CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st(c.Struct):
  SIZE = 104
  type: Annotated[CUexternalMemoryHandleType, 0]
  handle: Annotated[struct_CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st_handle, 8]
  size: Annotated[Annotated[int, ctypes.c_uint64], 24]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 32]
  reserved: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[16]], 36]
CUDA_EXTERNAL_MEMORY_HANDLE_DESC: TypeAlias = struct_CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st
class enum_CUexternalMemoryHandleType_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD = enum_CUexternalMemoryHandleType_enum.define('CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD', 1)
CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32 = enum_CUexternalMemoryHandleType_enum.define('CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32', 2)
CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT = enum_CUexternalMemoryHandleType_enum.define('CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT', 3)
CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP = enum_CUexternalMemoryHandleType_enum.define('CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP', 4)
CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE = enum_CUexternalMemoryHandleType_enum.define('CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE', 5)
CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE = enum_CUexternalMemoryHandleType_enum.define('CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE', 6)
CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT = enum_CUexternalMemoryHandleType_enum.define('CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT', 7)
CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF = enum_CUexternalMemoryHandleType_enum.define('CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF', 8)
CUexternalMemoryHandleType: TypeAlias = enum_CUexternalMemoryHandleType_enum
# NOTE(review): fd / win32 / nvSciBufObject all sit at offset 0 — this record
# mirrors a C union (overlapping storage) even though it is emitted as c.Struct.
@c.record
class struct_CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st_handle(c.Struct):
  SIZE = 16
  fd: Annotated[Annotated[int, ctypes.c_int32], 0]
  win32: Annotated[struct_CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st_handle_win32, 0]
  nvSciBufObject: Annotated[ctypes.c_void_p, 0]
@c.record
class struct_CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st_handle_win32(c.Struct):
  SIZE = 16
  handle: Annotated[ctypes.c_void_p, 0]
  name: Annotated[ctypes.c_void_p, 8]
cuImportExternalMemory_params: TypeAlias = struct_cuImportExternalMemory_params_st
@c.record
class struct_cuExternalMemoryGetMappedBuffer_params_st(c.Struct):
  SIZE = 24
  devPtr: Annotated[c.POINTER[CUdeviceptr], 0]
  extMem: Annotated[CUexternalMemory, 8]
  bufferDesc: Annotated[c.POINTER[CUDA_EXTERNAL_MEMORY_BUFFER_DESC], 16]
@c.record
class struct_CUDA_EXTERNAL_MEMORY_BUFFER_DESC_st(c.Struct):
  SIZE = 88
  offset: Annotated[Annotated[int, ctypes.c_uint64], 0]
  size: Annotated[Annotated[int, ctypes.c_uint64], 8]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 16]
  reserved: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[16]], 20]
CUDA_EXTERNAL_MEMORY_BUFFER_DESC: TypeAlias = struct_CUDA_EXTERNAL_MEMORY_BUFFER_DESC_st
cuExternalMemoryGetMappedBuffer_params: TypeAlias = struct_cuExternalMemoryGetMappedBuffer_params_st
@c.record
class struct_cuExternalMemoryGetMappedMipmappedArray_params_st(c.Struct):
  SIZE = 24
  mipmap: Annotated[c.POINTER[CUmipmappedArray], 0]
  extMem: Annotated[CUexternalMemory, 8]
  mipmapDesc: Annotated[c.POINTER[CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC], 16]
@c.record
class struct_CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st(c.Struct):
  SIZE = 120
  offset: Annotated[Annotated[int, ctypes.c_uint64], 0]
  arrayDesc: Annotated[CUDA_ARRAY3D_DESCRIPTOR, 8]
  numLevels: Annotated[Annotated[int, ctypes.c_uint32], 48]
  reserved: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[16]], 52]
CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC: TypeAlias = struct_CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st
cuExternalMemoryGetMappedMipmappedArray_params: TypeAlias = struct_cuExternalMemoryGetMappedMipmappedArray_params_st
# External-memory destroy and external-semaphore (import/signal/wait/destroy)
# parameter records. Auto-generated layout data mirroring the C structs.
@c.record
class struct_cuDestroyExternalMemory_params_st(c.Struct):
  SIZE = 8
  extMem: Annotated[CUexternalMemory, 0]
cuDestroyExternalMemory_params: TypeAlias = struct_cuDestroyExternalMemory_params_st
@c.record
class struct_cuImportExternalSemaphore_params_st(c.Struct):
  SIZE = 16
  extSem_out: Annotated[c.POINTER[CUexternalSemaphore], 0]
  semHandleDesc: Annotated[c.POINTER[CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC], 8]
# CUexternalSemaphore is an opaque driver handle (pointer to an incomplete struct).
class struct_CUextSemaphore_st(ctypes.Structure): pass
CUexternalSemaphore: TypeAlias = c.POINTER[struct_CUextSemaphore_st]
@c.record
class struct_CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st(c.Struct):
  SIZE = 96
  type: Annotated[CUexternalSemaphoreHandleType, 0]
  handle: Annotated[struct_CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st_handle, 8]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 24]
  reserved: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[16]], 28]
CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC: TypeAlias = struct_CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st
class enum_CUexternalSemaphoreHandleType_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD = enum_CUexternalSemaphoreHandleType_enum.define('CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD', 1)
CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32 = enum_CUexternalSemaphoreHandleType_enum.define('CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32', 2)
CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT = enum_CUexternalSemaphoreHandleType_enum.define('CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT', 3)
CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE = enum_CUexternalSemaphoreHandleType_enum.define('CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE', 4)
CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE = enum_CUexternalSemaphoreHandleType_enum.define('CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE', 5)
CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC = enum_CUexternalSemaphoreHandleType_enum.define('CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC', 6)
CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX = enum_CUexternalSemaphoreHandleType_enum.define('CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX', 7)
CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT = enum_CUexternalSemaphoreHandleType_enum.define('CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT', 8)
CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD = enum_CUexternalSemaphoreHandleType_enum.define('CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD', 9)
CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 = enum_CUexternalSemaphoreHandleType_enum.define('CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32', 10)
CUexternalSemaphoreHandleType: TypeAlias = enum_CUexternalSemaphoreHandleType_enum
# NOTE(review): fd / win32 / nvSciSyncObj all sit at offset 0 — this record
# mirrors a C union (overlapping storage) even though it is emitted as c.Struct.
@c.record
class struct_CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st_handle(c.Struct):
  SIZE = 16
  fd: Annotated[Annotated[int, ctypes.c_int32], 0]
  win32: Annotated[struct_CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st_handle_win32, 0]
  nvSciSyncObj: Annotated[ctypes.c_void_p, 0]
@c.record
class struct_CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st_handle_win32(c.Struct):
  SIZE = 16
  handle: Annotated[ctypes.c_void_p, 0]
  name: Annotated[ctypes.c_void_p, 8]
cuImportExternalSemaphore_params: TypeAlias = struct_cuImportExternalSemaphore_params_st
@c.record
class struct_cuSignalExternalSemaphoresAsync_ptsz_params_st(c.Struct):
  SIZE = 32
  extSemArray: Annotated[c.POINTER[CUexternalSemaphore], 0]
  paramsArray: Annotated[c.POINTER[CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS], 8]
  numExtSems: Annotated[Annotated[int, ctypes.c_uint32], 16]
  stream: Annotated[CUstream, 24]
@c.record
class struct_CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st(c.Struct):
  SIZE = 144
  params: Annotated[struct_CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st_params, 0]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 72]
  reserved: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[16]], 76]
CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS: TypeAlias = struct_CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st
@c.record
class struct_CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st_params(c.Struct):
  SIZE = 72
  fence: Annotated[struct_CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st_params_fence, 0]
  nvSciSync: Annotated[struct_CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st_params_nvSciSync, 8]
  keyedMutex: Annotated[struct_CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st_params_keyedMutex, 16]
  reserved: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[12]], 24]
@c.record
class struct_CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st_params_fence(c.Struct):
  SIZE = 8
  value: Annotated[Annotated[int, ctypes.c_uint64], 0]
# NOTE(review): fence and reserved overlap at offset 0 — mirrors a C union.
@c.record
class struct_CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st_params_nvSciSync(c.Struct):
  SIZE = 8
  fence: Annotated[ctypes.c_void_p, 0]
  reserved: Annotated[Annotated[int, ctypes.c_uint64], 0]
@c.record
class struct_CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st_params_keyedMutex(c.Struct):
  SIZE = 8
  key: Annotated[Annotated[int, ctypes.c_uint64], 0]
cuSignalExternalSemaphoresAsync_ptsz_params: TypeAlias = struct_cuSignalExternalSemaphoresAsync_ptsz_params_st
@c.record
class struct_cuWaitExternalSemaphoresAsync_ptsz_params_st(c.Struct):
  SIZE = 32
  extSemArray: Annotated[c.POINTER[CUexternalSemaphore], 0]
  paramsArray: Annotated[c.POINTER[CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS], 8]
  numExtSems: Annotated[Annotated[int, ctypes.c_uint32], 16]
  stream: Annotated[CUstream, 24]
@c.record
class struct_CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st(c.Struct):
  SIZE = 144
  params: Annotated[struct_CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st_params, 0]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 72]
  reserved: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[16]], 76]
CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS: TypeAlias = struct_CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st
@c.record
class struct_CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st_params(c.Struct):
  SIZE = 72
  fence: Annotated[struct_CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st_params_fence, 0]
  nvSciSync: Annotated[struct_CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st_params_nvSciSync, 8]
  keyedMutex: Annotated[struct_CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st_params_keyedMutex, 16]
  reserved: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[10]], 32]
@c.record
class struct_CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st_params_fence(c.Struct):
  SIZE = 8
  value: Annotated[Annotated[int, ctypes.c_uint64], 0]
# NOTE(review): fence and reserved overlap at offset 0 — mirrors a C union.
@c.record
class struct_CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st_params_nvSciSync(c.Struct):
  SIZE = 8
  fence: Annotated[ctypes.c_void_p, 0]
  reserved: Annotated[Annotated[int, ctypes.c_uint64], 0]
# Wait-side keyedMutex adds timeoutMs relative to the signal-side record.
@c.record
class struct_CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st_params_keyedMutex(c.Struct):
  SIZE = 16
  key: Annotated[Annotated[int, ctypes.c_uint64], 0]
  timeoutMs: Annotated[Annotated[int, ctypes.c_uint32], 8]
cuWaitExternalSemaphoresAsync_ptsz_params: TypeAlias = struct_cuWaitExternalSemaphoresAsync_ptsz_params_st
@c.record
class struct_cuDestroyExternalSemaphore_params_st(c.Struct):
  SIZE = 8
  extSem: Annotated[CUexternalSemaphore, 0]
cuDestroyExternalSemaphore_params: TypeAlias = struct_cuDestroyExternalSemaphore_params_st
# Stream memory-operation (cuStreamWaitValue/WriteValue/BatchMemOp) and
# function-attribute parameter records. Auto-generated layout data.
@c.record
class struct_cuStreamWaitValue32_v2_ptsz_params_st(c.Struct):
  SIZE = 24
  stream: Annotated[CUstream, 0]
  addr: Annotated[CUdeviceptr, 8]
  value: Annotated[cuuint32_t, 16]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 20]
cuStreamWaitValue32_v2_ptsz_params: TypeAlias = struct_cuStreamWaitValue32_v2_ptsz_params_st
@c.record
class struct_cuStreamWaitValue64_v2_ptsz_params_st(c.Struct):
  SIZE = 32
  stream: Annotated[CUstream, 0]
  addr: Annotated[CUdeviceptr, 8]
  value: Annotated[cuuint64_t, 16]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 24]
cuStreamWaitValue64_v2_ptsz_params: TypeAlias = struct_cuStreamWaitValue64_v2_ptsz_params_st
@c.record
class struct_cuStreamWriteValue32_v2_ptsz_params_st(c.Struct):
  SIZE = 24
  stream: Annotated[CUstream, 0]
  addr: Annotated[CUdeviceptr, 8]
  value: Annotated[cuuint32_t, 16]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 20]
cuStreamWriteValue32_v2_ptsz_params: TypeAlias = struct_cuStreamWriteValue32_v2_ptsz_params_st
@c.record
class struct_cuStreamWriteValue64_v2_ptsz_params_st(c.Struct):
  SIZE = 32
  stream: Annotated[CUstream, 0]
  addr: Annotated[CUdeviceptr, 8]
  value: Annotated[cuuint64_t, 16]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 24]
cuStreamWriteValue64_v2_ptsz_params: TypeAlias = struct_cuStreamWriteValue64_v2_ptsz_params_st
@c.record
class struct_cuStreamBatchMemOp_v2_ptsz_params_st(c.Struct):
  SIZE = 32
  stream: Annotated[CUstream, 0]
  count: Annotated[Annotated[int, ctypes.c_uint32], 8]
  paramArray: Annotated[c.POINTER[CUstreamBatchMemOpParams], 16]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 24]
# NOTE(review): all fields sit at offset 0 — this record mirrors the
# CUstreamBatchMemOpParams C union (overlapping storage).
@c.record
class union_CUstreamBatchMemOpParams_union(c.Struct):
  SIZE = 48
  operation: Annotated[CUstreamBatchMemOpType, 0]
  waitValue: Annotated[struct_CUstreamMemOpWaitValueParams_st, 0]
  writeValue: Annotated[struct_CUstreamMemOpWriteValueParams_st, 0]
  flushRemoteWrites: Annotated[struct_CUstreamMemOpFlushRemoteWritesParams_st, 0]
  memoryBarrier: Annotated[struct_CUstreamMemOpMemoryBarrierParams_st, 0]
  pad: Annotated[c.Array[cuuint64_t, Literal[6]], 0]
CUstreamBatchMemOpParams: TypeAlias = union_CUstreamBatchMemOpParams_union
class enum_CUstreamBatchMemOpType_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_STREAM_MEM_OP_WAIT_VALUE_32 = enum_CUstreamBatchMemOpType_enum.define('CU_STREAM_MEM_OP_WAIT_VALUE_32', 1)
CU_STREAM_MEM_OP_WRITE_VALUE_32 = enum_CUstreamBatchMemOpType_enum.define('CU_STREAM_MEM_OP_WRITE_VALUE_32', 2)
CU_STREAM_MEM_OP_WAIT_VALUE_64 = enum_CUstreamBatchMemOpType_enum.define('CU_STREAM_MEM_OP_WAIT_VALUE_64', 4)
CU_STREAM_MEM_OP_WRITE_VALUE_64 = enum_CUstreamBatchMemOpType_enum.define('CU_STREAM_MEM_OP_WRITE_VALUE_64', 5)
CU_STREAM_MEM_OP_BARRIER = enum_CUstreamBatchMemOpType_enum.define('CU_STREAM_MEM_OP_BARRIER', 6)
CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES = enum_CUstreamBatchMemOpType_enum.define('CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES', 3)
CUstreamBatchMemOpType: TypeAlias = enum_CUstreamBatchMemOpType_enum
# NOTE(review): value and value64 share offset 16 — an embedded C union.
@c.record
class struct_CUstreamMemOpWaitValueParams_st(c.Struct):
  SIZE = 40
  operation: Annotated[CUstreamBatchMemOpType, 0]
  address: Annotated[CUdeviceptr, 8]
  value: Annotated[cuuint32_t, 16]
  value64: Annotated[cuuint64_t, 16]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 24]
  alias: Annotated[CUdeviceptr, 32]
# NOTE(review): value and value64 share offset 16 — an embedded C union.
@c.record
class struct_CUstreamMemOpWriteValueParams_st(c.Struct):
  SIZE = 40
  operation: Annotated[CUstreamBatchMemOpType, 0]
  address: Annotated[CUdeviceptr, 8]
  value: Annotated[cuuint32_t, 16]
  value64: Annotated[cuuint64_t, 16]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 24]
  alias: Annotated[CUdeviceptr, 32]
@c.record
class struct_CUstreamMemOpFlushRemoteWritesParams_st(c.Struct):
  SIZE = 8
  operation: Annotated[CUstreamBatchMemOpType, 0]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 4]
@c.record
class struct_CUstreamMemOpMemoryBarrierParams_st(c.Struct):
  SIZE = 8
  operation: Annotated[CUstreamBatchMemOpType, 0]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 4]
cuStreamBatchMemOp_v2_ptsz_params: TypeAlias = struct_cuStreamBatchMemOp_v2_ptsz_params_st
@c.record
class struct_cuFuncGetAttribute_params_st(c.Struct):
  SIZE = 24
  pi: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 0]
  attrib: Annotated[CUfunction_attribute, 8]
  hfunc: Annotated[CUfunction, 16]
cuFuncGetAttribute_params: TypeAlias = struct_cuFuncGetAttribute_params_st
@c.record
class struct_cuFuncSetAttribute_params_st(c.Struct):
  SIZE = 16
  hfunc: Annotated[CUfunction, 0]
  attrib: Annotated[CUfunction_attribute, 8]
  value: Annotated[Annotated[int, ctypes.c_int32], 12]
cuFuncSetAttribute_params: TypeAlias = struct_cuFuncSetAttribute_params_st
# Function introspection (cuFunc*) and kernel-launch (cuLaunchKernel /
# cuLaunchKernelEx / cooperative / host-func) parameter records.
# Auto-generated layout data mirroring the C structs.
@c.record
class struct_cuFuncSetCacheConfig_params_st(c.Struct):
  SIZE = 16
  hfunc: Annotated[CUfunction, 0]
  config: Annotated[CUfunc_cache, 8]
cuFuncSetCacheConfig_params: TypeAlias = struct_cuFuncSetCacheConfig_params_st
@c.record
class struct_cuFuncGetModule_params_st(c.Struct):
  SIZE = 16
  hmod: Annotated[c.POINTER[CUmodule], 0]
  hfunc: Annotated[CUfunction, 8]
cuFuncGetModule_params: TypeAlias = struct_cuFuncGetModule_params_st
@c.record
class struct_cuFuncGetName_params_st(c.Struct):
  SIZE = 16
  name: Annotated[c.POINTER[c.POINTER[Annotated[bytes, ctypes.c_char]]], 0]
  hfunc: Annotated[CUfunction, 8]
cuFuncGetName_params: TypeAlias = struct_cuFuncGetName_params_st
@c.record
class struct_cuFuncGetParamInfo_params_st(c.Struct):
  SIZE = 32
  func: Annotated[CUfunction, 0]
  paramIndex: Annotated[size_t, 8]
  paramOffset: Annotated[c.POINTER[size_t], 16]
  paramSize: Annotated[c.POINTER[size_t], 24]
cuFuncGetParamInfo_params: TypeAlias = struct_cuFuncGetParamInfo_params_st
@c.record
class struct_cuFuncIsLoaded_params_st(c.Struct):
  SIZE = 16
  state: Annotated[c.POINTER[CUfunctionLoadingState], 0]
  function: Annotated[CUfunction, 8]
class enum_CUfunctionLoadingState_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_FUNCTION_LOADING_STATE_UNLOADED = enum_CUfunctionLoadingState_enum.define('CU_FUNCTION_LOADING_STATE_UNLOADED', 0)
CU_FUNCTION_LOADING_STATE_LOADED = enum_CUfunctionLoadingState_enum.define('CU_FUNCTION_LOADING_STATE_LOADED', 1)
CU_FUNCTION_LOADING_STATE_MAX = enum_CUfunctionLoadingState_enum.define('CU_FUNCTION_LOADING_STATE_MAX', 2)
CUfunctionLoadingState: TypeAlias = enum_CUfunctionLoadingState_enum
cuFuncIsLoaded_params: TypeAlias = struct_cuFuncIsLoaded_params_st
@c.record
class struct_cuFuncLoad_params_st(c.Struct):
  SIZE = 8
  function: Annotated[CUfunction, 0]
cuFuncLoad_params: TypeAlias = struct_cuFuncLoad_params_st
@c.record
class struct_cuLaunchKernel_ptsz_params_st(c.Struct):
  SIZE = 64
  f: Annotated[CUfunction, 0]
  gridDimX: Annotated[Annotated[int, ctypes.c_uint32], 8]
  gridDimY: Annotated[Annotated[int, ctypes.c_uint32], 12]
  gridDimZ: Annotated[Annotated[int, ctypes.c_uint32], 16]
  blockDimX: Annotated[Annotated[int, ctypes.c_uint32], 20]
  blockDimY: Annotated[Annotated[int, ctypes.c_uint32], 24]
  blockDimZ: Annotated[Annotated[int, ctypes.c_uint32], 28]
  sharedMemBytes: Annotated[Annotated[int, ctypes.c_uint32], 32]
  hStream: Annotated[CUstream, 40]
  kernelParams: Annotated[c.POINTER[ctypes.c_void_p], 48]
  extra: Annotated[c.POINTER[ctypes.c_void_p], 56]
cuLaunchKernel_ptsz_params: TypeAlias = struct_cuLaunchKernel_ptsz_params_st
@c.record
class struct_cuLaunchKernelEx_ptsz_params_st(c.Struct):
  SIZE = 32
  config: Annotated[c.POINTER[CUlaunchConfig], 0]
  f: Annotated[CUfunction, 8]
  kernelParams: Annotated[c.POINTER[ctypes.c_void_p], 16]
  extra: Annotated[c.POINTER[ctypes.c_void_p], 24]
@c.record
class struct_CUlaunchConfig_st(c.Struct):
  SIZE = 56
  gridDimX: Annotated[Annotated[int, ctypes.c_uint32], 0]
  gridDimY: Annotated[Annotated[int, ctypes.c_uint32], 4]
  gridDimZ: Annotated[Annotated[int, ctypes.c_uint32], 8]
  blockDimX: Annotated[Annotated[int, ctypes.c_uint32], 12]
  blockDimY: Annotated[Annotated[int, ctypes.c_uint32], 16]
  blockDimZ: Annotated[Annotated[int, ctypes.c_uint32], 20]
  sharedMemBytes: Annotated[Annotated[int, ctypes.c_uint32], 24]
  hStream: Annotated[CUstream, 32]
  attrs: Annotated[c.POINTER[CUlaunchAttribute], 40]
  numAttrs: Annotated[Annotated[int, ctypes.c_uint32], 48]
CUlaunchConfig: TypeAlias = struct_CUlaunchConfig_st
@c.record
class struct_CUlaunchAttribute_st(c.Struct):
  SIZE = 72
  id: Annotated[CUlaunchAttributeID, 0]
  pad: Annotated[c.Array[Annotated[bytes, ctypes.c_char], Literal[4]], 4]
  value: Annotated[CUlaunchAttributeValue, 8]
CUlaunchAttribute: TypeAlias = struct_CUlaunchAttribute_st
CUlaunchAttributeID: TypeAlias = enum_CUlaunchAttributeID_enum
CUlaunchAttributeValue: TypeAlias = union_CUlaunchAttributeValue_union
cuLaunchKernelEx_ptsz_params: TypeAlias = struct_cuLaunchKernelEx_ptsz_params_st
# Cooperative launch: same layout as cuLaunchKernel minus the `extra` field.
@c.record
class struct_cuLaunchCooperativeKernel_ptsz_params_st(c.Struct):
  SIZE = 56
  f: Annotated[CUfunction, 0]
  gridDimX: Annotated[Annotated[int, ctypes.c_uint32], 8]
  gridDimY: Annotated[Annotated[int, ctypes.c_uint32], 12]
  gridDimZ: Annotated[Annotated[int, ctypes.c_uint32], 16]
  blockDimX: Annotated[Annotated[int, ctypes.c_uint32], 20]
  blockDimY: Annotated[Annotated[int, ctypes.c_uint32], 24]
  blockDimZ: Annotated[Annotated[int, ctypes.c_uint32], 28]
  sharedMemBytes: Annotated[Annotated[int, ctypes.c_uint32], 32]
  hStream: Annotated[CUstream, 40]
  kernelParams: Annotated[c.POINTER[ctypes.c_void_p], 48]
cuLaunchCooperativeKernel_ptsz_params: TypeAlias = struct_cuLaunchCooperativeKernel_ptsz_params_st
@c.record
class struct_cuLaunchCooperativeKernelMultiDevice_params_st(c.Struct):
  SIZE = 16
  launchParamsList: Annotated[c.POINTER[CUDA_LAUNCH_PARAMS], 0]
  numDevices: Annotated[Annotated[int, ctypes.c_uint32], 8]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 12]
@c.record
class struct_CUDA_LAUNCH_PARAMS_st(c.Struct):
  SIZE = 56
  function: Annotated[CUfunction, 0]
  gridDimX: Annotated[Annotated[int, ctypes.c_uint32], 8]
  gridDimY: Annotated[Annotated[int, ctypes.c_uint32], 12]
  gridDimZ: Annotated[Annotated[int, ctypes.c_uint32], 16]
  blockDimX: Annotated[Annotated[int, ctypes.c_uint32], 20]
  blockDimY: Annotated[Annotated[int, ctypes.c_uint32], 24]
  blockDimZ: Annotated[Annotated[int, ctypes.c_uint32], 28]
  sharedMemBytes: Annotated[Annotated[int, ctypes.c_uint32], 32]
  hStream: Annotated[CUstream, 40]
  kernelParams: Annotated[c.POINTER[ctypes.c_void_p], 48]
CUDA_LAUNCH_PARAMS: TypeAlias = struct_CUDA_LAUNCH_PARAMS_st
cuLaunchCooperativeKernelMultiDevice_params: TypeAlias = struct_cuLaunchCooperativeKernelMultiDevice_params_st
@c.record
class struct_cuLaunchHostFunc_ptsz_params_st(c.Struct):
  SIZE = 24
  hStream: Annotated[CUstream, 0]
  fn: Annotated[CUhostFn, 8]
  userData: Annotated[ctypes.c_void_p, 16]
# Host function signature: void (*)(void *userData).
CUhostFn: TypeAlias = c.CFUNCTYPE[None, [ctypes.c_void_p]]
cuLaunchHostFunc_ptsz_params: TypeAlias = struct_cuLaunchHostFunc_ptsz_params_st
# Legacy execution-control API (cuFuncSetBlockShape / cuParamSet* / cuLaunch* —
# the pre-cuLaunchKernel interface) and graph creation / kernel-node parameter
# records. Auto-generated layout data mirroring the C structs.
@c.record
class struct_cuFuncSetBlockShape_params_st(c.Struct):
  SIZE = 24
  hfunc: Annotated[CUfunction, 0]
  x: Annotated[Annotated[int, ctypes.c_int32], 8]
  y: Annotated[Annotated[int, ctypes.c_int32], 12]
  z: Annotated[Annotated[int, ctypes.c_int32], 16]
cuFuncSetBlockShape_params: TypeAlias = struct_cuFuncSetBlockShape_params_st
@c.record
class struct_cuFuncSetSharedSize_params_st(c.Struct):
  SIZE = 16
  hfunc: Annotated[CUfunction, 0]
  bytes: Annotated[Annotated[int, ctypes.c_uint32], 8]
cuFuncSetSharedSize_params: TypeAlias = struct_cuFuncSetSharedSize_params_st
@c.record
class struct_cuParamSetSize_params_st(c.Struct):
  SIZE = 16
  hfunc: Annotated[CUfunction, 0]
  numbytes: Annotated[Annotated[int, ctypes.c_uint32], 8]
cuParamSetSize_params: TypeAlias = struct_cuParamSetSize_params_st
@c.record
class struct_cuParamSeti_params_st(c.Struct):
  SIZE = 16
  hfunc: Annotated[CUfunction, 0]
  offset: Annotated[Annotated[int, ctypes.c_int32], 8]
  value: Annotated[Annotated[int, ctypes.c_uint32], 12]
cuParamSeti_params: TypeAlias = struct_cuParamSeti_params_st
@c.record
class struct_cuParamSetf_params_st(c.Struct):
  SIZE = 16
  hfunc: Annotated[CUfunction, 0]
  offset: Annotated[Annotated[int, ctypes.c_int32], 8]
  value: Annotated[Annotated[float, ctypes.c_float], 12]
cuParamSetf_params: TypeAlias = struct_cuParamSetf_params_st
@c.record
class struct_cuParamSetv_params_st(c.Struct):
  SIZE = 32
  hfunc: Annotated[CUfunction, 0]
  offset: Annotated[Annotated[int, ctypes.c_int32], 8]
  ptr: Annotated[ctypes.c_void_p, 16]
  numbytes: Annotated[Annotated[int, ctypes.c_uint32], 24]
cuParamSetv_params: TypeAlias = struct_cuParamSetv_params_st
@c.record
class struct_cuLaunch_params_st(c.Struct):
  SIZE = 8
  f: Annotated[CUfunction, 0]
cuLaunch_params: TypeAlias = struct_cuLaunch_params_st
@c.record
class struct_cuLaunchGrid_params_st(c.Struct):
  SIZE = 16
  f: Annotated[CUfunction, 0]
  grid_width: Annotated[Annotated[int, ctypes.c_int32], 8]
  grid_height: Annotated[Annotated[int, ctypes.c_int32], 12]
cuLaunchGrid_params: TypeAlias = struct_cuLaunchGrid_params_st
@c.record
class struct_cuLaunchGridAsync_params_st(c.Struct):
  SIZE = 24
  f: Annotated[CUfunction, 0]
  grid_width: Annotated[Annotated[int, ctypes.c_int32], 8]
  grid_height: Annotated[Annotated[int, ctypes.c_int32], 12]
  hStream: Annotated[CUstream, 16]
cuLaunchGridAsync_params: TypeAlias = struct_cuLaunchGridAsync_params_st
@c.record
class struct_cuParamSetTexRef_params_st(c.Struct):
  SIZE = 24
  hfunc: Annotated[CUfunction, 0]
  texunit: Annotated[Annotated[int, ctypes.c_int32], 8]
  hTexRef: Annotated[CUtexref, 16]
cuParamSetTexRef_params: TypeAlias = struct_cuParamSetTexRef_params_st
@c.record
class struct_cuFuncSetSharedMemConfig_params_st(c.Struct):
  SIZE = 16
  hfunc: Annotated[CUfunction, 0]
  config: Annotated[CUsharedconfig, 8]
cuFuncSetSharedMemConfig_params: TypeAlias = struct_cuFuncSetSharedMemConfig_params_st
@c.record
class struct_cuGraphCreate_params_st(c.Struct):
  SIZE = 16
  phGraph: Annotated[c.POINTER[CUgraph], 0]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 8]
cuGraphCreate_params: TypeAlias = struct_cuGraphCreate_params_st
@c.record
class struct_cuGraphAddKernelNode_v2_params_st(c.Struct):
  SIZE = 40
  phGraphNode: Annotated[c.POINTER[CUgraphNode], 0]
  hGraph: Annotated[CUgraph, 8]
  dependencies: Annotated[c.POINTER[CUgraphNode], 16]
  numDependencies: Annotated[size_t, 24]
  nodeParams: Annotated[c.POINTER[CUDA_KERNEL_NODE_PARAMS], 32]
# v2 kernel-node params extend the launch description with kern/ctx fields.
@c.record
class struct_CUDA_KERNEL_NODE_PARAMS_v2_st(c.Struct):
  SIZE = 72
  func: Annotated[CUfunction, 0]
  gridDimX: Annotated[Annotated[int, ctypes.c_uint32], 8]
  gridDimY: Annotated[Annotated[int, ctypes.c_uint32], 12]
  gridDimZ: Annotated[Annotated[int, ctypes.c_uint32], 16]
  blockDimX: Annotated[Annotated[int, ctypes.c_uint32], 20]
  blockDimY: Annotated[Annotated[int, ctypes.c_uint32], 24]
  blockDimZ: Annotated[Annotated[int, ctypes.c_uint32], 28]
  sharedMemBytes: Annotated[Annotated[int, ctypes.c_uint32], 32]
  kernelParams: Annotated[c.POINTER[ctypes.c_void_p], 40]
  extra: Annotated[c.POINTER[ctypes.c_void_p], 48]
  kern: Annotated[CUkernel, 56]
  ctx: Annotated[CUcontext, 64]
CUDA_KERNEL_NODE_PARAMS: TypeAlias = struct_CUDA_KERNEL_NODE_PARAMS_v2_st
cuGraphAddKernelNode_v2_params: TypeAlias = struct_cuGraphAddKernelNode_v2_params_st
@c.record
class struct_cuGraphKernelNodeGetParams_v2_params_st(c.Struct):
  SIZE = 16
  hNode: Annotated[CUgraphNode, 0]
  nodeParams: Annotated[c.POINTER[CUDA_KERNEL_NODE_PARAMS], 8]
cuGraphKernelNodeGetParams_v2_params: TypeAlias = struct_cuGraphKernelNodeGetParams_v2_params_st
@c.record
class struct_cuGraphKernelNodeSetParams_v2_params_st(c.Struct):
  SIZE = 16
  hNode: Annotated[CUgraphNode, 0]
  nodeParams: Annotated[c.POINTER[CUDA_KERNEL_NODE_PARAMS], 8]
cuGraphKernelNodeSetParams_v2_params: TypeAlias = struct_cuGraphKernelNodeSetParams_v2_params_st
@c.record
class struct_cuGraphAddMemcpyNode_params_st(c.Struct):
SIZE = 48
phGraphNode: Annotated[c.POINTER[CUgraphNode], 0]
hGraph: Annotated[CUgraph, 8]
dependencies: Annotated[c.POINTER[CUgraphNode], 16]
numDependencies: Annotated[size_t, 24]
copyParams: Annotated[c.POINTER[CUDA_MEMCPY3D], 32]
ctx: Annotated[CUcontext, 40]
cuGraphAddMemcpyNode_params: TypeAlias = struct_cuGraphAddMemcpyNode_params_st
@c.record
class struct_cuGraphMemcpyNodeGetParams_params_st(c.Struct):
SIZE = 16
hNode: Annotated[CUgraphNode, 0]
nodeParams: Annotated[c.POINTER[CUDA_MEMCPY3D], 8]
cuGraphMemcpyNodeGetParams_params: TypeAlias = struct_cuGraphMemcpyNodeGetParams_params_st
@c.record
class struct_cuGraphMemcpyNodeSetParams_params_st(c.Struct):
SIZE = 16
hNode: Annotated[CUgraphNode, 0]
nodeParams: Annotated[c.POINTER[CUDA_MEMCPY3D], 8]
cuGraphMemcpyNodeSetParams_params: TypeAlias = struct_cuGraphMemcpyNodeSetParams_params_st
@c.record
class struct_cuGraphAddMemsetNode_params_st(c.Struct):
SIZE = 48
phGraphNode: Annotated[c.POINTER[CUgraphNode], 0]
hGraph: Annotated[CUgraph, 8]
dependencies: Annotated[c.POINTER[CUgraphNode], 16]
numDependencies: Annotated[size_t, 24]
memsetParams: Annotated[c.POINTER[CUDA_MEMSET_NODE_PARAMS], 32]
ctx: Annotated[CUcontext, 40]
@c.record
class struct_CUDA_MEMSET_NODE_PARAMS_st(c.Struct):
SIZE = 40
dst: Annotated[CUdeviceptr, 0]
pitch: Annotated[size_t, 8]
value: Annotated[Annotated[int, ctypes.c_uint32], 16]
elementSize: Annotated[Annotated[int, ctypes.c_uint32], 20]
width: Annotated[size_t, 24]
height: Annotated[size_t, 32]
CUDA_MEMSET_NODE_PARAMS: TypeAlias = struct_CUDA_MEMSET_NODE_PARAMS_st
cuGraphAddMemsetNode_params: TypeAlias = struct_cuGraphAddMemsetNode_params_st
@c.record
class struct_cuGraphMemsetNodeGetParams_params_st(c.Struct):
SIZE = 16
hNode: Annotated[CUgraphNode, 0]
nodeParams: Annotated[c.POINTER[CUDA_MEMSET_NODE_PARAMS], 8]
cuGraphMemsetNodeGetParams_params: TypeAlias = struct_cuGraphMemsetNodeGetParams_params_st
@c.record
class struct_cuGraphMemsetNodeSetParams_params_st(c.Struct):
SIZE = 16
hNode: Annotated[CUgraphNode, 0]
nodeParams: Annotated[c.POINTER[CUDA_MEMSET_NODE_PARAMS], 8]
cuGraphMemsetNodeSetParams_params: TypeAlias = struct_cuGraphMemsetNodeSetParams_params_st
# Graph node records: host-callback, child-graph, empty, and event
# record/wait nodes. Same generated layout convention as above (byte offset
# per field, SIZE = total record size).
@c.record
class struct_cuGraphAddHostNode_params_st(c.Struct):
    # Parameter record for cuGraphAddHostNode.
    SIZE = 40
    phGraphNode: Annotated[c.POINTER[CUgraphNode], 0]
    hGraph: Annotated[CUgraph, 8]
    dependencies: Annotated[c.POINTER[CUgraphNode], 16]
    numDependencies: Annotated[size_t, 24]
    nodeParams: Annotated[c.POINTER[CUDA_HOST_NODE_PARAMS], 32]
@c.record
class struct_CUDA_HOST_NODE_PARAMS_st(c.Struct):
    # Host node payload: host function pointer plus opaque user data.
    SIZE = 16
    fn: Annotated[CUhostFn, 0]
    userData: Annotated[ctypes.c_void_p, 8]
CUDA_HOST_NODE_PARAMS: TypeAlias = struct_CUDA_HOST_NODE_PARAMS_st
cuGraphAddHostNode_params: TypeAlias = struct_cuGraphAddHostNode_params_st
@c.record
class struct_cuGraphHostNodeGetParams_params_st(c.Struct):
    # Parameter record for cuGraphHostNodeGetParams.
    SIZE = 16
    hNode: Annotated[CUgraphNode, 0]
    nodeParams: Annotated[c.POINTER[CUDA_HOST_NODE_PARAMS], 8]
cuGraphHostNodeGetParams_params: TypeAlias = struct_cuGraphHostNodeGetParams_params_st
@c.record
class struct_cuGraphHostNodeSetParams_params_st(c.Struct):
    # Parameter record for cuGraphHostNodeSetParams.
    SIZE = 16
    hNode: Annotated[CUgraphNode, 0]
    nodeParams: Annotated[c.POINTER[CUDA_HOST_NODE_PARAMS], 8]
cuGraphHostNodeSetParams_params: TypeAlias = struct_cuGraphHostNodeSetParams_params_st
@c.record
class struct_cuGraphAddChildGraphNode_params_st(c.Struct):
    # Parameter record for cuGraphAddChildGraphNode.
    SIZE = 40
    phGraphNode: Annotated[c.POINTER[CUgraphNode], 0]
    hGraph: Annotated[CUgraph, 8]
    dependencies: Annotated[c.POINTER[CUgraphNode], 16]
    numDependencies: Annotated[size_t, 24]
    childGraph: Annotated[CUgraph, 32]
cuGraphAddChildGraphNode_params: TypeAlias = struct_cuGraphAddChildGraphNode_params_st
@c.record
class struct_cuGraphChildGraphNodeGetGraph_params_st(c.Struct):
    # Parameter record for cuGraphChildGraphNodeGetGraph.
    SIZE = 16
    hNode: Annotated[CUgraphNode, 0]
    phGraph: Annotated[c.POINTER[CUgraph], 8]
cuGraphChildGraphNodeGetGraph_params: TypeAlias = struct_cuGraphChildGraphNodeGetGraph_params_st
@c.record
class struct_cuGraphAddEmptyNode_params_st(c.Struct):
    # Parameter record for cuGraphAddEmptyNode.
    SIZE = 32
    phGraphNode: Annotated[c.POINTER[CUgraphNode], 0]
    hGraph: Annotated[CUgraph, 8]
    dependencies: Annotated[c.POINTER[CUgraphNode], 16]
    numDependencies: Annotated[size_t, 24]
cuGraphAddEmptyNode_params: TypeAlias = struct_cuGraphAddEmptyNode_params_st
@c.record
class struct_cuGraphAddEventRecordNode_params_st(c.Struct):
    # Parameter record for cuGraphAddEventRecordNode.
    SIZE = 40
    phGraphNode: Annotated[c.POINTER[CUgraphNode], 0]
    hGraph: Annotated[CUgraph, 8]
    dependencies: Annotated[c.POINTER[CUgraphNode], 16]
    numDependencies: Annotated[size_t, 24]
    event: Annotated[CUevent, 32]
cuGraphAddEventRecordNode_params: TypeAlias = struct_cuGraphAddEventRecordNode_params_st
@c.record
class struct_cuGraphEventRecordNodeGetEvent_params_st(c.Struct):
    # Parameter record for cuGraphEventRecordNodeGetEvent.
    SIZE = 16
    hNode: Annotated[CUgraphNode, 0]
    event_out: Annotated[c.POINTER[CUevent], 8]
cuGraphEventRecordNodeGetEvent_params: TypeAlias = struct_cuGraphEventRecordNodeGetEvent_params_st
@c.record
class struct_cuGraphEventRecordNodeSetEvent_params_st(c.Struct):
    # Parameter record for cuGraphEventRecordNodeSetEvent.
    SIZE = 16
    hNode: Annotated[CUgraphNode, 0]
    event: Annotated[CUevent, 8]
cuGraphEventRecordNodeSetEvent_params: TypeAlias = struct_cuGraphEventRecordNodeSetEvent_params_st
@c.record
class struct_cuGraphAddEventWaitNode_params_st(c.Struct):
    # Parameter record for cuGraphAddEventWaitNode.
    SIZE = 40
    phGraphNode: Annotated[c.POINTER[CUgraphNode], 0]
    hGraph: Annotated[CUgraph, 8]
    dependencies: Annotated[c.POINTER[CUgraphNode], 16]
    numDependencies: Annotated[size_t, 24]
    event: Annotated[CUevent, 32]
cuGraphAddEventWaitNode_params: TypeAlias = struct_cuGraphAddEventWaitNode_params_st
@c.record
class struct_cuGraphEventWaitNodeGetEvent_params_st(c.Struct):
    # Parameter record for cuGraphEventWaitNodeGetEvent.
    SIZE = 16
    hNode: Annotated[CUgraphNode, 0]
    event_out: Annotated[c.POINTER[CUevent], 8]
cuGraphEventWaitNodeGetEvent_params: TypeAlias = struct_cuGraphEventWaitNodeGetEvent_params_st
@c.record
class struct_cuGraphEventWaitNodeSetEvent_params_st(c.Struct):
    # Parameter record for cuGraphEventWaitNodeSetEvent.
    SIZE = 16
    hNode: Annotated[CUgraphNode, 0]
    event: Annotated[CUevent, 8]
cuGraphEventWaitNodeSetEvent_params: TypeAlias = struct_cuGraphEventWaitNodeSetEvent_params_st
# External-semaphore signal/wait graph node records and their payload types.
@c.record
class struct_cuGraphAddExternalSemaphoresSignalNode_params_st(c.Struct):
    # Parameter record for cuGraphAddExternalSemaphoresSignalNode.
    SIZE = 40
    phGraphNode: Annotated[c.POINTER[CUgraphNode], 0]
    hGraph: Annotated[CUgraph, 8]
    dependencies: Annotated[c.POINTER[CUgraphNode], 16]
    numDependencies: Annotated[size_t, 24]
    nodeParams: Annotated[c.POINTER[CUDA_EXT_SEM_SIGNAL_NODE_PARAMS], 32]
@c.record
class struct_CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_st(c.Struct):
    # Signal node payload: parallel arrays of semaphores and signal params.
    SIZE = 24
    extSemArray: Annotated[c.POINTER[CUexternalSemaphore], 0]
    paramsArray: Annotated[c.POINTER[CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS], 8]
    numExtSems: Annotated[Annotated[int, ctypes.c_uint32], 16]
CUDA_EXT_SEM_SIGNAL_NODE_PARAMS: TypeAlias = struct_CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_st
cuGraphAddExternalSemaphoresSignalNode_params: TypeAlias = struct_cuGraphAddExternalSemaphoresSignalNode_params_st
@c.record
class struct_cuGraphExternalSemaphoresSignalNodeGetParams_params_st(c.Struct):
    # Parameter record for cuGraphExternalSemaphoresSignalNodeGetParams.
    SIZE = 16
    hNode: Annotated[CUgraphNode, 0]
    params_out: Annotated[c.POINTER[CUDA_EXT_SEM_SIGNAL_NODE_PARAMS], 8]
cuGraphExternalSemaphoresSignalNodeGetParams_params: TypeAlias = struct_cuGraphExternalSemaphoresSignalNodeGetParams_params_st
@c.record
class struct_cuGraphExternalSemaphoresSignalNodeSetParams_params_st(c.Struct):
    # Parameter record for cuGraphExternalSemaphoresSignalNodeSetParams.
    SIZE = 16
    hNode: Annotated[CUgraphNode, 0]
    nodeParams: Annotated[c.POINTER[CUDA_EXT_SEM_SIGNAL_NODE_PARAMS], 8]
cuGraphExternalSemaphoresSignalNodeSetParams_params: TypeAlias = struct_cuGraphExternalSemaphoresSignalNodeSetParams_params_st
@c.record
class struct_cuGraphAddExternalSemaphoresWaitNode_params_st(c.Struct):
    # Parameter record for cuGraphAddExternalSemaphoresWaitNode.
    SIZE = 40
    phGraphNode: Annotated[c.POINTER[CUgraphNode], 0]
    hGraph: Annotated[CUgraph, 8]
    dependencies: Annotated[c.POINTER[CUgraphNode], 16]
    numDependencies: Annotated[size_t, 24]
    nodeParams: Annotated[c.POINTER[CUDA_EXT_SEM_WAIT_NODE_PARAMS], 32]
@c.record
class struct_CUDA_EXT_SEM_WAIT_NODE_PARAMS_st(c.Struct):
    # Wait node payload: parallel arrays of semaphores and wait params.
    SIZE = 24
    extSemArray: Annotated[c.POINTER[CUexternalSemaphore], 0]
    paramsArray: Annotated[c.POINTER[CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS], 8]
    numExtSems: Annotated[Annotated[int, ctypes.c_uint32], 16]
CUDA_EXT_SEM_WAIT_NODE_PARAMS: TypeAlias = struct_CUDA_EXT_SEM_WAIT_NODE_PARAMS_st
cuGraphAddExternalSemaphoresWaitNode_params: TypeAlias = struct_cuGraphAddExternalSemaphoresWaitNode_params_st
@c.record
class struct_cuGraphExternalSemaphoresWaitNodeGetParams_params_st(c.Struct):
    # Parameter record for cuGraphExternalSemaphoresWaitNodeGetParams.
    SIZE = 16
    hNode: Annotated[CUgraphNode, 0]
    params_out: Annotated[c.POINTER[CUDA_EXT_SEM_WAIT_NODE_PARAMS], 8]
cuGraphExternalSemaphoresWaitNodeGetParams_params: TypeAlias = struct_cuGraphExternalSemaphoresWaitNodeGetParams_params_st
@c.record
class struct_cuGraphExternalSemaphoresWaitNodeSetParams_params_st(c.Struct):
    # Parameter record for cuGraphExternalSemaphoresWaitNodeSetParams.
    SIZE = 16
    hNode: Annotated[CUgraphNode, 0]
    nodeParams: Annotated[c.POINTER[CUDA_EXT_SEM_WAIT_NODE_PARAMS], 8]
cuGraphExternalSemaphoresWaitNodeSetParams_params: TypeAlias = struct_cuGraphExternalSemaphoresWaitNodeSetParams_params_st
# Batch-mem-op and memory alloc/free graph node records, plus the
# device-level graph memory trim/attribute records and their enum.
@c.record
class struct_cuGraphAddBatchMemOpNode_params_st(c.Struct):
    # Parameter record for cuGraphAddBatchMemOpNode.
    SIZE = 40
    phGraphNode: Annotated[c.POINTER[CUgraphNode], 0]
    hGraph: Annotated[CUgraph, 8]
    dependencies: Annotated[c.POINTER[CUgraphNode], 16]
    numDependencies: Annotated[size_t, 24]
    nodeParams: Annotated[c.POINTER[CUDA_BATCH_MEM_OP_NODE_PARAMS], 32]
@c.record
class struct_CUDA_BATCH_MEM_OP_NODE_PARAMS_v1_st(c.Struct):
    # Batch mem-op node payload (v1).
    SIZE = 32
    ctx: Annotated[CUcontext, 0]
    count: Annotated[Annotated[int, ctypes.c_uint32], 8]
    paramArray: Annotated[c.POINTER[CUstreamBatchMemOpParams], 16]
    flags: Annotated[Annotated[int, ctypes.c_uint32], 24]
CUDA_BATCH_MEM_OP_NODE_PARAMS: TypeAlias = struct_CUDA_BATCH_MEM_OP_NODE_PARAMS_v1_st
cuGraphAddBatchMemOpNode_params: TypeAlias = struct_cuGraphAddBatchMemOpNode_params_st
@c.record
class struct_cuGraphBatchMemOpNodeGetParams_params_st(c.Struct):
    # Parameter record for cuGraphBatchMemOpNodeGetParams.
    SIZE = 16
    hNode: Annotated[CUgraphNode, 0]
    nodeParams_out: Annotated[c.POINTER[CUDA_BATCH_MEM_OP_NODE_PARAMS], 8]
cuGraphBatchMemOpNodeGetParams_params: TypeAlias = struct_cuGraphBatchMemOpNodeGetParams_params_st
@c.record
class struct_cuGraphBatchMemOpNodeSetParams_params_st(c.Struct):
    # Parameter record for cuGraphBatchMemOpNodeSetParams.
    SIZE = 16
    hNode: Annotated[CUgraphNode, 0]
    nodeParams: Annotated[c.POINTER[CUDA_BATCH_MEM_OP_NODE_PARAMS], 8]
cuGraphBatchMemOpNodeSetParams_params: TypeAlias = struct_cuGraphBatchMemOpNodeSetParams_params_st
@c.record
class struct_cuGraphExecBatchMemOpNodeSetParams_params_st(c.Struct):
    # Parameter record for cuGraphExecBatchMemOpNodeSetParams.
    SIZE = 24
    hGraphExec: Annotated[CUgraphExec, 0]
    hNode: Annotated[CUgraphNode, 8]
    nodeParams: Annotated[c.POINTER[CUDA_BATCH_MEM_OP_NODE_PARAMS], 16]
cuGraphExecBatchMemOpNodeSetParams_params: TypeAlias = struct_cuGraphExecBatchMemOpNodeSetParams_params_st
@c.record
class struct_cuGraphAddMemAllocNode_params_st(c.Struct):
    # Parameter record for cuGraphAddMemAllocNode.
    SIZE = 40
    phGraphNode: Annotated[c.POINTER[CUgraphNode], 0]
    hGraph: Annotated[CUgraph, 8]
    dependencies: Annotated[c.POINTER[CUgraphNode], 16]
    numDependencies: Annotated[size_t, 24]
    nodeParams: Annotated[c.POINTER[CUDA_MEM_ALLOC_NODE_PARAMS], 32]
@c.record
class struct_CUDA_MEM_ALLOC_NODE_PARAMS_v1_st(c.Struct):
    # Mem-alloc node payload (v1); poolProps is embedded inline (88 bytes),
    # which is why accessDescs starts at offset 88.
    SIZE = 120
    poolProps: Annotated[CUmemPoolProps, 0]
    accessDescs: Annotated[c.POINTER[CUmemAccessDesc], 88]
    accessDescCount: Annotated[size_t, 96]
    bytesize: Annotated[size_t, 104]
    dptr: Annotated[CUdeviceptr, 112]
CUDA_MEM_ALLOC_NODE_PARAMS: TypeAlias = struct_CUDA_MEM_ALLOC_NODE_PARAMS_v1_st
cuGraphAddMemAllocNode_params: TypeAlias = struct_cuGraphAddMemAllocNode_params_st
@c.record
class struct_cuGraphMemAllocNodeGetParams_params_st(c.Struct):
    # Parameter record for cuGraphMemAllocNodeGetParams.
    SIZE = 16
    hNode: Annotated[CUgraphNode, 0]
    params_out: Annotated[c.POINTER[CUDA_MEM_ALLOC_NODE_PARAMS], 8]
cuGraphMemAllocNodeGetParams_params: TypeAlias = struct_cuGraphMemAllocNodeGetParams_params_st
@c.record
class struct_cuGraphAddMemFreeNode_params_st(c.Struct):
    # Parameter record for cuGraphAddMemFreeNode.
    SIZE = 40
    phGraphNode: Annotated[c.POINTER[CUgraphNode], 0]
    hGraph: Annotated[CUgraph, 8]
    dependencies: Annotated[c.POINTER[CUgraphNode], 16]
    numDependencies: Annotated[size_t, 24]
    dptr: Annotated[CUdeviceptr, 32]
cuGraphAddMemFreeNode_params: TypeAlias = struct_cuGraphAddMemFreeNode_params_st
@c.record
class struct_cuGraphMemFreeNodeGetParams_params_st(c.Struct):
    # Parameter record for cuGraphMemFreeNodeGetParams.
    SIZE = 16
    hNode: Annotated[CUgraphNode, 0]
    dptr_out: Annotated[c.POINTER[CUdeviceptr], 8]
cuGraphMemFreeNodeGetParams_params: TypeAlias = struct_cuGraphMemFreeNodeGetParams_params_st
@c.record
class struct_cuDeviceGraphMemTrim_params_st(c.Struct):
    # Parameter record for cuDeviceGraphMemTrim (CUdevice is a 4-byte value).
    SIZE = 4
    device: Annotated[CUdevice, 0]
cuDeviceGraphMemTrim_params: TypeAlias = struct_cuDeviceGraphMemTrim_params_st
@c.record
class struct_cuDeviceGetGraphMemAttribute_params_st(c.Struct):
    # Parameter record for cuDeviceGetGraphMemAttribute.
    SIZE = 16
    device: Annotated[CUdevice, 0]
    attr: Annotated[CUgraphMem_attribute, 4]
    value: Annotated[ctypes.c_void_p, 8]
# CUgraphMem_attribute: selects which graph-memory counter is queried/reset.
class enum_CUgraphMem_attribute_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_GRAPH_MEM_ATTR_USED_MEM_CURRENT = enum_CUgraphMem_attribute_enum.define('CU_GRAPH_MEM_ATTR_USED_MEM_CURRENT', 0)
CU_GRAPH_MEM_ATTR_USED_MEM_HIGH = enum_CUgraphMem_attribute_enum.define('CU_GRAPH_MEM_ATTR_USED_MEM_HIGH', 1)
CU_GRAPH_MEM_ATTR_RESERVED_MEM_CURRENT = enum_CUgraphMem_attribute_enum.define('CU_GRAPH_MEM_ATTR_RESERVED_MEM_CURRENT', 2)
CU_GRAPH_MEM_ATTR_RESERVED_MEM_HIGH = enum_CUgraphMem_attribute_enum.define('CU_GRAPH_MEM_ATTR_RESERVED_MEM_HIGH', 3)
CUgraphMem_attribute: TypeAlias = enum_CUgraphMem_attribute_enum
cuDeviceGetGraphMemAttribute_params: TypeAlias = struct_cuDeviceGetGraphMemAttribute_params_st
@c.record
class struct_cuDeviceSetGraphMemAttribute_params_st(c.Struct):
    # Parameter record for cuDeviceSetGraphMemAttribute.
    SIZE = 16
    device: Annotated[CUdevice, 0]
    attr: Annotated[CUgraphMem_attribute, 4]
    value: Annotated[ctypes.c_void_p, 8]
cuDeviceSetGraphMemAttribute_params: TypeAlias = struct_cuDeviceSetGraphMemAttribute_params_st
# Graph topology records: clone, node lookup, node/edge/dependency queries
# (v1 plus the v2 variants that add CUgraphEdgeData), and dependency
# add/remove. The field `_from` is spelled `from` in the C API; the leading
# underscore avoids the Python keyword (see paired `to` field).
@c.record
class struct_cuGraphClone_params_st(c.Struct):
    # Parameter record for cuGraphClone.
    SIZE = 16
    phGraphClone: Annotated[c.POINTER[CUgraph], 0]
    originalGraph: Annotated[CUgraph, 8]
cuGraphClone_params: TypeAlias = struct_cuGraphClone_params_st
@c.record
class struct_cuGraphNodeFindInClone_params_st(c.Struct):
    # Parameter record for cuGraphNodeFindInClone.
    SIZE = 24
    phNode: Annotated[c.POINTER[CUgraphNode], 0]
    hOriginalNode: Annotated[CUgraphNode, 8]
    hClonedGraph: Annotated[CUgraph, 16]
cuGraphNodeFindInClone_params: TypeAlias = struct_cuGraphNodeFindInClone_params_st
@c.record
class struct_cuGraphNodeGetType_params_st(c.Struct):
    # Parameter record for cuGraphNodeGetType. `type` shadows the builtin:
    # kept verbatim to match the C field name.
    SIZE = 16
    hNode: Annotated[CUgraphNode, 0]
    type: Annotated[c.POINTER[CUgraphNodeType], 8]
cuGraphNodeGetType_params: TypeAlias = struct_cuGraphNodeGetType_params_st
@c.record
class struct_cuGraphGetNodes_params_st(c.Struct):
    # Parameter record for cuGraphGetNodes.
    SIZE = 24
    hGraph: Annotated[CUgraph, 0]
    nodes: Annotated[c.POINTER[CUgraphNode], 8]
    numNodes: Annotated[c.POINTER[size_t], 16]
cuGraphGetNodes_params: TypeAlias = struct_cuGraphGetNodes_params_st
@c.record
class struct_cuGraphGetRootNodes_params_st(c.Struct):
    # Parameter record for cuGraphGetRootNodes.
    SIZE = 24
    hGraph: Annotated[CUgraph, 0]
    rootNodes: Annotated[c.POINTER[CUgraphNode], 8]
    numRootNodes: Annotated[c.POINTER[size_t], 16]
cuGraphGetRootNodes_params: TypeAlias = struct_cuGraphGetRootNodes_params_st
@c.record
class struct_cuGraphGetEdges_params_st(c.Struct):
    # Parameter record for cuGraphGetEdges.
    SIZE = 32
    hGraph: Annotated[CUgraph, 0]
    _from: Annotated[c.POINTER[CUgraphNode], 8]
    to: Annotated[c.POINTER[CUgraphNode], 16]
    numEdges: Annotated[c.POINTER[size_t], 24]
cuGraphGetEdges_params: TypeAlias = struct_cuGraphGetEdges_params_st
@c.record
class struct_cuGraphGetEdges_v2_params_st(c.Struct):
    # Parameter record for cuGraphGetEdges_v2 (adds edgeData).
    SIZE = 40
    hGraph: Annotated[CUgraph, 0]
    _from: Annotated[c.POINTER[CUgraphNode], 8]
    to: Annotated[c.POINTER[CUgraphNode], 16]
    edgeData: Annotated[c.POINTER[CUgraphEdgeData], 24]
    numEdges: Annotated[c.POINTER[size_t], 32]
cuGraphGetEdges_v2_params: TypeAlias = struct_cuGraphGetEdges_v2_params_st
@c.record
class struct_cuGraphNodeGetDependencies_params_st(c.Struct):
    # Parameter record for cuGraphNodeGetDependencies.
    SIZE = 24
    hNode: Annotated[CUgraphNode, 0]
    dependencies: Annotated[c.POINTER[CUgraphNode], 8]
    numDependencies: Annotated[c.POINTER[size_t], 16]
cuGraphNodeGetDependencies_params: TypeAlias = struct_cuGraphNodeGetDependencies_params_st
@c.record
class struct_cuGraphNodeGetDependencies_v2_params_st(c.Struct):
    # Parameter record for cuGraphNodeGetDependencies_v2 (adds edgeData).
    SIZE = 32
    hNode: Annotated[CUgraphNode, 0]
    dependencies: Annotated[c.POINTER[CUgraphNode], 8]
    edgeData: Annotated[c.POINTER[CUgraphEdgeData], 16]
    numDependencies: Annotated[c.POINTER[size_t], 24]
cuGraphNodeGetDependencies_v2_params: TypeAlias = struct_cuGraphNodeGetDependencies_v2_params_st
@c.record
class struct_cuGraphNodeGetDependentNodes_params_st(c.Struct):
    # Parameter record for cuGraphNodeGetDependentNodes.
    SIZE = 24
    hNode: Annotated[CUgraphNode, 0]
    dependentNodes: Annotated[c.POINTER[CUgraphNode], 8]
    numDependentNodes: Annotated[c.POINTER[size_t], 16]
cuGraphNodeGetDependentNodes_params: TypeAlias = struct_cuGraphNodeGetDependentNodes_params_st
@c.record
class struct_cuGraphNodeGetDependentNodes_v2_params_st(c.Struct):
    # Parameter record for cuGraphNodeGetDependentNodes_v2 (adds edgeData).
    SIZE = 32
    hNode: Annotated[CUgraphNode, 0]
    dependentNodes: Annotated[c.POINTER[CUgraphNode], 8]
    edgeData: Annotated[c.POINTER[CUgraphEdgeData], 16]
    numDependentNodes: Annotated[c.POINTER[size_t], 24]
cuGraphNodeGetDependentNodes_v2_params: TypeAlias = struct_cuGraphNodeGetDependentNodes_v2_params_st
@c.record
class struct_cuGraphAddDependencies_params_st(c.Struct):
    # Parameter record for cuGraphAddDependencies.
    SIZE = 32
    hGraph: Annotated[CUgraph, 0]
    _from: Annotated[c.POINTER[CUgraphNode], 8]
    to: Annotated[c.POINTER[CUgraphNode], 16]
    numDependencies: Annotated[size_t, 24]
cuGraphAddDependencies_params: TypeAlias = struct_cuGraphAddDependencies_params_st
@c.record
class struct_cuGraphAddDependencies_v2_params_st(c.Struct):
    # Parameter record for cuGraphAddDependencies_v2 (adds edgeData).
    SIZE = 40
    hGraph: Annotated[CUgraph, 0]
    _from: Annotated[c.POINTER[CUgraphNode], 8]
    to: Annotated[c.POINTER[CUgraphNode], 16]
    edgeData: Annotated[c.POINTER[CUgraphEdgeData], 24]
    numDependencies: Annotated[size_t, 32]
cuGraphAddDependencies_v2_params: TypeAlias = struct_cuGraphAddDependencies_v2_params_st
@c.record
class struct_cuGraphRemoveDependencies_params_st(c.Struct):
    # Parameter record for cuGraphRemoveDependencies.
    SIZE = 32
    hGraph: Annotated[CUgraph, 0]
    _from: Annotated[c.POINTER[CUgraphNode], 8]
    to: Annotated[c.POINTER[CUgraphNode], 16]
    numDependencies: Annotated[size_t, 24]
cuGraphRemoveDependencies_params: TypeAlias = struct_cuGraphRemoveDependencies_params_st
@c.record
class struct_cuGraphRemoveDependencies_v2_params_st(c.Struct):
    # Parameter record for cuGraphRemoveDependencies_v2 (adds edgeData).
    SIZE = 40
    hGraph: Annotated[CUgraph, 0]
    _from: Annotated[c.POINTER[CUgraphNode], 8]
    to: Annotated[c.POINTER[CUgraphNode], 16]
    edgeData: Annotated[c.POINTER[CUgraphEdgeData], 24]
    numDependencies: Annotated[size_t, 32]
cuGraphRemoveDependencies_v2_params: TypeAlias = struct_cuGraphRemoveDependencies_v2_params_st
@c.record
class struct_cuGraphDestroyNode_params_st(c.Struct):
    # Parameter record for cuGraphDestroyNode.
    SIZE = 8
    hNode: Annotated[CUgraphNode, 0]
cuGraphDestroyNode_params: TypeAlias = struct_cuGraphDestroyNode_params_st
# Graph instantiation records, the instantiate-params payload, and the
# CUgraphInstantiateResult status enum.
@c.record
class struct_cuGraphInstantiateWithFlags_params_st(c.Struct):
    # Parameter record for cuGraphInstantiateWithFlags.
    SIZE = 24
    phGraphExec: Annotated[c.POINTER[CUgraphExec], 0]
    hGraph: Annotated[CUgraph, 8]
    flags: Annotated[Annotated[int, ctypes.c_uint64], 16]
cuGraphInstantiateWithFlags_params: TypeAlias = struct_cuGraphInstantiateWithFlags_params_st
@c.record
class struct_cuGraphInstantiateWithParams_ptsz_params_st(c.Struct):
    # Parameter record for cuGraphInstantiateWithParams (_ptsz = per-thread
    # default stream variant of the entry point).
    SIZE = 24
    phGraphExec: Annotated[c.POINTER[CUgraphExec], 0]
    hGraph: Annotated[CUgraph, 8]
    instantiateParams: Annotated[c.POINTER[CUDA_GRAPH_INSTANTIATE_PARAMS], 16]
@c.record
class struct_CUDA_GRAPH_INSTANTIATE_PARAMS_st(c.Struct):
    # Instantiate payload: flags in, upload stream in, error node/result out.
    SIZE = 32
    flags: Annotated[cuuint64_t, 0]
    hUploadStream: Annotated[CUstream, 8]
    hErrNode_out: Annotated[CUgraphNode, 16]
    result_out: Annotated[CUgraphInstantiateResult, 24]
CUDA_GRAPH_INSTANTIATE_PARAMS: TypeAlias = struct_CUDA_GRAPH_INSTANTIATE_PARAMS_st
# CUgraphInstantiateResult: outcome reported in result_out above.
class enum_CUgraphInstantiateResult_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CUDA_GRAPH_INSTANTIATE_SUCCESS = enum_CUgraphInstantiateResult_enum.define('CUDA_GRAPH_INSTANTIATE_SUCCESS', 0)
CUDA_GRAPH_INSTANTIATE_ERROR = enum_CUgraphInstantiateResult_enum.define('CUDA_GRAPH_INSTANTIATE_ERROR', 1)
CUDA_GRAPH_INSTANTIATE_INVALID_STRUCTURE = enum_CUgraphInstantiateResult_enum.define('CUDA_GRAPH_INSTANTIATE_INVALID_STRUCTURE', 2)
CUDA_GRAPH_INSTANTIATE_NODE_OPERATION_NOT_SUPPORTED = enum_CUgraphInstantiateResult_enum.define('CUDA_GRAPH_INSTANTIATE_NODE_OPERATION_NOT_SUPPORTED', 3)
CUDA_GRAPH_INSTANTIATE_MULTIPLE_CTXS_NOT_SUPPORTED = enum_CUgraphInstantiateResult_enum.define('CUDA_GRAPH_INSTANTIATE_MULTIPLE_CTXS_NOT_SUPPORTED', 4)
CUDA_GRAPH_INSTANTIATE_CONDITIONAL_HANDLE_UNUSED = enum_CUgraphInstantiateResult_enum.define('CUDA_GRAPH_INSTANTIATE_CONDITIONAL_HANDLE_UNUSED', 5)
CUgraphInstantiateResult: TypeAlias = enum_CUgraphInstantiateResult_enum
cuGraphInstantiateWithParams_ptsz_params: TypeAlias = struct_cuGraphInstantiateWithParams_ptsz_params_st
@c.record
class struct_cuGraphExecGetFlags_params_st(c.Struct):
    # Parameter record for cuGraphExecGetFlags.
    SIZE = 16
    hGraphExec: Annotated[CUgraphExec, 0]
    flags: Annotated[c.POINTER[cuuint64_t], 8]
cuGraphExecGetFlags_params: TypeAlias = struct_cuGraphExecGetFlags_params_st
# Executable-graph (CUgraphExec) node-update records: set-params for each
# node kind on an instantiated graph, per-node enable toggles, and upload.
@c.record
class struct_cuGraphExecKernelNodeSetParams_v2_params_st(c.Struct):
    # Parameter record for cuGraphExecKernelNodeSetParams_v2.
    SIZE = 24
    hGraphExec: Annotated[CUgraphExec, 0]
    hNode: Annotated[CUgraphNode, 8]
    nodeParams: Annotated[c.POINTER[CUDA_KERNEL_NODE_PARAMS], 16]
cuGraphExecKernelNodeSetParams_v2_params: TypeAlias = struct_cuGraphExecKernelNodeSetParams_v2_params_st
@c.record
class struct_cuGraphExecMemcpyNodeSetParams_params_st(c.Struct):
    # Parameter record for cuGraphExecMemcpyNodeSetParams.
    SIZE = 32
    hGraphExec: Annotated[CUgraphExec, 0]
    hNode: Annotated[CUgraphNode, 8]
    copyParams: Annotated[c.POINTER[CUDA_MEMCPY3D], 16]
    ctx: Annotated[CUcontext, 24]
cuGraphExecMemcpyNodeSetParams_params: TypeAlias = struct_cuGraphExecMemcpyNodeSetParams_params_st
@c.record
class struct_cuGraphExecMemsetNodeSetParams_params_st(c.Struct):
    # Parameter record for cuGraphExecMemsetNodeSetParams.
    SIZE = 32
    hGraphExec: Annotated[CUgraphExec, 0]
    hNode: Annotated[CUgraphNode, 8]
    memsetParams: Annotated[c.POINTER[CUDA_MEMSET_NODE_PARAMS], 16]
    ctx: Annotated[CUcontext, 24]
cuGraphExecMemsetNodeSetParams_params: TypeAlias = struct_cuGraphExecMemsetNodeSetParams_params_st
@c.record
class struct_cuGraphExecHostNodeSetParams_params_st(c.Struct):
    # Parameter record for cuGraphExecHostNodeSetParams.
    SIZE = 24
    hGraphExec: Annotated[CUgraphExec, 0]
    hNode: Annotated[CUgraphNode, 8]
    nodeParams: Annotated[c.POINTER[CUDA_HOST_NODE_PARAMS], 16]
cuGraphExecHostNodeSetParams_params: TypeAlias = struct_cuGraphExecHostNodeSetParams_params_st
@c.record
class struct_cuGraphExecChildGraphNodeSetParams_params_st(c.Struct):
    # Parameter record for cuGraphExecChildGraphNodeSetParams.
    SIZE = 24
    hGraphExec: Annotated[CUgraphExec, 0]
    hNode: Annotated[CUgraphNode, 8]
    childGraph: Annotated[CUgraph, 16]
cuGraphExecChildGraphNodeSetParams_params: TypeAlias = struct_cuGraphExecChildGraphNodeSetParams_params_st
@c.record
class struct_cuGraphExecEventRecordNodeSetEvent_params_st(c.Struct):
    # Parameter record for cuGraphExecEventRecordNodeSetEvent.
    SIZE = 24
    hGraphExec: Annotated[CUgraphExec, 0]
    hNode: Annotated[CUgraphNode, 8]
    event: Annotated[CUevent, 16]
cuGraphExecEventRecordNodeSetEvent_params: TypeAlias = struct_cuGraphExecEventRecordNodeSetEvent_params_st
@c.record
class struct_cuGraphExecEventWaitNodeSetEvent_params_st(c.Struct):
    # Parameter record for cuGraphExecEventWaitNodeSetEvent.
    SIZE = 24
    hGraphExec: Annotated[CUgraphExec, 0]
    hNode: Annotated[CUgraphNode, 8]
    event: Annotated[CUevent, 16]
cuGraphExecEventWaitNodeSetEvent_params: TypeAlias = struct_cuGraphExecEventWaitNodeSetEvent_params_st
@c.record
class struct_cuGraphExecExternalSemaphoresSignalNodeSetParams_params_st(c.Struct):
    # Parameter record for cuGraphExecExternalSemaphoresSignalNodeSetParams.
    SIZE = 24
    hGraphExec: Annotated[CUgraphExec, 0]
    hNode: Annotated[CUgraphNode, 8]
    nodeParams: Annotated[c.POINTER[CUDA_EXT_SEM_SIGNAL_NODE_PARAMS], 16]
cuGraphExecExternalSemaphoresSignalNodeSetParams_params: TypeAlias = struct_cuGraphExecExternalSemaphoresSignalNodeSetParams_params_st
@c.record
class struct_cuGraphExecExternalSemaphoresWaitNodeSetParams_params_st(c.Struct):
    # Parameter record for cuGraphExecExternalSemaphoresWaitNodeSetParams.
    SIZE = 24
    hGraphExec: Annotated[CUgraphExec, 0]
    hNode: Annotated[CUgraphNode, 8]
    nodeParams: Annotated[c.POINTER[CUDA_EXT_SEM_WAIT_NODE_PARAMS], 16]
cuGraphExecExternalSemaphoresWaitNodeSetParams_params: TypeAlias = struct_cuGraphExecExternalSemaphoresWaitNodeSetParams_params_st
@c.record
class struct_cuGraphNodeSetEnabled_params_st(c.Struct):
    # Parameter record for cuGraphNodeSetEnabled (isEnabled passed by value).
    SIZE = 24
    hGraphExec: Annotated[CUgraphExec, 0]
    hNode: Annotated[CUgraphNode, 8]
    isEnabled: Annotated[Annotated[int, ctypes.c_uint32], 16]
cuGraphNodeSetEnabled_params: TypeAlias = struct_cuGraphNodeSetEnabled_params_st
@c.record
class struct_cuGraphNodeGetEnabled_params_st(c.Struct):
    # Parameter record for cuGraphNodeGetEnabled (isEnabled is an out pointer).
    SIZE = 24
    hGraphExec: Annotated[CUgraphExec, 0]
    hNode: Annotated[CUgraphNode, 8]
    isEnabled: Annotated[c.POINTER[Annotated[int, ctypes.c_uint32]], 16]
cuGraphNodeGetEnabled_params: TypeAlias = struct_cuGraphNodeGetEnabled_params_st
@c.record
class struct_cuGraphUpload_ptsz_params_st(c.Struct):
    # Parameter record for cuGraphUpload (_ptsz = per-thread default stream).
    SIZE = 16
    hGraphExec: Annotated[CUgraphExec, 0]
    hStream: Annotated[CUstream, 8]
cuGraphUpload_ptsz_params: TypeAlias = struct_cuGraphUpload_ptsz_params_st
# Graph launch/teardown/update records, the exec-update result enum, and
# kernel-node launch-attribute accessors.
@c.record
class struct_cuGraphLaunch_ptsz_params_st(c.Struct):
    # Parameter record for cuGraphLaunch (_ptsz = per-thread default stream).
    SIZE = 16
    hGraphExec: Annotated[CUgraphExec, 0]
    hStream: Annotated[CUstream, 8]
cuGraphLaunch_ptsz_params: TypeAlias = struct_cuGraphLaunch_ptsz_params_st
@c.record
class struct_cuGraphExecDestroy_params_st(c.Struct):
    # Parameter record for cuGraphExecDestroy.
    SIZE = 8
    hGraphExec: Annotated[CUgraphExec, 0]
cuGraphExecDestroy_params: TypeAlias = struct_cuGraphExecDestroy_params_st
@c.record
class struct_cuGraphDestroy_params_st(c.Struct):
    # Parameter record for cuGraphDestroy.
    SIZE = 8
    hGraph: Annotated[CUgraph, 0]
cuGraphDestroy_params: TypeAlias = struct_cuGraphDestroy_params_st
@c.record
class struct_cuGraphExecUpdate_v2_params_st(c.Struct):
    # Parameter record for cuGraphExecUpdate_v2.
    SIZE = 24
    hGraphExec: Annotated[CUgraphExec, 0]
    hGraph: Annotated[CUgraph, 8]
    resultInfo: Annotated[c.POINTER[CUgraphExecUpdateResultInfo], 16]
@c.record
class struct_CUgraphExecUpdateResultInfo_st(c.Struct):
    # Update outcome: status code plus the offending node/edge on failure.
    SIZE = 24
    result: Annotated[CUgraphExecUpdateResult, 0]
    errorNode: Annotated[CUgraphNode, 8]
    errorFromNode: Annotated[CUgraphNode, 16]
CUgraphExecUpdateResultInfo: TypeAlias = struct_CUgraphExecUpdateResultInfo_st
# CUgraphExecUpdateResult: status reported in the result field above.
class enum_CUgraphExecUpdateResult_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_GRAPH_EXEC_UPDATE_SUCCESS = enum_CUgraphExecUpdateResult_enum.define('CU_GRAPH_EXEC_UPDATE_SUCCESS', 0)
CU_GRAPH_EXEC_UPDATE_ERROR = enum_CUgraphExecUpdateResult_enum.define('CU_GRAPH_EXEC_UPDATE_ERROR', 1)
CU_GRAPH_EXEC_UPDATE_ERROR_TOPOLOGY_CHANGED = enum_CUgraphExecUpdateResult_enum.define('CU_GRAPH_EXEC_UPDATE_ERROR_TOPOLOGY_CHANGED', 2)
CU_GRAPH_EXEC_UPDATE_ERROR_NODE_TYPE_CHANGED = enum_CUgraphExecUpdateResult_enum.define('CU_GRAPH_EXEC_UPDATE_ERROR_NODE_TYPE_CHANGED', 3)
CU_GRAPH_EXEC_UPDATE_ERROR_FUNCTION_CHANGED = enum_CUgraphExecUpdateResult_enum.define('CU_GRAPH_EXEC_UPDATE_ERROR_FUNCTION_CHANGED', 4)
CU_GRAPH_EXEC_UPDATE_ERROR_PARAMETERS_CHANGED = enum_CUgraphExecUpdateResult_enum.define('CU_GRAPH_EXEC_UPDATE_ERROR_PARAMETERS_CHANGED', 5)
CU_GRAPH_EXEC_UPDATE_ERROR_NOT_SUPPORTED = enum_CUgraphExecUpdateResult_enum.define('CU_GRAPH_EXEC_UPDATE_ERROR_NOT_SUPPORTED', 6)
CU_GRAPH_EXEC_UPDATE_ERROR_UNSUPPORTED_FUNCTION_CHANGE = enum_CUgraphExecUpdateResult_enum.define('CU_GRAPH_EXEC_UPDATE_ERROR_UNSUPPORTED_FUNCTION_CHANGE', 7)
CU_GRAPH_EXEC_UPDATE_ERROR_ATTRIBUTES_CHANGED = enum_CUgraphExecUpdateResult_enum.define('CU_GRAPH_EXEC_UPDATE_ERROR_ATTRIBUTES_CHANGED', 8)
CUgraphExecUpdateResult: TypeAlias = enum_CUgraphExecUpdateResult_enum
cuGraphExecUpdate_v2_params: TypeAlias = struct_cuGraphExecUpdate_v2_params_st
@c.record
class struct_cuGraphKernelNodeCopyAttributes_params_st(c.Struct):
    # Parameter record for cuGraphKernelNodeCopyAttributes.
    SIZE = 16
    dst: Annotated[CUgraphNode, 0]
    src: Annotated[CUgraphNode, 8]
cuGraphKernelNodeCopyAttributes_params: TypeAlias = struct_cuGraphKernelNodeCopyAttributes_params_st
@c.record
class struct_cuGraphKernelNodeGetAttribute_params_st(c.Struct):
    # Parameter record for cuGraphKernelNodeGetAttribute.
    SIZE = 24
    hNode: Annotated[CUgraphNode, 0]
    attr: Annotated[CUkernelNodeAttrID, 8]
    value_out: Annotated[c.POINTER[CUkernelNodeAttrValue], 16]
# Kernel-node attribute ID/value are aliases of the shared launch-attribute
# enum/union defined elsewhere in this file.
CUkernelNodeAttrID: TypeAlias = enum_CUlaunchAttributeID_enum
CUkernelNodeAttrValue: TypeAlias = union_CUlaunchAttributeValue_union
cuGraphKernelNodeGetAttribute_params: TypeAlias = struct_cuGraphKernelNodeGetAttribute_params_st
@c.record
class struct_cuGraphKernelNodeSetAttribute_params_st(c.Struct):
    # Parameter record for cuGraphKernelNodeSetAttribute.
    SIZE = 24
    hNode: Annotated[CUgraphNode, 0]
    attr: Annotated[CUkernelNodeAttrID, 8]
    value: Annotated[c.POINTER[CUkernelNodeAttrValue], 16]
cuGraphKernelNodeSetAttribute_params: TypeAlias = struct_cuGraphKernelNodeSetAttribute_params_st
# Debug-dot-print and user-object lifetime records. The field named `object`
# shadows the Python builtin; it is kept verbatim to match the C field name.
@c.record
class struct_cuGraphDebugDotPrint_params_st(c.Struct):
    # Parameter record for cuGraphDebugDotPrint (path is a C string).
    SIZE = 24
    hGraph: Annotated[CUgraph, 0]
    path: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 8]
    flags: Annotated[Annotated[int, ctypes.c_uint32], 16]
cuGraphDebugDotPrint_params: TypeAlias = struct_cuGraphDebugDotPrint_params_st
@c.record
class struct_cuUserObjectCreate_params_st(c.Struct):
    # Parameter record for cuUserObjectCreate.
    SIZE = 32
    object_out: Annotated[c.POINTER[CUuserObject], 0]
    ptr: Annotated[ctypes.c_void_p, 8]
    destroy: Annotated[CUhostFn, 16]
    initialRefcount: Annotated[Annotated[int, ctypes.c_uint32], 24]
    flags: Annotated[Annotated[int, ctypes.c_uint32], 28]
# Opaque handle type backing CUuserObject (pointer to incomplete C struct).
class struct_CUuserObject_st(ctypes.Structure): pass
CUuserObject: TypeAlias = c.POINTER[struct_CUuserObject_st]
cuUserObjectCreate_params: TypeAlias = struct_cuUserObjectCreate_params_st
@c.record
class struct_cuUserObjectRetain_params_st(c.Struct):
    # Parameter record for cuUserObjectRetain.
    SIZE = 16
    object: Annotated[CUuserObject, 0]
    count: Annotated[Annotated[int, ctypes.c_uint32], 8]
cuUserObjectRetain_params: TypeAlias = struct_cuUserObjectRetain_params_st
@c.record
class struct_cuUserObjectRelease_params_st(c.Struct):
    # Parameter record for cuUserObjectRelease.
    SIZE = 16
    object: Annotated[CUuserObject, 0]
    count: Annotated[Annotated[int, ctypes.c_uint32], 8]
cuUserObjectRelease_params: TypeAlias = struct_cuUserObjectRelease_params_st
@c.record
class struct_cuGraphRetainUserObject_params_st(c.Struct):
    # Parameter record for cuGraphRetainUserObject.
    SIZE = 24
    graph: Annotated[CUgraph, 0]
    object: Annotated[CUuserObject, 8]
    count: Annotated[Annotated[int, ctypes.c_uint32], 16]
    flags: Annotated[Annotated[int, ctypes.c_uint32], 20]
cuGraphRetainUserObject_params: TypeAlias = struct_cuGraphRetainUserObject_params_st
@c.record
class struct_cuGraphReleaseUserObject_params_st(c.Struct):
    # Parameter record for cuGraphReleaseUserObject.
    SIZE = 24
    graph: Annotated[CUgraph, 0]
    object: Annotated[CUuserObject, 8]
    count: Annotated[Annotated[int, ctypes.c_uint32], 16]
cuGraphReleaseUserObject_params: TypeAlias = struct_cuGraphReleaseUserObject_params_st
@c.record
class struct_cuGraphAddNode_params_st(c.Struct):
SIZE = 40
phGraphNode: Annotated[c.POINTER[CUgraphNode], 0]
hGraph: Annotated[CUgraph, 8]
dependencies: Annotated[c.POINTER[CUgraphNode], 16]
numDependencies: Annotated[size_t, 24]
nodeParams: Annotated[c.POINTER[CUgraphNodeParams], 32]
@c.record
class struct_CUgraphNodeParams_st(c.Struct):
    # Polymorphic graph-node parameter record. The per-node-type members
    # (kernel, memcpy, memset, ...) all sit at byte offset 16, overlapping
    # each other and reserved1 — this is a flattened C union; `type` at
    # offset 0 selects which member is live.
    SIZE = 256
    type: Annotated[CUgraphNodeType, 0]
    reserved0: Annotated[c.Array[Annotated[int, ctypes.c_int32], Literal[3]], 4]
    reserved1: Annotated[c.Array[Annotated[int, ctypes.c_int64], Literal[29]], 16]
    kernel: Annotated[CUDA_KERNEL_NODE_PARAMS_v3, 16]
    memcpy: Annotated[CUDA_MEMCPY_NODE_PARAMS, 16]
    memset: Annotated[CUDA_MEMSET_NODE_PARAMS_v2, 16]
    host: Annotated[CUDA_HOST_NODE_PARAMS_v2, 16]
    graph: Annotated[CUDA_CHILD_GRAPH_NODE_PARAMS, 16]
    eventWait: Annotated[CUDA_EVENT_WAIT_NODE_PARAMS, 16]
    eventRecord: Annotated[CUDA_EVENT_RECORD_NODE_PARAMS, 16]
    extSemSignal: Annotated[CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2, 16]
    extSemWait: Annotated[CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2, 16]
    alloc: Annotated[CUDA_MEM_ALLOC_NODE_PARAMS_v2, 16]
    free: Annotated[CUDA_MEM_FREE_NODE_PARAMS, 16]
    memOp: Annotated[CUDA_BATCH_MEM_OP_NODE_PARAMS_v2, 16]
    conditional: Annotated[CUDA_CONDITIONAL_NODE_PARAMS, 16]
    reserved2: Annotated[Annotated[int, ctypes.c_int64], 248]
CUgraphNodeParams: TypeAlias = struct_CUgraphNodeParams_st
# Per-node-type parameter payloads referenced by CUgraphNodeParams above.
# Field integers are byte offsets; SIZE is the C struct size.
@c.record
class struct_CUDA_KERNEL_NODE_PARAMS_v3_st(c.Struct):
    # Kernel launch description: function handle, grid/block dimensions,
    # shared memory size, argument pointers, plus kernel/context handles.
    SIZE = 72
    func: Annotated[CUfunction, 0]
    gridDimX: Annotated[Annotated[int, ctypes.c_uint32], 8]
    gridDimY: Annotated[Annotated[int, ctypes.c_uint32], 12]
    gridDimZ: Annotated[Annotated[int, ctypes.c_uint32], 16]
    blockDimX: Annotated[Annotated[int, ctypes.c_uint32], 20]
    blockDimY: Annotated[Annotated[int, ctypes.c_uint32], 24]
    blockDimZ: Annotated[Annotated[int, ctypes.c_uint32], 28]
    sharedMemBytes: Annotated[Annotated[int, ctypes.c_uint32], 32]
    kernelParams: Annotated[c.POINTER[ctypes.c_void_p], 40]
    extra: Annotated[c.POINTER[ctypes.c_void_p], 48]
    kern: Annotated[CUkernel, 56]
    ctx: Annotated[CUcontext, 64]
CUDA_KERNEL_NODE_PARAMS_v3: TypeAlias = struct_CUDA_KERNEL_NODE_PARAMS_v3_st
@c.record
class struct_CUDA_MEMCPY_NODE_PARAMS_st(c.Struct):
    # Wraps a full CUDA_MEMCPY3D descriptor plus the context the copy runs in.
    SIZE = 216
    flags: Annotated[Annotated[int, ctypes.c_int32], 0]
    reserved: Annotated[Annotated[int, ctypes.c_int32], 4]
    copyCtx: Annotated[CUcontext, 8]
    copyParams: Annotated[CUDA_MEMCPY3D, 16]
CUDA_MEMCPY_NODE_PARAMS: TypeAlias = struct_CUDA_MEMCPY_NODE_PARAMS_st
@c.record
class struct_CUDA_MEMSET_NODE_PARAMS_v2_st(c.Struct):
    # 2D memset: destination pointer, row pitch, fill value and element
    # width, region width/height, and target context.
    SIZE = 48
    dst: Annotated[CUdeviceptr, 0]
    pitch: Annotated[size_t, 8]
    value: Annotated[Annotated[int, ctypes.c_uint32], 16]
    elementSize: Annotated[Annotated[int, ctypes.c_uint32], 20]
    width: Annotated[size_t, 24]
    height: Annotated[size_t, 32]
    ctx: Annotated[CUcontext, 40]
CUDA_MEMSET_NODE_PARAMS_v2: TypeAlias = struct_CUDA_MEMSET_NODE_PARAMS_v2_st
@c.record
class struct_CUDA_HOST_NODE_PARAMS_v2_st(c.Struct):
    # Host callback node: function pointer + opaque user data.
    SIZE = 16
    fn: Annotated[CUhostFn, 0]
    userData: Annotated[ctypes.c_void_p, 8]
CUDA_HOST_NODE_PARAMS_v2: TypeAlias = struct_CUDA_HOST_NODE_PARAMS_v2_st
@c.record
class struct_CUDA_CHILD_GRAPH_NODE_PARAMS_st(c.Struct):
    # Child-graph node: just the embedded graph handle.
    SIZE = 8
    graph: Annotated[CUgraph, 0]
CUDA_CHILD_GRAPH_NODE_PARAMS: TypeAlias = struct_CUDA_CHILD_GRAPH_NODE_PARAMS_st
# Remaining per-node-type payloads: events, external semaphores, memory
# alloc/free, batched memory ops, and conditional nodes (with their enums).
@c.record
class struct_CUDA_EVENT_WAIT_NODE_PARAMS_st(c.Struct):
    SIZE = 8
    event: Annotated[CUevent, 0]
CUDA_EVENT_WAIT_NODE_PARAMS: TypeAlias = struct_CUDA_EVENT_WAIT_NODE_PARAMS_st
@c.record
class struct_CUDA_EVENT_RECORD_NODE_PARAMS_st(c.Struct):
    # Identical layout to the event-wait record: a single event handle.
    SIZE = 8
    event: Annotated[CUevent, 0]
CUDA_EVENT_RECORD_NODE_PARAMS: TypeAlias = struct_CUDA_EVENT_RECORD_NODE_PARAMS_st
@c.record
class struct_CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2_st(c.Struct):
    # Parallel arrays of semaphore handles and per-semaphore signal params.
    SIZE = 24
    extSemArray: Annotated[c.POINTER[CUexternalSemaphore], 0]
    paramsArray: Annotated[c.POINTER[CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS], 8]
    numExtSems: Annotated[Annotated[int, ctypes.c_uint32], 16]
CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2: TypeAlias = struct_CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2_st
@c.record
class struct_CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2_st(c.Struct):
    # Same shape as the signal record, but with wait-params entries.
    SIZE = 24
    extSemArray: Annotated[c.POINTER[CUexternalSemaphore], 0]
    paramsArray: Annotated[c.POINTER[CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS], 8]
    numExtSems: Annotated[Annotated[int, ctypes.c_uint32], 16]
CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2: TypeAlias = struct_CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2_st
@c.record
class struct_CUDA_MEM_ALLOC_NODE_PARAMS_v2_st(c.Struct):
    # Pool properties + access descriptors in; `dptr` is the resulting
    # allocation written back at offset 112.
    SIZE = 120
    poolProps: Annotated[CUmemPoolProps, 0]
    accessDescs: Annotated[c.POINTER[CUmemAccessDesc], 88]
    accessDescCount: Annotated[size_t, 96]
    bytesize: Annotated[size_t, 104]
    dptr: Annotated[CUdeviceptr, 112]
CUDA_MEM_ALLOC_NODE_PARAMS_v2: TypeAlias = struct_CUDA_MEM_ALLOC_NODE_PARAMS_v2_st
@c.record
class struct_CUDA_MEM_FREE_NODE_PARAMS_st(c.Struct):
    SIZE = 8
    dptr: Annotated[CUdeviceptr, 0]
CUDA_MEM_FREE_NODE_PARAMS: TypeAlias = struct_CUDA_MEM_FREE_NODE_PARAMS_st
@c.record
class struct_CUDA_BATCH_MEM_OP_NODE_PARAMS_v2_st(c.Struct):
    SIZE = 32
    ctx: Annotated[CUcontext, 0]
    count: Annotated[Annotated[int, ctypes.c_uint32], 8]
    paramArray: Annotated[c.POINTER[CUstreamBatchMemOpParams], 16]
    flags: Annotated[Annotated[int, ctypes.c_uint32], 24]
CUDA_BATCH_MEM_OP_NODE_PARAMS_v2: TypeAlias = struct_CUDA_BATCH_MEM_OP_NODE_PARAMS_v2_st
@c.record
class struct_CUDA_CONDITIONAL_NODE_PARAMS(c.Struct):
    # Conditional node: handle + IF/WHILE/SWITCH type, body-graph output
    # array, and owning context.
    SIZE = 32
    handle: Annotated[CUgraphConditionalHandle, 0]
    type: Annotated[CUgraphConditionalNodeType, 8]
    size: Annotated[Annotated[int, ctypes.c_uint32], 12]
    phGraph_out: Annotated[c.POINTER[CUgraph], 16]
    ctx: Annotated[CUcontext, 24]
CUDA_CONDITIONAL_NODE_PARAMS: TypeAlias = struct_CUDA_CONDITIONAL_NODE_PARAMS
CUgraphConditionalHandle: TypeAlias = Annotated[int, ctypes.c_uint64]
# C enum bound as a uint32-backed Python enum; members are registered via
# .define() so the enum mirrors the C header's explicit values.
class enum_CUgraphConditionalNodeType_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_GRAPH_COND_TYPE_IF = enum_CUgraphConditionalNodeType_enum.define('CU_GRAPH_COND_TYPE_IF', 0)
CU_GRAPH_COND_TYPE_WHILE = enum_CUgraphConditionalNodeType_enum.define('CU_GRAPH_COND_TYPE_WHILE', 1)
CU_GRAPH_COND_TYPE_SWITCH = enum_CUgraphConditionalNodeType_enum.define('CU_GRAPH_COND_TYPE_SWITCH', 2)
CUgraphConditionalNodeType: TypeAlias = enum_CUgraphConditionalNodeType_enum
cuGraphAddNode_params: TypeAlias = struct_cuGraphAddNode_params_st
# Graph-node mutation / conditional-handle argument records.
@c.record
class struct_cuGraphAddNode_v2_params_st(c.Struct):
    # v2 variant of cuGraphAddNode: adds per-dependency edge data
    # (dependencyData) alongside the dependency node array.
    SIZE = 48
    phGraphNode: Annotated[c.POINTER[CUgraphNode], 0]
    hGraph: Annotated[CUgraph, 8]
    dependencies: Annotated[c.POINTER[CUgraphNode], 16]
    dependencyData: Annotated[c.POINTER[CUgraphEdgeData], 24]
    numDependencies: Annotated[size_t, 32]
    nodeParams: Annotated[c.POINTER[CUgraphNodeParams], 40]
cuGraphAddNode_v2_params: TypeAlias = struct_cuGraphAddNode_v2_params_st
@c.record
class struct_cuGraphNodeSetParams_params_st(c.Struct):
    SIZE = 16
    hNode: Annotated[CUgraphNode, 0]
    nodeParams: Annotated[c.POINTER[CUgraphNodeParams], 8]
cuGraphNodeSetParams_params: TypeAlias = struct_cuGraphNodeSetParams_params_st
@c.record
class struct_cuGraphExecNodeSetParams_params_st(c.Struct):
    # Like cuGraphNodeSetParams but targets a node inside an instantiated
    # executable graph (hGraphExec).
    SIZE = 24
    hGraphExec: Annotated[CUgraphExec, 0]
    hNode: Annotated[CUgraphNode, 8]
    nodeParams: Annotated[c.POINTER[CUgraphNodeParams], 16]
cuGraphExecNodeSetParams_params: TypeAlias = struct_cuGraphExecNodeSetParams_params_st
@c.record
class struct_cuGraphConditionalHandleCreate_params_st(c.Struct):
    SIZE = 32
    pHandle_out: Annotated[c.POINTER[CUgraphConditionalHandle], 0]
    hGraph: Annotated[CUgraph, 8]
    ctx: Annotated[CUcontext, 16]
    defaultLaunchValue: Annotated[Annotated[int, ctypes.c_uint32], 24]
    flags: Annotated[Annotated[int, ctypes.c_uint32], 28]
cuGraphConditionalHandleCreate_params: TypeAlias = struct_cuGraphConditionalHandleCreate_params_st
# Argument records for the cuOccupancy* query family. In each record the
# leading pointer field is the query's output parameter.
@c.record
class struct_cuOccupancyMaxActiveBlocksPerMultiprocessor_params_st(c.Struct):
    SIZE = 32
    numBlocks: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 0]
    func: Annotated[CUfunction, 8]
    blockSize: Annotated[Annotated[int, ctypes.c_int32], 16]
    dynamicSMemSize: Annotated[size_t, 24]
cuOccupancyMaxActiveBlocksPerMultiprocessor_params: TypeAlias = struct_cuOccupancyMaxActiveBlocksPerMultiprocessor_params_st
@c.record
class struct_cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags_params_st(c.Struct):
    # Same as above plus a trailing flags word.
    SIZE = 40
    numBlocks: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 0]
    func: Annotated[CUfunction, 8]
    blockSize: Annotated[Annotated[int, ctypes.c_int32], 16]
    dynamicSMemSize: Annotated[size_t, 24]
    flags: Annotated[Annotated[int, ctypes.c_uint32], 32]
cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags_params: TypeAlias = struct_cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags_params_st
@c.record
class struct_cuOccupancyMaxPotentialBlockSize_params_st(c.Struct):
    # blockSizeToDynamicSMemSize is a host callback mapping block size ->
    # dynamic shared-memory bytes (see CUoccupancyB2DSize below).
    SIZE = 48
    minGridSize: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 0]
    blockSize: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 8]
    func: Annotated[CUfunction, 16]
    blockSizeToDynamicSMemSize: Annotated[CUoccupancyB2DSize, 24]
    dynamicSMemSize: Annotated[size_t, 32]
    blockSizeLimit: Annotated[Annotated[int, ctypes.c_int32], 40]
# C function-pointer type: takes an int32 (block size), returns a uint64.
CUoccupancyB2DSize: TypeAlias = c.CFUNCTYPE[Annotated[int, ctypes.c_uint64], [Annotated[int, ctypes.c_int32]]]
cuOccupancyMaxPotentialBlockSize_params: TypeAlias = struct_cuOccupancyMaxPotentialBlockSize_params_st
@c.record
class struct_cuOccupancyMaxPotentialBlockSizeWithFlags_params_st(c.Struct):
    SIZE = 48
    minGridSize: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 0]
    blockSize: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 8]
    func: Annotated[CUfunction, 16]
    blockSizeToDynamicSMemSize: Annotated[CUoccupancyB2DSize, 24]
    dynamicSMemSize: Annotated[size_t, 32]
    blockSizeLimit: Annotated[Annotated[int, ctypes.c_int32], 40]
    flags: Annotated[Annotated[int, ctypes.c_uint32], 44]
cuOccupancyMaxPotentialBlockSizeWithFlags_params: TypeAlias = struct_cuOccupancyMaxPotentialBlockSizeWithFlags_params_st
@c.record
class struct_cuOccupancyAvailableDynamicSMemPerBlock_params_st(c.Struct):
    SIZE = 24
    dynamicSmemSize: Annotated[c.POINTER[size_t], 0]
    func: Annotated[CUfunction, 8]
    numBlocks: Annotated[Annotated[int, ctypes.c_int32], 16]
    blockSize: Annotated[Annotated[int, ctypes.c_int32], 20]
cuOccupancyAvailableDynamicSMemPerBlock_params: TypeAlias = struct_cuOccupancyAvailableDynamicSMemPerBlock_params_st
@c.record
class struct_cuOccupancyMaxPotentialClusterSize_params_st(c.Struct):
    SIZE = 24
    clusterSize: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 0]
    func: Annotated[CUfunction, 8]
    config: Annotated[c.POINTER[CUlaunchConfig], 16]
cuOccupancyMaxPotentialClusterSize_params: TypeAlias = struct_cuOccupancyMaxPotentialClusterSize_params_st
@c.record
class struct_cuOccupancyMaxActiveClusters_params_st(c.Struct):
    # Same layout as the cluster-size query; output is numClusters.
    SIZE = 24
    numClusters: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 0]
    func: Annotated[CUfunction, 8]
    config: Annotated[c.POINTER[CUlaunchConfig], 16]
cuOccupancyMaxActiveClusters_params: TypeAlias = struct_cuOccupancyMaxActiveClusters_params_st
# Argument records for the legacy texture-reference (cuTexRef*) setter API.
@c.record
class struct_cuTexRefSetArray_params_st(c.Struct):
    SIZE = 24
    hTexRef: Annotated[CUtexref, 0]
    hArray: Annotated[CUarray, 8]
    Flags: Annotated[Annotated[int, ctypes.c_uint32], 16]
cuTexRefSetArray_params: TypeAlias = struct_cuTexRefSetArray_params_st
@c.record
class struct_cuTexRefSetMipmappedArray_params_st(c.Struct):
    SIZE = 24
    hTexRef: Annotated[CUtexref, 0]
    hMipmappedArray: Annotated[CUmipmappedArray, 8]
    Flags: Annotated[Annotated[int, ctypes.c_uint32], 16]
cuTexRefSetMipmappedArray_params: TypeAlias = struct_cuTexRefSetMipmappedArray_params_st
@c.record
class struct_cuTexRefSetAddress_v2_params_st(c.Struct):
    # ByteOffset is an output pointer; dptr/bytes describe the linear range.
    SIZE = 32
    ByteOffset: Annotated[c.POINTER[size_t], 0]
    hTexRef: Annotated[CUtexref, 8]
    dptr: Annotated[CUdeviceptr, 16]
    bytes: Annotated[size_t, 24]
cuTexRefSetAddress_v2_params: TypeAlias = struct_cuTexRefSetAddress_v2_params_st
@c.record
class struct_cuTexRefSetAddress2D_v3_params_st(c.Struct):
    SIZE = 32
    hTexRef: Annotated[CUtexref, 0]
    desc: Annotated[c.POINTER[CUDA_ARRAY_DESCRIPTOR], 8]
    dptr: Annotated[CUdeviceptr, 16]
    Pitch: Annotated[size_t, 24]
cuTexRefSetAddress2D_v3_params: TypeAlias = struct_cuTexRefSetAddress2D_v3_params_st
@c.record
class struct_cuTexRefSetFormat_params_st(c.Struct):
    SIZE = 16
    hTexRef: Annotated[CUtexref, 0]
    fmt: Annotated[CUarray_format, 8]
    NumPackedComponents: Annotated[Annotated[int, ctypes.c_int32], 12]
cuTexRefSetFormat_params: TypeAlias = struct_cuTexRefSetFormat_params_st
@c.record
class struct_cuTexRefSetAddressMode_params_st(c.Struct):
    # `dim` selects which texture dimension the addressing mode applies to.
    SIZE = 16
    hTexRef: Annotated[CUtexref, 0]
    dim: Annotated[Annotated[int, ctypes.c_int32], 8]
    am: Annotated[CUaddress_mode, 12]
# Texture addressing modes (uint32-backed C enum).
class enum_CUaddress_mode_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_TR_ADDRESS_MODE_WRAP = enum_CUaddress_mode_enum.define('CU_TR_ADDRESS_MODE_WRAP', 0)
CU_TR_ADDRESS_MODE_CLAMP = enum_CUaddress_mode_enum.define('CU_TR_ADDRESS_MODE_CLAMP', 1)
CU_TR_ADDRESS_MODE_MIRROR = enum_CUaddress_mode_enum.define('CU_TR_ADDRESS_MODE_MIRROR', 2)
CU_TR_ADDRESS_MODE_BORDER = enum_CUaddress_mode_enum.define('CU_TR_ADDRESS_MODE_BORDER', 3)
CUaddress_mode: TypeAlias = enum_CUaddress_mode_enum
cuTexRefSetAddressMode_params: TypeAlias = struct_cuTexRefSetAddressMode_params_st
# More cuTexRef* setter records: filtering, mipmap controls, anisotropy,
# border color, and flags.
@c.record
class struct_cuTexRefSetFilterMode_params_st(c.Struct):
    SIZE = 16
    hTexRef: Annotated[CUtexref, 0]
    fm: Annotated[CUfilter_mode, 8]
# Texture filtering modes (uint32-backed C enum).
class enum_CUfilter_mode_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_TR_FILTER_MODE_POINT = enum_CUfilter_mode_enum.define('CU_TR_FILTER_MODE_POINT', 0)
CU_TR_FILTER_MODE_LINEAR = enum_CUfilter_mode_enum.define('CU_TR_FILTER_MODE_LINEAR', 1)
CUfilter_mode: TypeAlias = enum_CUfilter_mode_enum
cuTexRefSetFilterMode_params: TypeAlias = struct_cuTexRefSetFilterMode_params_st
@c.record
class struct_cuTexRefSetMipmapFilterMode_params_st(c.Struct):
    # Same layout as the plain filter-mode setter, applied to mipmap levels.
    SIZE = 16
    hTexRef: Annotated[CUtexref, 0]
    fm: Annotated[CUfilter_mode, 8]
cuTexRefSetMipmapFilterMode_params: TypeAlias = struct_cuTexRefSetMipmapFilterMode_params_st
@c.record
class struct_cuTexRefSetMipmapLevelBias_params_st(c.Struct):
    SIZE = 16
    hTexRef: Annotated[CUtexref, 0]
    bias: Annotated[Annotated[float, ctypes.c_float], 8]
cuTexRefSetMipmapLevelBias_params: TypeAlias = struct_cuTexRefSetMipmapLevelBias_params_st
@c.record
class struct_cuTexRefSetMipmapLevelClamp_params_st(c.Struct):
    SIZE = 16
    hTexRef: Annotated[CUtexref, 0]
    minMipmapLevelClamp: Annotated[Annotated[float, ctypes.c_float], 8]
    maxMipmapLevelClamp: Annotated[Annotated[float, ctypes.c_float], 12]
cuTexRefSetMipmapLevelClamp_params: TypeAlias = struct_cuTexRefSetMipmapLevelClamp_params_st
@c.record
class struct_cuTexRefSetMaxAnisotropy_params_st(c.Struct):
    SIZE = 16
    hTexRef: Annotated[CUtexref, 0]
    maxAniso: Annotated[Annotated[int, ctypes.c_uint32], 8]
cuTexRefSetMaxAnisotropy_params: TypeAlias = struct_cuTexRefSetMaxAnisotropy_params_st
@c.record
class struct_cuTexRefSetBorderColor_params_st(c.Struct):
    # pBorderColor points at the caller's float color components.
    SIZE = 16
    hTexRef: Annotated[CUtexref, 0]
    pBorderColor: Annotated[c.POINTER[Annotated[float, ctypes.c_float]], 8]
cuTexRefSetBorderColor_params: TypeAlias = struct_cuTexRefSetBorderColor_params_st
@c.record
class struct_cuTexRefSetFlags_params_st(c.Struct):
    SIZE = 16
    hTexRef: Annotated[CUtexref, 0]
    Flags: Annotated[Annotated[int, ctypes.c_uint32], 8]
cuTexRefSetFlags_params: TypeAlias = struct_cuTexRefSetFlags_params_st
# cuTexRef* getter records (first field is the output pointer), plus the
# deprecated create/destroy records.
@c.record
class struct_cuTexRefGetAddress_v2_params_st(c.Struct):
    SIZE = 16
    pdptr: Annotated[c.POINTER[CUdeviceptr], 0]
    hTexRef: Annotated[CUtexref, 8]
cuTexRefGetAddress_v2_params: TypeAlias = struct_cuTexRefGetAddress_v2_params_st
@c.record
class struct_cuTexRefGetArray_params_st(c.Struct):
    SIZE = 16
    phArray: Annotated[c.POINTER[CUarray], 0]
    hTexRef: Annotated[CUtexref, 8]
cuTexRefGetArray_params: TypeAlias = struct_cuTexRefGetArray_params_st
@c.record
class struct_cuTexRefGetMipmappedArray_params_st(c.Struct):
    SIZE = 16
    phMipmappedArray: Annotated[c.POINTER[CUmipmappedArray], 0]
    hTexRef: Annotated[CUtexref, 8]
cuTexRefGetMipmappedArray_params: TypeAlias = struct_cuTexRefGetMipmappedArray_params_st
@c.record
class struct_cuTexRefGetAddressMode_params_st(c.Struct):
    SIZE = 24
    pam: Annotated[c.POINTER[CUaddress_mode], 0]
    hTexRef: Annotated[CUtexref, 8]
    dim: Annotated[Annotated[int, ctypes.c_int32], 16]
cuTexRefGetAddressMode_params: TypeAlias = struct_cuTexRefGetAddressMode_params_st
@c.record
class struct_cuTexRefGetFilterMode_params_st(c.Struct):
    SIZE = 16
    pfm: Annotated[c.POINTER[CUfilter_mode], 0]
    hTexRef: Annotated[CUtexref, 8]
cuTexRefGetFilterMode_params: TypeAlias = struct_cuTexRefGetFilterMode_params_st
@c.record
class struct_cuTexRefGetFormat_params_st(c.Struct):
    # Two output pointers: format and channel count.
    SIZE = 24
    pFormat: Annotated[c.POINTER[CUarray_format], 0]
    pNumChannels: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 8]
    hTexRef: Annotated[CUtexref, 16]
cuTexRefGetFormat_params: TypeAlias = struct_cuTexRefGetFormat_params_st
@c.record
class struct_cuTexRefGetMipmapFilterMode_params_st(c.Struct):
    SIZE = 16
    pfm: Annotated[c.POINTER[CUfilter_mode], 0]
    hTexRef: Annotated[CUtexref, 8]
cuTexRefGetMipmapFilterMode_params: TypeAlias = struct_cuTexRefGetMipmapFilterMode_params_st
@c.record
class struct_cuTexRefGetMipmapLevelBias_params_st(c.Struct):
    SIZE = 16
    pbias: Annotated[c.POINTER[Annotated[float, ctypes.c_float]], 0]
    hTexRef: Annotated[CUtexref, 8]
cuTexRefGetMipmapLevelBias_params: TypeAlias = struct_cuTexRefGetMipmapLevelBias_params_st
@c.record
class struct_cuTexRefGetMipmapLevelClamp_params_st(c.Struct):
    SIZE = 24
    pminMipmapLevelClamp: Annotated[c.POINTER[Annotated[float, ctypes.c_float]], 0]
    pmaxMipmapLevelClamp: Annotated[c.POINTER[Annotated[float, ctypes.c_float]], 8]
    hTexRef: Annotated[CUtexref, 16]
cuTexRefGetMipmapLevelClamp_params: TypeAlias = struct_cuTexRefGetMipmapLevelClamp_params_st
@c.record
class struct_cuTexRefGetMaxAnisotropy_params_st(c.Struct):
    SIZE = 16
    pmaxAniso: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 0]
    hTexRef: Annotated[CUtexref, 8]
cuTexRefGetMaxAnisotropy_params: TypeAlias = struct_cuTexRefGetMaxAnisotropy_params_st
@c.record
class struct_cuTexRefGetBorderColor_params_st(c.Struct):
    SIZE = 16
    pBorderColor: Annotated[c.POINTER[Annotated[float, ctypes.c_float]], 0]
    hTexRef: Annotated[CUtexref, 8]
cuTexRefGetBorderColor_params: TypeAlias = struct_cuTexRefGetBorderColor_params_st
@c.record
class struct_cuTexRefGetFlags_params_st(c.Struct):
    SIZE = 16
    pFlags: Annotated[c.POINTER[Annotated[int, ctypes.c_uint32]], 0]
    hTexRef: Annotated[CUtexref, 8]
cuTexRefGetFlags_params: TypeAlias = struct_cuTexRefGetFlags_params_st
@c.record
class struct_cuTexRefCreate_params_st(c.Struct):
    SIZE = 8
    pTexRef: Annotated[c.POINTER[CUtexref], 0]
cuTexRefCreate_params: TypeAlias = struct_cuTexRefCreate_params_st
@c.record
class struct_cuTexRefDestroy_params_st(c.Struct):
    SIZE = 8
    hTexRef: Annotated[CUtexref, 0]
cuTexRefDestroy_params: TypeAlias = struct_cuTexRefDestroy_params_st
# Surface-reference set/get records.
@c.record
class struct_cuSurfRefSetArray_params_st(c.Struct):
    SIZE = 24
    hSurfRef: Annotated[CUsurfref, 0]
    hArray: Annotated[CUarray, 8]
    Flags: Annotated[Annotated[int, ctypes.c_uint32], 16]
cuSurfRefSetArray_params: TypeAlias = struct_cuSurfRefSetArray_params_st
@c.record
class struct_cuSurfRefGetArray_params_st(c.Struct):
    SIZE = 16
    phArray: Annotated[c.POINTER[CUarray], 0]
    hSurfRef: Annotated[CUsurfref, 8]
cuSurfRefGetArray_params: TypeAlias = struct_cuSurfRefGetArray_params_st
@c.record
class struct_cuTexObjectCreate_params_st(c.Struct):
    # Argument record mirroring cuTexObjectCreate(pTexObject, pResDesc,
    # pTexDesc, pResViewDesc).
    SIZE = 32
    pTexObject: Annotated[c.POINTER[CUtexObject], 0]
    pResDesc: Annotated[c.POINTER[CUDA_RESOURCE_DESC], 8]
    pTexDesc: Annotated[c.POINTER[CUDA_TEXTURE_DESC], 16]
    pResViewDesc: Annotated[c.POINTER[CUDA_RESOURCE_VIEW_DESC], 24]
# Texture objects are opaque 64-bit handles.
CUtexObject: TypeAlias = Annotated[int, ctypes.c_uint64]
@c.record
class struct_CUDA_RESOURCE_DESC_st(c.Struct):
    # Resource descriptor: resType selects which member of the embedded
    # union `res` is live.
    SIZE = 144
    resType: Annotated[CUresourcetype, 0]
    res: Annotated[struct_CUDA_RESOURCE_DESC_st_res, 8]
    flags: Annotated[Annotated[int, ctypes.c_uint32], 136]
CUDA_RESOURCE_DESC: TypeAlias = struct_CUDA_RESOURCE_DESC_st
@c.record
class struct_CUDA_RESOURCE_DESC_st_res(c.Struct):
    # Flattened C union: all members share offset 0; SIZE is the size of
    # the largest member (the 128-byte reserved block).
    SIZE = 128
    array: Annotated[struct_CUDA_RESOURCE_DESC_st_res_array, 0]
    mipmap: Annotated[struct_CUDA_RESOURCE_DESC_st_res_mipmap, 0]
    linear: Annotated[struct_CUDA_RESOURCE_DESC_st_res_linear, 0]
    pitch2D: Annotated[struct_CUDA_RESOURCE_DESC_st_res_pitch2D, 0]
    reserved: Annotated[struct_CUDA_RESOURCE_DESC_st_res_reserved, 0]
@c.record
class struct_CUDA_RESOURCE_DESC_st_res_array(c.Struct):
    SIZE = 8
    hArray: Annotated[CUarray, 0]
@c.record
class struct_CUDA_RESOURCE_DESC_st_res_mipmap(c.Struct):
    SIZE = 8
    hMipmappedArray: Annotated[CUmipmappedArray, 0]
@c.record
class struct_CUDA_RESOURCE_DESC_st_res_linear(c.Struct):
    # Linear-memory resource: base pointer, element format/channels, size.
    SIZE = 24
    devPtr: Annotated[CUdeviceptr, 0]
    format: Annotated[CUarray_format, 8]
    numChannels: Annotated[Annotated[int, ctypes.c_uint32], 12]
    sizeInBytes: Annotated[size_t, 16]
@c.record
class struct_CUDA_RESOURCE_DESC_st_res_pitch2D(c.Struct):
    # Pitched 2D resource: adds width/height/pitch to the linear layout.
    SIZE = 40
    devPtr: Annotated[CUdeviceptr, 0]
    format: Annotated[CUarray_format, 8]
    numChannels: Annotated[Annotated[int, ctypes.c_uint32], 12]
    width: Annotated[size_t, 16]
    height: Annotated[size_t, 24]
    pitchInBytes: Annotated[size_t, 32]
@c.record
class struct_CUDA_RESOURCE_DESC_st_res_reserved(c.Struct):
    SIZE = 128
    reserved: Annotated[c.Array[Annotated[int, ctypes.c_int32], Literal[32]], 0]
@c.record
class struct_CUDA_TEXTURE_DESC_st(c.Struct):
    # Texture sampling description: per-dimension addressing, filtering,
    # mipmap controls, and border color. The gap between offsets 12 and 24
    # (filterMode -> mipmapFilterMode via flags/maxAnisotropy) matches the
    # C header's field packing.
    SIZE = 104
    addressMode: Annotated[c.Array[CUaddress_mode, Literal[3]], 0]
    filterMode: Annotated[CUfilter_mode, 12]
    flags: Annotated[Annotated[int, ctypes.c_uint32], 16]
    maxAnisotropy: Annotated[Annotated[int, ctypes.c_uint32], 20]
    mipmapFilterMode: Annotated[CUfilter_mode, 24]
    mipmapLevelBias: Annotated[Annotated[float, ctypes.c_float], 28]
    minMipmapLevelClamp: Annotated[Annotated[float, ctypes.c_float], 32]
    maxMipmapLevelClamp: Annotated[Annotated[float, ctypes.c_float], 36]
    borderColor: Annotated[c.Array[Annotated[float, ctypes.c_float], Literal[4]], 40]
    reserved: Annotated[c.Array[Annotated[int, ctypes.c_int32], Literal[12]], 56]
CUDA_TEXTURE_DESC: TypeAlias = struct_CUDA_TEXTURE_DESC_st
@c.record
class struct_CUDA_RESOURCE_VIEW_DESC_st(c.Struct):
    # Alternate view of a resource: reinterpreted format, extent, and the
    # mipmap-level / layer window the view exposes.
    SIZE = 112
    format: Annotated[CUresourceViewFormat, 0]
    width: Annotated[size_t, 8]
    height: Annotated[size_t, 16]
    depth: Annotated[size_t, 24]
    firstMipmapLevel: Annotated[Annotated[int, ctypes.c_uint32], 32]
    lastMipmapLevel: Annotated[Annotated[int, ctypes.c_uint32], 36]
    firstLayer: Annotated[Annotated[int, ctypes.c_uint32], 40]
    lastLayer: Annotated[Annotated[int, ctypes.c_uint32], 44]
    reserved: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[16]], 48]
CUDA_RESOURCE_VIEW_DESC: TypeAlias = struct_CUDA_RESOURCE_VIEW_DESC_st
# Resource-view element formats (uint32-backed C enum). Naming pattern:
# <KIND>_<components>X<bits-per-component>, plus block-compressed (BC*)
# variants at the end.
class enum_CUresourceViewFormat_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_RES_VIEW_FORMAT_NONE = enum_CUresourceViewFormat_enum.define('CU_RES_VIEW_FORMAT_NONE', 0)
CU_RES_VIEW_FORMAT_UINT_1X8 = enum_CUresourceViewFormat_enum.define('CU_RES_VIEW_FORMAT_UINT_1X8', 1)
CU_RES_VIEW_FORMAT_UINT_2X8 = enum_CUresourceViewFormat_enum.define('CU_RES_VIEW_FORMAT_UINT_2X8', 2)
CU_RES_VIEW_FORMAT_UINT_4X8 = enum_CUresourceViewFormat_enum.define('CU_RES_VIEW_FORMAT_UINT_4X8', 3)
CU_RES_VIEW_FORMAT_SINT_1X8 = enum_CUresourceViewFormat_enum.define('CU_RES_VIEW_FORMAT_SINT_1X8', 4)
CU_RES_VIEW_FORMAT_SINT_2X8 = enum_CUresourceViewFormat_enum.define('CU_RES_VIEW_FORMAT_SINT_2X8', 5)
CU_RES_VIEW_FORMAT_SINT_4X8 = enum_CUresourceViewFormat_enum.define('CU_RES_VIEW_FORMAT_SINT_4X8', 6)
CU_RES_VIEW_FORMAT_UINT_1X16 = enum_CUresourceViewFormat_enum.define('CU_RES_VIEW_FORMAT_UINT_1X16', 7)
CU_RES_VIEW_FORMAT_UINT_2X16 = enum_CUresourceViewFormat_enum.define('CU_RES_VIEW_FORMAT_UINT_2X16', 8)
CU_RES_VIEW_FORMAT_UINT_4X16 = enum_CUresourceViewFormat_enum.define('CU_RES_VIEW_FORMAT_UINT_4X16', 9)
CU_RES_VIEW_FORMAT_SINT_1X16 = enum_CUresourceViewFormat_enum.define('CU_RES_VIEW_FORMAT_SINT_1X16', 10)
CU_RES_VIEW_FORMAT_SINT_2X16 = enum_CUresourceViewFormat_enum.define('CU_RES_VIEW_FORMAT_SINT_2X16', 11)
CU_RES_VIEW_FORMAT_SINT_4X16 = enum_CUresourceViewFormat_enum.define('CU_RES_VIEW_FORMAT_SINT_4X16', 12)
CU_RES_VIEW_FORMAT_UINT_1X32 = enum_CUresourceViewFormat_enum.define('CU_RES_VIEW_FORMAT_UINT_1X32', 13)
CU_RES_VIEW_FORMAT_UINT_2X32 = enum_CUresourceViewFormat_enum.define('CU_RES_VIEW_FORMAT_UINT_2X32', 14)
CU_RES_VIEW_FORMAT_UINT_4X32 = enum_CUresourceViewFormat_enum.define('CU_RES_VIEW_FORMAT_UINT_4X32', 15)
CU_RES_VIEW_FORMAT_SINT_1X32 = enum_CUresourceViewFormat_enum.define('CU_RES_VIEW_FORMAT_SINT_1X32', 16)
CU_RES_VIEW_FORMAT_SINT_2X32 = enum_CUresourceViewFormat_enum.define('CU_RES_VIEW_FORMAT_SINT_2X32', 17)
CU_RES_VIEW_FORMAT_SINT_4X32 = enum_CUresourceViewFormat_enum.define('CU_RES_VIEW_FORMAT_SINT_4X32', 18)
CU_RES_VIEW_FORMAT_FLOAT_1X16 = enum_CUresourceViewFormat_enum.define('CU_RES_VIEW_FORMAT_FLOAT_1X16', 19)
CU_RES_VIEW_FORMAT_FLOAT_2X16 = enum_CUresourceViewFormat_enum.define('CU_RES_VIEW_FORMAT_FLOAT_2X16', 20)
CU_RES_VIEW_FORMAT_FLOAT_4X16 = enum_CUresourceViewFormat_enum.define('CU_RES_VIEW_FORMAT_FLOAT_4X16', 21)
CU_RES_VIEW_FORMAT_FLOAT_1X32 = enum_CUresourceViewFormat_enum.define('CU_RES_VIEW_FORMAT_FLOAT_1X32', 22)
CU_RES_VIEW_FORMAT_FLOAT_2X32 = enum_CUresourceViewFormat_enum.define('CU_RES_VIEW_FORMAT_FLOAT_2X32', 23)
CU_RES_VIEW_FORMAT_FLOAT_4X32 = enum_CUresourceViewFormat_enum.define('CU_RES_VIEW_FORMAT_FLOAT_4X32', 24)
CU_RES_VIEW_FORMAT_UNSIGNED_BC1 = enum_CUresourceViewFormat_enum.define('CU_RES_VIEW_FORMAT_UNSIGNED_BC1', 25)
CU_RES_VIEW_FORMAT_UNSIGNED_BC2 = enum_CUresourceViewFormat_enum.define('CU_RES_VIEW_FORMAT_UNSIGNED_BC2', 26)
CU_RES_VIEW_FORMAT_UNSIGNED_BC3 = enum_CUresourceViewFormat_enum.define('CU_RES_VIEW_FORMAT_UNSIGNED_BC3', 27)
CU_RES_VIEW_FORMAT_UNSIGNED_BC4 = enum_CUresourceViewFormat_enum.define('CU_RES_VIEW_FORMAT_UNSIGNED_BC4', 28)
CU_RES_VIEW_FORMAT_SIGNED_BC4 = enum_CUresourceViewFormat_enum.define('CU_RES_VIEW_FORMAT_SIGNED_BC4', 29)
CU_RES_VIEW_FORMAT_UNSIGNED_BC5 = enum_CUresourceViewFormat_enum.define('CU_RES_VIEW_FORMAT_UNSIGNED_BC5', 30)
CU_RES_VIEW_FORMAT_SIGNED_BC5 = enum_CUresourceViewFormat_enum.define('CU_RES_VIEW_FORMAT_SIGNED_BC5', 31)
CU_RES_VIEW_FORMAT_UNSIGNED_BC6H = enum_CUresourceViewFormat_enum.define('CU_RES_VIEW_FORMAT_UNSIGNED_BC6H', 32)
CU_RES_VIEW_FORMAT_SIGNED_BC6H = enum_CUresourceViewFormat_enum.define('CU_RES_VIEW_FORMAT_SIGNED_BC6H', 33)
CU_RES_VIEW_FORMAT_UNSIGNED_BC7 = enum_CUresourceViewFormat_enum.define('CU_RES_VIEW_FORMAT_UNSIGNED_BC7', 34)
CUresourceViewFormat: TypeAlias = enum_CUresourceViewFormat_enum
cuTexObjectCreate_params: TypeAlias = struct_cuTexObjectCreate_params_st
# Texture-object and surface-object lifecycle/query records.
@c.record
class struct_cuTexObjectDestroy_params_st(c.Struct):
    SIZE = 8
    texObject: Annotated[CUtexObject, 0]
cuTexObjectDestroy_params: TypeAlias = struct_cuTexObjectDestroy_params_st
@c.record
class struct_cuTexObjectGetResourceDesc_params_st(c.Struct):
    SIZE = 16
    pResDesc: Annotated[c.POINTER[CUDA_RESOURCE_DESC], 0]
    texObject: Annotated[CUtexObject, 8]
cuTexObjectGetResourceDesc_params: TypeAlias = struct_cuTexObjectGetResourceDesc_params_st
@c.record
class struct_cuTexObjectGetTextureDesc_params_st(c.Struct):
    SIZE = 16
    pTexDesc: Annotated[c.POINTER[CUDA_TEXTURE_DESC], 0]
    texObject: Annotated[CUtexObject, 8]
cuTexObjectGetTextureDesc_params: TypeAlias = struct_cuTexObjectGetTextureDesc_params_st
@c.record
class struct_cuTexObjectGetResourceViewDesc_params_st(c.Struct):
    SIZE = 16
    pResViewDesc: Annotated[c.POINTER[CUDA_RESOURCE_VIEW_DESC], 0]
    texObject: Annotated[CUtexObject, 8]
cuTexObjectGetResourceViewDesc_params: TypeAlias = struct_cuTexObjectGetResourceViewDesc_params_st
@c.record
class struct_cuSurfObjectCreate_params_st(c.Struct):
    SIZE = 16
    pSurfObject: Annotated[c.POINTER[CUsurfObject], 0]
    pResDesc: Annotated[c.POINTER[CUDA_RESOURCE_DESC], 8]
# Surface objects, like texture objects, are opaque 64-bit handles.
CUsurfObject: TypeAlias = Annotated[int, ctypes.c_uint64]
cuSurfObjectCreate_params: TypeAlias = struct_cuSurfObjectCreate_params_st
@c.record
class struct_cuSurfObjectDestroy_params_st(c.Struct):
    SIZE = 8
    surfObject: Annotated[CUsurfObject, 0]
cuSurfObjectDestroy_params: TypeAlias = struct_cuSurfObjectDestroy_params_st
@c.record
class struct_cuSurfObjectGetResourceDesc_params_st(c.Struct):
    SIZE = 16
    pResDesc: Annotated[c.POINTER[CUDA_RESOURCE_DESC], 0]
    surfObject: Annotated[CUsurfObject, 8]
cuSurfObjectGetResourceDesc_params: TypeAlias = struct_cuSurfObjectGetResourceDesc_params_st
@c.record
class struct_cuTensorMapEncodeTiled_params_st(c.Struct):
SIZE = 72
tensorMap: Annotated[c.POINTER[CUtensorMap], 0]
tensorDataType: Annotated[CUtensorMapDataType, 8]
tensorRank: Annotated[cuuint32_t, 12]
globalAddress: Annotated[ctypes.c_void_p, 16]
globalDim: Annotated[c.POINTER[cuuint64_t], 24]
globalStrides: Annotated[c.POINTER[cuuint64_t], 32]
boxDim: Annotated[c.POINTER[cuuint32_t], 40]
elementStrides: Annotated[c.POINTER[cuuint32_t], 48]
interleave: Annotated[CUtensorMapInterleave, 56]
swizzle: Annotated[CUtensorMapSwizzle, 60]
l2Promotion: Annotated[CUtensorMapL2promotion, 64]
oobFill: Annotated[CUtensorMapFloatOOBfill, 68]
@c.record
class struct_CUtensorMap_st(c.Struct):
SIZE = 128
opaque: Annotated[c.Array[cuuint64_t, Literal[16]], 0]
CUtensorMap: TypeAlias = struct_CUtensorMap_st
class enum_CUtensorMapDataType_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_TENSOR_MAP_DATA_TYPE_UINT8 = enum_CUtensorMapDataType_enum.define('CU_TENSOR_MAP_DATA_TYPE_UINT8', 0)
CU_TENSOR_MAP_DATA_TYPE_UINT16 = enum_CUtensorMapDataType_enum.define('CU_TENSOR_MAP_DATA_TYPE_UINT16', 1)
CU_TENSOR_MAP_DATA_TYPE_UINT32 = enum_CUtensorMapDataType_enum.define('CU_TENSOR_MAP_DATA_TYPE_UINT32', 2)
CU_TENSOR_MAP_DATA_TYPE_INT32 = enum_CUtensorMapDataType_enum.define('CU_TENSOR_MAP_DATA_TYPE_INT32', 3)
CU_TENSOR_MAP_DATA_TYPE_UINT64 = enum_CUtensorMapDataType_enum.define('CU_TENSOR_MAP_DATA_TYPE_UINT64', 4)
CU_TENSOR_MAP_DATA_TYPE_INT64 = enum_CUtensorMapDataType_enum.define('CU_TENSOR_MAP_DATA_TYPE_INT64', 5)
CU_TENSOR_MAP_DATA_TYPE_FLOAT16 = enum_CUtensorMapDataType_enum.define('CU_TENSOR_MAP_DATA_TYPE_FLOAT16', 6)
CU_TENSOR_MAP_DATA_TYPE_FLOAT32 = enum_CUtensorMapDataType_enum.define('CU_TENSOR_MAP_DATA_TYPE_FLOAT32', 7)
CU_TENSOR_MAP_DATA_TYPE_FLOAT64 = enum_CUtensorMapDataType_enum.define('CU_TENSOR_MAP_DATA_TYPE_FLOAT64', 8)
CU_TENSOR_MAP_DATA_TYPE_BFLOAT16 = enum_CUtensorMapDataType_enum.define('CU_TENSOR_MAP_DATA_TYPE_BFLOAT16', 9)
CU_TENSOR_MAP_DATA_TYPE_FLOAT32_FTZ = enum_CUtensorMapDataType_enum.define('CU_TENSOR_MAP_DATA_TYPE_FLOAT32_FTZ', 10)
# --- CUtensorMap configuration enums (auto-generated ctypes bindings) ---
# Trailing value definitions for enum_CUtensorMapDataType_enum; the enum
# class statement itself appears earlier in the file.
CU_TENSOR_MAP_DATA_TYPE_TFLOAT32 = enum_CUtensorMapDataType_enum.define('CU_TENSOR_MAP_DATA_TYPE_TFLOAT32', 11)
CU_TENSOR_MAP_DATA_TYPE_TFLOAT32_FTZ = enum_CUtensorMapDataType_enum.define('CU_TENSOR_MAP_DATA_TYPE_TFLOAT32_FTZ', 12)
CU_TENSOR_MAP_DATA_TYPE_16U4_ALIGN8B = enum_CUtensorMapDataType_enum.define('CU_TENSOR_MAP_DATA_TYPE_16U4_ALIGN8B', 13)
CU_TENSOR_MAP_DATA_TYPE_16U4_ALIGN16B = enum_CUtensorMapDataType_enum.define('CU_TENSOR_MAP_DATA_TYPE_16U4_ALIGN16B', 14)
CU_TENSOR_MAP_DATA_TYPE_16U6_ALIGN16B = enum_CUtensorMapDataType_enum.define('CU_TENSOR_MAP_DATA_TYPE_16U6_ALIGN16B', 15)
CUtensorMapDataType: TypeAlias = enum_CUtensorMapDataType_enum
# C enum CUtensorMapInterleave (uint32-backed): interleave layout selector.
class enum_CUtensorMapInterleave_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_TENSOR_MAP_INTERLEAVE_NONE = enum_CUtensorMapInterleave_enum.define('CU_TENSOR_MAP_INTERLEAVE_NONE', 0)
CU_TENSOR_MAP_INTERLEAVE_16B = enum_CUtensorMapInterleave_enum.define('CU_TENSOR_MAP_INTERLEAVE_16B', 1)
CU_TENSOR_MAP_INTERLEAVE_32B = enum_CUtensorMapInterleave_enum.define('CU_TENSOR_MAP_INTERLEAVE_32B', 2)
CUtensorMapInterleave: TypeAlias = enum_CUtensorMapInterleave_enum
# C enum CUtensorMapSwizzle (uint32-backed): shared-memory swizzle mode selector.
class enum_CUtensorMapSwizzle_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_TENSOR_MAP_SWIZZLE_NONE = enum_CUtensorMapSwizzle_enum.define('CU_TENSOR_MAP_SWIZZLE_NONE', 0)
CU_TENSOR_MAP_SWIZZLE_32B = enum_CUtensorMapSwizzle_enum.define('CU_TENSOR_MAP_SWIZZLE_32B', 1)
CU_TENSOR_MAP_SWIZZLE_64B = enum_CUtensorMapSwizzle_enum.define('CU_TENSOR_MAP_SWIZZLE_64B', 2)
CU_TENSOR_MAP_SWIZZLE_128B = enum_CUtensorMapSwizzle_enum.define('CU_TENSOR_MAP_SWIZZLE_128B', 3)
CU_TENSOR_MAP_SWIZZLE_128B_ATOM_32B = enum_CUtensorMapSwizzle_enum.define('CU_TENSOR_MAP_SWIZZLE_128B_ATOM_32B', 4)
CU_TENSOR_MAP_SWIZZLE_128B_ATOM_32B_FLIP_8B = enum_CUtensorMapSwizzle_enum.define('CU_TENSOR_MAP_SWIZZLE_128B_ATOM_32B_FLIP_8B', 5)
CU_TENSOR_MAP_SWIZZLE_128B_ATOM_64B = enum_CUtensorMapSwizzle_enum.define('CU_TENSOR_MAP_SWIZZLE_128B_ATOM_64B', 6)
CUtensorMapSwizzle: TypeAlias = enum_CUtensorMapSwizzle_enum
# C enum CUtensorMapL2promotion (uint32-backed): L2 promotion granularity.
class enum_CUtensorMapL2promotion_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_TENSOR_MAP_L2_PROMOTION_NONE = enum_CUtensorMapL2promotion_enum.define('CU_TENSOR_MAP_L2_PROMOTION_NONE', 0)
CU_TENSOR_MAP_L2_PROMOTION_L2_64B = enum_CUtensorMapL2promotion_enum.define('CU_TENSOR_MAP_L2_PROMOTION_L2_64B', 1)
CU_TENSOR_MAP_L2_PROMOTION_L2_128B = enum_CUtensorMapL2promotion_enum.define('CU_TENSOR_MAP_L2_PROMOTION_L2_128B', 2)
CU_TENSOR_MAP_L2_PROMOTION_L2_256B = enum_CUtensorMapL2promotion_enum.define('CU_TENSOR_MAP_L2_PROMOTION_L2_256B', 3)
CUtensorMapL2promotion: TypeAlias = enum_CUtensorMapL2promotion_enum
# C enum CUtensorMapFloatOOBfill (uint32-backed): out-of-bounds fill policy.
class enum_CUtensorMapFloatOOBfill_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE = enum_CUtensorMapFloatOOBfill_enum.define('CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE', 0)
CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA = enum_CUtensorMapFloatOOBfill_enum.define('CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA', 1)
CUtensorMapFloatOOBfill: TypeAlias = enum_CUtensorMapFloatOOBfill_enum
# --- Tensor-map encode/replace callback parameter records ---
# Each struct mirrors the argument pack of the corresponding driver-API entry
# point; the integer in each Annotated field is its byte offset in the C struct.
cuTensorMapEncodeTiled_params: TypeAlias = struct_cuTensorMapEncodeTiled_params_st
@c.record
class struct_cuTensorMapEncodeIm2col_params_st(c.Struct):
    # Parameters of cuTensorMapEncodeIm2col (88 bytes total).
    SIZE = 88
    tensorMap: Annotated[c.POINTER[CUtensorMap], 0]
    tensorDataType: Annotated[CUtensorMapDataType, 8]
    tensorRank: Annotated[cuuint32_t, 12]
    globalAddress: Annotated[ctypes.c_void_p, 16]
    globalDim: Annotated[c.POINTER[cuuint64_t], 24]
    globalStrides: Annotated[c.POINTER[cuuint64_t], 32]
    pixelBoxLowerCorner: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 40]
    pixelBoxUpperCorner: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 48]
    channelsPerPixel: Annotated[cuuint32_t, 56]
    pixelsPerColumn: Annotated[cuuint32_t, 60]
    elementStrides: Annotated[c.POINTER[cuuint32_t], 64]
    interleave: Annotated[CUtensorMapInterleave, 72]
    swizzle: Annotated[CUtensorMapSwizzle, 76]
    l2Promotion: Annotated[CUtensorMapL2promotion, 80]
    oobFill: Annotated[CUtensorMapFloatOOBfill, 84]
cuTensorMapEncodeIm2col_params: TypeAlias = struct_cuTensorMapEncodeIm2col_params_st
@c.record
class struct_cuTensorMapReplaceAddress_params_st(c.Struct):
    # Parameters of cuTensorMapReplaceAddress.
    SIZE = 16
    tensorMap: Annotated[c.POINTER[CUtensorMap], 0]
    globalAddress: Annotated[ctypes.c_void_p, 8]
cuTensorMapReplaceAddress_params: TypeAlias = struct_cuTensorMapReplaceAddress_params_st
# --- Peer-to-peer access callback parameter records ---
@c.record
class struct_cuDeviceCanAccessPeer_params_st(c.Struct):
    # Parameters of cuDeviceCanAccessPeer.
    SIZE = 16
    canAccessPeer: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 0]
    dev: Annotated[CUdevice, 8]
    peerDev: Annotated[CUdevice, 12]
cuDeviceCanAccessPeer_params: TypeAlias = struct_cuDeviceCanAccessPeer_params_st
@c.record
class struct_cuCtxEnablePeerAccess_params_st(c.Struct):
    # Parameters of cuCtxEnablePeerAccess.
    SIZE = 16
    peerContext: Annotated[CUcontext, 0]
    Flags: Annotated[Annotated[int, ctypes.c_uint32], 8]
cuCtxEnablePeerAccess_params: TypeAlias = struct_cuCtxEnablePeerAccess_params_st
@c.record
class struct_cuCtxDisablePeerAccess_params_st(c.Struct):
    # Parameters of cuCtxDisablePeerAccess.
    SIZE = 8
    peerContext: Annotated[CUcontext, 0]
cuCtxDisablePeerAccess_params: TypeAlias = struct_cuCtxDisablePeerAccess_params_st
@c.record
class struct_cuDeviceGetP2PAttribute_params_st(c.Struct):
    # Parameters of cuDeviceGetP2PAttribute.
    SIZE = 24
    value: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 0]
    attrib: Annotated[CUdevice_P2PAttribute, 8]
    srcDevice: Annotated[CUdevice, 12]
    dstDevice: Annotated[CUdevice, 16]
# C enum CUdevice_P2PAttribute (uint32-backed).
class enum_CUdevice_P2PAttribute_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_DEVICE_P2P_ATTRIBUTE_PERFORMANCE_RANK = enum_CUdevice_P2PAttribute_enum.define('CU_DEVICE_P2P_ATTRIBUTE_PERFORMANCE_RANK', 1)
CU_DEVICE_P2P_ATTRIBUTE_ACCESS_SUPPORTED = enum_CUdevice_P2PAttribute_enum.define('CU_DEVICE_P2P_ATTRIBUTE_ACCESS_SUPPORTED', 2)
CU_DEVICE_P2P_ATTRIBUTE_NATIVE_ATOMIC_SUPPORTED = enum_CUdevice_P2PAttribute_enum.define('CU_DEVICE_P2P_ATTRIBUTE_NATIVE_ATOMIC_SUPPORTED', 3)
# NOTE(review): the next two names share value 4 — this presumably mirrors the
# CUDA header, where ACCESS_ACCESS_SUPPORTED is a deprecated alias of
# CUDA_ARRAY_ACCESS_SUPPORTED; confirm against cuda.h before "fixing".
CU_DEVICE_P2P_ATTRIBUTE_ACCESS_ACCESS_SUPPORTED = enum_CUdevice_P2PAttribute_enum.define('CU_DEVICE_P2P_ATTRIBUTE_ACCESS_ACCESS_SUPPORTED', 4)
CU_DEVICE_P2P_ATTRIBUTE_CUDA_ARRAY_ACCESS_SUPPORTED = enum_CUdevice_P2PAttribute_enum.define('CU_DEVICE_P2P_ATTRIBUTE_CUDA_ARRAY_ACCESS_SUPPORTED', 4)
CUdevice_P2PAttribute: TypeAlias = enum_CUdevice_P2PAttribute_enum
cuDeviceGetP2PAttribute_params: TypeAlias = struct_cuDeviceGetP2PAttribute_params_st
# --- Graphics-interop callback parameter records ---
@c.record
class struct_cuGraphicsUnregisterResource_params_st(c.Struct):
    # Parameters of cuGraphicsUnregisterResource.
    SIZE = 8
    resource: Annotated[CUgraphicsResource, 0]
# Opaque handle type: CUgraphicsResource is a pointer to an incomplete C struct.
class struct_CUgraphicsResource_st(ctypes.Structure): pass
CUgraphicsResource: TypeAlias = c.POINTER[struct_CUgraphicsResource_st]
cuGraphicsUnregisterResource_params: TypeAlias = struct_cuGraphicsUnregisterResource_params_st
@c.record
class struct_cuGraphicsSubResourceGetMappedArray_params_st(c.Struct):
    # Parameters of cuGraphicsSubResourceGetMappedArray.
    SIZE = 24
    pArray: Annotated[c.POINTER[CUarray], 0]
    resource: Annotated[CUgraphicsResource, 8]
    arrayIndex: Annotated[Annotated[int, ctypes.c_uint32], 16]
    mipLevel: Annotated[Annotated[int, ctypes.c_uint32], 20]
cuGraphicsSubResourceGetMappedArray_params: TypeAlias = struct_cuGraphicsSubResourceGetMappedArray_params_st
@c.record
class struct_cuGraphicsResourceGetMappedMipmappedArray_params_st(c.Struct):
    # Parameters of cuGraphicsResourceGetMappedMipmappedArray.
    SIZE = 16
    pMipmappedArray: Annotated[c.POINTER[CUmipmappedArray], 0]
    resource: Annotated[CUgraphicsResource, 8]
cuGraphicsResourceGetMappedMipmappedArray_params: TypeAlias = struct_cuGraphicsResourceGetMappedMipmappedArray_params_st
@c.record
class struct_cuGraphicsResourceGetMappedPointer_v2_params_st(c.Struct):
    # Parameters of cuGraphicsResourceGetMappedPointer_v2 (64-bit sizes).
    SIZE = 24
    pDevPtr: Annotated[c.POINTER[CUdeviceptr], 0]
    pSize: Annotated[c.POINTER[size_t], 8]
    resource: Annotated[CUgraphicsResource, 16]
cuGraphicsResourceGetMappedPointer_v2_params: TypeAlias = struct_cuGraphicsResourceGetMappedPointer_v2_params_st
@c.record
class struct_cuGraphicsResourceSetMapFlags_v2_params_st(c.Struct):
    # Parameters of cuGraphicsResourceSetMapFlags_v2.
    SIZE = 16
    resource: Annotated[CUgraphicsResource, 0]
    flags: Annotated[Annotated[int, ctypes.c_uint32], 8]
cuGraphicsResourceSetMapFlags_v2_params: TypeAlias = struct_cuGraphicsResourceSetMapFlags_v2_params_st
@c.record
class struct_cuGraphicsMapResources_ptsz_params_st(c.Struct):
    # Parameters of cuGraphicsMapResources (per-thread-default-stream variant).
    SIZE = 24
    count: Annotated[Annotated[int, ctypes.c_uint32], 0]
    resources: Annotated[c.POINTER[CUgraphicsResource], 8]
    hStream: Annotated[CUstream, 16]
cuGraphicsMapResources_ptsz_params: TypeAlias = struct_cuGraphicsMapResources_ptsz_params_st
@c.record
class struct_cuGraphicsUnmapResources_ptsz_params_st(c.Struct):
    # Parameters of cuGraphicsUnmapResources (per-thread-default-stream variant).
    SIZE = 24
    count: Annotated[Annotated[int, ctypes.c_uint32], 0]
    resources: Annotated[c.POINTER[CUgraphicsResource], 8]
    hStream: Annotated[CUstream, 16]
cuGraphicsUnmapResources_ptsz_params: TypeAlias = struct_cuGraphicsUnmapResources_ptsz_params_st
# --- Driver entry-point lookup, coredump attributes, export table ---
@c.record
class struct_cuGetProcAddress_v2_params_st(c.Struct):
    # Parameters of cuGetProcAddress_v2.
    SIZE = 40
    symbol: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 0]
    pfn: Annotated[c.POINTER[ctypes.c_void_p], 8]
    cudaVersion: Annotated[Annotated[int, ctypes.c_int32], 16]
    flags: Annotated[cuuint64_t, 24]
    symbolStatus: Annotated[c.POINTER[CUdriverProcAddressQueryResult], 32]
# C enum CUdriverProcAddressQueryResult (uint32-backed).
class enum_CUdriverProcAddressQueryResult_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_GET_PROC_ADDRESS_SUCCESS = enum_CUdriverProcAddressQueryResult_enum.define('CU_GET_PROC_ADDRESS_SUCCESS', 0)
CU_GET_PROC_ADDRESS_SYMBOL_NOT_FOUND = enum_CUdriverProcAddressQueryResult_enum.define('CU_GET_PROC_ADDRESS_SYMBOL_NOT_FOUND', 1)
CU_GET_PROC_ADDRESS_VERSION_NOT_SUFFICIENT = enum_CUdriverProcAddressQueryResult_enum.define('CU_GET_PROC_ADDRESS_VERSION_NOT_SUFFICIENT', 2)
CUdriverProcAddressQueryResult: TypeAlias = enum_CUdriverProcAddressQueryResult_enum
cuGetProcAddress_v2_params: TypeAlias = struct_cuGetProcAddress_v2_params_st
@c.record
class struct_cuCoredumpGetAttribute_params_st(c.Struct):
    # Parameters of cuCoredumpGetAttribute.
    SIZE = 24
    attrib: Annotated[CUcoredumpSettings, 0]
    value: Annotated[ctypes.c_void_p, 8]
    size: Annotated[c.POINTER[size_t], 16]
# C enum CUcoredumpSettings (uint32-backed).
class enum_CUcoredumpSettings_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_COREDUMP_ENABLE_ON_EXCEPTION = enum_CUcoredumpSettings_enum.define('CU_COREDUMP_ENABLE_ON_EXCEPTION', 1)
CU_COREDUMP_TRIGGER_HOST = enum_CUcoredumpSettings_enum.define('CU_COREDUMP_TRIGGER_HOST', 2)
CU_COREDUMP_LIGHTWEIGHT = enum_CUcoredumpSettings_enum.define('CU_COREDUMP_LIGHTWEIGHT', 3)
CU_COREDUMP_ENABLE_USER_TRIGGER = enum_CUcoredumpSettings_enum.define('CU_COREDUMP_ENABLE_USER_TRIGGER', 4)
CU_COREDUMP_FILE = enum_CUcoredumpSettings_enum.define('CU_COREDUMP_FILE', 5)
CU_COREDUMP_PIPE = enum_CUcoredumpSettings_enum.define('CU_COREDUMP_PIPE', 6)
CU_COREDUMP_GENERATION_FLAGS = enum_CUcoredumpSettings_enum.define('CU_COREDUMP_GENERATION_FLAGS', 7)
CU_COREDUMP_MAX = enum_CUcoredumpSettings_enum.define('CU_COREDUMP_MAX', 8)
CUcoredumpSettings: TypeAlias = enum_CUcoredumpSettings_enum
cuCoredumpGetAttribute_params: TypeAlias = struct_cuCoredumpGetAttribute_params_st
@c.record
class struct_cuCoredumpGetAttributeGlobal_params_st(c.Struct):
    # Parameters of cuCoredumpGetAttributeGlobal (same layout as the non-global variant).
    SIZE = 24
    attrib: Annotated[CUcoredumpSettings, 0]
    value: Annotated[ctypes.c_void_p, 8]
    size: Annotated[c.POINTER[size_t], 16]
cuCoredumpGetAttributeGlobal_params: TypeAlias = struct_cuCoredumpGetAttributeGlobal_params_st
@c.record
class struct_cuCoredumpSetAttribute_params_st(c.Struct):
    # Parameters of cuCoredumpSetAttribute.
    SIZE = 24
    attrib: Annotated[CUcoredumpSettings, 0]
    value: Annotated[ctypes.c_void_p, 8]
    size: Annotated[c.POINTER[size_t], 16]
cuCoredumpSetAttribute_params: TypeAlias = struct_cuCoredumpSetAttribute_params_st
@c.record
class struct_cuCoredumpSetAttributeGlobal_params_st(c.Struct):
    # Parameters of cuCoredumpSetAttributeGlobal.
    SIZE = 24
    attrib: Annotated[CUcoredumpSettings, 0]
    value: Annotated[ctypes.c_void_p, 8]
    size: Annotated[c.POINTER[size_t], 16]
cuCoredumpSetAttributeGlobal_params: TypeAlias = struct_cuCoredumpSetAttributeGlobal_params_st
@c.record
class struct_cuGetExportTable_params_st(c.Struct):
    # Parameters of cuGetExportTable.
    SIZE = 16
    ppExportTable: Annotated[c.POINTER[ctypes.c_void_p], 0]
    pExportTableId: Annotated[c.POINTER[CUuuid], 8]
cuGetExportTable_params: TypeAlias = struct_cuGetExportTable_params_st
# --- Green-context and device-resource callback parameter records ---
@c.record
class struct_cuGreenCtxCreate_params_st(c.Struct):
    # Parameters of cuGreenCtxCreate.
    SIZE = 24
    phCtx: Annotated[c.POINTER[CUgreenCtx], 0]
    desc: Annotated[CUdevResourceDesc, 8]
    dev: Annotated[CUdevice, 16]
    flags: Annotated[Annotated[int, ctypes.c_uint32], 20]
# Opaque handle type: CUdevResourceDesc is a pointer to an incomplete C struct.
class struct_CUdevResourceDesc_st(ctypes.Structure): pass
CUdevResourceDesc: TypeAlias = c.POINTER[struct_CUdevResourceDesc_st]
cuGreenCtxCreate_params: TypeAlias = struct_cuGreenCtxCreate_params_st
@c.record
class struct_cuGreenCtxDestroy_params_st(c.Struct):
    # Parameters of cuGreenCtxDestroy.
    SIZE = 8
    hCtx: Annotated[CUgreenCtx, 0]
cuGreenCtxDestroy_params: TypeAlias = struct_cuGreenCtxDestroy_params_st
@c.record
class struct_cuCtxFromGreenCtx_params_st(c.Struct):
    # Parameters of cuCtxFromGreenCtx.
    SIZE = 16
    pContext: Annotated[c.POINTER[CUcontext], 0]
    hCtx: Annotated[CUgreenCtx, 8]
cuCtxFromGreenCtx_params: TypeAlias = struct_cuCtxFromGreenCtx_params_st
@c.record
class struct_cuDeviceGetDevResource_params_st(c.Struct):
    # Parameters of cuDeviceGetDevResource.
    SIZE = 24
    device: Annotated[CUdevice, 0]
    resource: Annotated[c.POINTER[CUdevResource], 8]
    type: Annotated[CUdevResourceType, 16]
@c.record
class struct_CUdevResource_st(c.Struct):
    # Mirror of C struct CUdevResource (144 bytes).
    # NOTE(review): `sm` and `_oversize` share byte offset 96 — the payload
    # area is presumably a union in the C header, with `_internal_padding`
    # filling offsets 4..95; confirm against cuda.h if the layout changes.
    SIZE = 144
    type: Annotated[CUdevResourceType, 0]
    _internal_padding: Annotated[c.Array[Annotated[int, ctypes.c_ubyte], Literal[92]], 4]
    sm: Annotated[CUdevSmResource, 96]
    _oversize: Annotated[c.Array[Annotated[int, ctypes.c_ubyte], Literal[48]], 96]
CUdevResource: TypeAlias = struct_CUdevResource_st
# C enum CUdevResourceType (uint32-backed).
class CUdevResourceType(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_DEV_RESOURCE_TYPE_INVALID = CUdevResourceType.define('CU_DEV_RESOURCE_TYPE_INVALID', 0)
CU_DEV_RESOURCE_TYPE_SM = CUdevResourceType.define('CU_DEV_RESOURCE_TYPE_SM', 1)
CU_DEV_RESOURCE_TYPE_MAX = CUdevResourceType.define('CU_DEV_RESOURCE_TYPE_MAX', 2)
@c.record
class struct_CUdevSmResource_st(c.Struct):
    # SM-count payload of a CUdevResource of type CU_DEV_RESOURCE_TYPE_SM.
    SIZE = 4
    smCount: Annotated[Annotated[int, ctypes.c_uint32], 0]
CUdevSmResource: TypeAlias = struct_CUdevSmResource_st
cuDeviceGetDevResource_params: TypeAlias = struct_cuDeviceGetDevResource_params_st
@c.record
class struct_cuCtxGetDevResource_params_st(c.Struct):
    # Parameters of cuCtxGetDevResource.
    SIZE = 24
    hCtx: Annotated[CUcontext, 0]
    resource: Annotated[c.POINTER[CUdevResource], 8]
    type: Annotated[CUdevResourceType, 16]
cuCtxGetDevResource_params: TypeAlias = struct_cuCtxGetDevResource_params_st
@c.record
class struct_cuGreenCtxGetDevResource_params_st(c.Struct):
    # Parameters of cuGreenCtxGetDevResource.
    SIZE = 24
    hCtx: Annotated[CUgreenCtx, 0]
    resource: Annotated[c.POINTER[CUdevResource], 8]
    type: Annotated[CUdevResourceType, 16]
cuGreenCtxGetDevResource_params: TypeAlias = struct_cuGreenCtxGetDevResource_params_st
@c.record
class struct_cuDevSmResourceSplitByCount_params_st(c.Struct):
    # Parameters of cuDevSmResourceSplitByCount.
    SIZE = 40
    result: Annotated[c.POINTER[CUdevResource], 0]
    nbGroups: Annotated[c.POINTER[Annotated[int, ctypes.c_uint32]], 8]
    input: Annotated[c.POINTER[CUdevResource], 16]
    remaining: Annotated[c.POINTER[CUdevResource], 24]
    useFlags: Annotated[Annotated[int, ctypes.c_uint32], 32]
    minCount: Annotated[Annotated[int, ctypes.c_uint32], 36]
cuDevSmResourceSplitByCount_params: TypeAlias = struct_cuDevSmResourceSplitByCount_params_st
@c.record
class struct_cuDevResourceGenerateDesc_params_st(c.Struct):
    # Parameters of cuDevResourceGenerateDesc.
    SIZE = 24
    phDesc: Annotated[c.POINTER[CUdevResourceDesc], 0]
    resources: Annotated[c.POINTER[CUdevResource], 8]
    nbResources: Annotated[Annotated[int, ctypes.c_uint32], 16]
cuDevResourceGenerateDesc_params: TypeAlias = struct_cuDevResourceGenerateDesc_params_st
@c.record
class struct_cuGreenCtxRecordEvent_params_st(c.Struct):
    # Parameters of cuGreenCtxRecordEvent.
    SIZE = 16
    hCtx: Annotated[CUgreenCtx, 0]
    hEvent: Annotated[CUevent, 8]
cuGreenCtxRecordEvent_params: TypeAlias = struct_cuGreenCtxRecordEvent_params_st
@c.record
class struct_cuGreenCtxWaitEvent_params_st(c.Struct):
    # Parameters of cuGreenCtxWaitEvent.
    SIZE = 16
    hCtx: Annotated[CUgreenCtx, 0]
    hEvent: Annotated[CUevent, 8]
cuGreenCtxWaitEvent_params: TypeAlias = struct_cuGreenCtxWaitEvent_params_st
@c.record
class struct_cuStreamGetGreenCtx_params_st(c.Struct):
    # Parameters of cuStreamGetGreenCtx.
    SIZE = 16
    hStream: Annotated[CUstream, 0]
    phCtx: Annotated[c.POINTER[CUgreenCtx], 8]
cuStreamGetGreenCtx_params: TypeAlias = struct_cuStreamGetGreenCtx_params_st
@c.record
class struct_cuGreenCtxStreamCreate_params_st(c.Struct):
    # Parameters of cuGreenCtxStreamCreate.
    SIZE = 24
    phStream: Annotated[c.POINTER[CUstream], 0]
    greenCtx: Annotated[CUgreenCtx, 8]
    flags: Annotated[Annotated[int, ctypes.c_uint32], 16]
    priority: Annotated[Annotated[int, ctypes.c_int32], 20]
cuGreenCtxStreamCreate_params: TypeAlias = struct_cuGreenCtxStreamCreate_params_st
# --- Host-memory registration, linker, and texref parameter records ---
@c.record
class struct_cuMemHostRegister_params_st(c.Struct):
    # Parameters of cuMemHostRegister.
    SIZE = 24
    p: Annotated[ctypes.c_void_p, 0]
    bytesize: Annotated[size_t, 8]
    Flags: Annotated[Annotated[int, ctypes.c_uint32], 16]
cuMemHostRegister_params: TypeAlias = struct_cuMemHostRegister_params_st
@c.record
class struct_cuGraphicsResourceSetMapFlags_params_st(c.Struct):
    # Parameters of cuGraphicsResourceSetMapFlags (non-_v2 variant).
    SIZE = 16
    resource: Annotated[CUgraphicsResource, 0]
    flags: Annotated[Annotated[int, ctypes.c_uint32], 8]
cuGraphicsResourceSetMapFlags_params: TypeAlias = struct_cuGraphicsResourceSetMapFlags_params_st
@c.record
class struct_cuLinkCreate_params_st(c.Struct):
    # Parameters of cuLinkCreate.
    SIZE = 32
    numOptions: Annotated[Annotated[int, ctypes.c_uint32], 0]
    options: Annotated[c.POINTER[CUjit_option], 8]
    optionValues: Annotated[c.POINTER[ctypes.c_void_p], 16]
    stateOut: Annotated[c.POINTER[CUlinkState], 24]
cuLinkCreate_params: TypeAlias = struct_cuLinkCreate_params_st
@c.record
class struct_cuLinkAddData_params_st(c.Struct):
    # Parameters of cuLinkAddData.
    SIZE = 64
    state: Annotated[CUlinkState, 0]
    type: Annotated[CUjitInputType, 8]
    data: Annotated[ctypes.c_void_p, 16]
    size: Annotated[size_t, 24]
    name: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 32]
    numOptions: Annotated[Annotated[int, ctypes.c_uint32], 40]
    options: Annotated[c.POINTER[CUjit_option], 48]
    optionValues: Annotated[c.POINTER[ctypes.c_void_p], 56]
cuLinkAddData_params: TypeAlias = struct_cuLinkAddData_params_st
@c.record
class struct_cuLinkAddFile_params_st(c.Struct):
    # Parameters of cuLinkAddFile.
    SIZE = 48
    state: Annotated[CUlinkState, 0]
    type: Annotated[CUjitInputType, 8]
    path: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 16]
    numOptions: Annotated[Annotated[int, ctypes.c_uint32], 24]
    options: Annotated[c.POINTER[CUjit_option], 32]
    optionValues: Annotated[c.POINTER[ctypes.c_void_p], 40]
cuLinkAddFile_params: TypeAlias = struct_cuLinkAddFile_params_st
@c.record
class struct_cuTexRefSetAddress2D_v2_params_st(c.Struct):
    # Parameters of cuTexRefSetAddress2D_v2 (64-bit device pointer and pitch).
    SIZE = 32
    hTexRef: Annotated[CUtexref, 0]
    desc: Annotated[c.POINTER[CUDA_ARRAY_DESCRIPTOR], 8]
    dptr: Annotated[CUdeviceptr, 16]
    Pitch: Annotated[size_t, 24]
cuTexRefSetAddress2D_v2_params: TypeAlias = struct_cuTexRefSetAddress2D_v2_params_st
# --- Legacy (pre-CUDA-3.2) API parameter records, part 1 ---
# These structs use CUdeviceptr_v1, a 32-bit device pointer (defined below as
# ctypes.c_uint32), and 32-bit sizes — matching the original un-suffixed
# driver entry points.
@c.record
class struct_cuDeviceTotalMem_params_st(c.Struct):
    # Parameters of the legacy cuDeviceTotalMem (32-bit byte count).
    SIZE = 16
    bytes: Annotated[c.POINTER[Annotated[int, ctypes.c_uint32]], 0]
    dev: Annotated[CUdevice, 8]
cuDeviceTotalMem_params: TypeAlias = struct_cuDeviceTotalMem_params_st
@c.record
class struct_cuCtxCreate_params_st(c.Struct):
    # Parameters of cuCtxCreate.
    SIZE = 16
    pctx: Annotated[c.POINTER[CUcontext], 0]
    flags: Annotated[Annotated[int, ctypes.c_uint32], 8]
    dev: Annotated[CUdevice, 12]
cuCtxCreate_params: TypeAlias = struct_cuCtxCreate_params_st
@c.record
class struct_cuModuleGetGlobal_params_st(c.Struct):
    # Parameters of the legacy cuModuleGetGlobal.
    SIZE = 32
    dptr: Annotated[c.POINTER[CUdeviceptr_v1], 0]
    bytes: Annotated[c.POINTER[Annotated[int, ctypes.c_uint32]], 8]
    hmod: Annotated[CUmodule, 16]
    name: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 24]
# Legacy 32-bit device pointer type used by the un-suffixed driver APIs.
CUdeviceptr_v1: TypeAlias = Annotated[int, ctypes.c_uint32]
cuModuleGetGlobal_params: TypeAlias = struct_cuModuleGetGlobal_params_st
@c.record
class struct_cuMemGetInfo_params_st(c.Struct):
    # Parameters of the legacy cuMemGetInfo (32-bit free/total).
    SIZE = 16
    free: Annotated[c.POINTER[Annotated[int, ctypes.c_uint32]], 0]
    total: Annotated[c.POINTER[Annotated[int, ctypes.c_uint32]], 8]
cuMemGetInfo_params: TypeAlias = struct_cuMemGetInfo_params_st
@c.record
class struct_cuMemAlloc_params_st(c.Struct):
    # Parameters of the legacy cuMemAlloc.
    SIZE = 16
    dptr: Annotated[c.POINTER[CUdeviceptr_v1], 0]
    bytesize: Annotated[Annotated[int, ctypes.c_uint32], 8]
cuMemAlloc_params: TypeAlias = struct_cuMemAlloc_params_st
@c.record
class struct_cuMemAllocPitch_params_st(c.Struct):
    # Parameters of the legacy cuMemAllocPitch.
    SIZE = 32
    dptr: Annotated[c.POINTER[CUdeviceptr_v1], 0]
    pPitch: Annotated[c.POINTER[Annotated[int, ctypes.c_uint32]], 8]
    WidthInBytes: Annotated[Annotated[int, ctypes.c_uint32], 16]
    Height: Annotated[Annotated[int, ctypes.c_uint32], 20]
    ElementSizeBytes: Annotated[Annotated[int, ctypes.c_uint32], 24]
cuMemAllocPitch_params: TypeAlias = struct_cuMemAllocPitch_params_st
@c.record
class struct_cuMemFree_params_st(c.Struct):
    # Parameters of the legacy cuMemFree.
    SIZE = 4
    dptr: Annotated[CUdeviceptr_v1, 0]
cuMemFree_params: TypeAlias = struct_cuMemFree_params_st
@c.record
class struct_cuMemGetAddressRange_params_st(c.Struct):
    # Parameters of the legacy cuMemGetAddressRange.
    SIZE = 24
    pbase: Annotated[c.POINTER[CUdeviceptr_v1], 0]
    psize: Annotated[c.POINTER[Annotated[int, ctypes.c_uint32]], 8]
    dptr: Annotated[CUdeviceptr_v1, 16]
cuMemGetAddressRange_params: TypeAlias = struct_cuMemGetAddressRange_params_st
@c.record
class struct_cuMemAllocHost_params_st(c.Struct):
    # Parameters of the legacy cuMemAllocHost.
    SIZE = 16
    pp: Annotated[c.POINTER[ctypes.c_void_p], 0]
    bytesize: Annotated[Annotated[int, ctypes.c_uint32], 8]
cuMemAllocHost_params: TypeAlias = struct_cuMemAllocHost_params_st
@c.record
class struct_cuMemHostGetDevicePointer_params_st(c.Struct):
    # Parameters of the legacy cuMemHostGetDevicePointer.
    SIZE = 24
    pdptr: Annotated[c.POINTER[CUdeviceptr_v1], 0]
    p: Annotated[ctypes.c_void_p, 8]
    Flags: Annotated[Annotated[int, ctypes.c_uint32], 16]
cuMemHostGetDevicePointer_params: TypeAlias = struct_cuMemHostGetDevicePointer_params_st
@c.record
class struct_cuMemcpyHtoD_params_st(c.Struct):
    # Parameters of the legacy cuMemcpyHtoD.
    SIZE = 24
    dstDevice: Annotated[CUdeviceptr_v1, 0]
    srcHost: Annotated[ctypes.c_void_p, 8]
    ByteCount: Annotated[Annotated[int, ctypes.c_uint32], 16]
cuMemcpyHtoD_params: TypeAlias = struct_cuMemcpyHtoD_params_st
@c.record
class struct_cuMemcpyDtoH_params_st(c.Struct):
    # Parameters of the legacy cuMemcpyDtoH.
    SIZE = 16
    dstHost: Annotated[ctypes.c_void_p, 0]
    srcDevice: Annotated[CUdeviceptr_v1, 8]
    ByteCount: Annotated[Annotated[int, ctypes.c_uint32], 12]
cuMemcpyDtoH_params: TypeAlias = struct_cuMemcpyDtoH_params_st
@c.record
class struct_cuMemcpyDtoD_params_st(c.Struct):
    # Parameters of the legacy cuMemcpyDtoD.
    SIZE = 12
    dstDevice: Annotated[CUdeviceptr_v1, 0]
    srcDevice: Annotated[CUdeviceptr_v1, 4]
    ByteCount: Annotated[Annotated[int, ctypes.c_uint32], 8]
cuMemcpyDtoD_params: TypeAlias = struct_cuMemcpyDtoD_params_st
@c.record
class struct_cuMemcpyDtoA_params_st(c.Struct):
    # Parameters of the legacy cuMemcpyDtoA.
    SIZE = 24
    dstArray: Annotated[CUarray, 0]
    dstOffset: Annotated[Annotated[int, ctypes.c_uint32], 8]
    srcDevice: Annotated[CUdeviceptr_v1, 12]
    ByteCount: Annotated[Annotated[int, ctypes.c_uint32], 16]
cuMemcpyDtoA_params: TypeAlias = struct_cuMemcpyDtoA_params_st
@c.record
class struct_cuMemcpyAtoD_params_st(c.Struct):
    # Parameters of the legacy cuMemcpyAtoD.
    SIZE = 24
    dstDevice: Annotated[CUdeviceptr_v1, 0]
    srcArray: Annotated[CUarray, 8]
    srcOffset: Annotated[Annotated[int, ctypes.c_uint32], 16]
    ByteCount: Annotated[Annotated[int, ctypes.c_uint32], 20]
cuMemcpyAtoD_params: TypeAlias = struct_cuMemcpyAtoD_params_st
@c.record
class struct_cuMemcpyHtoA_params_st(c.Struct):
    # Parameters of the legacy cuMemcpyHtoA.
    SIZE = 32
    dstArray: Annotated[CUarray, 0]
    dstOffset: Annotated[Annotated[int, ctypes.c_uint32], 8]
    srcHost: Annotated[ctypes.c_void_p, 16]
    ByteCount: Annotated[Annotated[int, ctypes.c_uint32], 24]
cuMemcpyHtoA_params: TypeAlias = struct_cuMemcpyHtoA_params_st
# --- Legacy API parameter records, part 2: array copies and 2D/3D descriptors ---
@c.record
class struct_cuMemcpyAtoH_params_st(c.Struct):
    # Parameters of the legacy cuMemcpyAtoH.
    SIZE = 24
    dstHost: Annotated[ctypes.c_void_p, 0]
    srcArray: Annotated[CUarray, 8]
    srcOffset: Annotated[Annotated[int, ctypes.c_uint32], 16]
    ByteCount: Annotated[Annotated[int, ctypes.c_uint32], 20]
cuMemcpyAtoH_params: TypeAlias = struct_cuMemcpyAtoH_params_st
@c.record
class struct_cuMemcpyAtoA_params_st(c.Struct):
    # Parameters of the legacy cuMemcpyAtoA.
    SIZE = 32
    dstArray: Annotated[CUarray, 0]
    dstOffset: Annotated[Annotated[int, ctypes.c_uint32], 8]
    srcArray: Annotated[CUarray, 16]
    srcOffset: Annotated[Annotated[int, ctypes.c_uint32], 24]
    ByteCount: Annotated[Annotated[int, ctypes.c_uint32], 28]
cuMemcpyAtoA_params: TypeAlias = struct_cuMemcpyAtoA_params_st
@c.record
class struct_cuMemcpyHtoAAsync_params_st(c.Struct):
    # Parameters of the legacy cuMemcpyHtoAAsync.
    SIZE = 40
    dstArray: Annotated[CUarray, 0]
    dstOffset: Annotated[Annotated[int, ctypes.c_uint32], 8]
    srcHost: Annotated[ctypes.c_void_p, 16]
    ByteCount: Annotated[Annotated[int, ctypes.c_uint32], 24]
    hStream: Annotated[CUstream, 32]
cuMemcpyHtoAAsync_params: TypeAlias = struct_cuMemcpyHtoAAsync_params_st
@c.record
class struct_cuMemcpyAtoHAsync_params_st(c.Struct):
    # Parameters of the legacy cuMemcpyAtoHAsync.
    SIZE = 32
    dstHost: Annotated[ctypes.c_void_p, 0]
    srcArray: Annotated[CUarray, 8]
    srcOffset: Annotated[Annotated[int, ctypes.c_uint32], 16]
    ByteCount: Annotated[Annotated[int, ctypes.c_uint32], 20]
    hStream: Annotated[CUstream, 24]
cuMemcpyAtoHAsync_params: TypeAlias = struct_cuMemcpyAtoHAsync_params_st
@c.record
class struct_cuMemcpy2D_params_st(c.Struct):
    # Parameters of the legacy cuMemcpy2D (single descriptor pointer).
    SIZE = 8
    pCopy: Annotated[c.POINTER[CUDA_MEMCPY2D_v1], 0]
@c.record
class struct_CUDA_MEMCPY2D_v1_st(c.Struct):
    # Mirror of the legacy C struct CUDA_MEMCPY2D_v1 (32-bit offsets/pitches).
    SIZE = 96
    srcXInBytes: Annotated[Annotated[int, ctypes.c_uint32], 0]
    srcY: Annotated[Annotated[int, ctypes.c_uint32], 4]
    srcMemoryType: Annotated[CUmemorytype, 8]
    srcHost: Annotated[ctypes.c_void_p, 16]
    srcDevice: Annotated[CUdeviceptr_v1, 24]
    srcArray: Annotated[CUarray, 32]
    srcPitch: Annotated[Annotated[int, ctypes.c_uint32], 40]
    dstXInBytes: Annotated[Annotated[int, ctypes.c_uint32], 44]
    dstY: Annotated[Annotated[int, ctypes.c_uint32], 48]
    dstMemoryType: Annotated[CUmemorytype, 52]
    dstHost: Annotated[ctypes.c_void_p, 56]
    dstDevice: Annotated[CUdeviceptr_v1, 64]
    dstArray: Annotated[CUarray, 72]
    dstPitch: Annotated[Annotated[int, ctypes.c_uint32], 80]
    WidthInBytes: Annotated[Annotated[int, ctypes.c_uint32], 84]
    Height: Annotated[Annotated[int, ctypes.c_uint32], 88]
CUDA_MEMCPY2D_v1: TypeAlias = struct_CUDA_MEMCPY2D_v1_st
cuMemcpy2D_params: TypeAlias = struct_cuMemcpy2D_params_st
@c.record
class struct_cuMemcpy2DUnaligned_params_st(c.Struct):
    # Parameters of the legacy cuMemcpy2DUnaligned.
    SIZE = 8
    pCopy: Annotated[c.POINTER[CUDA_MEMCPY2D_v1], 0]
cuMemcpy2DUnaligned_params: TypeAlias = struct_cuMemcpy2DUnaligned_params_st
@c.record
class struct_cuMemcpy3D_params_st(c.Struct):
    # Parameters of the legacy cuMemcpy3D (single descriptor pointer).
    SIZE = 8
    pCopy: Annotated[c.POINTER[CUDA_MEMCPY3D_v1], 0]
@c.record
class struct_CUDA_MEMCPY3D_v1_st(c.Struct):
    # Mirror of the legacy C struct CUDA_MEMCPY3D_v1 (32-bit offsets/pitches).
    SIZE = 144
    srcXInBytes: Annotated[Annotated[int, ctypes.c_uint32], 0]
    srcY: Annotated[Annotated[int, ctypes.c_uint32], 4]
    srcZ: Annotated[Annotated[int, ctypes.c_uint32], 8]
    srcLOD: Annotated[Annotated[int, ctypes.c_uint32], 12]
    srcMemoryType: Annotated[CUmemorytype, 16]
    srcHost: Annotated[ctypes.c_void_p, 24]
    srcDevice: Annotated[CUdeviceptr_v1, 32]
    srcArray: Annotated[CUarray, 40]
    reserved0: Annotated[ctypes.c_void_p, 48]
    srcPitch: Annotated[Annotated[int, ctypes.c_uint32], 56]
    srcHeight: Annotated[Annotated[int, ctypes.c_uint32], 60]
    dstXInBytes: Annotated[Annotated[int, ctypes.c_uint32], 64]
    dstY: Annotated[Annotated[int, ctypes.c_uint32], 68]
    dstZ: Annotated[Annotated[int, ctypes.c_uint32], 72]
    dstLOD: Annotated[Annotated[int, ctypes.c_uint32], 76]
    dstMemoryType: Annotated[CUmemorytype, 80]
    dstHost: Annotated[ctypes.c_void_p, 88]
    dstDevice: Annotated[CUdeviceptr_v1, 96]
    dstArray: Annotated[CUarray, 104]
    reserved1: Annotated[ctypes.c_void_p, 112]
    dstPitch: Annotated[Annotated[int, ctypes.c_uint32], 120]
    dstHeight: Annotated[Annotated[int, ctypes.c_uint32], 124]
    WidthInBytes: Annotated[Annotated[int, ctypes.c_uint32], 128]
    Height: Annotated[Annotated[int, ctypes.c_uint32], 132]
    Depth: Annotated[Annotated[int, ctypes.c_uint32], 136]
CUDA_MEMCPY3D_v1: TypeAlias = struct_CUDA_MEMCPY3D_v1_st
cuMemcpy3D_params: TypeAlias = struct_cuMemcpy3D_params_st
# --- Legacy API parameter records, part 3: async memcpy and memset ---
@c.record
class struct_cuMemcpyHtoDAsync_params_st(c.Struct):
    # Parameters of the legacy cuMemcpyHtoDAsync.
    SIZE = 32
    dstDevice: Annotated[CUdeviceptr_v1, 0]
    srcHost: Annotated[ctypes.c_void_p, 8]
    ByteCount: Annotated[Annotated[int, ctypes.c_uint32], 16]
    hStream: Annotated[CUstream, 24]
cuMemcpyHtoDAsync_params: TypeAlias = struct_cuMemcpyHtoDAsync_params_st
@c.record
class struct_cuMemcpyDtoHAsync_params_st(c.Struct):
    # Parameters of the legacy cuMemcpyDtoHAsync.
    SIZE = 24
    dstHost: Annotated[ctypes.c_void_p, 0]
    srcDevice: Annotated[CUdeviceptr_v1, 8]
    ByteCount: Annotated[Annotated[int, ctypes.c_uint32], 12]
    hStream: Annotated[CUstream, 16]
cuMemcpyDtoHAsync_params: TypeAlias = struct_cuMemcpyDtoHAsync_params_st
@c.record
class struct_cuMemcpyDtoDAsync_params_st(c.Struct):
    # Parameters of the legacy cuMemcpyDtoDAsync.
    SIZE = 24
    dstDevice: Annotated[CUdeviceptr_v1, 0]
    srcDevice: Annotated[CUdeviceptr_v1, 4]
    ByteCount: Annotated[Annotated[int, ctypes.c_uint32], 8]
    hStream: Annotated[CUstream, 16]
cuMemcpyDtoDAsync_params: TypeAlias = struct_cuMemcpyDtoDAsync_params_st
@c.record
class struct_cuMemcpy2DAsync_params_st(c.Struct):
    # Parameters of the legacy cuMemcpy2DAsync.
    SIZE = 16
    pCopy: Annotated[c.POINTER[CUDA_MEMCPY2D_v1], 0]
    hStream: Annotated[CUstream, 8]
cuMemcpy2DAsync_params: TypeAlias = struct_cuMemcpy2DAsync_params_st
@c.record
class struct_cuMemcpy3DAsync_params_st(c.Struct):
    # Parameters of the legacy cuMemcpy3DAsync.
    SIZE = 16
    pCopy: Annotated[c.POINTER[CUDA_MEMCPY3D_v1], 0]
    hStream: Annotated[CUstream, 8]
cuMemcpy3DAsync_params: TypeAlias = struct_cuMemcpy3DAsync_params_st
@c.record
class struct_cuMemsetD8_params_st(c.Struct):
    # Parameters of the legacy cuMemsetD8.
    SIZE = 12
    dstDevice: Annotated[CUdeviceptr_v1, 0]
    uc: Annotated[Annotated[int, ctypes.c_ubyte], 4]
    N: Annotated[Annotated[int, ctypes.c_uint32], 8]
cuMemsetD8_params: TypeAlias = struct_cuMemsetD8_params_st
@c.record
class struct_cuMemsetD16_params_st(c.Struct):
    # Parameters of the legacy cuMemsetD16.
    SIZE = 12
    dstDevice: Annotated[CUdeviceptr_v1, 0]
    us: Annotated[Annotated[int, ctypes.c_uint16], 4]
    N: Annotated[Annotated[int, ctypes.c_uint32], 8]
cuMemsetD16_params: TypeAlias = struct_cuMemsetD16_params_st
@c.record
class struct_cuMemsetD32_params_st(c.Struct):
    # Parameters of the legacy cuMemsetD32.
    SIZE = 12
    dstDevice: Annotated[CUdeviceptr_v1, 0]
    ui: Annotated[Annotated[int, ctypes.c_uint32], 4]
    N: Annotated[Annotated[int, ctypes.c_uint32], 8]
cuMemsetD32_params: TypeAlias = struct_cuMemsetD32_params_st
@c.record
class struct_cuMemsetD2D8_params_st(c.Struct):
    # Parameters of the legacy cuMemsetD2D8.
    SIZE = 20
    dstDevice: Annotated[CUdeviceptr_v1, 0]
    dstPitch: Annotated[Annotated[int, ctypes.c_uint32], 4]
    uc: Annotated[Annotated[int, ctypes.c_ubyte], 8]
    Width: Annotated[Annotated[int, ctypes.c_uint32], 12]
    Height: Annotated[Annotated[int, ctypes.c_uint32], 16]
cuMemsetD2D8_params: TypeAlias = struct_cuMemsetD2D8_params_st
@c.record
class struct_cuMemsetD2D16_params_st(c.Struct):
    # Parameters of the legacy cuMemsetD2D16.
    SIZE = 20
    dstDevice: Annotated[CUdeviceptr_v1, 0]
    dstPitch: Annotated[Annotated[int, ctypes.c_uint32], 4]
    us: Annotated[Annotated[int, ctypes.c_uint16], 8]
    Width: Annotated[Annotated[int, ctypes.c_uint32], 12]
    Height: Annotated[Annotated[int, ctypes.c_uint32], 16]
cuMemsetD2D16_params: TypeAlias = struct_cuMemsetD2D16_params_st
@c.record
class struct_cuMemsetD2D32_params_st(c.Struct):
    # Parameters of the legacy cuMemsetD2D32.
    SIZE = 20
    dstDevice: Annotated[CUdeviceptr_v1, 0]
    dstPitch: Annotated[Annotated[int, ctypes.c_uint32], 4]
    ui: Annotated[Annotated[int, ctypes.c_uint32], 8]
    Width: Annotated[Annotated[int, ctypes.c_uint32], 12]
    Height: Annotated[Annotated[int, ctypes.c_uint32], 16]
cuMemsetD2D32_params: TypeAlias = struct_cuMemsetD2D32_params_st
# --- Legacy API parameter records, part 4: arrays and texture references ---
@c.record
class struct_cuArrayCreate_params_st(c.Struct):
    # Parameters of the legacy cuArrayCreate.
    SIZE = 16
    pHandle: Annotated[c.POINTER[CUarray], 0]
    pAllocateArray: Annotated[c.POINTER[CUDA_ARRAY_DESCRIPTOR_v1], 8]
@c.record
class struct_CUDA_ARRAY_DESCRIPTOR_v1_st(c.Struct):
    # Mirror of the legacy C struct CUDA_ARRAY_DESCRIPTOR_v1.
    SIZE = 16
    Width: Annotated[Annotated[int, ctypes.c_uint32], 0]
    Height: Annotated[Annotated[int, ctypes.c_uint32], 4]
    Format: Annotated[CUarray_format, 8]
    NumChannels: Annotated[Annotated[int, ctypes.c_uint32], 12]
CUDA_ARRAY_DESCRIPTOR_v1: TypeAlias = struct_CUDA_ARRAY_DESCRIPTOR_v1_st
cuArrayCreate_params: TypeAlias = struct_cuArrayCreate_params_st
@c.record
class struct_cuArrayGetDescriptor_params_st(c.Struct):
    # Parameters of the legacy cuArrayGetDescriptor.
    SIZE = 16
    pArrayDescriptor: Annotated[c.POINTER[CUDA_ARRAY_DESCRIPTOR_v1], 0]
    hArray: Annotated[CUarray, 8]
cuArrayGetDescriptor_params: TypeAlias = struct_cuArrayGetDescriptor_params_st
@c.record
class struct_cuArray3DCreate_params_st(c.Struct):
    # Parameters of the legacy cuArray3DCreate.
    SIZE = 16
    pHandle: Annotated[c.POINTER[CUarray], 0]
    pAllocateArray: Annotated[c.POINTER[CUDA_ARRAY3D_DESCRIPTOR_v1], 8]
@c.record
class struct_CUDA_ARRAY3D_DESCRIPTOR_v1_st(c.Struct):
    # Mirror of the legacy C struct CUDA_ARRAY3D_DESCRIPTOR_v1.
    SIZE = 24
    Width: Annotated[Annotated[int, ctypes.c_uint32], 0]
    Height: Annotated[Annotated[int, ctypes.c_uint32], 4]
    Depth: Annotated[Annotated[int, ctypes.c_uint32], 8]
    Format: Annotated[CUarray_format, 12]
    NumChannels: Annotated[Annotated[int, ctypes.c_uint32], 16]
    Flags: Annotated[Annotated[int, ctypes.c_uint32], 20]
CUDA_ARRAY3D_DESCRIPTOR_v1: TypeAlias = struct_CUDA_ARRAY3D_DESCRIPTOR_v1_st
cuArray3DCreate_params: TypeAlias = struct_cuArray3DCreate_params_st
@c.record
class struct_cuArray3DGetDescriptor_params_st(c.Struct):
    # Parameters of the legacy cuArray3DGetDescriptor.
    SIZE = 16
    pArrayDescriptor: Annotated[c.POINTER[CUDA_ARRAY3D_DESCRIPTOR_v1], 0]
    hArray: Annotated[CUarray, 8]
cuArray3DGetDescriptor_params: TypeAlias = struct_cuArray3DGetDescriptor_params_st
@c.record
class struct_cuTexRefSetAddress_params_st(c.Struct):
    # Parameters of the legacy cuTexRefSetAddress.
    SIZE = 24
    ByteOffset: Annotated[c.POINTER[Annotated[int, ctypes.c_uint32]], 0]
    hTexRef: Annotated[CUtexref, 8]
    dptr: Annotated[CUdeviceptr_v1, 16]
    bytes: Annotated[Annotated[int, ctypes.c_uint32], 20]
cuTexRefSetAddress_params: TypeAlias = struct_cuTexRefSetAddress_params_st
@c.record
class struct_cuTexRefSetAddress2D_params_st(c.Struct):
    # Parameters of the legacy cuTexRefSetAddress2D.
    SIZE = 24
    hTexRef: Annotated[CUtexref, 0]
    desc: Annotated[c.POINTER[CUDA_ARRAY_DESCRIPTOR_v1], 8]
    dptr: Annotated[CUdeviceptr_v1, 16]
    Pitch: Annotated[Annotated[int, ctypes.c_uint32], 20]
cuTexRefSetAddress2D_params: TypeAlias = struct_cuTexRefSetAddress2D_params_st
# --- Legacy API parameter records, part 5: texref lookup, context and
# stream/event teardown, primary context management ---
@c.record
class struct_cuTexRefGetAddress_params_st(c.Struct):
    # Parameters of the legacy cuTexRefGetAddress.
    SIZE = 16
    pdptr: Annotated[c.POINTER[CUdeviceptr_v1], 0]
    hTexRef: Annotated[CUtexref, 8]
cuTexRefGetAddress_params: TypeAlias = struct_cuTexRefGetAddress_params_st
@c.record
class struct_cuGraphicsResourceGetMappedPointer_params_st(c.Struct):
    # Parameters of the legacy cuGraphicsResourceGetMappedPointer (32-bit sizes).
    SIZE = 24
    pDevPtr: Annotated[c.POINTER[CUdeviceptr_v1], 0]
    pSize: Annotated[c.POINTER[Annotated[int, ctypes.c_uint32]], 8]
    resource: Annotated[CUgraphicsResource, 16]
cuGraphicsResourceGetMappedPointer_params: TypeAlias = struct_cuGraphicsResourceGetMappedPointer_params_st
@c.record
class struct_cuCtxDestroy_params_st(c.Struct):
    # Parameters of cuCtxDestroy.
    SIZE = 8
    ctx: Annotated[CUcontext, 0]
cuCtxDestroy_params: TypeAlias = struct_cuCtxDestroy_params_st
@c.record
class struct_cuCtxPopCurrent_params_st(c.Struct):
    # Parameters of cuCtxPopCurrent.
    SIZE = 8
    pctx: Annotated[c.POINTER[CUcontext], 0]
cuCtxPopCurrent_params: TypeAlias = struct_cuCtxPopCurrent_params_st
@c.record
class struct_cuCtxPushCurrent_params_st(c.Struct):
    # Parameters of cuCtxPushCurrent.
    SIZE = 8
    ctx: Annotated[CUcontext, 0]
cuCtxPushCurrent_params: TypeAlias = struct_cuCtxPushCurrent_params_st
@c.record
class struct_cuStreamDestroy_params_st(c.Struct):
    # Parameters of cuStreamDestroy.
    SIZE = 8
    hStream: Annotated[CUstream, 0]
cuStreamDestroy_params: TypeAlias = struct_cuStreamDestroy_params_st
@c.record
class struct_cuEventDestroy_params_st(c.Struct):
    # Parameters of cuEventDestroy.
    SIZE = 8
    hEvent: Annotated[CUevent, 0]
cuEventDestroy_params: TypeAlias = struct_cuEventDestroy_params_st
@c.record
class struct_cuDevicePrimaryCtxRelease_params_st(c.Struct):
    # Parameters of cuDevicePrimaryCtxRelease.
    SIZE = 4
    dev: Annotated[CUdevice, 0]
cuDevicePrimaryCtxRelease_params: TypeAlias = struct_cuDevicePrimaryCtxRelease_params_st
@c.record
class struct_cuDevicePrimaryCtxReset_params_st(c.Struct):
    # Parameters of cuDevicePrimaryCtxReset.
    SIZE = 4
    dev: Annotated[CUdevice, 0]
cuDevicePrimaryCtxReset_params: TypeAlias = struct_cuDevicePrimaryCtxReset_params_st
@c.record
class struct_cuDevicePrimaryCtxSetFlags_params_st(c.Struct):
    # Parameters of cuDevicePrimaryCtxSetFlags.
    SIZE = 8
    dev: Annotated[CUdevice, 0]
    flags: Annotated[Annotated[int, ctypes.c_uint32], 4]
cuDevicePrimaryCtxSetFlags_params: TypeAlias = struct_cuDevicePrimaryCtxSetFlags_params_st
# ---------------------------------------------------------------------------
# NOTE(review): auto-generated parameter records for the *_v2 memcpy/memset
# driver entry points (host/device/array copies, 2D/3D descriptor copies,
# and their *Async variants, then 1D/2D memset).  Annotated[<type>, N] gives
# the byte offset N; SIZE is sizeof(struct).  Generated — do not hand-edit.
# ---------------------------------------------------------------------------
@c.record
class struct_cuMemcpyHtoD_v2_params_st(c.Struct):
    SIZE = 24
    dstDevice: Annotated[CUdeviceptr, 0]
    srcHost: Annotated[ctypes.c_void_p, 8]
    ByteCount: Annotated[size_t, 16]
cuMemcpyHtoD_v2_params: TypeAlias = struct_cuMemcpyHtoD_v2_params_st
@c.record
class struct_cuMemcpyDtoH_v2_params_st(c.Struct):
    SIZE = 24
    dstHost: Annotated[ctypes.c_void_p, 0]
    srcDevice: Annotated[CUdeviceptr, 8]
    ByteCount: Annotated[size_t, 16]
cuMemcpyDtoH_v2_params: TypeAlias = struct_cuMemcpyDtoH_v2_params_st
@c.record
class struct_cuMemcpyDtoD_v2_params_st(c.Struct):
    SIZE = 24
    dstDevice: Annotated[CUdeviceptr, 0]
    srcDevice: Annotated[CUdeviceptr, 8]
    ByteCount: Annotated[size_t, 16]
cuMemcpyDtoD_v2_params: TypeAlias = struct_cuMemcpyDtoD_v2_params_st
@c.record
class struct_cuMemcpyDtoA_v2_params_st(c.Struct):
    SIZE = 32
    dstArray: Annotated[CUarray, 0]
    dstOffset: Annotated[size_t, 8]
    srcDevice: Annotated[CUdeviceptr, 16]
    ByteCount: Annotated[size_t, 24]
cuMemcpyDtoA_v2_params: TypeAlias = struct_cuMemcpyDtoA_v2_params_st
@c.record
class struct_cuMemcpyAtoD_v2_params_st(c.Struct):
    SIZE = 32
    dstDevice: Annotated[CUdeviceptr, 0]
    srcArray: Annotated[CUarray, 8]
    srcOffset: Annotated[size_t, 16]
    ByteCount: Annotated[size_t, 24]
cuMemcpyAtoD_v2_params: TypeAlias = struct_cuMemcpyAtoD_v2_params_st
@c.record
class struct_cuMemcpyHtoA_v2_params_st(c.Struct):
    SIZE = 32
    dstArray: Annotated[CUarray, 0]
    dstOffset: Annotated[size_t, 8]
    srcHost: Annotated[ctypes.c_void_p, 16]
    ByteCount: Annotated[size_t, 24]
cuMemcpyHtoA_v2_params: TypeAlias = struct_cuMemcpyHtoA_v2_params_st
@c.record
class struct_cuMemcpyAtoH_v2_params_st(c.Struct):
    SIZE = 32
    dstHost: Annotated[ctypes.c_void_p, 0]
    srcArray: Annotated[CUarray, 8]
    srcOffset: Annotated[size_t, 16]
    ByteCount: Annotated[size_t, 24]
cuMemcpyAtoH_v2_params: TypeAlias = struct_cuMemcpyAtoH_v2_params_st
@c.record
class struct_cuMemcpyAtoA_v2_params_st(c.Struct):
    SIZE = 40
    dstArray: Annotated[CUarray, 0]
    dstOffset: Annotated[size_t, 8]
    srcArray: Annotated[CUarray, 16]
    srcOffset: Annotated[size_t, 24]
    ByteCount: Annotated[size_t, 32]
cuMemcpyAtoA_v2_params: TypeAlias = struct_cuMemcpyAtoA_v2_params_st
@c.record
class struct_cuMemcpyHtoAAsync_v2_params_st(c.Struct):
    SIZE = 40
    dstArray: Annotated[CUarray, 0]
    dstOffset: Annotated[size_t, 8]
    srcHost: Annotated[ctypes.c_void_p, 16]
    ByteCount: Annotated[size_t, 24]
    hStream: Annotated[CUstream, 32]
cuMemcpyHtoAAsync_v2_params: TypeAlias = struct_cuMemcpyHtoAAsync_v2_params_st
@c.record
class struct_cuMemcpyAtoHAsync_v2_params_st(c.Struct):
    SIZE = 40
    dstHost: Annotated[ctypes.c_void_p, 0]
    srcArray: Annotated[CUarray, 8]
    srcOffset: Annotated[size_t, 16]
    ByteCount: Annotated[size_t, 24]
    hStream: Annotated[CUstream, 32]
cuMemcpyAtoHAsync_v2_params: TypeAlias = struct_cuMemcpyAtoHAsync_v2_params_st
@c.record
class struct_cuMemcpy2D_v2_params_st(c.Struct):
    SIZE = 8
    pCopy: Annotated[c.POINTER[CUDA_MEMCPY2D], 0]
cuMemcpy2D_v2_params: TypeAlias = struct_cuMemcpy2D_v2_params_st
@c.record
class struct_cuMemcpy2DUnaligned_v2_params_st(c.Struct):
    SIZE = 8
    pCopy: Annotated[c.POINTER[CUDA_MEMCPY2D], 0]
cuMemcpy2DUnaligned_v2_params: TypeAlias = struct_cuMemcpy2DUnaligned_v2_params_st
@c.record
class struct_cuMemcpy3D_v2_params_st(c.Struct):
    SIZE = 8
    pCopy: Annotated[c.POINTER[CUDA_MEMCPY3D], 0]
cuMemcpy3D_v2_params: TypeAlias = struct_cuMemcpy3D_v2_params_st
@c.record
class struct_cuMemcpyHtoDAsync_v2_params_st(c.Struct):
    SIZE = 32
    dstDevice: Annotated[CUdeviceptr, 0]
    srcHost: Annotated[ctypes.c_void_p, 8]
    ByteCount: Annotated[size_t, 16]
    hStream: Annotated[CUstream, 24]
cuMemcpyHtoDAsync_v2_params: TypeAlias = struct_cuMemcpyHtoDAsync_v2_params_st
@c.record
class struct_cuMemcpyDtoHAsync_v2_params_st(c.Struct):
    SIZE = 32
    dstHost: Annotated[ctypes.c_void_p, 0]
    srcDevice: Annotated[CUdeviceptr, 8]
    ByteCount: Annotated[size_t, 16]
    hStream: Annotated[CUstream, 24]
cuMemcpyDtoHAsync_v2_params: TypeAlias = struct_cuMemcpyDtoHAsync_v2_params_st
@c.record
class struct_cuMemcpyDtoDAsync_v2_params_st(c.Struct):
    SIZE = 32
    dstDevice: Annotated[CUdeviceptr, 0]
    srcDevice: Annotated[CUdeviceptr, 8]
    ByteCount: Annotated[size_t, 16]
    hStream: Annotated[CUstream, 24]
cuMemcpyDtoDAsync_v2_params: TypeAlias = struct_cuMemcpyDtoDAsync_v2_params_st
@c.record
class struct_cuMemcpy2DAsync_v2_params_st(c.Struct):
    SIZE = 16
    pCopy: Annotated[c.POINTER[CUDA_MEMCPY2D], 0]
    hStream: Annotated[CUstream, 8]
cuMemcpy2DAsync_v2_params: TypeAlias = struct_cuMemcpy2DAsync_v2_params_st
@c.record
class struct_cuMemcpy3DAsync_v2_params_st(c.Struct):
    SIZE = 16
    pCopy: Annotated[c.POINTER[CUDA_MEMCPY3D], 0]
    hStream: Annotated[CUstream, 8]
cuMemcpy3DAsync_v2_params: TypeAlias = struct_cuMemcpy3DAsync_v2_params_st
@c.record
class struct_cuMemsetD8_v2_params_st(c.Struct):
    SIZE = 24
    dstDevice: Annotated[CUdeviceptr, 0]
    uc: Annotated[Annotated[int, ctypes.c_ubyte], 8]
    N: Annotated[size_t, 16]
cuMemsetD8_v2_params: TypeAlias = struct_cuMemsetD8_v2_params_st
@c.record
class struct_cuMemsetD16_v2_params_st(c.Struct):
    SIZE = 24
    dstDevice: Annotated[CUdeviceptr, 0]
    us: Annotated[Annotated[int, ctypes.c_uint16], 8]
    N: Annotated[size_t, 16]
cuMemsetD16_v2_params: TypeAlias = struct_cuMemsetD16_v2_params_st
@c.record
class struct_cuMemsetD32_v2_params_st(c.Struct):
    SIZE = 24
    dstDevice: Annotated[CUdeviceptr, 0]
    ui: Annotated[Annotated[int, ctypes.c_uint32], 8]
    N: Annotated[size_t, 16]
cuMemsetD32_v2_params: TypeAlias = struct_cuMemsetD32_v2_params_st
@c.record
class struct_cuMemsetD2D8_v2_params_st(c.Struct):
    SIZE = 40
    dstDevice: Annotated[CUdeviceptr, 0]
    dstPitch: Annotated[size_t, 8]
    uc: Annotated[Annotated[int, ctypes.c_ubyte], 16]
    Width: Annotated[size_t, 24]
    Height: Annotated[size_t, 32]
cuMemsetD2D8_v2_params: TypeAlias = struct_cuMemsetD2D8_v2_params_st
@c.record
class struct_cuMemsetD2D16_v2_params_st(c.Struct):
    SIZE = 40
    dstDevice: Annotated[CUdeviceptr, 0]
    dstPitch: Annotated[size_t, 8]
    us: Annotated[Annotated[int, ctypes.c_uint16], 16]
    Width: Annotated[size_t, 24]
    Height: Annotated[size_t, 32]
cuMemsetD2D16_v2_params: TypeAlias = struct_cuMemsetD2D16_v2_params_st
@c.record
class struct_cuMemsetD2D32_v2_params_st(c.Struct):
    SIZE = 40
    dstDevice: Annotated[CUdeviceptr, 0]
    dstPitch: Annotated[size_t, 8]
    ui: Annotated[Annotated[int, ctypes.c_uint32], 16]
    Width: Annotated[size_t, 24]
    Height: Annotated[size_t, 32]
cuMemsetD2D32_v2_params: TypeAlias = struct_cuMemsetD2D32_v2_params_st
# ---------------------------------------------------------------------------
# NOTE(review): auto-generated parameter records for unified-addressing
# memcpy, peer-to-peer copies, batched copy APIs, and the *Async memset
# family.  Annotated[<type>, N] gives the byte offset N; SIZE is
# sizeof(struct).  Generated — do not hand-edit.
# ---------------------------------------------------------------------------
@c.record
class struct_cuMemcpy_params_st(c.Struct):
    SIZE = 24
    dst: Annotated[CUdeviceptr, 0]
    src: Annotated[CUdeviceptr, 8]
    ByteCount: Annotated[size_t, 16]
cuMemcpy_params: TypeAlias = struct_cuMemcpy_params_st
@c.record
class struct_cuMemcpyAsync_params_st(c.Struct):
    SIZE = 32
    dst: Annotated[CUdeviceptr, 0]
    src: Annotated[CUdeviceptr, 8]
    ByteCount: Annotated[size_t, 16]
    hStream: Annotated[CUstream, 24]
cuMemcpyAsync_params: TypeAlias = struct_cuMemcpyAsync_params_st
@c.record
class struct_cuMemcpyPeer_params_st(c.Struct):
    SIZE = 40
    dstDevice: Annotated[CUdeviceptr, 0]
    dstContext: Annotated[CUcontext, 8]
    srcDevice: Annotated[CUdeviceptr, 16]
    srcContext: Annotated[CUcontext, 24]
    ByteCount: Annotated[size_t, 32]
cuMemcpyPeer_params: TypeAlias = struct_cuMemcpyPeer_params_st
@c.record
class struct_cuMemcpyPeerAsync_params_st(c.Struct):
    SIZE = 48
    dstDevice: Annotated[CUdeviceptr, 0]
    dstContext: Annotated[CUcontext, 8]
    srcDevice: Annotated[CUdeviceptr, 16]
    srcContext: Annotated[CUcontext, 24]
    ByteCount: Annotated[size_t, 32]
    hStream: Annotated[CUstream, 40]
cuMemcpyPeerAsync_params: TypeAlias = struct_cuMemcpyPeerAsync_params_st
@c.record
class struct_cuMemcpy3DPeer_params_st(c.Struct):
    SIZE = 8
    pCopy: Annotated[c.POINTER[CUDA_MEMCPY3D_PEER], 0]
cuMemcpy3DPeer_params: TypeAlias = struct_cuMemcpy3DPeer_params_st
@c.record
class struct_cuMemcpy3DPeerAsync_params_st(c.Struct):
    SIZE = 16
    pCopy: Annotated[c.POINTER[CUDA_MEMCPY3D_PEER], 0]
    hStream: Annotated[CUstream, 8]
cuMemcpy3DPeerAsync_params: TypeAlias = struct_cuMemcpy3DPeerAsync_params_st
@c.record
class struct_cuMemcpyBatchAsync_params_st(c.Struct):
    SIZE = 72
    dsts: Annotated[c.POINTER[CUdeviceptr], 0]
    srcs: Annotated[c.POINTER[CUdeviceptr], 8]
    sizes: Annotated[c.POINTER[size_t], 16]
    count: Annotated[size_t, 24]
    attrs: Annotated[c.POINTER[CUmemcpyAttributes], 32]
    attrsIdxs: Annotated[c.POINTER[size_t], 40]
    numAttrs: Annotated[size_t, 48]
    failIdx: Annotated[c.POINTER[size_t], 56]
    hStream: Annotated[CUstream, 64]
cuMemcpyBatchAsync_params: TypeAlias = struct_cuMemcpyBatchAsync_params_st
@c.record
class struct_cuMemcpy3DBatchAsync_params_st(c.Struct):
    SIZE = 40
    numOps: Annotated[size_t, 0]
    opList: Annotated[c.POINTER[CUDA_MEMCPY3D_BATCH_OP], 8]
    failIdx: Annotated[c.POINTER[size_t], 16]
    flags: Annotated[Annotated[int, ctypes.c_uint64], 24]
    hStream: Annotated[CUstream, 32]
cuMemcpy3DBatchAsync_params: TypeAlias = struct_cuMemcpy3DBatchAsync_params_st
@c.record
class struct_cuMemsetD8Async_params_st(c.Struct):
    SIZE = 32
    dstDevice: Annotated[CUdeviceptr, 0]
    uc: Annotated[Annotated[int, ctypes.c_ubyte], 8]
    N: Annotated[size_t, 16]
    hStream: Annotated[CUstream, 24]
cuMemsetD8Async_params: TypeAlias = struct_cuMemsetD8Async_params_st
@c.record
class struct_cuMemsetD16Async_params_st(c.Struct):
    SIZE = 32
    dstDevice: Annotated[CUdeviceptr, 0]
    us: Annotated[Annotated[int, ctypes.c_uint16], 8]
    N: Annotated[size_t, 16]
    hStream: Annotated[CUstream, 24]
cuMemsetD16Async_params: TypeAlias = struct_cuMemsetD16Async_params_st
@c.record
class struct_cuMemsetD32Async_params_st(c.Struct):
    SIZE = 32
    dstDevice: Annotated[CUdeviceptr, 0]
    ui: Annotated[Annotated[int, ctypes.c_uint32], 8]
    N: Annotated[size_t, 16]
    hStream: Annotated[CUstream, 24]
cuMemsetD32Async_params: TypeAlias = struct_cuMemsetD32Async_params_st
@c.record
class struct_cuMemsetD2D8Async_params_st(c.Struct):
    SIZE = 48
    dstDevice: Annotated[CUdeviceptr, 0]
    dstPitch: Annotated[size_t, 8]
    uc: Annotated[Annotated[int, ctypes.c_ubyte], 16]
    Width: Annotated[size_t, 24]
    Height: Annotated[size_t, 32]
    hStream: Annotated[CUstream, 40]
cuMemsetD2D8Async_params: TypeAlias = struct_cuMemsetD2D8Async_params_st
@c.record
class struct_cuMemsetD2D16Async_params_st(c.Struct):
    SIZE = 48
    dstDevice: Annotated[CUdeviceptr, 0]
    dstPitch: Annotated[size_t, 8]
    us: Annotated[Annotated[int, ctypes.c_uint16], 16]
    Width: Annotated[size_t, 24]
    Height: Annotated[size_t, 32]
    hStream: Annotated[CUstream, 40]
cuMemsetD2D16Async_params: TypeAlias = struct_cuMemsetD2D16Async_params_st
@c.record
class struct_cuMemsetD2D32Async_params_st(c.Struct):
    SIZE = 48
    dstDevice: Annotated[CUdeviceptr, 0]
    dstPitch: Annotated[size_t, 8]
    ui: Annotated[Annotated[int, ctypes.c_uint32], 16]
    Width: Annotated[size_t, 24]
    Height: Annotated[size_t, 32]
    hStream: Annotated[CUstream, 40]
cuMemsetD2D32Async_params: TypeAlias = struct_cuMemsetD2D32Async_params_st
# ---------------------------------------------------------------------------
# NOTE(review): auto-generated parameter records for stream introspection
# (priority/id/flags/device/context), stream synchronization primitives,
# event recording, kernel launches, host-function launch, and graphics
# resource map/unmap.  Annotated[<type>, N] gives the byte offset N; SIZE
# is sizeof(struct).  Generated — do not hand-edit.
# ---------------------------------------------------------------------------
@c.record
class struct_cuStreamGetPriority_params_st(c.Struct):
    SIZE = 16
    hStream: Annotated[CUstream, 0]
    priority: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 8]
cuStreamGetPriority_params: TypeAlias = struct_cuStreamGetPriority_params_st
@c.record
class struct_cuStreamGetId_params_st(c.Struct):
    SIZE = 16
    hStream: Annotated[CUstream, 0]
    streamId: Annotated[c.POINTER[Annotated[int, ctypes.c_uint64]], 8]
cuStreamGetId_params: TypeAlias = struct_cuStreamGetId_params_st
@c.record
class struct_cuStreamGetFlags_params_st(c.Struct):
    SIZE = 16
    hStream: Annotated[CUstream, 0]
    flags: Annotated[c.POINTER[Annotated[int, ctypes.c_uint32]], 8]
cuStreamGetFlags_params: TypeAlias = struct_cuStreamGetFlags_params_st
@c.record
class struct_cuStreamGetDevice_params_st(c.Struct):
    SIZE = 16
    hStream: Annotated[CUstream, 0]
    device: Annotated[c.POINTER[CUdevice], 8]
cuStreamGetDevice_params: TypeAlias = struct_cuStreamGetDevice_params_st
@c.record
class struct_cuStreamGetCtx_params_st(c.Struct):
    SIZE = 16
    hStream: Annotated[CUstream, 0]
    pctx: Annotated[c.POINTER[CUcontext], 8]
cuStreamGetCtx_params: TypeAlias = struct_cuStreamGetCtx_params_st
@c.record
class struct_cuStreamGetCtx_v2_params_st(c.Struct):
    SIZE = 24
    hStream: Annotated[CUstream, 0]
    pCtx: Annotated[c.POINTER[CUcontext], 8]
    pGreenCtx: Annotated[c.POINTER[CUgreenCtx], 16]
cuStreamGetCtx_v2_params: TypeAlias = struct_cuStreamGetCtx_v2_params_st
@c.record
class struct_cuStreamWaitEvent_params_st(c.Struct):
    SIZE = 24
    hStream: Annotated[CUstream, 0]
    hEvent: Annotated[CUevent, 8]
    Flags: Annotated[Annotated[int, ctypes.c_uint32], 16]
cuStreamWaitEvent_params: TypeAlias = struct_cuStreamWaitEvent_params_st
@c.record
class struct_cuStreamAddCallback_params_st(c.Struct):
    SIZE = 32
    hStream: Annotated[CUstream, 0]
    callback: Annotated[CUstreamCallback, 8]
    userData: Annotated[ctypes.c_void_p, 16]
    flags: Annotated[Annotated[int, ctypes.c_uint32], 24]
cuStreamAddCallback_params: TypeAlias = struct_cuStreamAddCallback_params_st
@c.record
class struct_cuStreamAttachMemAsync_params_st(c.Struct):
    SIZE = 32
    hStream: Annotated[CUstream, 0]
    dptr: Annotated[CUdeviceptr, 8]
    length: Annotated[size_t, 16]
    flags: Annotated[Annotated[int, ctypes.c_uint32], 24]
cuStreamAttachMemAsync_params: TypeAlias = struct_cuStreamAttachMemAsync_params_st
@c.record
class struct_cuStreamQuery_params_st(c.Struct):
    SIZE = 8
    hStream: Annotated[CUstream, 0]
cuStreamQuery_params: TypeAlias = struct_cuStreamQuery_params_st
@c.record
class struct_cuStreamSynchronize_params_st(c.Struct):
    SIZE = 8
    hStream: Annotated[CUstream, 0]
cuStreamSynchronize_params: TypeAlias = struct_cuStreamSynchronize_params_st
@c.record
class struct_cuEventRecord_params_st(c.Struct):
    SIZE = 16
    hEvent: Annotated[CUevent, 0]
    hStream: Annotated[CUstream, 8]
cuEventRecord_params: TypeAlias = struct_cuEventRecord_params_st
@c.record
class struct_cuEventRecordWithFlags_params_st(c.Struct):
    SIZE = 24
    hEvent: Annotated[CUevent, 0]
    hStream: Annotated[CUstream, 8]
    flags: Annotated[Annotated[int, ctypes.c_uint32], 16]
cuEventRecordWithFlags_params: TypeAlias = struct_cuEventRecordWithFlags_params_st
@c.record
class struct_cuLaunchKernel_params_st(c.Struct):
    SIZE = 64
    f: Annotated[CUfunction, 0]
    gridDimX: Annotated[Annotated[int, ctypes.c_uint32], 8]
    gridDimY: Annotated[Annotated[int, ctypes.c_uint32], 12]
    gridDimZ: Annotated[Annotated[int, ctypes.c_uint32], 16]
    blockDimX: Annotated[Annotated[int, ctypes.c_uint32], 20]
    blockDimY: Annotated[Annotated[int, ctypes.c_uint32], 24]
    blockDimZ: Annotated[Annotated[int, ctypes.c_uint32], 28]
    sharedMemBytes: Annotated[Annotated[int, ctypes.c_uint32], 32]
    # hStream sits at 40, not 36: 4 bytes of alignment padding precede the
    # 8-byte handle in the C layout.
    hStream: Annotated[CUstream, 40]
    kernelParams: Annotated[c.POINTER[ctypes.c_void_p], 48]
    extra: Annotated[c.POINTER[ctypes.c_void_p], 56]
cuLaunchKernel_params: TypeAlias = struct_cuLaunchKernel_params_st
@c.record
class struct_cuLaunchKernelEx_params_st(c.Struct):
    SIZE = 32
    config: Annotated[c.POINTER[CUlaunchConfig], 0]
    f: Annotated[CUfunction, 8]
    kernelParams: Annotated[c.POINTER[ctypes.c_void_p], 16]
    extra: Annotated[c.POINTER[ctypes.c_void_p], 24]
cuLaunchKernelEx_params: TypeAlias = struct_cuLaunchKernelEx_params_st
@c.record
class struct_cuLaunchHostFunc_params_st(c.Struct):
    SIZE = 24
    hStream: Annotated[CUstream, 0]
    fn: Annotated[CUhostFn, 8]
    userData: Annotated[ctypes.c_void_p, 16]
cuLaunchHostFunc_params: TypeAlias = struct_cuLaunchHostFunc_params_st
@c.record
class struct_cuGraphicsMapResources_params_st(c.Struct):
    SIZE = 24
    count: Annotated[Annotated[int, ctypes.c_uint32], 0]
    resources: Annotated[c.POINTER[CUgraphicsResource], 8]
    hStream: Annotated[CUstream, 16]
cuGraphicsMapResources_params: TypeAlias = struct_cuGraphicsMapResources_params_st
@c.record
class struct_cuGraphicsUnmapResources_params_st(c.Struct):
    SIZE = 24
    count: Annotated[Annotated[int, ctypes.c_uint32], 0]
    resources: Annotated[c.POINTER[CUgraphicsResource], 8]
    hStream: Annotated[CUstream, 16]
cuGraphicsUnmapResources_params: TypeAlias = struct_cuGraphicsUnmapResources_params_st
# ---------------------------------------------------------------------------
# NOTE(review): auto-generated parameter records for the stream memory
# operations (write/wait 32- and 64-bit values, batched mem-ops) in three
# generations: the original entry points, their _ptsz (per-thread default
# stream) variants, and the _v2 entry points.  All three share identical
# layouts.  Annotated[<type>, N] gives the byte offset N; SIZE is
# sizeof(struct) — note the 32-bit variants pack 'flags' at offset 20,
# directly after the 4-byte value.  Generated — do not hand-edit.
# ---------------------------------------------------------------------------
@c.record
class struct_cuStreamWriteValue32_params_st(c.Struct):
    SIZE = 24
    stream: Annotated[CUstream, 0]
    addr: Annotated[CUdeviceptr, 8]
    value: Annotated[cuuint32_t, 16]
    flags: Annotated[Annotated[int, ctypes.c_uint32], 20]
cuStreamWriteValue32_params: TypeAlias = struct_cuStreamWriteValue32_params_st
@c.record
class struct_cuStreamWaitValue32_params_st(c.Struct):
    SIZE = 24
    stream: Annotated[CUstream, 0]
    addr: Annotated[CUdeviceptr, 8]
    value: Annotated[cuuint32_t, 16]
    flags: Annotated[Annotated[int, ctypes.c_uint32], 20]
cuStreamWaitValue32_params: TypeAlias = struct_cuStreamWaitValue32_params_st
@c.record
class struct_cuStreamWriteValue64_params_st(c.Struct):
    SIZE = 32
    stream: Annotated[CUstream, 0]
    addr: Annotated[CUdeviceptr, 8]
    value: Annotated[cuuint64_t, 16]
    flags: Annotated[Annotated[int, ctypes.c_uint32], 24]
cuStreamWriteValue64_params: TypeAlias = struct_cuStreamWriteValue64_params_st
@c.record
class struct_cuStreamWaitValue64_params_st(c.Struct):
    SIZE = 32
    stream: Annotated[CUstream, 0]
    addr: Annotated[CUdeviceptr, 8]
    value: Annotated[cuuint64_t, 16]
    flags: Annotated[Annotated[int, ctypes.c_uint32], 24]
cuStreamWaitValue64_params: TypeAlias = struct_cuStreamWaitValue64_params_st
@c.record
class struct_cuStreamBatchMemOp_params_st(c.Struct):
    SIZE = 32
    stream: Annotated[CUstream, 0]
    count: Annotated[Annotated[int, ctypes.c_uint32], 8]
    paramArray: Annotated[c.POINTER[CUstreamBatchMemOpParams], 16]
    flags: Annotated[Annotated[int, ctypes.c_uint32], 24]
cuStreamBatchMemOp_params: TypeAlias = struct_cuStreamBatchMemOp_params_st
@c.record
class struct_cuStreamWriteValue32_ptsz_params_st(c.Struct):
    SIZE = 24
    stream: Annotated[CUstream, 0]
    addr: Annotated[CUdeviceptr, 8]
    value: Annotated[cuuint32_t, 16]
    flags: Annotated[Annotated[int, ctypes.c_uint32], 20]
cuStreamWriteValue32_ptsz_params: TypeAlias = struct_cuStreamWriteValue32_ptsz_params_st
@c.record
class struct_cuStreamWaitValue32_ptsz_params_st(c.Struct):
    SIZE = 24
    stream: Annotated[CUstream, 0]
    addr: Annotated[CUdeviceptr, 8]
    value: Annotated[cuuint32_t, 16]
    flags: Annotated[Annotated[int, ctypes.c_uint32], 20]
cuStreamWaitValue32_ptsz_params: TypeAlias = struct_cuStreamWaitValue32_ptsz_params_st
@c.record
class struct_cuStreamWriteValue64_ptsz_params_st(c.Struct):
    SIZE = 32
    stream: Annotated[CUstream, 0]
    addr: Annotated[CUdeviceptr, 8]
    value: Annotated[cuuint64_t, 16]
    flags: Annotated[Annotated[int, ctypes.c_uint32], 24]
cuStreamWriteValue64_ptsz_params: TypeAlias = struct_cuStreamWriteValue64_ptsz_params_st
@c.record
class struct_cuStreamWaitValue64_ptsz_params_st(c.Struct):
    SIZE = 32
    stream: Annotated[CUstream, 0]
    addr: Annotated[CUdeviceptr, 8]
    value: Annotated[cuuint64_t, 16]
    flags: Annotated[Annotated[int, ctypes.c_uint32], 24]
cuStreamWaitValue64_ptsz_params: TypeAlias = struct_cuStreamWaitValue64_ptsz_params_st
@c.record
class struct_cuStreamBatchMemOp_ptsz_params_st(c.Struct):
    SIZE = 32
    stream: Annotated[CUstream, 0]
    count: Annotated[Annotated[int, ctypes.c_uint32], 8]
    paramArray: Annotated[c.POINTER[CUstreamBatchMemOpParams], 16]
    flags: Annotated[Annotated[int, ctypes.c_uint32], 24]
cuStreamBatchMemOp_ptsz_params: TypeAlias = struct_cuStreamBatchMemOp_ptsz_params_st
@c.record
class struct_cuStreamWriteValue32_v2_params_st(c.Struct):
    SIZE = 24
    stream: Annotated[CUstream, 0]
    addr: Annotated[CUdeviceptr, 8]
    value: Annotated[cuuint32_t, 16]
    flags: Annotated[Annotated[int, ctypes.c_uint32], 20]
cuStreamWriteValue32_v2_params: TypeAlias = struct_cuStreamWriteValue32_v2_params_st
@c.record
class struct_cuStreamWaitValue32_v2_params_st(c.Struct):
    SIZE = 24
    stream: Annotated[CUstream, 0]
    addr: Annotated[CUdeviceptr, 8]
    value: Annotated[cuuint32_t, 16]
    flags: Annotated[Annotated[int, ctypes.c_uint32], 20]
cuStreamWaitValue32_v2_params: TypeAlias = struct_cuStreamWaitValue32_v2_params_st
@c.record
class struct_cuStreamWriteValue64_v2_params_st(c.Struct):
    SIZE = 32
    stream: Annotated[CUstream, 0]
    addr: Annotated[CUdeviceptr, 8]
    value: Annotated[cuuint64_t, 16]
    flags: Annotated[Annotated[int, ctypes.c_uint32], 24]
cuStreamWriteValue64_v2_params: TypeAlias = struct_cuStreamWriteValue64_v2_params_st
@c.record
class struct_cuStreamWaitValue64_v2_params_st(c.Struct):
    SIZE = 32
    stream: Annotated[CUstream, 0]
    addr: Annotated[CUdeviceptr, 8]
    value: Annotated[cuuint64_t, 16]
    flags: Annotated[Annotated[int, ctypes.c_uint32], 24]
cuStreamWaitValue64_v2_params: TypeAlias = struct_cuStreamWaitValue64_v2_params_st
@c.record
class struct_cuStreamBatchMemOp_v2_params_st(c.Struct):
    SIZE = 32
    stream: Annotated[CUstream, 0]
    count: Annotated[Annotated[int, ctypes.c_uint32], 8]
    paramArray: Annotated[c.POINTER[CUstreamBatchMemOpParams], 16]
    flags: Annotated[Annotated[int, ctypes.c_uint32], 24]
cuStreamBatchMemOp_v2_params: TypeAlias = struct_cuStreamBatchMemOp_v2_params_st
# ---------------------------------------------------------------------------
# NOTE(review): auto-generated parameter records for managed-memory
# prefetch, cooperative kernel launch, external semaphore signal/wait, and
# the stream-capture family (begin/end/query across API versions).
# Annotated[<type>, N] gives the byte offset N; SIZE is sizeof(struct).
# Generated — do not hand-edit.
# ---------------------------------------------------------------------------
@c.record
class struct_cuMemPrefetchAsync_params_st(c.Struct):
    SIZE = 32
    devPtr: Annotated[CUdeviceptr, 0]
    count: Annotated[size_t, 8]
    dstDevice: Annotated[CUdevice, 16]
    hStream: Annotated[CUstream, 24]
cuMemPrefetchAsync_params: TypeAlias = struct_cuMemPrefetchAsync_params_st
@c.record
class struct_cuMemPrefetchAsync_v2_params_st(c.Struct):
    SIZE = 40
    devPtr: Annotated[CUdeviceptr, 0]
    count: Annotated[size_t, 8]
    # _v2 replaces the CUdevice destination with a CUmemLocation value.
    location: Annotated[CUmemLocation, 16]
    flags: Annotated[Annotated[int, ctypes.c_uint32], 24]
    hStream: Annotated[CUstream, 32]
cuMemPrefetchAsync_v2_params: TypeAlias = struct_cuMemPrefetchAsync_v2_params_st
@c.record
class struct_cuLaunchCooperativeKernel_params_st(c.Struct):
    SIZE = 56
    f: Annotated[CUfunction, 0]
    gridDimX: Annotated[Annotated[int, ctypes.c_uint32], 8]
    gridDimY: Annotated[Annotated[int, ctypes.c_uint32], 12]
    gridDimZ: Annotated[Annotated[int, ctypes.c_uint32], 16]
    blockDimX: Annotated[Annotated[int, ctypes.c_uint32], 20]
    blockDimY: Annotated[Annotated[int, ctypes.c_uint32], 24]
    blockDimZ: Annotated[Annotated[int, ctypes.c_uint32], 28]
    sharedMemBytes: Annotated[Annotated[int, ctypes.c_uint32], 32]
    hStream: Annotated[CUstream, 40]
    kernelParams: Annotated[c.POINTER[ctypes.c_void_p], 48]
cuLaunchCooperativeKernel_params: TypeAlias = struct_cuLaunchCooperativeKernel_params_st
@c.record
class struct_cuSignalExternalSemaphoresAsync_params_st(c.Struct):
    SIZE = 32
    extSemArray: Annotated[c.POINTER[CUexternalSemaphore], 0]
    paramsArray: Annotated[c.POINTER[CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS], 8]
    numExtSems: Annotated[Annotated[int, ctypes.c_uint32], 16]
    stream: Annotated[CUstream, 24]
cuSignalExternalSemaphoresAsync_params: TypeAlias = struct_cuSignalExternalSemaphoresAsync_params_st
@c.record
class struct_cuWaitExternalSemaphoresAsync_params_st(c.Struct):
    SIZE = 32
    extSemArray: Annotated[c.POINTER[CUexternalSemaphore], 0]
    paramsArray: Annotated[c.POINTER[CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS], 8]
    numExtSems: Annotated[Annotated[int, ctypes.c_uint32], 16]
    stream: Annotated[CUstream, 24]
cuWaitExternalSemaphoresAsync_params: TypeAlias = struct_cuWaitExternalSemaphoresAsync_params_st
@c.record
class struct_cuStreamBeginCapture_params_st(c.Struct):
    SIZE = 8
    hStream: Annotated[CUstream, 0]
cuStreamBeginCapture_params: TypeAlias = struct_cuStreamBeginCapture_params_st
@c.record
class struct_cuStreamBeginCapture_ptsz_params_st(c.Struct):
    SIZE = 8
    hStream: Annotated[CUstream, 0]
cuStreamBeginCapture_ptsz_params: TypeAlias = struct_cuStreamBeginCapture_ptsz_params_st
@c.record
class struct_cuStreamBeginCapture_v2_params_st(c.Struct):
    SIZE = 16
    hStream: Annotated[CUstream, 0]
    mode: Annotated[CUstreamCaptureMode, 8]
cuStreamBeginCapture_v2_params: TypeAlias = struct_cuStreamBeginCapture_v2_params_st
@c.record
class struct_cuStreamBeginCaptureToGraph_params_st(c.Struct):
    SIZE = 48
    hStream: Annotated[CUstream, 0]
    hGraph: Annotated[CUgraph, 8]
    dependencies: Annotated[c.POINTER[CUgraphNode], 16]
    dependencyData: Annotated[c.POINTER[CUgraphEdgeData], 24]
    numDependencies: Annotated[size_t, 32]
    mode: Annotated[CUstreamCaptureMode, 40]
cuStreamBeginCaptureToGraph_params: TypeAlias = struct_cuStreamBeginCaptureToGraph_params_st
@c.record
class struct_cuStreamEndCapture_params_st(c.Struct):
    SIZE = 16
    hStream: Annotated[CUstream, 0]
    phGraph: Annotated[c.POINTER[CUgraph], 8]
cuStreamEndCapture_params: TypeAlias = struct_cuStreamEndCapture_params_st
@c.record
class struct_cuStreamIsCapturing_params_st(c.Struct):
    SIZE = 16
    hStream: Annotated[CUstream, 0]
    captureStatus: Annotated[c.POINTER[CUstreamCaptureStatus], 8]
cuStreamIsCapturing_params: TypeAlias = struct_cuStreamIsCapturing_params_st
@c.record
class struct_cuStreamGetCaptureInfo_params_st(c.Struct):
    SIZE = 24
    hStream: Annotated[CUstream, 0]
    captureStatus_out: Annotated[c.POINTER[CUstreamCaptureStatus], 8]
    id_out: Annotated[c.POINTER[cuuint64_t], 16]
cuStreamGetCaptureInfo_params: TypeAlias = struct_cuStreamGetCaptureInfo_params_st
@c.record
class struct_cuStreamGetCaptureInfo_ptsz_params_st(c.Struct):
    SIZE = 24
    hStream: Annotated[CUstream, 0]
    captureStatus_out: Annotated[c.POINTER[CUstreamCaptureStatus], 8]
    id_out: Annotated[c.POINTER[cuuint64_t], 16]
cuStreamGetCaptureInfo_ptsz_params: TypeAlias = struct_cuStreamGetCaptureInfo_ptsz_params_st
@c.record
class struct_cuStreamGetCaptureInfo_v2_params_st(c.Struct):
    SIZE = 48
    hStream: Annotated[CUstream, 0]
    captureStatus_out: Annotated[c.POINTER[CUstreamCaptureStatus], 8]
    id_out: Annotated[c.POINTER[cuuint64_t], 16]
    graph_out: Annotated[c.POINTER[CUgraph], 24]
    dependencies_out: Annotated[c.POINTER[c.POINTER[CUgraphNode]], 32]
    numDependencies_out: Annotated[c.POINTER[size_t], 40]
cuStreamGetCaptureInfo_v2_params: TypeAlias = struct_cuStreamGetCaptureInfo_v2_params_st
@c.record
class struct_cuStreamGetCaptureInfo_v3_params_st(c.Struct):
    SIZE = 56
    hStream: Annotated[CUstream, 0]
    captureStatus_out: Annotated[c.POINTER[CUstreamCaptureStatus], 8]
    id_out: Annotated[c.POINTER[cuuint64_t], 16]
    graph_out: Annotated[c.POINTER[CUgraph], 24]
    dependencies_out: Annotated[c.POINTER[c.POINTER[CUgraphNode]], 32]
    edgeData_out: Annotated[c.POINTER[c.POINTER[CUgraphEdgeData]], 40]
    numDependencies_out: Annotated[c.POINTER[size_t], 48]
cuStreamGetCaptureInfo_v3_params: TypeAlias = struct_cuStreamGetCaptureInfo_v3_params_st
@c.record
class struct_cuGraphAddKernelNode_params_st(c.Struct):
SIZE = 40
phGraphNode: Annotated[c.POINTER[CUgraphNode], 0]
hGraph: Annotated[CUgraph, 8]
dependencies: Annotated[c.POINTER[CUgraphNode], 16]
numDependencies: Annotated[size_t, 24]
nodeParams: Annotated[c.POINTER[CUDA_KERNEL_NODE_PARAMS_v1], 32]
@c.record
class struct_CUDA_KERNEL_NODE_PARAMS_st(c.Struct):
SIZE = 56
func: Annotated[CUfunction, 0]
gridDimX: Annotated[Annotated[int, ctypes.c_uint32], 8]
gridDimY: Annotated[Annotated[int, ctypes.c_uint32], 12]
gridDimZ: Annotated[Annotated[int, ctypes.c_uint32], 16]
blockDimX: Annotated[Annotated[int, ctypes.c_uint32], 20]
blockDimY: Annotated[Annotated[int, ctypes.c_uint32], 24]
blockDimZ: Annotated[Annotated[int, ctypes.c_uint32], 28]
sharedMemBytes: Annotated[Annotated[int, ctypes.c_uint32], 32]
kernelParams: Annotated[c.POINTER[ctypes.c_void_p], 40]
extra: Annotated[c.POINTER[ctypes.c_void_p], 48]
CUDA_KERNEL_NODE_PARAMS_v1: TypeAlias = struct_CUDA_KERNEL_NODE_PARAMS_st
cuGraphAddKernelNode_params: TypeAlias = struct_cuGraphAddKernelNode_params_st
@c.record
class struct_cuGraphKernelNodeGetParams_params_st(c.Struct):
SIZE = 16
hNode: Annotated[CUgraphNode, 0]
nodeParams: Annotated[c.POINTER[CUDA_KERNEL_NODE_PARAMS_v1], 8]
cuGraphKernelNodeGetParams_params: TypeAlias = struct_cuGraphKernelNodeGetParams_params_st
@c.record
class struct_cuGraphKernelNodeSetParams_params_st(c.Struct):
SIZE = 16
hNode: Annotated[CUgraphNode, 0]
nodeParams: Annotated[c.POINTER[CUDA_KERNEL_NODE_PARAMS_v1], 8]
cuGraphKernelNodeSetParams_params: TypeAlias = struct_cuGraphKernelNodeSetParams_params_st
@c.record
class struct_cuGraphExecKernelNodeSetParams_params_st(c.Struct):
SIZE = 24
hGraphExec: Annotated[CUgraphExec, 0]
hNode: Annotated[CUgraphNode, 8]
nodeParams: Annotated[c.POINTER[CUDA_KERNEL_NODE_PARAMS_v1], 16]
cuGraphExecKernelNodeSetParams_params: TypeAlias = struct_cuGraphExecKernelNodeSetParams_params_st
@c.record
class struct_cuGraphInstantiateWithParams_params_st(c.Struct):
SIZE = 24
phGraphExec: Annotated[c.POINTER[CUgraphExec], 0]
hGraph: Annotated[CUgraph, 8]
instantiateParams: Annotated[c.POINTER[CUDA_GRAPH_INSTANTIATE_PARAMS], 16]
cuGraphInstantiateWithParams_params: TypeAlias = struct_cuGraphInstantiateWithParams_params_st
@c.record
class struct_cuGraphExecUpdate_params_st(c.Struct):
SIZE = 32
hGraphExec: Annotated[CUgraphExec, 0]
hGraph: Annotated[CUgraph, 8]
hErrorNode_out: Annotated[c.POINTER[CUgraphNode], 16]
updateResult_out: Annotated[c.POINTER[CUgraphExecUpdateResult], 24]
cuGraphExecUpdate_params: TypeAlias = struct_cuGraphExecUpdate_params_st
@c.record
class struct_cuGraphUpload_params_st(c.Struct):
SIZE = 16
hGraph: Annotated[CUgraphExec, 0]
hStream: Annotated[CUstream, 8]
cuGraphUpload_params: TypeAlias = struct_cuGraphUpload_params_st
@c.record
class struct_cuGraphLaunch_params_st(c.Struct):
SIZE = 16
hGraph: Annotated[CUgraphExec, 0]
hStream: Annotated[CUstream, 8]
cuGraphLaunch_params: TypeAlias = struct_cuGraphLaunch_params_st
@c.record
class struct_cuStreamCopyAttributes_params_st(c.Struct):
SIZE = 16
dstStream: Annotated[CUstream, 0]
srcStream: Annotated[CUstream, 8]
cuStreamCopyAttributes_params: TypeAlias = struct_cuStreamCopyAttributes_params_st
@c.record
class struct_cuStreamGetAttribute_params_st(c.Struct):
SIZE = 24
hStream: Annotated[CUstream, 0]
attr: Annotated[CUstreamAttrID, 8]
value: Annotated[c.POINTER[CUstreamAttrValue], 16]
cuStreamGetAttribute_params: TypeAlias = struct_cuStreamGetAttribute_params_st
# Parameter-record structs for CUDA driver-API calls: one @c.record per entry
# point, one field per C argument, each Annotated with its byte offset inside
# the packed argument block (SIZE = total size in bytes).  The TypeAlias after
# each class mirrors the typedef name used by the CUDA headers.
@c.record
class struct_cuStreamSetAttribute_params_st(c.Struct):
  SIZE = 24
  hStream: Annotated[CUstream, 0]
  attr: Annotated[CUstreamAttrID, 8]
  param: Annotated[c.POINTER[CUstreamAttrValue], 16]
cuStreamSetAttribute_params: TypeAlias = struct_cuStreamSetAttribute_params_st
@c.record
class struct_cuIpcOpenMemHandle_params_st(c.Struct):
  SIZE = 80
  pdptr: Annotated[c.POINTER[CUdeviceptr], 0]
  # CUipcMemHandle is 64 bytes, hence the jump from offset 8 to 72 for Flags.
  handle: Annotated[CUipcMemHandle, 8]
  Flags: Annotated[Annotated[int, ctypes.c_uint32], 72]
cuIpcOpenMemHandle_params: TypeAlias = struct_cuIpcOpenMemHandle_params_st
@c.record
class struct_cuGraphInstantiate_params_st(c.Struct):
  SIZE = 40
  phGraphExec: Annotated[c.POINTER[CUgraphExec], 0]
  hGraph: Annotated[CUgraph, 8]
  phErrorNode: Annotated[c.POINTER[CUgraphNode], 16]
  logBuffer: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 24]
  bufferSize: Annotated[size_t, 32]
cuGraphInstantiate_params: TypeAlias = struct_cuGraphInstantiate_params_st
# v2 has the same layout as v1; the CUDA headers declare both versions.
@c.record
class struct_cuGraphInstantiate_v2_params_st(c.Struct):
  SIZE = 40
  phGraphExec: Annotated[c.POINTER[CUgraphExec], 0]
  hGraph: Annotated[CUgraph, 8]
  phErrorNode: Annotated[c.POINTER[CUgraphNode], 16]
  logBuffer: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 24]
  bufferSize: Annotated[size_t, 32]
cuGraphInstantiate_v2_params: TypeAlias = struct_cuGraphInstantiate_v2_params_st
@c.record
class struct_cuMemMapArrayAsync_params_st(c.Struct):
  SIZE = 24
  mapInfoList: Annotated[c.POINTER[CUarrayMapInfo], 0]
  count: Annotated[Annotated[int, ctypes.c_uint32], 8]
  hStream: Annotated[CUstream, 16]
cuMemMapArrayAsync_params: TypeAlias = struct_cuMemMapArrayAsync_params_st
@c.record
class struct_cuMemFreeAsync_params_st(c.Struct):
  SIZE = 16
  dptr: Annotated[CUdeviceptr, 0]
  hStream: Annotated[CUstream, 8]
cuMemFreeAsync_params: TypeAlias = struct_cuMemFreeAsync_params_st
@c.record
class struct_cuMemAllocAsync_params_st(c.Struct):
  SIZE = 24
  dptr: Annotated[c.POINTER[CUdeviceptr], 0]
  bytesize: Annotated[size_t, 8]
  hStream: Annotated[CUstream, 16]
cuMemAllocAsync_params: TypeAlias = struct_cuMemAllocAsync_params_st
# Param records for pool allocation, stream-capture dependency updates, and
# batched decompression.  Same pattern as the records above: Annotated byte
# offsets mirror the C argument-block layout.
@c.record
class struct_cuMemAllocFromPoolAsync_params_st(c.Struct):
  SIZE = 32
  dptr: Annotated[c.POINTER[CUdeviceptr], 0]
  bytesize: Annotated[size_t, 8]
  pool: Annotated[CUmemoryPool, 16]
  hStream: Annotated[CUstream, 24]
cuMemAllocFromPoolAsync_params: TypeAlias = struct_cuMemAllocFromPoolAsync_params_st
@c.record
class struct_cuStreamUpdateCaptureDependencies_params_st(c.Struct):
  SIZE = 32
  hStream: Annotated[CUstream, 0]
  dependencies: Annotated[c.POINTER[CUgraphNode], 8]
  numDependencies: Annotated[size_t, 16]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 24]
cuStreamUpdateCaptureDependencies_params: TypeAlias = struct_cuStreamUpdateCaptureDependencies_params_st
# v2 adds the per-edge dependencyData argument between dependencies and
# numDependencies, growing the record by 8 bytes.
@c.record
class struct_cuStreamUpdateCaptureDependencies_v2_params_st(c.Struct):
  SIZE = 40
  hStream: Annotated[CUstream, 0]
  dependencies: Annotated[c.POINTER[CUgraphNode], 8]
  dependencyData: Annotated[c.POINTER[CUgraphEdgeData], 16]
  numDependencies: Annotated[size_t, 24]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 32]
cuStreamUpdateCaptureDependencies_v2_params: TypeAlias = struct_cuStreamUpdateCaptureDependencies_v2_params_st
@c.record
class struct_cuMemBatchDecompressAsync_params_st(c.Struct):
  SIZE = 40
  paramsArray: Annotated[c.POINTER[CUmemDecompressParams], 0]
  count: Annotated[size_t, 8]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 16]
  errorIndex: Annotated[c.POINTER[size_t], 24]
  stream: Annotated[CUstream, 32]
cuMemBatchDecompressAsync_params: TypeAlias = struct_cuMemBatchDecompressAsync_params_st
# Param records for symbol lookup and the checkpoint query APIs, plus the
# CUprocessState enum that cuCheckpointProcessGetState writes through its
# `state` out-pointer.
@c.record
class struct_cuGetProcAddress_params_st(c.Struct):
  SIZE = 32
  symbol: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 0]
  pfn: Annotated[c.POINTER[ctypes.c_void_p], 8]
  cudaVersion: Annotated[Annotated[int, ctypes.c_int32], 16]
  flags: Annotated[cuuint64_t, 24]
cuGetProcAddress_params: TypeAlias = struct_cuGetProcAddress_params_st
@c.record
class struct_cuCheckpointProcessGetRestoreThreadId_params_st(c.Struct):
  SIZE = 16
  pid: Annotated[Annotated[int, ctypes.c_int32], 0]
  tid: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 8]
cuCheckpointProcessGetRestoreThreadId_params: TypeAlias = struct_cuCheckpointProcessGetRestoreThreadId_params_st
@c.record
class struct_cuCheckpointProcessGetState_params_st(c.Struct):
  SIZE = 16
  pid: Annotated[Annotated[int, ctypes.c_int32], 0]
  state: Annotated[c.POINTER[CUprocessState], 8]
# C enum mirrored as named int constants (values match the CUDA header).
class enum_CUprocessState_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
CU_PROCESS_STATE_RUNNING = enum_CUprocessState_enum.define('CU_PROCESS_STATE_RUNNING', 0)
CU_PROCESS_STATE_LOCKED = enum_CUprocessState_enum.define('CU_PROCESS_STATE_LOCKED', 1)
CU_PROCESS_STATE_CHECKPOINTED = enum_CUprocessState_enum.define('CU_PROCESS_STATE_CHECKPOINTED', 2)
CU_PROCESS_STATE_FAILED = enum_CUprocessState_enum.define('CU_PROCESS_STATE_FAILED', 3)
CUprocessState: TypeAlias = enum_CUprocessState_enum
cuCheckpointProcessGetState_params: TypeAlias = struct_cuCheckpointProcessGetState_params_st
# Checkpoint lifecycle param records (lock -> checkpoint -> restore -> unlock).
# Each call takes a pid plus a pointer to a 64-byte args struct; except for
# the lock timeout, the arg structs are entirely reserved padding.
@c.record
class struct_cuCheckpointProcessLock_params_st(c.Struct):
  SIZE = 16
  pid: Annotated[Annotated[int, ctypes.c_int32], 0]
  args: Annotated[c.POINTER[CUcheckpointLockArgs], 8]
@c.record
class struct_CUcheckpointLockArgs_st(c.Struct):
  SIZE = 64
  timeoutMs: Annotated[Annotated[int, ctypes.c_uint32], 0]
  reserved0: Annotated[Annotated[int, ctypes.c_uint32], 4]
  reserved1: Annotated[c.Array[cuuint64_t, Literal[7]], 8]
CUcheckpointLockArgs: TypeAlias = struct_CUcheckpointLockArgs_st
cuCheckpointProcessLock_params: TypeAlias = struct_cuCheckpointProcessLock_params_st
@c.record
class struct_cuCheckpointProcessCheckpoint_params_st(c.Struct):
  SIZE = 16
  pid: Annotated[Annotated[int, ctypes.c_int32], 0]
  args: Annotated[c.POINTER[CUcheckpointCheckpointArgs], 8]
@c.record
class struct_CUcheckpointCheckpointArgs_st(c.Struct):
  SIZE = 64
  reserved: Annotated[c.Array[cuuint64_t, Literal[8]], 0]
CUcheckpointCheckpointArgs: TypeAlias = struct_CUcheckpointCheckpointArgs_st
cuCheckpointProcessCheckpoint_params: TypeAlias = struct_cuCheckpointProcessCheckpoint_params_st
@c.record
class struct_cuCheckpointProcessRestore_params_st(c.Struct):
  SIZE = 16
  pid: Annotated[Annotated[int, ctypes.c_int32], 0]
  args: Annotated[c.POINTER[CUcheckpointRestoreArgs], 8]
@c.record
class struct_CUcheckpointRestoreArgs_st(c.Struct):
  SIZE = 64
  reserved: Annotated[c.Array[cuuint64_t, Literal[8]], 0]
CUcheckpointRestoreArgs: TypeAlias = struct_CUcheckpointRestoreArgs_st
cuCheckpointProcessRestore_params: TypeAlias = struct_cuCheckpointProcessRestore_params_st
@c.record
class struct_cuCheckpointProcessUnlock_params_st(c.Struct):
  SIZE = 16
  pid: Annotated[Annotated[int, ctypes.c_int32], 0]
  args: Annotated[c.POINTER[CUcheckpointUnlockArgs], 8]
@c.record
class struct_CUcheckpointUnlockArgs_st(c.Struct):
  SIZE = 64
  reserved: Annotated[c.Array[cuuint64_t, Literal[8]], 0]
CUcheckpointUnlockArgs: TypeAlias = struct_CUcheckpointUnlockArgs_st
cuCheckpointProcessUnlock_params: TypeAlias = struct_cuCheckpointProcessUnlock_params_st
# From here on the records mirror CUDA *runtime* (cudart) entry points; the
# _vNNNN suffix is the runtime version in which that ABI was introduced.
@c.record
class struct_cudaDeviceSetLimit_v3020_params_st(c.Struct):
  SIZE = 16
  limit: Annotated[enum_cudaLimit, 0]
  value: Annotated[size_t, 8]
# cudaLimit selector values, as defined by the runtime headers.
class enum_cudaLimit(Annotated[int, ctypes.c_uint32], c.Enum): pass
cudaLimitStackSize = enum_cudaLimit.define('cudaLimitStackSize', 0)
cudaLimitPrintfFifoSize = enum_cudaLimit.define('cudaLimitPrintfFifoSize', 1)
cudaLimitMallocHeapSize = enum_cudaLimit.define('cudaLimitMallocHeapSize', 2)
cudaLimitDevRuntimeSyncDepth = enum_cudaLimit.define('cudaLimitDevRuntimeSyncDepth', 3)
cudaLimitDevRuntimePendingLaunchCount = enum_cudaLimit.define('cudaLimitDevRuntimePendingLaunchCount', 4)
cudaLimitMaxL2FetchGranularity = enum_cudaLimit.define('cudaLimitMaxL2FetchGranularity', 5)
cudaLimitPersistingL2CacheSize = enum_cudaLimit.define('cudaLimitPersistingL2CacheSize', 6)
cudaDeviceSetLimit_v3020_params: TypeAlias = struct_cudaDeviceSetLimit_v3020_params_st
@c.record
class struct_cudaDeviceGetLimit_v3020_params_st(c.Struct):
  SIZE = 16
  pValue: Annotated[c.POINTER[size_t], 0]
  limit: Annotated[enum_cudaLimit, 8]
cudaDeviceGetLimit_v3020_params: TypeAlias = struct_cudaDeviceGetLimit_v3020_params_st
@c.record
class struct_cudaDeviceGetTexture1DLinearMaxWidth_v11010_params_st(c.Struct):
  SIZE = 24
  maxWidthInElements: Annotated[c.POINTER[size_t], 0]
  fmtDesc: Annotated[c.POINTER[struct_cudaChannelFormatDesc], 8]
  device: Annotated[Annotated[int, ctypes.c_int32], 16]
# cudaChannelFormatDesc: per-channel bit widths (x,y,z,w) plus the format kind.
@c.record
class struct_cudaChannelFormatDesc(c.Struct):
  SIZE = 20
  x: Annotated[Annotated[int, ctypes.c_int32], 0]
  y: Annotated[Annotated[int, ctypes.c_int32], 4]
  z: Annotated[Annotated[int, ctypes.c_int32], 8]
  w: Annotated[Annotated[int, ctypes.c_int32], 12]
  f: Annotated[enum_cudaChannelFormatKind, 16]
# Channel-format kinds; values (including non-contiguous ones) copied verbatim
# from the runtime headers.
class enum_cudaChannelFormatKind(Annotated[int, ctypes.c_uint32], c.Enum): pass
cudaChannelFormatKindSigned = enum_cudaChannelFormatKind.define('cudaChannelFormatKindSigned', 0)
cudaChannelFormatKindUnsigned = enum_cudaChannelFormatKind.define('cudaChannelFormatKindUnsigned', 1)
cudaChannelFormatKindFloat = enum_cudaChannelFormatKind.define('cudaChannelFormatKindFloat', 2)
cudaChannelFormatKindNone = enum_cudaChannelFormatKind.define('cudaChannelFormatKindNone', 3)
cudaChannelFormatKindNV12 = enum_cudaChannelFormatKind.define('cudaChannelFormatKindNV12', 4)
cudaChannelFormatKindUnsignedNormalized8X1 = enum_cudaChannelFormatKind.define('cudaChannelFormatKindUnsignedNormalized8X1', 5)
cudaChannelFormatKindUnsignedNormalized8X2 = enum_cudaChannelFormatKind.define('cudaChannelFormatKindUnsignedNormalized8X2', 6)
cudaChannelFormatKindUnsignedNormalized8X4 = enum_cudaChannelFormatKind.define('cudaChannelFormatKindUnsignedNormalized8X4', 7)
cudaChannelFormatKindUnsignedNormalized16X1 = enum_cudaChannelFormatKind.define('cudaChannelFormatKindUnsignedNormalized16X1', 8)
cudaChannelFormatKindUnsignedNormalized16X2 = enum_cudaChannelFormatKind.define('cudaChannelFormatKindUnsignedNormalized16X2', 9)
cudaChannelFormatKindUnsignedNormalized16X4 = enum_cudaChannelFormatKind.define('cudaChannelFormatKindUnsignedNormalized16X4', 10)
cudaChannelFormatKindSignedNormalized8X1 = enum_cudaChannelFormatKind.define('cudaChannelFormatKindSignedNormalized8X1', 11)
cudaChannelFormatKindSignedNormalized8X2 = enum_cudaChannelFormatKind.define('cudaChannelFormatKindSignedNormalized8X2', 12)
cudaChannelFormatKindSignedNormalized8X4 = enum_cudaChannelFormatKind.define('cudaChannelFormatKindSignedNormalized8X4', 13)
cudaChannelFormatKindSignedNormalized16X1 = enum_cudaChannelFormatKind.define('cudaChannelFormatKindSignedNormalized16X1', 14)
cudaChannelFormatKindSignedNormalized16X2 = enum_cudaChannelFormatKind.define('cudaChannelFormatKindSignedNormalized16X2', 15)
cudaChannelFormatKindSignedNormalized16X4 = enum_cudaChannelFormatKind.define('cudaChannelFormatKindSignedNormalized16X4', 16)
cudaChannelFormatKindUnsignedBlockCompressed1 = enum_cudaChannelFormatKind.define('cudaChannelFormatKindUnsignedBlockCompressed1', 17)
cudaChannelFormatKindUnsignedBlockCompressed1SRGB = enum_cudaChannelFormatKind.define('cudaChannelFormatKindUnsignedBlockCompressed1SRGB', 18)
cudaChannelFormatKindUnsignedBlockCompressed2 = enum_cudaChannelFormatKind.define('cudaChannelFormatKindUnsignedBlockCompressed2', 19)
cudaChannelFormatKindUnsignedBlockCompressed2SRGB = enum_cudaChannelFormatKind.define('cudaChannelFormatKindUnsignedBlockCompressed2SRGB', 20)
cudaChannelFormatKindUnsignedBlockCompressed3 = enum_cudaChannelFormatKind.define('cudaChannelFormatKindUnsignedBlockCompressed3', 21)
cudaChannelFormatKindUnsignedBlockCompressed3SRGB = enum_cudaChannelFormatKind.define('cudaChannelFormatKindUnsignedBlockCompressed3SRGB', 22)
cudaChannelFormatKindUnsignedBlockCompressed4 = enum_cudaChannelFormatKind.define('cudaChannelFormatKindUnsignedBlockCompressed4', 23)
cudaChannelFormatKindSignedBlockCompressed4 = enum_cudaChannelFormatKind.define('cudaChannelFormatKindSignedBlockCompressed4', 24)
cudaChannelFormatKindUnsignedBlockCompressed5 = enum_cudaChannelFormatKind.define('cudaChannelFormatKindUnsignedBlockCompressed5', 25)
cudaChannelFormatKindSignedBlockCompressed5 = enum_cudaChannelFormatKind.define('cudaChannelFormatKindSignedBlockCompressed5', 26)
cudaChannelFormatKindUnsignedBlockCompressed6H = enum_cudaChannelFormatKind.define('cudaChannelFormatKindUnsignedBlockCompressed6H', 27)
cudaChannelFormatKindSignedBlockCompressed6H = enum_cudaChannelFormatKind.define('cudaChannelFormatKindSignedBlockCompressed6H', 28)
cudaChannelFormatKindUnsignedBlockCompressed7 = enum_cudaChannelFormatKind.define('cudaChannelFormatKindUnsignedBlockCompressed7', 29)
cudaChannelFormatKindUnsignedBlockCompressed7SRGB = enum_cudaChannelFormatKind.define('cudaChannelFormatKindUnsignedBlockCompressed7SRGB', 30)
cudaChannelFormatKindUnsignedNormalized1010102 = enum_cudaChannelFormatKind.define('cudaChannelFormatKindUnsignedNormalized1010102', 31)
cudaDeviceGetTexture1DLinearMaxWidth_v11010_params: TypeAlias = struct_cudaDeviceGetTexture1DLinearMaxWidth_v11010_params_st
# Cache-config, stream-priority-range, and PCI-bus-id param records.
@c.record
class struct_cudaDeviceGetCacheConfig_v3020_params_st(c.Struct):
  SIZE = 8
  pCacheConfig: Annotated[c.POINTER[enum_cudaFuncCache], 0]
class enum_cudaFuncCache(Annotated[int, ctypes.c_uint32], c.Enum): pass
cudaFuncCachePreferNone = enum_cudaFuncCache.define('cudaFuncCachePreferNone', 0)
cudaFuncCachePreferShared = enum_cudaFuncCache.define('cudaFuncCachePreferShared', 1)
cudaFuncCachePreferL1 = enum_cudaFuncCache.define('cudaFuncCachePreferL1', 2)
cudaFuncCachePreferEqual = enum_cudaFuncCache.define('cudaFuncCachePreferEqual', 3)
cudaDeviceGetCacheConfig_v3020_params: TypeAlias = struct_cudaDeviceGetCacheConfig_v3020_params_st
@c.record
class struct_cudaDeviceGetStreamPriorityRange_v5050_params_st(c.Struct):
  SIZE = 16
  leastPriority: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 0]
  greatestPriority: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 8]
cudaDeviceGetStreamPriorityRange_v5050_params: TypeAlias = struct_cudaDeviceGetStreamPriorityRange_v5050_params_st
@c.record
class struct_cudaDeviceSetCacheConfig_v3020_params_st(c.Struct):
  SIZE = 4
  cacheConfig: Annotated[enum_cudaFuncCache, 0]
cudaDeviceSetCacheConfig_v3020_params: TypeAlias = struct_cudaDeviceSetCacheConfig_v3020_params_st
@c.record
class struct_cudaDeviceGetByPCIBusId_v4010_params_st(c.Struct):
  SIZE = 16
  device: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 0]
  pciBusId: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 8]
cudaDeviceGetByPCIBusId_v4010_params: TypeAlias = struct_cudaDeviceGetByPCIBusId_v4010_params_st
@c.record
class struct_cudaDeviceGetPCIBusId_v4010_params_st(c.Struct):
  SIZE = 16
  pciBusId: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 0]
  # len and device pack into the same 8-byte slot (offsets 8 and 12).
  len: Annotated[Annotated[int, ctypes.c_int32], 8]
  device: Annotated[Annotated[int, ctypes.c_int32], 12]
cudaDeviceGetPCIBusId_v4010_params: TypeAlias = struct_cudaDeviceGetPCIBusId_v4010_params_st
cudaDeviceGetPCIBusId_v4010_params: TypeAlias = struct_cudaDeviceGetPCIBusId_v4010_params_st
@c.record
class struct_cudaIpcGetEventHandle_v4010_params_st(c.Struct):
SIZE = 16
handle: Annotated[c.POINTER[cudaIpcEventHandle_t], 0]
event: Annotated[cudaEvent_t, 8]
@c.record
class struct_cudaIpcEventHandle_st(c.Struct):
SIZE = 64
reserved: Annotated[c.Array[Annotated[bytes, ctypes.c_char], Literal[64]], 0]
cudaIpcEventHandle_t: TypeAlias = struct_cudaIpcEventHandle_st
cudaEvent_t: TypeAlias = c.POINTER[struct_CUevent_st]
cudaIpcGetEventHandle_v4010_params: TypeAlias = struct_cudaIpcGetEventHandle_v4010_params_st
@c.record
class struct_cudaIpcOpenEventHandle_v4010_params_st(c.Struct):
SIZE = 72
event: Annotated[c.POINTER[cudaEvent_t], 0]
handle: Annotated[cudaIpcEventHandle_t, 8]
cudaIpcOpenEventHandle_v4010_params: TypeAlias = struct_cudaIpcOpenEventHandle_v4010_params_st
@c.record
class struct_cudaIpcGetMemHandle_v4010_params_st(c.Struct):
SIZE = 16
handle: Annotated[c.POINTER[cudaIpcMemHandle_t], 0]
devPtr: Annotated[ctypes.c_void_p, 8]
@c.record
class struct_cudaIpcMemHandle_st(c.Struct):
SIZE = 64
reserved: Annotated[c.Array[Annotated[bytes, ctypes.c_char], Literal[64]], 0]
cudaIpcMemHandle_t: TypeAlias = struct_cudaIpcMemHandle_st
cudaIpcGetMemHandle_v4010_params: TypeAlias = struct_cudaIpcGetMemHandle_v4010_params_st
@c.record
class struct_cudaIpcOpenMemHandle_v4010_params_st(c.Struct):
SIZE = 80
devPtr: Annotated[c.POINTER[ctypes.c_void_p], 0]
handle: Annotated[cudaIpcMemHandle_t, 8]
flags: Annotated[Annotated[int, ctypes.c_uint32], 72]
cudaIpcOpenMemHandle_v4010_params: TypeAlias = struct_cudaIpcOpenMemHandle_v4010_params_st
@c.record
class struct_cudaIpcCloseMemHandle_v4010_params_st(c.Struct):
SIZE = 8
devPtr: Annotated[ctypes.c_void_p, 0]
cudaIpcCloseMemHandle_v4010_params: TypeAlias = struct_cudaIpcCloseMemHandle_v4010_params_st
@c.record
class struct_cudaDeviceFlushGPUDirectRDMAWrites_v11030_params_st(c.Struct):
  SIZE = 8
  target: Annotated[enum_cudaFlushGPUDirectRDMAWritesTarget, 0]
  scope: Annotated[enum_cudaFlushGPUDirectRDMAWritesScope, 4]
class enum_cudaFlushGPUDirectRDMAWritesTarget(Annotated[int, ctypes.c_uint32], c.Enum): pass
cudaFlushGPUDirectRDMAWritesTargetCurrentDevice = enum_cudaFlushGPUDirectRDMAWritesTarget.define('cudaFlushGPUDirectRDMAWritesTargetCurrentDevice', 0)
class enum_cudaFlushGPUDirectRDMAWritesScope(Annotated[int, ctypes.c_uint32], c.Enum): pass
cudaFlushGPUDirectRDMAWritesToOwner = enum_cudaFlushGPUDirectRDMAWritesScope.define('cudaFlushGPUDirectRDMAWritesToOwner', 100)
cudaFlushGPUDirectRDMAWritesToAllDevices = enum_cudaFlushGPUDirectRDMAWritesScope.define('cudaFlushGPUDirectRDMAWritesToAllDevices', 200)
cudaDeviceFlushGPUDirectRDMAWrites_v11030_params: TypeAlias = struct_cudaDeviceFlushGPUDirectRDMAWrites_v11030_params_st
# Async-notification registration: the supporting notification-info structs,
# the CFUNCTYPE callback signature, and the opaque callback-handle pointer.
@c.record
class struct_cudaDeviceRegisterAsyncNotification_v12040_params_st(c.Struct):
  SIZE = 32
  device: Annotated[Annotated[int, ctypes.c_int32], 0]
  callbackFunc: Annotated[cudaAsyncCallback, 8]
  userData: Annotated[ctypes.c_void_p, 16]
  callback: Annotated[c.POINTER[cudaAsyncCallbackHandle_t], 24]
@c.record
class struct_cudaAsyncNotificationInfo(c.Struct):
  SIZE = 16
  type: Annotated[cudaAsyncNotificationType, 0]
  info: Annotated[struct_cudaAsyncNotificationInfo_info, 8]
class enum_cudaAsyncNotificationType_enum(Annotated[int, ctypes.c_uint32], c.Enum): pass
cudaAsyncNotificationTypeOverBudget = enum_cudaAsyncNotificationType_enum.define('cudaAsyncNotificationTypeOverBudget', 1)
cudaAsyncNotificationType: TypeAlias = enum_cudaAsyncNotificationType_enum
# `info` is a C union in the headers; only the overBudget member exists here.
@c.record
class struct_cudaAsyncNotificationInfo_info(c.Struct):
  SIZE = 8
  overBudget: Annotated[struct_cudaAsyncNotificationInfo_info_overBudget, 0]
@c.record
class struct_cudaAsyncNotificationInfo_info_overBudget(c.Struct):
  SIZE = 8
  bytesOverBudget: Annotated[Annotated[int, ctypes.c_uint64], 0]
# Opaque handle type: declared but never given fields (forward declaration).
class struct_cudaAsyncCallbackEntry(ctypes.Structure): pass
cudaAsyncCallback: TypeAlias = c.CFUNCTYPE[None, [c.POINTER[struct_cudaAsyncNotificationInfo], ctypes.c_void_p, c.POINTER[struct_cudaAsyncCallbackEntry]]]
cudaAsyncCallbackHandle_t: TypeAlias = c.POINTER[struct_cudaAsyncCallbackEntry]
cudaDeviceRegisterAsyncNotification_v12040_params: TypeAlias = struct_cudaDeviceRegisterAsyncNotification_v12040_params_st
@c.record
class struct_cudaDeviceUnregisterAsyncNotification_v12040_params_st(c.Struct):
  SIZE = 16
  device: Annotated[Annotated[int, ctypes.c_int32], 0]
  callback: Annotated[cudaAsyncCallbackHandle_t, 8]
cudaDeviceUnregisterAsyncNotification_v12040_params: TypeAlias = struct_cudaDeviceUnregisterAsyncNotification_v12040_params_st
# Shared-memory bank-size config: get/set param records plus the selector enum.
@c.record
class struct_cudaDeviceGetSharedMemConfig_v4020_params_st(c.Struct):
  SIZE = 8
  pConfig: Annotated[c.POINTER[enum_cudaSharedMemConfig], 0]
class enum_cudaSharedMemConfig(Annotated[int, ctypes.c_uint32], c.Enum): pass
cudaSharedMemBankSizeDefault = enum_cudaSharedMemConfig.define('cudaSharedMemBankSizeDefault', 0)
cudaSharedMemBankSizeFourByte = enum_cudaSharedMemConfig.define('cudaSharedMemBankSizeFourByte', 1)
cudaSharedMemBankSizeEightByte = enum_cudaSharedMemConfig.define('cudaSharedMemBankSizeEightByte', 2)
cudaDeviceGetSharedMemConfig_v4020_params: TypeAlias = struct_cudaDeviceGetSharedMemConfig_v4020_params_st
@c.record
class struct_cudaDeviceSetSharedMemConfig_v4020_params_st(c.Struct):
  SIZE = 4
  config: Annotated[enum_cudaSharedMemConfig, 0]
cudaDeviceSetSharedMemConfig_v4020_params: TypeAlias = struct_cudaDeviceSetSharedMemConfig_v4020_params_st
@c.record
class struct_cudaGetErrorName_v6050_params_st(c.Struct):
  SIZE = 4
  error: Annotated[cudaError_t, 0]
# Full cudaError code table mirrored from driver_types.h.  Values are sparse
# (gaps where codes were deprecated/removed), so each one is defined explicitly
# rather than auto-numbered.
class enum_cudaError(Annotated[int, ctypes.c_uint32], c.Enum): pass
cudaSuccess = enum_cudaError.define('cudaSuccess', 0)
cudaErrorInvalidValue = enum_cudaError.define('cudaErrorInvalidValue', 1)
cudaErrorMemoryAllocation = enum_cudaError.define('cudaErrorMemoryAllocation', 2)
cudaErrorInitializationError = enum_cudaError.define('cudaErrorInitializationError', 3)
cudaErrorCudartUnloading = enum_cudaError.define('cudaErrorCudartUnloading', 4)
cudaErrorProfilerDisabled = enum_cudaError.define('cudaErrorProfilerDisabled', 5)
cudaErrorProfilerNotInitialized = enum_cudaError.define('cudaErrorProfilerNotInitialized', 6)
cudaErrorProfilerAlreadyStarted = enum_cudaError.define('cudaErrorProfilerAlreadyStarted', 7)
cudaErrorProfilerAlreadyStopped = enum_cudaError.define('cudaErrorProfilerAlreadyStopped', 8)
cudaErrorInvalidConfiguration = enum_cudaError.define('cudaErrorInvalidConfiguration', 9)
cudaErrorInvalidPitchValue = enum_cudaError.define('cudaErrorInvalidPitchValue', 12)
cudaErrorInvalidSymbol = enum_cudaError.define('cudaErrorInvalidSymbol', 13)
cudaErrorInvalidHostPointer = enum_cudaError.define('cudaErrorInvalidHostPointer', 16)
cudaErrorInvalidDevicePointer = enum_cudaError.define('cudaErrorInvalidDevicePointer', 17)
cudaErrorInvalidTexture = enum_cudaError.define('cudaErrorInvalidTexture', 18)
cudaErrorInvalidTextureBinding = enum_cudaError.define('cudaErrorInvalidTextureBinding', 19)
cudaErrorInvalidChannelDescriptor = enum_cudaError.define('cudaErrorInvalidChannelDescriptor', 20)
cudaErrorInvalidMemcpyDirection = enum_cudaError.define('cudaErrorInvalidMemcpyDirection', 21)
cudaErrorAddressOfConstant = enum_cudaError.define('cudaErrorAddressOfConstant', 22)
cudaErrorTextureFetchFailed = enum_cudaError.define('cudaErrorTextureFetchFailed', 23)
cudaErrorTextureNotBound = enum_cudaError.define('cudaErrorTextureNotBound', 24)
cudaErrorSynchronizationError = enum_cudaError.define('cudaErrorSynchronizationError', 25)
cudaErrorInvalidFilterSetting = enum_cudaError.define('cudaErrorInvalidFilterSetting', 26)
cudaErrorInvalidNormSetting = enum_cudaError.define('cudaErrorInvalidNormSetting', 27)
cudaErrorMixedDeviceExecution = enum_cudaError.define('cudaErrorMixedDeviceExecution', 28)
cudaErrorNotYetImplemented = enum_cudaError.define('cudaErrorNotYetImplemented', 31)
cudaErrorMemoryValueTooLarge = enum_cudaError.define('cudaErrorMemoryValueTooLarge', 32)
cudaErrorStubLibrary = enum_cudaError.define('cudaErrorStubLibrary', 34)
cudaErrorInsufficientDriver = enum_cudaError.define('cudaErrorInsufficientDriver', 35)
cudaErrorCallRequiresNewerDriver = enum_cudaError.define('cudaErrorCallRequiresNewerDriver', 36)
cudaErrorInvalidSurface = enum_cudaError.define('cudaErrorInvalidSurface', 37)
cudaErrorDuplicateVariableName = enum_cudaError.define('cudaErrorDuplicateVariableName', 43)
cudaErrorDuplicateTextureName = enum_cudaError.define('cudaErrorDuplicateTextureName', 44)
cudaErrorDuplicateSurfaceName = enum_cudaError.define('cudaErrorDuplicateSurfaceName', 45)
cudaErrorDevicesUnavailable = enum_cudaError.define('cudaErrorDevicesUnavailable', 46)
cudaErrorIncompatibleDriverContext = enum_cudaError.define('cudaErrorIncompatibleDriverContext', 49)
cudaErrorMissingConfiguration = enum_cudaError.define('cudaErrorMissingConfiguration', 52)
cudaErrorPriorLaunchFailure = enum_cudaError.define('cudaErrorPriorLaunchFailure', 53)
cudaErrorLaunchMaxDepthExceeded = enum_cudaError.define('cudaErrorLaunchMaxDepthExceeded', 65)
cudaErrorLaunchFileScopedTex = enum_cudaError.define('cudaErrorLaunchFileScopedTex', 66)
cudaErrorLaunchFileScopedSurf = enum_cudaError.define('cudaErrorLaunchFileScopedSurf', 67)
cudaErrorSyncDepthExceeded = enum_cudaError.define('cudaErrorSyncDepthExceeded', 68)
cudaErrorLaunchPendingCountExceeded = enum_cudaError.define('cudaErrorLaunchPendingCountExceeded', 69)
cudaErrorInvalidDeviceFunction = enum_cudaError.define('cudaErrorInvalidDeviceFunction', 98)
cudaErrorNoDevice = enum_cudaError.define('cudaErrorNoDevice', 100)
cudaErrorInvalidDevice = enum_cudaError.define('cudaErrorInvalidDevice', 101)
cudaErrorDeviceNotLicensed = enum_cudaError.define('cudaErrorDeviceNotLicensed', 102)
cudaErrorSoftwareValidityNotEstablished = enum_cudaError.define('cudaErrorSoftwareValidityNotEstablished', 103)
cudaErrorStartupFailure = enum_cudaError.define('cudaErrorStartupFailure', 127)
cudaErrorInvalidKernelImage = enum_cudaError.define('cudaErrorInvalidKernelImage', 200)
cudaErrorDeviceUninitialized = enum_cudaError.define('cudaErrorDeviceUninitialized', 201)
cudaErrorMapBufferObjectFailed = enum_cudaError.define('cudaErrorMapBufferObjectFailed', 205)
cudaErrorUnmapBufferObjectFailed = enum_cudaError.define('cudaErrorUnmapBufferObjectFailed', 206)
cudaErrorArrayIsMapped = enum_cudaError.define('cudaErrorArrayIsMapped', 207)
cudaErrorAlreadyMapped = enum_cudaError.define('cudaErrorAlreadyMapped', 208)
cudaErrorNoKernelImageForDevice = enum_cudaError.define('cudaErrorNoKernelImageForDevice', 209)
cudaErrorAlreadyAcquired = enum_cudaError.define('cudaErrorAlreadyAcquired', 210)
cudaErrorNotMapped = enum_cudaError.define('cudaErrorNotMapped', 211)
cudaErrorNotMappedAsArray = enum_cudaError.define('cudaErrorNotMappedAsArray', 212)
cudaErrorNotMappedAsPointer = enum_cudaError.define('cudaErrorNotMappedAsPointer', 213)
cudaErrorECCUncorrectable = enum_cudaError.define('cudaErrorECCUncorrectable', 214)
cudaErrorUnsupportedLimit = enum_cudaError.define('cudaErrorUnsupportedLimit', 215)
cudaErrorDeviceAlreadyInUse = enum_cudaError.define('cudaErrorDeviceAlreadyInUse', 216)
cudaErrorPeerAccessUnsupported = enum_cudaError.define('cudaErrorPeerAccessUnsupported', 217)
cudaErrorInvalidPtx = enum_cudaError.define('cudaErrorInvalidPtx', 218)
cudaErrorInvalidGraphicsContext = enum_cudaError.define('cudaErrorInvalidGraphicsContext', 219)
cudaErrorNvlinkUncorrectable = enum_cudaError.define('cudaErrorNvlinkUncorrectable', 220)
cudaErrorJitCompilerNotFound = enum_cudaError.define('cudaErrorJitCompilerNotFound', 221)
cudaErrorUnsupportedPtxVersion = enum_cudaError.define('cudaErrorUnsupportedPtxVersion', 222)
cudaErrorJitCompilationDisabled = enum_cudaError.define('cudaErrorJitCompilationDisabled', 223)
cudaErrorUnsupportedExecAffinity = enum_cudaError.define('cudaErrorUnsupportedExecAffinity', 224)
cudaErrorUnsupportedDevSideSync = enum_cudaError.define('cudaErrorUnsupportedDevSideSync', 225)
cudaErrorContained = enum_cudaError.define('cudaErrorContained', 226)
cudaErrorInvalidSource = enum_cudaError.define('cudaErrorInvalidSource', 300)
cudaErrorFileNotFound = enum_cudaError.define('cudaErrorFileNotFound', 301)
cudaErrorSharedObjectSymbolNotFound = enum_cudaError.define('cudaErrorSharedObjectSymbolNotFound', 302)
cudaErrorSharedObjectInitFailed = enum_cudaError.define('cudaErrorSharedObjectInitFailed', 303)
cudaErrorOperatingSystem = enum_cudaError.define('cudaErrorOperatingSystem', 304)
cudaErrorInvalidResourceHandle = enum_cudaError.define('cudaErrorInvalidResourceHandle', 400)
cudaErrorIllegalState = enum_cudaError.define('cudaErrorIllegalState', 401)
cudaErrorLossyQuery = enum_cudaError.define('cudaErrorLossyQuery', 402)
cudaErrorSymbolNotFound = enum_cudaError.define('cudaErrorSymbolNotFound', 500)
cudaErrorNotReady = enum_cudaError.define('cudaErrorNotReady', 600)
cudaErrorIllegalAddress = enum_cudaError.define('cudaErrorIllegalAddress', 700)
cudaErrorLaunchOutOfResources = enum_cudaError.define('cudaErrorLaunchOutOfResources', 701)
cudaErrorLaunchTimeout = enum_cudaError.define('cudaErrorLaunchTimeout', 702)
cudaErrorLaunchIncompatibleTexturing = enum_cudaError.define('cudaErrorLaunchIncompatibleTexturing', 703)
cudaErrorPeerAccessAlreadyEnabled = enum_cudaError.define('cudaErrorPeerAccessAlreadyEnabled', 704)
cudaErrorPeerAccessNotEnabled = enum_cudaError.define('cudaErrorPeerAccessNotEnabled', 705)
cudaErrorSetOnActiveProcess = enum_cudaError.define('cudaErrorSetOnActiveProcess', 708)
cudaErrorContextIsDestroyed = enum_cudaError.define('cudaErrorContextIsDestroyed', 709)
cudaErrorAssert = enum_cudaError.define('cudaErrorAssert', 710)
cudaErrorTooManyPeers = enum_cudaError.define('cudaErrorTooManyPeers', 711)
cudaErrorHostMemoryAlreadyRegistered = enum_cudaError.define('cudaErrorHostMemoryAlreadyRegistered', 712)
cudaErrorHostMemoryNotRegistered = enum_cudaError.define('cudaErrorHostMemoryNotRegistered', 713)
cudaErrorHardwareStackError = enum_cudaError.define('cudaErrorHardwareStackError', 714)
cudaErrorIllegalInstruction = enum_cudaError.define('cudaErrorIllegalInstruction', 715)
cudaErrorMisalignedAddress = enum_cudaError.define('cudaErrorMisalignedAddress', 716)
cudaErrorInvalidAddressSpace = enum_cudaError.define('cudaErrorInvalidAddressSpace', 717)
cudaErrorInvalidPc = enum_cudaError.define('cudaErrorInvalidPc', 718)
cudaErrorLaunchFailure = enum_cudaError.define('cudaErrorLaunchFailure', 719)
cudaErrorCooperativeLaunchTooLarge = enum_cudaError.define('cudaErrorCooperativeLaunchTooLarge', 720)
cudaErrorTensorMemoryLeak = enum_cudaError.define('cudaErrorTensorMemoryLeak', 721)
cudaErrorNotPermitted = enum_cudaError.define('cudaErrorNotPermitted', 800)
cudaErrorNotSupported = enum_cudaError.define('cudaErrorNotSupported', 801)
cudaErrorSystemNotReady = enum_cudaError.define('cudaErrorSystemNotReady', 802)
cudaErrorSystemDriverMismatch = enum_cudaError.define('cudaErrorSystemDriverMismatch', 803)
cudaErrorCompatNotSupportedOnDevice = enum_cudaError.define('cudaErrorCompatNotSupportedOnDevice', 804)
cudaErrorMpsConnectionFailed = enum_cudaError.define('cudaErrorMpsConnectionFailed', 805)
cudaErrorMpsRpcFailure = enum_cudaError.define('cudaErrorMpsRpcFailure', 806)
cudaErrorMpsServerNotReady = enum_cudaError.define('cudaErrorMpsServerNotReady', 807)
cudaErrorMpsMaxClientsReached = enum_cudaError.define('cudaErrorMpsMaxClientsReached', 808)
cudaErrorMpsMaxConnectionsReached = enum_cudaError.define('cudaErrorMpsMaxConnectionsReached', 809)
cudaErrorMpsClientTerminated = enum_cudaError.define('cudaErrorMpsClientTerminated', 810)
cudaErrorCdpNotSupported = enum_cudaError.define('cudaErrorCdpNotSupported', 811)
cudaErrorCdpVersionMismatch = enum_cudaError.define('cudaErrorCdpVersionMismatch', 812)
cudaErrorStreamCaptureUnsupported = enum_cudaError.define('cudaErrorStreamCaptureUnsupported', 900)
cudaErrorStreamCaptureInvalidated = enum_cudaError.define('cudaErrorStreamCaptureInvalidated', 901)
cudaErrorStreamCaptureMerge = enum_cudaError.define('cudaErrorStreamCaptureMerge', 902)
cudaErrorStreamCaptureUnmatched = enum_cudaError.define('cudaErrorStreamCaptureUnmatched', 903)
cudaErrorStreamCaptureUnjoined = enum_cudaError.define('cudaErrorStreamCaptureUnjoined', 904)
cudaErrorStreamCaptureIsolation = enum_cudaError.define('cudaErrorStreamCaptureIsolation', 905)
cudaErrorStreamCaptureImplicit = enum_cudaError.define('cudaErrorStreamCaptureImplicit', 906)
cudaErrorCapturedEvent = enum_cudaError.define('cudaErrorCapturedEvent', 907)
cudaErrorStreamCaptureWrongThread = enum_cudaError.define('cudaErrorStreamCaptureWrongThread', 908)
cudaErrorTimeout = enum_cudaError.define('cudaErrorTimeout', 909)
cudaErrorGraphExecUpdateFailure = enum_cudaError.define('cudaErrorGraphExecUpdateFailure', 910)
cudaErrorExternalDevice = enum_cudaError.define('cudaErrorExternalDevice', 911)
cudaErrorInvalidClusterSize = enum_cudaError.define('cudaErrorInvalidClusterSize', 912)
cudaErrorFunctionNotLoaded = enum_cudaError.define('cudaErrorFunctionNotLoaded', 913)
cudaErrorInvalidResourceType = enum_cudaError.define('cudaErrorInvalidResourceType', 914)
cudaErrorInvalidResourceConfiguration = enum_cudaError.define('cudaErrorInvalidResourceConfiguration', 915)
cudaErrorUnknown = enum_cudaError.define('cudaErrorUnknown', 999)
cudaErrorApiFailureBase = enum_cudaError.define('cudaErrorApiFailureBase', 10000)
cudaError_t: TypeAlias = enum_cudaError
cudaGetErrorName_v6050_params: TypeAlias = struct_cudaGetErrorName_v6050_params_st
@c.record
class struct_cudaGetErrorString_v3020_params_st(c.Struct):
  SIZE = 4
  error: Annotated[cudaError_t, 0]
cudaGetErrorString_v3020_params: TypeAlias = struct_cudaGetErrorString_v3020_params_st
@c.record
class struct_cudaGetDeviceCount_v3020_params_st(c.Struct):
  SIZE = 8
  count: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 0]
cudaGetDeviceCount_v3020_params: TypeAlias = struct_cudaGetDeviceCount_v3020_params_st
# cudaGetDeviceProperties_v2 (runtime 12.0 ABI): out-pointer to the large
# cudaDeviceProp struct defined below, plus the device ordinal.
@c.record
class struct_cudaGetDeviceProperties_v2_v12000_params_st(c.Struct):
  SIZE = 16
  prop: Annotated[c.POINTER[struct_cudaDeviceProp], 0]
  device: Annotated[Annotated[int, ctypes.c_int32], 8]
# Mirror of the CUDA runtime's cudaDeviceProp struct (1032 bytes). Field
# order/offsets follow the C layout; note the 4-byte padding gaps implied by
# some offsets (e.g. luidDeviceNodeMask at 280 -> totalGlobalMem at 288).
@c.record
class struct_cudaDeviceProp(c.Struct):
    SIZE = 1032
    name: Annotated[c.Array[Annotated[bytes, ctypes.c_char], Literal[256]], 0]  # NUL-terminated device name
    uuid: Annotated[cudaUUID_t, 256]
    luid: Annotated[c.Array[Annotated[bytes, ctypes.c_char], Literal[8]], 272]
    luidDeviceNodeMask: Annotated[Annotated[int, ctypes.c_uint32], 280]
    totalGlobalMem: Annotated[size_t, 288]
    sharedMemPerBlock: Annotated[size_t, 296]
    regsPerBlock: Annotated[Annotated[int, ctypes.c_int32], 304]
    warpSize: Annotated[Annotated[int, ctypes.c_int32], 308]
    memPitch: Annotated[size_t, 312]
    maxThreadsPerBlock: Annotated[Annotated[int, ctypes.c_int32], 320]
    maxThreadsDim: Annotated[c.Array[Annotated[int, ctypes.c_int32], Literal[3]], 324]
    maxGridSize: Annotated[c.Array[Annotated[int, ctypes.c_int32], Literal[3]], 336]
    clockRate: Annotated[Annotated[int, ctypes.c_int32], 348]
    totalConstMem: Annotated[size_t, 352]
    major: Annotated[Annotated[int, ctypes.c_int32], 360]  # compute capability major
    minor: Annotated[Annotated[int, ctypes.c_int32], 364]  # compute capability minor
    textureAlignment: Annotated[size_t, 368]
    texturePitchAlignment: Annotated[size_t, 376]
    deviceOverlap: Annotated[Annotated[int, ctypes.c_int32], 384]
    multiProcessorCount: Annotated[Annotated[int, ctypes.c_int32], 388]
    kernelExecTimeoutEnabled: Annotated[Annotated[int, ctypes.c_int32], 392]
    integrated: Annotated[Annotated[int, ctypes.c_int32], 396]
    canMapHostMemory: Annotated[Annotated[int, ctypes.c_int32], 400]
    computeMode: Annotated[Annotated[int, ctypes.c_int32], 404]
    maxTexture1D: Annotated[Annotated[int, ctypes.c_int32], 408]
    maxTexture1DMipmap: Annotated[Annotated[int, ctypes.c_int32], 412]
    maxTexture1DLinear: Annotated[Annotated[int, ctypes.c_int32], 416]
    maxTexture2D: Annotated[c.Array[Annotated[int, ctypes.c_int32], Literal[2]], 420]
    maxTexture2DMipmap: Annotated[c.Array[Annotated[int, ctypes.c_int32], Literal[2]], 428]
    maxTexture2DLinear: Annotated[c.Array[Annotated[int, ctypes.c_int32], Literal[3]], 436]
    maxTexture2DGather: Annotated[c.Array[Annotated[int, ctypes.c_int32], Literal[2]], 448]
    maxTexture3D: Annotated[c.Array[Annotated[int, ctypes.c_int32], Literal[3]], 456]
    maxTexture3DAlt: Annotated[c.Array[Annotated[int, ctypes.c_int32], Literal[3]], 468]
    maxTextureCubemap: Annotated[Annotated[int, ctypes.c_int32], 480]
    maxTexture1DLayered: Annotated[c.Array[Annotated[int, ctypes.c_int32], Literal[2]], 484]
    maxTexture2DLayered: Annotated[c.Array[Annotated[int, ctypes.c_int32], Literal[3]], 492]
    maxTextureCubemapLayered: Annotated[c.Array[Annotated[int, ctypes.c_int32], Literal[2]], 504]
    maxSurface1D: Annotated[Annotated[int, ctypes.c_int32], 512]
    maxSurface2D: Annotated[c.Array[Annotated[int, ctypes.c_int32], Literal[2]], 516]
    maxSurface3D: Annotated[c.Array[Annotated[int, ctypes.c_int32], Literal[3]], 524]
    maxSurface1DLayered: Annotated[c.Array[Annotated[int, ctypes.c_int32], Literal[2]], 536]
    maxSurface2DLayered: Annotated[c.Array[Annotated[int, ctypes.c_int32], Literal[3]], 544]
    maxSurfaceCubemap: Annotated[Annotated[int, ctypes.c_int32], 556]
    maxSurfaceCubemapLayered: Annotated[c.Array[Annotated[int, ctypes.c_int32], Literal[2]], 560]
    surfaceAlignment: Annotated[size_t, 568]
    concurrentKernels: Annotated[Annotated[int, ctypes.c_int32], 576]
    ECCEnabled: Annotated[Annotated[int, ctypes.c_int32], 580]
    pciBusID: Annotated[Annotated[int, ctypes.c_int32], 584]
    pciDeviceID: Annotated[Annotated[int, ctypes.c_int32], 588]
    pciDomainID: Annotated[Annotated[int, ctypes.c_int32], 592]
    tccDriver: Annotated[Annotated[int, ctypes.c_int32], 596]
    asyncEngineCount: Annotated[Annotated[int, ctypes.c_int32], 600]
    unifiedAddressing: Annotated[Annotated[int, ctypes.c_int32], 604]
    memoryClockRate: Annotated[Annotated[int, ctypes.c_int32], 608]
    memoryBusWidth: Annotated[Annotated[int, ctypes.c_int32], 612]
    l2CacheSize: Annotated[Annotated[int, ctypes.c_int32], 616]
    persistingL2CacheMaxSize: Annotated[Annotated[int, ctypes.c_int32], 620]
    maxThreadsPerMultiProcessor: Annotated[Annotated[int, ctypes.c_int32], 624]
    streamPrioritiesSupported: Annotated[Annotated[int, ctypes.c_int32], 628]
    globalL1CacheSupported: Annotated[Annotated[int, ctypes.c_int32], 632]
    localL1CacheSupported: Annotated[Annotated[int, ctypes.c_int32], 636]
    sharedMemPerMultiprocessor: Annotated[size_t, 640]
    regsPerMultiprocessor: Annotated[Annotated[int, ctypes.c_int32], 648]
    managedMemory: Annotated[Annotated[int, ctypes.c_int32], 652]
    isMultiGpuBoard: Annotated[Annotated[int, ctypes.c_int32], 656]
    multiGpuBoardGroupID: Annotated[Annotated[int, ctypes.c_int32], 660]
    hostNativeAtomicSupported: Annotated[Annotated[int, ctypes.c_int32], 664]
    singleToDoublePrecisionPerfRatio: Annotated[Annotated[int, ctypes.c_int32], 668]
    pageableMemoryAccess: Annotated[Annotated[int, ctypes.c_int32], 672]
    concurrentManagedAccess: Annotated[Annotated[int, ctypes.c_int32], 676]
    computePreemptionSupported: Annotated[Annotated[int, ctypes.c_int32], 680]
    canUseHostPointerForRegisteredMem: Annotated[Annotated[int, ctypes.c_int32], 684]
    cooperativeLaunch: Annotated[Annotated[int, ctypes.c_int32], 688]
    cooperativeMultiDeviceLaunch: Annotated[Annotated[int, ctypes.c_int32], 692]
    sharedMemPerBlockOptin: Annotated[size_t, 696]
    pageableMemoryAccessUsesHostPageTables: Annotated[Annotated[int, ctypes.c_int32], 704]
    directManagedMemAccessFromHost: Annotated[Annotated[int, ctypes.c_int32], 708]
    maxBlocksPerMultiProcessor: Annotated[Annotated[int, ctypes.c_int32], 712]
    accessPolicyMaxWindowSize: Annotated[Annotated[int, ctypes.c_int32], 716]
    reservedSharedMemPerBlock: Annotated[size_t, 720]
    hostRegisterSupported: Annotated[Annotated[int, ctypes.c_int32], 728]
    sparseCudaArraySupported: Annotated[Annotated[int, ctypes.c_int32], 732]
    hostRegisterReadOnlySupported: Annotated[Annotated[int, ctypes.c_int32], 736]
    timelineSemaphoreInteropSupported: Annotated[Annotated[int, ctypes.c_int32], 740]
    memoryPoolsSupported: Annotated[Annotated[int, ctypes.c_int32], 744]
    gpuDirectRDMASupported: Annotated[Annotated[int, ctypes.c_int32], 748]
    gpuDirectRDMAFlushWritesOptions: Annotated[Annotated[int, ctypes.c_uint32], 752]
    gpuDirectRDMAWritesOrdering: Annotated[Annotated[int, ctypes.c_int32], 756]
    memoryPoolSupportedHandleTypes: Annotated[Annotated[int, ctypes.c_uint32], 760]
    deferredMappingCudaArraySupported: Annotated[Annotated[int, ctypes.c_int32], 764]
    ipcEventSupported: Annotated[Annotated[int, ctypes.c_int32], 768]
    clusterLaunch: Annotated[Annotated[int, ctypes.c_int32], 772]
    unifiedFunctionPointers: Annotated[Annotated[int, ctypes.c_int32], 776]
    reserved: Annotated[c.Array[Annotated[int, ctypes.c_int32], Literal[63]], 780]  # header-reserved tail
# cudaUUID_t is the same layout as the driver API's CUuuid.
cudaUUID_t: TypeAlias = struct_CUuuid_st
cudaGetDeviceProperties_v2_v12000_params: TypeAlias = struct_cudaGetDeviceProperties_v2_v12000_params_st
# Argument block for cudaDeviceGetAttribute (v5000 ABI), followed by the
# cudaDeviceAttr enum. Enum numbering intentionally has gaps (e.g. 44,
# 102-105, 107, 110, 136-138, 141-142) to stay in sync with the CUDA headers,
# and 114 is registered twice under two names (same value).
@c.record
class struct_cudaDeviceGetAttribute_v5000_params_st(c.Struct):
    SIZE = 16
    value: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 0]  # out: attribute value
    attr: Annotated[enum_cudaDeviceAttr, 8]
    device: Annotated[Annotated[int, ctypes.c_int32], 12]
class enum_cudaDeviceAttr(Annotated[int, ctypes.c_uint32], c.Enum): pass
cudaDevAttrMaxThreadsPerBlock = enum_cudaDeviceAttr.define('cudaDevAttrMaxThreadsPerBlock', 1)
cudaDevAttrMaxBlockDimX = enum_cudaDeviceAttr.define('cudaDevAttrMaxBlockDimX', 2)
cudaDevAttrMaxBlockDimY = enum_cudaDeviceAttr.define('cudaDevAttrMaxBlockDimY', 3)
cudaDevAttrMaxBlockDimZ = enum_cudaDeviceAttr.define('cudaDevAttrMaxBlockDimZ', 4)
cudaDevAttrMaxGridDimX = enum_cudaDeviceAttr.define('cudaDevAttrMaxGridDimX', 5)
cudaDevAttrMaxGridDimY = enum_cudaDeviceAttr.define('cudaDevAttrMaxGridDimY', 6)
cudaDevAttrMaxGridDimZ = enum_cudaDeviceAttr.define('cudaDevAttrMaxGridDimZ', 7)
cudaDevAttrMaxSharedMemoryPerBlock = enum_cudaDeviceAttr.define('cudaDevAttrMaxSharedMemoryPerBlock', 8)
cudaDevAttrTotalConstantMemory = enum_cudaDeviceAttr.define('cudaDevAttrTotalConstantMemory', 9)
cudaDevAttrWarpSize = enum_cudaDeviceAttr.define('cudaDevAttrWarpSize', 10)
cudaDevAttrMaxPitch = enum_cudaDeviceAttr.define('cudaDevAttrMaxPitch', 11)
cudaDevAttrMaxRegistersPerBlock = enum_cudaDeviceAttr.define('cudaDevAttrMaxRegistersPerBlock', 12)
cudaDevAttrClockRate = enum_cudaDeviceAttr.define('cudaDevAttrClockRate', 13)
cudaDevAttrTextureAlignment = enum_cudaDeviceAttr.define('cudaDevAttrTextureAlignment', 14)
cudaDevAttrGpuOverlap = enum_cudaDeviceAttr.define('cudaDevAttrGpuOverlap', 15)
cudaDevAttrMultiProcessorCount = enum_cudaDeviceAttr.define('cudaDevAttrMultiProcessorCount', 16)
cudaDevAttrKernelExecTimeout = enum_cudaDeviceAttr.define('cudaDevAttrKernelExecTimeout', 17)
cudaDevAttrIntegrated = enum_cudaDeviceAttr.define('cudaDevAttrIntegrated', 18)
cudaDevAttrCanMapHostMemory = enum_cudaDeviceAttr.define('cudaDevAttrCanMapHostMemory', 19)
cudaDevAttrComputeMode = enum_cudaDeviceAttr.define('cudaDevAttrComputeMode', 20)
cudaDevAttrMaxTexture1DWidth = enum_cudaDeviceAttr.define('cudaDevAttrMaxTexture1DWidth', 21)
cudaDevAttrMaxTexture2DWidth = enum_cudaDeviceAttr.define('cudaDevAttrMaxTexture2DWidth', 22)
cudaDevAttrMaxTexture2DHeight = enum_cudaDeviceAttr.define('cudaDevAttrMaxTexture2DHeight', 23)
cudaDevAttrMaxTexture3DWidth = enum_cudaDeviceAttr.define('cudaDevAttrMaxTexture3DWidth', 24)
cudaDevAttrMaxTexture3DHeight = enum_cudaDeviceAttr.define('cudaDevAttrMaxTexture3DHeight', 25)
cudaDevAttrMaxTexture3DDepth = enum_cudaDeviceAttr.define('cudaDevAttrMaxTexture3DDepth', 26)
cudaDevAttrMaxTexture2DLayeredWidth = enum_cudaDeviceAttr.define('cudaDevAttrMaxTexture2DLayeredWidth', 27)
cudaDevAttrMaxTexture2DLayeredHeight = enum_cudaDeviceAttr.define('cudaDevAttrMaxTexture2DLayeredHeight', 28)
cudaDevAttrMaxTexture2DLayeredLayers = enum_cudaDeviceAttr.define('cudaDevAttrMaxTexture2DLayeredLayers', 29)
cudaDevAttrSurfaceAlignment = enum_cudaDeviceAttr.define('cudaDevAttrSurfaceAlignment', 30)
cudaDevAttrConcurrentKernels = enum_cudaDeviceAttr.define('cudaDevAttrConcurrentKernels', 31)
cudaDevAttrEccEnabled = enum_cudaDeviceAttr.define('cudaDevAttrEccEnabled', 32)
cudaDevAttrPciBusId = enum_cudaDeviceAttr.define('cudaDevAttrPciBusId', 33)
cudaDevAttrPciDeviceId = enum_cudaDeviceAttr.define('cudaDevAttrPciDeviceId', 34)
cudaDevAttrTccDriver = enum_cudaDeviceAttr.define('cudaDevAttrTccDriver', 35)
cudaDevAttrMemoryClockRate = enum_cudaDeviceAttr.define('cudaDevAttrMemoryClockRate', 36)
cudaDevAttrGlobalMemoryBusWidth = enum_cudaDeviceAttr.define('cudaDevAttrGlobalMemoryBusWidth', 37)
cudaDevAttrL2CacheSize = enum_cudaDeviceAttr.define('cudaDevAttrL2CacheSize', 38)
cudaDevAttrMaxThreadsPerMultiProcessor = enum_cudaDeviceAttr.define('cudaDevAttrMaxThreadsPerMultiProcessor', 39)
cudaDevAttrAsyncEngineCount = enum_cudaDeviceAttr.define('cudaDevAttrAsyncEngineCount', 40)
cudaDevAttrUnifiedAddressing = enum_cudaDeviceAttr.define('cudaDevAttrUnifiedAddressing', 41)
cudaDevAttrMaxTexture1DLayeredWidth = enum_cudaDeviceAttr.define('cudaDevAttrMaxTexture1DLayeredWidth', 42)
cudaDevAttrMaxTexture1DLayeredLayers = enum_cudaDeviceAttr.define('cudaDevAttrMaxTexture1DLayeredLayers', 43)
cudaDevAttrMaxTexture2DGatherWidth = enum_cudaDeviceAttr.define('cudaDevAttrMaxTexture2DGatherWidth', 45)
cudaDevAttrMaxTexture2DGatherHeight = enum_cudaDeviceAttr.define('cudaDevAttrMaxTexture2DGatherHeight', 46)
cudaDevAttrMaxTexture3DWidthAlt = enum_cudaDeviceAttr.define('cudaDevAttrMaxTexture3DWidthAlt', 47)
cudaDevAttrMaxTexture3DHeightAlt = enum_cudaDeviceAttr.define('cudaDevAttrMaxTexture3DHeightAlt', 48)
cudaDevAttrMaxTexture3DDepthAlt = enum_cudaDeviceAttr.define('cudaDevAttrMaxTexture3DDepthAlt', 49)
cudaDevAttrPciDomainId = enum_cudaDeviceAttr.define('cudaDevAttrPciDomainId', 50)
cudaDevAttrTexturePitchAlignment = enum_cudaDeviceAttr.define('cudaDevAttrTexturePitchAlignment', 51)
cudaDevAttrMaxTextureCubemapWidth = enum_cudaDeviceAttr.define('cudaDevAttrMaxTextureCubemapWidth', 52)
cudaDevAttrMaxTextureCubemapLayeredWidth = enum_cudaDeviceAttr.define('cudaDevAttrMaxTextureCubemapLayeredWidth', 53)
cudaDevAttrMaxTextureCubemapLayeredLayers = enum_cudaDeviceAttr.define('cudaDevAttrMaxTextureCubemapLayeredLayers', 54)
cudaDevAttrMaxSurface1DWidth = enum_cudaDeviceAttr.define('cudaDevAttrMaxSurface1DWidth', 55)
cudaDevAttrMaxSurface2DWidth = enum_cudaDeviceAttr.define('cudaDevAttrMaxSurface2DWidth', 56)
cudaDevAttrMaxSurface2DHeight = enum_cudaDeviceAttr.define('cudaDevAttrMaxSurface2DHeight', 57)
cudaDevAttrMaxSurface3DWidth = enum_cudaDeviceAttr.define('cudaDevAttrMaxSurface3DWidth', 58)
cudaDevAttrMaxSurface3DHeight = enum_cudaDeviceAttr.define('cudaDevAttrMaxSurface3DHeight', 59)
cudaDevAttrMaxSurface3DDepth = enum_cudaDeviceAttr.define('cudaDevAttrMaxSurface3DDepth', 60)
cudaDevAttrMaxSurface1DLayeredWidth = enum_cudaDeviceAttr.define('cudaDevAttrMaxSurface1DLayeredWidth', 61)
cudaDevAttrMaxSurface1DLayeredLayers = enum_cudaDeviceAttr.define('cudaDevAttrMaxSurface1DLayeredLayers', 62)
cudaDevAttrMaxSurface2DLayeredWidth = enum_cudaDeviceAttr.define('cudaDevAttrMaxSurface2DLayeredWidth', 63)
cudaDevAttrMaxSurface2DLayeredHeight = enum_cudaDeviceAttr.define('cudaDevAttrMaxSurface2DLayeredHeight', 64)
cudaDevAttrMaxSurface2DLayeredLayers = enum_cudaDeviceAttr.define('cudaDevAttrMaxSurface2DLayeredLayers', 65)
cudaDevAttrMaxSurfaceCubemapWidth = enum_cudaDeviceAttr.define('cudaDevAttrMaxSurfaceCubemapWidth', 66)
cudaDevAttrMaxSurfaceCubemapLayeredWidth = enum_cudaDeviceAttr.define('cudaDevAttrMaxSurfaceCubemapLayeredWidth', 67)
cudaDevAttrMaxSurfaceCubemapLayeredLayers = enum_cudaDeviceAttr.define('cudaDevAttrMaxSurfaceCubemapLayeredLayers', 68)
cudaDevAttrMaxTexture1DLinearWidth = enum_cudaDeviceAttr.define('cudaDevAttrMaxTexture1DLinearWidth', 69)
cudaDevAttrMaxTexture2DLinearWidth = enum_cudaDeviceAttr.define('cudaDevAttrMaxTexture2DLinearWidth', 70)
cudaDevAttrMaxTexture2DLinearHeight = enum_cudaDeviceAttr.define('cudaDevAttrMaxTexture2DLinearHeight', 71)
cudaDevAttrMaxTexture2DLinearPitch = enum_cudaDeviceAttr.define('cudaDevAttrMaxTexture2DLinearPitch', 72)
cudaDevAttrMaxTexture2DMipmappedWidth = enum_cudaDeviceAttr.define('cudaDevAttrMaxTexture2DMipmappedWidth', 73)
cudaDevAttrMaxTexture2DMipmappedHeight = enum_cudaDeviceAttr.define('cudaDevAttrMaxTexture2DMipmappedHeight', 74)
cudaDevAttrComputeCapabilityMajor = enum_cudaDeviceAttr.define('cudaDevAttrComputeCapabilityMajor', 75)
cudaDevAttrComputeCapabilityMinor = enum_cudaDeviceAttr.define('cudaDevAttrComputeCapabilityMinor', 76)
cudaDevAttrMaxTexture1DMipmappedWidth = enum_cudaDeviceAttr.define('cudaDevAttrMaxTexture1DMipmappedWidth', 77)
cudaDevAttrStreamPrioritiesSupported = enum_cudaDeviceAttr.define('cudaDevAttrStreamPrioritiesSupported', 78)
cudaDevAttrGlobalL1CacheSupported = enum_cudaDeviceAttr.define('cudaDevAttrGlobalL1CacheSupported', 79)
cudaDevAttrLocalL1CacheSupported = enum_cudaDeviceAttr.define('cudaDevAttrLocalL1CacheSupported', 80)
cudaDevAttrMaxSharedMemoryPerMultiprocessor = enum_cudaDeviceAttr.define('cudaDevAttrMaxSharedMemoryPerMultiprocessor', 81)
cudaDevAttrMaxRegistersPerMultiprocessor = enum_cudaDeviceAttr.define('cudaDevAttrMaxRegistersPerMultiprocessor', 82)
cudaDevAttrManagedMemory = enum_cudaDeviceAttr.define('cudaDevAttrManagedMemory', 83)
cudaDevAttrIsMultiGpuBoard = enum_cudaDeviceAttr.define('cudaDevAttrIsMultiGpuBoard', 84)
cudaDevAttrMultiGpuBoardGroupID = enum_cudaDeviceAttr.define('cudaDevAttrMultiGpuBoardGroupID', 85)
cudaDevAttrHostNativeAtomicSupported = enum_cudaDeviceAttr.define('cudaDevAttrHostNativeAtomicSupported', 86)
cudaDevAttrSingleToDoublePrecisionPerfRatio = enum_cudaDeviceAttr.define('cudaDevAttrSingleToDoublePrecisionPerfRatio', 87)
cudaDevAttrPageableMemoryAccess = enum_cudaDeviceAttr.define('cudaDevAttrPageableMemoryAccess', 88)
cudaDevAttrConcurrentManagedAccess = enum_cudaDeviceAttr.define('cudaDevAttrConcurrentManagedAccess', 89)
cudaDevAttrComputePreemptionSupported = enum_cudaDeviceAttr.define('cudaDevAttrComputePreemptionSupported', 90)
cudaDevAttrCanUseHostPointerForRegisteredMem = enum_cudaDeviceAttr.define('cudaDevAttrCanUseHostPointerForRegisteredMem', 91)
cudaDevAttrReserved92 = enum_cudaDeviceAttr.define('cudaDevAttrReserved92', 92)
cudaDevAttrReserved93 = enum_cudaDeviceAttr.define('cudaDevAttrReserved93', 93)
cudaDevAttrReserved94 = enum_cudaDeviceAttr.define('cudaDevAttrReserved94', 94)
cudaDevAttrCooperativeLaunch = enum_cudaDeviceAttr.define('cudaDevAttrCooperativeLaunch', 95)
cudaDevAttrCooperativeMultiDeviceLaunch = enum_cudaDeviceAttr.define('cudaDevAttrCooperativeMultiDeviceLaunch', 96)
cudaDevAttrMaxSharedMemoryPerBlockOptin = enum_cudaDeviceAttr.define('cudaDevAttrMaxSharedMemoryPerBlockOptin', 97)
cudaDevAttrCanFlushRemoteWrites = enum_cudaDeviceAttr.define('cudaDevAttrCanFlushRemoteWrites', 98)
cudaDevAttrHostRegisterSupported = enum_cudaDeviceAttr.define('cudaDevAttrHostRegisterSupported', 99)
cudaDevAttrPageableMemoryAccessUsesHostPageTables = enum_cudaDeviceAttr.define('cudaDevAttrPageableMemoryAccessUsesHostPageTables', 100)
cudaDevAttrDirectManagedMemAccessFromHost = enum_cudaDeviceAttr.define('cudaDevAttrDirectManagedMemAccessFromHost', 101)
cudaDevAttrMaxBlocksPerMultiprocessor = enum_cudaDeviceAttr.define('cudaDevAttrMaxBlocksPerMultiprocessor', 106)
cudaDevAttrMaxPersistingL2CacheSize = enum_cudaDeviceAttr.define('cudaDevAttrMaxPersistingL2CacheSize', 108)
cudaDevAttrMaxAccessPolicyWindowSize = enum_cudaDeviceAttr.define('cudaDevAttrMaxAccessPolicyWindowSize', 109)
cudaDevAttrReservedSharedMemoryPerBlock = enum_cudaDeviceAttr.define('cudaDevAttrReservedSharedMemoryPerBlock', 111)
cudaDevAttrSparseCudaArraySupported = enum_cudaDeviceAttr.define('cudaDevAttrSparseCudaArraySupported', 112)
cudaDevAttrHostRegisterReadOnlySupported = enum_cudaDeviceAttr.define('cudaDevAttrHostRegisterReadOnlySupported', 113)
cudaDevAttrTimelineSemaphoreInteropSupported = enum_cudaDeviceAttr.define('cudaDevAttrTimelineSemaphoreInteropSupported', 114)
cudaDevAttrMaxTimelineSemaphoreInteropSupported = enum_cudaDeviceAttr.define('cudaDevAttrMaxTimelineSemaphoreInteropSupported', 114)
cudaDevAttrMemoryPoolsSupported = enum_cudaDeviceAttr.define('cudaDevAttrMemoryPoolsSupported', 115)
cudaDevAttrGPUDirectRDMASupported = enum_cudaDeviceAttr.define('cudaDevAttrGPUDirectRDMASupported', 116)
cudaDevAttrGPUDirectRDMAFlushWritesOptions = enum_cudaDeviceAttr.define('cudaDevAttrGPUDirectRDMAFlushWritesOptions', 117)
cudaDevAttrGPUDirectRDMAWritesOrdering = enum_cudaDeviceAttr.define('cudaDevAttrGPUDirectRDMAWritesOrdering', 118)
cudaDevAttrMemoryPoolSupportedHandleTypes = enum_cudaDeviceAttr.define('cudaDevAttrMemoryPoolSupportedHandleTypes', 119)
cudaDevAttrClusterLaunch = enum_cudaDeviceAttr.define('cudaDevAttrClusterLaunch', 120)
cudaDevAttrDeferredMappingCudaArraySupported = enum_cudaDeviceAttr.define('cudaDevAttrDeferredMappingCudaArraySupported', 121)
cudaDevAttrReserved122 = enum_cudaDeviceAttr.define('cudaDevAttrReserved122', 122)
cudaDevAttrReserved123 = enum_cudaDeviceAttr.define('cudaDevAttrReserved123', 123)
cudaDevAttrReserved124 = enum_cudaDeviceAttr.define('cudaDevAttrReserved124', 124)
cudaDevAttrIpcEventSupport = enum_cudaDeviceAttr.define('cudaDevAttrIpcEventSupport', 125)
cudaDevAttrMemSyncDomainCount = enum_cudaDeviceAttr.define('cudaDevAttrMemSyncDomainCount', 126)
cudaDevAttrReserved127 = enum_cudaDeviceAttr.define('cudaDevAttrReserved127', 127)
cudaDevAttrReserved128 = enum_cudaDeviceAttr.define('cudaDevAttrReserved128', 128)
cudaDevAttrReserved129 = enum_cudaDeviceAttr.define('cudaDevAttrReserved129', 129)
cudaDevAttrNumaConfig = enum_cudaDeviceAttr.define('cudaDevAttrNumaConfig', 130)
cudaDevAttrNumaId = enum_cudaDeviceAttr.define('cudaDevAttrNumaId', 131)
cudaDevAttrReserved132 = enum_cudaDeviceAttr.define('cudaDevAttrReserved132', 132)
cudaDevAttrMpsEnabled = enum_cudaDeviceAttr.define('cudaDevAttrMpsEnabled', 133)
cudaDevAttrHostNumaId = enum_cudaDeviceAttr.define('cudaDevAttrHostNumaId', 134)
cudaDevAttrD3D12CigSupported = enum_cudaDeviceAttr.define('cudaDevAttrD3D12CigSupported', 135)
cudaDevAttrGpuPciDeviceId = enum_cudaDeviceAttr.define('cudaDevAttrGpuPciDeviceId', 139)
cudaDevAttrGpuPciSubsystemId = enum_cudaDeviceAttr.define('cudaDevAttrGpuPciSubsystemId', 140)
cudaDevAttrHostNumaMultinodeIpcSupported = enum_cudaDeviceAttr.define('cudaDevAttrHostNumaMultinodeIpcSupported', 143)
cudaDevAttrMax = enum_cudaDeviceAttr.define('cudaDevAttrMax', 144)  # sentinel: one past the last attribute
cudaDeviceGetAttribute_v5000_params: TypeAlias = struct_cudaDeviceGetAttribute_v5000_params_st
# --- Device memory-pool, NvSciSync, and peer-to-peer query parameter blocks. ---
@c.record
class struct_cudaDeviceGetDefaultMemPool_v11020_params_st(c.Struct):
    SIZE = 16
    memPool: Annotated[c.POINTER[cudaMemPool_t], 0]  # out: pool handle
    device: Annotated[Annotated[int, ctypes.c_int32], 8]
# Opaque memory-pool handle (pointer to driver-side struct).
cudaMemPool_t: TypeAlias = c.POINTER[struct_CUmemPoolHandle_st]
cudaDeviceGetDefaultMemPool_v11020_params: TypeAlias = struct_cudaDeviceGetDefaultMemPool_v11020_params_st
@c.record
class struct_cudaDeviceSetMemPool_v11020_params_st(c.Struct):
    SIZE = 16
    device: Annotated[Annotated[int, ctypes.c_int32], 0]
    memPool: Annotated[cudaMemPool_t, 8]
cudaDeviceSetMemPool_v11020_params: TypeAlias = struct_cudaDeviceSetMemPool_v11020_params_st
@c.record
class struct_cudaDeviceGetMemPool_v11020_params_st(c.Struct):
    SIZE = 16
    memPool: Annotated[c.POINTER[cudaMemPool_t], 0]  # out: current pool
    device: Annotated[Annotated[int, ctypes.c_int32], 8]
cudaDeviceGetMemPool_v11020_params: TypeAlias = struct_cudaDeviceGetMemPool_v11020_params_st
@c.record
class struct_cudaDeviceGetNvSciSyncAttributes_v10020_params_st(c.Struct):
    SIZE = 16
    nvSciSyncAttrList: Annotated[ctypes.c_void_p, 0]  # opaque NvSci attr list
    device: Annotated[Annotated[int, ctypes.c_int32], 8]
    flags: Annotated[Annotated[int, ctypes.c_int32], 12]
cudaDeviceGetNvSciSyncAttributes_v10020_params: TypeAlias = struct_cudaDeviceGetNvSciSyncAttributes_v10020_params_st
@c.record
class struct_cudaDeviceGetP2PAttribute_v8000_params_st(c.Struct):
    SIZE = 24
    value: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 0]  # out: attribute value
    attr: Annotated[enum_cudaDeviceP2PAttr, 8]
    srcDevice: Annotated[Annotated[int, ctypes.c_int32], 12]
    dstDevice: Annotated[Annotated[int, ctypes.c_int32], 16]
class enum_cudaDeviceP2PAttr(Annotated[int, ctypes.c_uint32], c.Enum): pass
cudaDevP2PAttrPerformanceRank = enum_cudaDeviceP2PAttr.define('cudaDevP2PAttrPerformanceRank', 1)
cudaDevP2PAttrAccessSupported = enum_cudaDeviceP2PAttr.define('cudaDevP2PAttrAccessSupported', 2)
cudaDevP2PAttrNativeAtomicSupported = enum_cudaDeviceP2PAttr.define('cudaDevP2PAttrNativeAtomicSupported', 3)
cudaDevP2PAttrCudaArrayAccessSupported = enum_cudaDeviceP2PAttr.define('cudaDevP2PAttrCudaArrayAccessSupported', 4)
cudaDeviceGetP2PAttribute_v8000_params: TypeAlias = struct_cudaDeviceGetP2PAttribute_v8000_params_st
# --- Device selection / initialization parameter blocks. ---
@c.record
class struct_cudaChooseDevice_v3020_params_st(c.Struct):
    SIZE = 16
    device: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 0]  # out: chosen device
    prop: Annotated[c.POINTER[struct_cudaDeviceProp], 8]  # desired properties
cudaChooseDevice_v3020_params: TypeAlias = struct_cudaChooseDevice_v3020_params_st
@c.record
class struct_cudaInitDevice_v12000_params_st(c.Struct):
    SIZE = 12
    device: Annotated[Annotated[int, ctypes.c_int32], 0]
    deviceFlags: Annotated[Annotated[int, ctypes.c_uint32], 4]
    flags: Annotated[Annotated[int, ctypes.c_uint32], 8]
cudaInitDevice_v12000_params: TypeAlias = struct_cudaInitDevice_v12000_params_st
@c.record
class struct_cudaSetDevice_v3020_params_st(c.Struct):
    SIZE = 4
    device: Annotated[Annotated[int, ctypes.c_int32], 0]
cudaSetDevice_v3020_params: TypeAlias = struct_cudaSetDevice_v3020_params_st
@c.record
class struct_cudaGetDevice_v3020_params_st(c.Struct):
    SIZE = 8
    device: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 0]  # out: current device
cudaGetDevice_v3020_params: TypeAlias = struct_cudaGetDevice_v3020_params_st
@c.record
class struct_cudaSetValidDevices_v3020_params_st(c.Struct):
    SIZE = 16
    device_arr: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 0]
    len: Annotated[Annotated[int, ctypes.c_int32], 8]  # length of device_arr
cudaSetValidDevices_v3020_params: TypeAlias = struct_cudaSetValidDevices_v3020_params_st
@c.record
class struct_cudaSetDeviceFlags_v3020_params_st(c.Struct):
    SIZE = 4
    flags: Annotated[Annotated[int, ctypes.c_uint32], 0]
cudaSetDeviceFlags_v3020_params: TypeAlias = struct_cudaSetDeviceFlags_v3020_params_st
@c.record
class struct_cudaGetDeviceFlags_v7000_params_st(c.Struct):
    SIZE = 8
    flags: Annotated[c.POINTER[Annotated[int, ctypes.c_uint32]], 0]  # out: device flags
cudaGetDeviceFlags_v7000_params: TypeAlias = struct_cudaGetDeviceFlags_v7000_params_st
# --- Stream creation and query parameter blocks ("_ptsz" variants are the
# per-thread-default-stream entry points in the CUDA headers). ---
@c.record
class struct_cudaStreamCreate_v3020_params_st(c.Struct):
    SIZE = 8
    pStream: Annotated[c.POINTER[cudaStream_t], 0]  # out: new stream handle
# Opaque stream handle (pointer to driver-side struct).
cudaStream_t: TypeAlias = c.POINTER[struct_CUstream_st]
cudaStreamCreate_v3020_params: TypeAlias = struct_cudaStreamCreate_v3020_params_st
@c.record
class struct_cudaStreamCreateWithFlags_v5000_params_st(c.Struct):
    SIZE = 16
    pStream: Annotated[c.POINTER[cudaStream_t], 0]
    flags: Annotated[Annotated[int, ctypes.c_uint32], 8]
cudaStreamCreateWithFlags_v5000_params: TypeAlias = struct_cudaStreamCreateWithFlags_v5000_params_st
@c.record
class struct_cudaStreamCreateWithPriority_v5050_params_st(c.Struct):
    SIZE = 16
    pStream: Annotated[c.POINTER[cudaStream_t], 0]
    flags: Annotated[Annotated[int, ctypes.c_uint32], 8]
    priority: Annotated[Annotated[int, ctypes.c_int32], 12]
cudaStreamCreateWithPriority_v5050_params: TypeAlias = struct_cudaStreamCreateWithPriority_v5050_params_st
@c.record
class struct_cudaStreamGetPriority_ptsz_v7000_params_st(c.Struct):
    SIZE = 16
    hStream: Annotated[cudaStream_t, 0]
    priority: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 8]  # out
cudaStreamGetPriority_ptsz_v7000_params: TypeAlias = struct_cudaStreamGetPriority_ptsz_v7000_params_st
@c.record
class struct_cudaStreamGetFlags_ptsz_v7000_params_st(c.Struct):
    SIZE = 16
    hStream: Annotated[cudaStream_t, 0]
    flags: Annotated[c.POINTER[Annotated[int, ctypes.c_uint32]], 8]  # out
cudaStreamGetFlags_ptsz_v7000_params: TypeAlias = struct_cudaStreamGetFlags_ptsz_v7000_params_st
@c.record
class struct_cudaStreamGetId_ptsz_v12000_params_st(c.Struct):
    SIZE = 16
    hStream: Annotated[cudaStream_t, 0]
    streamId: Annotated[c.POINTER[Annotated[int, ctypes.c_uint64]], 8]  # out
cudaStreamGetId_ptsz_v12000_params: TypeAlias = struct_cudaStreamGetId_ptsz_v12000_params_st
@c.record
class struct_cudaStreamGetDevice_ptsz_v12080_params_st(c.Struct):
    SIZE = 16
    hStream: Annotated[cudaStream_t, 0]
    device: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 8]  # out
cudaStreamGetDevice_ptsz_v12080_params: TypeAlias = struct_cudaStreamGetDevice_ptsz_v12080_params_st
@c.record
class struct_cudaStreamCopyAttributes_ptsz_v11000_params_st(c.Struct):
    SIZE = 16
    dst: Annotated[cudaStream_t, 0]
    src: Annotated[cudaStream_t, 8]
cudaStreamCopyAttributes_ptsz_v11000_params: TypeAlias = struct_cudaStreamCopyAttributes_ptsz_v11000_params_st
# Argument block for cudaStreamGetAttribute (per-thread-stream variant),
# followed by the launch-attribute ID enum it uses.
@c.record
class struct_cudaStreamGetAttribute_ptsz_v11000_params_st(c.Struct):
    SIZE = 24
    hStream: Annotated[cudaStream_t, 0]
    attr: Annotated[cudaLaunchAttributeID, 8]
    value_out: Annotated[c.POINTER[cudaLaunchAttributeValue], 16]  # out: attribute value
class enum_cudaLaunchAttributeID(Annotated[int, ctypes.c_uint32], c.Enum): pass
cudaLaunchAttributeIgnore = enum_cudaLaunchAttributeID.define('cudaLaunchAttributeIgnore', 0)
cudaLaunchAttributeAccessPolicyWindow = enum_cudaLaunchAttributeID.define('cudaLaunchAttributeAccessPolicyWindow', 1)
cudaLaunchAttributeCooperative = enum_cudaLaunchAttributeID.define('cudaLaunchAttributeCooperative', 2)
cudaLaunchAttributeSynchronizationPolicy = enum_cudaLaunchAttributeID.define('cudaLaunchAttributeSynchronizationPolicy', 3)
cudaLaunchAttributeClusterDimension = enum_cudaLaunchAttributeID.define('cudaLaunchAttributeClusterDimension', 4)
cudaLaunchAttributeClusterSchedulingPolicyPreference = enum_cudaLaunchAttributeID.define('cudaLaunchAttributeClusterSchedulingPolicyPreference', 5)
cudaLaunchAttributeProgrammaticStreamSerialization = enum_cudaLaunchAttributeID.define('cudaLaunchAttributeProgrammaticStreamSerialization', 6)
cudaLaunchAttributeProgrammaticEvent = enum_cudaLaunchAttributeID.define('cudaLaunchAttributeProgrammaticEvent', 7)
cudaLaunchAttributePriority = enum_cudaLaunchAttributeID.define('cudaLaunchAttributePriority', 8)
cudaLaunchAttributeMemSyncDomainMap = enum_cudaLaunchAttributeID.define('cudaLaunchAttributeMemSyncDomainMap', 9)
cudaLaunchAttributeMemSyncDomain = enum_cudaLaunchAttributeID.define('cudaLaunchAttributeMemSyncDomain', 10)
cudaLaunchAttributePreferredClusterDimension = enum_cudaLaunchAttributeID.define('cudaLaunchAttributePreferredClusterDimension', 11)
cudaLaunchAttributeLaunchCompletionEvent = enum_cudaLaunchAttributeID.define('cudaLaunchAttributeLaunchCompletionEvent', 12)
cudaLaunchAttributeDeviceUpdatableKernelNode = enum_cudaLaunchAttributeID.define('cudaLaunchAttributeDeviceUpdatableKernelNode', 13)
cudaLaunchAttributePreferredSharedMemoryCarveout = enum_cudaLaunchAttributeID.define('cudaLaunchAttributePreferredSharedMemoryCarveout', 14)
cudaLaunchAttributeID: TypeAlias = enum_cudaLaunchAttributeID
@c.record
class union_cudaLaunchAttributeValue(c.Struct):
SIZE = 64
pad: Annotated[c.Array[Annotated[bytes, ctypes.c_char], Literal[64]], 0]
accessPolicyWindow: Annotated[struct_cudaAccessPolicyWindow, 0]
cooperative: Annotated[Annotated[int, ctypes.c_int32], 0]
syncPolicy: Annotated[enum_cudaSynchronizationPolicy, 0]
clusterDim: Annotated[union_cudaLaunchAttributeValue_clusterDim, 0]
clusterSchedulingPolicyPreference: Annotated[enum_cudaClusterSchedulingPolicy, 0]
programmaticStreamSerializationAllowed: Annotated[Annotated[int, ctypes.c_int32], 0]
programmaticEvent: Annotated[union_cudaLaunchAttributeValue_programmaticEvent, 0]
priority: Annotated[Annotated[int, ctypes.c_int32], 0]
memSyncDomainMap: Annotated[cudaLaunchMemSyncDomainMap, 0]
memSyncDomain: Annotated[cudaLaunchMemSyncDomain, 0]
preferredClusterDim: Annotated[union_cudaLaunchAttributeValue_preferredClusterDim, 0]
launchCompletionEvent: Annotated[union_cudaLaunchAttributeValue_launchCompletionEvent, 0]
deviceUpdatableKernelNode: Annotated[union_cudaLaunchAttributeValue_deviceUpdatableKernelNode, 0]
sharedMemCarveout: Annotated[Annotated[int, ctypes.c_uint32], 0]
cudaLaunchAttributeValue: TypeAlias = union_cudaLaunchAttributeValue
@c.record
class struct_cudaAccessPolicyWindow(c.Struct):
SIZE = 32
base_ptr: Annotated[ctypes.c_void_p, 0]
num_bytes: Annotated[size_t, 8]
hitRatio: Annotated[Annotated[float, ctypes.c_float], 16]
hitProp: Annotated[enum_cudaAccessProperty, 20]
missProp: Annotated[enum_cudaAccessProperty, 24]
class enum_cudaAccessProperty(Annotated[int, ctypes.c_uint32], c.Enum): pass
cudaAccessPropertyNormal = enum_cudaAccessProperty.define('cudaAccessPropertyNormal', 0)
cudaAccessPropertyStreaming = enum_cudaAccessProperty.define('cudaAccessPropertyStreaming', 1)
cudaAccessPropertyPersisting = enum_cudaAccessProperty.define('cudaAccessPropertyPersisting', 2)
class enum_cudaSynchronizationPolicy(Annotated[int, ctypes.c_uint32], c.Enum): pass
cudaSyncPolicyAuto = enum_cudaSynchronizationPolicy.define('cudaSyncPolicyAuto', 1)
cudaSyncPolicySpin = enum_cudaSynchronizationPolicy.define('cudaSyncPolicySpin', 2)
cudaSyncPolicyYield = enum_cudaSynchronizationPolicy.define('cudaSyncPolicyYield', 3)
cudaSyncPolicyBlockingSync = enum_cudaSynchronizationPolicy.define('cudaSyncPolicyBlockingSync', 4)
@c.record
class union_cudaLaunchAttributeValue_clusterDim(c.Struct):
SIZE = 12
x: Annotated[Annotated[int, ctypes.c_uint32], 0]
y: Annotated[Annotated[int, ctypes.c_uint32], 4]
z: Annotated[Annotated[int, ctypes.c_uint32], 8]
class enum_cudaClusterSchedulingPolicy(Annotated[int, ctypes.c_uint32], c.Enum): pass
cudaClusterSchedulingPolicyDefault = enum_cudaClusterSchedulingPolicy.define('cudaClusterSchedulingPolicyDefault', 0)
cudaClusterSchedulingPolicySpread = enum_cudaClusterSchedulingPolicy.define('cudaClusterSchedulingPolicySpread', 1)
cudaClusterSchedulingPolicyLoadBalancing = enum_cudaClusterSchedulingPolicy.define('cudaClusterSchedulingPolicyLoadBalancing', 2)
@c.record
class union_cudaLaunchAttributeValue_programmaticEvent(c.Struct):
SIZE = 16
event: Annotated[cudaEvent_t, 0]
flags: Annotated[Annotated[int, ctypes.c_int32], 8]
triggerAtBlockStart: Annotated[Annotated[int, ctypes.c_int32], 12]
@c.record
class struct_cudaLaunchMemSyncDomainMap_st(c.Struct):
SIZE = 2
default_: Annotated[Annotated[int, ctypes.c_ubyte], 0]
remote: Annotated[Annotated[int, ctypes.c_ubyte], 1]
cudaLaunchMemSyncDomainMap: TypeAlias = struct_cudaLaunchMemSyncDomainMap_st
class enum_cudaLaunchMemSyncDomain(Annotated[int, ctypes.c_uint32], c.Enum): pass
cudaLaunchMemSyncDomainDefault = enum_cudaLaunchMemSyncDomain.define('cudaLaunchMemSyncDomainDefault', 0)
cudaLaunchMemSyncDomainRemote = enum_cudaLaunchMemSyncDomain.define('cudaLaunchMemSyncDomainRemote', 1)
cudaLaunchMemSyncDomain: TypeAlias = enum_cudaLaunchMemSyncDomain
@c.record
class union_cudaLaunchAttributeValue_preferredClusterDim(c.Struct):
SIZE = 12
x: Annotated[Annotated[int, ctypes.c_uint32], 0]
y: Annotated[Annotated[int, ctypes.c_uint32], 4]
z: Annotated[Annotated[int, ctypes.c_uint32], 8]
@c.record
class union_cudaLaunchAttributeValue_launchCompletionEvent(c.Struct):
SIZE = 16
event: Annotated[cudaEvent_t, 0]
flags: Annotated[Annotated[int, ctypes.c_int32], 8]
@c.record
class union_cudaLaunchAttributeValue_deviceUpdatableKernelNode(c.Struct):
SIZE = 16
deviceUpdatable: Annotated[Annotated[int, ctypes.c_int32], 0]
devNode: Annotated[cudaGraphDeviceNode_t, 8]
cudaGraphDeviceNode_t: TypeAlias = c.POINTER[struct_CUgraphDeviceUpdatableNode_st]
cudaStreamGetAttribute_ptsz_v11000_params: TypeAlias = struct_cudaStreamGetAttribute_ptsz_v11000_params_st
# --- Stream API parameter records (generated bindings) ---
# Each `struct_<api>_<tag>_params_st` mirrors the argument block of one CUDA runtime
# entry point; fields appear in call-argument order at their ABI offsets, and each
# record gets a `<api>_<tag>_params` alias. The `_vNNNN` suffix presumably encodes
# the CUDA version that introduced that ABI tag, and `_ptsz` the per-thread-default-
# stream variant — TODO confirm against the generator's source headers.
@c.record
class struct_cudaStreamSetAttribute_ptsz_v11000_params_st(c.Struct):
  SIZE = 24
  hStream: Annotated[cudaStream_t, 0]
  attr: Annotated[cudaLaunchAttributeID, 8]
  value: Annotated[c.POINTER[cudaLaunchAttributeValue], 16]
cudaStreamSetAttribute_ptsz_v11000_params: TypeAlias = struct_cudaStreamSetAttribute_ptsz_v11000_params_st
@c.record
class struct_cudaStreamDestroy_v5050_params_st(c.Struct):
  SIZE = 8
  stream: Annotated[cudaStream_t, 0]
cudaStreamDestroy_v5050_params: TypeAlias = struct_cudaStreamDestroy_v5050_params_st
@c.record
class struct_cudaStreamWaitEvent_ptsz_v7000_params_st(c.Struct):
  SIZE = 24
  stream: Annotated[cudaStream_t, 0]
  event: Annotated[cudaEvent_t, 8]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 16]
cudaStreamWaitEvent_ptsz_v7000_params: TypeAlias = struct_cudaStreamWaitEvent_ptsz_v7000_params_st
@c.record
class struct_cudaStreamAddCallback_ptsz_v7000_params_st(c.Struct):
  SIZE = 32
  stream: Annotated[cudaStream_t, 0]
  callback: Annotated[cudaStreamCallback_t, 8]
  userData: Annotated[ctypes.c_void_p, 16]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 24]
# C function-pointer type: void (*)(cudaStream_t, cudaError_t, void*).
cudaStreamCallback_t: TypeAlias = c.CFUNCTYPE[None, [c.POINTER[struct_CUstream_st], enum_cudaError, ctypes.c_void_p]]
cudaStreamAddCallback_ptsz_v7000_params: TypeAlias = struct_cudaStreamAddCallback_ptsz_v7000_params_st
@c.record
class struct_cudaStreamSynchronize_ptsz_v7000_params_st(c.Struct):
  SIZE = 8
  stream: Annotated[cudaStream_t, 0]
cudaStreamSynchronize_ptsz_v7000_params: TypeAlias = struct_cudaStreamSynchronize_ptsz_v7000_params_st
@c.record
class struct_cudaStreamQuery_ptsz_v7000_params_st(c.Struct):
  SIZE = 8
  stream: Annotated[cudaStream_t, 0]
cudaStreamQuery_ptsz_v7000_params: TypeAlias = struct_cudaStreamQuery_ptsz_v7000_params_st
@c.record
class struct_cudaStreamAttachMemAsync_ptsz_v7000_params_st(c.Struct):
  SIZE = 32
  stream: Annotated[cudaStream_t, 0]
  devPtr: Annotated[ctypes.c_void_p, 8]
  length: Annotated[size_t, 16]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 24]
cudaStreamAttachMemAsync_ptsz_v7000_params: TypeAlias = struct_cudaStreamAttachMemAsync_ptsz_v7000_params_st
@c.record
class struct_cudaStreamBeginCapture_ptsz_v10000_params_st(c.Struct):
  SIZE = 16
  stream: Annotated[cudaStream_t, 0]
  mode: Annotated[enum_cudaStreamCaptureMode, 8]
class enum_cudaStreamCaptureMode(Annotated[int, ctypes.c_uint32], c.Enum): pass
cudaStreamCaptureModeGlobal = enum_cudaStreamCaptureMode.define('cudaStreamCaptureModeGlobal', 0)
cudaStreamCaptureModeThreadLocal = enum_cudaStreamCaptureMode.define('cudaStreamCaptureModeThreadLocal', 1)
cudaStreamCaptureModeRelaxed = enum_cudaStreamCaptureMode.define('cudaStreamCaptureModeRelaxed', 2)
cudaStreamBeginCapture_ptsz_v10000_params: TypeAlias = struct_cudaStreamBeginCapture_ptsz_v10000_params_st
# --- Stream-capture / graph-edge parameter records (generated bindings) ---
# Opaque graph handles and the 8-byte edge-data record used by the v3/v2 capture APIs.
@c.record
class struct_cudaStreamBeginCaptureToGraph_ptsz_v12030_params_st(c.Struct):
  SIZE = 48
  stream: Annotated[cudaStream_t, 0]
  graph: Annotated[cudaGraph_t, 8]
  dependencies: Annotated[c.POINTER[cudaGraphNode_t], 16]
  dependencyData: Annotated[c.POINTER[cudaGraphEdgeData], 24]
  numDependencies: Annotated[size_t, 32]
  mode: Annotated[enum_cudaStreamCaptureMode, 40]
# Opaque handle aliases (underlying structs declared elsewhere in this file).
cudaGraph_t: TypeAlias = c.POINTER[struct_CUgraph_st]
cudaGraphNode_t: TypeAlias = c.POINTER[struct_CUgraphNode_st]
# 8-byte edge descriptor: three port/type bytes plus 5 reserved bytes of padding.
# NOTE: the `type` field name shadows the Python builtin — harmless as a class
# attribute annotation, and kept to match the C field name.
@c.record
class struct_cudaGraphEdgeData_st(c.Struct):
  SIZE = 8
  from_port: Annotated[Annotated[int, ctypes.c_ubyte], 0]
  to_port: Annotated[Annotated[int, ctypes.c_ubyte], 1]
  type: Annotated[Annotated[int, ctypes.c_ubyte], 2]
  reserved: Annotated[c.Array[Annotated[int, ctypes.c_ubyte], Literal[5]], 3]
cudaGraphEdgeData: TypeAlias = struct_cudaGraphEdgeData_st
cudaStreamBeginCaptureToGraph_ptsz_v12030_params: TypeAlias = struct_cudaStreamBeginCaptureToGraph_ptsz_v12030_params_st
@c.record
class struct_cudaThreadExchangeStreamCaptureMode_v10010_params_st(c.Struct):
  SIZE = 8
  mode: Annotated[c.POINTER[enum_cudaStreamCaptureMode], 0]
cudaThreadExchangeStreamCaptureMode_v10010_params: TypeAlias = struct_cudaThreadExchangeStreamCaptureMode_v10010_params_st
@c.record
class struct_cudaStreamEndCapture_ptsz_v10000_params_st(c.Struct):
  SIZE = 16
  stream: Annotated[cudaStream_t, 0]
  pGraph: Annotated[c.POINTER[cudaGraph_t], 8]
cudaStreamEndCapture_ptsz_v10000_params: TypeAlias = struct_cudaStreamEndCapture_ptsz_v10000_params_st
@c.record
class struct_cudaStreamIsCapturing_ptsz_v10000_params_st(c.Struct):
  SIZE = 16
  stream: Annotated[cudaStream_t, 0]
  pCaptureStatus: Annotated[c.POINTER[enum_cudaStreamCaptureStatus], 8]
class enum_cudaStreamCaptureStatus(Annotated[int, ctypes.c_uint32], c.Enum): pass
cudaStreamCaptureStatusNone = enum_cudaStreamCaptureStatus.define('cudaStreamCaptureStatusNone', 0)
cudaStreamCaptureStatusActive = enum_cudaStreamCaptureStatus.define('cudaStreamCaptureStatusActive', 1)
cudaStreamCaptureStatusInvalidated = enum_cudaStreamCaptureStatus.define('cudaStreamCaptureStatusInvalidated', 2)
cudaStreamIsCapturing_ptsz_v10000_params: TypeAlias = struct_cudaStreamIsCapturing_ptsz_v10000_params_st
# v2 (CUDA 11.3) and v3 (CUDA 12.3) GetCaptureInfo records: v3 adds `edgeData_out`
# between the dependency pointer and the count, growing the record from 48 to 56 bytes.
@c.record
class struct_cudaStreamGetCaptureInfo_v2_ptsz_v11030_params_st(c.Struct):
  SIZE = 48
  stream: Annotated[cudaStream_t, 0]
  captureStatus_out: Annotated[c.POINTER[enum_cudaStreamCaptureStatus], 8]
  id_out: Annotated[c.POINTER[Annotated[int, ctypes.c_uint64]], 16]
  graph_out: Annotated[c.POINTER[cudaGraph_t], 24]
  dependencies_out: Annotated[c.POINTER[c.POINTER[cudaGraphNode_t]], 32]
  numDependencies_out: Annotated[c.POINTER[size_t], 40]
cudaStreamGetCaptureInfo_v2_ptsz_v11030_params: TypeAlias = struct_cudaStreamGetCaptureInfo_v2_ptsz_v11030_params_st
@c.record
class struct_cudaStreamGetCaptureInfo_v3_ptsz_v12030_params_st(c.Struct):
  SIZE = 56
  stream: Annotated[cudaStream_t, 0]
  captureStatus_out: Annotated[c.POINTER[enum_cudaStreamCaptureStatus], 8]
  id_out: Annotated[c.POINTER[Annotated[int, ctypes.c_uint64]], 16]
  graph_out: Annotated[c.POINTER[cudaGraph_t], 24]
  dependencies_out: Annotated[c.POINTER[c.POINTER[cudaGraphNode_t]], 32]
  edgeData_out: Annotated[c.POINTER[c.POINTER[cudaGraphEdgeData]], 40]
  numDependencies_out: Annotated[c.POINTER[size_t], 48]
cudaStreamGetCaptureInfo_v3_ptsz_v12030_params: TypeAlias = struct_cudaStreamGetCaptureInfo_v3_ptsz_v12030_params_st
@c.record
class struct_cudaStreamUpdateCaptureDependencies_ptsz_v11030_params_st(c.Struct):
  SIZE = 32
  stream: Annotated[cudaStream_t, 0]
  dependencies: Annotated[c.POINTER[cudaGraphNode_t], 8]
  numDependencies: Annotated[size_t, 16]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 24]
cudaStreamUpdateCaptureDependencies_ptsz_v11030_params: TypeAlias = struct_cudaStreamUpdateCaptureDependencies_ptsz_v11030_params_st
@c.record
class struct_cudaStreamUpdateCaptureDependencies_v2_ptsz_v12030_params_st(c.Struct):
  SIZE = 40
  stream: Annotated[cudaStream_t, 0]
  dependencies: Annotated[c.POINTER[cudaGraphNode_t], 8]
  dependencyData: Annotated[c.POINTER[cudaGraphEdgeData], 16]
  numDependencies: Annotated[size_t, 24]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 32]
cudaStreamUpdateCaptureDependencies_v2_ptsz_v12030_params: TypeAlias = struct_cudaStreamUpdateCaptureDependencies_v2_ptsz_v12030_params_st
# --- Event API parameter records (generated bindings) ---
@c.record
class struct_cudaEventCreate_v3020_params_st(c.Struct):
  SIZE = 8
  event: Annotated[c.POINTER[cudaEvent_t], 0]
cudaEventCreate_v3020_params: TypeAlias = struct_cudaEventCreate_v3020_params_st
@c.record
class struct_cudaEventCreateWithFlags_v3020_params_st(c.Struct):
  SIZE = 16
  event: Annotated[c.POINTER[cudaEvent_t], 0]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 8]
cudaEventCreateWithFlags_v3020_params: TypeAlias = struct_cudaEventCreateWithFlags_v3020_params_st
@c.record
class struct_cudaEventRecord_ptsz_v7000_params_st(c.Struct):
  SIZE = 16
  event: Annotated[cudaEvent_t, 0]
  stream: Annotated[cudaStream_t, 8]
cudaEventRecord_ptsz_v7000_params: TypeAlias = struct_cudaEventRecord_ptsz_v7000_params_st
@c.record
class struct_cudaEventRecordWithFlags_ptsz_v11010_params_st(c.Struct):
  SIZE = 24
  event: Annotated[cudaEvent_t, 0]
  stream: Annotated[cudaStream_t, 8]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 16]
cudaEventRecordWithFlags_ptsz_v11010_params: TypeAlias = struct_cudaEventRecordWithFlags_ptsz_v11010_params_st
@c.record
class struct_cudaEventQuery_v3020_params_st(c.Struct):
  SIZE = 8
  event: Annotated[cudaEvent_t, 0]
cudaEventQuery_v3020_params: TypeAlias = struct_cudaEventQuery_v3020_params_st
@c.record
class struct_cudaEventSynchronize_v3020_params_st(c.Struct):
  SIZE = 8
  event: Annotated[cudaEvent_t, 0]
cudaEventSynchronize_v3020_params: TypeAlias = struct_cudaEventSynchronize_v3020_params_st
@c.record
class struct_cudaEventDestroy_v3020_params_st(c.Struct):
  SIZE = 8
  event: Annotated[cudaEvent_t, 0]
cudaEventDestroy_v3020_params: TypeAlias = struct_cudaEventDestroy_v3020_params_st
# ElapsedTime v1 (CUDA 3.2) and v2 (CUDA 12.8) share an identical 24-byte layout;
# both records exist only because the runtime exports two ABI-tagged entry points.
@c.record
class struct_cudaEventElapsedTime_v3020_params_st(c.Struct):
  SIZE = 24
  ms: Annotated[c.POINTER[Annotated[float, ctypes.c_float]], 0]
  start: Annotated[cudaEvent_t, 8]
  end: Annotated[cudaEvent_t, 16]
cudaEventElapsedTime_v3020_params: TypeAlias = struct_cudaEventElapsedTime_v3020_params_st
@c.record
class struct_cudaEventElapsedTime_v2_v12080_params_st(c.Struct):
  SIZE = 24
  ms: Annotated[c.POINTER[Annotated[float, ctypes.c_float]], 0]
  start: Annotated[cudaEvent_t, 8]
  end: Annotated[cudaEvent_t, 16]
cudaEventElapsedTime_v2_v12080_params: TypeAlias = struct_cudaEventElapsedTime_v2_v12080_params_st
# --- External-memory interop records (generated bindings) ---
@c.record
class struct_cudaImportExternalMemory_v10000_params_st(c.Struct):
  SIZE = 16
  extMem_out: Annotated[c.POINTER[cudaExternalMemory_t], 0]
  memHandleDesc: Annotated[c.POINTER[struct_cudaExternalMemoryHandleDesc], 8]
class struct_CUexternalMemory_st(ctypes.Structure): pass
cudaExternalMemory_t: TypeAlias = c.POINTER[struct_CUexternalMemory_st]
@c.record
class struct_cudaExternalMemoryHandleDesc(c.Struct):
  SIZE = 40
  type: Annotated[enum_cudaExternalMemoryHandleType, 0]
  handle: Annotated[struct_cudaExternalMemoryHandleDesc_handle, 8]
  size: Annotated[Annotated[int, ctypes.c_uint64], 24]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 32]
class enum_cudaExternalMemoryHandleType(Annotated[int, ctypes.c_uint32], c.Enum): pass
cudaExternalMemoryHandleTypeOpaqueFd = enum_cudaExternalMemoryHandleType.define('cudaExternalMemoryHandleTypeOpaqueFd', 1)
cudaExternalMemoryHandleTypeOpaqueWin32 = enum_cudaExternalMemoryHandleType.define('cudaExternalMemoryHandleTypeOpaqueWin32', 2)
cudaExternalMemoryHandleTypeOpaqueWin32Kmt = enum_cudaExternalMemoryHandleType.define('cudaExternalMemoryHandleTypeOpaqueWin32Kmt', 3)
cudaExternalMemoryHandleTypeD3D12Heap = enum_cudaExternalMemoryHandleType.define('cudaExternalMemoryHandleTypeD3D12Heap', 4)
cudaExternalMemoryHandleTypeD3D12Resource = enum_cudaExternalMemoryHandleType.define('cudaExternalMemoryHandleTypeD3D12Resource', 5)
cudaExternalMemoryHandleTypeD3D11Resource = enum_cudaExternalMemoryHandleType.define('cudaExternalMemoryHandleTypeD3D11Resource', 6)
cudaExternalMemoryHandleTypeD3D11ResourceKmt = enum_cudaExternalMemoryHandleType.define('cudaExternalMemoryHandleTypeD3D11ResourceKmt', 7)
cudaExternalMemoryHandleTypeNvSciBuf = enum_cudaExternalMemoryHandleType.define('cudaExternalMemoryHandleTypeNvSciBuf', 8)
# `fd`, `win32`, and `nvSciBufObject` all sit at offset 0 — this record models the
# C union inside cudaExternalMemoryHandleDesc (overlapping alternatives; which one
# is valid is selected by the `type` field of the enclosing descriptor).
@c.record
class struct_cudaExternalMemoryHandleDesc_handle(c.Struct):
  SIZE = 16
  fd: Annotated[Annotated[int, ctypes.c_int32], 0]
  win32: Annotated[struct_cudaExternalMemoryHandleDesc_handle_win32, 0]
  nvSciBufObject: Annotated[ctypes.c_void_p, 0]
@c.record
class struct_cudaExternalMemoryHandleDesc_handle_win32(c.Struct):
  SIZE = 16
  handle: Annotated[ctypes.c_void_p, 0]
  name: Annotated[ctypes.c_void_p, 8]
cudaImportExternalMemory_v10000_params: TypeAlias = struct_cudaImportExternalMemory_v10000_params_st
@c.record
class struct_cudaExternalMemoryGetMappedBuffer_v10000_params_st(c.Struct):
  SIZE = 24
  devPtr: Annotated[c.POINTER[ctypes.c_void_p], 0]
  extMem: Annotated[cudaExternalMemory_t, 8]
  bufferDesc: Annotated[c.POINTER[struct_cudaExternalMemoryBufferDesc], 16]
@c.record
class struct_cudaExternalMemoryBufferDesc(c.Struct):
  SIZE = 24
  offset: Annotated[Annotated[int, ctypes.c_uint64], 0]
  size: Annotated[Annotated[int, ctypes.c_uint64], 8]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 16]
cudaExternalMemoryGetMappedBuffer_v10000_params: TypeAlias = struct_cudaExternalMemoryGetMappedBuffer_v10000_params_st
@c.record
class struct_cudaExternalMemoryGetMappedMipmappedArray_v10000_params_st(c.Struct):
  SIZE = 24
  mipmap: Annotated[c.POINTER[cudaMipmappedArray_t], 0]
  extMem: Annotated[cudaExternalMemory_t, 8]
  mipmapDesc: Annotated[c.POINTER[struct_cudaExternalMemoryMipmappedArrayDesc], 16]
class struct_cudaMipmappedArray(ctypes.Structure): pass
cudaMipmappedArray_t: TypeAlias = c.POINTER[struct_cudaMipmappedArray]
@c.record
class struct_cudaExternalMemoryMipmappedArrayDesc(c.Struct):
  SIZE = 64
  offset: Annotated[Annotated[int, ctypes.c_uint64], 0]
  formatDesc: Annotated[struct_cudaChannelFormatDesc, 8]
  extent: Annotated[struct_cudaExtent, 32]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 56]
  numLevels: Annotated[Annotated[int, ctypes.c_uint32], 60]
# width/height/depth triple of size_t used by the 3-D memory APIs.
@c.record
class struct_cudaExtent(c.Struct):
  SIZE = 24
  width: Annotated[size_t, 0]
  height: Annotated[size_t, 8]
  depth: Annotated[size_t, 16]
cudaExternalMemoryGetMappedMipmappedArray_v10000_params: TypeAlias = struct_cudaExternalMemoryGetMappedMipmappedArray_v10000_params_st
@c.record
class struct_cudaDestroyExternalMemory_v10000_params_st(c.Struct):
  SIZE = 8
  extMem: Annotated[cudaExternalMemory_t, 0]
cudaDestroyExternalMemory_v10000_params: TypeAlias = struct_cudaDestroyExternalMemory_v10000_params_st
# --- External-semaphore interop records (generated bindings) ---
@c.record
class struct_cudaImportExternalSemaphore_v10000_params_st(c.Struct):
  SIZE = 16
  extSem_out: Annotated[c.POINTER[cudaExternalSemaphore_t], 0]
  semHandleDesc: Annotated[c.POINTER[struct_cudaExternalSemaphoreHandleDesc], 8]
class struct_CUexternalSemaphore_st(ctypes.Structure): pass
cudaExternalSemaphore_t: TypeAlias = c.POINTER[struct_CUexternalSemaphore_st]
@c.record
class struct_cudaExternalSemaphoreHandleDesc(c.Struct):
  SIZE = 32
  type: Annotated[enum_cudaExternalSemaphoreHandleType, 0]
  handle: Annotated[struct_cudaExternalSemaphoreHandleDesc_handle, 8]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 24]
class enum_cudaExternalSemaphoreHandleType(Annotated[int, ctypes.c_uint32], c.Enum): pass
cudaExternalSemaphoreHandleTypeOpaqueFd = enum_cudaExternalSemaphoreHandleType.define('cudaExternalSemaphoreHandleTypeOpaqueFd', 1)
cudaExternalSemaphoreHandleTypeOpaqueWin32 = enum_cudaExternalSemaphoreHandleType.define('cudaExternalSemaphoreHandleTypeOpaqueWin32', 2)
cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt = enum_cudaExternalSemaphoreHandleType.define('cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt', 3)
cudaExternalSemaphoreHandleTypeD3D12Fence = enum_cudaExternalSemaphoreHandleType.define('cudaExternalSemaphoreHandleTypeD3D12Fence', 4)
cudaExternalSemaphoreHandleTypeD3D11Fence = enum_cudaExternalSemaphoreHandleType.define('cudaExternalSemaphoreHandleTypeD3D11Fence', 5)
cudaExternalSemaphoreHandleTypeNvSciSync = enum_cudaExternalSemaphoreHandleType.define('cudaExternalSemaphoreHandleTypeNvSciSync', 6)
cudaExternalSemaphoreHandleTypeKeyedMutex = enum_cudaExternalSemaphoreHandleType.define('cudaExternalSemaphoreHandleTypeKeyedMutex', 7)
cudaExternalSemaphoreHandleTypeKeyedMutexKmt = enum_cudaExternalSemaphoreHandleType.define('cudaExternalSemaphoreHandleTypeKeyedMutexKmt', 8)
cudaExternalSemaphoreHandleTypeTimelineSemaphoreFd = enum_cudaExternalSemaphoreHandleType.define('cudaExternalSemaphoreHandleTypeTimelineSemaphoreFd', 9)
cudaExternalSemaphoreHandleTypeTimelineSemaphoreWin32 = enum_cudaExternalSemaphoreHandleType.define('cudaExternalSemaphoreHandleTypeTimelineSemaphoreWin32', 10)
# All three alternatives at offset 0: models the C union inside the handle descriptor
# (the active member is selected by the descriptor's `type` field).
@c.record
class struct_cudaExternalSemaphoreHandleDesc_handle(c.Struct):
  SIZE = 16
  fd: Annotated[Annotated[int, ctypes.c_int32], 0]
  win32: Annotated[struct_cudaExternalSemaphoreHandleDesc_handle_win32, 0]
  nvSciSyncObj: Annotated[ctypes.c_void_p, 0]
@c.record
class struct_cudaExternalSemaphoreHandleDesc_handle_win32(c.Struct):
  SIZE = 16
  handle: Annotated[ctypes.c_void_p, 0]
  name: Annotated[ctypes.c_void_p, 8]
cudaImportExternalSemaphore_v10000_params: TypeAlias = struct_cudaImportExternalSemaphore_v10000_params_st
@c.record
class struct_cudaSignalExternalSemaphoresAsync_v2_ptsz_v11020_params_st(c.Struct):
  SIZE = 32
  extSemArray: Annotated[c.POINTER[cudaExternalSemaphore_t], 0]
  paramsArray: Annotated[c.POINTER[struct_cudaExternalSemaphoreSignalParams], 8]
  numExtSems: Annotated[Annotated[int, ctypes.c_uint32], 16]
  stream: Annotated[cudaStream_t, 24]
@c.record
class struct_cudaExternalSemaphoreSignalParams(c.Struct):
  SIZE = 144
  params: Annotated[struct_cudaExternalSemaphoreSignalParams_params, 0]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 72]
  reserved: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[16]], 76]
@c.record
class struct_cudaExternalSemaphoreSignalParams_params(c.Struct):
  SIZE = 72
  fence: Annotated[struct_cudaExternalSemaphoreSignalParams_params_fence, 0]
  nvSciSync: Annotated[struct_cudaExternalSemaphoreSignalParams_params_nvSciSync, 8]
  keyedMutex: Annotated[struct_cudaExternalSemaphoreSignalParams_params_keyedMutex, 16]
  reserved: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[12]], 24]
@c.record
class struct_cudaExternalSemaphoreSignalParams_params_fence(c.Struct):
  SIZE = 8
  value: Annotated[Annotated[int, ctypes.c_uint64], 0]
# `fence` and `reserved` overlap at offset 0 — union-style storage in the C header.
@c.record
class struct_cudaExternalSemaphoreSignalParams_params_nvSciSync(c.Struct):
  SIZE = 8
  fence: Annotated[ctypes.c_void_p, 0]
  reserved: Annotated[Annotated[int, ctypes.c_uint64], 0]
@c.record
class struct_cudaExternalSemaphoreSignalParams_params_keyedMutex(c.Struct):
  SIZE = 8
  key: Annotated[Annotated[int, ctypes.c_uint64], 0]
cudaSignalExternalSemaphoresAsync_v2_ptsz_v11020_params: TypeAlias = struct_cudaSignalExternalSemaphoresAsync_v2_ptsz_v11020_params_st
@c.record
class struct_cudaWaitExternalSemaphoresAsync_v2_ptsz_v11020_params_st(c.Struct):
  SIZE = 32
  extSemArray: Annotated[c.POINTER[cudaExternalSemaphore_t], 0]
  paramsArray: Annotated[c.POINTER[struct_cudaExternalSemaphoreWaitParams], 8]
  numExtSems: Annotated[Annotated[int, ctypes.c_uint32], 16]
  stream: Annotated[cudaStream_t, 24]
@c.record
class struct_cudaExternalSemaphoreWaitParams(c.Struct):
  SIZE = 144
  params: Annotated[struct_cudaExternalSemaphoreWaitParams_params, 0]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 72]
  reserved: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[16]], 76]
# Wait variant differs from Signal: keyedMutex is 16 bytes (adds timeoutMs), so
# `reserved` starts at 32 and holds 10 (not 12) uint32 slots.
@c.record
class struct_cudaExternalSemaphoreWaitParams_params(c.Struct):
  SIZE = 72
  fence: Annotated[struct_cudaExternalSemaphoreWaitParams_params_fence, 0]
  nvSciSync: Annotated[struct_cudaExternalSemaphoreWaitParams_params_nvSciSync, 8]
  keyedMutex: Annotated[struct_cudaExternalSemaphoreWaitParams_params_keyedMutex, 16]
  reserved: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[10]], 32]
@c.record
class struct_cudaExternalSemaphoreWaitParams_params_fence(c.Struct):
  SIZE = 8
  value: Annotated[Annotated[int, ctypes.c_uint64], 0]
@c.record
class struct_cudaExternalSemaphoreWaitParams_params_nvSciSync(c.Struct):
  SIZE = 8
  fence: Annotated[ctypes.c_void_p, 0]
  reserved: Annotated[Annotated[int, ctypes.c_uint64], 0]
@c.record
class struct_cudaExternalSemaphoreWaitParams_params_keyedMutex(c.Struct):
  SIZE = 16
  key: Annotated[Annotated[int, ctypes.c_uint64], 0]
  timeoutMs: Annotated[Annotated[int, ctypes.c_uint32], 8]
cudaWaitExternalSemaphoresAsync_v2_ptsz_v11020_params: TypeAlias = struct_cudaWaitExternalSemaphoresAsync_v2_ptsz_v11020_params_st
@c.record
class struct_cudaDestroyExternalSemaphore_v10000_params_st(c.Struct):
  SIZE = 8
  extSem: Annotated[cudaExternalSemaphore_t, 0]
cudaDestroyExternalSemaphore_v10000_params: TypeAlias = struct_cudaDestroyExternalSemaphore_v10000_params_st
# --- Kernel-launch parameter records, dim3, and launch config/attribute (generated bindings) ---
# gridDim at offset 8 and blockDim at offset 20: dim3 is 12 bytes with 4-byte
# alignment, so the two dim3s pack back-to-back without padding.
@c.record
class struct_cudaLaunchKernel_ptsz_v7000_params_st(c.Struct):
  SIZE = 56
  func: Annotated[ctypes.c_void_p, 0]
  gridDim: Annotated[dim3, 8]
  blockDim: Annotated[dim3, 20]
  args: Annotated[c.POINTER[ctypes.c_void_p], 32]
  sharedMem: Annotated[size_t, 40]
  stream: Annotated[cudaStream_t, 48]
@c.record
class struct_dim3(c.Struct):
  SIZE = 12
  x: Annotated[Annotated[int, ctypes.c_uint32], 0]
  y: Annotated[Annotated[int, ctypes.c_uint32], 4]
  z: Annotated[Annotated[int, ctypes.c_uint32], 8]
dim3: TypeAlias = struct_dim3
cudaLaunchKernel_ptsz_v7000_params: TypeAlias = struct_cudaLaunchKernel_ptsz_v7000_params_st
@c.record
class struct_cudaLaunchKernelExC_ptsz_v11060_params_st(c.Struct):
  SIZE = 24
  config: Annotated[c.POINTER[cudaLaunchConfig_t], 0]
  func: Annotated[ctypes.c_void_p, 8]
  args: Annotated[c.POINTER[ctypes.c_void_p], 16]
@c.record
class struct_cudaLaunchConfig_st(c.Struct):
  SIZE = 56
  gridDim: Annotated[dim3, 0]
  blockDim: Annotated[dim3, 12]
  dynamicSmemBytes: Annotated[size_t, 24]
  stream: Annotated[cudaStream_t, 32]
  attrs: Annotated[c.POINTER[cudaLaunchAttribute], 40]
  numAttrs: Annotated[Annotated[int, ctypes.c_uint32], 48]
cudaLaunchConfig_t: TypeAlias = struct_cudaLaunchConfig_st
# id (4 bytes) + explicit 4-byte pad, then the 64-byte attribute-value union at 8.
@c.record
class struct_cudaLaunchAttribute_st(c.Struct):
  SIZE = 72
  id: Annotated[cudaLaunchAttributeID, 0]
  pad: Annotated[c.Array[Annotated[bytes, ctypes.c_char], Literal[4]], 4]
  val: Annotated[cudaLaunchAttributeValue, 8]
cudaLaunchAttribute: TypeAlias = struct_cudaLaunchAttribute_st
cudaLaunchKernelExC_ptsz_v11060_params: TypeAlias = struct_cudaLaunchKernelExC_ptsz_v11060_params_st
@c.record
class struct_cudaLaunchCooperativeKernel_ptsz_v9000_params_st(c.Struct):
  SIZE = 56
  func: Annotated[ctypes.c_void_p, 0]
  gridDim: Annotated[dim3, 8]
  blockDim: Annotated[dim3, 20]
  args: Annotated[c.POINTER[ctypes.c_void_p], 32]
  sharedMem: Annotated[size_t, 40]
  stream: Annotated[cudaStream_t, 48]
cudaLaunchCooperativeKernel_ptsz_v9000_params: TypeAlias = struct_cudaLaunchCooperativeKernel_ptsz_v9000_params_st
@c.record
class struct_cudaLaunchCooperativeKernelMultiDevice_v9000_params_st(c.Struct):
  SIZE = 16
  launchParamsList: Annotated[c.POINTER[struct_cudaLaunchParams], 0]
  numDevices: Annotated[Annotated[int, ctypes.c_uint32], 8]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 12]
@c.record
class struct_cudaLaunchParams(c.Struct):
  SIZE = 56
  func: Annotated[ctypes.c_void_p, 0]
  gridDim: Annotated[dim3, 8]
  blockDim: Annotated[dim3, 20]
  args: Annotated[c.POINTER[ctypes.c_void_p], 32]
  sharedMem: Annotated[size_t, 40]
  stream: Annotated[cudaStream_t, 48]
cudaLaunchCooperativeKernelMultiDevice_v9000_params: TypeAlias = struct_cudaLaunchCooperativeKernelMultiDevice_v9000_params_st
# --- Function-attribute parameter records (generated bindings) ---
@c.record
class struct_cudaFuncSetCacheConfig_v3020_params_st(c.Struct):
  SIZE = 16
  func: Annotated[ctypes.c_void_p, 0]
  cacheConfig: Annotated[enum_cudaFuncCache, 8]
cudaFuncSetCacheConfig_v3020_params: TypeAlias = struct_cudaFuncSetCacheConfig_v3020_params_st
@c.record
class struct_cudaFuncGetAttributes_v3020_params_st(c.Struct):
  SIZE = 16
  attr: Annotated[c.POINTER[struct_cudaFuncAttributes], 0]
  func: Annotated[ctypes.c_void_p, 8]
# Mirror of the runtime's cudaFuncAttributes: sizes, register/thread limits, and
# cluster-related fields, padded out with 16 reserved ints.
@c.record
class struct_cudaFuncAttributes(c.Struct):
  SIZE = 144
  sharedSizeBytes: Annotated[size_t, 0]
  constSizeBytes: Annotated[size_t, 8]
  localSizeBytes: Annotated[size_t, 16]
  maxThreadsPerBlock: Annotated[Annotated[int, ctypes.c_int32], 24]
  numRegs: Annotated[Annotated[int, ctypes.c_int32], 28]
  ptxVersion: Annotated[Annotated[int, ctypes.c_int32], 32]
  binaryVersion: Annotated[Annotated[int, ctypes.c_int32], 36]
  cacheModeCA: Annotated[Annotated[int, ctypes.c_int32], 40]
  maxDynamicSharedSizeBytes: Annotated[Annotated[int, ctypes.c_int32], 44]
  preferredShmemCarveout: Annotated[Annotated[int, ctypes.c_int32], 48]
  clusterDimMustBeSet: Annotated[Annotated[int, ctypes.c_int32], 52]
  requiredClusterWidth: Annotated[Annotated[int, ctypes.c_int32], 56]
  requiredClusterHeight: Annotated[Annotated[int, ctypes.c_int32], 60]
  requiredClusterDepth: Annotated[Annotated[int, ctypes.c_int32], 64]
  clusterSchedulingPolicyPreference: Annotated[Annotated[int, ctypes.c_int32], 68]
  nonPortableClusterSizeAllowed: Annotated[Annotated[int, ctypes.c_int32], 72]
  reserved: Annotated[c.Array[Annotated[int, ctypes.c_int32], Literal[16]], 76]
cudaFuncGetAttributes_v3020_params: TypeAlias = struct_cudaFuncGetAttributes_v3020_params_st
@c.record
class struct_cudaFuncSetAttribute_v9000_params_st(c.Struct):
  SIZE = 16
  func: Annotated[ctypes.c_void_p, 0]
  attr: Annotated[enum_cudaFuncAttribute, 8]
  value: Annotated[Annotated[int, ctypes.c_int32], 12]
# Note: the first settable attribute id is 8; values 0-7 are not defined here,
# presumably reserved/read-only ids in the C header — confirm against cuda_runtime_api.h.
class enum_cudaFuncAttribute(Annotated[int, ctypes.c_uint32], c.Enum): pass
cudaFuncAttributeMaxDynamicSharedMemorySize = enum_cudaFuncAttribute.define('cudaFuncAttributeMaxDynamicSharedMemorySize', 8)
cudaFuncAttributePreferredSharedMemoryCarveout = enum_cudaFuncAttribute.define('cudaFuncAttributePreferredSharedMemoryCarveout', 9)
cudaFuncAttributeClusterDimMustBeSet = enum_cudaFuncAttribute.define('cudaFuncAttributeClusterDimMustBeSet', 10)
cudaFuncAttributeRequiredClusterWidth = enum_cudaFuncAttribute.define('cudaFuncAttributeRequiredClusterWidth', 11)
cudaFuncAttributeRequiredClusterHeight = enum_cudaFuncAttribute.define('cudaFuncAttributeRequiredClusterHeight', 12)
cudaFuncAttributeRequiredClusterDepth = enum_cudaFuncAttribute.define('cudaFuncAttributeRequiredClusterDepth', 13)
cudaFuncAttributeNonPortableClusterSizeAllowed = enum_cudaFuncAttribute.define('cudaFuncAttributeNonPortableClusterSizeAllowed', 14)
cudaFuncAttributeClusterSchedulingPolicyPreference = enum_cudaFuncAttribute.define('cudaFuncAttributeClusterSchedulingPolicyPreference', 15)
cudaFuncAttributeMax = enum_cudaFuncAttribute.define('cudaFuncAttributeMax', 16)
cudaFuncSetAttribute_v9000_params: TypeAlias = struct_cudaFuncSetAttribute_v9000_params_st
@c.record
class struct_cudaFuncGetName_v12030_params_st(c.Struct):
  SIZE = 16
  name: Annotated[c.POINTER[c.POINTER[Annotated[bytes, ctypes.c_char]]], 0]
  func: Annotated[ctypes.c_void_p, 8]
cudaFuncGetName_v12030_params: TypeAlias = struct_cudaFuncGetName_v12030_params_st
@c.record
class struct_cudaFuncGetParamInfo_v12040_params_st(c.Struct):
  SIZE = 32
  func: Annotated[ctypes.c_void_p, 0]
  paramIndex: Annotated[size_t, 8]
  paramOffset: Annotated[c.POINTER[size_t], 16]
  paramSize: Annotated[c.POINTER[size_t], 24]
cudaFuncGetParamInfo_v12040_params: TypeAlias = struct_cudaFuncGetParamInfo_v12040_params_st
# --- Host-func, shared-mem config, and occupancy parameter records (generated bindings) ---
@c.record
class struct_cudaLaunchHostFunc_ptsz_v10000_params_st(c.Struct):
  SIZE = 24
  stream: Annotated[cudaStream_t, 0]
  fn: Annotated[cudaHostFn_t, 8]
  userData: Annotated[ctypes.c_void_p, 16]
# C function-pointer type: void (*)(void* userData).
cudaHostFn_t: TypeAlias = c.CFUNCTYPE[None, [ctypes.c_void_p]]
cudaLaunchHostFunc_ptsz_v10000_params: TypeAlias = struct_cudaLaunchHostFunc_ptsz_v10000_params_st
@c.record
class struct_cudaFuncSetSharedMemConfig_v4020_params_st(c.Struct):
  SIZE = 16
  func: Annotated[ctypes.c_void_p, 0]
  config: Annotated[enum_cudaSharedMemConfig, 8]
cudaFuncSetSharedMemConfig_v4020_params: TypeAlias = struct_cudaFuncSetSharedMemConfig_v4020_params_st
@c.record
class struct_cudaOccupancyMaxActiveBlocksPerMultiprocessor_v6050_params_st(c.Struct):
  SIZE = 32
  numBlocks: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 0]
  func: Annotated[ctypes.c_void_p, 8]
  blockSize: Annotated[Annotated[int, ctypes.c_int32], 16]
  dynamicSMemSize: Annotated[size_t, 24]
cudaOccupancyMaxActiveBlocksPerMultiprocessor_v6050_params: TypeAlias = struct_cudaOccupancyMaxActiveBlocksPerMultiprocessor_v6050_params_st
@c.record
class struct_cudaOccupancyAvailableDynamicSMemPerBlock_v10200_params_st(c.Struct):
  SIZE = 24
  dynamicSmemSize: Annotated[c.POINTER[size_t], 0]
  func: Annotated[ctypes.c_void_p, 8]
  numBlocks: Annotated[Annotated[int, ctypes.c_int32], 16]
  blockSize: Annotated[Annotated[int, ctypes.c_int32], 20]
cudaOccupancyAvailableDynamicSMemPerBlock_v10200_params: TypeAlias = struct_cudaOccupancyAvailableDynamicSMemPerBlock_v10200_params_st
@c.record
class struct_cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags_v7000_params_st(c.Struct):
  SIZE = 40
  numBlocks: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 0]
  func: Annotated[ctypes.c_void_p, 8]
  blockSize: Annotated[Annotated[int, ctypes.c_int32], 16]
  dynamicSMemSize: Annotated[size_t, 24]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 32]
cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags_v7000_params: TypeAlias = struct_cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags_v7000_params_st
@c.record
class struct_cudaOccupancyMaxPotentialClusterSize_v11070_params_st(c.Struct):
  SIZE = 24
  clusterSize: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 0]
  func: Annotated[ctypes.c_void_p, 8]
  launchConfig: Annotated[c.POINTER[cudaLaunchConfig_t], 16]
cudaOccupancyMaxPotentialClusterSize_v11070_params: TypeAlias = struct_cudaOccupancyMaxPotentialClusterSize_v11070_params_st
@c.record
class struct_cudaOccupancyMaxActiveClusters_v11070_params_st(c.Struct):
  SIZE = 24
  numClusters: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 0]
  func: Annotated[ctypes.c_void_p, 8]
  launchConfig: Annotated[c.POINTER[cudaLaunchConfig_t], 16]
cudaOccupancyMaxActiveClusters_v11070_params: TypeAlias = struct_cudaOccupancyMaxActiveClusters_v11070_params_st
@c.record
class struct_cudaMallocManaged_v6000_params_st(c.Struct):
SIZE = 24
devPtr: Annotated[c.POINTER[ctypes.c_void_p], 0]
size: Annotated[size_t, 8]
flags: Annotated[Annotated[int, ctypes.c_uint32], 16]
cudaMallocManaged_v6000_params: TypeAlias = struct_cudaMallocManaged_v6000_params_st
# ---------------------------------------------------------------------------
# Auto-generated CUDA runtime API-trace parameter records: memory allocation,
# free, and host-memory entry points.  Each `struct_<api>_<version>_params_st`
# mirrors the argument pack of one cudart call (the suffix encodes the API
# version, e.g. v3020 = CUDA 3.2).  `SIZE` and the integer in each
# `Annotated[type, offset]` reproduce the C struct layout byte-for-byte and
# must not be edited by hand.  Referenced types that are declared later in
# the file (e.g. struct_cudaChannelFormatDesc, cudaMipmappedArray_t) resolve
# lazily because annotations are evaluated as strings.
# ---------------------------------------------------------------------------
@c.record
class struct_cudaMalloc_v3020_params_st(c.Struct):
  SIZE = 16
  devPtr: Annotated[c.POINTER[ctypes.c_void_p], 0]
  size: Annotated[size_t, 8]
cudaMalloc_v3020_params: TypeAlias = struct_cudaMalloc_v3020_params_st
@c.record
class struct_cudaMallocHost_v3020_params_st(c.Struct):
  SIZE = 16
  ptr: Annotated[c.POINTER[ctypes.c_void_p], 0]
  size: Annotated[size_t, 8]
cudaMallocHost_v3020_params: TypeAlias = struct_cudaMallocHost_v3020_params_st
@c.record
class struct_cudaMallocPitch_v3020_params_st(c.Struct):
  SIZE = 32
  devPtr: Annotated[c.POINTER[ctypes.c_void_p], 0]
  pitch: Annotated[c.POINTER[size_t], 8]
  width: Annotated[size_t, 16]
  height: Annotated[size_t, 24]
cudaMallocPitch_v3020_params: TypeAlias = struct_cudaMallocPitch_v3020_params_st
@c.record
class struct_cudaMallocArray_v3020_params_st(c.Struct):
  SIZE = 40
  # cudaArray_t is declared just below; the string annotation defers lookup.
  array: Annotated[c.POINTER[cudaArray_t], 0]
  desc: Annotated[c.POINTER[struct_cudaChannelFormatDesc], 8]
  width: Annotated[size_t, 16]
  height: Annotated[size_t, 24]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 32]
# Opaque CUDA array handle: empty ctypes.Structure used only through pointers.
class struct_cudaArray(ctypes.Structure): pass
cudaArray_t: TypeAlias = c.POINTER[struct_cudaArray]
cudaMallocArray_v3020_params: TypeAlias = struct_cudaMallocArray_v3020_params_st
@c.record
class struct_cudaFree_v3020_params_st(c.Struct):
  SIZE = 8
  devPtr: Annotated[ctypes.c_void_p, 0]
cudaFree_v3020_params: TypeAlias = struct_cudaFree_v3020_params_st
@c.record
class struct_cudaFreeHost_v3020_params_st(c.Struct):
  SIZE = 8
  ptr: Annotated[ctypes.c_void_p, 0]
cudaFreeHost_v3020_params: TypeAlias = struct_cudaFreeHost_v3020_params_st
@c.record
class struct_cudaFreeArray_v3020_params_st(c.Struct):
  SIZE = 8
  array: Annotated[cudaArray_t, 0]
cudaFreeArray_v3020_params: TypeAlias = struct_cudaFreeArray_v3020_params_st
@c.record
class struct_cudaFreeMipmappedArray_v5000_params_st(c.Struct):
  SIZE = 8
  mipmappedArray: Annotated[cudaMipmappedArray_t, 0]
cudaFreeMipmappedArray_v5000_params: TypeAlias = struct_cudaFreeMipmappedArray_v5000_params_st
@c.record
class struct_cudaHostAlloc_v3020_params_st(c.Struct):
  SIZE = 24
  pHost: Annotated[c.POINTER[ctypes.c_void_p], 0]
  size: Annotated[size_t, 8]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 16]
cudaHostAlloc_v3020_params: TypeAlias = struct_cudaHostAlloc_v3020_params_st
@c.record
class struct_cudaHostRegister_v4000_params_st(c.Struct):
  SIZE = 24
  ptr: Annotated[ctypes.c_void_p, 0]
  size: Annotated[size_t, 8]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 16]
cudaHostRegister_v4000_params: TypeAlias = struct_cudaHostRegister_v4000_params_st
@c.record
class struct_cudaHostUnregister_v4000_params_st(c.Struct):
  SIZE = 8
  ptr: Annotated[ctypes.c_void_p, 0]
cudaHostUnregister_v4000_params: TypeAlias = struct_cudaHostUnregister_v4000_params_st
@c.record
class struct_cudaHostGetDevicePointer_v3020_params_st(c.Struct):
  SIZE = 24
  pDevice: Annotated[c.POINTER[ctypes.c_void_p], 0]
  pHost: Annotated[ctypes.c_void_p, 8]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 16]
cudaHostGetDevicePointer_v3020_params: TypeAlias = struct_cudaHostGetDevicePointer_v3020_params_st
@c.record
class struct_cudaHostGetFlags_v3020_params_st(c.Struct):
  SIZE = 16
  pFlags: Annotated[c.POINTER[Annotated[int, ctypes.c_uint32]], 0]
  pHost: Annotated[ctypes.c_void_p, 8]
cudaHostGetFlags_v3020_params: TypeAlias = struct_cudaHostGetFlags_v3020_params_st
@c.record
class struct_cudaMalloc3D_v3020_params_st(c.Struct):
  SIZE = 32
  pitchedDevPtr: Annotated[c.POINTER[struct_cudaPitchedPtr], 0]
  extent: Annotated[struct_cudaExtent, 8]
# Pitched (row-padded) device allocation descriptor (mirrors cudaPitchedPtr).
@c.record
class struct_cudaPitchedPtr(c.Struct):
  SIZE = 32
  ptr: Annotated[ctypes.c_void_p, 0]
  pitch: Annotated[size_t, 8]
  xsize: Annotated[size_t, 16]
  ysize: Annotated[size_t, 24]
cudaMalloc3D_v3020_params: TypeAlias = struct_cudaMalloc3D_v3020_params_st
@c.record
class struct_cudaMalloc3DArray_v3020_params_st(c.Struct):
  SIZE = 48
  array: Annotated[c.POINTER[cudaArray_t], 0]
  desc: Annotated[c.POINTER[struct_cudaChannelFormatDesc], 8]
  extent: Annotated[struct_cudaExtent, 16]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 40]
cudaMalloc3DArray_v3020_params: TypeAlias = struct_cudaMalloc3DArray_v3020_params_st
@c.record
class struct_cudaMallocMipmappedArray_v5000_params_st(c.Struct):
  SIZE = 48
  mipmappedArray: Annotated[c.POINTER[cudaMipmappedArray_t], 0]
  desc: Annotated[c.POINTER[struct_cudaChannelFormatDesc], 8]
  extent: Annotated[struct_cudaExtent, 16]
  numLevels: Annotated[Annotated[int, ctypes.c_uint32], 40]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 44]
cudaMallocMipmappedArray_v5000_params: TypeAlias = struct_cudaMallocMipmappedArray_v5000_params_st
@c.record
class struct_cudaGetMipmappedArrayLevel_v5000_params_st(c.Struct):
  SIZE = 24
  levelArray: Annotated[c.POINTER[cudaArray_t], 0]
  mipmappedArray: Annotated[cudaMipmappedArray_const_t, 16 - 8]
  level: Annotated[Annotated[int, ctypes.c_uint32], 16]
cudaMipmappedArray_const_t: TypeAlias = c.POINTER[struct_cudaMipmappedArray]
cudaGetMipmappedArrayLevel_v5000_params: TypeAlias = struct_cudaGetMipmappedArrayLevel_v5000_params_st
# ---------------------------------------------------------------------------
# 3-D memcpy parameter records (sync/peer/async variants; _ptds/_ptsz are the
# per-thread-default-stream entry points), together with the descriptor
# structs they point at (cudaMemcpy3DParms / cudaMemcpy3DPeerParms) and the
# cudaMemcpyKind enum.  Layouts are generator-emitted; do not hand-edit.
# ---------------------------------------------------------------------------
@c.record
class struct_cudaMemcpy3D_ptds_v7000_params_st(c.Struct):
  SIZE = 8
  p: Annotated[c.POINTER[struct_cudaMemcpy3DParms], 0]
# Mirrors the C cudaMemcpy3DParms: source/destination may each be an array,
# a linear pointer, or a pitched pointer; unused halves are zeroed in C usage.
@c.record
class struct_cudaMemcpy3DParms(c.Struct):
  SIZE = 160
  srcArray: Annotated[cudaArray_t, 0]
  srcPos: Annotated[struct_cudaPos, 8]
  srcPtr: Annotated[struct_cudaPitchedPtr, 32]
  dstArray: Annotated[cudaArray_t, 64]
  dstPos: Annotated[struct_cudaPos, 72]
  dstPtr: Annotated[struct_cudaPitchedPtr, 96]
  extent: Annotated[struct_cudaExtent, 128]
  kind: Annotated[enum_cudaMemcpyKind, 152]
# 3-D element position (mirrors cudaPos).
@c.record
class struct_cudaPos(c.Struct):
  SIZE = 24
  x: Annotated[size_t, 0]
  y: Annotated[size_t, 8]
  z: Annotated[size_t, 16]
# Transfer-direction enum (mirrors C cudaMemcpyKind; values fixed by the ABI).
class enum_cudaMemcpyKind(Annotated[int, ctypes.c_uint32], c.Enum): pass
cudaMemcpyHostToHost = enum_cudaMemcpyKind.define('cudaMemcpyHostToHost', 0)
cudaMemcpyHostToDevice = enum_cudaMemcpyKind.define('cudaMemcpyHostToDevice', 1)
cudaMemcpyDeviceToHost = enum_cudaMemcpyKind.define('cudaMemcpyDeviceToHost', 2)
cudaMemcpyDeviceToDevice = enum_cudaMemcpyKind.define('cudaMemcpyDeviceToDevice', 3)
cudaMemcpyDefault = enum_cudaMemcpyKind.define('cudaMemcpyDefault', 4)
cudaMemcpy3D_ptds_v7000_params: TypeAlias = struct_cudaMemcpy3D_ptds_v7000_params_st
@c.record
class struct_cudaMemcpy3DPeer_ptds_v7000_params_st(c.Struct):
  SIZE = 8
  p: Annotated[c.POINTER[struct_cudaMemcpy3DPeerParms], 0]
# Peer-to-peer 3-D copy descriptor (mirrors cudaMemcpy3DPeerParms).
@c.record
class struct_cudaMemcpy3DPeerParms(c.Struct):
  SIZE = 168
  srcArray: Annotated[cudaArray_t, 0]
  srcPos: Annotated[struct_cudaPos, 8]
  srcPtr: Annotated[struct_cudaPitchedPtr, 32]
  srcDevice: Annotated[Annotated[int, ctypes.c_int32], 64]
  dstArray: Annotated[cudaArray_t, 72]
  dstPos: Annotated[struct_cudaPos, 80]
  dstPtr: Annotated[struct_cudaPitchedPtr, 104]
  dstDevice: Annotated[Annotated[int, ctypes.c_int32], 136]
  extent: Annotated[struct_cudaExtent, 144]
cudaMemcpy3DPeer_ptds_v7000_params: TypeAlias = struct_cudaMemcpy3DPeer_ptds_v7000_params_st
@c.record
class struct_cudaMemcpy3DAsync_ptsz_v7000_params_st(c.Struct):
  SIZE = 16
  p: Annotated[c.POINTER[struct_cudaMemcpy3DParms], 0]
  stream: Annotated[cudaStream_t, 8]
cudaMemcpy3DAsync_ptsz_v7000_params: TypeAlias = struct_cudaMemcpy3DAsync_ptsz_v7000_params_st
@c.record
class struct_cudaMemcpy3DPeerAsync_ptsz_v7000_params_st(c.Struct):
  SIZE = 16
  p: Annotated[c.POINTER[struct_cudaMemcpy3DPeerParms], 0]
  stream: Annotated[cudaStream_t, 8]
cudaMemcpy3DPeerAsync_ptsz_v7000_params: TypeAlias = struct_cudaMemcpy3DPeerAsync_ptsz_v7000_params_st
# ---------------------------------------------------------------------------
# Memory/array query parameter records: cudaMemGetInfo, array info/plane,
# memory requirements, and sparse-array properties, plus the result structs
# they point at.  Layouts are generator-emitted; do not hand-edit.
# ---------------------------------------------------------------------------
@c.record
class struct_cudaMemGetInfo_v3020_params_st(c.Struct):
  SIZE = 16
  free: Annotated[c.POINTER[size_t], 0]
  total: Annotated[c.POINTER[size_t], 8]
cudaMemGetInfo_v3020_params: TypeAlias = struct_cudaMemGetInfo_v3020_params_st
@c.record
class struct_cudaArrayGetInfo_v4010_params_st(c.Struct):
  SIZE = 32
  desc: Annotated[c.POINTER[struct_cudaChannelFormatDesc], 0]
  extent: Annotated[c.POINTER[struct_cudaExtent], 8]
  flags: Annotated[c.POINTER[Annotated[int, ctypes.c_uint32]], 16]
  array: Annotated[cudaArray_t, 24]
cudaArrayGetInfo_v4010_params: TypeAlias = struct_cudaArrayGetInfo_v4010_params_st
@c.record
class struct_cudaArrayGetPlane_v11020_params_st(c.Struct):
  SIZE = 24
  pPlaneArray: Annotated[c.POINTER[cudaArray_t], 0]
  hArray: Annotated[cudaArray_t, 8]
  planeIdx: Annotated[Annotated[int, ctypes.c_uint32], 16]
cudaArrayGetPlane_v11020_params: TypeAlias = struct_cudaArrayGetPlane_v11020_params_st
@c.record
class struct_cudaArrayGetMemoryRequirements_v11060_params_st(c.Struct):
  SIZE = 24
  memoryRequirements: Annotated[c.POINTER[struct_cudaArrayMemoryRequirements], 0]
  array: Annotated[cudaArray_t, 8]
  device: Annotated[Annotated[int, ctypes.c_int32], 16]
# Result struct for the GetMemoryRequirements queries (mirrors
# cudaArrayMemoryRequirements; `reserved` pads the struct per the C header).
@c.record
class struct_cudaArrayMemoryRequirements(c.Struct):
  SIZE = 32
  size: Annotated[size_t, 0]
  alignment: Annotated[size_t, 8]
  reserved: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[4]], 16]
cudaArrayGetMemoryRequirements_v11060_params: TypeAlias = struct_cudaArrayGetMemoryRequirements_v11060_params_st
@c.record
class struct_cudaMipmappedArrayGetMemoryRequirements_v11060_params_st(c.Struct):
  SIZE = 24
  memoryRequirements: Annotated[c.POINTER[struct_cudaArrayMemoryRequirements], 0]
  mipmap: Annotated[cudaMipmappedArray_t, 8]
  device: Annotated[Annotated[int, ctypes.c_int32], 16]
cudaMipmappedArrayGetMemoryRequirements_v11060_params: TypeAlias = struct_cudaMipmappedArrayGetMemoryRequirements_v11060_params_st
@c.record
class struct_cudaArrayGetSparseProperties_v11010_params_st(c.Struct):
  SIZE = 16
  sparseProperties: Annotated[c.POINTER[struct_cudaArraySparseProperties], 0]
  array: Annotated[cudaArray_t, 8]
# Result struct for sparse-array queries (mirrors cudaArraySparseProperties).
@c.record
class struct_cudaArraySparseProperties(c.Struct):
  SIZE = 48
  tileExtent: Annotated[struct_cudaArraySparseProperties_tileExtent, 0]
  miptailFirstLevel: Annotated[Annotated[int, ctypes.c_uint32], 12]
  miptailSize: Annotated[Annotated[int, ctypes.c_uint64], 16]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 24]
  reserved: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[4]], 28]
# Nested anonymous struct: per-tile extent of a sparse CUDA array.
@c.record
class struct_cudaArraySparseProperties_tileExtent(c.Struct):
  SIZE = 12
  width: Annotated[Annotated[int, ctypes.c_uint32], 0]
  height: Annotated[Annotated[int, ctypes.c_uint32], 4]
  depth: Annotated[Annotated[int, ctypes.c_uint32], 8]
cudaArrayGetSparseProperties_v11010_params: TypeAlias = struct_cudaArrayGetSparseProperties_v11010_params_st
@c.record
class struct_cudaMipmappedArrayGetSparseProperties_v11010_params_st(c.Struct):
  SIZE = 16
  sparseProperties: Annotated[c.POINTER[struct_cudaArraySparseProperties], 0]
  mipmap: Annotated[cudaMipmappedArray_t, 8]
cudaMipmappedArrayGetSparseProperties_v11010_params: TypeAlias = struct_cudaMipmappedArrayGetSparseProperties_v11010_params_st
# ---------------------------------------------------------------------------
# Linear/2-D/symbol/batch memcpy parameter records (sync and async, peer, and
# the CUDA 12.8 batch APIs), plus the attribute structs and enums the batch
# variants use.  Layouts are generator-emitted; do not hand-edit.
# ---------------------------------------------------------------------------
@c.record
class struct_cudaMemcpy_ptds_v7000_params_st(c.Struct):
  SIZE = 32
  dst: Annotated[ctypes.c_void_p, 0]
  src: Annotated[ctypes.c_void_p, 8]
  count: Annotated[size_t, 16]
  kind: Annotated[enum_cudaMemcpyKind, 24]
cudaMemcpy_ptds_v7000_params: TypeAlias = struct_cudaMemcpy_ptds_v7000_params_st
@c.record
class struct_cudaMemcpyPeer_v4000_params_st(c.Struct):
  SIZE = 40
  dst: Annotated[ctypes.c_void_p, 0]
  dstDevice: Annotated[Annotated[int, ctypes.c_int32], 8]
  src: Annotated[ctypes.c_void_p, 16]
  srcDevice: Annotated[Annotated[int, ctypes.c_int32], 24]
  count: Annotated[size_t, 32]
cudaMemcpyPeer_v4000_params: TypeAlias = struct_cudaMemcpyPeer_v4000_params_st
@c.record
class struct_cudaMemcpy2D_ptds_v7000_params_st(c.Struct):
  SIZE = 56
  dst: Annotated[ctypes.c_void_p, 0]
  dpitch: Annotated[size_t, 8]
  src: Annotated[ctypes.c_void_p, 16]
  spitch: Annotated[size_t, 24]
  width: Annotated[size_t, 32]
  height: Annotated[size_t, 40]
  kind: Annotated[enum_cudaMemcpyKind, 48]
cudaMemcpy2D_ptds_v7000_params: TypeAlias = struct_cudaMemcpy2D_ptds_v7000_params_st
@c.record
class struct_cudaMemcpy2DToArray_ptds_v7000_params_st(c.Struct):
  SIZE = 64
  dst: Annotated[cudaArray_t, 0]
  wOffset: Annotated[size_t, 8]
  hOffset: Annotated[size_t, 16]
  src: Annotated[ctypes.c_void_p, 24]
  spitch: Annotated[size_t, 32]
  width: Annotated[size_t, 40]
  height: Annotated[size_t, 48]
  kind: Annotated[enum_cudaMemcpyKind, 56]
cudaMemcpy2DToArray_ptds_v7000_params: TypeAlias = struct_cudaMemcpy2DToArray_ptds_v7000_params_st
@c.record
class struct_cudaMemcpy2DFromArray_ptds_v7000_params_st(c.Struct):
  SIZE = 64
  dst: Annotated[ctypes.c_void_p, 0]
  dpitch: Annotated[size_t, 8]
  src: Annotated[cudaArray_const_t, 16]
  wOffset: Annotated[size_t, 24]
  hOffset: Annotated[size_t, 32]
  width: Annotated[size_t, 40]
  height: Annotated[size_t, 48]
  kind: Annotated[enum_cudaMemcpyKind, 56]
# const-qualified array handle; in ctypes this is the same pointer type as
# cudaArray_t (constness exists only in the C API surface).
cudaArray_const_t: TypeAlias = c.POINTER[struct_cudaArray]
cudaMemcpy2DFromArray_ptds_v7000_params: TypeAlias = struct_cudaMemcpy2DFromArray_ptds_v7000_params_st
@c.record
class struct_cudaMemcpy2DArrayToArray_ptds_v7000_params_st(c.Struct):
  SIZE = 72
  dst: Annotated[cudaArray_t, 0]
  wOffsetDst: Annotated[size_t, 8]
  hOffsetDst: Annotated[size_t, 16]
  src: Annotated[cudaArray_const_t, 24]
  wOffsetSrc: Annotated[size_t, 32]
  hOffsetSrc: Annotated[size_t, 40]
  width: Annotated[size_t, 48]
  height: Annotated[size_t, 56]
  kind: Annotated[enum_cudaMemcpyKind, 64]
cudaMemcpy2DArrayToArray_ptds_v7000_params: TypeAlias = struct_cudaMemcpy2DArrayToArray_ptds_v7000_params_st
@c.record
class struct_cudaMemcpyToSymbol_ptds_v7000_params_st(c.Struct):
  SIZE = 40
  symbol: Annotated[ctypes.c_void_p, 0]
  src: Annotated[ctypes.c_void_p, 8]
  count: Annotated[size_t, 16]
  offset: Annotated[size_t, 24]
  kind: Annotated[enum_cudaMemcpyKind, 32]
cudaMemcpyToSymbol_ptds_v7000_params: TypeAlias = struct_cudaMemcpyToSymbol_ptds_v7000_params_st
@c.record
class struct_cudaMemcpyFromSymbol_ptds_v7000_params_st(c.Struct):
  SIZE = 40
  dst: Annotated[ctypes.c_void_p, 0]
  symbol: Annotated[ctypes.c_void_p, 8]
  count: Annotated[size_t, 16]
  offset: Annotated[size_t, 24]
  kind: Annotated[enum_cudaMemcpyKind, 32]
cudaMemcpyFromSymbol_ptds_v7000_params: TypeAlias = struct_cudaMemcpyFromSymbol_ptds_v7000_params_st
@c.record
class struct_cudaMemcpyAsync_ptsz_v7000_params_st(c.Struct):
  SIZE = 40
  dst: Annotated[ctypes.c_void_p, 0]
  src: Annotated[ctypes.c_void_p, 8]
  count: Annotated[size_t, 16]
  kind: Annotated[enum_cudaMemcpyKind, 24]
  stream: Annotated[cudaStream_t, 32]
cudaMemcpyAsync_ptsz_v7000_params: TypeAlias = struct_cudaMemcpyAsync_ptsz_v7000_params_st
@c.record
class struct_cudaMemcpyPeerAsync_v4000_params_st(c.Struct):
  SIZE = 48
  dst: Annotated[ctypes.c_void_p, 0]
  dstDevice: Annotated[Annotated[int, ctypes.c_int32], 8]
  src: Annotated[ctypes.c_void_p, 16]
  srcDevice: Annotated[Annotated[int, ctypes.c_int32], 24]
  count: Annotated[size_t, 32]
  stream: Annotated[cudaStream_t, 40]
cudaMemcpyPeerAsync_v4000_params: TypeAlias = struct_cudaMemcpyPeerAsync_v4000_params_st
@c.record
class struct_cudaMemcpyBatchAsync_ptsz_v12080_params_st(c.Struct):
  SIZE = 72
  dsts: Annotated[c.POINTER[ctypes.c_void_p], 0]
  srcs: Annotated[c.POINTER[ctypes.c_void_p], 8]
  sizes: Annotated[c.POINTER[size_t], 16]
  count: Annotated[size_t, 24]
  attrs: Annotated[c.POINTER[struct_cudaMemcpyAttributes], 32]
  attrsIdxs: Annotated[c.POINTER[size_t], 40]
  numAttrs: Annotated[size_t, 48]
  failIdx: Annotated[c.POINTER[size_t], 56]
  stream: Annotated[cudaStream_t, 64]
# Per-copy attributes for the batch APIs (mirrors cudaMemcpyAttributes).
@c.record
class struct_cudaMemcpyAttributes(c.Struct):
  SIZE = 24
  srcAccessOrder: Annotated[enum_cudaMemcpySrcAccessOrder, 0]
  srcLocHint: Annotated[struct_cudaMemLocation, 4]
  dstLocHint: Annotated[struct_cudaMemLocation, 12]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 20]
# Source-access-ordering enum for batched copies (values fixed by the ABI).
class enum_cudaMemcpySrcAccessOrder(Annotated[int, ctypes.c_uint32], c.Enum): pass
cudaMemcpySrcAccessOrderInvalid = enum_cudaMemcpySrcAccessOrder.define('cudaMemcpySrcAccessOrderInvalid', 0)
cudaMemcpySrcAccessOrderStream = enum_cudaMemcpySrcAccessOrder.define('cudaMemcpySrcAccessOrderStream', 1)
cudaMemcpySrcAccessOrderDuringApiCall = enum_cudaMemcpySrcAccessOrder.define('cudaMemcpySrcAccessOrderDuringApiCall', 2)
cudaMemcpySrcAccessOrderAny = enum_cudaMemcpySrcAccessOrder.define('cudaMemcpySrcAccessOrderAny', 3)
cudaMemcpySrcAccessOrderMax = enum_cudaMemcpySrcAccessOrder.define('cudaMemcpySrcAccessOrderMax', 2147483647)
# Memory location (type + id, e.g. a device ordinal); mirrors cudaMemLocation.
@c.record
class struct_cudaMemLocation(c.Struct):
  SIZE = 8
  type: Annotated[enum_cudaMemLocationType, 0]
  id: Annotated[Annotated[int, ctypes.c_int32], 4]
class enum_cudaMemLocationType(Annotated[int, ctypes.c_uint32], c.Enum): pass
cudaMemLocationTypeInvalid = enum_cudaMemLocationType.define('cudaMemLocationTypeInvalid', 0)
cudaMemLocationTypeDevice = enum_cudaMemLocationType.define('cudaMemLocationTypeDevice', 1)
cudaMemLocationTypeHost = enum_cudaMemLocationType.define('cudaMemLocationTypeHost', 2)
cudaMemLocationTypeHostNuma = enum_cudaMemLocationType.define('cudaMemLocationTypeHostNuma', 3)
cudaMemLocationTypeHostNumaCurrent = enum_cudaMemLocationType.define('cudaMemLocationTypeHostNumaCurrent', 4)
cudaMemcpyBatchAsync_ptsz_v12080_params: TypeAlias = struct_cudaMemcpyBatchAsync_ptsz_v12080_params_st
# ---------------------------------------------------------------------------
# CUDA 12.8 3-D batched memcpy records: the API parameter pack plus the
# per-operation descriptor (cudaMemcpy3DBatchOp) and its operand union.
# Layouts are generator-emitted; do not hand-edit.
# ---------------------------------------------------------------------------
@c.record
class struct_cudaMemcpy3DBatchAsync_ptsz_v12080_params_st(c.Struct):
  SIZE = 40
  numOps: Annotated[size_t, 0]
  opList: Annotated[c.POINTER[struct_cudaMemcpy3DBatchOp], 8]
  failIdx: Annotated[c.POINTER[size_t], 16]
  flags: Annotated[Annotated[int, ctypes.c_uint64], 24]
  stream: Annotated[cudaStream_t, 32]
# One copy operation of a 3-D batch (mirrors cudaMemcpy3DBatchOp).
@c.record
class struct_cudaMemcpy3DBatchOp(c.Struct):
  SIZE = 112
  src: Annotated[struct_cudaMemcpy3DOperand, 0]
  dst: Annotated[struct_cudaMemcpy3DOperand, 40]
  extent: Annotated[struct_cudaExtent, 80]
  srcAccessOrder: Annotated[enum_cudaMemcpySrcAccessOrder, 104]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 108]
# Tagged operand: `type` selects which member of the `op` union is valid.
@c.record
class struct_cudaMemcpy3DOperand(c.Struct):
  SIZE = 40
  type: Annotated[enum_cudaMemcpy3DOperandType, 0]
  op: Annotated[struct_cudaMemcpy3DOperand_op, 8]
class enum_cudaMemcpy3DOperandType(Annotated[int, ctypes.c_uint32], c.Enum): pass
cudaMemcpyOperandTypePointer = enum_cudaMemcpy3DOperandType.define('cudaMemcpyOperandTypePointer', 1)
cudaMemcpyOperandTypeArray = enum_cudaMemcpy3DOperandType.define('cudaMemcpyOperandTypeArray', 2)
cudaMemcpyOperandTypeMax = enum_cudaMemcpy3DOperandType.define('cudaMemcpyOperandTypeMax', 2147483647)
# C union emulated as a record: `ptr` and `array` deliberately share offset 0.
@c.record
class struct_cudaMemcpy3DOperand_op(c.Struct):
  SIZE = 32
  ptr: Annotated[struct_cudaMemcpy3DOperand_op_ptr, 0]
  array: Annotated[struct_cudaMemcpy3DOperand_op_array, 0]
@c.record
class struct_cudaMemcpy3DOperand_op_ptr(c.Struct):
  SIZE = 32
  ptr: Annotated[ctypes.c_void_p, 0]
  rowLength: Annotated[size_t, 8]
  layerHeight: Annotated[size_t, 16]
  locHint: Annotated[struct_cudaMemLocation, 24]
@c.record
class struct_cudaMemcpy3DOperand_op_array(c.Struct):
  SIZE = 32
  array: Annotated[cudaArray_t, 0]
  offset: Annotated[struct_cudaOffset3D, 8]
# 3-D offset within a CUDA array (mirrors cudaOffset3D).
@c.record
class struct_cudaOffset3D(c.Struct):
  SIZE = 24
  x: Annotated[size_t, 0]
  y: Annotated[size_t, 8]
  z: Annotated[size_t, 16]
cudaMemcpy3DBatchAsync_ptsz_v12080_params: TypeAlias = struct_cudaMemcpy3DBatchAsync_ptsz_v12080_params_st
# ---------------------------------------------------------------------------
# Async 2-D and symbol memcpy parameter records (_ptsz = per-thread default
# stream entry points); identical to their sync counterparts plus a trailing
# `stream` field.  Layouts are generator-emitted; do not hand-edit.
# ---------------------------------------------------------------------------
@c.record
class struct_cudaMemcpy2DAsync_ptsz_v7000_params_st(c.Struct):
  SIZE = 64
  dst: Annotated[ctypes.c_void_p, 0]
  dpitch: Annotated[size_t, 8]
  src: Annotated[ctypes.c_void_p, 16]
  spitch: Annotated[size_t, 24]
  width: Annotated[size_t, 32]
  height: Annotated[size_t, 40]
  kind: Annotated[enum_cudaMemcpyKind, 48]
  stream: Annotated[cudaStream_t, 56]
cudaMemcpy2DAsync_ptsz_v7000_params: TypeAlias = struct_cudaMemcpy2DAsync_ptsz_v7000_params_st
@c.record
class struct_cudaMemcpy2DToArrayAsync_ptsz_v7000_params_st(c.Struct):
  SIZE = 72
  dst: Annotated[cudaArray_t, 0]
  wOffset: Annotated[size_t, 8]
  hOffset: Annotated[size_t, 16]
  src: Annotated[ctypes.c_void_p, 24]
  spitch: Annotated[size_t, 32]
  width: Annotated[size_t, 40]
  height: Annotated[size_t, 48]
  kind: Annotated[enum_cudaMemcpyKind, 56]
  stream: Annotated[cudaStream_t, 64]
cudaMemcpy2DToArrayAsync_ptsz_v7000_params: TypeAlias = struct_cudaMemcpy2DToArrayAsync_ptsz_v7000_params_st
@c.record
class struct_cudaMemcpy2DFromArrayAsync_ptsz_v7000_params_st(c.Struct):
  SIZE = 72
  dst: Annotated[ctypes.c_void_p, 0]
  dpitch: Annotated[size_t, 8]
  src: Annotated[cudaArray_const_t, 16]
  wOffset: Annotated[size_t, 24]
  hOffset: Annotated[size_t, 32]
  width: Annotated[size_t, 40]
  height: Annotated[size_t, 48]
  kind: Annotated[enum_cudaMemcpyKind, 56]
  stream: Annotated[cudaStream_t, 64]
cudaMemcpy2DFromArrayAsync_ptsz_v7000_params: TypeAlias = struct_cudaMemcpy2DFromArrayAsync_ptsz_v7000_params_st
@c.record
class struct_cudaMemcpyToSymbolAsync_ptsz_v7000_params_st(c.Struct):
  SIZE = 48
  symbol: Annotated[ctypes.c_void_p, 0]
  src: Annotated[ctypes.c_void_p, 8]
  count: Annotated[size_t, 16]
  offset: Annotated[size_t, 24]
  kind: Annotated[enum_cudaMemcpyKind, 32]
  stream: Annotated[cudaStream_t, 40]
cudaMemcpyToSymbolAsync_ptsz_v7000_params: TypeAlias = struct_cudaMemcpyToSymbolAsync_ptsz_v7000_params_st
@c.record
class struct_cudaMemcpyFromSymbolAsync_ptsz_v7000_params_st(c.Struct):
  SIZE = 48
  dst: Annotated[ctypes.c_void_p, 0]
  symbol: Annotated[ctypes.c_void_p, 8]
  count: Annotated[size_t, 16]
  offset: Annotated[size_t, 24]
  kind: Annotated[enum_cudaMemcpyKind, 32]
  stream: Annotated[cudaStream_t, 40]
cudaMemcpyFromSymbolAsync_ptsz_v7000_params: TypeAlias = struct_cudaMemcpyFromSymbolAsync_ptsz_v7000_params_st
# ---------------------------------------------------------------------------
# memset parameter records (1-D/2-D/3-D, sync and async).  Layouts are
# generator-emitted; do not hand-edit.
# ---------------------------------------------------------------------------
@c.record
class struct_cudaMemset_ptds_v7000_params_st(c.Struct):
  SIZE = 24
  devPtr: Annotated[ctypes.c_void_p, 0]
  value: Annotated[Annotated[int, ctypes.c_int32], 8]
  count: Annotated[size_t, 16]
cudaMemset_ptds_v7000_params: TypeAlias = struct_cudaMemset_ptds_v7000_params_st
@c.record
class struct_cudaMemset2D_ptds_v7000_params_st(c.Struct):
  SIZE = 40
  devPtr: Annotated[ctypes.c_void_p, 0]
  pitch: Annotated[size_t, 8]
  value: Annotated[Annotated[int, ctypes.c_int32], 16]
  width: Annotated[size_t, 24]
  height: Annotated[size_t, 32]
cudaMemset2D_ptds_v7000_params: TypeAlias = struct_cudaMemset2D_ptds_v7000_params_st
@c.record
class struct_cudaMemset3D_ptds_v7000_params_st(c.Struct):
  SIZE = 64
  pitchedDevPtr: Annotated[struct_cudaPitchedPtr, 0]
  value: Annotated[Annotated[int, ctypes.c_int32], 32]
  extent: Annotated[struct_cudaExtent, 40]
cudaMemset3D_ptds_v7000_params: TypeAlias = struct_cudaMemset3D_ptds_v7000_params_st
@c.record
class struct_cudaMemsetAsync_ptsz_v7000_params_st(c.Struct):
  SIZE = 32
  devPtr: Annotated[ctypes.c_void_p, 0]
  value: Annotated[Annotated[int, ctypes.c_int32], 8]
  count: Annotated[size_t, 16]
  stream: Annotated[cudaStream_t, 24]
cudaMemsetAsync_ptsz_v7000_params: TypeAlias = struct_cudaMemsetAsync_ptsz_v7000_params_st
@c.record
class struct_cudaMemset2DAsync_ptsz_v7000_params_st(c.Struct):
  SIZE = 48
  devPtr: Annotated[ctypes.c_void_p, 0]
  pitch: Annotated[size_t, 8]
  value: Annotated[Annotated[int, ctypes.c_int32], 16]
  width: Annotated[size_t, 24]
  height: Annotated[size_t, 32]
  stream: Annotated[cudaStream_t, 40]
cudaMemset2DAsync_ptsz_v7000_params: TypeAlias = struct_cudaMemset2DAsync_ptsz_v7000_params_st
@c.record
class struct_cudaMemset3DAsync_ptsz_v7000_params_st(c.Struct):
  SIZE = 72
  pitchedDevPtr: Annotated[struct_cudaPitchedPtr, 0]
  value: Annotated[Annotated[int, ctypes.c_int32], 32]
  extent: Annotated[struct_cudaExtent, 40]
  stream: Annotated[cudaStream_t, 64]
cudaMemset3DAsync_ptsz_v7000_params: TypeAlias = struct_cudaMemset3DAsync_ptsz_v7000_params_st
# ---------------------------------------------------------------------------
# Symbol lookup, unified-memory prefetch/advise, and range-attribute query
# parameter records, with the enums they use.  Layouts are generator-emitted;
# do not hand-edit.
# ---------------------------------------------------------------------------
@c.record
class struct_cudaGetSymbolAddress_v3020_params_st(c.Struct):
  SIZE = 16
  devPtr: Annotated[c.POINTER[ctypes.c_void_p], 0]
  symbol: Annotated[ctypes.c_void_p, 8]
cudaGetSymbolAddress_v3020_params: TypeAlias = struct_cudaGetSymbolAddress_v3020_params_st
@c.record
class struct_cudaGetSymbolSize_v3020_params_st(c.Struct):
  SIZE = 16
  size: Annotated[c.POINTER[size_t], 0]
  symbol: Annotated[ctypes.c_void_p, 8]
cudaGetSymbolSize_v3020_params: TypeAlias = struct_cudaGetSymbolSize_v3020_params_st
@c.record
class struct_cudaMemPrefetchAsync_ptsz_v8000_params_st(c.Struct):
  SIZE = 32
  devPtr: Annotated[ctypes.c_void_p, 0]
  count: Annotated[size_t, 8]
  dstDevice: Annotated[Annotated[int, ctypes.c_int32], 16]
  stream: Annotated[cudaStream_t, 24]
cudaMemPrefetchAsync_ptsz_v8000_params: TypeAlias = struct_cudaMemPrefetchAsync_ptsz_v8000_params_st
# _v2 (CUDA 12.2) variant: destination expressed as a cudaMemLocation instead
# of a bare device ordinal.
@c.record
class struct_cudaMemPrefetchAsync_v2_ptsz_v12020_params_st(c.Struct):
  SIZE = 40
  devPtr: Annotated[ctypes.c_void_p, 0]
  count: Annotated[size_t, 8]
  location: Annotated[struct_cudaMemLocation, 16]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 24]
  stream: Annotated[cudaStream_t, 32]
cudaMemPrefetchAsync_v2_ptsz_v12020_params: TypeAlias = struct_cudaMemPrefetchAsync_v2_ptsz_v12020_params_st
@c.record
class struct_cudaMemAdvise_v8000_params_st(c.Struct):
  SIZE = 24
  devPtr: Annotated[ctypes.c_void_p, 0]
  count: Annotated[size_t, 8]
  advice: Annotated[enum_cudaMemoryAdvise, 16]
  device: Annotated[Annotated[int, ctypes.c_int32], 20]
# Unified-memory advice enum (mirrors cudaMemoryAdvise; ABI-fixed values).
class enum_cudaMemoryAdvise(Annotated[int, ctypes.c_uint32], c.Enum): pass
cudaMemAdviseSetReadMostly = enum_cudaMemoryAdvise.define('cudaMemAdviseSetReadMostly', 1)
cudaMemAdviseUnsetReadMostly = enum_cudaMemoryAdvise.define('cudaMemAdviseUnsetReadMostly', 2)
cudaMemAdviseSetPreferredLocation = enum_cudaMemoryAdvise.define('cudaMemAdviseSetPreferredLocation', 3)
cudaMemAdviseUnsetPreferredLocation = enum_cudaMemoryAdvise.define('cudaMemAdviseUnsetPreferredLocation', 4)
cudaMemAdviseSetAccessedBy = enum_cudaMemoryAdvise.define('cudaMemAdviseSetAccessedBy', 5)
cudaMemAdviseUnsetAccessedBy = enum_cudaMemoryAdvise.define('cudaMemAdviseUnsetAccessedBy', 6)
cudaMemAdvise_v8000_params: TypeAlias = struct_cudaMemAdvise_v8000_params_st
@c.record
class struct_cudaMemAdvise_v2_v12020_params_st(c.Struct):
  SIZE = 32
  devPtr: Annotated[ctypes.c_void_p, 0]
  count: Annotated[size_t, 8]
  advice: Annotated[enum_cudaMemoryAdvise, 16]
  location: Annotated[struct_cudaMemLocation, 20]
cudaMemAdvise_v2_v12020_params: TypeAlias = struct_cudaMemAdvise_v2_v12020_params_st
@c.record
class struct_cudaMemRangeGetAttribute_v8000_params_st(c.Struct):
  SIZE = 40
  data: Annotated[ctypes.c_void_p, 0]
  dataSize: Annotated[size_t, 8]
  attribute: Annotated[enum_cudaMemRangeAttribute, 16]
  devPtr: Annotated[ctypes.c_void_p, 24]
  count: Annotated[size_t, 32]
# Queryable unified-memory range attributes (mirrors cudaMemRangeAttribute).
class enum_cudaMemRangeAttribute(Annotated[int, ctypes.c_uint32], c.Enum): pass
cudaMemRangeAttributeReadMostly = enum_cudaMemRangeAttribute.define('cudaMemRangeAttributeReadMostly', 1)
cudaMemRangeAttributePreferredLocation = enum_cudaMemRangeAttribute.define('cudaMemRangeAttributePreferredLocation', 2)
cudaMemRangeAttributeAccessedBy = enum_cudaMemRangeAttribute.define('cudaMemRangeAttributeAccessedBy', 3)
cudaMemRangeAttributeLastPrefetchLocation = enum_cudaMemRangeAttribute.define('cudaMemRangeAttributeLastPrefetchLocation', 4)
cudaMemRangeAttributePreferredLocationType = enum_cudaMemRangeAttribute.define('cudaMemRangeAttributePreferredLocationType', 5)
cudaMemRangeAttributePreferredLocationId = enum_cudaMemRangeAttribute.define('cudaMemRangeAttributePreferredLocationId', 6)
cudaMemRangeAttributeLastPrefetchLocationType = enum_cudaMemRangeAttribute.define('cudaMemRangeAttributeLastPrefetchLocationType', 7)
cudaMemRangeAttributeLastPrefetchLocationId = enum_cudaMemRangeAttribute.define('cudaMemRangeAttributeLastPrefetchLocationId', 8)
cudaMemRangeGetAttribute_v8000_params: TypeAlias = struct_cudaMemRangeGetAttribute_v8000_params_st
@c.record
class struct_cudaMemRangeGetAttributes_v8000_params_st(c.Struct):
  SIZE = 48
  data: Annotated[c.POINTER[ctypes.c_void_p], 0]
  dataSizes: Annotated[c.POINTER[size_t], 8]
  attributes: Annotated[c.POINTER[enum_cudaMemRangeAttribute], 16]
  numAttributes: Annotated[size_t, 24]
  devPtr: Annotated[ctypes.c_void_p, 32]
  count: Annotated[size_t, 40]
cudaMemRangeGetAttributes_v8000_params: TypeAlias = struct_cudaMemRangeGetAttributes_v8000_params_st
# ---------------------------------------------------------------------------
# Legacy (1-D offset) array memcpy parameter records — the deprecated
# cudaMemcpyToArray/FromArray/ArrayToArray family.  Layouts are
# generator-emitted; do not hand-edit.
# ---------------------------------------------------------------------------
@c.record
class struct_cudaMemcpyToArray_ptds_v7000_params_st(c.Struct):
  SIZE = 48
  dst: Annotated[cudaArray_t, 0]
  wOffset: Annotated[size_t, 8]
  hOffset: Annotated[size_t, 16]
  src: Annotated[ctypes.c_void_p, 24]
  count: Annotated[size_t, 32]
  kind: Annotated[enum_cudaMemcpyKind, 40]
cudaMemcpyToArray_ptds_v7000_params: TypeAlias = struct_cudaMemcpyToArray_ptds_v7000_params_st
@c.record
class struct_cudaMemcpyFromArray_ptds_v7000_params_st(c.Struct):
  SIZE = 48
  dst: Annotated[ctypes.c_void_p, 0]
  src: Annotated[cudaArray_const_t, 8]
  wOffset: Annotated[size_t, 16]
  hOffset: Annotated[size_t, 24]
  count: Annotated[size_t, 32]
  kind: Annotated[enum_cudaMemcpyKind, 40]
cudaMemcpyFromArray_ptds_v7000_params: TypeAlias = struct_cudaMemcpyFromArray_ptds_v7000_params_st
@c.record
class struct_cudaMemcpyArrayToArray_ptds_v7000_params_st(c.Struct):
  SIZE = 64
  dst: Annotated[cudaArray_t, 0]
  wOffsetDst: Annotated[size_t, 8]
  hOffsetDst: Annotated[size_t, 16]
  src: Annotated[cudaArray_const_t, 24]
  wOffsetSrc: Annotated[size_t, 32]
  hOffsetSrc: Annotated[size_t, 40]
  count: Annotated[size_t, 48]
  kind: Annotated[enum_cudaMemcpyKind, 56]
cudaMemcpyArrayToArray_ptds_v7000_params: TypeAlias = struct_cudaMemcpyArrayToArray_ptds_v7000_params_st
@c.record
class struct_cudaMemcpyToArrayAsync_ptsz_v7000_params_st(c.Struct):
  SIZE = 56
  dst: Annotated[cudaArray_t, 0]
  wOffset: Annotated[size_t, 8]
  hOffset: Annotated[size_t, 16]
  src: Annotated[ctypes.c_void_p, 24]
  count: Annotated[size_t, 32]
  kind: Annotated[enum_cudaMemcpyKind, 40]
  stream: Annotated[cudaStream_t, 48]
cudaMemcpyToArrayAsync_ptsz_v7000_params: TypeAlias = struct_cudaMemcpyToArrayAsync_ptsz_v7000_params_st
@c.record
class struct_cudaMemcpyFromArrayAsync_ptsz_v7000_params_st(c.Struct):
  SIZE = 56
  dst: Annotated[ctypes.c_void_p, 0]
  src: Annotated[cudaArray_const_t, 8]
  wOffset: Annotated[size_t, 16]
  hOffset: Annotated[size_t, 24]
  count: Annotated[size_t, 32]
  kind: Annotated[enum_cudaMemcpyKind, 40]
  stream: Annotated[cudaStream_t, 48]
cudaMemcpyFromArrayAsync_ptsz_v7000_params: TypeAlias = struct_cudaMemcpyFromArrayAsync_ptsz_v7000_params_st
@c.record
class struct_cudaMallocAsync_ptsz_v11020_params_st(c.Struct):
SIZE = 24
devPtr: Annotated[c.POINTER[ctypes.c_void_p], 0]
size: Annotated[size_t, 8]
hStream: Annotated[cudaStream_t, 16]
cudaMallocAsync_ptsz_v11020_params: TypeAlias = struct_cudaMallocAsync_ptsz_v11020_params_st
@c.record
class struct_cudaFreeAsync_ptsz_v11020_params_st(c.Struct):
SIZE = 16
devPtr: Annotated[ctypes.c_void_p, 0]
hStream: Annotated[cudaStream_t, 8]
cudaFreeAsync_ptsz_v11020_params: TypeAlias = struct_cudaFreeAsync_ptsz_v11020_params_st
@c.record
class struct_cudaMemPoolTrimTo_v11020_params_st(c.Struct):
SIZE = 16
memPool: Annotated[cudaMemPool_t, 0]
minBytesToKeep: Annotated[size_t, 8]
cudaMemPoolTrimTo_v11020_params: TypeAlias = struct_cudaMemPoolTrimTo_v11020_params_st
@c.record
class struct_cudaMemPoolSetAttribute_v11020_params_st(c.Struct):
SIZE = 24
memPool: Annotated[cudaMemPool_t, 0]
attr: Annotated[enum_cudaMemPoolAttr, 8]
value: Annotated[ctypes.c_void_p, 16]
class enum_cudaMemPoolAttr(Annotated[int, ctypes.c_uint32], c.Enum): pass
cudaMemPoolReuseFollowEventDependencies = enum_cudaMemPoolAttr.define('cudaMemPoolReuseFollowEventDependencies', 1)
cudaMemPoolReuseAllowOpportunistic = enum_cudaMemPoolAttr.define('cudaMemPoolReuseAllowOpportunistic', 2)
cudaMemPoolReuseAllowInternalDependencies = enum_cudaMemPoolAttr.define('cudaMemPoolReuseAllowInternalDependencies', 3)
cudaMemPoolAttrReleaseThreshold = enum_cudaMemPoolAttr.define('cudaMemPoolAttrReleaseThreshold', 4)
cudaMemPoolAttrReservedMemCurrent = enum_cudaMemPoolAttr.define('cudaMemPoolAttrReservedMemCurrent', 5)
cudaMemPoolAttrReservedMemHigh = enum_cudaMemPoolAttr.define('cudaMemPoolAttrReservedMemHigh', 6)
cudaMemPoolAttrUsedMemCurrent = enum_cudaMemPoolAttr.define('cudaMemPoolAttrUsedMemCurrent', 7)
cudaMemPoolAttrUsedMemHigh = enum_cudaMemPoolAttr.define('cudaMemPoolAttrUsedMemHigh', 8)
cudaMemPoolSetAttribute_v11020_params: TypeAlias = struct_cudaMemPoolSetAttribute_v11020_params_st
@c.record
class struct_cudaMemPoolGetAttribute_v11020_params_st(c.Struct):
SIZE = 24
memPool: Annotated[cudaMemPool_t, 0]
attr: Annotated[enum_cudaMemPoolAttr, 8]
value: Annotated[ctypes.c_void_p, 16]
cudaMemPoolGetAttribute_v11020_params: TypeAlias = struct_cudaMemPoolGetAttribute_v11020_params_st
# --- cudaMemPool* parameter records ------------------------------------------
# Generated layout classes: each field is Annotated[type, byte-offset] and
# SIZE is the total struct size in bytes; the `*_params` TypeAlias exposes the
# record under the name used by the versioned runtime-API entry point.
# NOTE(review): generated bindings — do not hand-edit offsets or SIZE values.
@c.record
class struct_cudaMemPoolSetAccess_v11020_params_st(c.Struct):
  SIZE = 24
  memPool: Annotated[cudaMemPool_t, 0]
  descList: Annotated[c.POINTER[struct_cudaMemAccessDesc], 8]
  count: Annotated[size_t, 16]
@c.record
class struct_cudaMemAccessDesc(c.Struct):
  SIZE = 12
  location: Annotated[struct_cudaMemLocation, 0]
  flags: Annotated[enum_cudaMemAccessFlags, 8]
# Pool access protection flags (note: ReadWrite is 3, there is no value 2).
class enum_cudaMemAccessFlags(Annotated[int, ctypes.c_uint32], c.Enum): pass
cudaMemAccessFlagsProtNone = enum_cudaMemAccessFlags.define('cudaMemAccessFlagsProtNone', 0)
cudaMemAccessFlagsProtRead = enum_cudaMemAccessFlags.define('cudaMemAccessFlagsProtRead', 1)
cudaMemAccessFlagsProtReadWrite = enum_cudaMemAccessFlags.define('cudaMemAccessFlagsProtReadWrite', 3)
cudaMemPoolSetAccess_v11020_params: TypeAlias = struct_cudaMemPoolSetAccess_v11020_params_st
@c.record
class struct_cudaMemPoolGetAccess_v11020_params_st(c.Struct):
  SIZE = 24
  flags: Annotated[c.POINTER[enum_cudaMemAccessFlags], 0]
  memPool: Annotated[cudaMemPool_t, 8]
  location: Annotated[c.POINTER[struct_cudaMemLocation], 16]
cudaMemPoolGetAccess_v11020_params: TypeAlias = struct_cudaMemPoolGetAccess_v11020_params_st
@c.record
class struct_cudaMemPoolCreate_v11020_params_st(c.Struct):
  SIZE = 16
  memPool: Annotated[c.POINTER[cudaMemPool_t], 0]
  poolProps: Annotated[c.POINTER[struct_cudaMemPoolProps], 8]
@c.record
class struct_cudaMemPoolProps(c.Struct):
  SIZE = 88
  allocType: Annotated[enum_cudaMemAllocationType, 0]
  handleTypes: Annotated[enum_cudaMemAllocationHandleType, 4]
  location: Annotated[struct_cudaMemLocation, 8]
  win32SecurityAttributes: Annotated[ctypes.c_void_p, 16]
  maxSize: Annotated[size_t, 24]
  usage: Annotated[Annotated[int, ctypes.c_uint16], 32]
  # padding/reserved tail: 54 bytes starting at offset 34 brings SIZE to 88
  reserved: Annotated[c.Array[Annotated[int, ctypes.c_ubyte], Literal[54]], 34]
class enum_cudaMemAllocationType(Annotated[int, ctypes.c_uint32], c.Enum): pass
cudaMemAllocationTypeInvalid = enum_cudaMemAllocationType.define('cudaMemAllocationTypeInvalid', 0)
cudaMemAllocationTypePinned = enum_cudaMemAllocationType.define('cudaMemAllocationTypePinned', 1)
cudaMemAllocationTypeMax = enum_cudaMemAllocationType.define('cudaMemAllocationTypeMax', 2147483647)
# Exportable OS handle kinds; values are powers of two (bit-flag style).
class enum_cudaMemAllocationHandleType(Annotated[int, ctypes.c_uint32], c.Enum): pass
cudaMemHandleTypeNone = enum_cudaMemAllocationHandleType.define('cudaMemHandleTypeNone', 0)
cudaMemHandleTypePosixFileDescriptor = enum_cudaMemAllocationHandleType.define('cudaMemHandleTypePosixFileDescriptor', 1)
cudaMemHandleTypeWin32 = enum_cudaMemAllocationHandleType.define('cudaMemHandleTypeWin32', 2)
cudaMemHandleTypeWin32Kmt = enum_cudaMemAllocationHandleType.define('cudaMemHandleTypeWin32Kmt', 4)
cudaMemHandleTypeFabric = enum_cudaMemAllocationHandleType.define('cudaMemHandleTypeFabric', 8)
cudaMemPoolCreate_v11020_params: TypeAlias = struct_cudaMemPoolCreate_v11020_params_st
@c.record
class struct_cudaMemPoolDestroy_v11020_params_st(c.Struct):
  SIZE = 8
  memPool: Annotated[cudaMemPool_t, 0]
cudaMemPoolDestroy_v11020_params: TypeAlias = struct_cudaMemPoolDestroy_v11020_params_st
@c.record
class struct_cudaMallocFromPoolAsync_ptsz_v11020_params_st(c.Struct):
  SIZE = 32
  ptr: Annotated[c.POINTER[ctypes.c_void_p], 0]
  size: Annotated[size_t, 8]
  memPool: Annotated[cudaMemPool_t, 16]
  stream: Annotated[cudaStream_t, 24]
cudaMallocFromPoolAsync_ptsz_v11020_params: TypeAlias = struct_cudaMallocFromPoolAsync_ptsz_v11020_params_st
@c.record
class struct_cudaMemPoolExportToShareableHandle_v11020_params_st(c.Struct):
  SIZE = 24
  shareableHandle: Annotated[ctypes.c_void_p, 0]
  memPool: Annotated[cudaMemPool_t, 8]
  handleType: Annotated[enum_cudaMemAllocationHandleType, 16]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 20]
cudaMemPoolExportToShareableHandle_v11020_params: TypeAlias = struct_cudaMemPoolExportToShareableHandle_v11020_params_st
@c.record
class struct_cudaMemPoolImportFromShareableHandle_v11020_params_st(c.Struct):
  SIZE = 24
  memPool: Annotated[c.POINTER[cudaMemPool_t], 0]
  shareableHandle: Annotated[ctypes.c_void_p, 8]
  handleType: Annotated[enum_cudaMemAllocationHandleType, 16]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 20]
cudaMemPoolImportFromShareableHandle_v11020_params: TypeAlias = struct_cudaMemPoolImportFromShareableHandle_v11020_params_st
@c.record
class struct_cudaMemPoolExportPointer_v11020_params_st(c.Struct):
  SIZE = 16
  exportData: Annotated[c.POINTER[struct_cudaMemPoolPtrExportData], 0]
  ptr: Annotated[ctypes.c_void_p, 8]
# Opaque 64-byte blob used to re-import an exported pool pointer.
@c.record
class struct_cudaMemPoolPtrExportData(c.Struct):
  SIZE = 64
  reserved: Annotated[c.Array[Annotated[int, ctypes.c_ubyte], Literal[64]], 0]
cudaMemPoolExportPointer_v11020_params: TypeAlias = struct_cudaMemPoolExportPointer_v11020_params_st
@c.record
class struct_cudaMemPoolImportPointer_v11020_params_st(c.Struct):
  SIZE = 24
  ptr: Annotated[c.POINTER[ctypes.c_void_p], 0]
  memPool: Annotated[cudaMemPool_t, 8]
  exportData: Annotated[c.POINTER[struct_cudaMemPoolPtrExportData], 16]
cudaMemPoolImportPointer_v11020_params: TypeAlias = struct_cudaMemPoolImportPointer_v11020_params_st
# --- pointer-attribute and peer-access parameter records ---------------------
# Generated layout classes (field = Annotated[type, byte-offset], SIZE = total
# struct size).  NOTE(review): generated bindings — do not hand-edit offsets.
@c.record
class struct_cudaPointerGetAttributes_v4000_params_st(c.Struct):
  SIZE = 16
  attributes: Annotated[c.POINTER[struct_cudaPointerAttributes], 0]
  ptr: Annotated[ctypes.c_void_p, 8]
@c.record
class struct_cudaPointerAttributes(c.Struct):
  SIZE = 24
  type: Annotated[enum_cudaMemoryType, 0]
  device: Annotated[Annotated[int, ctypes.c_int32], 4]
  devicePointer: Annotated[ctypes.c_void_p, 8]
  hostPointer: Annotated[ctypes.c_void_p, 16]
class enum_cudaMemoryType(Annotated[int, ctypes.c_uint32], c.Enum): pass
cudaMemoryTypeUnregistered = enum_cudaMemoryType.define('cudaMemoryTypeUnregistered', 0)
cudaMemoryTypeHost = enum_cudaMemoryType.define('cudaMemoryTypeHost', 1)
cudaMemoryTypeDevice = enum_cudaMemoryType.define('cudaMemoryTypeDevice', 2)
cudaMemoryTypeManaged = enum_cudaMemoryType.define('cudaMemoryTypeManaged', 3)
cudaPointerGetAttributes_v4000_params: TypeAlias = struct_cudaPointerGetAttributes_v4000_params_st
@c.record
class struct_cudaDeviceCanAccessPeer_v4000_params_st(c.Struct):
  SIZE = 16
  canAccessPeer: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 0]
  device: Annotated[Annotated[int, ctypes.c_int32], 8]
  peerDevice: Annotated[Annotated[int, ctypes.c_int32], 12]
cudaDeviceCanAccessPeer_v4000_params: TypeAlias = struct_cudaDeviceCanAccessPeer_v4000_params_st
@c.record
class struct_cudaDeviceEnablePeerAccess_v4000_params_st(c.Struct):
  SIZE = 8
  peerDevice: Annotated[Annotated[int, ctypes.c_int32], 0]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 4]
cudaDeviceEnablePeerAccess_v4000_params: TypeAlias = struct_cudaDeviceEnablePeerAccess_v4000_params_st
@c.record
class struct_cudaDeviceDisablePeerAccess_v4000_params_st(c.Struct):
  SIZE = 4
  peerDevice: Annotated[Annotated[int, ctypes.c_int32], 0]
cudaDeviceDisablePeerAccess_v4000_params: TypeAlias = struct_cudaDeviceDisablePeerAccess_v4000_params_st
# --- graphics-interop (cudaGraphics*) parameter records ----------------------
# Generated layout classes (field = Annotated[type, byte-offset], SIZE = total
# struct size).  NOTE(review): generated bindings — do not hand-edit offsets.
@c.record
class struct_cudaGraphicsUnregisterResource_v3020_params_st(c.Struct):
  SIZE = 8
  resource: Annotated[cudaGraphicsResource_t, 0]
# Opaque handle: resource type is forward-declared, only pointers are used.
class struct_cudaGraphicsResource(ctypes.Structure): pass
cudaGraphicsResource_t: TypeAlias = c.POINTER[struct_cudaGraphicsResource]
cudaGraphicsUnregisterResource_v3020_params: TypeAlias = struct_cudaGraphicsUnregisterResource_v3020_params_st
@c.record
class struct_cudaGraphicsResourceSetMapFlags_v3020_params_st(c.Struct):
  SIZE = 16
  resource: Annotated[cudaGraphicsResource_t, 0]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 8]
cudaGraphicsResourceSetMapFlags_v3020_params: TypeAlias = struct_cudaGraphicsResourceSetMapFlags_v3020_params_st
@c.record
class struct_cudaGraphicsMapResources_v3020_params_st(c.Struct):
  SIZE = 24
  count: Annotated[Annotated[int, ctypes.c_int32], 0]
  resources: Annotated[c.POINTER[cudaGraphicsResource_t], 8]
  stream: Annotated[cudaStream_t, 16]
cudaGraphicsMapResources_v3020_params: TypeAlias = struct_cudaGraphicsMapResources_v3020_params_st
@c.record
class struct_cudaGraphicsUnmapResources_v3020_params_st(c.Struct):
  SIZE = 24
  count: Annotated[Annotated[int, ctypes.c_int32], 0]
  resources: Annotated[c.POINTER[cudaGraphicsResource_t], 8]
  stream: Annotated[cudaStream_t, 16]
cudaGraphicsUnmapResources_v3020_params: TypeAlias = struct_cudaGraphicsUnmapResources_v3020_params_st
@c.record
class struct_cudaGraphicsResourceGetMappedPointer_v3020_params_st(c.Struct):
  SIZE = 24
  devPtr: Annotated[c.POINTER[ctypes.c_void_p], 0]
  size: Annotated[c.POINTER[size_t], 8]
  resource: Annotated[cudaGraphicsResource_t, 16]
cudaGraphicsResourceGetMappedPointer_v3020_params: TypeAlias = struct_cudaGraphicsResourceGetMappedPointer_v3020_params_st
@c.record
class struct_cudaGraphicsSubResourceGetMappedArray_v3020_params_st(c.Struct):
  SIZE = 24
  array: Annotated[c.POINTER[cudaArray_t], 0]
  resource: Annotated[cudaGraphicsResource_t, 8]
  arrayIndex: Annotated[Annotated[int, ctypes.c_uint32], 16]
  mipLevel: Annotated[Annotated[int, ctypes.c_uint32], 20]
cudaGraphicsSubResourceGetMappedArray_v3020_params: TypeAlias = struct_cudaGraphicsSubResourceGetMappedArray_v3020_params_st
@c.record
class struct_cudaGraphicsResourceGetMappedMipmappedArray_v5000_params_st(c.Struct):
  SIZE = 16
  mipmappedArray: Annotated[c.POINTER[cudaMipmappedArray_t], 0]
  resource: Annotated[cudaGraphicsResource_t, 8]
cudaGraphicsResourceGetMappedMipmappedArray_v5000_params: TypeAlias = struct_cudaGraphicsResourceGetMappedMipmappedArray_v5000_params_st
# --- channel-descriptor parameter records ------------------------------------
# Generated layout classes (field = Annotated[type, byte-offset], SIZE = total
# struct size).  NOTE(review): generated bindings — do not hand-edit offsets.
@c.record
class struct_cudaGetChannelDesc_v3020_params_st(c.Struct):
  SIZE = 16
  desc: Annotated[c.POINTER[struct_cudaChannelFormatDesc], 0]
  array: Annotated[cudaArray_const_t, 8]
cudaGetChannelDesc_v3020_params: TypeAlias = struct_cudaGetChannelDesc_v3020_params_st
@c.record
class struct_cudaCreateChannelDesc_v3020_params_st(c.Struct):
  SIZE = 20
  # per-component bit counts x/y/z/w plus the format kind
  x: Annotated[Annotated[int, ctypes.c_int32], 0]
  y: Annotated[Annotated[int, ctypes.c_int32], 4]
  z: Annotated[Annotated[int, ctypes.c_int32], 8]
  w: Annotated[Annotated[int, ctypes.c_int32], 12]
  f: Annotated[enum_cudaChannelFormatKind, 16]
cudaCreateChannelDesc_v3020_params: TypeAlias = struct_cudaCreateChannelDesc_v3020_params_st
# --- texture/surface-object parameter records and descriptors ----------------
# Generated layout classes (field = Annotated[type, byte-offset], SIZE = total
# struct size).  NOTE(review): generated bindings — do not hand-edit offsets.
@c.record
class struct_cudaCreateTextureObject_v5000_params_st(c.Struct):
  SIZE = 32
  pTexObject: Annotated[c.POINTER[cudaTextureObject_t], 0]
  pResDesc: Annotated[c.POINTER[struct_cudaResourceDesc], 8]
  pTexDesc: Annotated[c.POINTER[struct_cudaTextureDesc], 16]
  pResViewDesc: Annotated[c.POINTER[struct_cudaResourceViewDesc], 24]
# Texture object handle: a plain 64-bit integer, not a pointer.
cudaTextureObject_t: TypeAlias = Annotated[int, ctypes.c_uint64]
@c.record
class struct_cudaResourceDesc(c.Struct):
  SIZE = 64
  resType: Annotated[enum_cudaResourceType, 0]
  res: Annotated[struct_cudaResourceDesc_res, 8]
class enum_cudaResourceType(Annotated[int, ctypes.c_uint32], c.Enum): pass
cudaResourceTypeArray = enum_cudaResourceType.define('cudaResourceTypeArray', 0)
cudaResourceTypeMipmappedArray = enum_cudaResourceType.define('cudaResourceTypeMipmappedArray', 1)
cudaResourceTypeLinear = enum_cudaResourceType.define('cudaResourceTypeLinear', 2)
cudaResourceTypePitch2D = enum_cudaResourceType.define('cudaResourceTypePitch2D', 3)
# C union: all four members share offset 0; resType selects the active one.
@c.record
class struct_cudaResourceDesc_res(c.Struct):
  SIZE = 56
  array: Annotated[struct_cudaResourceDesc_res_array, 0]
  mipmap: Annotated[struct_cudaResourceDesc_res_mipmap, 0]
  linear: Annotated[struct_cudaResourceDesc_res_linear, 0]
  pitch2D: Annotated[struct_cudaResourceDesc_res_pitch2D, 0]
@c.record
class struct_cudaResourceDesc_res_array(c.Struct):
  SIZE = 8
  array: Annotated[cudaArray_t, 0]
@c.record
class struct_cudaResourceDesc_res_mipmap(c.Struct):
  SIZE = 8
  mipmap: Annotated[cudaMipmappedArray_t, 0]
@c.record
class struct_cudaResourceDesc_res_linear(c.Struct):
  SIZE = 40
  devPtr: Annotated[ctypes.c_void_p, 0]
  desc: Annotated[struct_cudaChannelFormatDesc, 8]
  sizeInBytes: Annotated[size_t, 32]
@c.record
class struct_cudaResourceDesc_res_pitch2D(c.Struct):
  SIZE = 56
  devPtr: Annotated[ctypes.c_void_p, 0]
  desc: Annotated[struct_cudaChannelFormatDesc, 8]
  width: Annotated[size_t, 32]
  height: Annotated[size_t, 40]
  pitchInBytes: Annotated[size_t, 48]
@c.record
class struct_cudaTextureDesc(c.Struct):
  SIZE = 72
  # one address mode per dimension (3 entries)
  addressMode: Annotated[c.Array[enum_cudaTextureAddressMode, Literal[3]], 0]
  filterMode: Annotated[enum_cudaTextureFilterMode, 12]
  readMode: Annotated[enum_cudaTextureReadMode, 16]
  sRGB: Annotated[Annotated[int, ctypes.c_int32], 20]
  borderColor: Annotated[c.Array[Annotated[float, ctypes.c_float], Literal[4]], 24]
  normalizedCoords: Annotated[Annotated[int, ctypes.c_int32], 40]
  maxAnisotropy: Annotated[Annotated[int, ctypes.c_uint32], 44]
  mipmapFilterMode: Annotated[enum_cudaTextureFilterMode, 48]
  mipmapLevelBias: Annotated[Annotated[float, ctypes.c_float], 52]
  minMipmapLevelClamp: Annotated[Annotated[float, ctypes.c_float], 56]
  maxMipmapLevelClamp: Annotated[Annotated[float, ctypes.c_float], 60]
  disableTrilinearOptimization: Annotated[Annotated[int, ctypes.c_int32], 64]
  seamlessCubemap: Annotated[Annotated[int, ctypes.c_int32], 68]
class enum_cudaTextureAddressMode(Annotated[int, ctypes.c_uint32], c.Enum): pass
cudaAddressModeWrap = enum_cudaTextureAddressMode.define('cudaAddressModeWrap', 0)
cudaAddressModeClamp = enum_cudaTextureAddressMode.define('cudaAddressModeClamp', 1)
cudaAddressModeMirror = enum_cudaTextureAddressMode.define('cudaAddressModeMirror', 2)
cudaAddressModeBorder = enum_cudaTextureAddressMode.define('cudaAddressModeBorder', 3)
class enum_cudaTextureFilterMode(Annotated[int, ctypes.c_uint32], c.Enum): pass
cudaFilterModePoint = enum_cudaTextureFilterMode.define('cudaFilterModePoint', 0)
cudaFilterModeLinear = enum_cudaTextureFilterMode.define('cudaFilterModeLinear', 1)
class enum_cudaTextureReadMode(Annotated[int, ctypes.c_uint32], c.Enum): pass
cudaReadModeElementType = enum_cudaTextureReadMode.define('cudaReadModeElementType', 0)
cudaReadModeNormalizedFloat = enum_cudaTextureReadMode.define('cudaReadModeNormalizedFloat', 1)
@c.record
class struct_cudaResourceViewDesc(c.Struct):
  SIZE = 48
  format: Annotated[enum_cudaResourceViewFormat, 0]
  width: Annotated[size_t, 8]
  height: Annotated[size_t, 16]
  depth: Annotated[size_t, 24]
  firstMipmapLevel: Annotated[Annotated[int, ctypes.c_uint32], 32]
  lastMipmapLevel: Annotated[Annotated[int, ctypes.c_uint32], 36]
  firstLayer: Annotated[Annotated[int, ctypes.c_uint32], 40]
  lastLayer: Annotated[Annotated[int, ctypes.c_uint32], 44]
class enum_cudaResourceViewFormat(Annotated[int, ctypes.c_uint32], c.Enum): pass
cudaResViewFormatNone = enum_cudaResourceViewFormat.define('cudaResViewFormatNone', 0)
cudaResViewFormatUnsignedChar1 = enum_cudaResourceViewFormat.define('cudaResViewFormatUnsignedChar1', 1)
cudaResViewFormatUnsignedChar2 = enum_cudaResourceViewFormat.define('cudaResViewFormatUnsignedChar2', 2)
cudaResViewFormatUnsignedChar4 = enum_cudaResourceViewFormat.define('cudaResViewFormatUnsignedChar4', 3)
cudaResViewFormatSignedChar1 = enum_cudaResourceViewFormat.define('cudaResViewFormatSignedChar1', 4)
cudaResViewFormatSignedChar2 = enum_cudaResourceViewFormat.define('cudaResViewFormatSignedChar2', 5)
cudaResViewFormatSignedChar4 = enum_cudaResourceViewFormat.define('cudaResViewFormatSignedChar4', 6)
cudaResViewFormatUnsignedShort1 = enum_cudaResourceViewFormat.define('cudaResViewFormatUnsignedShort1', 7)
cudaResViewFormatUnsignedShort2 = enum_cudaResourceViewFormat.define('cudaResViewFormatUnsignedShort2', 8)
cudaResViewFormatUnsignedShort4 = enum_cudaResourceViewFormat.define('cudaResViewFormatUnsignedShort4', 9)
cudaResViewFormatSignedShort1 = enum_cudaResourceViewFormat.define('cudaResViewFormatSignedShort1', 10)
cudaResViewFormatSignedShort2 = enum_cudaResourceViewFormat.define('cudaResViewFormatSignedShort2', 11)
cudaResViewFormatSignedShort4 = enum_cudaResourceViewFormat.define('cudaResViewFormatSignedShort4', 12)
cudaResViewFormatUnsignedInt1 = enum_cudaResourceViewFormat.define('cudaResViewFormatUnsignedInt1', 13)
cudaResViewFormatUnsignedInt2 = enum_cudaResourceViewFormat.define('cudaResViewFormatUnsignedInt2', 14)
cudaResViewFormatUnsignedInt4 = enum_cudaResourceViewFormat.define('cudaResViewFormatUnsignedInt4', 15)
cudaResViewFormatSignedInt1 = enum_cudaResourceViewFormat.define('cudaResViewFormatSignedInt1', 16)
cudaResViewFormatSignedInt2 = enum_cudaResourceViewFormat.define('cudaResViewFormatSignedInt2', 17)
cudaResViewFormatSignedInt4 = enum_cudaResourceViewFormat.define('cudaResViewFormatSignedInt4', 18)
cudaResViewFormatHalf1 = enum_cudaResourceViewFormat.define('cudaResViewFormatHalf1', 19)
cudaResViewFormatHalf2 = enum_cudaResourceViewFormat.define('cudaResViewFormatHalf2', 20)
cudaResViewFormatHalf4 = enum_cudaResourceViewFormat.define('cudaResViewFormatHalf4', 21)
cudaResViewFormatFloat1 = enum_cudaResourceViewFormat.define('cudaResViewFormatFloat1', 22)
cudaResViewFormatFloat2 = enum_cudaResourceViewFormat.define('cudaResViewFormatFloat2', 23)
cudaResViewFormatFloat4 = enum_cudaResourceViewFormat.define('cudaResViewFormatFloat4', 24)
cudaResViewFormatUnsignedBlockCompressed1 = enum_cudaResourceViewFormat.define('cudaResViewFormatUnsignedBlockCompressed1', 25)
cudaResViewFormatUnsignedBlockCompressed2 = enum_cudaResourceViewFormat.define('cudaResViewFormatUnsignedBlockCompressed2', 26)
cudaResViewFormatUnsignedBlockCompressed3 = enum_cudaResourceViewFormat.define('cudaResViewFormatUnsignedBlockCompressed3', 27)
cudaResViewFormatUnsignedBlockCompressed4 = enum_cudaResourceViewFormat.define('cudaResViewFormatUnsignedBlockCompressed4', 28)
cudaResViewFormatSignedBlockCompressed4 = enum_cudaResourceViewFormat.define('cudaResViewFormatSignedBlockCompressed4', 29)
cudaResViewFormatUnsignedBlockCompressed5 = enum_cudaResourceViewFormat.define('cudaResViewFormatUnsignedBlockCompressed5', 30)
cudaResViewFormatSignedBlockCompressed5 = enum_cudaResourceViewFormat.define('cudaResViewFormatSignedBlockCompressed5', 31)
cudaResViewFormatUnsignedBlockCompressed6H = enum_cudaResourceViewFormat.define('cudaResViewFormatUnsignedBlockCompressed6H', 32)
cudaResViewFormatSignedBlockCompressed6H = enum_cudaResourceViewFormat.define('cudaResViewFormatSignedBlockCompressed6H', 33)
cudaResViewFormatUnsignedBlockCompressed7 = enum_cudaResourceViewFormat.define('cudaResViewFormatUnsignedBlockCompressed7', 34)
cudaCreateTextureObject_v5000_params: TypeAlias = struct_cudaCreateTextureObject_v5000_params_st
@c.record
class struct_cudaDestroyTextureObject_v5000_params_st(c.Struct):
  SIZE = 8
  texObject: Annotated[cudaTextureObject_t, 0]
cudaDestroyTextureObject_v5000_params: TypeAlias = struct_cudaDestroyTextureObject_v5000_params_st
@c.record
class struct_cudaGetTextureObjectResourceDesc_v5000_params_st(c.Struct):
  SIZE = 16
  pResDesc: Annotated[c.POINTER[struct_cudaResourceDesc], 0]
  texObject: Annotated[cudaTextureObject_t, 8]
cudaGetTextureObjectResourceDesc_v5000_params: TypeAlias = struct_cudaGetTextureObjectResourceDesc_v5000_params_st
@c.record
class struct_cudaGetTextureObjectTextureDesc_v5000_params_st(c.Struct):
  SIZE = 16
  pTexDesc: Annotated[c.POINTER[struct_cudaTextureDesc], 0]
  texObject: Annotated[cudaTextureObject_t, 8]
cudaGetTextureObjectTextureDesc_v5000_params: TypeAlias = struct_cudaGetTextureObjectTextureDesc_v5000_params_st
@c.record
class struct_cudaGetTextureObjectResourceViewDesc_v5000_params_st(c.Struct):
  SIZE = 16
  pResViewDesc: Annotated[c.POINTER[struct_cudaResourceViewDesc], 0]
  texObject: Annotated[cudaTextureObject_t, 8]
cudaGetTextureObjectResourceViewDesc_v5000_params: TypeAlias = struct_cudaGetTextureObjectResourceViewDesc_v5000_params_st
@c.record
class struct_cudaCreateSurfaceObject_v5000_params_st(c.Struct):
  SIZE = 16
  pSurfObject: Annotated[c.POINTER[cudaSurfaceObject_t], 0]
  pResDesc: Annotated[c.POINTER[struct_cudaResourceDesc], 8]
# Surface object handle: a plain 64-bit integer, not a pointer.
cudaSurfaceObject_t: TypeAlias = Annotated[int, ctypes.c_uint64]
cudaCreateSurfaceObject_v5000_params: TypeAlias = struct_cudaCreateSurfaceObject_v5000_params_st
@c.record
class struct_cudaDestroySurfaceObject_v5000_params_st(c.Struct):
  SIZE = 8
  surfObject: Annotated[cudaSurfaceObject_t, 0]
cudaDestroySurfaceObject_v5000_params: TypeAlias = struct_cudaDestroySurfaceObject_v5000_params_st
@c.record
class struct_cudaGetSurfaceObjectResourceDesc_v5000_params_st(c.Struct):
  SIZE = 16
  pResDesc: Annotated[c.POINTER[struct_cudaResourceDesc], 0]
  surfObject: Annotated[cudaSurfaceObject_t, 8]
cudaGetSurfaceObjectResourceDesc_v5000_params: TypeAlias = struct_cudaGetSurfaceObjectResourceDesc_v5000_params_st
# --- driver/runtime version-query parameter records --------------------------
# Each holds a single out-pointer to an int receiving the version number.
@c.record
class struct_cudaDriverGetVersion_v3020_params_st(c.Struct):
  SIZE = 8
  driverVersion: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 0]
cudaDriverGetVersion_v3020_params: TypeAlias = struct_cudaDriverGetVersion_v3020_params_st
@c.record
class struct_cudaRuntimeGetVersion_v3020_params_st(c.Struct):
  SIZE = 8
  runtimeVersion: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 0]
cudaRuntimeGetVersion_v3020_params: TypeAlias = struct_cudaRuntimeGetVersion_v3020_params_st
# --- graph creation and kernel-node parameter records ------------------------
# Generated layout classes (field = Annotated[type, byte-offset], SIZE = total
# struct size).  NOTE(review): generated bindings — do not hand-edit offsets.
@c.record
class struct_cudaGraphCreate_v10000_params_st(c.Struct):
  SIZE = 16
  pGraph: Annotated[c.POINTER[cudaGraph_t], 0]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 8]
cudaGraphCreate_v10000_params: TypeAlias = struct_cudaGraphCreate_v10000_params_st
@c.record
class struct_cudaGraphAddKernelNode_v10000_params_st(c.Struct):
  SIZE = 40
  pGraphNode: Annotated[c.POINTER[cudaGraphNode_t], 0]
  graph: Annotated[cudaGraph_t, 8]
  pDependencies: Annotated[c.POINTER[cudaGraphNode_t], 16]
  numDependencies: Annotated[size_t, 24]
  pNodeParams: Annotated[c.POINTER[struct_cudaKernelNodeParams], 32]
@c.record
class struct_cudaKernelNodeParams(c.Struct):
  SIZE = 56
  func: Annotated[ctypes.c_void_p, 0]
  # dim3 members are 12 bytes each (8 + 12 = 20 for blockDim's offset)
  gridDim: Annotated[dim3, 8]
  blockDim: Annotated[dim3, 20]
  sharedMemBytes: Annotated[Annotated[int, ctypes.c_uint32], 32]
  kernelParams: Annotated[c.POINTER[ctypes.c_void_p], 40]
  extra: Annotated[c.POINTER[ctypes.c_void_p], 48]
cudaGraphAddKernelNode_v10000_params: TypeAlias = struct_cudaGraphAddKernelNode_v10000_params_st
@c.record
class struct_cudaGraphKernelNodeGetParams_v10000_params_st(c.Struct):
  SIZE = 16
  node: Annotated[cudaGraphNode_t, 0]
  pNodeParams: Annotated[c.POINTER[struct_cudaKernelNodeParams], 8]
cudaGraphKernelNodeGetParams_v10000_params: TypeAlias = struct_cudaGraphKernelNodeGetParams_v10000_params_st
@c.record
class struct_cudaGraphKernelNodeSetParams_v10000_params_st(c.Struct):
  SIZE = 16
  node: Annotated[cudaGraphNode_t, 0]
  pNodeParams: Annotated[c.POINTER[struct_cudaKernelNodeParams], 8]
cudaGraphKernelNodeSetParams_v10000_params: TypeAlias = struct_cudaGraphKernelNodeSetParams_v10000_params_st
@c.record
class struct_cudaGraphKernelNodeCopyAttributes_v11000_params_st(c.Struct):
  SIZE = 16
  hSrc: Annotated[cudaGraphNode_t, 0]
  hDst: Annotated[cudaGraphNode_t, 8]
cudaGraphKernelNodeCopyAttributes_v11000_params: TypeAlias = struct_cudaGraphKernelNodeCopyAttributes_v11000_params_st
@c.record
class struct_cudaGraphKernelNodeGetAttribute_v11000_params_st(c.Struct):
  SIZE = 24
  hNode: Annotated[cudaGraphNode_t, 0]
  attr: Annotated[cudaLaunchAttributeID, 8]
  value_out: Annotated[c.POINTER[cudaLaunchAttributeValue], 16]
cudaGraphKernelNodeGetAttribute_v11000_params: TypeAlias = struct_cudaGraphKernelNodeGetAttribute_v11000_params_st
@c.record
class struct_cudaGraphKernelNodeSetAttribute_v11000_params_st(c.Struct):
  SIZE = 24
  hNode: Annotated[cudaGraphNode_t, 0]
  attr: Annotated[cudaLaunchAttributeID, 8]
  value: Annotated[c.POINTER[cudaLaunchAttributeValue], 16]
cudaGraphKernelNodeSetAttribute_v11000_params: TypeAlias = struct_cudaGraphKernelNodeSetAttribute_v11000_params_st
# --- graph memcpy-node parameter records -------------------------------------
# Generated layout classes (field = Annotated[type, byte-offset], SIZE = total
# struct size).  NOTE(review): generated bindings — do not hand-edit offsets.
@c.record
class struct_cudaGraphAddMemcpyNode_v10000_params_st(c.Struct):
  SIZE = 40
  pGraphNode: Annotated[c.POINTER[cudaGraphNode_t], 0]
  graph: Annotated[cudaGraph_t, 8]
  pDependencies: Annotated[c.POINTER[cudaGraphNode_t], 16]
  numDependencies: Annotated[size_t, 24]
  pCopyParams: Annotated[c.POINTER[struct_cudaMemcpy3DParms], 32]
cudaGraphAddMemcpyNode_v10000_params: TypeAlias = struct_cudaGraphAddMemcpyNode_v10000_params_st
@c.record
class struct_cudaGraphAddMemcpyNodeToSymbol_v11010_params_st(c.Struct):
  SIZE = 72
  pGraphNode: Annotated[c.POINTER[cudaGraphNode_t], 0]
  graph: Annotated[cudaGraph_t, 8]
  pDependencies: Annotated[c.POINTER[cudaGraphNode_t], 16]
  numDependencies: Annotated[size_t, 24]
  symbol: Annotated[ctypes.c_void_p, 32]
  src: Annotated[ctypes.c_void_p, 40]
  count: Annotated[size_t, 48]
  offset: Annotated[size_t, 56]
  kind: Annotated[enum_cudaMemcpyKind, 64]
cudaGraphAddMemcpyNodeToSymbol_v11010_params: TypeAlias = struct_cudaGraphAddMemcpyNodeToSymbol_v11010_params_st
@c.record
class struct_cudaGraphAddMemcpyNodeFromSymbol_v11010_params_st(c.Struct):
  SIZE = 72
  pGraphNode: Annotated[c.POINTER[cudaGraphNode_t], 0]
  graph: Annotated[cudaGraph_t, 8]
  pDependencies: Annotated[c.POINTER[cudaGraphNode_t], 16]
  numDependencies: Annotated[size_t, 24]
  dst: Annotated[ctypes.c_void_p, 32]
  symbol: Annotated[ctypes.c_void_p, 40]
  count: Annotated[size_t, 48]
  offset: Annotated[size_t, 56]
  kind: Annotated[enum_cudaMemcpyKind, 64]
cudaGraphAddMemcpyNodeFromSymbol_v11010_params: TypeAlias = struct_cudaGraphAddMemcpyNodeFromSymbol_v11010_params_st
@c.record
class struct_cudaGraphAddMemcpyNode1D_v11010_params_st(c.Struct):
  SIZE = 64
  pGraphNode: Annotated[c.POINTER[cudaGraphNode_t], 0]
  graph: Annotated[cudaGraph_t, 8]
  pDependencies: Annotated[c.POINTER[cudaGraphNode_t], 16]
  numDependencies: Annotated[size_t, 24]
  dst: Annotated[ctypes.c_void_p, 32]
  src: Annotated[ctypes.c_void_p, 40]
  count: Annotated[size_t, 48]
  kind: Annotated[enum_cudaMemcpyKind, 56]
cudaGraphAddMemcpyNode1D_v11010_params: TypeAlias = struct_cudaGraphAddMemcpyNode1D_v11010_params_st
@c.record
class struct_cudaGraphMemcpyNodeGetParams_v10000_params_st(c.Struct):
  SIZE = 16
  node: Annotated[cudaGraphNode_t, 0]
  pNodeParams: Annotated[c.POINTER[struct_cudaMemcpy3DParms], 8]
cudaGraphMemcpyNodeGetParams_v10000_params: TypeAlias = struct_cudaGraphMemcpyNodeGetParams_v10000_params_st
@c.record
class struct_cudaGraphMemcpyNodeSetParams_v10000_params_st(c.Struct):
  SIZE = 16
  node: Annotated[cudaGraphNode_t, 0]
  pNodeParams: Annotated[c.POINTER[struct_cudaMemcpy3DParms], 8]
cudaGraphMemcpyNodeSetParams_v10000_params: TypeAlias = struct_cudaGraphMemcpyNodeSetParams_v10000_params_st
@c.record
class struct_cudaGraphMemcpyNodeSetParamsToSymbol_v11010_params_st(c.Struct):
  SIZE = 48
  node: Annotated[cudaGraphNode_t, 0]
  symbol: Annotated[ctypes.c_void_p, 8]
  src: Annotated[ctypes.c_void_p, 16]
  count: Annotated[size_t, 24]
  offset: Annotated[size_t, 32]
  kind: Annotated[enum_cudaMemcpyKind, 40]
cudaGraphMemcpyNodeSetParamsToSymbol_v11010_params: TypeAlias = struct_cudaGraphMemcpyNodeSetParamsToSymbol_v11010_params_st
@c.record
class struct_cudaGraphMemcpyNodeSetParamsFromSymbol_v11010_params_st(c.Struct):
  SIZE = 48
  node: Annotated[cudaGraphNode_t, 0]
  dst: Annotated[ctypes.c_void_p, 8]
  symbol: Annotated[ctypes.c_void_p, 16]
  count: Annotated[size_t, 24]
  offset: Annotated[size_t, 32]
  kind: Annotated[enum_cudaMemcpyKind, 40]
cudaGraphMemcpyNodeSetParamsFromSymbol_v11010_params: TypeAlias = struct_cudaGraphMemcpyNodeSetParamsFromSymbol_v11010_params_st
@c.record
class struct_cudaGraphMemcpyNodeSetParams1D_v11010_params_st(c.Struct):
  SIZE = 40
  node: Annotated[cudaGraphNode_t, 0]
  dst: Annotated[ctypes.c_void_p, 8]
  src: Annotated[ctypes.c_void_p, 16]
  count: Annotated[size_t, 24]
  kind: Annotated[enum_cudaMemcpyKind, 32]
cudaGraphMemcpyNodeSetParams1D_v11010_params: TypeAlias = struct_cudaGraphMemcpyNodeSetParams1D_v11010_params_st
# --- graph memset/host/child/empty/event-node parameter records --------------
# Generated layout classes (field = Annotated[type, byte-offset], SIZE = total
# struct size).  NOTE(review): generated bindings — do not hand-edit offsets.
@c.record
class struct_cudaGraphAddMemsetNode_v10000_params_st(c.Struct):
  SIZE = 40
  pGraphNode: Annotated[c.POINTER[cudaGraphNode_t], 0]
  graph: Annotated[cudaGraph_t, 8]
  pDependencies: Annotated[c.POINTER[cudaGraphNode_t], 16]
  numDependencies: Annotated[size_t, 24]
  pMemsetParams: Annotated[c.POINTER[struct_cudaMemsetParams], 32]
@c.record
class struct_cudaMemsetParams(c.Struct):
  SIZE = 40
  dst: Annotated[ctypes.c_void_p, 0]
  pitch: Annotated[size_t, 8]
  value: Annotated[Annotated[int, ctypes.c_uint32], 16]
  elementSize: Annotated[Annotated[int, ctypes.c_uint32], 20]
  width: Annotated[size_t, 24]
  height: Annotated[size_t, 32]
cudaGraphAddMemsetNode_v10000_params: TypeAlias = struct_cudaGraphAddMemsetNode_v10000_params_st
@c.record
class struct_cudaGraphMemsetNodeGetParams_v10000_params_st(c.Struct):
  SIZE = 16
  node: Annotated[cudaGraphNode_t, 0]
  pNodeParams: Annotated[c.POINTER[struct_cudaMemsetParams], 8]
cudaGraphMemsetNodeGetParams_v10000_params: TypeAlias = struct_cudaGraphMemsetNodeGetParams_v10000_params_st
@c.record
class struct_cudaGraphMemsetNodeSetParams_v10000_params_st(c.Struct):
  SIZE = 16
  node: Annotated[cudaGraphNode_t, 0]
  pNodeParams: Annotated[c.POINTER[struct_cudaMemsetParams], 8]
cudaGraphMemsetNodeSetParams_v10000_params: TypeAlias = struct_cudaGraphMemsetNodeSetParams_v10000_params_st
@c.record
class struct_cudaGraphAddHostNode_v10000_params_st(c.Struct):
  SIZE = 40
  pGraphNode: Annotated[c.POINTER[cudaGraphNode_t], 0]
  graph: Annotated[cudaGraph_t, 8]
  pDependencies: Annotated[c.POINTER[cudaGraphNode_t], 16]
  numDependencies: Annotated[size_t, 24]
  pNodeParams: Annotated[c.POINTER[struct_cudaHostNodeParams], 32]
# Host callback + opaque user pointer passed back to it.
@c.record
class struct_cudaHostNodeParams(c.Struct):
  SIZE = 16
  fn: Annotated[cudaHostFn_t, 0]
  userData: Annotated[ctypes.c_void_p, 8]
cudaGraphAddHostNode_v10000_params: TypeAlias = struct_cudaGraphAddHostNode_v10000_params_st
@c.record
class struct_cudaGraphHostNodeGetParams_v10000_params_st(c.Struct):
  SIZE = 16
  node: Annotated[cudaGraphNode_t, 0]
  pNodeParams: Annotated[c.POINTER[struct_cudaHostNodeParams], 8]
cudaGraphHostNodeGetParams_v10000_params: TypeAlias = struct_cudaGraphHostNodeGetParams_v10000_params_st
@c.record
class struct_cudaGraphHostNodeSetParams_v10000_params_st(c.Struct):
  SIZE = 16
  node: Annotated[cudaGraphNode_t, 0]
  pNodeParams: Annotated[c.POINTER[struct_cudaHostNodeParams], 8]
cudaGraphHostNodeSetParams_v10000_params: TypeAlias = struct_cudaGraphHostNodeSetParams_v10000_params_st
@c.record
class struct_cudaGraphAddChildGraphNode_v10000_params_st(c.Struct):
  SIZE = 40
  pGraphNode: Annotated[c.POINTER[cudaGraphNode_t], 0]
  graph: Annotated[cudaGraph_t, 8]
  pDependencies: Annotated[c.POINTER[cudaGraphNode_t], 16]
  numDependencies: Annotated[size_t, 24]
  childGraph: Annotated[cudaGraph_t, 32]
cudaGraphAddChildGraphNode_v10000_params: TypeAlias = struct_cudaGraphAddChildGraphNode_v10000_params_st
@c.record
class struct_cudaGraphChildGraphNodeGetGraph_v10000_params_st(c.Struct):
  SIZE = 16
  node: Annotated[cudaGraphNode_t, 0]
  pGraph: Annotated[c.POINTER[cudaGraph_t], 8]
cudaGraphChildGraphNodeGetGraph_v10000_params: TypeAlias = struct_cudaGraphChildGraphNodeGetGraph_v10000_params_st
@c.record
class struct_cudaGraphAddEmptyNode_v10000_params_st(c.Struct):
  SIZE = 32
  pGraphNode: Annotated[c.POINTER[cudaGraphNode_t], 0]
  graph: Annotated[cudaGraph_t, 8]
  pDependencies: Annotated[c.POINTER[cudaGraphNode_t], 16]
  numDependencies: Annotated[size_t, 24]
cudaGraphAddEmptyNode_v10000_params: TypeAlias = struct_cudaGraphAddEmptyNode_v10000_params_st
@c.record
class struct_cudaGraphAddEventRecordNode_v11010_params_st(c.Struct):
  SIZE = 40
  pGraphNode: Annotated[c.POINTER[cudaGraphNode_t], 0]
  graph: Annotated[cudaGraph_t, 8]
  pDependencies: Annotated[c.POINTER[cudaGraphNode_t], 16]
  numDependencies: Annotated[size_t, 24]
  event: Annotated[cudaEvent_t, 32]
cudaGraphAddEventRecordNode_v11010_params: TypeAlias = struct_cudaGraphAddEventRecordNode_v11010_params_st
@c.record
class struct_cudaGraphEventRecordNodeGetEvent_v11010_params_st(c.Struct):
  SIZE = 16
  node: Annotated[cudaGraphNode_t, 0]
  event_out: Annotated[c.POINTER[cudaEvent_t], 8]
cudaGraphEventRecordNodeGetEvent_v11010_params: TypeAlias = struct_cudaGraphEventRecordNodeGetEvent_v11010_params_st
@c.record
class struct_cudaGraphEventRecordNodeSetEvent_v11010_params_st(c.Struct):
  SIZE = 16
  node: Annotated[cudaGraphNode_t, 0]
  event: Annotated[cudaEvent_t, 8]
cudaGraphEventRecordNodeSetEvent_v11010_params: TypeAlias = struct_cudaGraphEventRecordNodeSetEvent_v11010_params_st
@c.record
class struct_cudaGraphAddEventWaitNode_v11010_params_st(c.Struct):
  SIZE = 40
  pGraphNode: Annotated[c.POINTER[cudaGraphNode_t], 0]
  graph: Annotated[cudaGraph_t, 8]
  pDependencies: Annotated[c.POINTER[cudaGraphNode_t], 16]
  numDependencies: Annotated[size_t, 24]
  event: Annotated[cudaEvent_t, 32]
cudaGraphAddEventWaitNode_v11010_params: TypeAlias = struct_cudaGraphAddEventWaitNode_v11010_params_st
@c.record
class struct_cudaGraphEventWaitNodeGetEvent_v11010_params_st(c.Struct):
  SIZE = 16
  node: Annotated[cudaGraphNode_t, 0]
  event_out: Annotated[c.POINTER[cudaEvent_t], 8]
cudaGraphEventWaitNodeGetEvent_v11010_params: TypeAlias = struct_cudaGraphEventWaitNodeGetEvent_v11010_params_st
@c.record
class struct_cudaGraphEventWaitNodeSetEvent_v11010_params_st(c.Struct):
  SIZE = 16
  node: Annotated[cudaGraphNode_t, 0]
  event: Annotated[cudaEvent_t, 8]
cudaGraphEventWaitNodeSetEvent_v11010_params: TypeAlias = struct_cudaGraphEventWaitNodeSetEvent_v11010_params_st
# --- graph external-semaphore-node parameter records -------------------------
# Generated layout classes (field = Annotated[type, byte-offset], SIZE = total
# struct size).  NOTE(review): generated bindings — do not hand-edit offsets.
@c.record
class struct_cudaGraphAddExternalSemaphoresSignalNode_v11020_params_st(c.Struct):
  SIZE = 40
  pGraphNode: Annotated[c.POINTER[cudaGraphNode_t], 0]
  graph: Annotated[cudaGraph_t, 8]
  pDependencies: Annotated[c.POINTER[cudaGraphNode_t], 16]
  numDependencies: Annotated[size_t, 24]
  nodeParams: Annotated[c.POINTER[struct_cudaExternalSemaphoreSignalNodeParams], 32]
# Parallel arrays of semaphores and their signal params, numExtSems entries each.
@c.record
class struct_cudaExternalSemaphoreSignalNodeParams(c.Struct):
  SIZE = 24
  extSemArray: Annotated[c.POINTER[cudaExternalSemaphore_t], 0]
  paramsArray: Annotated[c.POINTER[struct_cudaExternalSemaphoreSignalParams], 8]
  numExtSems: Annotated[Annotated[int, ctypes.c_uint32], 16]
cudaGraphAddExternalSemaphoresSignalNode_v11020_params: TypeAlias = struct_cudaGraphAddExternalSemaphoresSignalNode_v11020_params_st
@c.record
class struct_cudaGraphExternalSemaphoresSignalNodeGetParams_v11020_params_st(c.Struct):
  SIZE = 16
  hNode: Annotated[cudaGraphNode_t, 0]
  params_out: Annotated[c.POINTER[struct_cudaExternalSemaphoreSignalNodeParams], 8]
cudaGraphExternalSemaphoresSignalNodeGetParams_v11020_params: TypeAlias = struct_cudaGraphExternalSemaphoresSignalNodeGetParams_v11020_params_st
@c.record
class struct_cudaGraphExternalSemaphoresSignalNodeSetParams_v11020_params_st(c.Struct):
  SIZE = 16
  hNode: Annotated[cudaGraphNode_t, 0]
  nodeParams: Annotated[c.POINTER[struct_cudaExternalSemaphoreSignalNodeParams], 8]
cudaGraphExternalSemaphoresSignalNodeSetParams_v11020_params: TypeAlias = struct_cudaGraphExternalSemaphoresSignalNodeSetParams_v11020_params_st
@c.record
class struct_cudaGraphAddExternalSemaphoresWaitNode_v11020_params_st(c.Struct):
  SIZE = 40
  pGraphNode: Annotated[c.POINTER[cudaGraphNode_t], 0]
  graph: Annotated[cudaGraph_t, 8]
  pDependencies: Annotated[c.POINTER[cudaGraphNode_t], 16]
  numDependencies: Annotated[size_t, 24]
  nodeParams: Annotated[c.POINTER[struct_cudaExternalSemaphoreWaitNodeParams], 32]
# Parallel arrays of semaphores and their wait params, numExtSems entries each.
@c.record
class struct_cudaExternalSemaphoreWaitNodeParams(c.Struct):
  SIZE = 24
  extSemArray: Annotated[c.POINTER[cudaExternalSemaphore_t], 0]
  paramsArray: Annotated[c.POINTER[struct_cudaExternalSemaphoreWaitParams], 8]
  numExtSems: Annotated[Annotated[int, ctypes.c_uint32], 16]
cudaGraphAddExternalSemaphoresWaitNode_v11020_params: TypeAlias = struct_cudaGraphAddExternalSemaphoresWaitNode_v11020_params_st
@c.record
class struct_cudaGraphExternalSemaphoresWaitNodeGetParams_v11020_params_st(c.Struct):
  SIZE = 16
  hNode: Annotated[cudaGraphNode_t, 0]
  params_out: Annotated[c.POINTER[struct_cudaExternalSemaphoreWaitNodeParams], 8]
cudaGraphExternalSemaphoresWaitNodeGetParams_v11020_params: TypeAlias = struct_cudaGraphExternalSemaphoresWaitNodeGetParams_v11020_params_st
@c.record
class struct_cudaGraphExternalSemaphoresWaitNodeSetParams_v11020_params_st(c.Struct):
  SIZE = 16
  hNode: Annotated[cudaGraphNode_t, 0]
  nodeParams: Annotated[c.POINTER[struct_cudaExternalSemaphoreWaitNodeParams], 8]
cudaGraphExternalSemaphoresWaitNodeSetParams_v11020_params: TypeAlias = struct_cudaGraphExternalSemaphoresWaitNodeSetParams_v11020_params_st
# --- Memory alloc/free graph-node parameter records (CUDA 11.4) -----------
# Records for cudaGraphAddMemAllocNode / cudaGraphAddMemFreeNode and their
# Get-params counterparts. struct_cudaMemAllocNodeParams embeds a full
# struct_cudaMemPoolProps at offset 0 (hence the 88-byte jump to the next
# field).
@c.record
class struct_cudaGraphAddMemAllocNode_v11040_params_st(c.Struct):
  SIZE = 40
  pGraphNode: Annotated[c.POINTER[cudaGraphNode_t], 0]
  graph: Annotated[cudaGraph_t, 8]
  pDependencies: Annotated[c.POINTER[cudaGraphNode_t], 16]
  numDependencies: Annotated[size_t, 24]
  nodeParams: Annotated[c.POINTER[struct_cudaMemAllocNodeParams], 32]
@c.record
class struct_cudaMemAllocNodeParams(c.Struct):
  SIZE = 120
  poolProps: Annotated[struct_cudaMemPoolProps, 0]
  accessDescs: Annotated[c.POINTER[struct_cudaMemAccessDesc], 88]
  accessDescCount: Annotated[size_t, 96]
  bytesize: Annotated[size_t, 104]
  dptr: Annotated[ctypes.c_void_p, 112]
cudaGraphAddMemAllocNode_v11040_params: TypeAlias = struct_cudaGraphAddMemAllocNode_v11040_params_st
@c.record
class struct_cudaGraphMemAllocNodeGetParams_v11040_params_st(c.Struct):
  SIZE = 16
  node: Annotated[cudaGraphNode_t, 0]
  params_out: Annotated[c.POINTER[struct_cudaMemAllocNodeParams], 8]
cudaGraphMemAllocNodeGetParams_v11040_params: TypeAlias = struct_cudaGraphMemAllocNodeGetParams_v11040_params_st
@c.record
class struct_cudaGraphAddMemFreeNode_v11040_params_st(c.Struct):
  SIZE = 40
  pGraphNode: Annotated[c.POINTER[cudaGraphNode_t], 0]
  graph: Annotated[cudaGraph_t, 8]
  pDependencies: Annotated[c.POINTER[cudaGraphNode_t], 16]
  numDependencies: Annotated[size_t, 24]
  dptr: Annotated[ctypes.c_void_p, 32]
cudaGraphAddMemFreeNode_v11040_params: TypeAlias = struct_cudaGraphAddMemFreeNode_v11040_params_st
@c.record
class struct_cudaGraphMemFreeNodeGetParams_v11040_params_st(c.Struct):
  SIZE = 16
  node: Annotated[cudaGraphNode_t, 0]
  dptr_out: Annotated[ctypes.c_void_p, 8]
cudaGraphMemFreeNodeGetParams_v11040_params: TypeAlias = struct_cudaGraphMemFreeNodeGetParams_v11040_params_st
# --- Per-device graph-memory trim/attribute records (CUDA 11.4) -----------
# cudaDeviceGraphMemTrim / cudaDeviceGet|SetGraphMemAttribute parameter
# records, plus the attribute-selector enum they use.
@c.record
class struct_cudaDeviceGraphMemTrim_v11040_params_st(c.Struct):
  SIZE = 4
  device: Annotated[Annotated[int, ctypes.c_int32], 0]
cudaDeviceGraphMemTrim_v11040_params: TypeAlias = struct_cudaDeviceGraphMemTrim_v11040_params_st
@c.record
class struct_cudaDeviceGetGraphMemAttribute_v11040_params_st(c.Struct):
  SIZE = 16
  device: Annotated[Annotated[int, ctypes.c_int32], 0]
  attr: Annotated[enum_cudaGraphMemAttributeType, 4]
  value: Annotated[ctypes.c_void_p, 8]
# Selector for which graph-memory statistic to query/set (current vs. high
# watermark, used vs. reserved).
class enum_cudaGraphMemAttributeType(Annotated[int, ctypes.c_uint32], c.Enum): pass
cudaGraphMemAttrUsedMemCurrent = enum_cudaGraphMemAttributeType.define('cudaGraphMemAttrUsedMemCurrent', 0)
cudaGraphMemAttrUsedMemHigh = enum_cudaGraphMemAttributeType.define('cudaGraphMemAttrUsedMemHigh', 1)
cudaGraphMemAttrReservedMemCurrent = enum_cudaGraphMemAttributeType.define('cudaGraphMemAttrReservedMemCurrent', 2)
cudaGraphMemAttrReservedMemHigh = enum_cudaGraphMemAttributeType.define('cudaGraphMemAttrReservedMemHigh', 3)
cudaDeviceGetGraphMemAttribute_v11040_params: TypeAlias = struct_cudaDeviceGetGraphMemAttribute_v11040_params_st
@c.record
class struct_cudaDeviceSetGraphMemAttribute_v11040_params_st(c.Struct):
  SIZE = 16
  device: Annotated[Annotated[int, ctypes.c_int32], 0]
  attr: Annotated[enum_cudaGraphMemAttributeType, 4]
  value: Annotated[ctypes.c_void_p, 8]
cudaDeviceSetGraphMemAttribute_v11040_params: TypeAlias = struct_cudaDeviceSetGraphMemAttribute_v11040_params_st
# --- Graph clone / node-introspection records ------------------------------
# cudaGraphClone, cudaGraphNodeFindInClone and cudaGraphNodeGetType
# parameter records, plus the node-type discriminator enum. Note the enum
# skips value 12 and defines Conditional=13, Count=14, matching the CUDA
# headers.
@c.record
class struct_cudaGraphClone_v10000_params_st(c.Struct):
  SIZE = 16
  pGraphClone: Annotated[c.POINTER[cudaGraph_t], 0]
  originalGraph: Annotated[cudaGraph_t, 8]
cudaGraphClone_v10000_params: TypeAlias = struct_cudaGraphClone_v10000_params_st
@c.record
class struct_cudaGraphNodeFindInClone_v10000_params_st(c.Struct):
  SIZE = 24
  pNode: Annotated[c.POINTER[cudaGraphNode_t], 0]
  originalNode: Annotated[cudaGraphNode_t, 8]
  clonedGraph: Annotated[cudaGraph_t, 16]
cudaGraphNodeFindInClone_v10000_params: TypeAlias = struct_cudaGraphNodeFindInClone_v10000_params_st
@c.record
class struct_cudaGraphNodeGetType_v10000_params_st(c.Struct):
  SIZE = 16
  node: Annotated[cudaGraphNode_t, 0]
  pType: Annotated[c.POINTER[enum_cudaGraphNodeType], 8]
class enum_cudaGraphNodeType(Annotated[int, ctypes.c_uint32], c.Enum): pass
cudaGraphNodeTypeKernel = enum_cudaGraphNodeType.define('cudaGraphNodeTypeKernel', 0)
cudaGraphNodeTypeMemcpy = enum_cudaGraphNodeType.define('cudaGraphNodeTypeMemcpy', 1)
cudaGraphNodeTypeMemset = enum_cudaGraphNodeType.define('cudaGraphNodeTypeMemset', 2)
cudaGraphNodeTypeHost = enum_cudaGraphNodeType.define('cudaGraphNodeTypeHost', 3)
cudaGraphNodeTypeGraph = enum_cudaGraphNodeType.define('cudaGraphNodeTypeGraph', 4)
cudaGraphNodeTypeEmpty = enum_cudaGraphNodeType.define('cudaGraphNodeTypeEmpty', 5)
cudaGraphNodeTypeWaitEvent = enum_cudaGraphNodeType.define('cudaGraphNodeTypeWaitEvent', 6)
cudaGraphNodeTypeEventRecord = enum_cudaGraphNodeType.define('cudaGraphNodeTypeEventRecord', 7)
cudaGraphNodeTypeExtSemaphoreSignal = enum_cudaGraphNodeType.define('cudaGraphNodeTypeExtSemaphoreSignal', 8)
cudaGraphNodeTypeExtSemaphoreWait = enum_cudaGraphNodeType.define('cudaGraphNodeTypeExtSemaphoreWait', 9)
cudaGraphNodeTypeMemAlloc = enum_cudaGraphNodeType.define('cudaGraphNodeTypeMemAlloc', 10)
cudaGraphNodeTypeMemFree = enum_cudaGraphNodeType.define('cudaGraphNodeTypeMemFree', 11)
cudaGraphNodeTypeConditional = enum_cudaGraphNodeType.define('cudaGraphNodeTypeConditional', 13)
cudaGraphNodeTypeCount = enum_cudaGraphNodeType.define('cudaGraphNodeTypeCount', 14)
cudaGraphNodeGetType_v10000_params: TypeAlias = struct_cudaGraphNodeGetType_v10000_params_st
# --- Graph topology query records -----------------------------------------
# cudaGraphGetNodes/GetRootNodes/GetEdges and the node dependency queries.
# `_from` is the C field `from`, renamed because `from` is a Python keyword.
# The `_v2_v12030` variants add a cudaGraphEdgeData pointer (CUDA 12.3
# edge-data API) alongside the original fields.
@c.record
class struct_cudaGraphGetNodes_v10000_params_st(c.Struct):
  SIZE = 24
  graph: Annotated[cudaGraph_t, 0]
  nodes: Annotated[c.POINTER[cudaGraphNode_t], 8]
  numNodes: Annotated[c.POINTER[size_t], 16]
cudaGraphGetNodes_v10000_params: TypeAlias = struct_cudaGraphGetNodes_v10000_params_st
@c.record
class struct_cudaGraphGetRootNodes_v10000_params_st(c.Struct):
  SIZE = 24
  graph: Annotated[cudaGraph_t, 0]
  pRootNodes: Annotated[c.POINTER[cudaGraphNode_t], 8]
  pNumRootNodes: Annotated[c.POINTER[size_t], 16]
cudaGraphGetRootNodes_v10000_params: TypeAlias = struct_cudaGraphGetRootNodes_v10000_params_st
@c.record
class struct_cudaGraphGetEdges_v10000_params_st(c.Struct):
  SIZE = 32
  graph: Annotated[cudaGraph_t, 0]
  _from: Annotated[c.POINTER[cudaGraphNode_t], 8]
  to: Annotated[c.POINTER[cudaGraphNode_t], 16]
  numEdges: Annotated[c.POINTER[size_t], 24]
cudaGraphGetEdges_v10000_params: TypeAlias = struct_cudaGraphGetEdges_v10000_params_st
@c.record
class struct_cudaGraphGetEdges_v2_v12030_params_st(c.Struct):
  SIZE = 40
  graph: Annotated[cudaGraph_t, 0]
  _from: Annotated[c.POINTER[cudaGraphNode_t], 8]
  to: Annotated[c.POINTER[cudaGraphNode_t], 16]
  edgeData: Annotated[c.POINTER[cudaGraphEdgeData], 24]
  numEdges: Annotated[c.POINTER[size_t], 32]
cudaGraphGetEdges_v2_v12030_params: TypeAlias = struct_cudaGraphGetEdges_v2_v12030_params_st
@c.record
class struct_cudaGraphNodeGetDependencies_v10000_params_st(c.Struct):
  SIZE = 24
  node: Annotated[cudaGraphNode_t, 0]
  pDependencies: Annotated[c.POINTER[cudaGraphNode_t], 8]
  pNumDependencies: Annotated[c.POINTER[size_t], 16]
cudaGraphNodeGetDependencies_v10000_params: TypeAlias = struct_cudaGraphNodeGetDependencies_v10000_params_st
@c.record
class struct_cudaGraphNodeGetDependencies_v2_v12030_params_st(c.Struct):
  SIZE = 32
  node: Annotated[cudaGraphNode_t, 0]
  pDependencies: Annotated[c.POINTER[cudaGraphNode_t], 8]
  edgeData: Annotated[c.POINTER[cudaGraphEdgeData], 16]
  pNumDependencies: Annotated[c.POINTER[size_t], 24]
cudaGraphNodeGetDependencies_v2_v12030_params: TypeAlias = struct_cudaGraphNodeGetDependencies_v2_v12030_params_st
@c.record
class struct_cudaGraphNodeGetDependentNodes_v10000_params_st(c.Struct):
  SIZE = 24
  node: Annotated[cudaGraphNode_t, 0]
  pDependentNodes: Annotated[c.POINTER[cudaGraphNode_t], 8]
  pNumDependentNodes: Annotated[c.POINTER[size_t], 16]
cudaGraphNodeGetDependentNodes_v10000_params: TypeAlias = struct_cudaGraphNodeGetDependentNodes_v10000_params_st
@c.record
class struct_cudaGraphNodeGetDependentNodes_v2_v12030_params_st(c.Struct):
  SIZE = 32
  node: Annotated[cudaGraphNode_t, 0]
  pDependentNodes: Annotated[c.POINTER[cudaGraphNode_t], 8]
  edgeData: Annotated[c.POINTER[cudaGraphEdgeData], 16]
  pNumDependentNodes: Annotated[c.POINTER[size_t], 24]
cudaGraphNodeGetDependentNodes_v2_v12030_params: TypeAlias = struct_cudaGraphNodeGetDependentNodes_v2_v12030_params_st
# --- Dependency add/remove and node destruction records -------------------
# cudaGraphAdd/RemoveDependencies (v1 plus the CUDA 12.3 `_v2` edge-data
# variants) and cudaGraphDestroyNode. `_from` again stands in for the C
# field `from`.
@c.record
class struct_cudaGraphAddDependencies_v10000_params_st(c.Struct):
  SIZE = 32
  graph: Annotated[cudaGraph_t, 0]
  _from: Annotated[c.POINTER[cudaGraphNode_t], 8]
  to: Annotated[c.POINTER[cudaGraphNode_t], 16]
  numDependencies: Annotated[size_t, 24]
cudaGraphAddDependencies_v10000_params: TypeAlias = struct_cudaGraphAddDependencies_v10000_params_st
@c.record
class struct_cudaGraphAddDependencies_v2_v12030_params_st(c.Struct):
  SIZE = 40
  graph: Annotated[cudaGraph_t, 0]
  _from: Annotated[c.POINTER[cudaGraphNode_t], 8]
  to: Annotated[c.POINTER[cudaGraphNode_t], 16]
  edgeData: Annotated[c.POINTER[cudaGraphEdgeData], 24]
  numDependencies: Annotated[size_t, 32]
cudaGraphAddDependencies_v2_v12030_params: TypeAlias = struct_cudaGraphAddDependencies_v2_v12030_params_st
@c.record
class struct_cudaGraphRemoveDependencies_v10000_params_st(c.Struct):
  SIZE = 32
  graph: Annotated[cudaGraph_t, 0]
  _from: Annotated[c.POINTER[cudaGraphNode_t], 8]
  to: Annotated[c.POINTER[cudaGraphNode_t], 16]
  numDependencies: Annotated[size_t, 24]
cudaGraphRemoveDependencies_v10000_params: TypeAlias = struct_cudaGraphRemoveDependencies_v10000_params_st
@c.record
class struct_cudaGraphRemoveDependencies_v2_v12030_params_st(c.Struct):
  SIZE = 40
  graph: Annotated[cudaGraph_t, 0]
  _from: Annotated[c.POINTER[cudaGraphNode_t], 8]
  to: Annotated[c.POINTER[cudaGraphNode_t], 16]
  edgeData: Annotated[c.POINTER[cudaGraphEdgeData], 24]
  numDependencies: Annotated[size_t, 32]
cudaGraphRemoveDependencies_v2_v12030_params: TypeAlias = struct_cudaGraphRemoveDependencies_v2_v12030_params_st
@c.record
class struct_cudaGraphDestroyNode_v10000_params_st(c.Struct):
  SIZE = 8
  node: Annotated[cudaGraphNode_t, 0]
cudaGraphDestroyNode_v10000_params: TypeAlias = struct_cudaGraphDestroyNode_v10000_params_st
# --- Graph instantiation records ------------------------------------------
# cudaGraphInstantiate (12.0 signature), InstantiateWithFlags (11.4) and
# InstantiateWithParams, plus cudaGraphInstantiateParams (uploadStream /
# error-node out-params) and the instantiation result enum.
@c.record
class struct_cudaGraphInstantiate_v12000_params_st(c.Struct):
  SIZE = 24
  pGraphExec: Annotated[c.POINTER[cudaGraphExec_t], 0]
  graph: Annotated[cudaGraph_t, 8]
  flags: Annotated[Annotated[int, ctypes.c_uint64], 16]
# Opaque executable-graph handle (pointer to the driver's CUgraphExec).
cudaGraphExec_t: TypeAlias = c.POINTER[struct_CUgraphExec_st]
cudaGraphInstantiate_v12000_params: TypeAlias = struct_cudaGraphInstantiate_v12000_params_st
@c.record
class struct_cudaGraphInstantiateWithFlags_v11040_params_st(c.Struct):
  SIZE = 24
  pGraphExec: Annotated[c.POINTER[cudaGraphExec_t], 0]
  graph: Annotated[cudaGraph_t, 8]
  flags: Annotated[Annotated[int, ctypes.c_uint64], 16]
cudaGraphInstantiateWithFlags_v11040_params: TypeAlias = struct_cudaGraphInstantiateWithFlags_v11040_params_st
@c.record
class struct_cudaGraphInstantiateWithParams_ptsz_v12000_params_st(c.Struct):
  SIZE = 24
  pGraphExec: Annotated[c.POINTER[cudaGraphExec_t], 0]
  graph: Annotated[cudaGraph_t, 8]
  instantiateParams: Annotated[c.POINTER[cudaGraphInstantiateParams], 16]
@c.record
class struct_cudaGraphInstantiateParams_st(c.Struct):
  SIZE = 32
  flags: Annotated[Annotated[int, ctypes.c_uint64], 0]
  uploadStream: Annotated[cudaStream_t, 8]
  errNode_out: Annotated[cudaGraphNode_t, 16]
  result_out: Annotated[cudaGraphInstantiateResult, 24]
cudaGraphInstantiateParams: TypeAlias = struct_cudaGraphInstantiateParams_st
class enum_cudaGraphInstantiateResult(Annotated[int, ctypes.c_uint32], c.Enum): pass
cudaGraphInstantiateSuccess = enum_cudaGraphInstantiateResult.define('cudaGraphInstantiateSuccess', 0)
cudaGraphInstantiateError = enum_cudaGraphInstantiateResult.define('cudaGraphInstantiateError', 1)
cudaGraphInstantiateInvalidStructure = enum_cudaGraphInstantiateResult.define('cudaGraphInstantiateInvalidStructure', 2)
cudaGraphInstantiateNodeOperationNotSupported = enum_cudaGraphInstantiateResult.define('cudaGraphInstantiateNodeOperationNotSupported', 3)
cudaGraphInstantiateMultipleDevicesNotSupported = enum_cudaGraphInstantiateResult.define('cudaGraphInstantiateMultipleDevicesNotSupported', 4)
cudaGraphInstantiateConditionalHandleUnused = enum_cudaGraphInstantiateResult.define('cudaGraphInstantiateConditionalHandleUnused', 5)
cudaGraphInstantiateResult: TypeAlias = enum_cudaGraphInstantiateResult
cudaGraphInstantiateWithParams_ptsz_v12000_params: TypeAlias = struct_cudaGraphInstantiateWithParams_ptsz_v12000_params_st
# --- Executable-graph node update records ---------------------------------
# cudaGraphExecGetFlags plus the cudaGraphExec*NodeSetParams family:
# update kernel/memcpy/memset/host/child-graph node parameters on an
# already-instantiated graph. The ToSymbol/FromSymbol/1D memcpy variants
# inline the copy arguments instead of a cudaMemcpy3DParms pointer.
@c.record
class struct_cudaGraphExecGetFlags_v12000_params_st(c.Struct):
  SIZE = 16
  graphExec: Annotated[cudaGraphExec_t, 0]
  flags: Annotated[c.POINTER[Annotated[int, ctypes.c_uint64]], 8]
cudaGraphExecGetFlags_v12000_params: TypeAlias = struct_cudaGraphExecGetFlags_v12000_params_st
@c.record
class struct_cudaGraphExecKernelNodeSetParams_v10010_params_st(c.Struct):
  SIZE = 24
  hGraphExec: Annotated[cudaGraphExec_t, 0]
  node: Annotated[cudaGraphNode_t, 8]
  pNodeParams: Annotated[c.POINTER[struct_cudaKernelNodeParams], 16]
cudaGraphExecKernelNodeSetParams_v10010_params: TypeAlias = struct_cudaGraphExecKernelNodeSetParams_v10010_params_st
@c.record
class struct_cudaGraphExecMemcpyNodeSetParams_v10020_params_st(c.Struct):
  SIZE = 24
  hGraphExec: Annotated[cudaGraphExec_t, 0]
  node: Annotated[cudaGraphNode_t, 8]
  pNodeParams: Annotated[c.POINTER[struct_cudaMemcpy3DParms], 16]
cudaGraphExecMemcpyNodeSetParams_v10020_params: TypeAlias = struct_cudaGraphExecMemcpyNodeSetParams_v10020_params_st
@c.record
class struct_cudaGraphExecMemcpyNodeSetParamsToSymbol_v11010_params_st(c.Struct):
  SIZE = 56
  hGraphExec: Annotated[cudaGraphExec_t, 0]
  node: Annotated[cudaGraphNode_t, 8]
  symbol: Annotated[ctypes.c_void_p, 16]
  src: Annotated[ctypes.c_void_p, 24]
  count: Annotated[size_t, 32]
  offset: Annotated[size_t, 40]
  kind: Annotated[enum_cudaMemcpyKind, 48]
cudaGraphExecMemcpyNodeSetParamsToSymbol_v11010_params: TypeAlias = struct_cudaGraphExecMemcpyNodeSetParamsToSymbol_v11010_params_st
@c.record
class struct_cudaGraphExecMemcpyNodeSetParamsFromSymbol_v11010_params_st(c.Struct):
  SIZE = 56
  hGraphExec: Annotated[cudaGraphExec_t, 0]
  node: Annotated[cudaGraphNode_t, 8]
  dst: Annotated[ctypes.c_void_p, 16]
  symbol: Annotated[ctypes.c_void_p, 24]
  count: Annotated[size_t, 32]
  offset: Annotated[size_t, 40]
  kind: Annotated[enum_cudaMemcpyKind, 48]
cudaGraphExecMemcpyNodeSetParamsFromSymbol_v11010_params: TypeAlias = struct_cudaGraphExecMemcpyNodeSetParamsFromSymbol_v11010_params_st
@c.record
class struct_cudaGraphExecMemcpyNodeSetParams1D_v11010_params_st(c.Struct):
  SIZE = 48
  hGraphExec: Annotated[cudaGraphExec_t, 0]
  node: Annotated[cudaGraphNode_t, 8]
  dst: Annotated[ctypes.c_void_p, 16]
  src: Annotated[ctypes.c_void_p, 24]
  count: Annotated[size_t, 32]
  kind: Annotated[enum_cudaMemcpyKind, 40]
cudaGraphExecMemcpyNodeSetParams1D_v11010_params: TypeAlias = struct_cudaGraphExecMemcpyNodeSetParams1D_v11010_params_st
@c.record
class struct_cudaGraphExecMemsetNodeSetParams_v10020_params_st(c.Struct):
  SIZE = 24
  hGraphExec: Annotated[cudaGraphExec_t, 0]
  node: Annotated[cudaGraphNode_t, 8]
  pNodeParams: Annotated[c.POINTER[struct_cudaMemsetParams], 16]
cudaGraphExecMemsetNodeSetParams_v10020_params: TypeAlias = struct_cudaGraphExecMemsetNodeSetParams_v10020_params_st
@c.record
class struct_cudaGraphExecHostNodeSetParams_v10020_params_st(c.Struct):
  SIZE = 24
  hGraphExec: Annotated[cudaGraphExec_t, 0]
  node: Annotated[cudaGraphNode_t, 8]
  pNodeParams: Annotated[c.POINTER[struct_cudaHostNodeParams], 16]
cudaGraphExecHostNodeSetParams_v10020_params: TypeAlias = struct_cudaGraphExecHostNodeSetParams_v10020_params_st
@c.record
class struct_cudaGraphExecChildGraphNodeSetParams_v11010_params_st(c.Struct):
  SIZE = 24
  hGraphExec: Annotated[cudaGraphExec_t, 0]
  node: Annotated[cudaGraphNode_t, 8]
  childGraph: Annotated[cudaGraph_t, 16]
cudaGraphExecChildGraphNodeSetParams_v11010_params: TypeAlias = struct_cudaGraphExecChildGraphNodeSetParams_v11010_params_st
# --- Executable-graph event / semaphore / enable records ------------------
# Set the event or external-semaphore parameters of nodes in an
# instantiated graph, and toggle/query per-node enablement (CUDA 11.6).
@c.record
class struct_cudaGraphExecEventRecordNodeSetEvent_v11010_params_st(c.Struct):
  SIZE = 24
  hGraphExec: Annotated[cudaGraphExec_t, 0]
  hNode: Annotated[cudaGraphNode_t, 8]
  event: Annotated[cudaEvent_t, 16]
cudaGraphExecEventRecordNodeSetEvent_v11010_params: TypeAlias = struct_cudaGraphExecEventRecordNodeSetEvent_v11010_params_st
@c.record
class struct_cudaGraphExecEventWaitNodeSetEvent_v11010_params_st(c.Struct):
  SIZE = 24
  hGraphExec: Annotated[cudaGraphExec_t, 0]
  hNode: Annotated[cudaGraphNode_t, 8]
  event: Annotated[cudaEvent_t, 16]
cudaGraphExecEventWaitNodeSetEvent_v11010_params: TypeAlias = struct_cudaGraphExecEventWaitNodeSetEvent_v11010_params_st
@c.record
class struct_cudaGraphExecExternalSemaphoresSignalNodeSetParams_v11020_params_st(c.Struct):
  SIZE = 24
  hGraphExec: Annotated[cudaGraphExec_t, 0]
  hNode: Annotated[cudaGraphNode_t, 8]
  nodeParams: Annotated[c.POINTER[struct_cudaExternalSemaphoreSignalNodeParams], 16]
cudaGraphExecExternalSemaphoresSignalNodeSetParams_v11020_params: TypeAlias = struct_cudaGraphExecExternalSemaphoresSignalNodeSetParams_v11020_params_st
@c.record
class struct_cudaGraphExecExternalSemaphoresWaitNodeSetParams_v11020_params_st(c.Struct):
  SIZE = 24
  hGraphExec: Annotated[cudaGraphExec_t, 0]
  hNode: Annotated[cudaGraphNode_t, 8]
  nodeParams: Annotated[c.POINTER[struct_cudaExternalSemaphoreWaitNodeParams], 16]
cudaGraphExecExternalSemaphoresWaitNodeSetParams_v11020_params: TypeAlias = struct_cudaGraphExecExternalSemaphoresWaitNodeSetParams_v11020_params_st
@c.record
class struct_cudaGraphNodeSetEnabled_v11060_params_st(c.Struct):
  SIZE = 24
  hGraphExec: Annotated[cudaGraphExec_t, 0]
  hNode: Annotated[cudaGraphNode_t, 8]
  isEnabled: Annotated[Annotated[int, ctypes.c_uint32], 16]
cudaGraphNodeSetEnabled_v11060_params: TypeAlias = struct_cudaGraphNodeSetEnabled_v11060_params_st
@c.record
class struct_cudaGraphNodeGetEnabled_v11060_params_st(c.Struct):
  SIZE = 24
  hGraphExec: Annotated[cudaGraphExec_t, 0]
  hNode: Annotated[cudaGraphNode_t, 8]
  isEnabled: Annotated[c.POINTER[Annotated[int, ctypes.c_uint32]], 16]
cudaGraphNodeGetEnabled_v11060_params: TypeAlias = struct_cudaGraphNodeGetEnabled_v11060_params_st
# --- cudaGraphExecUpdate records ------------------------------------------
# In-place update of an instantiated graph from a modified source graph,
# with the result-info struct (which node failed and why) and the update
# result-code enum.
@c.record
class struct_cudaGraphExecUpdate_v10020_params_st(c.Struct):
  SIZE = 24
  hGraphExec: Annotated[cudaGraphExec_t, 0]
  hGraph: Annotated[cudaGraph_t, 8]
  resultInfo: Annotated[c.POINTER[cudaGraphExecUpdateResultInfo], 16]
@c.record
class struct_cudaGraphExecUpdateResultInfo_st(c.Struct):
  SIZE = 24
  result: Annotated[enum_cudaGraphExecUpdateResult, 0]
  errorNode: Annotated[cudaGraphNode_t, 8]
  errorFromNode: Annotated[cudaGraphNode_t, 16]
cudaGraphExecUpdateResultInfo: TypeAlias = struct_cudaGraphExecUpdateResultInfo_st
class enum_cudaGraphExecUpdateResult(Annotated[int, ctypes.c_uint32], c.Enum): pass
cudaGraphExecUpdateSuccess = enum_cudaGraphExecUpdateResult.define('cudaGraphExecUpdateSuccess', 0)
cudaGraphExecUpdateError = enum_cudaGraphExecUpdateResult.define('cudaGraphExecUpdateError', 1)
cudaGraphExecUpdateErrorTopologyChanged = enum_cudaGraphExecUpdateResult.define('cudaGraphExecUpdateErrorTopologyChanged', 2)
cudaGraphExecUpdateErrorNodeTypeChanged = enum_cudaGraphExecUpdateResult.define('cudaGraphExecUpdateErrorNodeTypeChanged', 3)
cudaGraphExecUpdateErrorFunctionChanged = enum_cudaGraphExecUpdateResult.define('cudaGraphExecUpdateErrorFunctionChanged', 4)
cudaGraphExecUpdateErrorParametersChanged = enum_cudaGraphExecUpdateResult.define('cudaGraphExecUpdateErrorParametersChanged', 5)
cudaGraphExecUpdateErrorNotSupported = enum_cudaGraphExecUpdateResult.define('cudaGraphExecUpdateErrorNotSupported', 6)
cudaGraphExecUpdateErrorUnsupportedFunctionChange = enum_cudaGraphExecUpdateResult.define('cudaGraphExecUpdateErrorUnsupportedFunctionChange', 7)
cudaGraphExecUpdateErrorAttributesChanged = enum_cudaGraphExecUpdateResult.define('cudaGraphExecUpdateErrorAttributesChanged', 8)
cudaGraphExecUpdate_v10020_params: TypeAlias = struct_cudaGraphExecUpdate_v10020_params_st
# --- Graph launch / destruction / debug / user-object records -------------
# Upload & launch (`_ptsz` = per-thread-default-stream variant), graph and
# exec destruction, DOT-file debug dump, and the CUDA 11.3 user-object
# refcounting API that ties user resources to a graph's lifetime.
@c.record
class struct_cudaGraphUpload_ptsz_v10000_params_st(c.Struct):
  SIZE = 16
  graphExec: Annotated[cudaGraphExec_t, 0]
  stream: Annotated[cudaStream_t, 8]
cudaGraphUpload_ptsz_v10000_params: TypeAlias = struct_cudaGraphUpload_ptsz_v10000_params_st
@c.record
class struct_cudaGraphLaunch_ptsz_v10000_params_st(c.Struct):
  SIZE = 16
  graphExec: Annotated[cudaGraphExec_t, 0]
  stream: Annotated[cudaStream_t, 8]
cudaGraphLaunch_ptsz_v10000_params: TypeAlias = struct_cudaGraphLaunch_ptsz_v10000_params_st
@c.record
class struct_cudaGraphExecDestroy_v10000_params_st(c.Struct):
  SIZE = 8
  graphExec: Annotated[cudaGraphExec_t, 0]
cudaGraphExecDestroy_v10000_params: TypeAlias = struct_cudaGraphExecDestroy_v10000_params_st
@c.record
class struct_cudaGraphDestroy_v10000_params_st(c.Struct):
  SIZE = 8
  graph: Annotated[cudaGraph_t, 0]
cudaGraphDestroy_v10000_params: TypeAlias = struct_cudaGraphDestroy_v10000_params_st
@c.record
class struct_cudaGraphDebugDotPrint_v11030_params_st(c.Struct):
  SIZE = 24
  graph: Annotated[cudaGraph_t, 0]
  path: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 8]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 16]
cudaGraphDebugDotPrint_v11030_params: TypeAlias = struct_cudaGraphDebugDotPrint_v11030_params_st
@c.record
class struct_cudaUserObjectCreate_v11030_params_st(c.Struct):
  SIZE = 32
  object_out: Annotated[c.POINTER[cudaUserObject_t], 0]
  ptr: Annotated[ctypes.c_void_p, 8]
  destroy: Annotated[cudaHostFn_t, 16]
  initialRefcount: Annotated[Annotated[int, ctypes.c_uint32], 24]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 28]
# Opaque user-object handle (pointer to the driver's CUuserObject).
cudaUserObject_t: TypeAlias = c.POINTER[struct_CUuserObject_st]
cudaUserObjectCreate_v11030_params: TypeAlias = struct_cudaUserObjectCreate_v11030_params_st
@c.record
class struct_cudaUserObjectRetain_v11030_params_st(c.Struct):
  SIZE = 16
  object: Annotated[cudaUserObject_t, 0]
  count: Annotated[Annotated[int, ctypes.c_uint32], 8]
cudaUserObjectRetain_v11030_params: TypeAlias = struct_cudaUserObjectRetain_v11030_params_st
@c.record
class struct_cudaUserObjectRelease_v11030_params_st(c.Struct):
  SIZE = 16
  object: Annotated[cudaUserObject_t, 0]
  count: Annotated[Annotated[int, ctypes.c_uint32], 8]
cudaUserObjectRelease_v11030_params: TypeAlias = struct_cudaUserObjectRelease_v11030_params_st
@c.record
class struct_cudaGraphRetainUserObject_v11030_params_st(c.Struct):
  SIZE = 24
  graph: Annotated[cudaGraph_t, 0]
  object: Annotated[cudaUserObject_t, 8]
  count: Annotated[Annotated[int, ctypes.c_uint32], 16]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 20]
cudaGraphRetainUserObject_v11030_params: TypeAlias = struct_cudaGraphRetainUserObject_v11030_params_st
@c.record
class struct_cudaGraphReleaseUserObject_v11030_params_st(c.Struct):
  SIZE = 24
  graph: Annotated[cudaGraph_t, 0]
  object: Annotated[cudaUserObject_t, 8]
  count: Annotated[Annotated[int, ctypes.c_uint32], 16]
cudaGraphReleaseUserObject_v11030_params: TypeAlias = struct_cudaGraphReleaseUserObject_v11030_params_st
# --- Unified add-node API (CUDA 12.2+) ------------------------------------
# cudaGraphAddNode takes a single tagged cudaGraphNodeParams record instead
# of one entry point per node type. In struct_cudaGraphNodeParams the
# per-type fields (kernel, memcpy, memset, ...) all carry offset 16 and
# overlay reserved1 — this mirrors the anonymous union in the C struct,
# discriminated by `type`; which member is valid depends on `type`.
@c.record
class struct_cudaGraphAddNode_v12020_params_st(c.Struct):
  SIZE = 40
  pGraphNode: Annotated[c.POINTER[cudaGraphNode_t], 0]
  graph: Annotated[cudaGraph_t, 8]
  pDependencies: Annotated[c.POINTER[cudaGraphNode_t], 16]
  numDependencies: Annotated[size_t, 24]
  nodeParams: Annotated[c.POINTER[struct_cudaGraphNodeParams], 32]
@c.record
class struct_cudaGraphNodeParams(c.Struct):
  SIZE = 256
  type: Annotated[enum_cudaGraphNodeType, 0]
  reserved0: Annotated[c.Array[Annotated[int, ctypes.c_int32], Literal[3]], 4]
  reserved1: Annotated[c.Array[Annotated[int, ctypes.c_int64], Literal[29]], 16]
  kernel: Annotated[struct_cudaKernelNodeParamsV2, 16]
  memcpy: Annotated[struct_cudaMemcpyNodeParams, 16]
  memset: Annotated[struct_cudaMemsetParamsV2, 16]
  host: Annotated[struct_cudaHostNodeParamsV2, 16]
  graph: Annotated[struct_cudaChildGraphNodeParams, 16]
  eventWait: Annotated[struct_cudaEventWaitNodeParams, 16]
  eventRecord: Annotated[struct_cudaEventRecordNodeParams, 16]
  extSemSignal: Annotated[struct_cudaExternalSemaphoreSignalNodeParamsV2, 16]
  extSemWait: Annotated[struct_cudaExternalSemaphoreWaitNodeParamsV2, 16]
  alloc: Annotated[struct_cudaMemAllocNodeParamsV2, 16]
  free: Annotated[struct_cudaMemFreeNodeParams, 16]
  conditional: Annotated[struct_cudaConditionalNodeParams, 16]
  reserved2: Annotated[Annotated[int, ctypes.c_int64], 248]
# Per-node-type V2 parameter payloads for the union above.
@c.record
class struct_cudaKernelNodeParamsV2(c.Struct):
  SIZE = 56
  func: Annotated[ctypes.c_void_p, 0]
  gridDim: Annotated[dim3, 8]
  blockDim: Annotated[dim3, 20]
  sharedMemBytes: Annotated[Annotated[int, ctypes.c_uint32], 32]
  kernelParams: Annotated[c.POINTER[ctypes.c_void_p], 40]
  extra: Annotated[c.POINTER[ctypes.c_void_p], 48]
@c.record
class struct_cudaMemcpyNodeParams(c.Struct):
  SIZE = 176
  flags: Annotated[Annotated[int, ctypes.c_int32], 0]
  reserved: Annotated[c.Array[Annotated[int, ctypes.c_int32], Literal[3]], 4]
  copyParams: Annotated[struct_cudaMemcpy3DParms, 16]
@c.record
class struct_cudaMemsetParamsV2(c.Struct):
  SIZE = 40
  dst: Annotated[ctypes.c_void_p, 0]
  pitch: Annotated[size_t, 8]
  value: Annotated[Annotated[int, ctypes.c_uint32], 16]
  elementSize: Annotated[Annotated[int, ctypes.c_uint32], 20]
  width: Annotated[size_t, 24]
  height: Annotated[size_t, 32]
@c.record
class struct_cudaHostNodeParamsV2(c.Struct):
  SIZE = 16
  fn: Annotated[cudaHostFn_t, 0]
  userData: Annotated[ctypes.c_void_p, 8]
@c.record
class struct_cudaChildGraphNodeParams(c.Struct):
  SIZE = 8
  graph: Annotated[cudaGraph_t, 0]
@c.record
class struct_cudaEventWaitNodeParams(c.Struct):
  SIZE = 8
  event: Annotated[cudaEvent_t, 0]
@c.record
class struct_cudaEventRecordNodeParams(c.Struct):
  SIZE = 8
  event: Annotated[cudaEvent_t, 0]
@c.record
class struct_cudaExternalSemaphoreSignalNodeParamsV2(c.Struct):
  SIZE = 24
  extSemArray: Annotated[c.POINTER[cudaExternalSemaphore_t], 0]
  paramsArray: Annotated[c.POINTER[struct_cudaExternalSemaphoreSignalParams], 8]
  numExtSems: Annotated[Annotated[int, ctypes.c_uint32], 16]
@c.record
class struct_cudaExternalSemaphoreWaitNodeParamsV2(c.Struct):
  SIZE = 24
  extSemArray: Annotated[c.POINTER[cudaExternalSemaphore_t], 0]
  paramsArray: Annotated[c.POINTER[struct_cudaExternalSemaphoreWaitParams], 8]
  numExtSems: Annotated[Annotated[int, ctypes.c_uint32], 16]
@c.record
class struct_cudaMemAllocNodeParamsV2(c.Struct):
  SIZE = 120
  poolProps: Annotated[struct_cudaMemPoolProps, 0]
  accessDescs: Annotated[c.POINTER[struct_cudaMemAccessDesc], 88]
  accessDescCount: Annotated[size_t, 96]
  bytesize: Annotated[size_t, 104]
  dptr: Annotated[ctypes.c_void_p, 112]
@c.record
class struct_cudaMemFreeNodeParams(c.Struct):
  SIZE = 8
  dptr: Annotated[ctypes.c_void_p, 0]
@c.record
class struct_cudaConditionalNodeParams(c.Struct):
  SIZE = 24
  handle: Annotated[cudaGraphConditionalHandle, 0]
  type: Annotated[enum_cudaGraphConditionalNodeType, 8]
  size: Annotated[Annotated[int, ctypes.c_uint32], 12]
  phGraph_out: Annotated[c.POINTER[cudaGraph_t], 16]
# Conditional-node handle is a plain 64-bit value, not an opaque pointer.
cudaGraphConditionalHandle: TypeAlias = Annotated[int, ctypes.c_uint64]
class enum_cudaGraphConditionalNodeType(Annotated[int, ctypes.c_uint32], c.Enum): pass
cudaGraphCondTypeIf = enum_cudaGraphConditionalNodeType.define('cudaGraphCondTypeIf', 0)
cudaGraphCondTypeWhile = enum_cudaGraphConditionalNodeType.define('cudaGraphCondTypeWhile', 1)
cudaGraphCondTypeSwitch = enum_cudaGraphConditionalNodeType.define('cudaGraphCondTypeSwitch', 2)
cudaGraphAddNode_v12020_params: TypeAlias = struct_cudaGraphAddNode_v12020_params_st
# --- Unified node-params setters and conditional handles ------------------
# cudaGraphAddNode v2 (adds edge-data, CUDA 12.3), generic node/exec-node
# set-params, and cudaGraphConditionalHandleCreate.
@c.record
class struct_cudaGraphAddNode_v2_v12030_params_st(c.Struct):
  SIZE = 48
  pGraphNode: Annotated[c.POINTER[cudaGraphNode_t], 0]
  graph: Annotated[cudaGraph_t, 8]
  pDependencies: Annotated[c.POINTER[cudaGraphNode_t], 16]
  dependencyData: Annotated[c.POINTER[cudaGraphEdgeData], 24]
  numDependencies: Annotated[size_t, 32]
  nodeParams: Annotated[c.POINTER[struct_cudaGraphNodeParams], 40]
cudaGraphAddNode_v2_v12030_params: TypeAlias = struct_cudaGraphAddNode_v2_v12030_params_st
@c.record
class struct_cudaGraphNodeSetParams_v12020_params_st(c.Struct):
  SIZE = 16
  node: Annotated[cudaGraphNode_t, 0]
  nodeParams: Annotated[c.POINTER[struct_cudaGraphNodeParams], 8]
cudaGraphNodeSetParams_v12020_params: TypeAlias = struct_cudaGraphNodeSetParams_v12020_params_st
@c.record
class struct_cudaGraphExecNodeSetParams_v12020_params_st(c.Struct):
  SIZE = 24
  graphExec: Annotated[cudaGraphExec_t, 0]
  node: Annotated[cudaGraphNode_t, 8]
  nodeParams: Annotated[c.POINTER[struct_cudaGraphNodeParams], 16]
cudaGraphExecNodeSetParams_v12020_params: TypeAlias = struct_cudaGraphExecNodeSetParams_v12020_params_st
@c.record
class struct_cudaGraphConditionalHandleCreate_v12030_params_st(c.Struct):
  SIZE = 24
  pHandle_out: Annotated[c.POINTER[cudaGraphConditionalHandle], 0]
  graph: Annotated[cudaGraph_t, 8]
  defaultLaunchValue: Annotated[Annotated[int, ctypes.c_uint32], 16]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 20]
cudaGraphConditionalHandleCreate_v12030_params: TypeAlias = struct_cudaGraphConditionalHandleCreate_v12030_params_st
# --- Driver entry-point / symbol lookup records ---------------------------
# cudaGetDriverEntryPoint (and the ByVersion CUDA 12.5 variant), plus
# cudaGetFuncBySymbol / cudaGetKernel handle lookups. The misspelled enum
# name `...VersionNotSufficent` matches the official CUDA header — keep it.
@c.record
class struct_cudaGetDriverEntryPoint_ptsz_v11030_params_st(c.Struct):
  SIZE = 32
  symbol: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 0]
  funcPtr: Annotated[c.POINTER[ctypes.c_void_p], 8]
  flags: Annotated[Annotated[int, ctypes.c_uint64], 16]
  driverStatus: Annotated[c.POINTER[enum_cudaDriverEntryPointQueryResult], 24]
class enum_cudaDriverEntryPointQueryResult(Annotated[int, ctypes.c_uint32], c.Enum): pass
cudaDriverEntryPointSuccess = enum_cudaDriverEntryPointQueryResult.define('cudaDriverEntryPointSuccess', 0)
cudaDriverEntryPointSymbolNotFound = enum_cudaDriverEntryPointQueryResult.define('cudaDriverEntryPointSymbolNotFound', 1)
cudaDriverEntryPointVersionNotSufficent = enum_cudaDriverEntryPointQueryResult.define('cudaDriverEntryPointVersionNotSufficent', 2)
cudaGetDriverEntryPoint_ptsz_v11030_params: TypeAlias = struct_cudaGetDriverEntryPoint_ptsz_v11030_params_st
@c.record
class struct_cudaGetDriverEntryPointByVersion_ptsz_v12050_params_st(c.Struct):
  SIZE = 40
  symbol: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 0]
  funcPtr: Annotated[c.POINTER[ctypes.c_void_p], 8]
  cudaVersion: Annotated[Annotated[int, ctypes.c_uint32], 16]
  flags: Annotated[Annotated[int, ctypes.c_uint64], 24]
  driverStatus: Annotated[c.POINTER[enum_cudaDriverEntryPointQueryResult], 32]
cudaGetDriverEntryPointByVersion_ptsz_v12050_params: TypeAlias = struct_cudaGetDriverEntryPointByVersion_ptsz_v12050_params_st
@c.record
class struct_cudaGetFuncBySymbol_v11000_params_st(c.Struct):
  SIZE = 16
  functionPtr: Annotated[c.POINTER[cudaFunction_t], 0]
  symbolPtr: Annotated[ctypes.c_void_p, 8]
# Opaque driver function handle (pointer to CUfunc).
cudaFunction_t: TypeAlias = c.POINTER[struct_CUfunc_st]
cudaGetFuncBySymbol_v11000_params: TypeAlias = struct_cudaGetFuncBySymbol_v11000_params_st
@c.record
class struct_cudaGetKernel_v12000_params_st(c.Struct):
  SIZE = 16
  kernelPtr: Annotated[c.POINTER[cudaKernel_t], 0]
  entryFuncAddr: Annotated[ctypes.c_void_p, 8]
# Opaque kernel handle (pointer to CUkern).
cudaKernel_t: TypeAlias = c.POINTER[struct_CUkern_st]
cudaGetKernel_v12000_params: TypeAlias = struct_cudaGetKernel_v12000_params_st
@c.record
class struct_cudaMemcpy_v3020_params_st(c.Struct):
SIZE = 32
dst: Annotated[ctypes.c_void_p, 0]
src: Annotated[ctypes.c_void_p, 8]
count: Annotated[size_t, 16]
kind: Annotated[enum_cudaMemcpyKind, 24]
cudaMemcpy_v3020_params: TypeAlias = struct_cudaMemcpy_v3020_params_st
@c.record
class struct_cudaMemcpyToSymbol_v3020_params_st(c.Struct):
SIZE = 40
symbol: Annotated[ctypes.c_void_p, 0]
src: Annotated[ctypes.c_void_p, 8]
count: Annotated[size_t, 16]
offset: Annotated[size_t, 24]
kind: Annotated[enum_cudaMemcpyKind, 32]
cudaMemcpyToSymbol_v3020_params: TypeAlias = struct_cudaMemcpyToSymbol_v3020_params_st
@c.record
class struct_cudaMemcpyFromSymbol_v3020_params_st(c.Struct):
SIZE = 40
dst: Annotated[ctypes.c_void_p, 0]
symbol: Annotated[ctypes.c_void_p, 8]
count: Annotated[size_t, 16]
offset: Annotated[size_t, 24]
kind: Annotated[enum_cudaMemcpyKind, 32]
cudaMemcpyFromSymbol_v3020_params: TypeAlias = struct_cudaMemcpyFromSymbol_v3020_params_st
@c.record
class struct_cudaMemcpy2D_v3020_params_st(c.Struct):
SIZE = 56
dst: Annotated[ctypes.c_void_p, 0]
dpitch: Annotated[size_t, 8]
src: Annotated[ctypes.c_void_p, 16]
spitch: Annotated[size_t, 24]
width: Annotated[size_t, 32]
height: Annotated[size_t, 40]
kind: Annotated[enum_cudaMemcpyKind, 48]
cudaMemcpy2D_v3020_params: TypeAlias = struct_cudaMemcpy2D_v3020_params_st
@c.record
class struct_cudaMemcpyToArray_v3020_params_st(c.Struct):
SIZE = 48
dst: Annotated[cudaArray_t, 0]
wOffset: Annotated[size_t, 8]
hOffset: Annotated[size_t, 16]
src: Annotated[ctypes.c_void_p, 24]
count: Annotated[size_t, 32]
kind: Annotated[enum_cudaMemcpyKind, 40]
cudaMemcpyToArray_v3020_params: TypeAlias = struct_cudaMemcpyToArray_v3020_params_st
@c.record
class struct_cudaMemcpy2DToArray_v3020_params_st(c.Struct):
SIZE = 64
dst: Annotated[cudaArray_t, 0]
wOffset: Annotated[size_t, 8]
hOffset: Annotated[size_t, 16]
src: Annotated[ctypes.c_void_p, 24]
spitch: Annotated[size_t, 32]
width: Annotated[size_t, 40]
height: Annotated[size_t, 48]
kind: Annotated[enum_cudaMemcpyKind, 56]
cudaMemcpy2DToArray_v3020_params: TypeAlias = struct_cudaMemcpy2DToArray_v3020_params_st
@c.record
class struct_cudaMemcpyFromArray_v3020_params_st(c.Struct):
SIZE = 48
dst: Annotated[ctypes.c_void_p, 0]
src: Annotated[cudaArray_const_t, 8]
wOffset: Annotated[size_t, 16]
hOffset: Annotated[size_t, 24]
count: Annotated[size_t, 32]
kind: Annotated[enum_cudaMemcpyKind, 40]
cudaMemcpyFromArray_v3020_params: TypeAlias = struct_cudaMemcpyFromArray_v3020_params_st
@c.record
class struct_cudaMemcpy2DFromArray_v3020_params_st(c.Struct):
SIZE = 64
dst: Annotated[ctypes.c_void_p, 0]
dpitch: Annotated[size_t, 8]
src: Annotated[cudaArray_const_t, 16]
wOffset: Annotated[size_t, 24]
hOffset: Annotated[size_t, 32]
width: Annotated[size_t, 40]
height: Annotated[size_t, 48]
kind: Annotated[enum_cudaMemcpyKind, 56]
cudaMemcpy2DFromArray_v3020_params: TypeAlias = struct_cudaMemcpy2DFromArray_v3020_params_st
@c.record
class struct_cudaMemcpyArrayToArray_v3020_params_st(c.Struct):
SIZE = 64
dst: Annotated[cudaArray_t, 0]
wOffsetDst: Annotated[size_t, 8]
hOffsetDst: Annotated[size_t, 16]
src: Annotated[cudaArray_const_t, 24]
wOffsetSrc: Annotated[size_t, 32]
hOffsetSrc: Annotated[size_t, 40]
count: Annotated[size_t, 48]
kind: Annotated[enum_cudaMemcpyKind, 56]
cudaMemcpyArrayToArray_v3020_params: TypeAlias = struct_cudaMemcpyArrayToArray_v3020_params_st
@c.record
class struct_cudaMemcpy2DArrayToArray_v3020_params_st(c.Struct):
SIZE = 72
dst: Annotated[cudaArray_t, 0]
wOffsetDst: Annotated[size_t, 8]
hOffsetDst: Annotated[size_t, 16]
src: Annotated[cudaArray_const_t, 24]
wOffsetSrc: Annotated[size_t, 32]
hOffsetSrc: Annotated[size_t, 40]
width: Annotated[size_t, 48]
height: Annotated[size_t, 56]
kind: Annotated[enum_cudaMemcpyKind, 64]
cudaMemcpy2DArrayToArray_v3020_params: TypeAlias = struct_cudaMemcpy2DArrayToArray_v3020_params_st
@c.record
class struct_cudaMemcpy3D_v3020_params_st(c.Struct):
SIZE = 8
p: Annotated[c.POINTER[struct_cudaMemcpy3DParms], 0]
cudaMemcpy3D_v3020_params: TypeAlias = struct_cudaMemcpy3D_v3020_params_st
@c.record
class struct_cudaMemcpy3DPeer_v4000_params_st(c.Struct):
SIZE = 8
p: Annotated[c.POINTER[struct_cudaMemcpy3DPeerParms], 0]
cudaMemcpy3DPeer_v4000_params: TypeAlias = struct_cudaMemcpy3DPeer_v4000_params_st
@c.record
class struct_cudaMemcpyBatchAsync_v12080_params_st(c.Struct):
SIZE = 72
dsts: Annotated[c.POINTER[ctypes.c_void_p], 0]
srcs: Annotated[c.POINTER[ctypes.c_void_p], 8]
sizes: Annotated[c.POINTER[size_t], 16]
count: Annotated[size_t, 24]
attrs: Annotated[c.POINTER[struct_cudaMemcpyAttributes], 32]
attrsIdxs: Annotated[c.POINTER[size_t], 40]
numAttrs: Annotated[size_t, 48]
failIdx: Annotated[c.POINTER[size_t], 56]
stream: Annotated[cudaStream_t, 64]
cudaMemcpyBatchAsync_v12080_params: TypeAlias = struct_cudaMemcpyBatchAsync_v12080_params_st
@c.record
class struct_cudaMemcpy3DBatchAsync_v12080_params_st(c.Struct):
SIZE = 40
numOps: Annotated[size_t, 0]
opList: Annotated[c.POINTER[struct_cudaMemcpy3DBatchOp], 8]
failIdx: Annotated[c.POINTER[size_t], 16]
flags: Annotated[Annotated[int, ctypes.c_uint64], 24]
stream: Annotated[cudaStream_t, 32]
cudaMemcpy3DBatchAsync_v12080_params: TypeAlias = struct_cudaMemcpy3DBatchAsync_v12080_params_st
@c.record
class struct_cudaMemset_v3020_params_st(c.Struct):
SIZE = 24
devPtr: Annotated[ctypes.c_void_p, 0]
value: Annotated[Annotated[int, ctypes.c_int32], 8]
count: Annotated[size_t, 16]
cudaMemset_v3020_params: TypeAlias = struct_cudaMemset_v3020_params_st
@c.record
class struct_cudaMemset2D_v3020_params_st(c.Struct):
SIZE = 40
devPtr: Annotated[ctypes.c_void_p, 0]
pitch: Annotated[size_t, 8]
value: Annotated[Annotated[int, ctypes.c_int32], 16]
width: Annotated[size_t, 24]
height: Annotated[size_t, 32]
cudaMemset2D_v3020_params: TypeAlias = struct_cudaMemset2D_v3020_params_st
@c.record
class struct_cudaMemset3D_v3020_params_st(c.Struct):
SIZE = 64
pitchedDevPtr: Annotated[struct_cudaPitchedPtr, 0]
value: Annotated[Annotated[int, ctypes.c_int32], 32]
extent: Annotated[struct_cudaExtent, 40]
cudaMemset3D_v3020_params: TypeAlias = struct_cudaMemset3D_v3020_params_st
@c.record
class struct_cudaMemcpyAsync_v3020_params_st(c.Struct):
SIZE = 40
dst: Annotated[ctypes.c_void_p, 0]
src: Annotated[ctypes.c_void_p, 8]
count: Annotated[size_t, 16]
kind: Annotated[enum_cudaMemcpyKind, 24]
stream: Annotated[cudaStream_t, 32]
cudaMemcpyAsync_v3020_params: TypeAlias = struct_cudaMemcpyAsync_v3020_params_st
@c.record
class struct_cudaMemcpyToSymbolAsync_v3020_params_st(c.Struct):
SIZE = 48
symbol: Annotated[ctypes.c_void_p, 0]
src: Annotated[ctypes.c_void_p, 8]
count: Annotated[size_t, 16]
offset: Annotated[size_t, 24]
kind: Annotated[enum_cudaMemcpyKind, 32]
stream: Annotated[cudaStream_t, 40]
cudaMemcpyToSymbolAsync_v3020_params: TypeAlias = struct_cudaMemcpyToSymbolAsync_v3020_params_st
@c.record
class struct_cudaMemcpyFromSymbolAsync_v3020_params_st(c.Struct):
SIZE = 48
dst: Annotated[ctypes.c_void_p, 0]
symbol: Annotated[ctypes.c_void_p, 8]
count: Annotated[size_t, 16]
offset: Annotated[size_t, 24]
kind: Annotated[enum_cudaMemcpyKind, 32]
stream: Annotated[cudaStream_t, 40]
cudaMemcpyFromSymbolAsync_v3020_params: TypeAlias = struct_cudaMemcpyFromSymbolAsync_v3020_params_st
@c.record
class struct_cudaMemcpy2DAsync_v3020_params_st(c.Struct):
SIZE = 64
dst: Annotated[ctypes.c_void_p, 0]
dpitch: Annotated[size_t, 8]
src: Annotated[ctypes.c_void_p, 16]
spitch: Annotated[size_t, 24]
width: Annotated[size_t, 32]
height: Annotated[size_t, 40]
kind: Annotated[enum_cudaMemcpyKind, 48]
stream: Annotated[cudaStream_t, 56]
cudaMemcpy2DAsync_v3020_params: TypeAlias = struct_cudaMemcpy2DAsync_v3020_params_st
@c.record
class struct_cudaMemcpyToArrayAsync_v3020_params_st(c.Struct):
SIZE = 56
dst: Annotated[cudaArray_t, 0]
wOffset: Annotated[size_t, 8]
hOffset: Annotated[size_t, 16]
src: Annotated[ctypes.c_void_p, 24]
count: Annotated[size_t, 32]
kind: Annotated[enum_cudaMemcpyKind, 40]
stream: Annotated[cudaStream_t, 48]
cudaMemcpyToArrayAsync_v3020_params: TypeAlias = struct_cudaMemcpyToArrayAsync_v3020_params_st
@c.record
class struct_cudaMemcpy2DToArrayAsync_v3020_params_st(c.Struct):
SIZE = 72
dst: Annotated[cudaArray_t, 0]
wOffset: Annotated[size_t, 8]
hOffset: Annotated[size_t, 16]
src: Annotated[ctypes.c_void_p, 24]
spitch: Annotated[size_t, 32]
width: Annotated[size_t, 40]
height: Annotated[size_t, 48]
kind: Annotated[enum_cudaMemcpyKind, 56]
stream: Annotated[cudaStream_t, 64]
cudaMemcpy2DToArrayAsync_v3020_params: TypeAlias = struct_cudaMemcpy2DToArrayAsync_v3020_params_st
@c.record
class struct_cudaMemcpyFromArrayAsync_v3020_params_st(c.Struct):
SIZE = 56
dst: Annotated[ctypes.c_void_p, 0]
src: Annotated[cudaArray_const_t, 8]
wOffset: Annotated[size_t, 16]
hOffset: Annotated[size_t, 24]
count: Annotated[size_t, 32]
kind: Annotated[enum_cudaMemcpyKind, 40]
stream: Annotated[cudaStream_t, 48]
cudaMemcpyFromArrayAsync_v3020_params: TypeAlias = struct_cudaMemcpyFromArrayAsync_v3020_params_st
@c.record
class struct_cudaMemcpy2DFromArrayAsync_v3020_params_st(c.Struct):
SIZE = 72
dst: Annotated[ctypes.c_void_p, 0]
dpitch: Annotated[size_t, 8]
src: Annotated[cudaArray_const_t, 16]
wOffset: Annotated[size_t, 24]
hOffset: Annotated[size_t, 32]
width: Annotated[size_t, 40]
height: Annotated[size_t, 48]
kind: Annotated[enum_cudaMemcpyKind, 56]
stream: Annotated[cudaStream_t, 64]
cudaMemcpy2DFromArrayAsync_v3020_params: TypeAlias = struct_cudaMemcpy2DFromArrayAsync_v3020_params_st
@c.record
class struct_cudaMemcpy3DAsync_v3020_params_st(c.Struct):
SIZE = 16
p: Annotated[c.POINTER[struct_cudaMemcpy3DParms], 0]
stream: Annotated[cudaStream_t, 8]
cudaMemcpy3DAsync_v3020_params: TypeAlias = struct_cudaMemcpy3DAsync_v3020_params_st
@c.record
class struct_cudaMemcpy3DPeerAsync_v4000_params_st(c.Struct):
SIZE = 16
p: Annotated[c.POINTER[struct_cudaMemcpy3DPeerParms], 0]
stream: Annotated[cudaStream_t, 8]
cudaMemcpy3DPeerAsync_v4000_params: TypeAlias = struct_cudaMemcpy3DPeerAsync_v4000_params_st
@c.record
class struct_cudaMemsetAsync_v3020_params_st(c.Struct):
SIZE = 32
devPtr: Annotated[ctypes.c_void_p, 0]
value: Annotated[Annotated[int, ctypes.c_int32], 8]
count: Annotated[size_t, 16]
stream: Annotated[cudaStream_t, 24]
cudaMemsetAsync_v3020_params: TypeAlias = struct_cudaMemsetAsync_v3020_params_st
@c.record
class struct_cudaMemset2DAsync_v3020_params_st(c.Struct):
SIZE = 48
devPtr: Annotated[ctypes.c_void_p, 0]
pitch: Annotated[size_t, 8]
value: Annotated[Annotated[int, ctypes.c_int32], 16]
width: Annotated[size_t, 24]
height: Annotated[size_t, 32]
stream: Annotated[cudaStream_t, 40]
cudaMemset2DAsync_v3020_params: TypeAlias = struct_cudaMemset2DAsync_v3020_params_st
@c.record
class struct_cudaMemset3DAsync_v3020_params_st(c.Struct):
SIZE = 72
pitchedDevPtr: Annotated[struct_cudaPitchedPtr, 0]
value: Annotated[Annotated[int, ctypes.c_int32], 32]
extent: Annotated[struct_cudaExtent, 40]
stream: Annotated[cudaStream_t, 64]
cudaMemset3DAsync_v3020_params: TypeAlias = struct_cudaMemset3DAsync_v3020_params_st
@c.record
class struct_cudaStreamQuery_v3020_params_st(c.Struct):
SIZE = 8
stream: Annotated[cudaStream_t, 0]
cudaStreamQuery_v3020_params: TypeAlias = struct_cudaStreamQuery_v3020_params_st
@c.record
class struct_cudaStreamGetDevice_v12080_params_st(c.Struct):
SIZE = 16
hStream: Annotated[cudaStream_t, 0]
device: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 8]
cudaStreamGetDevice_v12080_params: TypeAlias = struct_cudaStreamGetDevice_v12080_params_st
@c.record
class struct_cudaStreamGetFlags_v5050_params_st(c.Struct):
SIZE = 16
hStream: Annotated[cudaStream_t, 0]
flags: Annotated[c.POINTER[Annotated[int, ctypes.c_uint32]], 8]
cudaStreamGetFlags_v5050_params: TypeAlias = struct_cudaStreamGetFlags_v5050_params_st
@c.record
class struct_cudaStreamGetId_v12000_params_st(c.Struct):
SIZE = 16
hStream: Annotated[cudaStream_t, 0]
streamId: Annotated[c.POINTER[Annotated[int, ctypes.c_uint64]], 8]
cudaStreamGetId_v12000_params: TypeAlias = struct_cudaStreamGetId_v12000_params_st
@c.record
class struct_cudaStreamGetPriority_v5050_params_st(c.Struct):
SIZE = 16
hStream: Annotated[cudaStream_t, 0]
priority: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 8]
cudaStreamGetPriority_v5050_params: TypeAlias = struct_cudaStreamGetPriority_v5050_params_st
@c.record
class struct_cudaEventRecord_v3020_params_st(c.Struct):
SIZE = 16
event: Annotated[cudaEvent_t, 0]
stream: Annotated[cudaStream_t, 8]
cudaEventRecord_v3020_params: TypeAlias = struct_cudaEventRecord_v3020_params_st
@c.record
class struct_cudaEventRecordWithFlags_v11010_params_st(c.Struct):
SIZE = 24
event: Annotated[cudaEvent_t, 0]
stream: Annotated[cudaStream_t, 8]
flags: Annotated[Annotated[int, ctypes.c_uint32], 16]
cudaEventRecordWithFlags_v11010_params: TypeAlias = struct_cudaEventRecordWithFlags_v11010_params_st
@c.record
class struct_cudaStreamWaitEvent_v3020_params_st(c.Struct):
SIZE = 24
stream: Annotated[cudaStream_t, 0]
event: Annotated[cudaEvent_t, 8]
flags: Annotated[Annotated[int, ctypes.c_uint32], 16]
cudaStreamWaitEvent_v3020_params: TypeAlias = struct_cudaStreamWaitEvent_v3020_params_st
@c.record
class struct_cudaStreamAddCallback_v5000_params_st(c.Struct):
SIZE = 32
stream: Annotated[cudaStream_t, 0]
callback: Annotated[cudaStreamCallback_t, 8]
userData: Annotated[ctypes.c_void_p, 16]
flags: Annotated[Annotated[int, ctypes.c_uint32], 24]
cudaStreamAddCallback_v5000_params: TypeAlias = struct_cudaStreamAddCallback_v5000_params_st
@c.record
class struct_cudaStreamAttachMemAsync_v6000_params_st(c.Struct):
SIZE = 32
stream: Annotated[cudaStream_t, 0]
devPtr: Annotated[ctypes.c_void_p, 8]
length: Annotated[size_t, 16]
flags: Annotated[Annotated[int, ctypes.c_uint32], 24]
cudaStreamAttachMemAsync_v6000_params: TypeAlias = struct_cudaStreamAttachMemAsync_v6000_params_st
@c.record
class struct_cudaStreamSynchronize_v3020_params_st(c.Struct):
SIZE = 8
stream: Annotated[cudaStream_t, 0]
cudaStreamSynchronize_v3020_params: TypeAlias = struct_cudaStreamSynchronize_v3020_params_st
@c.record
class struct_cudaLaunchKernel_v7000_params_st(c.Struct):
SIZE = 56
func: Annotated[ctypes.c_void_p, 0]
gridDim: Annotated[dim3, 8]
blockDim: Annotated[dim3, 20]
args: Annotated[c.POINTER[ctypes.c_void_p], 32]
sharedMem: Annotated[size_t, 40]
stream: Annotated[cudaStream_t, 48]
cudaLaunchKernel_v7000_params: TypeAlias = struct_cudaLaunchKernel_v7000_params_st
@c.record
class struct_cudaLaunchKernelExC_v11060_params_st(c.Struct):
SIZE = 24
config: Annotated[c.POINTER[cudaLaunchConfig_t], 0]
func: Annotated[ctypes.c_void_p, 8]
args: Annotated[c.POINTER[ctypes.c_void_p], 16]
cudaLaunchKernelExC_v11060_params: TypeAlias = struct_cudaLaunchKernelExC_v11060_params_st
@c.record
class struct_cudaLaunchCooperativeKernel_v9000_params_st(c.Struct):
SIZE = 56
func: Annotated[ctypes.c_void_p, 0]
gridDim: Annotated[dim3, 8]
blockDim: Annotated[dim3, 20]
args: Annotated[c.POINTER[ctypes.c_void_p], 32]
sharedMem: Annotated[size_t, 40]
stream: Annotated[cudaStream_t, 48]
cudaLaunchCooperativeKernel_v9000_params: TypeAlias = struct_cudaLaunchCooperativeKernel_v9000_params_st
@c.record
class struct_cudaLaunchHostFunc_v10000_params_st(c.Struct):
SIZE = 24
stream: Annotated[cudaStream_t, 0]
fn: Annotated[cudaHostFn_t, 8]
userData: Annotated[ctypes.c_void_p, 16]
cudaLaunchHostFunc_v10000_params: TypeAlias = struct_cudaLaunchHostFunc_v10000_params_st
@c.record
class struct_cudaMemPrefetchAsync_v8000_params_st(c.Struct):
SIZE = 32
devPtr: Annotated[ctypes.c_void_p, 0]
count: Annotated[size_t, 8]
dstDevice: Annotated[Annotated[int, ctypes.c_int32], 16]
stream: Annotated[cudaStream_t, 24]
cudaMemPrefetchAsync_v8000_params: TypeAlias = struct_cudaMemPrefetchAsync_v8000_params_st
@c.record
class struct_cudaMemPrefetchAsync_v2_v12020_params_st(c.Struct):
SIZE = 40
devPtr: Annotated[ctypes.c_void_p, 0]
count: Annotated[size_t, 8]
location: Annotated[struct_cudaMemLocation, 16]
flags: Annotated[Annotated[int, ctypes.c_uint32], 24]
stream: Annotated[cudaStream_t, 32]
cudaMemPrefetchAsync_v2_v12020_params: TypeAlias = struct_cudaMemPrefetchAsync_v2_v12020_params_st
@c.record
class struct_cudaSignalExternalSemaphoresAsync_v10000_params_st(c.Struct):
SIZE = 32
extSemArray: Annotated[c.POINTER[cudaExternalSemaphore_t], 0]
paramsArray: Annotated[c.POINTER[struct_cudaExternalSemaphoreSignalParams_v1], 8]
numExtSems: Annotated[Annotated[int, ctypes.c_uint32], 16]
stream: Annotated[cudaStream_t, 24]
@c.record
class struct_cudaExternalSemaphoreSignalParams_v1(c.Struct):
SIZE = 32
params: Annotated[struct_cudaExternalSemaphoreSignalParams_v1_params, 0]
flags: Annotated[Annotated[int, ctypes.c_uint32], 24]
@c.record
class struct_cudaExternalSemaphoreSignalParams_v1_params(c.Struct):
SIZE = 24
fence: Annotated[struct_cudaExternalSemaphoreSignalParams_v1_params_fence, 0]
nvSciSync: Annotated[struct_cudaExternalSemaphoreSignalParams_v1_params_nvSciSync, 8]
keyedMutex: Annotated[struct_cudaExternalSemaphoreSignalParams_v1_params_keyedMutex, 16]
@c.record
class struct_cudaExternalSemaphoreSignalParams_v1_params_fence(c.Struct):
SIZE = 8
value: Annotated[Annotated[int, ctypes.c_uint64], 0]
@c.record
class struct_cudaExternalSemaphoreSignalParams_v1_params_nvSciSync(c.Struct):
SIZE = 8
fence: Annotated[ctypes.c_void_p, 0]
reserved: Annotated[Annotated[int, ctypes.c_uint64], 0]
@c.record
class struct_cudaExternalSemaphoreSignalParams_v1_params_keyedMutex(c.Struct):
SIZE = 8
key: Annotated[Annotated[int, ctypes.c_uint64], 0]
cudaSignalExternalSemaphoresAsync_v10000_params: TypeAlias = struct_cudaSignalExternalSemaphoresAsync_v10000_params_st
@c.record
class struct_cudaSignalExternalSemaphoresAsync_ptsz_v10000_params_st(c.Struct):
SIZE = 32
extSemArray: Annotated[c.POINTER[cudaExternalSemaphore_t], 0]
paramsArray: Annotated[c.POINTER[struct_cudaExternalSemaphoreSignalParams_v1], 8]
numExtSems: Annotated[Annotated[int, ctypes.c_uint32], 16]
stream: Annotated[cudaStream_t, 24]
cudaSignalExternalSemaphoresAsync_ptsz_v10000_params: TypeAlias = struct_cudaSignalExternalSemaphoresAsync_ptsz_v10000_params_st
@c.record
class struct_cudaSignalExternalSemaphoresAsync_v2_v11020_params_st(c.Struct):
SIZE = 32
extSemArray: Annotated[c.POINTER[cudaExternalSemaphore_t], 0]
paramsArray: Annotated[c.POINTER[struct_cudaExternalSemaphoreSignalParams], 8]
numExtSems: Annotated[Annotated[int, ctypes.c_uint32], 16]
stream: Annotated[cudaStream_t, 24]
cudaSignalExternalSemaphoresAsync_v2_v11020_params: TypeAlias = struct_cudaSignalExternalSemaphoresAsync_v2_v11020_params_st
@c.record
class struct_cudaWaitExternalSemaphoresAsync_v10000_params_st(c.Struct):
SIZE = 32
extSemArray: Annotated[c.POINTER[cudaExternalSemaphore_t], 0]
paramsArray: Annotated[c.POINTER[struct_cudaExternalSemaphoreWaitParams_v1], 8]
numExtSems: Annotated[Annotated[int, ctypes.c_uint32], 16]
stream: Annotated[cudaStream_t, 24]
@c.record
class struct_cudaExternalSemaphoreWaitParams_v1(c.Struct):
SIZE = 40
params: Annotated[struct_cudaExternalSemaphoreWaitParams_v1_params, 0]
flags: Annotated[Annotated[int, ctypes.c_uint32], 32]
@c.record
class struct_cudaExternalSemaphoreWaitParams_v1_params(c.Struct):
SIZE = 32
fence: Annotated[struct_cudaExternalSemaphoreWaitParams_v1_params_fence, 0]
nvSciSync: Annotated[struct_cudaExternalSemaphoreWaitParams_v1_params_nvSciSync, 8]
keyedMutex: Annotated[struct_cudaExternalSemaphoreWaitParams_v1_params_keyedMutex, 16]
@c.record
class struct_cudaExternalSemaphoreWaitParams_v1_params_fence(c.Struct):
SIZE = 8
value: Annotated[Annotated[int, ctypes.c_uint64], 0]
@c.record
class struct_cudaExternalSemaphoreWaitParams_v1_params_nvSciSync(c.Struct):
SIZE = 8
fence: Annotated[ctypes.c_void_p, 0]
reserved: Annotated[Annotated[int, ctypes.c_uint64], 0]
@c.record
class struct_cudaExternalSemaphoreWaitParams_v1_params_keyedMutex(c.Struct):
SIZE = 16
key: Annotated[Annotated[int, ctypes.c_uint64], 0]
timeoutMs: Annotated[Annotated[int, ctypes.c_uint32], 8]
cudaWaitExternalSemaphoresAsync_v10000_params: TypeAlias = struct_cudaWaitExternalSemaphoresAsync_v10000_params_st
@c.record
class struct_cudaWaitExternalSemaphoresAsync_ptsz_v10000_params_st(c.Struct):
SIZE = 32
extSemArray: Annotated[c.POINTER[cudaExternalSemaphore_t], 0]
paramsArray: Annotated[c.POINTER[struct_cudaExternalSemaphoreWaitParams_v1], 8]
numExtSems: Annotated[Annotated[int, ctypes.c_uint32], 16]
stream: Annotated[cudaStream_t, 24]
cudaWaitExternalSemaphoresAsync_ptsz_v10000_params: TypeAlias = struct_cudaWaitExternalSemaphoresAsync_ptsz_v10000_params_st
@c.record
class struct_cudaWaitExternalSemaphoresAsync_v2_v11020_params_st(c.Struct):
SIZE = 32
extSemArray: Annotated[c.POINTER[cudaExternalSemaphore_t], 0]
paramsArray: Annotated[c.POINTER[struct_cudaExternalSemaphoreWaitParams], 8]
numExtSems: Annotated[Annotated[int, ctypes.c_uint32], 16]
stream: Annotated[cudaStream_t, 24]
cudaWaitExternalSemaphoresAsync_v2_v11020_params: TypeAlias = struct_cudaWaitExternalSemaphoresAsync_v2_v11020_params_st
@c.record
class struct_cudaGraphInstantiateWithParams_v12000_params_st(c.Struct):
SIZE = 24
pGraphExec: Annotated[c.POINTER[cudaGraphExec_t], 0]
graph: Annotated[cudaGraph_t, 8]
instantiateParams: Annotated[c.POINTER[cudaGraphInstantiateParams], 16]
cudaGraphInstantiateWithParams_v12000_params: TypeAlias = struct_cudaGraphInstantiateWithParams_v12000_params_st
@c.record
class struct_cudaGraphUpload_v10000_params_st(c.Struct):
SIZE = 16
graphExec: Annotated[cudaGraphExec_t, 0]
stream: Annotated[cudaStream_t, 8]
cudaGraphUpload_v10000_params: TypeAlias = struct_cudaGraphUpload_v10000_params_st
@c.record
class struct_cudaGraphLaunch_v10000_params_st(c.Struct):
SIZE = 16
graphExec: Annotated[cudaGraphExec_t, 0]
stream: Annotated[cudaStream_t, 8]
cudaGraphLaunch_v10000_params: TypeAlias = struct_cudaGraphLaunch_v10000_params_st
@c.record
class struct_cudaStreamBeginCapture_v10000_params_st(c.Struct):
SIZE = 16
stream: Annotated[cudaStream_t, 0]
mode: Annotated[enum_cudaStreamCaptureMode, 8]
cudaStreamBeginCapture_v10000_params: TypeAlias = struct_cudaStreamBeginCapture_v10000_params_st
@c.record
class struct_cudaStreamBeginCaptureToGraph_v12030_params_st(c.Struct):
SIZE = 48
stream: Annotated[cudaStream_t, 0]
graph: Annotated[cudaGraph_t, 8]
dependencies: Annotated[c.POINTER[cudaGraphNode_t], 16]
dependencyData: Annotated[c.POINTER[cudaGraphEdgeData], 24]
numDependencies: Annotated[size_t, 32]
mode: Annotated[enum_cudaStreamCaptureMode, 40]
cudaStreamBeginCaptureToGraph_v12030_params: TypeAlias = struct_cudaStreamBeginCaptureToGraph_v12030_params_st
@c.record
class struct_cudaStreamEndCapture_v10000_params_st(c.Struct):
SIZE = 16
stream: Annotated[cudaStream_t, 0]
pGraph: Annotated[c.POINTER[cudaGraph_t], 8]
cudaStreamEndCapture_v10000_params: TypeAlias = struct_cudaStreamEndCapture_v10000_params_st
@c.record
class struct_cudaStreamIsCapturing_v10000_params_st(c.Struct):
SIZE = 16
stream: Annotated[cudaStream_t, 0]
pCaptureStatus: Annotated[c.POINTER[enum_cudaStreamCaptureStatus], 8]
cudaStreamIsCapturing_v10000_params: TypeAlias = struct_cudaStreamIsCapturing_v10000_params_st
@c.record
class struct_cudaStreamGetCaptureInfo_v10010_params_st(c.Struct):
SIZE = 24
stream: Annotated[cudaStream_t, 0]
captureStatus_out: Annotated[c.POINTER[enum_cudaStreamCaptureStatus], 8]
id_out: Annotated[c.POINTER[Annotated[int, ctypes.c_uint64]], 16]
cudaStreamGetCaptureInfo_v10010_params: TypeAlias = struct_cudaStreamGetCaptureInfo_v10010_params_st
@c.record
class struct_cudaStreamGetCaptureInfo_ptsz_v10010_params_st(c.Struct):
SIZE = 24
stream: Annotated[cudaStream_t, 0]
captureStatus_out: Annotated[c.POINTER[enum_cudaStreamCaptureStatus], 8]
id_out: Annotated[c.POINTER[Annotated[int, ctypes.c_uint64]], 16]
cudaStreamGetCaptureInfo_ptsz_v10010_params: TypeAlias = struct_cudaStreamGetCaptureInfo_ptsz_v10010_params_st
@c.record
class struct_cudaStreamGetCaptureInfo_v2_v11030_params_st(c.Struct):
SIZE = 48
stream: Annotated[cudaStream_t, 0]
captureStatus_out: Annotated[c.POINTER[enum_cudaStreamCaptureStatus], 8]
id_out: Annotated[c.POINTER[Annotated[int, ctypes.c_uint64]], 16]
graph_out: Annotated[c.POINTER[cudaGraph_t], 24]
dependencies_out: Annotated[c.POINTER[c.POINTER[cudaGraphNode_t]], 32]
numDependencies_out: Annotated[c.POINTER[size_t], 40]
cudaStreamGetCaptureInfo_v2_v11030_params: TypeAlias = struct_cudaStreamGetCaptureInfo_v2_v11030_params_st
@c.record
class struct_cudaStreamGetCaptureInfo_v3_v12030_params_st(c.Struct):
SIZE = 56
stream: Annotated[cudaStream_t, 0]
captureStatus_out: Annotated[c.POINTER[enum_cudaStreamCaptureStatus], 8]
id_out: Annotated[c.POINTER[Annotated[int, ctypes.c_uint64]], 16]
graph_out: Annotated[c.POINTER[cudaGraph_t], 24]
dependencies_out: Annotated[c.POINTER[c.POINTER[cudaGraphNode_t]], 32]
edgeData_out: Annotated[c.POINTER[c.POINTER[cudaGraphEdgeData]], 40]
numDependencies_out: Annotated[c.POINTER[size_t], 48]
cudaStreamGetCaptureInfo_v3_v12030_params: TypeAlias = struct_cudaStreamGetCaptureInfo_v3_v12030_params_st
@c.record
class struct_cudaStreamUpdateCaptureDependencies_v11030_params_st(c.Struct):
SIZE = 32
stream: Annotated[cudaStream_t, 0]
dependencies: Annotated[c.POINTER[cudaGraphNode_t], 8]
numDependencies: Annotated[size_t, 16]
flags: Annotated[Annotated[int, ctypes.c_uint32], 24]
cudaStreamUpdateCaptureDependencies_v11030_params: TypeAlias = struct_cudaStreamUpdateCaptureDependencies_v11030_params_st
@c.record
class struct_cudaStreamUpdateCaptureDependencies_v2_v12030_params_st(c.Struct):
SIZE = 40
stream: Annotated[cudaStream_t, 0]
dependencies: Annotated[c.POINTER[cudaGraphNode_t], 8]
dependencyData: Annotated[c.POINTER[cudaGraphEdgeData], 16]
numDependencies: Annotated[size_t, 24]
flags: Annotated[Annotated[int, ctypes.c_uint32], 32]
cudaStreamUpdateCaptureDependencies_v2_v12030_params: TypeAlias = struct_cudaStreamUpdateCaptureDependencies_v2_v12030_params_st
@c.record
class struct_cudaStreamCopyAttributes_v11000_params_st(c.Struct):
SIZE = 16
dstStream: Annotated[cudaStream_t, 0]
srcStream: Annotated[cudaStream_t, 8]
cudaStreamCopyAttributes_v11000_params: TypeAlias = struct_cudaStreamCopyAttributes_v11000_params_st
@c.record
class struct_cudaStreamGetAttribute_v11000_params_st(c.Struct):
SIZE = 24
stream: Annotated[cudaStream_t, 0]
attr: Annotated[cudaLaunchAttributeID, 8]
value: Annotated[c.POINTER[cudaLaunchAttributeValue], 16]
cudaStreamGetAttribute_v11000_params: TypeAlias = struct_cudaStreamGetAttribute_v11000_params_st
@c.record
class struct_cudaStreamSetAttribute_v11000_params_st(c.Struct):
SIZE = 24
stream: Annotated[cudaStream_t, 0]
attr: Annotated[cudaLaunchAttributeID, 8]
param: Annotated[c.POINTER[cudaLaunchAttributeValue], 16]
cudaStreamSetAttribute_v11000_params: TypeAlias = struct_cudaStreamSetAttribute_v11000_params_st
@c.record
class struct_cudaMallocAsync_v11020_params_st(c.Struct):
SIZE = 24
devPtr: Annotated[c.POINTER[ctypes.c_void_p], 0]
size: Annotated[size_t, 8]
hStream: Annotated[cudaStream_t, 16]
cudaMallocAsync_v11020_params: TypeAlias = struct_cudaMallocAsync_v11020_params_st
@c.record
class struct_cudaFreeAsync_v11020_params_st(c.Struct):
SIZE = 16
devPtr: Annotated[ctypes.c_void_p, 0]
hStream: Annotated[cudaStream_t, 8]
cudaFreeAsync_v11020_params: TypeAlias = struct_cudaFreeAsync_v11020_params_st
@c.record
class struct_cudaMallocFromPoolAsync_v11020_params_st(c.Struct):
SIZE = 32
ptr: Annotated[c.POINTER[ctypes.c_void_p], 0]
size: Annotated[size_t, 8]
memPool: Annotated[cudaMemPool_t, 16]
stream: Annotated[cudaStream_t, 24]
cudaMallocFromPoolAsync_v11020_params: TypeAlias = struct_cudaMallocFromPoolAsync_v11020_params_st
@c.record
class struct_cudaGetDriverEntryPoint_v11030_params_st(c.Struct):
SIZE = 32
symbol: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 0]
funcPtr: Annotated[c.POINTER[ctypes.c_void_p], 8]
flags: Annotated[Annotated[int, ctypes.c_uint64], 16]
driverStatus: Annotated[c.POINTER[enum_cudaDriverEntryPointQueryResult], 24]
cudaGetDriverEntryPoint_v11030_params: TypeAlias = struct_cudaGetDriverEntryPoint_v11030_params_st
@c.record
class struct_cudaGetDriverEntryPointByVersion_v12050_params_st(c.Struct):
SIZE = 40
symbol: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 0]
funcPtr: Annotated[c.POINTER[ctypes.c_void_p], 8]
cudaVersion: Annotated[Annotated[int, ctypes.c_uint32], 16]
flags: Annotated[Annotated[int, ctypes.c_uint64], 24]
driverStatus: Annotated[c.POINTER[enum_cudaDriverEntryPointQueryResult], 32]
cudaGetDriverEntryPointByVersion_v12050_params: TypeAlias = struct_cudaGetDriverEntryPointByVersion_v12050_params_st
@c.record
class struct_cudaGetDeviceProperties_v3020_params_st(c.Struct):
SIZE = 16
prop: Annotated[c.POINTER[struct_cudaDeviceProp], 0]
device: Annotated[Annotated[int, ctypes.c_int32], 8]
cudaGetDeviceProperties_v3020_params: TypeAlias = struct_cudaGetDeviceProperties_v3020_params_st
c.init_records()  # finalize all @c.record structs defined above (presumably resolves their forward type references — defined in the project's `c` helper)
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "extra/nv_pma/cupti/cupti.py",
"license": "MIT License",
"lines": 14001,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
tinygrad/tinygrad:examples/anthropic_challenge.py | from tinygrad import Tensor, dtypes, Context, getenv, UOp, fetch
from tinygrad.uop.ops import Ops, PatternMatcher, UPat
from tinygrad.uop.symbolic import symbolic
from tinygrad.codegen import Renderer
from tinygrad.codegen.opt import Opt, OptOps
# ************************* implementation of the problem ************************
def myhash(a: Tensor) -> Tensor:
  """Jenkins-style integer avalanche mix (shifts, adds, xors), applied elementwise."""
  # Each round combines the value with a constant and with a shifted copy of itself.
  steps = (
    lambda v: (v + 0x7ED55D16) + (v << 12),
    lambda v: (v ^ 0xC761C23C) ^ (v >> 19),
    lambda v: (v + 0x165667B1) + (v << 5),
    lambda v: (v + 0xD3A2646C) ^ (v << 9),
    lambda v: (v + 0xFD7046C5) + (v << 3),
    lambda v: (v ^ 0xB55A4F09) ^ (v >> 16),
  )
  mixed = a
  for step in steps:
    mixed = step(mixed)
  return mixed
def select_with_where_tree(values: Tensor, relative_idx: Tensor) -> Tensor:
  """Pick values[relative_idx] via a balanced binary tree of where() selects (no gather)."""
  count = values.shape[0]
  # Leaf: exactly one candidate left — broadcast it to the index tensor's shape.
  if count == 1:
    return values[0].expand(relative_idx.shape)
  half = count // 2
  # Recurse on both halves; the right half sees indices rebased by `half`.
  lo_branch = select_with_where_tree(values[:half], relative_idx)
  hi_branch = select_with_where_tree(values[half:], relative_idx - half)
  return (relative_idx < half).where(lo_branch, hi_branch)
def tree_traversal(forest: Tensor, val: Tensor, height: int, rounds: int, where_tree_threshold=3) -> Tensor:
  """Hash-driven random walk over a binary tree stored in heap order.

  Each round XORs every walker's value with the node value at its current
  index, re-hashes it with myhash, and descends to the child picked by the new
  hash's low bit (children of node i live at 2i+1 / 2i+2). The level is cycled
  via r % (height + 1), so after the deepest level the walk restarts at the
  root; small levels fetch the node with a where-tree instead of a gather.

  Args:
    forest: flat tensor of node values in heap order.
    val: per-walker value; its shape defines the walker grid.
    height: tree height; levels cycle through 0..height.
    rounds: number of traversal steps to run.
    where_tree_threshold: deepest level still served by select_with_where_tree.

  Returns:
    The per-walker values after all rounds, realized with an UPCAST opt.
  """
  # All walkers start at idx=0
  idx = Tensor.zeros(val.shape, device=val.device, dtype=dtypes.uint32)
  for r in range(rounds):
    level = r % (height + 1)
    # First node index of this level in heap order, and the level's node count.
    level_start = (1 << level) - 1
    level_size = 1 << level
    if level == 0:
      # At root (level 0), all walkers are at idx=0
      # No gather needed, just broadcast the root value
      node_val = forest[0].expand(val.shape)
      idx = idx * 0 # Reset to 0
    elif level <= where_tree_threshold:
      # Small level: use where-tree
      level_values = forest[level_start : level_start + level_size]
      relative_idx = (idx - level_start)
      node_val = select_with_where_tree(level_values, relative_idx)
    else:
      # Large level: use gather
      node_val = forest.gather(0, idx)
    # Mix the node value into the walker's state, then descend: the new hash's
    # low bit chooses the left (2*idx+1) or right (2*idx+2) child.
    val = myhash(val ^ node_val)
    idx = (idx << 1) + (1 + (val & 1))
  # No wrap check needed! At round 10 (level becomes 0), we reset idx above.
  return val.contiguous(arg=(Opt(OptOps.UPCAST, 0, 8),))
# ************************* renderer for VLIW machine *************************
def loop_unrolling(sink:UOp):
  """Fully unroll the first RANGE found under `sink`, or return None if there is none.

  Emits one copy of the sink body per concrete trip value and re-sinks them all.
  """
  ranges = [u for u in sink.toposort() if u.op is Ops.RANGE]
  if not ranges: return None
  loop = ranges[0]
  trip_count = loop.vmax+1
  print(f"unrolling loop with size {trip_count}")
  bodies = [sink.substitute({loop: loop.const_like(i)}).src[0] for i in range(trip_count)]
  return UOp.sink(*bodies, arg=sink.arg)
# base addresses in the Machine memory image; filled in __main__ as [output, input, forest]
global_addrs = []
# graph-form rewrites run as the renderer's pre_matcher, plus standard symbolic simplification
vliw_prepare = PatternMatcher([
  # loop unrolling (should be a part of tinygrad)
  (UPat(Ops.SINK, name="sink"), loop_unrolling),
  # cast is fake
  (UPat(Ops.CAST, name="c"), lambda c: c.src[0]),
  # rewrites to hardcode the addresses in memory
  (UPat(Ops.PARAM, name="dg"), lambda dg: UOp.const(dtypes.uint, global_addrs[dg.arg])),
  # INDEX is just plus
  (UPat(Ops.INDEX, name="i"), lambda i: i.src[0]+i.src[1]),
])+symbolic
class VLIWRenderer(Renderer):
  """Minimal renderer targeting the take-home VLIW Machine.

  render() walks the linearized uops once, hands every value-producing uop a fresh
  run of scratch registers (no reuse), emits one single-slot instruction bundle per
  uop, and returns repr() of the bundle list (the script eval()s it back).
  """
  has_local = False # TODO: this should be the default / cleaned up
  # this says this backend supports MULACC + more. decompositions uses this
  code_for_op: dict = {Ops.MULACC: None, Ops.ADD: "+", Ops.MUL: "*",
                       Ops.XOR: "^", Ops.AND: "&", Ops.OR: "|",
                       Ops.SHL: "<<", Ops.SHR: ">>", Ops.CMPLT: "<"}
  # this matcher runs while still in graph form
  pre_matcher = vliw_prepare
  def render(self, uops:list[UOp]):
    # TODO: this is a minimal renderer. for low cycle count, make it good
    # to get speed, you need to add VLIW packing
    # to get under 1536 regs, you need to add a register allocator
    # we left the fun parts to you
    print(f"rendering with {len(uops)} uops")
    # reg: next free scratch register; inst: emitted instruction bundles
    reg, inst = 0, []
    # r maps each uop to the first register of the run holding its value
    r: dict[UOp, int] = {}
    for u in uops:
      assert u.dtype.count in (1,8), "dtype count must be 1 or 8"
      # dumb register allocator
      if u.op not in {Ops.STORE, Ops.SINK, Ops.GEP}:
        r[u] = reg
        reg += u.dtype.count
      # render UOps to instructions
      match u.op:
        case Ops.SINK:
          inst.append({"flow": [("halt",)]})
        case Ops.CONST:
          inst.append({"load": [("const", r[u], u.arg)]})
        case Ops.GEP:
          # a GEP is just an alias to a special register in the vector
          r[u] = r[u.src[0]] + u.arg[0]
        case Ops.VECTORIZE:
          if all(s == u.src[0] for s in u.src):
            # if all sources are the same, we can broadcast
            inst.append({"valu": [("vbroadcast", r[u], r[u.src[0]])]})
          else:
            # this is a copy into a contiguous chunk of registers
            inst.extend({"flow": [("add_imm", r[u]+i, r[s], 0)]} for i,s in enumerate(u.src) if r[s] != r[u]+i)
        case Ops.LOAD:
          # vector loads when the dtype is 8-wide, scalar otherwise
          op = "vload" if u.dtype.count > 1 else "load"
          inst.append({"load": [(op, r[u], r[u.src[0]])]})
        case Ops.STORE:
          # width is decided by the value being stored (src[1]), not the address
          op = "vstore" if u.src[1].dtype.count > 1 else "store"
          inst.append({"store": [(op, r[u.src[0]], r[u.src[1]])]})
        case Ops.MULACC:
          assert u.dtype.count == 8
          inst.append({"valu": [("multiply_add", r[u], r[u.src[0]], r[u.src[1]], r[u.src[2]])]})
        case Ops.WHERE:
          assert u.dtype.count == 8
          inst.append({"flow": [("vselect", r[u], r[u.src[0]], r[u.src[1]], r[u.src[2]])]})
        case _ if u.op in self.code_for_op:
          # generic binary ALU op, vector or scalar variant by dtype width
          cat = "valu" if u.dtype.count > 1 else "alu"
          inst.append({cat: [(self.code_for_op[u.op], r[u], r[u.src[0]], r[u.src[1]])]})
        case _:
          raise NotImplementedError(f"unhandled op {u.op}")
    return repr(inst)
# ************************* test and render *************************
import sys, types
# frozen copy of the take-home problem (provides Tree, Input, Machine, reference kernels)
PROBLEM_URL = "https://raw.githubusercontent.com/anthropics/original_performance_takehome/refs/heads/main/tests/frozen_problem.py"
# NOTE(review): exec()s code fetched over the network — acceptable only for this example script
sys.modules["problem"] = problem = types.ModuleType("problem")
exec(fetch(PROBLEM_URL).read_text(), problem.__dict__)
if __name__ == "__main__":
  batch_size = getenv("BS", 256)
  height = 10
  rounds = getenv("ROUNDS", 16)
  # build problem
  tree = problem.Tree.generate(height)
  inp = problem.Input.generate(tree, batch_size, rounds)
  mem = problem.build_mem_image(tree, inp)
  global_addrs.extend([mem[6], mem[6], mem[4]]) # output, input, forest
  # *** verify the kernel in tinygrad compared to reference ***
  forest_t = Tensor(tree.values, dtype=dtypes.uint32)
  val_t = Tensor(inp.values, dtype=dtypes.uint32)
  if getenv("VERIFY", 1):
    # verify on normal tinygrad device
    with Context(PCONTIG=2):
      out = tree_traversal(forest_t, val_t, height, rounds)
      val_out = out.tolist()
    # the reference kernel updates inp.values in place, compared below
    problem.reference_kernel(tree, inp)
    assert val_out == inp.values
    print("verification passed")
  # *** render to device ***
  from tinygrad.codegen import get_program
  with Context(PCONTIG=2, DEVECTORIZE=2, SPEC=0):
    out = tree_traversal(forest_t, val_t, height, rounds)
    sink = out.schedule()[-1].ast
    prg = get_program(sink, VLIWRenderer())
  # *** run on Machine and compare ***
  # NOTE: the scratch size needs to be reduced to 1536 when you have a register allocator
  # render() returned repr(inst); eval it back into the instruction-bundle list
  src = eval(prg.src)
  # highest destination register used across all bundles, plus headroom for one vector
  max_regs = max(t[1] for instr in src for v in instr.values() for t in v if len(t) > 1) + 8
  print(f"{max_regs:5d} regs used" + ("" if max_regs <= 1536 else " <-- WARNING: TOO MANY REGISTERS, MUST BE <= 1536"))
  machine = problem.Machine(mem, src, problem.DebugInfo(scratch_map={}), n_cores=1, trace=False, scratch_size=max_regs)
  machine.run()
  print(f"ran for {machine.cycle:5d} cycles" + ("" if machine.cycle <= 1363 else " <-- EVEN CLAUDE GOT 1363"))
  # compare to reference
  ref_mem = mem.copy()
  for _ in problem.reference_kernel2(ref_mem, {}): pass
  assert machine.mem[mem[6]:mem[6]+mem[2]] == ref_mem[mem[6]:mem[6]+mem[2]]
  print("compare passed!")
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "examples/anthropic_challenge.py",
"license": "MIT License",
"lines": 164,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
tinygrad/tinygrad:tinygrad/runtime/autogen/amdgpu_drm.py | # mypy: disable-error-code="empty-body"
from __future__ import annotations
import ctypes
from typing import Annotated, Literal, TypeAlias
from tinygrad.runtime.support.c import _IO, _IOW, _IOR, _IOWR
from tinygrad.runtime.support import c
# 32-bit typedefs mirroring the kernel's drm uapi headers (generated bindings)
drm_handle_t: TypeAlias = Annotated[int, ctypes.c_uint32]
drm_context_t: TypeAlias = Annotated[int, ctypes.c_uint32]
drm_drawable_t: TypeAlias = Annotated[int, ctypes.c_uint32]
drm_magic_t: TypeAlias = Annotated[int, ctypes.c_uint32]
# Generated record: each field is Annotated[ctype, byte_offset]; SIZE is sizeof(struct).
# Four u16s at offsets 0/2/4/6 — a clip rectangle as two corner points.
@c.record
class struct_drm_clip_rect(c.Struct):
  SIZE = 8
  x1: Annotated[Annotated[int, ctypes.c_uint16], 0]
  y1: Annotated[Annotated[int, ctypes.c_uint16], 2]
  x2: Annotated[Annotated[int, ctypes.c_uint16], 4]
  y2: Annotated[Annotated[int, ctypes.c_uint16], 6]
@c.record
class struct_drm_drawable_info(c.Struct):
SIZE = 16
num_rects: Annotated[Annotated[int, ctypes.c_uint32], 0]
rects: Annotated[c.POINTER[struct_drm_clip_rect], 8]
@c.record
class struct_drm_tex_region(c.Struct):
SIZE = 8
next: Annotated[Annotated[int, ctypes.c_ubyte], 0]
prev: Annotated[Annotated[int, ctypes.c_ubyte], 1]
in_use: Annotated[Annotated[int, ctypes.c_ubyte], 2]
padding: Annotated[Annotated[int, ctypes.c_ubyte], 3]
age: Annotated[Annotated[int, ctypes.c_uint32], 4]
@c.record
class struct_drm_hw_lock(c.Struct):
SIZE = 64
lock: Annotated[Annotated[int, ctypes.c_uint32], 0]
padding: Annotated[c.Array[Annotated[bytes, ctypes.c_char], Literal[60]], 4]
@c.record
class struct_drm_version(c.Struct):
SIZE = 64
version_major: Annotated[Annotated[int, ctypes.c_int32], 0]
version_minor: Annotated[Annotated[int, ctypes.c_int32], 4]
version_patchlevel: Annotated[Annotated[int, ctypes.c_int32], 8]
name_len: Annotated[Annotated[int, ctypes.c_uint64], 16]
name: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 24]
date_len: Annotated[Annotated[int, ctypes.c_uint64], 32]
date: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 40]
desc_len: Annotated[Annotated[int, ctypes.c_uint64], 48]
desc: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 56]
__kernel_size_t: TypeAlias = Annotated[int, ctypes.c_uint64]
@c.record
class struct_drm_unique(c.Struct):
SIZE = 16
unique_len: Annotated[Annotated[int, ctypes.c_uint64], 0]
unique: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 8]
@c.record
class struct_drm_list(c.Struct):
SIZE = 16
count: Annotated[Annotated[int, ctypes.c_int32], 0]
version: Annotated[c.POINTER[struct_drm_version], 8]
@c.record
class struct_drm_block(c.Struct):
SIZE = 4
unused: Annotated[Annotated[int, ctypes.c_int32], 0]
@c.record
class struct_drm_control(c.Struct):
SIZE = 8
func: Annotated[struct_drm_control_func, 0]
irq: Annotated[Annotated[int, ctypes.c_int32], 4]
class struct_drm_control_func(Annotated[int, ctypes.c_uint32], c.Enum): pass
DRM_ADD_COMMAND = struct_drm_control_func.define('DRM_ADD_COMMAND', 0)
DRM_RM_COMMAND = struct_drm_control_func.define('DRM_RM_COMMAND', 1)
DRM_INST_HANDLER = struct_drm_control_func.define('DRM_INST_HANDLER', 2)
DRM_UNINST_HANDLER = struct_drm_control_func.define('DRM_UNINST_HANDLER', 3)
class enum_drm_map_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
_DRM_FRAME_BUFFER = enum_drm_map_type.define('_DRM_FRAME_BUFFER', 0)
_DRM_REGISTERS = enum_drm_map_type.define('_DRM_REGISTERS', 1)
_DRM_SHM = enum_drm_map_type.define('_DRM_SHM', 2)
_DRM_AGP = enum_drm_map_type.define('_DRM_AGP', 3)
_DRM_SCATTER_GATHER = enum_drm_map_type.define('_DRM_SCATTER_GATHER', 4)
_DRM_CONSISTENT = enum_drm_map_type.define('_DRM_CONSISTENT', 5)
class enum_drm_map_flags(Annotated[int, ctypes.c_uint32], c.Enum): pass
_DRM_RESTRICTED = enum_drm_map_flags.define('_DRM_RESTRICTED', 1)
_DRM_READ_ONLY = enum_drm_map_flags.define('_DRM_READ_ONLY', 2)
_DRM_LOCKED = enum_drm_map_flags.define('_DRM_LOCKED', 4)
_DRM_KERNEL = enum_drm_map_flags.define('_DRM_KERNEL', 8)
_DRM_WRITE_COMBINING = enum_drm_map_flags.define('_DRM_WRITE_COMBINING', 16)
_DRM_CONTAINS_LOCK = enum_drm_map_flags.define('_DRM_CONTAINS_LOCK', 32)
_DRM_REMOVABLE = enum_drm_map_flags.define('_DRM_REMOVABLE', 64)
_DRM_DRIVER = enum_drm_map_flags.define('_DRM_DRIVER', 128)
@c.record
class struct_drm_ctx_priv_map(c.Struct):
SIZE = 16
ctx_id: Annotated[Annotated[int, ctypes.c_uint32], 0]
handle: Annotated[ctypes.c_void_p, 8]
@c.record
class struct_drm_map(c.Struct):
SIZE = 40
offset: Annotated[Annotated[int, ctypes.c_uint64], 0]
size: Annotated[Annotated[int, ctypes.c_uint64], 8]
type: Annotated[enum_drm_map_type, 16]
flags: Annotated[enum_drm_map_flags, 20]
handle: Annotated[ctypes.c_void_p, 24]
mtrr: Annotated[Annotated[int, ctypes.c_int32], 32]
@c.record
class struct_drm_client(c.Struct):
SIZE = 40
idx: Annotated[Annotated[int, ctypes.c_int32], 0]
auth: Annotated[Annotated[int, ctypes.c_int32], 4]
pid: Annotated[Annotated[int, ctypes.c_uint64], 8]
uid: Annotated[Annotated[int, ctypes.c_uint64], 16]
magic: Annotated[Annotated[int, ctypes.c_uint64], 24]
iocs: Annotated[Annotated[int, ctypes.c_uint64], 32]
class enum_drm_stat_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
_DRM_STAT_LOCK = enum_drm_stat_type.define('_DRM_STAT_LOCK', 0)
_DRM_STAT_OPENS = enum_drm_stat_type.define('_DRM_STAT_OPENS', 1)
_DRM_STAT_CLOSES = enum_drm_stat_type.define('_DRM_STAT_CLOSES', 2)
_DRM_STAT_IOCTLS = enum_drm_stat_type.define('_DRM_STAT_IOCTLS', 3)
_DRM_STAT_LOCKS = enum_drm_stat_type.define('_DRM_STAT_LOCKS', 4)
_DRM_STAT_UNLOCKS = enum_drm_stat_type.define('_DRM_STAT_UNLOCKS', 5)
_DRM_STAT_VALUE = enum_drm_stat_type.define('_DRM_STAT_VALUE', 6)
_DRM_STAT_BYTE = enum_drm_stat_type.define('_DRM_STAT_BYTE', 7)
_DRM_STAT_COUNT = enum_drm_stat_type.define('_DRM_STAT_COUNT', 8)
_DRM_STAT_IRQ = enum_drm_stat_type.define('_DRM_STAT_IRQ', 9)
_DRM_STAT_PRIMARY = enum_drm_stat_type.define('_DRM_STAT_PRIMARY', 10)
_DRM_STAT_SECONDARY = enum_drm_stat_type.define('_DRM_STAT_SECONDARY', 11)
_DRM_STAT_DMA = enum_drm_stat_type.define('_DRM_STAT_DMA', 12)
_DRM_STAT_SPECIAL = enum_drm_stat_type.define('_DRM_STAT_SPECIAL', 13)
_DRM_STAT_MISSED = enum_drm_stat_type.define('_DRM_STAT_MISSED', 14)
@c.record
class struct_drm_stats(c.Struct):
SIZE = 248
count: Annotated[Annotated[int, ctypes.c_uint64], 0]
data: Annotated[c.Array[struct_drm_stats_data, Literal[15]], 8]
@c.record
class struct_drm_stats_data(c.Struct):
SIZE = 16
value: Annotated[Annotated[int, ctypes.c_uint64], 0]
type: Annotated[enum_drm_stat_type, 8]
class enum_drm_lock_flags(Annotated[int, ctypes.c_uint32], c.Enum): pass
_DRM_LOCK_READY = enum_drm_lock_flags.define('_DRM_LOCK_READY', 1)
_DRM_LOCK_QUIESCENT = enum_drm_lock_flags.define('_DRM_LOCK_QUIESCENT', 2)
_DRM_LOCK_FLUSH = enum_drm_lock_flags.define('_DRM_LOCK_FLUSH', 4)
_DRM_LOCK_FLUSH_ALL = enum_drm_lock_flags.define('_DRM_LOCK_FLUSH_ALL', 8)
_DRM_HALT_ALL_QUEUES = enum_drm_lock_flags.define('_DRM_HALT_ALL_QUEUES', 16)
_DRM_HALT_CUR_QUEUES = enum_drm_lock_flags.define('_DRM_HALT_CUR_QUEUES', 32)
@c.record
class struct_drm_lock(c.Struct):
SIZE = 8
context: Annotated[Annotated[int, ctypes.c_int32], 0]
flags: Annotated[enum_drm_lock_flags, 4]
class enum_drm_dma_flags(Annotated[int, ctypes.c_uint32], c.Enum): pass
_DRM_DMA_BLOCK = enum_drm_dma_flags.define('_DRM_DMA_BLOCK', 1)
_DRM_DMA_WHILE_LOCKED = enum_drm_dma_flags.define('_DRM_DMA_WHILE_LOCKED', 2)
_DRM_DMA_PRIORITY = enum_drm_dma_flags.define('_DRM_DMA_PRIORITY', 4)
_DRM_DMA_WAIT = enum_drm_dma_flags.define('_DRM_DMA_WAIT', 16)
_DRM_DMA_SMALLER_OK = enum_drm_dma_flags.define('_DRM_DMA_SMALLER_OK', 32)
_DRM_DMA_LARGER_OK = enum_drm_dma_flags.define('_DRM_DMA_LARGER_OK', 64)
@c.record
class struct_drm_buf_desc(c.Struct):
SIZE = 32
count: Annotated[Annotated[int, ctypes.c_int32], 0]
size: Annotated[Annotated[int, ctypes.c_int32], 4]
low_mark: Annotated[Annotated[int, ctypes.c_int32], 8]
high_mark: Annotated[Annotated[int, ctypes.c_int32], 12]
flags: Annotated[struct_drm_buf_desc_flags, 16]
agp_start: Annotated[Annotated[int, ctypes.c_uint64], 24]
class struct_drm_buf_desc_flags(Annotated[int, ctypes.c_uint32], c.Enum): pass
_DRM_PAGE_ALIGN = struct_drm_buf_desc_flags.define('_DRM_PAGE_ALIGN', 1)
_DRM_AGP_BUFFER = struct_drm_buf_desc_flags.define('_DRM_AGP_BUFFER', 2)
_DRM_SG_BUFFER = struct_drm_buf_desc_flags.define('_DRM_SG_BUFFER', 4)
_DRM_FB_BUFFER = struct_drm_buf_desc_flags.define('_DRM_FB_BUFFER', 8)
_DRM_PCI_BUFFER_RO = struct_drm_buf_desc_flags.define('_DRM_PCI_BUFFER_RO', 16)
@c.record
class struct_drm_buf_info(c.Struct):
SIZE = 16
count: Annotated[Annotated[int, ctypes.c_int32], 0]
list: Annotated[c.POINTER[struct_drm_buf_desc], 8]
@c.record
class struct_drm_buf_free(c.Struct):
SIZE = 16
count: Annotated[Annotated[int, ctypes.c_int32], 0]
list: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 8]
@c.record
class struct_drm_buf_pub(c.Struct):
SIZE = 24
idx: Annotated[Annotated[int, ctypes.c_int32], 0]
total: Annotated[Annotated[int, ctypes.c_int32], 4]
used: Annotated[Annotated[int, ctypes.c_int32], 8]
address: Annotated[ctypes.c_void_p, 16]
@c.record
class struct_drm_buf_map(c.Struct):
SIZE = 24
count: Annotated[Annotated[int, ctypes.c_int32], 0]
virtual: Annotated[ctypes.c_void_p, 8]
list: Annotated[c.POINTER[struct_drm_buf_pub], 16]
@c.record
class struct_drm_dma(c.Struct):
SIZE = 64
context: Annotated[Annotated[int, ctypes.c_int32], 0]
send_count: Annotated[Annotated[int, ctypes.c_int32], 4]
send_indices: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 8]
send_sizes: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 16]
flags: Annotated[enum_drm_dma_flags, 24]
request_count: Annotated[Annotated[int, ctypes.c_int32], 28]
request_size: Annotated[Annotated[int, ctypes.c_int32], 32]
request_indices: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 40]
request_sizes: Annotated[c.POINTER[Annotated[int, ctypes.c_int32]], 48]
granted_count: Annotated[Annotated[int, ctypes.c_int32], 56]
class enum_drm_ctx_flags(Annotated[int, ctypes.c_uint32], c.Enum): pass
_DRM_CONTEXT_PRESERVED = enum_drm_ctx_flags.define('_DRM_CONTEXT_PRESERVED', 1)
_DRM_CONTEXT_2DONLY = enum_drm_ctx_flags.define('_DRM_CONTEXT_2DONLY', 2)
@c.record
class struct_drm_ctx(c.Struct):
SIZE = 8
handle: Annotated[drm_context_t, 0]
flags: Annotated[enum_drm_ctx_flags, 4]
@c.record
class struct_drm_ctx_res(c.Struct):
SIZE = 16
count: Annotated[Annotated[int, ctypes.c_int32], 0]
contexts: Annotated[c.POINTER[struct_drm_ctx], 8]
@c.record
class struct_drm_draw(c.Struct):
SIZE = 4
handle: Annotated[drm_drawable_t, 0]
class drm_drawable_info_type_t(Annotated[int, ctypes.c_uint32], c.Enum): pass
DRM_DRAWABLE_CLIPRECTS = drm_drawable_info_type_t.define('DRM_DRAWABLE_CLIPRECTS', 0)
@c.record
class struct_drm_update_draw(c.Struct):
SIZE = 24
handle: Annotated[drm_drawable_t, 0]
type: Annotated[Annotated[int, ctypes.c_uint32], 4]
num: Annotated[Annotated[int, ctypes.c_uint32], 8]
data: Annotated[Annotated[int, ctypes.c_uint64], 16]
@c.record
class struct_drm_auth(c.Struct):
SIZE = 4
magic: Annotated[drm_magic_t, 0]
@c.record
class struct_drm_irq_busid(c.Struct):
SIZE = 16
irq: Annotated[Annotated[int, ctypes.c_int32], 0]
busnum: Annotated[Annotated[int, ctypes.c_int32], 4]
devnum: Annotated[Annotated[int, ctypes.c_int32], 8]
funcnum: Annotated[Annotated[int, ctypes.c_int32], 12]
class enum_drm_vblank_seq_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
_DRM_VBLANK_ABSOLUTE = enum_drm_vblank_seq_type.define('_DRM_VBLANK_ABSOLUTE', 0)
_DRM_VBLANK_RELATIVE = enum_drm_vblank_seq_type.define('_DRM_VBLANK_RELATIVE', 1)
_DRM_VBLANK_HIGH_CRTC_MASK = enum_drm_vblank_seq_type.define('_DRM_VBLANK_HIGH_CRTC_MASK', 62)
_DRM_VBLANK_EVENT = enum_drm_vblank_seq_type.define('_DRM_VBLANK_EVENT', 67108864)
_DRM_VBLANK_FLIP = enum_drm_vblank_seq_type.define('_DRM_VBLANK_FLIP', 134217728)
_DRM_VBLANK_NEXTONMISS = enum_drm_vblank_seq_type.define('_DRM_VBLANK_NEXTONMISS', 268435456)
_DRM_VBLANK_SECONDARY = enum_drm_vblank_seq_type.define('_DRM_VBLANK_SECONDARY', 536870912)
_DRM_VBLANK_SIGNAL = enum_drm_vblank_seq_type.define('_DRM_VBLANK_SIGNAL', 1073741824)
@c.record
class struct_drm_wait_vblank_request(c.Struct):
SIZE = 16
type: Annotated[enum_drm_vblank_seq_type, 0]
sequence: Annotated[Annotated[int, ctypes.c_uint32], 4]
signal: Annotated[Annotated[int, ctypes.c_uint64], 8]
@c.record
class struct_drm_wait_vblank_reply(c.Struct):
SIZE = 24
type: Annotated[enum_drm_vblank_seq_type, 0]
sequence: Annotated[Annotated[int, ctypes.c_uint32], 4]
tval_sec: Annotated[Annotated[int, ctypes.c_int64], 8]
tval_usec: Annotated[Annotated[int, ctypes.c_int64], 16]
@c.record
class union_drm_wait_vblank(c.Struct):
SIZE = 24
request: Annotated[struct_drm_wait_vblank_request, 0]
reply: Annotated[struct_drm_wait_vblank_reply, 0]
@c.record
class struct_drm_modeset_ctl(c.Struct):
SIZE = 8
crtc: Annotated[Annotated[int, ctypes.c_uint32], 0]
cmd: Annotated[Annotated[int, ctypes.c_uint32], 4]
__u32: TypeAlias = Annotated[int, ctypes.c_uint32]
@c.record
class struct_drm_agp_mode(c.Struct):
SIZE = 8
mode: Annotated[Annotated[int, ctypes.c_uint64], 0]
@c.record
class struct_drm_agp_buffer(c.Struct):
SIZE = 32
size: Annotated[Annotated[int, ctypes.c_uint64], 0]
handle: Annotated[Annotated[int, ctypes.c_uint64], 8]
type: Annotated[Annotated[int, ctypes.c_uint64], 16]
physical: Annotated[Annotated[int, ctypes.c_uint64], 24]
@c.record
class struct_drm_agp_binding(c.Struct):
SIZE = 16
handle: Annotated[Annotated[int, ctypes.c_uint64], 0]
offset: Annotated[Annotated[int, ctypes.c_uint64], 8]
@c.record
class struct_drm_agp_info(c.Struct):
SIZE = 56
agp_version_major: Annotated[Annotated[int, ctypes.c_int32], 0]
agp_version_minor: Annotated[Annotated[int, ctypes.c_int32], 4]
mode: Annotated[Annotated[int, ctypes.c_uint64], 8]
aperture_base: Annotated[Annotated[int, ctypes.c_uint64], 16]
aperture_size: Annotated[Annotated[int, ctypes.c_uint64], 24]
memory_allowed: Annotated[Annotated[int, ctypes.c_uint64], 32]
memory_used: Annotated[Annotated[int, ctypes.c_uint64], 40]
id_vendor: Annotated[Annotated[int, ctypes.c_uint16], 48]
id_device: Annotated[Annotated[int, ctypes.c_uint16], 50]
@c.record
class struct_drm_scatter_gather(c.Struct):
SIZE = 16
size: Annotated[Annotated[int, ctypes.c_uint64], 0]
handle: Annotated[Annotated[int, ctypes.c_uint64], 8]
@c.record
class struct_drm_set_version(c.Struct):
SIZE = 16
drm_di_major: Annotated[Annotated[int, ctypes.c_int32], 0]
drm_di_minor: Annotated[Annotated[int, ctypes.c_int32], 4]
drm_dd_major: Annotated[Annotated[int, ctypes.c_int32], 8]
drm_dd_minor: Annotated[Annotated[int, ctypes.c_int32], 12]
@c.record
class struct_drm_gem_close(c.Struct):
SIZE = 8
handle: Annotated[Annotated[int, ctypes.c_uint32], 0]
pad: Annotated[Annotated[int, ctypes.c_uint32], 4]
@c.record
class struct_drm_gem_flink(c.Struct):
SIZE = 8
handle: Annotated[Annotated[int, ctypes.c_uint32], 0]
name: Annotated[Annotated[int, ctypes.c_uint32], 4]
@c.record
class struct_drm_gem_open(c.Struct):
SIZE = 16
name: Annotated[Annotated[int, ctypes.c_uint32], 0]
handle: Annotated[Annotated[int, ctypes.c_uint32], 4]
size: Annotated[Annotated[int, ctypes.c_uint64], 8]
__u64: TypeAlias = Annotated[int, ctypes.c_uint64]
@c.record
class struct_drm_get_cap(c.Struct):
SIZE = 16
capability: Annotated[Annotated[int, ctypes.c_uint64], 0]
value: Annotated[Annotated[int, ctypes.c_uint64], 8]
@c.record
class struct_drm_set_client_cap(c.Struct):
SIZE = 16
capability: Annotated[Annotated[int, ctypes.c_uint64], 0]
value: Annotated[Annotated[int, ctypes.c_uint64], 8]
@c.record
class struct_drm_prime_handle(c.Struct):
SIZE = 12
handle: Annotated[Annotated[int, ctypes.c_uint32], 0]
flags: Annotated[Annotated[int, ctypes.c_uint32], 4]
fd: Annotated[Annotated[int, ctypes.c_int32], 8]
__s32: TypeAlias = Annotated[int, ctypes.c_int32]
@c.record
class struct_drm_syncobj_create(c.Struct):
SIZE = 8
handle: Annotated[Annotated[int, ctypes.c_uint32], 0]
flags: Annotated[Annotated[int, ctypes.c_uint32], 4]
@c.record
class struct_drm_syncobj_destroy(c.Struct):
SIZE = 8
handle: Annotated[Annotated[int, ctypes.c_uint32], 0]
pad: Annotated[Annotated[int, ctypes.c_uint32], 4]
@c.record
class struct_drm_syncobj_handle(c.Struct):
SIZE = 16
handle: Annotated[Annotated[int, ctypes.c_uint32], 0]
flags: Annotated[Annotated[int, ctypes.c_uint32], 4]
fd: Annotated[Annotated[int, ctypes.c_int32], 8]
pad: Annotated[Annotated[int, ctypes.c_uint32], 12]
@c.record
class struct_drm_syncobj_transfer(c.Struct):
SIZE = 32
src_handle: Annotated[Annotated[int, ctypes.c_uint32], 0]
dst_handle: Annotated[Annotated[int, ctypes.c_uint32], 4]
src_point: Annotated[Annotated[int, ctypes.c_uint64], 8]
dst_point: Annotated[Annotated[int, ctypes.c_uint64], 16]
flags: Annotated[Annotated[int, ctypes.c_uint32], 24]
pad: Annotated[Annotated[int, ctypes.c_uint32], 28]
@c.record
class struct_drm_syncobj_wait(c.Struct):
SIZE = 40
handles: Annotated[Annotated[int, ctypes.c_uint64], 0]
timeout_nsec: Annotated[Annotated[int, ctypes.c_int64], 8]
count_handles: Annotated[Annotated[int, ctypes.c_uint32], 16]
flags: Annotated[Annotated[int, ctypes.c_uint32], 20]
first_signaled: Annotated[Annotated[int, ctypes.c_uint32], 24]
pad: Annotated[Annotated[int, ctypes.c_uint32], 28]
deadline_nsec: Annotated[Annotated[int, ctypes.c_uint64], 32]
__s64: TypeAlias = Annotated[int, ctypes.c_int64]
@c.record
class struct_drm_syncobj_timeline_wait(c.Struct):
SIZE = 48
handles: Annotated[Annotated[int, ctypes.c_uint64], 0]
points: Annotated[Annotated[int, ctypes.c_uint64], 8]
timeout_nsec: Annotated[Annotated[int, ctypes.c_int64], 16]
count_handles: Annotated[Annotated[int, ctypes.c_uint32], 24]
flags: Annotated[Annotated[int, ctypes.c_uint32], 28]
first_signaled: Annotated[Annotated[int, ctypes.c_uint32], 32]
pad: Annotated[Annotated[int, ctypes.c_uint32], 36]
deadline_nsec: Annotated[Annotated[int, ctypes.c_uint64], 40]
@c.record
class struct_drm_syncobj_eventfd(c.Struct):
SIZE = 24
handle: Annotated[Annotated[int, ctypes.c_uint32], 0]
flags: Annotated[Annotated[int, ctypes.c_uint32], 4]
point: Annotated[Annotated[int, ctypes.c_uint64], 8]
fd: Annotated[Annotated[int, ctypes.c_int32], 16]
pad: Annotated[Annotated[int, ctypes.c_uint32], 20]
@c.record
class struct_drm_syncobj_array(c.Struct):
SIZE = 16
handles: Annotated[Annotated[int, ctypes.c_uint64], 0]
count_handles: Annotated[Annotated[int, ctypes.c_uint32], 8]
pad: Annotated[Annotated[int, ctypes.c_uint32], 12]
@c.record
class struct_drm_syncobj_timeline_array(c.Struct):
SIZE = 24
handles: Annotated[Annotated[int, ctypes.c_uint64], 0]
points: Annotated[Annotated[int, ctypes.c_uint64], 8]
count_handles: Annotated[Annotated[int, ctypes.c_uint32], 16]
flags: Annotated[Annotated[int, ctypes.c_uint32], 20]
@c.record
class struct_drm_crtc_get_sequence(c.Struct):
SIZE = 24
crtc_id: Annotated[Annotated[int, ctypes.c_uint32], 0]
active: Annotated[Annotated[int, ctypes.c_uint32], 4]
sequence: Annotated[Annotated[int, ctypes.c_uint64], 8]
sequence_ns: Annotated[Annotated[int, ctypes.c_int64], 16]
@c.record
class struct_drm_crtc_queue_sequence(c.Struct):
SIZE = 24
crtc_id: Annotated[Annotated[int, ctypes.c_uint32], 0]
flags: Annotated[Annotated[int, ctypes.c_uint32], 4]
sequence: Annotated[Annotated[int, ctypes.c_uint64], 8]
user_data: Annotated[Annotated[int, ctypes.c_uint64], 16]
@c.record
class struct_drm_event(c.Struct):
SIZE = 8
type: Annotated[Annotated[int, ctypes.c_uint32], 0]
length: Annotated[Annotated[int, ctypes.c_uint32], 4]
@c.record
class struct_drm_event_vblank(c.Struct):
SIZE = 32
base: Annotated[struct_drm_event, 0]
user_data: Annotated[Annotated[int, ctypes.c_uint64], 8]
tv_sec: Annotated[Annotated[int, ctypes.c_uint32], 16]
tv_usec: Annotated[Annotated[int, ctypes.c_uint32], 20]
sequence: Annotated[Annotated[int, ctypes.c_uint32], 24]
crtc_id: Annotated[Annotated[int, ctypes.c_uint32], 28]
@c.record
class struct_drm_event_crtc_sequence(c.Struct):
SIZE = 32
base: Annotated[struct_drm_event, 0]
user_data: Annotated[Annotated[int, ctypes.c_uint64], 8]
time_ns: Annotated[Annotated[int, ctypes.c_int64], 16]
sequence: Annotated[Annotated[int, ctypes.c_uint64], 24]
drm_clip_rect_t: TypeAlias = struct_drm_clip_rect
drm_drawable_info_t: TypeAlias = struct_drm_drawable_info
drm_tex_region_t: TypeAlias = struct_drm_tex_region
drm_hw_lock_t: TypeAlias = struct_drm_hw_lock
drm_version_t: TypeAlias = struct_drm_version
drm_unique_t: TypeAlias = struct_drm_unique
drm_list_t: TypeAlias = struct_drm_list
drm_block_t: TypeAlias = struct_drm_block
drm_control_t: TypeAlias = struct_drm_control
drm_map_type_t: TypeAlias = enum_drm_map_type
drm_map_flags_t: TypeAlias = enum_drm_map_flags
drm_ctx_priv_map_t: TypeAlias = struct_drm_ctx_priv_map
drm_map_t: TypeAlias = struct_drm_map
drm_client_t: TypeAlias = struct_drm_client
drm_stat_type_t: TypeAlias = enum_drm_stat_type
drm_stats_t: TypeAlias = struct_drm_stats
drm_lock_flags_t: TypeAlias = enum_drm_lock_flags
drm_lock_t: TypeAlias = struct_drm_lock
drm_dma_flags_t: TypeAlias = enum_drm_dma_flags
drm_buf_desc_t: TypeAlias = struct_drm_buf_desc
drm_buf_info_t: TypeAlias = struct_drm_buf_info
drm_buf_free_t: TypeAlias = struct_drm_buf_free
drm_buf_pub_t: TypeAlias = struct_drm_buf_pub
drm_buf_map_t: TypeAlias = struct_drm_buf_map
drm_dma_t: TypeAlias = struct_drm_dma
drm_wait_vblank_t: TypeAlias = union_drm_wait_vblank
drm_agp_mode_t: TypeAlias = struct_drm_agp_mode
drm_ctx_flags_t: TypeAlias = enum_drm_ctx_flags
drm_ctx_t: TypeAlias = struct_drm_ctx
drm_ctx_res_t: TypeAlias = struct_drm_ctx_res
drm_draw_t: TypeAlias = struct_drm_draw
drm_update_draw_t: TypeAlias = struct_drm_update_draw
drm_auth_t: TypeAlias = struct_drm_auth
drm_irq_busid_t: TypeAlias = struct_drm_irq_busid
drm_vblank_seq_type_t: TypeAlias = enum_drm_vblank_seq_type
drm_agp_buffer_t: TypeAlias = struct_drm_agp_buffer
drm_agp_binding_t: TypeAlias = struct_drm_agp_binding
drm_agp_info_t: TypeAlias = struct_drm_agp_info
drm_scatter_gather_t: TypeAlias = struct_drm_scatter_gather
drm_set_version_t: TypeAlias = struct_drm_set_version
@c.record
class struct_drm_amdgpu_gem_create_in(c.Struct):
SIZE = 32
bo_size: Annotated[Annotated[int, ctypes.c_uint64], 0]
alignment: Annotated[Annotated[int, ctypes.c_uint64], 8]
domains: Annotated[Annotated[int, ctypes.c_uint64], 16]
domain_flags: Annotated[Annotated[int, ctypes.c_uint64], 24]
@c.record
class struct_drm_amdgpu_gem_create_out(c.Struct):
SIZE = 8
handle: Annotated[Annotated[int, ctypes.c_uint32], 0]
_pad: Annotated[Annotated[int, ctypes.c_uint32], 4]
@c.record
class union_drm_amdgpu_gem_create(c.Struct):
SIZE = 32
_in: Annotated[struct_drm_amdgpu_gem_create_in, 0]
out: Annotated[struct_drm_amdgpu_gem_create_out, 0]
@c.record
class struct_drm_amdgpu_bo_list_in(c.Struct):
SIZE = 24
operation: Annotated[Annotated[int, ctypes.c_uint32], 0]
list_handle: Annotated[Annotated[int, ctypes.c_uint32], 4]
bo_number: Annotated[Annotated[int, ctypes.c_uint32], 8]
bo_info_size: Annotated[Annotated[int, ctypes.c_uint32], 12]
bo_info_ptr: Annotated[Annotated[int, ctypes.c_uint64], 16]
@c.record
class struct_drm_amdgpu_bo_list_entry(c.Struct):
SIZE = 8
bo_handle: Annotated[Annotated[int, ctypes.c_uint32], 0]
bo_priority: Annotated[Annotated[int, ctypes.c_uint32], 4]
@c.record
class struct_drm_amdgpu_bo_list_out(c.Struct):
SIZE = 8
list_handle: Annotated[Annotated[int, ctypes.c_uint32], 0]
_pad: Annotated[Annotated[int, ctypes.c_uint32], 4]
@c.record
class union_drm_amdgpu_bo_list(c.Struct):
SIZE = 24
_in: Annotated[struct_drm_amdgpu_bo_list_in, 0]
out: Annotated[struct_drm_amdgpu_bo_list_out, 0]
@c.record
class struct_drm_amdgpu_ctx_in(c.Struct):
SIZE = 16
op: Annotated[Annotated[int, ctypes.c_uint32], 0]
flags: Annotated[Annotated[int, ctypes.c_uint32], 4]
ctx_id: Annotated[Annotated[int, ctypes.c_uint32], 8]
priority: Annotated[Annotated[int, ctypes.c_int32], 12]
@c.record
class union_drm_amdgpu_ctx_out(c.Struct):
SIZE = 16
alloc: Annotated[union_drm_amdgpu_ctx_out_alloc, 0]
state: Annotated[union_drm_amdgpu_ctx_out_state, 0]
pstate: Annotated[union_drm_amdgpu_ctx_out_pstate, 0]
@c.record
class union_drm_amdgpu_ctx_out_alloc(c.Struct):
SIZE = 8
ctx_id: Annotated[Annotated[int, ctypes.c_uint32], 0]
_pad: Annotated[Annotated[int, ctypes.c_uint32], 4]
@c.record
class union_drm_amdgpu_ctx_out_state(c.Struct):
SIZE = 16
flags: Annotated[Annotated[int, ctypes.c_uint64], 0]
hangs: Annotated[Annotated[int, ctypes.c_uint32], 8]
reset_status: Annotated[Annotated[int, ctypes.c_uint32], 12]
@c.record
class union_drm_amdgpu_ctx_out_pstate(c.Struct):
SIZE = 8
flags: Annotated[Annotated[int, ctypes.c_uint32], 0]
_pad: Annotated[Annotated[int, ctypes.c_uint32], 4]
@c.record
class union_drm_amdgpu_ctx(c.Struct):
SIZE = 16
_in: Annotated[struct_drm_amdgpu_ctx_in, 0]
out: Annotated[union_drm_amdgpu_ctx_out, 0]
@c.record
class struct_drm_amdgpu_userq_in(c.Struct):
SIZE = 72
op: Annotated[Annotated[int, ctypes.c_uint32], 0]
queue_id: Annotated[Annotated[int, ctypes.c_uint32], 4]
ip_type: Annotated[Annotated[int, ctypes.c_uint32], 8]
doorbell_handle: Annotated[Annotated[int, ctypes.c_uint32], 12]
doorbell_offset: Annotated[Annotated[int, ctypes.c_uint32], 16]
flags: Annotated[Annotated[int, ctypes.c_uint32], 20]
queue_va: Annotated[Annotated[int, ctypes.c_uint64], 24]
queue_size: Annotated[Annotated[int, ctypes.c_uint64], 32]
rptr_va: Annotated[Annotated[int, ctypes.c_uint64], 40]
wptr_va: Annotated[Annotated[int, ctypes.c_uint64], 48]
mqd: Annotated[Annotated[int, ctypes.c_uint64], 56]
mqd_size: Annotated[Annotated[int, ctypes.c_uint64], 64]
# NOTE(review): auto-generated ctypes-style layout records (amdgpu user-queue,
# sem, vm and sched ioctl payloads).  Each @c.record class describes one C
# struct/union: SIZE is the total byte size and every field annotation is
# Annotated[ctype, byte_offset].  Offsets/sizes must match the kernel uAPI
# exactly -- regenerate rather than hand-edit.

# Output half of the userq ioctl: kernel-assigned queue id plus padding.
@c.record
class struct_drm_amdgpu_userq_out(c.Struct):
  SIZE = 8
  queue_id: Annotated[Annotated[int, ctypes.c_uint32], 0]
  _pad: Annotated[Annotated[int, ctypes.c_uint32], 4]

# C union: both members share offset 0; SIZE is the larger (input) half.
@c.record
class union_drm_amdgpu_userq(c.Struct):
  SIZE = 72
  _in: Annotated[struct_drm_amdgpu_userq_in, 0]
  out: Annotated[struct_drm_amdgpu_userq_out, 0]

# Per-IP MQD (memory queue descriptor) blobs referenced by userq creation.
@c.record
class struct_drm_amdgpu_userq_mqd_gfx11(c.Struct):
  SIZE = 16
  shadow_va: Annotated[Annotated[int, ctypes.c_uint64], 0]
  csa_va: Annotated[Annotated[int, ctypes.c_uint64], 8]
@c.record
class struct_drm_amdgpu_userq_mqd_sdma_gfx11(c.Struct):
  SIZE = 8
  csa_va: Annotated[Annotated[int, ctypes.c_uint64], 0]
@c.record
class struct_drm_amdgpu_userq_mqd_compute_gfx11(c.Struct):
  SIZE = 8
  eop_va: Annotated[Annotated[int, ctypes.c_uint64], 0]

# userq signal payload: user pointers (u64) to syncobj/BO handle arrays
# plus their element counts.
@c.record
class struct_drm_amdgpu_userq_signal(c.Struct):
  SIZE = 48
  queue_id: Annotated[Annotated[int, ctypes.c_uint32], 0]
  pad: Annotated[Annotated[int, ctypes.c_uint32], 4]
  syncobj_handles: Annotated[Annotated[int, ctypes.c_uint64], 8]
  num_syncobj_handles: Annotated[Annotated[int, ctypes.c_uint64], 16]
  bo_read_handles: Annotated[Annotated[int, ctypes.c_uint64], 24]
  bo_write_handles: Annotated[Annotated[int, ctypes.c_uint64], 32]
  num_bo_read_handles: Annotated[Annotated[int, ctypes.c_uint32], 40]
  num_bo_write_handles: Annotated[Annotated[int, ctypes.c_uint32], 44]

# (va, value) pair describing one userq fence.
@c.record
class struct_drm_amdgpu_userq_fence_info(c.Struct):
  SIZE = 16
  va: Annotated[Annotated[int, ctypes.c_uint64], 0]
  value: Annotated[Annotated[int, ctypeses := ctypes and ctypes.c_uint64], 8] if False else None  # placeholder removed below
# NOTE(review): auto-generated GEM / wait / fence ioctl payload records.
# SIZE is total byte size; each field is Annotated[ctype, byte_offset].
# Classes named union_* model C unions: members overlap at offset 0.

# Pin a user-space address range as a GEM object.
@c.record
class struct_drm_amdgpu_gem_userptr(c.Struct):
  SIZE = 24
  addr: Annotated[Annotated[int, ctypes.c_uint64], 0]
  size: Annotated[Annotated[int, ctypes.c_uint64], 8]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 16]
  handle: Annotated[Annotated[int, ctypes.c_uint32], 20]
@c.record
class struct_drm_amdgpu_gem_dgma(c.Struct):
  SIZE = 24
  addr: Annotated[Annotated[int, ctypes.c_uint64], 0]
  size: Annotated[Annotated[int, ctypes.c_uint64], 8]
  op: Annotated[Annotated[int, ctypes.c_uint32], 16]
  handle: Annotated[Annotated[int, ctypes.c_uint32], 20]
# Get/set per-BO metadata; `data` is the nested record declared just below
# (forward reference -- resolved by c.init_records()).
@c.record
class struct_drm_amdgpu_gem_metadata(c.Struct):
  SIZE = 288
  handle: Annotated[Annotated[int, ctypes.c_uint32], 0]
  op: Annotated[Annotated[int, ctypes.c_uint32], 4]
  data: Annotated[struct_drm_amdgpu_gem_metadata_data, 8]
@c.record
class struct_drm_amdgpu_gem_metadata_data(c.Struct):
  SIZE = 280
  flags: Annotated[Annotated[int, ctypes.c_uint64], 0]
  tiling_info: Annotated[Annotated[int, ctypes.c_uint64], 8]
  data_size_bytes: Annotated[Annotated[int, ctypes.c_uint32], 16]
  # 64 * u32 opaque metadata payload starting at byte 20.
  data: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[64]], 20]
# GEM mmap: handle in, fake mmap offset out (in/out overlap in a union).
@c.record
class struct_drm_amdgpu_gem_mmap_in(c.Struct):
  SIZE = 8
  handle: Annotated[Annotated[int, ctypes.c_uint32], 0]
  _pad: Annotated[Annotated[int, ctypes.c_uint32], 4]
@c.record
class struct_drm_amdgpu_gem_mmap_out(c.Struct):
  SIZE = 8
  addr_ptr: Annotated[Annotated[int, ctypes.c_uint64], 0]
@c.record
class union_drm_amdgpu_gem_mmap(c.Struct):
  SIZE = 8
  _in: Annotated[struct_drm_amdgpu_gem_mmap_in, 0]
  out: Annotated[struct_drm_amdgpu_gem_mmap_out, 0]
# Wait for a BO to go idle (timeout in; status/domain out).
@c.record
class struct_drm_amdgpu_gem_wait_idle_in(c.Struct):
  SIZE = 16
  handle: Annotated[Annotated[int, ctypes.c_uint32], 0]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 4]
  timeout: Annotated[Annotated[int, ctypes.c_uint64], 8]
@c.record
class struct_drm_amdgpu_gem_wait_idle_out(c.Struct):
  SIZE = 8
  status: Annotated[Annotated[int, ctypes.c_uint32], 0]
  domain: Annotated[Annotated[int, ctypes.c_uint32], 4]
@c.record
class union_drm_amdgpu_gem_wait_idle(c.Struct):
  SIZE = 16
  _in: Annotated[struct_drm_amdgpu_gem_wait_idle_in, 0]
  out: Annotated[struct_drm_amdgpu_gem_wait_idle_out, 0]
# Wait on one submitted command stream, identified by (ctx, ip, ring, handle).
@c.record
class struct_drm_amdgpu_wait_cs_in(c.Struct):
  SIZE = 32
  handle: Annotated[Annotated[int, ctypes.c_uint64], 0]
  timeout: Annotated[Annotated[int, ctypes.c_uint64], 8]
  ip_type: Annotated[Annotated[int, ctypes.c_uint32], 16]
  ip_instance: Annotated[Annotated[int, ctypes.c_uint32], 20]
  ring: Annotated[Annotated[int, ctypes.c_uint32], 24]
  ctx_id: Annotated[Annotated[int, ctypes.c_uint32], 28]
@c.record
class struct_drm_amdgpu_wait_cs_out(c.Struct):
  SIZE = 8
  status: Annotated[Annotated[int, ctypes.c_uint64], 0]
@c.record
class union_drm_amdgpu_wait_cs(c.Struct):
  SIZE = 32
  _in: Annotated[struct_drm_amdgpu_wait_cs_in, 0]
  out: Annotated[struct_drm_amdgpu_wait_cs_out, 0]
# Fence identity: (ctx, ip type/instance, ring, sequence number).
@c.record
class struct_drm_amdgpu_fence(c.Struct):
  SIZE = 24
  ctx_id: Annotated[Annotated[int, ctypes.c_uint32], 0]
  ip_type: Annotated[Annotated[int, ctypes.c_uint32], 4]
  ip_instance: Annotated[Annotated[int, ctypes.c_uint32], 8]
  ring: Annotated[Annotated[int, ctypes.c_uint32], 12]
  seq_no: Annotated[Annotated[int, ctypes.c_uint64], 16]
# Wait on many fences; `fences` is a user pointer to an array of the above.
@c.record
class struct_drm_amdgpu_wait_fences_in(c.Struct):
  SIZE = 24
  fences: Annotated[Annotated[int, ctypes.c_uint64], 0]
  fence_count: Annotated[Annotated[int, ctypes.c_uint32], 8]
  wait_all: Annotated[Annotated[int, ctypes.c_uint32], 12]
  timeout_ns: Annotated[Annotated[int, ctypes.c_uint64], 16]
@c.record
class struct_drm_amdgpu_wait_fences_out(c.Struct):
  SIZE = 8
  status: Annotated[Annotated[int, ctypes.c_uint32], 0]
  first_signaled: Annotated[Annotated[int, ctypes.c_uint32], 4]
@c.record
class union_drm_amdgpu_wait_fences(c.Struct):
  SIZE = 24
  _in: Annotated[struct_drm_amdgpu_wait_fences_in, 0]
  out: Annotated[struct_drm_amdgpu_wait_fences_out, 0]
# Generic per-BO operation (op selects the action; value is its argument).
@c.record
class struct_drm_amdgpu_gem_op(c.Struct):
  SIZE = 16
  handle: Annotated[Annotated[int, ctypes.c_uint32], 0]
  op: Annotated[Annotated[int, ctypes.c_uint32], 4]
  value: Annotated[Annotated[int, ctypes.c_uint64], 8]
# GPU virtual-address map/unmap request for a BO.
@c.record
class struct_drm_amdgpu_gem_va(c.Struct):
  SIZE = 64
  handle: Annotated[Annotated[int, ctypes.c_uint32], 0]
  _pad: Annotated[Annotated[int, ctypes.c_uint32], 4]
  operation: Annotated[Annotated[int, ctypes.c_uint32], 8]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 12]
  va_address: Annotated[Annotated[int, ctypes.c_uint64], 16]
  offset_in_bo: Annotated[Annotated[int, ctypes.c_uint64], 24]
  map_size: Annotated[Annotated[int, ctypes.c_uint64], 32]
  vm_timeline_point: Annotated[Annotated[int, ctypes.c_uint64], 40]
  vm_timeline_syncobj_out: Annotated[Annotated[int, ctypes.c_uint32], 48]
  num_syncobj_handles: Annotated[Annotated[int, ctypes.c_uint32], 52]
  input_fence_syncobj_handles: Annotated[Annotated[int, ctypes.c_uint64], 56]
# One CS chunk descriptor: id, length in dwords, pointer to chunk payload.
@c.record
class struct_drm_amdgpu_cs_chunk(c.Struct):
  SIZE = 16
  chunk_id: Annotated[Annotated[int, ctypes.c_uint32], 0]
  length_dw: Annotated[Annotated[int, ctypes.c_uint32], 4]
  chunk_data: Annotated[Annotated[int, ctypes.c_uint64], 8]
# NOTE(review): auto-generated command-submission (CS) payload records.
# SIZE is total byte size; fields are Annotated[ctype, byte_offset].

# CS submit: context, BO list, and a user pointer to `num_chunks` chunk
# descriptors.
@c.record
class struct_drm_amdgpu_cs_in(c.Struct):
  SIZE = 24
  ctx_id: Annotated[Annotated[int, ctypes.c_uint32], 0]
  bo_list_handle: Annotated[Annotated[int, ctypes.c_uint32], 4]
  num_chunks: Annotated[Annotated[int, ctypes.c_uint32], 8]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 12]
  chunks: Annotated[Annotated[int, ctypes.c_uint64], 16]
@c.record
class struct_drm_amdgpu_cs_out(c.Struct):
  SIZE = 8
  handle: Annotated[Annotated[int, ctypes.c_uint64], 0]
# In/out halves overlap (C union semantics).
@c.record
class union_drm_amdgpu_cs(c.Struct):
  SIZE = 24
  _in: Annotated[struct_drm_amdgpu_cs_in, 0]
  out: Annotated[struct_drm_amdgpu_cs_out, 0]
# IB chunk: GPU VA + size of one indirect buffer, plus target ip/ring.
@c.record
class struct_drm_amdgpu_cs_chunk_ib(c.Struct):
  SIZE = 32
  _pad: Annotated[Annotated[int, ctypes.c_uint32], 0]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 4]
  va_start: Annotated[Annotated[int, ctypes.c_uint64], 8]
  ib_bytes: Annotated[Annotated[int, ctypes.c_uint32], 16]
  ip_type: Annotated[Annotated[int, ctypes.c_uint32], 20]
  ip_instance: Annotated[Annotated[int, ctypes.c_uint32], 24]
  ring: Annotated[Annotated[int, ctypes.c_uint32], 28]
# Dependency chunk: wait for another submission's fence before running.
@c.record
class struct_drm_amdgpu_cs_chunk_dep(c.Struct):
  SIZE = 24
  ip_type: Annotated[Annotated[int, ctypes.c_uint32], 0]
  ip_instance: Annotated[Annotated[int, ctypes.c_uint32], 4]
  ring: Annotated[Annotated[int, ctypes.c_uint32], 8]
  ctx_id: Annotated[Annotated[int, ctypes.c_uint32], 12]
  handle: Annotated[Annotated[int, ctypes.c_uint64], 16]
@c.record
class struct_drm_amdgpu_cs_chunk_fence(c.Struct):
  SIZE = 8
  handle: Annotated[Annotated[int, ctypes.c_uint32], 0]
  offset: Annotated[Annotated[int, ctypes.c_uint32], 4]
@c.record
class struct_drm_amdgpu_cs_chunk_sem(c.Struct):
  SIZE = 4
  handle: Annotated[Annotated[int, ctypes.c_uint32], 0]
@c.record
class struct_drm_amdgpu_cs_chunk_syncobj(c.Struct):
  SIZE = 16
  handle: Annotated[Annotated[int, ctypes.c_uint32], 0]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 4]
  point: Annotated[Annotated[int, ctypes.c_uint64], 8]
# Convert a fence description into a syncobj/sync-file handle.
@c.record
class union_drm_amdgpu_fence_to_handle(c.Struct):
  SIZE = 32
  _in: Annotated[union_drm_amdgpu_fence_to_handle_in, 0]
  out: Annotated[union_drm_amdgpu_fence_to_handle_out, 0]
@c.record
class union_drm_amdgpu_fence_to_handle_in(c.Struct):
  SIZE = 32
  fence: Annotated[struct_drm_amdgpu_fence, 0]
  what: Annotated[Annotated[int, ctypes.c_uint32], 24]
  pad: Annotated[Annotated[int, ctypes.c_uint32], 28]
@c.record
class union_drm_amdgpu_fence_to_handle_out(c.Struct):
  SIZE = 4
  handle: Annotated[Annotated[int, ctypes.c_uint32], 0]
# Union-style overlay: a chunk's payload viewed as either IB or fence data
# (both members at offset 0).
@c.record
class struct_drm_amdgpu_cs_chunk_data(c.Struct):
  SIZE = 32
  ib_data: Annotated[struct_drm_amdgpu_cs_chunk_ib, 0]
  fence_data: Annotated[struct_drm_amdgpu_cs_chunk_fence, 0]
@c.record
class struct_drm_amdgpu_cs_chunk_cp_gfx_shadow(c.Struct):
  SIZE = 32
  shadow_va: Annotated[Annotated[int, ctypes.c_uint64], 0]
  csa_va: Annotated[Annotated[int, ctypes.c_uint64], 8]
  gds_va: Annotated[Annotated[int, ctypes.c_uint64], 16]
  flags: Annotated[Annotated[int, ctypes.c_uint64], 24]
# NOTE(review): auto-generated AMDGPU_INFO query payload records.
# SIZE is total byte size; fields are Annotated[ctype, byte_offset].

@c.record
class struct_drm_amdgpu_query_fw(c.Struct):
  SIZE = 16
  fw_type: Annotated[Annotated[int, ctypes.c_uint32], 0]
  ip_instance: Annotated[Annotated[int, ctypes.c_uint32], 4]
  index: Annotated[Annotated[int, ctypes.c_uint32], 8]
  _pad: Annotated[Annotated[int, ctypes.c_uint32], 12]
# INFO ioctl request: user buffer pointer/size plus the query selector.
@c.record
class struct_drm_amdgpu_info(c.Struct):
  SIZE = 16
  return_pointer: Annotated[Annotated[int, ctypes.c_uint64], 0]
  return_size: Annotated[Annotated[int, ctypes.c_uint32], 8]
  query: Annotated[Annotated[int, ctypes.c_uint32], 12]
@c.record
class struct_drm_amdgpu_info_gds(c.Struct):
  SIZE = 32
  gds_gfx_partition_size: Annotated[Annotated[int, ctypes.c_uint32], 0]
  compute_partition_size: Annotated[Annotated[int, ctypes.c_uint32], 4]
  gds_total_size: Annotated[Annotated[int, ctypes.c_uint32], 8]
  gws_per_gfx_partition: Annotated[Annotated[int, ctypes.c_uint32], 12]
  gws_per_compute_partition: Annotated[Annotated[int, ctypes.c_uint32], 16]
  oa_per_gfx_partition: Annotated[Annotated[int, ctypes.c_uint32], 20]
  oa_per_compute_partition: Annotated[Annotated[int, ctypes.c_uint32], 24]
  _pad: Annotated[Annotated[int, ctypes.c_uint32], 28]
@c.record
class struct_drm_amdgpu_info_vram_gtt(c.Struct):
  SIZE = 24
  vram_size: Annotated[Annotated[int, ctypes.c_uint64], 0]
  vram_cpu_accessible_size: Annotated[Annotated[int, ctypes.c_uint64], 8]
  gtt_size: Annotated[Annotated[int, ctypes.c_uint64], 16]
# Per-heap usage statistics (sizes in bytes).
@c.record
class struct_drm_amdgpu_heap_info(c.Struct):
  SIZE = 32
  total_heap_size: Annotated[Annotated[int, ctypes.c_uint64], 0]
  usable_heap_size: Annotated[Annotated[int, ctypes.c_uint64], 8]
  heap_usage: Annotated[Annotated[int, ctypes.c_uint64], 16]
  max_allocation: Annotated[Annotated[int, ctypes.c_uint64], 24]
# Three heap_info records back to back: VRAM, CPU-visible VRAM, GTT.
@c.record
class struct_drm_amdgpu_memory_info(c.Struct):
  SIZE = 96
  vram: Annotated[struct_drm_amdgpu_heap_info, 0]
  cpu_accessible_vram: Annotated[struct_drm_amdgpu_heap_info, 32]
  gtt: Annotated[struct_drm_amdgpu_heap_info, 64]
@c.record
class struct_drm_amdgpu_info_firmware(c.Struct):
  SIZE = 8
  ver: Annotated[Annotated[int, ctypes.c_uint32], 0]
  feature: Annotated[Annotated[int, ctypes.c_uint32], 4]
# VBIOS identification strings (fixed-size byte arrays, not NUL-terminated
# Python strings -- decode before use).
@c.record
class struct_drm_amdgpu_info_vbios(c.Struct):
  SIZE = 200
  name: Annotated[c.Array[Annotated[int, ctypes.c_ubyte], Literal[64]], 0]
  vbios_pn: Annotated[c.Array[Annotated[int, ctypes.c_ubyte], Literal[64]], 64]
  version: Annotated[Annotated[int, ctypes.c_uint32], 128]
  pad: Annotated[Annotated[int, ctypes.c_uint32], 132]
  vbios_ver_str: Annotated[c.Array[Annotated[int, ctypes.c_ubyte], Literal[32]], 136]
  date: Annotated[c.Array[Annotated[int, ctypes.c_ubyte], Literal[32]], 168]
__u8: TypeAlias = Annotated[int, ctypes.c_ubyte]
# Main device-info query result: identity, clocks, CU topology, VA ranges,
# caches and user-queue capabilities.  cu_bitmap / cu_ao_bitmap are 4x4 u32
# arrays (64 bytes each).
@c.record
class struct_drm_amdgpu_info_device(c.Struct):
  SIZE = 448
  device_id: Annotated[Annotated[int, ctypes.c_uint32], 0]
  chip_rev: Annotated[Annotated[int, ctypes.c_uint32], 4]
  external_rev: Annotated[Annotated[int, ctypes.c_uint32], 8]
  pci_rev: Annotated[Annotated[int, ctypes.c_uint32], 12]
  family: Annotated[Annotated[int, ctypes.c_uint32], 16]
  num_shader_engines: Annotated[Annotated[int, ctypes.c_uint32], 20]
  num_shader_arrays_per_engine: Annotated[Annotated[int, ctypes.c_uint32], 24]
  gpu_counter_freq: Annotated[Annotated[int, ctypes.c_uint32], 28]
  max_engine_clock: Annotated[Annotated[int, ctypes.c_uint64], 32]
  max_memory_clock: Annotated[Annotated[int, ctypes.c_uint64], 40]
  cu_active_number: Annotated[Annotated[int, ctypes.c_uint32], 48]
  cu_ao_mask: Annotated[Annotated[int, ctypes.c_uint32], 52]
  cu_bitmap: Annotated[c.Array[c.Array[Annotated[int, ctypes.c_uint32], Literal[4]], Literal[4]], 56]
  enabled_rb_pipes_mask: Annotated[Annotated[int, ctypes.c_uint32], 120]
  num_rb_pipes: Annotated[Annotated[int, ctypes.c_uint32], 124]
  num_hw_gfx_contexts: Annotated[Annotated[int, ctypes.c_uint32], 128]
  pcie_gen: Annotated[Annotated[int, ctypes.c_uint32], 132]
  ids_flags: Annotated[Annotated[int, ctypes.c_uint64], 136]
  virtual_address_offset: Annotated[Annotated[int, ctypes.c_uint64], 144]
  virtual_address_max: Annotated[Annotated[int, ctypes.c_uint64], 152]
  virtual_address_alignment: Annotated[Annotated[int, ctypes.c_uint32], 160]
  pte_fragment_size: Annotated[Annotated[int, ctypes.c_uint32], 164]
  gart_page_size: Annotated[Annotated[int, ctypes.c_uint32], 168]
  ce_ram_size: Annotated[Annotated[int, ctypes.c_uint32], 172]
  vram_type: Annotated[Annotated[int, ctypes.c_uint32], 176]
  vram_bit_width: Annotated[Annotated[int, ctypes.c_uint32], 180]
  vce_harvest_config: Annotated[Annotated[int, ctypes.c_uint32], 184]
  gc_double_offchip_lds_buf: Annotated[Annotated[int, ctypes.c_uint32], 188]
  prim_buf_gpu_addr: Annotated[Annotated[int, ctypes.c_uint64], 192]
  pos_buf_gpu_addr: Annotated[Annotated[int, ctypes.c_uint64], 200]
  cntl_sb_buf_gpu_addr: Annotated[Annotated[int, ctypes.c_uint64], 208]
  param_buf_gpu_addr: Annotated[Annotated[int, ctypes.c_uint64], 216]
  prim_buf_size: Annotated[Annotated[int, ctypes.c_uint32], 224]
  pos_buf_size: Annotated[Annotated[int, ctypes.c_uint32], 228]
  cntl_sb_buf_size: Annotated[Annotated[int, ctypes.c_uint32], 232]
  param_buf_size: Annotated[Annotated[int, ctypes.c_uint32], 236]
  wave_front_size: Annotated[Annotated[int, ctypes.c_uint32], 240]
  num_shader_visible_vgprs: Annotated[Annotated[int, ctypes.c_uint32], 244]
  num_cu_per_sh: Annotated[Annotated[int, ctypes.c_uint32], 248]
  num_tcc_blocks: Annotated[Annotated[int, ctypes.c_uint32], 252]
  gs_vgt_table_depth: Annotated[Annotated[int, ctypes.c_uint32], 256]
  gs_prim_buffer_depth: Annotated[Annotated[int, ctypes.c_uint32], 260]
  max_gs_waves_per_vgt: Annotated[Annotated[int, ctypes.c_uint32], 264]
  pcie_num_lanes: Annotated[Annotated[int, ctypes.c_uint32], 268]
  cu_ao_bitmap: Annotated[c.Array[c.Array[Annotated[int, ctypes.c_uint32], Literal[4]], Literal[4]], 272]
  high_va_offset: Annotated[Annotated[int, ctypes.c_uint64], 336]
  high_va_max: Annotated[Annotated[int, ctypes.c_uint64], 344]
  pa_sc_tile_steering_override: Annotated[Annotated[int, ctypes.c_uint32], 352]
  tcc_disabled_mask: Annotated[Annotated[int, ctypes.c_uint64], 360]
  min_engine_clock: Annotated[Annotated[int, ctypes.c_uint64], 368]
  min_memory_clock: Annotated[Annotated[int, ctypes.c_uint64], 376]
  tcp_cache_size: Annotated[Annotated[int, ctypes.c_uint32], 384]
  num_sqc_per_wgp: Annotated[Annotated[int, ctypes.c_uint32], 388]
  sqc_data_cache_size: Annotated[Annotated[int, ctypes.c_uint32], 392]
  sqc_inst_cache_size: Annotated[Annotated[int, ctypes.c_uint32], 396]
  gl1c_cache_size: Annotated[Annotated[int, ctypes.c_uint32], 400]
  gl2c_cache_size: Annotated[Annotated[int, ctypes.c_uint32], 404]
  mall_size: Annotated[Annotated[int, ctypes.c_uint64], 408]
  enabled_rb_pipes_mask_hi: Annotated[Annotated[int, ctypes.c_uint32], 416]
  shadow_size: Annotated[Annotated[int, ctypes.c_uint32], 420]
  shadow_alignment: Annotated[Annotated[int, ctypes.c_uint32], 424]
  csa_size: Annotated[Annotated[int, ctypes.c_uint32], 428]
  csa_alignment: Annotated[Annotated[int, ctypes.c_uint32], 432]
  userq_ip_mask: Annotated[Annotated[int, ctypes.c_uint32], 436]
  pad: Annotated[Annotated[int, ctypes.c_uint32], 440]
# NOTE(review): remaining auto-generated INFO sub-records, two opaque
# forward declarations, and the closing c.init_records() call that resolves
# all forward references in the records above.

@c.record
class struct_drm_amdgpu_info_hw_ip(c.Struct):
  SIZE = 32
  hw_ip_version_major: Annotated[Annotated[int, ctypes.c_uint32], 0]
  hw_ip_version_minor: Annotated[Annotated[int, ctypes.c_uint32], 4]
  capabilities_flags: Annotated[Annotated[int, ctypes.c_uint64], 8]
  ib_start_alignment: Annotated[Annotated[int, ctypes.c_uint32], 16]
  ib_size_alignment: Annotated[Annotated[int, ctypes.c_uint32], 20]
  available_rings: Annotated[Annotated[int, ctypes.c_uint32], 24]
  ip_discovery_version: Annotated[Annotated[int, ctypes.c_uint32], 28]
@c.record
class struct_drm_amdgpu_info_uq_fw_areas_gfx(c.Struct):
  SIZE = 16
  shadow_size: Annotated[Annotated[int, ctypes.c_uint32], 0]
  shadow_alignment: Annotated[Annotated[int, ctypes.c_uint32], 4]
  csa_size: Annotated[Annotated[int, ctypes.c_uint32], 8]
  csa_alignment: Annotated[Annotated[int, ctypes.c_uint32], 12]
@c.record
class struct_drm_amdgpu_info_uq_fw_areas(c.Struct):
  SIZE = 16
  gfx: Annotated[struct_drm_amdgpu_info_uq_fw_areas_gfx, 0]
@c.record
class struct_drm_amdgpu_info_num_handles(c.Struct):
  SIZE = 8
  uvd_max_handles: Annotated[Annotated[int, ctypes.c_uint32], 0]
  uvd_used_handles: Annotated[Annotated[int, ctypes.c_uint32], 4]
@c.record
class struct_drm_amdgpu_info_vce_clock_table_entry(c.Struct):
  SIZE = 16
  sclk: Annotated[Annotated[int, ctypes.c_uint32], 0]
  mclk: Annotated[Annotated[int, ctypes.c_uint32], 4]
  eclk: Annotated[Annotated[int, ctypes.c_uint32], 8]
  pad: Annotated[Annotated[int, ctypes.c_uint32], 12]
# Fixed table of 6 clock entries; num_valid_entries says how many are used.
@c.record
class struct_drm_amdgpu_info_vce_clock_table(c.Struct):
  SIZE = 104
  entries: Annotated[c.Array[struct_drm_amdgpu_info_vce_clock_table_entry, Literal[6]], 0]
  num_valid_entries: Annotated[Annotated[int, ctypes.c_uint32], 96]
  pad: Annotated[Annotated[int, ctypes.c_uint32], 100]
@c.record
class struct_drm_amdgpu_info_video_codec_info(c.Struct):
  SIZE = 24
  valid: Annotated[Annotated[int, ctypes.c_uint32], 0]
  max_width: Annotated[Annotated[int, ctypes.c_uint32], 4]
  max_height: Annotated[Annotated[int, ctypes.c_uint32], 8]
  max_pixels_per_frame: Annotated[Annotated[int, ctypes.c_uint32], 12]
  max_level: Annotated[Annotated[int, ctypes.c_uint32], 16]
  pad: Annotated[Annotated[int, ctypes.c_uint32], 20]
@c.record
class struct_drm_amdgpu_info_video_caps(c.Struct):
  SIZE = 192
  codec_info: Annotated[c.Array[struct_drm_amdgpu_info_video_codec_info, Literal[8]], 0]
@c.record
class struct_drm_amdgpu_info_gpuvm_fault(c.Struct):
  SIZE = 16
  addr: Annotated[Annotated[int, ctypes.c_uint64], 0]
  status: Annotated[Annotated[int, ctypes.c_uint32], 8]
  vmhub: Annotated[Annotated[int, ctypes.c_uint32], 12]
@c.record
class struct_drm_amdgpu_info_uq_metadata_gfx(c.Struct):
  SIZE = 16
  shadow_size: Annotated[Annotated[int, ctypes.c_uint32], 0]
  shadow_alignment: Annotated[Annotated[int, ctypes.c_uint32], 4]
  csa_size: Annotated[Annotated[int, ctypes.c_uint32], 8]
  csa_alignment: Annotated[Annotated[int, ctypes.c_uint32], 12]
@c.record
class struct_drm_amdgpu_info_uq_metadata(c.Struct):
  SIZE = 16
  gfx: Annotated[struct_drm_amdgpu_info_uq_metadata_gfx, 0]
# Opaque forward declarations: bodies were not generated for this chunk.
class _anonstruct0(ctypes.Structure): pass
class struct_drm_amdgpu_virtual_range(ctypes.Structure): pass
@c.record
class struct_drm_amdgpu_capability(c.Struct):
  SIZE = 8
  flag: Annotated[Annotated[int, ctypes.c_uint32], 0]
  direct_gma_size: Annotated[Annotated[int, ctypes.c_uint32], 4]
@c.record
class struct_drm_amdgpu_freesync(c.Struct):
  SIZE = 32
  op: Annotated[Annotated[int, ctypes.c_uint32], 0]
  spare: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[7]], 4]
# Finalize all @c.record classes (resolves forward references declared above).
c.init_records()
# ---------------------------------------------------------------------------
# Auto-generated constants (DRM core + AMDGPU uAPI).  Values must stay in
# sync with the kernel headers -- regenerate rather than hand-edit.
# ---------------------------------------------------------------------------

# DRM core identity and legacy lock helpers.
DRM_NAME = "drm" # type: ignore
DRM_MIN_ORDER = 5 # type: ignore
DRM_MAX_ORDER = 22 # type: ignore
DRM_RAM_PERCENT = 10 # type: ignore
_DRM_LOCK_HELD = 0x80000000 # type: ignore
_DRM_LOCK_CONT = 0x40000000 # type: ignore
_DRM_LOCK_IS_HELD = lambda lock: ((lock) & _DRM_LOCK_HELD) # type: ignore
_DRM_LOCK_IS_CONT = lambda lock: ((lock) & _DRM_LOCK_CONT) # type: ignore
_DRM_LOCKING_CONTEXT = lambda lock: ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT)) # type: ignore
# vblank request encoding (type/flag masks reference enums defined earlier in the file).
_DRM_VBLANK_HIGH_CRTC_SHIFT = 1 # type: ignore
_DRM_VBLANK_TYPES_MASK = (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE) # type: ignore
_DRM_VBLANK_FLAGS_MASK = (_DRM_VBLANK_EVENT | _DRM_VBLANK_SIGNAL | _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS) # type: ignore
_DRM_PRE_MODESET = 1 # type: ignore
_DRM_POST_MODESET = 2 # type: ignore
# Driver capability queries (argument to DRM_IOCTL_GET_CAP).
DRM_CAP_DUMB_BUFFER = 0x1 # type: ignore
DRM_CAP_VBLANK_HIGH_CRTC = 0x2 # type: ignore
DRM_CAP_DUMB_PREFERRED_DEPTH = 0x3 # type: ignore
DRM_CAP_DUMB_PREFER_SHADOW = 0x4 # type: ignore
DRM_CAP_PRIME = 0x5 # type: ignore
DRM_PRIME_CAP_IMPORT = 0x1 # type: ignore
DRM_PRIME_CAP_EXPORT = 0x2 # type: ignore
DRM_CAP_TIMESTAMP_MONOTONIC = 0x6 # type: ignore
DRM_CAP_ASYNC_PAGE_FLIP = 0x7 # type: ignore
DRM_CAP_CURSOR_WIDTH = 0x8 # type: ignore
DRM_CAP_CURSOR_HEIGHT = 0x9 # type: ignore
DRM_CAP_ADDFB2_MODIFIERS = 0x10 # type: ignore
DRM_CAP_PAGE_FLIP_TARGET = 0x11 # type: ignore
DRM_CAP_CRTC_IN_VBLANK_EVENT = 0x12 # type: ignore
DRM_CAP_SYNCOBJ = 0x13 # type: ignore
DRM_CAP_SYNCOBJ_TIMELINE = 0x14 # type: ignore
DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP = 0x15 # type: ignore
# Client capabilities (argument to DRM_IOCTL_SET_CLIENT_CAP).
DRM_CLIENT_CAP_STEREO_3D = 1 # type: ignore
DRM_CLIENT_CAP_UNIVERSAL_PLANES = 2 # type: ignore
DRM_CLIENT_CAP_ATOMIC = 3 # type: ignore
DRM_CLIENT_CAP_ASPECT_RATIO = 4 # type: ignore
DRM_CLIENT_CAP_WRITEBACK_CONNECTORS = 5 # type: ignore
DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT = 6 # type: ignore
# Syncobj create/import/wait/query flag bits.
DRM_SYNCOBJ_CREATE_SIGNALED = (1 << 0) # type: ignore
DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE = (1 << 0) # type: ignore
DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE = (1 << 0) # type: ignore
DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL = (1 << 0) # type: ignore
DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT = (1 << 1) # type: ignore
DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE = (1 << 2) # type: ignore
DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE = (1 << 3) # type: ignore
DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED = (1 << 0) # type: ignore
DRM_CRTC_SEQUENCE_RELATIVE = 0x00000001 # type: ignore
DRM_CRTC_SEQUENCE_NEXT_ON_MISS = 0x00000002 # type: ignore
# ioctl-number builders: all DRM ioctls use magic 'd' with the usual
# _IO/_IOR/_IOW/_IOWR direction encoding.
DRM_IOCTL_BASE = 'd' # type: ignore
DRM_IO = lambda nr: _IO(DRM_IOCTL_BASE,nr) # type: ignore
DRM_IOR = lambda nr,type: _IOR(DRM_IOCTL_BASE,nr,type) # type: ignore
DRM_IOW = lambda nr,type: _IOW(DRM_IOCTL_BASE,nr,type) # type: ignore
DRM_IOWR = lambda nr,type: _IOWR(DRM_IOCTL_BASE,nr,type) # type: ignore
# Core (driver-independent) DRM ioctls.
DRM_IOCTL_VERSION = DRM_IOWR(0x00, struct_drm_version) # type: ignore
DRM_IOCTL_GET_UNIQUE = DRM_IOWR(0x01, struct_drm_unique) # type: ignore
DRM_IOCTL_GET_MAGIC = DRM_IOR( 0x02, struct_drm_auth) # type: ignore
DRM_IOCTL_IRQ_BUSID = DRM_IOWR(0x03, struct_drm_irq_busid) # type: ignore
DRM_IOCTL_GET_MAP = DRM_IOWR(0x04, struct_drm_map) # type: ignore
DRM_IOCTL_GET_CLIENT = DRM_IOWR(0x05, struct_drm_client) # type: ignore
DRM_IOCTL_GET_STATS = DRM_IOR( 0x06, struct_drm_stats) # type: ignore
DRM_IOCTL_SET_VERSION = DRM_IOWR(0x07, struct_drm_set_version) # type: ignore
DRM_IOCTL_MODESET_CTL = DRM_IOW(0x08, struct_drm_modeset_ctl) # type: ignore
DRM_IOCTL_GEM_CLOSE = DRM_IOW (0x09, struct_drm_gem_close) # type: ignore
DRM_IOCTL_GEM_FLINK = DRM_IOWR(0x0a, struct_drm_gem_flink) # type: ignore
DRM_IOCTL_GEM_OPEN = DRM_IOWR(0x0b, struct_drm_gem_open) # type: ignore
DRM_IOCTL_GET_CAP = DRM_IOWR(0x0c, struct_drm_get_cap) # type: ignore
DRM_IOCTL_SET_CLIENT_CAP = DRM_IOW( 0x0d, struct_drm_set_client_cap) # type: ignore
DRM_IOCTL_SET_UNIQUE = DRM_IOW( 0x10, struct_drm_unique) # type: ignore
DRM_IOCTL_AUTH_MAGIC = DRM_IOW( 0x11, struct_drm_auth) # type: ignore
DRM_IOCTL_BLOCK = DRM_IOWR(0x12, struct_drm_block) # type: ignore
DRM_IOCTL_UNBLOCK = DRM_IOWR(0x13, struct_drm_block) # type: ignore
DRM_IOCTL_CONTROL = DRM_IOW( 0x14, struct_drm_control) # type: ignore
DRM_IOCTL_ADD_MAP = DRM_IOWR(0x15, struct_drm_map) # type: ignore
DRM_IOCTL_ADD_BUFS = DRM_IOWR(0x16, struct_drm_buf_desc) # type: ignore
DRM_IOCTL_MARK_BUFS = DRM_IOW( 0x17, struct_drm_buf_desc) # type: ignore
DRM_IOCTL_INFO_BUFS = DRM_IOWR(0x18, struct_drm_buf_info) # type: ignore
DRM_IOCTL_MAP_BUFS = DRM_IOWR(0x19, struct_drm_buf_map) # type: ignore
DRM_IOCTL_FREE_BUFS = DRM_IOW( 0x1a, struct_drm_buf_free) # type: ignore
DRM_IOCTL_RM_MAP = DRM_IOW( 0x1b, struct_drm_map) # type: ignore
DRM_IOCTL_SET_SAREA_CTX = DRM_IOW( 0x1c, struct_drm_ctx_priv_map) # type: ignore
DRM_IOCTL_GET_SAREA_CTX = DRM_IOWR(0x1d, struct_drm_ctx_priv_map) # type: ignore
DRM_IOCTL_SET_MASTER = DRM_IO(0x1e) # type: ignore
DRM_IOCTL_DROP_MASTER = DRM_IO(0x1f) # type: ignore
DRM_IOCTL_ADD_CTX = DRM_IOWR(0x20, struct_drm_ctx) # type: ignore
DRM_IOCTL_RM_CTX = DRM_IOWR(0x21, struct_drm_ctx) # type: ignore
DRM_IOCTL_MOD_CTX = DRM_IOW( 0x22, struct_drm_ctx) # type: ignore
DRM_IOCTL_GET_CTX = DRM_IOWR(0x23, struct_drm_ctx) # type: ignore
DRM_IOCTL_SWITCH_CTX = DRM_IOW( 0x24, struct_drm_ctx) # type: ignore
DRM_IOCTL_NEW_CTX = DRM_IOW( 0x25, struct_drm_ctx) # type: ignore
DRM_IOCTL_RES_CTX = DRM_IOWR(0x26, struct_drm_ctx_res) # type: ignore
DRM_IOCTL_ADD_DRAW = DRM_IOWR(0x27, struct_drm_draw) # type: ignore
DRM_IOCTL_RM_DRAW = DRM_IOWR(0x28, struct_drm_draw) # type: ignore
DRM_IOCTL_DMA = DRM_IOWR(0x29, struct_drm_dma) # type: ignore
DRM_IOCTL_LOCK = DRM_IOW( 0x2a, struct_drm_lock) # type: ignore
DRM_IOCTL_UNLOCK = DRM_IOW( 0x2b, struct_drm_lock) # type: ignore
DRM_IOCTL_FINISH = DRM_IOW( 0x2c, struct_drm_lock) # type: ignore
DRM_IOCTL_PRIME_HANDLE_TO_FD = DRM_IOWR(0x2d, struct_drm_prime_handle) # type: ignore
DRM_IOCTL_PRIME_FD_TO_HANDLE = DRM_IOWR(0x2e, struct_drm_prime_handle) # type: ignore
DRM_IOCTL_AGP_ACQUIRE = DRM_IO( 0x30) # type: ignore
DRM_IOCTL_AGP_RELEASE = DRM_IO( 0x31) # type: ignore
DRM_IOCTL_AGP_ENABLE = DRM_IOW( 0x32, struct_drm_agp_mode) # type: ignore
DRM_IOCTL_AGP_INFO = DRM_IOR( 0x33, struct_drm_agp_info) # type: ignore
DRM_IOCTL_AGP_ALLOC = DRM_IOWR(0x34, struct_drm_agp_buffer) # type: ignore
DRM_IOCTL_AGP_FREE = DRM_IOW( 0x35, struct_drm_agp_buffer) # type: ignore
DRM_IOCTL_AGP_BIND = DRM_IOW( 0x36, struct_drm_agp_binding) # type: ignore
DRM_IOCTL_AGP_UNBIND = DRM_IOW( 0x37, struct_drm_agp_binding) # type: ignore
DRM_IOCTL_SG_ALLOC = DRM_IOWR(0x38, struct_drm_scatter_gather) # type: ignore
DRM_IOCTL_SG_FREE = DRM_IOW( 0x39, struct_drm_scatter_gather) # type: ignore
DRM_IOCTL_WAIT_VBLANK = DRM_IOWR(0x3a, union_drm_wait_vblank) # type: ignore
DRM_IOCTL_CRTC_GET_SEQUENCE = DRM_IOWR(0x3b, struct_drm_crtc_get_sequence) # type: ignore
DRM_IOCTL_CRTC_QUEUE_SEQUENCE = DRM_IOWR(0x3c, struct_drm_crtc_queue_sequence) # type: ignore
DRM_IOCTL_UPDATE_DRAW = DRM_IOW(0x3f, struct_drm_update_draw) # type: ignore
DRM_IOCTL_SYNCOBJ_CREATE = DRM_IOWR(0xBF, struct_drm_syncobj_create) # type: ignore
DRM_IOCTL_SYNCOBJ_DESTROY = DRM_IOWR(0xC0, struct_drm_syncobj_destroy) # type: ignore
DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD = DRM_IOWR(0xC1, struct_drm_syncobj_handle) # type: ignore
DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE = DRM_IOWR(0xC2, struct_drm_syncobj_handle) # type: ignore
DRM_IOCTL_SYNCOBJ_WAIT = DRM_IOWR(0xC3, struct_drm_syncobj_wait) # type: ignore
DRM_IOCTL_SYNCOBJ_RESET = DRM_IOWR(0xC4, struct_drm_syncobj_array) # type: ignore
DRM_IOCTL_SYNCOBJ_SIGNAL = DRM_IOWR(0xC5, struct_drm_syncobj_array) # type: ignore
DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT = DRM_IOWR(0xCA, struct_drm_syncobj_timeline_wait) # type: ignore
DRM_IOCTL_SYNCOBJ_QUERY = DRM_IOWR(0xCB, struct_drm_syncobj_timeline_array) # type: ignore
DRM_IOCTL_SYNCOBJ_TRANSFER = DRM_IOWR(0xCC, struct_drm_syncobj_transfer) # type: ignore
DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL = DRM_IOWR(0xCD, struct_drm_syncobj_timeline_array) # type: ignore
DRM_IOCTL_SYNCOBJ_EVENTFD = DRM_IOWR(0xCF, struct_drm_syncobj_eventfd) # type: ignore
# Driver-specific ioctl numbers live in [DRM_COMMAND_BASE, DRM_COMMAND_END).
DRM_COMMAND_BASE = 0x40 # type: ignore
DRM_COMMAND_END = 0xA0 # type: ignore
DRM_EVENT_VBLANK = 0x01 # type: ignore
DRM_EVENT_FLIP_COMPLETE = 0x02 # type: ignore
DRM_EVENT_CRTC_SEQUENCE = 0x03 # type: ignore
# AMDGPU driver-private command numbers (offsets from DRM_COMMAND_BASE).
# Note the numbering is the kernel's: it jumps 0x09 -> 0x10 and the
# DGMA/SEM commands sit at 0x5c/0x5b.
DRM_AMDGPU_GEM_CREATE = 0x00 # type: ignore
DRM_AMDGPU_GEM_MMAP = 0x01 # type: ignore
DRM_AMDGPU_CTX = 0x02 # type: ignore
DRM_AMDGPU_BO_LIST = 0x03 # type: ignore
DRM_AMDGPU_CS = 0x04 # type: ignore
DRM_AMDGPU_INFO = 0x05 # type: ignore
DRM_AMDGPU_GEM_METADATA = 0x06 # type: ignore
DRM_AMDGPU_GEM_WAIT_IDLE = 0x07 # type: ignore
DRM_AMDGPU_GEM_VA = 0x08 # type: ignore
DRM_AMDGPU_WAIT_CS = 0x09 # type: ignore
DRM_AMDGPU_GEM_OP = 0x10 # type: ignore
DRM_AMDGPU_GEM_USERPTR = 0x11 # type: ignore
DRM_AMDGPU_WAIT_FENCES = 0x12 # type: ignore
DRM_AMDGPU_VM = 0x13 # type: ignore
DRM_AMDGPU_FENCE_TO_HANDLE = 0x14 # type: ignore
DRM_AMDGPU_SCHED = 0x15 # type: ignore
DRM_AMDGPU_USERQ = 0x16 # type: ignore
DRM_AMDGPU_USERQ_SIGNAL = 0x17 # type: ignore
DRM_AMDGPU_USERQ_WAIT = 0x18 # type: ignore
DRM_AMDGPU_GEM_DGMA = 0x5c # type: ignore
DRM_AMDGPU_SEM = 0x5b # type: ignore
# Fully-encoded AMDGPU ioctl numbers (command number + payload record).
DRM_IOCTL_AMDGPU_GEM_CREATE = DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union_drm_amdgpu_gem_create) # type: ignore
DRM_IOCTL_AMDGPU_GEM_MMAP = DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union_drm_amdgpu_gem_mmap) # type: ignore
DRM_IOCTL_AMDGPU_CTX = DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_CTX, union_drm_amdgpu_ctx) # type: ignore
DRM_IOCTL_AMDGPU_BO_LIST = DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_BO_LIST, union_drm_amdgpu_bo_list) # type: ignore
DRM_IOCTL_AMDGPU_CS = DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_CS, union_drm_amdgpu_cs) # type: ignore
DRM_IOCTL_AMDGPU_INFO = DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_INFO, struct_drm_amdgpu_info) # type: ignore
DRM_IOCTL_AMDGPU_GEM_METADATA = DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_METADATA, struct_drm_amdgpu_gem_metadata) # type: ignore
DRM_IOCTL_AMDGPU_GEM_WAIT_IDLE = DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_WAIT_IDLE, union_drm_amdgpu_gem_wait_idle) # type: ignore
DRM_IOCTL_AMDGPU_GEM_VA = DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_VA, struct_drm_amdgpu_gem_va) # type: ignore
DRM_IOCTL_AMDGPU_WAIT_CS = DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_CS, union_drm_amdgpu_wait_cs) # type: ignore
DRM_IOCTL_AMDGPU_GEM_OP = DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_OP, struct_drm_amdgpu_gem_op) # type: ignore
DRM_IOCTL_AMDGPU_GEM_USERPTR = DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_USERPTR, struct_drm_amdgpu_gem_userptr) # type: ignore
DRM_IOCTL_AMDGPU_WAIT_FENCES = DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_FENCES, union_drm_amdgpu_wait_fences) # type: ignore
DRM_IOCTL_AMDGPU_VM = DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_VM, union_drm_amdgpu_vm) # type: ignore
DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE = DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FENCE_TO_HANDLE, union_drm_amdgpu_fence_to_handle) # type: ignore
DRM_IOCTL_AMDGPU_SCHED = DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_SCHED, union_drm_amdgpu_sched) # type: ignore
DRM_IOCTL_AMDGPU_USERQ = DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ, union_drm_amdgpu_userq) # type: ignore
DRM_IOCTL_AMDGPU_USERQ_SIGNAL = DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ_SIGNAL, struct_drm_amdgpu_userq_signal) # type: ignore
DRM_IOCTL_AMDGPU_USERQ_WAIT = DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ_WAIT, struct_drm_amdgpu_userq_wait) # type: ignore
DRM_IOCTL_AMDGPU_GEM_DGMA = DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_DGMA, struct_drm_amdgpu_gem_dgma) # type: ignore
DRM_IOCTL_AMDGPU_SEM = DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_SEM, union_drm_amdgpu_sem) # type: ignore
# GEM memory-domain bits (where a buffer object may live).
AMDGPU_GEM_DOMAIN_CPU = 0x1 # type: ignore
AMDGPU_GEM_DOMAIN_GTT = 0x2 # type: ignore
AMDGPU_GEM_DOMAIN_VRAM = 0x4 # type: ignore
AMDGPU_GEM_DOMAIN_GDS = 0x8 # type: ignore
AMDGPU_GEM_DOMAIN_GWS = 0x10 # type: ignore
AMDGPU_GEM_DOMAIN_OA = 0x20 # type: ignore
AMDGPU_GEM_DOMAIN_DOORBELL = 0x40 # type: ignore
AMDGPU_GEM_DOMAIN_DGMA = 0x400 # type: ignore
AMDGPU_GEM_DOMAIN_DGMA_IMPORT = 0x800 # type: ignore
AMDGPU_GEM_DOMAIN_MASK = (AMDGPU_GEM_DOMAIN_CPU | AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA | AMDGPU_GEM_DOMAIN_DOORBELL | AMDGPU_GEM_DOMAIN_DGMA | AMDGPU_GEM_DOMAIN_DGMA_IMPORT) # type: ignore
# GEM_CREATE flag bits (bit 4 is intentionally unassigned here).
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED = (1 << 0) # type: ignore
AMDGPU_GEM_CREATE_NO_CPU_ACCESS = (1 << 1) # type: ignore
AMDGPU_GEM_CREATE_CPU_GTT_USWC = (1 << 2) # type: ignore
AMDGPU_GEM_CREATE_VRAM_CLEARED = (1 << 3) # type: ignore
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS = (1 << 5) # type: ignore
AMDGPU_GEM_CREATE_VM_ALWAYS_VALID = (1 << 6) # type: ignore
AMDGPU_GEM_CREATE_EXPLICIT_SYNC = (1 << 7) # type: ignore
AMDGPU_GEM_CREATE_CP_MQD_GFX9 = (1 << 8) # type: ignore
AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE = (1 << 9) # type: ignore
AMDGPU_GEM_CREATE_ENCRYPTED = (1 << 10) # type: ignore
AMDGPU_GEM_CREATE_PREEMPTIBLE = (1 << 11) # type: ignore
AMDGPU_GEM_CREATE_DISCARDABLE = (1 << 12) # type: ignore
AMDGPU_GEM_CREATE_COHERENT = (1 << 13) # type: ignore
AMDGPU_GEM_CREATE_UNCACHED = (1 << 14) # type: ignore
AMDGPU_GEM_CREATE_EXT_COHERENT = (1 << 15) # type: ignore
AMDGPU_GEM_CREATE_GFX12_DCC = (1 << 16) # type: ignore
AMDGPU_GEM_CREATE_SPARSE = (1 << 29) # type: ignore
AMDGPU_GEM_CREATE_TOP_DOWN = (1 << 30) # type: ignore
AMDGPU_GEM_CREATE_NO_EVICT = (1 << 31) # type: ignore
# BO-list ioctl operations.
AMDGPU_BO_LIST_OP_CREATE = 0 # type: ignore
AMDGPU_BO_LIST_OP_DESTROY = 1 # type: ignore
AMDGPU_BO_LIST_OP_UPDATE = 2 # type: ignore
AMDGPU_CTX_OP_ALLOC_CTX = 1 # type: ignore
AMDGPU_CTX_OP_FREE_CTX = 2 # type: ignore
AMDGPU_CTX_OP_QUERY_STATE = 3 # type: ignore
AMDGPU_CTX_OP_QUERY_STATE2 = 4 # type: ignore
AMDGPU_CTX_OP_GET_STABLE_PSTATE = 5 # type: ignore
AMDGPU_CTX_OP_SET_STABLE_PSTATE = 6 # type: ignore
AMDGPU_CTX_NO_RESET = 0 # type: ignore
AMDGPU_CTX_GUILTY_RESET = 1 # type: ignore
AMDGPU_CTX_INNOCENT_RESET = 2 # type: ignore
AMDGPU_CTX_UNKNOWN_RESET = 3 # type: ignore
AMDGPU_CTX_QUERY2_FLAGS_RESET = (1<<0) # type: ignore
AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST = (1<<1) # type: ignore
AMDGPU_CTX_QUERY2_FLAGS_GUILTY = (1<<2) # type: ignore
AMDGPU_CTX_QUERY2_FLAGS_RAS_CE = (1<<3) # type: ignore
AMDGPU_CTX_QUERY2_FLAGS_RAS_UE = (1<<4) # type: ignore
AMDGPU_CTX_QUERY2_FLAGS_RESET_IN_PROGRESS = (1<<5) # type: ignore
AMDGPU_CTX_PRIORITY_UNSET = -2048 # type: ignore
AMDGPU_CTX_PRIORITY_VERY_LOW = -1023 # type: ignore
AMDGPU_CTX_PRIORITY_LOW = -512 # type: ignore
AMDGPU_CTX_PRIORITY_NORMAL = 0 # type: ignore
AMDGPU_CTX_PRIORITY_HIGH = 512 # type: ignore
AMDGPU_CTX_PRIORITY_VERY_HIGH = 1023 # type: ignore
AMDGPU_CTX_STABLE_PSTATE_FLAGS_MASK = 0xf # type: ignore
AMDGPU_CTX_STABLE_PSTATE_NONE = 0 # type: ignore
AMDGPU_CTX_STABLE_PSTATE_STANDARD = 1 # type: ignore
AMDGPU_CTX_STABLE_PSTATE_MIN_SCLK = 2 # type: ignore
AMDGPU_CTX_STABLE_PSTATE_MIN_MCLK = 3 # type: ignore
AMDGPU_CTX_STABLE_PSTATE_PEAK = 4 # type: ignore
AMDGPU_USERQ_OP_CREATE = 1 # type: ignore
AMDGPU_USERQ_OP_FREE = 2 # type: ignore
AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK = 0x3 # type: ignore
AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT = 0 # type: ignore
AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_NORMAL_LOW = 0 # type: ignore
AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_LOW = 1 # type: ignore
AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_NORMAL_HIGH = 2 # type: ignore
AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_HIGH = 3 # type: ignore
AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE = (1 << 2) # type: ignore
AMDGPU_SEM_OP_CREATE_SEM = 1 # type: ignore
AMDGPU_SEM_OP_WAIT_SEM = 2 # type: ignore
AMDGPU_SEM_OP_SIGNAL_SEM = 3 # type: ignore
AMDGPU_SEM_OP_DESTROY_SEM = 4 # type: ignore
AMDGPU_SEM_OP_IMPORT_SEM = 5 # type: ignore
AMDGPU_SEM_OP_EXPORT_SEM = 6 # type: ignore
AMDGPU_VM_OP_RESERVE_VMID = 1 # type: ignore
AMDGPU_VM_OP_UNRESERVE_VMID = 2 # type: ignore
AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE = 1 # type: ignore
AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE = 2 # type: ignore
AMDGPU_GEM_USERPTR_READONLY = (1 << 0) # type: ignore
AMDGPU_GEM_USERPTR_ANONONLY = (1 << 1) # type: ignore
AMDGPU_GEM_USERPTR_VALIDATE = (1 << 2) # type: ignore
AMDGPU_GEM_USERPTR_REGISTER = (1 << 3) # type: ignore
AMDGPU_GEM_DGMA_IMPORT = 0 # type: ignore
AMDGPU_GEM_DGMA_QUERY_PHYS_ADDR = 1 # type: ignore
AMDGPU_TILING_ARRAY_MODE_SHIFT = 0 # type: ignore
AMDGPU_TILING_ARRAY_MODE_MASK = 0xf # type: ignore
AMDGPU_TILING_PIPE_CONFIG_SHIFT = 4 # type: ignore
AMDGPU_TILING_PIPE_CONFIG_MASK = 0x1f # type: ignore
AMDGPU_TILING_TILE_SPLIT_SHIFT = 9 # type: ignore
AMDGPU_TILING_TILE_SPLIT_MASK = 0x7 # type: ignore
AMDGPU_TILING_MICRO_TILE_MODE_SHIFT = 12 # type: ignore
AMDGPU_TILING_MICRO_TILE_MODE_MASK = 0x7 # type: ignore
AMDGPU_TILING_BANK_WIDTH_SHIFT = 15 # type: ignore
AMDGPU_TILING_BANK_WIDTH_MASK = 0x3 # type: ignore
AMDGPU_TILING_BANK_HEIGHT_SHIFT = 17 # type: ignore
AMDGPU_TILING_BANK_HEIGHT_MASK = 0x3 # type: ignore
AMDGPU_TILING_MACRO_TILE_ASPECT_SHIFT = 19 # type: ignore
AMDGPU_TILING_MACRO_TILE_ASPECT_MASK = 0x3 # type: ignore
AMDGPU_TILING_NUM_BANKS_SHIFT = 21 # type: ignore
AMDGPU_TILING_NUM_BANKS_MASK = 0x3 # type: ignore
AMDGPU_TILING_SWIZZLE_MODE_SHIFT = 0 # type: ignore
AMDGPU_TILING_SWIZZLE_MODE_MASK = 0x1f # type: ignore
AMDGPU_TILING_DCC_OFFSET_256B_SHIFT = 5 # type: ignore
AMDGPU_TILING_DCC_OFFSET_256B_MASK = 0xFFFFFF # type: ignore
AMDGPU_TILING_DCC_PITCH_MAX_SHIFT = 29 # type: ignore
AMDGPU_TILING_DCC_PITCH_MAX_MASK = 0x3FFF # type: ignore
AMDGPU_TILING_DCC_INDEPENDENT_64B_SHIFT = 43 # type: ignore
AMDGPU_TILING_DCC_INDEPENDENT_64B_MASK = 0x1 # type: ignore
AMDGPU_TILING_DCC_INDEPENDENT_128B_SHIFT = 44 # type: ignore
AMDGPU_TILING_DCC_INDEPENDENT_128B_MASK = 0x1 # type: ignore
AMDGPU_TILING_SCANOUT_SHIFT = 63 # type: ignore
AMDGPU_TILING_SCANOUT_MASK = 0x1 # type: ignore
AMDGPU_TILING_GFX12_SWIZZLE_MODE_SHIFT = 0 # type: ignore
AMDGPU_TILING_GFX12_SWIZZLE_MODE_MASK = 0x7 # type: ignore
AMDGPU_TILING_GFX12_DCC_MAX_COMPRESSED_BLOCK_SHIFT = 3 # type: ignore
AMDGPU_TILING_GFX12_DCC_MAX_COMPRESSED_BLOCK_MASK = 0x3 # type: ignore
AMDGPU_TILING_GFX12_DCC_NUMBER_TYPE_SHIFT = 5 # type: ignore
AMDGPU_TILING_GFX12_DCC_NUMBER_TYPE_MASK = 0x7 # type: ignore
AMDGPU_TILING_GFX12_DCC_DATA_FORMAT_SHIFT = 8 # type: ignore
AMDGPU_TILING_GFX12_DCC_DATA_FORMAT_MASK = 0x3f # type: ignore
AMDGPU_TILING_GFX12_DCC_WRITE_COMPRESS_DISABLE_SHIFT = 14 # type: ignore
AMDGPU_TILING_GFX12_DCC_WRITE_COMPRESS_DISABLE_MASK = 0x1 # type: ignore
AMDGPU_TILING_GFX12_SCANOUT_SHIFT = 63 # type: ignore
AMDGPU_TILING_GFX12_SCANOUT_MASK = 0x1 # type: ignore
AMDGPU_GEM_METADATA_OP_SET_METADATA = 1 # type: ignore
AMDGPU_GEM_METADATA_OP_GET_METADATA = 2 # type: ignore
AMDGPU_GEM_OP_GET_GEM_CREATE_INFO = 0 # type: ignore
AMDGPU_GEM_OP_SET_PLACEMENT = 1 # type: ignore
AMDGPU_VA_OP_MAP = 1 # type: ignore
AMDGPU_VA_OP_UNMAP = 2 # type: ignore
AMDGPU_VA_OP_CLEAR = 3 # type: ignore
AMDGPU_VA_OP_REPLACE = 4 # type: ignore
AMDGPU_VM_DELAY_UPDATE = (1 << 0) # type: ignore
AMDGPU_VM_PAGE_READABLE = (1 << 1) # type: ignore
AMDGPU_VM_PAGE_WRITEABLE = (1 << 2) # type: ignore
AMDGPU_VM_PAGE_EXECUTABLE = (1 << 3) # type: ignore
AMDGPU_VM_PAGE_PRT = (1 << 4) # type: ignore
AMDGPU_VM_MTYPE_MASK = (0xf << 5) # type: ignore
AMDGPU_VM_MTYPE_DEFAULT = (0 << 5) # type: ignore
AMDGPU_VM_MTYPE_NC = (1 << 5) # type: ignore
AMDGPU_VM_MTYPE_WC = (2 << 5) # type: ignore
AMDGPU_VM_MTYPE_CC = (3 << 5) # type: ignore
AMDGPU_VM_MTYPE_UC = (4 << 5) # type: ignore
AMDGPU_VM_MTYPE_RW = (5 << 5) # type: ignore
AMDGPU_VM_PAGE_NOALLOC = (1 << 9) # type: ignore
AMDGPU_HW_IP_GFX = 0 # type: ignore
AMDGPU_HW_IP_COMPUTE = 1 # type: ignore
AMDGPU_HW_IP_DMA = 2 # type: ignore
AMDGPU_HW_IP_UVD = 3 # type: ignore
AMDGPU_HW_IP_VCE = 4 # type: ignore
AMDGPU_HW_IP_UVD_ENC = 5 # type: ignore
AMDGPU_HW_IP_VCN_DEC = 6 # type: ignore
AMDGPU_HW_IP_VCN_ENC = 7 # type: ignore
AMDGPU_HW_IP_VCN_JPEG = 8 # type: ignore
AMDGPU_HW_IP_VPE = 9 # type: ignore
AMDGPU_HW_IP_NUM = 10 # type: ignore
AMDGPU_HW_IP_INSTANCE_MAX_COUNT = 1 # type: ignore
AMDGPU_CHUNK_ID_IB = 0x01 # type: ignore
AMDGPU_CHUNK_ID_FENCE = 0x02 # type: ignore
AMDGPU_CHUNK_ID_DEPENDENCIES = 0x03 # type: ignore
AMDGPU_CHUNK_ID_SYNCOBJ_IN = 0x04 # type: ignore
AMDGPU_CHUNK_ID_SYNCOBJ_OUT = 0x05 # type: ignore
AMDGPU_CHUNK_ID_BO_HANDLES = 0x06 # type: ignore
AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES = 0x07 # type: ignore
AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT = 0x08 # type: ignore
AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL = 0x09 # type: ignore
AMDGPU_CHUNK_ID_CP_GFX_SHADOW = 0x0a # type: ignore
AMDGPU_IB_FLAG_CE = (1<<0) # type: ignore
AMDGPU_IB_FLAG_PREAMBLE = (1<<1) # type: ignore
AMDGPU_IB_FLAG_PREEMPT = (1<<2) # type: ignore
AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE = (1 << 3) # type: ignore
AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID = (1 << 4) # type: ignore
AMDGPU_IB_FLAGS_SECURE = (1 << 5) # type: ignore
AMDGPU_IB_FLAG_EMIT_MEM_SYNC = (1 << 6) # type: ignore
AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ = 0 # type: ignore
AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD = 1 # type: ignore
AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD = 2 # type: ignore
AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW = 0x1 # type: ignore
AMDGPU_IDS_FLAGS_FUSION = 0x1 # type: ignore
AMDGPU_IDS_FLAGS_PREEMPTION = 0x2 # type: ignore
AMDGPU_IDS_FLAGS_TMZ = 0x4 # type: ignore
AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD = 0x8 # type: ignore
AMDGPU_IDS_FLAGS_MODE_MASK = 0x300 # type: ignore
AMDGPU_IDS_FLAGS_MODE_SHIFT = 0x8 # type: ignore
AMDGPU_IDS_FLAGS_MODE_PF = 0x0 # type: ignore
AMDGPU_IDS_FLAGS_MODE_VF = 0x1 # type: ignore
AMDGPU_IDS_FLAGS_MODE_PT = 0x2 # type: ignore
AMDGPU_INFO_ACCEL_WORKING = 0x00 # type: ignore
AMDGPU_INFO_CRTC_FROM_ID = 0x01 # type: ignore
AMDGPU_INFO_HW_IP_INFO = 0x02 # type: ignore
AMDGPU_INFO_HW_IP_COUNT = 0x03 # type: ignore
AMDGPU_INFO_TIMESTAMP = 0x05 # type: ignore
AMDGPU_INFO_FW_VERSION = 0x0e # type: ignore
AMDGPU_INFO_FW_VCE = 0x1 # type: ignore
AMDGPU_INFO_FW_UVD = 0x2 # type: ignore
AMDGPU_INFO_FW_GMC = 0x03 # type: ignore
AMDGPU_INFO_FW_GFX_ME = 0x04 # type: ignore
AMDGPU_INFO_FW_GFX_PFP = 0x05 # type: ignore
AMDGPU_INFO_FW_GFX_CE = 0x06 # type: ignore
AMDGPU_INFO_FW_GFX_RLC = 0x07 # type: ignore
AMDGPU_INFO_FW_GFX_MEC = 0x08 # type: ignore
AMDGPU_INFO_FW_SMC = 0x0a # type: ignore
AMDGPU_INFO_FW_SDMA = 0x0b # type: ignore
AMDGPU_INFO_FW_SOS = 0x0c # type: ignore
AMDGPU_INFO_FW_ASD = 0x0d # type: ignore
AMDGPU_INFO_FW_VCN = 0x0e # type: ignore
AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL = 0x0f # type: ignore
AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM = 0x10 # type: ignore
AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM = 0x11 # type: ignore
AMDGPU_INFO_FW_DMCU = 0x12 # type: ignore
AMDGPU_INFO_FW_TA = 0x13 # type: ignore
AMDGPU_INFO_FW_DMCUB = 0x14 # type: ignore
AMDGPU_INFO_FW_TOC = 0x15 # type: ignore
AMDGPU_INFO_FW_CAP = 0x16 # type: ignore
AMDGPU_INFO_FW_GFX_RLCP = 0x17 # type: ignore
AMDGPU_INFO_FW_GFX_RLCV = 0x18 # type: ignore
AMDGPU_INFO_FW_MES_KIQ = 0x19 # type: ignore
AMDGPU_INFO_FW_MES = 0x1a # type: ignore
AMDGPU_INFO_FW_IMU = 0x1b # type: ignore
AMDGPU_INFO_FW_VPE = 0x1c # type: ignore
AMDGPU_INFO_NUM_BYTES_MOVED = 0x0f # type: ignore
AMDGPU_INFO_VRAM_USAGE = 0x10 # type: ignore
AMDGPU_INFO_GTT_USAGE = 0x11 # type: ignore
AMDGPU_INFO_GDS_CONFIG = 0x13 # type: ignore
AMDGPU_INFO_VRAM_GTT = 0x14 # type: ignore
AMDGPU_INFO_READ_MMR_REG = 0x15 # type: ignore
AMDGPU_INFO_DEV_INFO = 0x16 # type: ignore
AMDGPU_INFO_VIS_VRAM_USAGE = 0x17 # type: ignore
AMDGPU_INFO_NUM_EVICTIONS = 0x18 # type: ignore
AMDGPU_INFO_MEMORY = 0x19 # type: ignore
AMDGPU_INFO_VCE_CLOCK_TABLE = 0x1A # type: ignore
AMDGPU_INFO_VBIOS = 0x1B # type: ignore
AMDGPU_INFO_VBIOS_SIZE = 0x1 # type: ignore
AMDGPU_INFO_VBIOS_IMAGE = 0x2 # type: ignore
AMDGPU_INFO_VBIOS_INFO = 0x3 # type: ignore
AMDGPU_INFO_NUM_HANDLES = 0x1C # type: ignore
AMDGPU_INFO_SENSOR = 0x1D # type: ignore
AMDGPU_INFO_SENSOR_GFX_SCLK = 0x1 # type: ignore
AMDGPU_INFO_SENSOR_GFX_MCLK = 0x2 # type: ignore
AMDGPU_INFO_SENSOR_GPU_TEMP = 0x3 # type: ignore
AMDGPU_INFO_SENSOR_GPU_LOAD = 0x4 # type: ignore
AMDGPU_INFO_SENSOR_GPU_AVG_POWER = 0x5 # type: ignore
AMDGPU_INFO_SENSOR_VDDNB = 0x6 # type: ignore
AMDGPU_INFO_SENSOR_VDDGFX = 0x7 # type: ignore
AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK = 0x8 # type: ignore
AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK = 0x9 # type: ignore
AMDGPU_INFO_SENSOR_PEAK_PSTATE_GFX_SCLK = 0xa # type: ignore
AMDGPU_INFO_SENSOR_PEAK_PSTATE_GFX_MCLK = 0xb # type: ignore
AMDGPU_INFO_SENSOR_GPU_INPUT_POWER = 0xc # type: ignore
AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS = 0x1E # type: ignore
AMDGPU_INFO_VRAM_LOST_COUNTER = 0x1F # type: ignore
AMDGPU_INFO_RAS_ENABLED_FEATURES = 0x20 # type: ignore
AMDGPU_INFO_RAS_ENABLED_UMC = (1 << 0) # type: ignore
AMDGPU_INFO_RAS_ENABLED_SDMA = (1 << 1) # type: ignore
AMDGPU_INFO_RAS_ENABLED_GFX = (1 << 2) # type: ignore
AMDGPU_INFO_RAS_ENABLED_MMHUB = (1 << 3) # type: ignore
AMDGPU_INFO_RAS_ENABLED_ATHUB = (1 << 4) # type: ignore
AMDGPU_INFO_RAS_ENABLED_PCIE = (1 << 5) # type: ignore
AMDGPU_INFO_RAS_ENABLED_HDP = (1 << 6) # type: ignore
AMDGPU_INFO_RAS_ENABLED_XGMI = (1 << 7) # type: ignore
AMDGPU_INFO_RAS_ENABLED_DF = (1 << 8) # type: ignore
AMDGPU_INFO_RAS_ENABLED_SMN = (1 << 9) # type: ignore
AMDGPU_INFO_RAS_ENABLED_SEM = (1 << 10) # type: ignore
AMDGPU_INFO_RAS_ENABLED_MP0 = (1 << 11) # type: ignore
AMDGPU_INFO_RAS_ENABLED_MP1 = (1 << 12) # type: ignore
AMDGPU_INFO_RAS_ENABLED_FUSE = (1 << 13) # type: ignore
AMDGPU_INFO_VIDEO_CAPS = 0x21 # type: ignore
AMDGPU_INFO_VIDEO_CAPS_DECODE = 0 # type: ignore
AMDGPU_INFO_VIDEO_CAPS_ENCODE = 1 # type: ignore
AMDGPU_INFO_MAX_IBS = 0x22 # type: ignore
AMDGPU_INFO_GPUVM_FAULT = 0x23 # type: ignore
AMDGPU_INFO_UQ_FW_AREAS = 0x24 # type: ignore
AMDGPU_INFO_CAPABILITY = 0x50 # type: ignore
AMDGPU_INFO_VIRTUAL_RANGE = 0x51 # type: ignore
AMDGPU_CAPABILITY_PIN_MEM_FLAG = (1 << 0) # type: ignore
AMDGPU_CAPABILITY_DIRECT_GMA_FLAG = (1 << 1) # type: ignore
AMDGPU_INFO_MMR_SE_INDEX_SHIFT = 0 # type: ignore
AMDGPU_INFO_MMR_SE_INDEX_MASK = 0xff # type: ignore
AMDGPU_INFO_MMR_SH_INDEX_SHIFT = 8 # type: ignore
AMDGPU_INFO_MMR_SH_INDEX_MASK = 0xff # type: ignore
AMDGPU_VRAM_TYPE_UNKNOWN = 0 # type: ignore
AMDGPU_VRAM_TYPE_GDDR1 = 1 # type: ignore
AMDGPU_VRAM_TYPE_DDR2 = 2 # type: ignore
AMDGPU_VRAM_TYPE_GDDR3 = 3 # type: ignore
AMDGPU_VRAM_TYPE_GDDR4 = 4 # type: ignore
AMDGPU_VRAM_TYPE_GDDR5 = 5 # type: ignore
AMDGPU_VRAM_TYPE_HBM = 6 # type: ignore
AMDGPU_VRAM_TYPE_DDR3 = 7 # type: ignore
AMDGPU_VRAM_TYPE_DDR4 = 8 # type: ignore
AMDGPU_VRAM_TYPE_GDDR6 = 9 # type: ignore
AMDGPU_VRAM_TYPE_DDR5 = 10 # type: ignore
AMDGPU_VRAM_TYPE_LPDDR4 = 11 # type: ignore
AMDGPU_VRAM_TYPE_LPDDR5 = 12 # type: ignore
AMDGPU_VRAM_TYPE_HBM3E = 13 # type: ignore
AMDGPU_VRAM_TYPE_HBM_WIDTH = 4096 # type: ignore
AMDGPU_VCE_CLOCK_TABLE_ENTRIES = 6 # type: ignore
AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2 = 0 # type: ignore
AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4 = 1 # type: ignore
AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1 = 2 # type: ignore
AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC = 3 # type: ignore
AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC = 4 # type: ignore
AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG = 5 # type: ignore
AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9 = 6 # type: ignore
AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1 = 7 # type: ignore
AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_COUNT = 8 # type: ignore
AMDGPU_VMHUB_TYPE_MASK = 0xff # type: ignore
AMDGPU_VMHUB_TYPE_SHIFT = 0 # type: ignore
AMDGPU_VMHUB_TYPE_GFX = 0 # type: ignore
AMDGPU_VMHUB_TYPE_MM0 = 1 # type: ignore
AMDGPU_VMHUB_TYPE_MM1 = 2 # type: ignore
AMDGPU_VMHUB_IDX_MASK = 0xff00 # type: ignore
AMDGPU_VMHUB_IDX_SHIFT = 8 # type: ignore
AMDGPU_FAMILY_UNKNOWN = 0 # type: ignore
AMDGPU_FAMILY_SI = 110 # type: ignore
AMDGPU_FAMILY_CI = 120 # type: ignore
AMDGPU_FAMILY_KV = 125 # type: ignore
AMDGPU_FAMILY_VI = 130 # type: ignore
AMDGPU_FAMILY_CZ = 135 # type: ignore
AMDGPU_FAMILY_AI = 141 # type: ignore
AMDGPU_FAMILY_RV = 142 # type: ignore
AMDGPU_FAMILY_NV = 143 # type: ignore
AMDGPU_FAMILY_VGH = 144 # type: ignore
AMDGPU_FAMILY_GC_11_0_0 = 145 # type: ignore
AMDGPU_FAMILY_YC = 146 # type: ignore
AMDGPU_FAMILY_GC_11_0_1 = 148 # type: ignore
AMDGPU_FAMILY_GC_10_3_6 = 149 # type: ignore
AMDGPU_FAMILY_GC_10_3_7 = 151 # type: ignore
AMDGPU_FAMILY_GC_11_5_0 = 150 # type: ignore
AMDGPU_FAMILY_GC_12_0_0 = 152 # type: ignore
AMDGPU_SUA_APERTURE_PRIVATE = 1 # type: ignore
AMDGPU_SUA_APERTURE_SHARED = 2 # type: ignore
AMDGPU_FREESYNC_FULLSCREEN_ENTER = 1 # type: ignore
AMDGPU_FREESYNC_FULLSCREEN_EXIT = 2 # type: ignore | {
"repo_id": "tinygrad/tinygrad",
"file_path": "tinygrad/runtime/autogen/amdgpu_drm.py",
"license": "MIT License",
"lines": 1583,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
tinygrad/tinygrad:examples/audio_helpers.py | from typing import Optional
from tinygrad import Tensor
from tinygrad.dtype import DTypeLike, dtypes
import math
# rewritten from numpy
def rfftfreq(n: int, d: float = 1.0, device=None) -> Tensor:
  """Sample frequencies for a length-n real FFT (equivalent of numpy.fft.rfftfreq)."""
  # n//2 + 1 non-negative frequency bins, spaced 1/(n*d) apart.
  step = 1.0 / (n * d)
  return Tensor.arange(n // 2 + 1, device=device) * step
# just like in librosa
def fft_frequencies(sr: float, n_fft: int) -> Tensor:
  """Center frequencies (Hz) of the bins of an n_fft-point FFT at sample rate sr (librosa-compatible)."""
  return rfftfreq(n_fft, 1.0 / sr)
def hz_to_mel(freq: Tensor) -> Tensor:
  """Convert frequencies in Hz to the Slaney mel scale, elementwise (librosa convention)."""
  # The scale is linear below 1 kHz and logarithmic above it.
  f_min, f_sp = 0.0, 200.0 / 3
  min_log_hz = 1000.0                          # beginning of the log region (Hz)
  min_log_mel = (min_log_hz - f_min) / f_sp    # the same point, in mels
  logstep = math.log(6.4) / 27.0               # step size inside the log region
  linear = (freq - f_min) / f_sp
  log_part = min_log_mel + (freq / min_log_hz).log() / logstep
  return (freq >= min_log_hz).where(log_part, linear)
def mel_to_hz(mels: Tensor) -> Tensor:
  """Convert Slaney mel values back to frequencies in Hz, elementwise (inverse of hz_to_mel)."""
  f_min, f_sp = 0.0, 200.0 / 3
  min_log_hz = 1000.0                          # beginning of the log region (Hz)
  min_log_mel = (min_log_hz - f_min) / f_sp    # the same point, in mels
  logstep = math.log(6.4) / 27.0               # step size inside the log region
  linear = f_min + f_sp * mels
  log_part = min_log_hz * ((mels - min_log_mel) * logstep).exp()
  return (mels >= min_log_mel).where(log_part, linear)
def mel_frequencies(n_mels: int = 128, *, fmin: float = 0.0, fmax: float = 11025.0) -> Tensor:
  """Return n_mels band center frequencies (Hz), uniformly spaced on the mel scale between fmin and fmax."""
  mel_lo_hi = hz_to_mel(Tensor([fmin, fmax]))
  return mel_to_hz(Tensor.linspace(mel_lo_hi[0], mel_lo_hi[1], n_mels))
def mel(
    *,
    sr: float,
    n_fft: int,
    n_mels: int = 128,
    fmin: float = 0.0,
    fmax: Optional[float] = None,
    dtype: DTypeLike = dtypes.default_float,
) -> Tensor:
  """Build a mel filterbank matrix of shape (n_mels, 1 + n_fft // 2), like librosa.filters.mel.

  Each row is a triangular filter over the FFT bin frequencies, Slaney-normalized so
  each channel carries approximately constant energy.
  NOTE(review): `dtype` is accepted but never used in the body — confirm whether a
  cast of the result was intended.
  """
  if fmax is None:
    fmax = float(sr) / 2
  n_mels = int(n_mels)
  fftfreqs = fft_frequencies(sr=sr, n_fft=n_fft)  # center freqs of each FFT bin
  mel_f = mel_frequencies(n_mels + 2, fmin=fmin, fmax=fmax)  # center freqs of mel bands (n_mels plus the two edges)
  fdiff = mel_f[1:] - mel_f[:-1]  # spacing between adjacent band centers
  # ramps[i, j] = mel_f[i] - fftfreqs[j]: signed distance from every band center to every FFT bin
  ramps = mel_f[None].T.expand(-1, fftfreqs.shape[-1]) - fftfreqs
  lower = -ramps[:n_mels] / fdiff[:n_mels][None].T  # rising edge of each triangle
  upper = ramps[2 : n_mels + 2] / fdiff[1 : n_mels + 1][None].T  # falling edge of each triangle
  weights = lower.minimum(upper).maximum(0)  # intersect the two edges, clip negatives to zero
  # Slaney-style mel is scaled to be approx constant energy per channel
  enorm = 2.0 / (mel_f[2 : n_mels + 2] - mel_f[:n_mels])
  weights *= enorm[:, None]
  return weights
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "examples/audio_helpers.py",
"license": "MIT License",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
tinygrad/tinygrad:tinygrad/runtime/autogen/corefoundation.py | # mypy: disable-error-code="empty-body"
from __future__ import annotations
import ctypes
from typing import Annotated, Literal, TypeAlias
from tinygrad.runtime.support.c import _IO, _IOW, _IOR, _IOWR
from tinygrad.runtime.support import c
dll = c.DLL('corefoundation', 'CoreFoundation')
CFStringEncoding: TypeAlias = Annotated[int, ctypes.c_uint32]
CFStringBuiltInEncodings: TypeAlias = Annotated[int, ctypes.c_uint32]
class _anonenum0(Annotated[int, ctypes.c_uint32], c.Enum): pass
kCFStringEncodingMacRoman = _anonenum0.define('kCFStringEncodingMacRoman', 0)
kCFStringEncodingWindowsLatin1 = _anonenum0.define('kCFStringEncodingWindowsLatin1', 1280)
kCFStringEncodingISOLatin1 = _anonenum0.define('kCFStringEncodingISOLatin1', 513)
kCFStringEncodingNextStepLatin = _anonenum0.define('kCFStringEncodingNextStepLatin', 2817)
kCFStringEncodingASCII = _anonenum0.define('kCFStringEncodingASCII', 1536)
kCFStringEncodingUnicode = _anonenum0.define('kCFStringEncodingUnicode', 256)
kCFStringEncodingUTF8 = _anonenum0.define('kCFStringEncodingUTF8', 134217984)
kCFStringEncodingNonLossyASCII = _anonenum0.define('kCFStringEncodingNonLossyASCII', 3071)
kCFStringEncodingUTF16 = _anonenum0.define('kCFStringEncodingUTF16', 256)
kCFStringEncodingUTF16BE = _anonenum0.define('kCFStringEncodingUTF16BE', 268435712)
kCFStringEncodingUTF16LE = _anonenum0.define('kCFStringEncodingUTF16LE', 335544576)
kCFStringEncodingUTF32 = _anonenum0.define('kCFStringEncodingUTF32', 201326848)
kCFStringEncodingUTF32BE = _anonenum0.define('kCFStringEncodingUTF32BE', 402653440)
kCFStringEncodingUTF32LE = _anonenum0.define('kCFStringEncodingUTF32LE', 469762304)
CFTypeID: TypeAlias = Annotated[int, ctypes.c_uint64]
@dll.bind
def CFStringGetTypeID() -> CFTypeID: ...
class struct___CFAllocator(ctypes.Structure): pass
CFAllocatorRef: TypeAlias = c.POINTER[struct___CFAllocator]
ConstStr255Param: TypeAlias = c.POINTER[Annotated[int, ctypes.c_ubyte]]
class struct___CFString(ctypes.Structure): pass
CFStringRef: TypeAlias = c.POINTER[struct___CFString]
@dll.bind
def CFStringCreateWithPascalString(alloc:CFAllocatorRef, pStr:ConstStr255Param, encoding:CFStringEncoding) -> CFStringRef: ...
@dll.bind
def CFStringCreateWithCString(alloc:CFAllocatorRef, cStr:c.POINTER[Annotated[bytes, ctypes.c_char]], encoding:CFStringEncoding) -> CFStringRef: ...
UInt8: TypeAlias = Annotated[int, ctypes.c_ubyte]
CFIndex: TypeAlias = Annotated[int, ctypes.c_int64]
Boolean: TypeAlias = Annotated[int, ctypes.c_ubyte]
@dll.bind
def CFStringCreateWithBytes(alloc:CFAllocatorRef, bytes:c.POINTER[UInt8], numBytes:CFIndex, encoding:CFStringEncoding, isExternalRepresentation:Boolean) -> CFStringRef: ...
UniChar: TypeAlias = Annotated[int, ctypes.c_uint16]
@dll.bind
def CFStringCreateWithCharacters(alloc:CFAllocatorRef, chars:c.POINTER[UniChar], numChars:CFIndex) -> CFStringRef: ...
@dll.bind
def CFStringCreateWithPascalStringNoCopy(alloc:CFAllocatorRef, pStr:ConstStr255Param, encoding:CFStringEncoding, contentsDeallocator:CFAllocatorRef) -> CFStringRef: ...
@dll.bind
def CFStringCreateWithCStringNoCopy(alloc:CFAllocatorRef, cStr:c.POINTER[Annotated[bytes, ctypes.c_char]], encoding:CFStringEncoding, contentsDeallocator:CFAllocatorRef) -> CFStringRef: ...
@dll.bind
def CFStringCreateWithBytesNoCopy(alloc:CFAllocatorRef, bytes:c.POINTER[UInt8], numBytes:CFIndex, encoding:CFStringEncoding, isExternalRepresentation:Boolean, contentsDeallocator:CFAllocatorRef) -> CFStringRef: ...
@dll.bind
def CFStringCreateWithCharactersNoCopy(alloc:CFAllocatorRef, chars:c.POINTER[UniChar], numChars:CFIndex, contentsDeallocator:CFAllocatorRef) -> CFStringRef: ...
@c.record
class CFRange(c.Struct):
SIZE = 16
location: Annotated[CFIndex, 0]
length: Annotated[CFIndex, 8]
@dll.bind
def CFStringCreateWithSubstring(alloc:CFAllocatorRef, str:CFStringRef, range:CFRange) -> CFStringRef: ...
@dll.bind
def CFStringCreateCopy(alloc:CFAllocatorRef, theString:CFStringRef) -> CFStringRef: ...
class struct___CFDictionary(ctypes.Structure): pass
CFDictionaryRef: TypeAlias = c.POINTER[struct___CFDictionary]
@dll.bind
def CFStringCreateWithFormat(alloc:CFAllocatorRef, formatOptions:CFDictionaryRef, format:CFStringRef) -> CFStringRef: ...
@dll.bind
def CFStringCreateWithFormatAndArguments(alloc:CFAllocatorRef, formatOptions:CFDictionaryRef, format:CFStringRef, arguments:Annotated[int, ctypes.c_int32]) -> CFStringRef: ...
class struct___CFError(ctypes.Structure): pass
CFErrorRef: TypeAlias = c.POINTER[struct___CFError]
@dll.bind
def CFStringCreateStringWithValidatedFormat(alloc:CFAllocatorRef, formatOptions:CFDictionaryRef, validFormatSpecifiers:CFStringRef, format:CFStringRef, errorPtr:c.POINTER[CFErrorRef]) -> CFStringRef: ...
@dll.bind
def CFStringCreateStringWithValidatedFormatAndArguments(alloc:CFAllocatorRef, formatOptions:CFDictionaryRef, validFormatSpecifiers:CFStringRef, format:CFStringRef, arguments:Annotated[int, ctypes.c_int32], errorPtr:c.POINTER[CFErrorRef]) -> CFStringRef: ...
CFMutableStringRef: TypeAlias = c.POINTER[struct___CFString]
@dll.bind
def CFStringCreateMutable(alloc:CFAllocatorRef, maxLength:CFIndex) -> CFMutableStringRef: ...
@dll.bind
def CFStringCreateMutableCopy(alloc:CFAllocatorRef, maxLength:CFIndex, theString:CFStringRef) -> CFMutableStringRef: ...
@dll.bind
def CFStringCreateMutableWithExternalCharactersNoCopy(alloc:CFAllocatorRef, chars:c.POINTER[UniChar], numChars:CFIndex, capacity:CFIndex, externalCharactersAllocator:CFAllocatorRef) -> CFMutableStringRef: ...
@dll.bind
def CFStringGetLength(theString:CFStringRef) -> CFIndex: ...
@dll.bind
def CFStringGetCharacterAtIndex(theString:CFStringRef, idx:CFIndex) -> UniChar: ...
@dll.bind
def CFStringGetCharacters(theString:CFStringRef, range:CFRange, buffer:c.POINTER[UniChar]) -> None: ...
StringPtr: TypeAlias = c.POINTER[Annotated[int, ctypes.c_ubyte]]
@dll.bind
def CFStringGetPascalString(theString:CFStringRef, buffer:StringPtr, bufferSize:CFIndex, encoding:CFStringEncoding) -> Boolean: ...
@dll.bind
def CFStringGetCString(theString:CFStringRef, buffer:c.POINTER[Annotated[bytes, ctypes.c_char]], bufferSize:CFIndex, encoding:CFStringEncoding) -> Boolean: ...
ConstStringPtr: TypeAlias = c.POINTER[Annotated[int, ctypes.c_ubyte]]
@dll.bind
def CFStringGetPascalStringPtr(theString:CFStringRef, encoding:CFStringEncoding) -> ConstStringPtr: ...
@dll.bind
def CFStringGetCStringPtr(theString:CFStringRef, encoding:CFStringEncoding) -> c.POINTER[Annotated[bytes, ctypes.c_char]]: ...
@dll.bind
def CFStringGetCharactersPtr(theString:CFStringRef) -> c.POINTER[UniChar]: ...
@dll.bind
def CFStringGetBytes(theString:CFStringRef, range:CFRange, encoding:CFStringEncoding, lossByte:UInt8, isExternalRepresentation:Boolean, buffer:c.POINTER[UInt8], maxBufLen:CFIndex, usedBufLen:c.POINTER[CFIndex]) -> CFIndex: ...
class struct___CFData(ctypes.Structure): pass
CFDataRef: TypeAlias = c.POINTER[struct___CFData]
@dll.bind
def CFStringCreateFromExternalRepresentation(alloc:CFAllocatorRef, data:CFDataRef, encoding:CFStringEncoding) -> CFStringRef: ...
@dll.bind
def CFStringCreateExternalRepresentation(alloc:CFAllocatorRef, theString:CFStringRef, encoding:CFStringEncoding, lossByte:UInt8) -> CFDataRef: ...
@dll.bind
def CFStringGetSmallestEncoding(theString:CFStringRef) -> CFStringEncoding: ...
@dll.bind
def CFStringGetFastestEncoding(theString:CFStringRef) -> CFStringEncoding: ...
@dll.bind
def CFStringGetSystemEncoding() -> CFStringEncoding: ...
@dll.bind
def CFStringGetMaximumSizeForEncoding(length:CFIndex, encoding:CFStringEncoding) -> CFIndex: ...
@dll.bind
def CFStringGetFileSystemRepresentation(string:CFStringRef, buffer:c.POINTER[Annotated[bytes, ctypes.c_char]], maxBufLen:CFIndex) -> Boolean: ...
@dll.bind
def CFStringGetMaximumSizeOfFileSystemRepresentation(string:CFStringRef) -> CFIndex: ...
@dll.bind
def CFStringCreateWithFileSystemRepresentation(alloc:CFAllocatorRef, buffer:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> CFStringRef: ...
CFStringCompareFlags: TypeAlias = Annotated[int, ctypes.c_uint64]
class _anonenum1(Annotated[int, ctypes.c_uint32], c.Enum): pass
kCFCompareCaseInsensitive = _anonenum1.define('kCFCompareCaseInsensitive', 1)
kCFCompareBackwards = _anonenum1.define('kCFCompareBackwards', 4)
kCFCompareAnchored = _anonenum1.define('kCFCompareAnchored', 8)
kCFCompareNonliteral = _anonenum1.define('kCFCompareNonliteral', 16)
kCFCompareLocalized = _anonenum1.define('kCFCompareLocalized', 32)
kCFCompareNumerically = _anonenum1.define('kCFCompareNumerically', 64)
kCFCompareDiacriticInsensitive = _anonenum1.define('kCFCompareDiacriticInsensitive', 128)
kCFCompareWidthInsensitive = _anonenum1.define('kCFCompareWidthInsensitive', 256)
kCFCompareForcedOrdering = _anonenum1.define('kCFCompareForcedOrdering', 512)
class struct___CFLocale(ctypes.Structure): pass
CFLocaleRef: TypeAlias = c.POINTER[struct___CFLocale]
CFComparisonResult: TypeAlias = Annotated[int, ctypes.c_int64]
@dll.bind
def CFStringCompareWithOptionsAndLocale(theString1:CFStringRef, theString2:CFStringRef, rangeToCompare:CFRange, compareOptions:CFStringCompareFlags, locale:CFLocaleRef) -> CFComparisonResult: ...
@dll.bind
def CFStringCompareWithOptions(theString1:CFStringRef, theString2:CFStringRef, rangeToCompare:CFRange, compareOptions:CFStringCompareFlags) -> CFComparisonResult: ...
@dll.bind
def CFStringCompare(theString1:CFStringRef, theString2:CFStringRef, compareOptions:CFStringCompareFlags) -> CFComparisonResult: ...
@dll.bind
def CFStringFindWithOptionsAndLocale(theString:CFStringRef, stringToFind:CFStringRef, rangeToSearch:CFRange, searchOptions:CFStringCompareFlags, locale:CFLocaleRef, result:c.POINTER[CFRange]) -> Boolean: ...
@dll.bind
def CFStringFindWithOptions(theString:CFStringRef, stringToFind:CFStringRef, rangeToSearch:CFRange, searchOptions:CFStringCompareFlags, result:c.POINTER[CFRange]) -> Boolean: ...
class struct___CFArray(ctypes.Structure): pass
CFArrayRef: TypeAlias = c.POINTER[struct___CFArray]
@dll.bind
def CFStringCreateArrayWithFindResults(alloc:CFAllocatorRef, theString:CFStringRef, stringToFind:CFStringRef, rangeToSearch:CFRange, compareOptions:CFStringCompareFlags) -> CFArrayRef: ...
@dll.bind
def CFStringFind(theString:CFStringRef, stringToFind:CFStringRef, compareOptions:CFStringCompareFlags) -> CFRange: ...
@dll.bind
def CFStringHasPrefix(theString:CFStringRef, prefix:CFStringRef) -> Boolean: ...
@dll.bind
def CFStringHasSuffix(theString:CFStringRef, suffix:CFStringRef) -> Boolean: ...
@dll.bind
def CFStringGetRangeOfComposedCharactersAtIndex(theString:CFStringRef, theIndex:CFIndex) -> CFRange: ...
class struct___CFCharacterSet(ctypes.Structure): pass
CFCharacterSetRef: TypeAlias = c.POINTER[struct___CFCharacterSet]
@dll.bind
def CFStringFindCharacterFromSet(theString:CFStringRef, theSet:CFCharacterSetRef, rangeToSearch:CFRange, searchOptions:CFStringCompareFlags, result:c.POINTER[CFRange]) -> Boolean: ...
@dll.bind
def CFStringGetLineBounds(theString:CFStringRef, range:CFRange, lineBeginIndex:c.POINTER[CFIndex], lineEndIndex:c.POINTER[CFIndex], contentsEndIndex:c.POINTER[CFIndex]) -> None: ...
@dll.bind
def CFStringGetParagraphBounds(string:CFStringRef, range:CFRange, parBeginIndex:c.POINTER[CFIndex], parEndIndex:c.POINTER[CFIndex], contentsEndIndex:c.POINTER[CFIndex]) -> None: ...
CFOptionFlags: TypeAlias = Annotated[int, ctypes.c_uint64]
UTF32Char: TypeAlias = Annotated[int, ctypes.c_uint32]
@dll.bind
def CFStringGetHyphenationLocationBeforeIndex(string:CFStringRef, location:CFIndex, limitRange:CFRange, options:CFOptionFlags, locale:CFLocaleRef, character:c.POINTER[UTF32Char]) -> CFIndex: ...
@dll.bind
def CFStringIsHyphenationAvailableForLocale(locale:CFLocaleRef) -> Boolean: ...
@dll.bind
def CFStringCreateByCombiningStrings(alloc:CFAllocatorRef, theArray:CFArrayRef, separatorString:CFStringRef) -> CFStringRef: ...
@dll.bind
def CFStringCreateArrayBySeparatingStrings(alloc:CFAllocatorRef, theString:CFStringRef, separatorString:CFStringRef) -> CFArrayRef: ...
SInt32: TypeAlias = Annotated[int, ctypes.c_int32]
@dll.bind
def CFStringGetIntValue(str:CFStringRef) -> SInt32: ...
@dll.bind
def CFStringGetDoubleValue(str:CFStringRef) -> Annotated[float, ctypes.c_double]: ...
@dll.bind
def CFStringAppend(theString:CFMutableStringRef, appendedString:CFStringRef) -> None: ...
@dll.bind
def CFStringAppendCharacters(theString:CFMutableStringRef, chars:c.POINTER[UniChar], numChars:CFIndex) -> None: ...
@dll.bind
def CFStringAppendPascalString(theString:CFMutableStringRef, pStr:ConstStr255Param, encoding:CFStringEncoding) -> None: ...
@dll.bind
def CFStringAppendCString(theString:CFMutableStringRef, cStr:c.POINTER[Annotated[bytes, ctypes.c_char]], encoding:CFStringEncoding) -> None: ...
@dll.bind
def CFStringAppendFormat(theString:CFMutableStringRef, formatOptions:CFDictionaryRef, format:CFStringRef) -> None: ...
@dll.bind
def CFStringAppendFormatAndArguments(theString:CFMutableStringRef, formatOptions:CFDictionaryRef, format:CFStringRef, arguments:Annotated[int, ctypes.c_int32]) -> None: ...
@dll.bind
def CFStringInsert(str:CFMutableStringRef, idx:CFIndex, insertedStr:CFStringRef) -> None: ...
@dll.bind
def CFStringDelete(theString:CFMutableStringRef, range:CFRange) -> None: ...
@dll.bind
def CFStringReplace(theString:CFMutableStringRef, range:CFRange, replacement:CFStringRef) -> None: ...
@dll.bind
def CFStringReplaceAll(theString:CFMutableStringRef, replacement:CFStringRef) -> None: ...
@dll.bind
def CFStringFindAndReplace(theString:CFMutableStringRef, stringToFind:CFStringRef, replacementString:CFStringRef, rangeToSearch:CFRange, compareOptions:CFStringCompareFlags) -> CFIndex: ...
@dll.bind
def CFStringSetExternalCharactersNoCopy(theString:CFMutableStringRef, chars:c.POINTER[UniChar], length:CFIndex, capacity:CFIndex) -> None: ...
@dll.bind
def CFStringPad(theString:CFMutableStringRef, padString:CFStringRef, length:CFIndex, indexIntoPad:CFIndex) -> None: ...
@dll.bind
def CFStringTrim(theString:CFMutableStringRef, trimString:CFStringRef) -> None: ...
@dll.bind
def CFStringTrimWhitespace(theString:CFMutableStringRef) -> None: ...
@dll.bind
def CFStringLowercase(theString:CFMutableStringRef, locale:CFLocaleRef) -> None: ...
@dll.bind
def CFStringUppercase(theString:CFMutableStringRef, locale:CFLocaleRef) -> None: ...
@dll.bind
def CFStringCapitalize(theString:CFMutableStringRef, locale:CFLocaleRef) -> None: ...
CFStringNormalizationForm: TypeAlias = Annotated[int, ctypes.c_int64]
# Unicode normalization forms accepted by CFStringNormalize's theForm argument.
class _anonenum2(Annotated[int, ctypes.c_uint32], c.Enum): pass
kCFStringNormalizationFormD = _anonenum2.define('kCFStringNormalizationFormD', 0)
kCFStringNormalizationFormKD = _anonenum2.define('kCFStringNormalizationFormKD', 1)
kCFStringNormalizationFormC = _anonenum2.define('kCFStringNormalizationFormC', 2)
kCFStringNormalizationFormKC = _anonenum2.define('kCFStringNormalizationFormKC', 3)
@dll.bind
def CFStringNormalize(theString:CFMutableStringRef, theForm:CFStringNormalizationForm) -> None: ...
@dll.bind
def CFStringFold(theString:CFMutableStringRef, theFlags:CFStringCompareFlags, theLocale:CFLocaleRef) -> None: ...
@dll.bind
def CFStringTransform(string:CFMutableStringRef, range:c.POINTER[CFRange], transform:CFStringRef, reverse:Boolean) -> Boolean: ...
try: kCFStringTransformStripCombiningMarks = CFStringRef.in_dll(dll, 'kCFStringTransformStripCombiningMarks') # type: ignore
except (ValueError,AttributeError): pass
try: kCFStringTransformToLatin = CFStringRef.in_dll(dll, 'kCFStringTransformToLatin') # type: ignore
except (ValueError,AttributeError): pass
try: kCFStringTransformFullwidthHalfwidth = CFStringRef.in_dll(dll, 'kCFStringTransformFullwidthHalfwidth') # type: ignore
except (ValueError,AttributeError): pass
try: kCFStringTransformLatinKatakana = CFStringRef.in_dll(dll, 'kCFStringTransformLatinKatakana') # type: ignore
except (ValueError,AttributeError): pass
try: kCFStringTransformLatinHiragana = CFStringRef.in_dll(dll, 'kCFStringTransformLatinHiragana') # type: ignore
except (ValueError,AttributeError): pass
try: kCFStringTransformHiraganaKatakana = CFStringRef.in_dll(dll, 'kCFStringTransformHiraganaKatakana') # type: ignore
except (ValueError,AttributeError): pass
try: kCFStringTransformMandarinLatin = CFStringRef.in_dll(dll, 'kCFStringTransformMandarinLatin') # type: ignore
except (ValueError,AttributeError): pass
try: kCFStringTransformLatinHangul = CFStringRef.in_dll(dll, 'kCFStringTransformLatinHangul') # type: ignore
except (ValueError,AttributeError): pass
try: kCFStringTransformLatinArabic = CFStringRef.in_dll(dll, 'kCFStringTransformLatinArabic') # type: ignore
except (ValueError,AttributeError): pass
try: kCFStringTransformLatinHebrew = CFStringRef.in_dll(dll, 'kCFStringTransformLatinHebrew') # type: ignore
except (ValueError,AttributeError): pass
try: kCFStringTransformLatinThai = CFStringRef.in_dll(dll, 'kCFStringTransformLatinThai') # type: ignore
except (ValueError,AttributeError): pass
try: kCFStringTransformLatinCyrillic = CFStringRef.in_dll(dll, 'kCFStringTransformLatinCyrillic') # type: ignore
except (ValueError,AttributeError): pass
try: kCFStringTransformLatinGreek = CFStringRef.in_dll(dll, 'kCFStringTransformLatinGreek') # type: ignore
except (ValueError,AttributeError): pass
try: kCFStringTransformToXMLHex = CFStringRef.in_dll(dll, 'kCFStringTransformToXMLHex') # type: ignore
except (ValueError,AttributeError): pass
try: kCFStringTransformToUnicodeName = CFStringRef.in_dll(dll, 'kCFStringTransformToUnicodeName') # type: ignore
except (ValueError,AttributeError): pass
try: kCFStringTransformStripDiacritics = CFStringRef.in_dll(dll, 'kCFStringTransformStripDiacritics') # type: ignore
except (ValueError,AttributeError): pass
@dll.bind
def CFStringIsEncodingAvailable(encoding:CFStringEncoding) -> Boolean: ...
@dll.bind
def CFStringGetListOfAvailableEncodings() -> c.POINTER[CFStringEncoding]: ...
@dll.bind
def CFStringGetNameOfEncoding(encoding:CFStringEncoding) -> CFStringRef: ...
@dll.bind
def CFStringConvertEncodingToNSStringEncoding(encoding:CFStringEncoding) -> Annotated[int, ctypes.c_uint64]: ...
@dll.bind
def CFStringConvertNSStringEncodingToEncoding(encoding:Annotated[int, ctypes.c_uint64]) -> CFStringEncoding: ...
UInt32: TypeAlias = Annotated[int, ctypes.c_uint32]
@dll.bind
def CFStringConvertEncodingToWindowsCodepage(encoding:CFStringEncoding) -> UInt32: ...
@dll.bind
def CFStringConvertWindowsCodepageToEncoding(codepage:UInt32) -> CFStringEncoding: ...
@dll.bind
def CFStringConvertIANACharSetNameToEncoding(theString:CFStringRef) -> CFStringEncoding: ...
@dll.bind
def CFStringConvertEncodingToIANACharSetName(encoding:CFStringEncoding) -> CFStringRef: ...
@dll.bind
def CFStringGetMostCompatibleMacStringEncoding(encoding:CFStringEncoding) -> CFStringEncoding: ...
@c.record
class CFStringInlineBuffer(c.Struct):
  # Autogenerated mirror of CoreFoundation's CFStringInlineBuffer.  The integer
  # in each Annotated field is the byte offset within the SIZE-byte C struct;
  # offsets and SIZE must match the C ABI layout exactly — do not hand-edit.
  SIZE = 184
  buffer: Annotated[c.Array[UniChar, Literal[64]], 0]
  theString: Annotated[CFStringRef, 128]
  directUniCharBuffer: Annotated[c.POINTER[UniChar], 136]
  directCStringBuffer: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 144]
  rangeToBuffer: Annotated[CFRange, 152]
  bufferedRangeStart: Annotated[CFIndex, 168]
  bufferedRangeEnd: Annotated[CFIndex, 176]
CFTypeRef: TypeAlias = ctypes.c_void_p
@dll.bind
def CFShow(obj:CFTypeRef) -> None: ...
@dll.bind
def CFShowStr(str:CFStringRef) -> None: ...
@dll.bind
def __CFStringMakeConstantString(cStr:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> CFStringRef: ...
CFMutableDataRef: TypeAlias = c.POINTER[struct___CFData]
@dll.bind
def CFDataGetTypeID() -> CFTypeID: ...
@dll.bind
def CFDataCreate(allocator:CFAllocatorRef, bytes:c.POINTER[UInt8], length:CFIndex) -> CFDataRef: ...
@dll.bind
def CFDataCreateWithBytesNoCopy(allocator:CFAllocatorRef, bytes:c.POINTER[UInt8], length:CFIndex, bytesDeallocator:CFAllocatorRef) -> CFDataRef: ...
@dll.bind
def CFDataCreateCopy(allocator:CFAllocatorRef, theData:CFDataRef) -> CFDataRef: ...
@dll.bind
def CFDataCreateMutable(allocator:CFAllocatorRef, capacity:CFIndex) -> CFMutableDataRef: ...
@dll.bind
def CFDataCreateMutableCopy(allocator:CFAllocatorRef, capacity:CFIndex, theData:CFDataRef) -> CFMutableDataRef: ...
@dll.bind
def CFDataGetLength(theData:CFDataRef) -> CFIndex: ...
@dll.bind
def CFDataGetBytePtr(theData:CFDataRef) -> c.POINTER[UInt8]: ...
@dll.bind
def CFDataGetMutableBytePtr(theData:CFMutableDataRef) -> c.POINTER[UInt8]: ...
@dll.bind
def CFDataGetBytes(theData:CFDataRef, range:CFRange, buffer:c.POINTER[UInt8]) -> None: ...
@dll.bind
def CFDataSetLength(theData:CFMutableDataRef, length:CFIndex) -> None: ...
@dll.bind
def CFDataIncreaseLength(theData:CFMutableDataRef, extraLength:CFIndex) -> None: ...
@dll.bind
def CFDataAppendBytes(theData:CFMutableDataRef, bytes:c.POINTER[UInt8], length:CFIndex) -> None: ...
@dll.bind
def CFDataReplaceBytes(theData:CFMutableDataRef, range:CFRange, newBytes:c.POINTER[UInt8], newLength:CFIndex) -> None: ...
@dll.bind
def CFDataDeleteBytes(theData:CFMutableDataRef, range:CFRange) -> None: ...
CFDataSearchFlags: TypeAlias = Annotated[int, ctypes.c_uint64]
# Bit flags for CFDataFind's compareOptions argument.
class _anonenum3(Annotated[int, ctypes.c_uint32], c.Enum): pass
kCFDataSearchBackwards = _anonenum3.define('kCFDataSearchBackwards', 1)
kCFDataSearchAnchored = _anonenum3.define('kCFDataSearchAnchored', 2)
@dll.bind
def CFDataFind(theData:CFDataRef, dataToFind:CFDataRef, searchRange:CFRange, compareOptions:CFDataSearchFlags) -> CFRange: ...
c.init_records()
__COREFOUNDATION_CFSTRING__ = 1 # type: ignore
kCFStringEncodingInvalidId = (0xffffffff) # type: ignore
CF_FORMAT_FUNCTION = lambda F,A: __attribute__((format(CFString, F, A))) # type: ignore
CF_FORMAT_ARGUMENT = lambda A: __attribute__((format_arg(A))) # type: ignore
__kCFStringInlineBufferLength = 64 # type: ignore
__COREFOUNDATION_CFDATA__ = 1 # type: ignore | {
"repo_id": "tinygrad/tinygrad",
"file_path": "tinygrad/runtime/autogen/corefoundation.py",
"license": "MIT License",
"lines": 341,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
tinygrad/tinygrad:tinygrad/runtime/autogen/iokit.py | # mypy: disable-error-code="empty-body"
from __future__ import annotations
import ctypes
from typing import Annotated, Literal, TypeAlias
from tinygrad.runtime.support.c import _IO, _IOW, _IOR, _IOWR
from tinygrad.runtime.support import c
dll = c.DLL('iokit', 'IOKit')
class struct_IONotificationPort(ctypes.Structure): pass
IONotificationPortRef: TypeAlias = c.POINTER[struct_IONotificationPort]
IOServiceMatchingCallback: TypeAlias = c.CFUNCTYPE[None, [ctypes.c_void_p, Annotated[int, ctypes.c_uint32]]]
IOServiceInterestCallback: TypeAlias = c.CFUNCTYPE[None, [ctypes.c_void_p, Annotated[int, ctypes.c_uint32], Annotated[int, ctypes.c_uint32], ctypes.c_void_p]]
mach_port_t: TypeAlias = Annotated[int, ctypes.c_uint32]
try: kIOMainPortDefault = mach_port_t.in_dll(dll, 'kIOMainPortDefault') # type: ignore
except (ValueError,AttributeError): pass
kern_return_t: TypeAlias = Annotated[int, ctypes.c_int32]
@dll.bind
def IOMainPort(bootstrapPort:mach_port_t, mainPort:c.POINTER[mach_port_t]) -> kern_return_t: ...
try: kIOMasterPortDefault = mach_port_t.in_dll(dll, 'kIOMasterPortDefault') # type: ignore
except (ValueError,AttributeError): pass
@dll.bind
def IOMasterPort(bootstrapPort:mach_port_t, mainPort:c.POINTER[mach_port_t]) -> kern_return_t: ...
@dll.bind
def IONotificationPortCreate(mainPort:mach_port_t) -> IONotificationPortRef: ...
@dll.bind
def IONotificationPortDestroy(notify:IONotificationPortRef) -> None: ...
class struct___CFRunLoopSource(ctypes.Structure): pass
CFRunLoopSourceRef: TypeAlias = c.POINTER[struct___CFRunLoopSource]
@dll.bind
def IONotificationPortGetRunLoopSource(notify:IONotificationPortRef) -> CFRunLoopSourceRef: ...
@dll.bind
def IONotificationPortGetMachPort(notify:IONotificationPortRef) -> mach_port_t: ...
@dll.bind
def IONotificationPortSetImportanceReceiver(notify:IONotificationPortRef) -> kern_return_t: ...
class struct_dispatch_queue_s(ctypes.Structure): pass
dispatch_queue_t: TypeAlias = c.POINTER[struct_dispatch_queue_s]
@dll.bind
def IONotificationPortSetDispatchQueue(notify:IONotificationPortRef, queue:dispatch_queue_t) -> None: ...
@c.record
class mach_msg_header_t(c.Struct):
  # Autogenerated mirror of the Mach message header.  The integer in each
  # Annotated field is the byte offset into the SIZE-byte C struct; the layout
  # must match the kernel ABI exactly — do not hand-edit.
  SIZE = 24
  msgh_bits: Annotated[mach_msg_bits_t, 0]
  msgh_size: Annotated[mach_msg_size_t, 4]
  msgh_remote_port: Annotated[mach_port_t, 8]
  msgh_local_port: Annotated[mach_port_t, 12]
  msgh_voucher_port: Annotated[mach_port_name_t, 16]
  msgh_id: Annotated[mach_msg_id_t, 20]
mach_msg_bits_t: TypeAlias = Annotated[int, ctypes.c_uint32]
mach_msg_size_t: TypeAlias = Annotated[int, ctypes.c_uint32]
mach_port_name_t: TypeAlias = Annotated[int, ctypes.c_uint32]
mach_msg_id_t: TypeAlias = Annotated[int, ctypes.c_int32]
@dll.bind
def IODispatchCalloutFromMessage(unused:ctypes.c_void_p, msg:c.POINTER[mach_msg_header_t], reference:ctypes.c_void_p) -> None: ...
uint32_t: TypeAlias = Annotated[int, ctypes.c_uint32]
@dll.bind
def IOCreateReceivePort(msgType:uint32_t, recvPort:c.POINTER[mach_port_t]) -> kern_return_t: ...
io_object_t: TypeAlias = Annotated[int, ctypes.c_uint32]
@dll.bind
def IOObjectRelease(object:io_object_t) -> kern_return_t: ...
@dll.bind
def IOObjectRetain(object:io_object_t) -> kern_return_t: ...
io_name_t: TypeAlias = c.Array[Annotated[bytes, ctypes.c_char], Literal[128]]
@dll.bind
def IOObjectGetClass(object:io_object_t, className:io_name_t) -> kern_return_t: ...
class struct___CFString(ctypes.Structure): pass
CFStringRef: TypeAlias = c.POINTER[struct___CFString]
@dll.bind
def IOObjectCopyClass(object:io_object_t) -> CFStringRef: ...
@dll.bind
def IOObjectCopySuperclassForClass(classname:CFStringRef) -> CFStringRef: ...
@dll.bind
def IOObjectCopyBundleIdentifierForClass(classname:CFStringRef) -> CFStringRef: ...
boolean_t: TypeAlias = Annotated[int, ctypes.c_int32]
@dll.bind
def IOObjectConformsTo(object:io_object_t, className:io_name_t) -> boolean_t: ...
@dll.bind
def IOObjectIsEqualTo(object:io_object_t, anObject:io_object_t) -> boolean_t: ...
@dll.bind
def IOObjectGetKernelRetainCount(object:io_object_t) -> uint32_t: ...
@dll.bind
def IOObjectGetUserRetainCount(object:io_object_t) -> uint32_t: ...
@dll.bind
def IOObjectGetRetainCount(object:io_object_t) -> uint32_t: ...
io_iterator_t: TypeAlias = Annotated[int, ctypes.c_uint32]
@dll.bind
def IOIteratorNext(iterator:io_iterator_t) -> io_object_t: ...
@dll.bind
def IOIteratorReset(iterator:io_iterator_t) -> None: ...
@dll.bind
def IOIteratorIsValid(iterator:io_iterator_t) -> boolean_t: ...
class struct___CFDictionary(ctypes.Structure): pass
CFDictionaryRef: TypeAlias = c.POINTER[struct___CFDictionary]
io_service_t: TypeAlias = Annotated[int, ctypes.c_uint32]
@dll.bind
def IOServiceGetMatchingService(mainPort:mach_port_t, matching:CFDictionaryRef) -> io_service_t: ...
@dll.bind
def IOServiceGetMatchingServices(mainPort:mach_port_t, matching:CFDictionaryRef, existing:c.POINTER[io_iterator_t]) -> kern_return_t: ...
uintptr_t: TypeAlias = Annotated[int, ctypes.c_uint64]
@dll.bind
def IOServiceAddNotification(mainPort:mach_port_t, notificationType:io_name_t, matching:CFDictionaryRef, wakePort:mach_port_t, reference:uintptr_t, notification:c.POINTER[io_iterator_t]) -> kern_return_t: ...
@dll.bind
def IOServiceAddMatchingNotification(notifyPort:IONotificationPortRef, notificationType:io_name_t, matching:CFDictionaryRef, callback:IOServiceMatchingCallback, refCon:ctypes.c_void_p, notification:c.POINTER[io_iterator_t]) -> kern_return_t: ...
@dll.bind
def IOServiceAddInterestNotification(notifyPort:IONotificationPortRef, service:io_service_t, interestType:io_name_t, callback:IOServiceInterestCallback, refCon:ctypes.c_void_p, notification:c.POINTER[io_object_t]) -> kern_return_t: ...
@dll.bind
def IOServiceMatchPropertyTable(service:io_service_t, matching:CFDictionaryRef, matches:c.POINTER[boolean_t]) -> kern_return_t: ...
@dll.bind
def IOServiceGetBusyState(service:io_service_t, busyState:c.POINTER[uint32_t]) -> kern_return_t: ...
@c.record
class struct_mach_timespec(c.Struct):
  # Autogenerated mirror of mach_timespec (seconds + nanoseconds); Annotated
  # ints are byte offsets into the 8-byte C struct — do not hand-edit.
  SIZE = 8
  tv_sec: Annotated[Annotated[int, ctypes.c_uint32], 0]
  tv_nsec: Annotated[clock_res_t, 4]
mach_timespec_t: TypeAlias = struct_mach_timespec
clock_res_t: TypeAlias = Annotated[int, ctypes.c_int32]
@dll.bind
def IOServiceWaitQuiet(service:io_service_t, waitTime:c.POINTER[mach_timespec_t]) -> kern_return_t: ...
@dll.bind
def IOKitGetBusyState(mainPort:mach_port_t, busyState:c.POINTER[uint32_t]) -> kern_return_t: ...
IOOptionBits: TypeAlias = Annotated[int, ctypes.c_uint32]
@dll.bind
def IOKitWaitQuietWithOptions(mainPort:mach_port_t, waitTime:c.POINTER[mach_timespec_t], options:IOOptionBits) -> kern_return_t: ...
@dll.bind
def IOKitWaitQuiet(mainPort:mach_port_t, waitTime:c.POINTER[mach_timespec_t]) -> kern_return_t: ...
task_port_t: TypeAlias = Annotated[int, ctypes.c_uint32]
io_connect_t: TypeAlias = Annotated[int, ctypes.c_uint32]
@dll.bind
def IOServiceOpen(service:io_service_t, owningTask:task_port_t, type:uint32_t, connect:c.POINTER[io_connect_t]) -> kern_return_t: ...
@dll.bind
def IOServiceRequestProbe(service:io_service_t, options:uint32_t) -> kern_return_t: ...
class _anonenum0(Annotated[int, ctypes.c_uint32], c.Enum): pass
kIOServiceInteractionAllowed = _anonenum0.define('kIOServiceInteractionAllowed', 1)
@dll.bind
def IOServiceAuthorize(service:io_service_t, options:uint32_t) -> kern_return_t: ...
@dll.bind
def IOServiceOpenAsFileDescriptor(service:io_service_t, oflag:Annotated[int, ctypes.c_int32]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def IOServiceClose(connect:io_connect_t) -> kern_return_t: ...
@dll.bind
def IOConnectAddRef(connect:io_connect_t) -> kern_return_t: ...
@dll.bind
def IOConnectRelease(connect:io_connect_t) -> kern_return_t: ...
@dll.bind
def IOConnectGetService(connect:io_connect_t, service:c.POINTER[io_service_t]) -> kern_return_t: ...
@dll.bind
def IOConnectSetNotificationPort(connect:io_connect_t, type:uint32_t, port:mach_port_t, reference:uintptr_t) -> kern_return_t: ...
mach_vm_address_t: TypeAlias = Annotated[int, ctypes.c_uint64]
mach_vm_size_t: TypeAlias = Annotated[int, ctypes.c_uint64]
@dll.bind
def IOConnectMapMemory(connect:io_connect_t, memoryType:uint32_t, intoTask:task_port_t, atAddress:c.POINTER[mach_vm_address_t], ofSize:c.POINTER[mach_vm_size_t], options:IOOptionBits) -> kern_return_t: ...
@dll.bind
def IOConnectMapMemory64(connect:io_connect_t, memoryType:uint32_t, intoTask:task_port_t, atAddress:c.POINTER[mach_vm_address_t], ofSize:c.POINTER[mach_vm_size_t], options:IOOptionBits) -> kern_return_t: ...
@dll.bind
def IOConnectUnmapMemory(connect:io_connect_t, memoryType:uint32_t, fromTask:task_port_t, atAddress:mach_vm_address_t) -> kern_return_t: ...
@dll.bind
def IOConnectUnmapMemory64(connect:io_connect_t, memoryType:uint32_t, fromTask:task_port_t, atAddress:mach_vm_address_t) -> kern_return_t: ...
CFTypeRef: TypeAlias = ctypes.c_void_p
@dll.bind
def IOConnectSetCFProperties(connect:io_connect_t, properties:CFTypeRef) -> kern_return_t: ...
@dll.bind
def IOConnectSetCFProperty(connect:io_connect_t, propertyName:CFStringRef, property:CFTypeRef) -> kern_return_t: ...
uint64_t: TypeAlias = Annotated[int, ctypes.c_uint64]
size_t: TypeAlias = Annotated[int, ctypes.c_uint64]
@dll.bind
def IOConnectCallMethod(connection:mach_port_t, selector:uint32_t, input:c.POINTER[uint64_t], inputCnt:uint32_t, inputStruct:ctypes.c_void_p, inputStructCnt:size_t, output:c.POINTER[uint64_t], outputCnt:c.POINTER[uint32_t], outputStruct:ctypes.c_void_p, outputStructCnt:c.POINTER[size_t]) -> kern_return_t: ...
@dll.bind
def IOConnectCallAsyncMethod(connection:mach_port_t, selector:uint32_t, wake_port:mach_port_t, reference:c.POINTER[uint64_t], referenceCnt:uint32_t, input:c.POINTER[uint64_t], inputCnt:uint32_t, inputStruct:ctypes.c_void_p, inputStructCnt:size_t, output:c.POINTER[uint64_t], outputCnt:c.POINTER[uint32_t], outputStruct:ctypes.c_void_p, outputStructCnt:c.POINTER[size_t]) -> kern_return_t: ...
@dll.bind
def IOConnectCallStructMethod(connection:mach_port_t, selector:uint32_t, inputStruct:ctypes.c_void_p, inputStructCnt:size_t, outputStruct:ctypes.c_void_p, outputStructCnt:c.POINTER[size_t]) -> kern_return_t: ...
@dll.bind
def IOConnectCallAsyncStructMethod(connection:mach_port_t, selector:uint32_t, wake_port:mach_port_t, reference:c.POINTER[uint64_t], referenceCnt:uint32_t, inputStruct:ctypes.c_void_p, inputStructCnt:size_t, outputStruct:ctypes.c_void_p, outputStructCnt:c.POINTER[size_t]) -> kern_return_t: ...
@dll.bind
def IOConnectCallScalarMethod(connection:mach_port_t, selector:uint32_t, input:c.POINTER[uint64_t], inputCnt:uint32_t, output:c.POINTER[uint64_t], outputCnt:c.POINTER[uint32_t]) -> kern_return_t: ...
@dll.bind
def IOConnectCallAsyncScalarMethod(connection:mach_port_t, selector:uint32_t, wake_port:mach_port_t, reference:c.POINTER[uint64_t], referenceCnt:uint32_t, input:c.POINTER[uint64_t], inputCnt:uint32_t, output:c.POINTER[uint64_t], outputCnt:c.POINTER[uint32_t]) -> kern_return_t: ...
@dll.bind
def IOConnectTrap0(connect:io_connect_t, index:uint32_t) -> kern_return_t: ...
@dll.bind
def IOConnectTrap1(connect:io_connect_t, index:uint32_t, p1:uintptr_t) -> kern_return_t: ...
@dll.bind
def IOConnectTrap2(connect:io_connect_t, index:uint32_t, p1:uintptr_t, p2:uintptr_t) -> kern_return_t: ...
@dll.bind
def IOConnectTrap3(connect:io_connect_t, index:uint32_t, p1:uintptr_t, p2:uintptr_t, p3:uintptr_t) -> kern_return_t: ...
@dll.bind
def IOConnectTrap4(connect:io_connect_t, index:uint32_t, p1:uintptr_t, p2:uintptr_t, p3:uintptr_t, p4:uintptr_t) -> kern_return_t: ...
@dll.bind
def IOConnectTrap5(connect:io_connect_t, index:uint32_t, p1:uintptr_t, p2:uintptr_t, p3:uintptr_t, p4:uintptr_t, p5:uintptr_t) -> kern_return_t: ...
@dll.bind
def IOConnectTrap6(connect:io_connect_t, index:uint32_t, p1:uintptr_t, p2:uintptr_t, p3:uintptr_t, p4:uintptr_t, p5:uintptr_t, p6:uintptr_t) -> kern_return_t: ...
@dll.bind
def IOConnectAddClient(connect:io_connect_t, client:io_connect_t) -> kern_return_t: ...
io_registry_entry_t: TypeAlias = Annotated[int, ctypes.c_uint32]
@dll.bind
def IORegistryGetRootEntry(mainPort:mach_port_t) -> io_registry_entry_t: ...
io_string_t: TypeAlias = c.Array[Annotated[bytes, ctypes.c_char], Literal[512]]
@dll.bind
def IORegistryEntryFromPath(mainPort:mach_port_t, path:io_string_t) -> io_registry_entry_t: ...
@dll.bind
def IORegistryEntryCopyFromPath(mainPort:mach_port_t, path:CFStringRef) -> io_registry_entry_t: ...
class _anonenum1(Annotated[int, ctypes.c_uint32], c.Enum): pass
kIORegistryIterateRecursively = _anonenum1.define('kIORegistryIterateRecursively', 1)
kIORegistryIterateParents = _anonenum1.define('kIORegistryIterateParents', 2)
@dll.bind
def IORegistryCreateIterator(mainPort:mach_port_t, plane:io_name_t, options:IOOptionBits, iterator:c.POINTER[io_iterator_t]) -> kern_return_t: ...
@dll.bind
def IORegistryEntryCreateIterator(entry:io_registry_entry_t, plane:io_name_t, options:IOOptionBits, iterator:c.POINTER[io_iterator_t]) -> kern_return_t: ...
@dll.bind
def IORegistryIteratorEnterEntry(iterator:io_iterator_t) -> kern_return_t: ...
@dll.bind
def IORegistryIteratorExitEntry(iterator:io_iterator_t) -> kern_return_t: ...
@dll.bind
def IORegistryEntryGetName(entry:io_registry_entry_t, name:io_name_t) -> kern_return_t: ...
@dll.bind
def IORegistryEntryGetNameInPlane(entry:io_registry_entry_t, plane:io_name_t, name:io_name_t) -> kern_return_t: ...
@dll.bind
def IORegistryEntryGetLocationInPlane(entry:io_registry_entry_t, plane:io_name_t, location:io_name_t) -> kern_return_t: ...
@dll.bind
def IORegistryEntryGetPath(entry:io_registry_entry_t, plane:io_name_t, path:io_string_t) -> kern_return_t: ...
@dll.bind
def IORegistryEntryCopyPath(entry:io_registry_entry_t, plane:io_name_t) -> CFStringRef: ...
@dll.bind
def IORegistryEntryGetRegistryEntryID(entry:io_registry_entry_t, entryID:c.POINTER[uint64_t]) -> kern_return_t: ...
CFMutableDictionaryRef: TypeAlias = c.POINTER[struct___CFDictionary]
class struct___CFAllocator(ctypes.Structure): pass
CFAllocatorRef: TypeAlias = c.POINTER[struct___CFAllocator]
@dll.bind
def IORegistryEntryCreateCFProperties(entry:io_registry_entry_t, properties:c.POINTER[CFMutableDictionaryRef], allocator:CFAllocatorRef, options:IOOptionBits) -> kern_return_t: ...
@dll.bind
def IORegistryEntryCreateCFProperty(entry:io_registry_entry_t, key:CFStringRef, allocator:CFAllocatorRef, options:IOOptionBits) -> CFTypeRef: ...
@dll.bind
def IORegistryEntrySearchCFProperty(entry:io_registry_entry_t, plane:io_name_t, key:CFStringRef, allocator:CFAllocatorRef, options:IOOptionBits) -> CFTypeRef: ...
io_struct_inband_t: TypeAlias = c.Array[Annotated[bytes, ctypes.c_char], Literal[4096]]
@dll.bind
def IORegistryEntryGetProperty(entry:io_registry_entry_t, propertyName:io_name_t, buffer:io_struct_inband_t, size:c.POINTER[uint32_t]) -> kern_return_t: ...
@dll.bind
def IORegistryEntrySetCFProperties(entry:io_registry_entry_t, properties:CFTypeRef) -> kern_return_t: ...
@dll.bind
def IORegistryEntrySetCFProperty(entry:io_registry_entry_t, propertyName:CFStringRef, property:CFTypeRef) -> kern_return_t: ...
@dll.bind
def IORegistryEntryGetChildIterator(entry:io_registry_entry_t, plane:io_name_t, iterator:c.POINTER[io_iterator_t]) -> kern_return_t: ...
@dll.bind
def IORegistryEntryGetChildEntry(entry:io_registry_entry_t, plane:io_name_t, child:c.POINTER[io_registry_entry_t]) -> kern_return_t: ...
@dll.bind
def IORegistryEntryGetParentIterator(entry:io_registry_entry_t, plane:io_name_t, iterator:c.POINTER[io_iterator_t]) -> kern_return_t: ...
@dll.bind
def IORegistryEntryGetParentEntry(entry:io_registry_entry_t, plane:io_name_t, parent:c.POINTER[io_registry_entry_t]) -> kern_return_t: ...
@dll.bind
def IORegistryEntryInPlane(entry:io_registry_entry_t, plane:io_name_t) -> boolean_t: ...
@dll.bind
def IOServiceMatching(name:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> CFMutableDictionaryRef: ...
@dll.bind
def IOServiceNameMatching(name:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> CFMutableDictionaryRef: ...
@dll.bind
def IOBSDNameMatching(mainPort:mach_port_t, options:uint32_t, bsdName:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> CFMutableDictionaryRef: ...
@dll.bind
def IOOpenFirmwarePathMatching(mainPort:mach_port_t, options:uint32_t, path:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> CFMutableDictionaryRef: ...
@dll.bind
def IORegistryEntryIDMatching(entryID:uint64_t) -> CFMutableDictionaryRef: ...
@dll.bind
def IOServiceOFPathToBSDName(mainPort:mach_port_t, openFirmwarePath:io_name_t, bsdName:io_name_t) -> kern_return_t: ...
IOAsyncCallback0: TypeAlias = c.CFUNCTYPE[None, [ctypes.c_void_p, Annotated[int, ctypes.c_int32]]]
IOAsyncCallback1: TypeAlias = c.CFUNCTYPE[None, [ctypes.c_void_p, Annotated[int, ctypes.c_int32], ctypes.c_void_p]]
IOAsyncCallback2: TypeAlias = c.CFUNCTYPE[None, [ctypes.c_void_p, Annotated[int, ctypes.c_int32], ctypes.c_void_p, ctypes.c_void_p]]
IOAsyncCallback: TypeAlias = c.CFUNCTYPE[None, [ctypes.c_void_p, Annotated[int, ctypes.c_int32], c.POINTER[ctypes.c_void_p], Annotated[int, ctypes.c_uint32]]]
vm_size_t: TypeAlias = Annotated[int, ctypes.c_uint64]
@dll.bind
def OSGetNotificationFromMessage(msg:c.POINTER[mach_msg_header_t], index:uint32_t, type:c.POINTER[uint32_t], reference:c.POINTER[uintptr_t], content:c.POINTER[ctypes.c_void_p], size:c.POINTER[vm_size_t]) -> kern_return_t: ...
@dll.bind
def IOCatalogueSendData(mainPort:mach_port_t, flag:uint32_t, buffer:c.POINTER[Annotated[bytes, ctypes.c_char]], size:uint32_t) -> kern_return_t: ...
@dll.bind
def IOCatalogueTerminate(mainPort:mach_port_t, flag:uint32_t, description:io_name_t) -> kern_return_t: ...
@dll.bind
def IOCatalogueGetData(mainPort:mach_port_t, flag:uint32_t, buffer:c.POINTER[c.POINTER[Annotated[bytes, ctypes.c_char]]], size:c.POINTER[uint32_t]) -> kern_return_t: ...
@dll.bind
def IOCatalogueModuleLoaded(mainPort:mach_port_t, name:io_name_t) -> kern_return_t: ...
@dll.bind
def IOCatalogueReset(mainPort:mach_port_t, flag:uint32_t) -> kern_return_t: ...
c.init_records()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "tinygrad/runtime/autogen/iokit.py",
"license": "MIT License",
"lines": 277,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
tinygrad/tinygrad:extra/sqtt/examples/generate_examples.py | import os, subprocess, sys
from pathlib import Path
from tinygrad.helpers import temp
EXAMPLES_DIR = Path(__file__).parent
# tinygrad writes the pickled profile here when capture is enabled — TODO confirm against VIZ handling
PROFILE_PATH = Path(temp("profile.pkl", append_user=True))
# workloads to trace; each entry is "script_path [TestCase.test_name]" split into argv
EXAMPLES = [
  "test/backend/test_custom_kernel.py TestCustomKernel.test_empty",
  "test/test_tiny.py TestTiny.test_plus",
  "test/test_tiny.py TestTiny.test_gemm",
  "extra/sqtt/examples/discover_ops.py"
]
if __name__ == "__main__":
  # Query the GPU arch in a subprocess (with DEBUG off) so this script itself
  # never initializes the device before the traced runs.
  arch = subprocess.check_output(["python", "-c", "from tinygrad import Device; print(Device['AMD'].arch)"], text=True,
                                 env={**os.environ, "DEBUG":"0"}).rstrip()
  (EXAMPLES_DIR/arch).mkdir(exist_ok=True)
  for test in EXAMPLES:
    # capture each workload twice so run-to-run differences can be inspected
    for i in range(2):
      # AM_RESET=1 gets a clear trace, does not work on mi300 machines (gfx9*).
      # check=True: if an example fails we must stop, otherwise the rename below
      # would either crash confusingly or save a stale profile from a prior run.
      subprocess.run([sys.executable, *test.split()], cwd=EXAMPLES_DIR.parent.parent.parent, check=True,
                     env={**os.environ, "AMD":"1", "AM_RESET":"1" if not arch.startswith("gfx9") else "0", "VIZ":"-2", "PYTHONPATH":"."})
      PROFILE_PATH.rename(dest:=EXAMPLES_DIR/arch/f"profile_{test.split('.')[-1].replace('test_', '')}_run_{i}.pkl")
      print(f"saved SQTT trace to {dest}")
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "extra/sqtt/examples/generate_examples.py",
"license": "MIT License",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
tinygrad/tinygrad:test/unit/test_getitem_ops.py | import unittest
import numpy as np
from tinygrad import Tensor, GlobalCounters
class TestGetitemOps(unittest.TestCase):
  def test_two_tensor_indices(self):
    """Advanced indexing with tensor indices must stay cheap.

    Linear (gather-style) indexing costs O(idx_size) ops, while a one-hot-mask
    lowering would cost O(idx_size * src_size); the op-count bounds below catch
    a regression to the expensive form.
    """
    source = np.random.rand(10, 100, 200).astype(np.float32)
    rows = np.random.randint(0, 100, (50, 60), dtype=np.int32)
    cols = np.random.randint(0, 200, (50, 60), dtype=np.int32)
    t_src, t_rows, t_cols = Tensor(source), Tensor(rows), Tensor(cols)

    # scalar leading index: O(50*60) = 3K vs O(50*60*100*200) = 60M with masks
    GlobalCounters.reset()
    np.testing.assert_allclose(source[0, rows, cols], t_src[0, t_rows, t_cols].numpy())
    self.assertLess(GlobalCounters.global_ops, 50_000)

    # consecutive tensor indices not starting at dim 0:
    # O(10*50*60) = 30K vs O(10*50*60*100*200) = 600M with masks
    GlobalCounters.reset()
    np.testing.assert_allclose(source[:, rows, cols], t_src[:, t_rows, t_cols].numpy())
    self.assertLess(GlobalCounters.global_ops, 500_000)
if __name__ == '__main__':
unittest.main()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/unit/test_getitem_ops.py",
"license": "MIT License",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:extra/fp8/fp8_linear.py | from typing import Callable, Any
from tinygrad import Tensor, dtypes, nn, UOp
from tinygrad.uop.ops import KernelInfo, AxisType, Ops
def quantize_to_fp8(x: Tensor, dtype=dtypes.fp8e4m3):
  """Dynamically quantize `x` to fp8; return (quantized tensor, dequant scale).

  The tensor's detached absolute maximum is mapped onto the top of the target
  format's representable range, and the clamp to that range is applied through
  a straight-through estimator so gradients flow as if unclamped.
  """
  # representable range of the target fp8 format (E4M3: ±448, otherwise ±57344)
  if dtype == dtypes.fp8e4m3: lo, hi = -448.0, 448.0
  else: lo, hi = -57344.0, 57344.0
  amax = x.abs().max().detach()
  scale = hi / (amax + 1e-8)  # epsilon guards the all-zero tensor
  scaled = x * scale
  # straight-through estimator: forward sees the clamped value, backward does not
  detached = scaled.detach()
  ste = scaled + (detached.clamp(lo, hi) - detached)
  return ste.cast(dtype), scale.float().reciprocal()
def custom_matmul(output: UOp, inp: UOp, weight: UOp) -> UOp:
  """Build a UOp-level matmul kernel: output[b,s,o] = sum_i inp[b,s,i] * weight[o,i].

  Operands are addressed with flat 1-D offsets, so shapes are only used to
  derive loop extents; the accumulation is done in float32 regardless of the
  operands' (fp8) dtypes.
  """
  SEQ = inp.shape[1]
  OUT = weight.shape[0]
  IN = weight.shape[-1]
  # parallel loop axes over sequence, output feature and batch; reduce axis over the inner dim
  seq_idx = UOp.range(SEQ, 2, AxisType.LOOP)
  out_idx = UOp.range(OUT, 3, AxisType.LOOP)
  batch_idx = UOp.range(output.size//SEQ//OUT, 1, AxisType.LOOP)
  reduce_idx = UOp.range(IN, 0, AxisType.REDUCE)
  # flat-index both operands, multiply, and upcast the product to float before the reduce
  product = (inp.index((seq_idx*IN+reduce_idx+batch_idx*IN*SEQ)) * weight.index((out_idx*IN+reduce_idx))).cast(dtypes.float)
  reduced = product.reduce(reduce_idx, arg=Ops.ADD)
  # store the accumulated value at the flat output offset, close the loop axes
  store_op = output.index((seq_idx*OUT+out_idx+batch_idx*OUT*SEQ), ptr=True).store(reduced).end(batch_idx, seq_idx, out_idx)
  return store_op.sink(arg=KernelInfo(name=f"fp8_matmul_{inp.shape}x{weight.shape}"))
def custom_matmul_backward(gradient: UOp, kernel: UOp) -> tuple[None, UOp, UOp]:
  """Backward pass for custom_matmul: returns grads for (output, inp, weight).

  The incoming *gradient* is itself quantized to fp8 before forming both
  gradient products, then rescaled, mirroring the fp8 forward.  The first
  element of the returned tuple is None (no gradient for the output buffer).
  """
  # kernel.src[1:] holds the custom kernel's inputs: (output, inp, weight)
  _, input_uop, weight_uop = kernel.src[1:]
  input_tensor = Tensor(input_uop, device=input_uop.device)
  grad_tensor = Tensor(gradient, device=gradient.device)
  weight_tensor = Tensor(weight_uop, device=weight_uop.device)
  grad_quantized, scale = quantize_to_fp8(grad_tensor)
  scale_scalar = scale.reshape(())
  # dW[o,i] = sum_{b,s} dY[b,s,o] * X[b,s,i], accumulated in fp32
  grad_weight = Tensor.einsum("bso,bsi->oi", grad_quantized, input_tensor, dtype=dtypes.float)
  grad_weight = grad_weight * scale_scalar
  # dX = dY @ W, computed on the (batch*seq, out) flattening of dY
  grad_2d = grad_quantized.reshape(grad_tensor.shape[0] * grad_tensor.shape[1], grad_tensor.shape[-1])
  grad_input = (grad_2d.dot(weight_tensor, dtype=dtypes.float)).contiguous().reshape(input_tensor.shape) * scale
  return (None, grad_input.uop, grad_weight.uop)
class FP8Linear:
  """Drop-in nn.Linear replacement that runs the matmul in fp8.

  Weight and activations are dynamically quantized to fp8e4m3 per call, the
  product is computed by a custom kernel accumulating in fp32, then rescaled
  by both inverse scales and cast back to the input dtype.
  """
  def __init__(self, in_features:int, out_features:int, bias:bool=True):
    # parameters stay fp32; quantization happens on the fly in __call__
    self.weight = Tensor.empty(out_features, in_features, dtype=dtypes.float32)
    self.bias = Tensor.empty(out_features, dtype=dtypes.float32) if bias else None
  def __call__(self, x: Tensor) -> Tensor:
    # accept (batch, in) or (batch, seq, in); 2-D input is lifted to seq=1
    original_ndim = len(x.shape)
    if original_ndim == 2: x = x.reshape(x.shape[0], 1, x.shape[1])
    batch, seq, _ = x.shape
    w_fp8, w_scale = quantize_to_fp8(self.weight)
    x_fp8, x_scale = quantize_to_fp8(x)
    GPUS = self.weight.device
    if isinstance(GPUS, tuple) and len(GPUS) > 1:
      # multi-GPU: build a batch-sharded output buffer across all devices
      # NOTE(review): assumes batch is divisible by len(GPUS) — confirm with callers
      y = Tensor(Tensor.empty((batch//len(GPUS), seq, self.weight.shape[0]), dtype=dtypes.float, device=GPUS).uop.multi(0), device=GPUS)
    else:
      y = Tensor.empty((batch, seq, self.weight.shape[0]), dtype=dtypes.float)
    y = Tensor.custom_kernel(y, x_fp8, w_fp8, fxn=custom_matmul, grad_fxn=custom_matmul_backward)[0]
    # undo both quantization scales (w_scale and x_scale are inverse scales)
    y = y * w_scale * x_scale
    if self.bias is not None: y = y + self.bias
    if original_ndim == 2: y = y.reshape(batch, self.weight.shape[0])
    return y.cast(x.dtype)
def _replace_linear(layer: nn.Linear):
  """Build an FP8Linear that reuses the given nn.Linear's weight and bias tensors."""
  has_bias = layer.bias is not None
  swapped = FP8Linear(layer.weight.shape[1], layer.weight.shape[0], has_bias)
  swapped.weight = layer.weight
  if has_bias:
    swapped.bias = layer.bias
  return swapped
def _swap_linear_with_fp8(model, module_filter_fn:Callable[[Any, str],bool]|None=None, fqn:str="", parent:Any|None=None,
                          attr_name:str="", visited:set|None=None):
  """Recursively walk *model*, replacing nn.Linear instances with FP8Linear in place.

  fqn is the dotted fully-qualified name passed to module_filter_fn; parent and
  attr_name identify where to setattr the replacement. visited (object ids)
  guards against cycles in the object graph.
  """
  if visited is None: visited = set()
  if id(model) in visited: return
  visited.add(id(model))
  # leaves that can never contain a Linear — stop recursing
  if isinstance(model, (str, int, float, bool, type(None), Tensor, UOp)): return
  elif isinstance(model, nn.Linear):
    if module_filter_fn is not None and not module_filter_fn(model, fqn): return
    fp8_linear = _replace_linear(model)
    # a bare Linear with no parent (e.g. top-level call) cannot be replaced in place
    if parent is not None and attr_name:
      setattr(parent, attr_name, fp8_linear)
  elif isinstance(model, list):
    for i, item in enumerate(model):
      child_fqn = f"{fqn}.{i}" if fqn else str(i)
      # Linear elements are replaced by index; everything else is recursed into
      if isinstance(item, nn.Linear) and (module_filter_fn is None or module_filter_fn(item, child_fqn)): model[i] = _replace_linear(item)
      else: _swap_linear_with_fp8(item, module_filter_fn, child_fqn, None, "", visited)
  elif isinstance(model, dict):
    # list() so replacement during iteration is safe
    for key, item in list(model.items()):
      child_fqn = f"{fqn}.{key}" if fqn else str(key)
      if isinstance(item, nn.Linear) and (module_filter_fn is None or module_filter_fn(item, child_fqn)): model[key] = _replace_linear(item)
      else: _swap_linear_with_fp8(item, module_filter_fn, child_fqn, None, "", visited)
  elif hasattr(model, "__dict__"):
    # arbitrary object: walk its instance attributes, passing self as parent
    for attr_key in list(vars(model).keys()):
      try: attr = getattr(model, attr_key)
      except Exception: continue  # best-effort: skip attributes whose getters raise
      child_fqn = f"{fqn}.{attr_key}" if fqn else attr_key
      _swap_linear_with_fp8(attr, module_filter_fn, child_fqn, model, attr_key, visited)
def convert_to_float8_training(model, module_filter_fn:Callable[[Any,str],bool]|None=None):
  """Replace every nn.Linear reachable from *model* with an FP8Linear, in place.

  module_filter_fn, if given, receives (module, fully_qualified_name) and must
  return True for a module to be converted. Returns the same model object.
  """
  _swap_linear_with_fp8(model, module_filter_fn, "", None, "")
  return model
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "extra/fp8/fp8_linear.py",
"license": "MIT License",
"lines": 93,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
tinygrad/tinygrad:test/testextra/test_fp8_linear.py | #!/usr/bin/env python
import unittest
import numpy as np
from tinygrad import Tensor, dtypes, Device
from tinygrad.nn import Linear
from extra.fp8.fp8_linear import FP8Linear, convert_to_float8_training
from tinygrad.device import is_dtype_supported
from test.helpers import not_support_multi_device, needs_second_gpu
BS, T, in_dim, out_dim = 16, 4, 128, 128
@unittest.skipUnless(is_dtype_supported(dtypes.fp8e4m3), f"no fp8e4m3 on {Device.DEFAULT}")
class TestFP8Linear(unittest.TestCase):
  """Compares FP8Linear against nn.Linear with identical parameters.

  Forward/backward outputs must agree within fp8 quantization error (loose
  rtol/atol); also checks filtered conversion and multi-GPU sharding.
  """
  def setUp(self):
    # fixed seed so the randn-based comparisons are reproducible
    Tensor.manual_seed(42)
  def _test_forward(self, shape, in_features, out_features):
    # build both layers with the same weight/bias and compare outputs on one input
    fp8_layer = FP8Linear(in_features, out_features)
    normal_layer = Linear(in_features, out_features)
    weight = Tensor.randn(out_features, in_features, dtype=dtypes.float32) * 0.2
    bias = Tensor.randn(out_features, dtype=dtypes.float32) * 0.2
    fp8_layer.weight.assign(weight)
    normal_layer.weight.assign(weight)
    fp8_layer.bias.assign(bias)
    normal_layer.bias.assign(bias)
    x = Tensor.randn(*shape, dtype=dtypes.float32) * 0.2
    y_fp8, y_normal = fp8_layer(x), normal_layer(x)
    # loose tolerances: fp8e4m3 has ~2-3 decimal digits of precision
    np.testing.assert_allclose(y_fp8.numpy(), y_normal.numpy(), rtol=0.1, atol=0.1)
  def _test_backward(self, shape, in_features, out_features):
    # same setup as forward, but compare input and weight gradients after .backward()
    fp8_layer = FP8Linear(in_features, out_features)
    normal_layer = Linear(in_features, out_features)
    weight = Tensor.randn(out_features, in_features, dtype=dtypes.float32) * 0.2
    bias = Tensor.randn(out_features, dtype=dtypes.float32) * 0.2
    fp8_layer.weight, normal_layer.weight = weight.detach(), weight.detach()
    fp8_layer.bias, normal_layer.bias = bias.detach(), bias.detach()
    fp8_layer.weight.requires_grad = normal_layer.weight.requires_grad = True
    x_fp8 = Tensor.randn(*shape, dtype=dtypes.float32, requires_grad=True) * 0.2
    x_normal = x_fp8.detach().requires_grad_(True)
    fp8_layer(x_fp8).sum().backward()
    normal_layer(x_normal).sum().backward()
    np.testing.assert_allclose(x_fp8.grad.numpy(), x_normal.grad.numpy(), rtol=1.0, atol=0.1)
    np.testing.assert_allclose(fp8_layer.weight.grad.numpy(), normal_layer.weight.grad.numpy(), rtol=1.0, atol=0.1)
  def test_forward_2d(self): self._test_forward((BS, in_dim), in_dim, out_dim)
  def test_forward_3d(self): self._test_forward((BS, T, in_dim), in_dim, out_dim)
  def test_backward_2d(self): self._test_backward((BS, in_dim), in_dim, out_dim)
  def test_backward_3d(self): self._test_backward((BS, T, in_dim), in_dim, out_dim)
  def test_filter(self):
    # only fc1 matches the filter, so fc2 must remain a plain Linear
    class Model:
      def __init__(self):
        self.fc1 = Linear(32, 16)
        self.fc2 = Linear(16, 8)
      def __call__(self, x):
        return self.fc2(self.fc1(x).relu())
    model = Model()
    x = Tensor.randn(16, 32)
    y_before = model(x).numpy()
    convert_to_float8_training(model, module_filter_fn=lambda _, fqn: "fc1" in fqn)
    self.assertIsInstance(model.fc1, FP8Linear)
    self.assertNotIsInstance(model.fc2, FP8Linear)
    y_after = model(x).numpy()
    # conversion must not change the model output beyond quantization error
    np.testing.assert_allclose(y_after, y_before, rtol=0.1, atol=0.1)
  @needs_second_gpu
  @unittest.skipIf(not_support_multi_device(), "no multi")
  def test_multi_gpu(self):
    # shard the batch over two devices and compare against the replicated Linear
    GPUS = tuple(f"{Device.DEFAULT}:{i}" for i in range(2))
    fp8_layer = FP8Linear(in_dim, out_dim)
    normal_layer = Linear(in_dim, out_dim)
    weight = Tensor.randn(out_dim, in_dim, dtype=dtypes.float32) * 0.2
    bias = Tensor.randn(out_dim, dtype=dtypes.float32) * 0.2
    fp8_layer.weight.assign(weight)
    fp8_layer.bias.assign(bias)
    normal_layer.weight.assign(weight)
    normal_layer.bias.assign(bias)
    fp8_layer.weight.to_(GPUS)
    fp8_layer.bias.to_(GPUS)
    normal_layer.weight.to_(GPUS)
    normal_layer.bias.to_(GPUS)
    x = Tensor.randn(BS*2, in_dim, dtype=dtypes.float32) * 0.2
    x_sharded = x.detach()
    x = x.shard_(GPUS, axis=0)
    y_normal = normal_layer(x).realize()
    x_sharded.shard_(GPUS, axis=0)
    y_fp8 = fp8_layer(x_sharded).realize()
    np.testing.assert_allclose(y_fp8.numpy(), y_normal.numpy(), rtol=0.1, atol=0.1)
if __name__ == '__main__':
unittest.main()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/testextra/test_fp8_linear.py",
"license": "MIT License",
"lines": 83,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:extra/gemm/amd_asm_matmul.py | # RDNA3 128x128 tiled GEMM kernel - DSL version
# Computes C = A @ B for NxN float32 matrices using 128x128 tiles
#
# Architecture: RDNA3 (gfx1100)
# Tile size: 128x128 (each workgroup computes one tile of C)
# Workgroup: 128 threads (arranged as 32x4 for coalesced memory access)
# Inner loop: 8 iterations per K-block, processing 8 columns of A and 8 rows of B
#
# Accumulators: 128 vgprs (v[2-129])
import numpy as np
from tinygrad import Tensor, Device, Context, GlobalCounters
from tinygrad.uop.ops import UOp, Ops, KernelInfo
from tinygrad.helpers import getenv, colored
from tinygrad.dtype import dtypes, AddrSpace
from tinygrad.engine.realize import Estimates
from tinygrad.renderer.amd.dsl import s, v, VCC_LO, NULL
from tinygrad.runtime.autogen.amd.rdna3.ins import *
# =============================================================================
# Kernel constants
# =============================================================================
LDS_SIZE = 8320 # Local data share size in bytes
LDS_A_STRIDE = 0x210 # LDS stride for A tile (528 bytes)
LDS_B_STRIDE = 0x200 # LDS stride for B tile (512 bytes)
LDS_BASE_OFFSET = 0x1080 # Base LDS offset for tiles
ADDR_MASK = 0x3fffff80 # Address alignment mask
# =============================================================================
# Named register assignments (VGPRs)
# =============================================================================
V_LANE_ID = 0 # lane_id set on startup
# Use tile gaps (v146-159) for named regs to minimize max VGPR
V_LANE_ID_MOD8 = 146 # lane_id & 7
V_LANE_MOD8_X4 = 147 # (lane_id & 7) << 2
V_LANE_DIV8_X4 = 150 # ((lane_id >> 3) & 3) << 2
V_LDS_B_BASE = 151 # LDS B-tile base address for inner loop
V_LDS_A_BASE = 154 # LDS A-tile base address for inner loop
V_GLOBAL_A_ADDR = 155 # global memory A prefetch address
V_GLOBAL_B_ADDR = 158 # global memory B prefetch address
V_LDS_A_ADDR = 159 # single base register for A stores
V_LDS_B_ADDR = 162 # single base register for B stores
# LDS tile register destinations - SEPARATE from DATA to avoid overlap
# A on banks 2-3, B on banks 0-1 to avoid bank conflicts in VOPD
V_A_TILE_REGS = [130, 134, 138, 142] # A tile: banks 2,2,2,2 (130%4=2, etc.)
V_B_TILE_REGS = [132, 136, 140, 144, 148, 152, 156, 160] # B tile: banks 0,0,0,0,0,0,0,0
# =============================================================================
# Named register assignments (SGPRs)
# =============================================================================
S_OUT_PTR = (0, 1) # output C matrix base pointer
S_WORKGROUP_X = 2 # workgroup_id_x (system SGPR, follows user SGPRs)
S_WORKGROUP_Y = 3 # workgroup_id_y (system SGPR)
S_DIM_N = 4 # matrix dimension N
S_LOOP_BOUND = 7 # K-8 (loop termination bound)
S_LOOP_CTR = 12 # loop counter (increments by 8)
S_PREFETCH_FLAG = 13 # prefetch condition flag / row stride in epilogue
S_TILE_X = 14 # workgroup_x << 7
S_TILE_Y = 15 # workgroup_y << 7
# Kernarg load destinations
S_KERNARG_A = (20, 21) # A pointer from kernarg
S_KERNARG_B = (22, 23) # B pointer from kernarg
# Prefetch base pointers (8 pairs each, B: N*4 bytes apart, A: N*64 bytes apart)
S_PREFETCH_B = 24 # s[24:39] - 8 B tile pointers
S_PREFETCH_A = 40 # s[40:55] - 8 A tile pointers
# =============================================================================
# Data tables
# =============================================================================
# Accumulator grid: ACC_GRID[a_idx][b_idx] = vgpr for C[a,b]
# a_idx: which A value (0-7), b_idx: which B value (0-15)
# Scattered due to VOPD bank constraints (vdst_x % 4 != vdst_y % 4)
# Range is from v2 - v129
ACC_GRID = [
[ 5, 3, 9, 8, 37, 35, 41, 40, 69, 67, 73, 72, 101, 99,105,104], # a0
[ 4, 2, 7, 6, 36, 34, 39, 38, 68, 66, 71, 70, 100, 98,103,102], # a1
[ 17, 16, 13, 11, 49, 48, 45, 43, 81, 80, 77, 75, 113,112,109,107], # a2
[ 15, 14, 12, 10, 47, 46, 44, 42, 79, 78, 76, 74, 111,110,108,106], # a3
[ 21, 19, 25, 24, 53, 51, 57, 56, 85, 83, 89, 88, 117,115,121,120], # a4
[ 20, 18, 23, 22, 52, 50, 55, 54, 84, 82, 87, 86, 116,114,123,122], # a5
[125,128, 29, 27, 33, 32, 61, 59, 65, 64, 93, 91, 97, 96,129,127], # a6
[119,118, 28, 26, 31, 30, 60, 58, 63, 62, 92, 90, 95, 94,124,126], # a7
]
# Optimized (a_pair, b_pair) iteration order for better GPU scheduling
# Interleaves A and B pairs to maximize instruction-level parallelism
FMAC_PAIR_ORDER = [
(0,0),(0,1),(1,1),(1,0), (2,0),(2,1),(3,1),(3,2), (0,2),(0,3),(1,3),(1,2), (2,2),(2,3),(3,3),(3,4),
(0,4),(0,5),(1,5),(1,4), (2,4),(2,5),(3,5),(3,6), (0,6),(0,7),(1,7),(1,6), (2,6),(2,7),(3,7),(3,0),
]
def derive_fmac_pattern(acc_grid, a_tile_regs=None, b_tile_regs=None):
  """Expand FMAC_PAIR_ORDER into 64 dual-FMAC descriptors.

  Each entry is (vdst_x, vdst_y, srcx0, vsrcx1, srcy0, vsrcy1): two
  accumulator destinations from acc_grid plus the A/B tile source registers
  that feed them.
  """
  ops = []
  for step, (ap, bp) in enumerate(FMAC_PAIR_ORDER):
    a0, a1 = 2 * ap, 2 * ap + 1
    b0, b1 = 2 * bp, 2 * bp + 1
    ra, rb = a_tile_regs[ap], b_tile_regs[bp]
    # first op of the pair: straight pairing C[a0,b0] with C[a1,b1]
    ops.append((acc_grid[a0][b0], acc_grid[a1][b1], ra, rb, ra + 1, rb + 1))
    # second op: cross pairing, alternating which operand gets swapped so the
    # register banks feeding the dual issue vary from step to step
    if step % 2 == 0:
      ops.append((acc_grid[a0][b1], acc_grid[a1][b0], ra, rb + 1, ra + 1, rb))
    else:
      ops.append((acc_grid[a1][b0], acc_grid[a0][b1], ra + 1, rb, ra, rb + 1))
  return ops
# Derived: 64 dual FMAC operations
FMAC_PATTERN = derive_fmac_pattern(ACC_GRID, V_A_TILE_REGS, V_B_TILE_REGS)
def derive_permute_swaps(acc_grid, out_regs):
  """Compute the v_swap_b32 pairs that move accumulators into output order.

  acc_grid[a][b] names the register holding C[a,b] after the FMAC loop, and
  out_regs[i] names the register that must hold the i-th output value.  The
  returned (src, dst) swaps, applied in sequence, realize that permutation.
  """
  def target_ab(i):
    # output ordering: row_half (0-1), col_group (0-3), row_in_group (0-3), b_off (0-3)
    row_half, col_group = i // 64, (i // 16) % 4
    row_in_group, b_off = (i // 4) % 4, i % 4
    return (row_half * 4 + row_in_group, col_group * 4 + b_off)
  # holds: register -> (a, b) it currently contains; where: inverse map
  holds = {acc_grid[a][b]: (a, b) for a in range(8) for b in range(16)}
  where = {ab: r for r, ab in holds.items()}
  swaps = []
  for i in range(128):
    dst, want = out_regs[i], target_ab(i)
    src = where[want]
    if src == dst:
      continue
    swaps.append((src, dst))
    # apply the swap to our bookkeeping so later lookups stay consistent
    displaced = holds.get(dst)
    holds[dst], where[want] = want, dst
    if displaced is not None:
      holds[src], where[displaced] = displaced, src
  return swaps
# Derived: swap sequence to arrange accumulators for output
# Each group of 4 registers is ascending for direct global_store_b128
OUT_REGS = [r for i in range(32) for r in range(126 - i*4, 130 - i*4)]
PERMUTE_SWAPS = derive_permute_swaps(ACC_GRID, OUT_REGS)
# =============================================================================
# LDS tile staging registers
# =============================================================================
# DATA regs receive contiguous global prefetch, then write to LDS
# TILE regs receive scattered LDS loads (ds_load_b64 pairs), then feed FMACs
# Contiguous layout with mod4=[3,0,1,2,3,0,1,2] for bank conflict avoidance
V_LDS_A_DATA = [163, 164, 165, 166, 167, 168, 169, 170]
V_LDS_B_DATA = [171, 172, 173, 174, 175, 176, 177, 178]
# Initial tile prefetch: (vdst, saddr_lo) - load into A data regs using B prefetch pointers (s[24:31])
INIT_PREFETCH = [(V_LDS_A_DATA[i], S_PREFETCH_B+2*i) for i in range(4)]
# Global memory prefetch schedule: (vdst1, vdst2, addr_vreg, saddr_lo1, saddr_lo2)
# First 2 pairs from B prefetch pointers (s[32:39]), next 4 pairs from A prefetch pointers (s[40:55])
PREFETCH_LOADS = [(V_LDS_A_DATA[4+2*i], V_LDS_A_DATA[4+2*i+1], V_GLOBAL_B_ADDR, S_PREFETCH_B+8+4*i, S_PREFETCH_B+10+4*i) for i in range(2)] + \
[(V_LDS_B_DATA[2*(i-2)], V_LDS_B_DATA[2*(i-2)+1], V_GLOBAL_A_ADDR, S_PREFETCH_A+4*(i-2), S_PREFETCH_A+2+4*(i-2)) for i in range(2, 6)]
# =============================================================================
# Kernel class
# =============================================================================
class Kernel:
  """Accumulates machine instructions, tracking byte positions and branch labels."""
  def __init__(self, arch='gfx1100'):
    self.instructions = []
    self.labels = {}      # label name -> byte offset
    self.pos = 0          # current byte offset
    self.arch = arch
  def label(self, name):
    # record the current byte offset under the given name
    self.labels[name] = self.pos
  def emit(self, inst, target=None):
    # append inst at the current position; an optional branch target name is
    # remembered on the instruction and patched later by finalize()
    self.instructions.append(inst)
    inst._target, inst._pos = target, self.pos
    self.pos += inst.size()
    return inst
  def waitcnt(self, lgkm=None, vm=None):
    """Wait for memory operations. lgkm=N waits until N lgkm ops remain, vm=N waits until N vmem ops remain."""
    vm_left = 63 if vm is None else vm
    lgkm_left = 63 if lgkm is None else lgkm
    # pack exp (always 7 = no wait) / lgkm / vm counters into the s_waitcnt simm16 encoding
    packed = (7 & 0x7) | ((lgkm_left & 0x3f) << 4) | ((vm_left & 0x3f) << 10)
    self.emit(s_waitcnt(simm16=packed))
  def finalize(self):
    """Patch branch offsets and return the finalized instruction list."""
    for inst in self.instructions:
      if inst._target is None: continue
      # branch offset is measured in dwords from the end of the branch instruction
      offset_dwords = (self.labels[inst._target] - inst._pos - inst.size()) // 4
      if not -32768 <= offset_dwords <= 32767: raise ValueError(f"branch to '{inst._target}' offset {offset_dwords} exceeds simm16 range")
      inst.simm16 = offset_dwords
    return self.instructions
# =============================================================================
# Kernel builder
# =============================================================================
def build_kernel(N, arch='gfx1100'):
  """Build the RDNA3 instruction list for a 128x128-tiled NxN fp32 GEMM.

  N must be a multiple of 128 (the tile size) and at least 256 (the software
  pipeline prefetches one K-block ahead).  Returns the finalized instruction
  list with all branch offsets patched.
  """
  assert N % 128 == 0, f"N must be a multiple of 128 (tile size), got {N}"
  assert N >= 256, f"N must be >= 256 (prefetch pipeline requires at least 2 K-blocks), got {N}"
  k = Kernel(arch)
  # ===========================================================================
  # PROLOGUE: Load kernel arguments, compute tile coordinates and addresses
  # ===========================================================================
  k.emit(s_load_b128(sdata=s[S_KERNARG_A[0]:S_KERNARG_B[1]], sbase=s[0:1], offset=0x0, soffset=NULL))
  k.emit(s_load_b64(sdata=s[S_OUT_PTR[0]:S_OUT_PTR[1]], sbase=s[0:1], offset=0x10, soffset=NULL))
  k.emit(s_mov_b32(s[S_DIM_N], N))
  k.emit(s_mov_b32(s[S_LOOP_CTR], 0)) # used by LDS swizzle, always 0 for valid workgroups
  k.emit(s_lshl_b32(s[S_TILE_X], s[S_WORKGROUP_X], 7))
  k.emit(s_lshl_b32(s[S_TILE_Y], s[S_WORKGROUP_Y], 7))
  # Lane-derived values
  k.emit(v_and_b32_e32(v[V_LANE_ID_MOD8], 7, v[V_LANE_ID]))
  k.emit(v_lshrrev_b32_e32(v[4], 3, v[V_LANE_ID]))
  k.emit(v_or_b32_e32(v[1], s[S_TILE_X], v[V_LANE_ID]))
  k.emit(v_or_b32_e32(v[22], s[S_TILE_Y], v[4]))
  k.emit(v_lshlrev_b32_e32(v[V_LANE_MOD8_X4], 2, v[V_LANE_ID_MOD8]))
  k.waitcnt(lgkm=0)
  # Compute 8 A and B matrix tile base pointers for prefetch
  k.emit(s_mov_b64(s[S_PREFETCH_B:S_PREFETCH_B+1], s[S_KERNARG_B[0]:S_KERNARG_B[1]])) # B[0]: no offset
  for i in range(1, 8): # B: each pointer 1 row of B apart (N*4 bytes)
    k.emit(s_add_u32(s[S_PREFETCH_B+i*2], s[S_KERNARG_B[0]], i * N * 4))
    k.emit(s_addc_u32(s[S_PREFETCH_B+i*2+1], s[S_KERNARG_B[1]], 0))
  k.emit(s_mov_b64(s[S_PREFETCH_A:S_PREFETCH_A+1], s[S_KERNARG_A[0]:S_KERNARG_A[1]])) # A[0]: no offset
  for i in range(1, 8): # A: each pointer 16 rows of A apart (16*N*4 bytes)
    k.emit(s_add_u32(s[S_PREFETCH_A+i*2], s[S_KERNARG_A[0]], i * N * 64))
    k.emit(s_addc_u32(s[S_PREFETCH_A+i*2+1], s[S_KERNARG_A[1]], 0))
  # Global prefetch addresses: B = (tile_x + lane_id) * 4, A = (tile_y*N + (lane_id/8)*N + lane_id%8) * 4
  k.emit(v_add_nc_u32_e32(v[V_GLOBAL_B_ADDR], s[S_TILE_X], v[V_LANE_ID]))
  k.emit(v_lshlrev_b32_e32(v[V_GLOBAL_B_ADDR], 2, v[V_GLOBAL_B_ADDR]))
  k.emit(s_mul_i32(s[19], s[S_TILE_Y], N))
  k.emit(v_mul_lo_u32(v[V_GLOBAL_A_ADDR], v[4], N)) # (lane_id/8)*N
  k.emit(v_add_nc_u32_e32(v[V_GLOBAL_A_ADDR], v[V_LANE_ID_MOD8], v[V_GLOBAL_A_ADDR])) # + lane_id%8
  k.emit(v_add_nc_u32_e32(v[V_GLOBAL_A_ADDR], s[19], v[V_GLOBAL_A_ADDR]))
  k.emit(v_lshlrev_b32_e32(v[V_GLOBAL_A_ADDR], 2, v[V_GLOBAL_A_ADDR]))
  # Do initial loads
  for vdst, saddr_lo in INIT_PREFETCH:
    k.emit(global_load_b32(vdst=v[vdst], addr=v[V_GLOBAL_B_ADDR], saddr=s[saddr_lo:saddr_lo+1]))
  for iter in range(6):
    vdst1, vdst2, addr, slo1, slo2 = PREFETCH_LOADS[iter]
    k.emit(global_load_b32(vdst=v[vdst1], addr=v[addr], saddr=s[slo1:slo1+1]))
    k.emit(global_load_b32(vdst=v[vdst2], addr=v[addr], saddr=s[slo2:slo2+1]))
  # ===========================================================================
  # LDS store address computation (bank-conflict-avoiding swizzle)
  # ===========================================================================
  # This section computes LDS store addresses with a swizzle pattern to avoid bank conflicts.
  # The swizzle ensures that threads in the same wavefront write to different LDS banks.
  # Formula: swizzled_addr = base + (lane_id & 7) * LDS_A_STRIDE + swizzle_offset
  # where swizzle_offset depends on (lane_id >> 3) to distribute across banks.
  k.emit(v_add_nc_u32_e32(v[9], s[S_LOOP_CTR], v[22])) # row 0 base
  k.emit(v_and_b32_e32(v[9], ADDR_MASK, v[9]))
  k.emit(v_sub_nc_u32_e32(v[9], v[22], v[9])) # row 0 swizzle offset
  k.emit(v_lshlrev_b32_e32(v[9], 2, v[9])) # * 4
  k.emit(v_mad_u32_u24(v[V_LDS_B_ADDR], LDS_A_STRIDE, v[V_LANE_ID_MOD8], v[9]))
  # For V_LDS_A_BASE and epilogue
  k.emit(v_bfe_u32(v[2], v[V_LANE_ID], 3, 2)) # v[2] = (lane_id >> 3) & 3
  k.emit(v_lshlrev_b32_e32(v[V_LANE_DIV8_X4], 2, v[2]))
  # Compute LDS load/store base addresses for inner loop
  k.emit(v_lshlrev_b32_e32(v[2], 4, v[2]))
  k.emit(v_and_b32_e32(v[3], 0x7F, v[1])) # simplified from 3 lines
  k.emit(v_lshl_or_b32(v[V_LDS_B_BASE], v[V_LANE_ID_MOD8], 4, LDS_BASE_OFFSET))
  k.emit(v_lshl_add_u32(v[V_LDS_A_ADDR], v[3], 2, LDS_BASE_OFFSET))
  k.emit(v_lshlrev_b32_e32(v[3], 2, v[V_LANE_ID]))
  k.emit(v_and_or_b32(v[V_LDS_A_BASE], 0x180, v[3], v[2]))
  # Do initial stores
  k.waitcnt(vm=0)
  for i in range(4): # A tile: 8 values via 4 stride64 stores
    k.emit(ds_store_2addr_stride64_b32(addr=v[V_LDS_A_ADDR], data0=v[V_LDS_A_DATA[i*2]], data1=v[V_LDS_A_DATA[i*2+1]], offset0=i*4, offset1=i*4+2))
  for i in range(8): # B tile: 8 values via 8 scalar stores with 64-byte spacing
    offset = i * 64
    k.emit(ds_store_b32(addr=v[V_LDS_B_ADDR], data0=v[V_LDS_B_DATA[i]], offset0=offset & 0xFF, offset1=offset >> 8))
  # Zero all 128 accumulators using VOPD dual moves (64 instructions instead of 128)
  for i in range(0, len(OUT_REGS), 2):
    k.emit(VOPD(VOPDOp.V_DUAL_MOV_B32, VOPDOp.V_DUAL_MOV_B32, vdstx=v[OUT_REGS[i]], vdsty=v[OUT_REGS[i+1]], srcx0=0, srcy0=0))
  k.emit(s_add_i32(s[S_LOOP_BOUND], s[S_DIM_N], -8))
  # S_LOOP_CTR is already 0 from prologue initialization
  k.emit(s_branch(), target='LOOP_ENTRY')
  # ===========================================================================
  # MAIN GEMM LOOP
  # ===========================================================================
  NO_DS, NO_GLOBAL = getenv("NO_DS", 0), getenv("NO_GLOBAL", 0)
  k.label('LOOP_INC')
  k.emit(s_add_i32(s[S_LOOP_CTR], s[S_LOOP_CTR], 8))
  k.emit(s_cmp_ge_i32(s[S_LOOP_CTR], s[S_DIM_N]))
  k.emit(s_cbranch_scc1(), target='EPILOGUE')
  k.label('LOOP_ENTRY')
  k.emit(s_cmp_lt_i32(s[S_LOOP_CTR], s[S_LOOP_BOUND]))
  k.emit(s_cselect_b32(s[S_PREFETCH_FLAG], -1, 0)) # s_cselect doesn't modify SCC
  k.emit(s_cbranch_scc0(), target='SKIP_PREFETCH') # branch if loop_ctr >= loop_bound
  if not NO_GLOBAL:
    # Advance prefetch pointers (VGPR)
    #k.emit(v_add_nc_u32_e32(v[V_GLOBAL_B_ADDR], N * 32, v[V_GLOBAL_B_ADDR]))
    #k.emit(v_add_nc_u32_e32(v[V_GLOBAL_A_ADDR], 0x20, v[V_GLOBAL_A_ADDR]))
    # Advance prefetch pointers (64-bit adds): B advances 8 rows (8*N*4 bytes), A advances 8 cols (8*4 bytes)
    k.emit(s_clause(simm16=31))
    for i in range(8):
      k.emit(s_add_u32(s[S_PREFETCH_B+i*2], s[S_PREFETCH_B+i*2], N * 32))
      k.emit(s_addc_u32(s[S_PREFETCH_B+i*2+1], s[S_PREFETCH_B+i*2+1], 0))
    for i in range(8):
      k.emit(s_add_u32(s[S_PREFETCH_A+i*2], s[S_PREFETCH_A+i*2], 0x20))
      k.emit(s_addc_u32(s[S_PREFETCH_A+i*2+1], s[S_PREFETCH_A+i*2+1], 0))
    # do the fetch
    for vdst, saddr_lo in INIT_PREFETCH:
      k.emit(global_load_b32(vdst=v[vdst], addr=v[V_GLOBAL_B_ADDR], saddr=s[saddr_lo:saddr_lo+1]))
  k.label('SKIP_PREFETCH')
  # wait for local stores to finish (either initial or loop)
  # then sync the warp so it's safe to load local
  k.waitcnt(lgkm=0)
  k.emit(s_barrier())
  # 8 inner loop iterations
  for iter in range(8):
    # Load A tile (4 pairs) and B tile (8 pairs) from LDS
    if not NO_DS:
      k.emit(s_clause(simm16=len(V_A_TILE_REGS) + len(V_B_TILE_REGS) - 1)) # 12 loads total: 4 A + 8 B
      # A tile: 4 ds_load_b64
      for i, vdst in enumerate(V_A_TILE_REGS):
        a_off = (i & 1) * 8 + (i >> 1) * 64 + iter * LDS_A_STRIDE
        k.emit(ds_load_b64(vdst=v[vdst:vdst+1], addr=v[V_LDS_A_BASE], offset0=a_off & 0xFF, offset1=a_off >> 8))
      # B tile: 8 ds_load_b64
      for i, vdst in enumerate(V_B_TILE_REGS):
        b_off = (i & 1) * 8 + (i & 2) * 64 + (i >> 2) * 256 + iter * LDS_B_STRIDE
        k.emit(ds_load_b64(vdst=v[vdst:vdst+1], addr=v[V_LDS_B_BASE], offset0=b_off & 0xFF, offset1=b_off >> 8))
    # Issue global prefetch (first 6 iterations only)
    if iter < 6 and not NO_GLOBAL:
      vdst1, vdst2, addr, slo1, slo2 = PREFETCH_LOADS[iter]
      k.emit(global_load_b32(vdst=v[vdst1], addr=v[addr], saddr=s[slo1:slo1+1]))
      k.emit(global_load_b32(vdst=v[vdst2], addr=v[addr], saddr=s[slo2:slo2+1]))
    # 64 dual FMACs
    k.waitcnt(lgkm=0)
    k.emit(s_clause(simm16=len(FMAC_PATTERN)-1))
    for i, (vdst_x, vdst_y, ax, bx, ay, by) in enumerate(FMAC_PATTERN):
      k.emit(VOPD(VOPDOp.V_DUAL_FMAC_F32, VOPDOp.V_DUAL_FMAC_F32,
                  vdstx=v[vdst_x], vdsty=v[vdst_y], srcx0=v[ax], vsrcx1=v[bx], srcy0=v[ay], vsrcy1=v[by]))
  # wait for all global loads to finish
  # then sync the warp so it's safe to store local
  k.waitcnt(vm=0)
  k.emit(s_barrier())
  # Store prefetched data to LDS
  # NOTE: Register naming reflects LDS tile organization, not source matrix:
  #   V_LDS_A_DATA (v163-170) holds data that goes to LDS A-tile region
  #   V_LDS_B_DATA (v171-178) holds data that goes to LDS B-tile region
  # The data sources are swapped: A-tile receives B matrix rows, B-tile receives A matrix columns
  if not NO_DS:
    for i in range(4): # A tile: 8 values via 4 stride64 stores
      k.emit(ds_store_2addr_stride64_b32(addr=v[V_LDS_A_ADDR], data0=v[V_LDS_A_DATA[i*2]], data1=v[V_LDS_A_DATA[i*2+1]], offset0=i*4, offset1=i*4+2))
    for i in range(8): # B tile: 8 values via 8 scalar stores with 64-byte spacing
      offset = i * 64
      k.emit(ds_store_b32(addr=v[V_LDS_B_ADDR], data0=v[V_LDS_B_DATA[i]], offset0=offset & 0xFF, offset1=offset >> 8))
  k.emit(s_branch(), target='LOOP_INC')
  # ===========================================================================
  # EPILOGUE: Permute and store results
  # ===========================================================================
  k.label('EPILOGUE')
  # Rearrange accumulators from FMAC layout to contiguous output order
  for a, b in PERMUTE_SWAPS:
    k.emit(v_swap_b32_e32(v[a], v[b]))
  # Compute output base coordinates
  # v[130] = col_base = tile_x + (lane_id & 7) * 4
  # v[131] = row_base = tile_y + (lane_id & 0x60) + ((lane_id >> 3) & 3) * 4
  # v[132] = 0 (for 64-bit address high part)
  k.emit(v_add_nc_u32_e32(v[130], s[S_TILE_X], v[V_LANE_MOD8_X4]))
  k.emit(v_and_b32_e32(v[131], 0x60, v[V_LANE_ID]))
  k.emit(v_add_nc_u32_e32(v[131], s[S_TILE_Y], v[131]))
  k.emit(v_add_nc_u32_e32(v[131], v[V_LANE_DIV8_X4], v[131]))
  k.emit(v_mov_b32_e32(v[132], 0))
  # Precompute row offsets: v[133-136] for rows 0-3, v[137-140] for rows 16-19
  for base, row_off in [(133, 0), (137, 16)]:
    if row_off: k.emit(v_add_nc_u32_e32(v[141], row_off, v[131]))
    k.emit(v_mul_lo_u32(v[base], v[141] if row_off else v[131], s[S_DIM_N]))
    for j in range(3): k.emit(v_add_nc_u32_e32(v[base + 1 + j], s[S_DIM_N], v[base + j]))
  # s[S_PREFETCH_FLAG] = row stride in bytes (N * 4)
  k.emit(s_lshl_b32(s[S_PREFETCH_FLAG], s[S_DIM_N], 2))
  # Store 128 output values as 32 groups of 4 (128-bit stores)
  # Layout: 2 row halves (0-3, 16-19) x 4 col groups x 4 rows = 32 stores of 4 floats
  for i, (row_half, col_off, row_in_group) in enumerate([(rh, co, ri)
      for rh in range(2) for co in [0, 32, 64, 96] for ri in range(4)]):
    row = row_half * 16 + row_in_group
    src = OUT_REGS[i*4] # first reg of ascending group of 4
    if row_in_group == 0:
      # First row of group: compute full address
      if col_off == 0: k.emit(v_mov_b32_e32(v[141], v[130]))
      else: k.emit(v_add_nc_u32_e32(v[141], col_off, v[130]))
      row_base = 133 + row if row < 4 else 137 + row - 16
      k.emit(v_add_nc_u32_e32(v[141], v[row_base], v[141]))
      k.emit(v_lshlrev_b32_e32(v[141], 2, v[141]))
      k.emit(v_add_co_u32(v[141], VCC_LO, s[S_OUT_PTR[0]], v[141]))
      k.emit(v_add_co_ci_u32_e32(v[142], s[S_OUT_PTR[1]], v[132]))
    else:
      # Subsequent rows: add stride
      k.emit(v_add_co_u32(v[141], VCC_LO, s[S_PREFETCH_FLAG], v[141]))
      k.emit(v_add_co_ci_u32_e32(v[142], v[142], v[132]))
    k.emit(global_store_b128(addr=v[141:142], data=v[src:src+3], saddr=NULL))
  k.emit(s_sendmsg(simm16=3)) # DEALLOC_VGPRS
  k.emit(s_endpgm())
  return k.finalize()
# =============================================================================
# Test harness
# =============================================================================
N = getenv("N", 4096)
BLOCK_M, BLOCK_N = 128, 128
THREADS = 128
def test_matmul():
  """Run the hand-written ASM GEMM on NxN random inputs, time it, and verify
  against tinygrad's own matmul (MSE must be <= 1e-6)."""
  dev = Device[Device.DEFAULT]
  print(f"Device arch: {dev.renderer.arch}")
  insts = build_kernel(N, dev.renderer.arch)
  # centered random inputs so products stay small and fp32-accurate
  rng = np.random.default_rng(42)
  a = Tensor(rng.random((N, N), dtype=np.float32) - 0.5)
  b = Tensor(rng.random((N, N), dtype=np.float32) - 0.5)
  c = Tensor.empty(N, N)
  Tensor.realize(a, b, c)
  grid, local = (N // BLOCK_N, N // BLOCK_M, 1), (THREADS, 1, 1)
  print(f"Grid: {grid}, Local: {local}")
  dname:str = Device.DEFAULT
  def asm_kernel(A:UOp, B:UOp, C:UOp) -> UOp:
    # wrap the raw instruction list in a PROGRAM UOp with grid/local specials and LDS
    gidxs = [UOp.special(n, f"gidx{i}") for i,n in enumerate(grid)]
    lidxs = [UOp.special(n, f"lidx{i}") for i,n in enumerate(local)]
    lds = UOp(Ops.DEFINE_LOCAL, dtypes.uint8.ptr(size=max(LDS_SIZE, 65536//getenv("LIMIT_OCC", 65536)), addrspace=AddrSpace.LOCAL), (), 'lds')
    sink = UOp.sink(A.base, B.base, C.base, lds, *gidxs, *lidxs, arg=KernelInfo(name=colored("kernel", "cyan"),
      estimates=Estimates(ops=N*N*N*2, mem=N*N*4*3)))
    return UOp(Ops.PROGRAM, src=(sink, UOp(Ops.DEVICE, arg=dname), UOp(Ops.LINEAR, src=tuple([UOp(Ops.INS, arg=x) for x in insts]))))
  c = Tensor.custom_kernel(a, b, c, fxn=asm_kernel)[2]
  ei = c.schedule()[0].lower()
  ets = []
  with Context(DEBUG=2):
    for _ in range(getenv("CNT", 5)): ets.append(ei.run(wait=True))
  print(f"REAL TFLOPS {N * N * N * 2 / min(ets) * 1e-12:.2f}")
  if getenv("VERIFY", 1):
    GlobalCounters.reset()
    with Context(DEBUG=2): tc = (a @ b).realize()
    with Context(DEBUG=0): err = (c - tc).square().mean().item()
    print(f"mean squared error {err}")
    # err != err catches NaN
    if err != err or err > 1e-06:
      # per-128x128-block diagnostics: report all-zero rows and per-block MSE
      c_np, tc_np = c.numpy(), tc.numpy()
      for bi in range(N // 128):
        for bj in range(N // 128):
          blk_c = c_np[bi*128:(bi+1)*128, bj*128:(bj+1)*128]
          blk_ref = tc_np[bi*128:(bi+1)*128, bj*128:(bj+1)*128]
          blk_diff = blk_c - blk_ref
          zero_rows = [i for i in range(128) if np.all(np.abs(blk_c[i,:]) < 1e-10)]
          nz_rows = [i for i in range(128) if i not in zero_rows]
          nz_mse = float(np.mean(blk_diff[nz_rows,:]**2)) if nz_rows else 0
          print(f"Block ({bi},{bj}): zero_rows={zero_rows}, nz_rows_mse={nz_mse:.2e}")
          # show first few non-zero row comparisons
          if nz_rows and nz_mse > 1e-6:
            for r in nz_rows[:3]:
              print(f"  row {r} asm[0:8]: {blk_c[r,:8]}")
              print(f"  row {r} ref[0:8]: {blk_ref[r,:8]}")
      raise RuntimeError("matmul is wrong!")
if __name__ == "__main__":
test_matmul()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "extra/gemm/amd_asm_matmul.py",
"license": "MIT License",
"lines": 424,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
tinygrad/tinygrad:extra/gemm/asm/rdna3/test.py | import math, pathlib
from tinygrad import Device, dtypes
from tinygrad.uop.ops import UOp, Ops, KernelInfo
from extra.gemm.amd_uop_matmul import test_matmul
N = 4096
TN = 96
THREADS_PER_WG = 128
NUM_WG = math.ceil(N / TN) * math.ceil(N / TN)
dname:str = Device.DEFAULT
template:str = (pathlib.Path(__file__).parent/"template.s").read_text()
def asm_kernel() -> UOp:
  """Build an Ops.PROGRAM UOp that wraps the hand-written gemm assembly source."""
  # splice the raw instruction stream from gemm.s into the kernel template
  asm_src = template.replace("INSTRUCTIONS", (pathlib.Path(__file__).parent/"gemm.s").read_text())
  local_idx = UOp.special(THREADS_PER_WG, "lidx0")
  group_idx = UOp.special(NUM_WG, "gidx0")
  # slot 0 is the output buffer, slots 1/2 are the inputs
  buf_a = UOp.placeholder((N*N,), dtypes.half, slot=1)
  buf_b = UOp.placeholder((N*N,), dtypes.half, slot=2)
  buf_c = UOp.placeholder((N*N,), dtypes.half, slot=0)
  sink = UOp.sink(buf_a, buf_b, buf_c, local_idx, group_idx, arg=KernelInfo(name="gemm"))
  device = UOp(Ops.DEVICE, arg=dname)
  linear = UOp(Ops.LINEAR, src=(*sink.src, sink))
  return UOp(Ops.PROGRAM, src=(sink, device, linear, UOp(Ops.SOURCE, arg=asm_src)))
if __name__ == "__main__":
  # run the shared matmul harness against the assembled kernel
  test_matmul(asm_kernel(), dtype=dtypes.half, N=N)
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "extra/gemm/asm/rdna3/test.py",
"license": "MIT License",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
tinygrad/tinygrad:extra/gemm/asm/unpack_kd.py | # unpack the complete kernel descriptor of an amdgpu ELF
# https://rocm.docs.amd.com/projects/llvm-project/en/latest/LLVM/llvm/html/AMDGPUUsage.html#code-object-v3-kernel-descriptor
import struct, pathlib, sys
from tinygrad.runtime.support.elf import elf_loader
def bits(x, lo, hi):
  """Extract the inclusive bit field [hi:lo] from integer x."""
  width = hi - lo + 1
  mask = (1 << width) - 1
  return (x >> lo) & mask
def assert_zero(x, lo, hi):
  """Assert that the bit field [hi:lo] of x is all zeros (used for reserved fields)."""
  field = bits(x, lo, hi)
  assert field == 0
# load the ELF named on the command line and locate the kernel descriptor in .rodata
with open(sys.argv[1], "rb") as f:
  lib = f.read()
image, sections, relocs = elf_loader(lib)
rodata_entry = next((sh.header.sh_addr for sh in sections if sh.name == ".rodata"))
# rodata is exactly 64 bytes
kd = image[rodata_entry:rodata_entry+64]
# treat the 512-bit descriptor as one little-endian integer so fields can be sliced with bits()
desc = int.from_bytes(kd, byteorder="little")
group_segment_fixed_size = bits(desc, 0, 31)
private_segment_fixed_size = bits(desc, 32, 63)
kernarg_size = bits(desc, 64, 95)
reserved_127_96 = bits(desc, 96, 127)
assert reserved_127_96 == 0
print("GROUP_SEGMENT_FIXED_SIZE:", group_segment_fixed_size)
print("PRIVATE_SEGMENT_FIXED_SIZE:", private_segment_fixed_size)
print("KERNARG_SIZE:", kernarg_size)
print("RESERVED 127:96:", reserved_127_96)
# KERNEL_CODE_ENTRY_BYTE_OFFSET: signed 64-bit offset from the descriptor to the entry point
entry_off = bits(desc, 128, 191)
# sign-extend manually if needed
if entry_off & (1 << 63):
  entry_off -= 1 << 64
print("KERNEL_CODE_ENTRY_BYTE_OFFSET:", entry_off)
kd_addr = 0x1840  # NOTE(review): hard-coded descriptor address — presumably where this ELF places .rodata; confirm per-file
entry_addr = kd_addr + entry_off
print("Computed entry address: 0x%016x" % entry_addr)
print("256B aligned:", entry_addr % 256 == 0)
# the three COMPUTE_PGM_RSRC dwords, decoded field-by-field below
pgm_rsrc3 = bits(desc, 352, 383)
pgm_rsrc1 = bits(desc, 384, 415)
pgm_rsrc2 = bits(desc, 416, 447)
print("COMPUTE_PGM_RSRC3: 0x%08x" % pgm_rsrc3)
print("COMPUTE_PGM_RSRC1: 0x%08x" % pgm_rsrc1)
print("COMPUTE_PGM_RSRC2: 0x%08x" % pgm_rsrc2)
# rsrc 3 (gfx950)
accum_offset_raw = bits(pgm_rsrc3, 0, 5)
assert_zero(pgm_rsrc3, 6, 15)
tg_split = bits(pgm_rsrc3, 16, 16)
# ACCUM_OFFSET is stored in units of 4 VGPRs, biased by one
accum_offset_vgprs = (accum_offset_raw + 1) * 4
print("RSRC3.ACCUM_OFFSET (AccVGPR index):", accum_offset_vgprs)
print("RSRC3.TG_SPLIT:", tg_split)
# rsrc 1
vgpr_gran = bits(pgm_rsrc1, 0, 5)
sgpr_gran = bits(pgm_rsrc1, 6, 9)
# NOTE: this is vgprs + agprs
vgprs_used = (vgpr_gran + 1) * 8
assert 0 <= vgprs_used <= 512
k = sgpr_gran // 2
sgprs_used = (k + 1) * 16
print("RSRC1.VGPRS:", vgprs_used)
print("RSRC1.SGPRS:", sgprs_used)
assert_zero(pgm_rsrc1, 10, 11)
float_round_mode_32 = bits(pgm_rsrc1, 12, 13)
# BUGFIX: was bits(pgm_rsrc1, 15, 14) — lo/hi swapped, giving a 0-wide mask that always
# returned 0. FLOAT_ROUND_MODE_16_64 sits in bits 15:14, directly above ROUND_MODE_32 (13:12).
float_round_mode_16_64 = bits(pgm_rsrc1, 14, 15)
float_denorm_mode_32 = bits(pgm_rsrc1, 16, 17)
float_denorm_mode_16_64 = bits(pgm_rsrc1, 18, 19)
priv = bits(pgm_rsrc1, 20, 20)
assert priv == 0
enable_dx10_clamp_wg_rr_en = bits(pgm_rsrc1, 21, 21)
debug_mode = bits(pgm_rsrc1, 22, 22)
enable_ieee_mode = bits(pgm_rsrc1, 23, 23)
bulky = bits(pgm_rsrc1, 24, 24)
assert bulky == 0
cdbg_user = bits(pgm_rsrc1, 25, 25)
assert cdbg_user == 0
fp16_ovfl = bits(pgm_rsrc1, 26, 26)
assert_zero(pgm_rsrc1, 27, 28) # reserved (a duplicate of this check earlier in this section was removed)
assert_zero(pgm_rsrc1, 29, 29) # WGP_MODE (reserved on gfx9)
assert_zero(pgm_rsrc1, 30, 30) # MEM_ORDERED (reserved on gfx9)
assert_zero(pgm_rsrc1, 31, 31) # FWD_PROGRESS (reserved on gfx9)
# rsrc 2: scratch, user-SGPR setup, system-SGPR enables, LDS size, and exception enables
enable_private_segment = bits(pgm_rsrc2, 0, 0) # SCRATCH_EN
user_sgpr_count = bits(pgm_rsrc2, 1, 5) # USER_SGPR
enable_trap_handler = bits(pgm_rsrc2, 6, 6) # TRAP_PRESENT (must be 0 here)
assert enable_trap_handler == 0
enable_sgpr_workgroup_id_x = bits(pgm_rsrc2, 7, 7)
enable_sgpr_workgroup_id_y = bits(pgm_rsrc2, 8, 8)
enable_sgpr_workgroup_id_z = bits(pgm_rsrc2, 9, 9)
enable_sgpr_workgroup_info = bits(pgm_rsrc2, 10, 10)
enable_vgpr_workitem_id = bits(pgm_rsrc2, 11, 12) # TIDIG_CMP_CNT enum (0..3)
enable_exception_address_watch = bits(pgm_rsrc2, 13, 13)
assert enable_exception_address_watch == 0
enable_exception_memory = bits(pgm_rsrc2, 14, 14)
assert enable_exception_memory == 0
granulated_lds_size = bits(pgm_rsrc2, 15, 23)
assert granulated_lds_size == 0 # spec: must be 0; CP uses dispatch packet rounding
enable_exception_fp_invalid = bits(pgm_rsrc2, 24, 24)
enable_exception_fp_denorm_src = bits(pgm_rsrc2, 25, 25)
enable_exception_fp_div0 = bits(pgm_rsrc2, 26, 26)
enable_exception_fp_overflow = bits(pgm_rsrc2, 27, 27)
enable_exception_fp_underflow = bits(pgm_rsrc2, 28, 28)
enable_exception_fp_inexact = bits(pgm_rsrc2, 29, 29)
enable_exception_int_div0 = bits(pgm_rsrc2, 30, 30)
assert_zero(pgm_rsrc2, 31, 31)
print("RSRC2.ENABLE_PRIVATE_SEGMENT:", enable_private_segment)
print("RSRC2.USER_SGPR_COUNT:", user_sgpr_count)
print("RSRC2.ENABLE_SGPR_WORKGROUP_ID_X:", enable_sgpr_workgroup_id_x)
print("RSRC2.ENABLE_SGPR_WORKGROUP_ID_Y:", enable_sgpr_workgroup_id_y)
print("RSRC2.ENABLE_SGPR_WORKGROUP_ID_Z:", enable_sgpr_workgroup_id_z)
print("RSRC2.ENABLE_SGPR_WORKGROUP_INFO:", enable_sgpr_workgroup_info)
print("RSRC2.ENABLE_VGPR_WORKITEM_ID (enum):", enable_vgpr_workitem_id)
print("RSRC2.EXC_FP_INVALID:", enable_exception_fp_invalid)
print("RSRC2.EXC_FP_DENORM_SRC:", enable_exception_fp_denorm_src)
print("RSRC2.EXC_FP_DIV0:", enable_exception_fp_div0)
print("RSRC2.EXC_FP_OVERFLOW:", enable_exception_fp_overflow)
print("RSRC2.EXC_FP_UNDERFLOW:", enable_exception_fp_underflow)
print("RSRC2.EXC_FP_INEXACT:", enable_exception_fp_inexact)
print("RSRC2.EXC_INT_DIV0:", enable_exception_int_div0)
# user sgprs: which implicit SGPR arguments the CP pre-loads before kernel start
enable_sgpr_private_segment_buffer = bits(desc, 448, 448)
enable_sgpr_dispatch_ptr = bits(desc, 449, 449)
enable_sgpr_queue_ptr = bits(desc, 450, 450)
enable_sgpr_kernarg_segment_ptr = bits(desc, 451, 451)
enable_sgpr_dispatch_id = bits(desc, 452, 452)
enable_sgpr_flat_scratch_init = bits(desc, 453, 453)
enable_sgpr_private_segment_size = bits(desc, 454, 454)
assert_zero(desc, 455, 457)
print("DESC.ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER:", enable_sgpr_private_segment_buffer)
print("DESC.ENABLE_SGPR_DISPATCH_PTR:", enable_sgpr_dispatch_ptr)
print("DESC.ENABLE_SGPR_QUEUE_PTR:", enable_sgpr_queue_ptr)
print("DESC.ENABLE_SGPR_KERNARG_SEGMENT_PTR:", enable_sgpr_kernarg_segment_ptr)
print("DESC.ENABLE_SGPR_DISPATCH_ID:", enable_sgpr_dispatch_id)
print("DESC.ENABLE_SGPR_FLAT_SCRATCH_INIT:", enable_sgpr_flat_scratch_init)
print("DESC.ENABLE_SGPR_PRIVATE_SEGMENT_SIZE:", enable_sgpr_private_segment_size)
assert_zero(desc, 458, 459)
# NOTE(review): this reads bits 460:459 for a single-bit flag, overlapping both the
# assert_zero(458,459) above and the assert_zero(460,463) below — looks off by one;
# verify USES_DYNAMIC_STACK's bit position against the kernel-descriptor spec.
uses_dynamic_stack = bits(desc, 459, 460)
print("DESC.USES_DYNAMIC_STACK:", uses_dynamic_stack)
# gfx950 only
assert_zero(desc, 460, 463)
kernarg_preload_spec_length = bits(desc, 464, 470)
print("DESC.KERNARG_PRELOAD_SPEC_LENGTH:", kernarg_preload_spec_length)
kernarg_preload_spec_offset = bits(desc, 471, 479)
print("DESC.KERNARG_PRELOAD_SPEC_OFFSET:", kernarg_preload_spec_offset)
assert_zero(desc, 480, 511)
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "extra/gemm/asm/unpack_kd.py",
"license": "MIT License",
"lines": 138,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
tinygrad/tinygrad:test/testextra/test_cfg_viz.py | # ruff: noqa: F405, F403
# allow define from star imports
import unittest
from tinygrad import Device, Tensor
from tinygrad.uop.ops import UOp, Ops, KernelInfo
from tinygrad.viz.serve import amdgpu_cfg
from tinygrad.runtime.autogen.amd.rdna3.ins import *
from tinygrad.renderer.amd.dsl import s
# TODO: this belongs to the dsl infrastructure
from extra.gemm.amd_asm_matmul import Kernel
def run_asm(name:str, k:Kernel):
  """Finalize the hand-assembled Kernel k, wrap it in a 1x1 custom kernel, run it once,
  and return the lowered schedule item (so callers can inspect the compiled program)."""
  insts = k.finalize()
  def fxn(out:UOp) -> UOp:
    # single workgroup, single thread — these tests only care about the CFG, not parallelism
    lidx = UOp.special(1, "lidx0")
    gidx = UOp.special(1, "gidx0")
    sink = UOp.sink(out.base, lidx, gidx, arg=KernelInfo(name=name))
    # attach the raw instruction stream as Ops.INS uops under Ops.LINEAR
    return UOp(Ops.PROGRAM, src=(sink, UOp(Ops.DEVICE, arg="AMD"), UOp(Ops.LINEAR, src=tuple([UOp(Ops.INS, arg=x) for x in insts]))))
  out = Tensor.custom_kernel(Tensor.empty(1), fxn=fxn)[0]
  ei = out.schedule()[-1].lower()
  ei.run()
  return ei
@unittest.skipUnless(Device.DEFAULT == "AMD", "only on AMD")
class TestCfg(unittest.TestCase):
  """Exercises control-flow-graph extraction (amdgpu_cfg) on small hand-assembled kernels
  covering common CFG shapes: straight-line, diamond, loops, switch, and irreducible flow."""
  def setUp(self):
    # the instruction encodings below are RDNA-specific
    self.arch = Device["AMD"].arch
    if not any(self.arch.startswith(a) for a in {"gfx11", "gfx12"}):
      self.skipTest(f"tests written for RDNA, got arch {self.arch}")
  def test_simple(self):
    # unconditional branch into a single terminating block
    k = Kernel(arch=Device["AMD"].arch)
    k.label("entry")
    k.emit(s_branch(), target="bb1")
    k.label("bb1")
    k.emit(s_endpgm())
    k.emit(s_code_end())
    run_asm("simple", k)
  def test_diamond(self):
    # if/else diamond; also checks block count, edge count, and register-use tokens
    k = Kernel(arch=Device["AMD"].arch)
    k.label("entry")
    k.emit(s_mov_b32(s[0], 0))
    k.emit(s_mov_b32(s[1], 0))
    k.emit(s_cmp_eq_u64(s[0:1], 0))
    k.emit(s_cbranch_scc1(), target="if")
    k.emit(s_branch(), target="else")
    k.label("if")
    k.emit(s_nop(1))
    k.emit(s_branch(), target="end")
    k.label("else")
    k.emit(s_nop(0))
    k.label("end")
    k.emit(s_endpgm())
    k.emit(s_code_end())
    ei = run_asm("diamond", k)
    cfg = amdgpu_cfg(ei.prg.p.lib, self.arch)["data"]
    self.assertEqual(len(cfg["blocks"]), 5)
    edge_count = sum(len(v) for v in cfg["paths"].values())
    self.assertEqual(edge_count, 5)
    # collect, per register key, the PCs of instructions that reference it
    references:dict[str, list[str]] = {}
    for pc, tokens in cfg["pc_tokens"].items():
      for t in tokens:
        for key in t["keys"]: references.setdefault(key, []).append(pc)
    self.assertEqual(len(references["r0"]), 2)
    insts = [cfg["pc_tokens"][pc][0]["st"] for pc in references["r0"]]
    self.assertEqual(insts, ['s_mov_b32', 's_cmp_eq_u64'])
    # the final block should collapse repeated s_code_end padding into one "(Nx)" token
    end_block_content = "\n".join(" ".join(t["st"] for t in cfg["pc_tokens"][pc]) for pc in list(cfg["blocks"].values())[-1])
    self.assertEqual(end_block_content, "s_endpgm\ns_code_end (217x)")
  def test_loop(self):
    # simple countdown loop with a backward conditional branch
    k = Kernel(arch=Device["AMD"].arch)
    k.label("entry")
    k.emit(s_mov_b32(s[1], 4))
    k.label("loop")
    k.emit(s_add_u32(s[1], s[1], -1))
    k.emit(s_cmp_eq_i32(s[1], 0))
    k.emit(s_cbranch_scc0(), target="loop")
    k.emit(s_endpgm())
    k.emit(s_code_end())
    run_asm("simple_loop", k)
  def test_loop_branch(self):
    # loop whose body contains an if/else diamond
    k = Kernel(arch=Device["AMD"].arch)
    k.label("entry")
    k.emit(s_mov_b32(s[1], 4))
    k.label("loop")
    k.emit(s_add_u32(s[1], s[1], -1))
    k.emit(s_cmp_eq_i32(s[1], 2))
    k.emit(s_cbranch_scc1(), target="cond")
    k.emit(s_branch(), target="cont")
    k.label("cond")
    k.emit(s_add_u32(s[1], s[1], -2))
    k.label("cont")
    k.emit(s_cmp_eq_i32(s[1], 0))
    k.emit(s_cbranch_scc0(), target="loop")
    k.emit(s_endpgm())
    k.emit(s_code_end())
    run_asm("loop_if", k)
  def test_loop_break(self):
    # loop with an early-exit edge out of the body
    k = Kernel(arch=Device["AMD"].arch)
    k.label("entry")
    k.emit(s_mov_b32(s[1], 8))
    k.label("loop")
    k.emit(s_add_u32(s[1], s[1], -1))
    k.emit(s_cmp_eq_i32(s[1], 5))
    k.emit(s_cbranch_scc1(), target="break")
    k.emit(s_cmp_eq_i32(s[1], 0))
    k.emit(s_cbranch_scc0(), target="loop")
    k.label("break")
    k.emit(s_endpgm())
    k.emit(s_code_end())
    run_asm("loop_break", k)
  def test_switch(self):
    # three-way dispatch, all cases rejoining at a single block
    k = Kernel(arch=Device["AMD"].arch)
    k.label("entry")
    k.emit(s_cmp_eq_i32(s[0], 0))
    k.emit(s_cbranch_scc1(), target="case0")
    k.emit(s_cmp_eq_i32(s[0], 1))
    k.emit(s_cbranch_scc1(), target="case1")
    k.emit(s_branch(), target="case2")
    k.label("case0")
    k.emit(s_nop(0))
    k.emit(s_branch(), target="join")
    k.label("case1")
    k.emit(s_nop(1))
    k.emit(s_branch(), target="join")
    k.label("case2")
    k.emit(s_nop(2))
    k.emit(s_branch(), target="join")
    k.label("join")
    k.emit(s_endpgm())
    k.emit(s_code_end())
    run_asm("switch_case", k)
  def test_ping_pong(self):
    # irreducible flow: two blocks that can branch into each other
    k = Kernel(arch=Device["AMD"].arch)
    k.label("entry")
    k.emit(s_cmp_eq_i32(s[0], 0))
    k.emit(s_cbranch_scc1(), target="ping")
    k.emit(s_branch(), target="pong")
    k.label("ping")
    k.emit(s_cmp_eq_i32(s[1], 0))
    k.emit(s_cbranch_scc1(), target="pong")
    k.emit(s_branch(), target="end")
    k.label("pong")
    k.emit(s_cmp_eq_i32(s[2], 0))
    k.emit(s_cbranch_scc1(), target="ping")
    k.label("end")
    k.emit(s_endpgm())
    k.emit(s_code_end())
    run_asm("ping_pong", k)
  def test_colored_blocks(self):
    # chain of N independent loops — stresses per-block coloring in the viz
    N = 10
    k = Kernel(arch=Device["AMD"].arch)
    k.label("entry")
    k.emit(s_branch(), target="init0")
    for i in range(N):
      loop = f"loop{i}"
      k.label(f"init{i}")
      k.emit(s_mov_b32(s[1], i + 1))
      k.emit(s_branch(), target=loop)
      k.label(loop)
      k.emit(s_nop(i & 7))
      k.emit(s_add_u32(s[1], s[1], -1))
      k.emit(s_cmp_eq_i32(s[1], 0))
      k.emit(s_cbranch_scc0(), target=loop)
      k.emit(s_branch(), target=f"init{i+1}" if i + 1 < N else "end")
    k.label("end")
    k.emit(s_endpgm())
    k.emit(s_code_end())
    run_asm("test_colored_blocks", k)
  def test_jump_back_to_end(self):
    # forward branch over the exit block, then a jump back to it
    k = Kernel(arch=Device["AMD"].arch)
    k.label("entry")
    k.emit(s_mov_b32(s[1], 2))
    k.emit(s_cbranch_execz(), target="loop")
    k.label("end")
    k.emit(s_endpgm())
    k.label("loop")
    k.emit(s_add_u32(s[1], s[1], -1))
    k.emit(s_cmp_eq_i32(s[1], 0))
    k.emit(s_branch(), target="end")
    k.emit(s_code_end())
    run_asm("jump_back_to_end", k)
  def test_hit_count(self):
    # a block reached both by fallthrough and by branch
    k = Kernel(arch=Device["AMD"].arch)
    k.label("entry")
    k.emit(s_mov_b32(s[1], 1))
    k.emit(s_branch(), target="alt")
    k.label("continue")
    k.emit(s_mov_b32(s[2], 2))
    k.emit(s_add_u32(s[1], s[1], s[2]))
    k.label("alt")
    k.emit(s_add_u32(s[1], s[1], -1))
    k.emit(s_endpgm())
    k.emit(s_code_end())
    run_asm("test_hit_count", k)
if __name__ == "__main__":
  # allow running this test file directly
  unittest.main()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/testextra/test_cfg_viz.py",
"license": "MIT License",
"lines": 193,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:test/unit/test_llm_moe.py | import unittest
import numpy as np
from tinygrad import Tensor
class TestMoEFeedForward(unittest.TestCase):
  """Checks the mixture-of-experts feed-forward path of TransformerBlock with
  hand-crafted identity-like expert weights so the expected output is computable."""
  def test_moe_feed_forward(self):
    from tinygrad.apps.llm import TransformerBlock
    dim, hidden, n_heads = 8, 16, 2
    num_experts, k = 4, 2
    block = TransformerBlock(dim, hidden, n_heads, n_heads, norm_eps=1e-5, head_dim=dim//n_heads,
                             rope_theta=10000, max_context=16, num_experts=num_experts, num_experts_per_tok=k)
    # set up weights: gate scales by (expert_id+1), up/down are identity-ish, router picks experts 0,2
    block.ffn_gate_exps.weight = Tensor.stack(*[Tensor.eye(hidden, dim) * (i + 1) for i in range(num_experts)])
    block.ffn_up_exps.weight = Tensor.stack(*[Tensor.eye(hidden, dim) for _ in range(num_experts)])
    block.ffn_down_exps.weight = Tensor.stack(*[Tensor.eye(dim, hidden) for _ in range(num_experts)])
    block.ffn_gate_inp.weight = Tensor([[1, 0, 1, 0]] * dim).T  # router strongly prefers experts 0 and 2
    block.ffn_norm.weight = Tensor.ones(dim)  # identity norm
    # input of ones -> after norm still ~ones -> experts 0,2 selected -> weighted sum of silu outputs
    h = Tensor.ones(1, 1, dim)
    out = block._feed_forward(h)
    # expected: residual + moe_output ≈ 1 + avg(silu(1), silu(3))
    expected = 1 + (Tensor([1.0]).silu().item() + Tensor([3.0]).silu().item()) / 2
    np.testing.assert_allclose(out.numpy()[0, 0, 0], expected, rtol=1e-2)
  def test_moe_feed_forward_batched(self):
    # same computation as above, but with batch and sequence dimensions > 1
    from tinygrad.apps.llm import TransformerBlock
    dim, hidden, n_heads = 8, 16, 2
    num_experts, k = 4, 2
    block = TransformerBlock(dim, hidden, n_heads, n_heads, norm_eps=1e-5, head_dim=dim//n_heads,
                             rope_theta=10000, max_context=16, num_experts=num_experts, num_experts_per_tok=k)
    # same setup as BS=1 test
    block.ffn_gate_exps.weight = Tensor.stack(*[Tensor.eye(hidden, dim) * (i + 1) for i in range(num_experts)])
    block.ffn_up_exps.weight = Tensor.stack(*[Tensor.eye(hidden, dim) for _ in range(num_experts)])
    block.ffn_down_exps.weight = Tensor.stack(*[Tensor.eye(dim, hidden) for _ in range(num_experts)])
    block.ffn_gate_inp.weight = Tensor([[1, 0, 1, 0]] * dim).T
    block.ffn_norm.weight = Tensor.ones(dim)
    # test with BS=2, T=3
    h = Tensor.ones(2, 3, dim)
    out = block._feed_forward(h)
    # all outputs should match the BS=1 expected value
    expected = 1 + (Tensor([1.0]).silu().item() + Tensor([3.0]).silu().item()) / 2
    np.testing.assert_allclose(out.numpy(), expected, rtol=1e-2)
if __name__ == '__main__':
  # allow running this test file directly
  unittest.main()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/unit/test_llm_moe.py",
"license": "MIT License",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:extra/amdpci/hive_reset.py | #!/usr/bin/env python3
import os
from tinygrad.helpers import Context
from tinygrad.runtime.support.system import System, PCIDevice, PCIDevImplBase
from tinygrad.runtime.support.hcq import FileIOInterface
from tinygrad.runtime.support.am.amdev import AMDev
if __name__ == "__main__":
  # scan the PCI bus for AMD (vendor 0x1002) GPUs with the target device ids
  gpus = System.pci_scan_bus(0x1002, [(0xffff, [0x74a1, 0x75a0])])
  for gpu in gpus:
    # refuse to touch devices still bound to the amdgpu kernel driver
    drv_path = f"/sys/bus/pci/devices/{gpu}/driver"
    if FileIOInterface.exists(drv_path) and os.path.basename(os.readlink(drv_path)) == "amdgpu":
      raise RuntimeError(f"amdgpu is bound to {gpu}. Stopping...")
  pcidevs = [PCIDevice("AM", gpu, bars=[0, 2, 5]) for gpu in gpus]
  amdevs = []
  with Context(DEBUG=2):
    # bring up every device in reset mode first, then reset the whole hive together
    for pcidev in pcidevs:
      amdevs.append(AMDev(pcidev, reset_mode=True))
  for amdev in amdevs: amdev.smu.mode1_reset()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "extra/amdpci/hive_reset.py",
"license": "MIT License",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
tinygrad/tinygrad:tinygrad/runtime/autogen/am/smu_v13_0_6.py | # mypy: disable-error-code="empty-body"
from __future__ import annotations
import ctypes
from typing import Annotated, Literal, TypeAlias
from tinygrad.runtime.support.c import _IO, _IOW, _IOR, _IOWR
from tinygrad.runtime.support import c
# 32-bit scalar typedefs used by the SMU message interface
PPSMC_Result: TypeAlias = Annotated[int, ctypes.c_uint32]
PPSMC_MSG: TypeAlias = Annotated[int, ctypes.c_uint32]
# FEATURE_LIST_e: generated enumeration of SMU v13.0.6 features (values 0..45, plus NUM_FEATURES).
# Autogenerated file — do not edit the values by hand.
class FEATURE_LIST_e(Annotated[int, ctypes.c_uint32], c.Enum): pass
FEATURE_DATA_CALCULATION = FEATURE_LIST_e.define('FEATURE_DATA_CALCULATION', 0)
FEATURE_DPM_CCLK = FEATURE_LIST_e.define('FEATURE_DPM_CCLK', 1)
FEATURE_DPM_FCLK = FEATURE_LIST_e.define('FEATURE_DPM_FCLK', 2)
FEATURE_DPM_GFXCLK = FEATURE_LIST_e.define('FEATURE_DPM_GFXCLK', 3)
FEATURE_DPM_LCLK = FEATURE_LIST_e.define('FEATURE_DPM_LCLK', 4)
FEATURE_DPM_SOCCLK = FEATURE_LIST_e.define('FEATURE_DPM_SOCCLK', 5)
FEATURE_DPM_UCLK = FEATURE_LIST_e.define('FEATURE_DPM_UCLK', 6)
FEATURE_DPM_VCN = FEATURE_LIST_e.define('FEATURE_DPM_VCN', 7)
FEATURE_DPM_XGMI = FEATURE_LIST_e.define('FEATURE_DPM_XGMI', 8)
FEATURE_DS_FCLK = FEATURE_LIST_e.define('FEATURE_DS_FCLK', 9)
FEATURE_DS_GFXCLK = FEATURE_LIST_e.define('FEATURE_DS_GFXCLK', 10)
FEATURE_DS_LCLK = FEATURE_LIST_e.define('FEATURE_DS_LCLK', 11)
FEATURE_DS_MP0CLK = FEATURE_LIST_e.define('FEATURE_DS_MP0CLK', 12)
FEATURE_DS_MP1CLK = FEATURE_LIST_e.define('FEATURE_DS_MP1CLK', 13)
FEATURE_DS_MPIOCLK = FEATURE_LIST_e.define('FEATURE_DS_MPIOCLK', 14)
FEATURE_DS_SOCCLK = FEATURE_LIST_e.define('FEATURE_DS_SOCCLK', 15)
FEATURE_DS_VCN = FEATURE_LIST_e.define('FEATURE_DS_VCN', 16)
FEATURE_APCC_DFLL = FEATURE_LIST_e.define('FEATURE_APCC_DFLL', 17)
FEATURE_APCC_PLUS = FEATURE_LIST_e.define('FEATURE_APCC_PLUS', 18)
FEATURE_DF_CSTATE = FEATURE_LIST_e.define('FEATURE_DF_CSTATE', 19)
FEATURE_CC6 = FEATURE_LIST_e.define('FEATURE_CC6', 20)
FEATURE_PC6 = FEATURE_LIST_e.define('FEATURE_PC6', 21)
FEATURE_CPPC = FEATURE_LIST_e.define('FEATURE_CPPC', 22)
FEATURE_PPT = FEATURE_LIST_e.define('FEATURE_PPT', 23)
FEATURE_TDC = FEATURE_LIST_e.define('FEATURE_TDC', 24)
FEATURE_THERMAL = FEATURE_LIST_e.define('FEATURE_THERMAL', 25)
FEATURE_SOC_PCC = FEATURE_LIST_e.define('FEATURE_SOC_PCC', 26)
FEATURE_CCD_PCC = FEATURE_LIST_e.define('FEATURE_CCD_PCC', 27)
FEATURE_CCD_EDC = FEATURE_LIST_e.define('FEATURE_CCD_EDC', 28)
FEATURE_PROCHOT = FEATURE_LIST_e.define('FEATURE_PROCHOT', 29)
FEATURE_DVO_CCLK = FEATURE_LIST_e.define('FEATURE_DVO_CCLK', 30)
FEATURE_FDD_AID_HBM = FEATURE_LIST_e.define('FEATURE_FDD_AID_HBM', 31)
FEATURE_FDD_AID_SOC = FEATURE_LIST_e.define('FEATURE_FDD_AID_SOC', 32)
FEATURE_FDD_XCD_EDC = FEATURE_LIST_e.define('FEATURE_FDD_XCD_EDC', 33)
FEATURE_FDD_XCD_XVMIN = FEATURE_LIST_e.define('FEATURE_FDD_XCD_XVMIN', 34)
FEATURE_FW_CTF = FEATURE_LIST_e.define('FEATURE_FW_CTF', 35)
FEATURE_GFXOFF = FEATURE_LIST_e.define('FEATURE_GFXOFF', 36)
FEATURE_SMU_CG = FEATURE_LIST_e.define('FEATURE_SMU_CG', 37)
FEATURE_PSI7 = FEATURE_LIST_e.define('FEATURE_PSI7', 38)
FEATURE_CSTATE_BOOST = FEATURE_LIST_e.define('FEATURE_CSTATE_BOOST', 39)
FEATURE_XGMI_PER_LINK_PWR_DOWN = FEATURE_LIST_e.define('FEATURE_XGMI_PER_LINK_PWR_DOWN', 40)
FEATURE_CXL_QOS = FEATURE_LIST_e.define('FEATURE_CXL_QOS', 41)
FEATURE_SOC_DC_RTC = FEATURE_LIST_e.define('FEATURE_SOC_DC_RTC', 42)
FEATURE_GFX_DC_RTC = FEATURE_LIST_e.define('FEATURE_GFX_DC_RTC', 43)
FEATURE_DVM_MIN_PSM = FEATURE_LIST_e.define('FEATURE_DVM_MIN_PSM', 44)
FEATURE_PRC = FEATURE_LIST_e.define('FEATURE_PRC', 45)
NUM_FEATURES = FEATURE_LIST_e.define('NUM_FEATURES', 46)
# PCIE_LINK_SPEED_INDEX_TABLE_e: generated enumeration of PCIe link-speed table indices
class PCIE_LINK_SPEED_INDEX_TABLE_e(Annotated[int, ctypes.c_uint32], c.Enum): pass
PCIE_LINK_SPEED_INDEX_TABLE_GEN1 = PCIE_LINK_SPEED_INDEX_TABLE_e.define('PCIE_LINK_SPEED_INDEX_TABLE_GEN1', 0)
PCIE_LINK_SPEED_INDEX_TABLE_GEN2 = PCIE_LINK_SPEED_INDEX_TABLE_e.define('PCIE_LINK_SPEED_INDEX_TABLE_GEN2', 1)
PCIE_LINK_SPEED_INDEX_TABLE_GEN3 = PCIE_LINK_SPEED_INDEX_TABLE_e.define('PCIE_LINK_SPEED_INDEX_TABLE_GEN3', 2)
PCIE_LINK_SPEED_INDEX_TABLE_GEN4 = PCIE_LINK_SPEED_INDEX_TABLE_e.define('PCIE_LINK_SPEED_INDEX_TABLE_GEN4', 3)
PCIE_LINK_SPEED_INDEX_TABLE_GEN4_ESM = PCIE_LINK_SPEED_INDEX_TABLE_e.define('PCIE_LINK_SPEED_INDEX_TABLE_GEN4_ESM', 4)
PCIE_LINK_SPEED_INDEX_TABLE_GEN5 = PCIE_LINK_SPEED_INDEX_TABLE_e.define('PCIE_LINK_SPEED_INDEX_TABLE_GEN5', 5)
PCIE_LINK_SPEED_INDEX_TABLE_COUNT = PCIE_LINK_SPEED_INDEX_TABLE_e.define('PCIE_LINK_SPEED_INDEX_TABLE_COUNT', 6)
# GFX_GUARDBAND_e: generated enumeration of voltage guardband slots (cold/mid/hot x 8)
class GFX_GUARDBAND_e(Annotated[int, ctypes.c_uint32], c.Enum): pass
VOLTAGE_COLD_0 = GFX_GUARDBAND_e.define('VOLTAGE_COLD_0', 0)
VOLTAGE_COLD_1 = GFX_GUARDBAND_e.define('VOLTAGE_COLD_1', 1)
VOLTAGE_COLD_2 = GFX_GUARDBAND_e.define('VOLTAGE_COLD_2', 2)
VOLTAGE_COLD_3 = GFX_GUARDBAND_e.define('VOLTAGE_COLD_3', 3)
VOLTAGE_COLD_4 = GFX_GUARDBAND_e.define('VOLTAGE_COLD_4', 4)
VOLTAGE_COLD_5 = GFX_GUARDBAND_e.define('VOLTAGE_COLD_5', 5)
VOLTAGE_COLD_6 = GFX_GUARDBAND_e.define('VOLTAGE_COLD_6', 6)
VOLTAGE_COLD_7 = GFX_GUARDBAND_e.define('VOLTAGE_COLD_7', 7)
VOLTAGE_MID_0 = GFX_GUARDBAND_e.define('VOLTAGE_MID_0', 8)
VOLTAGE_MID_1 = GFX_GUARDBAND_e.define('VOLTAGE_MID_1', 9)
VOLTAGE_MID_2 = GFX_GUARDBAND_e.define('VOLTAGE_MID_2', 10)
VOLTAGE_MID_3 = GFX_GUARDBAND_e.define('VOLTAGE_MID_3', 11)
VOLTAGE_MID_4 = GFX_GUARDBAND_e.define('VOLTAGE_MID_4', 12)
VOLTAGE_MID_5 = GFX_GUARDBAND_e.define('VOLTAGE_MID_5', 13)
VOLTAGE_MID_6 = GFX_GUARDBAND_e.define('VOLTAGE_MID_6', 14)
VOLTAGE_MID_7 = GFX_GUARDBAND_e.define('VOLTAGE_MID_7', 15)
VOLTAGE_HOT_0 = GFX_GUARDBAND_e.define('VOLTAGE_HOT_0', 16)
VOLTAGE_HOT_1 = GFX_GUARDBAND_e.define('VOLTAGE_HOT_1', 17)
VOLTAGE_HOT_2 = GFX_GUARDBAND_e.define('VOLTAGE_HOT_2', 18)
VOLTAGE_HOT_3 = GFX_GUARDBAND_e.define('VOLTAGE_HOT_3', 19)
VOLTAGE_HOT_4 = GFX_GUARDBAND_e.define('VOLTAGE_HOT_4', 20)
VOLTAGE_HOT_5 = GFX_GUARDBAND_e.define('VOLTAGE_HOT_5', 21)
VOLTAGE_HOT_6 = GFX_GUARDBAND_e.define('VOLTAGE_HOT_6', 22)
VOLTAGE_HOT_7 = GFX_GUARDBAND_e.define('VOLTAGE_HOT_7', 23)
VOLTAGE_GUARDBAND_COUNT = GFX_GUARDBAND_e.define('VOLTAGE_GUARDBAND_COUNT', 24)
# Generated layout of the SMU metrics table (version 0). The second argument of each
# Annotated[...] is the field's byte offset within the SIZE-byte table (offsets are
# consistent with each field's width). NOTE(review): the uint32_t/uint64_t aliases are
# defined after this class — legal at class-creation time because of
# `from __future__ import annotations` (lazy annotations); c.record presumably resolves
# them later — confirm if editing by hand (this file is autogenerated).
@c.record
class MetricsTableV0_t(c.Struct):
  SIZE = 2268
  AccumulationCounter: Annotated[uint32_t, 0]
  MaxSocketTemperature: Annotated[uint32_t, 4]
  MaxVrTemperature: Annotated[uint32_t, 8]
  MaxHbmTemperature: Annotated[uint32_t, 12]
  MaxSocketTemperatureAcc: Annotated[uint64_t, 16]
  MaxVrTemperatureAcc: Annotated[uint64_t, 24]
  MaxHbmTemperatureAcc: Annotated[uint64_t, 32]
  SocketPowerLimit: Annotated[uint32_t, 40]
  MaxSocketPowerLimit: Annotated[uint32_t, 44]
  SocketPower: Annotated[uint32_t, 48]
  Timestamp: Annotated[uint64_t, 52]
  SocketEnergyAcc: Annotated[uint64_t, 60]
  CcdEnergyAcc: Annotated[uint64_t, 68]
  XcdEnergyAcc: Annotated[uint64_t, 76]
  AidEnergyAcc: Annotated[uint64_t, 84]
  HbmEnergyAcc: Annotated[uint64_t, 92]
  CclkFrequencyLimit: Annotated[uint32_t, 100]
  GfxclkFrequencyLimit: Annotated[uint32_t, 104]
  FclkFrequency: Annotated[uint32_t, 108]
  UclkFrequency: Annotated[uint32_t, 112]
  SocclkFrequency: Annotated[c.Array[uint32_t, Literal[4]], 116]
  VclkFrequency: Annotated[c.Array[uint32_t, Literal[4]], 132]
  DclkFrequency: Annotated[c.Array[uint32_t, Literal[4]], 148]
  LclkFrequency: Annotated[c.Array[uint32_t, Literal[4]], 164]
  GfxclkFrequencyAcc: Annotated[c.Array[uint64_t, Literal[8]], 180]
  CclkFrequencyAcc: Annotated[c.Array[uint64_t, Literal[96]], 244]
  MaxCclkFrequency: Annotated[uint32_t, 1012]
  MinCclkFrequency: Annotated[uint32_t, 1016]
  MaxGfxclkFrequency: Annotated[uint32_t, 1020]
  MinGfxclkFrequency: Annotated[uint32_t, 1024]
  FclkFrequencyTable: Annotated[c.Array[uint32_t, Literal[4]], 1028]
  UclkFrequencyTable: Annotated[c.Array[uint32_t, Literal[4]], 1044]
  SocclkFrequencyTable: Annotated[c.Array[uint32_t, Literal[4]], 1060]
  VclkFrequencyTable: Annotated[c.Array[uint32_t, Literal[4]], 1076]
  DclkFrequencyTable: Annotated[c.Array[uint32_t, Literal[4]], 1092]
  LclkFrequencyTable: Annotated[c.Array[uint32_t, Literal[4]], 1108]
  MaxLclkDpmRange: Annotated[uint32_t, 1124]
  MinLclkDpmRange: Annotated[uint32_t, 1128]
  XgmiWidth: Annotated[uint32_t, 1132]
  XgmiBitrate: Annotated[uint32_t, 1136]
  XgmiReadBandwidthAcc: Annotated[c.Array[uint64_t, Literal[8]], 1140]
  XgmiWriteBandwidthAcc: Annotated[c.Array[uint64_t, Literal[8]], 1204]
  SocketC0Residency: Annotated[uint32_t, 1268]
  SocketGfxBusy: Annotated[uint32_t, 1272]
  DramBandwidthUtilization: Annotated[uint32_t, 1276]
  SocketC0ResidencyAcc: Annotated[uint64_t, 1280]
  SocketGfxBusyAcc: Annotated[uint64_t, 1288]
  DramBandwidthAcc: Annotated[uint64_t, 1296]
  MaxDramBandwidth: Annotated[uint32_t, 1304]
  DramBandwidthUtilizationAcc: Annotated[uint64_t, 1308]
  PcieBandwidthAcc: Annotated[c.Array[uint64_t, Literal[4]], 1316]
  ProchotResidencyAcc: Annotated[uint32_t, 1348]
  PptResidencyAcc: Annotated[uint32_t, 1352]
  SocketThmResidencyAcc: Annotated[uint32_t, 1356]
  VrThmResidencyAcc: Annotated[uint32_t, 1360]
  HbmThmResidencyAcc: Annotated[uint32_t, 1364]
  GfxLockXCDMak: Annotated[uint32_t, 1368]
  GfxclkFrequency: Annotated[c.Array[uint32_t, Literal[8]], 1372]
  PublicSerialNumber_AID: Annotated[c.Array[uint64_t, Literal[4]], 1404]
  PublicSerialNumber_XCD: Annotated[c.Array[uint64_t, Literal[8]], 1436]
  PublicSerialNumber_CCD: Annotated[c.Array[uint64_t, Literal[12]], 1500]
  XgmiReadDataSizeAcc: Annotated[c.Array[uint64_t, Literal[8]], 1596]
  XgmiWriteDataSizeAcc: Annotated[c.Array[uint64_t, Literal[8]], 1660]
  PcieBandwidth: Annotated[c.Array[uint32_t, Literal[4]], 1724]
  PCIeL0ToRecoveryCountAcc: Annotated[uint32_t, 1740]
  PCIenReplayAAcc: Annotated[uint32_t, 1744]
  PCIenReplayARolloverCountAcc: Annotated[uint32_t, 1748]
  PCIeNAKSentCountAcc: Annotated[uint32_t, 1752]
  PCIeNAKReceivedCountAcc: Annotated[uint32_t, 1756]
  VcnBusy: Annotated[c.Array[uint32_t, Literal[4]], 1760]
  JpegBusy: Annotated[c.Array[uint32_t, Literal[32]], 1776]
  PCIeLinkSpeed: Annotated[uint32_t, 1904]
  PCIeLinkWidth: Annotated[uint32_t, 1908]
  GfxBusy: Annotated[c.Array[uint32_t, Literal[8]], 1912]
  GfxBusyAcc: Annotated[c.Array[uint64_t, Literal[8]], 1944]
  PCIeOtherEndRecoveryAcc: Annotated[uint32_t, 2008]
  GfxclkBelowHostLimitPptAcc: Annotated[c.Array[uint64_t, Literal[8]], 2012]
  GfxclkBelowHostLimitThmAcc: Annotated[c.Array[uint64_t, Literal[8]], 2076]
  GfxclkBelowHostLimitTotalAcc: Annotated[c.Array[uint64_t, Literal[8]], 2140]
  GfxclkLowUtilizationAcc: Annotated[c.Array[uint64_t, Literal[8]], 2204]
# C-style integer typedefs referenced by the metrics-table structs
uint32_t: TypeAlias = Annotated[int, ctypes.c_uint32]
uint64_t: TypeAlias = Annotated[int, ctypes.c_uint64]
# Generated layout of the SMU metrics table (version 1) — same field convention as V0:
# the integer in each Annotated[...] is the field's byte offset within the SIZE-byte table.
@c.record
class MetricsTableV1_t(c.Struct):
  SIZE = 1868
  AccumulationCounter: Annotated[uint32_t, 0]
  MaxSocketTemperature: Annotated[uint32_t, 4]
  MaxVrTemperature: Annotated[uint32_t, 8]
  MaxHbmTemperature: Annotated[uint32_t, 12]
  MaxSocketTemperatureAcc: Annotated[uint64_t, 16]
  MaxVrTemperatureAcc: Annotated[uint64_t, 24]
  MaxHbmTemperatureAcc: Annotated[uint64_t, 32]
  SocketPowerLimit: Annotated[uint32_t, 40]
  MaxSocketPowerLimit: Annotated[uint32_t, 44]
  SocketPower: Annotated[uint32_t, 48]
  Timestamp: Annotated[uint64_t, 52]
  SocketEnergyAcc: Annotated[uint64_t, 60]
  CcdEnergyAcc: Annotated[uint64_t, 68]
  XcdEnergyAcc: Annotated[uint64_t, 76]
  AidEnergyAcc: Annotated[uint64_t, 84]
  HbmEnergyAcc: Annotated[uint64_t, 92]
  CclkFrequencyLimit: Annotated[uint32_t, 100]
  GfxclkFrequencyLimit: Annotated[uint32_t, 104]
  FclkFrequency: Annotated[uint32_t, 108]
  UclkFrequency: Annotated[uint32_t, 112]
  SocclkFrequency: Annotated[c.Array[uint32_t, Literal[4]], 116]
  VclkFrequency: Annotated[c.Array[uint32_t, Literal[4]], 132]
  DclkFrequency: Annotated[c.Array[uint32_t, Literal[4]], 148]
  LclkFrequency: Annotated[c.Array[uint32_t, Literal[4]], 164]
  GfxclkFrequencyAcc: Annotated[c.Array[uint64_t, Literal[8]], 180]
  CclkFrequencyAcc: Annotated[c.Array[uint64_t, Literal[96]], 244]
  MaxCclkFrequency: Annotated[uint32_t, 1012]
  MinCclkFrequency: Annotated[uint32_t, 1016]
  MaxGfxclkFrequency: Annotated[uint32_t, 1020]
  MinGfxclkFrequency: Annotated[uint32_t, 1024]
  FclkFrequencyTable: Annotated[c.Array[uint32_t, Literal[4]], 1028]
  UclkFrequencyTable: Annotated[c.Array[uint32_t, Literal[4]], 1044]
  SocclkFrequencyTable: Annotated[c.Array[uint32_t, Literal[4]], 1060]
  VclkFrequencyTable: Annotated[c.Array[uint32_t, Literal[4]], 1076]
  DclkFrequencyTable: Annotated[c.Array[uint32_t, Literal[4]], 1092]
  LclkFrequencyTable: Annotated[c.Array[uint32_t, Literal[4]], 1108]
  MaxLclkDpmRange: Annotated[uint32_t, 1124]
  MinLclkDpmRange: Annotated[uint32_t, 1128]
  XgmiWidth: Annotated[uint32_t, 1132]
  XgmiBitrate: Annotated[uint32_t, 1136]
  XgmiReadBandwidthAcc: Annotated[c.Array[uint64_t, Literal[8]], 1140]
  XgmiWriteBandwidthAcc: Annotated[c.Array[uint64_t, Literal[8]], 1204]
  SocketC0Residency: Annotated[uint32_t, 1268]
  SocketGfxBusy: Annotated[uint32_t, 1272]
  DramBandwidthUtilization: Annotated[uint32_t, 1276]
  SocketC0ResidencyAcc: Annotated[uint64_t, 1280]
  SocketGfxBusyAcc: Annotated[uint64_t, 1288]
  DramBandwidthAcc: Annotated[uint64_t, 1296]
  MaxDramBandwidth: Annotated[uint32_t, 1304]
  DramBandwidthUtilizationAcc: Annotated[uint64_t, 1308]
  PcieBandwidthAcc: Annotated[c.Array[uint64_t, Literal[4]], 1316]
  ProchotResidencyAcc: Annotated[uint32_t, 1348]
  PptResidencyAcc: Annotated[uint32_t, 1352]
  SocketThmResidencyAcc: Annotated[uint32_t, 1356]
  VrThmResidencyAcc: Annotated[uint32_t, 1360]
  HbmThmResidencyAcc: Annotated[uint32_t, 1364]
  GfxLockXCDMak: Annotated[uint32_t, 1368]
  GfxclkFrequency: Annotated[c.Array[uint32_t, Literal[8]], 1372]
  PublicSerialNumber_AID: Annotated[c.Array[uint64_t, Literal[4]], 1404]
  PublicSerialNumber_XCD: Annotated[c.Array[uint64_t, Literal[8]], 1436]
  PublicSerialNumber_CCD: Annotated[c.Array[uint64_t, Literal[12]], 1500]
  XgmiReadDataSizeAcc: Annotated[c.Array[uint64_t, Literal[8]], 1596]
  XgmiWriteDataSizeAcc: Annotated[c.Array[uint64_t, Literal[8]], 1660]
  VcnBusy: Annotated[c.Array[uint32_t, Literal[4]], 1724]
  JpegBusy: Annotated[c.Array[uint32_t, Literal[32]], 1740]
# NOTE(review): this section looks machine-generated (offset-annotated records,
# '# type: ignore' constants, trailing c.init_records()) -- prefer fixing the
# generator over hand-editing these definitions.
# SMU v13.0.6 firmware metrics table, v2 layout. The second argument of each
# Annotated[] is the field's byte offset inside the record; SIZE is the total
# record size in bytes (last field GfxclkBelowHostLimitAcc: 1136 + 8*8 = 1200).
@c.record
class MetricsTableV2_t(c.Struct):
  SIZE = 1200
  AccumulationCounter: Annotated[uint64_t, 0]
  MaxSocketTemperature: Annotated[uint32_t, 8]
  MaxVrTemperature: Annotated[uint32_t, 12]
  MaxHbmTemperature: Annotated[uint32_t, 16]
  MaxSocketTemperatureAcc: Annotated[uint64_t, 20]
  MaxVrTemperatureAcc: Annotated[uint64_t, 28]
  MaxHbmTemperatureAcc: Annotated[uint64_t, 36]
  SocketPowerLimit: Annotated[uint32_t, 44]
  MaxSocketPowerLimit: Annotated[uint32_t, 48]
  SocketPower: Annotated[uint32_t, 52]
  Timestamp: Annotated[uint64_t, 56]
  SocketEnergyAcc: Annotated[uint64_t, 64]
  CcdEnergyAcc: Annotated[uint64_t, 72]
  XcdEnergyAcc: Annotated[uint64_t, 80]
  AidEnergyAcc: Annotated[uint64_t, 88]
  HbmEnergyAcc: Annotated[uint64_t, 96]
  GfxclkFrequencyLimit: Annotated[uint32_t, 104]
  FclkFrequency: Annotated[uint32_t, 108]
  UclkFrequency: Annotated[uint32_t, 112]
  SocclkFrequency: Annotated[c.Array[uint32_t, Literal[4]], 116]
  VclkFrequency: Annotated[c.Array[uint32_t, Literal[4]], 132]
  DclkFrequency: Annotated[c.Array[uint32_t, Literal[4]], 148]
  LclkFrequency: Annotated[c.Array[uint32_t, Literal[4]], 164]
  GfxclkFrequencyAcc: Annotated[c.Array[uint64_t, Literal[8]], 180]
  MaxGfxclkFrequency: Annotated[uint32_t, 244]
  MinGfxclkFrequency: Annotated[uint32_t, 248]
  FclkFrequencyTable: Annotated[c.Array[uint32_t, Literal[4]], 252]
  UclkFrequencyTable: Annotated[c.Array[uint32_t, Literal[4]], 268]
  SocclkFrequencyTable: Annotated[c.Array[uint32_t, Literal[4]], 284]
  VclkFrequencyTable: Annotated[c.Array[uint32_t, Literal[4]], 300]
  DclkFrequencyTable: Annotated[c.Array[uint32_t, Literal[4]], 316]
  LclkFrequencyTable: Annotated[c.Array[uint32_t, Literal[4]], 332]
  MaxLclkDpmRange: Annotated[uint32_t, 348]
  MinLclkDpmRange: Annotated[uint32_t, 352]
  XgmiWidth: Annotated[uint32_t, 356]
  XgmiBitrate: Annotated[uint32_t, 360]
  XgmiReadBandwidthAcc: Annotated[c.Array[uint64_t, Literal[8]], 364]
  XgmiWriteBandwidthAcc: Annotated[c.Array[uint64_t, Literal[8]], 428]
  SocketGfxBusy: Annotated[uint32_t, 492]
  DramBandwidthUtilization: Annotated[uint32_t, 496]
  SocketC0ResidencyAcc: Annotated[uint64_t, 500]
  SocketGfxBusyAcc: Annotated[uint64_t, 508]
  DramBandwidthAcc: Annotated[uint64_t, 516]
  MaxDramBandwidth: Annotated[uint32_t, 524]
  DramBandwidthUtilizationAcc: Annotated[uint64_t, 528]
  PcieBandwidthAcc: Annotated[c.Array[uint64_t, Literal[4]], 536]
  ProchotResidencyAcc: Annotated[uint32_t, 568]
  PptResidencyAcc: Annotated[uint32_t, 572]
  SocketThmResidencyAcc: Annotated[uint32_t, 576]
  VrThmResidencyAcc: Annotated[uint32_t, 580]
  HbmThmResidencyAcc: Annotated[uint32_t, 584]
  GfxLockXCDMak: Annotated[uint32_t, 588]
  GfxclkFrequency: Annotated[c.Array[uint32_t, Literal[8]], 592]
  PublicSerialNumber_AID: Annotated[c.Array[uint64_t, Literal[4]], 624]
  PublicSerialNumber_XCD: Annotated[c.Array[uint64_t, Literal[8]], 656]
  XgmiReadDataSizeAcc: Annotated[c.Array[uint64_t, Literal[8]], 720]
  XgmiWriteDataSizeAcc: Annotated[c.Array[uint64_t, Literal[8]], 784]
  PcieBandwidth: Annotated[c.Array[uint32_t, Literal[4]], 848]
  PCIeL0ToRecoveryCountAcc: Annotated[uint32_t, 864]
  PCIenReplayAAcc: Annotated[uint32_t, 868]
  PCIenReplayARolloverCountAcc: Annotated[uint32_t, 872]
  PCIeNAKSentCountAcc: Annotated[uint32_t, 876]
  PCIeNAKReceivedCountAcc: Annotated[uint32_t, 880]
  VcnBusy: Annotated[c.Array[uint32_t, Literal[4]], 884]
  JpegBusy: Annotated[c.Array[uint32_t, Literal[32]], 900]
  PCIeLinkSpeed: Annotated[uint32_t, 1028]
  PCIeLinkWidth: Annotated[uint32_t, 1032]
  GfxBusy: Annotated[c.Array[uint32_t, Literal[8]], 1036]
  GfxBusyAcc: Annotated[c.Array[uint64_t, Literal[8]], 1068]
  PCIeOtherEndRecoveryAcc: Annotated[uint32_t, 1132]
  GfxclkBelowHostLimitAcc: Annotated[c.Array[uint64_t, Literal[8]], 1136]
# Per-VF (virtual function) metrics record, 32 bytes.
@c.record
class VfMetricsTable_t(c.Struct):
  SIZE = 32
  AccumulationCounter: Annotated[uint32_t, 0]
  InstGfxclk_TargFreq: Annotated[uint32_t, 4]
  AccGfxclk_TargFreq: Annotated[uint64_t, 8]
  AccGfxRsmuDpm_Busy: Annotated[uint64_t, 16]
  AccGfxclkBelowHostLimit: Annotated[uint64_t, 24]
# Static (non-accumulating) metrics record, 12 bytes.
@c.record
class StaticMetricsTable_t(c.Struct):
  SIZE = 12
  InputTelemetryVoltageInmV: Annotated[uint32_t, 0]
  pldmVersion: Annotated[c.Array[uint32_t, Literal[2]], 4]
# SMU software-I2C controller selection, bus speed, and command direction enums.
# Members are bound at module level via c.Enum.define (autogen convention).
class I2cControllerPort_e(Annotated[int, ctypes.c_uint32], c.Enum): pass
I2C_CONTROLLER_PORT_0 = I2cControllerPort_e.define('I2C_CONTROLLER_PORT_0', 0)
I2C_CONTROLLER_PORT_1 = I2cControllerPort_e.define('I2C_CONTROLLER_PORT_1', 1)
I2C_CONTROLLER_PORT_COUNT = I2cControllerPort_e.define('I2C_CONTROLLER_PORT_COUNT', 2)
class I2cSpeed_e(Annotated[int, ctypes.c_uint32], c.Enum): pass
# UNSUPPORTED_* entries are placeholders that keep the numeric values aligned
# with the firmware's enum layout.
UNSUPPORTED_1 = I2cSpeed_e.define('UNSUPPORTED_1', 0)
I2C_SPEED_STANDARD_100K = I2cSpeed_e.define('I2C_SPEED_STANDARD_100K', 1)
I2C_SPEED_FAST_400K = I2cSpeed_e.define('I2C_SPEED_FAST_400K', 2)
I2C_SPEED_FAST_PLUS_1M = I2cSpeed_e.define('I2C_SPEED_FAST_PLUS_1M', 3)
UNSUPPORTED_2 = I2cSpeed_e.define('UNSUPPORTED_2', 4)
UNSUPPORTED_3 = I2cSpeed_e.define('UNSUPPORTED_3', 5)
I2C_SPEED_COUNT = I2cSpeed_e.define('I2C_SPEED_COUNT', 6)
class I2cCmdType_e(Annotated[int, ctypes.c_uint32], c.Enum): pass
I2C_CMD_READ = I2cCmdType_e.define('I2C_CMD_READ', 0)
I2C_CMD_WRITE = I2cCmdType_e.define('I2C_CMD_WRITE', 1)
I2C_CMD_COUNT = I2cCmdType_e.define('I2C_CMD_COUNT', 2)
# Hardware-block error-source codes. Values are sparse (gaps between 0/5/10)
# because they mirror the vendor header's numbering.
class ERR_CODE_e(Annotated[int, ctypes.c_uint32], c.Enum): pass
CODE_DAGB0 = ERR_CODE_e.define('CODE_DAGB0', 0)
CODE_EA0 = ERR_CODE_e.define('CODE_EA0', 5)
CODE_UTCL2_ROUTER = ERR_CODE_e.define('CODE_UTCL2_ROUTER', 10)
CODE_VML2 = ERR_CODE_e.define('CODE_VML2', 11)
CODE_VML2_WALKER = ERR_CODE_e.define('CODE_VML2_WALKER', 12)
CODE_MMCANE = ERR_CODE_e.define('CODE_MMCANE', 13)
CODE_VIDD = ERR_CODE_e.define('CODE_VIDD', 14)
CODE_VIDV = ERR_CODE_e.define('CODE_VIDV', 15)
CODE_JPEG0S = ERR_CODE_e.define('CODE_JPEG0S', 16)
CODE_JPEG0D = ERR_CODE_e.define('CODE_JPEG0D', 17)
CODE_JPEG1S = ERR_CODE_e.define('CODE_JPEG1S', 18)
CODE_JPEG1D = ERR_CODE_e.define('CODE_JPEG1D', 19)
CODE_JPEG2S = ERR_CODE_e.define('CODE_JPEG2S', 20)
CODE_JPEG2D = ERR_CODE_e.define('CODE_JPEG2D', 21)
CODE_JPEG3S = ERR_CODE_e.define('CODE_JPEG3S', 22)
CODE_JPEG3D = ERR_CODE_e.define('CODE_JPEG3D', 23)
CODE_JPEG4S = ERR_CODE_e.define('CODE_JPEG4S', 24)
CODE_JPEG4D = ERR_CODE_e.define('CODE_JPEG4D', 25)
CODE_JPEG5S = ERR_CODE_e.define('CODE_JPEG5S', 26)
CODE_JPEG5D = ERR_CODE_e.define('CODE_JPEG5D', 27)
CODE_JPEG6S = ERR_CODE_e.define('CODE_JPEG6S', 28)
CODE_JPEG6D = ERR_CODE_e.define('CODE_JPEG6D', 29)
CODE_JPEG7S = ERR_CODE_e.define('CODE_JPEG7S', 30)
CODE_JPEG7D = ERR_CODE_e.define('CODE_JPEG7D', 31)
CODE_MMSCHD = ERR_CODE_e.define('CODE_MMSCHD', 32)
CODE_SDMA0 = ERR_CODE_e.define('CODE_SDMA0', 33)
CODE_SDMA1 = ERR_CODE_e.define('CODE_SDMA1', 34)
CODE_SDMA2 = ERR_CODE_e.define('CODE_SDMA2', 35)
CODE_SDMA3 = ERR_CODE_e.define('CODE_SDMA3', 36)
CODE_HDP = ERR_CODE_e.define('CODE_HDP', 37)
CODE_ATHUB = ERR_CODE_e.define('CODE_ATHUB', 38)
CODE_IH = ERR_CODE_e.define('CODE_IH', 39)
# NOTE(review): CODE_XHUB_POISON and CODE_SMN_SLVERR both map to 40 -- this
# matches the two lines below verbatim; confirm against the upstream header
# before assuming either one is authoritative for value 40.
CODE_XHUB_POISON = ERR_CODE_e.define('CODE_XHUB_POISON', 40)
CODE_SMN_SLVERR = ERR_CODE_e.define('CODE_SMN_SLVERR', 40)
CODE_WDT = ERR_CODE_e.define('CODE_WDT', 41)
CODE_UNKNOWN = ERR_CODE_e.define('CODE_UNKNOWN', 42)
CODE_COUNT = ERR_CODE_e.define('CODE_COUNT', 43)
# Graphics-core (GC) error-source codes.
class GC_ERROR_CODE_e(Annotated[int, ctypes.c_uint32], c.Enum): pass
SH_FED_CODE = GC_ERROR_CODE_e.define('SH_FED_CODE', 0)
GCEA_CODE = GC_ERROR_CODE_e.define('GCEA_CODE', 1)
SQ_CODE = GC_ERROR_CODE_e.define('SQ_CODE', 2)
LDS_CODE = GC_ERROR_CODE_e.define('LDS_CODE', 3)
GDS_CODE = GC_ERROR_CODE_e.define('GDS_CODE', 4)
SP0_CODE = GC_ERROR_CODE_e.define('SP0_CODE', 5)
SP1_CODE = GC_ERROR_CODE_e.define('SP1_CODE', 6)
TCC_CODE = GC_ERROR_CODE_e.define('TCC_CODE', 7)
TCA_CODE = GC_ERROR_CODE_e.define('TCA_CODE', 8)
TCX_CODE = GC_ERROR_CODE_e.define('TCX_CODE', 9)
CPC_CODE = GC_ERROR_CODE_e.define('CPC_CODE', 10)
CPF_CODE = GC_ERROR_CODE_e.define('CPF_CODE', 11)
CPG_CODE = GC_ERROR_CODE_e.define('CPG_CODE', 12)
SPI_CODE = GC_ERROR_CODE_e.define('SPI_CODE', 13)
RLC_CODE = GC_ERROR_CODE_e.define('RLC_CODE', 14)
SQC_CODE = GC_ERROR_CODE_e.define('SQC_CODE', 15)
TA_CODE = GC_ERROR_CODE_e.define('TA_CODE', 16)
TD_CODE = GC_ERROR_CODE_e.define('TD_CODE', 17)
TCP_CODE = GC_ERROR_CODE_e.define('TCP_CODE', 18)
TCI_CODE = GC_ERROR_CODE_e.define('TCI_CODE', 19)
GC_ROUTER_CODE = GC_ERROR_CODE_e.define('GC_ROUTER_CODE', 20)
VML2_CODE = GC_ERROR_CODE_e.define('VML2_CODE', 21)
VML2_WALKER_CODE = GC_ERROR_CODE_e.define('VML2_WALKER_CODE', 22)
ATCL2_CODE = GC_ERROR_CODE_e.define('ATCL2_CODE', 23)
GC_CANE_CODE = GC_ERROR_CODE_e.define('GC_CANE_CODE', 24)
MP5_CODE_SMN_SLVERR = GC_ERROR_CODE_e.define('MP5_CODE_SMN_SLVERR', 40)
MP5_CODE_UNKNOWN = GC_ERROR_CODE_e.define('MP5_CODE_UNKNOWN', 42)
# Software-I2C command/request records exchanged with the SMU.
@c.record
class SwI2cCmd_t(c.Struct):
  SIZE = 2
  ReadWriteData: Annotated[uint8_t, 0]
  CmdConfig: Annotated[uint8_t, 1]  # bitfield; see CMDCONFIG_*_BIT constants below
# NOTE(review): this alias is (re)declared after SwI2cCmd_t already used the
# name, so an identical earlier definition presumably exists above this chunk.
uint8_t: TypeAlias = Annotated[int, ctypes.c_ubyte]
@c.record
class SwI2cRequest_t(c.Struct):
  SIZE = 52
  I2CcontrollerPort: Annotated[uint8_t, 0]
  I2CSpeed: Annotated[uint8_t, 1]
  SlaveAddress: Annotated[uint8_t, 2]
  NumCmds: Annotated[uint8_t, 3]
  SwI2cCmds: Annotated[c.Array[SwI2cCmd_t, Literal[24]], 4]  # 24 == MAX_SW_I2C_COMMANDS
# Request plus spare/padding words, as laid out for the driver<->SMU mailbox.
@c.record
class SwI2cRequestExternal_t(c.Struct):
  SIZE = 116
  SwI2cRequest: Annotated[SwI2cRequest_t, 0]
  Spare: Annotated[c.Array[uint32_t, Literal[8]], 52]
  MmHubPadding: Annotated[c.Array[uint32_t, Literal[8]], 84]
# Clock-domain indices used by the PPSMC min/max/soft-limit messages.
class PPCLK_e(Annotated[int, ctypes.c_uint32], c.Enum): pass
PPCLK_VCLK = PPCLK_e.define('PPCLK_VCLK', 0)
PPCLK_DCLK = PPCLK_e.define('PPCLK_DCLK', 1)
PPCLK_SOCCLK = PPCLK_e.define('PPCLK_SOCCLK', 2)
PPCLK_UCLK = PPCLK_e.define('PPCLK_UCLK', 3)
PPCLK_FCLK = PPCLK_e.define('PPCLK_FCLK', 4)
PPCLK_LCLK = PPCLK_e.define('PPCLK_LCLK', 5)
PPCLK_COUNT = PPCLK_e.define('PPCLK_COUNT', 6)
class GpioIntPolarity_e(Annotated[int, ctypes.c_uint32], c.Enum): pass
GPIO_INT_POLARITY_ACTIVE_LOW = GpioIntPolarity_e.define('GPIO_INT_POLARITY_ACTIVE_LOW', 0)
GPIO_INT_POLARITY_ACTIVE_HIGH = GpioIntPolarity_e.define('GPIO_INT_POLARITY_ACTIVE_HIGH', 1)
class UCLK_DPM_MODE_e(Annotated[int, ctypes.c_uint32], c.Enum): pass
UCLK_DPM_MODE_BANDWIDTH = UCLK_DPM_MODE_e.define('UCLK_DPM_MODE_BANDWIDTH', 0)
UCLK_DPM_MODE_LATENCY = UCLK_DPM_MODE_e.define('UCLK_DPM_MODE_LATENCY', 1)
# AVFS PSM debug tables: 30 per-die samples of count and voltage, for AID and
# XCD dies respectively (identical 360-byte layouts).
@c.record
class AvfsDebugTableAid_t(c.Struct):
  SIZE = 360
  avgPsmCount: Annotated[c.Array[uint16_t, Literal[30]], 0]
  minPsmCount: Annotated[c.Array[uint16_t, Literal[30]], 60]
  avgPsmVoltage: Annotated[c.Array[Annotated[float, ctypes.c_float], Literal[30]], 120]
  minPsmVoltage: Annotated[c.Array[Annotated[float, ctypes.c_float], Literal[30]], 240]
# NOTE(review): alias declared after first use above; an earlier identical
# definition presumably exists before this chunk.
uint16_t: TypeAlias = Annotated[int, ctypes.c_uint16]
@c.record
class AvfsDebugTableXcd_t(c.Struct):
  SIZE = 360
  avgPsmCount: Annotated[c.Array[uint16_t, Literal[30]], 0]
  minPsmCount: Annotated[c.Array[uint16_t, Literal[30]], 60]
  avgPsmVoltage: Annotated[c.Array[Annotated[float, ctypes.c_float], Literal[30]], 120]
  minPsmVoltage: Annotated[c.Array[Annotated[float, ctypes.c_float], Literal[30]], 240]
# Driver-side smu power-state structures and classification enums (mirrors
# the kernel's smu_types definitions).
@c.record
class struct_smu_hw_power_state(c.Struct):
  SIZE = 4
  magic: Annotated[Annotated[int, ctypes.c_uint32], 0]
# Opaque forward declaration -- fields not described in this chunk.
class struct_smu_power_state(ctypes.Structure): pass
class enum_smu_state_ui_label(Annotated[int, ctypes.c_uint32], c.Enum): pass
SMU_STATE_UI_LABEL_NONE = enum_smu_state_ui_label.define('SMU_STATE_UI_LABEL_NONE', 0)
SMU_STATE_UI_LABEL_BATTERY = enum_smu_state_ui_label.define('SMU_STATE_UI_LABEL_BATTERY', 1)
SMU_STATE_UI_TABEL_MIDDLE_LOW = enum_smu_state_ui_label.define('SMU_STATE_UI_TABEL_MIDDLE_LOW', 2)
SMU_STATE_UI_LABEL_BALLANCED = enum_smu_state_ui_label.define('SMU_STATE_UI_LABEL_BALLANCED', 3)
SMU_STATE_UI_LABEL_MIDDLE_HIGHT = enum_smu_state_ui_label.define('SMU_STATE_UI_LABEL_MIDDLE_HIGHT', 4)
SMU_STATE_UI_LABEL_PERFORMANCE = enum_smu_state_ui_label.define('SMU_STATE_UI_LABEL_PERFORMANCE', 5)
SMU_STATE_UI_LABEL_BACO = enum_smu_state_ui_label.define('SMU_STATE_UI_LABEL_BACO', 6)
# Bitmask flags (powers of two); misspellings like CLASSIFICATIN/OVERDIRVER
# come from the upstream header and must be preserved.
class enum_smu_state_classification_flag(Annotated[int, ctypes.c_uint32], c.Enum): pass
SMU_STATE_CLASSIFICATION_FLAG_BOOT = enum_smu_state_classification_flag.define('SMU_STATE_CLASSIFICATION_FLAG_BOOT', 1)
SMU_STATE_CLASSIFICATION_FLAG_THERMAL = enum_smu_state_classification_flag.define('SMU_STATE_CLASSIFICATION_FLAG_THERMAL', 2)
SMU_STATE_CLASSIFICATIN_FLAG_LIMITED_POWER_SOURCE = enum_smu_state_classification_flag.define('SMU_STATE_CLASSIFICATIN_FLAG_LIMITED_POWER_SOURCE', 4)
SMU_STATE_CLASSIFICATION_FLAG_RESET = enum_smu_state_classification_flag.define('SMU_STATE_CLASSIFICATION_FLAG_RESET', 8)
SMU_STATE_CLASSIFICATION_FLAG_FORCED = enum_smu_state_classification_flag.define('SMU_STATE_CLASSIFICATION_FLAG_FORCED', 16)
SMU_STATE_CLASSIFICATION_FLAG_USER_3D_PERFORMANCE = enum_smu_state_classification_flag.define('SMU_STATE_CLASSIFICATION_FLAG_USER_3D_PERFORMANCE', 32)
SMU_STATE_CLASSIFICATION_FLAG_USER_2D_PERFORMANCE = enum_smu_state_classification_flag.define('SMU_STATE_CLASSIFICATION_FLAG_USER_2D_PERFORMANCE', 64)
SMU_STATE_CLASSIFICATION_FLAG_3D_PERFORMANCE = enum_smu_state_classification_flag.define('SMU_STATE_CLASSIFICATION_FLAG_3D_PERFORMANCE', 128)
SMU_STATE_CLASSIFICATION_FLAG_AC_OVERDIRVER_TEMPLATE = enum_smu_state_classification_flag.define('SMU_STATE_CLASSIFICATION_FLAG_AC_OVERDIRVER_TEMPLATE', 256)
SMU_STATE_CLASSIFICATION_FLAG_UVD = enum_smu_state_classification_flag.define('SMU_STATE_CLASSIFICATION_FLAG_UVD', 512)
SMU_STATE_CLASSIFICATION_FLAG_3D_PERFORMANCE_LOW = enum_smu_state_classification_flag.define('SMU_STATE_CLASSIFICATION_FLAG_3D_PERFORMANCE_LOW', 1024)
SMU_STATE_CLASSIFICATION_FLAG_ACPI = enum_smu_state_classification_flag.define('SMU_STATE_CLASSIFICATION_FLAG_ACPI', 2048)
SMU_STATE_CLASSIFICATION_FLAG_HD2 = enum_smu_state_classification_flag.define('SMU_STATE_CLASSIFICATION_FLAG_HD2', 4096)
SMU_STATE_CLASSIFICATION_FLAG_UVD_HD = enum_smu_state_classification_flag.define('SMU_STATE_CLASSIFICATION_FLAG_UVD_HD', 8192)
SMU_STATE_CLASSIFICATION_FLAG_UVD_SD = enum_smu_state_classification_flag.define('SMU_STATE_CLASSIFICATION_FLAG_UVD_SD', 16384)
SMU_STATE_CLASSIFICATION_FLAG_USER_DC_PERFORMANCE = enum_smu_state_classification_flag.define('SMU_STATE_CLASSIFICATION_FLAG_USER_DC_PERFORMANCE', 32768)
SMU_STATE_CLASSIFICATION_FLAG_DC_OVERDIRVER_TEMPLATE = enum_smu_state_classification_flag.define('SMU_STATE_CLASSIFICATION_FLAG_DC_OVERDIRVER_TEMPLATE', 65536)
SMU_STATE_CLASSIFICATION_FLAG_BACO = enum_smu_state_classification_flag.define('SMU_STATE_CLASSIFICATION_FLAG_BACO', 131072)
SMU_STATE_CLASSIFICATIN_FLAG_LIMITED_POWER_SOURCE2 = enum_smu_state_classification_flag.define('SMU_STATE_CLASSIFICATIN_FLAG_LIMITED_POWER_SOURCE2', 262144)
SMU_STATE_CLASSIFICATION_FLAG_ULV = enum_smu_state_classification_flag.define('SMU_STATE_CLASSIFICATION_FLAG_ULV', 524288)
SMU_STATE_CLASSIFICATION_FLAG_UVD_MVC = enum_smu_state_classification_flag.define('SMU_STATE_CLASSIFICATION_FLAG_UVD_MVC', 1048576)
# Sub-blocks that compose a driver power state.
@c.record
class struct_smu_state_classification_block(c.Struct):
  SIZE = 16
  ui_label: Annotated[enum_smu_state_ui_label, 0]
  flags: Annotated[enum_smu_state_classification_flag, 4]
  bios_index: Annotated[Annotated[int, ctypes.c_int32], 8]
  temporary_state: Annotated[Annotated[bool, ctypes.c_bool], 12]
  to_be_deleted: Annotated[Annotated[bool, ctypes.c_bool], 13]
@c.record
class struct_smu_state_pcie_block(c.Struct):
  SIZE = 4
  lanes: Annotated[Annotated[int, ctypes.c_uint32], 0]
class enum_smu_refreshrate_source(Annotated[int, ctypes.c_uint32], c.Enum): pass
SMU_REFRESHRATE_SOURCE_EDID = enum_smu_refreshrate_source.define('SMU_REFRESHRATE_SOURCE_EDID', 0)
SMU_REFRESHRATE_SOURCE_EXPLICIT = enum_smu_refreshrate_source.define('SMU_REFRESHRATE_SOURCE_EXPLICIT', 1)
@c.record
class struct_smu_state_display_block(c.Struct):
  SIZE = 20
  disable_frame_modulation: Annotated[Annotated[bool, ctypes.c_bool], 0]
  limit_refreshrate: Annotated[Annotated[bool, ctypes.c_bool], 1]
  refreshrate_source: Annotated[enum_smu_refreshrate_source, 4]
  explicit_refreshrate: Annotated[Annotated[int, ctypes.c_int32], 8]
  edid_refreshrate_index: Annotated[Annotated[int, ctypes.c_int32], 12]
  enable_vari_bright: Annotated[Annotated[bool, ctypes.c_bool], 16]
@c.record
class struct_smu_state_memory_block(c.Struct):
  SIZE = 5
  dll_off: Annotated[Annotated[bool, ctypes.c_bool], 0]
  m3arb: Annotated[Annotated[int, ctypes.c_ubyte], 1]
  unused: Annotated[c.Array[Annotated[int, ctypes.c_ubyte], Literal[3]], 2]
@c.record
class struct_smu_state_software_algorithm_block(c.Struct):
  SIZE = 2
  disable_load_balancing: Annotated[Annotated[bool, ctypes.c_bool], 0]
  enable_sleep_for_timestamps: Annotated[Annotated[bool, ctypes.c_bool], 1]
# Thermal limits/thresholds; units are not stated here -- presumably the
# driver's millidegree convention (see SMU_TEMPERATURE_UNITS_PER_CENTIGRADES
# below), verify against callers.
@c.record
class struct_smu_temperature_range(c.Struct):
  SIZE = 44
  min: Annotated[Annotated[int, ctypes.c_int32], 0]
  max: Annotated[Annotated[int, ctypes.c_int32], 4]
  edge_emergency_max: Annotated[Annotated[int, ctypes.c_int32], 8]
  hotspot_min: Annotated[Annotated[int, ctypes.c_int32], 12]
  hotspot_crit_max: Annotated[Annotated[int, ctypes.c_int32], 16]
  hotspot_emergency_max: Annotated[Annotated[int, ctypes.c_int32], 20]
  mem_min: Annotated[Annotated[int, ctypes.c_int32], 24]
  mem_crit_max: Annotated[Annotated[int, ctypes.c_int32], 28]
  mem_emergency_max: Annotated[Annotated[int, ctypes.c_int32], 32]
  software_shutdown_temp: Annotated[Annotated[int, ctypes.c_int32], 36]
  software_shutdown_temp_offset: Annotated[Annotated[int, ctypes.c_int32], 40]
@c.record
class struct_smu_state_validation_block(c.Struct):
  SIZE = 3
  single_display_only: Annotated[Annotated[bool, ctypes.c_bool], 0]
  disallow_on_dc: Annotated[Annotated[bool, ctypes.c_bool], 1]
  supported_power_levels: Annotated[Annotated[int, ctypes.c_ubyte], 2]
@c.record
class struct_smu_uvd_clocks(c.Struct):
  SIZE = 8
  vclk: Annotated[Annotated[int, ctypes.c_uint32], 0]
  dclk: Annotated[Annotated[int, ctypes.c_uint32], 4]
# Driver-side enums: power source, PPT limit selection, carve-out pool sizes
# (values are byte counts), and the unified clock-type index.
class enum_smu_power_src_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
SMU_POWER_SOURCE_AC = enum_smu_power_src_type.define('SMU_POWER_SOURCE_AC', 0)
SMU_POWER_SOURCE_DC = enum_smu_power_src_type.define('SMU_POWER_SOURCE_DC', 1)
SMU_POWER_SOURCE_COUNT = enum_smu_power_src_type.define('SMU_POWER_SOURCE_COUNT', 2)
class enum_smu_ppt_limit_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
SMU_DEFAULT_PPT_LIMIT = enum_smu_ppt_limit_type.define('SMU_DEFAULT_PPT_LIMIT', 0)
SMU_FAST_PPT_LIMIT = enum_smu_ppt_limit_type.define('SMU_FAST_PPT_LIMIT', 1)
# Signed on purpose: SMU_PPT_LIMIT_MIN is -1.
class enum_smu_ppt_limit_level(Annotated[int, ctypes.c_int32], c.Enum): pass
SMU_PPT_LIMIT_MIN = enum_smu_ppt_limit_level.define('SMU_PPT_LIMIT_MIN', -1)
SMU_PPT_LIMIT_CURRENT = enum_smu_ppt_limit_level.define('SMU_PPT_LIMIT_CURRENT', 0)
SMU_PPT_LIMIT_DEFAULT = enum_smu_ppt_limit_level.define('SMU_PPT_LIMIT_DEFAULT', 1)
SMU_PPT_LIMIT_MAX = enum_smu_ppt_limit_level.define('SMU_PPT_LIMIT_MAX', 2)
class enum_smu_memory_pool_size(Annotated[int, ctypes.c_uint32], c.Enum): pass
SMU_MEMORY_POOL_SIZE_ZERO = enum_smu_memory_pool_size.define('SMU_MEMORY_POOL_SIZE_ZERO', 0)
SMU_MEMORY_POOL_SIZE_256_MB = enum_smu_memory_pool_size.define('SMU_MEMORY_POOL_SIZE_256_MB', 268435456)
SMU_MEMORY_POOL_SIZE_512_MB = enum_smu_memory_pool_size.define('SMU_MEMORY_POOL_SIZE_512_MB', 536870912)
SMU_MEMORY_POOL_SIZE_1_GB = enum_smu_memory_pool_size.define('SMU_MEMORY_POOL_SIZE_1_GB', 1073741824)
SMU_MEMORY_POOL_SIZE_2_GB = enum_smu_memory_pool_size.define('SMU_MEMORY_POOL_SIZE_2_GB', 2147483648)
class enum_smu_clk_type(Annotated[int, ctypes.c_uint32], c.Enum): pass
SMU_GFXCLK = enum_smu_clk_type.define('SMU_GFXCLK', 0)
SMU_VCLK = enum_smu_clk_type.define('SMU_VCLK', 1)
SMU_DCLK = enum_smu_clk_type.define('SMU_DCLK', 2)
SMU_VCLK1 = enum_smu_clk_type.define('SMU_VCLK1', 3)
SMU_DCLK1 = enum_smu_clk_type.define('SMU_DCLK1', 4)
SMU_ECLK = enum_smu_clk_type.define('SMU_ECLK', 5)
SMU_SOCCLK = enum_smu_clk_type.define('SMU_SOCCLK', 6)
SMU_UCLK = enum_smu_clk_type.define('SMU_UCLK', 7)
SMU_DCEFCLK = enum_smu_clk_type.define('SMU_DCEFCLK', 8)
SMU_DISPCLK = enum_smu_clk_type.define('SMU_DISPCLK', 9)
SMU_PIXCLK = enum_smu_clk_type.define('SMU_PIXCLK', 10)
SMU_PHYCLK = enum_smu_clk_type.define('SMU_PHYCLK', 11)
SMU_FCLK = enum_smu_clk_type.define('SMU_FCLK', 12)
SMU_SCLK = enum_smu_clk_type.define('SMU_SCLK', 13)
SMU_MCLK = enum_smu_clk_type.define('SMU_MCLK', 14)
SMU_PCIE = enum_smu_clk_type.define('SMU_PCIE', 15)
SMU_LCLK = enum_smu_clk_type.define('SMU_LCLK', 16)
SMU_OD_CCLK = enum_smu_clk_type.define('SMU_OD_CCLK', 17)
SMU_OD_SCLK = enum_smu_clk_type.define('SMU_OD_SCLK', 18)
SMU_OD_MCLK = enum_smu_clk_type.define('SMU_OD_MCLK', 19)
SMU_OD_VDDC_CURVE = enum_smu_clk_type.define('SMU_OD_VDDC_CURVE', 20)
SMU_OD_RANGE = enum_smu_clk_type.define('SMU_OD_RANGE', 21)
SMU_OD_VDDGFX_OFFSET = enum_smu_clk_type.define('SMU_OD_VDDGFX_OFFSET', 22)
SMU_OD_FAN_CURVE = enum_smu_clk_type.define('SMU_OD_FAN_CURVE', 23)
SMU_OD_ACOUSTIC_LIMIT = enum_smu_clk_type.define('SMU_OD_ACOUSTIC_LIMIT', 24)
SMU_OD_ACOUSTIC_TARGET = enum_smu_clk_type.define('SMU_OD_ACOUSTIC_TARGET', 25)
SMU_OD_FAN_TARGET_TEMPERATURE = enum_smu_clk_type.define('SMU_OD_FAN_TARGET_TEMPERATURE', 26)
SMU_OD_FAN_MINIMUM_PWM = enum_smu_clk_type.define('SMU_OD_FAN_MINIMUM_PWM', 27)
SMU_CLK_COUNT = enum_smu_clk_type.define('SMU_CLK_COUNT', 28)
# User DPM profile: saved fan/power/clock overrides the driver restores.
@c.record
class struct_smu_user_dpm_profile(c.Struct):
  SIZE = 140
  fan_mode: Annotated[Annotated[int, ctypes.c_uint32], 0]
  power_limit: Annotated[Annotated[int, ctypes.c_uint32], 4]
  fan_speed_pwm: Annotated[Annotated[int, ctypes.c_uint32], 8]
  fan_speed_rpm: Annotated[Annotated[int, ctypes.c_uint32], 12]
  flags: Annotated[Annotated[int, ctypes.c_uint32], 16]
  user_od: Annotated[Annotated[int, ctypes.c_uint32], 20]
  clk_mask: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[28]], 24]  # 28 == SMU_CLK_COUNT
  clk_dependency: Annotated[Annotated[int, ctypes.c_uint32], 136]
# Descriptor for a driver/SMU shared table (size, alignment, GPU/CPU addresses,
# backing buffer object).
@c.record
class struct_smu_table(c.Struct):
  SIZE = 48
  size: Annotated[Annotated[int, ctypes.c_uint64], 0]
  align: Annotated[Annotated[int, ctypes.c_uint32], 8]
  domain: Annotated[Annotated[int, ctypes.c_ubyte], 12]
  mc_address: Annotated[Annotated[int, ctypes.c_uint64], 16]
  cpu_addr: Annotated[ctypes.c_void_p, 24]
  bo: Annotated[c.POINTER[struct_amdgpu_bo], 32]
  version: Annotated[Annotated[int, ctypes.c_uint32], 40]
# Opaque forward declaration for the amdgpu buffer object.
class struct_amdgpu_bo(ctypes.Structure): pass
class enum_smu_perf_level_designation(Annotated[int, ctypes.c_uint32], c.Enum): pass
PERF_LEVEL_ACTIVITY = enum_smu_perf_level_designation.define('PERF_LEVEL_ACTIVITY', 0)
PERF_LEVEL_POWER_CONTAINMENT = enum_smu_perf_level_designation.define('PERF_LEVEL_POWER_CONTAINMENT', 1)
@c.record
class struct_smu_performance_level(c.Struct):
  SIZE = 24
  core_clock: Annotated[Annotated[int, ctypes.c_uint32], 0]
  memory_clock: Annotated[Annotated[int, ctypes.c_uint32], 4]
  vddc: Annotated[Annotated[int, ctypes.c_uint32], 8]
  vddci: Annotated[Annotated[int, ctypes.c_uint32], 12]
  non_local_mem_freq: Annotated[Annotated[int, ctypes.c_uint32], 16]
  non_local_mem_width: Annotated[Annotated[int, ctypes.c_uint32], 20]
@c.record
class struct_smu_clock_info(c.Struct):
  SIZE = 24
  min_mem_clk: Annotated[Annotated[int, ctypes.c_uint32], 0]
  max_mem_clk: Annotated[Annotated[int, ctypes.c_uint32], 4]
  min_eng_clk: Annotated[Annotated[int, ctypes.c_uint32], 8]
  max_eng_clk: Annotated[Annotated[int, ctypes.c_uint32], 12]
  min_bus_bandwidth: Annotated[Annotated[int, ctypes.c_uint32], 16]
  max_bus_bandwidth: Annotated[Annotated[int, ctypes.c_uint32], 20]
# Boot-up clock/voltage defaults read from the VBIOS.
@c.record
class struct_smu_bios_boot_up_values(c.Struct):
  SIZE = 68
  revision: Annotated[Annotated[int, ctypes.c_uint32], 0]
  gfxclk: Annotated[Annotated[int, ctypes.c_uint32], 4]
  uclk: Annotated[Annotated[int, ctypes.c_uint32], 8]
  socclk: Annotated[Annotated[int, ctypes.c_uint32], 12]
  dcefclk: Annotated[Annotated[int, ctypes.c_uint32], 16]
  eclk: Annotated[Annotated[int, ctypes.c_uint32], 20]
  vclk: Annotated[Annotated[int, ctypes.c_uint32], 24]
  dclk: Annotated[Annotated[int, ctypes.c_uint32], 28]
  vddc: Annotated[Annotated[int, ctypes.c_uint16], 32]
  vddci: Annotated[Annotated[int, ctypes.c_uint16], 34]
  mvddc: Annotated[Annotated[int, ctypes.c_uint16], 36]
  vdd_gfx: Annotated[Annotated[int, ctypes.c_uint16], 38]
  cooling_id: Annotated[Annotated[int, ctypes.c_ubyte], 40]
  pp_table_id: Annotated[Annotated[int, ctypes.c_uint32], 44]
  format_revision: Annotated[Annotated[int, ctypes.c_uint32], 48]
  content_revision: Annotated[Annotated[int, ctypes.c_uint32], 52]
  fclk: Annotated[Annotated[int, ctypes.c_uint32], 56]
  lclk: Annotated[Annotated[int, ctypes.c_uint32], 60]
  firmware_caps: Annotated[Annotated[int, ctypes.c_uint32], 64]
# Identifiers for the driver/SMU shared tables.
class enum_smu_table_id(Annotated[int, ctypes.c_uint32], c.Enum): pass
SMU_TABLE_PPTABLE = enum_smu_table_id.define('SMU_TABLE_PPTABLE', 0)
SMU_TABLE_WATERMARKS = enum_smu_table_id.define('SMU_TABLE_WATERMARKS', 1)
SMU_TABLE_CUSTOM_DPM = enum_smu_table_id.define('SMU_TABLE_CUSTOM_DPM', 2)
SMU_TABLE_DPMCLOCKS = enum_smu_table_id.define('SMU_TABLE_DPMCLOCKS', 3)
SMU_TABLE_AVFS = enum_smu_table_id.define('SMU_TABLE_AVFS', 4)
SMU_TABLE_AVFS_PSM_DEBUG = enum_smu_table_id.define('SMU_TABLE_AVFS_PSM_DEBUG', 5)
SMU_TABLE_AVFS_FUSE_OVERRIDE = enum_smu_table_id.define('SMU_TABLE_AVFS_FUSE_OVERRIDE', 6)
SMU_TABLE_PMSTATUSLOG = enum_smu_table_id.define('SMU_TABLE_PMSTATUSLOG', 7)
SMU_TABLE_SMU_METRICS = enum_smu_table_id.define('SMU_TABLE_SMU_METRICS', 8)
SMU_TABLE_DRIVER_SMU_CONFIG = enum_smu_table_id.define('SMU_TABLE_DRIVER_SMU_CONFIG', 9)
SMU_TABLE_ACTIVITY_MONITOR_COEFF = enum_smu_table_id.define('SMU_TABLE_ACTIVITY_MONITOR_COEFF', 10)
SMU_TABLE_OVERDRIVE = enum_smu_table_id.define('SMU_TABLE_OVERDRIVE', 11)
SMU_TABLE_I2C_COMMANDS = enum_smu_table_id.define('SMU_TABLE_I2C_COMMANDS', 12)
SMU_TABLE_PACE = enum_smu_table_id.define('SMU_TABLE_PACE', 13)
SMU_TABLE_ECCINFO = enum_smu_table_id.define('SMU_TABLE_ECCINFO', 14)
SMU_TABLE_COMBO_PPTABLE = enum_smu_table_id.define('SMU_TABLE_COMBO_PPTABLE', 15)
SMU_TABLE_WIFIBAND = enum_smu_table_id.define('SMU_TABLE_WIFIBAND', 16)
SMU_TABLE_COUNT = enum_smu_table_id.define('SMU_TABLE_COUNT', 17)
# Finalize all @c.record struct definitions declared above.
c.init_records()
# ---- PPSMC mailbox result codes ----
PPSMC_Result_OK = 0x1 # type: ignore
PPSMC_Result_Failed = 0xFF # type: ignore
PPSMC_Result_UnknownCmd = 0xFE # type: ignore
PPSMC_Result_CmdRejectedPrereq = 0xFD # type: ignore
PPSMC_Result_CmdRejectedBusy = 0xFC # type: ignore
# ---- PPSMC message IDs (driver -> SMU mailbox commands) ----
PPSMC_MSG_TestMessage = 0x1 # type: ignore
PPSMC_MSG_GetSmuVersion = 0x2 # type: ignore
PPSMC_MSG_GfxDriverReset = 0x3 # type: ignore
PPSMC_MSG_GetDriverIfVersion = 0x4 # type: ignore
PPSMC_MSG_EnableAllSmuFeatures = 0x5 # type: ignore
PPSMC_MSG_DisableAllSmuFeatures = 0x6 # type: ignore
PPSMC_MSG_RequestI2cTransaction = 0x7 # type: ignore
PPSMC_MSG_GetMetricsVersion = 0x8 # type: ignore
PPSMC_MSG_GetMetricsTable = 0x9 # type: ignore
PPSMC_MSG_GetEccInfoTable = 0xA # type: ignore
PPSMC_MSG_GetEnabledSmuFeaturesLow = 0xB # type: ignore
PPSMC_MSG_GetEnabledSmuFeaturesHigh = 0xC # type: ignore
PPSMC_MSG_SetDriverDramAddrHigh = 0xD # type: ignore
PPSMC_MSG_SetDriverDramAddrLow = 0xE # type: ignore
PPSMC_MSG_SetToolsDramAddrHigh = 0xF # type: ignore
PPSMC_MSG_SetToolsDramAddrLow = 0x10 # type: ignore
PPSMC_MSG_SetSystemVirtualDramAddrHigh = 0x11 # type: ignore
PPSMC_MSG_SetSystemVirtualDramAddrLow = 0x12 # type: ignore
PPSMC_MSG_SetSoftMinByFreq = 0x13 # type: ignore
PPSMC_MSG_SetSoftMaxByFreq = 0x14 # type: ignore
PPSMC_MSG_GetMinDpmFreq = 0x15 # type: ignore
PPSMC_MSG_GetMaxDpmFreq = 0x16 # type: ignore
PPSMC_MSG_GetDpmFreqByIndex = 0x17 # type: ignore
PPSMC_MSG_SetPptLimit = 0x18 # type: ignore
PPSMC_MSG_GetPptLimit = 0x19 # type: ignore
PPSMC_MSG_DramLogSetDramAddrHigh = 0x1A # type: ignore
PPSMC_MSG_DramLogSetDramAddrLow = 0x1B # type: ignore
PPSMC_MSG_DramLogSetDramSize = 0x1C # type: ignore
PPSMC_MSG_GetDebugData = 0x1D # type: ignore
PPSMC_MSG_HeavySBR = 0x1E # type: ignore
PPSMC_MSG_SetNumBadHbmPagesRetired = 0x1F # type: ignore
PPSMC_MSG_DFCstateControl = 0x20 # type: ignore
PPSMC_MSG_GetGmiPwrDnHyst = 0x21 # type: ignore
PPSMC_MSG_SetGmiPwrDnHyst = 0x22 # type: ignore
PPSMC_MSG_GmiPwrDnControl = 0x23 # type: ignore
PPSMC_MSG_EnterGfxoff = 0x24 # type: ignore
PPSMC_MSG_ExitGfxoff = 0x25 # type: ignore
PPSMC_MSG_EnableDeterminism = 0x26 # type: ignore
PPSMC_MSG_DisableDeterminism = 0x27 # type: ignore
PPSMC_MSG_DumpSTBtoDram = 0x28 # type: ignore
PPSMC_MSG_STBtoDramLogSetDramAddrHigh = 0x29 # type: ignore
PPSMC_MSG_STBtoDramLogSetDramAddrLow = 0x2A # type: ignore
PPSMC_MSG_STBtoDramLogSetDramSize = 0x2B # type: ignore
PPSMC_MSG_SetSystemVirtualSTBtoDramAddrHigh = 0x2C # type: ignore
PPSMC_MSG_SetSystemVirtualSTBtoDramAddrLow = 0x2D # type: ignore
PPSMC_MSG_GfxDriverResetRecovery = 0x2E # type: ignore
PPSMC_MSG_TriggerVFFLR = 0x2F # type: ignore
PPSMC_MSG_SetSoftMinGfxClk = 0x30 # type: ignore
PPSMC_MSG_SetSoftMaxGfxClk = 0x31 # type: ignore
PPSMC_MSG_GetMinGfxDpmFreq = 0x32 # type: ignore
PPSMC_MSG_GetMaxGfxDpmFreq = 0x33 # type: ignore
PPSMC_MSG_PrepareForDriverUnload = 0x34 # type: ignore
PPSMC_MSG_ReadThrottlerLimit = 0x35 # type: ignore
PPSMC_MSG_QueryValidMcaCount = 0x36 # type: ignore
PPSMC_MSG_McaBankDumpDW = 0x37 # type: ignore
PPSMC_MSG_GetCTFLimit = 0x38 # type: ignore
PPSMC_MSG_ClearMcaOnRead = 0x39 # type: ignore
PPSMC_MSG_QueryValidMcaCeCount = 0x3A # type: ignore
PPSMC_MSG_McaBankCeDumpDW = 0x3B # type: ignore
# The ID space is sparse from here on (0x3C-0x3F, 0x41-0x42, etc. unused).
PPSMC_MSG_SelectPLPDMode = 0x40 # type: ignore
PPSMC_MSG_RmaDueToBadPageThreshold = 0x43 # type: ignore
PPSMC_MSG_SetThrottlingPolicy = 0x44 # type: ignore
PPSMC_MSG_SetPhsDetWRbwThreshold = 0x45 # type: ignore
PPSMC_MSG_SetPhsDetWRbwFreqHigh = 0x46 # type: ignore
PPSMC_MSG_SetPhsDetWRbwFreqLow = 0x47 # type: ignore
PPSMC_MSG_SetPhsDetWRbwHystDown = 0x48 # type: ignore
PPSMC_MSG_SetPhsDetWRbwAlpha = 0x49 # type: ignore
PPSMC_MSG_SetPhsDetOnOff = 0x4A # type: ignore
PPSMC_MSG_GetPhsDetResidency = 0x4B # type: ignore
PPSMC_MSG_ResetSDMA = 0x4D # type: ignore
PPSMC_MSG_GetStaticMetricsTable = 0x59 # type: ignore
PPSMC_MSG_ResetVCN = 0x5B # type: ignore
PPSMC_Message_Count = 0x5C # type: ignore
# ---- Message argument values: reset modes, throttle limit types, thermal
# sensor types, per-link-power-down modes ----
PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET = 0x1 # type: ignore
PPSMC_RESET_TYPE_DRIVER_MODE_2_RESET = 0x2 # type: ignore
PPSMC_RESET_TYPE_DRIVER_MODE_3_RESET = 0x3 # type: ignore
PPSMC_THROTTLING_LIMIT_TYPE_SOCKET = 0x1 # type: ignore
PPSMC_THROTTLING_LIMIT_TYPE_HBM = 0x2 # type: ignore
PPSMC_AID_THM_TYPE = 0x1 # type: ignore
PPSMC_CCD_THM_TYPE = 0x2 # type: ignore
PPSMC_XCD_THM_TYPE = 0x3 # type: ignore
PPSMC_HBM_THM_TYPE = 0x4 # type: ignore
PPSMC_PLPD_MODE_DEFAULT = 0x1 # type: ignore
PPSMC_PLPD_MODE_OPTIMIZED = 0x2 # type: ignore
# ---- DPM table dimensions and interface versions ----
NUM_VCLK_DPM_LEVELS = 4 # type: ignore
NUM_DCLK_DPM_LEVELS = 4 # type: ignore
NUM_SOCCLK_DPM_LEVELS = 4 # type: ignore
NUM_LCLK_DPM_LEVELS = 4 # type: ignore
NUM_UCLK_DPM_LEVELS = 4 # type: ignore
NUM_FCLK_DPM_LEVELS = 4 # type: ignore
NUM_XGMI_DPM_LEVELS = 2 # type: ignore
NUM_CXL_BITRATES = 4 # type: ignore
NUM_PCIE_BITRATES = 4 # type: ignore
NUM_XGMI_BITRATES = 4 # type: ignore
NUM_XGMI_WIDTHS = 3 # type: ignore
NUM_SOC_P2S_TABLES = 3 # type: ignore
NUM_TDP_GROUPS = 4 # type: ignore
SMU_METRICS_TABLE_VERSION = 0x11 # type: ignore
SMU_VF_METRICS_TABLE_VERSION = 0x5 # type: ignore
SMU13_0_6_DRIVER_IF_VERSION = 0x08042024 # type: ignore
# ---- Software-I2C configuration; CmdConfig bit layout for SwI2cCmd_t ----
NUM_I2C_CONTROLLERS = 8 # type: ignore
I2C_CONTROLLER_ENABLED = 1 # type: ignore
I2C_CONTROLLER_DISABLED = 0 # type: ignore
MAX_SW_I2C_COMMANDS = 24 # type: ignore
CMDCONFIG_STOP_BIT = 0 # type: ignore
CMDCONFIG_RESTART_BIT = 1 # type: ignore
CMDCONFIG_READWRITE_BIT = 2 # type: ignore
CMDCONFIG_STOP_MASK = (1 << CMDCONFIG_STOP_BIT) # type: ignore
CMDCONFIG_RESTART_MASK = (1 << CMDCONFIG_RESTART_BIT) # type: ignore
CMDCONFIG_READWRITE_MASK = (1 << CMDCONFIG_READWRITE_BIT) # type: ignore
# ---- Interrupt IDs and firmware throttler status bits ----
IH_INTERRUPT_ID_TO_DRIVER = 0xFE # type: ignore
IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING = 0x7 # type: ignore
THROTTLER_PROCHOT_BIT = 0 # type: ignore
THROTTLER_PPT_BIT = 1 # type: ignore
THROTTLER_THERMAL_SOCKET_BIT = 2 # type: ignore
THROTTLER_THERMAL_VR_BIT = 3 # type: ignore
THROTTLER_THERMAL_HBM_BIT = 4 # type: ignore
ClearMcaOnRead_UE_FLAG_MASK = 0x1 # type: ignore
ClearMcaOnRead_CE_POLL_MASK = 0x2 # type: ignore
int32_t = int # type: ignore
# ---- Generic driver-side SMU limits and flags ----
SMU_THERMAL_MINIMUM_ALERT_TEMP = 0 # type: ignore
SMU_THERMAL_MAXIMUM_ALERT_TEMP = 255 # type: ignore
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES = 1000 # type: ignore
SMU_FW_NAME_LEN = 0x24 # type: ignore
SMU_DPM_USER_PROFILE_RESTORE = (1 << 0) # type: ignore
SMU_CUSTOM_FAN_SPEED_RPM = (1 << 1) # type: ignore
SMU_CUSTOM_FAN_SPEED_PWM = (1 << 2) # type: ignore
# ---- Driver throttler-status bit positions (64-bit mask) ----
SMU_THROTTLER_PPT0_BIT = 0 # type: ignore
SMU_THROTTLER_PPT1_BIT = 1 # type: ignore
SMU_THROTTLER_PPT2_BIT = 2 # type: ignore
SMU_THROTTLER_PPT3_BIT = 3 # type: ignore
SMU_THROTTLER_SPL_BIT = 4 # type: ignore
SMU_THROTTLER_FPPT_BIT = 5 # type: ignore
SMU_THROTTLER_SPPT_BIT = 6 # type: ignore
SMU_THROTTLER_SPPT_APU_BIT = 7 # type: ignore
SMU_THROTTLER_TDC_GFX_BIT = 16 # type: ignore
SMU_THROTTLER_TDC_SOC_BIT = 17 # type: ignore
SMU_THROTTLER_TDC_MEM_BIT = 18 # type: ignore
SMU_THROTTLER_TDC_VDD_BIT = 19 # type: ignore
SMU_THROTTLER_TDC_CVIP_BIT = 20 # type: ignore
SMU_THROTTLER_EDC_CPU_BIT = 21 # type: ignore
SMU_THROTTLER_EDC_GFX_BIT = 22 # type: ignore
SMU_THROTTLER_APCC_BIT = 23 # type: ignore
SMU_THROTTLER_TEMP_GPU_BIT = 32 # type: ignore
SMU_THROTTLER_TEMP_CORE_BIT = 33 # type: ignore
SMU_THROTTLER_TEMP_MEM_BIT = 34 # type: ignore
SMU_THROTTLER_TEMP_EDGE_BIT = 35 # type: ignore
SMU_THROTTLER_TEMP_HOTSPOT_BIT = 36 # type: ignore
SMU_THROTTLER_TEMP_SOC_BIT = 37 # type: ignore
SMU_THROTTLER_TEMP_VR_GFX_BIT = 38 # type: ignore
SMU_THROTTLER_TEMP_VR_SOC_BIT = 39 # type: ignore
SMU_THROTTLER_TEMP_VR_MEM0_BIT = 40 # type: ignore
SMU_THROTTLER_TEMP_VR_MEM1_BIT = 41 # type: ignore
SMU_THROTTLER_TEMP_LIQUID0_BIT = 42 # type: ignore
SMU_THROTTLER_TEMP_LIQUID1_BIT = 43 # type: ignore
SMU_THROTTLER_VRHOT0_BIT = 44 # type: ignore
SMU_THROTTLER_VRHOT1_BIT = 45 # type: ignore
SMU_THROTTLER_PROCHOT_CPU_BIT = 46 # type: ignore
SMU_THROTTLER_PROCHOT_GFX_BIT = 47 # type: ignore
SMU_THROTTLER_PPM_BIT = 56 # type: ignore
SMU_THROTTLER_FIT_BIT = 57 # type: ignore | {
"repo_id": "tinygrad/tinygrad",
"file_path": "tinygrad/runtime/autogen/am/smu_v13_0_6.py",
"license": "MIT License",
"lines": 855,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
tinygrad/tinygrad:test/unit/test_llm_server.py | import unittest
from unittest.mock import patch
from tinygrad import Tensor
class TestTransformerGenerate(unittest.TestCase):
  """Unit tests for Transformer.generate that stub out the model forward pass."""

  def test_start_pos_parameter_is_used(self):
    """Test that start_pos parameter is not ignored (regression test for always resetting to 0)."""
    from tinygrad.apps.llm import Transformer
    # Create a minimal transformer
    model = Transformer(num_blocks=1, dim=64, hidden_dim=128, n_heads=2, n_kv_heads=2,
                        norm_eps=1e-5, vocab_size=100, head_dim=32, rope_theta=10000.0, max_context=32)
    captured_inputs = []
    # Replacement for Transformer.__call__: records (token-tensor shape, start_pos value).
    # start_pos may arrive as a plain int or a bound Variable, hence the bind_val branch.
    def mock_call(self, tokens, start_pos):
      captured_inputs.append((tokens.shape, start_pos if isinstance(start_pos, int) else start_pos.bind_val))
      return Tensor([[42]]) # return a fake next token
    with patch.object(Transformer, '__call__', mock_call):
      tokens = [1, 2, 3, 4, 5]
      gen = model.generate(tokens, start_pos=3)
      next(gen) # get first token
      # With start_pos=3, the initial tensor should only have tokens[3:] = [4, 5] (length 2)
      # If the bug existed (start_pos always reset to 0), it would have all 5 tokens
      self.assertEqual(captured_inputs[0][0][-1], 2) # shape should be (1, 2)
      self.assertEqual(captured_inputs[0][1], 3) # start_pos should be 3, not 0
if __name__ == '__main__':
unittest.main()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/unit/test_llm_server.py",
"license": "MIT License",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:extra/thunder/tiny/fa.py | import math
from tinygrad import Tensor, dtypes
from tinygrad.helpers import DEBUG
from tinygrad.uop.ops import UOp, Ops
from extra.thunder.tiny.tk import WARP_THREADS
from extra.thunder.tiny.tk.kernel import Kernel
from extra.thunder.tiny.tk.tiles import GL, TileLayout
NUM_WORKERS = 1
Q_BLOCK_SIZE = 32
KV_BLOCK_SIZE = 32
def _sharded_empty(shape:tuple, ref:Tensor, axis:int|None) -> Tensor:
  """Allocate an uninitialized Tensor of `shape` with `ref`'s dtype and device.

  Single-device `ref`: a plain Tensor.empty. Multi-device `ref`: the dimension at
  `ref`'s shard axis is divided evenly across devices and the per-device buffer is
  wrapped back into a multi tensor sharded on `axis` (or `ref`'s own axis if None).
  """
  if not isinstance(ref.device, tuple): return Tensor.empty(*shape, dtype=ref.dtype, device=ref.device)
  # shrink the sharded dimension so each device allocates only its slice
  shape = tuple(s // len(ref.device) if i == ref.uop.axis else s for i, s in enumerate(shape))
  axis = ref.uop.axis if axis is None else axis
  return Tensor(Tensor.empty(*shape, dtype=ref.dtype, device=ref.device).uop.multi(axis), dtype=ref.dtype, device=ref.device)
def _sharded_empty_like(ref:Tensor, axis:int|None=None) -> Tensor:
  """Shorthand for _sharded_empty using `ref`'s own shape."""
  target_shape = ref.shape
  return _sharded_empty(target_shape, ref, axis)
def flash_attention(xq, xk, xv, attn_mask:Tensor|None=None, is_causal:bool=False):
  """FlashAttention forward + custom backward, built from handwritten tile kernels.

  Assumes q/k/v arrive as (batch, heads, seq, head_dim) (a batch dim is added for
  3-D inputs) — TODO confirm against callers. Internally they are transposed to
  (batch, seq, heads, head_dim), cast to bfloat16, and seq is padded up to a
  multiple of the tile block size; the padding is sliced off before returning in
  the caller's layout and dtype. `attn_mask` and `is_causal` are mutually
  exclusive. The softmax is computed in base 2: q is pre-scaled by
  1/(sqrt(D)*ln 2) so exp2 can be used, and l_vec is rescaled by ln 2 on store.
  """
  if len(xq.shape) == 3: xq, xk, xv = xq.unsqueeze(0), xk.unsqueeze(0), xv.unsqueeze(0)
  odtype = xq.dtype
  xq, xk, xv = xq.transpose(1, 2).cast(dtypes.bfloat16), xk.transpose(1, 2).cast(dtypes.bfloat16), xv.transpose(1, 2).cast(dtypes.bfloat16)
  _, N_, _, D_ = xq.shape
  block_size = max(Q_BLOCK_SIZE, KV_BLOCK_SIZE)
  assert D_ % block_size == 0, f"embedding dimension must be multiple of block size, got {D_=} {block_size=}"
  # pad to multiple of block size
  xq = xq.pad(((0, 0), (0, (block_size - (xq.shape[1] % block_size)) % block_size), (0, 0), (0, 0)))
  xk = xk.pad(((0, 0), (0, (block_size - (xk.shape[1] % block_size)) % block_size), (0, 0), (0, 0)))
  xv = xv.pad(((0, 0), (0, (block_size - (xv.shape[1] % block_size)) % block_size), (0, 0), (0, 0)))
  B, N, H, D = xq.shape
  H_KV = xk.shape[2]
  GROUP_SIZE = H // H_KV  # query heads per kv head (grouped-query attention)
  num_devices = len(xq.device) if isinstance(xq.device, tuple) else 1
  B_local = B // num_devices  # per-device batch when sharded on axis 0
  if DEBUG >= 2: print(f"Flash Attention {B=} {B_local=} {N=} {H=} {D=} {H_KV=} {GROUP_SIZE=}")
  # Forward kernel: each warp owns one Q_BLOCK_SIZE x D query tile and streams
  # K/V tiles, maintaining running max/norm vectors (online softmax). Writes the
  # normalized output `o` plus the log-sum-exp vector `l_vec` for the backward.
  def _custom_forward_impl(ou:UOp, l_vecu:UOp, qu:UOp, ku:UOp, vu:UOp, masku:UOp|None) -> UOp:
    with Kernel("fa_custom_forward", (H, N // (Q_BLOCK_SIZE*NUM_WORKERS), B_local), NUM_WORKERS * WARP_THREADS) as ker:
      warp = ker.warp
      o, q, k, v, l_vec = GL(ou, ker), GL(qu, ker), GL(ku, ker), GL(vu, ker), GL(l_vecu, ker)
      mask = GL(masku, ker) if masku is not None else None
      head = ker.blockIdx_x
      head_kv = head // GROUP_SIZE
      batch = ker.blockIdx_z
      q_seq = ker.blockIdx_y * NUM_WORKERS + ker.warpid
      # register-tile declarations (shape, dtype, layout)
      q_reg_fl = ker.rt((Q_BLOCK_SIZE, D), dtypes.float32)
      q_reg = ker.rt((Q_BLOCK_SIZE, D), dtypes.bfloat16)
      q_reg_transposed = ker.rt((D, Q_BLOCK_SIZE), dtypes.bfloat16, TileLayout.COL)
      k_reg = ker.rt((KV_BLOCK_SIZE, D), dtypes.bfloat16)
      k_reg_transposed = ker.rt((D, KV_BLOCK_SIZE), dtypes.bfloat16, TileLayout.COL)
      v_reg = ker.rt((KV_BLOCK_SIZE, D), dtypes.bfloat16, TileLayout.COL)
      o_reg = ker.rt((D, Q_BLOCK_SIZE), dtypes.float32, TileLayout.COL)
      o_reg_transposed = ker.rt((Q_BLOCK_SIZE, D), dtypes.float32)
      att_block = ker.rt((KV_BLOCK_SIZE, Q_BLOCK_SIZE), dtypes.float32, TileLayout.COL)
      att_block_mma = ker.rt((KV_BLOCK_SIZE, Q_BLOCK_SIZE), dtypes.bfloat16, TileLayout.COL)
      mask_reg = ker.rt((Q_BLOCK_SIZE, KV_BLOCK_SIZE), dtypes.float32)
      mask_reg_transposed = ker.rt((KV_BLOCK_SIZE, Q_BLOCK_SIZE), dtypes.float32, TileLayout.COL)
      max_vec_last = ker.rv(Q_BLOCK_SIZE, dtypes.float32)
      max_vec = ker.rv(Q_BLOCK_SIZE, dtypes.float32)
      norm_vec = ker.rv(Q_BLOCK_SIZE, dtypes.float32)
      scale_vec = ker.rv(Q_BLOCK_SIZE, dtypes.float32)
      max_vec = warp.neg_inf(max_vec)
      norm_vec = warp.zero(norm_vec)
      o_reg = warp.zero(o_reg)
      scale_vec = warp.ones(scale_vec)
      # load q tile
      q_reg_fl = warp.load(q_reg_fl, q, (), (batch, q_seq, head, 0), axis=1)
      # fold 1/sqrt(D) attention scale and the ln2 factor into q once, up front
      q_reg_fl *= (1.0 / math.sqrt(D)) * (1.0 / math.log(2))
      q_reg = warp.copy(q_reg, q_reg_fl)
      q_reg_transposed = warp.transpose(q_reg_transposed, q_reg)
      # causal: only kv tiles at or before this q tile contribute
      num_kv_blocks = (q_seq + 1) if is_causal else (N // KV_BLOCK_SIZE)
      for kv_idx in ker.range(num_kv_blocks):
        k_reg = warp.load(k_reg, k, (), (batch, kv_idx, head_kv, 0), axis=1)
        v_reg = warp.load(v_reg, v, (), (batch, kv_idx, head_kv, 0), axis=1)
        # mma qk^t
        att_block = warp.zero(att_block.after(kv_idx))
        k_reg_transposed = warp.transpose(k_reg_transposed, k_reg)
        att_block = warp.mma_AtB(att_block, k_reg_transposed, q_reg_transposed)
        # apply attention mask
        if is_causal:
          # per-lane global q/kv positions reconstructed from the tile layout
          bs_rows, bs_cols, bs_stride = att_block.base_shape.rows, att_block.base_shape.cols, att_block.base_shape.stride
          q_base = q_seq * Q_BLOCK_SIZE + (warp.laneid % bs_cols)
          kv_base = kv_idx * KV_BLOCK_SIZE + (warp.laneid // bs_cols) * bs_stride
          att_block = warp.map(att_block,
            lambda x, idx: ((kv_base + idx[0]*bs_rows + idx[2]) > (q_base + idx[1]*bs_cols)).alu(Ops.WHERE, UOp.ufix(x._uop, -math.inf), x))
        elif mask is not None:
          mask_reg = warp.load(mask_reg, mask, (), (batch, 0, q_seq, kv_idx), axis=2)
          mask_reg_transposed = warp.transpose(mask_reg_transposed, mask_reg)
          att_block += mask_reg_transposed
        # softmax (online: rescale previous accumulators by exp2(old_max - new_max))
        max_vec_last = warp.copy(max_vec_last.after(kv_idx), max_vec)
        max_vec = warp.col_reduce(max_vec.after(max_vec_last), att_block, lambda a, b: a.maximum(b), init_value=-math.inf)
        scale_vec = warp.map(scale_vec.after(max_vec_last, max_vec), lambda _, idx: max_vec_last[*idx] - max_vec[*idx])
        scale_vec = scale_vec.exp2()
        o_reg *= scale_vec
        norm_vec *= scale_vec
        att_block -= max_vec
        att_block = att_block.exp2()
        norm_vec = warp.col_reduce(norm_vec.after(scale_vec), att_block, lambda a, b: a + b)
        # mma av
        att_block_mma = warp.copy(att_block_mma.after(kv_idx, norm_vec), att_block)
        o_reg = warp.mma_AtB(o_reg, v_reg, att_block_mma)
      o_reg = ker.endrange()
      # .after() pins ordering: read accumulators only once the loop is closed
      norm_vec = norm_vec.after(o_reg)
      max_vec = max_vec.after(o_reg)
      o_reg /= norm_vec
      o_reg_transposed = warp.transpose(o_reg_transposed, o_reg)
      o = warp.store(o, o_reg_transposed, (batch, q_seq, head, 0), (), axis=1)
      norm_vec = norm_vec.after(o)
      max_vec = max_vec.after(o)
      # convert log-sum-exp back from base 2 to natural log before storing
      max_vec *= math.log(2)
      norm_vec = norm_vec.log2() * math.log(2)
      norm_vec += max_vec
      l_vec = warp.store(l_vec, norm_vec, (batch, head, 0, q_seq), (), axis=2)
      o = o.after(l_vec)
    return ker.finish()
  def custom_forward_causal(ou:UOp, l_vecu:UOp, qu:UOp, ku:UOp, vu:UOp) -> UOp:
    return _custom_forward_impl(ou, l_vecu, qu, ku, vu, None)
  def custom_forward_masked(ou:UOp, l_vecu:UOp, qu:UOp, ku:UOp, vu:UOp, masku:UOp) -> UOp:
    return _custom_forward_impl(ou, l_vecu, qu, ku, vu, masku)
  # Backward kernel for dQ: recomputes the attention probabilities from the saved
  # l_vec, then accumulates dq = sum_kv (dS @ K) over all kv tiles for one q tile.
  def _custom_backward_q_impl(dqu:UOp, dou:UOp, qu:UOp, ku:UOp, vu:UOp, masku:UOp|None, l_vecu:UOp, delta_vecu:UOp) -> UOp:
    with Kernel("fa_custom_backward_q", (H, N // (Q_BLOCK_SIZE*NUM_WORKERS), B_local), NUM_WORKERS * WARP_THREADS) as ker:
      warp = ker.warp
      dq, do, q, k, v = GL(dqu, ker), GL(dou, ker), GL(qu, ker), GL(ku, ker), GL(vu, ker)
      mask = GL(masku, ker) if masku is not None else None
      l_vec, delta_vec = GL(l_vecu, ker), GL(delta_vecu, ker)
      head = ker.blockIdx_x
      head_kv = head // GROUP_SIZE
      batch = ker.blockIdx_z
      q_seq = ker.blockIdx_y * NUM_WORKERS + ker.warpid
      q_reg_fl = ker.rt((Q_BLOCK_SIZE, D), dtypes.float32)
      q_reg = ker.rt((Q_BLOCK_SIZE, D), dtypes.bfloat16)
      q_reg_t = ker.rt((D, Q_BLOCK_SIZE), dtypes.bfloat16, TileLayout.COL)
      k_reg = ker.rt((KV_BLOCK_SIZE, D), dtypes.bfloat16)
      k_reg_t = ker.rt((D, KV_BLOCK_SIZE), dtypes.bfloat16, TileLayout.COL)
      k_reg_col = ker.rt((KV_BLOCK_SIZE, D), dtypes.bfloat16, TileLayout.COL)
      k_reg_col_t = ker.rt((D, KV_BLOCK_SIZE), dtypes.bfloat16)
      v_reg = ker.rt((KV_BLOCK_SIZE, D), dtypes.bfloat16)
      mask_reg = ker.rt((Q_BLOCK_SIZE, KV_BLOCK_SIZE), dtypes.float32)
      mask_reg_transposed = ker.rt((KV_BLOCK_SIZE, Q_BLOCK_SIZE), dtypes.float32, TileLayout.COL)
      dq_reg = ker.rt((D, Q_BLOCK_SIZE), dtypes.float32, TileLayout.COL)
      dq_reg_transposed = ker.rt((Q_BLOCK_SIZE, D), dtypes.float32)
      do_reg = ker.rt((Q_BLOCK_SIZE, D), dtypes.bfloat16)
      dp_block = ker.rt((KV_BLOCK_SIZE, Q_BLOCK_SIZE), dtypes.float32, TileLayout.COL)
      att_block = ker.rt((KV_BLOCK_SIZE, Q_BLOCK_SIZE), dtypes.float32, TileLayout.COL)
      att_block_mma = ker.rt((KV_BLOCK_SIZE, Q_BLOCK_SIZE), dtypes.bfloat16, TileLayout.COL)
      l_vec_reg = ker.rv(Q_BLOCK_SIZE, dtypes.float32)
      delta_vec_reg = ker.rv(Q_BLOCK_SIZE, dtypes.float32)
      dq_reg = warp.zero(dq_reg)
      # load q tile
      q_reg_fl = warp.load(q_reg_fl, q, (), (batch, q_seq, head, 0), axis=1)
      q_reg_fl *= (1.0 / math.sqrt(D)) * (1.0 / math.log(2))
      q_reg = warp.copy(q_reg, q_reg_fl)
      q_reg_t = warp.transpose(q_reg_t, q_reg)
      # load do tile
      do_reg = warp.load(do_reg, do, (), (batch, q_seq, head, 0), axis=1)
      # load l_vec (stored in natural log; convert back to base 2 for exp2 below)
      l_vec_reg = warp.load(l_vec_reg, l_vec, (), (batch, head, 0, q_seq), axis=2)
      l_vec_reg *= 1.0 / math.log(2)
      delta_vec_reg = warp.load(delta_vec_reg, delta_vec, (), (batch, head, 0, q_seq), axis=2)
      num_kv_blocks = (q_seq + 1) if is_causal else (N // KV_BLOCK_SIZE)
      for kv_idx in ker.range(num_kv_blocks):
        k_reg = warp.load(k_reg, k, (), (batch, kv_idx, head_kv, 0), axis=1)
        k_reg_col = warp.load(k_reg_col, k, (), (batch, kv_idx, head_kv, 0), axis=1)
        v_reg = warp.load(v_reg, v, (), (batch, kv_idx, head_kv, 0), axis=1)
        k_reg_t = warp.transpose(k_reg_t, k_reg)
        k_reg_col_t = warp.transpose(k_reg_col_t, k_reg_col)
        # mma qk^t
        att_block = warp.zero(att_block.after(kv_idx))
        att_block = warp.mma_AtB(att_block, k_reg_t, q_reg_t)
        # apply attention mask
        if is_causal:
          bs_rows, bs_cols, bs_stride = att_block.base_shape.rows, att_block.base_shape.cols, att_block.base_shape.stride
          q_base = q_seq * Q_BLOCK_SIZE + (warp.laneid % bs_cols)
          kv_base = kv_idx * KV_BLOCK_SIZE + (warp.laneid // bs_cols) * bs_stride
          att_block = warp.map(att_block,
            lambda x, idx: ((kv_base + idx[0]*bs_rows + idx[2]) > (q_base + idx[1]*bs_cols)).alu(Ops.WHERE, UOp.ufix(x._uop, -math.inf), x))
        elif mask is not None:
          mask_reg = warp.load(mask_reg, mask, (), (batch, 0, q_seq, kv_idx), axis=2)
          mask_reg_transposed = warp.transpose(mask_reg_transposed, mask_reg)
          att_block += mask_reg_transposed
        # recompute softmax probabilities P = exp2(S - l)
        att_block -= l_vec_reg
        att_block = att_block.exp2()
        # dP = V @ dO^T, then dS = P * (dP - delta)
        dp_block = warp.zero(dp_block.after(kv_idx, att_block))
        dp_block = warp.mma_ABt(dp_block, v_reg, do_reg)
        dp_block -= delta_vec_reg
        att_block *= dp_block
        att_block *= 1.0 / math.sqrt(D)
        att_block_mma = warp.copy(att_block_mma, att_block)
        dq_reg = warp.mma_AB(dq_reg, k_reg_col_t, att_block_mma)
      dq_reg = ker.endrange()
      dq_reg_transposed = warp.transpose(dq_reg_transposed, dq_reg)
      dq = warp.store(dq, dq_reg_transposed, (batch, q_seq, head, 0), axis=1)
    return ker.finish()
  def custom_backward_q_causal(dqu:UOp, dou:UOp, qu:UOp, ku:UOp, vu:UOp, l_vecu:UOp, delta_vecu:UOp) -> UOp:
    return _custom_backward_q_impl(dqu, dou, qu, ku, vu, None, l_vecu, delta_vecu)
  def custom_backward_q_masked(dqu:UOp, dou:UOp, qu:UOp, ku:UOp, vu:UOp, masku:UOp, l_vecu:UOp, delta_vecu:UOp) -> UOp:
    return _custom_backward_q_impl(dqu, dou, qu, ku, vu, masku, l_vecu, delta_vecu)
  # Backward kernel for dK/dV: each warp owns one kv tile and iterates over all
  # contributing q tiles and all query heads in its GQA group.
  def _custom_backward_kv_impl(dku:UOp, dvu:UOp, dou:UOp, qu:UOp, ku:UOp, vu:UOp, masku:UOp|None, l_vecu:UOp, delta_vecu:UOp):
    with Kernel("fa_custom_backward_kv", (H_KV, N // (KV_BLOCK_SIZE*NUM_WORKERS), B_local), NUM_WORKERS * WARP_THREADS) as ker:
      warp = ker.warp
      dk, dv, do, q, k, v = GL(dku, ker), GL(dvu, ker), GL(dou, ker), GL(qu, ker), GL(ku, ker), GL(vu, ker)
      mask = GL(masku, ker) if masku is not None else None
      l_vec, delta_vec = GL(l_vecu, ker), GL(delta_vecu, ker)
      head_kv = ker.blockIdx_x
      batch = ker.blockIdx_z
      kv_seq = ker.blockIdx_y * NUM_WORKERS + ker.warpid
      # shared-memory staging tile used to relayout att between COL and ROW
      att_smem = ker.st((Q_BLOCK_SIZE, KV_BLOCK_SIZE), dtypes.bfloat16)
      q_reg = ker.rt((Q_BLOCK_SIZE, D), dtypes.bfloat16)
      q_reg_t = ker.rt((D, Q_BLOCK_SIZE), dtypes.bfloat16, TileLayout.COL)
      q_reg_col = ker.rt((Q_BLOCK_SIZE, D), dtypes.bfloat16, TileLayout.COL)
      k_reg = ker.rt((KV_BLOCK_SIZE, D), dtypes.bfloat16)
      k_reg_t = ker.rt((D, KV_BLOCK_SIZE), dtypes.bfloat16, TileLayout.COL)
      v_reg = ker.rt((KV_BLOCK_SIZE, D), dtypes.bfloat16)
      mask_reg = ker.rt((Q_BLOCK_SIZE, KV_BLOCK_SIZE), dtypes.float32)
      mask_reg_transposed = ker.rt((KV_BLOCK_SIZE, Q_BLOCK_SIZE), dtypes.float32, TileLayout.COL)
      dk_reg = ker.rt((KV_BLOCK_SIZE, D), dtypes.float32, TileLayout.COL)
      dv_reg = ker.rt((KV_BLOCK_SIZE, D), dtypes.float32, TileLayout.COL)
      do_reg = ker.rt((Q_BLOCK_SIZE, D), dtypes.bfloat16)
      do_reg_col = ker.rt((Q_BLOCK_SIZE, D), dtypes.bfloat16, TileLayout.COL)
      dp_block = ker.rt((KV_BLOCK_SIZE, Q_BLOCK_SIZE), dtypes.float32, TileLayout.COL)
      att_block = ker.rt((KV_BLOCK_SIZE, Q_BLOCK_SIZE), dtypes.float32, TileLayout.COL)
      att_block_mma = ker.rt((KV_BLOCK_SIZE, Q_BLOCK_SIZE), dtypes.bfloat16, TileLayout.COL)
      att_block_transposed = ker.rt((Q_BLOCK_SIZE, KV_BLOCK_SIZE), dtypes.bfloat16, TileLayout.COL)
      att_block_row = ker.rt((Q_BLOCK_SIZE, KV_BLOCK_SIZE), dtypes.bfloat16)
      l_vec_reg = ker.rv(Q_BLOCK_SIZE, dtypes.float32)
      delta_vec_reg = ker.rv(Q_BLOCK_SIZE, dtypes.float32)
      dk_reg = warp.zero(dk_reg)
      dv_reg = warp.zero(dv_reg)
      # load kv tile
      k_reg = warp.load(k_reg, k, (), (batch, kv_seq, head_kv, 0), axis=1)
      k_reg_t = warp.transpose(k_reg_t, k_reg)
      v_reg = warp.load(v_reg, v, (), (batch, kv_seq, head_kv, 0), axis=1)
      # causal: q tiles before this kv tile never attend to it
      q_start = kv_seq if is_causal else 0
      for q_idx in ker.range(q_start, N // Q_BLOCK_SIZE):
        for g in ker.range(GROUP_SIZE):
          head_q = head_kv * GROUP_SIZE + g
          q_reg = warp.load(q_reg, q, (), (batch, q_idx, head_q, 0), axis=1)
          q_reg_col = warp.load(q_reg_col, q, (), (batch, q_idx, head_q, 0), axis=1)
          do_reg = warp.load(do_reg, do, (), (batch, q_idx, head_q, 0), axis=1)
          do_reg_col = warp.load(do_reg_col, do, (), (batch, q_idx, head_q, 0), axis=1)
          q_reg_t = warp.transpose(q_reg_t, q_reg)
          # load l_vec and delta_vec
          l_vec_reg = warp.load(l_vec_reg, l_vec, (), (batch, head_q, 0, q_idx), axis=2)
          l_vec_reg *= 1.0 / math.log(2)
          delta_vec_reg = warp.load(delta_vec_reg, delta_vec, (), (batch, head_q, 0, q_idx), axis=2)
          # mma qk^t (q was not pre-scaled here, so scale the scores instead)
          att_block = warp.zero(att_block.after(g))
          att_block = warp.mma_AtB(att_block, k_reg_t, q_reg_t)
          att_block *= (1.0 / math.sqrt(D)) * (1.0 / math.log(2))
          # apply attention mask
          if is_causal:
            bs_rows, bs_cols, bs_stride = att_block.base_shape.rows, att_block.base_shape.cols, att_block.base_shape.stride
            q_base = q_idx * Q_BLOCK_SIZE + (warp.laneid % bs_cols)
            kv_base = kv_seq * KV_BLOCK_SIZE + (warp.laneid // bs_cols) * bs_stride
            att_block = warp.map(att_block,
              lambda x, idx: ((kv_base + idx[0]*bs_rows + idx[2]) > (q_base + idx[1]*bs_cols)).alu(Ops.WHERE, UOp.ufix(x._uop, -math.inf), x))
          elif mask is not None:
            mask_reg = warp.load(mask_reg, mask, (), (batch, 0, q_idx, kv_seq), axis=2)
            mask_reg_transposed = warp.transpose(mask_reg_transposed, mask_reg)
            att_block += mask_reg_transposed
          att_block -= l_vec_reg
          att_block = att_block.exp2()
          # dV += P^T @ dO (P relayouted via shared memory round-trip)
          att_block_mma = warp.copy(att_block_mma, att_block)
          att_block_transposed = warp.transpose(att_block_transposed, att_block_mma)
          att_smem = warp.store(att_smem, att_block_transposed)
          att_block_row = warp.load(att_block_row, att_smem)
          dv_reg_ = warp.mma_AtB(dv_reg, att_block_row, do_reg_col)
          # dS = P * (V @ dO^T - delta), then dK += dS^T @ Q
          dp_block = warp.zero(dp_block.after(g, q_idx, dv_reg_))
          dp_block = warp.mma_ABt(dp_block, v_reg, do_reg)
          dp_block -= delta_vec_reg
          att_block *= dp_block
          att_block *= 1.0 / math.sqrt(D)
          att_block_mma = warp.copy(att_block_mma, att_block)
          att_block_transposed = warp.transpose(att_block_transposed, att_block_mma)
          att_smem = warp.store(att_smem, att_block_transposed)
          att_block_row = warp.load(att_block_row, att_smem)
          dk_reg = warp.mma_AtB(dk_reg, att_block_row, q_reg_col)
      dk_reg = ker.endrange(2)
      dv_reg = dv_reg.after(dk_reg)
      # NOTE(review): adds a tiny v-dependent term (1e-30 * v) into dv — presumably
      # to keep v in the dv graph / avoid an elided dependency. TODO confirm intent.
      dv_reg = warp.map(dv_reg, lambda x, idx: x + v_reg[*idx].cast(dtypes.float32) * 1e-30)
      dk = warp.store(dk, dk_reg, (batch, kv_seq, head_kv, 0), axis=1)
      dv = warp.store(dv, dv_reg, (batch, kv_seq, head_kv, 0), axis=1)
    return ker.finish(2)
  def custom_backward_kv_causal(dku:UOp, dvu:UOp, dou:UOp, qu:UOp, ku:UOp, vu:UOp, l_vecu:UOp, delta_vecu:UOp):
    return _custom_backward_kv_impl(dku, dvu, dou, qu, ku, vu, None, l_vecu, delta_vecu)
  def custom_backward_kv_masked(dku:UOp, dvu:UOp, dou:UOp, qu:UOp, ku:UOp, vu:UOp, masku:UOp, l_vecu:UOp, delta_vecu:UOp):
    return _custom_backward_kv_impl(dku, dvu, dou, qu, ku, vu, masku, l_vecu, delta_vecu)
  # normalize the mask: bool -> additive float mask; broadcast to (B, 1, N, N);
  # shard to match q when running multi-device
  single_device = xq.device[0] if isinstance(xq.device, tuple) else xq.device
  if is_causal:
    if attn_mask is not None: raise RuntimeError("cannot set attn_mask when is_causal=True")
  elif attn_mask is not None:
    if attn_mask.dtype == dtypes.bool: attn_mask = attn_mask.where(0, -float("inf"))
    if attn_mask.shape != (B, 1, N, N):
      attn_mask = attn_mask.expand(B, 1, N, N)
    if isinstance(xq.device, tuple) and not isinstance(attn_mask.device, tuple):
      attn_mask = attn_mask.shard(xq.device, axis=0)
  else:
    attn_mask = Tensor.zeros((B, 1, N, N), requires_grad=False, device=single_device, dtype=dtypes.float32)
    if isinstance(xq.device, tuple):
      attn_mask = attn_mask.shard(xq.device, axis=0)
  # output buffers for the forward custom kernel
  attn = _sharded_empty_like(xq, axis=0)
  l_vec = _sharded_empty((B, H, 1, N), xq, axis=0)
  # custom gradient: dispatches the handwritten backward kernels; returns grads
  # positionally matching the custom_kernel inputs (None for output buffers)
  def grad_causal(gradu:UOp, _) -> tuple[None, None, UOp, UOp, UOp]:
    grad = Tensor(gradu, device=gradu.device)
    grad_q = _sharded_empty_like(xq, axis=0)
    grad_k = _sharded_empty_like(xk, axis=0)
    grad_v = _sharded_empty_like(xv, axis=0)
    delta_vec = (grad * attn).sum(-1, dtype=dtypes.float32).transpose(1, 2).unsqueeze(-2).detach()
    grad_q = Tensor.custom_kernel(grad_q, grad, xq, xk, xv, l_vec, delta_vec, fxn=custom_backward_q_causal)[0]
    grad_k, grad_v = Tensor.custom_kernel(grad_k, grad_v, grad, xq, xk, xv, l_vec, delta_vec, fxn=custom_backward_kv_causal)[:2]
    return (None, None, grad_q.uop, grad_k.uop, grad_v.uop)
  def grad_masked(gradu:UOp, _) -> tuple[None, None, UOp, UOp, UOp, None]:
    grad = Tensor(gradu, device=gradu.device)
    grad_q = _sharded_empty_like(xq, axis=0)
    grad_k = _sharded_empty_like(xk, axis=0)
    grad_v = _sharded_empty_like(xv, axis=0)
    delta_vec = (grad * attn).sum(-1, dtype=dtypes.float32).transpose(1, 2).unsqueeze(-2).detach()
    grad_q = Tensor.custom_kernel(grad_q, grad, xq, xk, xv, attn_mask, l_vec, delta_vec, fxn=custom_backward_q_masked)[0]
    grad_k, grad_v = Tensor.custom_kernel(grad_k, grad_v, grad, xq, xk, xv, attn_mask, l_vec, delta_vec, fxn=custom_backward_kv_masked)[:2]
    return (None, None, grad_q.uop, grad_k.uop, grad_v.uop, None)
  if is_causal:
    attn, l_vec = Tensor.custom_kernel(attn, l_vec, xq, xk, xv, fxn=custom_forward_causal, grad_fxn=grad_causal)[:2]
  else:
    attn, l_vec = Tensor.custom_kernel(attn, l_vec, xq, xk, xv, attn_mask, fxn=custom_forward_masked, grad_fxn=grad_masked)[:2]
  # slice off the seq/head_dim padding and restore the caller's layout and dtype
  attn_ = attn[:, :N_, :, :D_]
  return attn_.transpose(1, 2).cast(odtype)
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "extra/thunder/tiny/fa.py",
"license": "MIT License",
"lines": 325,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
tinygrad/tinygrad:test/unit/test_schedule_cache.py | import unittest
import functools
from tinygrad import Tensor, Variable, UOp
from tinygrad.uop.ops import KernelInfo
from tinygrad.engine.schedule import schedule_cache
def custom_set0_kernel(A:UOp, num:int) -> UOp:
  """Custom kernel that writes the constant `num` into A[0]."""
  kernel_name = KernelInfo(f"custom_set0_{num}")
  store = A[0].set(num)
  return store.sink(arg=kernel_name)
class TestScheduleCache(unittest.TestCase):
  """Tests that the global schedule_cache is hit (i.e. does not grow) for repeated
  equivalent computations: re-bound Variables, repeated custom kernels, and
  identical realize sequences."""

  def test_bound_variable_reuses_cache(self):
    """Same graph with a Variable bound to different values must hit the cache."""
    schedule_cache.clear()
    v = Variable('v', 1, 100)
    x = Tensor.ones(10).contiguous().realize()
    # first run with v=5
    t1 = (x + Tensor(v.bind(5))).sum()
    self.assertEqual(t1.item(), 60.0)
    cache_size_after_first = len(schedule_cache)
    # second run with v=10 should reuse cache
    t2 = (x + Tensor(v.bind(10))).sum()
    self.assertEqual(t2.item(), 110.0)
    self.assertEqual(len(schedule_cache), cache_size_after_first)
  def test_custom_kernel(self):
    """Distinct custom kernel functions produce distinct results."""
    for i in range(4):
      a = Tensor.empty(1)
      a = Tensor.custom_kernel(a, fxn=functools.partial(custom_set0_kernel, num=i))[0]
      a.realize()
      self.assertEqual(a.item(), i)
  def test_same_custom_function_reuses_cache(self):
    """Re-running the identical custom kernel function must not grow the cache."""
    schedule_cache.clear()
    fxn = functools.partial(custom_set0_kernel, num=10)
    # first run
    a = Tensor.empty(1)
    a = Tensor.custom_kernel(a, fxn=fxn)[0]
    a.realize()
    self.assertEqual(a.item(), 10)
    cache_size_after_first = len(schedule_cache)
    # second run with same function should reuse cache
    b = Tensor.empty(1)
    b = Tensor.custom_kernel(b, fxn=fxn)[0]
    b.realize()
    self.assertEqual(b.item(), 10)
    self.assertEqual(len(schedule_cache), cache_size_after_first)
  def test_simple(self):
    """After warmup, repeating the same realize sequence keeps the cache size fixed."""
    a = Tensor.ones(10).contiguous()
    b = Tensor.ones(10).contiguous()
    Tensor.realize(a, b)
    # warm up
    for _ in range(2):
      num = (a.sum().contiguous()+b.sum().contiguous()).item()
      print(num)
    # confirm schedule cache doesn't grow
    start_len_schedule_cache = len(schedule_cache)
    for _ in range(3):
      num = (a.sum().contiguous()+b.sum().contiguous()).item()
      print(num)
    self.assertEqual(len(schedule_cache), start_len_schedule_cache)
if __name__ == "__main__":
unittest.main()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/unit/test_schedule_cache.py",
"license": "MIT License",
"lines": 57,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:test/external/external_test_process_count.py | import os, sys, time, multiprocessing
N = int(os.environ.get("NPROC", str(os.cpu_count())))
DEVICE = os.environ.get("DEV", "AMD")
# this tests the total number of processes that can be running tinygrad at a time
def proc(i, device, stop_evt):
  """Child process body: run one tiny tensor op on `device`, then stay alive
  until the parent signals shutdown via `stop_evt`.

  Exit code 0 on success, 1 if the tensor op fails; the parent inspects it.
  """
  # import inside the child so each process initializes tinygrad independently
  from tinygrad import Tensor
  try:
    a = Tensor.ones(2, device=device).contiguous()
    b = Tensor.ones(2, device=device).contiguous()
    c = (a + b).realize()
    assert c.tolist() == [2, 2]
  except Exception as e:
    # fail if it fails
    print(f"[child {i:2d}] tinygrad op failed: {e}", file=sys.stderr)
    # non-zero exit code propagated back to parent
    sys.exit(1)
  # hold the process alive on the parent-shared Event so all N children coexist,
  # which is what this test is actually measuring
  print(f"[child {i:2d}] success")
  stop_evt.wait()
  # Normal successful exit
  sys.exit(0)
if __name__ == "__main__":
  print(f"testing {N} concurrent tinygrad processes")
  # global exit event, shared by all children
  stop_evt = multiprocessing.Event()
  procs = []
  # launch N children, staggered 1 per 100 ms
  for i in range(N):
    p = multiprocessing.Process(target=proc, args=(i, DEVICE, stop_evt), name=f"tinygrad-proc-{i}")
    p.start()
    procs.append(p)
    time.sleep(0.1) # 100 ms between launches
  # signal global exit
  time.sleep(0.5)
  stop_evt.set()
  # join all children
  for p in procs: p.join()
  # check for failures (any child that exited non-zero)
  failed = [p for p in procs if p.exitcode != 0]
  if failed:
    print(f"{len(failed)} / {len(procs)} processes failed "
          f"with exit codes: {[p.exitcode for p in failed]}", file=sys.stderr)
    sys.exit(1)
  print(f"All {len(procs)} tinygrad processes ran successfully")
  sys.exit(0)
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/external/external_test_process_count.py",
"license": "MIT License",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:examples/gradaccum_mnist.py | import itertools
from typing import Callable
from tinygrad import nn, Tensor, dtypes, Device, TinyJit
from tinygrad.helpers import getenv, trange, partition
class Model:
  """Small MNIST convnet: two conv stages (conv-relu-conv-relu-batchnorm-maxpool)
  followed by a flatten and a 10-way linear head."""
  def __init__(self):
    stage1: list[Callable[[Tensor], Tensor]] = [
      nn.Conv2d(1, 32, 5), Tensor.relu,
      nn.Conv2d(32, 32, 5), Tensor.relu,
      nn.BatchNorm(32), Tensor.max_pool2d,
    ]
    stage2: list[Callable[[Tensor], Tensor]] = [
      nn.Conv2d(32, 64, 3), Tensor.relu,
      nn.Conv2d(64, 64, 3), Tensor.relu,
      nn.BatchNorm(64), Tensor.max_pool2d,
    ]
    head: list[Callable[[Tensor], Tensor]] = [lambda x: x.flatten(1), nn.Linear(576, 10)]
    self.layers: list[Callable[[Tensor], Tensor]] = stage1 + stage2 + head
  def __call__(self, x:Tensor) -> Tensor:
    """Apply all layers in sequence."""
    return x.sequential(self.layers)
# TODO: refactor this into optim/onnx
# TODO: refactor this into optim/onnx
def functional_adam(g:Tensor, m:Tensor, v:Tensor, b1_t:Tensor, b2_t:Tensor, lr=0.001, b1=0.9, b2=0.999, eps=1e-6) -> Tensor:
  """One Adam step over a flat gradient vector; returns the parameter delta to subtract.

  g: flat gradients; m/v: first/second moment state; b1_t/b2_t: running beta
  powers used for bias correction. State tensors are updated in place via
  tinygrad's in-place ops / assign — NOTE(review): this relies on Tensor.__imul__
  and .assign mutating the caller's tensors; verify against tinygrad semantics.
  """
  # advance the bias-correction terms first (b1_t holds b1**step after this)
  b1_t *= b1
  b2_t *= b2
  m.assign(b1 * m + (1.0 - b1) * g)
  v.assign(b2 * v + (1.0 - b2) * (g * g))
  # bias-corrected moment estimates
  m_hat = m / (1.0 - b1_t)
  v_hat = v / (1.0 - b2_t)
  return lr * (m_hat / (v_hat.sqrt() + eps))
# Gradient-accumulation MNIST training: BS is split into ACC_STEPS microbatches;
# grads are summed into a flat buffer and a functional Adam step runs on CPU.
if __name__ == "__main__":
  BS = getenv("BS", 512)
  ACC_STEPS = getenv("ACC_STEPS", 8)
  X_train, Y_train, X_test, Y_test = nn.datasets.mnist()
  model = Model()
  params = nn.state.get_parameters(model)
  # init params, set requires grad on the ones we need gradients of
  for x in params:
    if x.requires_grad is None: x.requires_grad_()
    x.replace(x.contiguous())
  Tensor.realize(*params)
  # split params (with grads) and buffers (without)
  params, buffers = partition(params, lambda x: x.requires_grad)
  print(f"params: {len(params)} buffers: {len(buffers)}")
  # optim params: pos_params[j] is the offset of param j in the flat grad vector
  pos_params = list(itertools.accumulate(params, lambda x,y: x+y.numel(), initial=0))
  adam_m = Tensor.zeros(pos_params[-1], device="CPU").contiguous()
  adam_v = Tensor.zeros(pos_params[-1], device="CPU").contiguous()
  adam_b1_t = Tensor.ones((1,), dtype=dtypes.float32, device="CPU", requires_grad=False).contiguous()
  adam_b2_t = Tensor.ones((1,), dtype=dtypes.float32, device="CPU", requires_grad=False).contiguous()
  adam_params = [adam_m, adam_v, adam_b1_t, adam_b2_t]
  # create loss and grads. init all state so the JIT works on microbatch
  for x in params: x.assign(x.detach())
  loss = Tensor.zeros(tuple()).contiguous()
  grads = Tensor.zeros(pos_params[-1]).contiguous()
  Tensor.realize(*params, *buffers, *adam_params, loss, grads)
  @TinyJit
  @Tensor.train()
  def microbatch():
    """One forward/backward over BS//ACC_STEPS samples; accumulates into loss/grads."""
    samples = Tensor.randint(BS // ACC_STEPS, high=X_train.shape[0])
    for t in params: t.grad = None
    # divide by ACC_STEPS at the loss
    uloss = (model(X_train[samples]).sparse_categorical_crossentropy(Y_train[samples]) / ACC_STEPS).backward()
    ugrads = Tensor.cat(*[t.grad.contiguous().flatten() for t in params], dim=0)
    for t in params: t.grad = None
    # concat the grads and assign them
    loss.assign(loss + uloss)
    grads.assign(grads + ugrads)
    Tensor.realize(*params, *buffers, loss, grads)
  @TinyJit
  def optimizer():
    """Apply one Adam step from the accumulated flat grads, then reset loss/grads."""
    # run optimizer (on CPU, where adam params live)
    delta = functional_adam(grads.to("CPU"), adam_m, adam_v, adam_b1_t, adam_b2_t)
    # update the params, copying back the delta one at a time to avoid OOM
    # NOTE: the scheduler is ordering things poorly, all the copies are happening before the adds
    for j,tt in enumerate(params):
      tt.assign(tt.detach() - delta[pos_params[j]:pos_params[j+1]].reshape(tt.shape).to(Device.DEFAULT))
    # realize everything, zero out loss and grads
    loss.assign(Tensor.zeros_like(loss))
    grads.assign(Tensor.zeros_like(grads))
    Tensor.realize(*params, *adam_params, loss, grads)
  @TinyJit
  def get_test_acc() -> Tensor: return (model(X_test).argmax(axis=1) == Y_test).mean()*100
  test_acc = float('nan')
  for i in (t:=trange(getenv("STEPS", 70))):
    # microbatch sets the gradients
    for _ in range(ACC_STEPS): microbatch()
    # get the loss before the optimizer clears it
    # this is already realized so this isn't a schedule
    loss_item = loss.item()
    # run the optimizer
    optimizer()
    # eval
    if i%10 == 9: test_acc = get_test_acc().item()
    t.set_description(f"loss: {loss_item:6.2f} test_accuracy: {test_acc:5.2f}%")
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "examples/gradaccum_mnist.py",
"license": "MIT License",
"lines": 89,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
tinygrad/tinygrad:extra/hevc/decode.py | import argparse, os, hashlib, functools
from typing import Iterator, Callable
from tinygrad.helpers import getenv, DEBUG, round_up, Timing, tqdm, fetch, ceildiv
from extra.hevc.hevc import parse_hevc_file_headers, untile_nv12, to_bgr, nv_gpu
from tinygrad import Tensor, dtypes, Device, Variable, TinyJit
# rounds up hevc input data to 32 bytes, so more optimal kernels can be generated
HEVC_ROUNDUP = getenv("DATA_ROUNDUP", 32)
@functools.cache
def _hevc_jitted_decoder(out_image_size:tuple[int, int], max_hist:int, inplace:bool):
  """Build (and memoize) a TinyJit-wrapped single-frame HEVC decode function.

  NOTE(review): `max_hist` and `inplace` are not read in the body; they only
  partition the functools.cache so different configs get separate TinyJit
  instances — TODO confirm that is the intent.
  """
  def hevc_decode_frame(pos:Variable, hevc_tensor:Tensor, offset:Variable, sz:Variable, opaque:Tensor, i:Variable, *hist:Tensor, outbuf:Tensor|None=None):
    # slice the bitstream for this frame (sizes are in HEVC_ROUNDUP units) and decode
    x = hevc_tensor[offset:offset+sz*HEVC_ROUNDUP].decode_hevc_frame(pos, out_image_size, opaque[i], hist)
    if outbuf is not None: outbuf.assign(x).realize()
    return x.realize()
  return TinyJit(hevc_decode_frame)
def hevc_decode(hevc_tensor:Tensor, opaque:Tensor, frame_info:list, luma_h:int, luma_w:int,
                history:list[Tensor]|None=None, preallocated_outputs:list[Tensor]|None=None, warmup=False) -> Iterator[Tensor]:
  """Decode frames from an HEVC bitstream tensor, yielding one NV12 image tensor per frame.

  frame_info entries are (offset, size, frame_pos, hist_size, is_hist) tuples from
  parse_hevc_file_headers. Reference frames are kept in a rolling `history` window
  of max_hist buffers. With `preallocated_outputs`, results are written in place;
  otherwise each decoded frame is cloned.
  NOTE(review): the `warmup` parameter is unused in this body — TODO confirm.
  """
  # output layout: luma plane stacked over half-height chroma, width padded to 64
  out_image_size = luma_h + (luma_h + 1) // 2, round_up(luma_w, 64)
  max_hist = max((hs for _, _, _, hs, _ in frame_info), default=0)
  # Variables let the JIT reuse one kernel across differing offsets/sizes/frames
  v_pos = Variable("pos", 0, max_hist + 1)
  v_offset = Variable("offset", 0, hevc_tensor.numel()-1)
  v_sz = Variable("sz", 1, ceildiv(hevc_tensor.numel(), HEVC_ROUNDUP))
  v_i = Variable("i", 0, len(frame_info)-1)
  decode_jit = _hevc_jitted_decoder(out_image_size, max_hist, preallocated_outputs is not None)
  history = history or [Tensor.empty(*out_image_size, dtype=dtypes.uint8, device="NV").contiguous().realize() for _ in range(max_hist)]
  assert len(history) == max_hist, f"history length {len(history)} does not match max_hist {max_hist}"
  for i, (offset, sz, frame_pos, _, is_hist) in enumerate(frame_info):
    # keep only the newest max_hist reference frames
    history = history[-max_hist:] if max_hist > 0 else []
    img = decode_jit(v_pos.bind(frame_pos), hevc_tensor, v_offset.bind(offset), v_sz.bind(ceildiv(sz, HEVC_ROUNDUP)),
                     opaque, v_i.bind(i), *history, outbuf=preallocated_outputs[i] if preallocated_outputs else None)
    res = preallocated_outputs[i] if preallocated_outputs else img.clone().realize()
    if is_hist: history.append(res)
    yield res
# CLI driver: decode a file (or a fetched sample), time it, then either validate
# against reference-decoder frames or write PNGs.
if __name__ == "__main__":
  parser = argparse.ArgumentParser()
  parser.add_argument("--input_file", type=str, default="")
  parser.add_argument("--output_dir", type=str, default="extra/hevc/out")
  args = parser.parse_args()
  if args.input_file == "":
    url = "https://github.com/haraschax/filedump/raw/09a497959f7fa6fd8dba501a25f2cdb3a41ecb12/comma_video.hevc"
    hevc_tensor = Tensor.from_url(url, device="CPU")
  else:
    hevc_tensor = Tensor.empty(os.stat(args.input_file).st_size, dtype=dtypes.uint8, device=f"disk:{args.input_file}").to("CPU")
  dat = bytes(hevc_tensor.data())
  dat_hash = hashlib.md5(dat).hexdigest()  # keys the validation pickle below
  with Timing("prep infos: "):
    opaque, frame_info, w, h, luma_w, luma_h, chroma_off = parse_hevc_file_headers(dat)
    frame_info = frame_info[:getenv("MAX_FRAMES", len(frame_info))]
  # move all needed data to gpu
  with Timing("copy to gpu: "):
    opaque_nv = opaque.to("NV").contiguous().realize()
    hevc_tensor = hevc_tensor.to("NV")
  out_image_size = luma_h + (luma_h + 1) // 2, round_up(luma_w, 64)
  # preallocate output/hist buffers
  max_hist = max((hs for _, _, _, hs, _ in frame_info), default=0)
  hist = [Tensor.empty(*out_image_size, dtype=dtypes.uint8, device="NV").contiguous().realize() for _ in range(max_hist)]
  out_images = [Tensor.zeros(*out_image_size, dtype=dtypes.uint8, device="NV").contiguous().realize() for _ in range(len(frame_info))]
  # warmup decode (populates the JIT before timing)
  _ = list(hevc_decode(hevc_tensor, opaque_nv, frame_info[:3], luma_h, luma_w, history=hist, preallocated_outputs=out_images))
  Device.default.synchronize()
  # decode all frames using the iterator
  with Timing("decoding whole file: ", on_exit=(lambda et: f", {len(frame_info)} frames, {len(frame_info)/(et/1e9):.2f} fps")):
    images = list(hevc_decode(hevc_tensor, opaque_nv, frame_info, luma_h, luma_w, history=hist, preallocated_outputs=out_images))
    Device.default.synchronize()
  # validation: compare against pickled reference-decoder frames
  # NOTE: pickle.load on downloaded/local data — trusted sources only
  if getenv("VALIDATE", 0):
    import pickle
    if dat_hash == "b813bfdbec194fd17fdf0e3ceb8cea1c":
      url = "https://github.com/nimlgen/hevc_validate_set/raw/refs/heads/main/decoded_frames_b813bfdbec194fd17fdf0e3ceb8cea1c.pkl"
      decoded_frames = pickle.load(fetch(url).open("rb"))
    else: decoded_frames = pickle.load(open(f"extra/hevc/decoded_frames_{dat_hash}.pkl", "rb"))
  else: import cv2
  for i, img in tqdm(enumerate(images)):
    if getenv("VALIDATE", 0):
      if i < len(decoded_frames) and len(decoded_frames[i]) > 0:
        img = untile_nv12(img, h, w, luma_w, chroma_off).realize()
        assert img.data() == decoded_frames[i], f"Frame {i} does not match reference decoder!"
        print(f"Frame {i} matches reference decoder!")
    else:
      if len(args.output_dir):
        os.makedirs(args.output_dir, exist_ok=True)
        img = to_bgr(img, h, w, luma_w, chroma_off).realize()
        cv2.imwrite(f"{args.output_dir}/out_frame_{i:04d}.png", img.numpy())
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "extra/hevc/decode.py",
"license": "MIT License",
"lines": 82,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
tinygrad/tinygrad:test/testextra/test_hevc.py | import unittest
from tinygrad import Tensor, Device, dtypes
from tinygrad.helpers import fetch, round_up
from extra.hevc.hevc import parse_hevc_file_headers, nv_gpu
from extra.hevc.decode import hevc_decode
class TestHevc(unittest.TestCase):
  """Tests for the HEVC header parser and the NV hardware decode path."""

  def test_hevc_parser(self):
    """Parse the first 512KiB of a sample stream and check the generated nvdec picture structs."""
    url = "https://github.com/haraschax/filedump/raw/09a497959f7fa6fd8dba501a25f2cdb3a41ecb12/comma_video.hevc"
    # A Range request is enough: only the first few frames are inspected.
    dat = fetch(url, headers={"Range": f"bytes=0-{512<<10}"}).read_bytes()
    opaque, frame_info, w, h, luma_w, luma_h, chroma_off = parse_hevc_file_headers(dat, device=Device.DEFAULT)

    def _test_common(frame, bts):
      # Assert the SPS/PPS-derived fields that every frame of this stream shares.
      # BUG FIX: this helper previously asserted against the closed-over `frame0`
      # instead of its `frame` argument, so the calls for frame1/frame3 silently
      # re-checked frame 0. Now it checks the frame actually passed in.
      self.assertEqual(frame.pic_width_in_luma_samples, 1952)
      self.assertEqual(frame.pic_height_in_luma_samples, 1216)
      self.assertEqual(frame.chroma_format_idc, 1)
      self.assertEqual(frame.bit_depth_luma, 8)
      self.assertEqual(frame.bit_depth_chroma, 8)
      self.assertEqual(frame.log2_min_luma_coding_block_size, 3)
      self.assertEqual(frame.log2_max_luma_coding_block_size, 5)
      self.assertEqual(frame.log2_min_transform_block_size, 2)
      self.assertEqual(frame.log2_max_transform_block_size, 5)
      self.assertEqual(frame.num_tile_columns, 3)
      self.assertEqual(frame.num_tile_rows, 1)
      self.assertEqual(frame.colMvBuffersize, 589)
      self.assertEqual(frame.HevcSaoBufferOffset, 2888)
      self.assertEqual(frame.HevcBsdCtrlOffset, 25992)
      self.assertEqual(frame.v1.hevc_main10_444_ext.HevcFltAboveOffset, 26714)
      self.assertEqual(frame.v1.hevc_main10_444_ext.HevcSaoAboveOffset, 36214)
      # tiles
      self.assertEqual(bytes(bts[0x200:0x210]), b'\x18\x00&\x00\x18\x00&\x00\r\x00&\x00\x00\x00\x00\x00')

    # Frame 0: IDR (random access point) with no references.
    frame0 = nv_gpu.nvdec_hevc_pic_s.from_buffer(opaque[0].data())
    _test_common(frame0, opaque[0].data())
    self.assertEqual(frame0.stream_len, 148063)
    self.assertEqual(frame0.IDR_picture_flag, 1)
    self.assertEqual(frame0.RAP_picture_flag, 1)
    self.assertEqual(frame0.sw_hdr_skip_length, 0)
    self.assertEqual(frame0.num_ref_frames, 0)

    # Frame 1: inter frame referencing the previous picture.
    frame1 = nv_gpu.nvdec_hevc_pic_s.from_buffer(opaque[1].data())
    _test_common(frame1, opaque[1].data())
    self.assertEqual(frame1.stream_len, 57110)
    self.assertEqual(frame1.IDR_picture_flag, 0)
    self.assertEqual(frame1.RAP_picture_flag, 0)
    self.assertEqual(frame1.sw_hdr_skip_length, 9)
    self.assertEqual(frame1.num_ref_frames, 1)
    self.assertEqual(list(frame1.initreflistidxl0), [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    self.assertEqual(list(frame1.initreflistidxl1), [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    self.assertEqual(list(frame1.RefDiffPicOrderCnts), [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])

    # Frame 3: same reference pattern as frame 1.
    frame3 = nv_gpu.nvdec_hevc_pic_s.from_buffer(opaque[3].data())
    _test_common(frame3, opaque[3].data())
    self.assertEqual(frame3.stream_len, 47036)
    self.assertEqual(frame3.IDR_picture_flag, 0)
    self.assertEqual(frame3.RAP_picture_flag, 0)
    self.assertEqual(frame3.sw_hdr_skip_length, 9)
    self.assertEqual(frame3.num_ref_frames, 1)
    self.assertEqual(list(frame3.initreflistidxl0), [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    self.assertEqual(list(frame3.initreflistidxl1), [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    self.assertEqual(list(frame3.RefDiffPicOrderCnts), [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])

  @unittest.skipUnless(Device.DEFAULT == "NV", "NV only")
  def test_hevc_decode(self):
    """Decode four frames on NV hardware and check output shape/dtype/device."""
    url = "https://github.com/haraschax/filedump/raw/09a497959f7fa6fd8dba501a25f2cdb3a41ecb12/comma_video.hevc"
    dat = fetch(url, headers={"Range": f"bytes=0-{512<<10}"}).read_bytes()
    opaque, frame_info, w, h, luma_w, luma_h, chroma_off = parse_hevc_file_headers(dat)
    frame_info = frame_info[:4]
    # NV12 layout: luma plane plus half-height chroma plane, rows rounded up to 64.
    out_image_size = luma_h + (luma_h + 1) // 2, round_up(luma_w, 64)
    hevc_tensor = Tensor(dat, device="NV")
    opaque_nv = opaque.to("NV").contiguous().realize()
    frames = list(hevc_decode(hevc_tensor, opaque_nv, frame_info, luma_h, luma_w))
    Device.default.synchronize()
    self.assertEqual(len(frames), 4)
    for f in frames:
      self.assertEqual(f.shape, out_image_size)
      self.assertEqual(f.dtype, dtypes.uint8)
      self.assertEqual(f.device, "NV")
if __name__ == "__main__":
unittest.main()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/testextra/test_hevc.py",
"license": "MIT License",
"lines": 74,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:tinygrad/runtime/autogen/avcodec.py | # mypy: disable-error-code="empty-body"
from __future__ import annotations
import ctypes
from typing import Annotated, Literal, TypeAlias
from tinygrad.runtime.support.c import _IO, _IOW, _IOR, _IOWR
from tinygrad.runtime.support import c
# --- Auto-generated ctypes enum bindings (apparently generated from FFmpeg's
# HEVC/avcodec headers — see the file's `# mypy: disable-error-code` autogen
# header). Each c.Enum subclass is a uint32-backed enum; .define() registers
# a named constant at module level. Do not edit values by hand; regenerate.

# HEVC NAL unit types (Table 7-1 of the spec) — TODO confirm against FFmpeg's HEVCNALUnitType.
class enum_HEVCNALUnitType(Annotated[int, ctypes.c_uint32], c.Enum): pass
HEVC_NAL_TRAIL_N = enum_HEVCNALUnitType.define('HEVC_NAL_TRAIL_N', 0)
HEVC_NAL_TRAIL_R = enum_HEVCNALUnitType.define('HEVC_NAL_TRAIL_R', 1)
HEVC_NAL_TSA_N = enum_HEVCNALUnitType.define('HEVC_NAL_TSA_N', 2)
HEVC_NAL_TSA_R = enum_HEVCNALUnitType.define('HEVC_NAL_TSA_R', 3)
HEVC_NAL_STSA_N = enum_HEVCNALUnitType.define('HEVC_NAL_STSA_N', 4)
HEVC_NAL_STSA_R = enum_HEVCNALUnitType.define('HEVC_NAL_STSA_R', 5)
HEVC_NAL_RADL_N = enum_HEVCNALUnitType.define('HEVC_NAL_RADL_N', 6)
HEVC_NAL_RADL_R = enum_HEVCNALUnitType.define('HEVC_NAL_RADL_R', 7)
HEVC_NAL_RASL_N = enum_HEVCNALUnitType.define('HEVC_NAL_RASL_N', 8)
HEVC_NAL_RASL_R = enum_HEVCNALUnitType.define('HEVC_NAL_RASL_R', 9)
HEVC_NAL_VCL_N10 = enum_HEVCNALUnitType.define('HEVC_NAL_VCL_N10', 10)
HEVC_NAL_VCL_R11 = enum_HEVCNALUnitType.define('HEVC_NAL_VCL_R11', 11)
HEVC_NAL_VCL_N12 = enum_HEVCNALUnitType.define('HEVC_NAL_VCL_N12', 12)
HEVC_NAL_VCL_R13 = enum_HEVCNALUnitType.define('HEVC_NAL_VCL_R13', 13)
HEVC_NAL_VCL_N14 = enum_HEVCNALUnitType.define('HEVC_NAL_VCL_N14', 14)
HEVC_NAL_VCL_R15 = enum_HEVCNALUnitType.define('HEVC_NAL_VCL_R15', 15)
HEVC_NAL_BLA_W_LP = enum_HEVCNALUnitType.define('HEVC_NAL_BLA_W_LP', 16)
HEVC_NAL_BLA_W_RADL = enum_HEVCNALUnitType.define('HEVC_NAL_BLA_W_RADL', 17)
HEVC_NAL_BLA_N_LP = enum_HEVCNALUnitType.define('HEVC_NAL_BLA_N_LP', 18)
HEVC_NAL_IDR_W_RADL = enum_HEVCNALUnitType.define('HEVC_NAL_IDR_W_RADL', 19)
HEVC_NAL_IDR_N_LP = enum_HEVCNALUnitType.define('HEVC_NAL_IDR_N_LP', 20)
HEVC_NAL_CRA_NUT = enum_HEVCNALUnitType.define('HEVC_NAL_CRA_NUT', 21)
HEVC_NAL_RSV_IRAP_VCL22 = enum_HEVCNALUnitType.define('HEVC_NAL_RSV_IRAP_VCL22', 22)
HEVC_NAL_RSV_IRAP_VCL23 = enum_HEVCNALUnitType.define('HEVC_NAL_RSV_IRAP_VCL23', 23)
HEVC_NAL_RSV_VCL24 = enum_HEVCNALUnitType.define('HEVC_NAL_RSV_VCL24', 24)
HEVC_NAL_RSV_VCL25 = enum_HEVCNALUnitType.define('HEVC_NAL_RSV_VCL25', 25)
HEVC_NAL_RSV_VCL26 = enum_HEVCNALUnitType.define('HEVC_NAL_RSV_VCL26', 26)
HEVC_NAL_RSV_VCL27 = enum_HEVCNALUnitType.define('HEVC_NAL_RSV_VCL27', 27)
HEVC_NAL_RSV_VCL28 = enum_HEVCNALUnitType.define('HEVC_NAL_RSV_VCL28', 28)
HEVC_NAL_RSV_VCL29 = enum_HEVCNALUnitType.define('HEVC_NAL_RSV_VCL29', 29)
HEVC_NAL_RSV_VCL30 = enum_HEVCNALUnitType.define('HEVC_NAL_RSV_VCL30', 30)
HEVC_NAL_RSV_VCL31 = enum_HEVCNALUnitType.define('HEVC_NAL_RSV_VCL31', 31)
HEVC_NAL_VPS = enum_HEVCNALUnitType.define('HEVC_NAL_VPS', 32)
HEVC_NAL_SPS = enum_HEVCNALUnitType.define('HEVC_NAL_SPS', 33)
HEVC_NAL_PPS = enum_HEVCNALUnitType.define('HEVC_NAL_PPS', 34)
HEVC_NAL_AUD = enum_HEVCNALUnitType.define('HEVC_NAL_AUD', 35)
HEVC_NAL_EOS_NUT = enum_HEVCNALUnitType.define('HEVC_NAL_EOS_NUT', 36)
HEVC_NAL_EOB_NUT = enum_HEVCNALUnitType.define('HEVC_NAL_EOB_NUT', 37)
HEVC_NAL_FD_NUT = enum_HEVCNALUnitType.define('HEVC_NAL_FD_NUT', 38)
HEVC_NAL_SEI_PREFIX = enum_HEVCNALUnitType.define('HEVC_NAL_SEI_PREFIX', 39)
HEVC_NAL_SEI_SUFFIX = enum_HEVCNALUnitType.define('HEVC_NAL_SEI_SUFFIX', 40)
HEVC_NAL_RSV_NVCL41 = enum_HEVCNALUnitType.define('HEVC_NAL_RSV_NVCL41', 41)
HEVC_NAL_RSV_NVCL42 = enum_HEVCNALUnitType.define('HEVC_NAL_RSV_NVCL42', 42)
HEVC_NAL_RSV_NVCL43 = enum_HEVCNALUnitType.define('HEVC_NAL_RSV_NVCL43', 43)
HEVC_NAL_RSV_NVCL44 = enum_HEVCNALUnitType.define('HEVC_NAL_RSV_NVCL44', 44)
HEVC_NAL_RSV_NVCL45 = enum_HEVCNALUnitType.define('HEVC_NAL_RSV_NVCL45', 45)
HEVC_NAL_RSV_NVCL46 = enum_HEVCNALUnitType.define('HEVC_NAL_RSV_NVCL46', 46)
HEVC_NAL_RSV_NVCL47 = enum_HEVCNALUnitType.define('HEVC_NAL_RSV_NVCL47', 47)
HEVC_NAL_UNSPEC48 = enum_HEVCNALUnitType.define('HEVC_NAL_UNSPEC48', 48)
HEVC_NAL_UNSPEC49 = enum_HEVCNALUnitType.define('HEVC_NAL_UNSPEC49', 49)
HEVC_NAL_UNSPEC50 = enum_HEVCNALUnitType.define('HEVC_NAL_UNSPEC50', 50)
HEVC_NAL_UNSPEC51 = enum_HEVCNALUnitType.define('HEVC_NAL_UNSPEC51', 51)
HEVC_NAL_UNSPEC52 = enum_HEVCNALUnitType.define('HEVC_NAL_UNSPEC52', 52)
HEVC_NAL_UNSPEC53 = enum_HEVCNALUnitType.define('HEVC_NAL_UNSPEC53', 53)
HEVC_NAL_UNSPEC54 = enum_HEVCNALUnitType.define('HEVC_NAL_UNSPEC54', 54)
HEVC_NAL_UNSPEC55 = enum_HEVCNALUnitType.define('HEVC_NAL_UNSPEC55', 55)
HEVC_NAL_UNSPEC56 = enum_HEVCNALUnitType.define('HEVC_NAL_UNSPEC56', 56)
HEVC_NAL_UNSPEC57 = enum_HEVCNALUnitType.define('HEVC_NAL_UNSPEC57', 57)
HEVC_NAL_UNSPEC58 = enum_HEVCNALUnitType.define('HEVC_NAL_UNSPEC58', 58)
HEVC_NAL_UNSPEC59 = enum_HEVCNALUnitType.define('HEVC_NAL_UNSPEC59', 59)
HEVC_NAL_UNSPEC60 = enum_HEVCNALUnitType.define('HEVC_NAL_UNSPEC60', 60)
HEVC_NAL_UNSPEC61 = enum_HEVCNALUnitType.define('HEVC_NAL_UNSPEC61', 61)
HEVC_NAL_UNSPEC62 = enum_HEVCNALUnitType.define('HEVC_NAL_UNSPEC62', 62)
HEVC_NAL_UNSPEC63 = enum_HEVCNALUnitType.define('HEVC_NAL_UNSPEC63', 63)

# Slice types: B, P, I.
class enum_HEVCSliceType(Annotated[int, ctypes.c_uint32], c.Enum): pass
HEVC_SLICE_B = enum_HEVCSliceType.define('HEVC_SLICE_B', 0)
HEVC_SLICE_P = enum_HEVCSliceType.define('HEVC_SLICE_P', 1)
HEVC_SLICE_I = enum_HEVCSliceType.define('HEVC_SLICE_I', 2)

# Anonymous enum of HEVC size/count limits; these bound the array sizes used
# by the structs further down (e.g. HEVC_MAX_ENTRY_POINT_OFFSETS = 2700).
class _anonenum0(Annotated[int, ctypes.c_uint32], c.Enum): pass
HEVC_MAX_LAYERS = _anonenum0.define('HEVC_MAX_LAYERS', 63)
HEVC_MAX_SUB_LAYERS = _anonenum0.define('HEVC_MAX_SUB_LAYERS', 7)
HEVC_MAX_LAYER_SETS = _anonenum0.define('HEVC_MAX_LAYER_SETS', 1024)
HEVC_MAX_LAYER_ID = _anonenum0.define('HEVC_MAX_LAYER_ID', 63)
HEVC_MAX_NUH_LAYER_ID = _anonenum0.define('HEVC_MAX_NUH_LAYER_ID', 62)
HEVC_MAX_VPS_COUNT = _anonenum0.define('HEVC_MAX_VPS_COUNT', 16)
HEVC_MAX_SPS_COUNT = _anonenum0.define('HEVC_MAX_SPS_COUNT', 16)
HEVC_MAX_PPS_COUNT = _anonenum0.define('HEVC_MAX_PPS_COUNT', 64)
HEVC_MAX_DPB_SIZE = _anonenum0.define('HEVC_MAX_DPB_SIZE', 16)
HEVC_MAX_REFS = _anonenum0.define('HEVC_MAX_REFS', 16)
HEVC_MAX_SHORT_TERM_REF_PIC_SETS = _anonenum0.define('HEVC_MAX_SHORT_TERM_REF_PIC_SETS', 64)
HEVC_MAX_LONG_TERM_REF_PICS = _anonenum0.define('HEVC_MAX_LONG_TERM_REF_PICS', 32)
HEVC_MIN_LOG2_CTB_SIZE = _anonenum0.define('HEVC_MIN_LOG2_CTB_SIZE', 4)
HEVC_MAX_LOG2_CTB_SIZE = _anonenum0.define('HEVC_MAX_LOG2_CTB_SIZE', 6)
HEVC_MAX_CPB_CNT = _anonenum0.define('HEVC_MAX_CPB_CNT', 32)
HEVC_MAX_LUMA_PS = _anonenum0.define('HEVC_MAX_LUMA_PS', 35651584)
HEVC_MAX_WIDTH = _anonenum0.define('HEVC_MAX_WIDTH', 16888)
HEVC_MAX_HEIGHT = _anonenum0.define('HEVC_MAX_HEIGHT', 16888)
HEVC_MAX_TILE_ROWS = _anonenum0.define('HEVC_MAX_TILE_ROWS', 22)
HEVC_MAX_TILE_COLUMNS = _anonenum0.define('HEVC_MAX_TILE_COLUMNS', 20)
HEVC_MAX_SLICE_SEGMENTS = _anonenum0.define('HEVC_MAX_SLICE_SEGMENTS', 600)
HEVC_MAX_ENTRY_POINT_OFFSETS = _anonenum0.define('HEVC_MAX_ENTRY_POINT_OFFSETS', 2700)
HEVC_MAX_PALETTE_PREDICTOR_SIZE = _anonenum0.define('HEVC_MAX_PALETTE_PREDICTOR_SIZE', 128)

# Bitmask values for layered-coding scalability dimensions.
class enum_HEVCScalabilityMask(Annotated[int, ctypes.c_uint32], c.Enum): pass
HEVC_SCALABILITY_DEPTH = enum_HEVCScalabilityMask.define('HEVC_SCALABILITY_DEPTH', 32768)
HEVC_SCALABILITY_MULTIVIEW = enum_HEVCScalabilityMask.define('HEVC_SCALABILITY_MULTIVIEW', 16384)
HEVC_SCALABILITY_SPATIAL = enum_HEVCScalabilityMask.define('HEVC_SCALABILITY_SPATIAL', 8192)
HEVC_SCALABILITY_AUXILIARY = enum_HEVCScalabilityMask.define('HEVC_SCALABILITY_AUXILIARY', 4096)
HEVC_SCALABILITY_MASK_MAX = enum_HEVCScalabilityMask.define('HEVC_SCALABILITY_MASK_MAX', 65535)

# Auxiliary picture kinds (alpha / depth planes).
class enum_HEVCAuxId(Annotated[int, ctypes.c_uint32], c.Enum): pass
HEVC_AUX_ALPHA = enum_HEVCAuxId.define('HEVC_AUX_ALPHA', 1)
HEVC_AUX_DEPTH = enum_HEVCAuxId.define('HEVC_AUX_DEPTH', 2)
# --- Auto-generated record layouts for H.265 raw header structures. In every
# @c.record class, `Annotated[type, N]` records the field's byte offset within
# the record (e.g. NALUnitHeader: three uint8 fields at offsets 0..2, SIZE=3)
# and SIZE is the total record size in bytes. The primitive TypeAlias lines
# (uint8_t, etc.) appear after first use; this is legal because the file
# starts with `from __future__ import annotations`, making annotations lazy.

# Two-byte NAL unit header fields plus temporal id.
@c.record
class struct_H265RawNALUnitHeader(c.Struct):
  SIZE = 3
  nal_unit_type: Annotated[uint8_t, 0]
  nuh_layer_id: Annotated[uint8_t, 1]
  nuh_temporal_id_plus1: Annotated[uint8_t, 2]
uint8_t: TypeAlias = Annotated[int, ctypes.c_ubyte]
H265RawNALUnitHeader: TypeAlias = struct_H265RawNALUnitHeader

# profile_tier_level(): general flags plus per-sub-layer (7 = HEVC_MAX_SUB_LAYERS) arrays.
@c.record
class struct_H265RawProfileTierLevel(c.Struct):
  SIZE = 422
  general_profile_space: Annotated[uint8_t, 0]
  general_tier_flag: Annotated[uint8_t, 1]
  general_profile_idc: Annotated[uint8_t, 2]
  general_profile_compatibility_flag: Annotated[c.Array[uint8_t, Literal[32]], 3]
  general_progressive_source_flag: Annotated[uint8_t, 35]
  general_interlaced_source_flag: Annotated[uint8_t, 36]
  general_non_packed_constraint_flag: Annotated[uint8_t, 37]
  general_frame_only_constraint_flag: Annotated[uint8_t, 38]
  general_max_12bit_constraint_flag: Annotated[uint8_t, 39]
  general_max_10bit_constraint_flag: Annotated[uint8_t, 40]
  general_max_8bit_constraint_flag: Annotated[uint8_t, 41]
  general_max_422chroma_constraint_flag: Annotated[uint8_t, 42]
  general_max_420chroma_constraint_flag: Annotated[uint8_t, 43]
  general_max_monochrome_constraint_flag: Annotated[uint8_t, 44]
  general_intra_constraint_flag: Annotated[uint8_t, 45]
  general_one_picture_only_constraint_flag: Annotated[uint8_t, 46]
  general_lower_bit_rate_constraint_flag: Annotated[uint8_t, 47]
  general_max_14bit_constraint_flag: Annotated[uint8_t, 48]
  general_inbld_flag: Annotated[uint8_t, 49]
  general_level_idc: Annotated[uint8_t, 50]
  sub_layer_profile_present_flag: Annotated[c.Array[uint8_t, Literal[7]], 51]
  sub_layer_level_present_flag: Annotated[c.Array[uint8_t, Literal[7]], 58]
  sub_layer_profile_space: Annotated[c.Array[uint8_t, Literal[7]], 65]
  sub_layer_tier_flag: Annotated[c.Array[uint8_t, Literal[7]], 72]
  sub_layer_profile_idc: Annotated[c.Array[uint8_t, Literal[7]], 79]
  sub_layer_profile_compatibility_flag: Annotated[c.Array[c.Array[uint8_t, Literal[32]], Literal[7]], 86]
  sub_layer_progressive_source_flag: Annotated[c.Array[uint8_t, Literal[7]], 310]
  sub_layer_interlaced_source_flag: Annotated[c.Array[uint8_t, Literal[7]], 317]
  sub_layer_non_packed_constraint_flag: Annotated[c.Array[uint8_t, Literal[7]], 324]
  sub_layer_frame_only_constraint_flag: Annotated[c.Array[uint8_t, Literal[7]], 331]
  sub_layer_max_12bit_constraint_flag: Annotated[c.Array[uint8_t, Literal[7]], 338]
  sub_layer_max_10bit_constraint_flag: Annotated[c.Array[uint8_t, Literal[7]], 345]
  sub_layer_max_8bit_constraint_flag: Annotated[c.Array[uint8_t, Literal[7]], 352]
  sub_layer_max_422chroma_constraint_flag: Annotated[c.Array[uint8_t, Literal[7]], 359]
  sub_layer_max_420chroma_constraint_flag: Annotated[c.Array[uint8_t, Literal[7]], 366]
  sub_layer_max_monochrome_constraint_flag: Annotated[c.Array[uint8_t, Literal[7]], 373]
  sub_layer_intra_constraint_flag: Annotated[c.Array[uint8_t, Literal[7]], 380]
  sub_layer_one_picture_only_constraint_flag: Annotated[c.Array[uint8_t, Literal[7]], 387]
  sub_layer_lower_bit_rate_constraint_flag: Annotated[c.Array[uint8_t, Literal[7]], 394]
  sub_layer_max_14bit_constraint_flag: Annotated[c.Array[uint8_t, Literal[7]], 401]
  sub_layer_inbld_flag: Annotated[c.Array[uint8_t, Literal[7]], 408]
  sub_layer_level_idc: Annotated[c.Array[uint8_t, Literal[7]], 415]
H265RawProfileTierLevel: TypeAlias = struct_H265RawProfileTierLevel

# Per-sub-layer HRD parameters; arrays sized 32 (= HEVC_MAX_CPB_CNT).
@c.record
class struct_H265RawSubLayerHRDParameters(c.Struct):
  SIZE = 544
  bit_rate_value_minus1: Annotated[c.Array[uint32_t, Literal[32]], 0]
  cpb_size_value_minus1: Annotated[c.Array[uint32_t, Literal[32]], 128]
  cpb_size_du_value_minus1: Annotated[c.Array[uint32_t, Literal[32]], 256]
  bit_rate_du_value_minus1: Annotated[c.Array[uint32_t, Literal[32]], 384]
  cbr_flag: Annotated[c.Array[uint8_t, Literal[32]], 512]
uint32_t: TypeAlias = Annotated[int, ctypes.c_uint32]
H265RawSubLayerHRDParameters: TypeAlias = struct_H265RawSubLayerHRDParameters

# hrd_parameters(): top-level HRD fields plus NAL/VCL sub-layer parameter arrays.
@c.record
class struct_H265RawHRDParameters(c.Struct):
  SIZE = 7672
  nal_hrd_parameters_present_flag: Annotated[uint8_t, 0]
  vcl_hrd_parameters_present_flag: Annotated[uint8_t, 1]
  sub_pic_hrd_params_present_flag: Annotated[uint8_t, 2]
  tick_divisor_minus2: Annotated[uint8_t, 3]
  du_cpb_removal_delay_increment_length_minus1: Annotated[uint8_t, 4]
  sub_pic_cpb_params_in_pic_timing_sei_flag: Annotated[uint8_t, 5]
  dpb_output_delay_du_length_minus1: Annotated[uint8_t, 6]
  bit_rate_scale: Annotated[uint8_t, 7]
  cpb_size_scale: Annotated[uint8_t, 8]
  cpb_size_du_scale: Annotated[uint8_t, 9]
  initial_cpb_removal_delay_length_minus1: Annotated[uint8_t, 10]
  au_cpb_removal_delay_length_minus1: Annotated[uint8_t, 11]
  dpb_output_delay_length_minus1: Annotated[uint8_t, 12]
  fixed_pic_rate_general_flag: Annotated[c.Array[uint8_t, Literal[7]], 13]
  fixed_pic_rate_within_cvs_flag: Annotated[c.Array[uint8_t, Literal[7]], 20]
  elemental_duration_in_tc_minus1: Annotated[c.Array[uint16_t, Literal[7]], 28]
  low_delay_hrd_flag: Annotated[c.Array[uint8_t, Literal[7]], 42]
  cpb_cnt_minus1: Annotated[c.Array[uint8_t, Literal[7]], 49]
  nal_sub_layer_hrd_parameters: Annotated[c.Array[H265RawSubLayerHRDParameters, Literal[7]], 56]
  vcl_sub_layer_hrd_parameters: Annotated[c.Array[H265RawSubLayerHRDParameters, Literal[7]], 3864]
uint16_t: TypeAlias = Annotated[int, ctypes.c_uint16]
H265RawHRDParameters: TypeAlias = struct_H265RawHRDParameters
# vui_parameters(): display/timing info, embedding the HRD parameters record.
@c.record
class struct_H265RawVUI(c.Struct):
  SIZE = 7736
  aspect_ratio_info_present_flag: Annotated[uint8_t, 0]
  aspect_ratio_idc: Annotated[uint8_t, 1]
  sar_width: Annotated[uint16_t, 2]
  sar_height: Annotated[uint16_t, 4]
  overscan_info_present_flag: Annotated[uint8_t, 6]
  overscan_appropriate_flag: Annotated[uint8_t, 7]
  video_signal_type_present_flag: Annotated[uint8_t, 8]
  video_format: Annotated[uint8_t, 9]
  video_full_range_flag: Annotated[uint8_t, 10]
  colour_description_present_flag: Annotated[uint8_t, 11]
  colour_primaries: Annotated[uint8_t, 12]
  transfer_characteristics: Annotated[uint8_t, 13]
  matrix_coefficients: Annotated[uint8_t, 14]
  chroma_loc_info_present_flag: Annotated[uint8_t, 15]
  chroma_sample_loc_type_top_field: Annotated[uint8_t, 16]
  chroma_sample_loc_type_bottom_field: Annotated[uint8_t, 17]
  neutral_chroma_indication_flag: Annotated[uint8_t, 18]
  field_seq_flag: Annotated[uint8_t, 19]
  frame_field_info_present_flag: Annotated[uint8_t, 20]
  default_display_window_flag: Annotated[uint8_t, 21]
  def_disp_win_left_offset: Annotated[uint16_t, 22]
  def_disp_win_right_offset: Annotated[uint16_t, 24]
  def_disp_win_top_offset: Annotated[uint16_t, 26]
  def_disp_win_bottom_offset: Annotated[uint16_t, 28]
  vui_timing_info_present_flag: Annotated[uint8_t, 30]
  vui_num_units_in_tick: Annotated[uint32_t, 32]
  vui_time_scale: Annotated[uint32_t, 36]
  vui_poc_proportional_to_timing_flag: Annotated[uint8_t, 40]
  vui_num_ticks_poc_diff_one_minus1: Annotated[uint32_t, 44]
  vui_hrd_parameters_present_flag: Annotated[uint8_t, 48]
  hrd_parameters: Annotated[H265RawHRDParameters, 52]
  bitstream_restriction_flag: Annotated[uint8_t, 7724]
  tiles_fixed_structure_flag: Annotated[uint8_t, 7725]
  motion_vectors_over_pic_boundaries_flag: Annotated[uint8_t, 7726]
  restricted_ref_pic_lists_flag: Annotated[uint8_t, 7727]
  min_spatial_segmentation_idc: Annotated[uint16_t, 7728]
  max_bytes_per_pic_denom: Annotated[uint8_t, 7730]
  max_bits_per_min_cu_denom: Annotated[uint8_t, 7731]
  log2_max_mv_length_horizontal: Annotated[uint8_t, 7732]
  log2_max_mv_length_vertical: Annotated[uint8_t, 7733]
H265RawVUI: TypeAlias = struct_H265RawVUI

# Opaque (field-less) forward declarations: only the names/aliases are
# generated here, not their layouts.
class struct_H265RawExtensionData(ctypes.Structure): pass
H265RawExtensionData: TypeAlias = struct_H265RawExtensionData
class struct_H265RawVPS(ctypes.Structure): pass
H265RawVPS: TypeAlias = struct_H265RawVPS

# st_ref_pic_set(): short-term reference picture set; arrays sized 16 (= HEVC_MAX_REFS).
@c.record
class struct_H265RawSTRefPicSet(c.Struct):
  SIZE = 136
  inter_ref_pic_set_prediction_flag: Annotated[uint8_t, 0]
  delta_idx_minus1: Annotated[uint8_t, 1]
  delta_rps_sign: Annotated[uint8_t, 2]
  abs_delta_rps_minus1: Annotated[uint16_t, 4]
  used_by_curr_pic_flag: Annotated[c.Array[uint8_t, Literal[16]], 6]
  use_delta_flag: Annotated[c.Array[uint8_t, Literal[16]], 22]
  num_negative_pics: Annotated[uint8_t, 38]
  num_positive_pics: Annotated[uint8_t, 39]
  delta_poc_s0_minus1: Annotated[c.Array[uint16_t, Literal[16]], 40]
  used_by_curr_pic_s0_flag: Annotated[c.Array[uint8_t, Literal[16]], 72]
  delta_poc_s1_minus1: Annotated[c.Array[uint16_t, Literal[16]], 88]
  used_by_curr_pic_s1_flag: Annotated[c.Array[uint8_t, Literal[16]], 120]
H265RawSTRefPicSet: TypeAlias = struct_H265RawSTRefPicSet

# scaling_list_data(): 4 size classes x 6 matrix ids (x 64 coefficients).
@c.record
class struct_H265RawScalingList(c.Struct):
  SIZE = 1632
  scaling_list_pred_mode_flag: Annotated[c.Array[c.Array[uint8_t, Literal[6]], Literal[4]], 0]
  scaling_list_pred_matrix_id_delta: Annotated[c.Array[c.Array[uint8_t, Literal[6]], Literal[4]], 24]
  scaling_list_dc_coef_minus8: Annotated[c.Array[c.Array[int16_t, Literal[6]], Literal[4]], 48]
  scaling_list_delta_coeff: Annotated[c.Array[c.Array[c.Array[int8_t, Literal[64]], Literal[6]], Literal[4]], 96]
int16_t: TypeAlias = Annotated[int, ctypes.c_int16]
int8_t: TypeAlias = Annotated[int, ctypes.c_byte]
H265RawScalingList: TypeAlias = struct_H265RawScalingList

# SPS/PPS are also opaque here — declared by name only.
class struct_H265RawSPS(ctypes.Structure): pass
H265RawSPS: TypeAlias = struct_H265RawSPS
class struct_H265RawPPS(ctypes.Structure): pass
H265RawPPS: TypeAlias = struct_H265RawPPS
# Access unit delimiter: NAL header plus picture type.
@c.record
class struct_H265RawAUD(c.Struct):
  SIZE = 4
  nal_unit_header: Annotated[H265RawNALUnitHeader, 0]
  pic_type: Annotated[uint8_t, 3]
H265RawAUD: TypeAlias = struct_H265RawAUD

# slice_segment_header(): reference lists, weighted prediction tables, QP
# deltas and entry points; large arrays sized by the HEVC_MAX_* limits above
# (e.g. entry_point_offset_minus1 has 2700 = HEVC_MAX_ENTRY_POINT_OFFSETS).
@c.record
class struct_H265RawSliceHeader(c.Struct):
  SIZE = 11772
  nal_unit_header: Annotated[H265RawNALUnitHeader, 0]
  first_slice_segment_in_pic_flag: Annotated[uint8_t, 3]
  no_output_of_prior_pics_flag: Annotated[uint8_t, 4]
  slice_pic_parameter_set_id: Annotated[uint8_t, 5]
  dependent_slice_segment_flag: Annotated[uint8_t, 6]
  slice_segment_address: Annotated[uint16_t, 8]
  slice_reserved_flag: Annotated[c.Array[uint8_t, Literal[8]], 10]
  slice_type: Annotated[uint8_t, 18]
  pic_output_flag: Annotated[uint8_t, 19]
  colour_plane_id: Annotated[uint8_t, 20]
  slice_pic_order_cnt_lsb: Annotated[uint16_t, 22]
  short_term_ref_pic_set_sps_flag: Annotated[uint8_t, 24]
  short_term_ref_pic_set: Annotated[H265RawSTRefPicSet, 26]
  short_term_ref_pic_set_idx: Annotated[uint8_t, 162]
  num_long_term_sps: Annotated[uint8_t, 163]
  num_long_term_pics: Annotated[uint8_t, 164]
  lt_idx_sps: Annotated[c.Array[uint8_t, Literal[16]], 165]
  poc_lsb_lt: Annotated[c.Array[uint8_t, Literal[16]], 181]
  used_by_curr_pic_lt_flag: Annotated[c.Array[uint8_t, Literal[16]], 197]
  delta_poc_msb_present_flag: Annotated[c.Array[uint8_t, Literal[16]], 213]
  delta_poc_msb_cycle_lt: Annotated[c.Array[uint32_t, Literal[16]], 232]
  slice_temporal_mvp_enabled_flag: Annotated[uint8_t, 296]
  slice_sao_luma_flag: Annotated[uint8_t, 297]
  slice_sao_chroma_flag: Annotated[uint8_t, 298]
  num_ref_idx_active_override_flag: Annotated[uint8_t, 299]
  num_ref_idx_l0_active_minus1: Annotated[uint8_t, 300]
  num_ref_idx_l1_active_minus1: Annotated[uint8_t, 301]
  ref_pic_list_modification_flag_l0: Annotated[uint8_t, 302]
  list_entry_l0: Annotated[c.Array[uint8_t, Literal[16]], 303]
  ref_pic_list_modification_flag_l1: Annotated[uint8_t, 319]
  list_entry_l1: Annotated[c.Array[uint8_t, Literal[16]], 320]
  mvd_l1_zero_flag: Annotated[uint8_t, 336]
  cabac_init_flag: Annotated[uint8_t, 337]
  collocated_from_l0_flag: Annotated[uint8_t, 338]
  collocated_ref_idx: Annotated[uint8_t, 339]
  luma_log2_weight_denom: Annotated[uint8_t, 340]
  delta_chroma_log2_weight_denom: Annotated[int8_t, 341]
  luma_weight_l0_flag: Annotated[c.Array[uint8_t, Literal[16]], 342]
  chroma_weight_l0_flag: Annotated[c.Array[uint8_t, Literal[16]], 358]
  delta_luma_weight_l0: Annotated[c.Array[int8_t, Literal[16]], 374]
  luma_offset_l0: Annotated[c.Array[int16_t, Literal[16]], 390]
  delta_chroma_weight_l0: Annotated[c.Array[c.Array[int8_t, Literal[2]], Literal[16]], 422]
  chroma_offset_l0: Annotated[c.Array[c.Array[int16_t, Literal[2]], Literal[16]], 454]
  luma_weight_l1_flag: Annotated[c.Array[uint8_t, Literal[16]], 518]
  chroma_weight_l1_flag: Annotated[c.Array[uint8_t, Literal[16]], 534]
  delta_luma_weight_l1: Annotated[c.Array[int8_t, Literal[16]], 550]
  luma_offset_l1: Annotated[c.Array[int16_t, Literal[16]], 566]
  delta_chroma_weight_l1: Annotated[c.Array[c.Array[int8_t, Literal[2]], Literal[16]], 598]
  chroma_offset_l1: Annotated[c.Array[c.Array[int16_t, Literal[2]], Literal[16]], 630]
  five_minus_max_num_merge_cand: Annotated[uint8_t, 694]
  use_integer_mv_flag: Annotated[uint8_t, 695]
  slice_qp_delta: Annotated[int8_t, 696]
  slice_cb_qp_offset: Annotated[int8_t, 697]
  slice_cr_qp_offset: Annotated[int8_t, 698]
  slice_act_y_qp_offset: Annotated[int8_t, 699]
  slice_act_cb_qp_offset: Annotated[int8_t, 700]
  slice_act_cr_qp_offset: Annotated[int8_t, 701]
  cu_chroma_qp_offset_enabled_flag: Annotated[uint8_t, 702]
  deblocking_filter_override_flag: Annotated[uint8_t, 703]
  slice_deblocking_filter_disabled_flag: Annotated[uint8_t, 704]
  slice_beta_offset_div2: Annotated[int8_t, 705]
  slice_tc_offset_div2: Annotated[int8_t, 706]
  slice_loop_filter_across_slices_enabled_flag: Annotated[uint8_t, 707]
  num_entry_point_offsets: Annotated[uint16_t, 708]
  offset_len_minus1: Annotated[uint8_t, 710]
  entry_point_offset_minus1: Annotated[c.Array[uint32_t, Literal[2700]], 712]
  slice_segment_header_extension_length: Annotated[uint16_t, 11512]
  slice_segment_header_extension_data_byte: Annotated[c.Array[uint8_t, Literal[256]], 11514]
H265RawSliceHeader: TypeAlias = struct_H265RawSliceHeader

# Full slice (header + data) is opaque here — declared by name only.
class struct_H265RawSlice(ctypes.Structure): pass
H265RawSlice: TypeAlias = struct_H265RawSlice
# --- SEI message payload records. Same offset/SIZE conventions as above. ---

# Buffering period SEI; 32-entry arrays (= HEVC_MAX_CPB_CNT) for NAL/VCL CPB delays.
@c.record
class struct_H265RawSEIBufferingPeriod(c.Struct):
  SIZE = 1048
  bp_seq_parameter_set_id: Annotated[uint8_t, 0]
  irap_cpb_params_present_flag: Annotated[uint8_t, 1]
  cpb_delay_offset: Annotated[uint32_t, 4]
  dpb_delay_offset: Annotated[uint32_t, 8]
  concatenation_flag: Annotated[uint8_t, 12]
  au_cpb_removal_delay_delta_minus1: Annotated[uint32_t, 16]
  nal_initial_cpb_removal_delay: Annotated[c.Array[uint32_t, Literal[32]], 20]
  nal_initial_cpb_removal_offset: Annotated[c.Array[uint32_t, Literal[32]], 148]
  nal_initial_alt_cpb_removal_delay: Annotated[c.Array[uint32_t, Literal[32]], 276]
  nal_initial_alt_cpb_removal_offset: Annotated[c.Array[uint32_t, Literal[32]], 404]
  vcl_initial_cpb_removal_delay: Annotated[c.Array[uint32_t, Literal[32]], 532]
  vcl_initial_cpb_removal_offset: Annotated[c.Array[uint32_t, Literal[32]], 660]
  vcl_initial_alt_cpb_removal_delay: Annotated[c.Array[uint32_t, Literal[32]], 788]
  vcl_initial_alt_cpb_removal_offset: Annotated[c.Array[uint32_t, Literal[32]], 916]
  use_alt_cpb_params_flag: Annotated[uint8_t, 1044]
H265RawSEIBufferingPeriod: TypeAlias = struct_H265RawSEIBufferingPeriod

# Picture timing SEI; 600-entry arrays (= HEVC_MAX_SLICE_SEGMENTS) per decoding unit.
@c.record
class struct_H265RawSEIPicTiming(c.Struct):
  SIZE = 3624
  pic_struct: Annotated[uint8_t, 0]
  source_scan_type: Annotated[uint8_t, 1]
  duplicate_flag: Annotated[uint8_t, 2]
  au_cpb_removal_delay_minus1: Annotated[uint32_t, 4]
  pic_dpb_output_delay: Annotated[uint32_t, 8]
  pic_dpb_output_du_delay: Annotated[uint32_t, 12]
  num_decoding_units_minus1: Annotated[uint16_t, 16]
  du_common_cpb_removal_delay_flag: Annotated[uint8_t, 18]
  du_common_cpb_removal_delay_increment_minus1: Annotated[uint32_t, 20]
  num_nalus_in_du_minus1: Annotated[c.Array[uint16_t, Literal[600]], 24]
  du_cpb_removal_delay_increment_minus1: Annotated[c.Array[uint32_t, Literal[600]], 1224]
H265RawSEIPicTiming: TypeAlias = struct_H265RawSEIPicTiming

# Pan-scan rectangle SEI; up to 3 rectangles.
@c.record
class struct_H265RawSEIPanScanRect(c.Struct):
  SIZE = 60
  pan_scan_rect_id: Annotated[uint32_t, 0]
  pan_scan_rect_cancel_flag: Annotated[uint8_t, 4]
  pan_scan_cnt_minus1: Annotated[uint8_t, 5]
  pan_scan_rect_left_offset: Annotated[c.Array[int32_t, Literal[3]], 8]
  pan_scan_rect_right_offset: Annotated[c.Array[int32_t, Literal[3]], 20]
  pan_scan_rect_top_offset: Annotated[c.Array[int32_t, Literal[3]], 32]
  pan_scan_rect_bottom_offset: Annotated[c.Array[int32_t, Literal[3]], 44]
  pan_scan_rect_persistence_flag: Annotated[uint16_t, 56]
int32_t: TypeAlias = Annotated[int, ctypes.c_int32]
H265RawSEIPanScanRect: TypeAlias = struct_H265RawSEIPanScanRect

# Recovery point SEI.
@c.record
class struct_H265RawSEIRecoveryPoint(c.Struct):
  SIZE = 4
  recovery_poc_cnt: Annotated[int16_t, 0]
  exact_match_flag: Annotated[uint8_t, 2]
  broken_link_flag: Annotated[uint8_t, 3]
H265RawSEIRecoveryPoint: TypeAlias = struct_H265RawSEIRecoveryPoint

# Film grain characteristics SEI; 3 colour components x up to 256 intensity intervals.
@c.record
class struct_H265RawFilmGrainCharacteristics(c.Struct):
  SIZE = 10774
  film_grain_characteristics_cancel_flag: Annotated[uint8_t, 0]
  film_grain_model_id: Annotated[uint8_t, 1]
  separate_colour_description_present_flag: Annotated[uint8_t, 2]
  film_grain_bit_depth_luma_minus8: Annotated[uint8_t, 3]
  film_grain_bit_depth_chroma_minus8: Annotated[uint8_t, 4]
  film_grain_full_range_flag: Annotated[uint8_t, 5]
  film_grain_colour_primaries: Annotated[uint8_t, 6]
  film_grain_transfer_characteristics: Annotated[uint8_t, 7]
  film_grain_matrix_coeffs: Annotated[uint8_t, 8]
  blending_mode_id: Annotated[uint8_t, 9]
  log2_scale_factor: Annotated[uint8_t, 10]
  comp_model_present_flag: Annotated[c.Array[uint8_t, Literal[3]], 11]
  num_intensity_intervals_minus1: Annotated[c.Array[uint8_t, Literal[3]], 14]
  num_model_values_minus1: Annotated[c.Array[uint8_t, Literal[3]], 17]
  intensity_interval_lower_bound: Annotated[c.Array[c.Array[uint8_t, Literal[256]], Literal[3]], 20]
  intensity_interval_upper_bound: Annotated[c.Array[c.Array[uint8_t, Literal[256]], Literal[3]], 788]
  comp_model_value: Annotated[c.Array[c.Array[c.Array[int16_t, Literal[6]], Literal[256]], Literal[3]], 1556]
  film_grain_characteristics_persistence_flag: Annotated[uint8_t, 10772]
H265RawFilmGrainCharacteristics: TypeAlias = struct_H265RawFilmGrainCharacteristics

# Display orientation SEI.
@c.record
class struct_H265RawSEIDisplayOrientation(c.Struct):
  SIZE = 10
  display_orientation_cancel_flag: Annotated[uint8_t, 0]
  hor_flip: Annotated[uint8_t, 1]
  ver_flip: Annotated[uint8_t, 2]
  anticlockwise_rotation: Annotated[uint16_t, 4]
  display_orientation_repetition_period: Annotated[uint16_t, 6]
  display_orientation_persistence_flag: Annotated[uint8_t, 8]
H265RawSEIDisplayOrientation: TypeAlias = struct_H265RawSEIDisplayOrientation

# Active parameter sets SEI; 16 SPS ids (= HEVC_MAX_SPS_COUNT), 63 layers (= HEVC_MAX_LAYERS).
@c.record
class struct_H265RawSEIActiveParameterSets(c.Struct):
  SIZE = 83
  active_video_parameter_set_id: Annotated[uint8_t, 0]
  self_contained_cvs_flag: Annotated[uint8_t, 1]
  no_parameter_set_update_flag: Annotated[uint8_t, 2]
  num_sps_ids_minus1: Annotated[uint8_t, 3]
  active_seq_parameter_set_id: Annotated[c.Array[uint8_t, Literal[16]], 4]
  layer_sps_idx: Annotated[c.Array[uint8_t, Literal[63]], 20]
H265RawSEIActiveParameterSets: TypeAlias = struct_H265RawSEIActiveParameterSets

# Decoded picture hash SEI (MD5 / CRC / checksum per colour plane).
@c.record
class struct_H265RawSEIDecodedPictureHash(c.Struct):
  SIZE = 68
  hash_type: Annotated[uint8_t, 0]
  picture_md5: Annotated[c.Array[c.Array[uint8_t, Literal[16]], Literal[3]], 1]
  picture_crc: Annotated[c.Array[uint16_t, Literal[3]], 50]
  picture_checksum: Annotated[c.Array[uint32_t, Literal[3]], 56]
H265RawSEIDecodedPictureHash: TypeAlias = struct_H265RawSEIDecodedPictureHash

# Time code SEI; up to 3 clock timestamps.
@c.record
class struct_H265RawSEITimeCode(c.Struct):
  SIZE = 60
  num_clock_ts: Annotated[uint8_t, 0]
  clock_timestamp_flag: Annotated[c.Array[uint8_t, Literal[3]], 1]
  units_field_based_flag: Annotated[c.Array[uint8_t, Literal[3]], 4]
  counting_type: Annotated[c.Array[uint8_t, Literal[3]], 7]
  full_timestamp_flag: Annotated[c.Array[uint8_t, Literal[3]], 10]
  discontinuity_flag: Annotated[c.Array[uint8_t, Literal[3]], 13]
  cnt_dropped_flag: Annotated[c.Array[uint8_t, Literal[3]], 16]
  n_frames: Annotated[c.Array[uint16_t, Literal[3]], 20]
  seconds_value: Annotated[c.Array[uint8_t, Literal[3]], 26]
  minutes_value: Annotated[c.Array[uint8_t, Literal[3]], 29]
  hours_value: Annotated[c.Array[uint8_t, Literal[3]], 32]
  seconds_flag: Annotated[c.Array[uint8_t, Literal[3]], 35]
  minutes_flag: Annotated[c.Array[uint8_t, Literal[3]], 38]
  hours_flag: Annotated[c.Array[uint8_t, Literal[3]], 41]
  time_offset_length: Annotated[c.Array[uint8_t, Literal[3]], 44]
  time_offset_value: Annotated[c.Array[int32_t, Literal[3]], 48]
H265RawSEITimeCode: TypeAlias = struct_H265RawSEITimeCode

# Alpha channel info SEI.
@c.record
class struct_H265RawSEIAlphaChannelInfo(c.Struct):
  SIZE = 12
  alpha_channel_cancel_flag: Annotated[uint8_t, 0]
  alpha_channel_use_idc: Annotated[uint8_t, 1]
  alpha_channel_bit_depth_minus8: Annotated[uint8_t, 2]
  alpha_transparent_value: Annotated[uint16_t, 4]
  alpha_opaque_value: Annotated[uint16_t, 6]
  alpha_channel_incr_flag: Annotated[uint8_t, 8]
  alpha_channel_clip_flag: Annotated[uint8_t, 9]
  alpha_channel_clip_type_flag: Annotated[uint8_t, 10]
H265RawSEIAlphaChannelInfo: TypeAlias = struct_H265RawSEIAlphaChannelInfo
@c.record
class struct_H265RawSEI3DReferenceDisplaysInfo(c.Struct):
SIZE = 358
prec_ref_display_width: Annotated[uint8_t, 0]
ref_viewing_distance_flag: Annotated[uint8_t, 1]
prec_ref_viewing_dist: Annotated[uint8_t, 2]
num_ref_displays_minus1: Annotated[uint8_t, 3]
left_view_id: Annotated[c.Array[uint16_t, Literal[32]], 4]
right_view_id: Annotated[c.Array[uint16_t, Literal[32]], 68]
exponent_ref_display_width: Annotated[c.Array[uint8_t, Literal[32]], 132]
mantissa_ref_display_width: Annotated[c.Array[uint8_t, Literal[32]], 164]
exponent_ref_viewing_distance: Annotated[c.Array[uint8_t, Literal[32]], 196]
mantissa_ref_viewing_distance: Annotated[c.Array[uint8_t, Literal[32]], 228]
additional_shift_present_flag: Annotated[c.Array[uint8_t, Literal[32]], 260]
num_sample_shift_plus512: Annotated[c.Array[uint16_t, Literal[32]], 292]
three_dimensional_reference_displays_extension_flag: Annotated[uint8_t, 356]
H265RawSEI3DReferenceDisplaysInfo: TypeAlias = struct_H265RawSEI3DReferenceDisplaysInfo
@c.record
class struct_H265RawSEI(c.Struct):
SIZE = 24
nal_unit_header: Annotated[H265RawNALUnitHeader, 0]
message_list: Annotated[SEIRawMessageList, 8]
@c.record
class struct_SEIRawMessageList(c.Struct):
SIZE = 16
messages: Annotated[c.POINTER[SEIRawMessage], 0]
nb_messages: Annotated[Annotated[int, ctypes.c_int32], 8]
nb_messages_allocated: Annotated[Annotated[int, ctypes.c_int32], 12]
SEIRawMessageList: TypeAlias = struct_SEIRawMessageList
@c.record
class struct_SEIRawMessage(c.Struct):
SIZE = 40
payload_type: Annotated[uint32_t, 0]
payload_size: Annotated[uint32_t, 4]
payload: Annotated[ctypes.c_void_p, 8]
payload_ref: Annotated[ctypes.c_void_p, 16]
extension_data: Annotated[c.POINTER[uint8_t], 24]
extension_bit_length: Annotated[size_t, 32]
SEIRawMessage: TypeAlias = struct_SEIRawMessage
size_t: TypeAlias = Annotated[int, ctypes.c_uint64]
H265RawSEI: TypeAlias = struct_H265RawSEI
@c.record
class struct_H265RawFiller(c.Struct):
SIZE = 8
nal_unit_header: Annotated[H265RawNALUnitHeader, 0]
filler_size: Annotated[uint32_t, 4]
H265RawFiller: TypeAlias = struct_H265RawFiller
class struct_CodedBitstreamH265Context(ctypes.Structure): pass
CodedBitstreamH265Context: TypeAlias = struct_CodedBitstreamH265Context
c.init_records()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "tinygrad/runtime/autogen/avcodec.py",
"license": "MIT License",
"lines": 541,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
tinygrad/tinygrad:test/external/fuzz_symbolic_symbolic_div.py | import random, sys
import z3
from tinygrad.uop.ops import UOp, Ops
from tinygrad.uop.validate import uops_to_z3
from tinygrad.helpers import DEBUG, Context, colored
seed = int(sys.argv[1]) if len(sys.argv) > 1 else random.randint(0, 100)
print(f"Seed: {seed}", flush=True)
random.seed(seed)
def get_random_term(ranges, factors):
# 10% chance of nesting
if random.randint(0,9) == 0: return get_random_expr(ranges, factors)
return random.choice(ranges)*random.choice(factors)*random.choice([1, 1, 1, -1])
def get_random_expr(ranges, factors):
num_terms = random.randint(2,4)
x = UOp.sum(*[get_random_term(ranges, factors) for _ in range(num_terms)])
return x.alu(random.choice([Ops.IDIV, Ops.MOD]), x.ufix(random.choice(factors)*random.choice([1, 1, 1, -1])))
if __name__ == "__main__":
skipped = 0
for i in range(700):
if i % 100 == 0:
print(f"Running test {i}")
upper_bounds = [*list(range(1, 4)), 16, 33, 53, 64, 256]
variable_names = ["i", "j", "k"]
variables = [UOp.variable(s, 1, random.choice(upper_bounds)) for s in variable_names]
factors = variables+upper_bounds
# add some products
for _ in range(2): factors.append(random.choice(variables)*random.choice(variables))
# add some adds
for _ in range(2): factors.append(random.choice(variables)+random.choice(factors))
num_ranges = 4
ranges = [UOp.range(random.choice(factors), i) for i in range(num_ranges)]
variable_names += [f"r{i}" for i in range(num_ranges)]
expr = get_random_expr(ranges, factors)
with Context(CORRECT_DIVMOD_FOLDING=1):
simplified_expr = expr.simplify()
if DEBUG>=1:
print(expr.render(simplify=False), " --> ", simplified_expr.render(simplify=False))
solver = z3.Solver()
solver.set(timeout=3000) # some expressions take very long verify, but its very unlikely they actually return sat
z3_expr, z3_simplified_expr, *z3_vars = uops_to_z3(solver, expr, simplified_expr, *variables, *ranges)
check = solver.check(z3_simplified_expr != z3_expr)
if check == z3.unknown and DEBUG>=1:
skipped += 1
print("skipped z3 verification due to timeout")
elif check == z3.sat:
print(colored("simplify INCORRECT!", "red"))
print(solver.model())
var_vals = {s:solver.model()[z] for s,z in zip(variable_names, z3_vars)}
print("reproduce with:")
print("var_vals = ", var_vals)
print("globals = var_vals|{'cdiv':cdiv,'cmod':cmod}")
print("expr = ast.simplify()")
print("assert eval(ast.render(pm=renderer_infer, simplify=False),globals) == eval(expr.render(pm=renderer_infer, simplify=False),globals)")
print()
assert False
if DEBUG >= 2: print(f"validated {expr.render()}")
print(f"Skipped {skipped} expressions due to timeout")
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/external/fuzz_symbolic_symbolic_div.py",
"license": "MIT License",
"lines": 57,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:test/external/external_benchmark_pyrender.py | # benchmark speed of pyrender for all created UOps saved with TRACK_MATCH_STATS=2
import functools, pickle
from tinygrad.uop.ops import UOp, Ops
from tinygrad.helpers import tqdm, temp, time_to_str, cpu_profile
BENCHMARK_OPS = {Ops.INDEX, Ops.BUFFERIZE}
@functools.cache
def create_uop(a:int) -> UOp:
op, dtype, src, arg, *rest = trace.uop_fields[a]
return UOp(op, dtype, tuple(create_uop(s) for s in src), arg, *rest)
if __name__ == "__main__":
# load rewrite trace
with open(temp("rewrites.pkl", append_user=True), "rb") as f:
trace = pickle.load(f)
# benchmark
result:list[tuple[str, int]] = []
try:
for steps in tqdm(trace.rewrites):
for r in steps:
for _,yn,_,__ in r.matches:
y = create_uop(yn)
if y.op in BENCHMARK_OPS:
with cpu_profile("pyrender") as e:
try: ren = y.render()
except Exception: ren = "PYRENDER_ERR"
result.append((ren, float(e.en-e.st)/1e6))
finally:
N = 10
print(f"Slowst {N} renders from {len(result)} samples:")
for ren,tm in sorted(result, key=lambda x:x[1], reverse=True)[:N]:
print(f"{time_to_str(tm).strip():<10s} {ren}")
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/external/external_benchmark_pyrender.py",
"license": "MIT License",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:extra/torch_backend/test_kernel_fusion.py | # simple tests
import unittest
import torch
from tinygrad.helpers import getenv, GlobalCounters
if getenv("TINY_BACKEND2"):
import extra.torch_backend.backend2
device = "cpu"
else:
import extra.torch_backend.backend
device = "tiny"
class TestKernelFusionRegression(unittest.TestCase):
def _realize(self, t): _ = t.detach().cpu().numpy()
def _check_kernel_count(self, fn, expected_kernels):
torch.manual_seed(42)
GlobalCounters.reset()
fn().detach().cpu().numpy()
self.assertEqual(GlobalCounters.kernel_count, expected_kernels)
def test_elementwise_fusion(self):
def fn():
x = torch.randn(128, 128, device=device)
return (x + 1.0) * 2.0 - 0.5
self._check_kernel_count(fn, 5)
def test_relu_fusion(self):
def fn():
x = torch.randn(1, 3, 32, 32, device=device)
conv = torch.nn.Conv2d(3, 16, 3, padding=1).to(device)
with torch.no_grad():
return torch.nn.functional.relu(conv(x))
self._check_kernel_count(fn, 6)
def test_batchnorm_fusion(self):
def fn():
x = torch.randn(2, 3, 16, 16, device=device)
conv = torch.nn.Conv2d(3, 8, 3, padding=1).to(device)
bn = torch.nn.BatchNorm2d(8).to(device)
bn.eval()
with torch.no_grad():
return torch.nn.functional.relu(bn(conv(x)))
self._check_kernel_count(fn, 10)
def test_reduce_fusion(self):
def fn():
x = torch.randn(64, 64, device=device)
return (x * 2.0).sum()
self._check_kernel_count(fn, 5)
def test_matmul_elementwise_fusion(self):
def fn():
x = torch.randn(32, 32, device=device)
w = torch.randn(32, 32, device=device)
return torch.nn.functional.relu(x @ w + 1.0)
self._check_kernel_count(fn, 7)
def test_pooling_fusion(self):
def fn():
x = torch.randn(1, 8, 16, 16, device=device)
return torch.nn.functional.max_pool2d(x * 2.0, 2)
self._check_kernel_count(fn, 5)
def test_residual_add_relu_fusion(self):
def fn():
x = torch.randn(1, 8, 16, 16, device=device)
identity = torch.randn(1, 8, 16, 16, device=device)
out = x + identity
return torch.nn.functional.relu(out)
self._check_kernel_count(fn, 7)
def test_inplace_add_relu_fusion(self):
def fn():
x = torch.randn(1, 16, 32, 32, device=device)
y = torch.randn(1, 16, 32, 32, device=device)
x += y
return torch.nn.functional.relu(x)
self._check_kernel_count(fn, 7)
def test_conv_bn_add_relu_fusion(self):
def fn():
x = torch.randn(1, 8, 16, 16, device=device)
identity = torch.randn(1, 8, 16, 16, device=device)
conv = torch.nn.Conv2d(8, 8, 3, padding=1, bias=False).to(device)
bn = torch.nn.BatchNorm2d(8).to(device)
bn.eval()
with torch.no_grad():
out = bn(conv(x))
out += identity
return torch.nn.functional.relu(out)
self._check_kernel_count(fn, 12)
def test_multiple_inplace_ops_fusion(self):
def fn():
x = torch.randn(64, 64, device=device)
x += 1.0
x *= 2.0
return torch.nn.functional.relu(x)
self._check_kernel_count(fn, 4)
def test_view_inplace_no_fusion_break(self):
def fn():
x = torch.randn(4, 64, device=device)
view = x[1:3]
view += 1.0
return x.sum()
self._check_kernel_count(fn, 8)
def test_batchnorm_running_stats_update(self):
def fn():
x = torch.randn(2, 8, 8, 8, device=device)
bn = torch.nn.BatchNorm2d(8).to(device)
bn.train()
with torch.no_grad():
return bn(x)
self._check_kernel_count(fn, 8)
# this is a minimal extra/other_mnist/beautiful_mnist_torch.py to cover fusion for training with optimizer
def test_mnist_training_fusion(self):
def fn():
model = torch.nn.Sequential(
torch.nn.Conv2d(1, 8, 3, padding=1),
torch.nn.ReLU(),
torch.nn.MaxPool2d(2),
torch.nn.Flatten(),
torch.nn.Linear(8*14*14, 10)
).to(device)
optimizer = torch.optim.Adam(model.parameters(), 1e-3)
x = torch.randn(32, 1, 28, 28, device=device)
labels = torch.randint(0, 10, (32,), device=device)
out = model(x)
loss = torch.nn.functional.cross_entropy(out, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
return loss
self._check_kernel_count(fn, 24)
if __name__ == "__main__":
unittest.main()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "extra/torch_backend/test_kernel_fusion.py",
"license": "MIT License",
"lines": 124,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:tinygrad/uop/divandmod.py | import functools
from tinygrad.uop.ops import PatternMatcher, UPat, Ops, UOp
from tinygrad.dtype import dtypes
from tinygrad.helpers import cdiv, cmod, CORRECT_DIVMOD_FOLDING, unwrap
# NOTE: this cache is only on index UOps
@functools.cache
def fold_divmod_general(d: UOp, correct_divmod_folding: bool) -> UOp|None:
x, y = d.src
# cancel_divmod: simple cancel div/mod case when the range of the numerator lies within a single denominator interval
x_min, x_max, y_min, y_max = x.vmin, x.vmax, y.vmin, y.vmax
assert isinstance(x_min, int) and isinstance(x_max, int) and isinstance(y_min, int) and isinstance(y_max, int)
if y_min==y_max==0: raise ZeroDivisionError(f"{'Division' if d.op is Ops.IDIV else 'Mod'} by zero trying to rewrite {x.alu(d.op, y)}")
if y_min*y_max > 0 and (q:=cdiv(x_min,y_min)) == cdiv(x_min,y_max) == cdiv(x_max,y_min) == cdiv(x_max,y_max):
return x - q*y if d.op is Ops.MOD else d.const_like(q)
# split uops for the rest of the processing
x_peeled, const = x.pop_const()
uops_no_const = list(x_peeled.split_uop(Ops.ADD))
# ** Constant Denominator Rules **
# these rules strictly require y to be a scalar constant > 0
if y.op is Ops.CONST and (c := y.arg) > 0:
# remove_nested_mod: remove nested mod in case the inner mod is a multiple of the outer mod, example: (a%4 + b)%2 -> (a+b)%2
if d.op is Ops.MOD and x.vmin >= 0:
new_xs, changed = [], False
for u in uops_no_const:
if u.op is Ops.MOD and u.src[1].divides(c) is not None:
u = u.src[0]
changed = True
new_xs.append(u)
if changed and (new_x:=(UOp.sum(*new_xs) + const)).vmin >= 0: return new_x % y
# Shared decomposition for folding rules
decomp = [(u.divides(f:=u.const_factor()),f) for u in uops_no_const]
terms, factors = zip(*decomp)
# fold_binary_numerator: fold if expression has one non-constant term that takes on two values
if len(terms)==1 and (v:=terms[0]).vmax-v.vmin == 1:
y1 = cmod(factors[0]*v.vmin+const, c) if d.op is Ops.MOD else cdiv(factors[0]*v.vmin+const, c)
y2 = cmod(factors[0]*v.vmax+const, c) if d.op is Ops.MOD else cdiv(factors[0]*v.vmax+const, c)
return (y2-y1)*(v-v.vmin) + y1
# fold_divmod_congruence: fold if a is congruent to an expression whose range is between 0 and c
if not (x.vmin<0 and correct_divmod_folding):
rems = [min((r:=f%c), r-c, key=abs) for f in factors]
if (rem:=sum(r*v for r,v in zip(rems,terms))+const%c).vmin//c==rem.vmax//c:
if d.op is Ops.MOD: return rem - rem.vmin//c*c
return sum((f-r)//c * v for f,r,v in zip(factors,rems,terms)) + (const-const%c+rem.vmin//c*c)//c
# gcd_with_remainder: factor out common gcd from numerator
# Note: this rule uses uops_no_const to exclude the additive constant from the GCD calculation
if x.vmin >= 0:
gcd = UOp.gcd(*uops_no_const, y).simplify()
if gcd.op is Ops.CONST and gcd.arg > 1:
new_x = unwrap(x_peeled.divide_exact(gcd)).simplify() + (const%c)//gcd.arg
if new_x.vmin >= 0:
ret = new_x.alu(d.op, x.ufix(c//gcd.arg))
return ret*gcd + const%gcd.arg if d.op is Ops.MOD else ret+const//c
# nest_div_by_smallest_factor: try and nest the div and see if it allows the numerator to be simplified
if d.op is Ops.IDIV and x.vmin >= 0:
div = min([c] + [abs(f) for u, f in zip(uops_no_const, factors) if u.op not in (Ops.CONST, Ops.VCONST) and abs(f) > 1 and (c%f)==0])
# NOTE: this is recursive!
if div < c and (newxs := fold_divmod_general(x//div, correct_divmod_folding)) is not None and newxs.vmin >= 0:
return newxs // (c // div)
# ** Variable Denominator / Fallback Rules **
# These rules apply to variables OR constants that failed the checks above.
# Reconstruct all uops including const for these checks.
all_uops = uops_no_const + ([x.const_like(const)] if const != 0 else [])
# divide_by_gcd: x//y -> (x//gcd)//(y//gcd)
gcd = UOp.gcd(*all_uops, y).simplify()
if not (gcd.op is Ops.CONST and gcd.arg==1):
ret = unwrap(x.divide_exact(gcd)).alu(d.op, unwrap(y.divide_exact(gcd)))
return ret*gcd if d.op is Ops.MOD else ret
# factor_remainder: (d*x+y)//d -> x+y//d
if y.vmin<0 or x.vmin<0: return None
quo, rem = [], []
for u in all_uops:
if (q:=u.divide_exact(y)) is not None: quo.append(q)
elif d.op is Ops.MOD and y.op is Ops.CONST and (c:=u.const_factor())%y.arg!=c:
rem.append(u.divides(c)*(c%y.arg))
quo.append(u.const_like(0))
else: rem.append(u)
if not quo: return None
new_x = sum(rem)+x.const_like(0)
if new_x.vmin<0: return None
return new_x%y if d.op is Ops.MOD else new_x//y+sum(quo)
div_and_mod_symbolic = PatternMatcher([
# ** 1. Fast Inline Rules **
((UPat.var("x")//UPat.cvar("c") + UPat.cvar("a"))//UPat.cvar("d"), lambda x,c,a,d: (x+a*c)//(c*d)
if c.vmin>0 and d.vmin>0 and ((x.vmin>=0 and a.vmin>=0) or (x.vmax<=0 and a.vmax<=0)) else None), # (x//c+a)//d -> (x+a*c)//(c*d)
(UPat.var("x", dtypes.index) // UPat.var("d"), lambda x,d: -(x//(-d)) if d.vmax < 0 else None),
(UPat.var("x", dtypes.index) // UPat.var("d"), lambda x,d: -((-x)//d) if x.vmax <= 0 else None),
((UPat.var("x", dtypes.index)+UPat.cvar("c", vec=False)).named("n")//UPat.cvar("d", vec=False),
lambda x,c,n,d: ((x+c.arg%d.arg)//d + c.arg//d.arg) if c.arg%d.arg!=c.arg and x.vmin>=0 and n.vmin>=0 and d.arg>0 else None),
((UPat.var("x", dtypes.index)+UPat.cvar("c", vec=False)).named("n")//UPat.cvar("d", vec=False),
lambda x,c,n,d: (-(-(c.arg%d.arg + x - (d.arg-1))//d) + c.arg//d.arg) if x.vmax<=0 and n.vmin>=0 and d.arg>0 else None),
# ** 2. Slow Rules **
(UPat((Ops.IDIV, Ops.MOD), dtypes.index, name="d"), lambda d: fold_divmod_general(d, bool(CORRECT_DIVMOD_FOLDING))),
# NOTE: these have to go at the bottom or TestSymbolicOps.test_var loops
(UPat.var("x", dtypes.index) % UPat.var("d"), lambda x,d: -((-x)%d) if x.vmax <= 0 else None),
(UPat.var("x", dtypes.index) % UPat.var("d"), lambda x,d: (x%(-d)) if d.vmax < 0 else None),
]) | {
"repo_id": "tinygrad/tinygrad",
"file_path": "tinygrad/uop/divandmod.py",
"license": "MIT License",
"lines": 96,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
tinygrad/tinygrad:tinygrad/runtime/autogen/libclang.py | # mypy: disable-error-code="empty-body"
from __future__ import annotations
import ctypes
from typing import Annotated, Literal, TypeAlias
from tinygrad.runtime.support.c import _IO, _IOW, _IOR, _IOWR
from tinygrad.runtime.support import c
from tinygrad.helpers import OSX
dll = c.DLL('libclang', '/opt/homebrew/opt/llvm@20/lib/libclang.dylib' if OSX else ['clang-20', 'clang'])
CXIndex: TypeAlias = ctypes.c_void_p
class struct_CXTargetInfoImpl(ctypes.Structure): pass
CXTargetInfo: TypeAlias = c.POINTER[struct_CXTargetInfoImpl]
class struct_CXTranslationUnitImpl(ctypes.Structure): pass
CXTranslationUnit: TypeAlias = c.POINTER[struct_CXTranslationUnitImpl]
CXClientData: TypeAlias = ctypes.c_void_p
@c.record
class struct_CXUnsavedFile(c.Struct):
SIZE = 24
Filename: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 0]
Contents: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 8]
Length: Annotated[Annotated[int, ctypes.c_uint64], 16]
class enum_CXAvailabilityKind(Annotated[int, ctypes.c_uint32], c.Enum): pass
CXAvailability_Available = enum_CXAvailabilityKind.define('CXAvailability_Available', 0)
CXAvailability_Deprecated = enum_CXAvailabilityKind.define('CXAvailability_Deprecated', 1)
CXAvailability_NotAvailable = enum_CXAvailabilityKind.define('CXAvailability_NotAvailable', 2)
CXAvailability_NotAccessible = enum_CXAvailabilityKind.define('CXAvailability_NotAccessible', 3)
@c.record
class struct_CXVersion(c.Struct):
SIZE = 12
Major: Annotated[Annotated[int, ctypes.c_int32], 0]
Minor: Annotated[Annotated[int, ctypes.c_int32], 4]
Subminor: Annotated[Annotated[int, ctypes.c_int32], 8]
CXVersion: TypeAlias = struct_CXVersion
class enum_CXCursor_ExceptionSpecificationKind(Annotated[int, ctypes.c_uint32], c.Enum): pass
CXCursor_ExceptionSpecificationKind_None = enum_CXCursor_ExceptionSpecificationKind.define('CXCursor_ExceptionSpecificationKind_None', 0)
CXCursor_ExceptionSpecificationKind_DynamicNone = enum_CXCursor_ExceptionSpecificationKind.define('CXCursor_ExceptionSpecificationKind_DynamicNone', 1)
CXCursor_ExceptionSpecificationKind_Dynamic = enum_CXCursor_ExceptionSpecificationKind.define('CXCursor_ExceptionSpecificationKind_Dynamic', 2)
CXCursor_ExceptionSpecificationKind_MSAny = enum_CXCursor_ExceptionSpecificationKind.define('CXCursor_ExceptionSpecificationKind_MSAny', 3)
CXCursor_ExceptionSpecificationKind_BasicNoexcept = enum_CXCursor_ExceptionSpecificationKind.define('CXCursor_ExceptionSpecificationKind_BasicNoexcept', 4)
CXCursor_ExceptionSpecificationKind_ComputedNoexcept = enum_CXCursor_ExceptionSpecificationKind.define('CXCursor_ExceptionSpecificationKind_ComputedNoexcept', 5)
CXCursor_ExceptionSpecificationKind_Unevaluated = enum_CXCursor_ExceptionSpecificationKind.define('CXCursor_ExceptionSpecificationKind_Unevaluated', 6)
CXCursor_ExceptionSpecificationKind_Uninstantiated = enum_CXCursor_ExceptionSpecificationKind.define('CXCursor_ExceptionSpecificationKind_Uninstantiated', 7)
CXCursor_ExceptionSpecificationKind_Unparsed = enum_CXCursor_ExceptionSpecificationKind.define('CXCursor_ExceptionSpecificationKind_Unparsed', 8)
CXCursor_ExceptionSpecificationKind_NoThrow = enum_CXCursor_ExceptionSpecificationKind.define('CXCursor_ExceptionSpecificationKind_NoThrow', 9)
@dll.bind
def clang_createIndex(excludeDeclarationsFromPCH:Annotated[int, ctypes.c_int32], displayDiagnostics:Annotated[int, ctypes.c_int32]) -> CXIndex: ...
@dll.bind
def clang_disposeIndex(index:CXIndex) -> None: ...
class CXChoice(Annotated[int, ctypes.c_uint32], c.Enum): pass
CXChoice_Default = CXChoice.define('CXChoice_Default', 0)
CXChoice_Enabled = CXChoice.define('CXChoice_Enabled', 1)
CXChoice_Disabled = CXChoice.define('CXChoice_Disabled', 2)
class CXGlobalOptFlags(Annotated[int, ctypes.c_uint32], c.Enum): pass
CXGlobalOpt_None = CXGlobalOptFlags.define('CXGlobalOpt_None', 0)
CXGlobalOpt_ThreadBackgroundPriorityForIndexing = CXGlobalOptFlags.define('CXGlobalOpt_ThreadBackgroundPriorityForIndexing', 1)
CXGlobalOpt_ThreadBackgroundPriorityForEditing = CXGlobalOptFlags.define('CXGlobalOpt_ThreadBackgroundPriorityForEditing', 2)
CXGlobalOpt_ThreadBackgroundPriorityForAll = CXGlobalOptFlags.define('CXGlobalOpt_ThreadBackgroundPriorityForAll', 3)
@c.record
class struct_CXIndexOptions(c.Struct):
SIZE = 24
Size: Annotated[Annotated[int, ctypes.c_uint32], 0]
ThreadBackgroundPriorityForIndexing: Annotated[Annotated[int, ctypes.c_ubyte], 4]
ThreadBackgroundPriorityForEditing: Annotated[Annotated[int, ctypes.c_ubyte], 5]
ExcludeDeclarationsFromPCH: Annotated[Annotated[int, ctypes.c_uint32], 6, 1, 0]
DisplayDiagnostics: Annotated[Annotated[int, ctypes.c_uint32], 6, 1, 1]
StorePreamblesInMemory: Annotated[Annotated[int, ctypes.c_uint32], 6, 1, 2]
PreambleStoragePath: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 8]
InvocationEmissionPath: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 16]
CXIndexOptions: TypeAlias = struct_CXIndexOptions
@dll.bind
def clang_createIndexWithOptions(options:c.POINTER[CXIndexOptions]) -> CXIndex: ...
@dll.bind
def clang_CXIndex_setGlobalOptions(_0:CXIndex, options:Annotated[int, ctypes.c_uint32]) -> None: ...
@dll.bind
def clang_CXIndex_getGlobalOptions(_0:CXIndex) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_CXIndex_setInvocationEmissionPathOption(_0:CXIndex, Path:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> None: ...
CXFile: TypeAlias = ctypes.c_void_p
@dll.bind
def clang_isFileMultipleIncludeGuarded(tu:CXTranslationUnit, file:CXFile) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_getFile(tu:CXTranslationUnit, file_name:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> CXFile: ...
size_t: TypeAlias = Annotated[int, ctypes.c_uint64]
@dll.bind
def clang_getFileContents(tu:CXTranslationUnit, file:CXFile, size:c.POINTER[size_t]) -> c.POINTER[Annotated[bytes, ctypes.c_char]]: ...
@c.record
class CXSourceLocation(c.Struct):
SIZE = 24
ptr_data: Annotated[c.Array[ctypes.c_void_p, Literal[2]], 0]
int_data: Annotated[Annotated[int, ctypes.c_uint32], 16]
@dll.bind
def clang_getLocation(tu:CXTranslationUnit, file:CXFile, line:Annotated[int, ctypes.c_uint32], column:Annotated[int, ctypes.c_uint32]) -> CXSourceLocation: ...
@dll.bind
def clang_getLocationForOffset(tu:CXTranslationUnit, file:CXFile, offset:Annotated[int, ctypes.c_uint32]) -> CXSourceLocation: ...
@c.record
class CXSourceRangeList(c.Struct):
SIZE = 16
count: Annotated[Annotated[int, ctypes.c_uint32], 0]
ranges: Annotated[c.POINTER[CXSourceRange], 8]
@c.record
class CXSourceRange(c.Struct):
SIZE = 24
ptr_data: Annotated[c.Array[ctypes.c_void_p, Literal[2]], 0]
begin_int_data: Annotated[Annotated[int, ctypes.c_uint32], 16]
end_int_data: Annotated[Annotated[int, ctypes.c_uint32], 20]
@dll.bind
def clang_getSkippedRanges(tu:CXTranslationUnit, file:CXFile) -> c.POINTER[CXSourceRangeList]: ...
@dll.bind
def clang_getAllSkippedRanges(tu:CXTranslationUnit) -> c.POINTER[CXSourceRangeList]: ...
@dll.bind
def clang_getNumDiagnostics(Unit:CXTranslationUnit) -> Annotated[int, ctypes.c_uint32]: ...
CXDiagnostic: TypeAlias = ctypes.c_void_p
@dll.bind
def clang_getDiagnostic(Unit:CXTranslationUnit, Index:Annotated[int, ctypes.c_uint32]) -> CXDiagnostic: ...
CXDiagnosticSet: TypeAlias = ctypes.c_void_p
@dll.bind
def clang_getDiagnosticSetFromTU(Unit:CXTranslationUnit) -> CXDiagnosticSet: ...
@c.record
class CXString(c.Struct):
SIZE = 16
data: Annotated[ctypes.c_void_p, 0]
private_flags: Annotated[Annotated[int, ctypes.c_uint32], 8]
@dll.bind
def clang_getTranslationUnitSpelling(CTUnit:CXTranslationUnit) -> CXString: ...
@dll.bind
def clang_createTranslationUnitFromSourceFile(CIdx:CXIndex, source_filename:c.POINTER[Annotated[bytes, ctypes.c_char]], num_clang_command_line_args:Annotated[int, ctypes.c_int32], clang_command_line_args:c.POINTER[c.POINTER[Annotated[bytes, ctypes.c_char]]], num_unsaved_files:Annotated[int, ctypes.c_uint32], unsaved_files:c.POINTER[struct_CXUnsavedFile]) -> CXTranslationUnit: ...
@dll.bind
def clang_createTranslationUnit(CIdx:CXIndex, ast_filename:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> CXTranslationUnit: ...
class enum_CXErrorCode(Annotated[int, ctypes.c_uint32], c.Enum): pass
CXError_Success = enum_CXErrorCode.define('CXError_Success', 0)
CXError_Failure = enum_CXErrorCode.define('CXError_Failure', 1)
CXError_Crashed = enum_CXErrorCode.define('CXError_Crashed', 2)
CXError_InvalidArguments = enum_CXErrorCode.define('CXError_InvalidArguments', 3)
CXError_ASTReadError = enum_CXErrorCode.define('CXError_ASTReadError', 4)
@dll.bind
def clang_createTranslationUnit2(CIdx:CXIndex, ast_filename:c.POINTER[Annotated[bytes, ctypes.c_char]], out_TU:c.POINTER[CXTranslationUnit]) -> enum_CXErrorCode: ...
class enum_CXTranslationUnit_Flags(Annotated[int, ctypes.c_uint32], c.Enum): pass
CXTranslationUnit_None = enum_CXTranslationUnit_Flags.define('CXTranslationUnit_None', 0)
CXTranslationUnit_DetailedPreprocessingRecord = enum_CXTranslationUnit_Flags.define('CXTranslationUnit_DetailedPreprocessingRecord', 1)
CXTranslationUnit_Incomplete = enum_CXTranslationUnit_Flags.define('CXTranslationUnit_Incomplete', 2)
CXTranslationUnit_PrecompiledPreamble = enum_CXTranslationUnit_Flags.define('CXTranslationUnit_PrecompiledPreamble', 4)
CXTranslationUnit_CacheCompletionResults = enum_CXTranslationUnit_Flags.define('CXTranslationUnit_CacheCompletionResults', 8)
CXTranslationUnit_ForSerialization = enum_CXTranslationUnit_Flags.define('CXTranslationUnit_ForSerialization', 16)
CXTranslationUnit_CXXChainedPCH = enum_CXTranslationUnit_Flags.define('CXTranslationUnit_CXXChainedPCH', 32)
CXTranslationUnit_SkipFunctionBodies = enum_CXTranslationUnit_Flags.define('CXTranslationUnit_SkipFunctionBodies', 64)
CXTranslationUnit_IncludeBriefCommentsInCodeCompletion = enum_CXTranslationUnit_Flags.define('CXTranslationUnit_IncludeBriefCommentsInCodeCompletion', 128)
CXTranslationUnit_CreatePreambleOnFirstParse = enum_CXTranslationUnit_Flags.define('CXTranslationUnit_CreatePreambleOnFirstParse', 256)
CXTranslationUnit_KeepGoing = enum_CXTranslationUnit_Flags.define('CXTranslationUnit_KeepGoing', 512)
CXTranslationUnit_SingleFileParse = enum_CXTranslationUnit_Flags.define('CXTranslationUnit_SingleFileParse', 1024)
CXTranslationUnit_LimitSkipFunctionBodiesToPreamble = enum_CXTranslationUnit_Flags.define('CXTranslationUnit_LimitSkipFunctionBodiesToPreamble', 2048)
CXTranslationUnit_IncludeAttributedTypes = enum_CXTranslationUnit_Flags.define('CXTranslationUnit_IncludeAttributedTypes', 4096)
CXTranslationUnit_VisitImplicitAttributes = enum_CXTranslationUnit_Flags.define('CXTranslationUnit_VisitImplicitAttributes', 8192)
CXTranslationUnit_IgnoreNonErrorsFromIncludedFiles = enum_CXTranslationUnit_Flags.define('CXTranslationUnit_IgnoreNonErrorsFromIncludedFiles', 16384)
CXTranslationUnit_RetainExcludedConditionalBlocks = enum_CXTranslationUnit_Flags.define('CXTranslationUnit_RetainExcludedConditionalBlocks', 32768)
@dll.bind
def clang_defaultEditingTranslationUnitOptions() -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_parseTranslationUnit(CIdx:CXIndex, source_filename:c.POINTER[Annotated[bytes, ctypes.c_char]], command_line_args:c.POINTER[c.POINTER[Annotated[bytes, ctypes.c_char]]], num_command_line_args:Annotated[int, ctypes.c_int32], unsaved_files:c.POINTER[struct_CXUnsavedFile], num_unsaved_files:Annotated[int, ctypes.c_uint32], options:Annotated[int, ctypes.c_uint32]) -> CXTranslationUnit: ...
@dll.bind
def clang_parseTranslationUnit2(CIdx:CXIndex, source_filename:c.POINTER[Annotated[bytes, ctypes.c_char]], command_line_args:c.POINTER[c.POINTER[Annotated[bytes, ctypes.c_char]]], num_command_line_args:Annotated[int, ctypes.c_int32], unsaved_files:c.POINTER[struct_CXUnsavedFile], num_unsaved_files:Annotated[int, ctypes.c_uint32], options:Annotated[int, ctypes.c_uint32], out_TU:c.POINTER[CXTranslationUnit]) -> enum_CXErrorCode: ...
@dll.bind
def clang_parseTranslationUnit2FullArgv(CIdx:CXIndex, source_filename:c.POINTER[Annotated[bytes, ctypes.c_char]], command_line_args:c.POINTER[c.POINTER[Annotated[bytes, ctypes.c_char]]], num_command_line_args:Annotated[int, ctypes.c_int32], unsaved_files:c.POINTER[struct_CXUnsavedFile], num_unsaved_files:Annotated[int, ctypes.c_uint32], options:Annotated[int, ctypes.c_uint32], out_TU:c.POINTER[CXTranslationUnit]) -> enum_CXErrorCode: ...
# Flag set for clang_saveTranslationUnit; only the zero "no flags" value is
# defined here.
class enum_CXSaveTranslationUnit_Flags(Annotated[int, ctypes.c_uint32], c.Enum): pass
CXSaveTranslationUnit_None = enum_CXSaveTranslationUnit_Flags.define('CXSaveTranslationUnit_None', 0)
@dll.bind
def clang_defaultSaveOptions(TU:CXTranslationUnit) -> Annotated[int, ctypes.c_uint32]: ...
# Result codes returned (as a plain int) by clang_saveTranslationUnit.
class enum_CXSaveError(Annotated[int, ctypes.c_uint32], c.Enum): pass
CXSaveError_None = enum_CXSaveError.define('CXSaveError_None', 0)
CXSaveError_Unknown = enum_CXSaveError.define('CXSaveError_Unknown', 1)
CXSaveError_TranslationErrors = enum_CXSaveError.define('CXSaveError_TranslationErrors', 2)
CXSaveError_InvalidTU = enum_CXSaveError.define('CXSaveError_InvalidTU', 3)
@dll.bind
def clang_saveTranslationUnit(TU:CXTranslationUnit, FileName:c.POINTER[Annotated[bytes, ctypes.c_char]], options:Annotated[int, ctypes.c_uint32]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def clang_suspendTranslationUnit(_0:CXTranslationUnit) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_disposeTranslationUnit(_0:CXTranslationUnit) -> None: ...
# Flag set for clang_reparseTranslationUnit; only the zero value is defined.
class enum_CXReparse_Flags(Annotated[int, ctypes.c_uint32], c.Enum): pass
CXReparse_None = enum_CXReparse_Flags.define('CXReparse_None', 0)
@dll.bind
def clang_defaultReparseOptions(TU:CXTranslationUnit) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_reparseTranslationUnit(TU:CXTranslationUnit, num_unsaved_files:Annotated[int, ctypes.c_uint32], unsaved_files:c.POINTER[struct_CXUnsavedFile], options:Annotated[int, ctypes.c_uint32]) -> Annotated[int, ctypes.c_int32]: ...
# Categories of memory usage reported by clang_getCXTUResourceUsage.
# Values 1..14; the *_BEGIN/_END and *_First/_Last names are range sentinels
# aliasing existing values, not distinct categories.
class enum_CXTUResourceUsageKind(Annotated[int, ctypes.c_uint32], c.Enum): pass
CXTUResourceUsage_AST = enum_CXTUResourceUsageKind.define('CXTUResourceUsage_AST', 1)
CXTUResourceUsage_Identifiers = enum_CXTUResourceUsageKind.define('CXTUResourceUsage_Identifiers', 2)
CXTUResourceUsage_Selectors = enum_CXTUResourceUsageKind.define('CXTUResourceUsage_Selectors', 3)
CXTUResourceUsage_GlobalCompletionResults = enum_CXTUResourceUsageKind.define('CXTUResourceUsage_GlobalCompletionResults', 4)
CXTUResourceUsage_SourceManagerContentCache = enum_CXTUResourceUsageKind.define('CXTUResourceUsage_SourceManagerContentCache', 5)
CXTUResourceUsage_AST_SideTables = enum_CXTUResourceUsageKind.define('CXTUResourceUsage_AST_SideTables', 6)
CXTUResourceUsage_SourceManager_Membuffer_Malloc = enum_CXTUResourceUsageKind.define('CXTUResourceUsage_SourceManager_Membuffer_Malloc', 7)
CXTUResourceUsage_SourceManager_Membuffer_MMap = enum_CXTUResourceUsageKind.define('CXTUResourceUsage_SourceManager_Membuffer_MMap', 8)
CXTUResourceUsage_ExternalASTSource_Membuffer_Malloc = enum_CXTUResourceUsageKind.define('CXTUResourceUsage_ExternalASTSource_Membuffer_Malloc', 9)
CXTUResourceUsage_ExternalASTSource_Membuffer_MMap = enum_CXTUResourceUsageKind.define('CXTUResourceUsage_ExternalASTSource_Membuffer_MMap', 10)
CXTUResourceUsage_Preprocessor = enum_CXTUResourceUsageKind.define('CXTUResourceUsage_Preprocessor', 11)
CXTUResourceUsage_PreprocessingRecord = enum_CXTUResourceUsageKind.define('CXTUResourceUsage_PreprocessingRecord', 12)
CXTUResourceUsage_SourceManager_DataStructures = enum_CXTUResourceUsageKind.define('CXTUResourceUsage_SourceManager_DataStructures', 13)
CXTUResourceUsage_Preprocessor_HeaderSearch = enum_CXTUResourceUsageKind.define('CXTUResourceUsage_Preprocessor_HeaderSearch', 14)
CXTUResourceUsage_MEMORY_IN_BYTES_BEGIN = enum_CXTUResourceUsageKind.define('CXTUResourceUsage_MEMORY_IN_BYTES_BEGIN', 1)
CXTUResourceUsage_MEMORY_IN_BYTES_END = enum_CXTUResourceUsageKind.define('CXTUResourceUsage_MEMORY_IN_BYTES_END', 14)
CXTUResourceUsage_First = enum_CXTUResourceUsageKind.define('CXTUResourceUsage_First', 1)
CXTUResourceUsage_Last = enum_CXTUResourceUsageKind.define('CXTUResourceUsage_Last', 14)
# Returns a C string naming the given resource-usage kind.
@dll.bind
def clang_getTUResourceUsageName(kind:enum_CXTUResourceUsageKind) -> c.POINTER[Annotated[bytes, ctypes.c_char]]: ...
# One (kind, amount) pair from a resource-usage report.
# 16-byte layout: kind (u32 enum) at offset 0, amount (u64) at offset 8
# (4 bytes of alignment padding between them).
@c.record
class struct_CXTUResourceUsageEntry(c.Struct):
  SIZE = 16
  kind: Annotated[enum_CXTUResourceUsageKind, 0]
  amount: Annotated[Annotated[int, ctypes.c_uint64], 8]
CXTUResourceUsageEntry: TypeAlias = struct_CXTUResourceUsageEntry
# Resource-usage report returned by clang_getCXTUResourceUsage.
# 24-byte layout: opaque data pointer at 0, numEntries (u32) at 8,
# entries array pointer at 16. Must be released with
# clang_disposeCXTUResourceUsage.
@c.record
class struct_CXTUResourceUsage(c.Struct):
  SIZE = 24
  data: Annotated[ctypes.c_void_p, 0]
  numEntries: Annotated[Annotated[int, ctypes.c_uint32], 8]
  entries: Annotated[c.POINTER[CXTUResourceUsageEntry], 16]
CXTUResourceUsage: TypeAlias = struct_CXTUResourceUsage
# Resource-usage query/release pair, plus target-info accessors.
@dll.bind
def clang_getCXTUResourceUsage(TU:CXTranslationUnit) -> CXTUResourceUsage: ...
@dll.bind
def clang_disposeCXTUResourceUsage(usage:CXTUResourceUsage) -> None: ...
# CXTargetInfo obtained here must be freed with clang_TargetInfo_dispose.
@dll.bind
def clang_getTranslationUnitTargetInfo(CTUnit:CXTranslationUnit) -> CXTargetInfo: ...
@dll.bind
def clang_TargetInfo_dispose(Info:CXTargetInfo) -> None: ...
@dll.bind
def clang_TargetInfo_getTriple(Info:CXTargetInfo) -> CXString: ...
@dll.bind
def clang_TargetInfo_getPointerWidth(Info:CXTargetInfo) -> Annotated[int, ctypes.c_int32]: ...
# Cursor kinds for AST traversal. The numbering is partitioned into ranges
# delimited by the First*/Last* sentinel aliases below:
#   declarations 1-39, references 40-50, invalid 70-73, expressions 100-156,
#   statements 200-331, translation unit 350, attributes 400-441,
#   preprocessing 500-503, extra declarations 600-604, overload candidate 700.
class enum_CXCursorKind(Annotated[int, ctypes.c_uint32], c.Enum): pass
# Declarations (CXCursor_FirstDecl=1 .. CXCursor_LastDecl=39).
CXCursor_UnexposedDecl = enum_CXCursorKind.define('CXCursor_UnexposedDecl', 1)
CXCursor_StructDecl = enum_CXCursorKind.define('CXCursor_StructDecl', 2)
CXCursor_UnionDecl = enum_CXCursorKind.define('CXCursor_UnionDecl', 3)
CXCursor_ClassDecl = enum_CXCursorKind.define('CXCursor_ClassDecl', 4)
CXCursor_EnumDecl = enum_CXCursorKind.define('CXCursor_EnumDecl', 5)
CXCursor_FieldDecl = enum_CXCursorKind.define('CXCursor_FieldDecl', 6)
CXCursor_EnumConstantDecl = enum_CXCursorKind.define('CXCursor_EnumConstantDecl', 7)
CXCursor_FunctionDecl = enum_CXCursorKind.define('CXCursor_FunctionDecl', 8)
CXCursor_VarDecl = enum_CXCursorKind.define('CXCursor_VarDecl', 9)
CXCursor_ParmDecl = enum_CXCursorKind.define('CXCursor_ParmDecl', 10)
CXCursor_ObjCInterfaceDecl = enum_CXCursorKind.define('CXCursor_ObjCInterfaceDecl', 11)
CXCursor_ObjCCategoryDecl = enum_CXCursorKind.define('CXCursor_ObjCCategoryDecl', 12)
CXCursor_ObjCProtocolDecl = enum_CXCursorKind.define('CXCursor_ObjCProtocolDecl', 13)
CXCursor_ObjCPropertyDecl = enum_CXCursorKind.define('CXCursor_ObjCPropertyDecl', 14)
CXCursor_ObjCIvarDecl = enum_CXCursorKind.define('CXCursor_ObjCIvarDecl', 15)
CXCursor_ObjCInstanceMethodDecl = enum_CXCursorKind.define('CXCursor_ObjCInstanceMethodDecl', 16)
CXCursor_ObjCClassMethodDecl = enum_CXCursorKind.define('CXCursor_ObjCClassMethodDecl', 17)
CXCursor_ObjCImplementationDecl = enum_CXCursorKind.define('CXCursor_ObjCImplementationDecl', 18)
CXCursor_ObjCCategoryImplDecl = enum_CXCursorKind.define('CXCursor_ObjCCategoryImplDecl', 19)
CXCursor_TypedefDecl = enum_CXCursorKind.define('CXCursor_TypedefDecl', 20)
CXCursor_CXXMethod = enum_CXCursorKind.define('CXCursor_CXXMethod', 21)
CXCursor_Namespace = enum_CXCursorKind.define('CXCursor_Namespace', 22)
CXCursor_LinkageSpec = enum_CXCursorKind.define('CXCursor_LinkageSpec', 23)
CXCursor_Constructor = enum_CXCursorKind.define('CXCursor_Constructor', 24)
CXCursor_Destructor = enum_CXCursorKind.define('CXCursor_Destructor', 25)
CXCursor_ConversionFunction = enum_CXCursorKind.define('CXCursor_ConversionFunction', 26)
CXCursor_TemplateTypeParameter = enum_CXCursorKind.define('CXCursor_TemplateTypeParameter', 27)
CXCursor_NonTypeTemplateParameter = enum_CXCursorKind.define('CXCursor_NonTypeTemplateParameter', 28)
CXCursor_TemplateTemplateParameter = enum_CXCursorKind.define('CXCursor_TemplateTemplateParameter', 29)
CXCursor_FunctionTemplate = enum_CXCursorKind.define('CXCursor_FunctionTemplate', 30)
CXCursor_ClassTemplate = enum_CXCursorKind.define('CXCursor_ClassTemplate', 31)
CXCursor_ClassTemplatePartialSpecialization = enum_CXCursorKind.define('CXCursor_ClassTemplatePartialSpecialization', 32)
CXCursor_NamespaceAlias = enum_CXCursorKind.define('CXCursor_NamespaceAlias', 33)
CXCursor_UsingDirective = enum_CXCursorKind.define('CXCursor_UsingDirective', 34)
CXCursor_UsingDeclaration = enum_CXCursorKind.define('CXCursor_UsingDeclaration', 35)
CXCursor_TypeAliasDecl = enum_CXCursorKind.define('CXCursor_TypeAliasDecl', 36)
CXCursor_ObjCSynthesizeDecl = enum_CXCursorKind.define('CXCursor_ObjCSynthesizeDecl', 37)
CXCursor_ObjCDynamicDecl = enum_CXCursorKind.define('CXCursor_ObjCDynamicDecl', 38)
CXCursor_CXXAccessSpecifier = enum_CXCursorKind.define('CXCursor_CXXAccessSpecifier', 39)
CXCursor_FirstDecl = enum_CXCursorKind.define('CXCursor_FirstDecl', 1)
CXCursor_LastDecl = enum_CXCursorKind.define('CXCursor_LastDecl', 39)
# References (40-50).
CXCursor_FirstRef = enum_CXCursorKind.define('CXCursor_FirstRef', 40)
CXCursor_ObjCSuperClassRef = enum_CXCursorKind.define('CXCursor_ObjCSuperClassRef', 40)
CXCursor_ObjCProtocolRef = enum_CXCursorKind.define('CXCursor_ObjCProtocolRef', 41)
CXCursor_ObjCClassRef = enum_CXCursorKind.define('CXCursor_ObjCClassRef', 42)
CXCursor_TypeRef = enum_CXCursorKind.define('CXCursor_TypeRef', 43)
CXCursor_CXXBaseSpecifier = enum_CXCursorKind.define('CXCursor_CXXBaseSpecifier', 44)
CXCursor_TemplateRef = enum_CXCursorKind.define('CXCursor_TemplateRef', 45)
CXCursor_NamespaceRef = enum_CXCursorKind.define('CXCursor_NamespaceRef', 46)
CXCursor_MemberRef = enum_CXCursorKind.define('CXCursor_MemberRef', 47)
CXCursor_LabelRef = enum_CXCursorKind.define('CXCursor_LabelRef', 48)
CXCursor_OverloadedDeclRef = enum_CXCursorKind.define('CXCursor_OverloadedDeclRef', 49)
CXCursor_VariableRef = enum_CXCursorKind.define('CXCursor_VariableRef', 50)
CXCursor_LastRef = enum_CXCursorKind.define('CXCursor_LastRef', 50)
# Error/invalid cursors (70-73).
CXCursor_FirstInvalid = enum_CXCursorKind.define('CXCursor_FirstInvalid', 70)
CXCursor_InvalidFile = enum_CXCursorKind.define('CXCursor_InvalidFile', 70)
CXCursor_NoDeclFound = enum_CXCursorKind.define('CXCursor_NoDeclFound', 71)
CXCursor_NotImplemented = enum_CXCursorKind.define('CXCursor_NotImplemented', 72)
CXCursor_InvalidCode = enum_CXCursorKind.define('CXCursor_InvalidCode', 73)
CXCursor_LastInvalid = enum_CXCursorKind.define('CXCursor_LastInvalid', 73)
# Expressions (100-156).
CXCursor_FirstExpr = enum_CXCursorKind.define('CXCursor_FirstExpr', 100)
CXCursor_UnexposedExpr = enum_CXCursorKind.define('CXCursor_UnexposedExpr', 100)
CXCursor_DeclRefExpr = enum_CXCursorKind.define('CXCursor_DeclRefExpr', 101)
CXCursor_MemberRefExpr = enum_CXCursorKind.define('CXCursor_MemberRefExpr', 102)
CXCursor_CallExpr = enum_CXCursorKind.define('CXCursor_CallExpr', 103)
CXCursor_ObjCMessageExpr = enum_CXCursorKind.define('CXCursor_ObjCMessageExpr', 104)
CXCursor_BlockExpr = enum_CXCursorKind.define('CXCursor_BlockExpr', 105)
CXCursor_IntegerLiteral = enum_CXCursorKind.define('CXCursor_IntegerLiteral', 106)
CXCursor_FloatingLiteral = enum_CXCursorKind.define('CXCursor_FloatingLiteral', 107)
CXCursor_ImaginaryLiteral = enum_CXCursorKind.define('CXCursor_ImaginaryLiteral', 108)
CXCursor_StringLiteral = enum_CXCursorKind.define('CXCursor_StringLiteral', 109)
CXCursor_CharacterLiteral = enum_CXCursorKind.define('CXCursor_CharacterLiteral', 110)
CXCursor_ParenExpr = enum_CXCursorKind.define('CXCursor_ParenExpr', 111)
CXCursor_UnaryOperator = enum_CXCursorKind.define('CXCursor_UnaryOperator', 112)
CXCursor_ArraySubscriptExpr = enum_CXCursorKind.define('CXCursor_ArraySubscriptExpr', 113)
CXCursor_BinaryOperator = enum_CXCursorKind.define('CXCursor_BinaryOperator', 114)
CXCursor_CompoundAssignOperator = enum_CXCursorKind.define('CXCursor_CompoundAssignOperator', 115)
CXCursor_ConditionalOperator = enum_CXCursorKind.define('CXCursor_ConditionalOperator', 116)
CXCursor_CStyleCastExpr = enum_CXCursorKind.define('CXCursor_CStyleCastExpr', 117)
CXCursor_CompoundLiteralExpr = enum_CXCursorKind.define('CXCursor_CompoundLiteralExpr', 118)
CXCursor_InitListExpr = enum_CXCursorKind.define('CXCursor_InitListExpr', 119)
CXCursor_AddrLabelExpr = enum_CXCursorKind.define('CXCursor_AddrLabelExpr', 120)
CXCursor_StmtExpr = enum_CXCursorKind.define('CXCursor_StmtExpr', 121)
CXCursor_GenericSelectionExpr = enum_CXCursorKind.define('CXCursor_GenericSelectionExpr', 122)
CXCursor_GNUNullExpr = enum_CXCursorKind.define('CXCursor_GNUNullExpr', 123)
CXCursor_CXXStaticCastExpr = enum_CXCursorKind.define('CXCursor_CXXStaticCastExpr', 124)
CXCursor_CXXDynamicCastExpr = enum_CXCursorKind.define('CXCursor_CXXDynamicCastExpr', 125)
CXCursor_CXXReinterpretCastExpr = enum_CXCursorKind.define('CXCursor_CXXReinterpretCastExpr', 126)
CXCursor_CXXConstCastExpr = enum_CXCursorKind.define('CXCursor_CXXConstCastExpr', 127)
CXCursor_CXXFunctionalCastExpr = enum_CXCursorKind.define('CXCursor_CXXFunctionalCastExpr', 128)
CXCursor_CXXTypeidExpr = enum_CXCursorKind.define('CXCursor_CXXTypeidExpr', 129)
CXCursor_CXXBoolLiteralExpr = enum_CXCursorKind.define('CXCursor_CXXBoolLiteralExpr', 130)
CXCursor_CXXNullPtrLiteralExpr = enum_CXCursorKind.define('CXCursor_CXXNullPtrLiteralExpr', 131)
CXCursor_CXXThisExpr = enum_CXCursorKind.define('CXCursor_CXXThisExpr', 132)
CXCursor_CXXThrowExpr = enum_CXCursorKind.define('CXCursor_CXXThrowExpr', 133)
CXCursor_CXXNewExpr = enum_CXCursorKind.define('CXCursor_CXXNewExpr', 134)
CXCursor_CXXDeleteExpr = enum_CXCursorKind.define('CXCursor_CXXDeleteExpr', 135)
CXCursor_UnaryExpr = enum_CXCursorKind.define('CXCursor_UnaryExpr', 136)
CXCursor_ObjCStringLiteral = enum_CXCursorKind.define('CXCursor_ObjCStringLiteral', 137)
CXCursor_ObjCEncodeExpr = enum_CXCursorKind.define('CXCursor_ObjCEncodeExpr', 138)
CXCursor_ObjCSelectorExpr = enum_CXCursorKind.define('CXCursor_ObjCSelectorExpr', 139)
CXCursor_ObjCProtocolExpr = enum_CXCursorKind.define('CXCursor_ObjCProtocolExpr', 140)
CXCursor_ObjCBridgedCastExpr = enum_CXCursorKind.define('CXCursor_ObjCBridgedCastExpr', 141)
CXCursor_PackExpansionExpr = enum_CXCursorKind.define('CXCursor_PackExpansionExpr', 142)
CXCursor_SizeOfPackExpr = enum_CXCursorKind.define('CXCursor_SizeOfPackExpr', 143)
CXCursor_LambdaExpr = enum_CXCursorKind.define('CXCursor_LambdaExpr', 144)
CXCursor_ObjCBoolLiteralExpr = enum_CXCursorKind.define('CXCursor_ObjCBoolLiteralExpr', 145)
CXCursor_ObjCSelfExpr = enum_CXCursorKind.define('CXCursor_ObjCSelfExpr', 146)
CXCursor_ArraySectionExpr = enum_CXCursorKind.define('CXCursor_ArraySectionExpr', 147)
CXCursor_ObjCAvailabilityCheckExpr = enum_CXCursorKind.define('CXCursor_ObjCAvailabilityCheckExpr', 148)
CXCursor_FixedPointLiteral = enum_CXCursorKind.define('CXCursor_FixedPointLiteral', 149)
CXCursor_OMPArrayShapingExpr = enum_CXCursorKind.define('CXCursor_OMPArrayShapingExpr', 150)
CXCursor_OMPIteratorExpr = enum_CXCursorKind.define('CXCursor_OMPIteratorExpr', 151)
CXCursor_CXXAddrspaceCastExpr = enum_CXCursorKind.define('CXCursor_CXXAddrspaceCastExpr', 152)
CXCursor_ConceptSpecializationExpr = enum_CXCursorKind.define('CXCursor_ConceptSpecializationExpr', 153)
CXCursor_RequiresExpr = enum_CXCursorKind.define('CXCursor_RequiresExpr', 154)
CXCursor_CXXParenListInitExpr = enum_CXCursorKind.define('CXCursor_CXXParenListInitExpr', 155)
CXCursor_PackIndexingExpr = enum_CXCursorKind.define('CXCursor_PackIndexingExpr', 156)
CXCursor_LastExpr = enum_CXCursorKind.define('CXCursor_LastExpr', 156)
# Statements, including OpenMP directives and OpenACC constructs (200-331).
CXCursor_FirstStmt = enum_CXCursorKind.define('CXCursor_FirstStmt', 200)
CXCursor_UnexposedStmt = enum_CXCursorKind.define('CXCursor_UnexposedStmt', 200)
CXCursor_LabelStmt = enum_CXCursorKind.define('CXCursor_LabelStmt', 201)
CXCursor_CompoundStmt = enum_CXCursorKind.define('CXCursor_CompoundStmt', 202)
CXCursor_CaseStmt = enum_CXCursorKind.define('CXCursor_CaseStmt', 203)
CXCursor_DefaultStmt = enum_CXCursorKind.define('CXCursor_DefaultStmt', 204)
CXCursor_IfStmt = enum_CXCursorKind.define('CXCursor_IfStmt', 205)
CXCursor_SwitchStmt = enum_CXCursorKind.define('CXCursor_SwitchStmt', 206)
CXCursor_WhileStmt = enum_CXCursorKind.define('CXCursor_WhileStmt', 207)
CXCursor_DoStmt = enum_CXCursorKind.define('CXCursor_DoStmt', 208)
CXCursor_ForStmt = enum_CXCursorKind.define('CXCursor_ForStmt', 209)
CXCursor_GotoStmt = enum_CXCursorKind.define('CXCursor_GotoStmt', 210)
CXCursor_IndirectGotoStmt = enum_CXCursorKind.define('CXCursor_IndirectGotoStmt', 211)
CXCursor_ContinueStmt = enum_CXCursorKind.define('CXCursor_ContinueStmt', 212)
CXCursor_BreakStmt = enum_CXCursorKind.define('CXCursor_BreakStmt', 213)
CXCursor_ReturnStmt = enum_CXCursorKind.define('CXCursor_ReturnStmt', 214)
CXCursor_GCCAsmStmt = enum_CXCursorKind.define('CXCursor_GCCAsmStmt', 215)
CXCursor_AsmStmt = enum_CXCursorKind.define('CXCursor_AsmStmt', 215)
CXCursor_ObjCAtTryStmt = enum_CXCursorKind.define('CXCursor_ObjCAtTryStmt', 216)
CXCursor_ObjCAtCatchStmt = enum_CXCursorKind.define('CXCursor_ObjCAtCatchStmt', 217)
CXCursor_ObjCAtFinallyStmt = enum_CXCursorKind.define('CXCursor_ObjCAtFinallyStmt', 218)
CXCursor_ObjCAtThrowStmt = enum_CXCursorKind.define('CXCursor_ObjCAtThrowStmt', 219)
CXCursor_ObjCAtSynchronizedStmt = enum_CXCursorKind.define('CXCursor_ObjCAtSynchronizedStmt', 220)
CXCursor_ObjCAutoreleasePoolStmt = enum_CXCursorKind.define('CXCursor_ObjCAutoreleasePoolStmt', 221)
CXCursor_ObjCForCollectionStmt = enum_CXCursorKind.define('CXCursor_ObjCForCollectionStmt', 222)
CXCursor_CXXCatchStmt = enum_CXCursorKind.define('CXCursor_CXXCatchStmt', 223)
CXCursor_CXXTryStmt = enum_CXCursorKind.define('CXCursor_CXXTryStmt', 224)
CXCursor_CXXForRangeStmt = enum_CXCursorKind.define('CXCursor_CXXForRangeStmt', 225)
CXCursor_SEHTryStmt = enum_CXCursorKind.define('CXCursor_SEHTryStmt', 226)
CXCursor_SEHExceptStmt = enum_CXCursorKind.define('CXCursor_SEHExceptStmt', 227)
CXCursor_SEHFinallyStmt = enum_CXCursorKind.define('CXCursor_SEHFinallyStmt', 228)
CXCursor_MSAsmStmt = enum_CXCursorKind.define('CXCursor_MSAsmStmt', 229)
CXCursor_NullStmt = enum_CXCursorKind.define('CXCursor_NullStmt', 230)
CXCursor_DeclStmt = enum_CXCursorKind.define('CXCursor_DeclStmt', 231)
CXCursor_OMPParallelDirective = enum_CXCursorKind.define('CXCursor_OMPParallelDirective', 232)
CXCursor_OMPSimdDirective = enum_CXCursorKind.define('CXCursor_OMPSimdDirective', 233)
CXCursor_OMPForDirective = enum_CXCursorKind.define('CXCursor_OMPForDirective', 234)
CXCursor_OMPSectionsDirective = enum_CXCursorKind.define('CXCursor_OMPSectionsDirective', 235)
CXCursor_OMPSectionDirective = enum_CXCursorKind.define('CXCursor_OMPSectionDirective', 236)
CXCursor_OMPSingleDirective = enum_CXCursorKind.define('CXCursor_OMPSingleDirective', 237)
CXCursor_OMPParallelForDirective = enum_CXCursorKind.define('CXCursor_OMPParallelForDirective', 238)
CXCursor_OMPParallelSectionsDirective = enum_CXCursorKind.define('CXCursor_OMPParallelSectionsDirective', 239)
CXCursor_OMPTaskDirective = enum_CXCursorKind.define('CXCursor_OMPTaskDirective', 240)
CXCursor_OMPMasterDirective = enum_CXCursorKind.define('CXCursor_OMPMasterDirective', 241)
CXCursor_OMPCriticalDirective = enum_CXCursorKind.define('CXCursor_OMPCriticalDirective', 242)
CXCursor_OMPTaskyieldDirective = enum_CXCursorKind.define('CXCursor_OMPTaskyieldDirective', 243)
CXCursor_OMPBarrierDirective = enum_CXCursorKind.define('CXCursor_OMPBarrierDirective', 244)
CXCursor_OMPTaskwaitDirective = enum_CXCursorKind.define('CXCursor_OMPTaskwaitDirective', 245)
CXCursor_OMPFlushDirective = enum_CXCursorKind.define('CXCursor_OMPFlushDirective', 246)
CXCursor_SEHLeaveStmt = enum_CXCursorKind.define('CXCursor_SEHLeaveStmt', 247)
CXCursor_OMPOrderedDirective = enum_CXCursorKind.define('CXCursor_OMPOrderedDirective', 248)
CXCursor_OMPAtomicDirective = enum_CXCursorKind.define('CXCursor_OMPAtomicDirective', 249)
CXCursor_OMPForSimdDirective = enum_CXCursorKind.define('CXCursor_OMPForSimdDirective', 250)
CXCursor_OMPParallelForSimdDirective = enum_CXCursorKind.define('CXCursor_OMPParallelForSimdDirective', 251)
CXCursor_OMPTargetDirective = enum_CXCursorKind.define('CXCursor_OMPTargetDirective', 252)
CXCursor_OMPTeamsDirective = enum_CXCursorKind.define('CXCursor_OMPTeamsDirective', 253)
CXCursor_OMPTaskgroupDirective = enum_CXCursorKind.define('CXCursor_OMPTaskgroupDirective', 254)
CXCursor_OMPCancellationPointDirective = enum_CXCursorKind.define('CXCursor_OMPCancellationPointDirective', 255)
CXCursor_OMPCancelDirective = enum_CXCursorKind.define('CXCursor_OMPCancelDirective', 256)
CXCursor_OMPTargetDataDirective = enum_CXCursorKind.define('CXCursor_OMPTargetDataDirective', 257)
CXCursor_OMPTaskLoopDirective = enum_CXCursorKind.define('CXCursor_OMPTaskLoopDirective', 258)
CXCursor_OMPTaskLoopSimdDirective = enum_CXCursorKind.define('CXCursor_OMPTaskLoopSimdDirective', 259)
CXCursor_OMPDistributeDirective = enum_CXCursorKind.define('CXCursor_OMPDistributeDirective', 260)
CXCursor_OMPTargetEnterDataDirective = enum_CXCursorKind.define('CXCursor_OMPTargetEnterDataDirective', 261)
CXCursor_OMPTargetExitDataDirective = enum_CXCursorKind.define('CXCursor_OMPTargetExitDataDirective', 262)
CXCursor_OMPTargetParallelDirective = enum_CXCursorKind.define('CXCursor_OMPTargetParallelDirective', 263)
CXCursor_OMPTargetParallelForDirective = enum_CXCursorKind.define('CXCursor_OMPTargetParallelForDirective', 264)
CXCursor_OMPTargetUpdateDirective = enum_CXCursorKind.define('CXCursor_OMPTargetUpdateDirective', 265)
CXCursor_OMPDistributeParallelForDirective = enum_CXCursorKind.define('CXCursor_OMPDistributeParallelForDirective', 266)
CXCursor_OMPDistributeParallelForSimdDirective = enum_CXCursorKind.define('CXCursor_OMPDistributeParallelForSimdDirective', 267)
CXCursor_OMPDistributeSimdDirective = enum_CXCursorKind.define('CXCursor_OMPDistributeSimdDirective', 268)
CXCursor_OMPTargetParallelForSimdDirective = enum_CXCursorKind.define('CXCursor_OMPTargetParallelForSimdDirective', 269)
CXCursor_OMPTargetSimdDirective = enum_CXCursorKind.define('CXCursor_OMPTargetSimdDirective', 270)
CXCursor_OMPTeamsDistributeDirective = enum_CXCursorKind.define('CXCursor_OMPTeamsDistributeDirective', 271)
CXCursor_OMPTeamsDistributeSimdDirective = enum_CXCursorKind.define('CXCursor_OMPTeamsDistributeSimdDirective', 272)
CXCursor_OMPTeamsDistributeParallelForSimdDirective = enum_CXCursorKind.define('CXCursor_OMPTeamsDistributeParallelForSimdDirective', 273)
CXCursor_OMPTeamsDistributeParallelForDirective = enum_CXCursorKind.define('CXCursor_OMPTeamsDistributeParallelForDirective', 274)
CXCursor_OMPTargetTeamsDirective = enum_CXCursorKind.define('CXCursor_OMPTargetTeamsDirective', 275)
CXCursor_OMPTargetTeamsDistributeDirective = enum_CXCursorKind.define('CXCursor_OMPTargetTeamsDistributeDirective', 276)
CXCursor_OMPTargetTeamsDistributeParallelForDirective = enum_CXCursorKind.define('CXCursor_OMPTargetTeamsDistributeParallelForDirective', 277)
CXCursor_OMPTargetTeamsDistributeParallelForSimdDirective = enum_CXCursorKind.define('CXCursor_OMPTargetTeamsDistributeParallelForSimdDirective', 278)
CXCursor_OMPTargetTeamsDistributeSimdDirective = enum_CXCursorKind.define('CXCursor_OMPTargetTeamsDistributeSimdDirective', 279)
CXCursor_BuiltinBitCastExpr = enum_CXCursorKind.define('CXCursor_BuiltinBitCastExpr', 280)
CXCursor_OMPMasterTaskLoopDirective = enum_CXCursorKind.define('CXCursor_OMPMasterTaskLoopDirective', 281)
CXCursor_OMPParallelMasterTaskLoopDirective = enum_CXCursorKind.define('CXCursor_OMPParallelMasterTaskLoopDirective', 282)
CXCursor_OMPMasterTaskLoopSimdDirective = enum_CXCursorKind.define('CXCursor_OMPMasterTaskLoopSimdDirective', 283)
CXCursor_OMPParallelMasterTaskLoopSimdDirective = enum_CXCursorKind.define('CXCursor_OMPParallelMasterTaskLoopSimdDirective', 284)
CXCursor_OMPParallelMasterDirective = enum_CXCursorKind.define('CXCursor_OMPParallelMasterDirective', 285)
CXCursor_OMPDepobjDirective = enum_CXCursorKind.define('CXCursor_OMPDepobjDirective', 286)
CXCursor_OMPScanDirective = enum_CXCursorKind.define('CXCursor_OMPScanDirective', 287)
CXCursor_OMPTileDirective = enum_CXCursorKind.define('CXCursor_OMPTileDirective', 288)
CXCursor_OMPCanonicalLoop = enum_CXCursorKind.define('CXCursor_OMPCanonicalLoop', 289)
CXCursor_OMPInteropDirective = enum_CXCursorKind.define('CXCursor_OMPInteropDirective', 290)
CXCursor_OMPDispatchDirective = enum_CXCursorKind.define('CXCursor_OMPDispatchDirective', 291)
CXCursor_OMPMaskedDirective = enum_CXCursorKind.define('CXCursor_OMPMaskedDirective', 292)
CXCursor_OMPUnrollDirective = enum_CXCursorKind.define('CXCursor_OMPUnrollDirective', 293)
CXCursor_OMPMetaDirective = enum_CXCursorKind.define('CXCursor_OMPMetaDirective', 294)
CXCursor_OMPGenericLoopDirective = enum_CXCursorKind.define('CXCursor_OMPGenericLoopDirective', 295)
CXCursor_OMPTeamsGenericLoopDirective = enum_CXCursorKind.define('CXCursor_OMPTeamsGenericLoopDirective', 296)
CXCursor_OMPTargetTeamsGenericLoopDirective = enum_CXCursorKind.define('CXCursor_OMPTargetTeamsGenericLoopDirective', 297)
CXCursor_OMPParallelGenericLoopDirective = enum_CXCursorKind.define('CXCursor_OMPParallelGenericLoopDirective', 298)
CXCursor_OMPTargetParallelGenericLoopDirective = enum_CXCursorKind.define('CXCursor_OMPTargetParallelGenericLoopDirective', 299)
CXCursor_OMPParallelMaskedDirective = enum_CXCursorKind.define('CXCursor_OMPParallelMaskedDirective', 300)
CXCursor_OMPMaskedTaskLoopDirective = enum_CXCursorKind.define('CXCursor_OMPMaskedTaskLoopDirective', 301)
CXCursor_OMPMaskedTaskLoopSimdDirective = enum_CXCursorKind.define('CXCursor_OMPMaskedTaskLoopSimdDirective', 302)
CXCursor_OMPParallelMaskedTaskLoopDirective = enum_CXCursorKind.define('CXCursor_OMPParallelMaskedTaskLoopDirective', 303)
CXCursor_OMPParallelMaskedTaskLoopSimdDirective = enum_CXCursorKind.define('CXCursor_OMPParallelMaskedTaskLoopSimdDirective', 304)
CXCursor_OMPErrorDirective = enum_CXCursorKind.define('CXCursor_OMPErrorDirective', 305)
CXCursor_OMPScopeDirective = enum_CXCursorKind.define('CXCursor_OMPScopeDirective', 306)
CXCursor_OMPReverseDirective = enum_CXCursorKind.define('CXCursor_OMPReverseDirective', 307)
CXCursor_OMPInterchangeDirective = enum_CXCursorKind.define('CXCursor_OMPInterchangeDirective', 308)
CXCursor_OMPAssumeDirective = enum_CXCursorKind.define('CXCursor_OMPAssumeDirective', 309)
CXCursor_OpenACCComputeConstruct = enum_CXCursorKind.define('CXCursor_OpenACCComputeConstruct', 320)
CXCursor_OpenACCLoopConstruct = enum_CXCursorKind.define('CXCursor_OpenACCLoopConstruct', 321)
CXCursor_OpenACCCombinedConstruct = enum_CXCursorKind.define('CXCursor_OpenACCCombinedConstruct', 322)
CXCursor_OpenACCDataConstruct = enum_CXCursorKind.define('CXCursor_OpenACCDataConstruct', 323)
CXCursor_OpenACCEnterDataConstruct = enum_CXCursorKind.define('CXCursor_OpenACCEnterDataConstruct', 324)
CXCursor_OpenACCExitDataConstruct = enum_CXCursorKind.define('CXCursor_OpenACCExitDataConstruct', 325)
CXCursor_OpenACCHostDataConstruct = enum_CXCursorKind.define('CXCursor_OpenACCHostDataConstruct', 326)
CXCursor_OpenACCWaitConstruct = enum_CXCursorKind.define('CXCursor_OpenACCWaitConstruct', 327)
CXCursor_OpenACCInitConstruct = enum_CXCursorKind.define('CXCursor_OpenACCInitConstruct', 328)
CXCursor_OpenACCShutdownConstruct = enum_CXCursorKind.define('CXCursor_OpenACCShutdownConstruct', 329)
CXCursor_OpenACCSetConstruct = enum_CXCursorKind.define('CXCursor_OpenACCSetConstruct', 330)
CXCursor_OpenACCUpdateConstruct = enum_CXCursorKind.define('CXCursor_OpenACCUpdateConstruct', 331)
CXCursor_LastStmt = enum_CXCursorKind.define('CXCursor_LastStmt', 331)
# Translation unit (350).
CXCursor_TranslationUnit = enum_CXCursorKind.define('CXCursor_TranslationUnit', 350)
# Attributes (400-441).
CXCursor_FirstAttr = enum_CXCursorKind.define('CXCursor_FirstAttr', 400)
CXCursor_UnexposedAttr = enum_CXCursorKind.define('CXCursor_UnexposedAttr', 400)
CXCursor_IBActionAttr = enum_CXCursorKind.define('CXCursor_IBActionAttr', 401)
CXCursor_IBOutletAttr = enum_CXCursorKind.define('CXCursor_IBOutletAttr', 402)
CXCursor_IBOutletCollectionAttr = enum_CXCursorKind.define('CXCursor_IBOutletCollectionAttr', 403)
CXCursor_CXXFinalAttr = enum_CXCursorKind.define('CXCursor_CXXFinalAttr', 404)
CXCursor_CXXOverrideAttr = enum_CXCursorKind.define('CXCursor_CXXOverrideAttr', 405)
CXCursor_AnnotateAttr = enum_CXCursorKind.define('CXCursor_AnnotateAttr', 406)
CXCursor_AsmLabelAttr = enum_CXCursorKind.define('CXCursor_AsmLabelAttr', 407)
CXCursor_PackedAttr = enum_CXCursorKind.define('CXCursor_PackedAttr', 408)
CXCursor_PureAttr = enum_CXCursorKind.define('CXCursor_PureAttr', 409)
CXCursor_ConstAttr = enum_CXCursorKind.define('CXCursor_ConstAttr', 410)
CXCursor_NoDuplicateAttr = enum_CXCursorKind.define('CXCursor_NoDuplicateAttr', 411)
CXCursor_CUDAConstantAttr = enum_CXCursorKind.define('CXCursor_CUDAConstantAttr', 412)
CXCursor_CUDADeviceAttr = enum_CXCursorKind.define('CXCursor_CUDADeviceAttr', 413)
CXCursor_CUDAGlobalAttr = enum_CXCursorKind.define('CXCursor_CUDAGlobalAttr', 414)
CXCursor_CUDAHostAttr = enum_CXCursorKind.define('CXCursor_CUDAHostAttr', 415)
CXCursor_CUDASharedAttr = enum_CXCursorKind.define('CXCursor_CUDASharedAttr', 416)
CXCursor_VisibilityAttr = enum_CXCursorKind.define('CXCursor_VisibilityAttr', 417)
CXCursor_DLLExport = enum_CXCursorKind.define('CXCursor_DLLExport', 418)
CXCursor_DLLImport = enum_CXCursorKind.define('CXCursor_DLLImport', 419)
CXCursor_NSReturnsRetained = enum_CXCursorKind.define('CXCursor_NSReturnsRetained', 420)
CXCursor_NSReturnsNotRetained = enum_CXCursorKind.define('CXCursor_NSReturnsNotRetained', 421)
CXCursor_NSReturnsAutoreleased = enum_CXCursorKind.define('CXCursor_NSReturnsAutoreleased', 422)
CXCursor_NSConsumesSelf = enum_CXCursorKind.define('CXCursor_NSConsumesSelf', 423)
CXCursor_NSConsumed = enum_CXCursorKind.define('CXCursor_NSConsumed', 424)
CXCursor_ObjCException = enum_CXCursorKind.define('CXCursor_ObjCException', 425)
CXCursor_ObjCNSObject = enum_CXCursorKind.define('CXCursor_ObjCNSObject', 426)
CXCursor_ObjCIndependentClass = enum_CXCursorKind.define('CXCursor_ObjCIndependentClass', 427)
CXCursor_ObjCPreciseLifetime = enum_CXCursorKind.define('CXCursor_ObjCPreciseLifetime', 428)
CXCursor_ObjCReturnsInnerPointer = enum_CXCursorKind.define('CXCursor_ObjCReturnsInnerPointer', 429)
CXCursor_ObjCRequiresSuper = enum_CXCursorKind.define('CXCursor_ObjCRequiresSuper', 430)
CXCursor_ObjCRootClass = enum_CXCursorKind.define('CXCursor_ObjCRootClass', 431)
CXCursor_ObjCSubclassingRestricted = enum_CXCursorKind.define('CXCursor_ObjCSubclassingRestricted', 432)
CXCursor_ObjCExplicitProtocolImpl = enum_CXCursorKind.define('CXCursor_ObjCExplicitProtocolImpl', 433)
CXCursor_ObjCDesignatedInitializer = enum_CXCursorKind.define('CXCursor_ObjCDesignatedInitializer', 434)
CXCursor_ObjCRuntimeVisible = enum_CXCursorKind.define('CXCursor_ObjCRuntimeVisible', 435)
CXCursor_ObjCBoxable = enum_CXCursorKind.define('CXCursor_ObjCBoxable', 436)
CXCursor_FlagEnum = enum_CXCursorKind.define('CXCursor_FlagEnum', 437)
CXCursor_ConvergentAttr = enum_CXCursorKind.define('CXCursor_ConvergentAttr', 438)
CXCursor_WarnUnusedAttr = enum_CXCursorKind.define('CXCursor_WarnUnusedAttr', 439)
CXCursor_WarnUnusedResultAttr = enum_CXCursorKind.define('CXCursor_WarnUnusedResultAttr', 440)
CXCursor_AlignedAttr = enum_CXCursorKind.define('CXCursor_AlignedAttr', 441)
CXCursor_LastAttr = enum_CXCursorKind.define('CXCursor_LastAttr', 441)
# Preprocessing entities (500-503).
CXCursor_PreprocessingDirective = enum_CXCursorKind.define('CXCursor_PreprocessingDirective', 500)
CXCursor_MacroDefinition = enum_CXCursorKind.define('CXCursor_MacroDefinition', 501)
CXCursor_MacroExpansion = enum_CXCursorKind.define('CXCursor_MacroExpansion', 502)
CXCursor_MacroInstantiation = enum_CXCursorKind.define('CXCursor_MacroInstantiation', 502)
CXCursor_InclusionDirective = enum_CXCursorKind.define('CXCursor_InclusionDirective', 503)
CXCursor_FirstPreprocessing = enum_CXCursorKind.define('CXCursor_FirstPreprocessing', 500)
CXCursor_LastPreprocessing = enum_CXCursorKind.define('CXCursor_LastPreprocessing', 503)
# Extra declarations (600-604) and the code-completion overload candidate (700).
CXCursor_ModuleImportDecl = enum_CXCursorKind.define('CXCursor_ModuleImportDecl', 600)
CXCursor_TypeAliasTemplateDecl = enum_CXCursorKind.define('CXCursor_TypeAliasTemplateDecl', 601)
CXCursor_StaticAssert = enum_CXCursorKind.define('CXCursor_StaticAssert', 602)
CXCursor_FriendDecl = enum_CXCursorKind.define('CXCursor_FriendDecl', 603)
CXCursor_ConceptDecl = enum_CXCursorKind.define('CXCursor_ConceptDecl', 604)
CXCursor_FirstExtraDecl = enum_CXCursorKind.define('CXCursor_FirstExtraDecl', 600)
CXCursor_LastExtraDecl = enum_CXCursorKind.define('CXCursor_LastExtraDecl', 604)
CXCursor_OverloadCandidate = enum_CXCursorKind.define('CXCursor_OverloadCandidate', 700)
# AST cursor passed by value across the API.
# 32-byte layout: kind (u32 enum) at 0, xdata (i32) at 4, then three opaque
# void pointers at 8 (internal libclang data; not meaningful to callers).
@c.record
class CXCursor(c.Struct):
  SIZE = 32
  kind: Annotated[enum_CXCursorKind, 0]
  xdata: Annotated[Annotated[int, ctypes.c_int32], 4]
  data: Annotated[c.Array[ctypes.c_void_p, Literal[3]], 8]
# Cursor constructors, identity/equality helpers, and kind-category
# predicates. The clang_is* predicates take the enum kind; the Cursor-typed
# helpers take a CXCursor by value. Nonzero return means "true" for the
# uint-returning predicates.
@dll.bind
def clang_getNullCursor() -> CXCursor: ...
@dll.bind
def clang_getTranslationUnitCursor(_0:CXTranslationUnit) -> CXCursor: ...
@dll.bind
def clang_equalCursors(_0:CXCursor, _1:CXCursor) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_Cursor_isNull(cursor:CXCursor) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def clang_hashCursor(_0:CXCursor) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_getCursorKind(_0:CXCursor) -> enum_CXCursorKind: ...
@dll.bind
def clang_isDeclaration(_0:enum_CXCursorKind) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_isInvalidDeclaration(_0:CXCursor) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_isReference(_0:enum_CXCursorKind) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_isExpression(_0:enum_CXCursorKind) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_isStatement(_0:enum_CXCursorKind) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_isAttribute(_0:enum_CXCursorKind) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_Cursor_hasAttrs(C:CXCursor) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_isInvalid(_0:enum_CXCursorKind) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_isTranslationUnit(_0:enum_CXCursorKind) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_isPreprocessing(_0:enum_CXCursorKind) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_isUnexposed(_0:enum_CXCursorKind) -> Annotated[int, ctypes.c_uint32]: ...
# --- Linkage, visibility and availability of the entity behind a cursor -----
class enum_CXLinkageKind(Annotated[int, ctypes.c_uint32], c.Enum): pass
CXLinkage_Invalid = enum_CXLinkageKind.define('CXLinkage_Invalid', 0)
CXLinkage_NoLinkage = enum_CXLinkageKind.define('CXLinkage_NoLinkage', 1)
CXLinkage_Internal = enum_CXLinkageKind.define('CXLinkage_Internal', 2)
CXLinkage_UniqueExternal = enum_CXLinkageKind.define('CXLinkage_UniqueExternal', 3)
CXLinkage_External = enum_CXLinkageKind.define('CXLinkage_External', 4)
@dll.bind
def clang_getCursorLinkage(cursor:CXCursor) -> enum_CXLinkageKind: ...
class enum_CXVisibilityKind(Annotated[int, ctypes.c_uint32], c.Enum): pass
CXVisibility_Invalid = enum_CXVisibilityKind.define('CXVisibility_Invalid', 0)
CXVisibility_Hidden = enum_CXVisibilityKind.define('CXVisibility_Hidden', 1)
CXVisibility_Protected = enum_CXVisibilityKind.define('CXVisibility_Protected', 2)
CXVisibility_Default = enum_CXVisibilityKind.define('CXVisibility_Default', 3)
@dll.bind
def clang_getCursorVisibility(cursor:CXCursor) -> enum_CXVisibilityKind: ...
@dll.bind
def clang_getCursorAvailability(cursor:CXCursor) -> enum_CXAvailabilityKind: ...
# Per-platform availability information (from `availability` attributes).
# Field offsets mirror the 72-byte C struct CXPlatformAvailability.
@c.record
class struct_CXPlatformAvailability(c.Struct):
  SIZE = 72
  Platform: Annotated[CXString, 0]
  Introduced: Annotated[CXVersion, 16]
  Deprecated: Annotated[CXVersion, 28]
  Obsoleted: Annotated[CXVersion, 40]
  Unavailable: Annotated[Annotated[int, ctypes.c_int32], 52]
  Message: Annotated[CXString, 56]
CXPlatformAvailability: TypeAlias = struct_CXPlatformAvailability
# Fills caller-provided out-parameters; the `availability` array (up to
# availability_size entries) must later be released with
# clang_disposeCXPlatformAvailability.
@dll.bind
def clang_getCursorPlatformAvailability(cursor:CXCursor, always_deprecated:c.POINTER[Annotated[int, ctypes.c_int32]], deprecated_message:c.POINTER[CXString], always_unavailable:c.POINTER[Annotated[int, ctypes.c_int32]], unavailable_message:c.POINTER[CXString], availability:c.POINTER[CXPlatformAvailability], availability_size:Annotated[int, ctypes.c_int32]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def clang_disposeCXPlatformAvailability(availability:c.POINTER[CXPlatformAvailability]) -> None: ...
# Variable-declaration helpers.
@dll.bind
def clang_Cursor_getVarDeclInitializer(cursor:CXCursor) -> CXCursor: ...
@dll.bind
def clang_Cursor_hasVarDeclGlobalStorage(cursor:CXCursor) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def clang_Cursor_hasVarDeclExternalStorage(cursor:CXCursor) -> Annotated[int, ctypes.c_int32]: ...
# --- Source language and thread-local-storage kind of a cursor --------------
class enum_CXLanguageKind(Annotated[int, ctypes.c_uint32], c.Enum): pass
CXLanguage_Invalid = enum_CXLanguageKind.define('CXLanguage_Invalid', 0)
CXLanguage_C = enum_CXLanguageKind.define('CXLanguage_C', 1)
CXLanguage_ObjC = enum_CXLanguageKind.define('CXLanguage_ObjC', 2)
CXLanguage_CPlusPlus = enum_CXLanguageKind.define('CXLanguage_CPlusPlus', 3)
@dll.bind
def clang_getCursorLanguage(cursor:CXCursor) -> enum_CXLanguageKind: ...
class enum_CXTLSKind(Annotated[int, ctypes.c_uint32], c.Enum): pass
CXTLS_None = enum_CXTLSKind.define('CXTLS_None', 0)
CXTLS_Dynamic = enum_CXTLSKind.define('CXTLS_Dynamic', 1)
CXTLS_Static = enum_CXTLSKind.define('CXTLS_Static', 2)
@dll.bind
def clang_getCursorTLSKind(cursor:CXCursor) -> enum_CXTLSKind: ...
@dll.bind
def clang_Cursor_getTranslationUnit(_0:CXCursor) -> CXTranslationUnit: ...
# --- Cursor sets, parent/override queries, and source locations -------------
# CXCursorSet is an opaque handle (pointer to an incomplete C struct).
class struct_CXCursorSetImpl(ctypes.Structure): pass
CXCursorSet: TypeAlias = c.POINTER[struct_CXCursorSetImpl]
@dll.bind
def clang_createCXCursorSet() -> CXCursorSet: ...
@dll.bind
def clang_disposeCXCursorSet(cset:CXCursorSet) -> None: ...
@dll.bind
def clang_CXCursorSet_contains(cset:CXCursorSet, cursor:CXCursor) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_CXCursorSet_insert(cset:CXCursorSet, cursor:CXCursor) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_getCursorSemanticParent(cursor:CXCursor) -> CXCursor: ...
@dll.bind
def clang_getCursorLexicalParent(cursor:CXCursor) -> CXCursor: ...
# `overridden` is an out-parameter array allocated by libclang; release it
# with clang_disposeOverriddenCursors.
@dll.bind
def clang_getOverriddenCursors(cursor:CXCursor, overridden:c.POINTER[c.POINTER[CXCursor]], num_overridden:c.POINTER[Annotated[int, ctypes.c_uint32]]) -> None: ...
@dll.bind
def clang_disposeOverriddenCursors(overridden:c.POINTER[CXCursor]) -> None: ...
@dll.bind
def clang_getIncludedFile(cursor:CXCursor) -> CXFile: ...
@dll.bind
def clang_getCursor(_0:CXTranslationUnit, _1:CXSourceLocation) -> CXCursor: ...
@dll.bind
def clang_getCursorLocation(_0:CXCursor) -> CXSourceLocation: ...
@dll.bind
def clang_getCursorExtent(_0:CXCursor) -> CXSourceRange: ...
# --- CXTypeKind: the kind tag of a CXType -----------------------------------
# Values mirror clang-c/Index.h exactly; do not renumber.  Note that some
# names are deliberate aliases sharing one value (e.g. the long/short
# spellings of the OpenCL Intel subgroup AVC streamout/streamin types).
class enum_CXTypeKind(Annotated[int, ctypes.c_uint32], c.Enum): pass
CXType_Invalid = enum_CXTypeKind.define('CXType_Invalid', 0)
CXType_Unexposed = enum_CXTypeKind.define('CXType_Unexposed', 1)
# Builtin types occupy the range [CXType_FirstBuiltin, CXType_LastBuiltin].
CXType_Void = enum_CXTypeKind.define('CXType_Void', 2)
CXType_Bool = enum_CXTypeKind.define('CXType_Bool', 3)
CXType_Char_U = enum_CXTypeKind.define('CXType_Char_U', 4)
CXType_UChar = enum_CXTypeKind.define('CXType_UChar', 5)
CXType_Char16 = enum_CXTypeKind.define('CXType_Char16', 6)
CXType_Char32 = enum_CXTypeKind.define('CXType_Char32', 7)
CXType_UShort = enum_CXTypeKind.define('CXType_UShort', 8)
CXType_UInt = enum_CXTypeKind.define('CXType_UInt', 9)
CXType_ULong = enum_CXTypeKind.define('CXType_ULong', 10)
CXType_ULongLong = enum_CXTypeKind.define('CXType_ULongLong', 11)
CXType_UInt128 = enum_CXTypeKind.define('CXType_UInt128', 12)
CXType_Char_S = enum_CXTypeKind.define('CXType_Char_S', 13)
CXType_SChar = enum_CXTypeKind.define('CXType_SChar', 14)
CXType_WChar = enum_CXTypeKind.define('CXType_WChar', 15)
CXType_Short = enum_CXTypeKind.define('CXType_Short', 16)
CXType_Int = enum_CXTypeKind.define('CXType_Int', 17)
CXType_Long = enum_CXTypeKind.define('CXType_Long', 18)
CXType_LongLong = enum_CXTypeKind.define('CXType_LongLong', 19)
CXType_Int128 = enum_CXTypeKind.define('CXType_Int128', 20)
CXType_Float = enum_CXTypeKind.define('CXType_Float', 21)
CXType_Double = enum_CXTypeKind.define('CXType_Double', 22)
CXType_LongDouble = enum_CXTypeKind.define('CXType_LongDouble', 23)
CXType_NullPtr = enum_CXTypeKind.define('CXType_NullPtr', 24)
CXType_Overload = enum_CXTypeKind.define('CXType_Overload', 25)
CXType_Dependent = enum_CXTypeKind.define('CXType_Dependent', 26)
CXType_ObjCId = enum_CXTypeKind.define('CXType_ObjCId', 27)
CXType_ObjCClass = enum_CXTypeKind.define('CXType_ObjCClass', 28)
CXType_ObjCSel = enum_CXTypeKind.define('CXType_ObjCSel', 29)
CXType_Float128 = enum_CXTypeKind.define('CXType_Float128', 30)
CXType_Half = enum_CXTypeKind.define('CXType_Half', 31)
CXType_Float16 = enum_CXTypeKind.define('CXType_Float16', 32)
CXType_ShortAccum = enum_CXTypeKind.define('CXType_ShortAccum', 33)
CXType_Accum = enum_CXTypeKind.define('CXType_Accum', 34)
CXType_LongAccum = enum_CXTypeKind.define('CXType_LongAccum', 35)
CXType_UShortAccum = enum_CXTypeKind.define('CXType_UShortAccum', 36)
CXType_UAccum = enum_CXTypeKind.define('CXType_UAccum', 37)
CXType_ULongAccum = enum_CXTypeKind.define('CXType_ULongAccum', 38)
CXType_BFloat16 = enum_CXTypeKind.define('CXType_BFloat16', 39)
CXType_Ibm128 = enum_CXTypeKind.define('CXType_Ibm128', 40)
# Range markers (aliases of the values above).
CXType_FirstBuiltin = enum_CXTypeKind.define('CXType_FirstBuiltin', 2)
CXType_LastBuiltin = enum_CXTypeKind.define('CXType_LastBuiltin', 40)
# Composite / derived type kinds start at 100.
CXType_Complex = enum_CXTypeKind.define('CXType_Complex', 100)
CXType_Pointer = enum_CXTypeKind.define('CXType_Pointer', 101)
CXType_BlockPointer = enum_CXTypeKind.define('CXType_BlockPointer', 102)
CXType_LValueReference = enum_CXTypeKind.define('CXType_LValueReference', 103)
CXType_RValueReference = enum_CXTypeKind.define('CXType_RValueReference', 104)
CXType_Record = enum_CXTypeKind.define('CXType_Record', 105)
CXType_Enum = enum_CXTypeKind.define('CXType_Enum', 106)
CXType_Typedef = enum_CXTypeKind.define('CXType_Typedef', 107)
CXType_ObjCInterface = enum_CXTypeKind.define('CXType_ObjCInterface', 108)
CXType_ObjCObjectPointer = enum_CXTypeKind.define('CXType_ObjCObjectPointer', 109)
CXType_FunctionNoProto = enum_CXTypeKind.define('CXType_FunctionNoProto', 110)
CXType_FunctionProto = enum_CXTypeKind.define('CXType_FunctionProto', 111)
CXType_ConstantArray = enum_CXTypeKind.define('CXType_ConstantArray', 112)
CXType_Vector = enum_CXTypeKind.define('CXType_Vector', 113)
CXType_IncompleteArray = enum_CXTypeKind.define('CXType_IncompleteArray', 114)
CXType_VariableArray = enum_CXTypeKind.define('CXType_VariableArray', 115)
CXType_DependentSizedArray = enum_CXTypeKind.define('CXType_DependentSizedArray', 116)
CXType_MemberPointer = enum_CXTypeKind.define('CXType_MemberPointer', 117)
CXType_Auto = enum_CXTypeKind.define('CXType_Auto', 118)
CXType_Elaborated = enum_CXTypeKind.define('CXType_Elaborated', 119)
# OpenCL image and pipe types.
CXType_Pipe = enum_CXTypeKind.define('CXType_Pipe', 120)
CXType_OCLImage1dRO = enum_CXTypeKind.define('CXType_OCLImage1dRO', 121)
CXType_OCLImage1dArrayRO = enum_CXTypeKind.define('CXType_OCLImage1dArrayRO', 122)
CXType_OCLImage1dBufferRO = enum_CXTypeKind.define('CXType_OCLImage1dBufferRO', 123)
CXType_OCLImage2dRO = enum_CXTypeKind.define('CXType_OCLImage2dRO', 124)
CXType_OCLImage2dArrayRO = enum_CXTypeKind.define('CXType_OCLImage2dArrayRO', 125)
CXType_OCLImage2dDepthRO = enum_CXTypeKind.define('CXType_OCLImage2dDepthRO', 126)
CXType_OCLImage2dArrayDepthRO = enum_CXTypeKind.define('CXType_OCLImage2dArrayDepthRO', 127)
CXType_OCLImage2dMSAARO = enum_CXTypeKind.define('CXType_OCLImage2dMSAARO', 128)
CXType_OCLImage2dArrayMSAARO = enum_CXTypeKind.define('CXType_OCLImage2dArrayMSAARO', 129)
CXType_OCLImage2dMSAADepthRO = enum_CXTypeKind.define('CXType_OCLImage2dMSAADepthRO', 130)
CXType_OCLImage2dArrayMSAADepthRO = enum_CXTypeKind.define('CXType_OCLImage2dArrayMSAADepthRO', 131)
CXType_OCLImage3dRO = enum_CXTypeKind.define('CXType_OCLImage3dRO', 132)
CXType_OCLImage1dWO = enum_CXTypeKind.define('CXType_OCLImage1dWO', 133)
CXType_OCLImage1dArrayWO = enum_CXTypeKind.define('CXType_OCLImage1dArrayWO', 134)
CXType_OCLImage1dBufferWO = enum_CXTypeKind.define('CXType_OCLImage1dBufferWO', 135)
CXType_OCLImage2dWO = enum_CXTypeKind.define('CXType_OCLImage2dWO', 136)
CXType_OCLImage2dArrayWO = enum_CXTypeKind.define('CXType_OCLImage2dArrayWO', 137)
CXType_OCLImage2dDepthWO = enum_CXTypeKind.define('CXType_OCLImage2dDepthWO', 138)
CXType_OCLImage2dArrayDepthWO = enum_CXTypeKind.define('CXType_OCLImage2dArrayDepthWO', 139)
CXType_OCLImage2dMSAAWO = enum_CXTypeKind.define('CXType_OCLImage2dMSAAWO', 140)
CXType_OCLImage2dArrayMSAAWO = enum_CXTypeKind.define('CXType_OCLImage2dArrayMSAAWO', 141)
CXType_OCLImage2dMSAADepthWO = enum_CXTypeKind.define('CXType_OCLImage2dMSAADepthWO', 142)
CXType_OCLImage2dArrayMSAADepthWO = enum_CXTypeKind.define('CXType_OCLImage2dArrayMSAADepthWO', 143)
CXType_OCLImage3dWO = enum_CXTypeKind.define('CXType_OCLImage3dWO', 144)
CXType_OCLImage1dRW = enum_CXTypeKind.define('CXType_OCLImage1dRW', 145)
CXType_OCLImage1dArrayRW = enum_CXTypeKind.define('CXType_OCLImage1dArrayRW', 146)
CXType_OCLImage1dBufferRW = enum_CXTypeKind.define('CXType_OCLImage1dBufferRW', 147)
CXType_OCLImage2dRW = enum_CXTypeKind.define('CXType_OCLImage2dRW', 148)
CXType_OCLImage2dArrayRW = enum_CXTypeKind.define('CXType_OCLImage2dArrayRW', 149)
CXType_OCLImage2dDepthRW = enum_CXTypeKind.define('CXType_OCLImage2dDepthRW', 150)
CXType_OCLImage2dArrayDepthRW = enum_CXTypeKind.define('CXType_OCLImage2dArrayDepthRW', 151)
CXType_OCLImage2dMSAARW = enum_CXTypeKind.define('CXType_OCLImage2dMSAARW', 152)
CXType_OCLImage2dArrayMSAARW = enum_CXTypeKind.define('CXType_OCLImage2dArrayMSAARW', 153)
CXType_OCLImage2dMSAADepthRW = enum_CXTypeKind.define('CXType_OCLImage2dMSAADepthRW', 154)
CXType_OCLImage2dArrayMSAADepthRW = enum_CXTypeKind.define('CXType_OCLImage2dArrayMSAADepthRW', 155)
CXType_OCLImage3dRW = enum_CXTypeKind.define('CXType_OCLImage3dRW', 156)
CXType_OCLSampler = enum_CXTypeKind.define('CXType_OCLSampler', 157)
CXType_OCLEvent = enum_CXTypeKind.define('CXType_OCLEvent', 158)
CXType_OCLQueue = enum_CXTypeKind.define('CXType_OCLQueue', 159)
CXType_OCLReserveID = enum_CXTypeKind.define('CXType_OCLReserveID', 160)
CXType_ObjCObject = enum_CXTypeKind.define('CXType_ObjCObject', 161)
CXType_ObjCTypeParam = enum_CXTypeKind.define('CXType_ObjCTypeParam', 162)
CXType_Attributed = enum_CXTypeKind.define('CXType_Attributed', 163)
CXType_OCLIntelSubgroupAVCMcePayload = enum_CXTypeKind.define('CXType_OCLIntelSubgroupAVCMcePayload', 164)
CXType_OCLIntelSubgroupAVCImePayload = enum_CXTypeKind.define('CXType_OCLIntelSubgroupAVCImePayload', 165)
CXType_OCLIntelSubgroupAVCRefPayload = enum_CXTypeKind.define('CXType_OCLIntelSubgroupAVCRefPayload', 166)
CXType_OCLIntelSubgroupAVCSicPayload = enum_CXTypeKind.define('CXType_OCLIntelSubgroupAVCSicPayload', 167)
CXType_OCLIntelSubgroupAVCMceResult = enum_CXTypeKind.define('CXType_OCLIntelSubgroupAVCMceResult', 168)
CXType_OCLIntelSubgroupAVCImeResult = enum_CXTypeKind.define('CXType_OCLIntelSubgroupAVCImeResult', 169)
CXType_OCLIntelSubgroupAVCRefResult = enum_CXTypeKind.define('CXType_OCLIntelSubgroupAVCRefResult', 170)
CXType_OCLIntelSubgroupAVCSicResult = enum_CXTypeKind.define('CXType_OCLIntelSubgroupAVCSicResult', 171)
CXType_OCLIntelSubgroupAVCImeResultSingleReferenceStreamout = enum_CXTypeKind.define('CXType_OCLIntelSubgroupAVCImeResultSingleReferenceStreamout', 172)
CXType_OCLIntelSubgroupAVCImeResultDualReferenceStreamout = enum_CXTypeKind.define('CXType_OCLIntelSubgroupAVCImeResultDualReferenceStreamout', 173)
CXType_OCLIntelSubgroupAVCImeSingleReferenceStreamin = enum_CXTypeKind.define('CXType_OCLIntelSubgroupAVCImeSingleReferenceStreamin', 174)
CXType_OCLIntelSubgroupAVCImeDualReferenceStreamin = enum_CXTypeKind.define('CXType_OCLIntelSubgroupAVCImeDualReferenceStreamin', 175)
# Old (abbreviated) spellings kept as aliases of 172-175 above.
CXType_OCLIntelSubgroupAVCImeResultSingleRefStreamout = enum_CXTypeKind.define('CXType_OCLIntelSubgroupAVCImeResultSingleRefStreamout', 172)
CXType_OCLIntelSubgroupAVCImeResultDualRefStreamout = enum_CXTypeKind.define('CXType_OCLIntelSubgroupAVCImeResultDualRefStreamout', 173)
CXType_OCLIntelSubgroupAVCImeSingleRefStreamin = enum_CXTypeKind.define('CXType_OCLIntelSubgroupAVCImeSingleRefStreamin', 174)
CXType_OCLIntelSubgroupAVCImeDualRefStreamin = enum_CXTypeKind.define('CXType_OCLIntelSubgroupAVCImeDualRefStreamin', 175)
CXType_ExtVector = enum_CXTypeKind.define('CXType_ExtVector', 176)
CXType_Atomic = enum_CXTypeKind.define('CXType_Atomic', 177)
CXType_BTFTagAttributed = enum_CXTypeKind.define('CXType_BTFTagAttributed', 178)
CXType_HLSLResource = enum_CXTypeKind.define('CXType_HLSLResource', 179)
CXType_HLSLAttributedResource = enum_CXTypeKind.define('CXType_HLSLAttributedResource', 180)
# --- Calling conventions of function types ----------------------------------
# Values mirror clang-c/Index.h; CXCallingConv_X86_64Win64 is a legacy
# alias of CXCallingConv_Win64 (both 10).
class enum_CXCallingConv(Annotated[int, ctypes.c_uint32], c.Enum): pass
CXCallingConv_Default = enum_CXCallingConv.define('CXCallingConv_Default', 0)
CXCallingConv_C = enum_CXCallingConv.define('CXCallingConv_C', 1)
CXCallingConv_X86StdCall = enum_CXCallingConv.define('CXCallingConv_X86StdCall', 2)
CXCallingConv_X86FastCall = enum_CXCallingConv.define('CXCallingConv_X86FastCall', 3)
CXCallingConv_X86ThisCall = enum_CXCallingConv.define('CXCallingConv_X86ThisCall', 4)
CXCallingConv_X86Pascal = enum_CXCallingConv.define('CXCallingConv_X86Pascal', 5)
CXCallingConv_AAPCS = enum_CXCallingConv.define('CXCallingConv_AAPCS', 6)
CXCallingConv_AAPCS_VFP = enum_CXCallingConv.define('CXCallingConv_AAPCS_VFP', 7)
CXCallingConv_X86RegCall = enum_CXCallingConv.define('CXCallingConv_X86RegCall', 8)
CXCallingConv_IntelOclBicc = enum_CXCallingConv.define('CXCallingConv_IntelOclBicc', 9)
CXCallingConv_Win64 = enum_CXCallingConv.define('CXCallingConv_Win64', 10)
CXCallingConv_X86_64Win64 = enum_CXCallingConv.define('CXCallingConv_X86_64Win64', 10)
CXCallingConv_X86_64SysV = enum_CXCallingConv.define('CXCallingConv_X86_64SysV', 11)
CXCallingConv_X86VectorCall = enum_CXCallingConv.define('CXCallingConv_X86VectorCall', 12)
CXCallingConv_Swift = enum_CXCallingConv.define('CXCallingConv_Swift', 13)
CXCallingConv_PreserveMost = enum_CXCallingConv.define('CXCallingConv_PreserveMost', 14)
CXCallingConv_PreserveAll = enum_CXCallingConv.define('CXCallingConv_PreserveAll', 15)
CXCallingConv_AArch64VectorCall = enum_CXCallingConv.define('CXCallingConv_AArch64VectorCall', 16)
CXCallingConv_SwiftAsync = enum_CXCallingConv.define('CXCallingConv_SwiftAsync', 17)
CXCallingConv_AArch64SVEPCS = enum_CXCallingConv.define('CXCallingConv_AArch64SVEPCS', 18)
CXCallingConv_M68kRTD = enum_CXCallingConv.define('CXCallingConv_M68kRTD', 19)
CXCallingConv_PreserveNone = enum_CXCallingConv.define('CXCallingConv_PreserveNone', 20)
CXCallingConv_RISCVVectorCall = enum_CXCallingConv.define('CXCallingConv_RISCVVectorCall', 21)
CXCallingConv_Invalid = enum_CXCallingConv.define('CXCallingConv_Invalid', 100)
CXCallingConv_Unexposed = enum_CXCallingConv.define('CXCallingConv_Unexposed', 200)
# The type of an AST element.  Mirrors the 24-byte C `CXType` struct:
# a kind tag plus two opaque libclang-internal pointers.
@c.record
class CXType(c.Struct):
  SIZE = 24
  kind: Annotated[enum_CXTypeKind, 0]
  data: Annotated[c.Array[ctypes.c_void_p, Literal[2]], 8]  # opaque; offset 8 due to alignment padding
# --- Basic type and declaration-value queries -------------------------------
@dll.bind
def clang_getCursorType(C:CXCursor) -> CXType: ...
@dll.bind
def clang_getTypeSpelling(CT:CXType) -> CXString: ...
@dll.bind
def clang_getTypedefDeclUnderlyingType(C:CXCursor) -> CXType: ...
@dll.bind
def clang_getEnumDeclIntegerType(C:CXCursor) -> CXType: ...
# Enum constant values are exposed both signed and unsigned.
@dll.bind
def clang_getEnumConstantDeclValue(C:CXCursor) -> Annotated[int, ctypes.c_int64]: ...
@dll.bind
def clang_getEnumConstantDeclUnsignedValue(C:CXCursor) -> Annotated[int, ctypes.c_uint64]: ...
@dll.bind
def clang_Cursor_isBitField(C:CXCursor) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_getFieldDeclBitWidth(C:CXCursor) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def clang_Cursor_getNumArguments(C:CXCursor) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def clang_Cursor_getArgument(C:CXCursor, i:Annotated[int, ctypes.c_uint32]) -> CXCursor: ...
# --- Template arguments of a (cursor to a) template specialization ----------
class enum_CXTemplateArgumentKind(Annotated[int, ctypes.c_uint32], c.Enum): pass
CXTemplateArgumentKind_Null = enum_CXTemplateArgumentKind.define('CXTemplateArgumentKind_Null', 0)
CXTemplateArgumentKind_Type = enum_CXTemplateArgumentKind.define('CXTemplateArgumentKind_Type', 1)
CXTemplateArgumentKind_Declaration = enum_CXTemplateArgumentKind.define('CXTemplateArgumentKind_Declaration', 2)
CXTemplateArgumentKind_NullPtr = enum_CXTemplateArgumentKind.define('CXTemplateArgumentKind_NullPtr', 3)
CXTemplateArgumentKind_Integral = enum_CXTemplateArgumentKind.define('CXTemplateArgumentKind_Integral', 4)
CXTemplateArgumentKind_Template = enum_CXTemplateArgumentKind.define('CXTemplateArgumentKind_Template', 5)
CXTemplateArgumentKind_TemplateExpansion = enum_CXTemplateArgumentKind.define('CXTemplateArgumentKind_TemplateExpansion', 6)
CXTemplateArgumentKind_Expression = enum_CXTemplateArgumentKind.define('CXTemplateArgumentKind_Expression', 7)
CXTemplateArgumentKind_Pack = enum_CXTemplateArgumentKind.define('CXTemplateArgumentKind_Pack', 8)
CXTemplateArgumentKind_Invalid = enum_CXTemplateArgumentKind.define('CXTemplateArgumentKind_Invalid', 9)
@dll.bind
def clang_Cursor_getNumTemplateArguments(C:CXCursor) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def clang_Cursor_getTemplateArgumentKind(C:CXCursor, I:Annotated[int, ctypes.c_uint32]) -> enum_CXTemplateArgumentKind: ...
@dll.bind
def clang_Cursor_getTemplateArgumentType(C:CXCursor, I:Annotated[int, ctypes.c_uint32]) -> CXType: ...
@dll.bind
def clang_Cursor_getTemplateArgumentValue(C:CXCursor, I:Annotated[int, ctypes.c_uint32]) -> Annotated[int, ctypes.c_int64]: ...
@dll.bind
def clang_Cursor_getTemplateArgumentUnsignedValue(C:CXCursor, I:Annotated[int, ctypes.c_uint32]) -> Annotated[int, ctypes.c_uint64]: ...
# --- Type qualifiers, function types, arrays, Objective-C type info ---------
@dll.bind
def clang_equalTypes(A:CXType, B:CXType) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_getCanonicalType(T:CXType) -> CXType: ...
@dll.bind
def clang_isConstQualifiedType(T:CXType) -> Annotated[int, ctypes.c_uint32]: ...
# Macro / inline-function predicates on cursors.
@dll.bind
def clang_Cursor_isMacroFunctionLike(C:CXCursor) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_Cursor_isMacroBuiltin(C:CXCursor) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_Cursor_isFunctionInlined(C:CXCursor) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_isVolatileQualifiedType(T:CXType) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_isRestrictQualifiedType(T:CXType) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_getAddressSpace(T:CXType) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_getTypedefName(CT:CXType) -> CXString: ...
@dll.bind
def clang_getPointeeType(T:CXType) -> CXType: ...
@dll.bind
def clang_getUnqualifiedType(CT:CXType) -> CXType: ...
@dll.bind
def clang_getNonReferenceType(CT:CXType) -> CXType: ...
@dll.bind
def clang_getTypeDeclaration(T:CXType) -> CXCursor: ...
@dll.bind
def clang_getDeclObjCTypeEncoding(C:CXCursor) -> CXString: ...
@dll.bind
def clang_Type_getObjCEncoding(type:CXType) -> CXString: ...
@dll.bind
def clang_getTypeKindSpelling(K:enum_CXTypeKind) -> CXString: ...
# Function-type introspection.
@dll.bind
def clang_getFunctionTypeCallingConv(T:CXType) -> enum_CXCallingConv: ...
@dll.bind
def clang_getResultType(T:CXType) -> CXType: ...
@dll.bind
def clang_getExceptionSpecificationType(T:CXType) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def clang_getNumArgTypes(T:CXType) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def clang_getArgType(T:CXType, i:Annotated[int, ctypes.c_uint32]) -> CXType: ...
# Objective-C object-type introspection.
@dll.bind
def clang_Type_getObjCObjectBaseType(T:CXType) -> CXType: ...
@dll.bind
def clang_Type_getNumObjCProtocolRefs(T:CXType) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_Type_getObjCProtocolDecl(T:CXType, i:Annotated[int, ctypes.c_uint32]) -> CXCursor: ...
@dll.bind
def clang_Type_getNumObjCTypeArgs(T:CXType) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_Type_getObjCTypeArg(T:CXType, i:Annotated[int, ctypes.c_uint32]) -> CXType: ...
@dll.bind
def clang_isFunctionTypeVariadic(T:CXType) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_getCursorResultType(C:CXCursor) -> CXType: ...
@dll.bind
def clang_getCursorExceptionSpecificationType(C:CXCursor) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def clang_isPODType(T:CXType) -> Annotated[int, ctypes.c_uint32]: ...
# Array / vector element access.
@dll.bind
def clang_getElementType(T:CXType) -> CXType: ...
@dll.bind
def clang_getNumElements(T:CXType) -> Annotated[int, ctypes.c_int64]: ...
@dll.bind
def clang_getArrayElementType(T:CXType) -> CXType: ...
@dll.bind
def clang_getArraySize(T:CXType) -> Annotated[int, ctypes.c_int64]: ...
@dll.bind
def clang_Type_getNamedType(T:CXType) -> CXType: ...
@dll.bind
def clang_Type_isTransparentTagTypedef(T:CXType) -> Annotated[int, ctypes.c_uint32]: ...
# --- Nullability and type-layout queries ------------------------------------
class enum_CXTypeNullabilityKind(Annotated[int, ctypes.c_uint32], c.Enum): pass
CXTypeNullability_NonNull = enum_CXTypeNullabilityKind.define('CXTypeNullability_NonNull', 0)
CXTypeNullability_Nullable = enum_CXTypeNullabilityKind.define('CXTypeNullability_Nullable', 1)
CXTypeNullability_Unspecified = enum_CXTypeNullabilityKind.define('CXTypeNullability_Unspecified', 2)
CXTypeNullability_Invalid = enum_CXTypeNullabilityKind.define('CXTypeNullability_Invalid', 3)
CXTypeNullability_NullableResult = enum_CXTypeNullabilityKind.define('CXTypeNullability_NullableResult', 4)
@dll.bind
def clang_Type_getNullability(T:CXType) -> enum_CXTypeNullabilityKind: ...
# Negative error codes returned by the size/align/offset queries below
# (a non-negative result is the actual value).
class enum_CXTypeLayoutError(Annotated[int, ctypes.c_int32], c.Enum): pass
CXTypeLayoutError_Invalid = enum_CXTypeLayoutError.define('CXTypeLayoutError_Invalid', -1)
CXTypeLayoutError_Incomplete = enum_CXTypeLayoutError.define('CXTypeLayoutError_Incomplete', -2)
CXTypeLayoutError_Dependent = enum_CXTypeLayoutError.define('CXTypeLayoutError_Dependent', -3)
CXTypeLayoutError_NotConstantSize = enum_CXTypeLayoutError.define('CXTypeLayoutError_NotConstantSize', -4)
CXTypeLayoutError_InvalidFieldName = enum_CXTypeLayoutError.define('CXTypeLayoutError_InvalidFieldName', -5)
CXTypeLayoutError_Undeduced = enum_CXTypeLayoutError.define('CXTypeLayoutError_Undeduced', -6)
@dll.bind
def clang_Type_getAlignOf(T:CXType) -> Annotated[int, ctypes.c_int64]: ...
@dll.bind
def clang_Type_getClassType(T:CXType) -> CXType: ...
@dll.bind
def clang_Type_getSizeOf(T:CXType) -> Annotated[int, ctypes.c_int64]: ...
@dll.bind
def clang_Type_getOffsetOf(T:CXType, S:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> Annotated[int, ctypes.c_int64]: ...
@dll.bind
def clang_Type_getModifiedType(T:CXType) -> CXType: ...
@dll.bind
def clang_Type_getValueType(CT:CXType) -> CXType: ...
@dll.bind
def clang_Cursor_getOffsetOfField(C:CXCursor) -> Annotated[int, ctypes.c_int64]: ...
@dll.bind
def clang_Cursor_isAnonymous(C:CXCursor) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_Cursor_isAnonymousRecordDecl(C:CXCursor) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_Cursor_isInlineNamespace(C:CXCursor) -> Annotated[int, ctypes.c_uint32]: ...
# --- C++ ref-qualifiers, template args of types, base classes ---------------
class enum_CXRefQualifierKind(Annotated[int, ctypes.c_uint32], c.Enum): pass
CXRefQualifier_None = enum_CXRefQualifierKind.define('CXRefQualifier_None', 0)
CXRefQualifier_LValue = enum_CXRefQualifierKind.define('CXRefQualifier_LValue', 1)
CXRefQualifier_RValue = enum_CXRefQualifierKind.define('CXRefQualifier_RValue', 2)
@dll.bind
def clang_Type_getNumTemplateArguments(T:CXType) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def clang_Type_getTemplateArgumentAsType(T:CXType, i:Annotated[int, ctypes.c_uint32]) -> CXType: ...
@dll.bind
def clang_Type_getCXXRefQualifier(T:CXType) -> enum_CXRefQualifierKind: ...
@dll.bind
def clang_isVirtualBase(_0:CXCursor) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_getOffsetOfBase(Parent:CXCursor, Base:CXCursor) -> Annotated[int, ctypes.c_int64]: ...
# --- C++ access specifiers and storage classes ------------------------------
class enum_CX_CXXAccessSpecifier(Annotated[int, ctypes.c_uint32], c.Enum): pass
CX_CXXInvalidAccessSpecifier = enum_CX_CXXAccessSpecifier.define('CX_CXXInvalidAccessSpecifier', 0)
CX_CXXPublic = enum_CX_CXXAccessSpecifier.define('CX_CXXPublic', 1)
CX_CXXProtected = enum_CX_CXXAccessSpecifier.define('CX_CXXProtected', 2)
CX_CXXPrivate = enum_CX_CXXAccessSpecifier.define('CX_CXXPrivate', 3)
@dll.bind
def clang_getCXXAccessSpecifier(_0:CXCursor) -> enum_CX_CXXAccessSpecifier: ...
class enum_CX_StorageClass(Annotated[int, ctypes.c_uint32], c.Enum): pass
CX_SC_Invalid = enum_CX_StorageClass.define('CX_SC_Invalid', 0)
CX_SC_None = enum_CX_StorageClass.define('CX_SC_None', 1)
CX_SC_Extern = enum_CX_StorageClass.define('CX_SC_Extern', 2)
CX_SC_Static = enum_CX_StorageClass.define('CX_SC_Static', 3)
CX_SC_PrivateExtern = enum_CX_StorageClass.define('CX_SC_PrivateExtern', 4)
CX_SC_OpenCLWorkGroupLocal = enum_CX_StorageClass.define('CX_SC_OpenCLWorkGroupLocal', 5)
CX_SC_Auto = enum_CX_StorageClass.define('CX_SC_Auto', 6)
CX_SC_Register = enum_CX_StorageClass.define('CX_SC_Register', 7)
class enum_CX_BinaryOperatorKind(Annotated[int, ctypes.c_uint32], c.Enum): pass
CX_BO_Invalid = enum_CX_BinaryOperatorKind.define('CX_BO_Invalid', 0)
CX_BO_PtrMemD = enum_CX_BinaryOperatorKind.define('CX_BO_PtrMemD', 1)
CX_BO_PtrMemI = enum_CX_BinaryOperatorKind.define('CX_BO_PtrMemI', 2)
CX_BO_Mul = enum_CX_BinaryOperatorKind.define('CX_BO_Mul', 3)
CX_BO_Div = enum_CX_BinaryOperatorKind.define('CX_BO_Div', 4)
CX_BO_Rem = enum_CX_BinaryOperatorKind.define('CX_BO_Rem', 5)
CX_BO_Add = enum_CX_BinaryOperatorKind.define('CX_BO_Add', 6)
CX_BO_Sub = enum_CX_BinaryOperatorKind.define('CX_BO_Sub', 7)
CX_BO_Shl = enum_CX_BinaryOperatorKind.define('CX_BO_Shl', 8)
CX_BO_Shr = enum_CX_BinaryOperatorKind.define('CX_BO_Shr', 9)
CX_BO_Cmp = enum_CX_BinaryOperatorKind.define('CX_BO_Cmp', 10)
CX_BO_LT = enum_CX_BinaryOperatorKind.define('CX_BO_LT', 11)
CX_BO_GT = enum_CX_BinaryOperatorKind.define('CX_BO_GT', 12)
CX_BO_LE = enum_CX_BinaryOperatorKind.define('CX_BO_LE', 13)
CX_BO_GE = enum_CX_BinaryOperatorKind.define('CX_BO_GE', 14)
CX_BO_EQ = enum_CX_BinaryOperatorKind.define('CX_BO_EQ', 15)
CX_BO_NE = enum_CX_BinaryOperatorKind.define('CX_BO_NE', 16)
CX_BO_And = enum_CX_BinaryOperatorKind.define('CX_BO_And', 17)
CX_BO_Xor = enum_CX_BinaryOperatorKind.define('CX_BO_Xor', 18)
CX_BO_Or = enum_CX_BinaryOperatorKind.define('CX_BO_Or', 19)
CX_BO_LAnd = enum_CX_BinaryOperatorKind.define('CX_BO_LAnd', 20)
CX_BO_LOr = enum_CX_BinaryOperatorKind.define('CX_BO_LOr', 21)
CX_BO_Assign = enum_CX_BinaryOperatorKind.define('CX_BO_Assign', 22)
CX_BO_MulAssign = enum_CX_BinaryOperatorKind.define('CX_BO_MulAssign', 23)
CX_BO_DivAssign = enum_CX_BinaryOperatorKind.define('CX_BO_DivAssign', 24)
CX_BO_RemAssign = enum_CX_BinaryOperatorKind.define('CX_BO_RemAssign', 25)
CX_BO_AddAssign = enum_CX_BinaryOperatorKind.define('CX_BO_AddAssign', 26)
CX_BO_SubAssign = enum_CX_BinaryOperatorKind.define('CX_BO_SubAssign', 27)
CX_BO_ShlAssign = enum_CX_BinaryOperatorKind.define('CX_BO_ShlAssign', 28)
CX_BO_ShrAssign = enum_CX_BinaryOperatorKind.define('CX_BO_ShrAssign', 29)
CX_BO_AndAssign = enum_CX_BinaryOperatorKind.define('CX_BO_AndAssign', 30)
CX_BO_XorAssign = enum_CX_BinaryOperatorKind.define('CX_BO_XorAssign', 31)
CX_BO_OrAssign = enum_CX_BinaryOperatorKind.define('CX_BO_OrAssign', 32)
CX_BO_Comma = enum_CX_BinaryOperatorKind.define('CX_BO_Comma', 33)
CX_BO_LAST = enum_CX_BinaryOperatorKind.define('CX_BO_LAST', 33)
@dll.bind
def clang_Cursor_getBinaryOpcode(C:CXCursor) -> enum_CX_BinaryOperatorKind: ...
@dll.bind
def clang_Cursor_getBinaryOpcodeStr(Op:enum_CX_BinaryOperatorKind) -> CXString: ...
@dll.bind
def clang_Cursor_getStorageClass(_0:CXCursor) -> enum_CX_StorageClass: ...
@dll.bind
def clang_getNumOverloadedDecls(cursor:CXCursor) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_getOverloadedDecl(cursor:CXCursor, index:Annotated[int, ctypes.c_uint32]) -> CXCursor: ...
@dll.bind
def clang_getIBOutletCollectionType(_0:CXCursor) -> CXType: ...
class enum_CXChildVisitResult(Annotated[int, ctypes.c_uint32], c.Enum): pass
CXChildVisit_Break = enum_CXChildVisitResult.define('CXChildVisit_Break', 0)
CXChildVisit_Continue = enum_CXChildVisitResult.define('CXChildVisit_Continue', 1)
CXChildVisit_Recurse = enum_CXChildVisitResult.define('CXChildVisit_Recurse', 2)
CXCursorVisitor: TypeAlias = c.CFUNCTYPE[enum_CXChildVisitResult, [CXCursor, CXCursor, ctypes.c_void_p]]
@dll.bind
def clang_visitChildren(parent:CXCursor, visitor:CXCursorVisitor, client_data:CXClientData) -> Annotated[int, ctypes.c_uint32]: ...
class struct__CXChildVisitResult(ctypes.Structure): pass
CXCursorVisitorBlock: TypeAlias = c.POINTER[struct__CXChildVisitResult]
@dll.bind
def clang_visitChildrenWithBlock(parent:CXCursor, block:CXCursorVisitorBlock) -> Annotated[int, ctypes.c_uint32]: ...
# USR (Unified Symbol Resolution) helpers: retrieve a cursor's USR string or
# construct one from raw C-string components for the various ObjC entity kinds.
@dll.bind
def clang_getCursorUSR(_0:CXCursor) -> CXString: ...
@dll.bind
def clang_constructUSR_ObjCClass(class_name:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> CXString: ...
@dll.bind
def clang_constructUSR_ObjCCategory(class_name:c.POINTER[Annotated[bytes, ctypes.c_char]], category_name:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> CXString: ...
@dll.bind
def clang_constructUSR_ObjCProtocol(protocol_name:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> CXString: ...
@dll.bind
def clang_constructUSR_ObjCIvar(name:c.POINTER[Annotated[bytes, ctypes.c_char]], classUSR:CXString) -> CXString: ...
@dll.bind
def clang_constructUSR_ObjCMethod(name:c.POINTER[Annotated[bytes, ctypes.c_char]], isInstanceMethod:Annotated[int, ctypes.c_uint32], classUSR:CXString) -> CXString: ...
@dll.bind
def clang_constructUSR_ObjCProperty(property:c.POINTER[Annotated[bytes, ctypes.c_char]], classUSR:CXString) -> CXString: ...
# Cursor spelling (name text) and the source range of one piece of that name.
@dll.bind
def clang_getCursorSpelling(_0:CXCursor) -> CXString: ...
@dll.bind
def clang_Cursor_getSpellingNameRange(_0:CXCursor, pieceIndex:Annotated[int, ctypes.c_uint32], options:Annotated[int, ctypes.c_uint32]) -> CXSourceRange: ...
# Pretty-printing policy: an opaque handle plus per-property get/set accessors.
CXPrintingPolicy: TypeAlias = ctypes.c_void_p
class enum_CXPrintingPolicyProperty(Annotated[int, ctypes.c_uint32], c.Enum): pass
CXPrintingPolicy_Indentation = enum_CXPrintingPolicyProperty.define('CXPrintingPolicy_Indentation', 0)
CXPrintingPolicy_SuppressSpecifiers = enum_CXPrintingPolicyProperty.define('CXPrintingPolicy_SuppressSpecifiers', 1)
CXPrintingPolicy_SuppressTagKeyword = enum_CXPrintingPolicyProperty.define('CXPrintingPolicy_SuppressTagKeyword', 2)
CXPrintingPolicy_IncludeTagDefinition = enum_CXPrintingPolicyProperty.define('CXPrintingPolicy_IncludeTagDefinition', 3)
CXPrintingPolicy_SuppressScope = enum_CXPrintingPolicyProperty.define('CXPrintingPolicy_SuppressScope', 4)
CXPrintingPolicy_SuppressUnwrittenScope = enum_CXPrintingPolicyProperty.define('CXPrintingPolicy_SuppressUnwrittenScope', 5)
CXPrintingPolicy_SuppressInitializers = enum_CXPrintingPolicyProperty.define('CXPrintingPolicy_SuppressInitializers', 6)
CXPrintingPolicy_ConstantArraySizeAsWritten = enum_CXPrintingPolicyProperty.define('CXPrintingPolicy_ConstantArraySizeAsWritten', 7)
CXPrintingPolicy_AnonymousTagLocations = enum_CXPrintingPolicyProperty.define('CXPrintingPolicy_AnonymousTagLocations', 8)
CXPrintingPolicy_SuppressStrongLifetime = enum_CXPrintingPolicyProperty.define('CXPrintingPolicy_SuppressStrongLifetime', 9)
CXPrintingPolicy_SuppressLifetimeQualifiers = enum_CXPrintingPolicyProperty.define('CXPrintingPolicy_SuppressLifetimeQualifiers', 10)
CXPrintingPolicy_SuppressTemplateArgsInCXXConstructors = enum_CXPrintingPolicyProperty.define('CXPrintingPolicy_SuppressTemplateArgsInCXXConstructors', 11)
CXPrintingPolicy_Bool = enum_CXPrintingPolicyProperty.define('CXPrintingPolicy_Bool', 12)
CXPrintingPolicy_Restrict = enum_CXPrintingPolicyProperty.define('CXPrintingPolicy_Restrict', 13)
CXPrintingPolicy_Alignof = enum_CXPrintingPolicyProperty.define('CXPrintingPolicy_Alignof', 14)
CXPrintingPolicy_UnderscoreAlignof = enum_CXPrintingPolicyProperty.define('CXPrintingPolicy_UnderscoreAlignof', 15)
CXPrintingPolicy_UseVoidForZeroParams = enum_CXPrintingPolicyProperty.define('CXPrintingPolicy_UseVoidForZeroParams', 16)
CXPrintingPolicy_TerseOutput = enum_CXPrintingPolicyProperty.define('CXPrintingPolicy_TerseOutput', 17)
CXPrintingPolicy_PolishForDeclaration = enum_CXPrintingPolicyProperty.define('CXPrintingPolicy_PolishForDeclaration', 18)
CXPrintingPolicy_Half = enum_CXPrintingPolicyProperty.define('CXPrintingPolicy_Half', 19)
CXPrintingPolicy_MSWChar = enum_CXPrintingPolicyProperty.define('CXPrintingPolicy_MSWChar', 20)
CXPrintingPolicy_IncludeNewlines = enum_CXPrintingPolicyProperty.define('CXPrintingPolicy_IncludeNewlines', 21)
CXPrintingPolicy_MSVCFormatting = enum_CXPrintingPolicyProperty.define('CXPrintingPolicy_MSVCFormatting', 22)
CXPrintingPolicy_ConstantsAsWritten = enum_CXPrintingPolicyProperty.define('CXPrintingPolicy_ConstantsAsWritten', 23)
CXPrintingPolicy_SuppressImplicitBase = enum_CXPrintingPolicyProperty.define('CXPrintingPolicy_SuppressImplicitBase', 24)
CXPrintingPolicy_FullyQualifiedName = enum_CXPrintingPolicyProperty.define('CXPrintingPolicy_FullyQualifiedName', 25)
# Deliberately shares value 25: marker for the highest-numbered property.
CXPrintingPolicy_LastProperty = enum_CXPrintingPolicyProperty.define('CXPrintingPolicy_LastProperty', 25)
@dll.bind
def clang_PrintingPolicy_getProperty(Policy:CXPrintingPolicy, Property:enum_CXPrintingPolicyProperty) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_PrintingPolicy_setProperty(Policy:CXPrintingPolicy, Property:enum_CXPrintingPolicyProperty, Value:Annotated[int, ctypes.c_uint32]) -> None: ...
@dll.bind
def clang_getCursorPrintingPolicy(_0:CXCursor) -> CXPrintingPolicy: ...
# Policies obtained above must be released with clang_PrintingPolicy_dispose.
@dll.bind
def clang_PrintingPolicy_dispose(Policy:CXPrintingPolicy) -> None: ...
@dll.bind
def clang_getCursorPrettyPrinted(Cursor:CXCursor, Policy:CXPrintingPolicy) -> CXString: ...
@dll.bind
def clang_getTypePrettyPrinted(CT:CXType, cxPolicy:CXPrintingPolicy) -> CXString: ...
# Cursor cross-reference queries: display name, the entity a cursor refers to,
# its definition, and its canonical cursor.
@dll.bind
def clang_getCursorDisplayName(_0:CXCursor) -> CXString: ...
@dll.bind
def clang_getCursorReferenced(_0:CXCursor) -> CXCursor: ...
@dll.bind
def clang_getCursorDefinition(_0:CXCursor) -> CXCursor: ...
@dll.bind
def clang_isCursorDefinition(_0:CXCursor) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_getCanonicalCursor(_0:CXCursor) -> CXCursor: ...
# ObjC message-send queries; note the signed return types here.
@dll.bind
def clang_Cursor_getObjCSelectorIndex(_0:CXCursor) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def clang_Cursor_isDynamicCall(C:CXCursor) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def clang_Cursor_getReceiverType(C:CXCursor) -> CXType: ...
# ObjC @property attributes. Values are distinct powers of two so they can be
# OR-ed together into the bitmask returned by
# clang_Cursor_getObjCPropertyAttributes below.
class CXObjCPropertyAttrKind(Annotated[int, ctypes.c_uint32], c.Enum): pass
CXObjCPropertyAttr_noattr = CXObjCPropertyAttrKind.define('CXObjCPropertyAttr_noattr', 0)
CXObjCPropertyAttr_readonly = CXObjCPropertyAttrKind.define('CXObjCPropertyAttr_readonly', 1)
CXObjCPropertyAttr_getter = CXObjCPropertyAttrKind.define('CXObjCPropertyAttr_getter', 2)
CXObjCPropertyAttr_assign = CXObjCPropertyAttrKind.define('CXObjCPropertyAttr_assign', 4)
CXObjCPropertyAttr_readwrite = CXObjCPropertyAttrKind.define('CXObjCPropertyAttr_readwrite', 8)
CXObjCPropertyAttr_retain = CXObjCPropertyAttrKind.define('CXObjCPropertyAttr_retain', 16)
CXObjCPropertyAttr_copy = CXObjCPropertyAttrKind.define('CXObjCPropertyAttr_copy', 32)
CXObjCPropertyAttr_nonatomic = CXObjCPropertyAttrKind.define('CXObjCPropertyAttr_nonatomic', 64)
CXObjCPropertyAttr_setter = CXObjCPropertyAttrKind.define('CXObjCPropertyAttr_setter', 128)
CXObjCPropertyAttr_atomic = CXObjCPropertyAttrKind.define('CXObjCPropertyAttr_atomic', 256)
CXObjCPropertyAttr_weak = CXObjCPropertyAttrKind.define('CXObjCPropertyAttr_weak', 512)
CXObjCPropertyAttr_strong = CXObjCPropertyAttrKind.define('CXObjCPropertyAttr_strong', 1024)
CXObjCPropertyAttr_unsafe_unretained = CXObjCPropertyAttrKind.define('CXObjCPropertyAttr_unsafe_unretained', 2048)
CXObjCPropertyAttr_class = CXObjCPropertyAttrKind.define('CXObjCPropertyAttr_class', 4096)
@dll.bind
def clang_Cursor_getObjCPropertyAttributes(C:CXCursor, reserved:Annotated[int, ctypes.c_uint32]) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_Cursor_getObjCPropertyGetterName(C:CXCursor) -> CXString: ...
@dll.bind
def clang_Cursor_getObjCPropertySetterName(C:CXCursor) -> CXString: ...
# ObjC method/parameter declaration qualifiers; power-of-two bit flags combined
# in the result of clang_Cursor_getObjCDeclQualifiers.
class CXObjCDeclQualifierKind(Annotated[int, ctypes.c_uint32], c.Enum): pass
CXObjCDeclQualifier_None = CXObjCDeclQualifierKind.define('CXObjCDeclQualifier_None', 0)
CXObjCDeclQualifier_In = CXObjCDeclQualifierKind.define('CXObjCDeclQualifier_In', 1)
CXObjCDeclQualifier_Inout = CXObjCDeclQualifierKind.define('CXObjCDeclQualifier_Inout', 2)
CXObjCDeclQualifier_Out = CXObjCDeclQualifierKind.define('CXObjCDeclQualifier_Out', 4)
CXObjCDeclQualifier_Bycopy = CXObjCDeclQualifierKind.define('CXObjCDeclQualifier_Bycopy', 8)
CXObjCDeclQualifier_Byref = CXObjCDeclQualifierKind.define('CXObjCDeclQualifier_Byref', 16)
CXObjCDeclQualifier_Oneway = CXObjCDeclQualifierKind.define('CXObjCDeclQualifier_Oneway', 32)
@dll.bind
def clang_Cursor_getObjCDeclQualifiers(C:CXCursor) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_Cursor_isObjCOptional(C:CXCursor) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_Cursor_isVariadic(C:CXCursor) -> Annotated[int, ctypes.c_uint32]: ...
# Out-parameters (language, definedIn, isGenerated) are filled through pointers.
@dll.bind
def clang_Cursor_isExternalSymbol(C:CXCursor, language:c.POINTER[CXString], definedIn:c.POINTER[CXString], isGenerated:c.POINTER[Annotated[int, ctypes.c_uint32]]) -> Annotated[int, ctypes.c_uint32]: ...
# Documentation-comment accessors for the declaration under the cursor.
@dll.bind
def clang_Cursor_getCommentRange(C:CXCursor) -> CXSourceRange: ...
@dll.bind
def clang_Cursor_getRawCommentText(C:CXCursor) -> CXString: ...
@dll.bind
def clang_Cursor_getBriefCommentText(C:CXCursor) -> CXString: ...
@dll.bind
def clang_Cursor_getMangling(_0:CXCursor) -> CXString: ...
# Mirrors the C `CXStringSet` layout: a counted array of CXString.
# SIZE and field offsets reproduce the native ABI; do not edit by hand.
@c.record
class CXStringSet(c.Struct):
  SIZE = 16
  Strings: Annotated[c.POINTER[CXString], 0]
  Count: Annotated[Annotated[int, ctypes.c_uint32], 8]
# Functions returning several mangled names at once for C++/ObjC entities.
@dll.bind
def clang_Cursor_getCXXManglings(_0:CXCursor) -> c.POINTER[CXStringSet]: ...
@dll.bind
def clang_Cursor_getObjCManglings(_0:CXCursor) -> c.POINTER[CXStringSet]: ...
# Module API: CXModule is an opaque handle; accessors below query a module's
# AST file, parent, names, system-ness, and its top-level headers.
CXModule: TypeAlias = ctypes.c_void_p
@dll.bind
def clang_Cursor_getModule(C:CXCursor) -> CXModule: ...
@dll.bind
def clang_getModuleForFile(_0:CXTranslationUnit, _1:CXFile) -> CXModule: ...
@dll.bind
def clang_Module_getASTFile(Module:CXModule) -> CXFile: ...
@dll.bind
def clang_Module_getParent(Module:CXModule) -> CXModule: ...
@dll.bind
def clang_Module_getName(Module:CXModule) -> CXString: ...
@dll.bind
def clang_Module_getFullName(Module:CXModule) -> CXString: ...
@dll.bind
def clang_Module_isSystem(Module:CXModule) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def clang_Module_getNumTopLevelHeaders(_0:CXTranslationUnit, Module:CXModule) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_Module_getTopLevelHeader(_0:CXTranslationUnit, Module:CXModule, Index:Annotated[int, ctypes.c_uint32]) -> CXFile: ...
# C++ introspection predicates (all return unsigned 0/1-style flags) and
# template-related cursor queries.
@dll.bind
def clang_CXXConstructor_isConvertingConstructor(C:CXCursor) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_CXXConstructor_isCopyConstructor(C:CXCursor) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_CXXConstructor_isDefaultConstructor(C:CXCursor) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_CXXConstructor_isMoveConstructor(C:CXCursor) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_CXXField_isMutable(C:CXCursor) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_CXXMethod_isDefaulted(C:CXCursor) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_CXXMethod_isDeleted(C:CXCursor) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_CXXMethod_isPureVirtual(C:CXCursor) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_CXXMethod_isStatic(C:CXCursor) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_CXXMethod_isVirtual(C:CXCursor) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_CXXMethod_isCopyAssignmentOperator(C:CXCursor) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_CXXMethod_isMoveAssignmentOperator(C:CXCursor) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_CXXMethod_isExplicit(C:CXCursor) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_CXXRecord_isAbstract(C:CXCursor) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_EnumDecl_isScoped(C:CXCursor) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_CXXMethod_isConst(C:CXCursor) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_getTemplateCursorKind(C:CXCursor) -> enum_CXCursorKind: ...
@dll.bind
def clang_getSpecializedCursorTemplate(C:CXCursor) -> CXCursor: ...
@dll.bind
def clang_getCursorReferenceNameRange(C:CXCursor, NameFlags:Annotated[int, ctypes.c_uint32], PieceIndex:Annotated[int, ctypes.c_uint32]) -> CXSourceRange: ...
# Bit flags for the NameFlags argument above (power-of-two values).
class enum_CXNameRefFlags(Annotated[int, ctypes.c_uint32], c.Enum): pass
CXNameRange_WantQualifier = enum_CXNameRefFlags.define('CXNameRange_WantQualifier', 1)
CXNameRange_WantTemplateArgs = enum_CXNameRefFlags.define('CXNameRange_WantTemplateArgs', 2)
CXNameRange_WantSinglePiece = enum_CXNameRefFlags.define('CXNameRange_WantSinglePiece', 4)
# Token API: lexical token kinds, the CXToken record, and tokenization helpers.
class enum_CXTokenKind(Annotated[int, ctypes.c_uint32], c.Enum): pass
CXToken_Punctuation = enum_CXTokenKind.define('CXToken_Punctuation', 0)
CXToken_Keyword = enum_CXTokenKind.define('CXToken_Keyword', 1)
CXToken_Identifier = enum_CXTokenKind.define('CXToken_Identifier', 2)
CXToken_Literal = enum_CXTokenKind.define('CXToken_Literal', 3)
CXToken_Comment = enum_CXTokenKind.define('CXToken_Comment', 4)
CXTokenKind: TypeAlias = enum_CXTokenKind
# Mirrors the C `CXToken` layout; int_data/ptr_data are opaque to callers.
# SIZE and offsets reproduce the native ABI; do not edit by hand.
@c.record
class CXToken(c.Struct):
  SIZE = 24
  int_data: Annotated[c.Array[Annotated[int, ctypes.c_uint32], Literal[4]], 0]
  ptr_data: Annotated[ctypes.c_void_p, 16]
@dll.bind
def clang_getToken(TU:CXTranslationUnit, Location:CXSourceLocation) -> c.POINTER[CXToken]: ...
@dll.bind
def clang_getTokenKind(_0:CXToken) -> CXTokenKind: ...
@dll.bind
def clang_getTokenSpelling(_0:CXTranslationUnit, _1:CXToken) -> CXString: ...
@dll.bind
def clang_getTokenLocation(_0:CXTranslationUnit, _1:CXToken) -> CXSourceLocation: ...
@dll.bind
def clang_getTokenExtent(_0:CXTranslationUnit, _1:CXToken) -> CXSourceRange: ...
# clang_tokenize returns the token buffer through Tokens/NumTokens out-params;
# the buffer must later be released with clang_disposeTokens.
@dll.bind
def clang_tokenize(TU:CXTranslationUnit, Range:CXSourceRange, Tokens:c.POINTER[c.POINTER[CXToken]], NumTokens:c.POINTER[Annotated[int, ctypes.c_uint32]]) -> None: ...
@dll.bind
def clang_annotateTokens(TU:CXTranslationUnit, Tokens:c.POINTER[CXToken], NumTokens:Annotated[int, ctypes.c_uint32], Cursors:c.POINTER[CXCursor]) -> None: ...
@dll.bind
def clang_disposeTokens(TU:CXTranslationUnit, Tokens:c.POINTER[CXToken], NumTokens:Annotated[int, ctypes.c_uint32]) -> None: ...
# Debug / miscellaneous entry points.
@dll.bind
def clang_getCursorKindSpelling(Kind:enum_CXCursorKind) -> CXString: ...
@dll.bind
def clang_getDefinitionSpellingAndExtent(_0:CXCursor, startBuf:c.POINTER[c.POINTER[Annotated[bytes, ctypes.c_char]]], endBuf:c.POINTER[c.POINTER[Annotated[bytes, ctypes.c_char]]], startLine:c.POINTER[Annotated[int, ctypes.c_uint32]], startColumn:c.POINTER[Annotated[int, ctypes.c_uint32]], endLine:c.POINTER[Annotated[int, ctypes.c_uint32]], endColumn:c.POINTER[Annotated[int, ctypes.c_uint32]]) -> None: ...
@dll.bind
def clang_enableStackTraces() -> None: ...
@dll.bind
def clang_executeOnThread(fn:c.CFUNCTYPE[None, [ctypes.c_void_p]], user_data:ctypes.c_void_p, stack_size:Annotated[int, ctypes.c_uint32]) -> None: ...
# Code-completion API: completion strings are opaque handles composed of typed
# "chunks"; results pair a cursor kind with such a string.
CXCompletionString: TypeAlias = ctypes.c_void_p
# Mirrors the C `CXCompletionResult` layout (SIZE/offsets follow the ABI).
@c.record
class CXCompletionResult(c.Struct):
  SIZE = 16
  CursorKind: Annotated[enum_CXCursorKind, 0]
  CompletionString: Annotated[CXCompletionString, 8]
# Kinds of chunk making up a completion string (typed text, placeholders,
# punctuation, result type, spacing, ...).
class enum_CXCompletionChunkKind(Annotated[int, ctypes.c_uint32], c.Enum): pass
CXCompletionChunk_Optional = enum_CXCompletionChunkKind.define('CXCompletionChunk_Optional', 0)
CXCompletionChunk_TypedText = enum_CXCompletionChunkKind.define('CXCompletionChunk_TypedText', 1)
CXCompletionChunk_Text = enum_CXCompletionChunkKind.define('CXCompletionChunk_Text', 2)
CXCompletionChunk_Placeholder = enum_CXCompletionChunkKind.define('CXCompletionChunk_Placeholder', 3)
CXCompletionChunk_Informative = enum_CXCompletionChunkKind.define('CXCompletionChunk_Informative', 4)
CXCompletionChunk_CurrentParameter = enum_CXCompletionChunkKind.define('CXCompletionChunk_CurrentParameter', 5)
CXCompletionChunk_LeftParen = enum_CXCompletionChunkKind.define('CXCompletionChunk_LeftParen', 6)
CXCompletionChunk_RightParen = enum_CXCompletionChunkKind.define('CXCompletionChunk_RightParen', 7)
CXCompletionChunk_LeftBracket = enum_CXCompletionChunkKind.define('CXCompletionChunk_LeftBracket', 8)
CXCompletionChunk_RightBracket = enum_CXCompletionChunkKind.define('CXCompletionChunk_RightBracket', 9)
CXCompletionChunk_LeftBrace = enum_CXCompletionChunkKind.define('CXCompletionChunk_LeftBrace', 10)
CXCompletionChunk_RightBrace = enum_CXCompletionChunkKind.define('CXCompletionChunk_RightBrace', 11)
CXCompletionChunk_LeftAngle = enum_CXCompletionChunkKind.define('CXCompletionChunk_LeftAngle', 12)
CXCompletionChunk_RightAngle = enum_CXCompletionChunkKind.define('CXCompletionChunk_RightAngle', 13)
CXCompletionChunk_Comma = enum_CXCompletionChunkKind.define('CXCompletionChunk_Comma', 14)
CXCompletionChunk_ResultType = enum_CXCompletionChunkKind.define('CXCompletionChunk_ResultType', 15)
CXCompletionChunk_Colon = enum_CXCompletionChunkKind.define('CXCompletionChunk_Colon', 16)
CXCompletionChunk_SemiColon = enum_CXCompletionChunkKind.define('CXCompletionChunk_SemiColon', 17)
CXCompletionChunk_Equal = enum_CXCompletionChunkKind.define('CXCompletionChunk_Equal', 18)
CXCompletionChunk_HorizontalSpace = enum_CXCompletionChunkKind.define('CXCompletionChunk_HorizontalSpace', 19)
CXCompletionChunk_VerticalSpace = enum_CXCompletionChunkKind.define('CXCompletionChunk_VerticalSpace', 20)
# Per-chunk and whole-string accessors for a completion string.
@dll.bind
def clang_getCompletionChunkKind(completion_string:CXCompletionString, chunk_number:Annotated[int, ctypes.c_uint32]) -> enum_CXCompletionChunkKind: ...
@dll.bind
def clang_getCompletionChunkText(completion_string:CXCompletionString, chunk_number:Annotated[int, ctypes.c_uint32]) -> CXString: ...
@dll.bind
def clang_getCompletionChunkCompletionString(completion_string:CXCompletionString, chunk_number:Annotated[int, ctypes.c_uint32]) -> CXCompletionString: ...
@dll.bind
def clang_getNumCompletionChunks(completion_string:CXCompletionString) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_getCompletionPriority(completion_string:CXCompletionString) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_getCompletionAvailability(completion_string:CXCompletionString) -> enum_CXAvailabilityKind: ...
@dll.bind
def clang_getCompletionNumAnnotations(completion_string:CXCompletionString) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_getCompletionAnnotation(completion_string:CXCompletionString, annotation_number:Annotated[int, ctypes.c_uint32]) -> CXString: ...
@dll.bind
def clang_getCompletionParent(completion_string:CXCompletionString, kind:c.POINTER[enum_CXCursorKind]) -> CXString: ...
@dll.bind
def clang_getCompletionBriefComment(completion_string:CXCompletionString) -> CXString: ...
@dll.bind
def clang_getCursorCompletionString(cursor:CXCursor) -> CXCompletionString: ...
# Mirrors the C `CXCodeCompleteResults` layout: counted array of results.
@c.record
class CXCodeCompleteResults(c.Struct):
  SIZE = 16
  Results: Annotated[c.POINTER[CXCompletionResult], 0]
  NumResults: Annotated[Annotated[int, ctypes.c_uint32], 8]
@dll.bind
def clang_getCompletionNumFixIts(results:c.POINTER[CXCodeCompleteResults], completion_index:Annotated[int, ctypes.c_uint32]) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_getCompletionFixIt(results:c.POINTER[CXCodeCompleteResults], completion_index:Annotated[int, ctypes.c_uint32], fixit_index:Annotated[int, ctypes.c_uint32], replacement_range:c.POINTER[CXSourceRange]) -> CXString: ...
# Option bit flags for the `options` argument of clang_codeCompleteAt
# (power-of-two values, OR-able).
class enum_CXCodeComplete_Flags(Annotated[int, ctypes.c_uint32], c.Enum): pass
CXCodeComplete_IncludeMacros = enum_CXCodeComplete_Flags.define('CXCodeComplete_IncludeMacros', 1)
CXCodeComplete_IncludeCodePatterns = enum_CXCodeComplete_Flags.define('CXCodeComplete_IncludeCodePatterns', 2)
CXCodeComplete_IncludeBriefComments = enum_CXCodeComplete_Flags.define('CXCodeComplete_IncludeBriefComments', 4)
CXCodeComplete_SkipPreamble = enum_CXCodeComplete_Flags.define('CXCodeComplete_SkipPreamble', 8)
CXCodeComplete_IncludeCompletionsWithFixIts = enum_CXCodeComplete_Flags.define('CXCodeComplete_IncludeCompletionsWithFixIts', 16)
# Completion-context bit flags describing what kinds of entity are plausible at
# the completion point; power-of-two values, combined into the 64-bit mask
# returned by clang_codeCompleteGetContexts.
class enum_CXCompletionContext(Annotated[int, ctypes.c_uint32], c.Enum): pass
CXCompletionContext_Unexposed = enum_CXCompletionContext.define('CXCompletionContext_Unexposed', 0)
CXCompletionContext_AnyType = enum_CXCompletionContext.define('CXCompletionContext_AnyType', 1)
CXCompletionContext_AnyValue = enum_CXCompletionContext.define('CXCompletionContext_AnyValue', 2)
CXCompletionContext_ObjCObjectValue = enum_CXCompletionContext.define('CXCompletionContext_ObjCObjectValue', 4)
CXCompletionContext_ObjCSelectorValue = enum_CXCompletionContext.define('CXCompletionContext_ObjCSelectorValue', 8)
CXCompletionContext_CXXClassTypeValue = enum_CXCompletionContext.define('CXCompletionContext_CXXClassTypeValue', 16)
CXCompletionContext_DotMemberAccess = enum_CXCompletionContext.define('CXCompletionContext_DotMemberAccess', 32)
CXCompletionContext_ArrowMemberAccess = enum_CXCompletionContext.define('CXCompletionContext_ArrowMemberAccess', 64)
CXCompletionContext_ObjCPropertyAccess = enum_CXCompletionContext.define('CXCompletionContext_ObjCPropertyAccess', 128)
CXCompletionContext_EnumTag = enum_CXCompletionContext.define('CXCompletionContext_EnumTag', 256)
CXCompletionContext_UnionTag = enum_CXCompletionContext.define('CXCompletionContext_UnionTag', 512)
CXCompletionContext_StructTag = enum_CXCompletionContext.define('CXCompletionContext_StructTag', 1024)
CXCompletionContext_ClassTag = enum_CXCompletionContext.define('CXCompletionContext_ClassTag', 2048)
CXCompletionContext_Namespace = enum_CXCompletionContext.define('CXCompletionContext_Namespace', 4096)
CXCompletionContext_NestedNameSpecifier = enum_CXCompletionContext.define('CXCompletionContext_NestedNameSpecifier', 8192)
CXCompletionContext_ObjCInterface = enum_CXCompletionContext.define('CXCompletionContext_ObjCInterface', 16384)
CXCompletionContext_ObjCProtocol = enum_CXCompletionContext.define('CXCompletionContext_ObjCProtocol', 32768)
CXCompletionContext_ObjCCategory = enum_CXCompletionContext.define('CXCompletionContext_ObjCCategory', 65536)
CXCompletionContext_ObjCInstanceMessage = enum_CXCompletionContext.define('CXCompletionContext_ObjCInstanceMessage', 131072)
CXCompletionContext_ObjCClassMessage = enum_CXCompletionContext.define('CXCompletionContext_ObjCClassMessage', 262144)
CXCompletionContext_ObjCSelectorName = enum_CXCompletionContext.define('CXCompletionContext_ObjCSelectorName', 524288)
CXCompletionContext_MacroName = enum_CXCompletionContext.define('CXCompletionContext_MacroName', 1048576)
CXCompletionContext_NaturalLanguage = enum_CXCompletionContext.define('CXCompletionContext_NaturalLanguage', 2097152)
CXCompletionContext_IncludedFile = enum_CXCompletionContext.define('CXCompletionContext_IncludedFile', 4194304)
# 8388607 == 2**23 - 1: all 23 context bits set ("could be anything").
CXCompletionContext_Unknown = enum_CXCompletionContext.define('CXCompletionContext_Unknown', 8388607)
# Code-completion entry points. Result buffers from clang_codeCompleteAt must
# be released with clang_disposeCodeCompleteResults.
@dll.bind
def clang_defaultCodeCompleteOptions() -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_codeCompleteAt(TU:CXTranslationUnit, complete_filename:c.POINTER[Annotated[bytes, ctypes.c_char]], complete_line:Annotated[int, ctypes.c_uint32], complete_column:Annotated[int, ctypes.c_uint32], unsaved_files:c.POINTER[struct_CXUnsavedFile], num_unsaved_files:Annotated[int, ctypes.c_uint32], options:Annotated[int, ctypes.c_uint32]) -> c.POINTER[CXCodeCompleteResults]: ...
@dll.bind
def clang_sortCodeCompletionResults(Results:c.POINTER[CXCompletionResult], NumResults:Annotated[int, ctypes.c_uint32]) -> None: ...
@dll.bind
def clang_disposeCodeCompleteResults(Results:c.POINTER[CXCodeCompleteResults]) -> None: ...
@dll.bind
def clang_codeCompleteGetNumDiagnostics(Results:c.POINTER[CXCodeCompleteResults]) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_codeCompleteGetDiagnostic(Results:c.POINTER[CXCodeCompleteResults], Index:Annotated[int, ctypes.c_uint32]) -> CXDiagnostic: ...
# Returns the 64-bit mask of enum_CXCompletionContext bits for these results.
@dll.bind
def clang_codeCompleteGetContexts(Results:c.POINTER[CXCodeCompleteResults]) -> Annotated[int, ctypes.c_uint64]: ...
@dll.bind
def clang_codeCompleteGetContainerKind(Results:c.POINTER[CXCodeCompleteResults], IsIncomplete:c.POINTER[Annotated[int, ctypes.c_uint32]]) -> enum_CXCursorKind: ...
@dll.bind
def clang_codeCompleteGetContainerUSR(Results:c.POINTER[CXCodeCompleteResults]) -> CXString: ...
@dll.bind
def clang_codeCompleteGetObjCSelector(Results:c.POINTER[CXCodeCompleteResults]) -> CXString: ...
# Library version / crash recovery / inclusion traversal.
@dll.bind
def clang_getClangVersion() -> CXString: ...
@dll.bind
def clang_toggleCrashRecovery(isEnabled:Annotated[int, ctypes.c_uint32]) -> None: ...
# Callback: (client_data is last arg) invoked once per inclusion in the TU.
CXInclusionVisitor: TypeAlias = c.CFUNCTYPE[None, [ctypes.c_void_p, c.POINTER[CXSourceLocation], Annotated[int, ctypes.c_uint32], ctypes.c_void_p]]
@dll.bind
def clang_getInclusions(tu:CXTranslationUnit, visitor:CXInclusionVisitor, client_data:CXClientData) -> None: ...
# Constant-expression evaluation: kind tags plus typed accessors on an opaque
# CXEvalResult handle (released via clang_EvalResult_dispose).
class CXEvalResultKind(Annotated[int, ctypes.c_uint32], c.Enum): pass
CXEval_Int = CXEvalResultKind.define('CXEval_Int', 1)
CXEval_Float = CXEvalResultKind.define('CXEval_Float', 2)
CXEval_ObjCStrLiteral = CXEvalResultKind.define('CXEval_ObjCStrLiteral', 3)
CXEval_StrLiteral = CXEvalResultKind.define('CXEval_StrLiteral', 4)
CXEval_CFStr = CXEvalResultKind.define('CXEval_CFStr', 5)
CXEval_Other = CXEvalResultKind.define('CXEval_Other', 6)
# Note: the zero value is defined last in the generated order.
CXEval_UnExposed = CXEvalResultKind.define('CXEval_UnExposed', 0)
CXEvalResult: TypeAlias = ctypes.c_void_p
@dll.bind
def clang_Cursor_Evaluate(C:CXCursor) -> CXEvalResult: ...
@dll.bind
def clang_EvalResult_getKind(E:CXEvalResult) -> CXEvalResultKind: ...
@dll.bind
def clang_EvalResult_getAsInt(E:CXEvalResult) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def clang_EvalResult_getAsLongLong(E:CXEvalResult) -> Annotated[int, ctypes.c_int64]: ...
@dll.bind
def clang_EvalResult_isUnsignedInt(E:CXEvalResult) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_EvalResult_getAsUnsigned(E:CXEvalResult) -> Annotated[int, ctypes.c_uint64]: ...
@dll.bind
def clang_EvalResult_getAsDouble(E:CXEvalResult) -> Annotated[float, ctypes.c_double]: ...
@dll.bind
def clang_EvalResult_getAsStr(E:CXEvalResult) -> c.POINTER[Annotated[bytes, ctypes.c_char]]: ...
@dll.bind
def clang_EvalResult_dispose(E:CXEvalResult) -> None: ...
# File-remapping API: opaque handle constructed from a path or a file list,
# queried by index, released via clang_remap_dispose.
CXRemapping: TypeAlias = ctypes.c_void_p
@dll.bind
def clang_getRemappings(path:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> CXRemapping: ...
@dll.bind
def clang_getRemappingsFromFileList(filePaths:c.POINTER[c.POINTER[Annotated[bytes, ctypes.c_char]]], numFiles:Annotated[int, ctypes.c_uint32]) -> CXRemapping: ...
@dll.bind
def clang_remap_getNumFiles(_0:CXRemapping) -> Annotated[int, ctypes.c_uint32]: ...
# original/transformed are CXString out-parameters filled for entry `index`.
@dll.bind
def clang_remap_getFilenames(_0:CXRemapping, index:Annotated[int, ctypes.c_uint32], original:c.POINTER[CXString], transformed:c.POINTER[CXString]) -> None: ...
@dll.bind
def clang_remap_dispose(_0:CXRemapping) -> None: ...
# Cursor-and-range visitation: callback result codes, the visitor struct
# (context pointer + C function pointer), and the find-references entry points.
class enum_CXVisitorResult(Annotated[int, ctypes.c_uint32], c.Enum): pass
CXVisit_Break = enum_CXVisitorResult.define('CXVisit_Break', 0)
CXVisit_Continue = enum_CXVisitorResult.define('CXVisit_Continue', 1)
# Mirrors the C `CXCursorAndRangeVisitor` layout (SIZE/offsets follow the ABI).
@c.record
class struct_CXCursorAndRangeVisitor(c.Struct):
  SIZE = 16
  context: Annotated[ctypes.c_void_p, 0]
  visit: Annotated[c.CFUNCTYPE[enum_CXVisitorResult, [ctypes.c_void_p, CXCursor, CXSourceRange]], 8]
CXCursorAndRangeVisitor: TypeAlias = struct_CXCursorAndRangeVisitor
# Overall outcome of a find-* call.
class CXResult(Annotated[int, ctypes.c_uint32], c.Enum): pass
CXResult_Success = CXResult.define('CXResult_Success', 0)
CXResult_Invalid = CXResult.define('CXResult_Invalid', 1)
CXResult_VisitBreak = CXResult.define('CXResult_VisitBreak', 2)
@dll.bind
def clang_findReferencesInFile(cursor:CXCursor, file:CXFile, visitor:CXCursorAndRangeVisitor) -> CXResult: ...
@dll.bind
def clang_findIncludesInFile(TU:CXTranslationUnit, file:CXFile, visitor:CXCursorAndRangeVisitor) -> CXResult: ...
# Opaque block-based visitor variants (no fields declared on the Python side).
class struct__CXCursorAndRangeVisitorBlock(ctypes.Structure): pass
CXCursorAndRangeVisitorBlock: TypeAlias = c.POINTER[struct__CXCursorAndRangeVisitorBlock]
@dll.bind
def clang_findReferencesInFileWithBlock(_0:CXCursor, _1:CXFile, _2:CXCursorAndRangeVisitorBlock) -> CXResult: ...
@dll.bind
def clang_findIncludesInFileWithBlock(_0:CXTranslationUnit, _1:CXFile, _2:CXCursorAndRangeVisitorBlock) -> CXResult: ...
# Indexer API: opaque client-supplied handles plus the location/file records.
# All struct SIZEs and field offsets mirror the native ABI; do not edit by hand.
CXIdxClientFile: TypeAlias = ctypes.c_void_p
CXIdxClientEntity: TypeAlias = ctypes.c_void_p
CXIdxClientContainer: TypeAlias = ctypes.c_void_p
CXIdxClientASTFile: TypeAlias = ctypes.c_void_p
# Indexer source location; ptr_data/int_data are opaque to callers.
@c.record
class CXIdxLoc(c.Struct):
  SIZE = 24
  ptr_data: Annotated[c.Array[ctypes.c_void_p, Literal[2]], 0]
  int_data: Annotated[Annotated[int, ctypes.c_uint32], 16]
# Describes one #include/#import directive seen during indexing.
@c.record
class CXIdxIncludedFileInfo(c.Struct):
  SIZE = 56
  hashLoc: Annotated[CXIdxLoc, 0]
  filename: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 24]
  file: Annotated[CXFile, 32]
  isImport: Annotated[Annotated[int, ctypes.c_int32], 40]
  isAngled: Annotated[Annotated[int, ctypes.c_int32], 44]
  isModuleImport: Annotated[Annotated[int, ctypes.c_int32], 48]
# Describes an AST file (module) imported during indexing.
@c.record
class CXIdxImportedASTFileInfo(c.Struct):
  SIZE = 48
  file: Annotated[CXFile, 0]
  module: Annotated[CXModule, 8]
  loc: Annotated[CXIdxLoc, 16]
  isImplicit: Annotated[Annotated[int, ctypes.c_int32], 40]
# Indexer classification enums: entity kind, source language, C++ template
# kind, and attribute kind.
class CXIdxEntityKind(Annotated[int, ctypes.c_uint32], c.Enum): pass
CXIdxEntity_Unexposed = CXIdxEntityKind.define('CXIdxEntity_Unexposed', 0)
CXIdxEntity_Typedef = CXIdxEntityKind.define('CXIdxEntity_Typedef', 1)
CXIdxEntity_Function = CXIdxEntityKind.define('CXIdxEntity_Function', 2)
CXIdxEntity_Variable = CXIdxEntityKind.define('CXIdxEntity_Variable', 3)
CXIdxEntity_Field = CXIdxEntityKind.define('CXIdxEntity_Field', 4)
CXIdxEntity_EnumConstant = CXIdxEntityKind.define('CXIdxEntity_EnumConstant', 5)
CXIdxEntity_ObjCClass = CXIdxEntityKind.define('CXIdxEntity_ObjCClass', 6)
CXIdxEntity_ObjCProtocol = CXIdxEntityKind.define('CXIdxEntity_ObjCProtocol', 7)
CXIdxEntity_ObjCCategory = CXIdxEntityKind.define('CXIdxEntity_ObjCCategory', 8)
CXIdxEntity_ObjCInstanceMethod = CXIdxEntityKind.define('CXIdxEntity_ObjCInstanceMethod', 9)
CXIdxEntity_ObjCClassMethod = CXIdxEntityKind.define('CXIdxEntity_ObjCClassMethod', 10)
CXIdxEntity_ObjCProperty = CXIdxEntityKind.define('CXIdxEntity_ObjCProperty', 11)
CXIdxEntity_ObjCIvar = CXIdxEntityKind.define('CXIdxEntity_ObjCIvar', 12)
CXIdxEntity_Enum = CXIdxEntityKind.define('CXIdxEntity_Enum', 13)
CXIdxEntity_Struct = CXIdxEntityKind.define('CXIdxEntity_Struct', 14)
CXIdxEntity_Union = CXIdxEntityKind.define('CXIdxEntity_Union', 15)
CXIdxEntity_CXXClass = CXIdxEntityKind.define('CXIdxEntity_CXXClass', 16)
CXIdxEntity_CXXNamespace = CXIdxEntityKind.define('CXIdxEntity_CXXNamespace', 17)
CXIdxEntity_CXXNamespaceAlias = CXIdxEntityKind.define('CXIdxEntity_CXXNamespaceAlias', 18)
CXIdxEntity_CXXStaticVariable = CXIdxEntityKind.define('CXIdxEntity_CXXStaticVariable', 19)
CXIdxEntity_CXXStaticMethod = CXIdxEntityKind.define('CXIdxEntity_CXXStaticMethod', 20)
CXIdxEntity_CXXInstanceMethod = CXIdxEntityKind.define('CXIdxEntity_CXXInstanceMethod', 21)
CXIdxEntity_CXXConstructor = CXIdxEntityKind.define('CXIdxEntity_CXXConstructor', 22)
CXIdxEntity_CXXDestructor = CXIdxEntityKind.define('CXIdxEntity_CXXDestructor', 23)
CXIdxEntity_CXXConversionFunction = CXIdxEntityKind.define('CXIdxEntity_CXXConversionFunction', 24)
CXIdxEntity_CXXTypeAlias = CXIdxEntityKind.define('CXIdxEntity_CXXTypeAlias', 25)
CXIdxEntity_CXXInterface = CXIdxEntityKind.define('CXIdxEntity_CXXInterface', 26)
CXIdxEntity_CXXConcept = CXIdxEntityKind.define('CXIdxEntity_CXXConcept', 27)
class CXIdxEntityLanguage(Annotated[int, ctypes.c_uint32], c.Enum): pass
CXIdxEntityLang_None = CXIdxEntityLanguage.define('CXIdxEntityLang_None', 0)
CXIdxEntityLang_C = CXIdxEntityLanguage.define('CXIdxEntityLang_C', 1)
CXIdxEntityLang_ObjC = CXIdxEntityLanguage.define('CXIdxEntityLang_ObjC', 2)
CXIdxEntityLang_CXX = CXIdxEntityLanguage.define('CXIdxEntityLang_CXX', 3)
CXIdxEntityLang_Swift = CXIdxEntityLanguage.define('CXIdxEntityLang_Swift', 4)
class CXIdxEntityCXXTemplateKind(Annotated[int, ctypes.c_uint32], c.Enum): pass
CXIdxEntity_NonTemplate = CXIdxEntityCXXTemplateKind.define('CXIdxEntity_NonTemplate', 0)
CXIdxEntity_Template = CXIdxEntityCXXTemplateKind.define('CXIdxEntity_Template', 1)
CXIdxEntity_TemplatePartialSpecialization = CXIdxEntityCXXTemplateKind.define('CXIdxEntity_TemplatePartialSpecialization', 2)
CXIdxEntity_TemplateSpecialization = CXIdxEntityCXXTemplateKind.define('CXIdxEntity_TemplateSpecialization', 3)
class CXIdxAttrKind(Annotated[int, ctypes.c_uint32], c.Enum): pass
CXIdxAttr_Unexposed = CXIdxAttrKind.define('CXIdxAttr_Unexposed', 0)
CXIdxAttr_IBAction = CXIdxAttrKind.define('CXIdxAttr_IBAction', 1)
CXIdxAttr_IBOutlet = CXIdxAttrKind.define('CXIdxAttr_IBOutlet', 2)
CXIdxAttr_IBOutletCollection = CXIdxAttrKind.define('CXIdxAttr_IBOutletCollection', 3)
# Indexer info records. SIZEs and field offsets mirror the native ABI (offset
# gaps are struct padding); do not edit by hand.
@c.record
class CXIdxAttrInfo(c.Struct):
  SIZE = 64
  kind: Annotated[CXIdxAttrKind, 0]
  cursor: Annotated[CXCursor, 8]
  loc: Annotated[CXIdxLoc, 40]
# One indexed entity: kind tags, C-string name/USR, cursor, and its attribute
# array (attributes/numAttributes form a counted pointer array).
@c.record
class CXIdxEntityInfo(c.Struct):
  SIZE = 80
  kind: Annotated[CXIdxEntityKind, 0]
  templateKind: Annotated[CXIdxEntityCXXTemplateKind, 4]
  lang: Annotated[CXIdxEntityLanguage, 8]
  name: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 16]
  USR: Annotated[c.POINTER[Annotated[bytes, ctypes.c_char]], 24]
  cursor: Annotated[CXCursor, 32]
  attributes: Annotated[c.POINTER[c.POINTER[CXIdxAttrInfo]], 64]
  numAttributes: Annotated[Annotated[int, ctypes.c_uint32], 72]
@c.record
class CXIdxContainerInfo(c.Struct):
  SIZE = 32
  cursor: Annotated[CXCursor, 0]
# Extra payload for an IBOutletCollection attribute: the collection's class.
@c.record
class CXIdxIBOutletCollectionAttrInfo(c.Struct):
  SIZE = 72
  attrInfo: Annotated[c.POINTER[CXIdxAttrInfo], 0]
  objcClass: Annotated[c.POINTER[CXIdxEntityInfo], 8]
  classCursor: Annotated[CXCursor, 16]
  classLoc: Annotated[CXIdxLoc, 48]
class CXIdxDeclInfoFlags(Annotated[int, ctypes.c_uint32], c.Enum): pass
CXIdxDeclFlag_Skipped = CXIdxDeclInfoFlags.define('CXIdxDeclFlag_Skipped', 1)
@c.record
class CXIdxDeclInfo(c.Struct):
SIZE = 128
entityInfo: Annotated[c.POINTER[CXIdxEntityInfo], 0]
cursor: Annotated[CXCursor, 8]
loc: Annotated[CXIdxLoc, 40]
semanticContainer: Annotated[c.POINTER[CXIdxContainerInfo], 64]
lexicalContainer: Annotated[c.POINTER[CXIdxContainerInfo], 72]
isRedeclaration: Annotated[Annotated[int, ctypes.c_int32], 80]
isDefinition: Annotated[Annotated[int, ctypes.c_int32], 84]
isContainer: Annotated[Annotated[int, ctypes.c_int32], 88]
declAsContainer: Annotated[c.POINTER[CXIdxContainerInfo], 96]
isImplicit: Annotated[Annotated[int, ctypes.c_int32], 104]
attributes: Annotated[c.POINTER[c.POINTER[CXIdxAttrInfo]], 112]
numAttributes: Annotated[Annotated[int, ctypes.c_uint32], 120]
flags: Annotated[Annotated[int, ctypes.c_uint32], 124]
# --- Objective-C / C++ specific declaration info records ---
class CXIdxObjCContainerKind(Annotated[int, ctypes.c_uint32], c.Enum): pass
CXIdxObjCContainer_ForwardRef = CXIdxObjCContainerKind.define('CXIdxObjCContainer_ForwardRef', 0)
CXIdxObjCContainer_Interface = CXIdxObjCContainerKind.define('CXIdxObjCContainer_Interface', 1)
CXIdxObjCContainer_Implementation = CXIdxObjCContainerKind.define('CXIdxObjCContainer_Implementation', 2)
@c.record
class CXIdxObjCContainerDeclInfo(c.Struct):
  SIZE = 16
  declInfo: Annotated[c.POINTER[CXIdxDeclInfo], 0]
  kind: Annotated[CXIdxObjCContainerKind, 8]
@c.record
class CXIdxBaseClassInfo(c.Struct):
  SIZE = 64
  base: Annotated[c.POINTER[CXIdxEntityInfo], 0]
  cursor: Annotated[CXCursor, 8]
  loc: Annotated[CXIdxLoc, 40]
@c.record
class CXIdxObjCProtocolRefInfo(c.Struct):
  SIZE = 64
  protocol: Annotated[c.POINTER[CXIdxEntityInfo], 0]
  cursor: Annotated[CXCursor, 8]
  loc: Annotated[CXIdxLoc, 40]
# A counted array of protocol references (numProtocols entries).
@c.record
class CXIdxObjCProtocolRefListInfo(c.Struct):
  SIZE = 16
  protocols: Annotated[c.POINTER[c.POINTER[CXIdxObjCProtocolRefInfo]], 0]
  numProtocols: Annotated[Annotated[int, ctypes.c_uint32], 8]
@c.record
class CXIdxObjCInterfaceDeclInfo(c.Struct):
  SIZE = 24
  containerInfo: Annotated[c.POINTER[CXIdxObjCContainerDeclInfo], 0]
  superInfo: Annotated[c.POINTER[CXIdxBaseClassInfo], 8]
  protocols: Annotated[c.POINTER[CXIdxObjCProtocolRefListInfo], 16]
@c.record
class CXIdxObjCCategoryDeclInfo(c.Struct):
  SIZE = 80
  containerInfo: Annotated[c.POINTER[CXIdxObjCContainerDeclInfo], 0]
  objcClass: Annotated[c.POINTER[CXIdxEntityInfo], 8]
  classCursor: Annotated[CXCursor, 16]
  classLoc: Annotated[CXIdxLoc, 48]
  protocols: Annotated[c.POINTER[CXIdxObjCProtocolRefListInfo], 72]
@c.record
class CXIdxObjCPropertyDeclInfo(c.Struct):
  SIZE = 24
  declInfo: Annotated[c.POINTER[CXIdxDeclInfo], 0]
  getter: Annotated[c.POINTER[CXIdxEntityInfo], 8]
  setter: Annotated[c.POINTER[CXIdxEntityInfo], 16]
# C++ class declaration with its base classes (numBases entries in bases).
@c.record
class CXIdxCXXClassDeclInfo(c.Struct):
  SIZE = 24
  declInfo: Annotated[c.POINTER[CXIdxDeclInfo], 0]
  bases: Annotated[c.POINTER[c.POINTER[CXIdxBaseClassInfo]], 8]
  numBases: Annotated[Annotated[int, ctypes.c_uint32], 16]
# How an entity is referenced.
class CXIdxEntityRefKind(Annotated[int, ctypes.c_uint32], c.Enum): pass
CXIdxEntityRef_Direct = CXIdxEntityRefKind.define('CXIdxEntityRef_Direct', 1)
CXIdxEntityRef_Implicit = CXIdxEntityRefKind.define('CXIdxEntityRef_Implicit', 2)
# Role bit flags for a symbol occurrence (values are powers of two and may be
# combined in CXIdxEntityRefInfo.role).
class CXSymbolRole(Annotated[int, ctypes.c_uint32], c.Enum): pass
CXSymbolRole_None = CXSymbolRole.define('CXSymbolRole_None', 0)
CXSymbolRole_Declaration = CXSymbolRole.define('CXSymbolRole_Declaration', 1)
CXSymbolRole_Definition = CXSymbolRole.define('CXSymbolRole_Definition', 2)
CXSymbolRole_Reference = CXSymbolRole.define('CXSymbolRole_Reference', 4)
CXSymbolRole_Read = CXSymbolRole.define('CXSymbolRole_Read', 8)
CXSymbolRole_Write = CXSymbolRole.define('CXSymbolRole_Write', 16)
CXSymbolRole_Call = CXSymbolRole.define('CXSymbolRole_Call', 32)
CXSymbolRole_Dynamic = CXSymbolRole.define('CXSymbolRole_Dynamic', 64)
CXSymbolRole_AddressOf = CXSymbolRole.define('CXSymbolRole_AddressOf', 128)
CXSymbolRole_Implicit = CXSymbolRole.define('CXSymbolRole_Implicit', 256)
# A reference to an entity, reported to the indexEntityReference callback.
@c.record
class CXIdxEntityRefInfo(c.Struct):
  SIZE = 96
  kind: Annotated[CXIdxEntityRefKind, 0]
  cursor: Annotated[CXCursor, 8]
  loc: Annotated[CXIdxLoc, 40]
  referencedEntity: Annotated[c.POINTER[CXIdxEntityInfo], 64]
  parentEntity: Annotated[c.POINTER[CXIdxEntityInfo], 72]
  container: Annotated[c.POINTER[CXIdxContainerInfo], 80]
  role: Annotated[CXSymbolRole, 88]
# Table of client callback function pointers consumed by clang_indexSourceFile /
# clang_indexTranslationUnit. Each field is a C function pointer; the first
# argument of every callback is the CXClientData passed at indexing time.
@c.record
class IndexerCallbacks(c.Struct):
  SIZE = 64
  abortQuery: Annotated[c.CFUNCTYPE[Annotated[int, ctypes.c_int32], [CXClientData, ctypes.c_void_p]], 0]
  diagnostic: Annotated[c.CFUNCTYPE[None, [CXClientData, CXDiagnosticSet, ctypes.c_void_p]], 8]
  enteredMainFile: Annotated[c.CFUNCTYPE[CXIdxClientFile, [CXClientData, CXFile, ctypes.c_void_p]], 16]
  ppIncludedFile: Annotated[c.CFUNCTYPE[CXIdxClientFile, [CXClientData, c.POINTER[CXIdxIncludedFileInfo]]], 24]
  importedASTFile: Annotated[c.CFUNCTYPE[CXIdxClientASTFile, [CXClientData, c.POINTER[CXIdxImportedASTFileInfo]]], 32]
  startedTranslationUnit: Annotated[c.CFUNCTYPE[CXIdxClientContainer, [CXClientData, ctypes.c_void_p]], 40]
  indexDeclaration: Annotated[c.CFUNCTYPE[None, [CXClientData, c.POINTER[CXIdxDeclInfo]]], 48]
  indexEntityReference: Annotated[c.CFUNCTYPE[None, [CXClientData, c.POINTER[CXIdxEntityRefInfo]]], 56]
# Accessor functions over the indexer info records above. Each @dll.bind stub
# is a foreign-function declaration resolved against the libclang shared
# library; the Python bodies are intentionally empty.
@dll.bind
def clang_index_isEntityObjCContainerKind(_0:CXIdxEntityKind) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def clang_index_getObjCContainerDeclInfo(_0:c.POINTER[CXIdxDeclInfo]) -> c.POINTER[CXIdxObjCContainerDeclInfo]: ...
@dll.bind
def clang_index_getObjCInterfaceDeclInfo(_0:c.POINTER[CXIdxDeclInfo]) -> c.POINTER[CXIdxObjCInterfaceDeclInfo]: ...
@dll.bind
def clang_index_getObjCCategoryDeclInfo(_0:c.POINTER[CXIdxDeclInfo]) -> c.POINTER[CXIdxObjCCategoryDeclInfo]: ...
@dll.bind
def clang_index_getObjCProtocolRefListInfo(_0:c.POINTER[CXIdxDeclInfo]) -> c.POINTER[CXIdxObjCProtocolRefListInfo]: ...
@dll.bind
def clang_index_getObjCPropertyDeclInfo(_0:c.POINTER[CXIdxDeclInfo]) -> c.POINTER[CXIdxObjCPropertyDeclInfo]: ...
@dll.bind
def clang_index_getIBOutletCollectionAttrInfo(_0:c.POINTER[CXIdxAttrInfo]) -> c.POINTER[CXIdxIBOutletCollectionAttrInfo]: ...
@dll.bind
def clang_index_getCXXClassDeclInfo(_0:c.POINTER[CXIdxDeclInfo]) -> c.POINTER[CXIdxCXXClassDeclInfo]: ...
# Client-attached opaque data on containers/entities.
@dll.bind
def clang_index_getClientContainer(_0:c.POINTER[CXIdxContainerInfo]) -> CXIdxClientContainer: ...
@dll.bind
def clang_index_setClientContainer(_0:c.POINTER[CXIdxContainerInfo], _1:CXIdxClientContainer) -> None: ...
@dll.bind
def clang_index_getClientEntity(_0:c.POINTER[CXIdxEntityInfo]) -> CXIdxClientEntity: ...
@dll.bind
def clang_index_setClientEntity(_0:c.POINTER[CXIdxEntityInfo], _1:CXIdxClientEntity) -> None: ...
# Opaque handle for an indexing session; created from a CXIndex and must be
# disposed with clang_IndexAction_dispose.
CXIndexAction: TypeAlias = ctypes.c_void_p
@dll.bind
def clang_IndexAction_create(CIdx:CXIndex) -> CXIndexAction: ...
@dll.bind
def clang_IndexAction_dispose(_0:CXIndexAction) -> None: ...
# Bit flags controlling an indexing session (OR-able; passed as index_options).
class CXIndexOptFlags(Annotated[int, ctypes.c_uint32], c.Enum): pass
CXIndexOpt_None = CXIndexOptFlags.define('CXIndexOpt_None', 0)
CXIndexOpt_SuppressRedundantRefs = CXIndexOptFlags.define('CXIndexOpt_SuppressRedundantRefs', 1)
CXIndexOpt_IndexFunctionLocalSymbols = CXIndexOptFlags.define('CXIndexOpt_IndexFunctionLocalSymbols', 2)
CXIndexOpt_IndexImplicitTemplateInstantiations = CXIndexOptFlags.define('CXIndexOpt_IndexImplicitTemplateInstantiations', 4)
CXIndexOpt_SuppressWarnings = CXIndexOptFlags.define('CXIndexOpt_SuppressWarnings', 8)
CXIndexOpt_SkipParsedBodiesInSession = CXIndexOptFlags.define('CXIndexOpt_SkipParsedBodiesInSession', 16)
# Main indexing entry points: parse (or reuse) a translation unit and fire the
# IndexerCallbacks. Non-zero return indicates failure.
@dll.bind
def clang_indexSourceFile(_0:CXIndexAction, client_data:CXClientData, index_callbacks:c.POINTER[IndexerCallbacks], index_callbacks_size:Annotated[int, ctypes.c_uint32], index_options:Annotated[int, ctypes.c_uint32], source_filename:c.POINTER[Annotated[bytes, ctypes.c_char]], command_line_args:c.POINTER[c.POINTER[Annotated[bytes, ctypes.c_char]]], num_command_line_args:Annotated[int, ctypes.c_int32], unsaved_files:c.POINTER[struct_CXUnsavedFile], num_unsaved_files:Annotated[int, ctypes.c_uint32], out_TU:c.POINTER[CXTranslationUnit], TU_options:Annotated[int, ctypes.c_uint32]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def clang_indexSourceFileFullArgv(_0:CXIndexAction, client_data:CXClientData, index_callbacks:c.POINTER[IndexerCallbacks], index_callbacks_size:Annotated[int, ctypes.c_uint32], index_options:Annotated[int, ctypes.c_uint32], source_filename:c.POINTER[Annotated[bytes, ctypes.c_char]], command_line_args:c.POINTER[c.POINTER[Annotated[bytes, ctypes.c_char]]], num_command_line_args:Annotated[int, ctypes.c_int32], unsaved_files:c.POINTER[struct_CXUnsavedFile], num_unsaved_files:Annotated[int, ctypes.c_uint32], out_TU:c.POINTER[CXTranslationUnit], TU_options:Annotated[int, ctypes.c_uint32]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def clang_indexTranslationUnit(_0:CXIndexAction, client_data:CXClientData, index_callbacks:c.POINTER[IndexerCallbacks], index_callbacks_size:Annotated[int, ctypes.c_uint32], index_options:Annotated[int, ctypes.c_uint32], _5:CXTranslationUnit) -> Annotated[int, ctypes.c_int32]: ...
# Translate an indexer location into file/line/column/offset (out-params may be NULL).
@dll.bind
def clang_indexLoc_getFileLocation(loc:CXIdxLoc, indexFile:c.POINTER[CXIdxClientFile], file:c.POINTER[CXFile], line:c.POINTER[Annotated[int, ctypes.c_uint32]], column:c.POINTER[Annotated[int, ctypes.c_uint32]], offset:c.POINTER[Annotated[int, ctypes.c_uint32]]) -> None: ...
@dll.bind
def clang_indexLoc_getCXSourceLocation(loc:CXIdxLoc) -> CXSourceLocation: ...
# Callback type used to visit struct fields / C++ base classes.
CXFieldVisitor: TypeAlias = c.CFUNCTYPE[enum_CXVisitorResult, [CXCursor, ctypes.c_void_p]]
@dll.bind
def clang_Type_visitFields(T:CXType, visitor:CXFieldVisitor, client_data:CXClientData) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_visitCXXBaseClasses(T:CXType, visitor:CXFieldVisitor, client_data:CXClientData) -> Annotated[int, ctypes.c_uint32]: ...
# Binary operator kinds of an expression cursor (mirrors clang's BinaryOperatorKind).
class enum_CXBinaryOperatorKind(Annotated[int, ctypes.c_uint32], c.Enum): pass
CXBinaryOperator_Invalid = enum_CXBinaryOperatorKind.define('CXBinaryOperator_Invalid', 0)
CXBinaryOperator_PtrMemD = enum_CXBinaryOperatorKind.define('CXBinaryOperator_PtrMemD', 1)
CXBinaryOperator_PtrMemI = enum_CXBinaryOperatorKind.define('CXBinaryOperator_PtrMemI', 2)
CXBinaryOperator_Mul = enum_CXBinaryOperatorKind.define('CXBinaryOperator_Mul', 3)
CXBinaryOperator_Div = enum_CXBinaryOperatorKind.define('CXBinaryOperator_Div', 4)
CXBinaryOperator_Rem = enum_CXBinaryOperatorKind.define('CXBinaryOperator_Rem', 5)
CXBinaryOperator_Add = enum_CXBinaryOperatorKind.define('CXBinaryOperator_Add', 6)
CXBinaryOperator_Sub = enum_CXBinaryOperatorKind.define('CXBinaryOperator_Sub', 7)
CXBinaryOperator_Shl = enum_CXBinaryOperatorKind.define('CXBinaryOperator_Shl', 8)
CXBinaryOperator_Shr = enum_CXBinaryOperatorKind.define('CXBinaryOperator_Shr', 9)
CXBinaryOperator_Cmp = enum_CXBinaryOperatorKind.define('CXBinaryOperator_Cmp', 10)
CXBinaryOperator_LT = enum_CXBinaryOperatorKind.define('CXBinaryOperator_LT', 11)
CXBinaryOperator_GT = enum_CXBinaryOperatorKind.define('CXBinaryOperator_GT', 12)
CXBinaryOperator_LE = enum_CXBinaryOperatorKind.define('CXBinaryOperator_LE', 13)
CXBinaryOperator_GE = enum_CXBinaryOperatorKind.define('CXBinaryOperator_GE', 14)
CXBinaryOperator_EQ = enum_CXBinaryOperatorKind.define('CXBinaryOperator_EQ', 15)
CXBinaryOperator_NE = enum_CXBinaryOperatorKind.define('CXBinaryOperator_NE', 16)
CXBinaryOperator_And = enum_CXBinaryOperatorKind.define('CXBinaryOperator_And', 17)
CXBinaryOperator_Xor = enum_CXBinaryOperatorKind.define('CXBinaryOperator_Xor', 18)
CXBinaryOperator_Or = enum_CXBinaryOperatorKind.define('CXBinaryOperator_Or', 19)
CXBinaryOperator_LAnd = enum_CXBinaryOperatorKind.define('CXBinaryOperator_LAnd', 20)
CXBinaryOperator_LOr = enum_CXBinaryOperatorKind.define('CXBinaryOperator_LOr', 21)
CXBinaryOperator_Assign = enum_CXBinaryOperatorKind.define('CXBinaryOperator_Assign', 22)
CXBinaryOperator_MulAssign = enum_CXBinaryOperatorKind.define('CXBinaryOperator_MulAssign', 23)
CXBinaryOperator_DivAssign = enum_CXBinaryOperatorKind.define('CXBinaryOperator_DivAssign', 24)
CXBinaryOperator_RemAssign = enum_CXBinaryOperatorKind.define('CXBinaryOperator_RemAssign', 25)
CXBinaryOperator_AddAssign = enum_CXBinaryOperatorKind.define('CXBinaryOperator_AddAssign', 26)
CXBinaryOperator_SubAssign = enum_CXBinaryOperatorKind.define('CXBinaryOperator_SubAssign', 27)
CXBinaryOperator_ShlAssign = enum_CXBinaryOperatorKind.define('CXBinaryOperator_ShlAssign', 28)
CXBinaryOperator_ShrAssign = enum_CXBinaryOperatorKind.define('CXBinaryOperator_ShrAssign', 29)
CXBinaryOperator_AndAssign = enum_CXBinaryOperatorKind.define('CXBinaryOperator_AndAssign', 30)
CXBinaryOperator_XorAssign = enum_CXBinaryOperatorKind.define('CXBinaryOperator_XorAssign', 31)
CXBinaryOperator_OrAssign = enum_CXBinaryOperatorKind.define('CXBinaryOperator_OrAssign', 32)
CXBinaryOperator_Comma = enum_CXBinaryOperatorKind.define('CXBinaryOperator_Comma', 33)
@dll.bind
def clang_getBinaryOperatorKindSpelling(kind:enum_CXBinaryOperatorKind) -> CXString: ...
@dll.bind
def clang_getCursorBinaryOperatorKind(cursor:CXCursor) -> enum_CXBinaryOperatorKind: ...
# Unary operator kinds of an expression cursor.
class enum_CXUnaryOperatorKind(Annotated[int, ctypes.c_uint32], c.Enum): pass
CXUnaryOperator_Invalid = enum_CXUnaryOperatorKind.define('CXUnaryOperator_Invalid', 0)
CXUnaryOperator_PostInc = enum_CXUnaryOperatorKind.define('CXUnaryOperator_PostInc', 1)
CXUnaryOperator_PostDec = enum_CXUnaryOperatorKind.define('CXUnaryOperator_PostDec', 2)
CXUnaryOperator_PreInc = enum_CXUnaryOperatorKind.define('CXUnaryOperator_PreInc', 3)
CXUnaryOperator_PreDec = enum_CXUnaryOperatorKind.define('CXUnaryOperator_PreDec', 4)
CXUnaryOperator_AddrOf = enum_CXUnaryOperatorKind.define('CXUnaryOperator_AddrOf', 5)
CXUnaryOperator_Deref = enum_CXUnaryOperatorKind.define('CXUnaryOperator_Deref', 6)
CXUnaryOperator_Plus = enum_CXUnaryOperatorKind.define('CXUnaryOperator_Plus', 7)
CXUnaryOperator_Minus = enum_CXUnaryOperatorKind.define('CXUnaryOperator_Minus', 8)
CXUnaryOperator_Not = enum_CXUnaryOperatorKind.define('CXUnaryOperator_Not', 9)
CXUnaryOperator_LNot = enum_CXUnaryOperatorKind.define('CXUnaryOperator_LNot', 10)
CXUnaryOperator_Real = enum_CXUnaryOperatorKind.define('CXUnaryOperator_Real', 11)
CXUnaryOperator_Imag = enum_CXUnaryOperatorKind.define('CXUnaryOperator_Imag', 12)
CXUnaryOperator_Extension = enum_CXUnaryOperatorKind.define('CXUnaryOperator_Extension', 13)
CXUnaryOperator_Coawait = enum_CXUnaryOperatorKind.define('CXUnaryOperator_Coawait', 14)
@dll.bind
def clang_getUnaryOperatorKindSpelling(kind:enum_CXUnaryOperatorKind) -> CXString: ...
@dll.bind
def clang_getCursorUnaryOperatorKind(cursor:CXCursor) -> enum_CXUnaryOperatorKind: ...
# --- CXString, source location/range and file utilities ---
# CXString results own memory that must be released with clang_disposeString.
@dll.bind
def clang_getCString(string:CXString) -> c.POINTER[Annotated[bytes, ctypes.c_char]]: ...
@dll.bind
def clang_disposeString(string:CXString) -> None: ...
@dll.bind
def clang_disposeStringSet(set:c.POINTER[CXStringSet]) -> None: ...
@dll.bind
def clang_getNullLocation() -> CXSourceLocation: ...
@dll.bind
def clang_equalLocations(loc1:CXSourceLocation, loc2:CXSourceLocation) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_isBeforeInTranslationUnit(loc1:CXSourceLocation, loc2:CXSourceLocation) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_Location_isInSystemHeader(location:CXSourceLocation) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def clang_Location_isFromMainFile(location:CXSourceLocation) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def clang_getNullRange() -> CXSourceRange: ...
@dll.bind
def clang_getRange(begin:CXSourceLocation, end:CXSourceLocation) -> CXSourceRange: ...
@dll.bind
def clang_equalRanges(range1:CXSourceRange, range2:CXSourceRange) -> Annotated[int, ctypes.c_uint32]: ...
@dll.bind
def clang_Range_isNull(range:CXSourceRange) -> Annotated[int, ctypes.c_int32]: ...
# The following decompose a CXSourceLocation into file/line/column/offset under
# the different interpretation rules (expansion/presumed/spelling/file); the
# out-pointers receive the results.
@dll.bind
def clang_getExpansionLocation(location:CXSourceLocation, file:c.POINTER[CXFile], line:c.POINTER[Annotated[int, ctypes.c_uint32]], column:c.POINTER[Annotated[int, ctypes.c_uint32]], offset:c.POINTER[Annotated[int, ctypes.c_uint32]]) -> None: ...
@dll.bind
def clang_getPresumedLocation(location:CXSourceLocation, filename:c.POINTER[CXString], line:c.POINTER[Annotated[int, ctypes.c_uint32]], column:c.POINTER[Annotated[int, ctypes.c_uint32]]) -> None: ...
@dll.bind
def clang_getInstantiationLocation(location:CXSourceLocation, file:c.POINTER[CXFile], line:c.POINTER[Annotated[int, ctypes.c_uint32]], column:c.POINTER[Annotated[int, ctypes.c_uint32]], offset:c.POINTER[Annotated[int, ctypes.c_uint32]]) -> None: ...
@dll.bind
def clang_getSpellingLocation(location:CXSourceLocation, file:c.POINTER[CXFile], line:c.POINTER[Annotated[int, ctypes.c_uint32]], column:c.POINTER[Annotated[int, ctypes.c_uint32]], offset:c.POINTER[Annotated[int, ctypes.c_uint32]]) -> None: ...
@dll.bind
def clang_getFileLocation(location:CXSourceLocation, file:c.POINTER[CXFile], line:c.POINTER[Annotated[int, ctypes.c_uint32]], column:c.POINTER[Annotated[int, ctypes.c_uint32]], offset:c.POINTER[Annotated[int, ctypes.c_uint32]]) -> None: ...
@dll.bind
def clang_getRangeStart(range:CXSourceRange) -> CXSourceLocation: ...
@dll.bind
def clang_getRangeEnd(range:CXSourceRange) -> CXSourceLocation: ...
@dll.bind
def clang_disposeSourceRangeList(ranges:c.POINTER[CXSourceRangeList]) -> None: ...
@dll.bind
def clang_getFileName(SFile:CXFile) -> CXString: ...
# C time_t, modelled here as a signed 64-bit integer.
time_t: TypeAlias = Annotated[int, ctypes.c_int64]
@dll.bind
def clang_getFileTime(SFile:CXFile) -> time_t: ...
# Unique identifier for a file: three 64-bit words.
@c.record
class CXFileUniqueID(c.Struct):
  SIZE = 24
  data: Annotated[c.Array[Annotated[int, ctypes.c_uint64], Literal[3]], 0]
@dll.bind
def clang_getFileUniqueID(file:CXFile, outID:c.POINTER[CXFileUniqueID]) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def clang_File_isEqual(file1:CXFile, file2:CXFile) -> Annotated[int, ctypes.c_int32]: ...
@dll.bind
def clang_File_tryGetRealPathName(file:CXFile) -> CXString: ...
# Finalize all @c.record structs declared above. NOTE(review): presumably this
# resolves the (string) field annotations into concrete ctypes layouts now that
# every referenced type exists — confirm against tinygrad.runtime.support.c.
c.init_records()
# Version constants mirrored from the CINDEX_VERSION_* C macros.
CINDEX_VERSION_MAJOR = 0  # type: ignore
CINDEX_VERSION_MINOR = 64  # type: ignore
def CINDEX_VERSION_ENCODE(major, minor):  # type: ignore
  """Pack (major, minor) into the single integer libclang version number."""
  return major * 10000 + minor
# Encoded version of the headers these bindings were generated from.
CINDEX_VERSION = CINDEX_VERSION_ENCODE(CINDEX_VERSION_MAJOR, CINDEX_VERSION_MINOR)  # type: ignore
CINDEX_VERSION_STRINGIZE = lambda major,minor: CINDEX_VERSION_STRINGIZE_(major, minor) # type: ignore | {
"repo_id": "tinygrad/tinygrad",
"file_path": "tinygrad/runtime/autogen/libclang.py",
"license": "MIT License",
"lines": 1829,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
tinygrad/tinygrad:tinygrad/runtime/autogen/metal.py | # mypy: disable-error-code="empty-body"
from __future__ import annotations  # postpone annotation evaluation: permits forward refs (e.g. uint32_t below)
import ctypes
from typing import Annotated, Literal, TypeAlias
from tinygrad.runtime.support.c import _IO, _IOW, _IOR, _IOWR
from tinygrad.runtime.support import c
from tinygrad.runtime.support import objc
# Handle to the Metal framework library; generated bindings attach to it.
dll = c.DLL('metal', 'Metal')
# Layouts for Metal indirect-dispatch argument buffers. As elsewhere in this
# file, the integer in each Annotated field is its byte offset and SIZE is the
# total struct size in bytes.
@c.record
class MTLDispatchThreadgroupsIndirectArguments(c.Struct):
  SIZE = 12
  # uint32_t is defined just below; legal because annotations are postponed
  # (see `from __future__ import annotations` at the top of this file).
  threadgroupsPerGrid: Annotated[c.Array[uint32_t, Literal[3]], 0]
uint32_t: TypeAlias = Annotated[int, ctypes.c_uint32]
@c.record
class MTLStageInRegionIndirectArguments(c.Struct):
  SIZE = 24
  stageInOrigin: Annotated[c.Array[uint32_t, Literal[3]], 0]
  stageInSize: Annotated[c.Array[uint32_t, Literal[3]], 12]
# Placeholder classes for Objective-C Metal objects (see objc.Spec); no Python
# members are declared here.
class MTLComputeCommandEncoder(objc.Spec): pass
class MTLCommandEncoder(objc.Spec): pass
class MTLComputePipelineState(objc.Spec): pass
NSUInteger: TypeAlias = Annotated[int, ctypes.c_uint64]
class MTLBuffer(objc.Spec): pass
class MTLResource(objc.Spec): pass
# Foundation NSRange: a (location, length) pair of NSUIntegers.
@c.record
class struct__NSRange(c.Struct):
  SIZE = 16
  location: Annotated[NSUInteger, 0]
  length: Annotated[NSUInteger, 8]
NSRange: TypeAlias = struct__NSRange
class MTLTexture(objc.Spec): pass
class MTLTextureDescriptor(objc.Spec): pass
# Dimensionality/arrangement of a Metal texture.
class enum_MTLTextureType(NSUInteger, c.Enum): pass
MTLTextureType1D = enum_MTLTextureType.define('MTLTextureType1D', 0)
MTLTextureType1DArray = enum_MTLTextureType.define('MTLTextureType1DArray', 1)
MTLTextureType2D = enum_MTLTextureType.define('MTLTextureType2D', 2)
MTLTextureType2DArray = enum_MTLTextureType.define('MTLTextureType2DArray', 3)
MTLTextureType2DMultisample = enum_MTLTextureType.define('MTLTextureType2DMultisample', 4)
MTLTextureTypeCube = enum_MTLTextureType.define('MTLTextureTypeCube', 5)
MTLTextureTypeCubeArray = enum_MTLTextureType.define('MTLTextureTypeCubeArray', 6)
MTLTextureType3D = enum_MTLTextureType.define('MTLTextureType3D', 7)
MTLTextureType2DMultisampleArray = enum_MTLTextureType.define('MTLTextureType2DMultisampleArray', 8)
MTLTextureTypeTextureBuffer = enum_MTLTextureType.define('MTLTextureTypeTextureBuffer', 9)
MTLTextureType: TypeAlias = enum_MTLTextureType
# Pixel formats for Metal textures; values mirror Apple's MTLPixelFormat.
class enum_MTLPixelFormat(NSUInteger, c.Enum): pass
MTLPixelFormatInvalid = enum_MTLPixelFormat.define('MTLPixelFormatInvalid', 0)
MTLPixelFormatA8Unorm = enum_MTLPixelFormat.define('MTLPixelFormatA8Unorm', 1)
MTLPixelFormatR8Unorm = enum_MTLPixelFormat.define('MTLPixelFormatR8Unorm', 10)
MTLPixelFormatR8Unorm_sRGB = enum_MTLPixelFormat.define('MTLPixelFormatR8Unorm_sRGB', 11)
MTLPixelFormatR8Snorm = enum_MTLPixelFormat.define('MTLPixelFormatR8Snorm', 12)
MTLPixelFormatR8Uint = enum_MTLPixelFormat.define('MTLPixelFormatR8Uint', 13)
MTLPixelFormatR8Sint = enum_MTLPixelFormat.define('MTLPixelFormatR8Sint', 14)
MTLPixelFormatR16Unorm = enum_MTLPixelFormat.define('MTLPixelFormatR16Unorm', 20)
MTLPixelFormatR16Snorm = enum_MTLPixelFormat.define('MTLPixelFormatR16Snorm', 22)
MTLPixelFormatR16Uint = enum_MTLPixelFormat.define('MTLPixelFormatR16Uint', 23)
MTLPixelFormatR16Sint = enum_MTLPixelFormat.define('MTLPixelFormatR16Sint', 24)
MTLPixelFormatR16Float = enum_MTLPixelFormat.define('MTLPixelFormatR16Float', 25)
MTLPixelFormatRG8Unorm = enum_MTLPixelFormat.define('MTLPixelFormatRG8Unorm', 30)
MTLPixelFormatRG8Unorm_sRGB = enum_MTLPixelFormat.define('MTLPixelFormatRG8Unorm_sRGB', 31)
MTLPixelFormatRG8Snorm = enum_MTLPixelFormat.define('MTLPixelFormatRG8Snorm', 32)
MTLPixelFormatRG8Uint = enum_MTLPixelFormat.define('MTLPixelFormatRG8Uint', 33)
MTLPixelFormatRG8Sint = enum_MTLPixelFormat.define('MTLPixelFormatRG8Sint', 34)
MTLPixelFormatB5G6R5Unorm = enum_MTLPixelFormat.define('MTLPixelFormatB5G6R5Unorm', 40)
MTLPixelFormatA1BGR5Unorm = enum_MTLPixelFormat.define('MTLPixelFormatA1BGR5Unorm', 41)
MTLPixelFormatABGR4Unorm = enum_MTLPixelFormat.define('MTLPixelFormatABGR4Unorm', 42)
MTLPixelFormatBGR5A1Unorm = enum_MTLPixelFormat.define('MTLPixelFormatBGR5A1Unorm', 43)
MTLPixelFormatR32Uint = enum_MTLPixelFormat.define('MTLPixelFormatR32Uint', 53)
MTLPixelFormatR32Sint = enum_MTLPixelFormat.define('MTLPixelFormatR32Sint', 54)
MTLPixelFormatR32Float = enum_MTLPixelFormat.define('MTLPixelFormatR32Float', 55)
MTLPixelFormatRG16Unorm = enum_MTLPixelFormat.define('MTLPixelFormatRG16Unorm', 60)
MTLPixelFormatRG16Snorm = enum_MTLPixelFormat.define('MTLPixelFormatRG16Snorm', 62)
MTLPixelFormatRG16Uint = enum_MTLPixelFormat.define('MTLPixelFormatRG16Uint', 63)
MTLPixelFormatRG16Sint = enum_MTLPixelFormat.define('MTLPixelFormatRG16Sint', 64)
MTLPixelFormatRG16Float = enum_MTLPixelFormat.define('MTLPixelFormatRG16Float', 65)
MTLPixelFormatRGBA8Unorm = enum_MTLPixelFormat.define('MTLPixelFormatRGBA8Unorm', 70)
MTLPixelFormatRGBA8Unorm_sRGB = enum_MTLPixelFormat.define('MTLPixelFormatRGBA8Unorm_sRGB', 71)
MTLPixelFormatRGBA8Snorm = enum_MTLPixelFormat.define('MTLPixelFormatRGBA8Snorm', 72)
MTLPixelFormatRGBA8Uint = enum_MTLPixelFormat.define('MTLPixelFormatRGBA8Uint', 73)
MTLPixelFormatRGBA8Sint = enum_MTLPixelFormat.define('MTLPixelFormatRGBA8Sint', 74)
MTLPixelFormatBGRA8Unorm = enum_MTLPixelFormat.define('MTLPixelFormatBGRA8Unorm', 80)
MTLPixelFormatBGRA8Unorm_sRGB = enum_MTLPixelFormat.define('MTLPixelFormatBGRA8Unorm_sRGB', 81)
MTLPixelFormatRGB10A2Unorm = enum_MTLPixelFormat.define('MTLPixelFormatRGB10A2Unorm', 90)
MTLPixelFormatRGB10A2Uint = enum_MTLPixelFormat.define('MTLPixelFormatRGB10A2Uint', 91)
MTLPixelFormatRG11B10Float = enum_MTLPixelFormat.define('MTLPixelFormatRG11B10Float', 92)
MTLPixelFormatRGB9E5Float = enum_MTLPixelFormat.define('MTLPixelFormatRGB9E5Float', 93)
MTLPixelFormatBGR10A2Unorm = enum_MTLPixelFormat.define('MTLPixelFormatBGR10A2Unorm', 94)
MTLPixelFormatBGR10_XR = enum_MTLPixelFormat.define('MTLPixelFormatBGR10_XR', 554)
MTLPixelFormatBGR10_XR_sRGB = enum_MTLPixelFormat.define('MTLPixelFormatBGR10_XR_sRGB', 555)
MTLPixelFormatRG32Uint = enum_MTLPixelFormat.define('MTLPixelFormatRG32Uint', 103)
MTLPixelFormatRG32Sint = enum_MTLPixelFormat.define('MTLPixelFormatRG32Sint', 104)
MTLPixelFormatRG32Float = enum_MTLPixelFormat.define('MTLPixelFormatRG32Float', 105)
MTLPixelFormatRGBA16Unorm = enum_MTLPixelFormat.define('MTLPixelFormatRGBA16Unorm', 110)
MTLPixelFormatRGBA16Snorm = enum_MTLPixelFormat.define('MTLPixelFormatRGBA16Snorm', 112)
MTLPixelFormatRGBA16Uint = enum_MTLPixelFormat.define('MTLPixelFormatRGBA16Uint', 113)
MTLPixelFormatRGBA16Sint = enum_MTLPixelFormat.define('MTLPixelFormatRGBA16Sint', 114)
MTLPixelFormatRGBA16Float = enum_MTLPixelFormat.define('MTLPixelFormatRGBA16Float', 115)
MTLPixelFormatBGRA10_XR = enum_MTLPixelFormat.define('MTLPixelFormatBGRA10_XR', 552)
MTLPixelFormatBGRA10_XR_sRGB = enum_MTLPixelFormat.define('MTLPixelFormatBGRA10_XR_sRGB', 553)
MTLPixelFormatRGBA32Uint = enum_MTLPixelFormat.define('MTLPixelFormatRGBA32Uint', 123)
MTLPixelFormatRGBA32Sint = enum_MTLPixelFormat.define('MTLPixelFormatRGBA32Sint', 124)
MTLPixelFormatRGBA32Float = enum_MTLPixelFormat.define('MTLPixelFormatRGBA32Float', 125)
# Compressed formats (BC*, PVRTC*, EAC/ETC2, ASTC).
MTLPixelFormatBC1_RGBA = enum_MTLPixelFormat.define('MTLPixelFormatBC1_RGBA', 130)
MTLPixelFormatBC1_RGBA_sRGB = enum_MTLPixelFormat.define('MTLPixelFormatBC1_RGBA_sRGB', 131)
MTLPixelFormatBC2_RGBA = enum_MTLPixelFormat.define('MTLPixelFormatBC2_RGBA', 132)
MTLPixelFormatBC2_RGBA_sRGB = enum_MTLPixelFormat.define('MTLPixelFormatBC2_RGBA_sRGB', 133)
MTLPixelFormatBC3_RGBA = enum_MTLPixelFormat.define('MTLPixelFormatBC3_RGBA', 134)
MTLPixelFormatBC3_RGBA_sRGB = enum_MTLPixelFormat.define('MTLPixelFormatBC3_RGBA_sRGB', 135)
MTLPixelFormatBC4_RUnorm = enum_MTLPixelFormat.define('MTLPixelFormatBC4_RUnorm', 140)
MTLPixelFormatBC4_RSnorm = enum_MTLPixelFormat.define('MTLPixelFormatBC4_RSnorm', 141)
MTLPixelFormatBC5_RGUnorm = enum_MTLPixelFormat.define('MTLPixelFormatBC5_RGUnorm', 142)
MTLPixelFormatBC5_RGSnorm = enum_MTLPixelFormat.define('MTLPixelFormatBC5_RGSnorm', 143)
MTLPixelFormatBC6H_RGBFloat = enum_MTLPixelFormat.define('MTLPixelFormatBC6H_RGBFloat', 150)
MTLPixelFormatBC6H_RGBUfloat = enum_MTLPixelFormat.define('MTLPixelFormatBC6H_RGBUfloat', 151)
MTLPixelFormatBC7_RGBAUnorm = enum_MTLPixelFormat.define('MTLPixelFormatBC7_RGBAUnorm', 152)
MTLPixelFormatBC7_RGBAUnorm_sRGB = enum_MTLPixelFormat.define('MTLPixelFormatBC7_RGBAUnorm_sRGB', 153)
MTLPixelFormatPVRTC_RGB_2BPP = enum_MTLPixelFormat.define('MTLPixelFormatPVRTC_RGB_2BPP', 160)
MTLPixelFormatPVRTC_RGB_2BPP_sRGB = enum_MTLPixelFormat.define('MTLPixelFormatPVRTC_RGB_2BPP_sRGB', 161)
MTLPixelFormatPVRTC_RGB_4BPP = enum_MTLPixelFormat.define('MTLPixelFormatPVRTC_RGB_4BPP', 162)
MTLPixelFormatPVRTC_RGB_4BPP_sRGB = enum_MTLPixelFormat.define('MTLPixelFormatPVRTC_RGB_4BPP_sRGB', 163)
MTLPixelFormatPVRTC_RGBA_2BPP = enum_MTLPixelFormat.define('MTLPixelFormatPVRTC_RGBA_2BPP', 164)
MTLPixelFormatPVRTC_RGBA_2BPP_sRGB = enum_MTLPixelFormat.define('MTLPixelFormatPVRTC_RGBA_2BPP_sRGB', 165)
MTLPixelFormatPVRTC_RGBA_4BPP = enum_MTLPixelFormat.define('MTLPixelFormatPVRTC_RGBA_4BPP', 166)
MTLPixelFormatPVRTC_RGBA_4BPP_sRGB = enum_MTLPixelFormat.define('MTLPixelFormatPVRTC_RGBA_4BPP_sRGB', 167)
MTLPixelFormatEAC_R11Unorm = enum_MTLPixelFormat.define('MTLPixelFormatEAC_R11Unorm', 170)
MTLPixelFormatEAC_R11Snorm = enum_MTLPixelFormat.define('MTLPixelFormatEAC_R11Snorm', 172)
MTLPixelFormatEAC_RG11Unorm = enum_MTLPixelFormat.define('MTLPixelFormatEAC_RG11Unorm', 174)
MTLPixelFormatEAC_RG11Snorm = enum_MTLPixelFormat.define('MTLPixelFormatEAC_RG11Snorm', 176)
MTLPixelFormatEAC_RGBA8 = enum_MTLPixelFormat.define('MTLPixelFormatEAC_RGBA8', 178)
MTLPixelFormatEAC_RGBA8_sRGB = enum_MTLPixelFormat.define('MTLPixelFormatEAC_RGBA8_sRGB', 179)
MTLPixelFormatETC2_RGB8 = enum_MTLPixelFormat.define('MTLPixelFormatETC2_RGB8', 180)
MTLPixelFormatETC2_RGB8_sRGB = enum_MTLPixelFormat.define('MTLPixelFormatETC2_RGB8_sRGB', 181)
MTLPixelFormatETC2_RGB8A1 = enum_MTLPixelFormat.define('MTLPixelFormatETC2_RGB8A1', 182)
MTLPixelFormatETC2_RGB8A1_sRGB = enum_MTLPixelFormat.define('MTLPixelFormatETC2_RGB8A1_sRGB', 183)
MTLPixelFormatASTC_4x4_sRGB = enum_MTLPixelFormat.define('MTLPixelFormatASTC_4x4_sRGB', 186)
MTLPixelFormatASTC_5x4_sRGB = enum_MTLPixelFormat.define('MTLPixelFormatASTC_5x4_sRGB', 187)
MTLPixelFormatASTC_5x5_sRGB = enum_MTLPixelFormat.define('MTLPixelFormatASTC_5x5_sRGB', 188)
MTLPixelFormatASTC_6x5_sRGB = enum_MTLPixelFormat.define('MTLPixelFormatASTC_6x5_sRGB', 189)
MTLPixelFormatASTC_6x6_sRGB = enum_MTLPixelFormat.define('MTLPixelFormatASTC_6x6_sRGB', 190)
MTLPixelFormatASTC_8x5_sRGB = enum_MTLPixelFormat.define('MTLPixelFormatASTC_8x5_sRGB', 192)
MTLPixelFormatASTC_8x6_sRGB = enum_MTLPixelFormat.define('MTLPixelFormatASTC_8x6_sRGB', 193)
MTLPixelFormatASTC_8x8_sRGB = enum_MTLPixelFormat.define('MTLPixelFormatASTC_8x8_sRGB', 194)
MTLPixelFormatASTC_10x5_sRGB = enum_MTLPixelFormat.define('MTLPixelFormatASTC_10x5_sRGB', 195)
MTLPixelFormatASTC_10x6_sRGB = enum_MTLPixelFormat.define('MTLPixelFormatASTC_10x6_sRGB', 196)
MTLPixelFormatASTC_10x8_sRGB = enum_MTLPixelFormat.define('MTLPixelFormatASTC_10x8_sRGB', 197)
MTLPixelFormatASTC_10x10_sRGB = enum_MTLPixelFormat.define('MTLPixelFormatASTC_10x10_sRGB', 198)
MTLPixelFormatASTC_12x10_sRGB = enum_MTLPixelFormat.define('MTLPixelFormatASTC_12x10_sRGB', 199)
MTLPixelFormatASTC_12x12_sRGB = enum_MTLPixelFormat.define('MTLPixelFormatASTC_12x12_sRGB', 200)
MTLPixelFormatASTC_4x4_LDR = enum_MTLPixelFormat.define('MTLPixelFormatASTC_4x4_LDR', 204)
MTLPixelFormatASTC_5x4_LDR = enum_MTLPixelFormat.define('MTLPixelFormatASTC_5x4_LDR', 205)
MTLPixelFormatASTC_5x5_LDR = enum_MTLPixelFormat.define('MTLPixelFormatASTC_5x5_LDR', 206)
MTLPixelFormatASTC_6x5_LDR = enum_MTLPixelFormat.define('MTLPixelFormatASTC_6x5_LDR', 207)
MTLPixelFormatASTC_6x6_LDR = enum_MTLPixelFormat.define('MTLPixelFormatASTC_6x6_LDR', 208)
MTLPixelFormatASTC_8x5_LDR = enum_MTLPixelFormat.define('MTLPixelFormatASTC_8x5_LDR', 210)
MTLPixelFormatASTC_8x6_LDR = enum_MTLPixelFormat.define('MTLPixelFormatASTC_8x6_LDR', 211)
MTLPixelFormatASTC_8x8_LDR = enum_MTLPixelFormat.define('MTLPixelFormatASTC_8x8_LDR', 212)
MTLPixelFormatASTC_10x5_LDR = enum_MTLPixelFormat.define('MTLPixelFormatASTC_10x5_LDR', 213)
MTLPixelFormatASTC_10x6_LDR = enum_MTLPixelFormat.define('MTLPixelFormatASTC_10x6_LDR', 214)
MTLPixelFormatASTC_10x8_LDR = enum_MTLPixelFormat.define('MTLPixelFormatASTC_10x8_LDR', 215)
MTLPixelFormatASTC_10x10_LDR = enum_MTLPixelFormat.define('MTLPixelFormatASTC_10x10_LDR', 216)
MTLPixelFormatASTC_12x10_LDR = enum_MTLPixelFormat.define('MTLPixelFormatASTC_12x10_LDR', 217)
MTLPixelFormatASTC_12x12_LDR = enum_MTLPixelFormat.define('MTLPixelFormatASTC_12x12_LDR', 218)
MTLPixelFormatASTC_4x4_HDR = enum_MTLPixelFormat.define('MTLPixelFormatASTC_4x4_HDR', 222)
MTLPixelFormatASTC_5x4_HDR = enum_MTLPixelFormat.define('MTLPixelFormatASTC_5x4_HDR', 223)
MTLPixelFormatASTC_5x5_HDR = enum_MTLPixelFormat.define('MTLPixelFormatASTC_5x5_HDR', 224)
MTLPixelFormatASTC_6x5_HDR = enum_MTLPixelFormat.define('MTLPixelFormatASTC_6x5_HDR', 225)
MTLPixelFormatASTC_6x6_HDR = enum_MTLPixelFormat.define('MTLPixelFormatASTC_6x6_HDR', 226)
MTLPixelFormatASTC_8x5_HDR = enum_MTLPixelFormat.define('MTLPixelFormatASTC_8x5_HDR', 228)
MTLPixelFormatASTC_8x6_HDR = enum_MTLPixelFormat.define('MTLPixelFormatASTC_8x6_HDR', 229)
MTLPixelFormatASTC_8x8_HDR = enum_MTLPixelFormat.define('MTLPixelFormatASTC_8x8_HDR', 230)
MTLPixelFormatASTC_10x5_HDR = enum_MTLPixelFormat.define('MTLPixelFormatASTC_10x5_HDR', 231)
MTLPixelFormatASTC_10x6_HDR = enum_MTLPixelFormat.define('MTLPixelFormatASTC_10x6_HDR', 232)
MTLPixelFormatASTC_10x8_HDR = enum_MTLPixelFormat.define('MTLPixelFormatASTC_10x8_HDR', 233)
MTLPixelFormatASTC_10x10_HDR = enum_MTLPixelFormat.define('MTLPixelFormatASTC_10x10_HDR', 234)
MTLPixelFormatASTC_12x10_HDR = enum_MTLPixelFormat.define('MTLPixelFormatASTC_12x10_HDR', 235)
MTLPixelFormatASTC_12x12_HDR = enum_MTLPixelFormat.define('MTLPixelFormatASTC_12x12_HDR', 236)
MTLPixelFormatGBGR422 = enum_MTLPixelFormat.define('MTLPixelFormatGBGR422', 240)
MTLPixelFormatBGRG422 = enum_MTLPixelFormat.define('MTLPixelFormatBGRG422', 241)
# Depth/stencil formats.
MTLPixelFormatDepth16Unorm = enum_MTLPixelFormat.define('MTLPixelFormatDepth16Unorm', 250)
MTLPixelFormatDepth32Float = enum_MTLPixelFormat.define('MTLPixelFormatDepth32Float', 252)
MTLPixelFormatStencil8 = enum_MTLPixelFormat.define('MTLPixelFormatStencil8', 253)
MTLPixelFormatDepth24Unorm_Stencil8 = enum_MTLPixelFormat.define('MTLPixelFormatDepth24Unorm_Stencil8', 255)
MTLPixelFormatDepth32Float_Stencil8 = enum_MTLPixelFormat.define('MTLPixelFormatDepth32Float_Stencil8', 260)
MTLPixelFormatX32_Stencil8 = enum_MTLPixelFormat.define('MTLPixelFormatX32_Stencil8', 261)
MTLPixelFormatX24_Stencil8 = enum_MTLPixelFormat.define('MTLPixelFormatX24_Stencil8', 262)
MTLPixelFormat: TypeAlias = enum_MTLPixelFormat
class enum_MTLResourceOptions(NSUInteger, c.Enum): pass
MTLResourceCPUCacheModeDefaultCache = enum_MTLResourceOptions.define('MTLResourceCPUCacheModeDefaultCache', 0)
MTLResourceCPUCacheModeWriteCombined = enum_MTLResourceOptions.define('MTLResourceCPUCacheModeWriteCombined', 1)
MTLResourceStorageModeShared = enum_MTLResourceOptions.define('MTLResourceStorageModeShared', 0)
MTLResourceStorageModeManaged = enum_MTLResourceOptions.define('MTLResourceStorageModeManaged', 16)
MTLResourceStorageModePrivate = enum_MTLResourceOptions.define('MTLResourceStorageModePrivate', 32)
MTLResourceStorageModeMemoryless = enum_MTLResourceOptions.define('MTLResourceStorageModeMemoryless', 48)
MTLResourceHazardTrackingModeDefault = enum_MTLResourceOptions.define('MTLResourceHazardTrackingModeDefault', 0)
MTLResourceHazardTrackingModeUntracked = enum_MTLResourceOptions.define('MTLResourceHazardTrackingModeUntracked', 256)
MTLResourceHazardTrackingModeTracked = enum_MTLResourceOptions.define('MTLResourceHazardTrackingModeTracked', 512)
MTLResourceOptionCPUCacheModeDefault = enum_MTLResourceOptions.define('MTLResourceOptionCPUCacheModeDefault', 0)
MTLResourceOptionCPUCacheModeWriteCombined = enum_MTLResourceOptions.define('MTLResourceOptionCPUCacheModeWriteCombined', 1)
MTLResourceOptions: TypeAlias = enum_MTLResourceOptions
class enum_MTLCPUCacheMode(NSUInteger, c.Enum): pass
MTLCPUCacheModeDefaultCache = enum_MTLCPUCacheMode.define('MTLCPUCacheModeDefaultCache', 0)
MTLCPUCacheModeWriteCombined = enum_MTLCPUCacheMode.define('MTLCPUCacheModeWriteCombined', 1)
MTLCPUCacheMode: TypeAlias = enum_MTLCPUCacheMode
class enum_MTLStorageMode(NSUInteger, c.Enum): pass
MTLStorageModeShared = enum_MTLStorageMode.define('MTLStorageModeShared', 0)
MTLStorageModeManaged = enum_MTLStorageMode.define('MTLStorageModeManaged', 1)
MTLStorageModePrivate = enum_MTLStorageMode.define('MTLStorageModePrivate', 2)
MTLStorageModeMemoryless = enum_MTLStorageMode.define('MTLStorageModeMemoryless', 3)
MTLStorageMode: TypeAlias = enum_MTLStorageMode
class enum_MTLHazardTrackingMode(NSUInteger, c.Enum): pass
MTLHazardTrackingModeDefault = enum_MTLHazardTrackingMode.define('MTLHazardTrackingModeDefault', 0)
MTLHazardTrackingModeUntracked = enum_MTLHazardTrackingMode.define('MTLHazardTrackingModeUntracked', 1)
MTLHazardTrackingModeTracked = enum_MTLHazardTrackingMode.define('MTLHazardTrackingModeTracked', 2)
MTLHazardTrackingMode: TypeAlias = enum_MTLHazardTrackingMode
class enum_MTLTextureUsage(NSUInteger, c.Enum): pass
MTLTextureUsageUnknown = enum_MTLTextureUsage.define('MTLTextureUsageUnknown', 0)
MTLTextureUsageShaderRead = enum_MTLTextureUsage.define('MTLTextureUsageShaderRead', 1)
MTLTextureUsageShaderWrite = enum_MTLTextureUsage.define('MTLTextureUsageShaderWrite', 2)
MTLTextureUsageRenderTarget = enum_MTLTextureUsage.define('MTLTextureUsageRenderTarget', 4)
MTLTextureUsagePixelFormatView = enum_MTLTextureUsage.define('MTLTextureUsagePixelFormatView', 16)
MTLTextureUsageShaderAtomic = enum_MTLTextureUsage.define('MTLTextureUsageShaderAtomic', 32)
MTLTextureUsage: TypeAlias = enum_MTLTextureUsage
BOOL: TypeAlias = Annotated[int, ctypes.c_int32]
NSInteger: TypeAlias = Annotated[int, ctypes.c_int64]
class enum_MTLTextureCompressionType(NSInteger, c.Enum): pass
MTLTextureCompressionTypeLossless = enum_MTLTextureCompressionType.define('MTLTextureCompressionTypeLossless', 0)
MTLTextureCompressionTypeLossy = enum_MTLTextureCompressionType.define('MTLTextureCompressionTypeLossy', 1)
MTLTextureCompressionType: TypeAlias = enum_MTLTextureCompressionType
# MTLTextureSwizzleChannels: 4-byte C struct mapping each texture channel to a
# swizzle source. The Annotated integer on each field is its byte offset.
@c.record
class MTLTextureSwizzleChannels(c.Struct):
    SIZE = 4  # total struct size in bytes (four 1-byte swizzle fields)
    red: Annotated[MTLTextureSwizzle, 0]
    green: Annotated[MTLTextureSwizzle, 1]
    blue: Annotated[MTLTextureSwizzle, 2]
    alpha: Annotated[MTLTextureSwizzle, 3]
uint8_t: TypeAlias = Annotated[int, ctypes.c_ubyte]
# MTLTextureSwizzle: per-channel source selector (zero/one constant or an RGBA channel).
class enum_MTLTextureSwizzle(uint8_t, c.Enum): pass
MTLTextureSwizzleZero = enum_MTLTextureSwizzle.define('MTLTextureSwizzleZero', 0)
MTLTextureSwizzleOne = enum_MTLTextureSwizzle.define('MTLTextureSwizzleOne', 1)
MTLTextureSwizzleRed = enum_MTLTextureSwizzle.define('MTLTextureSwizzleRed', 2)
MTLTextureSwizzleGreen = enum_MTLTextureSwizzle.define('MTLTextureSwizzleGreen', 3)
MTLTextureSwizzleBlue = enum_MTLTextureSwizzle.define('MTLTextureSwizzleBlue', 4)
MTLTextureSwizzleAlpha = enum_MTLTextureSwizzle.define('MTLTextureSwizzleAlpha', 5)
MTLTextureSwizzle: TypeAlias = enum_MTLTextureSwizzle
# --- Foundation / Objective-C runtime class specs.
# Each objc.Spec subclass declares an Objective-C class; selectors and
# signatures are attached afterwards via _methods_ / _classmethods_ so that
# forward references between classes resolve. Each method entry is
# (selector, return type, [argument types]) with an optional trailing True —
# presumably marking selectors whose result the caller owns (+1 retain, the
# new*/alloc/copy family); confirm against the objc helper module.
class NSObject(objc.Spec): pass
IMP: TypeAlias = c.CFUNCTYPE[None, []]
class NSInvocation(objc.Spec): pass
class NSMethodSignature(objc.Spec): pass
NSMethodSignature._bases_ = [NSObject]
NSMethodSignature._methods_ = [
    ('getArgumentTypeAtIndex:', c.POINTER[Annotated[bytes, ctypes.c_char]], [NSUInteger]),
    ('isOneway', BOOL, []),
    ('numberOfArguments', NSUInteger, []),
    ('frameLength', NSUInteger, []),
    ('methodReturnType', c.POINTER[Annotated[bytes, ctypes.c_char]], []),
    ('methodReturnLength', NSUInteger, []),
]
NSMethodSignature._classmethods_ = [
    ('signatureWithObjCTypes:', NSMethodSignature, [c.POINTER[Annotated[bytes, ctypes.c_char]]]),
]
NSInvocation._bases_ = [NSObject]
NSInvocation._methods_ = [
    ('retainArguments', None, []),
    ('getReturnValue:', None, [ctypes.c_void_p]),
    ('setReturnValue:', None, [ctypes.c_void_p]),
    ('getArgument:atIndex:', None, [ctypes.c_void_p, NSInteger]),
    ('setArgument:atIndex:', None, [ctypes.c_void_p, NSInteger]),
    ('invoke', None, []),
    ('invokeWithTarget:', None, [objc.id_]),
    ('invokeUsingIMP:', None, [IMP]),
    ('methodSignature', NSMethodSignature, []),
    ('argumentsRetained', BOOL, []),
    ('target', objc.id_, []),
    ('setTarget:', None, [objc.id_]),
    ('selector', objc.id_, []),
    ('setSelector:', None, [objc.id_]),
]
NSInvocation._classmethods_ = [
    ('invocationWithMethodSignature:', NSInvocation, [NSMethodSignature]),
]
# Opaque NSZone struct; only used behind pointers in *WithZone: selectors.
class struct__NSZone(ctypes.Structure): pass
class Protocol(objc.Spec): pass
class NSString(objc.Spec): pass
unichar: TypeAlias = Annotated[int, ctypes.c_uint16]
class NSCoder(objc.Spec): pass
class NSData(objc.Spec): pass
NSData._bases_ = [NSObject]
NSData._methods_ = [
    ('length', NSUInteger, []),
    ('bytes', ctypes.c_void_p, []),
]
NSCoder._bases_ = [NSObject]
NSCoder._methods_ = [
    ('encodeValueOfObjCType:at:', None, [c.POINTER[Annotated[bytes, ctypes.c_char]], ctypes.c_void_p]),
    ('encodeDataObject:', None, [NSData]),
    ('decodeDataObject', NSData, []),
    ('decodeValueOfObjCType:at:size:', None, [c.POINTER[Annotated[bytes, ctypes.c_char]], ctypes.c_void_p, NSUInteger]),
    ('versionForClassName:', NSInteger, [NSString]),
]
NSString._bases_ = [NSObject]
NSString._methods_ = [
    ('characterAtIndex:', unichar, [NSUInteger]),
    ('init', 'instancetype', []),
    ('initWithCoder:', 'instancetype', [NSCoder]),
    ('length', NSUInteger, []),
]
# NSObject root-class protocol surface (subset used by these bindings).
NSObject._methods_ = [
    ('init', 'instancetype', []),
    ('dealloc', None, []),
    ('finalize', None, []),
    ('copy', objc.id_, [], True),
    ('mutableCopy', objc.id_, [], True),
    ('methodForSelector:', IMP, [objc.id_]),
    ('doesNotRecognizeSelector:', None, [objc.id_]),
    ('forwardingTargetForSelector:', objc.id_, [objc.id_]),
    ('forwardInvocation:', None, [NSInvocation]),
    ('methodSignatureForSelector:', NSMethodSignature, [objc.id_]),
    ('allowsWeakReference', BOOL, []),
    ('retainWeakReference', BOOL, []),
]
NSObject._classmethods_ = [
    ('load', None, []),
    ('initialize', None, []),
    ('new', 'instancetype', [], True),
    ('allocWithZone:', 'instancetype', [c.POINTER[struct__NSZone]], True),
    ('alloc', 'instancetype', [], True),
    ('copyWithZone:', objc.id_, [c.POINTER[struct__NSZone]], True),
    ('mutableCopyWithZone:', objc.id_, [c.POINTER[struct__NSZone]], True),
    ('instancesRespondToSelector:', BOOL, [objc.id_]),
    ('conformsToProtocol:', BOOL, [Protocol]),
    ('instanceMethodForSelector:', IMP, [objc.id_]),
    ('instanceMethodSignatureForSelector:', NSMethodSignature, [objc.id_]),
    ('resolveClassMethod:', BOOL, [objc.id_]),
    ('resolveInstanceMethod:', BOOL, [objc.id_]),
    ('hash', NSUInteger, []),
    ('description', NSString, []),
    ('debugDescription', NSString, []),
]
# --- MTLTextureDescriptor: property accessor pairs (getter + setter) for
# configuring a texture before creation, plus convenience constructors.
MTLTextureDescriptor._bases_ = [NSObject]
MTLTextureDescriptor._methods_ = [
    ('textureType', MTLTextureType, []),
    ('setTextureType:', None, [MTLTextureType]),
    ('pixelFormat', MTLPixelFormat, []),
    ('setPixelFormat:', None, [MTLPixelFormat]),
    ('width', NSUInteger, []),
    ('setWidth:', None, [NSUInteger]),
    ('height', NSUInteger, []),
    ('setHeight:', None, [NSUInteger]),
    ('depth', NSUInteger, []),
    ('setDepth:', None, [NSUInteger]),
    ('mipmapLevelCount', NSUInteger, []),
    ('setMipmapLevelCount:', None, [NSUInteger]),
    ('sampleCount', NSUInteger, []),
    ('setSampleCount:', None, [NSUInteger]),
    ('arrayLength', NSUInteger, []),
    ('setArrayLength:', None, [NSUInteger]),
    ('resourceOptions', MTLResourceOptions, []),
    ('setResourceOptions:', None, [MTLResourceOptions]),
    ('cpuCacheMode', MTLCPUCacheMode, []),
    ('setCpuCacheMode:', None, [MTLCPUCacheMode]),
    ('storageMode', MTLStorageMode, []),
    ('setStorageMode:', None, [MTLStorageMode]),
    ('hazardTrackingMode', MTLHazardTrackingMode, []),
    ('setHazardTrackingMode:', None, [MTLHazardTrackingMode]),
    ('usage', MTLTextureUsage, []),
    ('setUsage:', None, [MTLTextureUsage]),
    ('allowGPUOptimizedContents', BOOL, []),
    ('setAllowGPUOptimizedContents:', None, [BOOL]),
    ('compressionType', MTLTextureCompressionType, []),
    ('setCompressionType:', None, [MTLTextureCompressionType]),
    ('swizzle', MTLTextureSwizzleChannels, []),
    ('setSwizzle:', None, [MTLTextureSwizzleChannels]),
]
MTLTextureDescriptor._classmethods_ = [
    ('texture2DDescriptorWithPixelFormat:width:height:mipmapped:', MTLTextureDescriptor, [MTLPixelFormat, NSUInteger, NSUInteger, BOOL]),
    ('textureCubeDescriptorWithPixelFormat:size:mipmapped:', MTLTextureDescriptor, [MTLPixelFormat, NSUInteger, BOOL]),
    ('textureBufferDescriptorWithPixelFormat:width:resourceOptions:usage:', MTLTextureDescriptor, [MTLPixelFormat, NSUInteger, MTLResourceOptions, MTLTextureUsage]),
]
class MTLDevice(objc.Spec): pass
uint64_t: TypeAlias = Annotated[int, ctypes.c_uint64]
# --- MTLBuffer: untyped GPU memory allocation; 'new*' selectors are marked
# with the trailing True (caller-owned result).
MTLBuffer._bases_ = [MTLResource]
MTLBuffer._methods_ = [
    ('contents', ctypes.c_void_p, []),
    ('didModifyRange:', None, [NSRange]),
    ('newTextureWithDescriptor:offset:bytesPerRow:', MTLTexture, [MTLTextureDescriptor, NSUInteger, NSUInteger], True),
    ('addDebugMarker:range:', None, [NSString, NSRange]),
    ('removeAllDebugMarkers', None, []),
    ('newRemoteBufferViewForDevice:', MTLBuffer, [MTLDevice], True),
    ('length', NSUInteger, []),
    ('remoteStorageBuffer', MTLBuffer, []),
    ('gpuAddress', uint64_t, []),
]
# Forward declarations for specs referenced by the compute encoder below.
class MTLVisibleFunctionTable(objc.Spec): pass
class MTLIntersectionFunctionTable(objc.Spec): pass
class MTLAccelerationStructure(objc.Spec): pass
class MTLSamplerState(objc.Spec): pass
# --- Geometry structs: field Annotated ints are byte offsets; each NSUInteger
# field is 8 bytes on 64-bit, giving the 24/48-byte SIZE values.
@c.record
class MTLRegion(c.Struct):
    SIZE = 48
    origin: Annotated[MTLOrigin, 0]
    size: Annotated[MTLSize, 24]
@c.record
class MTLOrigin(c.Struct):
    SIZE = 24
    x: Annotated[NSUInteger, 0]
    y: Annotated[NSUInteger, 8]
    z: Annotated[NSUInteger, 16]
@c.record
class MTLSize(c.Struct):
    SIZE = 24
    width: Annotated[NSUInteger, 0]
    height: Annotated[NSUInteger, 8]
    depth: Annotated[NSUInteger, 16]
# --- MTLFence: GPU synchronization primitive.
class MTLFence(objc.Spec): pass
MTLFence._bases_ = [NSObject]
MTLFence._methods_ = [
    ('device', MTLDevice, []),
    ('label', NSString, []),
    ('setLabel:', None, [NSString]),
]
# --- MTLPurgeableState: note values start at 1 (KeepCurrent is a query, not a state).
class enum_MTLPurgeableState(NSUInteger, c.Enum): pass
MTLPurgeableStateKeepCurrent = enum_MTLPurgeableState.define('MTLPurgeableStateKeepCurrent', 1)
MTLPurgeableStateNonVolatile = enum_MTLPurgeableState.define('MTLPurgeableStateNonVolatile', 2)
MTLPurgeableStateVolatile = enum_MTLPurgeableState.define('MTLPurgeableStateVolatile', 3)
MTLPurgeableStateEmpty = enum_MTLPurgeableState.define('MTLPurgeableStateEmpty', 4)
MTLPurgeableState: TypeAlias = enum_MTLPurgeableState
# Mach kernel scalar aliases used by setOwnerWithIdentity:.
kern_return_t: TypeAlias = Annotated[int, ctypes.c_int32]
task_id_token_t: TypeAlias = Annotated[int, ctypes.c_uint32]
class MTLHeap(objc.Spec): pass
# --- MTLResource: common base protocol for buffers, textures, heaps.
MTLResource._bases_ = [NSObject]
MTLResource._methods_ = [
    ('setPurgeableState:', MTLPurgeableState, [MTLPurgeableState]),
    ('makeAliasable', None, []),
    ('isAliasable', BOOL, []),
    ('setOwnerWithIdentity:', kern_return_t, [task_id_token_t]),
    ('label', NSString, []),
    ('setLabel:', None, [NSString]),
    ('device', MTLDevice, []),
    ('cpuCacheMode', MTLCPUCacheMode, []),
    ('storageMode', MTLStorageMode, []),
    ('hazardTrackingMode', MTLHazardTrackingMode, []),
    ('resourceOptions', MTLResourceOptions, []),
    ('heap', MTLHeap, []),
    ('heapOffset', NSUInteger, []),
    ('allocatedSize', NSUInteger, [], True),
]
# --- MTLResourceUsage / MTLBarrierScope: bitmasks for useResource:/memoryBarrier:.
class enum_MTLResourceUsage(NSUInteger, c.Enum): pass
MTLResourceUsageRead = enum_MTLResourceUsage.define('MTLResourceUsageRead', 1)
MTLResourceUsageWrite = enum_MTLResourceUsage.define('MTLResourceUsageWrite', 2)
MTLResourceUsageSample = enum_MTLResourceUsage.define('MTLResourceUsageSample', 4)
MTLResourceUsage: TypeAlias = enum_MTLResourceUsage
class MTLIndirectCommandBuffer(objc.Spec): pass
class enum_MTLBarrierScope(NSUInteger, c.Enum): pass
MTLBarrierScopeBuffers = enum_MTLBarrierScope.define('MTLBarrierScopeBuffers', 1)
MTLBarrierScopeTextures = enum_MTLBarrierScope.define('MTLBarrierScopeTextures', 2)
MTLBarrierScopeRenderTargets = enum_MTLBarrierScope.define('MTLBarrierScopeRenderTargets', 4)
MTLBarrierScope: TypeAlias = enum_MTLBarrierScope
# --- MTLCounterSampleBuffer: GPU performance-counter sample storage.
class MTLCounterSampleBuffer(objc.Spec): pass
MTLCounterSampleBuffer._bases_ = [NSObject]
MTLCounterSampleBuffer._methods_ = [
    ('resolveCounterRange:', NSData, [NSRange]),
    ('device', MTLDevice, []),
    ('label', NSString, []),
    ('sampleCount', NSUInteger, []),
]
# --- MTLDispatchType: serial vs. concurrent compute command encoding.
class enum_MTLDispatchType(NSUInteger, c.Enum): pass
MTLDispatchTypeSerial = enum_MTLDispatchType.define('MTLDispatchTypeSerial', 0)
MTLDispatchTypeConcurrent = enum_MTLDispatchType.define('MTLDispatchTypeConcurrent', 1)
MTLDispatchType: TypeAlias = enum_MTLDispatchType
# --- MTLComputeCommandEncoder: full selector table for encoding compute work
# (pipeline state, argument binding, dispatch, fences, barriers, sampling).
MTLComputeCommandEncoder._bases_ = [MTLCommandEncoder]
MTLComputeCommandEncoder._methods_ = [
    ('setComputePipelineState:', None, [MTLComputePipelineState]),
    ('setBytes:length:atIndex:', None, [ctypes.c_void_p, NSUInteger, NSUInteger]),
    ('setBuffer:offset:atIndex:', None, [MTLBuffer, NSUInteger, NSUInteger]),
    ('setBufferOffset:atIndex:', None, [NSUInteger, NSUInteger]),
    ('setBuffers:offsets:withRange:', None, [c.POINTER[MTLBuffer], c.POINTER[NSUInteger], NSRange]),
    ('setBuffer:offset:attributeStride:atIndex:', None, [MTLBuffer, NSUInteger, NSUInteger, NSUInteger]),
    ('setBuffers:offsets:attributeStrides:withRange:', None, [c.POINTER[MTLBuffer], c.POINTER[NSUInteger], c.POINTER[NSUInteger], NSRange]),
    ('setBufferOffset:attributeStride:atIndex:', None, [NSUInteger, NSUInteger, NSUInteger]),
    ('setBytes:length:attributeStride:atIndex:', None, [ctypes.c_void_p, NSUInteger, NSUInteger, NSUInteger]),
    ('setVisibleFunctionTable:atBufferIndex:', None, [MTLVisibleFunctionTable, NSUInteger]),
    ('setVisibleFunctionTables:withBufferRange:', None, [c.POINTER[MTLVisibleFunctionTable], NSRange]),
    ('setIntersectionFunctionTable:atBufferIndex:', None, [MTLIntersectionFunctionTable, NSUInteger]),
    ('setIntersectionFunctionTables:withBufferRange:', None, [c.POINTER[MTLIntersectionFunctionTable], NSRange]),
    ('setAccelerationStructure:atBufferIndex:', None, [MTLAccelerationStructure, NSUInteger]),
    ('setTexture:atIndex:', None, [MTLTexture, NSUInteger]),
    ('setTextures:withRange:', None, [c.POINTER[MTLTexture], NSRange]),
    ('setSamplerState:atIndex:', None, [MTLSamplerState, NSUInteger]),
    ('setSamplerStates:withRange:', None, [c.POINTER[MTLSamplerState], NSRange]),
    ('setSamplerState:lodMinClamp:lodMaxClamp:atIndex:', None, [MTLSamplerState, Annotated[float, ctypes.c_float], Annotated[float, ctypes.c_float], NSUInteger]),
    ('setSamplerStates:lodMinClamps:lodMaxClamps:withRange:', None, [c.POINTER[MTLSamplerState], c.POINTER[Annotated[float, ctypes.c_float]], c.POINTER[Annotated[float, ctypes.c_float]], NSRange]),
    ('setThreadgroupMemoryLength:atIndex:', None, [NSUInteger, NSUInteger]),
    ('setImageblockWidth:height:', None, [NSUInteger, NSUInteger]),
    ('setStageInRegion:', None, [MTLRegion]),
    ('setStageInRegionWithIndirectBuffer:indirectBufferOffset:', None, [MTLBuffer, NSUInteger]),
    ('dispatchThreadgroups:threadsPerThreadgroup:', None, [MTLSize, MTLSize]),
    ('dispatchThreadgroupsWithIndirectBuffer:indirectBufferOffset:threadsPerThreadgroup:', None, [MTLBuffer, NSUInteger, MTLSize]),
    ('dispatchThreads:threadsPerThreadgroup:', None, [MTLSize, MTLSize]),
    ('updateFence:', None, [MTLFence]),
    ('waitForFence:', None, [MTLFence]),
    ('useResource:usage:', None, [MTLResource, MTLResourceUsage]),
    ('useResources:count:usage:', None, [c.POINTER[MTLResource], NSUInteger, MTLResourceUsage]),
    ('useHeap:', None, [MTLHeap]),
    ('useHeaps:count:', None, [c.POINTER[MTLHeap], NSUInteger]),
    ('executeCommandsInBuffer:withRange:', None, [MTLIndirectCommandBuffer, NSRange]),
    ('executeCommandsInBuffer:indirectBuffer:indirectBufferOffset:', None, [MTLIndirectCommandBuffer, MTLBuffer, NSUInteger]),
    ('memoryBarrierWithScope:', None, [MTLBarrierScope]),
    ('memoryBarrierWithResources:count:', None, [c.POINTER[MTLResource], NSUInteger]),
    ('sampleCountersInBuffer:atSampleIndex:withBarrier:', None, [MTLCounterSampleBuffer, NSUInteger, BOOL]),
    ('dispatchType', MTLDispatchType, []),
]
# Pipeline/function reflection specs (bodies declared further on or elsewhere).
class MTLComputePipelineReflection(objc.Spec): pass
MTLComputePipelineReflection._bases_ = [NSObject]
class MTLComputePipelineDescriptor(objc.Spec): pass
class MTLFunction(objc.Spec): pass
class MTLArgumentEncoder(objc.Spec): pass
class MTLArgument(objc.Spec): pass
# --- MTLArgumentType: kind of a shader-bound argument slot.
class enum_MTLArgumentType(NSUInteger, c.Enum): pass
MTLArgumentTypeBuffer = enum_MTLArgumentType.define('MTLArgumentTypeBuffer', 0)
MTLArgumentTypeThreadgroupMemory = enum_MTLArgumentType.define('MTLArgumentTypeThreadgroupMemory', 1)
MTLArgumentTypeTexture = enum_MTLArgumentType.define('MTLArgumentTypeTexture', 2)
MTLArgumentTypeSampler = enum_MTLArgumentType.define('MTLArgumentTypeSampler', 3)
MTLArgumentTypeImageblockData = enum_MTLArgumentType.define('MTLArgumentTypeImageblockData', 16)
MTLArgumentTypeImageblock = enum_MTLArgumentType.define('MTLArgumentTypeImageblock', 17)
MTLArgumentTypeVisibleFunctionTable = enum_MTLArgumentType.define('MTLArgumentTypeVisibleFunctionTable', 24)
MTLArgumentTypePrimitiveAccelerationStructure = enum_MTLArgumentType.define('MTLArgumentTypePrimitiveAccelerationStructure', 25)
MTLArgumentTypeInstanceAccelerationStructure = enum_MTLArgumentType.define('MTLArgumentTypeInstanceAccelerationStructure', 26)
MTLArgumentTypeIntersectionFunctionTable = enum_MTLArgumentType.define('MTLArgumentTypeIntersectionFunctionTable', 27)
MTLArgumentType: TypeAlias = enum_MTLArgumentType
# --- MTLBindingAccess: read/write access of a binding; the MTLArgumentAccess*
# names are the SDK's deprecated aliases for the same values.
class enum_MTLBindingAccess(NSUInteger, c.Enum): pass
MTLBindingAccessReadOnly = enum_MTLBindingAccess.define('MTLBindingAccessReadOnly', 0)
MTLBindingAccessReadWrite = enum_MTLBindingAccess.define('MTLBindingAccessReadWrite', 1)
MTLBindingAccessWriteOnly = enum_MTLBindingAccess.define('MTLBindingAccessWriteOnly', 2)
MTLArgumentAccessReadOnly = enum_MTLBindingAccess.define('MTLArgumentAccessReadOnly', 0)
MTLArgumentAccessReadWrite = enum_MTLBindingAccess.define('MTLArgumentAccessReadWrite', 1)
MTLArgumentAccessWriteOnly = enum_MTLBindingAccess.define('MTLArgumentAccessWriteOnly', 2)
MTLBindingAccess: TypeAlias = enum_MTLBindingAccess
# --- MTLDataType: MSL data types reported by shader reflection. Raw values
# mirror Apple's MTLArgument.h; the gaps in numbering (57, 61, 89–114, 119–120)
# come from the SDK header and must be preserved.
class enum_MTLDataType(NSUInteger, c.Enum): pass
MTLDataTypeNone = enum_MTLDataType.define('MTLDataTypeNone', 0)
MTLDataTypeStruct = enum_MTLDataType.define('MTLDataTypeStruct', 1)
MTLDataTypeArray = enum_MTLDataType.define('MTLDataTypeArray', 2)
MTLDataTypeFloat = enum_MTLDataType.define('MTLDataTypeFloat', 3)
MTLDataTypeFloat2 = enum_MTLDataType.define('MTLDataTypeFloat2', 4)
MTLDataTypeFloat3 = enum_MTLDataType.define('MTLDataTypeFloat3', 5)
MTLDataTypeFloat4 = enum_MTLDataType.define('MTLDataTypeFloat4', 6)
MTLDataTypeFloat2x2 = enum_MTLDataType.define('MTLDataTypeFloat2x2', 7)
MTLDataTypeFloat2x3 = enum_MTLDataType.define('MTLDataTypeFloat2x3', 8)
MTLDataTypeFloat2x4 = enum_MTLDataType.define('MTLDataTypeFloat2x4', 9)
MTLDataTypeFloat3x2 = enum_MTLDataType.define('MTLDataTypeFloat3x2', 10)
MTLDataTypeFloat3x3 = enum_MTLDataType.define('MTLDataTypeFloat3x3', 11)
MTLDataTypeFloat3x4 = enum_MTLDataType.define('MTLDataTypeFloat3x4', 12)
MTLDataTypeFloat4x2 = enum_MTLDataType.define('MTLDataTypeFloat4x2', 13)
MTLDataTypeFloat4x3 = enum_MTLDataType.define('MTLDataTypeFloat4x3', 14)
MTLDataTypeFloat4x4 = enum_MTLDataType.define('MTLDataTypeFloat4x4', 15)
MTLDataTypeHalf = enum_MTLDataType.define('MTLDataTypeHalf', 16)
MTLDataTypeHalf2 = enum_MTLDataType.define('MTLDataTypeHalf2', 17)
MTLDataTypeHalf3 = enum_MTLDataType.define('MTLDataTypeHalf3', 18)
MTLDataTypeHalf4 = enum_MTLDataType.define('MTLDataTypeHalf4', 19)
MTLDataTypeHalf2x2 = enum_MTLDataType.define('MTLDataTypeHalf2x2', 20)
MTLDataTypeHalf2x3 = enum_MTLDataType.define('MTLDataTypeHalf2x3', 21)
MTLDataTypeHalf2x4 = enum_MTLDataType.define('MTLDataTypeHalf2x4', 22)
MTLDataTypeHalf3x2 = enum_MTLDataType.define('MTLDataTypeHalf3x2', 23)
MTLDataTypeHalf3x3 = enum_MTLDataType.define('MTLDataTypeHalf3x3', 24)
MTLDataTypeHalf3x4 = enum_MTLDataType.define('MTLDataTypeHalf3x4', 25)
MTLDataTypeHalf4x2 = enum_MTLDataType.define('MTLDataTypeHalf4x2', 26)
MTLDataTypeHalf4x3 = enum_MTLDataType.define('MTLDataTypeHalf4x3', 27)
MTLDataTypeHalf4x4 = enum_MTLDataType.define('MTLDataTypeHalf4x4', 28)
MTLDataTypeInt = enum_MTLDataType.define('MTLDataTypeInt', 29)
MTLDataTypeInt2 = enum_MTLDataType.define('MTLDataTypeInt2', 30)
MTLDataTypeInt3 = enum_MTLDataType.define('MTLDataTypeInt3', 31)
MTLDataTypeInt4 = enum_MTLDataType.define('MTLDataTypeInt4', 32)
MTLDataTypeUInt = enum_MTLDataType.define('MTLDataTypeUInt', 33)
MTLDataTypeUInt2 = enum_MTLDataType.define('MTLDataTypeUInt2', 34)
MTLDataTypeUInt3 = enum_MTLDataType.define('MTLDataTypeUInt3', 35)
MTLDataTypeUInt4 = enum_MTLDataType.define('MTLDataTypeUInt4', 36)
MTLDataTypeShort = enum_MTLDataType.define('MTLDataTypeShort', 37)
MTLDataTypeShort2 = enum_MTLDataType.define('MTLDataTypeShort2', 38)
MTLDataTypeShort3 = enum_MTLDataType.define('MTLDataTypeShort3', 39)
MTLDataTypeShort4 = enum_MTLDataType.define('MTLDataTypeShort4', 40)
MTLDataTypeUShort = enum_MTLDataType.define('MTLDataTypeUShort', 41)
MTLDataTypeUShort2 = enum_MTLDataType.define('MTLDataTypeUShort2', 42)
MTLDataTypeUShort3 = enum_MTLDataType.define('MTLDataTypeUShort3', 43)
MTLDataTypeUShort4 = enum_MTLDataType.define('MTLDataTypeUShort4', 44)
MTLDataTypeChar = enum_MTLDataType.define('MTLDataTypeChar', 45)
MTLDataTypeChar2 = enum_MTLDataType.define('MTLDataTypeChar2', 46)
MTLDataTypeChar3 = enum_MTLDataType.define('MTLDataTypeChar3', 47)
MTLDataTypeChar4 = enum_MTLDataType.define('MTLDataTypeChar4', 48)
MTLDataTypeUChar = enum_MTLDataType.define('MTLDataTypeUChar', 49)
MTLDataTypeUChar2 = enum_MTLDataType.define('MTLDataTypeUChar2', 50)
MTLDataTypeUChar3 = enum_MTLDataType.define('MTLDataTypeUChar3', 51)
MTLDataTypeUChar4 = enum_MTLDataType.define('MTLDataTypeUChar4', 52)
MTLDataTypeBool = enum_MTLDataType.define('MTLDataTypeBool', 53)
MTLDataTypeBool2 = enum_MTLDataType.define('MTLDataTypeBool2', 54)
MTLDataTypeBool3 = enum_MTLDataType.define('MTLDataTypeBool3', 55)
MTLDataTypeBool4 = enum_MTLDataType.define('MTLDataTypeBool4', 56)
MTLDataTypeTexture = enum_MTLDataType.define('MTLDataTypeTexture', 58)
MTLDataTypeSampler = enum_MTLDataType.define('MTLDataTypeSampler', 59)
MTLDataTypePointer = enum_MTLDataType.define('MTLDataTypePointer', 60)
# Pixel-format-typed data (writable texture views in argument buffers).
MTLDataTypeR8Unorm = enum_MTLDataType.define('MTLDataTypeR8Unorm', 62)
MTLDataTypeR8Snorm = enum_MTLDataType.define('MTLDataTypeR8Snorm', 63)
MTLDataTypeR16Unorm = enum_MTLDataType.define('MTLDataTypeR16Unorm', 64)
MTLDataTypeR16Snorm = enum_MTLDataType.define('MTLDataTypeR16Snorm', 65)
MTLDataTypeRG8Unorm = enum_MTLDataType.define('MTLDataTypeRG8Unorm', 66)
MTLDataTypeRG8Snorm = enum_MTLDataType.define('MTLDataTypeRG8Snorm', 67)
MTLDataTypeRG16Unorm = enum_MTLDataType.define('MTLDataTypeRG16Unorm', 68)
MTLDataTypeRG16Snorm = enum_MTLDataType.define('MTLDataTypeRG16Snorm', 69)
MTLDataTypeRGBA8Unorm = enum_MTLDataType.define('MTLDataTypeRGBA8Unorm', 70)
MTLDataTypeRGBA8Unorm_sRGB = enum_MTLDataType.define('MTLDataTypeRGBA8Unorm_sRGB', 71)
MTLDataTypeRGBA8Snorm = enum_MTLDataType.define('MTLDataTypeRGBA8Snorm', 72)
MTLDataTypeRGBA16Unorm = enum_MTLDataType.define('MTLDataTypeRGBA16Unorm', 73)
MTLDataTypeRGBA16Snorm = enum_MTLDataType.define('MTLDataTypeRGBA16Snorm', 74)
MTLDataTypeRGB10A2Unorm = enum_MTLDataType.define('MTLDataTypeRGB10A2Unorm', 75)
MTLDataTypeRG11B10Float = enum_MTLDataType.define('MTLDataTypeRG11B10Float', 76)
MTLDataTypeRGB9E5Float = enum_MTLDataType.define('MTLDataTypeRGB9E5Float', 77)
MTLDataTypeRenderPipeline = enum_MTLDataType.define('MTLDataTypeRenderPipeline', 78)
MTLDataTypeComputePipeline = enum_MTLDataType.define('MTLDataTypeComputePipeline', 79)
MTLDataTypeIndirectCommandBuffer = enum_MTLDataType.define('MTLDataTypeIndirectCommandBuffer', 80)
MTLDataTypeLong = enum_MTLDataType.define('MTLDataTypeLong', 81)
MTLDataTypeLong2 = enum_MTLDataType.define('MTLDataTypeLong2', 82)
MTLDataTypeLong3 = enum_MTLDataType.define('MTLDataTypeLong3', 83)
MTLDataTypeLong4 = enum_MTLDataType.define('MTLDataTypeLong4', 84)
MTLDataTypeULong = enum_MTLDataType.define('MTLDataTypeULong', 85)
MTLDataTypeULong2 = enum_MTLDataType.define('MTLDataTypeULong2', 86)
MTLDataTypeULong3 = enum_MTLDataType.define('MTLDataTypeULong3', 87)
MTLDataTypeULong4 = enum_MTLDataType.define('MTLDataTypeULong4', 88)
MTLDataTypeVisibleFunctionTable = enum_MTLDataType.define('MTLDataTypeVisibleFunctionTable', 115)
MTLDataTypeIntersectionFunctionTable = enum_MTLDataType.define('MTLDataTypeIntersectionFunctionTable', 116)
MTLDataTypePrimitiveAccelerationStructure = enum_MTLDataType.define('MTLDataTypePrimitiveAccelerationStructure', 117)
MTLDataTypeInstanceAccelerationStructure = enum_MTLDataType.define('MTLDataTypeInstanceAccelerationStructure', 118)
MTLDataTypeBFloat = enum_MTLDataType.define('MTLDataTypeBFloat', 121)
MTLDataTypeBFloat2 = enum_MTLDataType.define('MTLDataTypeBFloat2', 122)
MTLDataTypeBFloat3 = enum_MTLDataType.define('MTLDataTypeBFloat3', 123)
MTLDataTypeBFloat4 = enum_MTLDataType.define('MTLDataTypeBFloat4', 124)
MTLDataType: TypeAlias = enum_MTLDataType
# --- Shader type-reflection class hierarchy: MTLType is the base; pointer,
# array, struct, and texture-reference types specialize it. Forward
# declarations first so the mutually-referential method tables can resolve.
class MTLStructType(objc.Spec): pass
class MTLStructMember(objc.Spec): pass
class MTLArrayType(objc.Spec): pass
class MTLTextureReferenceType(objc.Spec): pass
class MTLType(objc.Spec): pass
MTLType._bases_ = [NSObject]
MTLType._methods_ = [
    ('dataType', MTLDataType, []),
]
MTLTextureReferenceType._bases_ = [MTLType]
MTLTextureReferenceType._methods_ = [
    ('textureDataType', MTLDataType, []),
    ('textureType', MTLTextureType, []),
    ('access', MTLBindingAccess, []),
    ('isDepthTexture', BOOL, []),
]
class MTLPointerType(objc.Spec): pass
MTLPointerType._bases_ = [MTLType]
MTLPointerType._methods_ = [
    ('elementStructType', MTLStructType, []),
    ('elementArrayType', MTLArrayType, []),
    ('elementType', MTLDataType, []),
    ('access', MTLBindingAccess, []),
    ('alignment', NSUInteger, []),
    ('dataSize', NSUInteger, []),
    ('elementIsArgumentBuffer', BOOL, []),
]
MTLArrayType._bases_ = [MTLType]
MTLArrayType._methods_ = [
    ('elementStructType', MTLStructType, []),
    ('elementArrayType', MTLArrayType, []),
    ('elementTextureReferenceType', MTLTextureReferenceType, []),
    ('elementPointerType', MTLPointerType, []),
    ('elementType', MTLDataType, []),
    ('arrayLength', NSUInteger, []),
    ('stride', NSUInteger, []),
    ('argumentIndexStride', NSUInteger, []),
]
MTLStructMember._bases_ = [NSObject]
MTLStructMember._methods_ = [
    ('structType', MTLStructType, []),
    ('arrayType', MTLArrayType, []),
    ('textureReferenceType', MTLTextureReferenceType, []),
    ('pointerType', MTLPointerType, []),
    ('name', NSString, []),
    ('offset', NSUInteger, []),
    ('dataType', MTLDataType, []),
    ('argumentIndex', NSUInteger, []),
]
MTLStructType._bases_ = [MTLType]
MTLStructType._methods_ = [
    ('memberByName:', MTLStructMember, [NSString]),
]
# --- MTLArgument: reflection info for one shader-bound argument slot.
MTLArgument._bases_ = [NSObject]
MTLArgument._methods_ = [
    ('name', NSString, []),
    ('type', MTLArgumentType, []),
    ('access', MTLBindingAccess, []),
    ('index', NSUInteger, []),
    ('isActive', BOOL, []),
    ('bufferAlignment', NSUInteger, []),
    ('bufferDataSize', NSUInteger, []),
    ('bufferDataType', MTLDataType, []),
    ('bufferStructType', MTLStructType, []),
    ('bufferPointerType', MTLPointerType, []),
    ('threadgroupMemoryAlignment', NSUInteger, []),
    ('threadgroupMemoryDataSize', NSUInteger, []),
    ('textureType', MTLTextureType, []),
    ('textureDataType', MTLDataType, []),
    ('isDepthTexture', BOOL, []),
    ('arrayLength', NSUInteger, []),
]
# --- MTLFunctionType: shader stage of a compiled MSL function (note value 4 is
# unused in the SDK header).
class enum_MTLFunctionType(NSUInteger, c.Enum): pass
MTLFunctionTypeVertex = enum_MTLFunctionType.define('MTLFunctionTypeVertex', 1)
MTLFunctionTypeFragment = enum_MTLFunctionType.define('MTLFunctionTypeFragment', 2)
MTLFunctionTypeKernel = enum_MTLFunctionType.define('MTLFunctionTypeKernel', 3)
MTLFunctionTypeVisible = enum_MTLFunctionType.define('MTLFunctionTypeVisible', 5)
MTLFunctionTypeIntersection = enum_MTLFunctionType.define('MTLFunctionTypeIntersection', 6)
MTLFunctionTypeMesh = enum_MTLFunctionType.define('MTLFunctionTypeMesh', 7)
MTLFunctionTypeObject = enum_MTLFunctionType.define('MTLFunctionTypeObject', 8)
MTLFunctionType: TypeAlias = enum_MTLFunctionType
# --- MTLPatchType: tessellation patch kind for post-tessellation vertex functions.
class enum_MTLPatchType(NSUInteger, c.Enum): pass
MTLPatchTypeNone = enum_MTLPatchType.define('MTLPatchTypeNone', 0)
MTLPatchTypeTriangle = enum_MTLPatchType.define('MTLPatchTypeTriangle', 1)
MTLPatchTypeQuad = enum_MTLPatchType.define('MTLPatchTypeQuad', 2)
MTLPatchType: TypeAlias = enum_MTLPatchType
# --- MTLFunctionOptions: bitmask of function-creation options.
class enum_MTLFunctionOptions(NSUInteger, c.Enum): pass
MTLFunctionOptionNone = enum_MTLFunctionOptions.define('MTLFunctionOptionNone', 0)
MTLFunctionOptionCompileToBinary = enum_MTLFunctionOptions.define('MTLFunctionOptionCompileToBinary', 1)
MTLFunctionOptionStoreFunctionInMetalScript = enum_MTLFunctionOptions.define('MTLFunctionOptionStoreFunctionInMetalScript', 2)
MTLFunctionOptions: TypeAlias = enum_MTLFunctionOptions
# --- MTLFunction: handle to a single compiled shader function.
MTLFunction._bases_ = [NSObject]
MTLFunction._methods_ = [
    ('newArgumentEncoderWithBufferIndex:', MTLArgumentEncoder, [NSUInteger], True),
    ('newArgumentEncoderWithBufferIndex:reflection:', MTLArgumentEncoder, [NSUInteger, c.POINTER[MTLArgument]], True),
    ('label', NSString, []),
    ('setLabel:', None, [NSString]),
    ('device', MTLDevice, []),
    ('functionType', MTLFunctionType, []),
    ('patchType', MTLPatchType, []),
    ('patchControlPointCount', NSInteger, []),
    ('name', NSString, []),
    ('options', MTLFunctionOptions, []),
]
# --- Stage input/output descriptors (kernel/vertex fetch layout).
class MTLStageInputOutputDescriptor(objc.Spec): pass
class MTLBufferLayoutDescriptorArray(objc.Spec): pass
class MTLBufferLayoutDescriptor(objc.Spec): pass
# MTLStepFunction: how the fetch index advances per vertex/instance/patch/thread.
class enum_MTLStepFunction(NSUInteger, c.Enum): pass
MTLStepFunctionConstant = enum_MTLStepFunction.define('MTLStepFunctionConstant', 0)
MTLStepFunctionPerVertex = enum_MTLStepFunction.define('MTLStepFunctionPerVertex', 1)
MTLStepFunctionPerInstance = enum_MTLStepFunction.define('MTLStepFunctionPerInstance', 2)
MTLStepFunctionPerPatch = enum_MTLStepFunction.define('MTLStepFunctionPerPatch', 3)
MTLStepFunctionPerPatchControlPoint = enum_MTLStepFunction.define('MTLStepFunctionPerPatchControlPoint', 4)
MTLStepFunctionThreadPositionInGridX = enum_MTLStepFunction.define('MTLStepFunctionThreadPositionInGridX', 5)
MTLStepFunctionThreadPositionInGridY = enum_MTLStepFunction.define('MTLStepFunctionThreadPositionInGridY', 6)
MTLStepFunctionThreadPositionInGridXIndexed = enum_MTLStepFunction.define('MTLStepFunctionThreadPositionInGridXIndexed', 7)
MTLStepFunctionThreadPositionInGridYIndexed = enum_MTLStepFunction.define('MTLStepFunctionThreadPositionInGridYIndexed', 8)
MTLStepFunction: TypeAlias = enum_MTLStepFunction
MTLBufferLayoutDescriptor._bases_ = [NSObject]
MTLBufferLayoutDescriptor._methods_ = [
    ('stride', NSUInteger, []),
    ('setStride:', None, [NSUInteger]),
    ('stepFunction', MTLStepFunction, []),
    ('setStepFunction:', None, [MTLStepFunction]),
    ('stepRate', NSUInteger, []),
    ('setStepRate:', None, [NSUInteger]),
]
# Indexed-subscript container of buffer layout descriptors.
MTLBufferLayoutDescriptorArray._bases_ = [NSObject]
MTLBufferLayoutDescriptorArray._methods_ = [
    ('objectAtIndexedSubscript:', MTLBufferLayoutDescriptor, [NSUInteger]),
    ('setObject:atIndexedSubscript:', None, [MTLBufferLayoutDescriptor, NSUInteger]),
]
class MTLAttributeDescriptorArray(objc.Spec): pass
class MTLAttributeDescriptor(objc.Spec): pass
class enum_MTLAttributeFormat(NSUInteger, c.Enum): pass
MTLAttributeFormatInvalid = enum_MTLAttributeFormat.define('MTLAttributeFormatInvalid', 0)
MTLAttributeFormatUChar2 = enum_MTLAttributeFormat.define('MTLAttributeFormatUChar2', 1)
MTLAttributeFormatUChar3 = enum_MTLAttributeFormat.define('MTLAttributeFormatUChar3', 2)
MTLAttributeFormatUChar4 = enum_MTLAttributeFormat.define('MTLAttributeFormatUChar4', 3)
MTLAttributeFormatChar2 = enum_MTLAttributeFormat.define('MTLAttributeFormatChar2', 4)
MTLAttributeFormatChar3 = enum_MTLAttributeFormat.define('MTLAttributeFormatChar3', 5)
MTLAttributeFormatChar4 = enum_MTLAttributeFormat.define('MTLAttributeFormatChar4', 6)
MTLAttributeFormatUChar2Normalized = enum_MTLAttributeFormat.define('MTLAttributeFormatUChar2Normalized', 7)
MTLAttributeFormatUChar3Normalized = enum_MTLAttributeFormat.define('MTLAttributeFormatUChar3Normalized', 8)
MTLAttributeFormatUChar4Normalized = enum_MTLAttributeFormat.define('MTLAttributeFormatUChar4Normalized', 9)
MTLAttributeFormatChar2Normalized = enum_MTLAttributeFormat.define('MTLAttributeFormatChar2Normalized', 10)
MTLAttributeFormatChar3Normalized = enum_MTLAttributeFormat.define('MTLAttributeFormatChar3Normalized', 11)
MTLAttributeFormatChar4Normalized = enum_MTLAttributeFormat.define('MTLAttributeFormatChar4Normalized', 12)
MTLAttributeFormatUShort2 = enum_MTLAttributeFormat.define('MTLAttributeFormatUShort2', 13)
MTLAttributeFormatUShort3 = enum_MTLAttributeFormat.define('MTLAttributeFormatUShort3', 14)
MTLAttributeFormatUShort4 = enum_MTLAttributeFormat.define('MTLAttributeFormatUShort4', 15)
MTLAttributeFormatShort2 = enum_MTLAttributeFormat.define('MTLAttributeFormatShort2', 16)
MTLAttributeFormatShort3 = enum_MTLAttributeFormat.define('MTLAttributeFormatShort3', 17)
MTLAttributeFormatShort4 = enum_MTLAttributeFormat.define('MTLAttributeFormatShort4', 18)
MTLAttributeFormatUShort2Normalized = enum_MTLAttributeFormat.define('MTLAttributeFormatUShort2Normalized', 19)
MTLAttributeFormatUShort3Normalized = enum_MTLAttributeFormat.define('MTLAttributeFormatUShort3Normalized', 20)
MTLAttributeFormatUShort4Normalized = enum_MTLAttributeFormat.define('MTLAttributeFormatUShort4Normalized', 21)
MTLAttributeFormatShort2Normalized = enum_MTLAttributeFormat.define('MTLAttributeFormatShort2Normalized', 22)
MTLAttributeFormatShort3Normalized = enum_MTLAttributeFormat.define('MTLAttributeFormatShort3Normalized', 23)
MTLAttributeFormatShort4Normalized = enum_MTLAttributeFormat.define('MTLAttributeFormatShort4Normalized', 24)
MTLAttributeFormatHalf2 = enum_MTLAttributeFormat.define('MTLAttributeFormatHalf2', 25)
MTLAttributeFormatHalf3 = enum_MTLAttributeFormat.define('MTLAttributeFormatHalf3', 26)
MTLAttributeFormatHalf4 = enum_MTLAttributeFormat.define('MTLAttributeFormatHalf4', 27)
MTLAttributeFormatFloat = enum_MTLAttributeFormat.define('MTLAttributeFormatFloat', 28)
MTLAttributeFormatFloat2 = enum_MTLAttributeFormat.define('MTLAttributeFormatFloat2', 29)
MTLAttributeFormatFloat3 = enum_MTLAttributeFormat.define('MTLAttributeFormatFloat3', 30)
MTLAttributeFormatFloat4 = enum_MTLAttributeFormat.define('MTLAttributeFormatFloat4', 31)
MTLAttributeFormatInt = enum_MTLAttributeFormat.define('MTLAttributeFormatInt', 32)
MTLAttributeFormatInt2 = enum_MTLAttributeFormat.define('MTLAttributeFormatInt2', 33)
MTLAttributeFormatInt3 = enum_MTLAttributeFormat.define('MTLAttributeFormatInt3', 34)
MTLAttributeFormatInt4 = enum_MTLAttributeFormat.define('MTLAttributeFormatInt4', 35)
MTLAttributeFormatUInt = enum_MTLAttributeFormat.define('MTLAttributeFormatUInt', 36)
MTLAttributeFormatUInt2 = enum_MTLAttributeFormat.define('MTLAttributeFormatUInt2', 37)
MTLAttributeFormatUInt3 = enum_MTLAttributeFormat.define('MTLAttributeFormatUInt3', 38)
MTLAttributeFormatUInt4 = enum_MTLAttributeFormat.define('MTLAttributeFormatUInt4', 39)
MTLAttributeFormatInt1010102Normalized = enum_MTLAttributeFormat.define('MTLAttributeFormatInt1010102Normalized', 40)
MTLAttributeFormatUInt1010102Normalized = enum_MTLAttributeFormat.define('MTLAttributeFormatUInt1010102Normalized', 41)
MTLAttributeFormatUChar4Normalized_BGRA = enum_MTLAttributeFormat.define('MTLAttributeFormatUChar4Normalized_BGRA', 42)
MTLAttributeFormatUChar = enum_MTLAttributeFormat.define('MTLAttributeFormatUChar', 45)
MTLAttributeFormatChar = enum_MTLAttributeFormat.define('MTLAttributeFormatChar', 46)
MTLAttributeFormatUCharNormalized = enum_MTLAttributeFormat.define('MTLAttributeFormatUCharNormalized', 47)
MTLAttributeFormatCharNormalized = enum_MTLAttributeFormat.define('MTLAttributeFormatCharNormalized', 48)
MTLAttributeFormatUShort = enum_MTLAttributeFormat.define('MTLAttributeFormatUShort', 49)
MTLAttributeFormatShort = enum_MTLAttributeFormat.define('MTLAttributeFormatShort', 50)
MTLAttributeFormatUShortNormalized = enum_MTLAttributeFormat.define('MTLAttributeFormatUShortNormalized', 51)
MTLAttributeFormatShortNormalized = enum_MTLAttributeFormat.define('MTLAttributeFormatShortNormalized', 52)
MTLAttributeFormatHalf = enum_MTLAttributeFormat.define('MTLAttributeFormatHalf', 53)
MTLAttributeFormatFloatRG11B10 = enum_MTLAttributeFormat.define('MTLAttributeFormatFloatRG11B10', 54)
MTLAttributeFormatFloatRGB9E5 = enum_MTLAttributeFormat.define('MTLAttributeFormatFloatRGB9E5', 55)
MTLAttributeFormat: TypeAlias = enum_MTLAttributeFormat
MTLAttributeDescriptor._bases_ = [NSObject]
MTLAttributeDescriptor._methods_ = [
('format', MTLAttributeFormat, []),
('setFormat:', None, [MTLAttributeFormat]),
('offset', NSUInteger, []),
('setOffset:', None, [NSUInteger]),
('bufferIndex', NSUInteger, []),
('setBufferIndex:', None, [NSUInteger]),
]
MTLAttributeDescriptorArray._bases_ = [NSObject]
MTLAttributeDescriptorArray._methods_ = [
('objectAtIndexedSubscript:', MTLAttributeDescriptor, [NSUInteger]),
('setObject:atIndexedSubscript:', None, [MTLAttributeDescriptor, NSUInteger]),
]
class enum_MTLIndexType(NSUInteger, c.Enum): pass
MTLIndexTypeUInt16 = enum_MTLIndexType.define('MTLIndexTypeUInt16', 0)
MTLIndexTypeUInt32 = enum_MTLIndexType.define('MTLIndexTypeUInt32', 1)
MTLIndexType: TypeAlias = enum_MTLIndexType
MTLStageInputOutputDescriptor._bases_ = [NSObject]
MTLStageInputOutputDescriptor._methods_ = [
('reset', None, []),
('layouts', MTLBufferLayoutDescriptorArray, []),
('attributes', MTLAttributeDescriptorArray, []),
('indexType', MTLIndexType, []),
('setIndexType:', None, [MTLIndexType]),
('indexBufferIndex', NSUInteger, []),
('setIndexBufferIndex:', None, [NSUInteger]),
]
MTLStageInputOutputDescriptor._classmethods_ = [
('stageInputOutputDescriptor', MTLStageInputOutputDescriptor, []),
]
# --- Pipeline buffer descriptors and linked functions -----------------------
class MTLPipelineBufferDescriptorArray(objc.Spec): pass
class MTLPipelineBufferDescriptor(objc.Spec): pass
# Mutability hint for pipeline buffer bindings.
class enum_MTLMutability(NSUInteger, c.Enum): pass
MTLMutabilityDefault = enum_MTLMutability.define('MTLMutabilityDefault', 0)
MTLMutabilityMutable = enum_MTLMutability.define('MTLMutabilityMutable', 1)
MTLMutabilityImmutable = enum_MTLMutability.define('MTLMutabilityImmutable', 2)
MTLMutability: TypeAlias = enum_MTLMutability
MTLPipelineBufferDescriptor._bases_ = [NSObject]
MTLPipelineBufferDescriptor._methods_ = [
  ('mutability', MTLMutability, []),
  ('setMutability:', None, [MTLMutability]),
]
# Indexed-subscript container of MTLPipelineBufferDescriptor.
MTLPipelineBufferDescriptorArray._bases_ = [NSObject]
MTLPipelineBufferDescriptorArray._methods_ = [
  ('objectAtIndexedSubscript:', MTLPipelineBufferDescriptor, [NSUInteger]),
  ('setObject:atIndexedSubscript:', None, [MTLPipelineBufferDescriptor, NSUInteger]),
]
# Only the convenience constructor is bound here; instance accessors are not.
class MTLLinkedFunctions(objc.Spec): pass
MTLLinkedFunctions._bases_ = [NSObject]
MTLLinkedFunctions._classmethods_ = [
  ('linkedFunctions', MTLLinkedFunctions, []),
]
# --- MTLComputePipelineDescriptor -------------------------------------------
# Method table for the compute pipeline descriptor (class itself is declared
# earlier in this file). Tuples are (selector, return type, [argument types]).
MTLComputePipelineDescriptor._bases_ = [NSObject]
MTLComputePipelineDescriptor._methods_ = [
  ('reset', None, []),
  ('label', NSString, []),
  ('setLabel:', None, [NSString]),
  ('computeFunction', MTLFunction, []),
  ('setComputeFunction:', None, [MTLFunction]),
  ('threadGroupSizeIsMultipleOfThreadExecutionWidth', BOOL, []),
  ('setThreadGroupSizeIsMultipleOfThreadExecutionWidth:', None, [BOOL]),
  ('maxTotalThreadsPerThreadgroup', NSUInteger, []),
  ('setMaxTotalThreadsPerThreadgroup:', None, [NSUInteger]),
  ('stageInputDescriptor', MTLStageInputOutputDescriptor, []),
  ('setStageInputDescriptor:', None, [MTLStageInputOutputDescriptor]),
  ('buffers', MTLPipelineBufferDescriptorArray, []),
  ('supportIndirectCommandBuffers', BOOL, []),
  ('setSupportIndirectCommandBuffers:', None, [BOOL]),
  ('linkedFunctions', MTLLinkedFunctions, []),
  ('setLinkedFunctions:', None, [MTLLinkedFunctions]),
  ('supportAddingBinaryFunctions', BOOL, []),
  ('setSupportAddingBinaryFunctions:', None, [BOOL]),
  ('maxCallStackDepth', NSUInteger, []),
  ('setMaxCallStackDepth:', None, [NSUInteger]),
]
# --- MTLComputePipelineState and supporting types ---------------------------
class MTLFunctionHandle(objc.Spec): pass
class MTLVisibleFunctionTableDescriptor(objc.Spec): pass
class MTLIntersectionFunctionTableDescriptor(objc.Spec): pass
# 8-byte opaque GPU resource handle; field offsets given via Annotated.
@c.record
class struct_MTLResourceID(c.Struct):
  SIZE = 8
  _impl: Annotated[uint64_t, 0]
MTLResourceID: TypeAlias = struct_MTLResourceID
MTLComputePipelineState._bases_ = [NSObject]
MTLComputePipelineState._methods_ = [
  ('imageblockMemoryLengthForDimensions:', NSUInteger, [MTLSize]),
  ('functionHandleWithFunction:', MTLFunctionHandle, [MTLFunction]),
  # NOTE(review): the trailing True on the two entries below presumably marks
  # the 'new...' selectors as returning a +1 retained object — confirm
  # against objc.Spec's method-tuple convention.
  ('newVisibleFunctionTableWithDescriptor:', MTLVisibleFunctionTable, [MTLVisibleFunctionTableDescriptor], True),
  ('newIntersectionFunctionTableWithDescriptor:', MTLIntersectionFunctionTable, [MTLIntersectionFunctionTableDescriptor], True),
  ('label', NSString, []),
  ('device', MTLDevice, []),
  ('maxTotalThreadsPerThreadgroup', NSUInteger, []),
  ('threadExecutionWidth', NSUInteger, []),
  ('staticThreadgroupMemoryLength', NSUInteger, []),
  ('supportIndirectCommandBuffers', BOOL, []),
  ('gpuResourceID', MTLResourceID, []),
]
# --- Command submission forward declarations and blit options ---------------
class MTLCommandQueue(objc.Spec): pass
class MTLCommandBuffer(objc.Spec): pass
class MTLDrawable(objc.Spec): pass
# Core Foundation time interval (seconds, C double).
CFTimeInterval: TypeAlias = Annotated[float, ctypes.c_double]
class MTLBlitCommandEncoder(objc.Spec): pass
# Bitmask-style options for blit copy operations.
class enum_MTLBlitOption(NSUInteger, c.Enum): pass
MTLBlitOptionNone = enum_MTLBlitOption.define('MTLBlitOptionNone', 0)
MTLBlitOptionDepthFromDepthStencil = enum_MTLBlitOption.define('MTLBlitOptionDepthFromDepthStencil', 1)
MTLBlitOptionStencilFromDepthStencil = enum_MTLBlitOption.define('MTLBlitOptionStencilFromDepthStencil', 2)
MTLBlitOptionRowLinearPVRTC = enum_MTLBlitOption.define('MTLBlitOptionRowLinearPVRTC', 4)
MTLBlitOption: TypeAlias = enum_MTLBlitOption
# --- MTLBlitCommandEncoder ---------------------------------------------------
# Method table for blit (copy/fill/mipmap) encoding. Inherits the common
# encoder selectors from MTLCommandEncoder.
MTLBlitCommandEncoder._bases_ = [MTLCommandEncoder]
MTLBlitCommandEncoder._methods_ = [
  ('synchronizeResource:', None, [MTLResource]),
  ('synchronizeTexture:slice:level:', None, [MTLTexture, NSUInteger, NSUInteger]),
  ('copyFromTexture:sourceSlice:sourceLevel:sourceOrigin:sourceSize:toTexture:destinationSlice:destinationLevel:destinationOrigin:', None, [MTLTexture, NSUInteger, NSUInteger, MTLOrigin, MTLSize, MTLTexture, NSUInteger, NSUInteger, MTLOrigin]),
  ('copyFromBuffer:sourceOffset:sourceBytesPerRow:sourceBytesPerImage:sourceSize:toTexture:destinationSlice:destinationLevel:destinationOrigin:', None, [MTLBuffer, NSUInteger, NSUInteger, NSUInteger, MTLSize, MTLTexture, NSUInteger, NSUInteger, MTLOrigin]),
  ('copyFromBuffer:sourceOffset:sourceBytesPerRow:sourceBytesPerImage:sourceSize:toTexture:destinationSlice:destinationLevel:destinationOrigin:options:', None, [MTLBuffer, NSUInteger, NSUInteger, NSUInteger, MTLSize, MTLTexture, NSUInteger, NSUInteger, MTLOrigin, MTLBlitOption]),
  ('copyFromTexture:sourceSlice:sourceLevel:sourceOrigin:sourceSize:toBuffer:destinationOffset:destinationBytesPerRow:destinationBytesPerImage:', None, [MTLTexture, NSUInteger, NSUInteger, MTLOrigin, MTLSize, MTLBuffer, NSUInteger, NSUInteger, NSUInteger]),
  ('copyFromTexture:sourceSlice:sourceLevel:sourceOrigin:sourceSize:toBuffer:destinationOffset:destinationBytesPerRow:destinationBytesPerImage:options:', None, [MTLTexture, NSUInteger, NSUInteger, MTLOrigin, MTLSize, MTLBuffer, NSUInteger, NSUInteger, NSUInteger, MTLBlitOption]),
  ('generateMipmapsForTexture:', None, [MTLTexture]),
  ('fillBuffer:range:value:', None, [MTLBuffer, NSRange, uint8_t]),
  ('copyFromTexture:sourceSlice:sourceLevel:toTexture:destinationSlice:destinationLevel:sliceCount:levelCount:', None, [MTLTexture, NSUInteger, NSUInteger, MTLTexture, NSUInteger, NSUInteger, NSUInteger, NSUInteger]),
  ('copyFromTexture:toTexture:', None, [MTLTexture, MTLTexture]),
  ('copyFromBuffer:sourceOffset:toBuffer:destinationOffset:size:', None, [MTLBuffer, NSUInteger, MTLBuffer, NSUInteger, NSUInteger]),
  ('updateFence:', None, [MTLFence]),
  ('waitForFence:', None, [MTLFence]),
  ('getTextureAccessCounters:region:mipLevel:slice:resetCounters:countersBuffer:countersBufferOffset:', None, [MTLTexture, MTLRegion, NSUInteger, NSUInteger, BOOL, MTLBuffer, NSUInteger]),
  ('resetTextureAccessCounters:region:mipLevel:slice:', None, [MTLTexture, MTLRegion, NSUInteger, NSUInteger]),
  ('optimizeContentsForGPUAccess:', None, [MTLTexture]),
  ('optimizeContentsForGPUAccess:slice:level:', None, [MTLTexture, NSUInteger, NSUInteger]),
  ('optimizeContentsForCPUAccess:', None, [MTLTexture]),
  ('optimizeContentsForCPUAccess:slice:level:', None, [MTLTexture, NSUInteger, NSUInteger]),
  ('resetCommandsInBuffer:withRange:', None, [MTLIndirectCommandBuffer, NSRange]),
  ('copyIndirectCommandBuffer:sourceRange:destination:destinationIndex:', None, [MTLIndirectCommandBuffer, NSRange, MTLIndirectCommandBuffer, NSUInteger]),
  ('optimizeIndirectCommandBuffer:withRange:', None, [MTLIndirectCommandBuffer, NSRange]),
  ('sampleCountersInBuffer:atSampleIndex:withBarrier:', None, [MTLCounterSampleBuffer, NSUInteger, BOOL]),
  ('resolveCounters:inRange:destinationBuffer:destinationOffset:', None, [MTLCounterSampleBuffer, NSRange, MTLBuffer, NSUInteger]),
]
# --- Render pass: structs, load/store actions, color attachments ------------
class MTLRenderCommandEncoder(objc.Spec): pass
class MTLRenderPassDescriptor(objc.Spec): pass
# 8-byte {float x, y} sample position; Annotated second argument is the byte offset.
@c.record
class MTLSamplePosition(c.Struct):
  SIZE = 8
  x: Annotated[Annotated[float, ctypes.c_float], 0]
  y: Annotated[Annotated[float, ctypes.c_float], 4]
class MTLRenderPassColorAttachmentDescriptorArray(objc.Spec): pass
class MTLRenderPassColorAttachmentDescriptor(objc.Spec): pass
# 32-byte RGBA clear color, four C doubles.
@c.record
class MTLClearColor(c.Struct):
  SIZE = 32
  red: Annotated[Annotated[float, ctypes.c_double], 0]
  green: Annotated[Annotated[float, ctypes.c_double], 8]
  blue: Annotated[Annotated[float, ctypes.c_double], 16]
  alpha: Annotated[Annotated[float, ctypes.c_double], 24]
class MTLRenderPassAttachmentDescriptor(objc.Spec): pass
# What happens to attachment contents at the start of a pass.
class enum_MTLLoadAction(NSUInteger, c.Enum): pass
MTLLoadActionDontCare = enum_MTLLoadAction.define('MTLLoadActionDontCare', 0)
MTLLoadActionLoad = enum_MTLLoadAction.define('MTLLoadActionLoad', 1)
MTLLoadActionClear = enum_MTLLoadAction.define('MTLLoadActionClear', 2)
MTLLoadAction: TypeAlias = enum_MTLLoadAction
# What happens to attachment contents at the end of a pass.
class enum_MTLStoreAction(NSUInteger, c.Enum): pass
MTLStoreActionDontCare = enum_MTLStoreAction.define('MTLStoreActionDontCare', 0)
MTLStoreActionStore = enum_MTLStoreAction.define('MTLStoreActionStore', 1)
MTLStoreActionMultisampleResolve = enum_MTLStoreAction.define('MTLStoreActionMultisampleResolve', 2)
MTLStoreActionStoreAndMultisampleResolve = enum_MTLStoreAction.define('MTLStoreActionStoreAndMultisampleResolve', 3)
MTLStoreActionUnknown = enum_MTLStoreAction.define('MTLStoreActionUnknown', 4)
MTLStoreActionCustomSampleDepthStore = enum_MTLStoreAction.define('MTLStoreActionCustomSampleDepthStore', 5)
MTLStoreAction: TypeAlias = enum_MTLStoreAction
class enum_MTLStoreActionOptions(NSUInteger, c.Enum): pass
MTLStoreActionOptionNone = enum_MTLStoreActionOptions.define('MTLStoreActionOptionNone', 0)
MTLStoreActionOptionCustomSamplePositions = enum_MTLStoreActionOptions.define('MTLStoreActionOptionCustomSamplePositions', 1)
MTLStoreActionOptions: TypeAlias = enum_MTLStoreActionOptions
# Base attachment descriptor shared by color/depth/stencil variants.
MTLRenderPassAttachmentDescriptor._bases_ = [NSObject]
MTLRenderPassAttachmentDescriptor._methods_ = [
  ('texture', MTLTexture, []),
  ('setTexture:', None, [MTLTexture]),
  ('level', NSUInteger, []),
  ('setLevel:', None, [NSUInteger]),
  ('slice', NSUInteger, []),
  ('setSlice:', None, [NSUInteger]),
  ('depthPlane', NSUInteger, []),
  ('setDepthPlane:', None, [NSUInteger]),
  ('resolveTexture', MTLTexture, []),
  ('setResolveTexture:', None, [MTLTexture]),
  ('resolveLevel', NSUInteger, []),
  ('setResolveLevel:', None, [NSUInteger]),
  ('resolveSlice', NSUInteger, []),
  ('setResolveSlice:', None, [NSUInteger]),
  ('resolveDepthPlane', NSUInteger, []),
  ('setResolveDepthPlane:', None, [NSUInteger]),
  ('loadAction', MTLLoadAction, []),
  ('setLoadAction:', None, [MTLLoadAction]),
  ('storeAction', MTLStoreAction, []),
  ('setStoreAction:', None, [MTLStoreAction]),
  ('storeActionOptions', MTLStoreActionOptions, []),
  ('setStoreActionOptions:', None, [MTLStoreActionOptions]),
]
# Color variant only adds the clear color accessors.
MTLRenderPassColorAttachmentDescriptor._bases_ = [MTLRenderPassAttachmentDescriptor]
MTLRenderPassColorAttachmentDescriptor._methods_ = [
  ('clearColor', MTLClearColor, []),
  ('setClearColor:', None, [MTLClearColor]),
]
MTLRenderPassColorAttachmentDescriptorArray._bases_ = [NSObject]
MTLRenderPassColorAttachmentDescriptorArray._methods_ = [
  ('objectAtIndexedSubscript:', MTLRenderPassColorAttachmentDescriptor, [NSUInteger]),
  ('setObject:atIndexedSubscript:', None, [MTLRenderPassColorAttachmentDescriptor, NSUInteger]),
]
# --- Render pass: depth and stencil attachments -----------------------------
class MTLRenderPassDepthAttachmentDescriptor(objc.Spec): pass
# Filter used when resolving multisampled depth.
class enum_MTLMultisampleDepthResolveFilter(NSUInteger, c.Enum): pass
MTLMultisampleDepthResolveFilterSample0 = enum_MTLMultisampleDepthResolveFilter.define('MTLMultisampleDepthResolveFilterSample0', 0)
MTLMultisampleDepthResolveFilterMin = enum_MTLMultisampleDepthResolveFilter.define('MTLMultisampleDepthResolveFilterMin', 1)
MTLMultisampleDepthResolveFilterMax = enum_MTLMultisampleDepthResolveFilter.define('MTLMultisampleDepthResolveFilterMax', 2)
MTLMultisampleDepthResolveFilter: TypeAlias = enum_MTLMultisampleDepthResolveFilter
MTLRenderPassDepthAttachmentDescriptor._bases_ = [MTLRenderPassAttachmentDescriptor]
MTLRenderPassDepthAttachmentDescriptor._methods_ = [
  ('clearDepth', Annotated[float, ctypes.c_double], []),
  ('setClearDepth:', None, [Annotated[float, ctypes.c_double]]),
  ('depthResolveFilter', MTLMultisampleDepthResolveFilter, []),
  ('setDepthResolveFilter:', None, [MTLMultisampleDepthResolveFilter]),
]
class MTLRenderPassStencilAttachmentDescriptor(objc.Spec): pass
# Filter used when resolving multisampled stencil.
class enum_MTLMultisampleStencilResolveFilter(NSUInteger, c.Enum): pass
MTLMultisampleStencilResolveFilterSample0 = enum_MTLMultisampleStencilResolveFilter.define('MTLMultisampleStencilResolveFilterSample0', 0)
MTLMultisampleStencilResolveFilterDepthResolvedSample = enum_MTLMultisampleStencilResolveFilter.define('MTLMultisampleStencilResolveFilterDepthResolvedSample', 1)
MTLMultisampleStencilResolveFilter: TypeAlias = enum_MTLMultisampleStencilResolveFilter
MTLRenderPassStencilAttachmentDescriptor._bases_ = [MTLRenderPassAttachmentDescriptor]
MTLRenderPassStencilAttachmentDescriptor._methods_ = [
  ('clearStencil', uint32_t, []),
  ('setClearStencil:', None, [uint32_t]),
  ('stencilResolveFilter', MTLMultisampleStencilResolveFilter, []),
  ('setStencilResolveFilter:', None, [MTLMultisampleStencilResolveFilter]),
]
# --- MTLRenderPassDescriptor and its GPU-counter sample attachments ---------
class MTLRasterizationRateMap(objc.Spec): pass
class MTLRenderPassSampleBufferAttachmentDescriptorArray(objc.Spec): pass
class MTLRenderPassSampleBufferAttachmentDescriptor(objc.Spec): pass
MTLRenderPassSampleBufferAttachmentDescriptor._bases_ = [NSObject]
MTLRenderPassSampleBufferAttachmentDescriptor._methods_ = [
  ('sampleBuffer', MTLCounterSampleBuffer, []),
  ('setSampleBuffer:', None, [MTLCounterSampleBuffer]),
  ('startOfVertexSampleIndex', NSUInteger, []),
  ('setStartOfVertexSampleIndex:', None, [NSUInteger]),
  ('endOfVertexSampleIndex', NSUInteger, []),
  ('setEndOfVertexSampleIndex:', None, [NSUInteger]),
  ('startOfFragmentSampleIndex', NSUInteger, []),
  ('setStartOfFragmentSampleIndex:', None, [NSUInteger]),
  ('endOfFragmentSampleIndex', NSUInteger, []),
  ('setEndOfFragmentSampleIndex:', None, [NSUInteger]),
]
MTLRenderPassSampleBufferAttachmentDescriptorArray._bases_ = [NSObject]
MTLRenderPassSampleBufferAttachmentDescriptorArray._methods_ = [
  ('objectAtIndexedSubscript:', MTLRenderPassSampleBufferAttachmentDescriptor, [NSUInteger]),
  ('setObject:atIndexedSubscript:', None, [MTLRenderPassSampleBufferAttachmentDescriptor, NSUInteger]),
]
MTLRenderPassDescriptor._bases_ = [NSObject]
MTLRenderPassDescriptor._methods_ = [
  # Programmable sample positions are passed as a C array pointer + count.
  ('setSamplePositions:count:', None, [c.POINTER[MTLSamplePosition], NSUInteger]),
  ('getSamplePositions:count:', NSUInteger, [c.POINTER[MTLSamplePosition], NSUInteger]),
  ('colorAttachments', MTLRenderPassColorAttachmentDescriptorArray, []),
  ('depthAttachment', MTLRenderPassDepthAttachmentDescriptor, []),
  ('setDepthAttachment:', None, [MTLRenderPassDepthAttachmentDescriptor]),
  ('stencilAttachment', MTLRenderPassStencilAttachmentDescriptor, []),
  ('setStencilAttachment:', None, [MTLRenderPassStencilAttachmentDescriptor]),
  ('visibilityResultBuffer', MTLBuffer, []),
  ('setVisibilityResultBuffer:', None, [MTLBuffer]),
  ('renderTargetArrayLength', NSUInteger, []),
  ('setRenderTargetArrayLength:', None, [NSUInteger]),
  ('imageblockSampleLength', NSUInteger, []),
  ('setImageblockSampleLength:', None, [NSUInteger]),
  ('threadgroupMemoryLength', NSUInteger, []),
  ('setThreadgroupMemoryLength:', None, [NSUInteger]),
  ('tileWidth', NSUInteger, []),
  ('setTileWidth:', None, [NSUInteger]),
  ('tileHeight', NSUInteger, []),
  ('setTileHeight:', None, [NSUInteger]),
  ('defaultRasterSampleCount', NSUInteger, []),
  ('setDefaultRasterSampleCount:', None, [NSUInteger]),
  ('renderTargetWidth', NSUInteger, []),
  ('setRenderTargetWidth:', None, [NSUInteger]),
  ('renderTargetHeight', NSUInteger, []),
  ('setRenderTargetHeight:', None, [NSUInteger]),
  ('rasterizationRateMap', MTLRasterizationRateMap, []),
  ('setRasterizationRateMap:', None, [MTLRasterizationRateMap]),
  ('sampleBufferAttachments', MTLRenderPassSampleBufferAttachmentDescriptorArray, []),
]
MTLRenderPassDescriptor._classmethods_ = [
  ('renderPassDescriptor', MTLRenderPassDescriptor, []),
]
# --- Compute pass and blit pass descriptors ---------------------------------
# Both follow the same pattern as the render pass: a pass descriptor plus an
# array of per-encoder counter-sample-buffer attachment descriptors.
class MTLComputePassDescriptor(objc.Spec): pass
class MTLComputePassSampleBufferAttachmentDescriptorArray(objc.Spec): pass
class MTLComputePassSampleBufferAttachmentDescriptor(objc.Spec): pass
MTLComputePassSampleBufferAttachmentDescriptor._bases_ = [NSObject]
MTLComputePassSampleBufferAttachmentDescriptor._methods_ = [
  ('sampleBuffer', MTLCounterSampleBuffer, []),
  ('setSampleBuffer:', None, [MTLCounterSampleBuffer]),
  ('startOfEncoderSampleIndex', NSUInteger, []),
  ('setStartOfEncoderSampleIndex:', None, [NSUInteger]),
  ('endOfEncoderSampleIndex', NSUInteger, []),
  ('setEndOfEncoderSampleIndex:', None, [NSUInteger]),
]
MTLComputePassSampleBufferAttachmentDescriptorArray._bases_ = [NSObject]
MTLComputePassSampleBufferAttachmentDescriptorArray._methods_ = [
  ('objectAtIndexedSubscript:', MTLComputePassSampleBufferAttachmentDescriptor, [NSUInteger]),
  ('setObject:atIndexedSubscript:', None, [MTLComputePassSampleBufferAttachmentDescriptor, NSUInteger]),
]
MTLComputePassDescriptor._bases_ = [NSObject]
MTLComputePassDescriptor._methods_ = [
  ('dispatchType', MTLDispatchType, []),
  ('setDispatchType:', None, [MTLDispatchType]),
  ('sampleBufferAttachments', MTLComputePassSampleBufferAttachmentDescriptorArray, []),
]
MTLComputePassDescriptor._classmethods_ = [
  ('computePassDescriptor', MTLComputePassDescriptor, []),
]
class MTLBlitPassDescriptor(objc.Spec): pass
class MTLBlitPassSampleBufferAttachmentDescriptorArray(objc.Spec): pass
class MTLBlitPassSampleBufferAttachmentDescriptor(objc.Spec): pass
MTLBlitPassSampleBufferAttachmentDescriptor._bases_ = [NSObject]
MTLBlitPassSampleBufferAttachmentDescriptor._methods_ = [
  ('sampleBuffer', MTLCounterSampleBuffer, []),
  ('setSampleBuffer:', None, [MTLCounterSampleBuffer]),
  ('startOfEncoderSampleIndex', NSUInteger, []),
  ('setStartOfEncoderSampleIndex:', None, [NSUInteger]),
  ('endOfEncoderSampleIndex', NSUInteger, []),
  ('setEndOfEncoderSampleIndex:', None, [NSUInteger]),
]
MTLBlitPassSampleBufferAttachmentDescriptorArray._bases_ = [NSObject]
MTLBlitPassSampleBufferAttachmentDescriptorArray._methods_ = [
  ('objectAtIndexedSubscript:', MTLBlitPassSampleBufferAttachmentDescriptor, [NSUInteger]),
  ('setObject:atIndexedSubscript:', None, [MTLBlitPassSampleBufferAttachmentDescriptor, NSUInteger]),
]
MTLBlitPassDescriptor._bases_ = [NSObject]
MTLBlitPassDescriptor._methods_ = [
  ('sampleBufferAttachments', MTLBlitPassSampleBufferAttachmentDescriptorArray, []),
]
MTLBlitPassDescriptor._classmethods_ = [
  ('blitPassDescriptor', MTLBlitPassDescriptor, []),
]
# --- Resource state (sparse texture) encoder and pass descriptor ------------
class MTLEvent(objc.Spec): pass
class MTLParallelRenderCommandEncoder(objc.Spec): pass
class MTLResourceStateCommandEncoder(objc.Spec): pass
# Whether a sparse texture region is being mapped or unmapped.
class enum_MTLSparseTextureMappingMode(NSUInteger, c.Enum): pass
MTLSparseTextureMappingModeMap = enum_MTLSparseTextureMappingMode.define('MTLSparseTextureMappingModeMap', 0)
MTLSparseTextureMappingModeUnmap = enum_MTLSparseTextureMappingMode.define('MTLSparseTextureMappingModeUnmap', 1)
MTLSparseTextureMappingMode: TypeAlias = enum_MTLSparseTextureMappingMode
MTLResourceStateCommandEncoder._bases_ = [MTLCommandEncoder]
MTLResourceStateCommandEncoder._methods_ = [
  # Batch form takes parallel C arrays of regions/mip levels/slices plus a count.
  ('updateTextureMappings:mode:regions:mipLevels:slices:numRegions:', None, [MTLTexture, MTLSparseTextureMappingMode, c.POINTER[MTLRegion], c.POINTER[NSUInteger], c.POINTER[NSUInteger], NSUInteger]),
  ('updateTextureMapping:mode:region:mipLevel:slice:', None, [MTLTexture, MTLSparseTextureMappingMode, MTLRegion, NSUInteger, NSUInteger]),
  ('updateTextureMapping:mode:indirectBuffer:indirectBufferOffset:', None, [MTLTexture, MTLSparseTextureMappingMode, MTLBuffer, NSUInteger]),
  ('updateFence:', None, [MTLFence]),
  ('waitForFence:', None, [MTLFence]),
  ('moveTextureMappingsFromTexture:sourceSlice:sourceLevel:sourceOrigin:sourceSize:toTexture:destinationSlice:destinationLevel:destinationOrigin:', None, [MTLTexture, NSUInteger, NSUInteger, MTLOrigin, MTLSize, MTLTexture, NSUInteger, NSUInteger, MTLOrigin]),
]
class MTLResourceStatePassDescriptor(objc.Spec): pass
class MTLResourceStatePassSampleBufferAttachmentDescriptorArray(objc.Spec): pass
class MTLResourceStatePassSampleBufferAttachmentDescriptor(objc.Spec): pass
MTLResourceStatePassSampleBufferAttachmentDescriptor._bases_ = [NSObject]
MTLResourceStatePassSampleBufferAttachmentDescriptor._methods_ = [
  ('sampleBuffer', MTLCounterSampleBuffer, []),
  ('setSampleBuffer:', None, [MTLCounterSampleBuffer]),
  ('startOfEncoderSampleIndex', NSUInteger, []),
  ('setStartOfEncoderSampleIndex:', None, [NSUInteger]),
  ('endOfEncoderSampleIndex', NSUInteger, []),
  ('setEndOfEncoderSampleIndex:', None, [NSUInteger]),
]
MTLResourceStatePassSampleBufferAttachmentDescriptorArray._bases_ = [NSObject]
MTLResourceStatePassSampleBufferAttachmentDescriptorArray._methods_ = [
  ('objectAtIndexedSubscript:', MTLResourceStatePassSampleBufferAttachmentDescriptor, [NSUInteger]),
  ('setObject:atIndexedSubscript:', None, [MTLResourceStatePassSampleBufferAttachmentDescriptor, NSUInteger]),
]
MTLResourceStatePassDescriptor._bases_ = [NSObject]
MTLResourceStatePassDescriptor._methods_ = [
  ('sampleBufferAttachments', MTLResourceStatePassSampleBufferAttachmentDescriptorArray, []),
]
MTLResourceStatePassDescriptor._classmethods_ = [
  ('resourceStatePassDescriptor', MTLResourceStatePassDescriptor, []),
]
# --- Acceleration structure pass descriptor ---------------------------------
# Same pass-descriptor + sample-buffer-attachment pattern as the other passes.
class MTLAccelerationStructureCommandEncoder(objc.Spec): pass
class MTLAccelerationStructurePassDescriptor(objc.Spec): pass
class MTLAccelerationStructurePassSampleBufferAttachmentDescriptorArray(objc.Spec): pass
class MTLAccelerationStructurePassSampleBufferAttachmentDescriptor(objc.Spec): pass
MTLAccelerationStructurePassSampleBufferAttachmentDescriptor._bases_ = [NSObject]
MTLAccelerationStructurePassSampleBufferAttachmentDescriptor._methods_ = [
  ('sampleBuffer', MTLCounterSampleBuffer, []),
  ('setSampleBuffer:', None, [MTLCounterSampleBuffer]),
  ('startOfEncoderSampleIndex', NSUInteger, []),
  ('setStartOfEncoderSampleIndex:', None, [NSUInteger]),
  ('endOfEncoderSampleIndex', NSUInteger, []),
  ('setEndOfEncoderSampleIndex:', None, [NSUInteger]),
]
MTLAccelerationStructurePassSampleBufferAttachmentDescriptorArray._bases_ = [NSObject]
MTLAccelerationStructurePassSampleBufferAttachmentDescriptorArray._methods_ = [
  ('objectAtIndexedSubscript:', MTLAccelerationStructurePassSampleBufferAttachmentDescriptor, [NSUInteger]),
  ('setObject:atIndexedSubscript:', None, [MTLAccelerationStructurePassSampleBufferAttachmentDescriptor, NSUInteger]),
]
MTLAccelerationStructurePassDescriptor._bases_ = [NSObject]
MTLAccelerationStructurePassDescriptor._methods_ = [
  ('sampleBufferAttachments', MTLAccelerationStructurePassSampleBufferAttachmentDescriptorArray, []),
]
MTLAccelerationStructurePassDescriptor._classmethods_ = [
  ('accelerationStructurePassDescriptor', MTLAccelerationStructurePassDescriptor, []),
]
# --- Command buffer error options, status, and NSError ----------------------
class enum_MTLCommandBufferErrorOption(NSUInteger, c.Enum): pass
MTLCommandBufferErrorOptionNone = enum_MTLCommandBufferErrorOption.define('MTLCommandBufferErrorOptionNone', 0)
MTLCommandBufferErrorOptionEncoderExecutionStatus = enum_MTLCommandBufferErrorOption.define('MTLCommandBufferErrorOptionEncoderExecutionStatus', 1)
MTLCommandBufferErrorOption: TypeAlias = enum_MTLCommandBufferErrorOption
class MTLLogContainer(objc.Spec): pass
# Lifecycle states of a command buffer, in submission order.
class enum_MTLCommandBufferStatus(NSUInteger, c.Enum): pass
MTLCommandBufferStatusNotEnqueued = enum_MTLCommandBufferStatus.define('MTLCommandBufferStatusNotEnqueued', 0)
MTLCommandBufferStatusEnqueued = enum_MTLCommandBufferStatus.define('MTLCommandBufferStatusEnqueued', 1)
MTLCommandBufferStatusCommitted = enum_MTLCommandBufferStatus.define('MTLCommandBufferStatusCommitted', 2)
MTLCommandBufferStatusScheduled = enum_MTLCommandBufferStatus.define('MTLCommandBufferStatusScheduled', 3)
MTLCommandBufferStatusCompleted = enum_MTLCommandBufferStatus.define('MTLCommandBufferStatusCompleted', 4)
MTLCommandBufferStatusError = enum_MTLCommandBufferStatus.define('MTLCommandBufferStatusError', 5)
MTLCommandBufferStatus: TypeAlias = enum_MTLCommandBufferStatus
# Minimal Foundation NSError binding used for command buffer error reporting.
class NSError(objc.Spec): pass
NSErrorDomain: TypeAlias = NSString
NSError._bases_ = [NSObject]
NSError._methods_ = [
  ('domain', NSErrorDomain, []),
  ('code', NSInteger, []),
  ('localizedDescription', NSString, []),
  ('localizedFailureReason', NSString, []),
  ('localizedRecoverySuggestion', NSString, []),
  ('recoveryAttempter', objc.id_, []),
  ('helpAnchor', NSString, []),
]
# --- MTLCommandBuffer --------------------------------------------------------
# Method table covering submission (enqueue/commit/wait), drawable
# presentation, encoder creation, event signaling, and status/timing queries.
MTLCommandBuffer._bases_ = [NSObject]
MTLCommandBuffer._methods_ = [
  ('enqueue', None, []),
  ('commit', None, []),
  ('presentDrawable:', None, [MTLDrawable]),
  ('presentDrawable:atTime:', None, [MTLDrawable, CFTimeInterval]),
  ('presentDrawable:afterMinimumDuration:', None, [MTLDrawable, CFTimeInterval]),
  ('waitUntilScheduled', None, []),
  ('waitUntilCompleted', None, []),
  ('blitCommandEncoder', MTLBlitCommandEncoder, []),
  ('renderCommandEncoderWithDescriptor:', MTLRenderCommandEncoder, [MTLRenderPassDescriptor]),
  ('computeCommandEncoderWithDescriptor:', MTLComputeCommandEncoder, [MTLComputePassDescriptor]),
  ('blitCommandEncoderWithDescriptor:', MTLBlitCommandEncoder, [MTLBlitPassDescriptor]),
  ('computeCommandEncoder', MTLComputeCommandEncoder, []),
  ('computeCommandEncoderWithDispatchType:', MTLComputeCommandEncoder, [MTLDispatchType]),
  ('encodeWaitForEvent:value:', None, [MTLEvent, uint64_t]),
  ('encodeSignalEvent:value:', None, [MTLEvent, uint64_t]),
  ('parallelRenderCommandEncoderWithDescriptor:', MTLParallelRenderCommandEncoder, [MTLRenderPassDescriptor]),
  ('resourceStateCommandEncoder', MTLResourceStateCommandEncoder, []),
  ('resourceStateCommandEncoderWithDescriptor:', MTLResourceStateCommandEncoder, [MTLResourceStatePassDescriptor]),
  ('accelerationStructureCommandEncoder', MTLAccelerationStructureCommandEncoder, []),
  ('accelerationStructureCommandEncoderWithDescriptor:', MTLAccelerationStructureCommandEncoder, [MTLAccelerationStructurePassDescriptor]),
  ('pushDebugGroup:', None, [NSString]),
  ('popDebugGroup', None, []),
  ('device', MTLDevice, []),
  ('commandQueue', MTLCommandQueue, []),
  ('retainedReferences', BOOL, []),
  ('errorOptions', MTLCommandBufferErrorOption, []),
  ('label', NSString, []),
  ('setLabel:', None, [NSString]),
  ('kernelStartTime', CFTimeInterval, []),
  ('kernelEndTime', CFTimeInterval, []),
  ('logs', MTLLogContainer, []),
  ('GPUStartTime', CFTimeInterval, []),
  ('GPUEndTime', CFTimeInterval, []),
  ('status', MTLCommandBufferStatus, []),
  ('error', NSError, []),
]
class MTLCommandBufferDescriptor(objc.Spec): pass
MTLCommandBufferDescriptor._bases_ = [NSObject]
MTLCommandBufferDescriptor._methods_ = [
('retainedReferences', BOOL, []),
('setRetainedReferences:', None, [BOOL]),
('errorOptions', MTLCommandBufferErrorOption, []),
('setErrorOptions:', None, [MTLCommandBufferErrorOption]),
]
MTLCommandQueue._bases_ = [NSObject]
MTLCommandQueue._methods_ = [
('commandBuffer', MTLCommandBuffer, []),
('commandBufferWithDescriptor:', MTLCommandBuffer, [MTLCommandBufferDescriptor]),
('commandBufferWithUnretainedReferences', MTLCommandBuffer, []),
('insertDebugCaptureBoundary', None, []),
('label', NSString, []),
('setLabel:', None, [NSString]),
('device', MTLDevice, []),
]
# --- Metal IO compression methods and device discovery ----------------------
class enum_MTLIOCompressionMethod(NSInteger, c.Enum): pass
MTLIOCompressionMethodZlib = enum_MTLIOCompressionMethod.define('MTLIOCompressionMethodZlib', 0)
MTLIOCompressionMethodLZFSE = enum_MTLIOCompressionMethod.define('MTLIOCompressionMethodLZFSE', 1)
MTLIOCompressionMethodLZ4 = enum_MTLIOCompressionMethod.define('MTLIOCompressionMethodLZ4', 2)
MTLIOCompressionMethodLZMA = enum_MTLIOCompressionMethod.define('MTLIOCompressionMethodLZMA', 3)
MTLIOCompressionMethodLZBitmap = enum_MTLIOCompressionMethod.define('MTLIOCompressionMethodLZBitmap', 4)
MTLIOCompressionMethod: TypeAlias = enum_MTLIOCompressionMethod
@dll.bind
def MTLCreateSystemDefaultDevice() -> MTLDevice: ...
# "Create"-style function returns a +1 reference; wrap so the binding layer
# takes ownership instead of over-retaining.
MTLCreateSystemDefaultDevice = objc.returns_retained(MTLCreateSystemDefaultDevice)
MTLDeviceNotificationName: TypeAlias = NSString
# Notification-name constants resolved from the shared library; the
# try/except tolerates symbols that are absent from the loaded dll version.
try: MTLDeviceWasAddedNotification = MTLDeviceNotificationName.in_dll(dll, 'MTLDeviceWasAddedNotification') # type: ignore
except (ValueError,AttributeError): pass
try: MTLDeviceRemovalRequestedNotification = MTLDeviceNotificationName.in_dll(dll, 'MTLDeviceRemovalRequestedNotification') # type: ignore
except (ValueError,AttributeError): pass
try: MTLDeviceWasRemovedNotification = MTLDeviceNotificationName.in_dll(dll, 'MTLDeviceWasRemovedNotification') # type: ignore
except (ValueError,AttributeError): pass
@dll.bind
def MTLRemoveDeviceObserver(observer:NSObject) -> None: ...
# --- MTLFeatureSet (deprecated in favor of MTLGPUFamily) --------------------
# Raw values are grouped by platform: iOS 0..16, macOS 10000+, tvOS 30000+.
class enum_MTLFeatureSet(NSUInteger, c.Enum): pass
# iOS feature sets
MTLFeatureSet_iOS_GPUFamily1_v1 = enum_MTLFeatureSet.define('MTLFeatureSet_iOS_GPUFamily1_v1', 0)
MTLFeatureSet_iOS_GPUFamily2_v1 = enum_MTLFeatureSet.define('MTLFeatureSet_iOS_GPUFamily2_v1', 1)
MTLFeatureSet_iOS_GPUFamily1_v2 = enum_MTLFeatureSet.define('MTLFeatureSet_iOS_GPUFamily1_v2', 2)
MTLFeatureSet_iOS_GPUFamily2_v2 = enum_MTLFeatureSet.define('MTLFeatureSet_iOS_GPUFamily2_v2', 3)
MTLFeatureSet_iOS_GPUFamily3_v1 = enum_MTLFeatureSet.define('MTLFeatureSet_iOS_GPUFamily3_v1', 4)
MTLFeatureSet_iOS_GPUFamily1_v3 = enum_MTLFeatureSet.define('MTLFeatureSet_iOS_GPUFamily1_v3', 5)
MTLFeatureSet_iOS_GPUFamily2_v3 = enum_MTLFeatureSet.define('MTLFeatureSet_iOS_GPUFamily2_v3', 6)
MTLFeatureSet_iOS_GPUFamily3_v2 = enum_MTLFeatureSet.define('MTLFeatureSet_iOS_GPUFamily3_v2', 7)
MTLFeatureSet_iOS_GPUFamily1_v4 = enum_MTLFeatureSet.define('MTLFeatureSet_iOS_GPUFamily1_v4', 8)
MTLFeatureSet_iOS_GPUFamily2_v4 = enum_MTLFeatureSet.define('MTLFeatureSet_iOS_GPUFamily2_v4', 9)
MTLFeatureSet_iOS_GPUFamily3_v3 = enum_MTLFeatureSet.define('MTLFeatureSet_iOS_GPUFamily3_v3', 10)
MTLFeatureSet_iOS_GPUFamily4_v1 = enum_MTLFeatureSet.define('MTLFeatureSet_iOS_GPUFamily4_v1', 11)
MTLFeatureSet_iOS_GPUFamily1_v5 = enum_MTLFeatureSet.define('MTLFeatureSet_iOS_GPUFamily1_v5', 12)
MTLFeatureSet_iOS_GPUFamily2_v5 = enum_MTLFeatureSet.define('MTLFeatureSet_iOS_GPUFamily2_v5', 13)
MTLFeatureSet_iOS_GPUFamily3_v4 = enum_MTLFeatureSet.define('MTLFeatureSet_iOS_GPUFamily3_v4', 14)
MTLFeatureSet_iOS_GPUFamily4_v2 = enum_MTLFeatureSet.define('MTLFeatureSet_iOS_GPUFamily4_v2', 15)
MTLFeatureSet_iOS_GPUFamily5_v1 = enum_MTLFeatureSet.define('MTLFeatureSet_iOS_GPUFamily5_v1', 16)
# macOS feature sets (the OSX_* names are legacy aliases sharing raw values)
MTLFeatureSet_macOS_GPUFamily1_v1 = enum_MTLFeatureSet.define('MTLFeatureSet_macOS_GPUFamily1_v1', 10000)
MTLFeatureSet_OSX_GPUFamily1_v1 = enum_MTLFeatureSet.define('MTLFeatureSet_OSX_GPUFamily1_v1', 10000)
MTLFeatureSet_macOS_GPUFamily1_v2 = enum_MTLFeatureSet.define('MTLFeatureSet_macOS_GPUFamily1_v2', 10001)
MTLFeatureSet_OSX_GPUFamily1_v2 = enum_MTLFeatureSet.define('MTLFeatureSet_OSX_GPUFamily1_v2', 10001)
MTLFeatureSet_macOS_ReadWriteTextureTier2 = enum_MTLFeatureSet.define('MTLFeatureSet_macOS_ReadWriteTextureTier2', 10002)
MTLFeatureSet_OSX_ReadWriteTextureTier2 = enum_MTLFeatureSet.define('MTLFeatureSet_OSX_ReadWriteTextureTier2', 10002)
MTLFeatureSet_macOS_GPUFamily1_v3 = enum_MTLFeatureSet.define('MTLFeatureSet_macOS_GPUFamily1_v3', 10003)
MTLFeatureSet_macOS_GPUFamily1_v4 = enum_MTLFeatureSet.define('MTLFeatureSet_macOS_GPUFamily1_v4', 10004)
MTLFeatureSet_macOS_GPUFamily2_v1 = enum_MTLFeatureSet.define('MTLFeatureSet_macOS_GPUFamily2_v1', 10005)
# tvOS feature sets (TVOS_* is a legacy alias)
MTLFeatureSet_tvOS_GPUFamily1_v1 = enum_MTLFeatureSet.define('MTLFeatureSet_tvOS_GPUFamily1_v1', 30000)
MTLFeatureSet_TVOS_GPUFamily1_v1 = enum_MTLFeatureSet.define('MTLFeatureSet_TVOS_GPUFamily1_v1', 30000)
MTLFeatureSet_tvOS_GPUFamily1_v2 = enum_MTLFeatureSet.define('MTLFeatureSet_tvOS_GPUFamily1_v2', 30001)
MTLFeatureSet_tvOS_GPUFamily1_v3 = enum_MTLFeatureSet.define('MTLFeatureSet_tvOS_GPUFamily1_v3', 30002)
MTLFeatureSet_tvOS_GPUFamily2_v1 = enum_MTLFeatureSet.define('MTLFeatureSet_tvOS_GPUFamily2_v1', 30003)
MTLFeatureSet_tvOS_GPUFamily1_v4 = enum_MTLFeatureSet.define('MTLFeatureSet_tvOS_GPUFamily1_v4', 30004)
MTLFeatureSet_tvOS_GPUFamily2_v2 = enum_MTLFeatureSet.define('MTLFeatureSet_tvOS_GPUFamily2_v2', 30005)
MTLFeatureSet: TypeAlias = enum_MTLFeatureSet
# --- GPU family / capability-tier enums -------------------------------------
# MTLGPUFamily: raw values grouped by family kind (Apple 1xxx, Mac 2xxx,
# Common 3xxx, MacCatalyst 4xxx, Metal3 5xxx).
class enum_MTLGPUFamily(NSInteger, c.Enum): pass
MTLGPUFamilyApple1 = enum_MTLGPUFamily.define('MTLGPUFamilyApple1', 1001)
MTLGPUFamilyApple2 = enum_MTLGPUFamily.define('MTLGPUFamilyApple2', 1002)
MTLGPUFamilyApple3 = enum_MTLGPUFamily.define('MTLGPUFamilyApple3', 1003)
MTLGPUFamilyApple4 = enum_MTLGPUFamily.define('MTLGPUFamilyApple4', 1004)
MTLGPUFamilyApple5 = enum_MTLGPUFamily.define('MTLGPUFamilyApple5', 1005)
MTLGPUFamilyApple6 = enum_MTLGPUFamily.define('MTLGPUFamilyApple6', 1006)
MTLGPUFamilyApple7 = enum_MTLGPUFamily.define('MTLGPUFamilyApple7', 1007)
MTLGPUFamilyApple8 = enum_MTLGPUFamily.define('MTLGPUFamilyApple8', 1008)
MTLGPUFamilyApple9 = enum_MTLGPUFamily.define('MTLGPUFamilyApple9', 1009)
MTLGPUFamilyMac1 = enum_MTLGPUFamily.define('MTLGPUFamilyMac1', 2001)
MTLGPUFamilyMac2 = enum_MTLGPUFamily.define('MTLGPUFamilyMac2', 2002)
MTLGPUFamilyCommon1 = enum_MTLGPUFamily.define('MTLGPUFamilyCommon1', 3001)
MTLGPUFamilyCommon2 = enum_MTLGPUFamily.define('MTLGPUFamilyCommon2', 3002)
MTLGPUFamilyCommon3 = enum_MTLGPUFamily.define('MTLGPUFamilyCommon3', 3003)
MTLGPUFamilyMacCatalyst1 = enum_MTLGPUFamily.define('MTLGPUFamilyMacCatalyst1', 4001)
MTLGPUFamilyMacCatalyst2 = enum_MTLGPUFamily.define('MTLGPUFamilyMacCatalyst2', 4002)
MTLGPUFamilyMetal3 = enum_MTLGPUFamily.define('MTLGPUFamilyMetal3', 5001)
MTLGPUFamily: TypeAlias = enum_MTLGPUFamily
# Where a device is physically attached; Unspecified uses -1 as a sentinel.
class enum_MTLDeviceLocation(NSUInteger, c.Enum): pass
MTLDeviceLocationBuiltIn = enum_MTLDeviceLocation.define('MTLDeviceLocationBuiltIn', 0)
MTLDeviceLocationSlot = enum_MTLDeviceLocation.define('MTLDeviceLocationSlot', 1)
MTLDeviceLocationExternal = enum_MTLDeviceLocation.define('MTLDeviceLocationExternal', 2)
MTLDeviceLocationUnspecified = enum_MTLDeviceLocation.define('MTLDeviceLocationUnspecified', -1)
MTLDeviceLocation: TypeAlias = enum_MTLDeviceLocation
# Bit-flag options for pipeline creation (values are powers of two).
class enum_MTLPipelineOption(NSUInteger, c.Enum): pass
MTLPipelineOptionNone = enum_MTLPipelineOption.define('MTLPipelineOptionNone', 0)
MTLPipelineOptionArgumentInfo = enum_MTLPipelineOption.define('MTLPipelineOptionArgumentInfo', 1)
MTLPipelineOptionBufferTypeInfo = enum_MTLPipelineOption.define('MTLPipelineOptionBufferTypeInfo', 2)
MTLPipelineOptionFailOnBinaryArchiveMiss = enum_MTLPipelineOption.define('MTLPipelineOptionFailOnBinaryArchiveMiss', 4)
MTLPipelineOption: TypeAlias = enum_MTLPipelineOption
class enum_MTLReadWriteTextureTier(NSUInteger, c.Enum): pass
MTLReadWriteTextureTierNone = enum_MTLReadWriteTextureTier.define('MTLReadWriteTextureTierNone', 0)
MTLReadWriteTextureTier1 = enum_MTLReadWriteTextureTier.define('MTLReadWriteTextureTier1', 1)
MTLReadWriteTextureTier2 = enum_MTLReadWriteTextureTier.define('MTLReadWriteTextureTier2', 2)
MTLReadWriteTextureTier: TypeAlias = enum_MTLReadWriteTextureTier
class enum_MTLArgumentBuffersTier(NSUInteger, c.Enum): pass
MTLArgumentBuffersTier1 = enum_MTLArgumentBuffersTier.define('MTLArgumentBuffersTier1', 0)
MTLArgumentBuffersTier2 = enum_MTLArgumentBuffersTier.define('MTLArgumentBuffersTier2', 1)
MTLArgumentBuffersTier: TypeAlias = enum_MTLArgumentBuffersTier
class enum_MTLSparseTextureRegionAlignmentMode(NSUInteger, c.Enum): pass
MTLSparseTextureRegionAlignmentModeOutward = enum_MTLSparseTextureRegionAlignmentMode.define('MTLSparseTextureRegionAlignmentModeOutward', 0)
MTLSparseTextureRegionAlignmentModeInward = enum_MTLSparseTextureRegionAlignmentMode.define('MTLSparseTextureRegionAlignmentModeInward', 1)
MTLSparseTextureRegionAlignmentMode: TypeAlias = enum_MTLSparseTextureRegionAlignmentMode
# Sparse page sizes; names suggest 16/64/256 KB but raw values are opaque ids.
class enum_MTLSparsePageSize(NSInteger, c.Enum): pass
MTLSparsePageSize16 = enum_MTLSparsePageSize.define('MTLSparsePageSize16', 101)
MTLSparsePageSize64 = enum_MTLSparsePageSize.define('MTLSparsePageSize64', 102)
MTLSparsePageSize256 = enum_MTLSparsePageSize.define('MTLSparsePageSize256', 103)
MTLSparsePageSize: TypeAlias = enum_MTLSparsePageSize
# --- Plain C structs returned by size-query selectors -----------------------
# The Annotated second argument is presumably the field's byte offset within
# the struct (0/8/16 here, consistent with SIZE = 24) — confirm against the
# c.Struct machinery.
@c.record
class MTLAccelerationStructureSizes(c.Struct):
    SIZE = 24
    accelerationStructureSize: Annotated[NSUInteger, 0]
    buildScratchBufferSize: Annotated[NSUInteger, 8]
    refitScratchBufferSize: Annotated[NSUInteger, 16]
# Points in a pass at which GPU counters may be sampled.
class enum_MTLCounterSamplingPoint(NSUInteger, c.Enum): pass
MTLCounterSamplingPointAtStageBoundary = enum_MTLCounterSamplingPoint.define('MTLCounterSamplingPointAtStageBoundary', 0)
MTLCounterSamplingPointAtDrawBoundary = enum_MTLCounterSamplingPoint.define('MTLCounterSamplingPointAtDrawBoundary', 1)
MTLCounterSamplingPointAtDispatchBoundary = enum_MTLCounterSamplingPoint.define('MTLCounterSamplingPointAtDispatchBoundary', 2)
MTLCounterSamplingPointAtTileDispatchBoundary = enum_MTLCounterSamplingPoint.define('MTLCounterSamplingPointAtTileDispatchBoundary', 3)
MTLCounterSamplingPointAtBlitBoundary = enum_MTLCounterSamplingPoint.define('MTLCounterSamplingPointAtBlitBoundary', 4)
MTLCounterSamplingPoint: TypeAlias = enum_MTLCounterSamplingPoint
# Size/alignment pair returned by heap size-and-align selectors.
@c.record
class MTLSizeAndAlign(c.Struct):
    SIZE = 16
    size: Annotated[NSUInteger, 0]
    align: Annotated[NSUInteger, 8]
# --- Argument descriptors and miscellaneous forward declarations ------------
# Empty Spec subclasses are forward declarations; their _bases_/_methods_
# tables are filled in later (or not needed for the bindings used here).
class MTLRenderPipelineReflection(objc.Spec): pass
class MTLArgumentDescriptor(objc.Spec): pass
MTLArgumentDescriptor._bases_ = [NSObject]
MTLArgumentDescriptor._methods_ = [
    ('dataType', MTLDataType, []),
    ('setDataType:', None, [MTLDataType]),
    ('index', NSUInteger, []),
    ('setIndex:', None, [NSUInteger]),
    ('arrayLength', NSUInteger, []),
    ('setArrayLength:', None, [NSUInteger]),
    ('access', MTLBindingAccess, []),
    ('setAccess:', None, [MTLBindingAccess]),
    ('textureType', MTLTextureType, []),
    ('setTextureType:', None, [MTLTextureType]),
    ('constantBlockAlignment', NSUInteger, []),
    ('setConstantBlockAlignment:', None, [NSUInteger]),
]
MTLArgumentDescriptor._classmethods_ = [
    ('argumentDescriptor', MTLArgumentDescriptor, []),
]
class MTLArchitecture(objc.Spec): pass
MTLArchitecture._bases_ = [NSObject]
MTLArchitecture._methods_ = [
    ('name', NSString, []),
]
class MTLHeapDescriptor(objc.Spec): pass
class MTLDepthStencilState(objc.Spec): pass
class MTLDepthStencilDescriptor(objc.Spec): pass
# Opaque CoreFoundation IOSurface handle, modeled as a struct pointer.
class struct___IOSurface(ctypes.Structure): pass
IOSurfaceRef: TypeAlias = c.POINTER[struct___IOSurface]
class MTLSharedTextureHandle(objc.Spec): pass
MTLSharedTextureHandle._bases_ = [NSObject]
MTLSharedTextureHandle._methods_ = [
    ('device', MTLDevice, []),
    ('label', NSString, []),
]
# --- Shader library / function bindings -------------------------------------
class MTLSamplerDescriptor(objc.Spec): pass
class MTLLibrary(objc.Spec): pass
# Holder for function-constant values passed to shader specialization.
class MTLFunctionConstantValues(objc.Spec): pass
MTLFunctionConstantValues._bases_ = [NSObject]
MTLFunctionConstantValues._methods_ = [
    ('setConstantValue:type:atIndex:', None, [ctypes.c_void_p, MTLDataType, NSUInteger]),
    ('setConstantValues:type:withRange:', None, [ctypes.c_void_p, MTLDataType, NSRange]),
    ('setConstantValue:type:withName:', None, [ctypes.c_void_p, MTLDataType, NSString]),
    ('reset', None, []),
]
class MTLFunctionDescriptor(objc.Spec): pass
MTLFunctionDescriptor._bases_ = [NSObject]
MTLFunctionDescriptor._methods_ = [
    ('name', NSString, []),
    ('setName:', None, [NSString]),
    ('specializedName', NSString, []),
    ('setSpecializedName:', None, [NSString]),
    ('constantValues', MTLFunctionConstantValues, []),
    ('setConstantValues:', None, [MTLFunctionConstantValues]),
    ('options', MTLFunctionOptions, []),
    ('setOptions:', None, [MTLFunctionOptions]),
]
MTLFunctionDescriptor._classmethods_ = [
    ('functionDescriptor', MTLFunctionDescriptor, []),
]
class MTLIntersectionFunctionDescriptor(objc.Spec): pass
class enum_MTLLibraryType(NSInteger, c.Enum): pass
MTLLibraryTypeExecutable = enum_MTLLibraryType.define('MTLLibraryTypeExecutable', 0)
MTLLibraryTypeDynamic = enum_MTLLibraryType.define('MTLLibraryTypeDynamic', 1)
MTLLibraryType: TypeAlias = enum_MTLLibraryType
MTLLibrary._bases_ = [NSObject]
# The trailing True on the 'new*' selectors presumably marks a retained (+1)
# return per the Cocoa "new" naming rule — confirm against the Spec machinery.
MTLLibrary._methods_ = [
    ('newFunctionWithName:', MTLFunction, [NSString], True),
    ('newFunctionWithName:constantValues:error:', MTLFunction, [NSString, MTLFunctionConstantValues, c.POINTER[NSError]], True),
    ('newFunctionWithDescriptor:error:', MTLFunction, [MTLFunctionDescriptor, c.POINTER[NSError]], True),
    ('newIntersectionFunctionWithDescriptor:error:', MTLFunction, [MTLIntersectionFunctionDescriptor, c.POINTER[NSError]], True),
    ('label', NSString, []),
    ('setLabel:', None, [NSString]),
    ('device', MTLDevice, []),
    ('type', MTLLibraryType, []),
    ('installName', NSString, []),
]
# --- Foundation forward declarations and NSURL bookmark option flags --------
class NSBundle(objc.Spec): pass
class NSURL(objc.Spec): pass
NSURLResourceKey: TypeAlias = NSString
# Bit-flag options (powers of two) for creating NSURL bookmark data.
class enum_NSURLBookmarkCreationOptions(NSUInteger, c.Enum): pass
NSURLBookmarkCreationPreferFileIDResolution = enum_NSURLBookmarkCreationOptions.define('NSURLBookmarkCreationPreferFileIDResolution', 256)
NSURLBookmarkCreationMinimalBookmark = enum_NSURLBookmarkCreationOptions.define('NSURLBookmarkCreationMinimalBookmark', 512)
NSURLBookmarkCreationSuitableForBookmarkFile = enum_NSURLBookmarkCreationOptions.define('NSURLBookmarkCreationSuitableForBookmarkFile', 1024)
NSURLBookmarkCreationWithSecurityScope = enum_NSURLBookmarkCreationOptions.define('NSURLBookmarkCreationWithSecurityScope', 2048)
NSURLBookmarkCreationSecurityScopeAllowOnlyReadAccess = enum_NSURLBookmarkCreationOptions.define('NSURLBookmarkCreationSecurityScopeAllowOnlyReadAccess', 4096)
NSURLBookmarkCreationWithoutImplicitSecurityScope = enum_NSURLBookmarkCreationOptions.define('NSURLBookmarkCreationWithoutImplicitSecurityScope', 536870912)
NSURLBookmarkCreationOptions: TypeAlias = enum_NSURLBookmarkCreationOptions
# Bit-flag options for resolving previously created bookmark data.
class enum_NSURLBookmarkResolutionOptions(NSUInteger, c.Enum): pass
NSURLBookmarkResolutionWithoutUI = enum_NSURLBookmarkResolutionOptions.define('NSURLBookmarkResolutionWithoutUI', 256)
NSURLBookmarkResolutionWithoutMounting = enum_NSURLBookmarkResolutionOptions.define('NSURLBookmarkResolutionWithoutMounting', 512)
NSURLBookmarkResolutionWithSecurityScope = enum_NSURLBookmarkResolutionOptions.define('NSURLBookmarkResolutionWithSecurityScope', 1024)
NSURLBookmarkResolutionWithoutImplicitStartAccessing = enum_NSURLBookmarkResolutionOptions.define('NSURLBookmarkResolutionWithoutImplicitStartAccessing', 32768)
NSURLBookmarkResolutionOptions: TypeAlias = enum_NSURLBookmarkResolutionOptions
# --- NSValue / NSNumber bindings --------------------------------------------
class NSNumber(objc.Spec): pass
# Three-way comparison result used by compare: selectors.
class enum_NSComparisonResult(NSInteger, c.Enum): pass
NSOrderedAscending = enum_NSComparisonResult.define('NSOrderedAscending', -1)
NSOrderedSame = enum_NSComparisonResult.define('NSOrderedSame', 0)
NSOrderedDescending = enum_NSComparisonResult.define('NSOrderedDescending', 1)
NSComparisonResult: TypeAlias = enum_NSComparisonResult
class NSValue(objc.Spec): pass
NSValue._bases_ = [NSObject]
NSValue._methods_ = [
    ('getValue:size:', None, [ctypes.c_void_p, NSUInteger]),
    ('initWithBytes:objCType:', 'instancetype', [ctypes.c_void_p, c.POINTER[Annotated[bytes, ctypes.c_char]]]),
    ('initWithCoder:', 'instancetype', [NSCoder]),
    ('objCType', c.POINTER[Annotated[bytes, ctypes.c_char]], []),
]
# NSNumber: one initWith*/​*Value selector pair per boxed C scalar type.
NSNumber._bases_ = [NSValue]
NSNumber._methods_ = [
    ('initWithCoder:', 'instancetype', [NSCoder]),
    ('initWithChar:', NSNumber, [Annotated[bytes, ctypes.c_char]]),
    ('initWithUnsignedChar:', NSNumber, [Annotated[int, ctypes.c_ubyte]]),
    ('initWithShort:', NSNumber, [Annotated[int, ctypes.c_int16]]),
    ('initWithUnsignedShort:', NSNumber, [Annotated[int, ctypes.c_uint16]]),
    ('initWithInt:', NSNumber, [Annotated[int, ctypes.c_int32]]),
    ('initWithUnsignedInt:', NSNumber, [Annotated[int, ctypes.c_uint32]]),
    ('initWithLong:', NSNumber, [Annotated[int, ctypes.c_int64]]),
    ('initWithUnsignedLong:', NSNumber, [Annotated[int, ctypes.c_uint64]]),
    ('initWithLongLong:', NSNumber, [Annotated[int, ctypes.c_int64]]),
    ('initWithUnsignedLongLong:', NSNumber, [Annotated[int, ctypes.c_uint64]]),
    ('initWithFloat:', NSNumber, [Annotated[float, ctypes.c_float]]),
    ('initWithDouble:', NSNumber, [Annotated[float, ctypes.c_double]]),
    ('initWithBool:', NSNumber, [BOOL]),
    ('initWithInteger:', NSNumber, [NSInteger]),
    ('initWithUnsignedInteger:', NSNumber, [NSUInteger]),
    ('compare:', NSComparisonResult, [NSNumber]),
    ('isEqualToNumber:', BOOL, [NSNumber]),
    ('descriptionWithLocale:', NSString, [objc.id_]),
    ('charValue', Annotated[bytes, ctypes.c_char], []),
    ('unsignedCharValue', Annotated[int, ctypes.c_ubyte], []),
    ('shortValue', Annotated[int, ctypes.c_int16], []),
    ('unsignedShortValue', Annotated[int, ctypes.c_uint16], []),
    ('intValue', Annotated[int, ctypes.c_int32], []),
    ('unsignedIntValue', Annotated[int, ctypes.c_uint32], []),
    ('longValue', Annotated[int, ctypes.c_int64], []),
    ('unsignedLongValue', Annotated[int, ctypes.c_uint64], []),
    ('longLongValue', Annotated[int, ctypes.c_int64], []),
    ('unsignedLongLongValue', Annotated[int, ctypes.c_uint64], []),
    ('floatValue', Annotated[float, ctypes.c_float], []),
    ('doubleValue', Annotated[float, ctypes.c_double], []),
    ('boolValue', BOOL, []),
    ('integerValue', NSInteger, []),
    ('unsignedIntegerValue', NSUInteger, []),
    ('stringValue', NSString, []),
]
# --- NSURL bindings ---------------------------------------------------------
NSURLBookmarkFileCreationOptions: TypeAlias = Annotated[int, ctypes.c_uint64]
NSURL._bases_ = [NSObject]
NSURL._methods_ = [
    # initializers ('instancetype' means the selector returns its own class)
    ('initWithScheme:host:path:', 'instancetype', [NSString, NSString, NSString]),
    ('initFileURLWithPath:isDirectory:relativeToURL:', 'instancetype', [NSString, BOOL, NSURL]),
    ('initFileURLWithPath:relativeToURL:', 'instancetype', [NSString, NSURL]),
    ('initFileURLWithPath:isDirectory:', 'instancetype', [NSString, BOOL]),
    ('initFileURLWithPath:', 'instancetype', [NSString]),
    ('initFileURLWithFileSystemRepresentation:isDirectory:relativeToURL:', 'instancetype', [c.POINTER[Annotated[bytes, ctypes.c_char]], BOOL, NSURL]),
    ('initWithString:', 'instancetype', [NSString]),
    ('initWithString:relativeToURL:', 'instancetype', [NSString, NSURL]),
    ('initWithString:encodingInvalidCharacters:', 'instancetype', [NSString, BOOL]),
    ('initWithDataRepresentation:relativeToURL:', 'instancetype', [NSData, NSURL]),
    ('initAbsoluteURLWithDataRepresentation:relativeToURL:', 'instancetype', [NSData, NSURL]),
    # resource values and security scope
    ('getFileSystemRepresentation:maxLength:', BOOL, [c.POINTER[Annotated[bytes, ctypes.c_char]], NSUInteger]),
    ('isFileReferenceURL', BOOL, []),
    ('fileReferenceURL', NSURL, []),
    ('getResourceValue:forKey:error:', BOOL, [c.POINTER[objc.id_], NSURLResourceKey, c.POINTER[NSError]]),
    ('setResourceValue:forKey:error:', BOOL, [objc.id_, NSURLResourceKey, c.POINTER[NSError]]),
    ('removeCachedResourceValueForKey:', None, [NSURLResourceKey]),
    ('removeAllCachedResourceValues', None, []),
    ('setTemporaryResourceValue:forKey:', None, [objc.id_, NSURLResourceKey]),
    ('initByResolvingBookmarkData:options:relativeToURL:bookmarkDataIsStale:error:', 'instancetype', [NSData, NSURLBookmarkResolutionOptions, NSURL, c.POINTER[BOOL], c.POINTER[NSError]]),
    ('startAccessingSecurityScopedResource', BOOL, []),
    ('stopAccessingSecurityScopedResource', None, []),
    # read-only URL component properties
    ('dataRepresentation', NSData, []),
    ('absoluteString', NSString, []),
    ('relativeString', NSString, []),
    ('baseURL', NSURL, []),
    ('absoluteURL', NSURL, []),
    ('scheme', NSString, []),
    ('resourceSpecifier', NSString, []),
    ('host', NSString, []),
    ('port', NSNumber, []),
    ('user', NSString, []),
    ('password', NSString, []),
    ('path', NSString, []),
    ('fragment', NSString, []),
    ('parameterString', NSString, []),
    ('query', NSString, []),
    ('relativePath', NSString, []),
    ('hasDirectoryPath', BOOL, []),
    ('fileSystemRepresentation', c.POINTER[Annotated[bytes, ctypes.c_char]], []),
    ('isFileURL', BOOL, []),
    ('standardizedURL', NSURL, []),
    ('filePathURL', NSURL, []),
]
NSURL._classmethods_ = [
    ('fileURLWithPath:isDirectory:relativeToURL:', NSURL, [NSString, BOOL, NSURL]),
    ('fileURLWithPath:relativeToURL:', NSURL, [NSString, NSURL]),
    ('fileURLWithPath:isDirectory:', NSURL, [NSString, BOOL]),
    ('fileURLWithPath:', NSURL, [NSString]),
    ('fileURLWithFileSystemRepresentation:isDirectory:relativeToURL:', NSURL, [c.POINTER[Annotated[bytes, ctypes.c_char]], BOOL, NSURL]),
    ('URLWithString:', 'instancetype', [NSString]),
    ('URLWithString:relativeToURL:', 'instancetype', [NSString, NSURL]),
    ('URLWithString:encodingInvalidCharacters:', 'instancetype', [NSString, BOOL]),
    ('URLWithDataRepresentation:relativeToURL:', NSURL, [NSData, NSURL]),
    ('absoluteURLWithDataRepresentation:relativeToURL:', NSURL, [NSData, NSURL]),
    ('URLByResolvingBookmarkData:options:relativeToURL:bookmarkDataIsStale:error:', 'instancetype', [NSData, NSURLBookmarkResolutionOptions, NSURL, c.POINTER[BOOL], c.POINTER[NSError]]),
    ('writeBookmarkData:toURL:options:error:', BOOL, [NSData, NSURL, NSURLBookmarkFileCreationOptions, c.POINTER[NSError]]),
    ('bookmarkDataWithContentsOfURL:error:', NSData, [NSURL, c.POINTER[NSError]]),
    ('URLByResolvingAliasFileAtURL:options:error:', 'instancetype', [NSURL, NSURLBookmarkResolutionOptions, c.POINTER[NSError]]),
]
# --- NSAttributedString / NSBundle bindings ---------------------------------
class NSAttributedString(objc.Spec): pass
NSAttributedString._bases_ = [NSObject]
NSAttributedString._methods_ = [
    ('string', NSString, []),
]
NSBundle._bases_ = [NSObject]
NSBundle._methods_ = [
    ('initWithPath:', 'instancetype', [NSString]),
    ('initWithURL:', 'instancetype', [NSURL]),
    ('load', BOOL, []),
    ('unload', BOOL, []),
    ('preflightAndReturnError:', BOOL, [c.POINTER[NSError]]),
    ('loadAndReturnError:', BOOL, [c.POINTER[NSError]]),
    # resource lookup, in both NSURL- and path-returning flavors
    ('URLForAuxiliaryExecutable:', NSURL, [NSString]),
    ('pathForAuxiliaryExecutable:', NSString, [NSString]),
    ('URLForResource:withExtension:', NSURL, [NSString, NSString]),
    ('URLForResource:withExtension:subdirectory:', NSURL, [NSString, NSString, NSString]),
    ('URLForResource:withExtension:subdirectory:localization:', NSURL, [NSString, NSString, NSString, NSString]),
    ('pathForResource:ofType:', NSString, [NSString, NSString]),
    ('pathForResource:ofType:inDirectory:', NSString, [NSString, NSString, NSString]),
    ('pathForResource:ofType:inDirectory:forLocalization:', NSString, [NSString, NSString, NSString, NSString]),
    ('localizedStringForKey:value:table:', NSString, [NSString, NSString, NSString]),
    ('localizedAttributedStringForKey:value:table:', NSAttributedString, [NSString, NSString, NSString]),
    ('objectForInfoDictionaryKey:', objc.id_, [NSString]),
    ('isLoaded', BOOL, []),
    # read-only bundle location properties
    ('bundleURL', NSURL, []),
    ('resourceURL', NSURL, []),
    ('executableURL', NSURL, []),
    ('privateFrameworksURL', NSURL, []),
    ('sharedFrameworksURL', NSURL, []),
    ('sharedSupportURL', NSURL, []),
    ('builtInPlugInsURL', NSURL, []),
    ('appStoreReceiptURL', NSURL, []),
    ('bundlePath', NSString, []),
    ('resourcePath', NSString, []),
    ('executablePath', NSString, []),
    ('privateFrameworksPath', NSString, []),
    ('sharedFrameworksPath', NSString, []),
    ('sharedSupportPath', NSString, []),
    ('builtInPlugInsPath', NSString, []),
    ('bundleIdentifier', NSString, []),
    ('developmentLocalization', NSString, []),
]
NSBundle._classmethods_ = [
    ('bundleWithPath:', 'instancetype', [NSString]),
    ('bundleWithURL:', 'instancetype', [NSURL]),
    ('bundleWithIdentifier:', NSBundle, [NSString]),
    ('URLForResource:withExtension:subdirectory:inBundleWithURL:', NSURL, [NSString, NSString, NSString, NSURL]),
    ('pathForResource:ofType:inDirectory:', NSString, [NSString, NSString, NSString]),
    ('mainBundle', NSBundle, []),
]
# --- Shader compilation options ---------------------------------------------
class MTLCompileOptions(objc.Spec): pass
# Metal Shading Language versions; raw value packs major/minor
# (e.g. 65536 == 1<<16 for 1.0, 131072 == 2<<16 for 2.0).
class enum_MTLLanguageVersion(NSUInteger, c.Enum): pass
MTLLanguageVersion1_0 = enum_MTLLanguageVersion.define('MTLLanguageVersion1_0', 65536)
MTLLanguageVersion1_1 = enum_MTLLanguageVersion.define('MTLLanguageVersion1_1', 65537)
MTLLanguageVersion1_2 = enum_MTLLanguageVersion.define('MTLLanguageVersion1_2', 65538)
MTLLanguageVersion2_0 = enum_MTLLanguageVersion.define('MTLLanguageVersion2_0', 131072)
MTLLanguageVersion2_1 = enum_MTLLanguageVersion.define('MTLLanguageVersion2_1', 131073)
MTLLanguageVersion2_2 = enum_MTLLanguageVersion.define('MTLLanguageVersion2_2', 131074)
MTLLanguageVersion2_3 = enum_MTLLanguageVersion.define('MTLLanguageVersion2_3', 131075)
MTLLanguageVersion2_4 = enum_MTLLanguageVersion.define('MTLLanguageVersion2_4', 131076)
MTLLanguageVersion3_0 = enum_MTLLanguageVersion.define('MTLLanguageVersion3_0', 196608)
MTLLanguageVersion3_1 = enum_MTLLanguageVersion.define('MTLLanguageVersion3_1', 196609)
MTLLanguageVersion: TypeAlias = enum_MTLLanguageVersion
class enum_MTLLibraryOptimizationLevel(NSInteger, c.Enum): pass
MTLLibraryOptimizationLevelDefault = enum_MTLLibraryOptimizationLevel.define('MTLLibraryOptimizationLevelDefault', 0)
MTLLibraryOptimizationLevelSize = enum_MTLLibraryOptimizationLevel.define('MTLLibraryOptimizationLevelSize', 1)
MTLLibraryOptimizationLevel: TypeAlias = enum_MTLLibraryOptimizationLevel
class enum_MTLCompileSymbolVisibility(NSInteger, c.Enum): pass
MTLCompileSymbolVisibilityDefault = enum_MTLCompileSymbolVisibility.define('MTLCompileSymbolVisibilityDefault', 0)
MTLCompileSymbolVisibilityHidden = enum_MTLCompileSymbolVisibility.define('MTLCompileSymbolVisibilityHidden', 1)
MTLCompileSymbolVisibility: TypeAlias = enum_MTLCompileSymbolVisibility
# Property getter/setter pairs for each compile option.
MTLCompileOptions._bases_ = [NSObject]
MTLCompileOptions._methods_ = [
    ('fastMathEnabled', BOOL, []),
    ('setFastMathEnabled:', None, [BOOL]),
    ('languageVersion', MTLLanguageVersion, []),
    ('setLanguageVersion:', None, [MTLLanguageVersion]),
    ('libraryType', MTLLibraryType, []),
    ('setLibraryType:', None, [MTLLibraryType]),
    ('installName', NSString, []),
    ('setInstallName:', None, [NSString]),
    ('preserveInvariance', BOOL, []),
    ('setPreserveInvariance:', None, [BOOL]),
    ('optimizationLevel', MTLLibraryOptimizationLevel, []),
    ('setOptimizationLevel:', None, [MTLLibraryOptimizationLevel]),
    ('compileSymbolVisibility', MTLCompileSymbolVisibility, []),
    ('setCompileSymbolVisibility:', None, [MTLCompileSymbolVisibility]),
    ('allowReferencingUndefinedSymbols', BOOL, []),
    ('setAllowReferencingUndefinedSymbols:', None, [BOOL]),
    ('maxTotalThreadsPerThreadgroup', NSUInteger, []),
    ('setMaxTotalThreadsPerThreadgroup:', None, [NSUInteger]),
]
# Autogenerated forward declarations: each Metal protocol/class becomes an
# objc.Spec subclass; the _bases_/_methods_ tables filled in below are turned
# into objc_msgSend wrappers by the Spec metaclass (see runtime/support/objc.py).
class MTLStitchedLibraryDescriptor(objc.Spec): pass
class MTLRenderPipelineState(objc.Spec): pass
class MTLRenderPipelineDescriptor(objc.Spec): pass
class MTLTileRenderPipelineDescriptor(objc.Spec): pass
class MTLMeshRenderPipelineDescriptor(objc.Spec): pass
class MTLRasterizationRateMapDescriptor(objc.Spec): pass
class MTLIndirectCommandBufferDescriptor(objc.Spec): pass
class MTLSharedEvent(objc.Spec): pass
class MTLSharedEventHandle(objc.Spec): pass
class MTLIOFileHandle(objc.Spec): pass
class MTLIOCommandQueue(objc.Spec): pass
class MTLIOCommandQueueDescriptor(objc.Spec): pass
class MTLCounterSampleBufferDescriptor(objc.Spec): pass
class MTLCounterSet(objc.Spec): pass
# Each _methods_ entry is (selector, return_type, [argument_types]).
MTLCounterSet._bases_ = [NSObject]
MTLCounterSet._methods_ = [
  ('name', NSString, []),
]
MTLCounterSampleBufferDescriptor._bases_ = [NSObject]
MTLCounterSampleBufferDescriptor._methods_ = [
  ('counterSet', MTLCounterSet, []),
  ('setCounterSet:', None, [MTLCounterSet]),
  ('label', NSString, []),
  ('setLabel:', None, [NSString]),
  ('storageMode', MTLStorageMode, []),
  ('setStorageMode:', None, [MTLStorageMode]),
  ('sampleCount', NSUInteger, []),
  ('setSampleCount:', None, [NSUInteger]),
]
MTLTimestamp: TypeAlias = Annotated[int, ctypes.c_uint64]
class MTLBufferBinding(objc.Spec): pass
class MTLBinding(objc.Spec): pass
MTLBufferBinding._bases_ = [MTLBinding]
MTLBufferBinding._methods_ = [
  ('bufferAlignment', NSUInteger, []),
  ('bufferDataSize', NSUInteger, []),
  ('bufferDataType', MTLDataType, []),
  ('bufferStructType', MTLStructType, []),
  ('bufferPointerType', MTLPointerType, []),
]
class MTLDynamicLibrary(objc.Spec): pass
class MTLBinaryArchive(objc.Spec): pass
class MTLBinaryArchiveDescriptor(objc.Spec): pass
class MTLAccelerationStructureDescriptor(objc.Spec): pass
# Autogenerated binding table for the MTLDevice protocol.
# Entry format: (selector, return_type, [argument_types][, retained]) — a 4th
# element of True marks a method whose returned id_ is owned (+1 reference) and
# released on GC (see the `retain` flag in objc.msg / id_.retained).
MTLDevice._bases_ = [NSObject]
MTLDevice._methods_ = [
  ('newCommandQueue', MTLCommandQueue, [], True),
  ('newCommandQueueWithMaxCommandBufferCount:', MTLCommandQueue, [NSUInteger], True),
  ('heapTextureSizeAndAlignWithDescriptor:', MTLSizeAndAlign, [MTLTextureDescriptor]),
  ('heapBufferSizeAndAlignWithLength:options:', MTLSizeAndAlign, [NSUInteger, MTLResourceOptions]),
  ('newHeapWithDescriptor:', MTLHeap, [MTLHeapDescriptor], True),
  ('newBufferWithLength:options:', MTLBuffer, [NSUInteger, MTLResourceOptions], True),
  ('newBufferWithBytes:length:options:', MTLBuffer, [ctypes.c_void_p, NSUInteger, MTLResourceOptions], True),
  ('newDepthStencilStateWithDescriptor:', MTLDepthStencilState, [MTLDepthStencilDescriptor], True),
  ('newTextureWithDescriptor:', MTLTexture, [MTLTextureDescriptor], True),
  ('newTextureWithDescriptor:iosurface:plane:', MTLTexture, [MTLTextureDescriptor, IOSurfaceRef, NSUInteger], True),
  ('newSharedTextureWithDescriptor:', MTLTexture, [MTLTextureDescriptor], True),
  ('newSharedTextureWithHandle:', MTLTexture, [MTLSharedTextureHandle], True),
  ('newSamplerStateWithDescriptor:', MTLSamplerState, [MTLSamplerDescriptor], True),
  ('newDefaultLibrary', MTLLibrary, [], True),
  ('newDefaultLibraryWithBundle:error:', MTLLibrary, [NSBundle, c.POINTER[NSError]], True),
  ('newLibraryWithFile:error:', MTLLibrary, [NSString, c.POINTER[NSError]], True),
  ('newLibraryWithURL:error:', MTLLibrary, [NSURL, c.POINTER[NSError]], True),
  ('newLibraryWithData:error:', MTLLibrary, [objc.id_, c.POINTER[NSError]], True),
  ('newLibraryWithSource:options:error:', MTLLibrary, [NSString, MTLCompileOptions, c.POINTER[NSError]], True),
  ('newLibraryWithStitchedDescriptor:error:', MTLLibrary, [MTLStitchedLibraryDescriptor, c.POINTER[NSError]], True),
  ('newRenderPipelineStateWithDescriptor:error:', MTLRenderPipelineState, [MTLRenderPipelineDescriptor, c.POINTER[NSError]], True),
  ('newRenderPipelineStateWithDescriptor:options:reflection:error:', MTLRenderPipelineState, [MTLRenderPipelineDescriptor, MTLPipelineOption, c.POINTER[MTLRenderPipelineReflection], c.POINTER[NSError]], True),
  ('newComputePipelineStateWithFunction:error:', MTLComputePipelineState, [MTLFunction, c.POINTER[NSError]], True),
  ('newComputePipelineStateWithFunction:options:reflection:error:', MTLComputePipelineState, [MTLFunction, MTLPipelineOption, c.POINTER[MTLComputePipelineReflection], c.POINTER[NSError]], True),
  ('newComputePipelineStateWithDescriptor:options:reflection:error:', MTLComputePipelineState, [MTLComputePipelineDescriptor, MTLPipelineOption, c.POINTER[MTLComputePipelineReflection], c.POINTER[NSError]], True),
  ('newFence', MTLFence, [], True),
  ('supportsFeatureSet:', BOOL, [MTLFeatureSet]),
  ('supportsFamily:', BOOL, [MTLGPUFamily]),
  ('supportsTextureSampleCount:', BOOL, [NSUInteger]),
  ('minimumLinearTextureAlignmentForPixelFormat:', NSUInteger, [MTLPixelFormat]),
  ('minimumTextureBufferAlignmentForPixelFormat:', NSUInteger, [MTLPixelFormat]),
  ('newRenderPipelineStateWithTileDescriptor:options:reflection:error:', MTLRenderPipelineState, [MTLTileRenderPipelineDescriptor, MTLPipelineOption, c.POINTER[MTLRenderPipelineReflection], c.POINTER[NSError]], True),
  ('newRenderPipelineStateWithMeshDescriptor:options:reflection:error:', MTLRenderPipelineState, [MTLMeshRenderPipelineDescriptor, MTLPipelineOption, c.POINTER[MTLRenderPipelineReflection], c.POINTER[NSError]], True),
  ('getDefaultSamplePositions:count:', None, [c.POINTER[MTLSamplePosition], NSUInteger]),
  ('supportsRasterizationRateMapWithLayerCount:', BOOL, [NSUInteger]),
  ('newRasterizationRateMapWithDescriptor:', MTLRasterizationRateMap, [MTLRasterizationRateMapDescriptor], True),
  ('newIndirectCommandBufferWithDescriptor:maxCommandCount:options:', MTLIndirectCommandBuffer, [MTLIndirectCommandBufferDescriptor, NSUInteger, MTLResourceOptions], True),
  ('newEvent', MTLEvent, [], True),
  ('newSharedEvent', MTLSharedEvent, [], True),
  ('newSharedEventWithHandle:', MTLSharedEvent, [MTLSharedEventHandle], True),
  ('newIOHandleWithURL:error:', MTLIOFileHandle, [NSURL, c.POINTER[NSError]], True),
  ('newIOCommandQueueWithDescriptor:error:', MTLIOCommandQueue, [MTLIOCommandQueueDescriptor, c.POINTER[NSError]], True),
  ('newIOHandleWithURL:compressionMethod:error:', MTLIOFileHandle, [NSURL, MTLIOCompressionMethod, c.POINTER[NSError]], True),
  ('newIOFileHandleWithURL:error:', MTLIOFileHandle, [NSURL, c.POINTER[NSError]], True),
  ('newIOFileHandleWithURL:compressionMethod:error:', MTLIOFileHandle, [NSURL, MTLIOCompressionMethod, c.POINTER[NSError]], True),
  ('sparseTileSizeWithTextureType:pixelFormat:sampleCount:', MTLSize, [MTLTextureType, MTLPixelFormat, NSUInteger]),
  ('convertSparsePixelRegions:toTileRegions:withTileSize:alignmentMode:numRegions:', None, [c.POINTER[MTLRegion], c.POINTER[MTLRegion], MTLSize, MTLSparseTextureRegionAlignmentMode, NSUInteger]),
  ('convertSparseTileRegions:toPixelRegions:withTileSize:numRegions:', None, [c.POINTER[MTLRegion], c.POINTER[MTLRegion], MTLSize, NSUInteger]),
  ('sparseTileSizeInBytesForSparsePageSize:', NSUInteger, [MTLSparsePageSize]),
  ('sparseTileSizeWithTextureType:pixelFormat:sampleCount:sparsePageSize:', MTLSize, [MTLTextureType, MTLPixelFormat, NSUInteger, MTLSparsePageSize]),
  ('newCounterSampleBufferWithDescriptor:error:', MTLCounterSampleBuffer, [MTLCounterSampleBufferDescriptor, c.POINTER[NSError]], True),
  ('sampleTimestamps:gpuTimestamp:', None, [c.POINTER[MTLTimestamp], c.POINTER[MTLTimestamp]]),
  ('newArgumentEncoderWithBufferBinding:', MTLArgumentEncoder, [MTLBufferBinding], True),
  ('supportsCounterSampling:', BOOL, [MTLCounterSamplingPoint]),
  ('supportsVertexAmplificationCount:', BOOL, [NSUInteger]),
  ('newDynamicLibrary:error:', MTLDynamicLibrary, [MTLLibrary, c.POINTER[NSError]], True),
  ('newDynamicLibraryWithURL:error:', MTLDynamicLibrary, [NSURL, c.POINTER[NSError]], True),
  ('newBinaryArchiveWithDescriptor:error:', MTLBinaryArchive, [MTLBinaryArchiveDescriptor, c.POINTER[NSError]], True),
  ('accelerationStructureSizesWithDescriptor:', MTLAccelerationStructureSizes, [MTLAccelerationStructureDescriptor]),
  ('newAccelerationStructureWithSize:', MTLAccelerationStructure, [NSUInteger], True),
  ('newAccelerationStructureWithDescriptor:', MTLAccelerationStructure, [MTLAccelerationStructureDescriptor], True),
  ('heapAccelerationStructureSizeAndAlignWithSize:', MTLSizeAndAlign, [NSUInteger]),
  ('heapAccelerationStructureSizeAndAlignWithDescriptor:', MTLSizeAndAlign, [MTLAccelerationStructureDescriptor]),
  ('name', NSString, []),
  ('registryID', uint64_t, []),
  ('architecture', MTLArchitecture, []),
  ('maxThreadsPerThreadgroup', MTLSize, []),
  ('isLowPower', BOOL, []),
  ('isHeadless', BOOL, []),
  ('isRemovable', BOOL, []),
  ('hasUnifiedMemory', BOOL, []),
  ('recommendedMaxWorkingSetSize', uint64_t, []),
  ('location', MTLDeviceLocation, []),
  ('locationNumber', NSUInteger, []),
  ('maxTransferRate', uint64_t, []),
  ('isDepth24Stencil8PixelFormatSupported', BOOL, []),
  ('readWriteTextureSupport', MTLReadWriteTextureTier, []),
  ('argumentBuffersSupport', MTLArgumentBuffersTier, []),
  ('areRasterOrderGroupsSupported', BOOL, []),
  ('supports32BitFloatFiltering', BOOL, []),
  ('supports32BitMSAA', BOOL, []),
  ('supportsQueryTextureLOD', BOOL, []),
  ('supportsBCTextureCompression', BOOL, []),
  ('supportsPullModelInterpolation', BOOL, []),
  ('areBarycentricCoordsSupported', BOOL, []),
  ('supportsShaderBarycentricCoordinates', BOOL, []),
  ('currentAllocatedSize', NSUInteger, []),
  ('maxThreadgroupMemoryLength', NSUInteger, []),
  ('maxArgumentBufferSamplerCount', NSUInteger, []),
  ('areProgrammableSamplePositionsSupported', BOOL, []),
  ('peerGroupID', uint64_t, []),
  ('peerIndex', uint32_t, []),
  ('peerCount', uint32_t, []),
  ('sparseTileSizeInBytes', NSUInteger, []),
  ('maxBufferLength', NSUInteger, []),
  ('supportsDynamicLibraries', BOOL, []),
  ('supportsRenderDynamicLibraries', BOOL, []),
  ('supportsRaytracing', BOOL, []),
  ('supportsFunctionPointers', BOOL, []),
  ('supportsFunctionPointersFromRender', BOOL, []),
  ('supportsRaytracingFromRender', BOOL, []),
  ('supportsPrimitiveMotionBlur', BOOL, []),
  ('shouldMaximizeConcurrentCompilation', BOOL, []),
  ('setShouldMaximizeConcurrentCompilation:', None, [BOOL]),
  ('maximumConcurrentCompilationTaskCount', NSUInteger, []),
]
# Autogenerated bitmask enum: which command kinds an indirect command buffer
# may contain (values are power-of-two flags and can be OR'ed together).
class enum_MTLIndirectCommandType(NSUInteger, c.Enum): pass
MTLIndirectCommandTypeDraw = enum_MTLIndirectCommandType.define('MTLIndirectCommandTypeDraw', 1)
MTLIndirectCommandTypeDrawIndexed = enum_MTLIndirectCommandType.define('MTLIndirectCommandTypeDrawIndexed', 2)
MTLIndirectCommandTypeDrawPatches = enum_MTLIndirectCommandType.define('MTLIndirectCommandTypeDrawPatches', 4)
MTLIndirectCommandTypeDrawIndexedPatches = enum_MTLIndirectCommandType.define('MTLIndirectCommandTypeDrawIndexedPatches', 8)
MTLIndirectCommandTypeConcurrentDispatch = enum_MTLIndirectCommandType.define('MTLIndirectCommandTypeConcurrentDispatch', 32)
MTLIndirectCommandTypeConcurrentDispatchThreads = enum_MTLIndirectCommandType.define('MTLIndirectCommandTypeConcurrentDispatchThreads', 64)
MTLIndirectCommandTypeDrawMeshThreadgroups = enum_MTLIndirectCommandType.define('MTLIndirectCommandTypeDrawMeshThreadgroups', 128)
MTLIndirectCommandTypeDrawMeshThreads = enum_MTLIndirectCommandType.define('MTLIndirectCommandTypeDrawMeshThreads', 256)
MTLIndirectCommandType: TypeAlias = enum_MTLIndirectCommandType
# Plain C struct layout; the int in each Annotated[...] is the byte offset.
@c.record
class MTLIndirectCommandBufferExecutionRange(c.Struct):
  SIZE = 8
  location: Annotated[uint32_t, 0]
  length: Annotated[uint32_t, 4]
# Property accessors of MTLIndirectCommandBufferDescriptor (getter/setter pairs).
MTLIndirectCommandBufferDescriptor._bases_ = [NSObject]
MTLIndirectCommandBufferDescriptor._methods_ = [
  ('commandTypes', MTLIndirectCommandType, []),
  ('setCommandTypes:', None, [MTLIndirectCommandType]),
  ('inheritPipelineState', BOOL, []),
  ('setInheritPipelineState:', None, [BOOL]),
  ('inheritBuffers', BOOL, []),
  ('setInheritBuffers:', None, [BOOL]),
  ('maxVertexBufferBindCount', NSUInteger, []),
  ('setMaxVertexBufferBindCount:', None, [NSUInteger]),
  ('maxFragmentBufferBindCount', NSUInteger, []),
  ('setMaxFragmentBufferBindCount:', None, [NSUInteger]),
  ('maxKernelBufferBindCount', NSUInteger, []),
  ('setMaxKernelBufferBindCount:', None, [NSUInteger]),
  ('maxKernelThreadgroupMemoryBindCount', NSUInteger, []),
  ('setMaxKernelThreadgroupMemoryBindCount:', None, [NSUInteger]),
  ('maxObjectBufferBindCount', NSUInteger, []),
  ('setMaxObjectBufferBindCount:', None, [NSUInteger]),
  ('maxMeshBufferBindCount', NSUInteger, []),
  ('setMaxMeshBufferBindCount:', None, [NSUInteger]),
  ('maxObjectThreadgroupMemoryBindCount', NSUInteger, []),
  ('setMaxObjectThreadgroupMemoryBindCount:', None, [NSUInteger]),
  ('supportRayTracing', BOOL, []),
  ('setSupportRayTracing:', None, [BOOL]),
  ('supportDynamicAttributeStride', BOOL, []),
  ('setSupportDynamicAttributeStride:', None, [BOOL]),
]
# Autogenerated tables for the commands stored inside an indirect command
# buffer. Entry format: (selector, return_type, [argument_types]).
class MTLIndirectRenderCommand(objc.Spec): pass
class enum_MTLPrimitiveType(NSUInteger, c.Enum): pass
MTLPrimitiveTypePoint = enum_MTLPrimitiveType.define('MTLPrimitiveTypePoint', 0)
MTLPrimitiveTypeLine = enum_MTLPrimitiveType.define('MTLPrimitiveTypeLine', 1)
MTLPrimitiveTypeLineStrip = enum_MTLPrimitiveType.define('MTLPrimitiveTypeLineStrip', 2)
MTLPrimitiveTypeTriangle = enum_MTLPrimitiveType.define('MTLPrimitiveTypeTriangle', 3)
MTLPrimitiveTypeTriangleStrip = enum_MTLPrimitiveType.define('MTLPrimitiveTypeTriangleStrip', 4)
MTLPrimitiveType: TypeAlias = enum_MTLPrimitiveType
MTLIndirectRenderCommand._bases_ = [NSObject]
MTLIndirectRenderCommand._methods_ = [
  ('setRenderPipelineState:', None, [MTLRenderPipelineState]),
  ('setVertexBuffer:offset:atIndex:', None, [MTLBuffer, NSUInteger, NSUInteger]),
  ('setFragmentBuffer:offset:atIndex:', None, [MTLBuffer, NSUInteger, NSUInteger]),
  ('setVertexBuffer:offset:attributeStride:atIndex:', None, [MTLBuffer, NSUInteger, NSUInteger, NSUInteger]),
  ('drawPatches:patchStart:patchCount:patchIndexBuffer:patchIndexBufferOffset:instanceCount:baseInstance:tessellationFactorBuffer:tessellationFactorBufferOffset:tessellationFactorBufferInstanceStride:', None, [NSUInteger, NSUInteger, NSUInteger, MTLBuffer, NSUInteger, NSUInteger, NSUInteger, MTLBuffer, NSUInteger, NSUInteger]),
  ('drawIndexedPatches:patchStart:patchCount:patchIndexBuffer:patchIndexBufferOffset:controlPointIndexBuffer:controlPointIndexBufferOffset:instanceCount:baseInstance:tessellationFactorBuffer:tessellationFactorBufferOffset:tessellationFactorBufferInstanceStride:', None, [NSUInteger, NSUInteger, NSUInteger, MTLBuffer, NSUInteger, MTLBuffer, NSUInteger, NSUInteger, NSUInteger, MTLBuffer, NSUInteger, NSUInteger]),
  ('drawPrimitives:vertexStart:vertexCount:instanceCount:baseInstance:', None, [MTLPrimitiveType, NSUInteger, NSUInteger, NSUInteger, NSUInteger]),
  ('drawIndexedPrimitives:indexCount:indexType:indexBuffer:indexBufferOffset:instanceCount:baseVertex:baseInstance:', None, [MTLPrimitiveType, NSUInteger, MTLIndexType, MTLBuffer, NSUInteger, NSUInteger, NSInteger, NSUInteger]),
  ('setObjectThreadgroupMemoryLength:atIndex:', None, [NSUInteger, NSUInteger]),
  ('setObjectBuffer:offset:atIndex:', None, [MTLBuffer, NSUInteger, NSUInteger]),
  ('setMeshBuffer:offset:atIndex:', None, [MTLBuffer, NSUInteger, NSUInteger]),
  ('drawMeshThreadgroups:threadsPerObjectThreadgroup:threadsPerMeshThreadgroup:', None, [MTLSize, MTLSize, MTLSize]),
  ('drawMeshThreads:threadsPerObjectThreadgroup:threadsPerMeshThreadgroup:', None, [MTLSize, MTLSize, MTLSize]),
  ('setBarrier', None, []),
  ('clearBarrier', None, []),
  ('reset', None, []),
]
class MTLIndirectComputeCommand(objc.Spec): pass
MTLIndirectComputeCommand._bases_ = [NSObject]
MTLIndirectComputeCommand._methods_ = [
  ('setComputePipelineState:', None, [MTLComputePipelineState]),
  ('setKernelBuffer:offset:atIndex:', None, [MTLBuffer, NSUInteger, NSUInteger]),
  ('setKernelBuffer:offset:attributeStride:atIndex:', None, [MTLBuffer, NSUInteger, NSUInteger, NSUInteger]),
  ('concurrentDispatchThreadgroups:threadsPerThreadgroup:', None, [MTLSize, MTLSize]),
  ('concurrentDispatchThreads:threadsPerThreadgroup:', None, [MTLSize, MTLSize]),
  ('setBarrier', None, []),
  ('clearBarrier', None, []),
  ('setImageblockWidth:height:', None, [NSUInteger, NSUInteger]),
  ('reset', None, []),
  ('setThreadgroupMemoryLength:atIndex:', None, [NSUInteger, NSUInteger]),
  ('setStageInRegion:', None, [MTLRegion]),
]
# Autogenerated tables for MTLIndirectCommandBuffer (a MTLResource subclass)
# and the common MTLCommandEncoder protocol, then final record fix-up and the
# bitfield layout constants for MTLResourceOptions.
MTLIndirectCommandBuffer._bases_ = [MTLResource]
MTLIndirectCommandBuffer._methods_ = [
  ('resetWithRange:', None, [NSRange]),
  ('indirectRenderCommandAtIndex:', MTLIndirectRenderCommand, [NSUInteger]),
  ('indirectComputeCommandAtIndex:', MTLIndirectComputeCommand, [NSUInteger]),
  ('size', NSUInteger, []),
  ('gpuResourceID', MTLResourceID, []),
]
MTLCommandEncoder._bases_ = [NSObject]
MTLCommandEncoder._methods_ = [
  ('endEncoding', None, []),
  ('insertDebugSignpost:', None, [NSString]),
  ('pushDebugGroup:', None, [NSString]),
  ('popDebugGroup', None, []),
  ('device', MTLDevice, []),
  ('label', NSString, []),
  ('setLabel:', None, [NSString]),
]
# finalize all @c.record struct layouts declared above
c.init_records()
# MTLResourceOptions bit layout: cache mode in bits 0-3, storage mode in
# bits 4-7, hazard tracking mode in bits 8-9.
MTLResourceCPUCacheModeShift = 0 # type: ignore
MTLResourceCPUCacheModeMask = (0xf << MTLResourceCPUCacheModeShift) # type: ignore
MTLResourceStorageModeShift = 4 # type: ignore
MTLResourceStorageModeMask = (0xf << MTLResourceStorageModeShift) # type: ignore
MTLResourceHazardTrackingModeShift = 8 # type: ignore
MTLResourceHazardTrackingModeMask = (0x3 << MTLResourceHazardTrackingModeShift) # type: ignore
"repo_id": "tinygrad/tinygrad",
"file_path": "tinygrad/runtime/autogen/metal.py",
"license": "MIT License",
"lines": 1990,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
tinygrad/tinygrad:tinygrad/runtime/support/objc.py | import ctypes, ctypes.util, functools, sys
from tinygrad.runtime.support.c import del_an
from typing import TYPE_CHECKING, Any
# `id_` is a pointer to an Objective-C object (objc `id`). For the type checker
# it is just ctypes.c_void_p; at runtime the subclass adds reference counting.
if TYPE_CHECKING: id_ = ctypes.c_void_p
else:
  class id_(ctypes.c_void_p):
    # checked in __del__: never send "release" while the interpreter is shutting
    # down (module globals like `msg`/`lib` may already be torn down)
    _is_finalizing = sys.is_finalizing # FIXME: why is this needed
    # set to True by retained(): this wrapper owns a +1 reference and must
    # release it when garbage collected
    retain: bool = False
    # This prevents ctypes from converting response to plain int, and dict.fromkeys() can use it to dedup
    def __hash__(self): return hash(self.value)
    def __eq__(self, other): return self.value == other.value
    def __del__(self):
      # only release owned references, and only while the interpreter is alive
      if self.retain and not self._is_finalizing(): self.release()
    def release(self): msg("release")(self)
    def retained(self):
      # mark this wrapper as owning its reference; returns self for chaining
      setattr(self, 'retain', True)
      return self
def returns_retained(f):
  """Decorate *f* so the id_ it returns is marked as an owned (+1) reference."""
  @functools.wraps(f)
  def wrapper(*args, **kwargs):
    return f(*args, **kwargs).retained()
  return wrapper
# Load the Objective-C runtime (libobjc) and type the entry points we use.
lib = ctypes.CDLL(ctypes.util.find_library('objc'))
lib.sel_registerName.restype = id_
# selector registration is idempotent per name, so cache the lookups
getsel = functools.cache(lib.sel_registerName)
lib.objc_getClass.restype = id_
# dispatch_data_create comes from libSystem, not libobjc; per its contract the
# caller owns the returned dispatch_data_t, so wrap it to mark the +1 reference
dispatch_data_create = ctypes.CDLL("/usr/lib/libSystem.dylib").dispatch_data_create
dispatch_data_create.restype = id_
dispatch_data_create = returns_retained(dispatch_data_create)
def msg(sel:str, restype=id_, argtypes=[], retain=False, clsmeth=False):
  """Return a Python callable that sends the Objective-C message `sel`.

  restype/argtypes describe the objc_msgSend signature (del_an strips Annotated
  wrappers); retain=True marks the returned id_ as an owned (+1) reference;
  clsmeth=True sends the message to the receiver's Objective-C class object.
  """
  # lib["objc_msgSend"] (item access) builds a fresh function pointer on every
  # call — unlike attribute access, which caches — so configuring restype and
  # argtypes here cannot clobber a wrapper built by an earlier msg() call.
  # BUGFIX: always type the implicit receiver+selector arguments [id_, id_];
  # the previous `... if argtypes else []` dropped them for no-arg messages,
  # leaving those calls completely untyped.
  (sender:=lib["objc_msgSend"]).restype, sender.argtypes = del_an(restype), [id_, id_]+[del_an(a) for a in argtypes]
  def f(ptr, *args): return sender(ptr._objc_class_ if clsmeth else ptr, getsel(sel.encode()), *args)
  return returns_retained(f) if retain else f
# MetaSpec turns declarative `_methods_`/`_classmethods_`/`_bases_` tables on
# Spec subclasses into bound objc_msgSend wrappers, and re-propagates inherited
# tables to subclasses that were declared before their base's table was set.
if TYPE_CHECKING:
  import _ctypes
  class MetaSpec(_ctypes._PyCSimpleType):
    _objc_class_: id_
    def __getattr__(cls, nm:str) -> Any: ...
    def __setattr__(cls, nm:str, v:Any): ...
else:
  class MetaSpec(type(id_)):
    def __new__(mcs, name, bases, dct):
      # look up the Objective-C class object by the Python class name
      cls = super().__new__(mcs, name, bases, {'_objc_class_': lib.objc_getClass(name.encode()), '_children_': set(), **dct})
      cls._methods_, cls._classmethods_ = dct.get('_methods_', []), dct.get('_classmethods_', [])
      return cls
    def __setattr__(cls, k, v):
      super().__setattr__(k, v)
      if k in ("_methods_", "_classmethods_"):
        # BUGFIX: compare the attribute NAME (k) — the old code compared the
        # VALUE (v, a list) to the string "_classmethods_", which is always
        # False, so class methods were silently bound as instance methods.
        for m in v: cls._addmeth(m, clsmeth=(k=="_classmethods_"))
        for c in cls._children_: c._inherit(cls)
      if k == "_bases_":
        for b in v:
          b._children_.add(cls)
          cls._inherit(b)
    def _inherit(cls, b):
      # depth-first: pull in tables from b's declared bases first, then b's own,
      # then push the update down to subclasses declared before this point
      for _b in getattr(b, "_bases_", []): cls._inherit(_b)
      for m in getattr(b, "_methods_", []): cls._addmeth(m)
      for m in getattr(b, "_classmethods_", []): cls._addmeth(m, True)
      for c in cls._children_: c._inherit(cls)
    def _addmeth(cls, m, clsmeth=False):
      # ('sel:with:', restype, [argtypes][, retain]) -> Python method `sel_with`;
      # the string 'instancetype' in the spec stands for "this class"
      nm = m[0].strip(':').replace(':', '_')
      if clsmeth: setattr(cls, nm, classmethod(msg(m[0], cls if m[1] == 'instancetype' else m[1],
        [cls if a == 'instancetype' else a for a in m[2]], *m[3:], clsmeth=True))) # type: ignore[misc]
      else: setattr(cls, nm, msg(m[0], cls if m[1] == 'instancetype' else m[1], [cls if a == 'instancetype' else a for a in m[2]], *m[3:]))
class Spec(id_, metaclass=MetaSpec):
  # Base class for generated Objective-C bindings: an id_ pointer whose
  # class-level _methods_/_classmethods_ tables are converted into Python
  # methods by MetaSpec. __getattr__ below only quiets the type checker.
  if TYPE_CHECKING:
    def __getattr__(self, nm:str) -> Any: ...
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "tinygrad/runtime/support/objc.py",
"license": "MIT License",
"lines": 64,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
tinygrad/tinygrad:test/external/external_openpilot_image_warp.py | import time
from tinygrad.tensor import Tensor, Device
# Model input geometry (openpilot driving model).
MODEL_WIDTH = 512
MODEL_HEIGHT = 256
# 3/2 bytes per pixel — YUV 4:2:0-style packing, presumably (TODO confirm)
MODEL_FRAME_SIZE = MODEL_WIDTH * MODEL_HEIGHT * 3 // 2
IMG_INPUT_SHAPE = (1, 12, 128, 256)
def tensor_arange(end):
  """Tensor holding the floats 0.0, 1.0, ..., end-1 (arange without Tensor.arange)."""
  return Tensor(list(map(float, range(end))))
def tensor_round(tensor:Tensor):
  """Elementwise round-half-up: floor(t + 0.5)."""
  shifted = tensor + 0.5
  return shifted.floor()
# Source camera frame is h_src rows by w_src columns; destination is the model
# input resolution.
h_src, w_src = 1208, 1928
h_dst, w_dst = MODEL_HEIGHT, MODEL_WIDTH
# Pixel-coordinate grids of the destination image ...
x = tensor_arange(w_dst).reshape(1, w_dst).expand(h_dst, w_dst)
y = tensor_arange(h_dst).reshape(h_dst, 1).expand(h_dst, w_dst)
ones = Tensor.ones_like(x)
# ... stacked into 3 x (h_dst*w_dst) homogeneous coordinates [x; y; 1],
# computed once at import so every warp call reuses the same grid.
dst_coords = x.reshape((1,-1)).cat(y.reshape((1,-1))).cat(ones.reshape((1,-1)))
def warp_perspective_tinygrad(src:Tensor, M_inv:Tensor) -> Tensor:
  """Nearest-neighbour perspective warp of `src` onto the (h_dst, w_dst) grid.

  M_inv maps destination pixel coordinates back into source coordinates.
  """
  # back-project every destination pixel into the source image
  homogeneous = M_inv @ dst_coords
  homogeneous = homogeneous / homogeneous[2:3, :]
  xs = homogeneous[0].reshape(h_dst, w_dst)
  ys = homogeneous[1].reshape(h_dst, w_dst)
  # nearest-neighbour sample positions, clamped to the source bounds
  col = tensor_round(xs).clip(0, w_src - 1).cast('int')
  row = tensor_round(ys).clip(0, h_src - 1).cast('int')
  # TODO: make 2d indexing fast
  flat_idx = row*src.shape[1] + col
  sampled = src.flatten()[flat_idx]
  return sampled.reshape(h_dst, w_dst)
if __name__ == "__main__":
  from tinygrad.engine.jit import TinyJit
  update_img_jit = TinyJit(warp_perspective_tinygrad, prune=True)
  step_times = []
  for _ in range(10):
    # regenerate inputs each iteration so the JIT cannot reuse stale buffers.
    # BUGFIX: the source frame must be (h_src, w_src) = (1208, 1928) rows x cols
    # to match the `row*src.shape[1] + col` flat-index math in the warp; the old
    # Tensor.randn(1928,1208) was transposed.
    inputs = [Tensor.randn(h_src, w_src), Tensor.randn(3, 3)]
    Tensor.realize(*inputs)
    Device.default.synchronize()
    # do the warp, timing the enqueue and the full run separately
    st = time.perf_counter()
    out = update_img_jit(*inputs)
    mt = time.perf_counter()
    out.contiguous().realize()
    Device.default.synchronize()
    et = time.perf_counter()
    # measure the time
    step_times.append((et-st)*1e3)
    print(f"enqueue {(mt-st)*1e3:6.2f} ms -- total run {step_times[-1]:6.2f} ms")
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "test/external/external_openpilot_image_warp.py",
"license": "MIT License",
"lines": 44,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
tinygrad/tinygrad:tinygrad/runtime/autogen/nv.py | # mypy: disable-error-code="empty-body"
from __future__ import annotations
import ctypes
from typing import Annotated, Literal, TypeAlias
from tinygrad.runtime.support.c import _IO, _IOW, _IOR, _IOWR
from tinygrad.runtime.support import c
# Autogenerated struct layouts for the NVIDIA GSP boot/RPC protocol.
# In field annotations Annotated[T, N], N is the byte offset inside the struct.
@c.record
class MCTP_HEADER(c.Struct):
  SIZE = 7
  constBlob: Annotated[NvU32, 0]
  msgType: Annotated[NvU8, 4]
  vendorId: Annotated[NvU16, 5]
NvU32: TypeAlias = Annotated[int, ctypes.c_uint32]
NvU8: TypeAlias = Annotated[int, ctypes.c_ubyte]
NvU16: TypeAlias = Annotated[int, ctypes.c_uint16]
# COT boot payload: offsets/sizes of the GSP FMC image, FRTS regions, and the
# hash/public key/signature blobs used to verify them.
@c.record
class NVDM_PAYLOAD_COT(c.Struct):
  SIZE = 860
  version: Annotated[NvU16, 0]
  size: Annotated[NvU16, 2]
  gspFmcSysmemOffset: Annotated[NvU64, 4]
  frtsSysmemOffset: Annotated[NvU64, 12]
  frtsSysmemSize: Annotated[NvU32, 20]
  frtsVidmemOffset: Annotated[NvU64, 24]
  frtsVidmemSize: Annotated[NvU32, 32]
  hash384: Annotated[c.Array[NvU32, Literal[12]], 36]
  publicKey: Annotated[c.Array[NvU32, Literal[96]], 84]
  signature: Annotated[c.Array[NvU32, Literal[96]], 468]
  gspBootArgsSysmemOffset: Annotated[NvU64, 852]
NvU64: TypeAlias = Annotated[int, ctypes.c_uint64]
# Arguments handed to GSP-RM at init: message-queue placement, suspend/resume
# state, and a profiler buffer (pa/size pair).
@c.record
class MESSAGE_QUEUE_INIT_ARGUMENTS(c.Struct):
  SIZE = 32
  sharedMemPhysAddr: Annotated[NvU64, 0]
  pageTableEntryCount: Annotated[NvU32, 8]
  cmdQueueOffset: Annotated[NvLength, 16]
  statQueueOffset: Annotated[NvLength, 24]
NvLength: TypeAlias = Annotated[int, ctypes.c_uint64]
@c.record
class GSP_SR_INIT_ARGUMENTS(c.Struct):
  SIZE = 12
  oldLevel: Annotated[NvU32, 0]
  flags: Annotated[NvU32, 4]
  bInPMTransition: Annotated[NvBool, 8]
NvBool: TypeAlias = Annotated[int, ctypes.c_ubyte]
@c.record
class GSP_ARGUMENTS_CACHED(c.Struct):
  SIZE = 72
  messageQueueInitArguments: Annotated[MESSAGE_QUEUE_INIT_ARGUMENTS, 0]
  srInitArguments: Annotated[GSP_SR_INIT_ARGUMENTS, 32]
  gpuInstance: Annotated[NvU32, 44]
  bDmemStack: Annotated[NvBool, 48]
  profilerArgs: Annotated[GSP_ARGUMENTS_CACHED_profilerArgs, 56]
# anonymous nested struct flattened out by the generator
@c.record
class GSP_ARGUMENTS_CACHED_profilerArgs(c.Struct):
  SIZE = 16
  pa: Annotated[NvU64, 0]
  size: Annotated[NvU64, 8]
# DMA target selector for GSP transfers
class GSP_DMA_TARGET(Annotated[int, ctypes.c_uint32], c.Enum): pass
GSP_DMA_TARGET_LOCAL_FB = GSP_DMA_TARGET.define('GSP_DMA_TARGET_LOCAL_FB', 0)
GSP_DMA_TARGET_COHERENT_SYSTEM = GSP_DMA_TARGET.define('GSP_DMA_TARGET_COHERENT_SYSTEM', 1)
GSP_DMA_TARGET_NONCOHERENT_SYSTEM = GSP_DMA_TARGET.define('GSP_DMA_TARGET_NONCOHERENT_SYSTEM', 2)
GSP_DMA_TARGET_COUNT = GSP_DMA_TARGET.define('GSP_DMA_TARGET_COUNT', 3)
# Parameter blocks composed into GSP_FMC_BOOT_PARAMS, the argument structure
# for booting the GSP first-mode controller / GSP-RM.
@c.record
class struct_GSP_FMC_INIT_PARAMS(c.Struct):
  SIZE = 4
  regkeys: Annotated[NvU32, 0]
GSP_FMC_INIT_PARAMS: TypeAlias = struct_GSP_FMC_INIT_PARAMS
@c.record
class struct_GSP_ACR_BOOT_GSP_RM_PARAMS(c.Struct):
  SIZE = 32
  target: Annotated[GSP_DMA_TARGET, 0]
  gspRmDescSize: Annotated[NvU32, 4]
  gspRmDescOffset: Annotated[NvU64, 8]
  wprCarveoutOffset: Annotated[NvU64, 16]
  wprCarveoutSize: Annotated[NvU32, 24]
  bIsGspRmBoot: Annotated[NvBool, 28]
GSP_ACR_BOOT_GSP_RM_PARAMS: TypeAlias = struct_GSP_ACR_BOOT_GSP_RM_PARAMS
@c.record
class struct_GSP_RM_PARAMS(c.Struct):
  SIZE = 16
  target: Annotated[GSP_DMA_TARGET, 0]
  bootArgsOffset: Annotated[NvU64, 8]
GSP_RM_PARAMS: TypeAlias = struct_GSP_RM_PARAMS
@c.record
class struct_GSP_SPDM_PARAMS(c.Struct):
  SIZE = 24
  target: Annotated[GSP_DMA_TARGET, 0]
  payloadBufferOffset: Annotated[NvU64, 8]
  payloadBufferSize: Annotated[NvU32, 16]
GSP_SPDM_PARAMS: TypeAlias = struct_GSP_SPDM_PARAMS
@c.record
class struct_GSP_FMC_BOOT_PARAMS(c.Struct):
  SIZE = 80
  initParams: Annotated[GSP_FMC_INIT_PARAMS, 0]
  bootGspRmParams: Annotated[GSP_ACR_BOOT_GSP_RM_PARAMS, 8]
  gspRmParams: Annotated[GSP_RM_PARAMS, 40]
  gspSpdmParams: Annotated[GSP_SPDM_PARAMS, 56]
GSP_FMC_BOOT_PARAMS: TypeAlias = struct_GSP_FMC_BOOT_PARAMS
# GSP firmware WPR (write-protected region) metadata block.
# NOTE: several fields share byte offsets (e.g. 72/76/80, 208-236) — these are
# overlapping union-style views that the generator emits flat; which view is
# valid depends on firmware revision, presumably (confirm against nvidia headers).
@c.record
class GspFwWprMeta(c.Struct):
  SIZE = 256
  magic: Annotated[NvU64, 0]
  revision: Annotated[NvU64, 8]
  sysmemAddrOfRadix3Elf: Annotated[NvU64, 16]
  sizeOfRadix3Elf: Annotated[NvU64, 24]
  sysmemAddrOfBootloader: Annotated[NvU64, 32]
  sizeOfBootloader: Annotated[NvU64, 40]
  bootloaderCodeOffset: Annotated[NvU64, 48]
  bootloaderDataOffset: Annotated[NvU64, 56]
  bootloaderManifestOffset: Annotated[NvU64, 64]
  sysmemAddrOfSignature: Annotated[NvU64, 72]
  sizeOfSignature: Annotated[NvU64, 80]
  gspFwHeapFreeListWprOffset: Annotated[NvU32, 72]
  unused0: Annotated[NvU32, 76]
  unused1: Annotated[NvU64, 80]
  gspFwRsvdStart: Annotated[NvU64, 88]
  nonWprHeapOffset: Annotated[NvU64, 96]
  nonWprHeapSize: Annotated[NvU64, 104]
  gspFwWprStart: Annotated[NvU64, 112]
  gspFwHeapOffset: Annotated[NvU64, 120]
  gspFwHeapSize: Annotated[NvU64, 128]
  gspFwOffset: Annotated[NvU64, 136]
  bootBinOffset: Annotated[NvU64, 144]
  frtsOffset: Annotated[NvU64, 152]
  frtsSize: Annotated[NvU64, 160]
  gspFwWprEnd: Annotated[NvU64, 168]
  fbSize: Annotated[NvU64, 176]
  vgaWorkspaceOffset: Annotated[NvU64, 184]
  vgaWorkspaceSize: Annotated[NvU64, 192]
  bootCount: Annotated[NvU64, 200]
  partitionRpcAddr: Annotated[NvU64, 208]
  partitionRpcRequestOffset: Annotated[NvU16, 216]
  partitionRpcReplyOffset: Annotated[NvU16, 218]
  elfCodeOffset: Annotated[NvU32, 220]
  elfDataOffset: Annotated[NvU32, 224]
  elfCodeSize: Annotated[NvU32, 228]
  elfDataSize: Annotated[NvU32, 232]
  lsUcodeVersion: Annotated[NvU32, 236]
  partitionRpcPadding: Annotated[c.Array[NvU32, Literal[4]], 208]
  sysmemAddrOfCrashReportQueue: Annotated[NvU64, 224]
  sizeOfCrashReportQueue: Annotated[NvU32, 232]
  lsUcodeVersionPadding: Annotated[c.Array[NvU32, Literal[1]], 236]
  gspFwHeapVfPartitionCount: Annotated[NvU8, 240]
  flags: Annotated[NvU8, 241]
  padding: Annotated[c.Array[NvU8, Literal[2]], 242]
  pmuReservedSize: Annotated[NvU32, 244]
  verified: Annotated[NvU64, 248]
# GSP firmware heap free-list, suspend/resume metadata, and the RISC-V ucode
# image descriptor (section offsets/sizes within the firmware blob).
@c.record
class GspFwHeapFreeRegion(c.Struct):
  SIZE = 8
  offs: Annotated[NvU32, 0]
  length: Annotated[NvU32, 4]
@c.record
class GspFwHeapFreeList(c.Struct):
  SIZE = 1040
  magic: Annotated[NvU64, 0]
  nregions: Annotated[NvU32, 8]
  regions: Annotated[c.Array[GspFwHeapFreeRegion, Literal[128]], 12]
@c.record
class GspFwSRMeta(c.Struct):
  SIZE = 256
  magic: Annotated[NvU64, 0]
  revision: Annotated[NvU64, 8]
  sysmemAddrOfSuspendResumeData: Annotated[NvU64, 16]
  sizeOfSuspendResumeData: Annotated[NvU64, 24]
  internal: Annotated[c.Array[NvU32, Literal[32]], 32]
  flags: Annotated[NvU32, 160]
  subrevision: Annotated[NvU32, 164]
  padding: Annotated[c.Array[NvU32, Literal[22]], 168]
@c.record
class RM_RISCV_UCODE_DESC(c.Struct):
  SIZE = 84
  version: Annotated[NvU32, 0]
  bootloaderOffset: Annotated[NvU32, 4]
  bootloaderSize: Annotated[NvU32, 8]
  bootloaderParamOffset: Annotated[NvU32, 12]
  bootloaderParamSize: Annotated[NvU32, 16]
  riscvElfOffset: Annotated[NvU32, 20]
  riscvElfSize: Annotated[NvU32, 24]
  appVersion: Annotated[NvU32, 28]
  manifestOffset: Annotated[NvU32, 32]
  manifestSize: Annotated[NvU32, 36]
  monitorDataOffset: Annotated[NvU32, 40]
  monitorDataSize: Annotated[NvU32, 44]
  monitorCodeOffset: Annotated[NvU32, 48]
  monitorCodeSize: Annotated[NvU32, 52]
  bIsMonitorEnabled: Annotated[NvU32, 56]
  swbromCodeOffset: Annotated[NvU32, 60]
  swbromCodeSize: Annotated[NvU32, 64]
  swbromDataOffset: Annotated[NvU32, 68]
  swbromDataSize: Annotated[NvU32, 72]
  fbReservedSize: Annotated[NvU32, 76]
  bSignedAsCode: Annotated[NvU32, 80]
# Miscellaneous autogenerated RPC enums and virtual-display parameter structs.
class RPC_GR_BUFFER_TYPE(Annotated[int, ctypes.c_uint32], c.Enum): pass
RPC_GR_BUFFER_TYPE_GRAPHICS = RPC_GR_BUFFER_TYPE.define('RPC_GR_BUFFER_TYPE_GRAPHICS', 0)
RPC_GR_BUFFER_TYPE_GRAPHICS_ZCULL = RPC_GR_BUFFER_TYPE.define('RPC_GR_BUFFER_TYPE_GRAPHICS_ZCULL', 1)
RPC_GR_BUFFER_TYPE_GRAPHICS_GRAPHICS_PM = RPC_GR_BUFFER_TYPE.define('RPC_GR_BUFFER_TYPE_GRAPHICS_GRAPHICS_PM', 2)
RPC_GR_BUFFER_TYPE_COMPUTE_PREEMPT = RPC_GR_BUFFER_TYPE.define('RPC_GR_BUFFER_TYPE_COMPUTE_PREEMPT', 3)
RPC_GR_BUFFER_TYPE_GRAPHICS_PATCH = RPC_GR_BUFFER_TYPE.define('RPC_GR_BUFFER_TYPE_GRAPHICS_PATCH', 4)
RPC_GR_BUFFER_TYPE_GRAPHICS_BUNDLE_CB = RPC_GR_BUFFER_TYPE.define('RPC_GR_BUFFER_TYPE_GRAPHICS_BUNDLE_CB', 5)
RPC_GR_BUFFER_TYPE_GRAPHICS_PAGEPOOL_GLOBAL = RPC_GR_BUFFER_TYPE.define('RPC_GR_BUFFER_TYPE_GRAPHICS_PAGEPOOL_GLOBAL', 6)
RPC_GR_BUFFER_TYPE_GRAPHICS_ATTRIBUTE_CB = RPC_GR_BUFFER_TYPE.define('RPC_GR_BUFFER_TYPE_GRAPHICS_ATTRIBUTE_CB', 7)
RPC_GR_BUFFER_TYPE_GRAPHICS_RTV_CB_GLOBAL = RPC_GR_BUFFER_TYPE.define('RPC_GR_BUFFER_TYPE_GRAPHICS_RTV_CB_GLOBAL', 8)
RPC_GR_BUFFER_TYPE_GRAPHICS_GFXP_POOL = RPC_GR_BUFFER_TYPE.define('RPC_GR_BUFFER_TYPE_GRAPHICS_GFXP_POOL', 9)
RPC_GR_BUFFER_TYPE_GRAPHICS_GFXP_CTRL_BLK = RPC_GR_BUFFER_TYPE.define('RPC_GR_BUFFER_TYPE_GRAPHICS_GFXP_CTRL_BLK', 10)
RPC_GR_BUFFER_TYPE_GRAPHICS_FECS_EVENT = RPC_GR_BUFFER_TYPE.define('RPC_GR_BUFFER_TYPE_GRAPHICS_FECS_EVENT', 11)
RPC_GR_BUFFER_TYPE_GRAPHICS_PRIV_ACCESS_MAP = RPC_GR_BUFFER_TYPE.define('RPC_GR_BUFFER_TYPE_GRAPHICS_PRIV_ACCESS_MAP', 12)
RPC_GR_BUFFER_TYPE_GRAPHICS_MAX = RPC_GR_BUFFER_TYPE.define('RPC_GR_BUFFER_TYPE_GRAPHICS_MAX', 13)
class FECS_ERROR_EVENT_TYPE(Annotated[int, ctypes.c_uint32], c.Enum): pass
FECS_ERROR_EVENT_TYPE_NONE = FECS_ERROR_EVENT_TYPE.define('FECS_ERROR_EVENT_TYPE_NONE', 0)
FECS_ERROR_EVENT_TYPE_BUFFER_RESET_REQUIRED = FECS_ERROR_EVENT_TYPE.define('FECS_ERROR_EVENT_TYPE_BUFFER_RESET_REQUIRED', 1)
FECS_ERROR_EVENT_TYPE_BUFFER_FULL = FECS_ERROR_EVENT_TYPE.define('FECS_ERROR_EVENT_TYPE_BUFFER_FULL', 2)
FECS_ERROR_EVENT_TYPE_MAX = FECS_ERROR_EVENT_TYPE.define('FECS_ERROR_EVENT_TYPE_MAX', 3)
class NV_RPC_UPDATE_PDE_BAR_TYPE(Annotated[int, ctypes.c_uint32], c.Enum): pass
NV_RPC_UPDATE_PDE_BAR_1 = NV_RPC_UPDATE_PDE_BAR_TYPE.define('NV_RPC_UPDATE_PDE_BAR_1', 0)
NV_RPC_UPDATE_PDE_BAR_2 = NV_RPC_UPDATE_PDE_BAR_TYPE.define('NV_RPC_UPDATE_PDE_BAR_2', 1)
NV_RPC_UPDATE_PDE_BAR_INVALID = NV_RPC_UPDATE_PDE_BAR_TYPE.define('NV_RPC_UPDATE_PDE_BAR_INVALID', 2)
@c.record
class struct_VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS(c.Struct):
  SIZE = 12
  headIndex: Annotated[NvU32, 0]
  maxHResolution: Annotated[NvU32, 4]
  maxVResolution: Annotated[NvU32, 8]
VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS: TypeAlias = struct_VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS
@c.record
class struct_VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS(c.Struct):
  SIZE = 8
  numHeads: Annotated[NvU32, 0]
  maxNumHeads: Annotated[NvU32, 4]
VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS: TypeAlias = struct_VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS
class GPU_RECOVERY_EVENT_TYPE(Annotated[int, ctypes.c_uint32], c.Enum): pass
GPU_RECOVERY_EVENT_TYPE_REFRESH = GPU_RECOVERY_EVENT_TYPE.define('GPU_RECOVERY_EVENT_TYPE_REFRESH', 0)
GPU_RECOVERY_EVENT_TYPE_GPU_DRAIN_P2P = GPU_RECOVERY_EVENT_TYPE.define('GPU_RECOVERY_EVENT_TYPE_GPU_DRAIN_P2P', 1)
GPU_RECOVERY_EVENT_TYPE_SYS_REBOOT = GPU_RECOVERY_EVENT_TYPE.define('GPU_RECOVERY_EVENT_TYPE_SYS_REBOOT', 2)
# RPC function opcodes (u32 enum) for messages sent from the CPU driver to
# GSP-RM / the vGPU plugin. Each value is a wire-format message ID; the numbers
# must match the firmware side exactly, so never renumber or reorder.
# RESERVED_* entries are placeholders keeping later values stable.
# NUM_FUNCTIONS (223) is a count sentinel, not a real opcode.
# Generated binding — presumably produced from NVIDIA's vgpu/GSP RPC headers; verify against upstream before editing.
class rpc_fns(Annotated[int, ctypes.c_uint32], c.Enum): pass
NV_VGPU_MSG_FUNCTION_NOP = rpc_fns.define('NV_VGPU_MSG_FUNCTION_NOP', 0)
NV_VGPU_MSG_FUNCTION_SET_GUEST_SYSTEM_INFO = rpc_fns.define('NV_VGPU_MSG_FUNCTION_SET_GUEST_SYSTEM_INFO', 1)
NV_VGPU_MSG_FUNCTION_ALLOC_ROOT = rpc_fns.define('NV_VGPU_MSG_FUNCTION_ALLOC_ROOT', 2)
NV_VGPU_MSG_FUNCTION_ALLOC_DEVICE = rpc_fns.define('NV_VGPU_MSG_FUNCTION_ALLOC_DEVICE', 3)
NV_VGPU_MSG_FUNCTION_ALLOC_MEMORY = rpc_fns.define('NV_VGPU_MSG_FUNCTION_ALLOC_MEMORY', 4)
NV_VGPU_MSG_FUNCTION_ALLOC_CTX_DMA = rpc_fns.define('NV_VGPU_MSG_FUNCTION_ALLOC_CTX_DMA', 5)
NV_VGPU_MSG_FUNCTION_ALLOC_CHANNEL_DMA = rpc_fns.define('NV_VGPU_MSG_FUNCTION_ALLOC_CHANNEL_DMA', 6)
NV_VGPU_MSG_FUNCTION_MAP_MEMORY = rpc_fns.define('NV_VGPU_MSG_FUNCTION_MAP_MEMORY', 7)
NV_VGPU_MSG_FUNCTION_BIND_CTX_DMA = rpc_fns.define('NV_VGPU_MSG_FUNCTION_BIND_CTX_DMA', 8)
NV_VGPU_MSG_FUNCTION_ALLOC_OBJECT = rpc_fns.define('NV_VGPU_MSG_FUNCTION_ALLOC_OBJECT', 9)
NV_VGPU_MSG_FUNCTION_FREE = rpc_fns.define('NV_VGPU_MSG_FUNCTION_FREE', 10)
NV_VGPU_MSG_FUNCTION_LOG = rpc_fns.define('NV_VGPU_MSG_FUNCTION_LOG', 11)
NV_VGPU_MSG_FUNCTION_ALLOC_VIDMEM = rpc_fns.define('NV_VGPU_MSG_FUNCTION_ALLOC_VIDMEM', 12)
NV_VGPU_MSG_FUNCTION_UNMAP_MEMORY = rpc_fns.define('NV_VGPU_MSG_FUNCTION_UNMAP_MEMORY', 13)
NV_VGPU_MSG_FUNCTION_MAP_MEMORY_DMA = rpc_fns.define('NV_VGPU_MSG_FUNCTION_MAP_MEMORY_DMA', 14)
NV_VGPU_MSG_FUNCTION_UNMAP_MEMORY_DMA = rpc_fns.define('NV_VGPU_MSG_FUNCTION_UNMAP_MEMORY_DMA', 15)
NV_VGPU_MSG_FUNCTION_GET_EDID = rpc_fns.define('NV_VGPU_MSG_FUNCTION_GET_EDID', 16)
NV_VGPU_MSG_FUNCTION_ALLOC_DISP_CHANNEL = rpc_fns.define('NV_VGPU_MSG_FUNCTION_ALLOC_DISP_CHANNEL', 17)
NV_VGPU_MSG_FUNCTION_ALLOC_DISP_OBJECT = rpc_fns.define('NV_VGPU_MSG_FUNCTION_ALLOC_DISP_OBJECT', 18)
NV_VGPU_MSG_FUNCTION_ALLOC_SUBDEVICE = rpc_fns.define('NV_VGPU_MSG_FUNCTION_ALLOC_SUBDEVICE', 19)
NV_VGPU_MSG_FUNCTION_ALLOC_DYNAMIC_MEMORY = rpc_fns.define('NV_VGPU_MSG_FUNCTION_ALLOC_DYNAMIC_MEMORY', 20)
NV_VGPU_MSG_FUNCTION_DUP_OBJECT = rpc_fns.define('NV_VGPU_MSG_FUNCTION_DUP_OBJECT', 21)
NV_VGPU_MSG_FUNCTION_IDLE_CHANNELS = rpc_fns.define('NV_VGPU_MSG_FUNCTION_IDLE_CHANNELS', 22)
NV_VGPU_MSG_FUNCTION_ALLOC_EVENT = rpc_fns.define('NV_VGPU_MSG_FUNCTION_ALLOC_EVENT', 23)
NV_VGPU_MSG_FUNCTION_SEND_EVENT = rpc_fns.define('NV_VGPU_MSG_FUNCTION_SEND_EVENT', 24)
NV_VGPU_MSG_FUNCTION_REMAPPER_CONTROL = rpc_fns.define('NV_VGPU_MSG_FUNCTION_REMAPPER_CONTROL', 25)
NV_VGPU_MSG_FUNCTION_DMA_CONTROL = rpc_fns.define('NV_VGPU_MSG_FUNCTION_DMA_CONTROL', 26)
NV_VGPU_MSG_FUNCTION_DMA_FILL_PTE_MEM = rpc_fns.define('NV_VGPU_MSG_FUNCTION_DMA_FILL_PTE_MEM', 27)
NV_VGPU_MSG_FUNCTION_MANAGE_HW_RESOURCE = rpc_fns.define('NV_VGPU_MSG_FUNCTION_MANAGE_HW_RESOURCE', 28)
NV_VGPU_MSG_FUNCTION_BIND_ARBITRARY_CTX_DMA = rpc_fns.define('NV_VGPU_MSG_FUNCTION_BIND_ARBITRARY_CTX_DMA', 29)
NV_VGPU_MSG_FUNCTION_CREATE_FB_SEGMENT = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CREATE_FB_SEGMENT', 30)
NV_VGPU_MSG_FUNCTION_DESTROY_FB_SEGMENT = rpc_fns.define('NV_VGPU_MSG_FUNCTION_DESTROY_FB_SEGMENT', 31)
NV_VGPU_MSG_FUNCTION_ALLOC_SHARE_DEVICE = rpc_fns.define('NV_VGPU_MSG_FUNCTION_ALLOC_SHARE_DEVICE', 32)
NV_VGPU_MSG_FUNCTION_DEFERRED_API_CONTROL = rpc_fns.define('NV_VGPU_MSG_FUNCTION_DEFERRED_API_CONTROL', 33)
NV_VGPU_MSG_FUNCTION_REMOVE_DEFERRED_API = rpc_fns.define('NV_VGPU_MSG_FUNCTION_REMOVE_DEFERRED_API', 34)
NV_VGPU_MSG_FUNCTION_SIM_ESCAPE_READ = rpc_fns.define('NV_VGPU_MSG_FUNCTION_SIM_ESCAPE_READ', 35)
NV_VGPU_MSG_FUNCTION_SIM_ESCAPE_WRITE = rpc_fns.define('NV_VGPU_MSG_FUNCTION_SIM_ESCAPE_WRITE', 36)
NV_VGPU_MSG_FUNCTION_SIM_MANAGE_DISPLAY_CONTEXT_DMA = rpc_fns.define('NV_VGPU_MSG_FUNCTION_SIM_MANAGE_DISPLAY_CONTEXT_DMA', 37)
NV_VGPU_MSG_FUNCTION_FREE_VIDMEM_VIRT = rpc_fns.define('NV_VGPU_MSG_FUNCTION_FREE_VIDMEM_VIRT', 38)
NV_VGPU_MSG_FUNCTION_PERF_GET_PSTATE_INFO = rpc_fns.define('NV_VGPU_MSG_FUNCTION_PERF_GET_PSTATE_INFO', 39)
NV_VGPU_MSG_FUNCTION_PERF_GET_PERFMON_SAMPLE = rpc_fns.define('NV_VGPU_MSG_FUNCTION_PERF_GET_PERFMON_SAMPLE', 40)
NV_VGPU_MSG_FUNCTION_PERF_GET_VIRTUAL_PSTATE_INFO = rpc_fns.define('NV_VGPU_MSG_FUNCTION_PERF_GET_VIRTUAL_PSTATE_INFO', 41)
NV_VGPU_MSG_FUNCTION_PERF_GET_LEVEL_INFO = rpc_fns.define('NV_VGPU_MSG_FUNCTION_PERF_GET_LEVEL_INFO', 42)
NV_VGPU_MSG_FUNCTION_MAP_SEMA_MEMORY = rpc_fns.define('NV_VGPU_MSG_FUNCTION_MAP_SEMA_MEMORY', 43)
NV_VGPU_MSG_FUNCTION_UNMAP_SEMA_MEMORY = rpc_fns.define('NV_VGPU_MSG_FUNCTION_UNMAP_SEMA_MEMORY', 44)
NV_VGPU_MSG_FUNCTION_SET_SURFACE_PROPERTIES = rpc_fns.define('NV_VGPU_MSG_FUNCTION_SET_SURFACE_PROPERTIES', 45)
NV_VGPU_MSG_FUNCTION_CLEANUP_SURFACE = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CLEANUP_SURFACE', 46)
NV_VGPU_MSG_FUNCTION_UNLOADING_GUEST_DRIVER = rpc_fns.define('NV_VGPU_MSG_FUNCTION_UNLOADING_GUEST_DRIVER', 47)
NV_VGPU_MSG_FUNCTION_TDR_SET_TIMEOUT_STATE = rpc_fns.define('NV_VGPU_MSG_FUNCTION_TDR_SET_TIMEOUT_STATE', 48)
NV_VGPU_MSG_FUNCTION_SWITCH_TO_VGA = rpc_fns.define('NV_VGPU_MSG_FUNCTION_SWITCH_TO_VGA', 49)
NV_VGPU_MSG_FUNCTION_GPU_EXEC_REG_OPS = rpc_fns.define('NV_VGPU_MSG_FUNCTION_GPU_EXEC_REG_OPS', 50)
NV_VGPU_MSG_FUNCTION_GET_STATIC_INFO = rpc_fns.define('NV_VGPU_MSG_FUNCTION_GET_STATIC_INFO', 51)
NV_VGPU_MSG_FUNCTION_ALLOC_VIRTMEM = rpc_fns.define('NV_VGPU_MSG_FUNCTION_ALLOC_VIRTMEM', 52)
NV_VGPU_MSG_FUNCTION_UPDATE_PDE_2 = rpc_fns.define('NV_VGPU_MSG_FUNCTION_UPDATE_PDE_2', 53)
NV_VGPU_MSG_FUNCTION_SET_PAGE_DIRECTORY = rpc_fns.define('NV_VGPU_MSG_FUNCTION_SET_PAGE_DIRECTORY', 54)
NV_VGPU_MSG_FUNCTION_GET_STATIC_PSTATE_INFO = rpc_fns.define('NV_VGPU_MSG_FUNCTION_GET_STATIC_PSTATE_INFO', 55)
NV_VGPU_MSG_FUNCTION_TRANSLATE_GUEST_GPU_PTES = rpc_fns.define('NV_VGPU_MSG_FUNCTION_TRANSLATE_GUEST_GPU_PTES', 56)
NV_VGPU_MSG_FUNCTION_RESERVED_57 = rpc_fns.define('NV_VGPU_MSG_FUNCTION_RESERVED_57', 57)
NV_VGPU_MSG_FUNCTION_RESET_CURRENT_GR_CONTEXT = rpc_fns.define('NV_VGPU_MSG_FUNCTION_RESET_CURRENT_GR_CONTEXT', 58)
NV_VGPU_MSG_FUNCTION_SET_SEMA_MEM_VALIDATION_STATE = rpc_fns.define('NV_VGPU_MSG_FUNCTION_SET_SEMA_MEM_VALIDATION_STATE', 59)
NV_VGPU_MSG_FUNCTION_GET_ENGINE_UTILIZATION = rpc_fns.define('NV_VGPU_MSG_FUNCTION_GET_ENGINE_UTILIZATION', 60)
NV_VGPU_MSG_FUNCTION_UPDATE_GPU_PDES = rpc_fns.define('NV_VGPU_MSG_FUNCTION_UPDATE_GPU_PDES', 61)
NV_VGPU_MSG_FUNCTION_GET_ENCODER_CAPACITY = rpc_fns.define('NV_VGPU_MSG_FUNCTION_GET_ENCODER_CAPACITY', 62)
NV_VGPU_MSG_FUNCTION_VGPU_PF_REG_READ32 = rpc_fns.define('NV_VGPU_MSG_FUNCTION_VGPU_PF_REG_READ32', 63)
NV_VGPU_MSG_FUNCTION_SET_GUEST_SYSTEM_INFO_EXT = rpc_fns.define('NV_VGPU_MSG_FUNCTION_SET_GUEST_SYSTEM_INFO_EXT', 64)
NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO = rpc_fns.define('NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO', 65)
NV_VGPU_MSG_FUNCTION_RMFS_INIT = rpc_fns.define('NV_VGPU_MSG_FUNCTION_RMFS_INIT', 66)
NV_VGPU_MSG_FUNCTION_RMFS_CLOSE_QUEUE = rpc_fns.define('NV_VGPU_MSG_FUNCTION_RMFS_CLOSE_QUEUE', 67)
NV_VGPU_MSG_FUNCTION_RMFS_CLEANUP = rpc_fns.define('NV_VGPU_MSG_FUNCTION_RMFS_CLEANUP', 68)
NV_VGPU_MSG_FUNCTION_RMFS_TEST = rpc_fns.define('NV_VGPU_MSG_FUNCTION_RMFS_TEST', 69)
NV_VGPU_MSG_FUNCTION_UPDATE_BAR_PDE = rpc_fns.define('NV_VGPU_MSG_FUNCTION_UPDATE_BAR_PDE', 70)
NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD', 71)
NV_VGPU_MSG_FUNCTION_GSP_SET_SYSTEM_INFO = rpc_fns.define('NV_VGPU_MSG_FUNCTION_GSP_SET_SYSTEM_INFO', 72)
NV_VGPU_MSG_FUNCTION_SET_REGISTRY = rpc_fns.define('NV_VGPU_MSG_FUNCTION_SET_REGISTRY', 73)
NV_VGPU_MSG_FUNCTION_GSP_INIT_POST_OBJGPU = rpc_fns.define('NV_VGPU_MSG_FUNCTION_GSP_INIT_POST_OBJGPU', 74)
NV_VGPU_MSG_FUNCTION_SUBDEV_EVENT_SET_NOTIFICATION = rpc_fns.define('NV_VGPU_MSG_FUNCTION_SUBDEV_EVENT_SET_NOTIFICATION', 75)
NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL = rpc_fns.define('NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL', 76)
NV_VGPU_MSG_FUNCTION_GET_STATIC_INFO2 = rpc_fns.define('NV_VGPU_MSG_FUNCTION_GET_STATIC_INFO2', 77)
NV_VGPU_MSG_FUNCTION_DUMP_PROTOBUF_COMPONENT = rpc_fns.define('NV_VGPU_MSG_FUNCTION_DUMP_PROTOBUF_COMPONENT', 78)
NV_VGPU_MSG_FUNCTION_UNSET_PAGE_DIRECTORY = rpc_fns.define('NV_VGPU_MSG_FUNCTION_UNSET_PAGE_DIRECTORY', 79)
NV_VGPU_MSG_FUNCTION_GET_CONSOLIDATED_STATIC_INFO = rpc_fns.define('NV_VGPU_MSG_FUNCTION_GET_CONSOLIDATED_STATIC_INFO', 80)
NV_VGPU_MSG_FUNCTION_GMMU_REGISTER_FAULT_BUFFER = rpc_fns.define('NV_VGPU_MSG_FUNCTION_GMMU_REGISTER_FAULT_BUFFER', 81)
NV_VGPU_MSG_FUNCTION_GMMU_UNREGISTER_FAULT_BUFFER = rpc_fns.define('NV_VGPU_MSG_FUNCTION_GMMU_UNREGISTER_FAULT_BUFFER', 82)
NV_VGPU_MSG_FUNCTION_GMMU_REGISTER_CLIENT_SHADOW_FAULT_BUFFER = rpc_fns.define('NV_VGPU_MSG_FUNCTION_GMMU_REGISTER_CLIENT_SHADOW_FAULT_BUFFER', 83)
NV_VGPU_MSG_FUNCTION_GMMU_UNREGISTER_CLIENT_SHADOW_FAULT_BUFFER = rpc_fns.define('NV_VGPU_MSG_FUNCTION_GMMU_UNREGISTER_CLIENT_SHADOW_FAULT_BUFFER', 84)
NV_VGPU_MSG_FUNCTION_CTRL_SET_VGPU_FB_USAGE = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_SET_VGPU_FB_USAGE', 85)
NV_VGPU_MSG_FUNCTION_CTRL_NVFBC_SW_SESSION_UPDATE_INFO = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_NVFBC_SW_SESSION_UPDATE_INFO', 86)
NV_VGPU_MSG_FUNCTION_CTRL_NVENC_SW_SESSION_UPDATE_INFO = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_NVENC_SW_SESSION_UPDATE_INFO', 87)
NV_VGPU_MSG_FUNCTION_CTRL_RESET_CHANNEL = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_RESET_CHANNEL', 88)
NV_VGPU_MSG_FUNCTION_CTRL_RESET_ISOLATED_CHANNEL = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_RESET_ISOLATED_CHANNEL', 89)
NV_VGPU_MSG_FUNCTION_CTRL_GPU_HANDLE_VF_PRI_FAULT = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_GPU_HANDLE_VF_PRI_FAULT', 90)
NV_VGPU_MSG_FUNCTION_CTRL_CLK_GET_EXTENDED_INFO = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_CLK_GET_EXTENDED_INFO', 91)
NV_VGPU_MSG_FUNCTION_CTRL_PERF_BOOST = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_PERF_BOOST', 92)
NV_VGPU_MSG_FUNCTION_CTRL_PERF_VPSTATES_GET_CONTROL = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_PERF_VPSTATES_GET_CONTROL', 93)
NV_VGPU_MSG_FUNCTION_CTRL_GET_ZBC_CLEAR_TABLE = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_GET_ZBC_CLEAR_TABLE', 94)
NV_VGPU_MSG_FUNCTION_CTRL_SET_ZBC_COLOR_CLEAR = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_SET_ZBC_COLOR_CLEAR', 95)
NV_VGPU_MSG_FUNCTION_CTRL_SET_ZBC_DEPTH_CLEAR = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_SET_ZBC_DEPTH_CLEAR', 96)
NV_VGPU_MSG_FUNCTION_CTRL_GPFIFO_SCHEDULE = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_GPFIFO_SCHEDULE', 97)
NV_VGPU_MSG_FUNCTION_CTRL_SET_TIMESLICE = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_SET_TIMESLICE', 98)
NV_VGPU_MSG_FUNCTION_CTRL_PREEMPT = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_PREEMPT', 99)
NV_VGPU_MSG_FUNCTION_CTRL_FIFO_DISABLE_CHANNELS = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_FIFO_DISABLE_CHANNELS', 100)
NV_VGPU_MSG_FUNCTION_CTRL_SET_TSG_INTERLEAVE_LEVEL = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_SET_TSG_INTERLEAVE_LEVEL', 101)
NV_VGPU_MSG_FUNCTION_CTRL_SET_CHANNEL_INTERLEAVE_LEVEL = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_SET_CHANNEL_INTERLEAVE_LEVEL', 102)
NV_VGPU_MSG_FUNCTION_GSP_RM_ALLOC = rpc_fns.define('NV_VGPU_MSG_FUNCTION_GSP_RM_ALLOC', 103)
NV_VGPU_MSG_FUNCTION_CTRL_GET_P2P_CAPS_V2 = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_GET_P2P_CAPS_V2', 104)
NV_VGPU_MSG_FUNCTION_CTRL_CIPHER_AES_ENCRYPT = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_CIPHER_AES_ENCRYPT', 105)
NV_VGPU_MSG_FUNCTION_CTRL_CIPHER_SESSION_KEY = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_CIPHER_SESSION_KEY', 106)
NV_VGPU_MSG_FUNCTION_CTRL_CIPHER_SESSION_KEY_STATUS = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_CIPHER_SESSION_KEY_STATUS', 107)
NV_VGPU_MSG_FUNCTION_CTRL_DBG_CLEAR_ALL_SM_ERROR_STATES = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_DBG_CLEAR_ALL_SM_ERROR_STATES', 108)
NV_VGPU_MSG_FUNCTION_CTRL_DBG_READ_ALL_SM_ERROR_STATES = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_DBG_READ_ALL_SM_ERROR_STATES', 109)
NV_VGPU_MSG_FUNCTION_CTRL_DBG_SET_EXCEPTION_MASK = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_DBG_SET_EXCEPTION_MASK', 110)
NV_VGPU_MSG_FUNCTION_CTRL_GPU_PROMOTE_CTX = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_GPU_PROMOTE_CTX', 111)
NV_VGPU_MSG_FUNCTION_CTRL_GR_CTXSW_PREEMPTION_BIND = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_GR_CTXSW_PREEMPTION_BIND', 112)
NV_VGPU_MSG_FUNCTION_CTRL_GR_SET_CTXSW_PREEMPTION_MODE = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_GR_SET_CTXSW_PREEMPTION_MODE', 113)
NV_VGPU_MSG_FUNCTION_CTRL_GR_CTXSW_ZCULL_BIND = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_GR_CTXSW_ZCULL_BIND', 114)
NV_VGPU_MSG_FUNCTION_CTRL_GPU_INITIALIZE_CTX = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_GPU_INITIALIZE_CTX', 115)
NV_VGPU_MSG_FUNCTION_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES', 116)
NV_VGPU_MSG_FUNCTION_CTRL_FIFO_CLEAR_FAULTED_BIT = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_FIFO_CLEAR_FAULTED_BIT', 117)
NV_VGPU_MSG_FUNCTION_CTRL_GET_LATEST_ECC_ADDRESSES = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_GET_LATEST_ECC_ADDRESSES', 118)
NV_VGPU_MSG_FUNCTION_CTRL_MC_SERVICE_INTERRUPTS = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_MC_SERVICE_INTERRUPTS', 119)
NV_VGPU_MSG_FUNCTION_CTRL_DMA_SET_DEFAULT_VASPACE = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_DMA_SET_DEFAULT_VASPACE', 120)
NV_VGPU_MSG_FUNCTION_CTRL_GET_CE_PCE_MASK = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_GET_CE_PCE_MASK', 121)
NV_VGPU_MSG_FUNCTION_CTRL_GET_ZBC_CLEAR_TABLE_ENTRY = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_GET_ZBC_CLEAR_TABLE_ENTRY', 122)
NV_VGPU_MSG_FUNCTION_CTRL_GET_NVLINK_PEER_ID_MASK = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_GET_NVLINK_PEER_ID_MASK', 123)
NV_VGPU_MSG_FUNCTION_CTRL_GET_NVLINK_STATUS = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_GET_NVLINK_STATUS', 124)
NV_VGPU_MSG_FUNCTION_CTRL_GET_P2P_CAPS = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_GET_P2P_CAPS', 125)
NV_VGPU_MSG_FUNCTION_CTRL_GET_P2P_CAPS_MATRIX = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_GET_P2P_CAPS_MATRIX', 126)
NV_VGPU_MSG_FUNCTION_RESERVED_0 = rpc_fns.define('NV_VGPU_MSG_FUNCTION_RESERVED_0', 127)
NV_VGPU_MSG_FUNCTION_CTRL_RESERVE_PM_AREA_SMPC = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_RESERVE_PM_AREA_SMPC', 128)
NV_VGPU_MSG_FUNCTION_CTRL_RESERVE_HWPM_LEGACY = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_RESERVE_HWPM_LEGACY', 129)
NV_VGPU_MSG_FUNCTION_CTRL_B0CC_EXEC_REG_OPS = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_B0CC_EXEC_REG_OPS', 130)
NV_VGPU_MSG_FUNCTION_CTRL_BIND_PM_RESOURCES = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_BIND_PM_RESOURCES', 131)
NV_VGPU_MSG_FUNCTION_CTRL_DBG_SUSPEND_CONTEXT = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_DBG_SUSPEND_CONTEXT', 132)
NV_VGPU_MSG_FUNCTION_CTRL_DBG_RESUME_CONTEXT = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_DBG_RESUME_CONTEXT', 133)
NV_VGPU_MSG_FUNCTION_CTRL_DBG_EXEC_REG_OPS = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_DBG_EXEC_REG_OPS', 134)
NV_VGPU_MSG_FUNCTION_CTRL_DBG_SET_MODE_MMU_DEBUG = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_DBG_SET_MODE_MMU_DEBUG', 135)
NV_VGPU_MSG_FUNCTION_CTRL_DBG_READ_SINGLE_SM_ERROR_STATE = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_DBG_READ_SINGLE_SM_ERROR_STATE', 136)
NV_VGPU_MSG_FUNCTION_CTRL_DBG_CLEAR_SINGLE_SM_ERROR_STATE = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_DBG_CLEAR_SINGLE_SM_ERROR_STATE', 137)
NV_VGPU_MSG_FUNCTION_CTRL_DBG_SET_MODE_ERRBAR_DEBUG = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_DBG_SET_MODE_ERRBAR_DEBUG', 138)
NV_VGPU_MSG_FUNCTION_CTRL_DBG_SET_NEXT_STOP_TRIGGER_TYPE = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_DBG_SET_NEXT_STOP_TRIGGER_TYPE', 139)
NV_VGPU_MSG_FUNCTION_CTRL_ALLOC_PMA_STREAM = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_ALLOC_PMA_STREAM', 140)
NV_VGPU_MSG_FUNCTION_CTRL_PMA_STREAM_UPDATE_GET_PUT = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_PMA_STREAM_UPDATE_GET_PUT', 141)
NV_VGPU_MSG_FUNCTION_CTRL_FB_GET_INFO_V2 = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_FB_GET_INFO_V2', 142)
NV_VGPU_MSG_FUNCTION_CTRL_FIFO_SET_CHANNEL_PROPERTIES = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_FIFO_SET_CHANNEL_PROPERTIES', 143)
NV_VGPU_MSG_FUNCTION_CTRL_GR_GET_CTX_BUFFER_INFO = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_GR_GET_CTX_BUFFER_INFO', 144)
NV_VGPU_MSG_FUNCTION_CTRL_KGR_GET_CTX_BUFFER_PTES = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_KGR_GET_CTX_BUFFER_PTES', 145)
NV_VGPU_MSG_FUNCTION_CTRL_GPU_EVICT_CTX = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_GPU_EVICT_CTX', 146)
NV_VGPU_MSG_FUNCTION_CTRL_FB_GET_FS_INFO = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_FB_GET_FS_INFO', 147)
NV_VGPU_MSG_FUNCTION_CTRL_GRMGR_GET_GR_FS_INFO = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_GRMGR_GET_GR_FS_INFO', 148)
NV_VGPU_MSG_FUNCTION_CTRL_STOP_CHANNEL = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_STOP_CHANNEL', 149)
NV_VGPU_MSG_FUNCTION_CTRL_GR_PC_SAMPLING_MODE = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_GR_PC_SAMPLING_MODE', 150)
NV_VGPU_MSG_FUNCTION_CTRL_PERF_RATED_TDP_GET_STATUS = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_PERF_RATED_TDP_GET_STATUS', 151)
NV_VGPU_MSG_FUNCTION_CTRL_PERF_RATED_TDP_SET_CONTROL = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_PERF_RATED_TDP_SET_CONTROL', 152)
NV_VGPU_MSG_FUNCTION_CTRL_FREE_PMA_STREAM = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_FREE_PMA_STREAM', 153)
NV_VGPU_MSG_FUNCTION_CTRL_TIMER_SET_GR_TICK_FREQ = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_TIMER_SET_GR_TICK_FREQ', 154)
NV_VGPU_MSG_FUNCTION_CTRL_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB', 155)
NV_VGPU_MSG_FUNCTION_GET_CONSOLIDATED_GR_STATIC_INFO = rpc_fns.define('NV_VGPU_MSG_FUNCTION_GET_CONSOLIDATED_GR_STATIC_INFO', 156)
NV_VGPU_MSG_FUNCTION_CTRL_DBG_SET_SINGLE_SM_SINGLE_STEP = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_DBG_SET_SINGLE_SM_SINGLE_STEP', 157)
NV_VGPU_MSG_FUNCTION_CTRL_GR_GET_TPC_PARTITION_MODE = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_GR_GET_TPC_PARTITION_MODE', 158)
NV_VGPU_MSG_FUNCTION_CTRL_GR_SET_TPC_PARTITION_MODE = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_GR_SET_TPC_PARTITION_MODE', 159)
NV_VGPU_MSG_FUNCTION_UVM_PAGING_CHANNEL_ALLOCATE = rpc_fns.define('NV_VGPU_MSG_FUNCTION_UVM_PAGING_CHANNEL_ALLOCATE', 160)
NV_VGPU_MSG_FUNCTION_UVM_PAGING_CHANNEL_DESTROY = rpc_fns.define('NV_VGPU_MSG_FUNCTION_UVM_PAGING_CHANNEL_DESTROY', 161)
NV_VGPU_MSG_FUNCTION_UVM_PAGING_CHANNEL_MAP = rpc_fns.define('NV_VGPU_MSG_FUNCTION_UVM_PAGING_CHANNEL_MAP', 162)
NV_VGPU_MSG_FUNCTION_UVM_PAGING_CHANNEL_UNMAP = rpc_fns.define('NV_VGPU_MSG_FUNCTION_UVM_PAGING_CHANNEL_UNMAP', 163)
NV_VGPU_MSG_FUNCTION_UVM_PAGING_CHANNEL_PUSH_STREAM = rpc_fns.define('NV_VGPU_MSG_FUNCTION_UVM_PAGING_CHANNEL_PUSH_STREAM', 164)
NV_VGPU_MSG_FUNCTION_UVM_PAGING_CHANNEL_SET_HANDLES = rpc_fns.define('NV_VGPU_MSG_FUNCTION_UVM_PAGING_CHANNEL_SET_HANDLES', 165)
NV_VGPU_MSG_FUNCTION_UVM_METHOD_STREAM_GUEST_PAGES_OPERATION = rpc_fns.define('NV_VGPU_MSG_FUNCTION_UVM_METHOD_STREAM_GUEST_PAGES_OPERATION', 166)
NV_VGPU_MSG_FUNCTION_CTRL_INTERNAL_QUIESCE_PMA_CHANNEL = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_INTERNAL_QUIESCE_PMA_CHANNEL', 167)
NV_VGPU_MSG_FUNCTION_DCE_RM_INIT = rpc_fns.define('NV_VGPU_MSG_FUNCTION_DCE_RM_INIT', 168)
NV_VGPU_MSG_FUNCTION_REGISTER_VIRTUAL_EVENT_BUFFER = rpc_fns.define('NV_VGPU_MSG_FUNCTION_REGISTER_VIRTUAL_EVENT_BUFFER', 169)
NV_VGPU_MSG_FUNCTION_CTRL_EVENT_BUFFER_UPDATE_GET = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_EVENT_BUFFER_UPDATE_GET', 170)
NV_VGPU_MSG_FUNCTION_GET_PLCABLE_ADDRESS_KIND = rpc_fns.define('NV_VGPU_MSG_FUNCTION_GET_PLCABLE_ADDRESS_KIND', 171)
NV_VGPU_MSG_FUNCTION_CTRL_PERF_LIMITS_SET_STATUS_V2 = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_PERF_LIMITS_SET_STATUS_V2', 172)
NV_VGPU_MSG_FUNCTION_CTRL_INTERNAL_SRIOV_PROMOTE_PMA_STREAM = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_INTERNAL_SRIOV_PROMOTE_PMA_STREAM', 173)
NV_VGPU_MSG_FUNCTION_CTRL_GET_MMU_DEBUG_MODE = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_GET_MMU_DEBUG_MODE', 174)
NV_VGPU_MSG_FUNCTION_CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS', 175)
NV_VGPU_MSG_FUNCTION_CTRL_FLCN_GET_CTX_BUFFER_SIZE = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_FLCN_GET_CTX_BUFFER_SIZE', 176)
NV_VGPU_MSG_FUNCTION_CTRL_FLCN_GET_CTX_BUFFER_INFO = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_FLCN_GET_CTX_BUFFER_INFO', 177)
NV_VGPU_MSG_FUNCTION_DISABLE_CHANNELS = rpc_fns.define('NV_VGPU_MSG_FUNCTION_DISABLE_CHANNELS', 178)
NV_VGPU_MSG_FUNCTION_CTRL_FABRIC_MEMORY_DESCRIBE = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_FABRIC_MEMORY_DESCRIBE', 179)
NV_VGPU_MSG_FUNCTION_CTRL_FABRIC_MEM_STATS = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_FABRIC_MEM_STATS', 180)
NV_VGPU_MSG_FUNCTION_SAVE_HIBERNATION_DATA = rpc_fns.define('NV_VGPU_MSG_FUNCTION_SAVE_HIBERNATION_DATA', 181)
NV_VGPU_MSG_FUNCTION_RESTORE_HIBERNATION_DATA = rpc_fns.define('NV_VGPU_MSG_FUNCTION_RESTORE_HIBERNATION_DATA', 182)
NV_VGPU_MSG_FUNCTION_CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED', 183)
NV_VGPU_MSG_FUNCTION_CTRL_EXEC_PARTITIONS_CREATE = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_EXEC_PARTITIONS_CREATE', 184)
NV_VGPU_MSG_FUNCTION_CTRL_EXEC_PARTITIONS_DELETE = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_EXEC_PARTITIONS_DELETE', 185)
NV_VGPU_MSG_FUNCTION_CTRL_GPFIFO_GET_WORK_SUBMIT_TOKEN = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_GPFIFO_GET_WORK_SUBMIT_TOKEN', 186)
NV_VGPU_MSG_FUNCTION_CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX', 187)
NV_VGPU_MSG_FUNCTION_PMA_SCRUBBER_SHARED_BUFFER_GUEST_PAGES_OPERATION = rpc_fns.define('NV_VGPU_MSG_FUNCTION_PMA_SCRUBBER_SHARED_BUFFER_GUEST_PAGES_OPERATION', 188)
NV_VGPU_MSG_FUNCTION_CTRL_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK', 189)
NV_VGPU_MSG_FUNCTION_SET_SYSMEM_DIRTY_PAGE_TRACKING_BUFFER = rpc_fns.define('NV_VGPU_MSG_FUNCTION_SET_SYSMEM_DIRTY_PAGE_TRACKING_BUFFER', 190)
NV_VGPU_MSG_FUNCTION_CTRL_SUBDEVICE_GET_P2P_CAPS = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_SUBDEVICE_GET_P2P_CAPS', 191)
NV_VGPU_MSG_FUNCTION_CTRL_BUS_SET_P2P_MAPPING = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_BUS_SET_P2P_MAPPING', 192)
NV_VGPU_MSG_FUNCTION_CTRL_BUS_UNSET_P2P_MAPPING = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_BUS_UNSET_P2P_MAPPING', 193)
NV_VGPU_MSG_FUNCTION_CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK', 194)
NV_VGPU_MSG_FUNCTION_CTRL_GPU_MIGRATABLE_OPS = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_GPU_MIGRATABLE_OPS', 195)
NV_VGPU_MSG_FUNCTION_CTRL_GET_TOTAL_HS_CREDITS = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_GET_TOTAL_HS_CREDITS', 196)
NV_VGPU_MSG_FUNCTION_CTRL_GET_HS_CREDITS = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_GET_HS_CREDITS', 197)
NV_VGPU_MSG_FUNCTION_CTRL_SET_HS_CREDITS = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_SET_HS_CREDITS', 198)
NV_VGPU_MSG_FUNCTION_CTRL_PM_AREA_PC_SAMPLER = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_PM_AREA_PC_SAMPLER', 199)
NV_VGPU_MSG_FUNCTION_INVALIDATE_TLB = rpc_fns.define('NV_VGPU_MSG_FUNCTION_INVALIDATE_TLB', 200)
NV_VGPU_MSG_FUNCTION_CTRL_GPU_QUERY_ECC_STATUS = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_GPU_QUERY_ECC_STATUS', 201)
NV_VGPU_MSG_FUNCTION_ECC_NOTIFIER_WRITE_ACK = rpc_fns.define('NV_VGPU_MSG_FUNCTION_ECC_NOTIFIER_WRITE_ACK', 202)
NV_VGPU_MSG_FUNCTION_CTRL_DBG_GET_MODE_MMU_DEBUG = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_DBG_GET_MODE_MMU_DEBUG', 203)
NV_VGPU_MSG_FUNCTION_RM_API_CONTROL = rpc_fns.define('NV_VGPU_MSG_FUNCTION_RM_API_CONTROL', 204)
NV_VGPU_MSG_FUNCTION_CTRL_CMD_INTERNAL_GPU_START_FABRIC_PROBE = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_CMD_INTERNAL_GPU_START_FABRIC_PROBE', 205)
NV_VGPU_MSG_FUNCTION_CTRL_NVLINK_GET_INBAND_RECEIVED_DATA = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_NVLINK_GET_INBAND_RECEIVED_DATA', 206)
NV_VGPU_MSG_FUNCTION_GET_STATIC_DATA = rpc_fns.define('NV_VGPU_MSG_FUNCTION_GET_STATIC_DATA', 207)
NV_VGPU_MSG_FUNCTION_RESERVED_208 = rpc_fns.define('NV_VGPU_MSG_FUNCTION_RESERVED_208', 208)
NV_VGPU_MSG_FUNCTION_CTRL_GPU_GET_INFO_V2 = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_GPU_GET_INFO_V2', 209)
NV_VGPU_MSG_FUNCTION_GET_BRAND_CAPS = rpc_fns.define('NV_VGPU_MSG_FUNCTION_GET_BRAND_CAPS', 210)
NV_VGPU_MSG_FUNCTION_CTRL_CMD_NVLINK_INBAND_SEND_DATA = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_CMD_NVLINK_INBAND_SEND_DATA', 211)
NV_VGPU_MSG_FUNCTION_UPDATE_GPM_GUEST_BUFFER_INFO = rpc_fns.define('NV_VGPU_MSG_FUNCTION_UPDATE_GPM_GUEST_BUFFER_INFO', 212)
NV_VGPU_MSG_FUNCTION_CTRL_CMD_INTERNAL_CONTROL_GSP_TRACE = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_CMD_INTERNAL_CONTROL_GSP_TRACE', 213)
NV_VGPU_MSG_FUNCTION_CTRL_SET_ZBC_STENCIL_CLEAR = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_SET_ZBC_STENCIL_CLEAR', 214)
NV_VGPU_MSG_FUNCTION_CTRL_SUBDEVICE_GET_VGPU_HEAP_STATS = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_SUBDEVICE_GET_VGPU_HEAP_STATS', 215)
NV_VGPU_MSG_FUNCTION_CTRL_SUBDEVICE_GET_LIBOS_HEAP_STATS = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_SUBDEVICE_GET_LIBOS_HEAP_STATS', 216)
NV_VGPU_MSG_FUNCTION_CTRL_DBG_SET_MODE_MMU_GCC_DEBUG = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_DBG_SET_MODE_MMU_GCC_DEBUG', 217)
NV_VGPU_MSG_FUNCTION_CTRL_DBG_GET_MODE_MMU_GCC_DEBUG = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_DBG_GET_MODE_MMU_GCC_DEBUG', 218)
NV_VGPU_MSG_FUNCTION_CTRL_RESERVE_HES = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_RESERVE_HES', 219)
NV_VGPU_MSG_FUNCTION_CTRL_RELEASE_HES = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_RELEASE_HES', 220)
NV_VGPU_MSG_FUNCTION_CTRL_RESERVE_CCU_PROF = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_RESERVE_CCU_PROF', 221)
NV_VGPU_MSG_FUNCTION_CTRL_RELEASE_CCU_PROF = rpc_fns.define('NV_VGPU_MSG_FUNCTION_CTRL_RELEASE_CCU_PROF', 222)
NV_VGPU_MSG_FUNCTION_NUM_FUNCTIONS = rpc_fns.define('NV_VGPU_MSG_FUNCTION_NUM_FUNCTIONS', 223)
# RPC event opcodes (u32 enum) for messages sent in the opposite direction,
# from GSP-RM / the vGPU plugin back to the CPU driver. Event IDs start at
# FIRST_EVENT (0x1000) to keep them disjoint from the rpc_fns opcode space;
# NUM_EVENTS (4131) is a count sentinel, not a real event.
# Generated binding — values are part of the GSP-RM ABI; do not renumber.
class rpc_events(Annotated[int, ctypes.c_uint32], c.Enum): pass
NV_VGPU_MSG_EVENT_FIRST_EVENT = rpc_events.define('NV_VGPU_MSG_EVENT_FIRST_EVENT', 4096)
NV_VGPU_MSG_EVENT_GSP_INIT_DONE = rpc_events.define('NV_VGPU_MSG_EVENT_GSP_INIT_DONE', 4097)
NV_VGPU_MSG_EVENT_GSP_RUN_CPU_SEQUENCER = rpc_events.define('NV_VGPU_MSG_EVENT_GSP_RUN_CPU_SEQUENCER', 4098)
NV_VGPU_MSG_EVENT_POST_EVENT = rpc_events.define('NV_VGPU_MSG_EVENT_POST_EVENT', 4099)
NV_VGPU_MSG_EVENT_RC_TRIGGERED = rpc_events.define('NV_VGPU_MSG_EVENT_RC_TRIGGERED', 4100)
NV_VGPU_MSG_EVENT_MMU_FAULT_QUEUED = rpc_events.define('NV_VGPU_MSG_EVENT_MMU_FAULT_QUEUED', 4101)
NV_VGPU_MSG_EVENT_OS_ERROR_LOG = rpc_events.define('NV_VGPU_MSG_EVENT_OS_ERROR_LOG', 4102)
NV_VGPU_MSG_EVENT_RG_LINE_INTR = rpc_events.define('NV_VGPU_MSG_EVENT_RG_LINE_INTR', 4103)
NV_VGPU_MSG_EVENT_GPUACCT_PERFMON_UTIL_SAMPLES = rpc_events.define('NV_VGPU_MSG_EVENT_GPUACCT_PERFMON_UTIL_SAMPLES', 4104)
NV_VGPU_MSG_EVENT_SIM_READ = rpc_events.define('NV_VGPU_MSG_EVENT_SIM_READ', 4105)
NV_VGPU_MSG_EVENT_SIM_WRITE = rpc_events.define('NV_VGPU_MSG_EVENT_SIM_WRITE', 4106)
NV_VGPU_MSG_EVENT_SEMAPHORE_SCHEDULE_CALLBACK = rpc_events.define('NV_VGPU_MSG_EVENT_SEMAPHORE_SCHEDULE_CALLBACK', 4107)
NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT = rpc_events.define('NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT', 4108)
NV_VGPU_MSG_EVENT_VGPU_GSP_PLUGIN_TRIGGERED = rpc_events.define('NV_VGPU_MSG_EVENT_VGPU_GSP_PLUGIN_TRIGGERED', 4109)
NV_VGPU_MSG_EVENT_PERF_GPU_BOOST_SYNC_LIMITS_CALLBACK = rpc_events.define('NV_VGPU_MSG_EVENT_PERF_GPU_BOOST_SYNC_LIMITS_CALLBACK', 4110)
NV_VGPU_MSG_EVENT_PERF_BRIDGELESS_INFO_UPDATE = rpc_events.define('NV_VGPU_MSG_EVENT_PERF_BRIDGELESS_INFO_UPDATE', 4111)
NV_VGPU_MSG_EVENT_VGPU_CONFIG = rpc_events.define('NV_VGPU_MSG_EVENT_VGPU_CONFIG', 4112)
NV_VGPU_MSG_EVENT_DISPLAY_MODESET = rpc_events.define('NV_VGPU_MSG_EVENT_DISPLAY_MODESET', 4113)
NV_VGPU_MSG_EVENT_EXTDEV_INTR_SERVICE = rpc_events.define('NV_VGPU_MSG_EVENT_EXTDEV_INTR_SERVICE', 4114)
NV_VGPU_MSG_EVENT_NVLINK_INBAND_RECEIVED_DATA_256 = rpc_events.define('NV_VGPU_MSG_EVENT_NVLINK_INBAND_RECEIVED_DATA_256', 4115)
NV_VGPU_MSG_EVENT_NVLINK_INBAND_RECEIVED_DATA_512 = rpc_events.define('NV_VGPU_MSG_EVENT_NVLINK_INBAND_RECEIVED_DATA_512', 4116)
NV_VGPU_MSG_EVENT_NVLINK_INBAND_RECEIVED_DATA_1024 = rpc_events.define('NV_VGPU_MSG_EVENT_NVLINK_INBAND_RECEIVED_DATA_1024', 4117)
NV_VGPU_MSG_EVENT_NVLINK_INBAND_RECEIVED_DATA_2048 = rpc_events.define('NV_VGPU_MSG_EVENT_NVLINK_INBAND_RECEIVED_DATA_2048', 4118)
NV_VGPU_MSG_EVENT_NVLINK_INBAND_RECEIVED_DATA_4096 = rpc_events.define('NV_VGPU_MSG_EVENT_NVLINK_INBAND_RECEIVED_DATA_4096', 4119)
NV_VGPU_MSG_EVENT_TIMED_SEMAPHORE_RELEASE = rpc_events.define('NV_VGPU_MSG_EVENT_TIMED_SEMAPHORE_RELEASE', 4120)
NV_VGPU_MSG_EVENT_NVLINK_IS_GPU_DEGRADED = rpc_events.define('NV_VGPU_MSG_EVENT_NVLINK_IS_GPU_DEGRADED', 4121)
NV_VGPU_MSG_EVENT_PFM_REQ_HNDLR_STATE_SYNC_CALLBACK = rpc_events.define('NV_VGPU_MSG_EVENT_PFM_REQ_HNDLR_STATE_SYNC_CALLBACK', 4122)
NV_VGPU_MSG_EVENT_NVLINK_FAULT_UP = rpc_events.define('NV_VGPU_MSG_EVENT_NVLINK_FAULT_UP', 4123)
NV_VGPU_MSG_EVENT_GSP_LOCKDOWN_NOTICE = rpc_events.define('NV_VGPU_MSG_EVENT_GSP_LOCKDOWN_NOTICE', 4124)
NV_VGPU_MSG_EVENT_MIG_CI_CONFIG_UPDATE = rpc_events.define('NV_VGPU_MSG_EVENT_MIG_CI_CONFIG_UPDATE', 4125)
NV_VGPU_MSG_EVENT_UPDATE_GSP_TRACE = rpc_events.define('NV_VGPU_MSG_EVENT_UPDATE_GSP_TRACE', 4126)
NV_VGPU_MSG_EVENT_NVLINK_FATAL_ERROR_RECOVERY = rpc_events.define('NV_VGPU_MSG_EVENT_NVLINK_FATAL_ERROR_RECOVERY', 4127)
NV_VGPU_MSG_EVENT_GSP_POST_NOCAT_RECORD = rpc_events.define('NV_VGPU_MSG_EVENT_GSP_POST_NOCAT_RECORD', 4128)
NV_VGPU_MSG_EVENT_FECS_ERROR = rpc_events.define('NV_VGPU_MSG_EVENT_FECS_ERROR', 4129)
NV_VGPU_MSG_EVENT_RECOVERY_ACTION = rpc_events.define('NV_VGPU_MSG_EVENT_RECOVERY_ACTION', 4130)
NV_VGPU_MSG_EVENT_NUM_EVENTS = rpc_events.define('NV_VGPU_MSG_EVENT_NUM_EVENTS', 4131)
# LibOS (GSP firmware OS) memory-region descriptor types.
# LibosAddress is a 64-bit address/size value.
LibosAddress: TypeAlias = Annotated[int, ctypes.c_uint64]
# Layout of a LibOS memory region: absent, physically contiguous, or
# described by a radix-3 page table.
class LibosMemoryRegionKind(Annotated[int, ctypes.c_uint32], c.Enum): pass
LIBOS_MEMORY_REGION_NONE = LibosMemoryRegionKind.define('LIBOS_MEMORY_REGION_NONE', 0)
LIBOS_MEMORY_REGION_CONTIGUOUS = LibosMemoryRegionKind.define('LIBOS_MEMORY_REGION_CONTIGUOUS', 1)
LIBOS_MEMORY_REGION_RADIX3 = LibosMemoryRegionKind.define('LIBOS_MEMORY_REGION_RADIX3', 2)
# Physical location of a LibOS memory region: system memory or framebuffer.
class LibosMemoryRegionLoc(Annotated[int, ctypes.c_uint32], c.Enum): pass
LIBOS_MEMORY_REGION_LOC_NONE = LibosMemoryRegionLoc.define('LIBOS_MEMORY_REGION_LOC_NONE', 0)
LIBOS_MEMORY_REGION_LOC_SYSMEM = LibosMemoryRegionLoc.define('LIBOS_MEMORY_REGION_LOC_SYSMEM', 1)
LIBOS_MEMORY_REGION_LOC_FB = LibosMemoryRegionLoc.define('LIBOS_MEMORY_REGION_LOC_FB', 2)
# LibOS boot-time memory-region descriptor (32-byte packed record; Annotated
# offsets are byte positions). Note SIZE=32 leaves 6 padding bytes after `loc`.
@c.record
class LibosMemoryRegionInitArgument(c.Struct):
  SIZE = 32
  # 8-byte region identifier (presumably an 8-char tag packed into a u64 — TODO confirm).
  id8: Annotated[LibosAddress, 0]
  # Physical address of the region.
  pa: Annotated[LibosAddress, 8]
  # Size of the region in bytes.
  size: Annotated[LibosAddress, 16]
  # LibosMemoryRegionKind value stored as a single byte.
  kind: Annotated[NvU8, 24]
  # LibosMemoryRegionLoc value stored as a single byte.
  loc: Annotated[NvU8, 25]
# Shared-memory message-queue transmit header (32-byte packed record; Annotated
# offsets are byte positions). One side writes this header; the peer reads it.
@c.record
class msgqTxHeader(c.Struct):
  SIZE = 32
  # Queue format version.
  version: Annotated[NvU32, 0]
  # Total size of the queue area.
  size: Annotated[NvU32, 4]
  # Size of a single message entry.
  msgSize: Annotated[NvU32, 8]
  # Number of message entries in the ring.
  msgCount: Annotated[NvU32, 12]
  # Producer write pointer.
  writePtr: Annotated[NvU32, 16]
  # Queue option flags.
  flags: Annotated[NvU32, 20]
  # Offset of the paired msgqRxHeader within the queue area.
  rxHdrOff: Annotated[NvU32, 24]
  # Offset of the first message entry within the queue area.
  entryOff: Annotated[NvU32, 28]
# Shared-memory message-queue receive header: just the consumer read pointer
# (4-byte packed record).
@c.record
class msgqRxHeader(c.Struct):
  SIZE = 4
  # Consumer read pointer for the paired msgqTxHeader ring.
  readPtr: Annotated[NvU32, 0]
# Per-queue bookkeeping for a bidirectional shared-memory message queue
# (232-byte packed record; Annotated offsets are byte positions). Holds
# pointers into the shared area for both directions ("our"/"their"), local
# cached copies of both tx headers, and optional callback hooks for remote
# notification, backend read/write, cache maintenance, and memory barriers.
# The msgqFcn* callback aliases are defined just below this struct.
@c.record
class msgqMetadata(c.Struct):
  SIZE = 232
  # Pointers into the shared queue area: outgoing side.
  pOurTxHdr: Annotated[c.POINTER[msgqTxHeader], 0]
  # Pointers into the shared queue area: incoming side.
  pTheirTxHdr: Annotated[c.POINTER[msgqTxHeader], 8]
  pOurRxHdr: Annotated[c.POINTER[msgqRxHeader], 16]
  pTheirRxHdr: Annotated[c.POINTER[msgqRxHeader], 24]
  pOurEntries: Annotated[c.POINTER[NvU8], 32]
  pTheirEntries: Annotated[c.POINTER[NvU8], 40]
  # Shortcut pointers to the four read/write cursors.
  pReadIncoming: Annotated[c.POINTER[NvU32], 48]
  pWriteIncoming: Annotated[c.POINTER[NvU32], 56]
  pReadOutgoing: Annotated[c.POINTER[NvU32], 64]
  pWriteOutgoing: Annotated[c.POINTER[NvU32], 72]
  # Local copy of our transmit header plus cached cursor/occupancy state.
  tx: Annotated[msgqTxHeader, 80]
  txReadPtr: Annotated[NvU32, 112]
  txFree: Annotated[NvU32, 116]
  txLinked: Annotated[NvBool, 120]
  # Local copy of the peer's transmit header plus cached cursor/occupancy state.
  rx: Annotated[msgqTxHeader, 124]
  rxReadPtr: Annotated[NvU32, 156]
  rxAvail: Annotated[NvU32, 160]
  rxLinked: Annotated[NvBool, 164]
  rxSwapped: Annotated[NvBool, 165]
  # Optional callbacks (see msgqFcn* aliases) with their opaque argument pointers.
  fcnNotify: Annotated[msgqFcnNotifyRemote, 168]
  fcnNotifyArg: Annotated[ctypes.c_void_p, 176]
  fcnBackendRw: Annotated[msgqFcnBackendRw, 184]
  fcnBackendRwArg: Annotated[ctypes.c_void_p, 192]
  fcnInvalidate: Annotated[msgqFcnCacheOp, 200]
  fcnFlush: Annotated[msgqFcnCacheOp, 208]
  fcnZero: Annotated[msgqFcnCacheOp, 216]
  fcnBarrier: Annotated[msgqFcnBarrier, 224]
# C callback signatures used by msgqMetadata (defined after the struct that
# references them; the binding layer resolves the forward references).
# Notify-remote hook: (code, arg) -> i32.
msgqFcnNotifyRemote: TypeAlias = c.CFUNCTYPE[Annotated[int, ctypes.c_int32], [Annotated[int, ctypes.c_int32], ctypes.c_void_p]]
# Backend read/write hook: (dst, src, size, flags?, arg) -> i32 — exact parameter meanings not shown here; verify against the msgq C header.
msgqFcnBackendRw: TypeAlias = c.CFUNCTYPE[Annotated[int, ctypes.c_int32], [ctypes.c_void_p, ctypes.c_void_p, Annotated[int, ctypes.c_uint32], Annotated[int, ctypes.c_uint32], ctypes.c_void_p]]
# Cache maintenance hook (invalidate/flush/zero): (ptr, size) -> void.
msgqFcnCacheOp: TypeAlias = c.CFUNCTYPE[None, [ctypes.c_void_p, Annotated[int, ctypes.c_uint32]]]
# Memory-barrier hook: () -> void.
msgqFcnBarrier: TypeAlias = c.CFUNCTYPE[None, []]
# ---------------------------------------------------------------------------
# RPC message layouts: guest system info, root/memory allocation.
# Generated byte-layout mirrors of C structs; the versioned `_vXX_YY` suffix
# tracks the RPC wire-format revision, and the unversioned `_v` alias points
# at the current revision.
# ---------------------------------------------------------------------------
@c.record
class struct_rpc_set_guest_system_info_v03_00(c.Struct):
    # Guest -> host: driver/OS identification strings plus version numbers.
    SIZE = 792
    vgxVersionMajorNum: Annotated[NvU32, 0]
    vgxVersionMinorNum: Annotated[NvU32, 4]
    guestDriverVersionBufferLength: Annotated[NvU32, 8]
    guestVersionBufferLength: Annotated[NvU32, 12]
    guestTitleBufferLength: Annotated[NvU32, 16]
    guestClNum: Annotated[NvU32, 20]
    guestDriverVersion: Annotated[c.Array[Annotated[bytes, ctypes.c_char], Literal[256]], 24]
    guestVersion: Annotated[c.Array[Annotated[bytes, ctypes.c_char], Literal[256]], 280]
    guestTitle: Annotated[c.Array[Annotated[bytes, ctypes.c_char], Literal[256]], 536]

rpc_set_guest_system_info_v03_00: TypeAlias = struct_rpc_set_guest_system_info_v03_00
rpc_set_guest_system_info_v: TypeAlias = struct_rpc_set_guest_system_info_v03_00

@c.record
class struct_rpc_set_guest_system_info_ext_v15_02(c.Struct):
    # Extended guest info: driver branch string plus PCI location.
    SIZE = 264
    guestDriverBranch: Annotated[c.Array[Annotated[bytes, ctypes.c_char], Literal[256]], 0]
    domain: Annotated[NvU32, 256]
    bus: Annotated[NvU16, 260]
    device: Annotated[NvU16, 262]

rpc_set_guest_system_info_ext_v15_02: TypeAlias = struct_rpc_set_guest_system_info_ext_v15_02

@c.record
class struct_rpc_set_guest_system_info_ext_v25_1B(c.Struct):
    # Same as v15_02 with an extra trailing gridBuildCsp field.
    SIZE = 268
    guestDriverBranch: Annotated[c.Array[Annotated[bytes, ctypes.c_char], Literal[256]], 0]
    domain: Annotated[NvU32, 256]
    bus: Annotated[NvU16, 260]
    device: Annotated[NvU16, 262]
    gridBuildCsp: Annotated[NvU32, 264]

rpc_set_guest_system_info_ext_v25_1B: TypeAlias = struct_rpc_set_guest_system_info_ext_v25_1B
rpc_set_guest_system_info_ext_v: TypeAlias = struct_rpc_set_guest_system_info_ext_v25_1B

@c.record
class struct_rpc_alloc_root_v07_00(c.Struct):
    # Allocate a root client object, tagged with the owning process.
    SIZE = 108
    hClient: Annotated[NvHandle, 0]
    processID: Annotated[NvU32, 4]
    processName: Annotated[c.Array[Annotated[bytes, ctypes.c_char], Literal[100]], 8]

NvHandle: TypeAlias = Annotated[int, ctypes.c_uint32]
rpc_alloc_root_v07_00: TypeAlias = struct_rpc_alloc_root_v07_00
rpc_alloc_root_v: TypeAlias = struct_rpc_alloc_root_v07_00

@c.record
class struct_rpc_alloc_memory_v13_01(c.Struct):
    # Memory allocation request; trailed by a variable-length PTE descriptor.
    SIZE = 56
    hClient: Annotated[NvHandle, 0]
    hDevice: Annotated[NvHandle, 4]
    hMemory: Annotated[NvHandle, 8]
    hClass: Annotated[NvU32, 12]
    flags: Annotated[NvU32, 16]
    pteAdjust: Annotated[NvU32, 20]
    format: Annotated[NvU32, 24]
    length: Annotated[NvU64, 32]
    pageCount: Annotated[NvU32, 40]
    pteDesc: Annotated[struct_pte_desc, 48]

@c.record
class struct_pte_desc(c.Struct):
    # PTE descriptor header; idr/reserved1/length are bitfields
    # (Annotated[type, byte, width, shift]) and pte_pde is a flexible array.
    SIZE = 8
    idr: Annotated[NvU32, 0, 2, 0]
    reserved1: Annotated[NvU32, 0, 14, 2]
    length: Annotated[NvU32, 2, 16, 0]
    pte_pde: Annotated[c.Array[struct_pte_desc_pte_pde, Literal[0]], 8]

@c.record
class struct_pte_desc_pte_pde(c.Struct):
    # Union-like record: pte and pde overlay the same 8 bytes.
    SIZE = 8
    pte: Annotated[NvU64, 0]
    pde: Annotated[NvU64, 0]

rpc_alloc_memory_v13_01: TypeAlias = struct_rpc_alloc_memory_v13_01
rpc_alloc_memory_v: TypeAlias = struct_rpc_alloc_memory_v13_01
# ---------------------------------------------------------------------------
# Channel (DMA) allocation RPC and its embedded parameter structs.
# Generated byte-layout mirrors of C structs; do not hand-edit offsets.
# ---------------------------------------------------------------------------
@c.record
class struct_rpc_alloc_channel_dma_v1F_04(c.Struct):
    # Allocate a DMA channel; `chid` at the tail carries the resulting
    # channel id back to the caller.
    SIZE = 248
    hClient: Annotated[NvHandle, 0]
    hDevice: Annotated[NvHandle, 4]
    hChannel: Annotated[NvHandle, 8]
    hClass: Annotated[NvU32, 12]
    flags: Annotated[NvU32, 16]
    params: Annotated[NV_CHANNEL_ALLOC_PARAMS_v1F_04, 24]
    chid: Annotated[NvU32, 240]

@c.record
class struct_NV_CHANNEL_ALLOC_PARAMS_v1F_04(c.Struct):
    # Channel allocation parameters: GPFIFO placement, associated handles,
    # and memory descriptors for the instance/ramfc/userd/mthdbuf regions.
    SIZE = 216
    hObjectError: Annotated[NvHandle, 0]
    hObjectBuffer: Annotated[NvHandle, 4]
    gpFifoOffset: Annotated[NvU64, 8]
    gpFifoEntries: Annotated[NvU32, 16]
    flags: Annotated[NvU32, 20]
    hContextShare: Annotated[NvHandle, 24]
    hVASpace: Annotated[NvHandle, 28]
    hUserdMemory: Annotated[c.Array[NvHandle, Literal[1]], 32]
    userdOffset: Annotated[c.Array[NvU64, Literal[1]], 40]
    engineType: Annotated[NvU32, 48]
    hObjectEccError: Annotated[NvHandle, 52]
    instanceMem: Annotated[NV_MEMORY_DESC_PARAMS_v18_01, 56]
    ramfcMem: Annotated[NV_MEMORY_DESC_PARAMS_v18_01, 80]
    userdMem: Annotated[NV_MEMORY_DESC_PARAMS_v18_01, 104]
    mthdbufMem: Annotated[NV_MEMORY_DESC_PARAMS_v18_01, 128]
    hPhysChannelGroup: Annotated[NvHandle, 152]
    subDeviceId: Annotated[NvHandle, 156]
    internalFlags: Annotated[NvU32, 160]
    errorNotifierMem: Annotated[NV_MEMORY_DESC_PARAMS_v18_01, 168]
    eccErrorNotifierMem: Annotated[NV_MEMORY_DESC_PARAMS_v18_01, 192]

NV_CHANNEL_ALLOC_PARAMS_v1F_04: TypeAlias = struct_NV_CHANNEL_ALLOC_PARAMS_v1F_04

@c.record
class struct_NV_MEMORY_DESC_PARAMS_v18_01(c.Struct):
    # Generic memory region descriptor: base/size plus aperture attributes.
    SIZE = 24
    base: Annotated[NvU64, 0]
    size: Annotated[NvU64, 8]
    addressSpace: Annotated[NvU32, 16]
    cacheAttrib: Annotated[NvU32, 20]

NV_MEMORY_DESC_PARAMS_v18_01: TypeAlias = struct_NV_MEMORY_DESC_PARAMS_v18_01
rpc_alloc_channel_dma_v1F_04: TypeAlias = struct_rpc_alloc_channel_dma_v1F_04
rpc_alloc_channel_dma_v: TypeAlias = struct_rpc_alloc_channel_dma_v1F_04
# ---------------------------------------------------------------------------
# alloc_object RPC (v25_08): generic object allocation whose payload is a
# union of per-class parameter structs (selected by hClass).  All records
# below are generated byte-layout mirrors of the corresponding C structs;
# union members share offset 0.
# ---------------------------------------------------------------------------
@c.record
class struct_rpc_alloc_object_v25_08(c.Struct):
    SIZE = 64
    hClient: Annotated[NvHandle, 0]
    hParent: Annotated[NvHandle, 4]
    hObject: Annotated[NvHandle, 8]
    hClass: Annotated[NvU32, 12]
    param_len: Annotated[NvU32, 16]
    params: Annotated[alloc_object_params_v25_08, 24]

@c.record
class union_alloc_object_params_v25_08(c.Struct):
    # C union: exactly one member is meaningful, chosen by the enclosing
    # message's hClass; every member overlays offset 0.
    SIZE = 40
    param_NV50_TESLA: Annotated[alloc_object_NV50_TESLA_v03_00, 0]
    param_GT212_DMA_COPY: Annotated[alloc_object_GT212_DMA_COPY_v03_00, 0]
    param_GF100_DISP_SW: Annotated[alloc_object_GF100_DISP_SW_v03_00, 0]
    param_KEPLER_CHANNEL_GROUP_A: Annotated[alloc_object_KEPLER_CHANNEL_GROUP_A_v12_08, 0]
    param_FERMI_CONTEXT_SHARE_A: Annotated[alloc_object_FERMI_CONTEXT_SHARE_A_v04_00, 0]
    param_NVD0B7_VIDEO_ENCODER: Annotated[alloc_object_NVD0B7_VIDEO_ENCODER_v03_00, 0]
    param_FERMI_VASPACE_A: Annotated[alloc_object_FERMI_VASPACE_A_v03_00, 0]
    param_NVB0B0_VIDEO_DECODER: Annotated[alloc_object_NVB0B0_VIDEO_DECODER_v03_00, 0]
    param_NV83DE_ALLOC_PARAMETERS: Annotated[alloc_object_NV83DE_ALLOC_PARAMETERS_v03_00, 0]
    param_NVENC_SW_SESSION: Annotated[alloc_object_NVENC_SW_SESSION_v06_01, 0]
    param_NVC4B0_VIDEO_DECODER: Annotated[alloc_object_NVC4B0_VIDEO_DECODER_v12_02, 0]
    param_NVFBC_SW_SESSION: Annotated[alloc_object_NVFBC_SW_SESSION_v12_04, 0]
    param_NV_NVJPG_ALLOCATION_PARAMETERS: Annotated[alloc_object_NV_NVJPG_ALLOCATION_PARAMETERS_v20_02, 0]
    param_NV503B_ALLOC_PARAMETERS: Annotated[alloc_object_NV503B_ALLOC_PARAMETERS_v1D_02, 0]
    param_NVC637_ALLOCATION_PARAMETERS: Annotated[alloc_object_NVC637_ALLOCATION_PARAMETERS_v13_00, 0]
    param_NV_MEMORY_VIRTUAL_ALLOCATION_PARAMS: Annotated[alloc_object_NV_MEMORY_VIRTUAL_ALLOCATION_PARAMS_v13_03, 0]
    param_NVC638_ALLOCATION_PARAMETERS: Annotated[alloc_object_NVC638_ALLOCATION_PARAMETERS_v18_06, 0]
    param_NV503C_ALLOC_PARAMETERS: Annotated[alloc_object_NV503C_ALLOC_PARAMETERS_v18_15, 0]
    param_NVC670_ALLOCATION_PARAMETERS: Annotated[alloc_object_NVC670_ALLOCATION_PARAMETERS_v1A_01, 0]
    param_NVB1CC_ALLOC_PARAMETERS: Annotated[alloc_object_NVB1CC_ALLOC_PARAMETERS_v1A_03, 0]
    param_NVB2CC_ALLOC_PARAMETERS: Annotated[alloc_object_NVB2CC_ALLOC_PARAMETERS_v1A_03, 0]
    param_NV_GR_ALLOCATION_PARAMETERS: Annotated[NV_GR_ALLOCATION_PARAMETERS_v1A_17, 0]
    param_NV_UVM_CHANNEL_RETAINER_ALLOC_PARAMS: Annotated[alloc_object_NV_UVM_CHANNEL_RETAINER_ALLOC_PARAMS_v1A_1B, 0]
    param_NV00F8_ALLOCATION_PARAMETERS: Annotated[alloc_object_NV00F8_ALLOCATION_PARAMETERS_v1E_0C, 0]
    param_NVC9FA_VIDEO_OFA: Annotated[alloc_object_NVC9FA_VIDEO_OFA_v1F_00, 0]
    param_NV2081_ALLOC_PARAMETERS: Annotated[alloc_object_NV2081_ALLOC_PARAMETERS_v25_08, 0]

alloc_object_params_v25_08: TypeAlias = union_alloc_object_params_v25_08

# Per-class allocation parameter structs referenced by the union above.
@c.record
class struct_alloc_object_NV50_TESLA_v03_00(c.Struct):
    SIZE = 16
    version: Annotated[NvU32, 0]
    flags: Annotated[NvU32, 4]
    size: Annotated[NvU32, 8]
    caps: Annotated[NvU32, 12]

alloc_object_NV50_TESLA_v03_00: TypeAlias = struct_alloc_object_NV50_TESLA_v03_00

@c.record
class struct_alloc_object_GT212_DMA_COPY_v03_00(c.Struct):
    SIZE = 8
    version: Annotated[NvU32, 0]
    engineInstance: Annotated[NvU32, 4]

alloc_object_GT212_DMA_COPY_v03_00: TypeAlias = struct_alloc_object_GT212_DMA_COPY_v03_00

@c.record
class struct_alloc_object_GF100_DISP_SW_v03_00(c.Struct):
    SIZE = 32
    _reserved1: Annotated[NvU32, 0]
    _reserved2: Annotated[NvU64, 8]
    logicalHeadId: Annotated[NvU32, 16]
    displayMask: Annotated[NvU32, 20]
    caps: Annotated[NvU32, 24]

alloc_object_GF100_DISP_SW_v03_00: TypeAlias = struct_alloc_object_GF100_DISP_SW_v03_00

@c.record
class struct_alloc_object_KEPLER_CHANNEL_GROUP_A_v12_08(c.Struct):
    SIZE = 12
    hObjectError: Annotated[NvU32, 0]
    hVASpace: Annotated[NvU32, 4]
    engineType: Annotated[NvU32, 8]

alloc_object_KEPLER_CHANNEL_GROUP_A_v12_08: TypeAlias = struct_alloc_object_KEPLER_CHANNEL_GROUP_A_v12_08

@c.record
class struct_alloc_object_FERMI_CONTEXT_SHARE_A_v04_00(c.Struct):
    SIZE = 12
    hVASpace: Annotated[NvU32, 0]
    flags: Annotated[NvU32, 4]
    subctxId: Annotated[NvU32, 8]

alloc_object_FERMI_CONTEXT_SHARE_A_v04_00: TypeAlias = struct_alloc_object_FERMI_CONTEXT_SHARE_A_v04_00

@c.record
class struct_alloc_object_NVD0B7_VIDEO_ENCODER_v03_00(c.Struct):
    SIZE = 12
    size: Annotated[NvU32, 0]
    prohibitMultipleInstances: Annotated[NvU32, 4]
    engineInstance: Annotated[NvU32, 8]

alloc_object_NVD0B7_VIDEO_ENCODER_v03_00: TypeAlias = struct_alloc_object_NVD0B7_VIDEO_ENCODER_v03_00

@c.record
class struct_alloc_object_FERMI_VASPACE_A_v03_00(c.Struct):
    SIZE = 32
    index: Annotated[NvU32, 0]
    flags: Annotated[NvU32, 4]
    vaSize: Annotated[NvU64, 8]
    bigPageSize: Annotated[NvU32, 16]
    vaBase: Annotated[NvU64, 24]

alloc_object_FERMI_VASPACE_A_v03_00: TypeAlias = struct_alloc_object_FERMI_VASPACE_A_v03_00

@c.record
class struct_alloc_object_NVB0B0_VIDEO_DECODER_v03_00(c.Struct):
    SIZE = 8
    size: Annotated[NvU32, 0]
    prohibitMultipleInstances: Annotated[NvU32, 4]

alloc_object_NVB0B0_VIDEO_DECODER_v03_00: TypeAlias = struct_alloc_object_NVB0B0_VIDEO_DECODER_v03_00

@c.record
class struct_alloc_object_NV83DE_ALLOC_PARAMETERS_v03_00(c.Struct):
    SIZE = 12
    hDebuggerClient: Annotated[NvHandle, 0]
    hAppClient: Annotated[NvHandle, 4]
    hClass3dObject: Annotated[NvHandle, 8]

alloc_object_NV83DE_ALLOC_PARAMETERS_v03_00: TypeAlias = struct_alloc_object_NV83DE_ALLOC_PARAMETERS_v03_00

@c.record
class struct_alloc_object_NVENC_SW_SESSION_v06_01(c.Struct):
    SIZE = 12
    codecType: Annotated[NvU32, 0]
    hResolution: Annotated[NvU32, 4]
    vResolution: Annotated[NvU32, 8]

alloc_object_NVENC_SW_SESSION_v06_01: TypeAlias = struct_alloc_object_NVENC_SW_SESSION_v06_01

@c.record
class struct_alloc_object_NVC4B0_VIDEO_DECODER_v12_02(c.Struct):
    SIZE = 12
    size: Annotated[NvU32, 0]
    prohibitMultipleInstances: Annotated[NvU32, 4]
    engineInstance: Annotated[NvU32, 8]

alloc_object_NVC4B0_VIDEO_DECODER_v12_02: TypeAlias = struct_alloc_object_NVC4B0_VIDEO_DECODER_v12_02

@c.record
class struct_alloc_object_NVFBC_SW_SESSION_v12_04(c.Struct):
    SIZE = 20
    displayOrdinal: Annotated[NvU32, 0]
    sessionType: Annotated[NvU32, 4]
    sessionFlags: Annotated[NvU32, 8]
    hMaxResolution: Annotated[NvU32, 12]
    vMaxResolution: Annotated[NvU32, 16]

alloc_object_NVFBC_SW_SESSION_v12_04: TypeAlias = struct_alloc_object_NVFBC_SW_SESSION_v12_04

@c.record
class struct_alloc_object_NV_NVJPG_ALLOCATION_PARAMETERS_v20_02(c.Struct):
    SIZE = 12
    size: Annotated[NvU32, 0]
    prohibitMultipleInstances: Annotated[NvU32, 4]
    engineInstance: Annotated[NvU32, 8]

alloc_object_NV_NVJPG_ALLOCATION_PARAMETERS_v20_02: TypeAlias = struct_alloc_object_NV_NVJPG_ALLOCATION_PARAMETERS_v20_02

@c.record
class struct_alloc_object_NV503B_ALLOC_PARAMETERS_v1D_02(c.Struct):
    SIZE = 32
    hSubDevice: Annotated[NvHandle, 0]
    hPeerSubDevice: Annotated[NvHandle, 4]
    subDevicePeerIdMask: Annotated[NvU32, 8]
    peerSubDevicePeerIdMask: Annotated[NvU32, 12]
    mailboxBar1Addr: Annotated[NvU64, 16]
    mailboxTotalSize: Annotated[NvU32, 24]
    flags: Annotated[NvU32, 28]

alloc_object_NV503B_ALLOC_PARAMETERS_v1D_02: TypeAlias = struct_alloc_object_NV503B_ALLOC_PARAMETERS_v1D_02

@c.record
class struct_alloc_object_NVC637_ALLOCATION_PARAMETERS_v13_00(c.Struct):
    SIZE = 4
    swizzId: Annotated[NvU32, 0]

alloc_object_NVC637_ALLOCATION_PARAMETERS_v13_00: TypeAlias = struct_alloc_object_NVC637_ALLOCATION_PARAMETERS_v13_00

@c.record
class struct_alloc_object_NV_MEMORY_VIRTUAL_ALLOCATION_PARAMS_v13_03(c.Struct):
    SIZE = 24
    offset: Annotated[NvU64, 0]
    limit: Annotated[NvU64, 8]
    hVASpace: Annotated[NvHandle, 16]

alloc_object_NV_MEMORY_VIRTUAL_ALLOCATION_PARAMS_v13_03: TypeAlias = struct_alloc_object_NV_MEMORY_VIRTUAL_ALLOCATION_PARAMS_v13_03

@c.record
class struct_alloc_object_NVC638_ALLOCATION_PARAMETERS_v18_06(c.Struct):
    SIZE = 4
    execPartitionId: Annotated[NvU32, 0]

alloc_object_NVC638_ALLOCATION_PARAMETERS_v18_06: TypeAlias = struct_alloc_object_NVC638_ALLOCATION_PARAMETERS_v18_06

@c.record
class struct_alloc_object_NV503C_ALLOC_PARAMETERS_v18_15(c.Struct):
    SIZE = 16
    flags: Annotated[NvU32, 0]
    p2pToken: Annotated[NvU64, 8]

alloc_object_NV503C_ALLOC_PARAMETERS_v18_15: TypeAlias = struct_alloc_object_NV503C_ALLOC_PARAMETERS_v18_15

@c.record
class struct_alloc_object_NVC670_ALLOCATION_PARAMETERS_v1A_01(c.Struct):
    SIZE = 12
    numHeads: Annotated[NvU32, 0]
    numSors: Annotated[NvU32, 4]
    numDsis: Annotated[NvU32, 8]

alloc_object_NVC670_ALLOCATION_PARAMETERS_v1A_01: TypeAlias = struct_alloc_object_NVC670_ALLOCATION_PARAMETERS_v1A_01

@c.record
class struct_alloc_object_NVB1CC_ALLOC_PARAMETERS_v1A_03(c.Struct):
    SIZE = 4
    hSubDevice: Annotated[NvHandle, 0]

alloc_object_NVB1CC_ALLOC_PARAMETERS_v1A_03: TypeAlias = struct_alloc_object_NVB1CC_ALLOC_PARAMETERS_v1A_03

@c.record
class struct_alloc_object_NVB2CC_ALLOC_PARAMETERS_v1A_03(c.Struct):
    SIZE = 8
    hClientTarget: Annotated[NvHandle, 0]
    hContextTarget: Annotated[NvHandle, 4]

alloc_object_NVB2CC_ALLOC_PARAMETERS_v1A_03: TypeAlias = struct_alloc_object_NVB2CC_ALLOC_PARAMETERS_v1A_03

@c.record
class struct_NV_GR_ALLOCATION_PARAMETERS_v1A_17(c.Struct):
    SIZE = 16
    version: Annotated[NvU32, 0]
    flags: Annotated[NvU32, 4]
    size: Annotated[NvU32, 8]
    caps: Annotated[NvU32, 12]

NV_GR_ALLOCATION_PARAMETERS_v1A_17: TypeAlias = struct_NV_GR_ALLOCATION_PARAMETERS_v1A_17

@c.record
class struct_alloc_object_NV_UVM_CHANNEL_RETAINER_ALLOC_PARAMS_v1A_1B(c.Struct):
    SIZE = 8
    hClient: Annotated[NvHandle, 0]
    hChannel: Annotated[NvHandle, 4]

alloc_object_NV_UVM_CHANNEL_RETAINER_ALLOC_PARAMS_v1A_1B: TypeAlias = struct_alloc_object_NV_UVM_CHANNEL_RETAINER_ALLOC_PARAMS_v1A_1B

@c.record
class struct_alloc_object_NV00F8_ALLOCATION_PARAMETERS_v1E_0C(c.Struct):
    SIZE = 40
    alignment: Annotated[NvU64, 0]
    allocSize: Annotated[NvU64, 8]
    pageSize: Annotated[NvU32, 16]
    allocFlags: Annotated[NvU32, 20]
    map: Annotated[NV00F8_ALLOCATION_PARAMETERS_MAP_STRUCT_v1E_0C, 24]

alloc_object_NV00F8_ALLOCATION_PARAMETERS_v1E_0C: TypeAlias = struct_alloc_object_NV00F8_ALLOCATION_PARAMETERS_v1E_0C

@c.record
class struct_NV00F8_ALLOCATION_PARAMETERS_MAP_STRUCT_v1E_0C(c.Struct):
    SIZE = 16
    offset: Annotated[NvU64, 0]
    hVidMem: Annotated[NvHandle, 8]
    flags: Annotated[NvU32, 12]

NV00F8_ALLOCATION_PARAMETERS_MAP_STRUCT_v1E_0C: TypeAlias = struct_NV00F8_ALLOCATION_PARAMETERS_MAP_STRUCT_v1E_0C

@c.record
class struct_alloc_object_NVC9FA_VIDEO_OFA_v1F_00(c.Struct):
    SIZE = 8
    size: Annotated[NvU32, 0]
    prohibitMultipleInstances: Annotated[NvU32, 4]

alloc_object_NVC9FA_VIDEO_OFA_v1F_00: TypeAlias = struct_alloc_object_NVC9FA_VIDEO_OFA_v1F_00

@c.record
class struct_alloc_object_NV2081_ALLOC_PARAMETERS_v25_08(c.Struct):
    SIZE = 4
    reserved: Annotated[NvU32, 0]

alloc_object_NV2081_ALLOC_PARAMETERS_v25_08: TypeAlias = struct_alloc_object_NV2081_ALLOC_PARAMETERS_v25_08
rpc_alloc_object_v25_08: TypeAlias = struct_rpc_alloc_object_v25_08
# ---------------------------------------------------------------------------
# alloc_object RPC revisions v26_00 and v27_00.  Same member set as v25_08
# but the param union grows to 56 bytes and gains an explicit param_padding
# overlay; message SIZE grows to 80 accordingly.
# ---------------------------------------------------------------------------
@c.record
class struct_rpc_alloc_object_v26_00(c.Struct):
    SIZE = 80
    hClient: Annotated[NvHandle, 0]
    hParent: Annotated[NvHandle, 4]
    hObject: Annotated[NvHandle, 8]
    hClass: Annotated[NvU32, 12]
    param_len: Annotated[NvU32, 16]
    params: Annotated[alloc_object_params_v26_00, 24]

@c.record
class union_alloc_object_params_v26_00(c.Struct):
    # C union: one member selected by hClass; all overlay offset 0.
    SIZE = 56
    param_NV50_TESLA: Annotated[alloc_object_NV50_TESLA_v03_00, 0]
    param_GT212_DMA_COPY: Annotated[alloc_object_GT212_DMA_COPY_v03_00, 0]
    param_GF100_DISP_SW: Annotated[alloc_object_GF100_DISP_SW_v03_00, 0]
    param_KEPLER_CHANNEL_GROUP_A: Annotated[alloc_object_KEPLER_CHANNEL_GROUP_A_v12_08, 0]
    param_FERMI_CONTEXT_SHARE_A: Annotated[alloc_object_FERMI_CONTEXT_SHARE_A_v04_00, 0]
    param_NVD0B7_VIDEO_ENCODER: Annotated[alloc_object_NVD0B7_VIDEO_ENCODER_v03_00, 0]
    param_FERMI_VASPACE_A: Annotated[alloc_object_FERMI_VASPACE_A_v03_00, 0]
    param_NVB0B0_VIDEO_DECODER: Annotated[alloc_object_NVB0B0_VIDEO_DECODER_v03_00, 0]
    param_NV83DE_ALLOC_PARAMETERS: Annotated[alloc_object_NV83DE_ALLOC_PARAMETERS_v03_00, 0]
    param_NVENC_SW_SESSION: Annotated[alloc_object_NVENC_SW_SESSION_v06_01, 0]
    param_NVC4B0_VIDEO_DECODER: Annotated[alloc_object_NVC4B0_VIDEO_DECODER_v12_02, 0]
    param_NVFBC_SW_SESSION: Annotated[alloc_object_NVFBC_SW_SESSION_v12_04, 0]
    param_NV_NVJPG_ALLOCATION_PARAMETERS: Annotated[alloc_object_NV_NVJPG_ALLOCATION_PARAMETERS_v20_02, 0]
    param_NV503B_ALLOC_PARAMETERS: Annotated[alloc_object_NV503B_ALLOC_PARAMETERS_v1D_02, 0]
    param_NVC637_ALLOCATION_PARAMETERS: Annotated[alloc_object_NVC637_ALLOCATION_PARAMETERS_v13_00, 0]
    param_NV_MEMORY_VIRTUAL_ALLOCATION_PARAMS: Annotated[alloc_object_NV_MEMORY_VIRTUAL_ALLOCATION_PARAMS_v13_03, 0]
    param_NVC638_ALLOCATION_PARAMETERS: Annotated[alloc_object_NVC638_ALLOCATION_PARAMETERS_v18_06, 0]
    param_NV503C_ALLOC_PARAMETERS: Annotated[alloc_object_NV503C_ALLOC_PARAMETERS_v18_15, 0]
    param_NVC670_ALLOCATION_PARAMETERS: Annotated[alloc_object_NVC670_ALLOCATION_PARAMETERS_v1A_01, 0]
    param_NVB1CC_ALLOC_PARAMETERS: Annotated[alloc_object_NVB1CC_ALLOC_PARAMETERS_v1A_03, 0]
    param_NVB2CC_ALLOC_PARAMETERS: Annotated[alloc_object_NVB2CC_ALLOC_PARAMETERS_v1A_03, 0]
    param_NV_GR_ALLOCATION_PARAMETERS: Annotated[NV_GR_ALLOCATION_PARAMETERS_v1A_17, 0]
    param_NV_UVM_CHANNEL_RETAINER_ALLOC_PARAMS: Annotated[alloc_object_NV_UVM_CHANNEL_RETAINER_ALLOC_PARAMS_v1A_1B, 0]
    param_NV00F8_ALLOCATION_PARAMETERS: Annotated[alloc_object_NV00F8_ALLOCATION_PARAMETERS_v1E_0C, 0]
    param_NVC9FA_VIDEO_OFA: Annotated[alloc_object_NVC9FA_VIDEO_OFA_v1F_00, 0]
    param_NV2081_ALLOC_PARAMETERS: Annotated[alloc_object_NV2081_ALLOC_PARAMETERS_v25_08, 0]
    param_padding: Annotated[c.Array[NvU8, Literal[56]], 0]

alloc_object_params_v26_00: TypeAlias = union_alloc_object_params_v26_00
rpc_alloc_object_v26_00: TypeAlias = struct_rpc_alloc_object_v26_00

@c.record
class struct_rpc_alloc_object_v27_00(c.Struct):
    # Identical layout to v26_00; separate type kept per wire revision.
    SIZE = 80
    hClient: Annotated[NvHandle, 0]
    hParent: Annotated[NvHandle, 4]
    hObject: Annotated[NvHandle, 8]
    hClass: Annotated[NvU32, 12]
    param_len: Annotated[NvU32, 16]
    params: Annotated[alloc_object_params_v27_00, 24]

@c.record
class union_alloc_object_params_v27_00(c.Struct):
    # C union, same member set as v26_00.
    SIZE = 56
    param_NV50_TESLA: Annotated[alloc_object_NV50_TESLA_v03_00, 0]
    param_GT212_DMA_COPY: Annotated[alloc_object_GT212_DMA_COPY_v03_00, 0]
    param_GF100_DISP_SW: Annotated[alloc_object_GF100_DISP_SW_v03_00, 0]
    param_KEPLER_CHANNEL_GROUP_A: Annotated[alloc_object_KEPLER_CHANNEL_GROUP_A_v12_08, 0]
    param_FERMI_CONTEXT_SHARE_A: Annotated[alloc_object_FERMI_CONTEXT_SHARE_A_v04_00, 0]
    param_NVD0B7_VIDEO_ENCODER: Annotated[alloc_object_NVD0B7_VIDEO_ENCODER_v03_00, 0]
    param_FERMI_VASPACE_A: Annotated[alloc_object_FERMI_VASPACE_A_v03_00, 0]
    param_NVB0B0_VIDEO_DECODER: Annotated[alloc_object_NVB0B0_VIDEO_DECODER_v03_00, 0]
    param_NV83DE_ALLOC_PARAMETERS: Annotated[alloc_object_NV83DE_ALLOC_PARAMETERS_v03_00, 0]
    param_NVENC_SW_SESSION: Annotated[alloc_object_NVENC_SW_SESSION_v06_01, 0]
    param_NVC4B0_VIDEO_DECODER: Annotated[alloc_object_NVC4B0_VIDEO_DECODER_v12_02, 0]
    param_NVFBC_SW_SESSION: Annotated[alloc_object_NVFBC_SW_SESSION_v12_04, 0]
    param_NV_NVJPG_ALLOCATION_PARAMETERS: Annotated[alloc_object_NV_NVJPG_ALLOCATION_PARAMETERS_v20_02, 0]
    param_NV503B_ALLOC_PARAMETERS: Annotated[alloc_object_NV503B_ALLOC_PARAMETERS_v1D_02, 0]
    param_NVC637_ALLOCATION_PARAMETERS: Annotated[alloc_object_NVC637_ALLOCATION_PARAMETERS_v13_00, 0]
    param_NV_MEMORY_VIRTUAL_ALLOCATION_PARAMS: Annotated[alloc_object_NV_MEMORY_VIRTUAL_ALLOCATION_PARAMS_v13_03, 0]
    param_NVC638_ALLOCATION_PARAMETERS: Annotated[alloc_object_NVC638_ALLOCATION_PARAMETERS_v18_06, 0]
    param_NV503C_ALLOC_PARAMETERS: Annotated[alloc_object_NV503C_ALLOC_PARAMETERS_v18_15, 0]
    param_NVC670_ALLOCATION_PARAMETERS: Annotated[alloc_object_NVC670_ALLOCATION_PARAMETERS_v1A_01, 0]
    param_NVB1CC_ALLOC_PARAMETERS: Annotated[alloc_object_NVB1CC_ALLOC_PARAMETERS_v1A_03, 0]
    param_NVB2CC_ALLOC_PARAMETERS: Annotated[alloc_object_NVB2CC_ALLOC_PARAMETERS_v1A_03, 0]
    param_NV_GR_ALLOCATION_PARAMETERS: Annotated[NV_GR_ALLOCATION_PARAMETERS_v1A_17, 0]
    param_NV_UVM_CHANNEL_RETAINER_ALLOC_PARAMS: Annotated[alloc_object_NV_UVM_CHANNEL_RETAINER_ALLOC_PARAMS_v1A_1B, 0]
    param_NV00F8_ALLOCATION_PARAMETERS: Annotated[alloc_object_NV00F8_ALLOCATION_PARAMETERS_v1E_0C, 0]
    param_NVC9FA_VIDEO_OFA: Annotated[alloc_object_NVC9FA_VIDEO_OFA_v1F_00, 0]
    param_NV2081_ALLOC_PARAMETERS: Annotated[alloc_object_NV2081_ALLOC_PARAMETERS_v25_08, 0]
    param_padding: Annotated[c.Array[NvU8, Literal[56]], 0]

alloc_object_params_v27_00: TypeAlias = union_alloc_object_params_v27_00
rpc_alloc_object_v27_00: TypeAlias = struct_rpc_alloc_object_v27_00
# ---------------------------------------------------------------------------
# alloc_object RPC revision v29_06.  Differs from v27_00 only in the
# NVC9FA_VIDEO_OFA member, which picks up an engineInstance field (v29_06).
# This is the latest revision, so the unversioned `_v` alias points here.
# ---------------------------------------------------------------------------
@c.record
class struct_rpc_alloc_object_v29_06(c.Struct):
    SIZE = 80
    hClient: Annotated[NvHandle, 0]
    hParent: Annotated[NvHandle, 4]
    hObject: Annotated[NvHandle, 8]
    hClass: Annotated[NvU32, 12]
    param_len: Annotated[NvU32, 16]
    params: Annotated[alloc_object_params_v29_06, 24]

@c.record
class union_alloc_object_params_v29_06(c.Struct):
    # C union: one member selected by hClass; all overlay offset 0.
    SIZE = 56
    param_NV50_TESLA: Annotated[alloc_object_NV50_TESLA_v03_00, 0]
    param_GT212_DMA_COPY: Annotated[alloc_object_GT212_DMA_COPY_v03_00, 0]
    param_GF100_DISP_SW: Annotated[alloc_object_GF100_DISP_SW_v03_00, 0]
    param_KEPLER_CHANNEL_GROUP_A: Annotated[alloc_object_KEPLER_CHANNEL_GROUP_A_v12_08, 0]
    param_FERMI_CONTEXT_SHARE_A: Annotated[alloc_object_FERMI_CONTEXT_SHARE_A_v04_00, 0]
    param_NVD0B7_VIDEO_ENCODER: Annotated[alloc_object_NVD0B7_VIDEO_ENCODER_v03_00, 0]
    param_FERMI_VASPACE_A: Annotated[alloc_object_FERMI_VASPACE_A_v03_00, 0]
    param_NVB0B0_VIDEO_DECODER: Annotated[alloc_object_NVB0B0_VIDEO_DECODER_v03_00, 0]
    param_NV83DE_ALLOC_PARAMETERS: Annotated[alloc_object_NV83DE_ALLOC_PARAMETERS_v03_00, 0]
    param_NVENC_SW_SESSION: Annotated[alloc_object_NVENC_SW_SESSION_v06_01, 0]
    param_NVC4B0_VIDEO_DECODER: Annotated[alloc_object_NVC4B0_VIDEO_DECODER_v12_02, 0]
    param_NVFBC_SW_SESSION: Annotated[alloc_object_NVFBC_SW_SESSION_v12_04, 0]
    param_NV_NVJPG_ALLOCATION_PARAMETERS: Annotated[alloc_object_NV_NVJPG_ALLOCATION_PARAMETERS_v20_02, 0]
    param_NV503B_ALLOC_PARAMETERS: Annotated[alloc_object_NV503B_ALLOC_PARAMETERS_v1D_02, 0]
    param_NVC637_ALLOCATION_PARAMETERS: Annotated[alloc_object_NVC637_ALLOCATION_PARAMETERS_v13_00, 0]
    param_NV_MEMORY_VIRTUAL_ALLOCATION_PARAMS: Annotated[alloc_object_NV_MEMORY_VIRTUAL_ALLOCATION_PARAMS_v13_03, 0]
    param_NVC638_ALLOCATION_PARAMETERS: Annotated[alloc_object_NVC638_ALLOCATION_PARAMETERS_v18_06, 0]
    param_NV503C_ALLOC_PARAMETERS: Annotated[alloc_object_NV503C_ALLOC_PARAMETERS_v18_15, 0]
    param_NVC670_ALLOCATION_PARAMETERS: Annotated[alloc_object_NVC670_ALLOCATION_PARAMETERS_v1A_01, 0]
    param_NVB1CC_ALLOC_PARAMETERS: Annotated[alloc_object_NVB1CC_ALLOC_PARAMETERS_v1A_03, 0]
    param_NVB2CC_ALLOC_PARAMETERS: Annotated[alloc_object_NVB2CC_ALLOC_PARAMETERS_v1A_03, 0]
    param_NV_GR_ALLOCATION_PARAMETERS: Annotated[NV_GR_ALLOCATION_PARAMETERS_v1A_17, 0]
    param_NV_UVM_CHANNEL_RETAINER_ALLOC_PARAMS: Annotated[alloc_object_NV_UVM_CHANNEL_RETAINER_ALLOC_PARAMS_v1A_1B, 0]
    param_NV00F8_ALLOCATION_PARAMETERS: Annotated[alloc_object_NV00F8_ALLOCATION_PARAMETERS_v1E_0C, 0]
    param_NVC9FA_VIDEO_OFA: Annotated[alloc_object_NVC9FA_VIDEO_OFA_v29_06, 0]
    param_NV2081_ALLOC_PARAMETERS: Annotated[alloc_object_NV2081_ALLOC_PARAMETERS_v25_08, 0]
    param_padding: Annotated[c.Array[NvU8, Literal[56]], 0]

alloc_object_params_v29_06: TypeAlias = union_alloc_object_params_v29_06

@c.record
class struct_alloc_object_NVC9FA_VIDEO_OFA_v29_06(c.Struct):
    # v29_06 revision of the OFA params: adds engineInstance over v1F_00.
    SIZE = 12
    size: Annotated[NvU32, 0]
    prohibitMultipleInstances: Annotated[NvU32, 4]
    engineInstance: Annotated[NvU32, 8]

alloc_object_NVC9FA_VIDEO_OFA_v29_06: TypeAlias = struct_alloc_object_NVC9FA_VIDEO_OFA_v29_06
rpc_alloc_object_v29_06: TypeAlias = struct_rpc_alloc_object_v29_06
rpc_alloc_object_v: TypeAlias = struct_rpc_alloc_object_v29_06
# ---------------------------------------------------------------------------
# Misc RPC messages: free, log, DMA map/unmap, subdevice alloc, dup, idle
# channels, event alloc.  Several simply embed the corresponding NVOSxx
# parameter struct at offset 0.
# ---------------------------------------------------------------------------
@c.record
class struct_rpc_free_v03_00(c.Struct):
    # Free an object; wraps NVOS00 parameters.
    SIZE = 16
    params: Annotated[NVOS00_PARAMETERS_v03_00, 0]

@c.record
class struct_NVOS00_PARAMETERS_v03_00(c.Struct):
    SIZE = 16
    hRoot: Annotated[NvHandle, 0]
    hObjectParent: Annotated[NvHandle, 4]
    hObjectOld: Annotated[NvHandle, 8]
    status: Annotated[NvV32, 12]

NVOS00_PARAMETERS_v03_00: TypeAlias = struct_NVOS00_PARAMETERS_v03_00
NvV32: TypeAlias = Annotated[int, ctypes.c_uint32]
rpc_free_v03_00: TypeAlias = struct_rpc_free_v03_00
rpc_free_v: TypeAlias = struct_rpc_free_v03_00

@c.record
class struct_rpc_log_v03_00(c.Struct):
    # Log message header; log_msg is a flexible array of log_len bytes.
    SIZE = 8
    level: Annotated[NvU32, 0]
    log_len: Annotated[NvU32, 4]
    log_msg: Annotated[c.Array[Annotated[bytes, ctypes.c_char], Literal[0]], 8]

rpc_log_v03_00: TypeAlias = struct_rpc_log_v03_00
rpc_log_v: TypeAlias = struct_rpc_log_v03_00

@c.record
class struct_rpc_map_memory_dma_v03_00(c.Struct):
    # Map memory into a DMA address space; wraps NVOS46 parameters.
    SIZE = 56
    params: Annotated[NVOS46_PARAMETERS_v03_00, 0]

@c.record
class struct_NVOS46_PARAMETERS_v03_00(c.Struct):
    SIZE = 56
    hClient: Annotated[NvHandle, 0]
    hDevice: Annotated[NvHandle, 4]
    hDma: Annotated[NvHandle, 8]
    hMemory: Annotated[NvHandle, 12]
    offset: Annotated[NvU64, 16]
    length: Annotated[NvU64, 24]
    flags: Annotated[NvV32, 32]
    dmaOffset: Annotated[NvU64, 40]
    status: Annotated[NvV32, 48]

NVOS46_PARAMETERS_v03_00: TypeAlias = struct_NVOS46_PARAMETERS_v03_00
rpc_map_memory_dma_v03_00: TypeAlias = struct_rpc_map_memory_dma_v03_00
rpc_map_memory_dma_v: TypeAlias = struct_rpc_map_memory_dma_v03_00

@c.record
class struct_rpc_unmap_memory_dma_v03_00(c.Struct):
    # Inverse of map_memory_dma; wraps NVOS47 parameters.
    SIZE = 40
    params: Annotated[NVOS47_PARAMETERS_v03_00, 0]

@c.record
class struct_NVOS47_PARAMETERS_v03_00(c.Struct):
    SIZE = 40
    hClient: Annotated[NvHandle, 0]
    hDevice: Annotated[NvHandle, 4]
    hDma: Annotated[NvHandle, 8]
    hMemory: Annotated[NvHandle, 12]
    flags: Annotated[NvV32, 16]
    dmaOffset: Annotated[NvU64, 24]
    status: Annotated[NvV32, 32]

NVOS47_PARAMETERS_v03_00: TypeAlias = struct_NVOS47_PARAMETERS_v03_00
rpc_unmap_memory_dma_v03_00: TypeAlias = struct_rpc_unmap_memory_dma_v03_00
rpc_unmap_memory_dma_v: TypeAlias = struct_rpc_unmap_memory_dma_v03_00

@c.record
class struct_rpc_alloc_subdevice_v08_01(c.Struct):
    # Allocate a subdevice: instance index plus NVOS21 allocation params.
    SIZE = 40
    subDeviceInst: Annotated[NvU32, 0]
    params: Annotated[NVOS21_PARAMETERS_v03_00, 8]

@c.record
class struct_NVOS21_PARAMETERS_v03_00(c.Struct):
    SIZE = 32
    hRoot: Annotated[NvHandle, 0]
    hObjectParent: Annotated[NvHandle, 4]
    hObjectNew: Annotated[NvHandle, 8]
    hClass: Annotated[NvV32, 12]
    pAllocParms: Annotated[NvP64, 16]
    status: Annotated[NvV32, 24]

NVOS21_PARAMETERS_v03_00: TypeAlias = struct_NVOS21_PARAMETERS_v03_00
NvP64: TypeAlias = ctypes.c_void_p
rpc_alloc_subdevice_v08_01: TypeAlias = struct_rpc_alloc_subdevice_v08_01
rpc_alloc_subdevice_v: TypeAlias = struct_rpc_alloc_subdevice_v08_01

@c.record
class struct_rpc_dup_object_v03_00(c.Struct):
    # Duplicate an object handle between clients; wraps NVOS55 parameters.
    SIZE = 28
    params: Annotated[NVOS55_PARAMETERS_v03_00, 0]

@c.record
class struct_NVOS55_PARAMETERS_v03_00(c.Struct):
    SIZE = 28
    hClient: Annotated[NvHandle, 0]
    hParent: Annotated[NvHandle, 4]
    hObject: Annotated[NvHandle, 8]
    hClientSrc: Annotated[NvHandle, 12]
    hObjectSrc: Annotated[NvHandle, 16]
    flags: Annotated[NvU32, 20]
    status: Annotated[NvU32, 24]

NVOS55_PARAMETERS_v03_00: TypeAlias = struct_NVOS55_PARAMETERS_v03_00
rpc_dup_object_v03_00: TypeAlias = struct_rpc_dup_object_v03_00
rpc_dup_object_v: TypeAlias = struct_rpc_dup_object_v03_00

@c.record
class struct_rpc_idle_channels_v03_00(c.Struct):
    # Idle a set of channels; channel_list is a flexible array of
    # nchannels entries following the fixed header.
    SIZE = 12
    flags: Annotated[NvU32, 0]
    timeout: Annotated[NvU32, 4]
    nchannels: Annotated[NvU32, 8]
    channel_list: Annotated[c.Array[idle_channel_list_v03_00, Literal[0]], 12]

@c.record
class struct_idle_channel_list_v03_00(c.Struct):
    SIZE = 12
    phClient: Annotated[NvU32, 0]
    phDevice: Annotated[NvU32, 4]
    phChannel: Annotated[NvU32, 8]

idle_channel_list_v03_00: TypeAlias = struct_idle_channel_list_v03_00
rpc_idle_channels_v03_00: TypeAlias = struct_rpc_idle_channels_v03_00
rpc_idle_channels_v: TypeAlias = struct_rpc_idle_channels_v03_00

@c.record
class struct_rpc_alloc_event_v03_00(c.Struct):
    # Allocate an event object bound to a channel/object notifier index.
    SIZE = 28
    hClient: Annotated[NvHandle, 0]
    hParentClient: Annotated[NvHandle, 4]
    hChannel: Annotated[NvHandle, 8]
    hObject: Annotated[NvHandle, 12]
    hEvent: Annotated[NvHandle, 16]
    hClass: Annotated[NvU32, 20]
    notifyIndex: Annotated[NvU32, 24]

rpc_alloc_event_v03_00: TypeAlias = struct_rpc_alloc_event_v03_00
rpc_alloc_event_v: TypeAlias = struct_rpc_alloc_event_v03_00
# ---------------------------------------------------------------------------
# rm_api_control RPC: an NVOS54 control call plus a pointer to the
# serialized control parameters.  All revisions below share an identical
# 40-byte layout; distinct types exist only to track wire-format revisions,
# and the unversioned `_v` alias points at the newest (v29_09).
# ---------------------------------------------------------------------------
@c.record
class struct_rpc_rm_api_control_v25_0D(c.Struct):
    SIZE = 40
    params: Annotated[NVOS54_PARAMETERS_v03_00, 0]
    rm_api_params: Annotated[NvP64, 32]

@c.record
class struct_NVOS54_PARAMETERS_v03_00(c.Struct):
    # RM control-call parameters: target object, command id, and a pointer
    # to a paramsSize-byte parameter buffer.
    SIZE = 32
    hClient: Annotated[NvHandle, 0]
    hObject: Annotated[NvHandle, 4]
    cmd: Annotated[NvRmctrlCmd, 8]
    params: Annotated[NvP64, 16]
    paramsSize: Annotated[NvU32, 24]
    status: Annotated[NvV32, 28]

NVOS54_PARAMETERS_v03_00: TypeAlias = struct_NVOS54_PARAMETERS_v03_00
NvRmctrlCmd: TypeAlias = Annotated[int, ctypes.c_uint32]
rpc_rm_api_control_v25_0D: TypeAlias = struct_rpc_rm_api_control_v25_0D

@c.record
class struct_rpc_rm_api_control_v25_0F(c.Struct):
    SIZE = 40
    params: Annotated[NVOS54_PARAMETERS_v03_00, 0]
    rm_api_params: Annotated[NvP64, 32]

rpc_rm_api_control_v25_0F: TypeAlias = struct_rpc_rm_api_control_v25_0F

@c.record
class struct_rpc_rm_api_control_v25_10(c.Struct):
    SIZE = 40
    params: Annotated[NVOS54_PARAMETERS_v03_00, 0]
    rm_api_params: Annotated[NvP64, 32]

rpc_rm_api_control_v25_10: TypeAlias = struct_rpc_rm_api_control_v25_10

@c.record
class struct_rpc_rm_api_control_v25_14(c.Struct):
    SIZE = 40
    params: Annotated[NVOS54_PARAMETERS_v03_00, 0]
    rm_api_params: Annotated[NvP64, 32]

rpc_rm_api_control_v25_14: TypeAlias = struct_rpc_rm_api_control_v25_14

@c.record
class struct_rpc_rm_api_control_v25_15(c.Struct):
    SIZE = 40
    params: Annotated[NVOS54_PARAMETERS_v03_00, 0]
    rm_api_params: Annotated[NvP64, 32]

rpc_rm_api_control_v25_15: TypeAlias = struct_rpc_rm_api_control_v25_15

@c.record
class struct_rpc_rm_api_control_v25_16(c.Struct):
    SIZE = 40
    params: Annotated[NVOS54_PARAMETERS_v03_00, 0]
    rm_api_params: Annotated[NvP64, 32]

rpc_rm_api_control_v25_16: TypeAlias = struct_rpc_rm_api_control_v25_16

@c.record
class struct_rpc_rm_api_control_v25_17(c.Struct):
    SIZE = 40
    params: Annotated[NVOS54_PARAMETERS_v03_00, 0]
    rm_api_params: Annotated[NvP64, 32]

rpc_rm_api_control_v25_17: TypeAlias = struct_rpc_rm_api_control_v25_17

@c.record
class struct_rpc_rm_api_control_v25_18(c.Struct):
    SIZE = 40
    params: Annotated[NVOS54_PARAMETERS_v03_00, 0]
    rm_api_params: Annotated[NvP64, 32]

rpc_rm_api_control_v25_18: TypeAlias = struct_rpc_rm_api_control_v25_18

@c.record
class struct_rpc_rm_api_control_v25_19(c.Struct):
    SIZE = 40
    params: Annotated[NVOS54_PARAMETERS_v03_00, 0]
    rm_api_params: Annotated[NvP64, 32]

rpc_rm_api_control_v25_19: TypeAlias = struct_rpc_rm_api_control_v25_19

@c.record
class struct_rpc_rm_api_control_v25_1A(c.Struct):
    SIZE = 40
    params: Annotated[NVOS54_PARAMETERS_v03_00, 0]
    rm_api_params: Annotated[NvP64, 32]

rpc_rm_api_control_v25_1A: TypeAlias = struct_rpc_rm_api_control_v25_1A

@c.record
class struct_rpc_rm_api_control_v27_03(c.Struct):
    SIZE = 40
    params: Annotated[NVOS54_PARAMETERS_v03_00, 0]
    rm_api_params: Annotated[NvP64, 32]

rpc_rm_api_control_v27_03: TypeAlias = struct_rpc_rm_api_control_v27_03

@c.record
class struct_rpc_rm_api_control_v29_04(c.Struct):
    SIZE = 40
    params: Annotated[NVOS54_PARAMETERS_v03_00, 0]
    rm_api_params: Annotated[NvP64, 32]

rpc_rm_api_control_v29_04: TypeAlias = struct_rpc_rm_api_control_v29_04

@c.record
class struct_rpc_rm_api_control_v29_09(c.Struct):
    SIZE = 40
    params: Annotated[NVOS54_PARAMETERS_v03_00, 0]
    rm_api_params: Annotated[NvP64, 32]

rpc_rm_api_control_v29_09: TypeAlias = struct_rpc_rm_api_control_v29_09
rpc_rm_api_control_v: TypeAlias = struct_rpc_rm_api_control_v29_09
# --- RPC message: alloc_share_device ---
# Carries a device-allocation request: client/device handles, class id, and an
# embedded NV_DEVICE_ALLOCATION_PARAMETERS block starting at offset 16
# (8 bytes of padding after hClass, for the 8-byte-aligned NvP64 inside).
@c.record
class struct_rpc_alloc_share_device_v03_00(c.Struct):
    SIZE = 64
    hClient: Annotated[NvHandle, 0]
    hDevice: Annotated[NvHandle, 4]
    hClass: Annotated[NvU32, 8]
    params: Annotated[NV_DEVICE_ALLOCATION_PARAMETERS_v03_00, 16]
# Mirrors the RM SDK NV_DEVICE_ALLOCATION_PARAMETERS structure.
@c.record
class struct_NV_DEVICE_ALLOCATION_PARAMETERS_v03_00(c.Struct):
    SIZE = 48
    szName: Annotated[NvP64, 0]  # 64-bit pointer field (name string)
    hClientShare: Annotated[NvHandle, 8]
    hTargetClient: Annotated[NvHandle, 12]
    hTargetDevice: Annotated[NvHandle, 16]
    flags: Annotated[NvV32, 20]
    vaSpaceSize: Annotated[NvU64, 24]
    vaMode: Annotated[NvV32, 32]
    vaBase: Annotated[NvU64, 40]
NV_DEVICE_ALLOCATION_PARAMETERS_v03_00: TypeAlias = struct_NV_DEVICE_ALLOCATION_PARAMETERS_v03_00
rpc_alloc_share_device_v03_00: TypeAlias = struct_rpc_alloc_share_device_v03_00
rpc_alloc_share_device_v: TypeAlias = struct_rpc_alloc_share_device_v03_00
# --- RPC message: get_engine_utilization ---
# The `cmd` field selects which member of the union below is live in `params`;
# all union members are overlaid at offset 0.
@c.record
class struct_rpc_get_engine_utilization_v1F_0E(c.Struct):
    SIZE = 4048
    hClient: Annotated[NvHandle, 0]
    hObject: Annotated[NvHandle, 4]
    cmd: Annotated[NvU32, 8]
    params: Annotated[vgpuGetEngineUtilization_data_v1F_0E, 16]
# C union: every field starts at offset 0; size is that of the largest member
# (the 72-entry gpumon sample array).
@c.record
class union_vgpuGetEngineUtilization_data_v1F_0E(c.Struct):
    SIZE = 4032
    vidPerfmonSample: Annotated[NV2080_CTRL_PERF_GET_VID_ENG_PERFMON_SAMPLE_PARAMS_v05_00, 0]
    getAccountingState: Annotated[NV0000_CTRL_GPUACCT_GET_ACCOUNTING_STATE_PARAMS_v09_0C, 0]
    setAccountingState: Annotated[NV0000_CTRL_GPUACCT_SET_ACCOUNTING_STATE_PARAMS_v09_0C, 0]
    getAccountingPidList: Annotated[NV0000_CTRL_GPUACCT_GET_ACCOUNTING_PIDS_PARAMS_v09_0C, 0]
    procAccountingInfo: Annotated[NV0000_CTRL_GPUACCT_GET_PROC_ACCOUNTING_INFO_PARAMS_v09_0C, 0]
    clearAccountingInfo: Annotated[NV0000_CTRL_GPUACCT_CLEAR_ACCOUNTING_DATA_PARAMS_v09_0C, 0]
    gpumonPerfmonsampleV2: Annotated[c.Array[NV2080_CTRL_PERF_GPUMON_PERFMON_UTIL_SAMPLE_v1F_0E, Literal[72]], 0]
vgpuGetEngineUtilization_data_v1F_0E: TypeAlias = union_vgpuGetEngineUtilization_data_v1F_0E
# Per-video-engine perfmon sample (engine id + busy percentage + period).
@c.record
class struct_NV2080_CTRL_PERF_GET_VID_ENG_PERFMON_SAMPLE_PARAMS_v05_00(c.Struct):
    SIZE = 12
    engineType: Annotated[NV2080_CTRL_CMD_PERF_VID_ENG, 0]
    clkPercentBusy: Annotated[NvU32, 4]
    samplingPeriodUs: Annotated[NvU32, 8]
NV2080_CTRL_PERF_GET_VID_ENG_PERFMON_SAMPLE_PARAMS_v05_00: TypeAlias = struct_NV2080_CTRL_PERF_GET_VID_ENG_PERFMON_SAMPLE_PARAMS_v05_00
# Video-engine selector enum (uint32-backed).
class enum_NV2080_CTRL_CMD_PERF_VID_ENG(Annotated[int, ctypes.c_uint32], c.Enum): pass
NV2080_CTRL_CMD_PERF_VID_ENG_NVENC = enum_NV2080_CTRL_CMD_PERF_VID_ENG.define('NV2080_CTRL_CMD_PERF_VID_ENG_NVENC', 1)
NV2080_CTRL_CMD_PERF_VID_ENG_NVDEC = enum_NV2080_CTRL_CMD_PERF_VID_ENG.define('NV2080_CTRL_CMD_PERF_VID_ENG_NVDEC', 2)
NV2080_CTRL_CMD_PERF_VID_ENG_NVJPG = enum_NV2080_CTRL_CMD_PERF_VID_ENG.define('NV2080_CTRL_CMD_PERF_VID_ENG_NVJPG', 3)
NV2080_CTRL_CMD_PERF_VID_ENG_NVOFA = enum_NV2080_CTRL_CMD_PERF_VID_ENG.define('NV2080_CTRL_CMD_PERF_VID_ENG_NVOFA', 4)
NV2080_CTRL_CMD_PERF_VID_ENG: TypeAlias = enum_NV2080_CTRL_CMD_PERF_VID_ENG
# GPU accounting (GPUACCT) control parameter blocks follow.
@c.record
class struct_NV0000_CTRL_GPUACCT_GET_ACCOUNTING_STATE_PARAMS_v09_0C(c.Struct):
    SIZE = 12
    gpuId: Annotated[NvU32, 0]
    vmPid: Annotated[NvU32, 4]
    state: Annotated[NvU32, 8]
NV0000_CTRL_GPUACCT_GET_ACCOUNTING_STATE_PARAMS_v09_0C: TypeAlias = struct_NV0000_CTRL_GPUACCT_GET_ACCOUNTING_STATE_PARAMS_v09_0C
@c.record
class struct_NV0000_CTRL_GPUACCT_SET_ACCOUNTING_STATE_PARAMS_v09_0C(c.Struct):
    SIZE = 12
    gpuId: Annotated[NvU32, 0]
    vmPid: Annotated[NvU32, 4]
    newState: Annotated[NvU32, 8]
NV0000_CTRL_GPUACCT_SET_ACCOUNTING_STATE_PARAMS_v09_0C: TypeAlias = struct_NV0000_CTRL_GPUACCT_SET_ACCOUNTING_STATE_PARAMS_v09_0C
@c.record
class struct_NV0000_CTRL_GPUACCT_GET_ACCOUNTING_PIDS_PARAMS_v09_0C(c.Struct):
    SIZE = 4016
    gpuId: Annotated[NvU32, 0]
    vmPid: Annotated[NvU32, 4]
    passIndex: Annotated[NvU32, 8]
    pidCount: Annotated[NvU32, 12]  # number of valid entries in pidTable
    pidTable: Annotated[c.Array[NvU32, Literal[1000]], 16]
NV0000_CTRL_GPUACCT_GET_ACCOUNTING_PIDS_PARAMS_v09_0C: TypeAlias = struct_NV0000_CTRL_GPUACCT_GET_ACCOUNTING_PIDS_PARAMS_v09_0C
@c.record
class struct_NV0000_CTRL_GPUACCT_GET_PROC_ACCOUNTING_INFO_PARAMS_v09_0C(c.Struct):
    SIZE = 48
    gpuId: Annotated[NvU32, 0]
    pid: Annotated[NvU32, 4]
    subPid: Annotated[NvU32, 8]
    gpuUtil: Annotated[NvU32, 12]
    fbUtil: Annotated[NvU32, 16]
    maxFbUsage: Annotated[NvU64, 24]  # 8-byte aligned; 4 bytes padding before
    startTime: Annotated[NvU64, 32]
    endTime: Annotated[NvU64, 40]
NV0000_CTRL_GPUACCT_GET_PROC_ACCOUNTING_INFO_PARAMS_v09_0C: TypeAlias = struct_NV0000_CTRL_GPUACCT_GET_PROC_ACCOUNTING_INFO_PARAMS_v09_0C
@c.record
class struct_NV0000_CTRL_GPUACCT_CLEAR_ACCOUNTING_DATA_PARAMS_v09_0C(c.Struct):
    SIZE = 8
    gpuId: Annotated[NvU32, 0]
    vmPid: Annotated[NvU32, 4]
NV0000_CTRL_GPUACCT_CLEAR_ACCOUNTING_DATA_PARAMS_v09_0C: TypeAlias = struct_NV0000_CTRL_GPUACCT_CLEAR_ACCOUNTING_DATA_PARAMS_v09_0C
# One timestamped gpumon sample: per-engine utilization for FB/GR/NVENC/NVDEC.
@c.record
class struct_NV2080_CTRL_PERF_GPUMON_PERFMON_UTIL_SAMPLE_v1F_0E(c.Struct):
    SIZE = 56
    timeStamp: Annotated[NvU64, 0]
    fb: Annotated[NV2080_CTRL_PERF_GPUMON_ENGINE_UTIL_SAMPLE_v17_00, 8]
    gr: Annotated[NV2080_CTRL_PERF_GPUMON_ENGINE_UTIL_SAMPLE_v17_00, 20]
    nvenc: Annotated[NV2080_CTRL_PERF_GPUMON_ENGINE_UTIL_SAMPLE_v17_00, 32]
    nvdec: Annotated[NV2080_CTRL_PERF_GPUMON_ENGINE_UTIL_SAMPLE_v17_00, 44]
NV2080_CTRL_PERF_GPUMON_PERFMON_UTIL_SAMPLE_v1F_0E: TypeAlias = struct_NV2080_CTRL_PERF_GPUMON_PERFMON_UTIL_SAMPLE_v1F_0E
@c.record
class struct_NV2080_CTRL_PERF_GPUMON_ENGINE_UTIL_SAMPLE_v17_00(c.Struct):
    SIZE = 12
    util: Annotated[NvU32, 0]
    procId: Annotated[NvU32, 4]
    subProcessID: Annotated[NvU32, 8]
NV2080_CTRL_PERF_GPUMON_ENGINE_UTIL_SAMPLE_v17_00: TypeAlias = struct_NV2080_CTRL_PERF_GPUMON_ENGINE_UTIL_SAMPLE_v17_00
rpc_get_engine_utilization_v1F_0E: TypeAlias = struct_rpc_get_engine_utilization_v1F_0E
rpc_get_engine_utilization_v: TypeAlias = struct_rpc_get_engine_utilization_v1F_0E
# --- RPC message: perf_get_level_info ---
# Fixed 24-byte header followed by a variable-length payload: the zero-length
# `params` array at offset 24 (== SIZE) marks a C flexible array member whose
# actual length is given by param_size.
@c.record
class struct_rpc_perf_get_level_info_v03_00(c.Struct):
    SIZE = 24
    hClient: Annotated[NvHandle, 0]
    hObject: Annotated[NvHandle, 4]
    level: Annotated[NvU32, 8]
    flags: Annotated[NvU32, 12]
    perfGetClkInfoListSize: Annotated[NvU32, 16]
    param_size: Annotated[NvU32, 20]  # byte length of the trailing params data
    params: Annotated[c.Array[NvU32, Literal[0]], 24]
rpc_perf_get_level_info_v03_00: TypeAlias = struct_rpc_perf_get_level_info_v03_00
rpc_perf_get_level_info_v: TypeAlias = struct_rpc_perf_get_level_info_v03_00
# --- RPC messages: vGPU display surface set/cleanup ---
# set_surface_properties wraps the NVA080 display-surface control parameters.
@c.record
class struct_rpc_set_surface_properties_v07_07(c.Struct):
    SIZE = 76
    hClient: Annotated[NvHandle, 0]
    params: Annotated[NVA080_CTRL_VGPU_DISPLAY_SET_SURFACE_PROPERTIES_v07_07, 4]
# Densely packed NvU32 fields describing one display surface (geometry,
# format, backing resource handles).
@c.record
class struct_NVA080_CTRL_VGPU_DISPLAY_SET_SURFACE_PROPERTIES_v07_07(c.Struct):
    SIZE = 72
    headIndex: Annotated[NvU32, 0]
    isPrimary: Annotated[NvU32, 4]
    offset: Annotated[NvU32, 8]
    surfaceType: Annotated[NvU32, 12]
    surfaceBlockHeight: Annotated[NvU32, 16]
    surfacePitch: Annotated[NvU32, 20]
    surfaceFormat: Annotated[NvU32, 24]
    surfaceWidth: Annotated[NvU32, 28]
    surfaceHeight: Annotated[NvU32, 32]
    rectX: Annotated[NvU32, 36]
    rectY: Annotated[NvU32, 40]
    rectWidth: Annotated[NvU32, 44]
    rectHeight: Annotated[NvU32, 48]
    surfaceSize: Annotated[NvU32, 52]
    surfaceKind: Annotated[NvU32, 56]
    hHwResDevice: Annotated[NvU32, 60]
    hHwResHandle: Annotated[NvU32, 64]
    effectiveFbPageSize: Annotated[NvU32, 68]
NVA080_CTRL_VGPU_DISPLAY_SET_SURFACE_PROPERTIES_v07_07: TypeAlias = struct_NVA080_CTRL_VGPU_DISPLAY_SET_SURFACE_PROPERTIES_v07_07
rpc_set_surface_properties_v07_07: TypeAlias = struct_rpc_set_surface_properties_v07_07
rpc_set_surface_properties_v: TypeAlias = struct_rpc_set_surface_properties_v07_07
# cleanup_surface tears down a head's surface; params embedded at offset 0.
@c.record
class struct_rpc_cleanup_surface_v03_00(c.Struct):
    SIZE = 8
    params: Annotated[NVA080_CTRL_VGPU_DISPLAY_CLEANUP_SURFACE_PARAMS_v03_00, 0]
@c.record
class struct_NVA080_CTRL_VGPU_DISPLAY_CLEANUP_SURFACE_PARAMS_v03_00(c.Struct):
    SIZE = 8
    headIndex: Annotated[NvU32, 0]
    blankingEnabled: Annotated[NvU32, 4]
NVA080_CTRL_VGPU_DISPLAY_CLEANUP_SURFACE_PARAMS_v03_00: TypeAlias = struct_NVA080_CTRL_VGPU_DISPLAY_CLEANUP_SURFACE_PARAMS_v03_00
rpc_cleanup_surface_v03_00: TypeAlias = struct_rpc_cleanup_surface_v03_00
rpc_cleanup_surface_v: TypeAlias = struct_rpc_cleanup_surface_v03_00
# --- RPC message: unloading_guest_driver ---
# Two booleans packed at offsets 0 and 1, then newLevel at the next 4-byte
# boundary (2 bytes padding).
@c.record
class struct_rpc_unloading_guest_driver_v1F_07(c.Struct):
    SIZE = 8
    bInPMTransition: Annotated[NvBool, 0]
    bGc6Entering: Annotated[NvBool, 1]
    newLevel: Annotated[NvU32, 4]
rpc_unloading_guest_driver_v1F_07: TypeAlias = struct_rpc_unloading_guest_driver_v1F_07
rpc_unloading_guest_driver_v: TypeAlias = struct_rpc_unloading_guest_driver_v1F_07
# --- RPC message: gpu_exec_reg_ops (batched register read/write operations) ---
@c.record
class struct_rpc_gpu_exec_reg_ops_v12_01(c.Struct):
    SIZE = 56
    hClient: Annotated[NvHandle, 0]
    hObject: Annotated[NvHandle, 4]
    params: Annotated[gpu_exec_reg_ops_v12_01, 8]
# Header params followed by a variable-length reg-op list: the zero-length
# `operations` array at offset 48 (== SIZE) is a C flexible array member;
# its element count is reg_op_params.regOpCount.
@c.record
class struct_gpu_exec_reg_ops_v12_01(c.Struct):
    SIZE = 48
    reg_op_params: Annotated[NV2080_CTRL_GPU_EXEC_REG_OPS_PARAMS_v12_01, 0]
    operations: Annotated[c.Array[NV2080_CTRL_GPU_REG_OP_v03_00, Literal[0]], 48]
gpu_exec_reg_ops_v12_01: TypeAlias = struct_gpu_exec_reg_ops_v12_01
@c.record
class struct_NV2080_CTRL_GPU_EXEC_REG_OPS_PARAMS_v12_01(c.Struct):
    SIZE = 48
    hClientTarget: Annotated[NvHandle, 0]
    hChannelTarget: Annotated[NvHandle, 4]
    reserved00: Annotated[c.Array[NvU32, Literal[3]], 8]
    regOpCount: Annotated[NvU32, 20]  # number of entries in the reg-op list
    grRouteInfo: Annotated[NV2080_CTRL_GR_ROUTE_INFO_v12_01, 24]
    regOps: Annotated[NvP64, 40]  # 64-bit pointer to the reg-op array
NV2080_CTRL_GPU_EXEC_REG_OPS_PARAMS_v12_01: TypeAlias = struct_NV2080_CTRL_GPU_EXEC_REG_OPS_PARAMS_v12_01
# GR routing info: flags + 64-bit route (route is 8-byte aligned).
@c.record
class struct_NV2080_CTRL_GR_ROUTE_INFO_v12_01(c.Struct):
    SIZE = 16
    flags: Annotated[NvU32, 0]
    route: Annotated[NvU64, 8]
NV2080_CTRL_GR_ROUTE_INFO_v12_01: TypeAlias = struct_NV2080_CTRL_GR_ROUTE_INFO_v12_01
# One register operation: op/type/status bytes, masks and 64-bit value split
# into hi/lo 32-bit halves.
@c.record
class struct_NV2080_CTRL_GPU_REG_OP_v03_00(c.Struct):
    SIZE = 32
    regOp: Annotated[NvU8, 0]
    regType: Annotated[NvU8, 1]
    regStatus: Annotated[NvU8, 2]
    regQuad: Annotated[NvU8, 3]
    regGroupMask: Annotated[NvU32, 4]
    regSubGroupMask: Annotated[NvU32, 8]
    regOffset: Annotated[NvU32, 12]
    regValueHi: Annotated[NvU32, 16]
    regValueLo: Annotated[NvU32, 20]
    regAndNMaskHi: Annotated[NvU32, 24]
    regAndNMaskLo: Annotated[NvU32, 28]
NV2080_CTRL_GPU_REG_OP_v03_00: TypeAlias = struct_NV2080_CTRL_GPU_REG_OP_v03_00
rpc_gpu_exec_reg_ops_v12_01: TypeAlias = struct_rpc_gpu_exec_reg_ops_v12_01
rpc_gpu_exec_reg_ops_v: TypeAlias = struct_rpc_gpu_exec_reg_ops_v12_01
# --- RPC messages: chunked static-data fetches ---
# All three share the same shape: an (offset, size) window header followed by
# a variable-length byte payload (zero-length array at offset 8 == SIZE marks
# a C flexible array member).
@c.record
class struct_rpc_get_static_data_v25_0E(c.Struct):
    SIZE = 8
    offset: Annotated[NvU32, 0]
    size: Annotated[NvU32, 4]
    payload: Annotated[c.Array[NvU8, Literal[0]], 8]
rpc_get_static_data_v25_0E: TypeAlias = struct_rpc_get_static_data_v25_0E
@c.record
class struct_rpc_get_static_data_v27_01(c.Struct):
    SIZE = 8
    offset: Annotated[NvU32, 0]
    size: Annotated[NvU32, 4]
    payload: Annotated[c.Array[NvU8, Literal[0]], 8]
rpc_get_static_data_v27_01: TypeAlias = struct_rpc_get_static_data_v27_01
# Unversioned alias points at the newer layout (v27_01).
rpc_get_static_data_v: TypeAlias = struct_rpc_get_static_data_v27_01
@c.record
class struct_rpc_get_consolidated_gr_static_info_v1B_04(c.Struct):
    SIZE = 8
    offset: Annotated[NvU32, 0]
    size: Annotated[NvU32, 4]
    payload: Annotated[c.Array[NvU8, Literal[0]], 8]
rpc_get_consolidated_gr_static_info_v1B_04: TypeAlias = struct_rpc_get_consolidated_gr_static_info_v1B_04
rpc_get_consolidated_gr_static_info_v: TypeAlias = struct_rpc_get_consolidated_gr_static_info_v1B_04
# --- RPC messages: DMA page-directory set / unset ---
@c.record
class struct_rpc_set_page_directory_v1E_05(c.Struct):
    SIZE = 48
    hClient: Annotated[NvHandle, 0]
    hDevice: Annotated[NvHandle, 4]
    pasid: Annotated[NvU32, 8]
    params: Annotated[NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS_v1E_05, 16]
@c.record
class struct_NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS_v1E_05(c.Struct):
    SIZE = 32
    physAddress: Annotated[NvU64, 0]  # physical address of the new directory
    numEntries: Annotated[NvU32, 8]
    flags: Annotated[NvU32, 12]
    hVASpace: Annotated[NvHandle, 16]
    chId: Annotated[NvU32, 20]
    subDeviceId: Annotated[NvU32, 24]
    pasid: Annotated[NvU32, 28]
NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS_v1E_05: TypeAlias = struct_NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS_v1E_05
rpc_set_page_directory_v1E_05: TypeAlias = struct_rpc_set_page_directory_v1E_05
rpc_set_page_directory_v: TypeAlias = struct_rpc_set_page_directory_v1E_05
@c.record
class struct_rpc_unset_page_directory_v1E_05(c.Struct):
    SIZE = 16
    hClient: Annotated[NvHandle, 0]
    hDevice: Annotated[NvHandle, 4]
    params: Annotated[NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS_v1E_05, 8]
@c.record
class struct_NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS_v1E_05(c.Struct):
    SIZE = 8
    hVASpace: Annotated[NvHandle, 0]
    subDeviceId: Annotated[NvU32, 4]
NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS_v1E_05: TypeAlias = struct_NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS_v1E_05
rpc_unset_page_directory_v1E_05: TypeAlias = struct_rpc_unset_page_directory_v1E_05
rpc_unset_page_directory_v: TypeAlias = struct_rpc_unset_page_directory_v1E_05
# --- RPC messages: get_gsp_static_info and update_bar_pde ---
# get_gsp_static_info carries only a single opaque NvU32.
@c.record
class struct_rpc_get_gsp_static_info_v14_00(c.Struct):
    SIZE = 4
    data: Annotated[NvU32, 0]
rpc_get_gsp_static_info_v14_00: TypeAlias = struct_rpc_get_gsp_static_info_v14_00
rpc_get_gsp_static_info_v: TypeAlias = struct_rpc_get_gsp_static_info_v14_00
# update_bar_pde embeds one UpdateBarPde record describing a BAR PDE write.
@c.record
class struct_rpc_update_bar_pde_v15_00(c.Struct):
    SIZE = 24
    info: Annotated[UpdateBarPde_v15_00, 0]
@c.record
class struct_UpdateBarPde_v15_00(c.Struct):
    SIZE = 24
    barType: Annotated[NV_RPC_UPDATE_PDE_BAR_TYPE, 0]  # enum declared elsewhere
    entryValue: Annotated[NvU64, 8]
    entryLevelShift: Annotated[NvU64, 16]
UpdateBarPde_v15_00: TypeAlias = struct_UpdateBarPde_v15_00
rpc_update_bar_pde_v15_00: TypeAlias = struct_rpc_update_bar_pde_v15_00
rpc_update_bar_pde_v: TypeAlias = struct_rpc_update_bar_pde_v15_00
# --- RPC messages: get_encoder_capacity and vgpu_pf_reg_read32 ---
@c.record
class struct_rpc_get_encoder_capacity_v07_00(c.Struct):
    SIZE = 12
    hClient: Annotated[NvHandle, 0]
    hObject: Annotated[NvHandle, 4]
    encoderCapacity: Annotated[NvU32, 8]
rpc_get_encoder_capacity_v07_00: TypeAlias = struct_rpc_get_encoder_capacity_v07_00
rpc_get_encoder_capacity_v: TypeAlias = struct_rpc_get_encoder_capacity_v07_00
# 32-bit register read through the PF: 64-bit address in, value out.
@c.record
class struct_rpc_vgpu_pf_reg_read32_v15_00(c.Struct):
    SIZE = 16
    address: Annotated[NvU64, 0]
    value: Annotated[NvU32, 8]
    grEngId: Annotated[NvU32, 12]
rpc_vgpu_pf_reg_read32_v15_00: TypeAlias = struct_rpc_vgpu_pf_reg_read32_v15_00
rpc_vgpu_pf_reg_read32_v: TypeAlias = struct_rpc_vgpu_pf_reg_read32_v15_00
# --- RPC messages: ctrl_set_vgpu_fb_usage and ctrl_nvenc_sw_session_update_info ---
# Reports guest framebuffer usage (a single NvU64 byte count).
@c.record
class struct_rpc_ctrl_set_vgpu_fb_usage_v1A_08(c.Struct):
    SIZE = 8
    setFbUsage: Annotated[NVA080_CTRL_SET_FB_USAGE_PARAMS_v07_02, 0]
@c.record
class struct_NVA080_CTRL_SET_FB_USAGE_PARAMS_v07_02(c.Struct):
    SIZE = 8
    fbUsed: Annotated[NvU64, 0]
NVA080_CTRL_SET_FB_USAGE_PARAMS_v07_02: TypeAlias = struct_NVA080_CTRL_SET_FB_USAGE_PARAMS_v07_02
rpc_ctrl_set_vgpu_fb_usage_v1A_08: TypeAlias = struct_rpc_ctrl_set_vgpu_fb_usage_v1A_08
rpc_ctrl_set_vgpu_fb_usage_v: TypeAlias = struct_rpc_ctrl_set_vgpu_fb_usage_v1A_08
# NVENC software session statistics update (NVA0BC control wrapper).
@c.record
class struct_rpc_ctrl_nvenc_sw_session_update_info_v1A_09(c.Struct):
    SIZE = 40
    hClient: Annotated[NvHandle, 0]
    hObject: Annotated[NvHandle, 4]
    nvencSessionUpdate: Annotated[NVA0BC_CTRL_NVENC_SW_SESSION_UPDATE_INFO_PARAMS_v06_01, 8]
@c.record
class struct_NVA0BC_CTRL_NVENC_SW_SESSION_UPDATE_INFO_PARAMS_v06_01(c.Struct):
    SIZE = 32
    hResolution: Annotated[NvU32, 0]
    vResolution: Annotated[NvU32, 4]
    averageEncodeLatency: Annotated[NvU32, 8]
    averageEncodeFps: Annotated[NvU32, 12]
    timestampBufferSize: Annotated[NvU32, 16]
    timestampBuffer: Annotated[NvP64, 24]  # 8-byte aligned pointer field
NVA0BC_CTRL_NVENC_SW_SESSION_UPDATE_INFO_PARAMS_v06_01: TypeAlias = struct_NVA0BC_CTRL_NVENC_SW_SESSION_UPDATE_INFO_PARAMS_v06_01
rpc_ctrl_nvenc_sw_session_update_info_v1A_09: TypeAlias = struct_rpc_ctrl_nvenc_sw_session_update_info_v1A_09
rpc_ctrl_nvenc_sw_session_update_info_v: TypeAlias = struct_rpc_ctrl_nvenc_sw_session_update_info_v1A_09
# --- RPC messages: channel reset variants and VF PRI fault handling ---
# Common shape: hClient/hObject pair plus the embedded control params.
@c.record
class struct_rpc_ctrl_reset_channel_v1A_09(c.Struct):
    SIZE = 20
    hClient: Annotated[NvHandle, 0]
    hObject: Annotated[NvHandle, 4]
    resetChannel: Annotated[NV906F_CTRL_CMD_RESET_CHANNEL_PARAMS_v10_01, 8]
@c.record
class struct_NV906F_CTRL_CMD_RESET_CHANNEL_PARAMS_v10_01(c.Struct):
    SIZE = 12
    engineID: Annotated[NvU32, 0]
    subdeviceInstance: Annotated[NvU32, 4]
    resetReason: Annotated[NvU32, 8]
NV906F_CTRL_CMD_RESET_CHANNEL_PARAMS_v10_01: TypeAlias = struct_NV906F_CTRL_CMD_RESET_CHANNEL_PARAMS_v10_01
rpc_ctrl_reset_channel_v1A_09: TypeAlias = struct_rpc_ctrl_reset_channel_v1A_09
rpc_ctrl_reset_channel_v: TypeAlias = struct_rpc_ctrl_reset_channel_v1A_09
@c.record
class struct_rpc_ctrl_reset_isolated_channel_v1A_09(c.Struct):
    SIZE = 16
    hClient: Annotated[NvHandle, 0]
    hObject: Annotated[NvHandle, 4]
    resetIsolatedChannel: Annotated[NV506F_CTRL_CMD_RESET_ISOLATED_CHANNEL_PARAMS_v03_00, 8]
@c.record
class struct_NV506F_CTRL_CMD_RESET_ISOLATED_CHANNEL_PARAMS_v03_00(c.Struct):
    SIZE = 8
    exceptType: Annotated[NvU32, 0]
    engineID: Annotated[NvU32, 4]
NV506F_CTRL_CMD_RESET_ISOLATED_CHANNEL_PARAMS_v03_00: TypeAlias = struct_NV506F_CTRL_CMD_RESET_ISOLATED_CHANNEL_PARAMS_v03_00
rpc_ctrl_reset_isolated_channel_v1A_09: TypeAlias = struct_rpc_ctrl_reset_isolated_channel_v1A_09
rpc_ctrl_reset_isolated_channel_v: TypeAlias = struct_rpc_ctrl_reset_isolated_channel_v1A_09
@c.record
class struct_rpc_ctrl_gpu_handle_vf_pri_fault_v1A_09(c.Struct):
    SIZE = 12
    hClient: Annotated[NvHandle, 0]
    hObject: Annotated[NvHandle, 4]
    handleVfPriFault: Annotated[NV2080_CTRL_CMD_GPU_HANDLE_VF_PRI_FAULT_PARAMS_v18_09, 8]
@c.record
class struct_NV2080_CTRL_CMD_GPU_HANDLE_VF_PRI_FAULT_PARAMS_v18_09(c.Struct):
    SIZE = 4
    faultType: Annotated[NvU32, 0]
NV2080_CTRL_CMD_GPU_HANDLE_VF_PRI_FAULT_PARAMS_v18_09: TypeAlias = struct_NV2080_CTRL_CMD_GPU_HANDLE_VF_PRI_FAULT_PARAMS_v18_09
rpc_ctrl_gpu_handle_vf_pri_fault_v1A_09: TypeAlias = struct_rpc_ctrl_gpu_handle_vf_pri_fault_v1A_09
rpc_ctrl_gpu_handle_vf_pri_fault_v: TypeAlias = struct_rpc_ctrl_gpu_handle_vf_pri_fault_v1A_09
# --- RPC message: ctrl_perf_boost ---
@c.record
class struct_rpc_ctrl_perf_boost_v1A_09(c.Struct):
    SIZE = 16
    hClient: Annotated[NvHandle, 0]
    hObject: Annotated[NvHandle, 4]
    perfBoost: Annotated[NV2080_CTRL_PERF_BOOST_PARAMS_v03_00, 8]
@c.record
class struct_NV2080_CTRL_PERF_BOOST_PARAMS_v03_00(c.Struct):
    SIZE = 8
    flags: Annotated[NvU32, 0]
    duration: Annotated[NvU32, 4]
NV2080_CTRL_PERF_BOOST_PARAMS_v03_00: TypeAlias = struct_NV2080_CTRL_PERF_BOOST_PARAMS_v03_00
rpc_ctrl_perf_boost_v1A_09: TypeAlias = struct_rpc_ctrl_perf_boost_v1A_09
rpc_ctrl_perf_boost_v: TypeAlias = struct_rpc_ctrl_perf_boost_v1A_09
# --- RPC messages: ZBC (zero-bandwidth clear) table query and set operations ---
@c.record
class struct_rpc_ctrl_get_zbc_clear_table_v1A_09(c.Struct):
    SIZE = 64
    hClient: Annotated[NvHandle, 0]
    hObject: Annotated[NvHandle, 4]
    getZbcClearTable: Annotated[NV9096_CTRL_GET_ZBC_CLEAR_TABLE_PARAMS_v04_00, 8]
@c.record
class struct_NV9096_CTRL_GET_ZBC_CLEAR_TABLE_PARAMS_v04_00(c.Struct):
    SIZE = 56
    value: Annotated[NV9096_CTRL_GET_ZBC_CLEAR_TABLE_PARAMS_value_v04_00, 0]
    indexSize: Annotated[NvU32, 40]
    indexUsed: Annotated[NvU32, 44]
    format: Annotated[NvU32, 48]
    valType: Annotated[NvU32, 52]  # selects which member of `value` is live
NV9096_CTRL_GET_ZBC_CLEAR_TABLE_PARAMS_v04_00: TypeAlias = struct_NV9096_CTRL_GET_ZBC_CLEAR_TABLE_PARAMS_v04_00
# Clear value payload: 4-word color in FB and DS encodings, plus depth/stencil.
@c.record
class struct_NV9096_CTRL_GET_ZBC_CLEAR_TABLE_PARAMS_value_v04_00(c.Struct):
    SIZE = 40
    colorFB: Annotated[c.Array[NvU32, Literal[4]], 0]
    colorDS: Annotated[c.Array[NvU32, Literal[4]], 16]
    depth: Annotated[NvU32, 32]
    stencil: Annotated[NvU32, 36]
NV9096_CTRL_GET_ZBC_CLEAR_TABLE_PARAMS_value_v04_00: TypeAlias = struct_NV9096_CTRL_GET_ZBC_CLEAR_TABLE_PARAMS_value_v04_00
rpc_ctrl_get_zbc_clear_table_v1A_09: TypeAlias = struct_rpc_ctrl_get_zbc_clear_table_v1A_09
rpc_ctrl_get_zbc_clear_table_v: TypeAlias = struct_rpc_ctrl_get_zbc_clear_table_v1A_09
# Set a ZBC color clear entry.
@c.record
class struct_rpc_ctrl_set_zbc_color_clear_v1A_09(c.Struct):
    SIZE = 44
    hClient: Annotated[NvHandle, 0]
    hObject: Annotated[NvHandle, 4]
    setZbcColorClr: Annotated[NV9096_CTRL_SET_ZBC_COLOR_CLEAR_PARAMS_v03_00, 8]
@c.record
class struct_NV9096_CTRL_SET_ZBC_COLOR_CLEAR_PARAMS_v03_00(c.Struct):
    SIZE = 36
    colorFB: Annotated[c.Array[NvU32, Literal[4]], 0]
    colorDS: Annotated[c.Array[NvU32, Literal[4]], 16]
    format: Annotated[NvU32, 32]
NV9096_CTRL_SET_ZBC_COLOR_CLEAR_PARAMS_v03_00: TypeAlias = struct_NV9096_CTRL_SET_ZBC_COLOR_CLEAR_PARAMS_v03_00
rpc_ctrl_set_zbc_color_clear_v1A_09: TypeAlias = struct_rpc_ctrl_set_zbc_color_clear_v1A_09
rpc_ctrl_set_zbc_color_clear_v: TypeAlias = struct_rpc_ctrl_set_zbc_color_clear_v1A_09
# Set a ZBC depth clear entry.
@c.record
class struct_rpc_ctrl_set_zbc_depth_clear_v1A_09(c.Struct):
    SIZE = 16
    hClient: Annotated[NvHandle, 0]
    hObject: Annotated[NvHandle, 4]
    setZbcDepthClr: Annotated[NV9096_CTRL_SET_ZBC_DEPTH_CLEAR_PARAMS_v03_00, 8]
@c.record
class struct_NV9096_CTRL_SET_ZBC_DEPTH_CLEAR_PARAMS_v03_00(c.Struct):
    SIZE = 8
    depth: Annotated[NvU32, 0]
    format: Annotated[NvU32, 4]
NV9096_CTRL_SET_ZBC_DEPTH_CLEAR_PARAMS_v03_00: TypeAlias = struct_NV9096_CTRL_SET_ZBC_DEPTH_CLEAR_PARAMS_v03_00
rpc_ctrl_set_zbc_depth_clear_v1A_09: TypeAlias = struct_rpc_ctrl_set_zbc_depth_clear_v1A_09
rpc_ctrl_set_zbc_depth_clear_v: TypeAlias = struct_rpc_ctrl_set_zbc_depth_clear_v1A_09
# Set a ZBC stencil clear entry (newer v27_06 layout adds bSkipL2Table).
@c.record
class struct_rpc_ctrl_set_zbc_stencil_clear_v27_06(c.Struct):
    SIZE = 20
    hClient: Annotated[NvHandle, 0]
    hObject: Annotated[NvHandle, 4]
    setZbcStencilClr: Annotated[NV9096_CTRL_SET_ZBC_STENCIL_CLEAR_PARAMS_v27_06, 8]
@c.record
class struct_NV9096_CTRL_SET_ZBC_STENCIL_CLEAR_PARAMS_v27_06(c.Struct):
    SIZE = 12
    stencil: Annotated[NvU32, 0]
    format: Annotated[NvU32, 4]
    bSkipL2Table: Annotated[NvBool, 8]
NV9096_CTRL_SET_ZBC_STENCIL_CLEAR_PARAMS_v27_06: TypeAlias = struct_NV9096_CTRL_SET_ZBC_STENCIL_CLEAR_PARAMS_v27_06
rpc_ctrl_set_zbc_stencil_clear_v27_06: TypeAlias = struct_rpc_ctrl_set_zbc_stencil_clear_v27_06
rpc_ctrl_set_zbc_stencil_clear_v: TypeAlias = struct_rpc_ctrl_set_zbc_stencil_clear_v27_06
# --- RPC messages: ctrl_gpfifo_schedule and ctrl_set_timeslice ---
@c.record
class struct_rpc_ctrl_gpfifo_schedule_v1A_0A(c.Struct):
    SIZE = 16
    hClient: Annotated[NvHandle, 0]
    hObject: Annotated[NvHandle, 4]
    cmd: Annotated[NvU32, 8]
    gpfifoSchedule: Annotated[NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS_v03_00, 12]
# Single boolean: enable/disable scheduling for the GPFIFO.
@c.record
class struct_NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS_v03_00(c.Struct):
    SIZE = 1
    bEnable: Annotated[NvBool, 0]
NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS_v03_00: TypeAlias = struct_NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS_v03_00
rpc_ctrl_gpfifo_schedule_v1A_0A: TypeAlias = struct_rpc_ctrl_gpfifo_schedule_v1A_0A
rpc_ctrl_gpfifo_schedule_v: TypeAlias = struct_rpc_ctrl_gpfifo_schedule_v1A_0A
@c.record
class struct_rpc_ctrl_set_timeslice_v1A_0A(c.Struct):
    SIZE = 16
    hClient: Annotated[NvHandle, 0]
    hObject: Annotated[NvHandle, 4]
    setTimeSlice: Annotated[NVA06C_CTRL_TIMESLICE_PARAMS_v06_00, 8]
@c.record
class struct_NVA06C_CTRL_TIMESLICE_PARAMS_v06_00(c.Struct):
    SIZE = 8
    timesliceUs: Annotated[NvU64, 0]  # timeslice in microseconds
NVA06C_CTRL_TIMESLICE_PARAMS_v06_00: TypeAlias = struct_NVA06C_CTRL_TIMESLICE_PARAMS_v06_00
rpc_ctrl_set_timeslice_v1A_0A: TypeAlias = struct_rpc_ctrl_set_timeslice_v1A_0A
rpc_ctrl_set_timeslice_v: TypeAlias = struct_rpc_ctrl_set_timeslice_v1A_0A
# --- RPC messages: ctrl_fifo_disable_channels and ctrl_preempt ---
@c.record
class struct_rpc_ctrl_fifo_disable_channels_v1A_0A(c.Struct):
    SIZE = 544
    hClient: Annotated[NvHandle, 0]
    hObject: Annotated[NvHandle, 4]
    fifoDisableChannels: Annotated[NV2080_CTRL_FIFO_DISABLE_CHANNELS_PARAMS_v06_00, 8]
# Batch enable/disable of up to 64 channels, identified by parallel
# client-handle / channel-handle arrays (numChannels entries are valid).
@c.record
class struct_NV2080_CTRL_FIFO_DISABLE_CHANNELS_PARAMS_v06_00(c.Struct):
    SIZE = 536
    bDisable: Annotated[NvBool, 0]
    numChannels: Annotated[NvU32, 4]
    bOnlyDisableScheduling: Annotated[NvBool, 8]
    bRewindGpPut: Annotated[NvBool, 9]
    pRunlistPreemptEvent: Annotated[NvP64, 16]  # 8-byte aligned pointer
    hClientList: Annotated[c.Array[NvHandle, Literal[64]], 24]
    hChannelList: Annotated[c.Array[NvHandle, Literal[64]], 280]
NV2080_CTRL_FIFO_DISABLE_CHANNELS_PARAMS_v06_00: TypeAlias = struct_NV2080_CTRL_FIFO_DISABLE_CHANNELS_PARAMS_v06_00
rpc_ctrl_fifo_disable_channels_v1A_0A: TypeAlias = struct_rpc_ctrl_fifo_disable_channels_v1A_0A
rpc_ctrl_fifo_disable_channels_v: TypeAlias = struct_rpc_ctrl_fifo_disable_channels_v1A_0A
@c.record
class struct_rpc_ctrl_preempt_v1A_0A(c.Struct):
    SIZE = 16
    hClient: Annotated[NvHandle, 0]
    hObject: Annotated[NvHandle, 4]
    cmdPreempt: Annotated[NVA06C_CTRL_PREEMPT_PARAMS_v09_0A, 8]
@c.record
class struct_NVA06C_CTRL_PREEMPT_PARAMS_v09_0A(c.Struct):
    SIZE = 8
    bWait: Annotated[NvBool, 0]
    bManualTimeout: Annotated[NvBool, 1]
    timeoutUs: Annotated[NvU32, 4]
NVA06C_CTRL_PREEMPT_PARAMS_v09_0A: TypeAlias = struct_NVA06C_CTRL_PREEMPT_PARAMS_v09_0A
rpc_ctrl_preempt_v1A_0A: TypeAlias = struct_rpc_ctrl_preempt_v1A_0A
rpc_ctrl_preempt_v: TypeAlias = struct_rpc_ctrl_preempt_v1A_0A
# --- RPC messages: interleave level for TSG and for a single channel ---
# Parallel layouts; only the param struct and field name differ.
@c.record
class struct_rpc_ctrl_set_tsg_interleave_level_v1A_0A(c.Struct):
    SIZE = 12
    hClient: Annotated[NvHandle, 0]
    hObject: Annotated[NvHandle, 4]
    interleaveLevelTSG: Annotated[NVA06C_CTRL_INTERLEAVE_LEVEL_PARAMS_v17_02, 8]
@c.record
class struct_NVA06C_CTRL_INTERLEAVE_LEVEL_PARAMS_v17_02(c.Struct):
    SIZE = 4
    tsgInterleaveLevel: Annotated[NvU32, 0]
NVA06C_CTRL_INTERLEAVE_LEVEL_PARAMS_v17_02: TypeAlias = struct_NVA06C_CTRL_INTERLEAVE_LEVEL_PARAMS_v17_02
rpc_ctrl_set_tsg_interleave_level_v1A_0A: TypeAlias = struct_rpc_ctrl_set_tsg_interleave_level_v1A_0A
rpc_ctrl_set_tsg_interleave_level_v: TypeAlias = struct_rpc_ctrl_set_tsg_interleave_level_v1A_0A
@c.record
class struct_rpc_ctrl_set_channel_interleave_level_v1A_0A(c.Struct):
    SIZE = 12
    hClient: Annotated[NvHandle, 0]
    hObject: Annotated[NvHandle, 4]
    interleaveLevelChannel: Annotated[NVA06F_CTRL_INTERLEAVE_LEVEL_PARAMS_v17_02, 8]
@c.record
class struct_NVA06F_CTRL_INTERLEAVE_LEVEL_PARAMS_v17_02(c.Struct):
    SIZE = 4
    channelInterleaveLevel: Annotated[NvU32, 0]
NVA06F_CTRL_INTERLEAVE_LEVEL_PARAMS_v17_02: TypeAlias = struct_NVA06F_CTRL_INTERLEAVE_LEVEL_PARAMS_v17_02
rpc_ctrl_set_channel_interleave_level_v1A_0A: TypeAlias = struct_rpc_ctrl_set_channel_interleave_level_v1A_0A
rpc_ctrl_set_channel_interleave_level_v: TypeAlias = struct_rpc_ctrl_set_channel_interleave_level_v1A_0A
# --- RPC messages: GR context-switch preemption bind (two layout versions)
# --- and set-preemption-mode ---
@c.record
class struct_rpc_ctrl_gr_ctxsw_preemption_bind_v1A_0E(c.Struct):
    SIZE = 112
    hClient: Annotated[NvHandle, 0]
    hObject: Annotated[NvHandle, 4]
    ctrlParams: Annotated[NV2080_CTRL_GR_CTXSW_PREEMPTION_BIND_PARAMS_v12_01, 8]
# Older layout: 8 virtual-memory pointers.
@c.record
class struct_NV2080_CTRL_GR_CTXSW_PREEMPTION_BIND_PARAMS_v12_01(c.Struct):
    SIZE = 104
    flags: Annotated[NvU32, 0]
    hClient: Annotated[NvHandle, 4]
    hChannel: Annotated[NvHandle, 8]
    vMemPtrs: Annotated[c.Array[NvU64, Literal[8]], 16]
    gfxpPreemptMode: Annotated[NvU32, 80]
    cilpPreemptMode: Annotated[NvU32, 84]
    grRouteInfo: Annotated[NV2080_CTRL_GR_ROUTE_INFO_v12_01, 88]
NV2080_CTRL_GR_CTXSW_PREEMPTION_BIND_PARAMS_v12_01: TypeAlias = struct_NV2080_CTRL_GR_CTXSW_PREEMPTION_BIND_PARAMS_v12_01
rpc_ctrl_gr_ctxsw_preemption_bind_v1A_0E: TypeAlias = struct_rpc_ctrl_gr_ctxsw_preemption_bind_v1A_0E
@c.record
class struct_rpc_ctrl_gr_ctxsw_preemption_bind_v28_07(c.Struct):
    SIZE = 120
    hClient: Annotated[NvHandle, 0]
    hObject: Annotated[NvHandle, 4]
    ctrlParams: Annotated[NV2080_CTRL_GR_CTXSW_PREEMPTION_BIND_PARAMS_v28_07, 8]
# Newer layout: vMemPtrs grew from 8 to 9 entries, shifting later fields by 8.
@c.record
class struct_NV2080_CTRL_GR_CTXSW_PREEMPTION_BIND_PARAMS_v28_07(c.Struct):
    SIZE = 112
    flags: Annotated[NvU32, 0]
    hClient: Annotated[NvHandle, 4]
    hChannel: Annotated[NvHandle, 8]
    vMemPtrs: Annotated[c.Array[NvU64, Literal[9]], 16]
    gfxpPreemptMode: Annotated[NvU32, 88]
    cilpPreemptMode: Annotated[NvU32, 92]
    grRouteInfo: Annotated[NV2080_CTRL_GR_ROUTE_INFO_v12_01, 96]
NV2080_CTRL_GR_CTXSW_PREEMPTION_BIND_PARAMS_v28_07: TypeAlias = struct_NV2080_CTRL_GR_CTXSW_PREEMPTION_BIND_PARAMS_v28_07
rpc_ctrl_gr_ctxsw_preemption_bind_v28_07: TypeAlias = struct_rpc_ctrl_gr_ctxsw_preemption_bind_v28_07
rpc_ctrl_gr_ctxsw_preemption_bind_v: TypeAlias = struct_rpc_ctrl_gr_ctxsw_preemption_bind_v28_07
@c.record
class struct_rpc_ctrl_gr_set_ctxsw_preemption_mode_v1A_0E(c.Struct):
    SIZE = 40
    hClient: Annotated[NvHandle, 0]
    hObject: Annotated[NvHandle, 4]
    ctrlParams: Annotated[NV2080_CTRL_GR_SET_CTXSW_PREEMPTION_MODE_PARAMS_v12_01, 8]
@c.record
class struct_NV2080_CTRL_GR_SET_CTXSW_PREEMPTION_MODE_PARAMS_v12_01(c.Struct):
    SIZE = 32
    flags: Annotated[NvU32, 0]
    hChannel: Annotated[NvHandle, 4]
    gfxpPreemptMode: Annotated[NvU32, 8]
    cilpPreemptMode: Annotated[NvU32, 12]
    grRouteInfo: Annotated[NV2080_CTRL_GR_ROUTE_INFO_v12_01, 16]
NV2080_CTRL_GR_SET_CTXSW_PREEMPTION_MODE_PARAMS_v12_01: TypeAlias = struct_NV2080_CTRL_GR_SET_CTXSW_PREEMPTION_MODE_PARAMS_v12_01
rpc_ctrl_gr_set_ctxsw_preemption_mode_v1A_0E: TypeAlias = struct_rpc_ctrl_gr_set_ctxsw_preemption_mode_v1A_0E
rpc_ctrl_gr_set_ctxsw_preemption_mode_v: TypeAlias = struct_rpc_ctrl_gr_set_ctxsw_preemption_mode_v1A_0E
# --- RPC messages: GR zcull bind and gpu_initialize_ctx ---
@c.record
class struct_rpc_ctrl_gr_ctxsw_zcull_bind_v1A_0E(c.Struct):
    SIZE = 32
    hClient: Annotated[NvHandle, 0]
    hObject: Annotated[NvHandle, 4]
    ctrlParams: Annotated[NV2080_CTRL_GR_CTXSW_ZCULL_BIND_PARAMS_v03_00, 8]
@c.record
class struct_NV2080_CTRL_GR_CTXSW_ZCULL_BIND_PARAMS_v03_00(c.Struct):
    SIZE = 24
    hClient: Annotated[NvHandle, 0]
    hChannel: Annotated[NvHandle, 4]
    vMemPtr: Annotated[NvU64, 8]
    zcullMode: Annotated[NvU32, 16]
NV2080_CTRL_GR_CTXSW_ZCULL_BIND_PARAMS_v03_00: TypeAlias = struct_NV2080_CTRL_GR_CTXSW_ZCULL_BIND_PARAMS_v03_00
rpc_ctrl_gr_ctxsw_zcull_bind_v1A_0E: TypeAlias = struct_rpc_ctrl_gr_ctxsw_zcull_bind_v1A_0E
rpc_ctrl_gr_ctxsw_zcull_bind_v: TypeAlias = struct_rpc_ctrl_gr_ctxsw_zcull_bind_v1A_0E
@c.record
class struct_rpc_ctrl_gpu_initialize_ctx_v1A_0E(c.Struct):
    SIZE = 64
    hClient: Annotated[NvHandle, 0]
    hObject: Annotated[NvHandle, 4]
    ctrlParams: Annotated[NV2080_CTRL_GPU_INITIALIZE_CTX_PARAMS_v03_00, 8]
# Engine-context initialization parameters (handles + backing memory address).
@c.record
class struct_NV2080_CTRL_GPU_INITIALIZE_CTX_PARAMS_v03_00(c.Struct):
    SIZE = 56
    engineType: Annotated[NvU32, 0]
    hClient: Annotated[NvHandle, 4]
    ChID: Annotated[NvU32, 8]
    hChanClient: Annotated[NvHandle, 12]
    hObject: Annotated[NvHandle, 16]
    hVirtMemory: Annotated[NvHandle, 20]
    physAddress: Annotated[NvU64, 24]
    physAttr: Annotated[NvU32, 32]
    hDmaHandle: Annotated[NvHandle, 36]
    index: Annotated[NvU32, 40]
    size: Annotated[NvU64, 48]  # 8-byte aligned; 4 bytes padding before
NV2080_CTRL_GPU_INITIALIZE_CTX_PARAMS_v03_00: TypeAlias = struct_NV2080_CTRL_GPU_INITIALIZE_CTX_PARAMS_v03_00
rpc_ctrl_gpu_initialize_ctx_v1A_0E: TypeAlias = struct_rpc_ctrl_gpu_initialize_ctx_v1A_0E
rpc_ctrl_gpu_initialize_ctx_v: TypeAlias = struct_rpc_ctrl_gpu_initialize_ctx_v1A_0E
# --- RPC message: vaspace_copy_server_reserved_pdes ---
# Copies server-reserved page-directory entries for a VA range; up to 6 levels
# described inline (numLevelsToCopy entries are valid).
@c.record
class struct_rpc_ctrl_vaspace_copy_server_reserved_pdes_v1E_04(c.Struct):
    SIZE = 192
    hClient: Annotated[NvHandle, 0]
    hObject: Annotated[NvHandle, 4]
    ctrlParams: Annotated[NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS_v1E_04, 8]
@c.record
class struct_NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS_v1E_04(c.Struct):
    SIZE = 184
    hSubDevice: Annotated[NvHandle, 0]
    subDeviceId: Annotated[NvU32, 4]
    pageSize: Annotated[NvU64, 8]
    virtAddrLo: Annotated[NvU64, 16]
    virtAddrHi: Annotated[NvU64, 24]
    numLevelsToCopy: Annotated[NvU32, 32]
    levels: Annotated[c.Array[NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS_levels_v1E_04, Literal[6]], 40]
NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS_v1E_04: TypeAlias = struct_NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS_v1E_04
# One page-table level descriptor (physical address, size, aperture, shift).
@c.record
class struct_NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS_levels_v1E_04(c.Struct):
    SIZE = 24
    physAddress: Annotated[NvU64, 0]
    size: Annotated[NvU64, 8]
    aperture: Annotated[NvU32, 16]
    pageShift: Annotated[NvU8, 20]
NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS_levels_v1E_04: TypeAlias = struct_NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS_levels_v1E_04
rpc_ctrl_vaspace_copy_server_reserved_pdes_v1E_04: TypeAlias = struct_rpc_ctrl_vaspace_copy_server_reserved_pdes_v1E_04
rpc_ctrl_vaspace_copy_server_reserved_pdes_v: TypeAlias = struct_rpc_ctrl_vaspace_copy_server_reserved_pdes_v1E_04
# --- RPC messages: mc_service_interrupts and get_p2p_caps_v2 ---
@c.record
class struct_rpc_ctrl_mc_service_interrupts_v1A_0E(c.Struct):
    SIZE = 12
    hClient: Annotated[NvHandle, 0]
    hObject: Annotated[NvHandle, 4]
    ctrlParams: Annotated[NV2080_CTRL_MC_SERVICE_INTERRUPTS_PARAMS_v15_01, 8]
@c.record
class struct_NV2080_CTRL_MC_SERVICE_INTERRUPTS_PARAMS_v15_01(c.Struct):
    SIZE = 4
    engines: Annotated[NvU32, 0]  # engine bitmask to service
NV2080_CTRL_MC_SERVICE_INTERRUPTS_PARAMS_v15_01: TypeAlias = struct_NV2080_CTRL_MC_SERVICE_INTERRUPTS_PARAMS_v15_01
rpc_ctrl_mc_service_interrupts_v1A_0E: TypeAlias = struct_rpc_ctrl_mc_service_interrupts_v1A_0E
rpc_ctrl_mc_service_interrupts_v: TypeAlias = struct_rpc_ctrl_mc_service_interrupts_v1A_0E
# P2P capability query over up to 32 GPUs; `iter` presumably drives chunked
# iteration over the busPeerIds matrix — TODO confirm against upstream headers.
@c.record
class struct_rpc_ctrl_get_p2p_caps_v2_v1F_0D(c.Struct):
    SIZE = 2208
    iter: Annotated[NvU8, 0]
    gpuIds: Annotated[c.Array[NvU32, Literal[32]], 4]
    gpuCount: Annotated[NvU32, 132]
    p2pCaps: Annotated[NvU32, 136]
    p2pOptimalReadCEs: Annotated[NvU32, 140]
    p2pOptimalWriteCEs: Annotated[NvU32, 144]
    p2pCapsStatus: Annotated[c.Array[NvU8, Literal[9]], 148]
    busPeerIds: Annotated[c.Array[NvU32, Literal[512]], 160]
rpc_ctrl_get_p2p_caps_v2_v1F_0D: TypeAlias = struct_rpc_ctrl_get_p2p_caps_v2_v1F_0D
rpc_ctrl_get_p2p_caps_v2_v: TypeAlias = struct_rpc_ctrl_get_p2p_caps_v2_v1F_0D
# RPC wrapper for NV2080_CTRL_CMD_GET_P2P_CAPS (subdevice variant): the body is
# just the embedded control params. Generated ABI mirror — offsets wire-fixed.
@c.record
class struct_rpc_ctrl_subdevice_get_p2p_caps_v21_02(c.Struct):
  SIZE = 1544
  ctrlParams: Annotated[NV2080_CTRL_GET_P2P_CAPS_PARAMS_v21_02, 0]
# Control params: peers may be selected by GPU id or by UUID (bUseUuid);
# peerGpuCaps holds one entry per peer, up to 32.
@c.record
class struct_NV2080_CTRL_GET_P2P_CAPS_PARAMS_v21_02(c.Struct):
  SIZE = 1544
  bAllCaps: Annotated[NvBool, 0]
  bUseUuid: Annotated[NvBool, 1]
  peerGpuCount: Annotated[NvU32, 4]
  peerGpuCaps: Annotated[c.Array[NV2080_CTRL_GPU_P2P_PEER_CAPS_PEER_INFO_v21_02, Literal[32]], 8]
NV2080_CTRL_GET_P2P_CAPS_PARAMS_v21_02: TypeAlias = struct_NV2080_CTRL_GET_P2P_CAPS_PARAMS_v21_02
# Per-peer capability record: caps mask, optimal read/write CEs and bus peer id.
@c.record
class struct_NV2080_CTRL_GPU_P2P_PEER_CAPS_PEER_INFO_v21_02(c.Struct):
  SIZE = 48
  gpuId: Annotated[NvU32, 0]
  gpuUuid: Annotated[c.Array[NvU8, Literal[16]], 4]
  p2pCaps: Annotated[NvU32, 20]
  p2pOptimalReadCEs: Annotated[NvU32, 24]
  p2pOptimalWriteCEs: Annotated[NvU32, 28]
  p2pCapsStatus: Annotated[c.Array[NvU8, Literal[9]], 32]
  busPeerId: Annotated[NvU32, 44]
NV2080_CTRL_GPU_P2P_PEER_CAPS_PEER_INFO_v21_02: TypeAlias = struct_NV2080_CTRL_GPU_P2P_PEER_CAPS_PEER_INFO_v21_02
rpc_ctrl_subdevice_get_p2p_caps_v21_02: TypeAlias = struct_rpc_ctrl_subdevice_get_p2p_caps_v21_02
rpc_ctrl_subdevice_get_p2p_caps_v: TypeAlias = struct_rpc_ctrl_subdevice_get_p2p_caps_v21_02
# RPC for GSP vGPU heap statistics, version v28_03. Generated ABI mirror —
# offsets and SIZE are wire-fixed.
@c.record
class struct_rpc_ctrl_subdevice_get_vgpu_heap_stats_v28_03(c.Struct):
  SIZE = 40
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
  ctrlParams: Annotated[NV2080_CTRL_CMD_GSP_GET_VGPU_HEAP_STATS_PARAMS_v28_03, 8]
@c.record
class struct_NV2080_CTRL_CMD_GSP_GET_VGPU_HEAP_STATS_PARAMS_v28_03(c.Struct):
  SIZE = 32
  allocatedSize: Annotated[NvU64, 0]
  peakAllocatedSize: Annotated[NvU64, 8]
  managedSize: Annotated[NvU64, 16]
  allocationCount: Annotated[NvU32, 24]
  peakAllocationCount: Annotated[NvU32, 28]
NV2080_CTRL_CMD_GSP_GET_VGPU_HEAP_STATS_PARAMS_v28_03: TypeAlias = struct_NV2080_CTRL_CMD_GSP_GET_VGPU_HEAP_STATS_PARAMS_v28_03
rpc_ctrl_subdevice_get_vgpu_heap_stats_v28_03: TypeAlias = struct_rpc_ctrl_subdevice_get_vgpu_heap_stats_v28_03
# v28_06 extends v28_03 by appending largestFreeChunkSize; the unversioned "_v"
# alias below tracks this newer layout.
@c.record
class struct_rpc_ctrl_subdevice_get_vgpu_heap_stats_v28_06(c.Struct):
  SIZE = 48
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
  ctrlParams: Annotated[NV2080_CTRL_CMD_GSP_GET_VGPU_HEAP_STATS_PARAMS_v28_06, 8]
@c.record
class struct_NV2080_CTRL_CMD_GSP_GET_VGPU_HEAP_STATS_PARAMS_v28_06(c.Struct):
  SIZE = 40
  allocatedSize: Annotated[NvU64, 0]
  peakAllocatedSize: Annotated[NvU64, 8]
  managedSize: Annotated[NvU64, 16]
  allocationCount: Annotated[NvU32, 24]
  peakAllocationCount: Annotated[NvU32, 28]
  largestFreeChunkSize: Annotated[NvU64, 32]
NV2080_CTRL_CMD_GSP_GET_VGPU_HEAP_STATS_PARAMS_v28_06: TypeAlias = struct_NV2080_CTRL_CMD_GSP_GET_VGPU_HEAP_STATS_PARAMS_v28_06
rpc_ctrl_subdevice_get_vgpu_heap_stats_v28_06: TypeAlias = struct_rpc_ctrl_subdevice_get_vgpu_heap_stats_v28_06
rpc_ctrl_subdevice_get_vgpu_heap_stats_v: TypeAlias = struct_rpc_ctrl_subdevice_get_vgpu_heap_stats_v28_06
# Debugger RPCs for SM error state (NV83DE debug object). Generated ABI
# mirrors — all offsets and SIZE values are wire-fixed.
# Clear all SM error states on a target channel.
@c.record
class struct_rpc_ctrl_dbg_clear_all_sm_error_states_v1A_0C(c.Struct):
  SIZE = 16
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
  ctrlParams: Annotated[NV83DE_CTRL_DEBUG_CLEAR_ALL_SM_ERROR_STATES_PARAMS_v03_00, 8]
@c.record
class struct_NV83DE_CTRL_DEBUG_CLEAR_ALL_SM_ERROR_STATES_PARAMS_v03_00(c.Struct):
  SIZE = 8
  hTargetChannel: Annotated[NvHandle, 0]
  numSMsToClear: Annotated[NvU32, 4]
NV83DE_CTRL_DEBUG_CLEAR_ALL_SM_ERROR_STATES_PARAMS_v03_00: TypeAlias = struct_NV83DE_CTRL_DEBUG_CLEAR_ALL_SM_ERROR_STATES_PARAMS_v03_00
rpc_ctrl_dbg_clear_all_sm_error_states_v1A_0C: TypeAlias = struct_rpc_ctrl_dbg_clear_all_sm_error_states_v1A_0C
rpc_ctrl_dbg_clear_all_sm_error_states_v: TypeAlias = struct_rpc_ctrl_dbg_clear_all_sm_error_states_v1A_0C
# Read the error-state registers of up to 80 SMs starting at `startingSM`,
# plus any pending MMU fault info.
@c.record
class struct_rpc_ctrl_dbg_read_all_sm_error_states_v21_06(c.Struct):
  SIZE = 3872
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
  ctrlParams: Annotated[NV83DE_CTRL_DEBUG_READ_ALL_SM_ERROR_STATES_PARAMS_v21_06, 8]
@c.record
class struct_NV83DE_CTRL_DEBUG_READ_ALL_SM_ERROR_STATES_PARAMS_v21_06(c.Struct):
  SIZE = 3864
  hTargetChannel: Annotated[NvHandle, 0]
  numSMsToRead: Annotated[NvU32, 4]
  smErrorStateArray: Annotated[c.Array[NV83DE_SM_ERROR_STATE_REGISTERS_v21_06, Literal[80]], 8]
  mmuFaultInfo: Annotated[NvU32, 3848]
  mmuFault: Annotated[NV83DE_MMU_FAULT_INFO_v16_03, 3852]
  startingSM: Annotated[NvU32, 3860]
NV83DE_CTRL_DEBUG_READ_ALL_SM_ERROR_STATES_PARAMS_v21_06: TypeAlias = struct_NV83DE_CTRL_DEBUG_READ_ALL_SM_ERROR_STATES_PARAMS_v21_06
# Snapshot of one SM's hardware warp/global error-status registers.
@c.record
class struct_NV83DE_SM_ERROR_STATE_REGISTERS_v21_06(c.Struct):
  SIZE = 48
  hwwGlobalEsr: Annotated[NvU32, 0]
  hwwWarpEsr: Annotated[NvU32, 4]
  hwwWarpEsrPc: Annotated[NvU32, 8]
  hwwGlobalEsrReportMask: Annotated[NvU32, 12]
  hwwWarpEsrReportMask: Annotated[NvU32, 16]
  hwwEsrAddr: Annotated[NvU64, 24]
  hwwWarpEsrPc64: Annotated[NvU64, 32]
  hwwCgaEsr: Annotated[NvU32, 40]
  hwwCgaEsrReportMask: Annotated[NvU32, 44]
NV83DE_SM_ERROR_STATE_REGISTERS_v21_06: TypeAlias = struct_NV83DE_SM_ERROR_STATE_REGISTERS_v21_06
@c.record
class struct_NV83DE_MMU_FAULT_INFO_v16_03(c.Struct):
  SIZE = 8
  valid: Annotated[NvBool, 0]
  faultInfo: Annotated[NvU32, 4]
NV83DE_MMU_FAULT_INFO_v16_03: TypeAlias = struct_NV83DE_MMU_FAULT_INFO_v16_03
rpc_ctrl_dbg_read_all_sm_error_states_v21_06: TypeAlias = struct_rpc_ctrl_dbg_read_all_sm_error_states_v21_06
rpc_ctrl_dbg_read_all_sm_error_states_v: TypeAlias = struct_rpc_ctrl_dbg_read_all_sm_error_states_v21_06
# Set the debugger exception-reporting mask.
@c.record
class struct_rpc_ctrl_dbg_set_exception_mask_v1A_0C(c.Struct):
  SIZE = 12
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
  ctrlParams: Annotated[NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_PARAMS_v03_00, 8]
@c.record
class struct_NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_PARAMS_v03_00(c.Struct):
  SIZE = 4
  exceptionMask: Annotated[NvU32, 0]
NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_PARAMS_v03_00: TypeAlias = struct_NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_PARAMS_v03_00
rpc_ctrl_dbg_set_exception_mask_v1A_0C: TypeAlias = struct_rpc_ctrl_dbg_set_exception_mask_v1A_0C
rpc_ctrl_dbg_set_exception_mask_v: TypeAlias = struct_rpc_ctrl_dbg_set_exception_mask_v1A_0C
# RPC for NV2080_CTRL_CMD_GPU_PROMOTE_CTX: promotes (binds) per-channel context
# buffers to the GPU. Generated ABI mirror — offsets and SIZE wire-fixed.
@c.record
class struct_rpc_ctrl_gpu_promote_ctx_v1A_20(c.Struct):
  SIZE = 568
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
  promoteCtx: Annotated[NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS_v1A_20, 8]
# Up to 16 buffer entries per promote call (`entryCount` gives the valid count).
@c.record
class struct_NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS_v1A_20(c.Struct):
  SIZE = 560
  engineType: Annotated[NvU32, 0]
  hClient: Annotated[NvHandle, 4]
  ChID: Annotated[NvU32, 8]
  hChanClient: Annotated[NvHandle, 12]
  hObject: Annotated[NvHandle, 16]
  hVirtMemory: Annotated[NvHandle, 20]
  virtAddress: Annotated[NvU64, 24]
  size: Annotated[NvU64, 32]
  entryCount: Annotated[NvU32, 40]
  promoteEntry: Annotated[c.Array[NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY_v1A_20, Literal[16]], 48]
NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS_v1A_20: TypeAlias = struct_NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS_v1A_20
# One promoted buffer: physical/virtual address pair, size, attributes and id.
@c.record
class struct_NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY_v1A_20(c.Struct):
  SIZE = 32
  gpuPhysAddr: Annotated[NvU64, 0]
  gpuVirtAddr: Annotated[NvU64, 8]
  size: Annotated[NvU64, 16]
  physAttr: Annotated[NvU32, 24]
  bufferId: Annotated[NvU16, 28]
  bInitialize: Annotated[NvU8, 30]
  bNonmapped: Annotated[NvU8, 31]
NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY_v1A_20: TypeAlias = struct_NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY_v1A_20
rpc_ctrl_gpu_promote_ctx_v1A_20: TypeAlias = struct_rpc_ctrl_gpu_promote_ctx_v1A_20
rpc_ctrl_gpu_promote_ctx_v: TypeAlias = struct_rpc_ctrl_gpu_promote_ctx_v1A_20
# Debugger (NV83DE) context-control RPCs: suspend/resume, register ops, and
# various mode toggles. Generated ABI mirrors — offsets and SIZE wire-fixed.
# Suspend the debugged context; optionally waits for an event.
@c.record
class struct_rpc_ctrl_dbg_suspend_context_v1A_10(c.Struct):
  SIZE = 16
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
  ctrlParams: Annotated[NV83DE_CTRL_CMD_DEBUG_SUSPEND_CONTEXT_PARAMS_v1A_06, 8]
@c.record
class struct_NV83DE_CTRL_CMD_DEBUG_SUSPEND_CONTEXT_PARAMS_v1A_06(c.Struct):
  SIZE = 8
  waitForEvent: Annotated[NvU32, 0]
  hResidentChannel: Annotated[NvHandle, 4]
NV83DE_CTRL_CMD_DEBUG_SUSPEND_CONTEXT_PARAMS_v1A_06: TypeAlias = struct_NV83DE_CTRL_CMD_DEBUG_SUSPEND_CONTEXT_PARAMS_v1A_06
rpc_ctrl_dbg_suspend_context_v1A_10: TypeAlias = struct_rpc_ctrl_dbg_suspend_context_v1A_10
rpc_ctrl_dbg_suspend_context_v: TypeAlias = struct_rpc_ctrl_dbg_suspend_context_v1A_10
# Resume the debugged context (handles only, no params struct).
@c.record
class struct_rpc_ctrl_dbg_resume_context_v1A_10(c.Struct):
  SIZE = 8
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
rpc_ctrl_dbg_resume_context_v1A_10: TypeAlias = struct_rpc_ctrl_dbg_resume_context_v1A_10
rpc_ctrl_dbg_resume_context_v: TypeAlias = struct_rpc_ctrl_dbg_resume_context_v1A_10
# Execute up to 100 register read/modify/write operations in one call.
@c.record
class struct_rpc_ctrl_dbg_exec_reg_ops_v1A_10(c.Struct):
  SIZE = 3216
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
  ctrlParams: Annotated[NV83DE_CTRL_DEBUG_EXEC_REG_OPS_PARAMS_v1A_06, 8]
@c.record
class struct_NV83DE_CTRL_DEBUG_EXEC_REG_OPS_PARAMS_v1A_06(c.Struct):
  SIZE = 3208
  bNonTransactional: Annotated[NvBool, 0]
  regOpCount: Annotated[NvU32, 4]
  regOps: Annotated[c.Array[NV2080_CTRL_GPU_REG_OP_v03_00, Literal[100]], 8]
NV83DE_CTRL_DEBUG_EXEC_REG_OPS_PARAMS_v1A_06: TypeAlias = struct_NV83DE_CTRL_DEBUG_EXEC_REG_OPS_PARAMS_v1A_06
rpc_ctrl_dbg_exec_reg_ops_v1A_10: TypeAlias = struct_rpc_ctrl_dbg_exec_reg_ops_v1A_10
rpc_ctrl_dbg_exec_reg_ops_v: TypeAlias = struct_rpc_ctrl_dbg_exec_reg_ops_v1A_10
# Toggle MMU debug mode (`action` selects the operation).
@c.record
class struct_rpc_ctrl_dbg_set_mode_mmu_debug_v1A_10(c.Struct):
  SIZE = 12
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
  ctrlParams: Annotated[NV83DE_CTRL_DEBUG_SET_MODE_MMU_DEBUG_PARAMS_v1A_06, 8]
@c.record
class struct_NV83DE_CTRL_DEBUG_SET_MODE_MMU_DEBUG_PARAMS_v1A_06(c.Struct):
  SIZE = 4
  action: Annotated[NvU32, 0]
NV83DE_CTRL_DEBUG_SET_MODE_MMU_DEBUG_PARAMS_v1A_06: TypeAlias = struct_NV83DE_CTRL_DEBUG_SET_MODE_MMU_DEBUG_PARAMS_v1A_06
rpc_ctrl_dbg_set_mode_mmu_debug_v1A_10: TypeAlias = struct_rpc_ctrl_dbg_set_mode_mmu_debug_v1A_10
rpc_ctrl_dbg_set_mode_mmu_debug_v: TypeAlias = struct_rpc_ctrl_dbg_set_mode_mmu_debug_v1A_10
# Toggle MMU GCC debug mode (same shape as MMU debug, newer version).
@c.record
class struct_rpc_ctrl_dbg_set_mode_mmu_gcc_debug_v29_07(c.Struct):
  SIZE = 12
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
  ctrlParams: Annotated[NV83DE_CTRL_DEBUG_SET_MODE_MMU_GCC_DEBUG_PARAMS_v29_07, 8]
@c.record
class struct_NV83DE_CTRL_DEBUG_SET_MODE_MMU_GCC_DEBUG_PARAMS_v29_07(c.Struct):
  SIZE = 4
  action: Annotated[NvU32, 0]
NV83DE_CTRL_DEBUG_SET_MODE_MMU_GCC_DEBUG_PARAMS_v29_07: TypeAlias = struct_NV83DE_CTRL_DEBUG_SET_MODE_MMU_GCC_DEBUG_PARAMS_v29_07
rpc_ctrl_dbg_set_mode_mmu_gcc_debug_v29_07: TypeAlias = struct_rpc_ctrl_dbg_set_mode_mmu_gcc_debug_v29_07
rpc_ctrl_dbg_set_mode_mmu_gcc_debug_v: TypeAlias = struct_rpc_ctrl_dbg_set_mode_mmu_gcc_debug_v29_07
# Read one SM's error-state registers (by smID on a target channel).
@c.record
class struct_rpc_ctrl_dbg_read_single_sm_error_state_v21_06(c.Struct):
  SIZE = 64
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
  ctrlParams: Annotated[NV83DE_CTRL_DEBUG_READ_SINGLE_SM_ERROR_STATE_PARAMS_v21_06, 8]
@c.record
class struct_NV83DE_CTRL_DEBUG_READ_SINGLE_SM_ERROR_STATE_PARAMS_v21_06(c.Struct):
  SIZE = 56
  hTargetChannel: Annotated[NvHandle, 0]
  smID: Annotated[NvU32, 4]
  smErrorState: Annotated[NV83DE_SM_ERROR_STATE_REGISTERS_v21_06, 8]
NV83DE_CTRL_DEBUG_READ_SINGLE_SM_ERROR_STATE_PARAMS_v21_06: TypeAlias = struct_NV83DE_CTRL_DEBUG_READ_SINGLE_SM_ERROR_STATE_PARAMS_v21_06
rpc_ctrl_dbg_read_single_sm_error_state_v21_06: TypeAlias = struct_rpc_ctrl_dbg_read_single_sm_error_state_v21_06
rpc_ctrl_dbg_read_single_sm_error_state_v: TypeAlias = struct_rpc_ctrl_dbg_read_single_sm_error_state_v21_06
# Clear one SM's error state.
@c.record
class struct_rpc_ctrl_dbg_clear_single_sm_error_state_v1A_10(c.Struct):
  SIZE = 16
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
  ctrlParams: Annotated[NV83DE_CTRL_DEBUG_CLEAR_SINGLE_SM_ERROR_STATE_PARAMS_v1A_06, 8]
@c.record
class struct_NV83DE_CTRL_DEBUG_CLEAR_SINGLE_SM_ERROR_STATE_PARAMS_v1A_06(c.Struct):
  SIZE = 8
  hTargetChannel: Annotated[NvHandle, 0]
  smID: Annotated[NvU32, 4]
NV83DE_CTRL_DEBUG_CLEAR_SINGLE_SM_ERROR_STATE_PARAMS_v1A_06: TypeAlias = struct_NV83DE_CTRL_DEBUG_CLEAR_SINGLE_SM_ERROR_STATE_PARAMS_v1A_06
rpc_ctrl_dbg_clear_single_sm_error_state_v1A_10: TypeAlias = struct_rpc_ctrl_dbg_clear_single_sm_error_state_v1A_10
rpc_ctrl_dbg_clear_single_sm_error_state_v: TypeAlias = struct_rpc_ctrl_dbg_clear_single_sm_error_state_v1A_10
# Toggle ERRBAR debug mode.
@c.record
class struct_rpc_ctrl_dbg_set_mode_errbar_debug_v1A_10(c.Struct):
  SIZE = 12
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
  ctrlParams: Annotated[NV83DE_CTRL_DEBUG_SET_MODE_ERRBAR_DEBUG_PARAMS_v1A_06, 8]
@c.record
class struct_NV83DE_CTRL_DEBUG_SET_MODE_ERRBAR_DEBUG_PARAMS_v1A_06(c.Struct):
  SIZE = 4
  action: Annotated[NvU32, 0]
NV83DE_CTRL_DEBUG_SET_MODE_ERRBAR_DEBUG_PARAMS_v1A_06: TypeAlias = struct_NV83DE_CTRL_DEBUG_SET_MODE_ERRBAR_DEBUG_PARAMS_v1A_06
rpc_ctrl_dbg_set_mode_errbar_debug_v1A_10: TypeAlias = struct_rpc_ctrl_dbg_set_mode_errbar_debug_v1A_10
rpc_ctrl_dbg_set_mode_errbar_debug_v: TypeAlias = struct_rpc_ctrl_dbg_set_mode_errbar_debug_v1A_10
# Select the next stop-trigger type for the debugger.
@c.record
class struct_rpc_ctrl_dbg_set_next_stop_trigger_type_v1A_10(c.Struct):
  SIZE = 12
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
  ctrlParams: Annotated[NV83DE_CTRL_DEBUG_SET_NEXT_STOP_TRIGGER_TYPE_PARAMS_v1A_06, 8]
@c.record
class struct_NV83DE_CTRL_DEBUG_SET_NEXT_STOP_TRIGGER_TYPE_PARAMS_v1A_06(c.Struct):
  SIZE = 4
  stopTriggerType: Annotated[NvU32, 0]
NV83DE_CTRL_DEBUG_SET_NEXT_STOP_TRIGGER_TYPE_PARAMS_v1A_06: TypeAlias = struct_NV83DE_CTRL_DEBUG_SET_NEXT_STOP_TRIGGER_TYPE_PARAMS_v1A_06
rpc_ctrl_dbg_set_next_stop_trigger_type_v1A_10: TypeAlias = struct_rpc_ctrl_dbg_set_next_stop_trigger_type_v1A_10
rpc_ctrl_dbg_set_next_stop_trigger_type_v: TypeAlias = struct_rpc_ctrl_dbg_set_next_stop_trigger_type_v1A_10
# DMA/CE/ZBC control RPCs. Generated ABI mirrors — offsets/SIZE wire-fixed.
# Set the default virtual address space for a device.
@c.record
class struct_rpc_ctrl_dma_set_default_vaspace_v1A_0E(c.Struct):
  SIZE = 12
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
  ctrlParams: Annotated[NV0080_CTRL_DMA_SET_DEFAULT_VASPACE_PARAMS_v03_00, 8]
@c.record
class struct_NV0080_CTRL_DMA_SET_DEFAULT_VASPACE_PARAMS_v03_00(c.Struct):
  SIZE = 4
  hVASpace: Annotated[NvHandle, 0]
NV0080_CTRL_DMA_SET_DEFAULT_VASPACE_PARAMS_v03_00: TypeAlias = struct_NV0080_CTRL_DMA_SET_DEFAULT_VASPACE_PARAMS_v03_00
rpc_ctrl_dma_set_default_vaspace_v1A_0E: TypeAlias = struct_rpc_ctrl_dma_set_default_vaspace_v1A_0E
rpc_ctrl_dma_set_default_vaspace_v: TypeAlias = struct_rpc_ctrl_dma_set_default_vaspace_v1A_0E
# Query the PCE mask for a given copy-engine type.
@c.record
class struct_rpc_ctrl_get_ce_pce_mask_v1A_0E(c.Struct):
  SIZE = 16
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
  ctrlParams: Annotated[NV2080_CTRL_CE_GET_CE_PCE_MASK_PARAMS_v1A_07, 8]
@c.record
class struct_NV2080_CTRL_CE_GET_CE_PCE_MASK_PARAMS_v1A_07(c.Struct):
  SIZE = 8
  ceEngineType: Annotated[NvU32, 0]
  pceMask: Annotated[NvU32, 4]
NV2080_CTRL_CE_GET_CE_PCE_MASK_PARAMS_v1A_07: TypeAlias = struct_NV2080_CTRL_CE_GET_CE_PCE_MASK_PARAMS_v1A_07
rpc_ctrl_get_ce_pce_mask_v1A_0E: TypeAlias = struct_rpc_ctrl_get_ce_pce_mask_v1A_0E
rpc_ctrl_get_ce_pce_mask_v: TypeAlias = struct_rpc_ctrl_get_ce_pce_mask_v1A_0E
# Read one ZBC (zero-bandwidth-clear) table entry; `tableType` selects the
# color/depth/stencil table and `value` is a union-like set of clear values.
@c.record
class struct_rpc_ctrl_get_zbc_clear_table_entry_v1A_0E(c.Struct):
  SIZE = 64
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
  ctrlParams: Annotated[NV9096_CTRL_GET_ZBC_CLEAR_TABLE_ENTRY_PARAMS_v1A_07, 8]
@c.record
class struct_NV9096_CTRL_GET_ZBC_CLEAR_TABLE_ENTRY_PARAMS_v1A_07(c.Struct):
  SIZE = 56
  value: Annotated[NV9096_CTRL_GET_ZBC_CLEAR_TABLE_ENTRY_PARAMS_value_v1A_07, 0]
  format: Annotated[NvU32, 40]
  index: Annotated[NvU32, 44]
  bIndexValid: Annotated[NvBool, 48]
  tableType: Annotated[NV9096_CTRL_ZBC_CLEAR_TABLE_TYPE, 52]
NV9096_CTRL_GET_ZBC_CLEAR_TABLE_ENTRY_PARAMS_v1A_07: TypeAlias = struct_NV9096_CTRL_GET_ZBC_CLEAR_TABLE_ENTRY_PARAMS_v1A_07
@c.record
class struct_NV9096_CTRL_GET_ZBC_CLEAR_TABLE_ENTRY_PARAMS_value_v1A_07(c.Struct):
  SIZE = 40
  colorFB: Annotated[c.Array[NvU32, Literal[4]], 0]
  colorDS: Annotated[c.Array[NvU32, Literal[4]], 16]
  depth: Annotated[NvU32, 32]
  stencil: Annotated[NvU32, 36]
NV9096_CTRL_GET_ZBC_CLEAR_TABLE_ENTRY_PARAMS_value_v1A_07: TypeAlias = struct_NV9096_CTRL_GET_ZBC_CLEAR_TABLE_ENTRY_PARAMS_value_v1A_07
# ZBC table selector enum (u32 on the wire).
class enum_NV9096_CTRL_ZBC_CLEAR_TABLE_TYPE(Annotated[int, ctypes.c_uint32], c.Enum): pass
NV9096_CTRL_ZBC_CLEAR_TABLE_TYPE_INVALID = enum_NV9096_CTRL_ZBC_CLEAR_TABLE_TYPE.define('NV9096_CTRL_ZBC_CLEAR_TABLE_TYPE_INVALID', 0)
NV9096_CTRL_ZBC_CLEAR_TABLE_TYPE_COLOR = enum_NV9096_CTRL_ZBC_CLEAR_TABLE_TYPE.define('NV9096_CTRL_ZBC_CLEAR_TABLE_TYPE_COLOR', 1)
NV9096_CTRL_ZBC_CLEAR_TABLE_TYPE_DEPTH = enum_NV9096_CTRL_ZBC_CLEAR_TABLE_TYPE.define('NV9096_CTRL_ZBC_CLEAR_TABLE_TYPE_DEPTH', 2)
NV9096_CTRL_ZBC_CLEAR_TABLE_TYPE_STENCIL = enum_NV9096_CTRL_ZBC_CLEAR_TABLE_TYPE.define('NV9096_CTRL_ZBC_CLEAR_TABLE_TYPE_STENCIL', 3)
NV9096_CTRL_ZBC_CLEAR_TABLE_TYPE_COUNT = enum_NV9096_CTRL_ZBC_CLEAR_TABLE_TYPE.define('NV9096_CTRL_ZBC_CLEAR_TABLE_TYPE_COUNT', 4)
NV9096_CTRL_ZBC_CLEAR_TABLE_TYPE: TypeAlias = enum_NV9096_CTRL_ZBC_CLEAR_TABLE_TYPE
rpc_ctrl_get_zbc_clear_table_entry_v1A_0E: TypeAlias = struct_rpc_ctrl_get_zbc_clear_table_entry_v1A_0E
rpc_ctrl_get_zbc_clear_table_entry_v: TypeAlias = struct_rpc_ctrl_get_zbc_clear_table_entry_v1A_0E
# NVLink status RPCs, two wire versions. Generated ABI mirrors — offsets and
# SIZE wire-fixed. v23_04 is the older layout.
@c.record
class struct_rpc_ctrl_get_nvlink_status_v23_04(c.Struct):
  SIZE = 3088
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
  ctrlParams: Annotated[NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS_v23_04, 8]
@c.record
class struct_NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS_v23_04(c.Struct):
  SIZE = 3080
  enabledLinkMask: Annotated[NvU32, 0]
  linkInfo: Annotated[c.Array[NV2080_CTRL_NVLINK_LINK_STATUS_INFO_v18_0D, Literal[24]], 8]
NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS_v23_04: TypeAlias = struct_NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS_v23_04
# Per-link status: PHY/sublink state, link clocks, and the device at each end.
@c.record
class struct_NV2080_CTRL_NVLINK_LINK_STATUS_INFO_v18_0D(c.Struct):
  SIZE = 128
  capsTbl: Annotated[NvU32, 0]
  phyType: Annotated[NvU8, 4]
  subLinkWidth: Annotated[NvU8, 5]
  linkState: Annotated[NvU32, 8]
  rxSublinkStatus: Annotated[NvU8, 12]
  txSublinkStatus: Annotated[NvU8, 13]
  nvlinkVersion: Annotated[NvU8, 14]
  nciVersion: Annotated[NvU8, 15]
  phyVersion: Annotated[NvU8, 16]
  nvlinkLinkClockKHz: Annotated[NvU32, 20]
  nvlinkLineRateMbps: Annotated[NvU32, 24]
  connected: Annotated[NvBool, 28]
  remoteDeviceLinkNumber: Annotated[NvU8, 29]
  localDeviceLinkNumber: Annotated[NvU8, 30]
  remoteDeviceInfo: Annotated[NV2080_CTRL_NVLINK_DEVICE_INFO_v15_02, 32]
  localDeviceInfo: Annotated[NV2080_CTRL_NVLINK_DEVICE_INFO_v15_02, 80]
NV2080_CTRL_NVLINK_LINK_STATUS_INFO_v18_0D: TypeAlias = struct_NV2080_CTRL_NVLINK_LINK_STATUS_INFO_v18_0D
# PCI identity + UUID of a device on one end of a link.
@c.record
class struct_NV2080_CTRL_NVLINK_DEVICE_INFO_v15_02(c.Struct):
  SIZE = 48
  deviceIdFlags: Annotated[NvU32, 0]
  domain: Annotated[NvU32, 4]
  bus: Annotated[NvU16, 8]
  device: Annotated[NvU16, 10]
  function: Annotated[NvU16, 12]
  pciDeviceId: Annotated[NvU32, 16]
  deviceType: Annotated[NvU64, 24]
  deviceUUID: Annotated[c.Array[NvU8, Literal[16]], 32]
NV2080_CTRL_NVLINK_DEVICE_INFO_v15_02: TypeAlias = struct_NV2080_CTRL_NVLINK_DEVICE_INFO_v15_02
rpc_ctrl_get_nvlink_status_v23_04: TypeAlias = struct_rpc_ctrl_get_nvlink_status_v23_04
# v28_09 widens the device-info record (adds fabricRecoveryStatusMask); the
# unversioned "_v" alias below tracks this newer layout.
@c.record
class struct_rpc_ctrl_get_nvlink_status_v28_09(c.Struct):
  SIZE = 3472
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
  ctrlParams: Annotated[NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS_v28_09, 8]
@c.record
class struct_NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS_v28_09(c.Struct):
  SIZE = 3464
  enabledLinkMask: Annotated[NvU32, 0]
  linkInfo: Annotated[c.Array[NV2080_CTRL_NVLINK_LINK_STATUS_INFO_v28_09, Literal[24]], 8]
NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS_v28_09: TypeAlias = struct_NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS_v28_09
@c.record
class struct_NV2080_CTRL_NVLINK_LINK_STATUS_INFO_v28_09(c.Struct):
  SIZE = 144
  capsTbl: Annotated[NvU32, 0]
  phyType: Annotated[NvU8, 4]
  subLinkWidth: Annotated[NvU8, 5]
  linkState: Annotated[NvU32, 8]
  rxSublinkStatus: Annotated[NvU8, 12]
  txSublinkStatus: Annotated[NvU8, 13]
  nvlinkVersion: Annotated[NvU8, 14]
  nciVersion: Annotated[NvU8, 15]
  phyVersion: Annotated[NvU8, 16]
  nvlinkLinkClockKHz: Annotated[NvU32, 20]
  nvlinkLineRateMbps: Annotated[NvU32, 24]
  connected: Annotated[NvBool, 28]
  remoteDeviceLinkNumber: Annotated[NvU8, 29]
  localDeviceLinkNumber: Annotated[NvU8, 30]
  remoteDeviceInfo: Annotated[NV2080_CTRL_NVLINK_DEVICE_INFO_v28_09, 32]
  localDeviceInfo: Annotated[NV2080_CTRL_NVLINK_DEVICE_INFO_v28_09, 88]
NV2080_CTRL_NVLINK_LINK_STATUS_INFO_v28_09: TypeAlias = struct_NV2080_CTRL_NVLINK_LINK_STATUS_INFO_v28_09
@c.record
class struct_NV2080_CTRL_NVLINK_DEVICE_INFO_v28_09(c.Struct):
  SIZE = 56
  deviceIdFlags: Annotated[NvU32, 0]
  domain: Annotated[NvU32, 4]
  bus: Annotated[NvU16, 8]
  device: Annotated[NvU16, 10]
  function: Annotated[NvU16, 12]
  pciDeviceId: Annotated[NvU32, 16]
  deviceType: Annotated[NvU64, 24]
  deviceUUID: Annotated[c.Array[NvU8, Literal[16]], 32]
  fabricRecoveryStatusMask: Annotated[NvU32, 48]
NV2080_CTRL_NVLINK_DEVICE_INFO_v28_09: TypeAlias = struct_NV2080_CTRL_NVLINK_DEVICE_INFO_v28_09
rpc_ctrl_get_nvlink_status_v28_09: TypeAlias = struct_rpc_ctrl_get_nvlink_status_v28_09
rpc_ctrl_get_nvlink_status_v: TypeAlias = struct_rpc_ctrl_get_nvlink_status_v28_09
# System-level P2P capability queries. Generated ABI mirrors — offsets and
# SIZE wire-fixed.
# Single-group query: caps for a list of up to 32 GPU ids.
@c.record
class struct_rpc_ctrl_get_p2p_caps_v1F_0D(c.Struct):
  SIZE = 164
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
  ctrlParams: Annotated[NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PARAMS_v1F_0D, 8]
@c.record
class struct_NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PARAMS_v1F_0D(c.Struct):
  SIZE = 156
  gpuIds: Annotated[c.Array[NvU32, Literal[32]], 0]
  gpuCount: Annotated[NvU32, 128]
  p2pCaps: Annotated[NvU32, 132]
  p2pOptimalReadCEs: Annotated[NvU32, 136]
  p2pOptimalWriteCEs: Annotated[NvU32, 140]
  p2pCapsStatus: Annotated[c.Array[NvU8, Literal[9]], 144]
NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PARAMS_v1F_0D: TypeAlias = struct_NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PARAMS_v1F_0D
rpc_ctrl_get_p2p_caps_v1F_0D: TypeAlias = struct_rpc_ctrl_get_p2p_caps_v1F_0D
rpc_ctrl_get_p2p_caps_v: TypeAlias = struct_rpc_ctrl_get_p2p_caps_v1F_0D
# Matrix query: pairwise caps between two groups (A, B) of up to 8 GPUs each;
# each output matrix is 8 rows of 8 u32 entries.
@c.record
class struct_rpc_ctrl_get_p2p_caps_matrix_v1A_0E(c.Struct):
  SIZE = 1360
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
  ctrlParams: Annotated[NV0000_CTRL_SYSTEM_GET_P2P_CAPS_MATRIX_PARAMS_v18_0A, 8]
@c.record
class struct_NV0000_CTRL_SYSTEM_GET_P2P_CAPS_MATRIX_PARAMS_v18_0A(c.Struct):
  SIZE = 1352
  grpACount: Annotated[NvU32, 0]
  grpBCount: Annotated[NvU32, 4]
  gpuIdGrpA: Annotated[c.Array[NvU32, Literal[8]], 8]
  gpuIdGrpB: Annotated[c.Array[NvU32, Literal[8]], 40]
  p2pCaps: Annotated[c.Array[NV0000_CTRL_P2P_CAPS_MATRIX_ROW_v18_0A, Literal[8]], 72]
  a2bOptimalReadCes: Annotated[c.Array[NV0000_CTRL_P2P_CAPS_MATRIX_ROW_v18_0A, Literal[8]], 328]
  a2bOptimalWriteCes: Annotated[c.Array[NV0000_CTRL_P2P_CAPS_MATRIX_ROW_v18_0A, Literal[8]], 584]
  b2aOptimalReadCes: Annotated[c.Array[NV0000_CTRL_P2P_CAPS_MATRIX_ROW_v18_0A, Literal[8]], 840]
  b2aOptimalWriteCes: Annotated[c.Array[NV0000_CTRL_P2P_CAPS_MATRIX_ROW_v18_0A, Literal[8]], 1096]
NV0000_CTRL_SYSTEM_GET_P2P_CAPS_MATRIX_PARAMS_v18_0A: TypeAlias = struct_NV0000_CTRL_SYSTEM_GET_P2P_CAPS_MATRIX_PARAMS_v18_0A
@c.record
class struct_NV0000_CTRL_P2P_CAPS_MATRIX_ROW_v18_0A(c.Struct):
  SIZE = 32
  array: Annotated[c.Array[NvU32, Literal[8]], 0]
NV0000_CTRL_P2P_CAPS_MATRIX_ROW_v18_0A: TypeAlias = struct_NV0000_CTRL_P2P_CAPS_MATRIX_ROW_v18_0A
rpc_ctrl_get_p2p_caps_matrix_v1A_0E: TypeAlias = struct_rpc_ctrl_get_p2p_caps_matrix_v1A_0E
rpc_ctrl_get_p2p_caps_matrix_v: TypeAlias = struct_rpc_ctrl_get_p2p_caps_matrix_v1A_0E
# Profiler (NVB0CC) RPCs: PM area reservations, register ops, PMA stream
# management. Generated ABI mirrors — offsets and SIZE wire-fixed.
# Reserve the SMPC PM area (`ctxsw` selects context-switched reservation).
@c.record
class struct_rpc_ctrl_reserve_pm_area_smpc_v1A_0F(c.Struct):
  SIZE = 12
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
  params: Annotated[NVB0CC_CTRL_RESERVE_PM_AREA_SMPC_PARAMS_v1A_0F, 8]
@c.record
class struct_NVB0CC_CTRL_RESERVE_PM_AREA_SMPC_PARAMS_v1A_0F(c.Struct):
  SIZE = 1
  ctxsw: Annotated[NvBool, 0]
NVB0CC_CTRL_RESERVE_PM_AREA_SMPC_PARAMS_v1A_0F: TypeAlias = struct_NVB0CC_CTRL_RESERVE_PM_AREA_SMPC_PARAMS_v1A_0F
rpc_ctrl_reserve_pm_area_smpc_v1A_0F: TypeAlias = struct_rpc_ctrl_reserve_pm_area_smpc_v1A_0F
rpc_ctrl_reserve_pm_area_smpc_v: TypeAlias = struct_rpc_ctrl_reserve_pm_area_smpc_v1A_0F
# Reserve the legacy HWPM area (same shape as the SMPC reservation).
@c.record
class struct_rpc_ctrl_reserve_hwpm_legacy_v1A_0F(c.Struct):
  SIZE = 12
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
  params: Annotated[NVB0CC_CTRL_RESERVE_HWPM_LEGACY_PARAMS_v1A_0F, 8]
@c.record
class struct_NVB0CC_CTRL_RESERVE_HWPM_LEGACY_PARAMS_v1A_0F(c.Struct):
  SIZE = 1
  ctxsw: Annotated[NvBool, 0]
NVB0CC_CTRL_RESERVE_HWPM_LEGACY_PARAMS_v1A_0F: TypeAlias = struct_NVB0CC_CTRL_RESERVE_HWPM_LEGACY_PARAMS_v1A_0F
rpc_ctrl_reserve_hwpm_legacy_v1A_0F: TypeAlias = struct_rpc_ctrl_reserve_hwpm_legacy_v1A_0F
rpc_ctrl_reserve_hwpm_legacy_v: TypeAlias = struct_rpc_ctrl_reserve_hwpm_legacy_v1A_0F
# Profiler register ops: up to 124 ops per call; `mode` picks the failure
# policy (see NVB0CC_REGOPS_MODE below).
@c.record
class struct_rpc_ctrl_b0cc_exec_reg_ops_v1A_0F(c.Struct):
  SIZE = 3988
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
  params: Annotated[NVB0CC_CTRL_EXEC_REG_OPS_PARAMS_v1A_0F, 8]
@c.record
class struct_NVB0CC_CTRL_EXEC_REG_OPS_PARAMS_v1A_0F(c.Struct):
  SIZE = 3980
  regOpCount: Annotated[NvU32, 0]
  mode: Annotated[NVB0CC_REGOPS_MODE, 4]
  bPassed: Annotated[NvBool, 8]
  bDirect: Annotated[NvBool, 9]
  regOps: Annotated[c.Array[NV2080_CTRL_GPU_REG_OP_v03_00, Literal[124]], 12]
NVB0CC_CTRL_EXEC_REG_OPS_PARAMS_v1A_0F: TypeAlias = struct_NVB0CC_CTRL_EXEC_REG_OPS_PARAMS_v1A_0F
# Reg-ops failure policy enum (u32 on the wire).
class enum_NVB0CC_REGOPS_MODE(Annotated[int, ctypes.c_uint32], c.Enum): pass
NVB0CC_REGOPS_MODE_ALL_OR_NONE = enum_NVB0CC_REGOPS_MODE.define('NVB0CC_REGOPS_MODE_ALL_OR_NONE', 0)
NVB0CC_REGOPS_MODE_CONTINUE_ON_ERROR = enum_NVB0CC_REGOPS_MODE.define('NVB0CC_REGOPS_MODE_CONTINUE_ON_ERROR', 1)
NVB0CC_REGOPS_MODE: TypeAlias = enum_NVB0CC_REGOPS_MODE
rpc_ctrl_b0cc_exec_reg_ops_v1A_0F: TypeAlias = struct_rpc_ctrl_b0cc_exec_reg_ops_v1A_0F
rpc_ctrl_b0cc_exec_reg_ops_v: TypeAlias = struct_rpc_ctrl_b0cc_exec_reg_ops_v1A_0F
# Bind PM resources (handles only, no params struct).
@c.record
class struct_rpc_ctrl_bind_pm_resources_v1A_0F(c.Struct):
  SIZE = 8
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
rpc_ctrl_bind_pm_resources_v1A_0F: TypeAlias = struct_rpc_ctrl_bind_pm_resources_v1A_0F
rpc_ctrl_bind_pm_resources_v: TypeAlias = struct_rpc_ctrl_bind_pm_resources_v1A_0F
# Allocate a PMA (profiler memory access) stream backed by caller memory.
@c.record
class struct_rpc_ctrl_alloc_pma_stream_v1A_14(c.Struct):
  SIZE = 64
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
  params: Annotated[NVB0CC_CTRL_ALLOC_PMA_STREAM_PARAMS_v1A_14, 8]
@c.record
class struct_NVB0CC_CTRL_ALLOC_PMA_STREAM_PARAMS_v1A_14(c.Struct):
  SIZE = 56
  hMemPmaBuffer: Annotated[NvHandle, 0]
  pmaBufferOffset: Annotated[NvU64, 8]
  pmaBufferSize: Annotated[NvU64, 16]
  hMemPmaBytesAvailable: Annotated[NvHandle, 24]
  pmaBytesAvailableOffset: Annotated[NvU64, 32]
  ctxsw: Annotated[NvBool, 40]
  pmaChannelIdx: Annotated[NvU32, 44]
  pmaBufferVA: Annotated[NvU64, 48]
NVB0CC_CTRL_ALLOC_PMA_STREAM_PARAMS_v1A_14: TypeAlias = struct_NVB0CC_CTRL_ALLOC_PMA_STREAM_PARAMS_v1A_14
rpc_ctrl_alloc_pma_stream_v1A_14: TypeAlias = struct_rpc_ctrl_alloc_pma_stream_v1A_14
rpc_ctrl_alloc_pma_stream_v: TypeAlias = struct_rpc_ctrl_alloc_pma_stream_v1A_14
# Update the PMA stream's get/put pointers (consume bytes, optionally wait).
@c.record
class struct_rpc_ctrl_pma_stream_update_get_put_v1A_14(c.Struct):
  SIZE = 56
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
  params: Annotated[NVB0CC_CTRL_PMA_STREAM_UPDATE_GET_PUT_PARAMS_v1A_14, 8]
@c.record
class struct_NVB0CC_CTRL_PMA_STREAM_UPDATE_GET_PUT_PARAMS_v1A_14(c.Struct):
  SIZE = 48
  bytesConsumed: Annotated[NvU64, 0]
  bUpdateAvailableBytes: Annotated[NvBool, 8]
  bWait: Annotated[NvBool, 9]
  bytesAvailable: Annotated[NvU64, 16]
  bReturnPut: Annotated[NvBool, 24]
  putPtr: Annotated[NvU64, 32]
  pmaChannelIdx: Annotated[NvU32, 40]
NVB0CC_CTRL_PMA_STREAM_UPDATE_GET_PUT_PARAMS_v1A_14: TypeAlias = struct_NVB0CC_CTRL_PMA_STREAM_UPDATE_GET_PUT_PARAMS_v1A_14
rpc_ctrl_pma_stream_update_get_put_v1A_14: TypeAlias = struct_rpc_ctrl_pma_stream_update_get_put_v1A_14
rpc_ctrl_pma_stream_update_get_put_v: TypeAlias = struct_rpc_ctrl_pma_stream_update_get_put_v1A_14
# FB info / FIFO channel / context eviction RPCs. Generated ABI mirrors —
# offsets and SIZE wire-fixed.
# FB_GET_INFO_V2, v25_0A layout: list of (index, data) info pairs, 55 entries.
@c.record
class struct_rpc_ctrl_fb_get_info_v2_v25_0A(c.Struct):
  SIZE = 452
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
  ctrlParams: Annotated[NV2080_CTRL_FB_GET_INFO_V2_PARAMS_v25_0A, 8]
@c.record
class struct_NV2080_CTRL_FB_GET_INFO_V2_PARAMS_v25_0A(c.Struct):
  SIZE = 444
  fbInfoListSize: Annotated[NvU32, 0]
  fbInfoList: Annotated[c.Array[NV2080_CTRL_FB_INFO_v1A_15, Literal[55]], 4]
NV2080_CTRL_FB_GET_INFO_V2_PARAMS_v25_0A: TypeAlias = struct_NV2080_CTRL_FB_GET_INFO_V2_PARAMS_v25_0A
# One FB info entry: `index` selects the attribute, `data` is its value.
@c.record
class struct_NV2080_CTRL_FB_INFO_v1A_15(c.Struct):
  SIZE = 8
  index: Annotated[NvU32, 0]
  data: Annotated[NvU32, 4]
NV2080_CTRL_FB_INFO_v1A_15: TypeAlias = struct_NV2080_CTRL_FB_INFO_v1A_15
rpc_ctrl_fb_get_info_v2_v25_0A: TypeAlias = struct_rpc_ctrl_fb_get_info_v2_v25_0A
# v27_00 layout grows the info list to 57 entries; the unversioned "_v" alias
# below tracks this newer layout.
@c.record
class struct_rpc_ctrl_fb_get_info_v2_v27_00(c.Struct):
  SIZE = 468
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
  ctrlParams: Annotated[NV2080_CTRL_FB_GET_INFO_V2_PARAMS_v27_00, 8]
@c.record
class struct_NV2080_CTRL_FB_GET_INFO_V2_PARAMS_v27_00(c.Struct):
  SIZE = 460
  fbInfoListSize: Annotated[NvU32, 0]
  fbInfoList: Annotated[c.Array[NV2080_CTRL_FB_INFO_v1A_15, Literal[57]], 4]
NV2080_CTRL_FB_GET_INFO_V2_PARAMS_v27_00: TypeAlias = struct_NV2080_CTRL_FB_GET_INFO_V2_PARAMS_v27_00
rpc_ctrl_fb_get_info_v2_v27_00: TypeAlias = struct_rpc_ctrl_fb_get_info_v2_v27_00
rpc_ctrl_fb_get_info_v2_v: TypeAlias = struct_rpc_ctrl_fb_get_info_v2_v27_00
# Set one FIFO channel property (key/value pair on a channel handle).
@c.record
class struct_rpc_ctrl_fifo_set_channel_properties_v1A_16(c.Struct):
  SIZE = 24
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
  ctrlParams: Annotated[NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_PARAMS_v03_00, 8]
@c.record
class struct_NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_PARAMS_v03_00(c.Struct):
  SIZE = 16
  hChannel: Annotated[NvHandle, 0]
  property: Annotated[NvU32, 4]
  value: Annotated[NvU64, 8]
NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_PARAMS_v03_00: TypeAlias = struct_NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_PARAMS_v03_00
rpc_ctrl_fifo_set_channel_properties_v1A_16: TypeAlias = struct_rpc_ctrl_fifo_set_channel_properties_v1A_16
rpc_ctrl_fifo_set_channel_properties_v: TypeAlias = struct_rpc_ctrl_fifo_set_channel_properties_v1A_16
# Evict a channel's context from the GPU (inverse of promote_ctx above).
@c.record
class struct_rpc_ctrl_gpu_evict_ctx_v1A_1C(c.Struct):
  SIZE = 28
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
  params: Annotated[NV2080_CTRL_GPU_EVICT_CTX_PARAMS_v03_00, 8]
@c.record
class struct_NV2080_CTRL_GPU_EVICT_CTX_PARAMS_v03_00(c.Struct):
  SIZE = 20
  engineType: Annotated[NvU32, 0]
  hClient: Annotated[NvHandle, 4]
  ChID: Annotated[NvU32, 8]
  hChanClient: Annotated[NvHandle, 12]
  hObject: Annotated[NvHandle, 16]
NV2080_CTRL_GPU_EVICT_CTX_PARAMS_v03_00: TypeAlias = struct_NV2080_CTRL_GPU_EVICT_CTX_PARAMS_v03_00
rpc_ctrl_gpu_evict_ctx_v1A_1C: TypeAlias = struct_rpc_ctrl_gpu_evict_ctx_v1A_1C
rpc_ctrl_gpu_evict_ctx_v: TypeAlias = struct_rpc_ctrl_gpu_evict_ctx_v1A_1C
# FB floorsweeping-info RPC: batch of up to 120 queries, each tagged with a
# queryType and carrying a 24-byte union of per-query params (all union
# members share offset 0). Generated ABI mirror — offsets/SIZE wire-fixed.
@c.record
class struct_rpc_ctrl_fb_get_fs_info_v24_00(c.Struct):
  SIZE = 3856
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
  params: Annotated[NV2080_CTRL_FB_GET_FS_INFO_PARAMS_v24_00, 8]
@c.record
class struct_NV2080_CTRL_FB_GET_FS_INFO_PARAMS_v24_00(c.Struct):
  SIZE = 3848
  numQueries: Annotated[NvU16, 0]
  reserved: Annotated[c.Array[NvU8, Literal[6]], 2]
  queries: Annotated[c.Array[NV2080_CTRL_FB_FS_INFO_QUERY_v1A_1D, Literal[120]], 8]
NV2080_CTRL_FB_GET_FS_INFO_PARAMS_v24_00: TypeAlias = struct_NV2080_CTRL_FB_GET_FS_INFO_PARAMS_v24_00
# One query slot: type tag, per-query status, and the params union below.
@c.record
class struct_NV2080_CTRL_FB_FS_INFO_QUERY_v1A_1D(c.Struct):
  SIZE = 32
  queryType: Annotated[NvU16, 0]
  reserved: Annotated[c.Array[NvU8, Literal[2]], 2]
  status: Annotated[NvU32, 4]
  queryParams: Annotated[NV2080_CTRL_FB_FS_INFO_QUERY_DATA_v1A_1D, 8]
NV2080_CTRL_FB_FS_INFO_QUERY_v1A_1D: TypeAlias = struct_NV2080_CTRL_FB_FS_INFO_QUERY_v1A_1D
# C union mirrored as a record: every member sits at offset 0; queryType in
# the enclosing struct selects which view is meaningful.
@c.record
class union_NV2080_CTRL_FB_FS_INFO_QUERY_DATA_v1A_1D(c.Struct):
  SIZE = 24
  inv: Annotated[NV2080_CTRL_FB_FS_INFO_INVALID_QUERY_PARAMS_v1A_1D, 0]
  fbp: Annotated[NV2080_CTRL_FB_FS_INFO_FBP_MASK_PARAMS_v1A_1D, 0]
  ltc: Annotated[NV2080_CTRL_FB_FS_INFO_LTC_MASK_PARAMS_v1A_1D, 0]
  lts: Annotated[NV2080_CTRL_FB_FS_INFO_LTS_MASK_PARAMS_v1A_1D, 0]
  fbpa: Annotated[NV2080_CTRL_FB_FS_INFO_FBPA_MASK_PARAMS_v1A_1D, 0]
  rop: Annotated[NV2080_CTRL_FB_FS_INFO_ROP_MASK_PARAMS_v1A_1D, 0]
  dmLtc: Annotated[NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTC_MASK_PARAMS_v1A_1D, 0]
  dmLts: Annotated[NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTS_MASK_PARAMS_v1A_1D, 0]
  dmFbpa: Annotated[NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_MASK_PARAMS_v1A_1D, 0]
  dmRop: Annotated[NV2080_CTRL_FB_FS_INFO_PROFILER_MON_ROP_MASK_PARAMS_v1A_1D, 0]
  dmFbpaSubp: Annotated[NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_SUBP_MASK_PARAMS_v1A_1D, 0]
  fbpaSubp: Annotated[NV2080_CTRL_FB_FS_INFO_FBPA_SUBP_MASK_PARAMS_v1A_1D, 0]
  fbpLogicalMap: Annotated[NV2080_CTRL_FB_FS_INFO_FBP_LOGICAL_MAP_PARAMS_v1A_1D, 0]
NV2080_CTRL_FB_FS_INFO_QUERY_DATA_v1A_1D: TypeAlias = union_NV2080_CTRL_FB_FS_INFO_QUERY_DATA_v1A_1D
# Raw-bytes view used when queryType is invalid/unrecognized.
@c.record
class struct_NV2080_CTRL_FB_FS_INFO_INVALID_QUERY_PARAMS_v1A_1D(c.Struct):
  SIZE = 24
  data: Annotated[c.Array[NvU8, Literal[24]], 0]
NV2080_CTRL_FB_FS_INFO_INVALID_QUERY_PARAMS_v1A_1D: TypeAlias = struct_NV2080_CTRL_FB_FS_INFO_INVALID_QUERY_PARAMS_v1A_1D
@c.record
class struct_NV2080_CTRL_FB_FS_INFO_FBP_MASK_PARAMS_v1A_1D(c.Struct):
  SIZE = 16
  swizzId: Annotated[NvU32, 0]
  fbpEnMask: Annotated[NvU64, 8]
NV2080_CTRL_FB_FS_INFO_FBP_MASK_PARAMS_v1A_1D: TypeAlias = struct_NV2080_CTRL_FB_FS_INFO_FBP_MASK_PARAMS_v1A_1D
@c.record
class struct_NV2080_CTRL_FB_FS_INFO_LTC_MASK_PARAMS_v1A_1D(c.Struct):
  SIZE = 8
  fbpIndex: Annotated[NvU32, 0]
  ltcEnMask: Annotated[NvU32, 4]
NV2080_CTRL_FB_FS_INFO_LTC_MASK_PARAMS_v1A_1D: TypeAlias = struct_NV2080_CTRL_FB_FS_INFO_LTC_MASK_PARAMS_v1A_1D
@c.record
class struct_NV2080_CTRL_FB_FS_INFO_LTS_MASK_PARAMS_v1A_1D(c.Struct):
SIZE = 8
fbpIndex: Annotated[NvU32, 0]
ltsEnMask: Annotated[NvU32, 4]
NV2080_CTRL_FB_FS_INFO_LTS_MASK_PARAMS_v1A_1D: TypeAlias = struct_NV2080_CTRL_FB_FS_INFO_LTS_MASK_PARAMS_v1A_1D
@c.record
class struct_NV2080_CTRL_FB_FS_INFO_FBPA_MASK_PARAMS_v1A_1D(c.Struct):
SIZE = 8
fbpIndex: Annotated[NvU32, 0]
fbpaEnMask: Annotated[NvU32, 4]
NV2080_CTRL_FB_FS_INFO_FBPA_MASK_PARAMS_v1A_1D: TypeAlias = struct_NV2080_CTRL_FB_FS_INFO_FBPA_MASK_PARAMS_v1A_1D
@c.record
class struct_NV2080_CTRL_FB_FS_INFO_ROP_MASK_PARAMS_v1A_1D(c.Struct):
SIZE = 8
fbpIndex: Annotated[NvU32, 0]
ropEnMask: Annotated[NvU32, 4]
NV2080_CTRL_FB_FS_INFO_ROP_MASK_PARAMS_v1A_1D: TypeAlias = struct_NV2080_CTRL_FB_FS_INFO_ROP_MASK_PARAMS_v1A_1D
@c.record
class struct_NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTC_MASK_PARAMS_v1A_1D(c.Struct):
SIZE = 12
fbpIndex: Annotated[NvU32, 0]
swizzId: Annotated[NvU32, 4]
ltcEnMask: Annotated[NvU32, 8]
NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTC_MASK_PARAMS_v1A_1D: TypeAlias = struct_NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTC_MASK_PARAMS_v1A_1D
@c.record
class struct_NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTS_MASK_PARAMS_v1A_1D(c.Struct):
SIZE = 12
fbpIndex: Annotated[NvU32, 0]
swizzId: Annotated[NvU32, 4]
ltsEnMask: Annotated[NvU32, 8]
NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTS_MASK_PARAMS_v1A_1D: TypeAlias = struct_NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTS_MASK_PARAMS_v1A_1D
@c.record
class struct_NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_MASK_PARAMS_v1A_1D(c.Struct):
SIZE = 12
fbpIndex: Annotated[NvU32, 0]
swizzId: Annotated[NvU32, 4]
fbpaEnMask: Annotated[NvU32, 8]
NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_MASK_PARAMS_v1A_1D: TypeAlias = struct_NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_MASK_PARAMS_v1A_1D
@c.record
class struct_NV2080_CTRL_FB_FS_INFO_PROFILER_MON_ROP_MASK_PARAMS_v1A_1D(c.Struct):
SIZE = 12
fbpIndex: Annotated[NvU32, 0]
swizzId: Annotated[NvU32, 4]
ropEnMask: Annotated[NvU32, 8]
NV2080_CTRL_FB_FS_INFO_PROFILER_MON_ROP_MASK_PARAMS_v1A_1D: TypeAlias = struct_NV2080_CTRL_FB_FS_INFO_PROFILER_MON_ROP_MASK_PARAMS_v1A_1D
@c.record
class struct_NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_SUBP_MASK_PARAMS_v1A_1D(c.Struct):
SIZE = 16
fbpIndex: Annotated[NvU32, 0]
swizzId: Annotated[NvU32, 4]
fbpaSubpEnMask: Annotated[NvU64, 8]
NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_SUBP_MASK_PARAMS_v1A_1D: TypeAlias = struct_NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_SUBP_MASK_PARAMS_v1A_1D
@c.record
class struct_NV2080_CTRL_FB_FS_INFO_FBPA_SUBP_MASK_PARAMS_v1A_1D(c.Struct):
SIZE = 8
fbpIndex: Annotated[NvU32, 0]
fbpaSubpEnMask: Annotated[NvU32, 4]
NV2080_CTRL_FB_FS_INFO_FBPA_SUBP_MASK_PARAMS_v1A_1D: TypeAlias = struct_NV2080_CTRL_FB_FS_INFO_FBPA_SUBP_MASK_PARAMS_v1A_1D
@c.record
class struct_NV2080_CTRL_FB_FS_INFO_FBP_LOGICAL_MAP_PARAMS_v1A_1D(c.Struct):
SIZE = 8
fbpIndex: Annotated[NvU32, 0]
fbpLogicalIndex: Annotated[NvU32, 4]
NV2080_CTRL_FB_FS_INFO_FBP_LOGICAL_MAP_PARAMS_v1A_1D: TypeAlias = struct_NV2080_CTRL_FB_FS_INFO_FBP_LOGICAL_MAP_PARAMS_v1A_1D
rpc_ctrl_fb_get_fs_info_v24_00: TypeAlias = struct_rpc_ctrl_fb_get_fs_info_v24_00
# v26_04 revision of the FB floorsweeping-info RPC. Same overall layout as
# v24_00; the query-data union gains four new variants (sysl2Ltc, pac,
# logicalLtc, dmLogicalLtc). The unversioned alias below points here.
@c.record
class struct_rpc_ctrl_fb_get_fs_info_v26_04(c.Struct):
    SIZE = 3856
    hClient: Annotated[NvHandle, 0]
    hObject: Annotated[NvHandle, 4]
    params: Annotated[NV2080_CTRL_FB_GET_FS_INFO_PARAMS_v26_04, 8]
@c.record
class struct_NV2080_CTRL_FB_GET_FS_INFO_PARAMS_v26_04(c.Struct):
    SIZE = 3848
    numQueries: Annotated[NvU16, 0]
    reserved: Annotated[c.Array[NvU8, Literal[6]], 2]  # padding to 8-byte align queries
    queries: Annotated[c.Array[NV2080_CTRL_FB_FS_INFO_QUERY_v26_04, Literal[120]], 8]
NV2080_CTRL_FB_GET_FS_INFO_PARAMS_v26_04: TypeAlias = struct_NV2080_CTRL_FB_GET_FS_INFO_PARAMS_v26_04
# One query slot: type tag + status + type-discriminated payload union.
@c.record
class struct_NV2080_CTRL_FB_FS_INFO_QUERY_v26_04(c.Struct):
    SIZE = 32
    queryType: Annotated[NvU16, 0]
    reserved: Annotated[c.Array[NvU8, Literal[2]], 2]
    status: Annotated[NvU32, 4]
    queryParams: Annotated[NV2080_CTRL_FB_FS_INFO_QUERY_DATA_v26_04, 8]
NV2080_CTRL_FB_FS_INFO_QUERY_v26_04: TypeAlias = struct_NV2080_CTRL_FB_FS_INFO_QUERY_v26_04
# C union modeled as a record: all members overlap at offset 0; queryType
# selects the valid interpretation. Reuses the v1A_1D variant records.
@c.record
class union_NV2080_CTRL_FB_FS_INFO_QUERY_DATA_v26_04(c.Struct):
    SIZE = 24
    inv: Annotated[NV2080_CTRL_FB_FS_INFO_INVALID_QUERY_PARAMS_v1A_1D, 0]
    fbp: Annotated[NV2080_CTRL_FB_FS_INFO_FBP_MASK_PARAMS_v1A_1D, 0]
    ltc: Annotated[NV2080_CTRL_FB_FS_INFO_LTC_MASK_PARAMS_v1A_1D, 0]
    lts: Annotated[NV2080_CTRL_FB_FS_INFO_LTS_MASK_PARAMS_v1A_1D, 0]
    fbpa: Annotated[NV2080_CTRL_FB_FS_INFO_FBPA_MASK_PARAMS_v1A_1D, 0]
    rop: Annotated[NV2080_CTRL_FB_FS_INFO_ROP_MASK_PARAMS_v1A_1D, 0]
    dmLtc: Annotated[NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTC_MASK_PARAMS_v1A_1D, 0]
    dmLts: Annotated[NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTS_MASK_PARAMS_v1A_1D, 0]
    dmFbpa: Annotated[NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_MASK_PARAMS_v1A_1D, 0]
    dmRop: Annotated[NV2080_CTRL_FB_FS_INFO_PROFILER_MON_ROP_MASK_PARAMS_v1A_1D, 0]
    dmFbpaSubp: Annotated[NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_SUBP_MASK_PARAMS_v1A_1D, 0]
    fbpaSubp: Annotated[NV2080_CTRL_FB_FS_INFO_FBPA_SUBP_MASK_PARAMS_v1A_1D, 0]
    fbpLogicalMap: Annotated[NV2080_CTRL_FB_FS_INFO_FBP_LOGICAL_MAP_PARAMS_v1A_1D, 0]
    sysl2Ltc: Annotated[NV2080_CTRL_SYSL2_FS_INFO_SYSLTC_MASK_PARAMS_v26_04, 0]
    pac: Annotated[NV2080_CTRL_FB_FS_INFO_PAC_MASK_PARAMS_v26_04, 0]
    logicalLtc: Annotated[NV2080_CTRL_FB_FS_INFO_LOGICAL_LTC_MASK_PARAMS_v26_04, 0]
    dmLogicalLtc: Annotated[NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LOGICAL_LTC_MASK_PARAMS_v26_04, 0]
NV2080_CTRL_FB_FS_INFO_QUERY_DATA_v26_04: TypeAlias = union_NV2080_CTRL_FB_FS_INFO_QUERY_DATA_v26_04
# New-in-v26_04 payload variants: index + enable-mask pairs.
@c.record
class struct_NV2080_CTRL_SYSL2_FS_INFO_SYSLTC_MASK_PARAMS_v26_04(c.Struct):
    SIZE = 8
    sysIdx: Annotated[NvU32, 0]
    sysl2LtcEnMask: Annotated[NvU32, 4]
NV2080_CTRL_SYSL2_FS_INFO_SYSLTC_MASK_PARAMS_v26_04: TypeAlias = struct_NV2080_CTRL_SYSL2_FS_INFO_SYSLTC_MASK_PARAMS_v26_04
@c.record
class struct_NV2080_CTRL_FB_FS_INFO_PAC_MASK_PARAMS_v26_04(c.Struct):
    SIZE = 8
    fbpIndex: Annotated[NvU32, 0]
    pacEnMask: Annotated[NvU32, 4]
NV2080_CTRL_FB_FS_INFO_PAC_MASK_PARAMS_v26_04: TypeAlias = struct_NV2080_CTRL_FB_FS_INFO_PAC_MASK_PARAMS_v26_04
@c.record
class struct_NV2080_CTRL_FB_FS_INFO_LOGICAL_LTC_MASK_PARAMS_v26_04(c.Struct):
    SIZE = 16
    fbpIndex: Annotated[NvU32, 0]
    logicalLtcEnMask: Annotated[NvU64, 8]  # 8-byte aligned; bytes 4-7 are padding
NV2080_CTRL_FB_FS_INFO_LOGICAL_LTC_MASK_PARAMS_v26_04: TypeAlias = struct_NV2080_CTRL_FB_FS_INFO_LOGICAL_LTC_MASK_PARAMS_v26_04
@c.record
class struct_NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LOGICAL_LTC_MASK_PARAMS_v26_04(c.Struct):
    SIZE = 16
    fbpIndex: Annotated[NvU32, 0]
    swizzId: Annotated[NvU32, 4]
    logicalLtcEnMask: Annotated[NvU64, 8]
NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LOGICAL_LTC_MASK_PARAMS_v26_04: TypeAlias = struct_NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LOGICAL_LTC_MASK_PARAMS_v26_04
rpc_ctrl_fb_get_fs_info_v26_04: TypeAlias = struct_rpc_ctrl_fb_get_fs_info_v26_04
rpc_ctrl_fb_get_fs_info_v: TypeAlias = struct_rpc_ctrl_fb_get_fs_info_v26_04
# RPC message for GR-manager floorsweeping-info queries: a batch of up to 96
# tagged queries, mirroring the FB fs-info batching scheme above.
@c.record
class struct_rpc_ctrl_grmgr_get_gr_fs_info_v1A_1D(c.Struct):
    SIZE = 1936
    hClient: Annotated[NvHandle, 0]
    hObject: Annotated[NvHandle, 4]
    params: Annotated[NV2080_CTRL_GRMGR_GET_GR_FS_INFO_PARAMS_v1A_1D, 8]
@c.record
class struct_NV2080_CTRL_GRMGR_GET_GR_FS_INFO_PARAMS_v1A_1D(c.Struct):
    SIZE = 1928
    numQueries: Annotated[NvU16, 0]
    reserved: Annotated[c.Array[NvU8, Literal[6]], 2]  # padding to align queries
    queries: Annotated[c.Array[NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_PARAMS_v1A_1D, Literal[96]], 8]
NV2080_CTRL_GRMGR_GET_GR_FS_INFO_PARAMS_v1A_1D: TypeAlias = struct_NV2080_CTRL_GRMGR_GET_GR_FS_INFO_PARAMS_v1A_1D
# One query slot: type tag, per-query status, type-discriminated payload.
@c.record
class struct_NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_PARAMS_v1A_1D(c.Struct):
    SIZE = 20
    queryType: Annotated[NvU16, 0]
    reserved: Annotated[c.Array[NvU8, Literal[2]], 2]
    status: Annotated[NvU32, 4]
    queryData: Annotated[NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_DATA_v1A_1D, 8]
NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_PARAMS_v1A_1D: TypeAlias = struct_NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_PARAMS_v1A_1D
# C union modeled as a record: all members overlap at offset 0.
@c.record
class union_NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_DATA_v1A_1D(c.Struct):
    SIZE = 12
    gpcCountData: Annotated[NV2080_CTRL_GRMGR_GR_FS_INFO_GPC_COUNT_PARAMS_v1A_1D, 0]
    chipletGpcMapData: Annotated[NV2080_CTRL_GRMGR_GR_FS_INFO_CHIPLET_GPC_MAP_PARAMS_v1A_1D, 0]
    tpcMaskData: Annotated[NV2080_CTRL_GRMGR_GR_FS_INFO_TPC_MASK_PARAMS_v1A_1D, 0]
    ppcMaskData: Annotated[NV2080_CTRL_GRMGR_GR_FS_INFO_PPC_MASK_PARAMS_v1A_1D, 0]
    partitionGpcMapData: Annotated[NV2080_CTRL_GRMGR_GR_FS_INFO_PARTITION_CHIPLET_GPC_MAP_PARAMS_v1A_1D, 0]
    syspipeMaskData: Annotated[NV2080_CTRL_GRMGR_GR_FS_INFO_CHIPLET_SYSPIPE_MASK_PARAMS_v1A_1D, 0]
    partitionChipletSyspipeData: Annotated[NV2080_CTRL_GRMGR_GR_FS_INFO_PARTITION_CHIPLET_SYSPIPE_IDS_PARAMS_v1A_1D, 0]
    dmGpcMaskData: Annotated[NV2080_CTRL_GRMGR_GR_FS_INFO_PROFILER_MON_GPC_MASK_PARAMS_v1A_1D, 0]
    partitionSyspipeIdData: Annotated[NV2080_CTRL_GRMGR_GR_FS_INFO_PARTITION_SYSPIPE_ID_PARAMS_v1A_1D, 0]
    ropMaskData: Annotated[NV2080_CTRL_GRMGR_GR_FS_INFO_ROP_MASK_PARAMS_v1A_1D, 0]
NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_DATA_v1A_1D: TypeAlias = union_NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_DATA_v1A_1D
# Payload variants: simple counts, index→mask pairs, and index→map pairs.
@c.record
class struct_NV2080_CTRL_GRMGR_GR_FS_INFO_GPC_COUNT_PARAMS_v1A_1D(c.Struct):
    SIZE = 4
    gpcCount: Annotated[NvU32, 0]
NV2080_CTRL_GRMGR_GR_FS_INFO_GPC_COUNT_PARAMS_v1A_1D: TypeAlias = struct_NV2080_CTRL_GRMGR_GR_FS_INFO_GPC_COUNT_PARAMS_v1A_1D
@c.record
class struct_NV2080_CTRL_GRMGR_GR_FS_INFO_CHIPLET_GPC_MAP_PARAMS_v1A_1D(c.Struct):
    SIZE = 8
    gpcId: Annotated[NvU32, 0]
    chipletGpcMap: Annotated[NvU32, 4]
NV2080_CTRL_GRMGR_GR_FS_INFO_CHIPLET_GPC_MAP_PARAMS_v1A_1D: TypeAlias = struct_NV2080_CTRL_GRMGR_GR_FS_INFO_CHIPLET_GPC_MAP_PARAMS_v1A_1D
@c.record
class struct_NV2080_CTRL_GRMGR_GR_FS_INFO_TPC_MASK_PARAMS_v1A_1D(c.Struct):
    SIZE = 8
    gpcId: Annotated[NvU32, 0]
    tpcMask: Annotated[NvU32, 4]
NV2080_CTRL_GRMGR_GR_FS_INFO_TPC_MASK_PARAMS_v1A_1D: TypeAlias = struct_NV2080_CTRL_GRMGR_GR_FS_INFO_TPC_MASK_PARAMS_v1A_1D
@c.record
class struct_NV2080_CTRL_GRMGR_GR_FS_INFO_PPC_MASK_PARAMS_v1A_1D(c.Struct):
    SIZE = 8
    gpcId: Annotated[NvU32, 0]
    ppcMask: Annotated[NvU32, 4]
NV2080_CTRL_GRMGR_GR_FS_INFO_PPC_MASK_PARAMS_v1A_1D: TypeAlias = struct_NV2080_CTRL_GRMGR_GR_FS_INFO_PPC_MASK_PARAMS_v1A_1D
@c.record
class struct_NV2080_CTRL_GRMGR_GR_FS_INFO_PARTITION_CHIPLET_GPC_MAP_PARAMS_v1A_1D(c.Struct):
    SIZE = 12
    swizzId: Annotated[NvU32, 0]
    gpcId: Annotated[NvU32, 4]
    chipletGpcMap: Annotated[NvU32, 8]
NV2080_CTRL_GRMGR_GR_FS_INFO_PARTITION_CHIPLET_GPC_MAP_PARAMS_v1A_1D: TypeAlias = struct_NV2080_CTRL_GRMGR_GR_FS_INFO_PARTITION_CHIPLET_GPC_MAP_PARAMS_v1A_1D
@c.record
class struct_NV2080_CTRL_GRMGR_GR_FS_INFO_CHIPLET_SYSPIPE_MASK_PARAMS_v1A_1D(c.Struct):
    SIZE = 4
    chipletSyspipeMask: Annotated[NvU32, 0]
NV2080_CTRL_GRMGR_GR_FS_INFO_CHIPLET_SYSPIPE_MASK_PARAMS_v1A_1D: TypeAlias = struct_NV2080_CTRL_GRMGR_GR_FS_INFO_CHIPLET_SYSPIPE_MASK_PARAMS_v1A_1D
# Variable-length id list: physSyspipeIdCount gives how many of the 8 slots are valid.
@c.record
class struct_NV2080_CTRL_GRMGR_GR_FS_INFO_PARTITION_CHIPLET_SYSPIPE_IDS_PARAMS_v1A_1D(c.Struct):
    SIZE = 12
    swizzId: Annotated[NvU16, 0]
    physSyspipeIdCount: Annotated[NvU16, 2]
    physSyspipeId: Annotated[c.Array[NvU8, Literal[8]], 4]
NV2080_CTRL_GRMGR_GR_FS_INFO_PARTITION_CHIPLET_SYSPIPE_IDS_PARAMS_v1A_1D: TypeAlias = struct_NV2080_CTRL_GRMGR_GR_FS_INFO_PARTITION_CHIPLET_SYSPIPE_IDS_PARAMS_v1A_1D
@c.record
class struct_NV2080_CTRL_GRMGR_GR_FS_INFO_PROFILER_MON_GPC_MASK_PARAMS_v1A_1D(c.Struct):
    SIZE = 12
    swizzId: Annotated[NvU32, 0]
    grIdx: Annotated[NvU32, 4]
    gpcEnMask: Annotated[NvU32, 8]
NV2080_CTRL_GRMGR_GR_FS_INFO_PROFILER_MON_GPC_MASK_PARAMS_v1A_1D: TypeAlias = struct_NV2080_CTRL_GRMGR_GR_FS_INFO_PROFILER_MON_GPC_MASK_PARAMS_v1A_1D
@c.record
class struct_NV2080_CTRL_GRMGR_GR_FS_INFO_PARTITION_SYSPIPE_ID_PARAMS_v1A_1D(c.Struct):
    SIZE = 4
    syspipeId: Annotated[NvU32, 0]
NV2080_CTRL_GRMGR_GR_FS_INFO_PARTITION_SYSPIPE_ID_PARAMS_v1A_1D: TypeAlias = struct_NV2080_CTRL_GRMGR_GR_FS_INFO_PARTITION_SYSPIPE_ID_PARAMS_v1A_1D
@c.record
class struct_NV2080_CTRL_GRMGR_GR_FS_INFO_ROP_MASK_PARAMS_v1A_1D(c.Struct):
    SIZE = 8
    gpcId: Annotated[NvU32, 0]
    ropMask: Annotated[NvU32, 4]
NV2080_CTRL_GRMGR_GR_FS_INFO_ROP_MASK_PARAMS_v1A_1D: TypeAlias = struct_NV2080_CTRL_GRMGR_GR_FS_INFO_ROP_MASK_PARAMS_v1A_1D
rpc_ctrl_grmgr_get_gr_fs_info_v1A_1D: TypeAlias = struct_rpc_ctrl_grmgr_get_gr_fs_info_v1A_1D
rpc_ctrl_grmgr_get_gr_fs_info_v: TypeAlias = struct_rpc_ctrl_grmgr_get_gr_fs_info_v1A_1D
# RPC message for NVA06F_CTRL_CMD_STOP_CHANNEL: handles + a single immediate flag.
@c.record
class struct_rpc_ctrl_stop_channel_v1A_1E(c.Struct):
    SIZE = 12
    hClient: Annotated[NvHandle, 0]
    hObject: Annotated[NvHandle, 4]
    params: Annotated[NVA06F_CTRL_STOP_CHANNEL_PARAMS_v1A_1E, 8]
@c.record
class struct_NVA06F_CTRL_STOP_CHANNEL_PARAMS_v1A_1E(c.Struct):
    SIZE = 1
    bImmediate: Annotated[NvBool, 0]  # single-byte boolean payload
NVA06F_CTRL_STOP_CHANNEL_PARAMS_v1A_1E: TypeAlias = struct_NVA06F_CTRL_STOP_CHANNEL_PARAMS_v1A_1E
rpc_ctrl_stop_channel_v1A_1E: TypeAlias = struct_rpc_ctrl_stop_channel_v1A_1E
rpc_ctrl_stop_channel_v: TypeAlias = struct_rpc_ctrl_stop_channel_v1A_1E
# RPC message for GR PC-sampling mode control.
@c.record
class struct_rpc_ctrl_gr_pc_sampling_mode_v1A_1F(c.Struct):
    SIZE = 32
    hClient: Annotated[NvHandle, 0]
    hObject: Annotated[NvHandle, 4]
    params: Annotated[NV2080_CTRL_GR_PC_SAMPLING_MODE_PARAMS_v1A_1F, 8]
@c.record
class struct_NV2080_CTRL_GR_PC_SAMPLING_MODE_PARAMS_v1A_1F(c.Struct):
    SIZE = 24
    hChannel: Annotated[NvHandle, 0]
    samplingMode: Annotated[NvU32, 4]
    grRouteInfo: Annotated[NV2080_CTRL_GR_ROUTE_INFO_v12_01, 8]  # GR engine routing info (defined elsewhere in this file)
NV2080_CTRL_GR_PC_SAMPLING_MODE_PARAMS_v1A_1F: TypeAlias = struct_NV2080_CTRL_GR_PC_SAMPLING_MODE_PARAMS_v1A_1F
rpc_ctrl_gr_pc_sampling_mode_v1A_1F: TypeAlias = struct_rpc_ctrl_gr_pc_sampling_mode_v1A_1F
rpc_ctrl_gr_pc_sampling_mode_v: TypeAlias = struct_rpc_ctrl_gr_pc_sampling_mode_v1A_1F
# RPC message for querying rated-TDP arbitration status: RM internal state,
# the arbitrated output action, and the per-client input actions.
@c.record
class struct_rpc_ctrl_perf_rated_tdp_get_status_v1A_1F(c.Struct):
    SIZE = 40
    hClient: Annotated[NvHandle, 0]
    hObject: Annotated[NvHandle, 4]
    params: Annotated[NV2080_CTRL_PERF_RATED_TDP_STATUS_PARAMS_v1A_1F, 8]
@c.record
class struct_NV2080_CTRL_PERF_RATED_TDP_STATUS_PARAMS_v1A_1F(c.Struct):
    SIZE = 32
    rm: Annotated[PERF_RATED_TDP_RM_INTERNAL_STATE_STRUCT_v1A_1F, 0]
    output: Annotated[NV2080_CTRL_PERF_RATED_TDP_ACTION, 8]
    inputs: Annotated[c.Array[NV2080_CTRL_PERF_RATED_TDP_ACTION, Literal[5]], 12]  # one per TDP client (5 clients — see CLIENT enum below in the file)
NV2080_CTRL_PERF_RATED_TDP_STATUS_PARAMS_v1A_1F: TypeAlias = struct_NV2080_CTRL_PERF_RATED_TDP_STATUS_PARAMS_v1A_1F
@c.record
class struct_PERF_RATED_TDP_RM_INTERNAL_STATE_STRUCT_v1A_1F(c.Struct):
    SIZE = 8
    clientActiveMask: Annotated[NvU32, 0]
    bRegkeyLimitRatedTdp: Annotated[NvU8, 4]
PERF_RATED_TDP_RM_INTERNAL_STATE_STRUCT_v1A_1F: TypeAlias = struct_PERF_RATED_TDP_RM_INTERNAL_STATE_STRUCT_v1A_1F
# 32-bit enum of rated-TDP actions a client may request.
class enum_NV2080_CTRL_PERF_RATED_TDP_ACTION(Annotated[int, ctypes.c_uint32], c.Enum): pass
NV2080_CTRL_PERF_RATED_TDP_ACTION_DEFAULT = enum_NV2080_CTRL_PERF_RATED_TDP_ACTION.define('NV2080_CTRL_PERF_RATED_TDP_ACTION_DEFAULT', 0)
NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_EXCEED = enum_NV2080_CTRL_PERF_RATED_TDP_ACTION.define('NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_EXCEED', 1)
NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_LIMIT = enum_NV2080_CTRL_PERF_RATED_TDP_ACTION.define('NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_LIMIT', 2)
NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_LOCK = enum_NV2080_CTRL_PERF_RATED_TDP_ACTION.define('NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_LOCK', 3)
NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_FLOOR = enum_NV2080_CTRL_PERF_RATED_TDP_ACTION.define('NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_FLOOR', 4)
NV2080_CTRL_PERF_RATED_TDP_ACTION: TypeAlias = enum_NV2080_CTRL_PERF_RATED_TDP_ACTION
rpc_ctrl_perf_rated_tdp_get_status_v1A_1F: TypeAlias = struct_rpc_ctrl_perf_rated_tdp_get_status_v1A_1F
rpc_ctrl_perf_rated_tdp_get_status_v: TypeAlias = struct_rpc_ctrl_perf_rated_tdp_get_status_v1A_1F
# RPC message to set one client's rated-TDP action request.
@c.record
class struct_rpc_ctrl_perf_rated_tdp_set_control_v1A_1F(c.Struct):
    SIZE = 16
    hClient: Annotated[NvHandle, 0]
    hObject: Annotated[NvHandle, 4]
    params: Annotated[NV2080_CTRL_PERF_RATED_TDP_CONTROL_PARAMS_v1A_1F, 8]
@c.record
class struct_NV2080_CTRL_PERF_RATED_TDP_CONTROL_PARAMS_v1A_1F(c.Struct):
    SIZE = 8
    client: Annotated[NV2080_CTRL_PERF_RATED_TDP_CLIENT, 0]
    input: Annotated[NV2080_CTRL_PERF_RATED_TDP_ACTION, 4]
NV2080_CTRL_PERF_RATED_TDP_CONTROL_PARAMS_v1A_1F: TypeAlias = struct_NV2080_CTRL_PERF_RATED_TDP_CONTROL_PARAMS_v1A_1F
# 32-bit enum of rated-TDP arbitration clients; NUM_CLIENTS is the count sentinel.
class enum_NV2080_CTRL_PERF_RATED_TDP_CLIENT(Annotated[int, ctypes.c_uint32], c.Enum): pass
NV2080_CTRL_PERF_RATED_TDP_CLIENT_RM = enum_NV2080_CTRL_PERF_RATED_TDP_CLIENT.define('NV2080_CTRL_PERF_RATED_TDP_CLIENT_RM', 0)
NV2080_CTRL_PERF_RATED_TDP_CLIENT_WAR_BUG_1785342 = enum_NV2080_CTRL_PERF_RATED_TDP_CLIENT.define('NV2080_CTRL_PERF_RATED_TDP_CLIENT_WAR_BUG_1785342', 1)
NV2080_CTRL_PERF_RATED_TDP_CLIENT_GLOBAL = enum_NV2080_CTRL_PERF_RATED_TDP_CLIENT.define('NV2080_CTRL_PERF_RATED_TDP_CLIENT_GLOBAL', 2)
NV2080_CTRL_PERF_RATED_TDP_CLIENT_OS = enum_NV2080_CTRL_PERF_RATED_TDP_CLIENT.define('NV2080_CTRL_PERF_RATED_TDP_CLIENT_OS', 3)
NV2080_CTRL_PERF_RATED_TDP_CLIENT_PROFILE = enum_NV2080_CTRL_PERF_RATED_TDP_CLIENT.define('NV2080_CTRL_PERF_RATED_TDP_CLIENT_PROFILE', 4)
NV2080_CTRL_PERF_RATED_TDP_CLIENT_NUM_CLIENTS = enum_NV2080_CTRL_PERF_RATED_TDP_CLIENT.define('NV2080_CTRL_PERF_RATED_TDP_CLIENT_NUM_CLIENTS', 5)
NV2080_CTRL_PERF_RATED_TDP_CLIENT: TypeAlias = enum_NV2080_CTRL_PERF_RATED_TDP_CLIENT
rpc_ctrl_perf_rated_tdp_set_control_v1A_1F: TypeAlias = struct_rpc_ctrl_perf_rated_tdp_set_control_v1A_1F
rpc_ctrl_perf_rated_tdp_set_control_v: TypeAlias = struct_rpc_ctrl_perf_rated_tdp_set_control_v1A_1F
# RPC message toggling GR tick frequency (max vs. default) via a 1-byte flag.
@c.record
class struct_rpc_ctrl_timer_set_gr_tick_freq_v1A_1F(c.Struct):
    SIZE = 12
    hClient: Annotated[NvHandle, 0]
    hObject: Annotated[NvHandle, 4]
    params: Annotated[NV2080_CTRL_CMD_TIMER_SET_GR_TICK_FREQ_PARAMS_v1A_1F, 8]
@c.record
class struct_NV2080_CTRL_CMD_TIMER_SET_GR_TICK_FREQ_PARAMS_v1A_1F(c.Struct):
    SIZE = 1
    bSetMaxFreq: Annotated[NvBool, 0]
NV2080_CTRL_CMD_TIMER_SET_GR_TICK_FREQ_PARAMS_v1A_1F: TypeAlias = struct_NV2080_CTRL_CMD_TIMER_SET_GR_TICK_FREQ_PARAMS_v1A_1F
rpc_ctrl_timer_set_gr_tick_freq_v1A_1F: TypeAlias = struct_rpc_ctrl_timer_set_gr_tick_freq_v1A_1F
rpc_ctrl_timer_set_gr_tick_freq_v: TypeAlias = struct_rpc_ctrl_timer_set_gr_tick_freq_v1A_1F
# RPC message to free a profiler (PMA) stream identified by channel index.
@c.record
class struct_rpc_ctrl_free_pma_stream_v1A_1F(c.Struct):
    SIZE = 12
    hClient: Annotated[NvHandle, 0]
    hObject: Annotated[NvHandle, 4]
    params: Annotated[NVB0CC_CTRL_FREE_PMA_STREAM_PARAMS_v1A_1F, 8]
@c.record
class struct_NVB0CC_CTRL_FREE_PMA_STREAM_PARAMS_v1A_1F(c.Struct):
    SIZE = 4
    pmaChannelIdx: Annotated[NvU32, 0]
NVB0CC_CTRL_FREE_PMA_STREAM_PARAMS_v1A_1F: TypeAlias = struct_NVB0CC_CTRL_FREE_PMA_STREAM_PARAMS_v1A_1F
rpc_ctrl_free_pma_stream_v1A_1F: TypeAlias = struct_rpc_ctrl_free_pma_stream_v1A_1F
rpc_ctrl_free_pma_stream_v: TypeAlias = struct_rpc_ctrl_free_pma_stream_v1A_1F
# RPC message describing a VF zombie-subcontext page-directory base: a memory
# region (base/size) plus its address space and cache attribute.
@c.record
class struct_rpc_ctrl_fifo_setup_vf_zombie_subctx_pdb_v1A_23(c.Struct):
    SIZE = 32
    hClient: Annotated[NvHandle, 0]
    hObject: Annotated[NvHandle, 4]
    params: Annotated[NV2080_CTRL_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB_PARAMS_v1A_23, 8]
@c.record
class struct_NV2080_CTRL_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB_PARAMS_v1A_23(c.Struct):
    SIZE = 24
    base: Annotated[NvU64, 0]
    size: Annotated[NvU64, 8]
    addressSpace: Annotated[NvU32, 16]
    cacheAttrib: Annotated[NvU32, 20]
NV2080_CTRL_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB_PARAMS_v1A_23: TypeAlias = struct_NV2080_CTRL_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB_PARAMS_v1A_23
rpc_ctrl_fifo_setup_vf_zombie_subctx_pdb_v1A_23: TypeAlias = struct_rpc_ctrl_fifo_setup_vf_zombie_subctx_pdb_v1A_23
rpc_ctrl_fifo_setup_vf_zombie_subctx_pdb_v: TypeAlias = struct_rpc_ctrl_fifo_setup_vf_zombie_subctx_pdb_v1A_23
# RPC message enabling/disabling single-step debugging on one SM.
@c.record
class struct_rpc_ctrl_dbg_set_single_sm_single_step_v1C_02(c.Struct):
    SIZE = 16
    hClient: Annotated[NvHandle, 0]
    hObject: Annotated[NvHandle, 4]
    params: Annotated[NV83DE_CTRL_DEBUG_SET_SINGLE_SM_SINGLE_STEP_PARAMS_v1C_02, 8]
@c.record
class struct_NV83DE_CTRL_DEBUG_SET_SINGLE_SM_SINGLE_STEP_PARAMS_v1C_02(c.Struct):
    SIZE = 8
    smID: Annotated[NvU32, 0]
    bSingleStep: Annotated[NvBool, 4]
NV83DE_CTRL_DEBUG_SET_SINGLE_SM_SINGLE_STEP_PARAMS_v1C_02: TypeAlias = struct_NV83DE_CTRL_DEBUG_SET_SINGLE_SM_SINGLE_STEP_PARAMS_v1C_02
rpc_ctrl_dbg_set_single_sm_single_step_v1C_02: TypeAlias = struct_rpc_ctrl_dbg_set_single_sm_single_step_v1C_02
rpc_ctrl_dbg_set_single_sm_single_step_v: TypeAlias = struct_rpc_ctrl_dbg_set_single_sm_single_step_v1C_02
# RPC message querying a channel group's TPC partition mode. The same params
# record is reused by the "set" RPC immediately below in this file.
@c.record
class struct_rpc_ctrl_gr_get_tpc_partition_mode_v1C_04(c.Struct):
    SIZE = 40
    hClient: Annotated[NvHandle, 0]
    hObject: Annotated[NvHandle, 4]
    params: Annotated[NV0080_CTRL_GR_TPC_PARTITION_MODE_PARAMS_v1C_04, 8]
@c.record
class struct_NV0080_CTRL_GR_TPC_PARTITION_MODE_PARAMS_v1C_04(c.Struct):
    SIZE = 32
    hChannelGroup: Annotated[NvHandle, 0]
    mode: Annotated[NV0080_CTRL_GR_TPC_PARTITION_MODE, 4]
    bEnableAllTpcs: Annotated[NvBool, 8]
    grRouteInfo: Annotated[NV2080_CTRL_GR_ROUTE_INFO_v12_01, 16]  # 8-byte aligned; bytes 9-15 are padding
NV0080_CTRL_GR_TPC_PARTITION_MODE_PARAMS_v1C_04: TypeAlias = struct_NV0080_CTRL_GR_TPC_PARTITION_MODE_PARAMS_v1C_04
# 32-bit enum of TPC partition modes.
class enum_NV0080_CTRL_GR_TPC_PARTITION_MODE(Annotated[int, ctypes.c_uint32], c.Enum): pass
NV0080_CTRL_GR_TPC_PARTITION_MODE_NONE = enum_NV0080_CTRL_GR_TPC_PARTITION_MODE.define('NV0080_CTRL_GR_TPC_PARTITION_MODE_NONE', 0)
NV0080_CTRL_GR_TPC_PARTITION_MODE_STATIC = enum_NV0080_CTRL_GR_TPC_PARTITION_MODE.define('NV0080_CTRL_GR_TPC_PARTITION_MODE_STATIC', 1)
NV0080_CTRL_GR_TPC_PARTITION_MODE_DYNAMIC = enum_NV0080_CTRL_GR_TPC_PARTITION_MODE.define('NV0080_CTRL_GR_TPC_PARTITION_MODE_DYNAMIC', 2)
NV0080_CTRL_GR_TPC_PARTITION_MODE: TypeAlias = enum_NV0080_CTRL_GR_TPC_PARTITION_MODE
rpc_ctrl_gr_get_tpc_partition_mode_v1C_04: TypeAlias = struct_rpc_ctrl_gr_get_tpc_partition_mode_v1C_04
rpc_ctrl_gr_get_tpc_partition_mode_v: TypeAlias = struct_rpc_ctrl_gr_get_tpc_partition_mode_v1C_04
# RPC message setting a channel group's TPC partition mode; shares the
# NV0080_CTRL_GR_TPC_PARTITION_MODE_PARAMS_v1C_04 payload with the "get" RPC.
@c.record
class struct_rpc_ctrl_gr_set_tpc_partition_mode_v1C_04(c.Struct):
    SIZE = 40
    hClient: Annotated[NvHandle, 0]
    hObject: Annotated[NvHandle, 4]
    params: Annotated[NV0080_CTRL_GR_TPC_PARTITION_MODE_PARAMS_v1C_04, 8]
rpc_ctrl_gr_set_tpc_partition_mode_v1C_04: TypeAlias = struct_rpc_ctrl_gr_set_tpc_partition_mode_v1C_04
rpc_ctrl_gr_set_tpc_partition_mode_v: TypeAlias = struct_rpc_ctrl_gr_set_tpc_partition_mode_v1C_04
# RPC message promoting fault-method buffers: up to two memory descriptors plus
# their BAR2 addresses; numValidEntries says how many of the two are used.
@c.record
class struct_rpc_ctrl_internal_promote_fault_method_buffers_v1E_07(c.Struct):
    SIZE = 96
    hClient: Annotated[NvHandle, 0]
    hObject: Annotated[NvHandle, 4]
    params: Annotated[NVA06C_CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS_PARAMS_v1E_07, 8]
@c.record
class struct_NVA06C_CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS_PARAMS_v1E_07(c.Struct):
    SIZE = 88
    methodBufferMemdesc: Annotated[c.Array[NV2080_CTRL_INTERNAL_MEMDESC_INFO_v1E_07, Literal[2]], 0]
    bar2Addr: Annotated[c.Array[NvU64, Literal[2]], 64]
    numValidEntries: Annotated[NvU32, 80]
NVA06C_CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS_PARAMS_v1E_07: TypeAlias = struct_NVA06C_CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS_PARAMS_v1E_07
# Serialized memory descriptor: location, extent, alignment, and caching.
@c.record
class struct_NV2080_CTRL_INTERNAL_MEMDESC_INFO_v1E_07(c.Struct):
    SIZE = 32
    base: Annotated[NvU64, 0]
    size: Annotated[NvU64, 8]
    alignment: Annotated[NvU64, 16]
    addressSpace: Annotated[NvU32, 24]
    cpuCacheAttrib: Annotated[NvU32, 28]
NV2080_CTRL_INTERNAL_MEMDESC_INFO_v1E_07: TypeAlias = struct_NV2080_CTRL_INTERNAL_MEMDESC_INFO_v1E_07
rpc_ctrl_internal_promote_fault_method_buffers_v1E_07: TypeAlias = struct_rpc_ctrl_internal_promote_fault_method_buffers_v1E_07
rpc_ctrl_internal_promote_fault_method_buffers_v: TypeAlias = struct_rpc_ctrl_internal_promote_fault_method_buffers_v1E_07
# RPC message flagging whether any ZBC surfaces exist (1-byte boolean payload).
@c.record
class struct_rpc_ctrl_internal_memsys_set_zbc_referenced_v1F_05(c.Struct):
    SIZE = 12
    hClient: Annotated[NvHandle, 0]
    hObject: Annotated[NvHandle, 4]
    params: Annotated[NV2080_CTRL_CMD_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_v1F_05, 8]
@c.record
class struct_NV2080_CTRL_CMD_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_v1F_05(c.Struct):
    SIZE = 1
    bZbcSurfacesExist: Annotated[NvBool, 0]
NV2080_CTRL_CMD_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_v1F_05: TypeAlias = struct_NV2080_CTRL_CMD_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_v1F_05
rpc_ctrl_internal_memsys_set_zbc_referenced_v1F_05: TypeAlias = struct_rpc_ctrl_internal_memsys_set_zbc_referenced_v1F_05
rpc_ctrl_internal_memsys_set_zbc_referenced_v: TypeAlias = struct_rpc_ctrl_internal_memsys_set_zbc_referenced_v1F_05
# RPC message describing fabric memory: a window of up to 512 PFNs starting at
# `offset`; numPfns gives how many entries of pfnArray are valid.
@c.record
class struct_rpc_ctrl_fabric_memory_describe_v1E_0C(c.Struct):
    SIZE = 2080
    hClient: Annotated[NvHandle, 0]
    hObject: Annotated[NvHandle, 4]
    params: Annotated[NV00F8_CTRL_DESCRIBE_PARAMS_v1E_0C, 8]
@c.record
class struct_NV00F8_CTRL_DESCRIBE_PARAMS_v1E_0C(c.Struct):
    SIZE = 2072
    offset: Annotated[NvU64, 0]
    totalPfns: Annotated[NvU64, 8]
    pfnArray: Annotated[c.Array[NvU32, Literal[512]], 16]
    numPfns: Annotated[NvU32, 2064]
NV00F8_CTRL_DESCRIBE_PARAMS_v1E_0C: TypeAlias = struct_NV00F8_CTRL_DESCRIBE_PARAMS_v1E_0C
rpc_ctrl_fabric_memory_describe_v1E_0C: TypeAlias = struct_rpc_ctrl_fabric_memory_describe_v1E_0C
rpc_ctrl_fabric_memory_describe_v: TypeAlias = struct_rpc_ctrl_fabric_memory_describe_v1E_0C
# RPC message returning fabric (FLA) memory usage totals.
@c.record
class struct_rpc_ctrl_fabric_mem_stats_v1E_0C(c.Struct):
    SIZE = 24
    hClient: Annotated[NvHandle, 0]
    hObject: Annotated[NvHandle, 4]
    params: Annotated[NV2080_CTRL_FLA_GET_FABRIC_MEM_STATS_PARAMS_v1E_0C, 8]
@c.record
class struct_NV2080_CTRL_FLA_GET_FABRIC_MEM_STATS_PARAMS_v1E_0C(c.Struct):
    SIZE = 16
    totalSize: Annotated[NvU64, 0]
    freeSize: Annotated[NvU64, 8]
NV2080_CTRL_FLA_GET_FABRIC_MEM_STATS_PARAMS_v1E_0C: TypeAlias = struct_NV2080_CTRL_FLA_GET_FABRIC_MEM_STATS_PARAMS_v1E_0C
rpc_ctrl_fabric_mem_stats_v1E_0C: TypeAlias = struct_rpc_ctrl_fabric_mem_stats_v1E_0C
rpc_ctrl_fabric_mem_stats_v: TypeAlias = struct_rpc_ctrl_fabric_mem_stats_v1E_0C
# RPC message establishing a P2P mapping to a remote GPU (v21_03 layout).
# Superseded by v29_08 below, which adds bEgmPeer; only the versioned alias
# is emitted here — the unversioned "_v" alias points at v29_08.
@c.record
class struct_rpc_ctrl_bus_set_p2p_mapping_v21_03(c.Struct):
    SIZE = 44
    hClient: Annotated[NvHandle, 0]
    hObject: Annotated[NvHandle, 4]
    params: Annotated[NV2080_CTRL_BUS_SET_P2P_MAPPING_PARAMS_v21_03, 8]
@c.record
class struct_NV2080_CTRL_BUS_SET_P2P_MAPPING_PARAMS_v21_03(c.Struct):
    SIZE = 36
    connectionType: Annotated[NvU32, 0]
    peerId: Annotated[NvU32, 4]
    bSpaAccessOnly: Annotated[NvU32, 8]
    bUseUuid: Annotated[NvBool, 12]  # selects remoteGpuUuid vs remoteGpuId identification
    remoteGpuId: Annotated[NvU32, 16]
    remoteGpuUuid: Annotated[c.Array[NvU8, Literal[16]], 20]
NV2080_CTRL_BUS_SET_P2P_MAPPING_PARAMS_v21_03: TypeAlias = struct_NV2080_CTRL_BUS_SET_P2P_MAPPING_PARAMS_v21_03
rpc_ctrl_bus_set_p2p_mapping_v21_03: TypeAlias = struct_rpc_ctrl_bus_set_p2p_mapping_v21_03
# v29_08 revision of the P2P-mapping RPC: adds bEgmPeer, shifting later fields
# by 4 bytes relative to v21_03. The unversioned alias resolves here.
@c.record
class struct_rpc_ctrl_bus_set_p2p_mapping_v29_08(c.Struct):
    SIZE = 48
    hClient: Annotated[NvHandle, 0]
    hObject: Annotated[NvHandle, 4]
    params: Annotated[NV2080_CTRL_BUS_SET_P2P_MAPPING_PARAMS_v29_08, 8]
@c.record
class struct_NV2080_CTRL_BUS_SET_P2P_MAPPING_PARAMS_v29_08(c.Struct):
    SIZE = 40
    connectionType: Annotated[NvU32, 0]
    peerId: Annotated[NvU32, 4]
    bEgmPeer: Annotated[NvBool, 8]
    bSpaAccessOnly: Annotated[NvU32, 12]
    bUseUuid: Annotated[NvBool, 16]  # selects remoteGpuUuid vs remoteGpuId identification
    remoteGpuId: Annotated[NvU32, 20]
    remoteGpuUuid: Annotated[c.Array[NvU8, Literal[16]], 24]
NV2080_CTRL_BUS_SET_P2P_MAPPING_PARAMS_v29_08: TypeAlias = struct_NV2080_CTRL_BUS_SET_P2P_MAPPING_PARAMS_v29_08
rpc_ctrl_bus_set_p2p_mapping_v29_08: TypeAlias = struct_rpc_ctrl_bus_set_p2p_mapping_v29_08
rpc_ctrl_bus_set_p2p_mapping_v: TypeAlias = struct_rpc_ctrl_bus_set_p2p_mapping_v29_08
# RPC message tearing down a P2P mapping to a remote GPU.
@c.record
class struct_rpc_ctrl_bus_unset_p2p_mapping_v21_03(c.Struct):
    SIZE = 40
    hClient: Annotated[NvHandle, 0]
    hObject: Annotated[NvHandle, 4]
    params: Annotated[NV2080_CTRL_BUS_UNSET_P2P_MAPPING_PARAMS_v21_03, 8]
@c.record
class struct_NV2080_CTRL_BUS_UNSET_P2P_MAPPING_PARAMS_v21_03(c.Struct):
    SIZE = 32
    connectionType: Annotated[NvU32, 0]
    peerId: Annotated[NvU32, 4]
    bUseUuid: Annotated[NvBool, 8]  # selects remoteGpuUuid vs remoteGpuId identification
    remoteGpuId: Annotated[NvU32, 12]
    remoteGpuUuid: Annotated[c.Array[NvU8, Literal[16]], 16]
NV2080_CTRL_BUS_UNSET_P2P_MAPPING_PARAMS_v21_03: TypeAlias = struct_NV2080_CTRL_BUS_UNSET_P2P_MAPPING_PARAMS_v21_03
rpc_ctrl_bus_unset_p2p_mapping_v21_03: TypeAlias = struct_rpc_ctrl_bus_unset_p2p_mapping_v21_03
rpc_ctrl_bus_unset_p2p_mapping_v: TypeAlias = struct_rpc_ctrl_bus_unset_p2p_mapping_v21_03
# RPC message for NV2080_CTRL_CMD_GPU_GET_INFO_V2: up to 65 index/data pairs;
# gpuInfoListSize gives the number of valid entries.
@c.record
class struct_rpc_ctrl_gpu_get_info_v2_v25_11(c.Struct):
    SIZE = 532
    hClient: Annotated[NvHandle, 0]
    hObject: Annotated[NvHandle, 4]
    params: Annotated[NV2080_CTRL_GPU_GET_INFO_V2_PARAMS_v25_11, 8]
@c.record
class struct_NV2080_CTRL_GPU_GET_INFO_V2_PARAMS_v25_11(c.Struct):
    SIZE = 524
    gpuInfoListSize: Annotated[NvU32, 0]
    gpuInfoList: Annotated[c.Array[NV2080_CTRL_GPU_INFO_v25_11, Literal[65]], 4]
NV2080_CTRL_GPU_GET_INFO_V2_PARAMS_v25_11: TypeAlias = struct_NV2080_CTRL_GPU_GET_INFO_V2_PARAMS_v25_11
# One info entry: attribute index + its value.
@c.record
class struct_NV2080_CTRL_GPU_INFO_v25_11(c.Struct):
    SIZE = 8
    index: Annotated[NvU32, 0]
    data: Annotated[NvU32, 4]
NV2080_CTRL_GPU_INFO_v25_11: TypeAlias = struct_NV2080_CTRL_GPU_INFO_v25_11
rpc_ctrl_gpu_get_info_v2_v25_11: TypeAlias = struct_rpc_ctrl_gpu_get_info_v2_v25_11
rpc_ctrl_gpu_get_info_v2_v: TypeAlias = struct_rpc_ctrl_gpu_get_info_v2_v25_11
# RPC message (no handle header) describing a GPM guest buffer: guest page
# frame number, partition/compute ids, buffer size, and a map/unmap flag.
@c.record
class struct_rpc_update_gpm_guest_buffer_info_v27_01(c.Struct):
    SIZE = 24
    gpfn: Annotated[NvU64, 0]
    swizzId: Annotated[NvU32, 8]
    computeId: Annotated[NvU32, 12]
    bufSize: Annotated[NvU32, 16]
    bMap: Annotated[NvBool, 20]
rpc_update_gpm_guest_buffer_info_v27_01: TypeAlias = struct_rpc_update_gpm_guest_buffer_info_v27_01
rpc_update_gpm_guest_buffer_info_v: TypeAlias = struct_rpc_update_gpm_guest_buffer_info_v27_01
# RPC message quiescing a PMA channel, optionally requiring membytes polling.
@c.record
class struct_rpc_ctrl_internal_quiesce_pma_channel_v1C_08(c.Struct):
    SIZE = 16
    hClient: Annotated[NvHandle, 0]
    hObject: Annotated[NvHandle, 4]
    params: Annotated[NVB0CC_CTRL_INTERNAL_QUIESCE_PMA_CHANNEL_PARAMS_v1C_08, 8]
@c.record
class struct_NVB0CC_CTRL_INTERNAL_QUIESCE_PMA_CHANNEL_PARAMS_v1C_08(c.Struct):
    SIZE = 8
    pmaChannelIdx: Annotated[NvU32, 0]
    bMembytesPollingRequired: Annotated[NvBool, 4]
NVB0CC_CTRL_INTERNAL_QUIESCE_PMA_CHANNEL_PARAMS_v1C_08: TypeAlias = struct_NVB0CC_CTRL_INTERNAL_QUIESCE_PMA_CHANNEL_PARAMS_v1C_08
rpc_ctrl_internal_quiesce_pma_channel_v1C_08: TypeAlias = struct_rpc_ctrl_internal_quiesce_pma_channel_v1C_08
rpc_ctrl_internal_quiesce_pma_channel_v: TypeAlias = struct_rpc_ctrl_internal_quiesce_pma_channel_v1C_08
# RPC message promoting an SR-IOV PMA stream: channel index plus the guest
# virtual addresses/sizes of the PMA buffer, membytes, and HWPM IB.
@c.record
class struct_rpc_ctrl_internal_sriov_promote_pma_stream_v1C_0C(c.Struct):
    SIZE = 56
    hClient: Annotated[NvHandle, 0]
    hObject: Annotated[NvHandle, 4]
    params: Annotated[NVB0CC_CTRL_INTERNAL_SRIOV_PROMOTE_PMA_STREAM_PARAMS_v1C_0C, 8]
@c.record
class struct_NVB0CC_CTRL_INTERNAL_SRIOV_PROMOTE_PMA_STREAM_PARAMS_v1C_0C(c.Struct):
    SIZE = 48
    pmaChannelIdx: Annotated[NvU32, 0]
    pmaBufferVA: Annotated[NvU64, 8]  # 8-byte aligned; bytes 4-7 are padding
    pmaBufferSize: Annotated[NvU64, 16]
    membytesVA: Annotated[NvU64, 24]
    hwpmIBPA: Annotated[NvU64, 32]
    hwpmIBAperture: Annotated[NvU8, 40]
NVB0CC_CTRL_INTERNAL_SRIOV_PROMOTE_PMA_STREAM_PARAMS_v1C_0C: TypeAlias = struct_NVB0CC_CTRL_INTERNAL_SRIOV_PROMOTE_PMA_STREAM_PARAMS_v1C_0C
rpc_ctrl_internal_sriov_promote_pma_stream_v1C_0C: TypeAlias = struct_rpc_ctrl_internal_sriov_promote_pma_stream_v1C_0C
rpc_ctrl_internal_sriov_promote_pma_stream_v: TypeAlias = struct_rpc_ctrl_internal_sriov_promote_pma_stream_v1C_0C
@c.record
class struct_rpc_ctrl_exec_partitions_create_v24_05(c.Struct):
SIZE = 436
hClient: Annotated[NvHandle, 0]
hObject: Annotated[NvHandle, 4]
status: Annotated[NvU32, 8]
execPartitionsCreate: Annotated[NVC637_CTRL_EXEC_PARTITIONS_CREATE_PARAMS_v24_05, 12]
@c.record
class struct_NVC637_CTRL_EXEC_PARTITIONS_CREATE_PARAMS_v24_05(c.Struct):
SIZE = 424
bQuery: Annotated[NvBool, 0]
execPartCount: Annotated[NvU32, 4]
execPartInfo: Annotated[c.Array[NVC637_CTRL_EXEC_PARTITIONS_INFO_v24_05, Literal[8]], 8]
execPartId: Annotated[c.Array[NvU32, Literal[8]], 392]
NVC637_CTRL_EXEC_PARTITIONS_CREATE_PARAMS_v24_05: TypeAlias = struct_NVC637_CTRL_EXEC_PARTITIONS_CREATE_PARAMS_v24_05
@c.record
class struct_NVC637_CTRL_EXEC_PARTITIONS_INFO_v24_05(c.Struct):
SIZE = 48
gpcCount: Annotated[NvU32, 0]
gfxGpcCount: Annotated[NvU32, 4]
veidCount: Annotated[NvU32, 8]
ceCount: Annotated[NvU32, 12]
nvEncCount: Annotated[NvU32, 16]
nvDecCount: Annotated[NvU32, 20]
nvJpgCount: Annotated[NvU32, 24]
ofaCount: Annotated[NvU32, 28]
sharedEngFlag: Annotated[NvU32, 32]
smCount: Annotated[NvU32, 36]
spanStart: Annotated[NvU32, 40]
computeSize: Annotated[NvU32, 44]
NVC637_CTRL_EXEC_PARTITIONS_INFO_v24_05: TypeAlias = struct_NVC637_CTRL_EXEC_PARTITIONS_INFO_v24_05
rpc_ctrl_exec_partitions_create_v24_05: TypeAlias = struct_rpc_ctrl_exec_partitions_create_v24_05
rpc_ctrl_exec_partitions_create_v: TypeAlias = struct_rpc_ctrl_exec_partitions_create_v24_05
@c.record
class struct_rpc_ctrl_fla_setup_instance_mem_block_v21_05(c.Struct):
SIZE = 24
hClient: Annotated[NvHandle, 0]
hObject: Annotated[NvHandle, 4]
params: Annotated[NV2080_CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK_PARAMS_v13_04, 8]
@c.record
class struct_NV2080_CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK_PARAMS_v13_04(c.Struct):
SIZE = 16
imbPhysAddr: Annotated[NvU64, 0]
addrSpace: Annotated[NvU32, 8]
flaAction: Annotated[NvU32, 12]
NV2080_CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK_PARAMS_v13_04: TypeAlias = struct_NV2080_CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK_PARAMS_v13_04
rpc_ctrl_fla_setup_instance_mem_block_v21_05: TypeAlias = struct_rpc_ctrl_fla_setup_instance_mem_block_v21_05
rpc_ctrl_fla_setup_instance_mem_block_v: TypeAlias = struct_rpc_ctrl_fla_setup_instance_mem_block_v21_05
@c.record
class struct_rpc_ctrl_get_total_hs_credits_v21_08(c.Struct):
SIZE = 12
hClient: Annotated[NvHandle, 0]
hObject: Annotated[NvHandle, 4]
params: Annotated[NVB0CC_CTRL_GET_TOTAL_HS_CREDITS_PARAMS_v21_08, 8]
@c.record
class struct_NVB0CC_CTRL_GET_TOTAL_HS_CREDITS_PARAMS_v21_08(c.Struct):
SIZE = 4
numCredits: Annotated[NvU32, 0]
NVB0CC_CTRL_GET_TOTAL_HS_CREDITS_PARAMS_v21_08: TypeAlias = struct_NVB0CC_CTRL_GET_TOTAL_HS_CREDITS_PARAMS_v21_08
rpc_ctrl_get_total_hs_credits_v21_08: TypeAlias = struct_rpc_ctrl_get_total_hs_credits_v21_08
rpc_ctrl_get_total_hs_credits_v: TypeAlias = struct_rpc_ctrl_get_total_hs_credits_v21_08
# Generated GSP RPC layout records (SIZE = byte size, Annotated[..., N] = byte offset).
# NOTE(review): appears auto-generated — confirm generator before hand-editing.
# RPC: read per-chiplet high-speed credit info for a PMA channel (up to 63 entries).
@c.record
class struct_rpc_ctrl_get_hs_credits_v21_08(c.Struct):
  SIZE = 264
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
  params: Annotated[NVB0CC_CTRL_GET_HS_CREDITS_PARAMS_v21_08, 8]
@c.record
class struct_NVB0CC_CTRL_GET_HS_CREDITS_PARAMS_v21_08(c.Struct):
  SIZE = 256
  pmaChannelIdx: Annotated[NvU8, 0]
  numEntries: Annotated[NvU8, 1]
  statusInfo: Annotated[NVB0CC_CTRL_PMA_STREAM_HS_CREDITS_STATUS_v21_08, 2]
  creditInfo: Annotated[c.Array[NVB0CC_CTRL_PMA_STREAM_HS_CREDITS_INFO_v21_08, Literal[63]], 4]
NVB0CC_CTRL_GET_HS_CREDITS_PARAMS_v21_08: TypeAlias = struct_NVB0CC_CTRL_GET_HS_CREDITS_PARAMS_v21_08
# Status/result pair shared by both the get- and set-credits calls below.
@c.record
class struct_NVB0CC_CTRL_PMA_STREAM_HS_CREDITS_STATUS_v21_08(c.Struct):
  SIZE = 2
  status: Annotated[NvU8, 0]
  entryIndex: Annotated[NvU8, 1]
NVB0CC_CTRL_PMA_STREAM_HS_CREDITS_STATUS_v21_08: TypeAlias = struct_NVB0CC_CTRL_PMA_STREAM_HS_CREDITS_STATUS_v21_08
@c.record
class struct_NVB0CC_CTRL_PMA_STREAM_HS_CREDITS_INFO_v21_08(c.Struct):
  SIZE = 4
  chipletType: Annotated[NvU8, 0]
  chipletIndex: Annotated[NvU8, 1]
  numCredits: Annotated[NvU16, 2]
NVB0CC_CTRL_PMA_STREAM_HS_CREDITS_INFO_v21_08: TypeAlias = struct_NVB0CC_CTRL_PMA_STREAM_HS_CREDITS_INFO_v21_08
rpc_ctrl_get_hs_credits_v21_08: TypeAlias = struct_rpc_ctrl_get_hs_credits_v21_08
rpc_ctrl_get_hs_credits_v: TypeAlias = struct_rpc_ctrl_get_hs_credits_v21_08
# RPC: reserve a HES (hardware event stream) unit of the given type.
@c.record
class struct_rpc_ctrl_reserve_hes_v29_07(c.Struct):
  SIZE = 16
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
  params: Annotated[NVB0CC_CTRL_RESERVE_HES_PARAMS_v29_07, 8]
@c.record
class struct_NVB0CC_CTRL_RESERVE_HES_PARAMS_v29_07(c.Struct):
  SIZE = 8
  type: Annotated[NvU32, 0]
  reserveParams: Annotated[NVB0CC_CTRL_HES_RESERVATION_UNION_v29_07, 4]
NVB0CC_CTRL_RESERVE_HES_PARAMS_v29_07: TypeAlias = struct_NVB0CC_CTRL_RESERVE_HES_PARAMS_v29_07
# C union modeled as a record with only one member visible here (cwd at offset 0).
@c.record
class struct_NVB0CC_CTRL_HES_RESERVATION_UNION_v29_07(c.Struct):
  SIZE = 1
  cwd: Annotated[NVB0CC_CTRL_RESERVE_HES_CWD_PARAMS_v29_07, 0]
NVB0CC_CTRL_HES_RESERVATION_UNION_v29_07: TypeAlias = struct_NVB0CC_CTRL_HES_RESERVATION_UNION_v29_07
@c.record
class struct_NVB0CC_CTRL_RESERVE_HES_CWD_PARAMS_v29_07(c.Struct):
  SIZE = 1
  ctxsw: Annotated[NvBool, 0]
NVB0CC_CTRL_RESERVE_HES_CWD_PARAMS_v29_07: TypeAlias = struct_NVB0CC_CTRL_RESERVE_HES_CWD_PARAMS_v29_07
rpc_ctrl_reserve_hes_v29_07: TypeAlias = struct_rpc_ctrl_reserve_hes_v29_07
rpc_ctrl_reserve_hes_v: TypeAlias = struct_rpc_ctrl_reserve_hes_v29_07
# RPC: release a previously reserved HES unit.
@c.record
class struct_rpc_ctrl_release_hes_v29_07(c.Struct):
  SIZE = 12
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
  params: Annotated[NVB0CC_CTRL_RELEASE_HES_PARAMS_v29_07, 8]
@c.record
class struct_NVB0CC_CTRL_RELEASE_HES_PARAMS_v29_07(c.Struct):
  SIZE = 4
  type: Annotated[NvU32, 0]
NVB0CC_CTRL_RELEASE_HES_PARAMS_v29_07: TypeAlias = struct_NVB0CC_CTRL_RELEASE_HES_PARAMS_v29_07
rpc_ctrl_release_hes_v29_07: TypeAlias = struct_rpc_ctrl_release_hes_v29_07
rpc_ctrl_release_hes_v: TypeAlias = struct_rpc_ctrl_release_hes_v29_07
# RPC: reserve / release the CCU profiler.
@c.record
class struct_rpc_ctrl_reserve_ccu_prof_v29_07(c.Struct):
  SIZE = 12
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
  params: Annotated[NVB0CC_CTRL_RESERVE_CCUPROF_PARAMS_v29_07, 8]
@c.record
class struct_NVB0CC_CTRL_RESERVE_CCUPROF_PARAMS_v29_07(c.Struct):
  SIZE = 1
  ctxsw: Annotated[NvBool, 0]
NVB0CC_CTRL_RESERVE_CCUPROF_PARAMS_v29_07: TypeAlias = struct_NVB0CC_CTRL_RESERVE_CCUPROF_PARAMS_v29_07
rpc_ctrl_reserve_ccu_prof_v29_07: TypeAlias = struct_rpc_ctrl_reserve_ccu_prof_v29_07
rpc_ctrl_reserve_ccu_prof_v: TypeAlias = struct_rpc_ctrl_reserve_ccu_prof_v29_07
@c.record
class struct_rpc_ctrl_release_ccu_prof_v29_07(c.Struct):
  SIZE = 8
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
rpc_ctrl_release_ccu_prof_v29_07: TypeAlias = struct_rpc_ctrl_release_ccu_prof_v29_07
rpc_ctrl_release_ccu_prof_v: TypeAlias = struct_rpc_ctrl_release_ccu_prof_v29_07
# RPC: write per-chiplet high-speed credit assignments (mirror of the get call above).
@c.record
class struct_rpc_ctrl_set_hs_credits_v21_08(c.Struct):
  SIZE = 264
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
  params: Annotated[NVB0CC_CTRL_SET_HS_CREDITS_PARAMS_v21_08, 8]
@c.record
class struct_NVB0CC_CTRL_SET_HS_CREDITS_PARAMS_v21_08(c.Struct):
  SIZE = 256
  pmaChannelIdx: Annotated[NvU8, 0]
  numEntries: Annotated[NvU8, 1]
  statusInfo: Annotated[NVB0CC_CTRL_PMA_STREAM_HS_CREDITS_STATUS_v21_08, 2]
  creditInfo: Annotated[c.Array[NVB0CC_CTRL_PMA_STREAM_HS_CREDITS_INFO_v21_08, Literal[63]], 4]
NVB0CC_CTRL_SET_HS_CREDITS_PARAMS_v21_08: TypeAlias = struct_NVB0CC_CTRL_SET_HS_CREDITS_PARAMS_v21_08
rpc_ctrl_set_hs_credits_v21_08: TypeAlias = struct_rpc_ctrl_set_hs_credits_v21_08
rpc_ctrl_set_hs_credits_v: TypeAlias = struct_rpc_ctrl_set_hs_credits_v21_08
# Generated GSP RPC layout records (SIZE = byte size, Annotated[..., N] = byte offset).
# NOTE(review): appears auto-generated — confirm generator before hand-editing.
# RPC: PM-area PC sampler command (opaque cmd word).
@c.record
class struct_rpc_ctrl_pm_area_pc_sampler_v21_0B(c.Struct):
  SIZE = 12
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
  cmd: Annotated[NvU32, 8]
rpc_ctrl_pm_area_pc_sampler_v21_0B: TypeAlias = struct_rpc_ctrl_pm_area_pc_sampler_v21_0B
rpc_ctrl_pm_area_pc_sampler_v: TypeAlias = struct_rpc_ctrl_pm_area_pc_sampler_v21_0B
# RPC: delete MIG execution partitions by id (up to 8).
@c.record
class struct_rpc_ctrl_exec_partitions_delete_v1F_0A(c.Struct):
  SIZE = 44
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
  execPartitionsDelete: Annotated[NVC637_CTRL_EXEC_PARTITIONS_DELETE_PARAMS_v18_05, 8]
@c.record
class struct_NVC637_CTRL_EXEC_PARTITIONS_DELETE_PARAMS_v18_05(c.Struct):
  SIZE = 36
  execPartCount: Annotated[NvU32, 0]
  execPartId: Annotated[c.Array[NvU32, Literal[8]], 4]
NVC637_CTRL_EXEC_PARTITIONS_DELETE_PARAMS_v18_05: TypeAlias = struct_NVC637_CTRL_EXEC_PARTITIONS_DELETE_PARAMS_v18_05
rpc_ctrl_exec_partitions_delete_v1F_0A: TypeAlias = struct_rpc_ctrl_exec_partitions_delete_v1F_0A
rpc_ctrl_exec_partitions_delete_v: TypeAlias = struct_rpc_ctrl_exec_partitions_delete_v1F_0A
# RPC: fetch the GPFIFO work-submit (doorbell) token.
@c.record
class struct_rpc_ctrl_gpfifo_get_work_submit_token_v1F_0A(c.Struct):
  SIZE = 12
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
  workSubmitToken: Annotated[NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN_PARAMS_v08_00, 8]
@c.record
class struct_NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN_PARAMS_v08_00(c.Struct):
  SIZE = 4
  workSubmitToken: Annotated[NvU32, 0]
NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN_PARAMS_v08_00: TypeAlias = struct_NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN_PARAMS_v08_00
rpc_ctrl_gpfifo_get_work_submit_token_v1F_0A: TypeAlias = struct_rpc_ctrl_gpfifo_get_work_submit_token_v1F_0A
rpc_ctrl_gpfifo_get_work_submit_token_v: TypeAlias = struct_rpc_ctrl_gpfifo_get_work_submit_token_v1F_0A
# RPC: set the notifier index used for work-submit-token notifications.
@c.record
class struct_rpc_ctrl_gpfifo_set_work_submit_token_notif_index_v1F_0A(c.Struct):
  SIZE = 12
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
  setWorkSubmitTokenIndex: Annotated[NVC36F_CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX_PARAMS_v16_04, 8]
@c.record
class struct_NVC36F_CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX_PARAMS_v16_04(c.Struct):
  SIZE = 4
  index: Annotated[NvU32, 0]
NVC36F_CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX_PARAMS_v16_04: TypeAlias = struct_NVC36F_CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX_PARAMS_v16_04
rpc_ctrl_gpfifo_set_work_submit_token_notif_index_v1F_0A: TypeAlias = struct_rpc_ctrl_gpfifo_set_work_submit_token_notif_index_v1F_0A
rpc_ctrl_gpfifo_set_work_submit_token_notif_index_v: TypeAlias = struct_rpc_ctrl_gpfifo_set_work_submit_token_notif_index_v1F_0A
# RPC: virtual-function error-containment interrupt masks (ECC + NVLink).
@c.record
class struct_rpc_ctrl_master_get_virtual_function_error_cont_intr_mask_v1F_0D(c.Struct):
  SIZE = 16
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
  vfErrContIntrMask: Annotated[NV90E6_CTRL_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK_PARAMS_v18_0B, 8]
@c.record
class struct_NV90E6_CTRL_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK_PARAMS_v18_0B(c.Struct):
  SIZE = 8
  eccMask: Annotated[NvU32, 0]
  nvlinkMask: Annotated[NvU32, 4]
NV90E6_CTRL_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK_PARAMS_v18_0B: TypeAlias = struct_NV90E6_CTRL_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK_PARAMS_v18_0B
rpc_ctrl_master_get_virtual_function_error_cont_intr_mask_v1F_0D: TypeAlias = struct_rpc_ctrl_master_get_virtual_function_error_cont_intr_mask_v1F_0D
rpc_ctrl_master_get_virtual_function_error_cont_intr_mask_v: TypeAlias = struct_rpc_ctrl_master_get_virtual_function_error_cont_intr_mask_v1F_0D
# RPC: hibernation save/restore — fixed header followed by a variable-length payload
# (zero-length trailing array, C "flexible array member" style).
@c.record
class struct_rpc_save_hibernation_data_v1E_0E(c.Struct):
  SIZE = 4
  remainedBytes: Annotated[NvU32, 0]
  payload: Annotated[c.Array[NvU8, Literal[0]], 4]
rpc_save_hibernation_data_v1E_0E: TypeAlias = struct_rpc_save_hibernation_data_v1E_0E
rpc_save_hibernation_data_v: TypeAlias = struct_rpc_save_hibernation_data_v1E_0E
@c.record
class struct_rpc_restore_hibernation_data_v1E_0E(c.Struct):
  SIZE = 4
  remainedBytes: Annotated[NvU32, 0]
  payload: Annotated[c.Array[NvU8, Literal[0]], 4]
rpc_restore_hibernation_data_v1E_0E: TypeAlias = struct_rpc_restore_hibernation_data_v1E_0E
rpc_restore_hibernation_data_v: TypeAlias = struct_rpc_restore_hibernation_data_v1E_0E
# RPC: query MMU debug mode flag.
@c.record
class struct_rpc_ctrl_get_mmu_debug_mode_v1E_06(c.Struct):
  SIZE = 12
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
  params: Annotated[NV0090_CTRL_GET_MMU_DEBUG_MODE_PARAMS_v1E_06, 8]
@c.record
class struct_NV0090_CTRL_GET_MMU_DEBUG_MODE_PARAMS_v1E_06(c.Struct):
  SIZE = 1
  bMode: Annotated[NvBool, 0]
NV0090_CTRL_GET_MMU_DEBUG_MODE_PARAMS_v1E_06: TypeAlias = struct_NV0090_CTRL_GET_MMU_DEBUG_MODE_PARAMS_v1E_06
rpc_ctrl_get_mmu_debug_mode_v1E_06: TypeAlias = struct_rpc_ctrl_get_mmu_debug_mode_v1E_06
rpc_ctrl_get_mmu_debug_mode_v: TypeAlias = struct_rpc_ctrl_get_mmu_debug_mode_v1E_06
# RPC: globally disable/enable channels.
@c.record
class struct_rpc_disable_channels_v1E_0B(c.Struct):
  SIZE = 4
  bDisable: Annotated[NvU32, 0]
rpc_disable_channels_v1E_0B: TypeAlias = struct_rpc_disable_channels_v1E_0B
rpc_disable_channels_v: TypeAlias = struct_rpc_disable_channels_v1E_0B
# Generated GSP RPC layout records (SIZE = byte size, Annotated[..., N] = byte offset).
# NOTE(review): appears auto-generated — confirm generator before hand-editing.
# RPC: batched register ops for migratable (vGPU) contexts — up to 50 reg ops.
@c.record
class struct_rpc_ctrl_gpu_migratable_ops_v21_07(c.Struct):
  SIZE = 1840
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
  ctrlParams: Annotated[NV2080_CTRL_GPU_MIGRATABLE_OPS_PARAMS_v21_07, 8]
@c.record
class struct_NV2080_CTRL_GPU_MIGRATABLE_OPS_PARAMS_v21_07(c.Struct):
  SIZE = 1832
  hClientTarget: Annotated[NvHandle, 0]
  hChannelTarget: Annotated[NvHandle, 4]
  bNonTransactional: Annotated[NvU32, 8]
  regOpCount: Annotated[NvU32, 12]
  smIds: Annotated[c.Array[NvU32, Literal[50]], 16]
  regOps: Annotated[c.Array[NV2080_CTRL_GPU_REG_OP_v03_00, Literal[50]], 216]
  grRouteInfo: Annotated[NV2080_CTRL_GR_ROUTE_INFO_v12_01, 1816]
NV2080_CTRL_GPU_MIGRATABLE_OPS_PARAMS_v21_07: TypeAlias = struct_NV2080_CTRL_GPU_MIGRATABLE_OPS_PARAMS_v21_07
rpc_ctrl_gpu_migratable_ops_v21_07: TypeAlias = struct_rpc_ctrl_gpu_migratable_ops_v21_07
rpc_ctrl_gpu_migratable_ops_v: TypeAlias = struct_rpc_ctrl_gpu_migratable_ops_v21_07
# RPC: invalidate TLB for the page directory at pdbAddress.
@c.record
class struct_rpc_invalidate_tlb_v23_03(c.Struct):
  SIZE = 16
  pdbAddress: Annotated[NvU64, 0]
  regVal: Annotated[NvU32, 8]
rpc_invalidate_tlb_v23_03: TypeAlias = struct_rpc_invalidate_tlb_v23_03
rpc_invalidate_tlb_v: TypeAlias = struct_rpc_invalidate_tlb_v23_03
# RPC: query GPU branding capabilities bitmask.
@c.record
class struct_rpc_get_brand_caps_v25_12(c.Struct):
  SIZE = 4
  brands: Annotated[NvU32, 0]
rpc_get_brand_caps_v25_12: TypeAlias = struct_rpc_get_brand_caps_v25_12
rpc_get_brand_caps_v: TypeAlias = struct_rpc_get_brand_caps_v25_12
# RPC: push system info to GSP (opaque data word at this version).
@c.record
class struct_rpc_gsp_set_system_info_v17_00(c.Struct):
  SIZE = 4
  data: Annotated[NvU32, 0]
rpc_gsp_set_system_info_v17_00: TypeAlias = struct_rpc_gsp_set_system_info_v17_00
rpc_gsp_set_system_info_v: TypeAlias = struct_rpc_gsp_set_system_info_v17_00
# RPC: generic GSP-RM object allocation — fixed header + variable-length params blob.
@c.record
class struct_rpc_gsp_rm_alloc_v03_00(c.Struct):
  SIZE = 32
  hClient: Annotated[NvHandle, 0]
  hParent: Annotated[NvHandle, 4]
  hObject: Annotated[NvHandle, 8]
  hClass: Annotated[NvU32, 12]
  status: Annotated[NvU32, 16]
  paramsSize: Annotated[NvU32, 20]
  flags: Annotated[NvU32, 24]
  reserved: Annotated[c.Array[NvU8, Literal[4]], 28]
  params: Annotated[c.Array[NvU8, Literal[0]], 32]
rpc_gsp_rm_alloc_v03_00: TypeAlias = struct_rpc_gsp_rm_alloc_v03_00
rpc_gsp_rm_alloc_v: TypeAlias = struct_rpc_gsp_rm_alloc_v03_00
# RPC: generic GSP-RM control call — fixed header + variable-length params blob.
@c.record
class struct_rpc_gsp_rm_control_v03_00(c.Struct):
  SIZE = 24
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
  cmd: Annotated[NvU32, 8]
  status: Annotated[NvU32, 12]
  paramsSize: Annotated[NvU32, 16]
  flags: Annotated[NvU32, 20]
  params: Annotated[c.Array[NvU8, Literal[0]], 24]
rpc_gsp_rm_control_v03_00: TypeAlias = struct_rpc_gsp_rm_control_v03_00
rpc_gsp_rm_control_v: TypeAlias = struct_rpc_gsp_rm_control_v03_00
# RPC: protobuf crash/debug dump request — header + variable-length blob.
@c.record
class struct_rpc_dump_protobuf_component_v18_12(c.Struct):
  SIZE = 16
  component: Annotated[NvU16, 0]
  nvDumpType: Annotated[NvU8, 2]
  countOnly: Annotated[NvBool, 3]
  bugCheckCode: Annotated[NvU32, 4]
  internalCode: Annotated[NvU32, 8]
  bufferSize: Annotated[NvU32, 12]
  blob: Annotated[c.Array[NvU8, Literal[0]], 16]
rpc_dump_protobuf_component_v18_12: TypeAlias = struct_rpc_dump_protobuf_component_v18_12
rpc_dump_protobuf_component_v: TypeAlias = struct_rpc_dump_protobuf_component_v18_12
# RPC: run CPU sequencer script — header + variable-length command buffer.
@c.record
class struct_rpc_run_cpu_sequencer_v17_00(c.Struct):
  SIZE = 40
  bufferSizeDWord: Annotated[NvU32, 0]
  cmdIndex: Annotated[NvU32, 4]
  regSaveArea: Annotated[c.Array[NvU32, Literal[8]], 8]
  commandBuffer: Annotated[c.Array[NvU32, Literal[0]], 40]
rpc_run_cpu_sequencer_v17_00: TypeAlias = struct_rpc_run_cpu_sequencer_v17_00
rpc_run_cpu_sequencer_v: TypeAlias = struct_rpc_run_cpu_sequencer_v17_00
# RPC (event): post an event notification to a client — variable-length event data.
@c.record
class struct_rpc_post_event_v17_00(c.Struct):
  SIZE = 32
  hClient: Annotated[NvHandle, 0]
  hEvent: Annotated[NvHandle, 4]
  notifyIndex: Annotated[NvU32, 8]
  data: Annotated[NvU32, 12]
  info16: Annotated[NvU16, 16]
  status: Annotated[NvU32, 20]
  eventDataSize: Annotated[NvU32, 24]
  bNotifyList: Annotated[NvBool, 28]
  eventData: Annotated[c.Array[NvU8, Literal[0]], 29]
rpc_post_event_v17_00: TypeAlias = struct_rpc_post_event_v17_00
rpc_post_event_v: TypeAlias = struct_rpc_post_event_v17_00
# RPC (event): robust-channel (RC) error triggered — fault details + RC journal blob.
@c.record
class struct_rpc_rc_triggered_v17_02(c.Struct):
  SIZE = 48
  nv2080EngineType: Annotated[NvU32, 0]
  chid: Annotated[NvU32, 4]
  gfid: Annotated[NvU32, 8]
  exceptLevel: Annotated[NvU32, 12]
  exceptType: Annotated[NvU32, 16]
  scope: Annotated[NvU32, 20]
  partitionAttributionId: Annotated[NvU16, 24]
  mmuFaultAddrLo: Annotated[NvU32, 28]
  mmuFaultAddrHi: Annotated[NvU32, 32]
  mmuFaultType: Annotated[NvU32, 36]
  bCallbackNeeded: Annotated[NvBool, 40]
  rcJournalBufferSize: Annotated[NvU32, 44]
  rcJournalBuffer: Annotated[c.Array[NvU8, Literal[0]], 48]
rpc_rc_triggered_v17_02: TypeAlias = struct_rpc_rc_triggered_v17_02
rpc_rc_triggered_v: TypeAlias = struct_rpc_rc_triggered_v17_02
# Generated GSP RPC layout records (SIZE = byte size, Annotated[..., N] = byte offset).
# NOTE(review): appears auto-generated — confirm generator before hand-editing.
# RPC (event): OS error log line from GSP (fixed 256-char message).
@c.record
class struct_rpc_os_error_log_v17_00(c.Struct):
  SIZE = 268
  exceptType: Annotated[NvU32, 0]
  runlistId: Annotated[NvU32, 4]
  chid: Annotated[NvU32, 8]
  errString: Annotated[c.Array[Annotated[bytes, ctypes.c_char], Literal[256]], 12]
rpc_os_error_log_v17_00: TypeAlias = struct_rpc_os_error_log_v17_00
rpc_os_error_log_v: TypeAlias = struct_rpc_os_error_log_v17_00
# RPC (event): display RG-line interrupt for a head.
@c.record
class struct_rpc_rg_line_intr_v17_00(c.Struct):
  SIZE = 8
  head: Annotated[NvU32, 0]
  rgIntr: Annotated[NvU32, 4]
rpc_rg_line_intr_v17_00: TypeAlias = struct_rpc_rg_line_intr_v17_00
rpc_rg_line_intr_v: TypeAlias = struct_rpc_rg_line_intr_v17_00
# RPC: display modeset begin/end with bandwidth requirements.
@c.record
class struct_rpc_display_modeset_v01_00(c.Struct):
  SIZE = 12
  bModesetStart: Annotated[NvBool, 0]
  minRequiredIsoBandwidthKBPS: Annotated[NvU32, 4]
  minRequiredFloorBandwidthKBPS: Annotated[NvU32, 8]
rpc_display_modeset_v01_00: TypeAlias = struct_rpc_display_modeset_v01_00
rpc_display_modeset_v: TypeAlias = struct_rpc_display_modeset_v01_00
# RPC: GPU-accounting perfmon utilization samples (up to 72 samples).
@c.record
class struct_rpc_gpuacct_perfmon_util_samples_v1F_0E(c.Struct):
  SIZE = 4048
  params: Annotated[NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_V2_PARAMS_v1F_0E, 0]
@c.record
class struct_NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_V2_PARAMS_v1F_0E(c.Struct):
  SIZE = 4048
  type: Annotated[NvU8, 0]
  bufSize: Annotated[NvU32, 4]
  count: Annotated[NvU32, 8]
  tracker: Annotated[NvU32, 12]
  samples: Annotated[c.Array[NV2080_CTRL_PERF_GPUMON_PERFMON_UTIL_SAMPLE_v1F_0E, Literal[72]], 16]
NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_V2_PARAMS_v1F_0E: TypeAlias = struct_NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_V2_PARAMS_v1F_0E
rpc_gpuacct_perfmon_util_samples_v1F_0E: TypeAlias = struct_rpc_gpuacct_perfmon_util_samples_v1F_0E
rpc_gpuacct_perfmon_util_samples_v: TypeAlias = struct_rpc_gpuacct_perfmon_util_samples_v1F_0E
# RPC (event): vGPU GSP plugin notification for a given GFID.
@c.record
class struct_rpc_vgpu_gsp_plugin_triggered_v17_00(c.Struct):
  SIZE = 8
  gfid: Annotated[NvU32, 0]
  notifyIndex: Annotated[NvU32, 4]
rpc_vgpu_gsp_plugin_triggered_v17_00: TypeAlias = struct_rpc_vgpu_gsp_plugin_triggered_v17_00
rpc_vgpu_gsp_plugin_triggered_v: TypeAlias = struct_rpc_vgpu_gsp_plugin_triggered_v17_00
# RPC (event): vGPU configuration event.
@c.record
class struct_rpc_vgpu_config_event_v17_00(c.Struct):
  SIZE = 4
  notifyIndex: Annotated[NvU32, 0]
rpc_vgpu_config_event_v17_00: TypeAlias = struct_rpc_vgpu_config_event_v17_00
rpc_vgpu_config_event_v: TypeAlias = struct_rpc_vgpu_config_event_v17_00
# RPC: DCE RM init/teardown handshake.
@c.record
class struct_rpc_dce_rm_init_v01_00(c.Struct):
  SIZE = 8
  bInit: Annotated[NvBool, 0]
  hInternalClient: Annotated[NvU32, 4]
rpc_dce_rm_init_v01_00: TypeAlias = struct_rpc_dce_rm_init_v01_00
rpc_dce_rm_init_v: TypeAlias = struct_rpc_dce_rm_init_v01_00
# RPC: simulator escape read/write (path is a fixed 256-char C string).
@c.record
class struct_rpc_sim_read_v1E_01(c.Struct):
  SIZE = 264
  path: Annotated[c.Array[Annotated[bytes, ctypes.c_char], Literal[256]], 0]
  index: Annotated[NvU32, 256]
  count: Annotated[NvU32, 260]
rpc_sim_read_v1E_01: TypeAlias = struct_rpc_sim_read_v1E_01
rpc_sim_read_v: TypeAlias = struct_rpc_sim_read_v1E_01
@c.record
class struct_rpc_sim_write_v1E_01(c.Struct):
  SIZE = 268
  path: Annotated[c.Array[Annotated[bytes, ctypes.c_char], Literal[256]], 0]
  index: Annotated[NvU32, 256]
  count: Annotated[NvU32, 260]
  data: Annotated[NvU32, 264]
rpc_sim_write_v1E_01: TypeAlias = struct_rpc_sim_write_v1E_01
rpc_sim_write_v: TypeAlias = struct_rpc_sim_write_v1E_01
# RPC (event): libos ucode print buffer — header + variable-length text.
@c.record
class struct_rpc_ucode_libos_print_v1E_08(c.Struct):
  SIZE = 8
  ucodeEngDesc: Annotated[NvU32, 0]
  libosPrintBufSize: Annotated[NvU32, 4]
  libosPrintBuf: Annotated[c.Array[NvU8, Literal[0]], 8]
rpc_ucode_libos_print_v1E_08: TypeAlias = struct_rpc_ucode_libos_print_v1E_08
rpc_ucode_libos_print_v: TypeAlias = struct_rpc_ucode_libos_print_v1E_08
# RPC: init-done handshake (payload unused).
@c.record
class struct_rpc_init_done_v17_00(c.Struct):
  SIZE = 4
  not_used: Annotated[NvU32, 0]
rpc_init_done_v17_00: TypeAlias = struct_rpc_init_done_v17_00
rpc_init_done_v: TypeAlias = struct_rpc_init_done_v17_00
# RPC (event): semaphore release scheduled-callback completion.
@c.record
class struct_rpc_semaphore_schedule_callback_v17_00(c.Struct):
  SIZE = 32
  GPUVA: Annotated[NvU64, 0]
  hVASpace: Annotated[NvU32, 8]
  ReleaseValue: Annotated[NvU32, 12]
  Flags: Annotated[NvU32, 16]
  completionStatus: Annotated[NvU32, 20]
  hClient: Annotated[NvHandle, 24]
  hEvent: Annotated[NvHandle, 28]
rpc_semaphore_schedule_callback_v17_00: TypeAlias = struct_rpc_semaphore_schedule_callback_v17_00
rpc_semaphore_schedule_callback_v: TypeAlias = struct_rpc_semaphore_schedule_callback_v17_00
# RPC (event): timed semaphore release completion.
@c.record
class struct_rpc_timed_semaphore_release_v01_00(c.Struct):
  SIZE = 40
  semaphoreVA: Annotated[NvU64, 0]
  notifierVA: Annotated[NvU64, 8]
  hVASpace: Annotated[NvU32, 16]
  releaseValue: Annotated[NvU32, 20]
  completionStatus: Annotated[NvU32, 24]
  hClient: Annotated[NvHandle, 28]
  hDevice: Annotated[NvHandle, 32]
rpc_timed_semaphore_release_v01_00: TypeAlias = struct_rpc_timed_semaphore_release_v01_00
rpc_timed_semaphore_release_v: TypeAlias = struct_rpc_timed_semaphore_release_v01_00
# RPC (event): perf GPU-boost sync limits callback.
@c.record
class struct_rpc_perf_gpu_boost_sync_limits_callback_v17_00(c.Struct):
  SIZE = 16
  params: Annotated[NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_SET_LIMITS_PARAMS_v17_00, 0]
@c.record
class struct_NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_SET_LIMITS_PARAMS_v17_00(c.Struct):
  SIZE = 16
  flags: Annotated[NvU32, 0]
  bBridgeless: Annotated[NvBool, 4]
  currLimits: Annotated[c.Array[NvU32, Literal[2]], 8]
NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_SET_LIMITS_PARAMS_v17_00: TypeAlias = struct_NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_SET_LIMITS_PARAMS_v17_00
rpc_perf_gpu_boost_sync_limits_callback_v17_00: TypeAlias = struct_rpc_perf_gpu_boost_sync_limits_callback_v17_00
rpc_perf_gpu_boost_sync_limits_callback_v: TypeAlias = struct_rpc_perf_gpu_boost_sync_limits_callback_v17_00
# RPC (event): bridgeless-state update (flag carried as a 64-bit word here).
@c.record
class struct_rpc_perf_bridgeless_info_update_v17_00(c.Struct):
  SIZE = 8
  bBridgeless: Annotated[NvU64, 0]
rpc_perf_bridgeless_info_update_v17_00: TypeAlias = struct_rpc_perf_bridgeless_info_update_v17_00
rpc_perf_bridgeless_info_update_v: TypeAlias = struct_rpc_perf_bridgeless_info_update_v17_00
# RPC (event): NVLink link recovered ("fault up") for linkId.
@c.record
class struct_rpc_nvlink_fault_up_v17_00(c.Struct):
  SIZE = 4
  linkId: Annotated[NvU32, 0]
rpc_nvlink_fault_up_v17_00: TypeAlias = struct_rpc_nvlink_fault_up_v17_00
rpc_nvlink_fault_up_v: TypeAlias = struct_rpc_nvlink_fault_up_v17_00
# Generated GSP RPC layout records (SIZE = byte size, Annotated[..., N] = byte offset).
# NOTE(review): appears auto-generated — confirm generator before hand-editing.
# RPC (event) family: NVLink inband message received — identical shape at five fixed
# payload capacities (256/512/1024/2048/4096 bytes); dataSize gives the used length.
@c.record
class struct_rpc_nvlink_inband_received_data_256_v17_00(c.Struct):
  SIZE = 260
  params: Annotated[NV2080_CTRL_NVLINK_INBAND_RECEIVED_DATA_256_PARAMS_v17_00, 0]
@c.record
class struct_NV2080_CTRL_NVLINK_INBAND_RECEIVED_DATA_256_PARAMS_v17_00(c.Struct):
  SIZE = 260
  dataSize: Annotated[NvU32, 0]
  data: Annotated[c.Array[NvU8, Literal[256]], 4]
NV2080_CTRL_NVLINK_INBAND_RECEIVED_DATA_256_PARAMS_v17_00: TypeAlias = struct_NV2080_CTRL_NVLINK_INBAND_RECEIVED_DATA_256_PARAMS_v17_00
rpc_nvlink_inband_received_data_256_v17_00: TypeAlias = struct_rpc_nvlink_inband_received_data_256_v17_00
rpc_nvlink_inband_received_data_256_v: TypeAlias = struct_rpc_nvlink_inband_received_data_256_v17_00
@c.record
class struct_rpc_nvlink_inband_received_data_512_v17_00(c.Struct):
  SIZE = 516
  params: Annotated[NV2080_CTRL_NVLINK_INBAND_RECEIVED_DATA_512_PARAMS_v17_00, 0]
@c.record
class struct_NV2080_CTRL_NVLINK_INBAND_RECEIVED_DATA_512_PARAMS_v17_00(c.Struct):
  SIZE = 516
  dataSize: Annotated[NvU32, 0]
  data: Annotated[c.Array[NvU8, Literal[512]], 4]
NV2080_CTRL_NVLINK_INBAND_RECEIVED_DATA_512_PARAMS_v17_00: TypeAlias = struct_NV2080_CTRL_NVLINK_INBAND_RECEIVED_DATA_512_PARAMS_v17_00
rpc_nvlink_inband_received_data_512_v17_00: TypeAlias = struct_rpc_nvlink_inband_received_data_512_v17_00
rpc_nvlink_inband_received_data_512_v: TypeAlias = struct_rpc_nvlink_inband_received_data_512_v17_00
@c.record
class struct_rpc_nvlink_inband_received_data_1024_v17_00(c.Struct):
  SIZE = 1028
  params: Annotated[NV2080_CTRL_NVLINK_INBAND_RECEIVED_DATA_1024_PARAMS_v17_00, 0]
@c.record
class struct_NV2080_CTRL_NVLINK_INBAND_RECEIVED_DATA_1024_PARAMS_v17_00(c.Struct):
  SIZE = 1028
  dataSize: Annotated[NvU32, 0]
  data: Annotated[c.Array[NvU8, Literal[1024]], 4]
NV2080_CTRL_NVLINK_INBAND_RECEIVED_DATA_1024_PARAMS_v17_00: TypeAlias = struct_NV2080_CTRL_NVLINK_INBAND_RECEIVED_DATA_1024_PARAMS_v17_00
rpc_nvlink_inband_received_data_1024_v17_00: TypeAlias = struct_rpc_nvlink_inband_received_data_1024_v17_00
rpc_nvlink_inband_received_data_1024_v: TypeAlias = struct_rpc_nvlink_inband_received_data_1024_v17_00
@c.record
class struct_rpc_nvlink_inband_received_data_2048_v17_00(c.Struct):
  SIZE = 2052
  params: Annotated[NV2080_CTRL_NVLINK_INBAND_RECEIVED_DATA_2048_PARAMS_v17_00, 0]
@c.record
class struct_NV2080_CTRL_NVLINK_INBAND_RECEIVED_DATA_2048_PARAMS_v17_00(c.Struct):
  SIZE = 2052
  dataSize: Annotated[NvU32, 0]
  data: Annotated[c.Array[NvU8, Literal[2048]], 4]
NV2080_CTRL_NVLINK_INBAND_RECEIVED_DATA_2048_PARAMS_v17_00: TypeAlias = struct_NV2080_CTRL_NVLINK_INBAND_RECEIVED_DATA_2048_PARAMS_v17_00
rpc_nvlink_inband_received_data_2048_v17_00: TypeAlias = struct_rpc_nvlink_inband_received_data_2048_v17_00
rpc_nvlink_inband_received_data_2048_v: TypeAlias = struct_rpc_nvlink_inband_received_data_2048_v17_00
@c.record
class struct_rpc_nvlink_inband_received_data_4096_v17_00(c.Struct):
  SIZE = 4100
  params: Annotated[NV2080_CTRL_NVLINK_INBAND_RECEIVED_DATA_4096_PARAMS_v17_00, 0]
@c.record
class struct_NV2080_CTRL_NVLINK_INBAND_RECEIVED_DATA_4096_PARAMS_v17_00(c.Struct):
  SIZE = 4100
  dataSize: Annotated[NvU32, 0]
  data: Annotated[c.Array[NvU8, Literal[4096]], 4]
NV2080_CTRL_NVLINK_INBAND_RECEIVED_DATA_4096_PARAMS_v17_00: TypeAlias = struct_NV2080_CTRL_NVLINK_INBAND_RECEIVED_DATA_4096_PARAMS_v17_00
rpc_nvlink_inband_received_data_4096_v17_00: TypeAlias = struct_rpc_nvlink_inband_received_data_4096_v17_00
rpc_nvlink_inband_received_data_4096_v: TypeAlias = struct_rpc_nvlink_inband_received_data_4096_v17_00
# RPC: query whether a GPU is degraded on a given NVLink link.
@c.record
class struct_rpc_nvlink_is_gpu_degraded_v17_00(c.Struct):
  SIZE = 8
  params: Annotated[NV2080_CTRL_NVLINK_IS_GPU_DEGRADED_PARAMS_v17_00, 0]
@c.record
class struct_NV2080_CTRL_NVLINK_IS_GPU_DEGRADED_PARAMS_v17_00(c.Struct):
  SIZE = 8
  linkId: Annotated[NvU32, 0]
  bIsGpuDegraded: Annotated[NvBool, 4]
NV2080_CTRL_NVLINK_IS_GPU_DEGRADED_PARAMS_v17_00: TypeAlias = struct_NV2080_CTRL_NVLINK_IS_GPU_DEGRADED_PARAMS_v17_00
rpc_nvlink_is_gpu_degraded_v17_00: TypeAlias = struct_rpc_nvlink_is_gpu_degraded_v17_00
rpc_nvlink_is_gpu_degraded_v: TypeAlias = struct_rpc_nvlink_is_gpu_degraded_v17_00
# RPC (event): NVLink fatal-error recovery notification.
@c.record
class struct_rpc_nvlink_fatal_error_recovery_v17_00(c.Struct):
  SIZE = 2
  params: Annotated[NV2080_CTRL_NVLINK_FATAL_ERROR_RECOVERY_PARAMS_v17_00, 0]
@c.record
class struct_NV2080_CTRL_NVLINK_FATAL_ERROR_RECOVERY_PARAMS_v17_00(c.Struct):
  SIZE = 2
  bRecoverable: Annotated[NvBool, 0]
  bLazy: Annotated[NvBool, 1]
NV2080_CTRL_NVLINK_FATAL_ERROR_RECOVERY_PARAMS_v17_00: TypeAlias = struct_NV2080_CTRL_NVLINK_FATAL_ERROR_RECOVERY_PARAMS_v17_00
rpc_nvlink_fatal_error_recovery_v17_00: TypeAlias = struct_rpc_nvlink_fatal_error_recovery_v17_00
rpc_nvlink_fatal_error_recovery_v: TypeAlias = struct_rpc_nvlink_fatal_error_recovery_v17_00
@c.record
class struct_rpc_update_gsp_trace_v01_00(c.Struct):
SIZE = 8
records: Annotated[NvU32, 0]
data: Annotated[NvU32, 4]
rpc_update_gsp_trace_v01_00: TypeAlias = struct_rpc_update_gsp_trace_v01_00
rpc_update_gsp_trace_v: TypeAlias = struct_rpc_update_gsp_trace_v01_00
@c.record
class struct_rpc_gsp_post_nocat_record_v01_00(c.Struct):
SIZE = 4
data: Annotated[NvU32, 0]
rpc_gsp_post_nocat_record_v01_00: TypeAlias = struct_rpc_gsp_post_nocat_record_v01_00
rpc_gsp_post_nocat_record_v: TypeAlias = struct_rpc_gsp_post_nocat_record_v01_00
@c.record
class struct_rpc_extdev_intr_service_v17_00(c.Struct):
SIZE = 4
lossRegStatus: Annotated[NvU8, 0]
gainRegStatus: Annotated[NvU8, 1]
miscRegStatus: Annotated[NvU8, 2]
rmStatus: Annotated[NvBool, 3]
rpc_extdev_intr_service_v17_00: TypeAlias = struct_rpc_extdev_intr_service_v17_00
rpc_extdev_intr_service_v: TypeAlias = struct_rpc_extdev_intr_service_v17_00
@c.record
class struct_rpc_pfm_req_hndlr_state_sync_callback_v21_04(c.Struct):
SIZE = 16
params: Annotated[NV2080_CTRL_INTERNAL_PFM_REQ_HNDLR_STATE_SYNC_PARAMS_v21_04, 0]
@c.record
class struct_NV2080_CTRL_INTERNAL_PFM_REQ_HNDLR_STATE_SYNC_PARAMS_v21_04(c.Struct):
SIZE = 16
flags: Annotated[NvU8, 0]
syncData: Annotated[NV2080_CTRL_INTERNAL_PFM_REQ_HNDLR_STATE_SYNC_data_v21_04, 4]
NV2080_CTRL_INTERNAL_PFM_REQ_HNDLR_STATE_SYNC_PARAMS_v21_04: TypeAlias = struct_NV2080_CTRL_INTERNAL_PFM_REQ_HNDLR_STATE_SYNC_PARAMS_v21_04
@c.record
class struct_NV2080_CTRL_INTERNAL_PFM_REQ_HNDLR_STATE_SYNC_data_v21_04(c.Struct):
SIZE = 12
type: Annotated[NvU8, 0]
data: Annotated[NV2080_CTRL_INTERNAL_PFM_REQ_HNDLR_STATE_SYNC_DATA_type_v21_04, 4]
NV2080_CTRL_INTERNAL_PFM_REQ_HNDLR_STATE_SYNC_data_v21_04: TypeAlias = struct_NV2080_CTRL_INTERNAL_PFM_REQ_HNDLR_STATE_SYNC_data_v21_04
@c.record
class union_NV2080_CTRL_INTERNAL_PFM_REQ_HNDLR_STATE_SYNC_DATA_type_v21_04(c.Struct):
SIZE = 8
smbpbi: Annotated[NV2080_CTRL_INTERNAL_PFM_REQ_HNDLR_STATE_SYNC_DATA_SMBPBI_v21_04, 0]
NV2080_CTRL_INTERNAL_PFM_REQ_HNDLR_STATE_SYNC_DATA_type_v21_04: TypeAlias = union_NV2080_CTRL_INTERNAL_PFM_REQ_HNDLR_STATE_SYNC_DATA_type_v21_04
@c.record
class struct_NV2080_CTRL_INTERNAL_PFM_REQ_HNDLR_STATE_SYNC_DATA_SMBPBI_v21_04(c.Struct):
SIZE = 8
sensorId: Annotated[NvU32, 0]
limit: Annotated[NvU32, 4]
NV2080_CTRL_INTERNAL_PFM_REQ_HNDLR_STATE_SYNC_DATA_SMBPBI_v21_04: TypeAlias = struct_NV2080_CTRL_INTERNAL_PFM_REQ_HNDLR_STATE_SYNC_DATA_SMBPBI_v21_04
rpc_pfm_req_hndlr_state_sync_callback_v21_04: TypeAlias = struct_rpc_pfm_req_hndlr_state_sync_callback_v21_04
rpc_pfm_req_hndlr_state_sync_callback_v: TypeAlias = struct_rpc_pfm_req_hndlr_state_sync_callback_v21_04
@c.record
class struct_rpc_vgpu_gsp_mig_ci_config_v21_03(c.Struct):
SIZE = 44
execPartCount: Annotated[NvU32, 0]
execPartId: Annotated[c.Array[NvU32, Literal[8]], 4]
gfid: Annotated[NvU32, 36]
bDelete: Annotated[NvBool, 40]
rpc_vgpu_gsp_mig_ci_config_v21_03: TypeAlias = struct_rpc_vgpu_gsp_mig_ci_config_v21_03
rpc_vgpu_gsp_mig_ci_config_v: TypeAlias = struct_rpc_vgpu_gsp_mig_ci_config_v21_03
@c.record
class struct_rpc_gsp_lockdown_notice_v17_00(c.Struct):
SIZE = 1
bLockdownEngaging: Annotated[NvBool, 0]
rpc_gsp_lockdown_notice_v17_00: TypeAlias = struct_rpc_gsp_lockdown_notice_v17_00
rpc_gsp_lockdown_notice_v: TypeAlias = struct_rpc_gsp_lockdown_notice_v17_00
# ECC status query RPCs. Two wire versions coexist: v24_06 (25 ECC units,
# deprecated) and v26_02 (30 units); the unversioned alias tracks v26_02.
@c.record
class struct_rpc_ctrl_gpu_query_ecc_status_v24_06(c.Struct):
  SIZE = 1016
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
  params: Annotated[NV2080_CTRL_GPU_QUERY_ECC_STATUS_DEPRECATED_RPC_PARAMS_v24_06, 8]
@c.record
class struct_NV2080_CTRL_GPU_QUERY_ECC_STATUS_DEPRECATED_RPC_PARAMS_v24_06(c.Struct):
  SIZE = 1008
  # 25 units * 40 bytes each = 1000, so the next field lands at offset 1000.
  units: Annotated[c.Array[NV2080_CTRL_GPU_QUERY_ECC_UNIT_STATUS_v15_01, Literal[25]], 0]
  bFatalPoisonError: Annotated[NvBool, 1000]
  flags: Annotated[NvU32, 1004]
NV2080_CTRL_GPU_QUERY_ECC_STATUS_DEPRECATED_RPC_PARAMS_v24_06: TypeAlias = struct_NV2080_CTRL_GPU_QUERY_ECC_STATUS_DEPRECATED_RPC_PARAMS_v24_06
@c.record
class struct_NV2080_CTRL_GPU_QUERY_ECC_UNIT_STATUS_v15_01(c.Struct):
  # Per-ECC-unit status: single-bit (sbe) and double-bit (dbe) error counters,
  # each with a non-resettable variant.
  SIZE = 40
  enabled: Annotated[NvBool, 0]
  scrubComplete: Annotated[NvBool, 1]
  supported: Annotated[NvBool, 2]
  dbe: Annotated[NV2080_CTRL_GPU_QUERY_ECC_EXCEPTION_STATUS_v15_01, 8]
  dbeNonResettable: Annotated[NV2080_CTRL_GPU_QUERY_ECC_EXCEPTION_STATUS_v15_01, 16]
  sbe: Annotated[NV2080_CTRL_GPU_QUERY_ECC_EXCEPTION_STATUS_v15_01, 24]
  sbeNonResettable: Annotated[NV2080_CTRL_GPU_QUERY_ECC_EXCEPTION_STATUS_v15_01, 32]
NV2080_CTRL_GPU_QUERY_ECC_UNIT_STATUS_v15_01: TypeAlias = struct_NV2080_CTRL_GPU_QUERY_ECC_UNIT_STATUS_v15_01
@c.record
class struct_NV2080_CTRL_GPU_QUERY_ECC_EXCEPTION_STATUS_v15_01(c.Struct):
  SIZE = 8
  count: Annotated[NvU64, 0]
NV2080_CTRL_GPU_QUERY_ECC_EXCEPTION_STATUS_v15_01: TypeAlias = struct_NV2080_CTRL_GPU_QUERY_ECC_EXCEPTION_STATUS_v15_01
rpc_ctrl_gpu_query_ecc_status_v24_06: TypeAlias = struct_rpc_ctrl_gpu_query_ecc_status_v24_06
@c.record
class struct_rpc_ctrl_gpu_query_ecc_status_v26_02(c.Struct):
  SIZE = 1216
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
  params: Annotated[NV2080_CTRL_GPU_QUERY_ECC_STATUS_PARAMS_v26_02, 8]
@c.record
class struct_NV2080_CTRL_GPU_QUERY_ECC_STATUS_PARAMS_v26_02(c.Struct):
  SIZE = 1208
  # 30 units * 40 bytes = 1200.
  units: Annotated[c.Array[NV2080_CTRL_GPU_QUERY_ECC_UNIT_STATUS_v15_01, Literal[30]], 0]
  bFatalPoisonError: Annotated[NvBool, 1200]
  flags: Annotated[NvU32, 1204]
NV2080_CTRL_GPU_QUERY_ECC_STATUS_PARAMS_v26_02: TypeAlias = struct_NV2080_CTRL_GPU_QUERY_ECC_STATUS_PARAMS_v26_02
rpc_ctrl_gpu_query_ecc_status_v26_02: TypeAlias = struct_rpc_ctrl_gpu_query_ecc_status_v26_02
rpc_ctrl_gpu_query_ecc_status_v: TypeAlias = struct_rpc_ctrl_gpu_query_ecc_status_v26_02
# Misc control RPCs: MMU debug-mode queries, fabric probe start, NVLink
# in-band messaging, FECS error notification, GSP trace control and
# recovery-action notification.
@c.record
class struct_rpc_ctrl_dbg_get_mode_mmu_debug_v25_04(c.Struct):
  SIZE = 12
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
  ctrlParams: Annotated[NV83DE_CTRL_DEBUG_GET_MODE_MMU_DEBUG_PARAMS_v25_04, 8]
@c.record
class struct_NV83DE_CTRL_DEBUG_GET_MODE_MMU_DEBUG_PARAMS_v25_04(c.Struct):
  SIZE = 4
  value: Annotated[NvU32, 0]
NV83DE_CTRL_DEBUG_GET_MODE_MMU_DEBUG_PARAMS_v25_04: TypeAlias = struct_NV83DE_CTRL_DEBUG_GET_MODE_MMU_DEBUG_PARAMS_v25_04
rpc_ctrl_dbg_get_mode_mmu_debug_v25_04: TypeAlias = struct_rpc_ctrl_dbg_get_mode_mmu_debug_v25_04
rpc_ctrl_dbg_get_mode_mmu_debug_v: TypeAlias = struct_rpc_ctrl_dbg_get_mode_mmu_debug_v25_04
@c.record
class struct_rpc_ctrl_dbg_get_mode_mmu_gcc_debug_v29_07(c.Struct):
  # Same shape as the non-GCC variant above, separate versioned type.
  SIZE = 12
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
  ctrlParams: Annotated[NV83DE_CTRL_DEBUG_GET_MODE_MMU_GCC_DEBUG_PARAMS_v29_07, 8]
@c.record
class struct_NV83DE_CTRL_DEBUG_GET_MODE_MMU_GCC_DEBUG_PARAMS_v29_07(c.Struct):
  SIZE = 4
  value: Annotated[NvU32, 0]
NV83DE_CTRL_DEBUG_GET_MODE_MMU_GCC_DEBUG_PARAMS_v29_07: TypeAlias = struct_NV83DE_CTRL_DEBUG_GET_MODE_MMU_GCC_DEBUG_PARAMS_v29_07
rpc_ctrl_dbg_get_mode_mmu_gcc_debug_v29_07: TypeAlias = struct_rpc_ctrl_dbg_get_mode_mmu_gcc_debug_v29_07
rpc_ctrl_dbg_get_mode_mmu_gcc_debug_v: TypeAlias = struct_rpc_ctrl_dbg_get_mode_mmu_gcc_debug_v29_07
@c.record
class struct_rpc_ctrl_cmd_internal_gpu_start_fabric_probe_v25_09(c.Struct):
  SIZE = 1
  bwMode: Annotated[NvU8, 0]
rpc_ctrl_cmd_internal_gpu_start_fabric_probe_v25_09: TypeAlias = struct_rpc_ctrl_cmd_internal_gpu_start_fabric_probe_v25_09
rpc_ctrl_cmd_internal_gpu_start_fabric_probe_v: TypeAlias = struct_rpc_ctrl_cmd_internal_gpu_start_fabric_probe_v25_09
@c.record
class struct_rpc_ctrl_nvlink_get_inband_received_data_v25_0C(c.Struct):
  # `more` flags that additional fragments of the same message follow.
  SIZE = 520
  message_type: Annotated[NvU16, 0]
  more: Annotated[NvBool, 2]
  payload: Annotated[NV2080_CTRL_NVLINK_INBAND_RECEIVED_DATA_PARAMS_v25_0C, 4]
@c.record
class struct_NV2080_CTRL_NVLINK_INBAND_RECEIVED_DATA_PARAMS_v25_0C(c.Struct):
  SIZE = 516
  dataSize: Annotated[NvU32, 0]
  data: Annotated[c.Array[NvU8, Literal[512]], 4]
NV2080_CTRL_NVLINK_INBAND_RECEIVED_DATA_PARAMS_v25_0C: TypeAlias = struct_NV2080_CTRL_NVLINK_INBAND_RECEIVED_DATA_PARAMS_v25_0C
rpc_ctrl_nvlink_get_inband_received_data_v25_0C: TypeAlias = struct_rpc_ctrl_nvlink_get_inband_received_data_v25_0C
rpc_ctrl_nvlink_get_inband_received_data_v: TypeAlias = struct_rpc_ctrl_nvlink_get_inband_received_data_v25_0C
@c.record
class struct_rpc_fecs_error_v26_02(c.Struct):
  SIZE = 8
  grIdx: Annotated[NvU32, 0]
  error_type: Annotated[NvU8, 4]
rpc_fecs_error_v26_02: TypeAlias = struct_rpc_fecs_error_v26_02
rpc_fecs_error_v: TypeAlias = struct_rpc_fecs_error_v26_02
@c.record
class struct_rpc_ctrl_cmd_nvlink_inband_send_data_v26_05(c.Struct):
  SIZE = 1028
  buffer: Annotated[c.Array[NvU8, Literal[1024]], 0]
  dataSize: Annotated[NvU32, 1024]
rpc_ctrl_cmd_nvlink_inband_send_data_v26_05: TypeAlias = struct_rpc_ctrl_cmd_nvlink_inband_send_data_v26_05
rpc_ctrl_cmd_nvlink_inband_send_data_v: TypeAlias = struct_rpc_ctrl_cmd_nvlink_inband_send_data_v26_05
@c.record
class struct_rpc_ctrl_cmd_internal_control_gsp_trace_v28_00(c.Struct):
  # Note the gap at offsets 12-15: bufferAddr is 8-byte aligned at 16.
  SIZE = 32
  bufferSize: Annotated[NvU32, 0]
  tracepointMask: Annotated[NvU32, 4]
  bufferWatermark: Annotated[NvU32, 8]
  bufferAddr: Annotated[NvU64, 16]
  flag: Annotated[NvU8, 24]
rpc_ctrl_cmd_internal_control_gsp_trace_v28_00: TypeAlias = struct_rpc_ctrl_cmd_internal_control_gsp_trace_v28_00
rpc_ctrl_cmd_internal_control_gsp_trace_v: TypeAlias = struct_rpc_ctrl_cmd_internal_control_gsp_trace_v28_00
@c.record
class struct_rpc_recovery_action_v28_01(c.Struct):
  SIZE = 8
  type: Annotated[NvU32, 0]
  value: Annotated[NvBool, 4]
rpc_recovery_action_v28_01: TypeAlias = struct_rpc_recovery_action_v28_01
rpc_recovery_action_v: TypeAlias = struct_rpc_recovery_action_v28_01
# LibOS heap-stats query, GSP message-queue transport element, and the common
# RPC message header that prefixes every GSP RPC.
@c.record
class struct_rpc_ctrl_subdevice_get_libos_heap_stats_v29_02(c.Struct):
  SIZE = 1048
  hClient: Annotated[NvHandle, 0]
  hObject: Annotated[NvHandle, 4]
  ctrlParams: Annotated[NV2080_CTRL_CMD_GSP_GET_LIBOS_HEAP_STATS_PARAMS_v29_02, 8]
@c.record
class struct_NV2080_CTRL_CMD_GSP_GET_LIBOS_HEAP_STATS_PARAMS_v29_02(c.Struct):
  SIZE = 1040
  # 64 pool entries * 16 bytes = 1024; `poolCount` says how many are valid.
  poolStats: Annotated[c.Array[NV2080_CTRL_GSP_LIBOS_POOL_STATS_v29_02, Literal[64]], 0]
  totalHeapSize: Annotated[NvU64, 1024]
  poolCount: Annotated[NvU8, 1032]
NV2080_CTRL_CMD_GSP_GET_LIBOS_HEAP_STATS_PARAMS_v29_02: TypeAlias = struct_NV2080_CTRL_CMD_GSP_GET_LIBOS_HEAP_STATS_PARAMS_v29_02
@c.record
class struct_NV2080_CTRL_GSP_LIBOS_POOL_STATS_v29_02(c.Struct):
  SIZE = 16
  allocations: Annotated[NvU32, 0]
  peakAllocations: Annotated[NvU32, 4]
  objectSize: Annotated[NvU64, 8]
NV2080_CTRL_GSP_LIBOS_POOL_STATS_v29_02: TypeAlias = struct_NV2080_CTRL_GSP_LIBOS_POOL_STATS_v29_02
rpc_ctrl_subdevice_get_libos_heap_stats_v29_02: TypeAlias = struct_rpc_ctrl_subdevice_get_libos_heap_stats_v29_02
rpc_ctrl_subdevice_get_libos_heap_stats_v: TypeAlias = struct_rpc_ctrl_subdevice_get_libos_heap_stats_v29_02
@c.record
class struct_GSP_MSG_QUEUE_ELEMENT(c.Struct):
  # Transport framing for the CPU<->GSP message queue; auth tag / AAD buffers
  # presumably carry the Confidential-Computing encryption metadata —
  # TODO(review): confirm against the GSP message-queue implementation.
  SIZE = 48
  authTagBuffer: Annotated[c.Array[NvU8, Literal[16]], 0]
  aadBuffer: Annotated[c.Array[NvU8, Literal[16]], 16]
  checkSum: Annotated[NvU32, 32]
  seqNum: Annotated[NvU32, 36]
  elemCount: Annotated[NvU32, 40]
  padding: Annotated[NvU32, 44]
GSP_MSG_QUEUE_ELEMENT: TypeAlias = struct_GSP_MSG_QUEUE_ELEMENT
@c.record
class union_rpc_message_rpc_union_field_v03_00(c.Struct):
  # C union: both members alias offset 0.
  SIZE = 4
  spare: Annotated[NvU32, 0]
  cpuRmGfid: Annotated[NvU32, 0]
rpc_message_rpc_union_field_v03_00: TypeAlias = union_rpc_message_rpc_union_field_v03_00
rpc_message_rpc_union_field_v: TypeAlias = union_rpc_message_rpc_union_field_v03_00
@c.record
class struct_rpc_message_header_v03_00(c.Struct):
  # Common 32-byte header preceding every RPC body; `function` selects the
  # message type and `length` covers header + payload.
  SIZE = 32
  header_version: Annotated[NvU32, 0]
  signature: Annotated[NvU32, 4]
  length: Annotated[NvU32, 8]
  function: Annotated[NvU32, 12]
  rpc_result: Annotated[NvU32, 16]
  rpc_result_private: Annotated[NvU32, 20]
  sequence: Annotated[NvU32, 24]
  u: Annotated[rpc_message_rpc_union_field_v, 28]
rpc_message_header_v03_00: TypeAlias = struct_rpc_message_header_v03_00
rpc_message_header_v: TypeAlias = struct_rpc_message_header_v03_00
# Packed registry blob passed to GSP (table header + fixed-size entries whose
# string names live at `nameOffset` past the table), and the display-mux state
# enum.
@c.record
class struct_PACKED_REGISTRY_ENTRY(c.Struct):
  SIZE = 16
  nameOffset: Annotated[NvU32, 0]
  type: Annotated[NvU8, 4]
  data: Annotated[NvU32, 8]
  length: Annotated[NvU32, 12]
PACKED_REGISTRY_ENTRY: TypeAlias = struct_PACKED_REGISTRY_ENTRY
@c.record
class struct_PACKED_REGISTRY_TABLE(c.Struct):
  # Header only; `numEntries` PACKED_REGISTRY_ENTRY records follow in memory.
  SIZE = 8
  size: Annotated[NvU32, 0]
  numEntries: Annotated[NvU32, 4]
PACKED_REGISTRY_TABLE: TypeAlias = struct_PACKED_REGISTRY_TABLE
class DISPMUXSTATE(Annotated[int, ctypes.c_uint32], c.Enum): pass
dispMuxState_None = DISPMUXSTATE.define('dispMuxState_None', 0)
dispMuxState_IntegratedGPU = DISPMUXSTATE.define('dispMuxState_IntegratedGPU', 1)
dispMuxState_DiscreteGPU = DISPMUXSTATE.define('dispMuxState_DiscreteGPU', 2)
# ACPI _DSM caching and method-evaluation results forwarded to GSP:
# per-function caches, the ACPI_DSM_FUNCTION enum, and the aggregated
# ACPI_METHOD_DATA (DOD / JT / MUX / CAPS evaluation results).
@c.record
class ACPI_DSM_CACHE(c.Struct):
  SIZE = 28
  suppFuncStatus: Annotated[NvU32, 0]
  suppFuncs: Annotated[c.Array[NvU8, Literal[8]], 4]
  suppFuncsLen: Annotated[NvU32, 12]
  bArg3isInteger: Annotated[NvBool, 16]
  callbackStatus: Annotated[NvU32, 20]
  callback: Annotated[NvU32, 24]
@c.record
class ACPI_DATA(c.Struct):
  SIZE = 472
  # One cache slot per ACPI_DSM_FUNCTION_* value (12 == ACPI_DSM_FUNCTION_COUNT).
  dsm: Annotated[c.Array[ACPI_DSM_CACHE, Literal[12]], 0]
  dispStatusHotplugFunc: Annotated[ACPI_DSM_FUNCTION, 336]
  dispStatusConfigFunc: Annotated[ACPI_DSM_FUNCTION, 340]
  perfPostPowerStateFunc: Annotated[ACPI_DSM_FUNCTION, 344]
  stereo3dStateActiveFunc: Annotated[ACPI_DSM_FUNCTION, 348]
  dsmPlatCapsCache: Annotated[c.Array[NvU32, Literal[12]], 352]
  MDTLFeatureSupport: Annotated[NvU32, 400]
  dsmCurrentFunc: Annotated[c.Array[ACPI_DSM_FUNCTION, Literal[8]], 404]
  dsmCurrentSubFunc: Annotated[c.Array[NvU32, Literal[8]], 436]
  dsmCurrentFuncSupport: Annotated[NvU32, 468]
class enum__ACPI_DSM_FUNCTION(Annotated[int, ctypes.c_uint32], c.Enum): pass
ACPI_DSM_FUNCTION_NBSI = enum__ACPI_DSM_FUNCTION.define('ACPI_DSM_FUNCTION_NBSI', 0)
ACPI_DSM_FUNCTION_NVHG = enum__ACPI_DSM_FUNCTION.define('ACPI_DSM_FUNCTION_NVHG', 1)
ACPI_DSM_FUNCTION_MXM = enum__ACPI_DSM_FUNCTION.define('ACPI_DSM_FUNCTION_MXM', 2)
ACPI_DSM_FUNCTION_NBCI = enum__ACPI_DSM_FUNCTION.define('ACPI_DSM_FUNCTION_NBCI', 3)
ACPI_DSM_FUNCTION_NVOP = enum__ACPI_DSM_FUNCTION.define('ACPI_DSM_FUNCTION_NVOP', 4)
ACPI_DSM_FUNCTION_PCFG = enum__ACPI_DSM_FUNCTION.define('ACPI_DSM_FUNCTION_PCFG', 5)
ACPI_DSM_FUNCTION_GPS_2X = enum__ACPI_DSM_FUNCTION.define('ACPI_DSM_FUNCTION_GPS_2X', 6)
ACPI_DSM_FUNCTION_JT = enum__ACPI_DSM_FUNCTION.define('ACPI_DSM_FUNCTION_JT', 7)
ACPI_DSM_FUNCTION_PEX = enum__ACPI_DSM_FUNCTION.define('ACPI_DSM_FUNCTION_PEX', 8)
ACPI_DSM_FUNCTION_NVPCF_2X = enum__ACPI_DSM_FUNCTION.define('ACPI_DSM_FUNCTION_NVPCF_2X', 9)
ACPI_DSM_FUNCTION_GPS = enum__ACPI_DSM_FUNCTION.define('ACPI_DSM_FUNCTION_GPS', 10)
ACPI_DSM_FUNCTION_NVPCF = enum__ACPI_DSM_FUNCTION.define('ACPI_DSM_FUNCTION_NVPCF', 11)
ACPI_DSM_FUNCTION_COUNT = enum__ACPI_DSM_FUNCTION.define('ACPI_DSM_FUNCTION_COUNT', 12)
ACPI_DSM_FUNCTION_CURRENT = enum__ACPI_DSM_FUNCTION.define('ACPI_DSM_FUNCTION_CURRENT', 13)
ACPI_DSM_FUNCTION_INVALID = enum__ACPI_DSM_FUNCTION.define('ACPI_DSM_FUNCTION_INVALID', 255)
ACPI_DSM_FUNCTION: TypeAlias = enum__ACPI_DSM_FUNCTION
@c.record
class struct_DOD_METHOD_DATA(c.Struct):
  # ACPI _DOD (display output devices) evaluation result.
  SIZE = 72
  status: Annotated[NV_STATUS, 0]
  acpiIdListLen: Annotated[NvU32, 4]
  acpiIdList: Annotated[c.Array[NvU32, Literal[16]], 8]
NV_STATUS: TypeAlias = Annotated[int, ctypes.c_uint32]
DOD_METHOD_DATA: TypeAlias = struct_DOD_METHOD_DATA
@c.record
class struct_JT_METHOD_DATA(c.Struct):
  SIZE = 12
  status: Annotated[NV_STATUS, 0]
  jtCaps: Annotated[NvU32, 4]
  jtRevId: Annotated[NvU16, 8]
  bSBIOSCaps: Annotated[NvBool, 10]
JT_METHOD_DATA: TypeAlias = struct_JT_METHOD_DATA
@c.record
class struct_MUX_METHOD_DATA_ELEMENT(c.Struct):
  SIZE = 12
  acpiId: Annotated[NvU32, 0]
  mode: Annotated[NvU32, 4]
  status: Annotated[NV_STATUS, 8]
MUX_METHOD_DATA_ELEMENT: TypeAlias = struct_MUX_METHOD_DATA_ELEMENT
@c.record
class struct_MUX_METHOD_DATA(c.Struct):
  SIZE = 580
  tableLen: Annotated[NvU32, 0]
  acpiIdMuxModeTable: Annotated[c.Array[MUX_METHOD_DATA_ELEMENT, Literal[16]], 4]
  acpiIdMuxPartTable: Annotated[c.Array[MUX_METHOD_DATA_ELEMENT, Literal[16]], 196]
  acpiIdMuxStateTable: Annotated[c.Array[MUX_METHOD_DATA_ELEMENT, Literal[16]], 388]
MUX_METHOD_DATA: TypeAlias = struct_MUX_METHOD_DATA
@c.record
class struct_CAPS_METHOD_DATA(c.Struct):
  SIZE = 8
  status: Annotated[NV_STATUS, 0]
  optimusCaps: Annotated[NvU32, 4]
CAPS_METHOD_DATA: TypeAlias = struct_CAPS_METHOD_DATA
@c.record
class struct_ACPI_METHOD_DATA(c.Struct):
  # Aggregate of all ACPI method results; `bValid` gates the whole payload.
  SIZE = 676
  bValid: Annotated[NvBool, 0]
  dodMethodData: Annotated[DOD_METHOD_DATA, 4]
  jtMethodData: Annotated[JT_METHOD_DATA, 76]
  muxMethodData: Annotated[MUX_METHOD_DATA, 88]
  capsMethodData: Annotated[CAPS_METHOD_DATA, 668]
ACPI_METHOD_DATA: TypeAlias = struct_ACPI_METHOD_DATA
# Resource-Manager engine-type enum: GR (graphics/compute), COPY (CE), video
# decode/encode, JPEG, OFA and various fixed-function/management engines.
# Values are a stable wire/ABI contract — do not renumber.
class RM_ENGINE_TYPE(Annotated[int, ctypes.c_uint32], c.Enum): pass
RM_ENGINE_TYPE_NULL = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_NULL', 0)
RM_ENGINE_TYPE_GR0 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_GR0', 1)
RM_ENGINE_TYPE_GR1 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_GR1', 2)
RM_ENGINE_TYPE_GR2 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_GR2', 3)
RM_ENGINE_TYPE_GR3 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_GR3', 4)
RM_ENGINE_TYPE_GR4 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_GR4', 5)
RM_ENGINE_TYPE_GR5 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_GR5', 6)
RM_ENGINE_TYPE_GR6 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_GR6', 7)
RM_ENGINE_TYPE_GR7 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_GR7', 8)
RM_ENGINE_TYPE_COPY0 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_COPY0', 9)
RM_ENGINE_TYPE_COPY1 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_COPY1', 10)
RM_ENGINE_TYPE_COPY2 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_COPY2', 11)
RM_ENGINE_TYPE_COPY3 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_COPY3', 12)
RM_ENGINE_TYPE_COPY4 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_COPY4', 13)
RM_ENGINE_TYPE_COPY5 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_COPY5', 14)
RM_ENGINE_TYPE_COPY6 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_COPY6', 15)
RM_ENGINE_TYPE_COPY7 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_COPY7', 16)
RM_ENGINE_TYPE_COPY8 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_COPY8', 17)
RM_ENGINE_TYPE_COPY9 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_COPY9', 18)
RM_ENGINE_TYPE_COPY10 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_COPY10', 19)
RM_ENGINE_TYPE_COPY11 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_COPY11', 20)
RM_ENGINE_TYPE_COPY12 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_COPY12', 21)
RM_ENGINE_TYPE_COPY13 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_COPY13', 22)
RM_ENGINE_TYPE_COPY14 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_COPY14', 23)
RM_ENGINE_TYPE_COPY15 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_COPY15', 24)
RM_ENGINE_TYPE_COPY16 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_COPY16', 25)
RM_ENGINE_TYPE_COPY17 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_COPY17', 26)
RM_ENGINE_TYPE_COPY18 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_COPY18', 27)
RM_ENGINE_TYPE_COPY19 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_COPY19', 28)
RM_ENGINE_TYPE_NVDEC0 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_NVDEC0', 29)
RM_ENGINE_TYPE_NVDEC1 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_NVDEC1', 30)
RM_ENGINE_TYPE_NVDEC2 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_NVDEC2', 31)
RM_ENGINE_TYPE_NVDEC3 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_NVDEC3', 32)
RM_ENGINE_TYPE_NVDEC4 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_NVDEC4', 33)
RM_ENGINE_TYPE_NVDEC5 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_NVDEC5', 34)
RM_ENGINE_TYPE_NVDEC6 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_NVDEC6', 35)
RM_ENGINE_TYPE_NVDEC7 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_NVDEC7', 36)
RM_ENGINE_TYPE_NVENC0 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_NVENC0', 37)
RM_ENGINE_TYPE_NVENC1 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_NVENC1', 38)
RM_ENGINE_TYPE_NVENC2 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_NVENC2', 39)
RM_ENGINE_TYPE_NVENC3 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_NVENC3', 40)
RM_ENGINE_TYPE_VP = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_VP', 41)
RM_ENGINE_TYPE_ME = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_ME', 42)
RM_ENGINE_TYPE_PPP = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_PPP', 43)
RM_ENGINE_TYPE_MPEG = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_MPEG', 44)
RM_ENGINE_TYPE_SW = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_SW', 45)
RM_ENGINE_TYPE_TSEC = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_TSEC', 46)
RM_ENGINE_TYPE_VIC = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_VIC', 47)
RM_ENGINE_TYPE_MP = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_MP', 48)
RM_ENGINE_TYPE_SEC2 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_SEC2', 49)
RM_ENGINE_TYPE_HOST = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_HOST', 50)
RM_ENGINE_TYPE_DPU = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_DPU', 51)
RM_ENGINE_TYPE_PMU = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_PMU', 52)
RM_ENGINE_TYPE_FBFLCN = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_FBFLCN', 53)
RM_ENGINE_TYPE_NVJPEG0 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_NVJPEG0', 54)
RM_ENGINE_TYPE_NVJPEG1 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_NVJPEG1', 55)
RM_ENGINE_TYPE_NVJPEG2 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_NVJPEG2', 56)
RM_ENGINE_TYPE_NVJPEG3 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_NVJPEG3', 57)
RM_ENGINE_TYPE_NVJPEG4 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_NVJPEG4', 58)
RM_ENGINE_TYPE_NVJPEG5 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_NVJPEG5', 59)
RM_ENGINE_TYPE_NVJPEG6 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_NVJPEG6', 60)
RM_ENGINE_TYPE_NVJPEG7 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_NVJPEG7', 61)
RM_ENGINE_TYPE_OFA0 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_OFA0', 62)
RM_ENGINE_TYPE_OFA1 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_OFA1', 63)
# Values 64-83 are reserved placeholders for future engines.
RM_ENGINE_TYPE_RESERVED40 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_RESERVED40', 64)
RM_ENGINE_TYPE_RESERVED41 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_RESERVED41', 65)
RM_ENGINE_TYPE_RESERVED42 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_RESERVED42', 66)
RM_ENGINE_TYPE_RESERVED43 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_RESERVED43', 67)
RM_ENGINE_TYPE_RESERVED44 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_RESERVED44', 68)
RM_ENGINE_TYPE_RESERVED45 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_RESERVED45', 69)
RM_ENGINE_TYPE_RESERVED46 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_RESERVED46', 70)
RM_ENGINE_TYPE_RESERVED47 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_RESERVED47', 71)
RM_ENGINE_TYPE_RESERVED48 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_RESERVED48', 72)
RM_ENGINE_TYPE_RESERVED49 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_RESERVED49', 73)
RM_ENGINE_TYPE_RESERVED4a = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_RESERVED4a', 74)
RM_ENGINE_TYPE_RESERVED4b = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_RESERVED4b', 75)
RM_ENGINE_TYPE_RESERVED4c = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_RESERVED4c', 76)
RM_ENGINE_TYPE_RESERVED4d = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_RESERVED4d', 77)
RM_ENGINE_TYPE_RESERVED4e = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_RESERVED4e', 78)
RM_ENGINE_TYPE_RESERVED4f = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_RESERVED4f', 79)
RM_ENGINE_TYPE_RESERVED50 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_RESERVED50', 80)
RM_ENGINE_TYPE_RESERVED51 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_RESERVED51', 81)
RM_ENGINE_TYPE_RESERVED52 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_RESERVED52', 82)
RM_ENGINE_TYPE_RESERVED53 = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_RESERVED53', 83)
RM_ENGINE_TYPE_LAST = RM_ENGINE_TYPE.define('RM_ENGINE_TYPE_LAST', 84)
# Small helper records embedded in the GSP boot-time info structs below:
# PCI identity, SR-IOV VF layout, PCIe link cap, chip ECID, WPR layout.
@c.record
class BUSINFO(c.Struct):
  # PCI config-space identity of a device (IDs + revision).
  SIZE = 10
  deviceID: Annotated[NvU16, 0]
  vendorID: Annotated[NvU16, 2]
  subdeviceID: Annotated[NvU16, 4]
  subvendorID: Annotated[NvU16, 6]
  revisionID: Annotated[NvU8, 8]
@c.record
class struct_GSP_VF_INFO(c.Struct):
  # SR-IOV virtual-function layout passed to GSP.
  SIZE = 40
  totalVFs: Annotated[NvU32, 0]
  firstVFOffset: Annotated[NvU32, 4]
  FirstVFBar0Address: Annotated[NvU64, 8]
  FirstVFBar1Address: Annotated[NvU64, 16]
  FirstVFBar2Address: Annotated[NvU64, 24]
  b64bitBar0: Annotated[NvBool, 32]
  b64bitBar1: Annotated[NvBool, 33]
  b64bitBar2: Annotated[NvBool, 34]
GSP_VF_INFO: TypeAlias = struct_GSP_VF_INFO
@c.record
class GSP_PCIE_CONFIG_REG(c.Struct):
  SIZE = 4
  linkCap: Annotated[NvU32, 0]
@c.record
class EcidManufacturingInfo(c.Struct):
  # Per-chip electronic chip ID (ECID) words.
  SIZE = 12
  ecidLow: Annotated[NvU32, 0]
  ecidHigh: Annotated[NvU32, 4]
  ecidExtended: Annotated[NvU32, 8]
@c.record
class FW_WPR_LAYOUT_OFFSET(c.Struct):
  # Offsets into the firmware write-protected-region (WPR) layout.
  SIZE = 16
  nonWprHeapOffset: Annotated[NvU64, 0]
  frtsOffset: Annotated[NvU64, 8]
# GspStaticConfigInfo: one-shot static GPU description returned by GSP after
# boot (caps, SKU/VBIOS identity, FB geometry, SR-IOV caps, internal handles),
# plus the nested ctrl-param structs it embeds.
@c.record
class struct_GspStaticConfigInfo_t(c.Struct):
  SIZE = 1656
  grCapsBits: Annotated[c.Array[NvU8, Literal[23]], 0]
  gidInfo: Annotated[NV2080_CTRL_GPU_GET_GID_INFO_PARAMS, 24]
  SKUInfo: Annotated[NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS, 292]
  fbRegionInfoParams: Annotated[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS, 344]
  sriovCaps: Annotated[NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS, 1120]
  sriovMaxGfid: Annotated[NvU32, 1200]
  engineCaps: Annotated[c.Array[NvU32, Literal[3]], 1204]
  poisonFuseEnabled: Annotated[NvBool, 1216]
  fb_length: Annotated[NvU64, 1224]
  fbio_mask: Annotated[NvU64, 1232]
  fb_bus_width: Annotated[NvU32, 1240]
  fb_ram_type: Annotated[NvU32, 1244]
  fbp_mask: Annotated[NvU64, 1248]
  l2_cache_size: Annotated[NvU32, 1256]
  # GPU marketing-name strings (byte and UTF-16 forms).
  gpuNameString: Annotated[c.Array[NvU8, Literal[64]], 1260]
  gpuShortNameString: Annotated[c.Array[NvU8, Literal[64]], 1324]
  gpuNameString_Unicode: Annotated[c.Array[NvU16, Literal[64]], 1388]
  # Board/SKU classification flags.
  bGpuInternalSku: Annotated[NvBool, 1516]
  bIsQuadroGeneric: Annotated[NvBool, 1517]
  bIsQuadroAd: Annotated[NvBool, 1518]
  bIsNvidiaNvs: Annotated[NvBool, 1519]
  bIsVgx: Annotated[NvBool, 1520]
  bGeforceSmb: Annotated[NvBool, 1521]
  bIsTitan: Annotated[NvBool, 1522]
  bIsTesla: Annotated[NvBool, 1523]
  bIsMobile: Annotated[NvBool, 1524]
  # Power-management capability flags (GC6/GC8/GC-Off/RTD3).
  bIsGc6Rtd3Allowed: Annotated[NvBool, 1525]
  bIsGc8Rtd3Allowed: Annotated[NvBool, 1526]
  bIsGcOffRtd3Allowed: Annotated[NvBool, 1527]
  bIsGcoffLegacyAllowed: Annotated[NvBool, 1528]
  bIsMigSupported: Annotated[NvBool, 1529]
  RTD3GC6TotalBoardPower: Annotated[NvU16, 1530]
  RTD3GC6PerstDelay: Annotated[NvU16, 1532]
  bar1PdeBase: Annotated[NvU64, 1536]
  bar2PdeBase: Annotated[NvU64, 1544]
  bVbiosValid: Annotated[NvBool, 1552]
  vbiosSubVendor: Annotated[NvU32, 1556]
  vbiosSubDevice: Annotated[NvU32, 1560]
  bPageRetirementSupported: Annotated[NvBool, 1564]
  bSplitVasBetweenServerClientRm: Annotated[NvBool, 1565]
  bClRootportNeedsNosnoopWAR: Annotated[NvBool, 1566]
  displaylessMaxHeads: Annotated[VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS, 1568]
  displaylessMaxResolution: Annotated[VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS, 1576]
  displaylessMaxPixels: Annotated[NvU64, 1592]
  # Handles to the internal client/device/subdevice GSP allocated for RM.
  hInternalClient: Annotated[NvHandle, 1600]
  hInternalDevice: Annotated[NvHandle, 1604]
  hInternalSubdevice: Annotated[NvHandle, 1608]
  bSelfHostedMode: Annotated[NvBool, 1612]
  bAtsSupported: Annotated[NvBool, 1613]
  bIsGpuUefi: Annotated[NvBool, 1614]
  bIsEfiInit: Annotated[NvBool, 1615]
  ecidInfo: Annotated[c.Array[EcidManufacturingInfo, Literal[2]], 1616]
  fwWprLayoutOffset: Annotated[FW_WPR_LAYOUT_OFFSET, 1640]
@c.record
class struct_NV2080_CTRL_GPU_GET_GID_INFO_PARAMS(c.Struct):
  # GPU unique-ID (GID/UUID) query result.
  SIZE = 268
  index: Annotated[NvU32, 0]
  flags: Annotated[NvU32, 4]
  length: Annotated[NvU32, 8]
  data: Annotated[c.Array[NvU8, Literal[256]], 12]
NV2080_CTRL_GPU_GET_GID_INFO_PARAMS: TypeAlias = struct_NV2080_CTRL_GPU_GET_GID_INFO_PARAMS
@c.record
class struct_NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS(c.Struct):
  # VBIOS SKU identity strings (fixed-size char arrays, not NvU8).
  SIZE = 48
  BoardID: Annotated[NvU32, 0]
  chipSKU: Annotated[c.Array[Annotated[bytes, ctypes.c_char], Literal[9]], 4]
  chipSKUMod: Annotated[c.Array[Annotated[bytes, ctypes.c_char], Literal[5]], 13]
  skuConfigVersion: Annotated[NvU32, 20]
  project: Annotated[c.Array[Annotated[bytes, ctypes.c_char], Literal[5]], 24]
  projectSKU: Annotated[c.Array[Annotated[bytes, ctypes.c_char], Literal[5]], 29]
  CDP: Annotated[c.Array[Annotated[bytes, ctypes.c_char], Literal[6]], 34]
  projectSKUMod: Annotated[c.Array[Annotated[bytes, ctypes.c_char], Literal[2]], 40]
  businessCycle: Annotated[NvU32, 44]
NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS: TypeAlias = struct_NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS
@c.record
class struct_NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS(c.Struct):
  SIZE = 776
  numFBRegions: Annotated[NvU32, 0]
  fbRegion: Annotated[c.Array[NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO, Literal[16]], 8]
NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS: TypeAlias = struct_NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS
@c.record
class struct_NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO(c.Struct):
  # One framebuffer region descriptor ([base, limit] range plus attributes).
  SIZE = 48
  base: Annotated[NvU64, 0]
  limit: Annotated[NvU64, 8]
  reserved: Annotated[NvU64, 16]
  performance: Annotated[NvU32, 24]
  supportCompressed: Annotated[NvBool, 28]
  supportISO: Annotated[NvBool, 29]
  bProtected: Annotated[NvBool, 30]
  blackList: Annotated[NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG, 31]
NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO: TypeAlias = struct_NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO
# Per-surface-memory-type blacklist flags: 17 single-byte booleans.
NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG: TypeAlias = c.Array[Annotated[int, ctypes.c_ubyte], Literal[17]]
@c.record
class struct_NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS(c.Struct):
  SIZE = 80
  totalVFs: Annotated[NvU32, 0]
  firstVfOffset: Annotated[NvU32, 4]
  vfFeatureMask: Annotated[NvU32, 8]
  FirstVFBar0Address: Annotated[NvU64, 16]
  FirstVFBar1Address: Annotated[NvU64, 24]
  FirstVFBar2Address: Annotated[NvU64, 32]
  bar0Size: Annotated[NvU64, 40]
  bar1Size: Annotated[NvU64, 48]
  bar2Size: Annotated[NvU64, 56]
  b64bitBar0: Annotated[NvBool, 64]
  b64bitBar1: Annotated[NvBool, 65]
  b64bitBar2: Annotated[NvBool, 66]
  bSriovEnabled: Annotated[NvBool, 67]
  bSriovHeavyEnabled: Annotated[NvBool, 68]
  bEmulateVFBar0TlbInvalidationRegister: Annotated[NvBool, 69]
  bClientRmAllocatedCtxBuffer: Annotated[NvBool, 70]
  bNonPowerOf2ChannelCountSupported: Annotated[NvBool, 71]
  bVfResizableBAR1Supported: Annotated[NvBool, 72]
NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS: TypeAlias = struct_NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS
GspStaticConfigInfo: TypeAlias = struct_GspStaticConfigInfo_t
# GspSystemInfo: host/system description handed TO GSP at init (physical BAR
# addresses, PCI identity, chipset/ACPI state, hypervisor info, feature flags).
@c.record
class struct_GspSystemInfo(c.Struct):
  SIZE = 928
  # Physical addresses of the GPU's BARs as seen by the host.
  gpuPhysAddr: Annotated[NvU64, 0]
  gpuPhysFbAddr: Annotated[NvU64, 8]
  gpuPhysInstAddr: Annotated[NvU64, 16]
  gpuPhysIoAddr: Annotated[NvU64, 24]
  nvDomainBusDeviceFunc: Annotated[NvU64, 32]
  simAccessBufPhysAddr: Annotated[NvU64, 40]
  notifyOpSharedSurfacePhysAddr: Annotated[NvU64, 48]
  pcieAtomicsOpMask: Annotated[NvU64, 56]
  consoleMemSize: Annotated[NvU64, 64]
  maxUserVa: Annotated[NvU64, 72]
  pciConfigMirrorBase: Annotated[NvU32, 80]
  pciConfigMirrorSize: Annotated[NvU32, 84]
  PCIDeviceID: Annotated[NvU32, 88]
  PCISubDeviceID: Annotated[NvU32, 92]
  PCIRevisionID: Annotated[NvU32, 96]
  pcieAtomicsCplDeviceCapMask: Annotated[NvU32, 100]
  oorArch: Annotated[NvU8, 104]
  clPdbProperties: Annotated[NvU64, 112]
  Chipset: Annotated[NvU32, 120]
  bGpuBehindBridge: Annotated[NvBool, 124]
  bFlrSupported: Annotated[NvBool, 125]
  b64bBar0Supported: Annotated[NvBool, 126]
  bMnocAvailable: Annotated[NvBool, 127]
  chipsetL1ssEnable: Annotated[NvU32, 128]
  # PCIe ASPM (L0s/L1) support/restriction flags for the upstream port.
  bUpstreamL0sUnsupported: Annotated[NvBool, 132]
  bUpstreamL1Unsupported: Annotated[NvBool, 133]
  bUpstreamL1PorSupported: Annotated[NvBool, 134]
  bUpstreamL1PorMobileOnly: Annotated[NvBool, 135]
  bSystemHasMux: Annotated[NvBool, 136]
  upstreamAddressValid: Annotated[NvU8, 137]
  FHBBusInfo: Annotated[BUSINFO, 138]
  chipsetIDInfo: Annotated[BUSINFO, 148]
  acpiMethodData: Annotated[ACPI_METHOD_DATA, 160]
  hypervisorType: Annotated[NvU32, 836]
  bIsPassthru: Annotated[NvBool, 840]
  sysTimerOffsetNs: Annotated[NvU64, 848]
  gspVFInfo: Annotated[GSP_VF_INFO, 856]
  bIsPrimary: Annotated[NvBool, 896]
  isGridBuild: Annotated[NvBool, 897]
  pcieConfigReg: Annotated[GSP_PCIE_CONFIG_REG, 900]
  gridBuildCsp: Annotated[NvU32, 904]
  bPreserveVideoMemoryAllocations: Annotated[NvBool, 908]
  bTdrEventSupported: Annotated[NvBool, 909]
  bFeatureStretchVblankCapable: Annotated[NvBool, 910]
  bEnableDynamicGranularityPageArrays: Annotated[NvBool, 911]
  bClockBoostSupported: Annotated[NvBool, 912]
  bRouteDispIntrsToCPU: Annotated[NvBool, 913]
  hostPageSize: Annotated[NvU64, 920]
GspSystemInfo: TypeAlias = struct_GspSystemInfo
# VBIOS parsing structures: Falcon application-interface tables (found inside
# ucode DMEM), BIT (BIOS Information Table) header/token records, and the
# Falcon ucode table/descriptor formats used to locate FWSEC.
@c.record
class FALCON_APPLICATION_INTERFACE_HEADER_V1(c.Struct):
  SIZE = 4
  version: Annotated[NvU8, 0]
  headerSize: Annotated[NvU8, 1]
  entrySize: Annotated[NvU8, 2]
  entryCount: Annotated[NvU8, 3]
@c.record
class FALCON_APPLICATION_INTERFACE_ENTRY_V1(c.Struct):
  SIZE = 8
  id: Annotated[NvU32, 0]
  dmemOffset: Annotated[NvU32, 4]
@c.record
class FALCON_APPLICATION_INTERFACE_DMEM_MAPPER_V3(c.Struct):
  # DMEM mapper interface: locates command in/out buffers inside ucode DMEM
  # and carries the init command / feature masks.
  SIZE = 64
  signature: Annotated[NvU32, 0]
  version: Annotated[NvU16, 4]
  size: Annotated[NvU16, 6]
  cmd_in_buffer_offset: Annotated[NvU32, 8]
  cmd_in_buffer_size: Annotated[NvU32, 12]
  cmd_out_buffer_offset: Annotated[NvU32, 16]
  cmd_out_buffer_size: Annotated[NvU32, 20]
  nvf_img_data_buffer_offset: Annotated[NvU32, 24]
  nvf_img_data_buffer_size: Annotated[NvU32, 28]
  printfBufferHdr: Annotated[NvU32, 32]
  ucode_build_time_stamp: Annotated[NvU32, 36]
  ucode_signature: Annotated[NvU32, 40]
  init_cmd: Annotated[NvU32, 44]
  ucode_feature: Annotated[NvU32, 48]
  ucode_cmd_mask0: Annotated[NvU32, 52]
  ucode_cmd_mask1: Annotated[NvU32, 56]
  multiTgtTbl: Annotated[NvU32, 60]
@c.record
class struct_BIT_HEADER_V1_00(c.Struct):
  # BIOS Information Table header; `TokenEntries` BIT_TOKEN_V1_00 records follow.
  SIZE = 12
  Id: Annotated[Annotated[int, ctypes.c_uint16], 0]
  Signature: Annotated[Annotated[int, ctypes.c_uint32], 2]
  BCD_Version: Annotated[Annotated[int, ctypes.c_uint16], 6]
  HeaderSize: Annotated[Annotated[int, ctypes.c_ubyte], 8]
  TokenSize: Annotated[Annotated[int, ctypes.c_ubyte], 9]
  TokenEntries: Annotated[Annotated[int, ctypes.c_ubyte], 10]
  HeaderChksum: Annotated[Annotated[int, ctypes.c_ubyte], 11]
BIT_HEADER_V1_00: TypeAlias = struct_BIT_HEADER_V1_00
@c.record
class struct_BIT_TOKEN_V1_00(c.Struct):
  SIZE = 8
  TokenId: Annotated[Annotated[int, ctypes.c_ubyte], 0]
  DataVersion: Annotated[Annotated[int, ctypes.c_ubyte], 1]
  DataSize: Annotated[Annotated[int, ctypes.c_uint16], 2]
  DataPtr: Annotated[Annotated[int, ctypes.c_uint32], 4]
BIT_TOKEN_V1_00: TypeAlias = struct_BIT_TOKEN_V1_00
@c.record
class BIT_DATA_BIOSDATA_BINVER(c.Struct):
  SIZE = 5
  Version: Annotated[Annotated[int, ctypes.c_uint32], 0]
  OemVersion: Annotated[Annotated[int, ctypes.c_ubyte], 4]
@c.record
class BIT_DATA_FALCON_DATA_V2(c.Struct):
  SIZE = 4
  FalconUcodeTablePtr: Annotated[Annotated[int, ctypes.c_uint32], 0]
@c.record
class FALCON_UCODE_TABLE_HDR_V1(c.Struct):
  SIZE = 6
  Version: Annotated[Annotated[int, ctypes.c_ubyte], 0]
  HeaderSize: Annotated[Annotated[int, ctypes.c_ubyte], 1]
  EntrySize: Annotated[Annotated[int, ctypes.c_ubyte], 2]
  EntryCount: Annotated[Annotated[int, ctypes.c_ubyte], 3]
  DescVersion: Annotated[Annotated[int, ctypes.c_ubyte], 4]
  DescSize: Annotated[Annotated[int, ctypes.c_ubyte], 5]
@c.record
class FALCON_UCODE_TABLE_ENTRY_V1(c.Struct):
  SIZE = 6
  ApplicationID: Annotated[Annotated[int, ctypes.c_ubyte], 0]
  TargetID: Annotated[Annotated[int, ctypes.c_ubyte], 1]
  DescPtr: Annotated[Annotated[int, ctypes.c_uint32], 2]
@c.record
class FALCON_UCODE_DESC_HEADER(c.Struct):
  SIZE = 4
  vDesc: Annotated[Annotated[int, ctypes.c_uint32], 0]
@c.record
class FALCON_UCODE_DESC_V3(c.Struct):
  # V3 ucode descriptor: IMEM/DMEM load parameters plus PKC signature info.
  SIZE = 44
  Hdr: Annotated[FALCON_UCODE_DESC_HEADER, 0]
  StoredSize: Annotated[Annotated[int, ctypes.c_uint32], 4]
  PKCDataOffset: Annotated[Annotated[int, ctypes.c_uint32], 8]
  InterfaceOffset: Annotated[Annotated[int, ctypes.c_uint32], 12]
  IMEMPhysBase: Annotated[Annotated[int, ctypes.c_uint32], 16]
  IMEMLoadSize: Annotated[Annotated[int, ctypes.c_uint32], 20]
  IMEMVirtBase: Annotated[Annotated[int, ctypes.c_uint32], 24]
  DMEMPhysBase: Annotated[Annotated[int, ctypes.c_uint32], 28]
  DMEMLoadSize: Annotated[Annotated[int, ctypes.c_uint32], 32]
  EngineIdMask: Annotated[Annotated[int, ctypes.c_uint16], 36]
  UcodeId: Annotated[Annotated[int, ctypes.c_ubyte], 38]
  SignatureCount: Annotated[Annotated[int, ctypes.c_ubyte], 39]
  SignatureVersions: Annotated[Annotated[int, ctypes.c_uint16], 40]
  Reserved: Annotated[Annotated[int, ctypes.c_uint16], 42]
# --- FWSEC-LIC command descriptors (autogenerated) ---
@c.record
class FWSECLIC_READ_VBIOS_DESC(c.Struct):
  # Describes where the GFW image lives for a read-vbios command.
  SIZE = 24
  version: Annotated[NvU32, 0]
  size: Annotated[NvU32, 4]
  gfwImageOffset: Annotated[NvU64, 8]
  gfwImageSize: Annotated[NvU32, 16]
  flags: Annotated[NvU32, 20]
@c.record
class FWSECLIC_FRTS_REGION_DESC(c.Struct):
  # FRTS region placement: offset is in 4K units (see frtsRegionOffset4K).
  SIZE = 20
  version: Annotated[NvU32, 0]
  size: Annotated[NvU32, 4]
  frtsRegionOffset4K: Annotated[NvU32, 8]
  frtsRegionSize: Annotated[NvU32, 12]
  frtsRegionMediaType: Annotated[NvU32, 16]
@c.record
class FWSECLIC_FRTS_CMD(c.Struct):
  # Full FRTS command: vbios read descriptor followed by the region descriptor.
  SIZE = 44
  readVbiosDesc: Annotated[FWSECLIC_READ_VBIOS_DESC, 0]
  frtsRegionDesc: Annotated[FWSECLIC_FRTS_REGION_DESC, 24]
# --- PCI expansion ROM headers (autogenerated) ---
@c.record
class struct__PCI_EXP_ROM_STANDARD(c.Struct):
  # Standard PCI expansion ROM header (0xAA55 signature at offset 0).
  SIZE = 30
  sig: Annotated[NvU16, 0]
  reserved: Annotated[c.Array[NvU8, Literal[22]], 2]
  pciDataStrucPtr: Annotated[NvU16, 24]
  sizeOfBlock: Annotated[NvU32, 26]
PCI_EXP_ROM_STANDARD: TypeAlias = struct__PCI_EXP_ROM_STANDARD
PPCI_EXP_ROM_STANDARD: TypeAlias = c.POINTER[struct__PCI_EXP_ROM_STANDARD]
@c.record
class struct__PCI_EXP_ROM_NBSI(c.Struct):
  # NBSI variant: same layout but carves nbsiDataOffset out of the reserved area.
  SIZE = 30
  sig: Annotated[NvU16, 0]
  reserved: Annotated[c.Array[NvU8, Literal[20]], 2]
  nbsiDataOffset: Annotated[NvU16, 22]
  pciDataStrucPtr: Annotated[NvU16, 24]
  sizeOfBlock: Annotated[NvU32, 26]
PCI_EXP_ROM_NBSI: TypeAlias = struct__PCI_EXP_ROM_NBSI
PPCI_EXP_ROM_NBSI: TypeAlias = c.POINTER[struct__PCI_EXP_ROM_NBSI]
@c.record
class union__PCI_EXP_ROM(c.Struct):
  # C union: both members share offset 0 over the same 30 bytes.
  SIZE = 30
  standard: Annotated[PCI_EXP_ROM_STANDARD, 0]
  nbsi: Annotated[PCI_EXP_ROM_NBSI, 0]
PCI_EXP_ROM: TypeAlias = union__PCI_EXP_ROM
PPCI_EXP_ROM: TypeAlias = c.POINTER[union__PCI_EXP_ROM]
@c.record
class struct__PCI_DATA_STRUCT(c.Struct):
  # PCI Data Structure ("PCIR") pointed to by the ROM header.
  SIZE = 24
  sig: Annotated[NvU32, 0]
  vendorID: Annotated[NvU16, 4]
  deviceID: Annotated[NvU16, 6]
  deviceListPtr: Annotated[NvU16, 8]
  pciDataStructLen: Annotated[NvU16, 10]
  pciDataStructRev: Annotated[NvU8, 12]
  classCode: Annotated[c.Array[NvU8, Literal[3]], 13]
  imageLen: Annotated[NvU16, 16]
  vendorRomRev: Annotated[NvU16, 18]
  codeType: Annotated[NvU8, 20]
  lastImage: Annotated[NvU8, 21]
  maxRunTimeImageLen: Annotated[NvU16, 22]
PCI_DATA_STRUCT: TypeAlias = struct__PCI_DATA_STRUCT
PPCI_DATA_STRUCT: TypeAlias = c.POINTER[struct__PCI_DATA_STRUCT]
@c.record
class struct__NV_PCI_DATA_EXT_STRUCT(c.Struct):
  # NVIDIA extension to the PCI data structure (subimage length, flags).
  SIZE = 12
  signature: Annotated[NvU32, 0]
  nvPciDataExtRev: Annotated[NvU16, 4]
  nvPciDataExtLen: Annotated[NvU16, 6]
  subimageLen: Annotated[NvU16, 8]
  privLastImage: Annotated[NvU8, 10]
  flags: Annotated[NvU8, 11]
NV_PCI_DATA_EXT_STRUCT: TypeAlias = struct__NV_PCI_DATA_EXT_STRUCT
PNV_PCI_DATA_EXT_STRUCT: TypeAlias = c.POINTER[struct__NV_PCI_DATA_EXT_STRUCT]
c.init_records()  # presumably resolves the deferred @c.record layouts declared above -- see tinygrad.runtime.support.c
# --- autogenerated constants (translated from the NVIDIA open-gpu-kernel-modules headers) ---
# GSP firmware WPR/SR metadata magics and revisions
GSP_FW_WPR_META_VERIFIED = 0xa0a0a0a0a0a0a0a0 # type: ignore
GSP_FW_WPR_META_REVISION = 1 # type: ignore
GSP_FW_WPR_META_MAGIC = 0xdc3aae21371a60b3 # type: ignore
GSP_FW_WPR_HEAP_FREE_REGION_COUNT = 128 # type: ignore
GSP_FW_HEAP_FREE_LIST_MAGIC = 0x4845415046524545 # type: ignore
GSP_FW_SR_META_MAGIC = 0x8a3bb9e6c6c39d93 # type: ignore
GSP_FW_SR_META_REVISION = 2 # type: ignore
GSP_FW_SR_META_INTERNAL_SIZE = 128 # type: ignore
# NVDM message type ids
NVDM_TYPE_HULK = 0x11 # type: ignore
NVDM_TYPE_FIRMWARE_UPDATE = 0x12 # type: ignore
NVDM_TYPE_PRC = 0x13 # type: ignore
NVDM_TYPE_COT = 0x14 # type: ignore
NVDM_TYPE_FSP_RESPONSE = 0x15 # type: ignore
NVDM_TYPE_CAPS_QUERY = 0x16 # type: ignore
NVDM_TYPE_INFOROM = 0x17 # type: ignore
NVDM_TYPE_SMBPBI = 0x18 # type: ignore
NVDM_TYPE_ROMREAD = 0x1A # type: ignore
NVDM_TYPE_UEFI_RM = 0x1C # type: ignore
NVDM_TYPE_UEFI_XTL_DEBUG_INTR = 0x1D # type: ignore
NVDM_TYPE_TNVL = 0x1F # type: ignore
NVDM_TYPE_CLOCK_BOOST = 0x20 # type: ignore
NVDM_TYPE_FSP_GSP_COMM = 0x21 # type: ignore
# vGPU RPC limits and reserved handle range
MAX_GPC_COUNT = 32 # type: ignore
VGPU_MAX_REGOPS_PER_RPC = 100 # type: ignore
VGPU_RESERVED_HANDLE_BASE = 0xCAF3F000 # type: ignore
VGPU_RESERVED_HANDLE_RANGE = 0x1000 # type: ignore
VGPU_CALC_PARAM_OFFSET = lambda prev_offset,prev_params: (prev_offset + NV_ALIGN_UP(sizeof(prev_params), sizeof(NvU32))) # type: ignore
# vGPU message header versioning and result codes
NV_VGPU_MSG_HEADER_VERSION_MAJOR_TOT = 0x00000003 # type: ignore
NV_VGPU_MSG_HEADER_VERSION_MINOR_TOT = 0x00000000 # type: ignore
NV_VGPU_MSG_SIGNATURE_VALID = 0x43505256 # type: ignore
NV_VGPU_MSG_RESULT_VMIOP_INVAL = 0xFF000001 # type: ignore
NV_VGPU_MSG_RESULT_VMIOP_RESOURCE = 0xFF000002 # type: ignore
NV_VGPU_MSG_RESULT_VMIOP_RANGE = 0xFF000003 # type: ignore
NV_VGPU_MSG_RESULT_VMIOP_READ_ONLY = 0xFF000004 # type: ignore
NV_VGPU_MSG_RESULT_VMIOP_NOT_FOUND = 0xFF000005 # type: ignore
NV_VGPU_MSG_RESULT_VMIOP_NO_ADDRESS_SPACE = 0xFF000006 # type: ignore
NV_VGPU_MSG_RESULT_VMIOP_TIMEOUT = 0xFF000007 # type: ignore
NV_VGPU_MSG_RESULT_VMIOP_NOT_ALLOWED_IN_CALLBACK = 0xFF000008 # type: ignore
NV_VGPU_MSG_RESULT_VMIOP_ECC_MISMATCH = 0xFF000009 # type: ignore
NV_VGPU_MSG_RESULT_VMIOP_NOT_SUPPORTED = 0xFF00000a # type: ignore
NV_VGPU_MSG_RESULT_RPC_UNKNOWN_FUNCTION = 0xFF100001 # type: ignore
NV_VGPU_MSG_RESULT_RPC_INVALID_MESSAGE_FORMAT = 0xFF100002 # type: ignore
NV_VGPU_MSG_RESULT_RPC_HANDLE_NOT_FOUND = 0xFF100003 # type: ignore
NV_VGPU_MSG_RESULT_RPC_HANDLE_EXISTS = 0xFF100004 # type: ignore
NV_VGPU_MSG_RESULT_RPC_UNKNOWN_RM_ERROR = 0xFF100005 # type: ignore
NV_VGPU_MSG_RESULT_RPC_UNKNOWN_VMIOP_ERROR = 0xFF100006 # type: ignore
NV_VGPU_MSG_RESULT_RPC_RESERVED_HANDLE = 0xFF100007 # type: ignore
NV_VGPU_MSG_RESULT_RPC_CUDA_PROFILING_DISABLED = 0xFF100008 # type: ignore
NV_VGPU_MSG_RESULT_RPC_API_CONTROL_NOT_SUPPORTED = 0xFF100009 # type: ignore
NV_VGPU_MSG_RESULT_RPC_PENDING = 0xFFFFFFFF # type: ignore
NV_VGPU_MSG_UNION_INIT = 0x00000000 # type: ignore
# vGPU PTE descriptor layouts (32- and 64-bit PTE variants)
NV_VGPU_PTEDESC_INIT = 0x00000000 # type: ignore
NV_VGPU_PTEDESC__PROD = 0x00000000 # type: ignore
NV_VGPU_PTEDESC_IDR_NONE = 0x00000000 # type: ignore
NV_VGPU_PTEDESC_IDR_SINGLE = 0x00000001 # type: ignore
NV_VGPU_PTEDESC_IDR_DOUBLE = 0x00000002 # type: ignore
NV_VGPU_PTEDESC_IDR_TRIPLE = 0x00000003 # type: ignore
NV_VGPU_PTE_PAGE_SIZE = 0x1000 # type: ignore
NV_VGPU_PTE_SIZE = 4 # type: ignore
NV_VGPU_PTE_INDEX_SHIFT = 10 # type: ignore
NV_VGPU_PTE_INDEX_MASK = 0x3FF # type: ignore
NV_VGPU_PTE_64_PAGE_SIZE = 0x1000 # type: ignore
NV_VGPU_PTE_64_SIZE = 8 # type: ignore
NV_VGPU_PTE_64_INDEX_SHIFT = 9 # type: ignore
NV_VGPU_PTE_64_INDEX_MASK = 0x1FF # type: ignore
# vGPU log severities
NV_VGPU_LOG_LEVEL_FATAL = 0x00000000 # type: ignore
NV_VGPU_LOG_LEVEL_ERROR = 0x00000001 # type: ignore
NV_VGPU_LOG_LEVEL_NOTICE = 0x00000002 # type: ignore
NV_VGPU_LOG_LEVEL_STATUS = 0x00000003 # type: ignore
NV_VGPU_LOG_LEVEL_DEBUG = 0x00000004 # type: ignore
VGPU_RPC_GET_P2P_CAPS_V2_MAX_GPUS_SQUARED_PER_RPC = 512 # type: ignore
GR_MAX_RPC_CTX_BUFFER_COUNT = 32 # type: ignore
VGPU_RPC_CTRL_DEBUG_READ_ALL_SM_ERROR_STATES_PER_RPC_v21_06 = 80 # type: ignore
# LibOS memory region / message queue constants
LIBOS_MEMORY_REGION_INIT_ARGUMENTS_MAX = 4096 # type: ignore
LIBOS_MEMORY_REGION_RADIX_PAGE_SIZE = 4096 # type: ignore
LIBOS_MEMORY_REGION_RADIX_PAGE_LOG2 = 12 # type: ignore
MSGQ_VERSION = 0 # type: ignore
MAX_DSM_SUPPORTED_FUNCS_RTN_LEN = 8 # type: ignore
NV_ACPI_GENERIC_FUNC_COUNT = 8 # type: ignore
# registry entry types passed to GSP
REGISTRY_TABLE_ENTRY_TYPE_UNKNOWN = 0 # type: ignore
REGISTRY_TABLE_ENTRY_TYPE_DWORD = 1 # type: ignore
REGISTRY_TABLE_ENTRY_TYPE_BINARY = 2 # type: ignore
REGISTRY_TABLE_ENTRY_TYPE_STRING = 3 # type: ignore
MAX_GROUP_COUNT = 2 # type: ignore
# legacy engine-type aliases and per-engine-class counts
RM_ENGINE_TYPE_GRAPHICS = RM_ENGINE_TYPE_GR0 # type: ignore
RM_ENGINE_TYPE_BSP = RM_ENGINE_TYPE_NVDEC0 # type: ignore
RM_ENGINE_TYPE_MSENC = RM_ENGINE_TYPE_NVENC0 # type: ignore
RM_ENGINE_TYPE_CIPHER = RM_ENGINE_TYPE_TSEC # type: ignore
RM_ENGINE_TYPE_NVJPG = RM_ENGINE_TYPE_NVJPEG0 # type: ignore
RM_ENGINE_TYPE_COPY_SIZE = 20 # type: ignore
RM_ENGINE_TYPE_NVENC_SIZE = 4 # type: ignore
RM_ENGINE_TYPE_NVJPEG_SIZE = 8 # type: ignore
RM_ENGINE_TYPE_NVDEC_SIZE = 8 # type: ignore
RM_ENGINE_TYPE_OFA_SIZE = 2 # type: ignore
RM_ENGINE_TYPE_GR_SIZE = 8 # type: ignore
NVGPU_ENGINE_CAPS_MASK_BITS = 32 # type: ignore
NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX = ((RM_ENGINE_TYPE_LAST-1)/NVGPU_ENGINE_CAPS_MASK_BITS + 1) # type: ignore
NVGPU_GET_ENGINE_CAPS_MASK = lambda caps,id: (caps[(id)/NVGPU_ENGINE_CAPS_MASK_BITS] & NVBIT((id) % NVGPU_ENGINE_CAPS_MASK_BITS)) # type: ignore
# falcon application-interface ids / FWSEC commands
FALCON_APPLICATION_INTERFACE_ENTRY_ID_DMEMMAPPER = (0x4) # type: ignore
FALCON_APPLICATION_INTERFACE_DMEM_MAPPER_V3_CMD_FRTS = (0x15) # type: ignore
FALCON_APPLICATION_INTERFACE_DMEM_MAPPER_V3_CMD_SB = (0x19) # type: ignore
# VBIOS BIT header/token parsing constants; *_FMT strings describe packed field layouts
BIT_HEADER_ID = 0xB8FF # type: ignore
BIT_HEADER_SIGNATURE = 0x00544942 # type: ignore
BIT_HEADER_SIZE_OFFSET = 8 # type: ignore
BIT_HEADER_V1_00_FMT = "1w1d1w4b" # type: ignore
BIT_TOKEN_V1_00_SIZE_6 = 6 # type: ignore
BIT_TOKEN_V1_00_SIZE_8 = 8 # type: ignore
BIT_TOKEN_V1_00_FMT_SIZE_6 = "2b2w" # type: ignore
BIT_TOKEN_V1_00_FMT_SIZE_8 = "2b1w1d" # type: ignore
BIT_TOKEN_BIOSDATA = 0x42 # type: ignore
BIT_DATA_BIOSDATA_VERSION_1 = 0x1 # type: ignore
BIT_DATA_BIOSDATA_VERSION_2 = 0x2 # type: ignore
BIT_DATA_BIOSDATA_BINVER_FMT = "1d1b" # type: ignore
BIT_DATA_BIOSDATA_BINVER_SIZE_5 = 5 # type: ignore
BIT_TOKEN_FALCON_DATA = 0x70 # type: ignore
BIT_DATA_FALCON_DATA_V2_4_FMT = "1d" # type: ignore
BIT_DATA_FALCON_DATA_V2_SIZE_4 = 4 # type: ignore
FALCON_UCODE_TABLE_HDR_V1_VERSION = 1 # type: ignore
FALCON_UCODE_TABLE_HDR_V1_SIZE_6 = 6 # type: ignore
FALCON_UCODE_TABLE_HDR_V1_6_FMT = "6b" # type: ignore
FALCON_UCODE_TABLE_ENTRY_V1_VERSION = 1 # type: ignore
FALCON_UCODE_TABLE_ENTRY_V1_SIZE_6 = 6 # type: ignore
FALCON_UCODE_TABLE_ENTRY_V1_6_FMT = "2b1d" # type: ignore
FALCON_UCODE_ENTRY_APPID_FIRMWARE_SEC_LIC = 0x05 # type: ignore
FALCON_UCODE_ENTRY_APPID_FWSEC_DBG = 0x45 # type: ignore
FALCON_UCODE_ENTRY_APPID_FWSEC_PROD = 0x85 # type: ignore
NV_BIT_FALCON_UCODE_DESC_HEADER_VDESC_FLAGS_VERSION_UNAVAILABLE = 0x00 # type: ignore
NV_BIT_FALCON_UCODE_DESC_HEADER_VDESC_FLAGS_VERSION_AVAILABLE = 0x01 # type: ignore
NV_BIT_FALCON_UCODE_DESC_HEADER_VDESC_VERSION_V1 = 0x01 # type: ignore
NV_BIT_FALCON_UCODE_DESC_HEADER_VDESC_VERSION_V2 = 0x02 # type: ignore
NV_BIT_FALCON_UCODE_DESC_HEADER_VDESC_VERSION_V3 = 0x03 # type: ignore
NV_BIT_FALCON_UCODE_DESC_HEADER_VDESC_VERSION_V4 = 0x04 # type: ignore
FALCON_UCODE_DESC_HEADER_FORMAT = "1d" # type: ignore
FALCON_UCODE_DESC_V3_SIZE_44 = 44 # type: ignore
FALCON_UCODE_DESC_V3_44_FMT = "9d1w2b2w" # type: ignore
BCRT30_RSA3K_SIG_SIZE = 384 # type: ignore
FWSECLIC_READ_VBIOS_STRUCT_FLAGS = (2) # type: ignore
FWSECLIC_FRTS_REGION_MEDIA_FB = (2) # type: ignore
FWSECLIC_FRTS_REGION_SIZE_1MB_IN_4K = (0x100) # type: ignore
NV_BCRT_HASH_INFO_BASE_CODE_TYPE_VBIOS_BASE = 0x00 # type: ignore
NV_BCRT_HASH_INFO_BASE_CODE_TYPE_VBIOS_EXT = 0xE0 # type: ignore
# PCI expansion ROM / PCI data structure signatures and field offsets
PCI_EXP_ROM_SIGNATURE = 0xaa55 # type: ignore
PCI_EXP_ROM_SIGNATURE_NV = 0x4e56 # type: ignore
PCI_EXP_ROM_SIGNATURE_NV2 = 0xbb77 # type: ignore
IS_VALID_PCI_ROM_SIG = lambda sig: ((sig == PCI_EXP_ROM_SIGNATURE) or (sig == PCI_EXP_ROM_SIGNATURE_NV) or (sig == PCI_EXP_ROM_SIGNATURE_NV2)) # type: ignore
OFFSETOF_PCI_EXP_ROM_SIG = 0x0 # type: ignore
OFFSETOF_PCI_EXP_ROM_NBSI_DATA_OFFSET = 0x16 # type: ignore
OFFSETOF_PCI_EXP_ROM_PCI_DATA_STRUCT_PTR = 0x18 # type: ignore
PCI_DATA_STRUCT_SIGNATURE = 0x52494350 # type: ignore
PCI_DATA_STRUCT_SIGNATURE_NV = 0x5344504E # type: ignore
PCI_DATA_STRUCT_SIGNATURE_NV2 = 0x53494752 # type: ignore
IS_VALID_PCI_DATA_SIG = lambda sig: ((sig == PCI_DATA_STRUCT_SIGNATURE) or (sig == PCI_DATA_STRUCT_SIGNATURE_NV) or (sig == PCI_DATA_STRUCT_SIGNATURE_NV2)) # type: ignore
PCI_ROM_IMAGE_BLOCK_SIZE = 512 # type: ignore
OFFSETOF_PCI_DATA_STRUCT_SIG = 0x0 # type: ignore
OFFSETOF_PCI_DATA_STRUCT_VENDOR_ID = 0x4 # type: ignore
OFFSETOF_PCI_DATA_STRUCT_LEN = 0xa # type: ignore
OFFSETOF_PCI_DATA_STRUCT_CLASS_CODE = 0xd # type: ignore
OFFSETOF_PCI_DATA_STRUCT_CODE_TYPE = 0x14 # type: ignore
OFFSETOF_PCI_DATA_STRUCT_IMAGE_LEN = 0x10 # type: ignore
OFFSETOF_PCI_DATA_STRUCT_LAST_IMAGE = 0x15 # type: ignore
NV_PCI_DATA_EXT_SIG = 0x4544504E # type: ignore
NV_PCI_DATA_EXT_REV_10 = 0x100 # type: ignore
NV_PCI_DATA_EXT_REV_11 = 0x101 # type: ignore
OFFSETOF_PCI_DATA_EXT_STRUCT_SIG = 0x0 # type: ignore
OFFSETOF_PCI_DATA_EXT_STRUCT_LEN = 0x6 # type: ignore
OFFSETOF_PCI_DATA_EXT_STRUCT_REV = 0x4 # type: ignore
OFFSETOF_PCI_DATA_EXT_STRUCT_SUBIMAGE_LEN = 0x8 # type: ignore
OFFSETOF_PCI_DATA_EXT_STRUCT_LAST_IMAGE = 0xa # type: ignore
OFFSETOF_PCI_DATA_EXT_STRUCT_FLAGS = 0xb # type: ignore
PCI_DATA_EXT_STRUCT_FLAGS_CHECKSUM_DISABLED = 0x04 # type: ignore
"repo_id": "tinygrad/tinygrad",
"file_path": "tinygrad/runtime/autogen/nv.py",
"license": "MIT License",
"lines": 4776,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
tinygrad/tinygrad:tinygrad/runtime/autogen/nvjitlink.py | # mypy: disable-error-code="empty-body"
from __future__ import annotations
import ctypes
from typing import Annotated, Literal, TypeAlias
from tinygrad.runtime.support.c import _IO, _IOW, _IOR, _IOWR
from tinygrad.runtime.support import c
import sysconfig
# Locate libnvJitLink under common CUDA toolkit roots; the python MULTIARCH config
# var (e.g. "x86_64-linux-gnu") is trimmed to the CUDA target triple directory name.
dll = c.DLL('nvjitlink', 'nvJitLink', [f'/{pre}/cuda/targets/{sysconfig.get_config_vars().get("MULTIARCH", "").rsplit("-", 1)[0]}/lib' for pre in ['opt', 'usr/local']])
# Status codes returned by every nvJitLink entry point.
class nvJitLinkResult(Annotated[int, ctypes.c_uint32], c.Enum): pass
NVJITLINK_SUCCESS = nvJitLinkResult.define('NVJITLINK_SUCCESS', 0)
NVJITLINK_ERROR_UNRECOGNIZED_OPTION = nvJitLinkResult.define('NVJITLINK_ERROR_UNRECOGNIZED_OPTION', 1)
NVJITLINK_ERROR_MISSING_ARCH = nvJitLinkResult.define('NVJITLINK_ERROR_MISSING_ARCH', 2)
NVJITLINK_ERROR_INVALID_INPUT = nvJitLinkResult.define('NVJITLINK_ERROR_INVALID_INPUT', 3)
NVJITLINK_ERROR_PTX_COMPILE = nvJitLinkResult.define('NVJITLINK_ERROR_PTX_COMPILE', 4)
NVJITLINK_ERROR_NVVM_COMPILE = nvJitLinkResult.define('NVJITLINK_ERROR_NVVM_COMPILE', 5)
NVJITLINK_ERROR_INTERNAL = nvJitLinkResult.define('NVJITLINK_ERROR_INTERNAL', 6)
# Kinds of inputs that can be added to a link job.
class nvJitLinkInputType(Annotated[int, ctypes.c_uint32], c.Enum): pass
NVJITLINK_INPUT_NONE = nvJitLinkInputType.define('NVJITLINK_INPUT_NONE', 0)
NVJITLINK_INPUT_CUBIN = nvJitLinkInputType.define('NVJITLINK_INPUT_CUBIN', 1)
NVJITLINK_INPUT_PTX = nvJitLinkInputType.define('NVJITLINK_INPUT_PTX', 2)
NVJITLINK_INPUT_LTOIR = nvJitLinkInputType.define('NVJITLINK_INPUT_LTOIR', 3)
NVJITLINK_INPUT_FATBIN = nvJitLinkInputType.define('NVJITLINK_INPUT_FATBIN', 4)
NVJITLINK_INPUT_OBJECT = nvJitLinkInputType.define('NVJITLINK_INPUT_OBJECT', 5)
NVJITLINK_INPUT_LIBRARY = nvJitLinkInputType.define('NVJITLINK_INPUT_LIBRARY', 6)
# Opaque link-state handle.
class struct_nvJitLink(ctypes.Structure): pass
nvJitLinkHandle: TypeAlias = c.POINTER[struct_nvJitLink]
uint32_t: TypeAlias = Annotated[int, ctypes.c_uint32]
# Lifetime: create with options, add data/files, complete, then query outputs/logs.
@dll.bind
def nvJitLinkCreate(handle:c.POINTER[nvJitLinkHandle], numOptions:uint32_t, options:c.POINTER[c.POINTER[Annotated[bytes, ctypes.c_char]]]) -> nvJitLinkResult: ...
@dll.bind
def nvJitLinkDestroy(handle:c.POINTER[nvJitLinkHandle]) -> nvJitLinkResult: ...
size_t: TypeAlias = Annotated[int, ctypes.c_uint64]
@dll.bind
def nvJitLinkAddData(handle:nvJitLinkHandle, inputType:nvJitLinkInputType, data:ctypes.c_void_p, size:size_t, name:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> nvJitLinkResult: ...
@dll.bind
def nvJitLinkAddFile(handle:nvJitLinkHandle, inputType:nvJitLinkInputType, fileName:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> nvJitLinkResult: ...
@dll.bind
def nvJitLinkComplete(handle:nvJitLinkHandle) -> nvJitLinkResult: ...
# Result accessors follow the usual two-call pattern: query size, then copy into a caller buffer.
@dll.bind
def nvJitLinkGetLinkedCubinSize(handle:nvJitLinkHandle, size:c.POINTER[size_t]) -> nvJitLinkResult: ...
@dll.bind
def nvJitLinkGetLinkedCubin(handle:nvJitLinkHandle, cubin:ctypes.c_void_p) -> nvJitLinkResult: ...
@dll.bind
def nvJitLinkGetLinkedPtxSize(handle:nvJitLinkHandle, size:c.POINTER[size_t]) -> nvJitLinkResult: ...
@dll.bind
def nvJitLinkGetLinkedPtx(handle:nvJitLinkHandle, ptx:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> nvJitLinkResult: ...
@dll.bind
def nvJitLinkGetErrorLogSize(handle:nvJitLinkHandle, size:c.POINTER[size_t]) -> nvJitLinkResult: ...
@dll.bind
def nvJitLinkGetErrorLog(handle:nvJitLinkHandle, log:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> nvJitLinkResult: ...
@dll.bind
def nvJitLinkGetInfoLogSize(handle:nvJitLinkHandle, size:c.POINTER[size_t]) -> nvJitLinkResult: ...
@dll.bind
def nvJitLinkGetInfoLog(handle:nvJitLinkHandle, log:c.POINTER[Annotated[bytes, ctypes.c_char]]) -> nvJitLinkResult: ...
@dll.bind
def nvJitLinkVersion(major:c.POINTER[Annotated[int, ctypes.c_uint32]], minor:c.POINTER[Annotated[int, ctypes.c_uint32]]) -> nvJitLinkResult: ...
c.init_records()
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "tinygrad/runtime/autogen/nvjitlink.py",
"license": "MIT License",
"lines": 57,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
tinygrad/tinygrad:extra/gemm/mi350x_uop_matmul_2.py | import os
import numpy as np
np.set_printoptions(linewidth=1000000)
# set before the tinygrad imports below -- presumably read at import time, TODO confirm
os.environ["AMD_LLVM"] = "0"
from tinygrad import Tensor, Context, dtypes, UOp, GlobalCounters
from tinygrad.helpers import DEBUG, getenv
from tinygrad.dtype import AddrSpace
from tinygrad.uop.ops import sint, AxisType, KernelInfo, Ops

WARP_SIZE = 64  # lanes per wave
# Reg tile sizes (tensor cores)
TC_M = 16
TC_N = 16
TC_K = 32
# problem size: square half-precision GEMM
N,M,K = 4096,4096,4096
# Threadblock tile sizes (block-level tile of C that a block computes)
BLOCK_M = 64
BLOCK_N = 64
BLOCK_K = 64
WARPGROUP_SIZE = 1  # waves per workgroup; scales BLOCK_M and thread count below
BLOCK_M = BLOCK_M * WARPGROUP_SIZE
TID_SIZE = WARPGROUP_SIZE*WARP_SIZE  # threads per workgroup (used as lidx0)
def copy(dest:UOp, src:UOp, rng:int, set=False, upcast=()):
  """Emit a UOp loop nest that copies *src* into *dest* elementwise.

  rng is the base range id; axis i gets id rng+i, and axes listed in *upcast*
  become UPCAST ranges instead of LOOP. With set=True, returns *dest* sequenced
  after the copy (so later reads observe it); otherwise returns the copy itself.
  """
  assert dest.shape == src.shape
  rngs = [UOp.range(s, rng+i, AxisType.UPCAST if i in upcast else AxisType.LOOP) for i,s in enumerate(src.shape)]
  copy = dest[*rngs].store(src[*rngs]).end(*rngs)
  return dest.after(copy) if set else copy
def compute_on_locals(acc:UOp, Asl:UOp, Bsl:UOp, rng:int, afters:tuple[UOp, ...], warpgroup, warp) -> UOp:
  """Inner K loop over one pair of LDS tiles: stage A/B fragments into registers
  and accumulate into *acc* via WMMA.

  rng is the base range id (sub-ranges use rng+10/+20/+30 offsets); *afters*
  sequences the register loads after the LDS barrier. Returns the ended loop nest.
  """
  K_inner_loop = UOp.range(BLOCK_K//TC_K, rng, AxisType.REDUCE)
  # load from locals into registers
  Ar = UOp.placeholder((BLOCK_M//TC_M//WARPGROUP_SIZE,), dtypes.half.vec(8), slot=1, addrspace=AddrSpace.REG)
  Br = UOp.placeholder((BLOCK_N//TC_N,), dtypes.half.vec(8), slot=2, addrspace=AddrSpace.REG)
  M_load_loop = UOp.range(BLOCK_M//TC_M//WARPGROUP_SIZE, rng+10)
  Asl = Asl.reshape(BLOCK_K//TC_K, TC_K, BLOCK_M//TC_M//WARPGROUP_SIZE, WARPGROUP_SIZE, TC_M)
  load_rng = UOp.range(8, rng+11, axis_type=AxisType.UPCAST)
  # each lane contracts 8 half values; (warp//16, warp%16) is the lane's position in
  # the 16x32 WMMA fragment -- assumed to match the AMD WMMA register layout, TODO confirm
  A_in = Asl[K_inner_loop, (warp//16)*8+load_rng, M_load_loop, warpgroup, warp%16].contract(load_rng)
  Ar = Ar[M_load_loop].set(A_in, end=M_load_loop)
  N_load_loop = UOp.range(BLOCK_N//TC_N, rng+20)
  Bsl = Bsl.reshape(BLOCK_K//TC_K, TC_K, BLOCK_N//TC_N, TC_N)
  load_rng = UOp.range(8, rng+21, axis_type=AxisType.UPCAST)
  B_in = Bsl[K_inner_loop, (warp//16)*8+load_rng, N_load_loop, warp%16].contract(load_rng)
  Br = Br[N_load_loop].set(B_in, end=N_load_loop)
  M_inner_loop = UOp.range(BLOCK_M//TC_M//WARPGROUP_SIZE, rng+30)
  N_inner_loop = UOp.range(BLOCK_N//TC_N, rng+31)
  # load values
  acc_after = acc.after(*afters, M_inner_loop, N_inner_loop, K_inner_loop)
  acc_load = acc_after[N_inner_loop, M_inner_loop]
  # do WMMA (16x16x32 half->float accumulate on AMD, wave size 64)
  wmma_arg = ('WMMA_16_16_32_half_float', (16, 16, 32), dtypes.half, dtypes.float, 'AMD', 64, ((), (), ((3, 2), (2, 2))), ())
  out = UOp(Ops.WMMA, dtypes.float.vec(4), (Ar[M_inner_loop], Br[N_inner_loop], acc_load), arg=wmma_arg)
  # store back the acc
  acc_store = acc[N_inner_loop, M_inner_loop].store(out)
  return acc_store.end(M_inner_loop, N_inner_loop, K_inner_loop)
def custom_gemm(C:UOp, A:UOp, B:UOp) -> UOp:
  """Build the UOp program for a threadblock-tiled half GEMM (C = A @ B, fp32 acc).

  Each (gidx0, gidx1) block owns a BLOCK_M x BLOCK_N tile of C: it stages A/B
  tiles into LDS, barriers, then runs compute_on_locals over the K tiles and
  writes the accumulators out. Returns the sinked kernel.
  """
  gx, gy = UOp.special(M//BLOCK_M, "gidx0"), UOp.special(N//BLOCK_N, "gidx1")
  K_outer_loop = UOp.range(K//BLOCK_K, 0, AxisType.REDUCE)
  # split out the globals into blocks
  C = C.src[0].cast(dtypes.float.vec(4).ptr(C.ptrdtype.size)).reshape((M//BLOCK_M, BLOCK_M, N//BLOCK_N, BLOCK_N))
  A = A.reshape((M//BLOCK_M, BLOCK_M, K//BLOCK_K, BLOCK_K))[gx, :, K_outer_loop, :]
  B = B.reshape((K//BLOCK_K, BLOCK_K, N//BLOCK_N, BLOCK_N))[K_outer_loop, :, gy, :]
  # ---------------------------
  # GLOBAL -> LOCAL (As, Bs)
  # ---------------------------
  tid = UOp.special(TID_SIZE, "lidx0")
  warpgroup, warp = tid//WARP_SIZE, tid%WARP_SIZE
  # every thread moves 8 half values per copy step
  A_view = A.reshape(-1, TID_SIZE, 8)
  B_view = B.reshape(-1, TID_SIZE, 8)
  # A: read BM x BK tiles (permute on store into locals)
  As = UOp.placeholder((BLOCK_K, BLOCK_M), dtypes.half, slot=0, addrspace=AddrSpace.LOCAL).shrink_to(BLOCK_K, BLOCK_M)
  As_view = As.reshape(-1, TID_SIZE, 8)
  # +4 columns of padding on Bs before shrink_to -- presumably LDS bank-conflict avoidance, TODO confirm
  Bs = UOp.placeholder((BLOCK_K, BLOCK_N+4), dtypes.half, slot=1, addrspace=AddrSpace.LOCAL).shrink_to(BLOCK_K, BLOCK_N)
  Bs_view = Bs.reshape(-1, TID_SIZE, 8)
  outer_copy = UOp.range(A_view.shape[0], 100, AxisType.UPCAST)
  inner_copy = UOp.range(A_view.shape[2], 101, AxisType.UPCAST)
  As_store = As_view[outer_copy, tid, inner_copy].store(A_view[outer_copy, tid, inner_copy])
  Bs_store = Bs_view[outer_copy, tid, inner_copy].store(B_view[outer_copy, tid, inner_copy])
  # NOLOAD debug knob: skip the global loads to measure the rest of the kernel
  if getenv("NOLOAD"):
    As_store = As[0,0].store(0)
    Bs_store = Bs[0,0].store(0)
  # TODO: can we automate barrier?
  barrier = UOp.barrier(UOp.group(As_store, Bs_store).end(outer_copy, inner_copy))
  if getenv("COMPUTE"):
    As, Bs = As.after(barrier), Bs.after(barrier)
    acc = UOp.placeholder((BLOCK_N//TC_N, BLOCK_M//TC_M//WARPGROUP_SIZE), dtypes.float.vec(4), 0, AddrSpace.REG)
    sink = compute_on_locals(acc, As, Bs, 200, afters=(barrier,), warpgroup=warpgroup, warp=warp)
    sink = sink.end(K_outer_loop)
    # scatter the per-lane float4 accumulators back into this block's C tile
    C_view = C[gx, :, gy, :].reshape(BLOCK_M//TC_M//WARPGROUP_SIZE, WARPGROUP_SIZE, TC_M, BLOCK_N//TC_N, TC_N)[:, warpgroup, warp%16, :, (warp//16)*4]
    sink = copy(C_view, acc.after(sink), rng=300)
  else:
    # copy-only mode: trivial store so the load/barrier path alone can be benchmarked
    sink = C.after(barrier.end(K_outer_loop))[0,0,0,0].store(As[0,0]+Bs[0,0])
  return sink.sink(arg=KernelInfo(name="custom_gemm", opts_to_apply=())).simplify()
if __name__ == "__main__":
  # benchmark the custom kernel, then check it against Tensor.dot
  a = Tensor.randn(M, K, dtype=dtypes.half)
  b = Tensor.randn(K, N, dtype=dtypes.half)
  c = Tensor.empty(M, N, dtype=dtypes.float)
  with Context(DEBUG=0): Tensor.realize(a,b)  # realize inputs before resetting the counters
  GlobalCounters.reset()
  with Context(DEBUG=max(2, DEBUG.value), DEVECTORIZE=2):
    tst = Tensor.custom_kernel(c, a, b, fxn=custom_gemm)[0]
    tst.realize()
  # 2*N*M*K flops for a GEMM
  print(f"{(N*M*K*2 / GlobalCounters.time_sum_s)*1e-12:.2f} REAL TFLOPS")
  with Context(DEBUG=0):
    ref = a.dot(b, dtype=dtypes.float)
    ref.realize()
  #print(ref.numpy())
  #print(tst.numpy())
  assert Tensor.isclose(ref, tst, atol=1e-2).all().item(), "matrix not close"
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "extra/gemm/mi350x_uop_matmul_2.py",
"license": "MIT License",
"lines": 106,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
tinygrad/tinygrad:tinygrad/runtime/support/autogen.py | import ctypes, itertools, re, functools, os, keyword
from tinygrad.helpers import unwrap
import tinygrad.runtime.autogen.libclang as clang # use REGEN=1 to regenerate libclang bindings
def unwrap_cursor(c: clang.CXCursor) -> clang.CXCursor:
  """Pass *c* through after asserting it is not libclang's null cursor."""
  null = clang.clang_getNullCursor()
  assert c != null
  return c
def children(c: clang.CXCursor) -> list[clang.CXCursor]:
  """Collect the direct children of cursor *c* via clang_visitChildren."""
  collected: list[clang.CXCursor] = []
  @clang.CXCursorVisitor
  def on_child(child, _parent, _client_data):
    collected.append(child)
    return clang.CXChildVisit_Continue  # breadth: siblings only, no recursion
  clang.clang_visitChildren(c, on_child, None)
  return collected
def fields(t: clang.CXType) -> list[clang.CXCursor]:
  """Collect the field cursors of record type *t* via clang_Type_visitFields."""
  collected: list[clang.CXCursor] = []
  @clang.CXFieldVisitor
  def on_field(cursor, _client_data):
    collected.append(cursor)
    return clang.CXVisit_Continue
  clang.clang_Type_visitFields(t, on_field, None)
  return collected
# flattens anonymous structs/unions
def all_fields(t, off=0):
  """Yield (field_cursor, byte_offset) for every named field of record type *t*,
  recursing into anonymous struct/union members so their fields surface at the
  parent level with accumulated byte offsets (clang reports offsets in bits)."""
  for f in fields(t):
    if clang.clang_Cursor_isAnonymousRecordDecl(clang.clang_getTypeDeclaration(clang.clang_getCursorType(f))):
      yield from all_fields(clang.clang_getCursorType(f), off + clang.clang_Cursor_getOffsetOfField(f) // 8)
    elif nm(f): yield f, off+clang.clang_Cursor_getOffsetOfField(f) // 8 # ignores unnamed fields
def arguments(c: clang.CXCursor|clang.CXType):
  """Yield the arguments of a function cursor, or the parameter types of a function type."""
  if isinstance(c, clang.CXCursor):
    for i in range(clang.clang_Cursor_getNumArguments(c)):
      yield clang.clang_Cursor_getArgument(c, i)
  else:
    for i in range(clang.clang_getNumArgTypes(c)):
      yield clang.clang_getArgType(c, i)
class Tokens:
  """Sequence wrapper owning the CXToken array for a cursor's extent.

  The tokens are freed via clang_disposeTokens in __del__, so keep the Tokens
  object alive while any of its tokens are in use.
  """
  def __init__(self, c: clang.CXCursor):
    clang.clang_tokenize(tu:=clang.clang_Cursor_getTranslationUnit(c), clang.clang_getCursorExtent(c),
                         toks:=(ctypes.POINTER(clang.CXToken)()), cnt:=ctypes.c_uint32())
    self.tu, self.toks = tu, toks[:cnt.value]
    # stash the translation unit on each token: nm/extent/loc need it for CXToken calls
    for t in self.toks: t._tu = tu
  def __getitem__(self, idx): return self.toks[idx]
  def __len__(self): return len(self.toks)
  def __del__(self):
    # clang_disposeTokens takes the first token and the count of the original array
    if self.toks: clang.clang_disposeTokens(self.tu, self.toks[0], len(self.toks))
def cxs(fn):
  """Decorator: adapt a CXString-returning function to return a python str.

  A NULL C string becomes "", otherwise the bytes are decoded and the CXString
  is released with clang_disposeString before returning.
  """
  @functools.wraps(fn)
  def wrap(*args, **kwargs) -> str:
    if ctypes.cast(clang.clang_getCString(cxs:=fn(*args, **kwargs)), ctypes.c_void_p).value is None: return ""
    ret = ctypes.string_at(clang.clang_getCString(cxs)).decode()
    clang.clang_disposeString(cxs)
    return ret
  return wrap
# TODO: caching this would be nice?
# nm/extent/loc dispatch on the argument's CX* class name to the matching
# clang_get<Kind>Spelling/Extent/Location function; CXToken variants take the
# owning translation unit (stashed as _tu by Tokens) as an extra first argument.
nm = cxs(lambda c: getattr(clang, f"clang_get{c.__class__.__name__[2:]}Spelling")(*([c._tu, c] if isinstance(c, clang.CXToken) else [c])))
def extent(c): return getattr(clang, f"clang_get{c.__class__.__name__[2:]}Extent")(*([c._tu, c] if isinstance(c, clang.CXToken) else [c]))
def loc(c): return getattr(clang, f"clang_get{c.__class__.__name__[2:]}Location")(*([c._tu, c] if isinstance(c, clang.CXToken) else [c]))
def gel(loc: clang.CXSourceLocation):
  """Expand a source location into {'file': CXString, 'line': int, 'offset': int}."""
  clang.clang_getExpansionLocation(loc, file:=clang.CXFile(), line:=ctypes.c_uint32(), None, offset:=ctypes.c_uint32())
  return {"file":clang.clang_getFileName(file), "line":line.value, "offset":offset.value}
loc_file = cxs(lambda loc: gel(loc)['file'])  # file name as a python str
def loc_off(loc: clang.CXSourceLocation) -> int: return gel(loc)['offset']  # byte offset into the file
def loc_line(loc: clang.CXSourceLocation) -> int: return gel(loc)['line']  # line number
def readext(f, fst, snd=None):
  """Read the source text of file *f* spanning a CXSourceRange *fst*, or the
  span between two locations *fst*..*snd*."""
  if isinstance(fst, clang.CXSourceRange):
    start, end = loc_off(clang.clang_getRangeStart(fst)), loc_off(clang.clang_getRangeEnd(fst))
  else:
    start, end = loc_off(fst), loc_off(snd)
  with open(f, "r") as fh: # reopening this every time is dumb...
    fh.seek(start)
    return fh.read(end - start)
def attrs(c):
  """Kinds of *c*'s children in the 400-499 range (attribute cursor kinds -- TODO confirm bounds)."""
  return [child.kind for child in children(c) if 400 <= child.kind < 500]
def protocols(t):
  """Yield the ObjC protocol declarations referenced by type *t*."""
  for i in range(clang.clang_Type_getNumObjCProtocolRefs(t)):
    yield clang.clang_Type_getObjCProtocolDecl(t, i)
def basetype(t):
  """Base type of an ObjC object type *t*."""
  return clang.clang_Type_getObjCObjectBaseType(t)
# Regex rewrites applied to C macro bodies to make them python-evaluable: join line
# continuations, strip comments, drop integer literal suffixes and leading zeros, map
# &&/||/! to and/or/not, mangle tagged type names (struct foo -> struct_foo), drop
# simple casts, and discard bitfield-looking (d:d) and token-paste (##) lines.
base_rules = [(r'\s*\\\n\s*', ' '), (r'\s*\n\s*', ' '), (r'//.*', ''), (r'/\*.*?\*/', ''), (r'\b(0[xX][0-9a-fA-F]+|\d+)[uUlL]+\b', r'\1'),
              (r'\b0+(?=\d)', ''), (r'\s*&&\s*', r' and '), (r'\s*\|\|\s*', r' or '), (r'\s*!\s*', ' not '),
              (r'(struct|union|enum)\s*([a-zA-Z_][a-zA-Z0-9_]*\b)', r'\1_\2'),
              (r'\((unsigned )?(char|uint64_t)\)', ''), (r'^.*\d+:\d+.*$', ''), (r'^.*\w##\w.*$', '')]
# integer CXType kinds: unsigned first, then all of them
uints = (clang.CXType_Char_U, clang.CXType_UChar, clang.CXType_UShort, clang.CXType_UInt, clang.CXType_ULong, clang.CXType_ULongLong)
# fix: was clang.CXType_ULong (already in uints, so a duplicate) where the signed
# CXType_Long was clearly intended -- plain `long` would not classify as an int
ints = uints + (clang.CXType_Char_S, clang.CXType_Short, clang.CXType_Int, clang.CXType_Long, clang.CXType_LongLong)
fps, specs = (clang.CXType_FunctionProto, clang.CXType_FunctionNoProto), (clang.CXCursor_ObjCSuperClassRef,) # this could include protocols
# https://clang.llvm.org/docs/AutomaticReferenceCounting.html#arc-method-families
arc_families = ['alloc', 'copy', 'mutableCopy', 'new']
def normalize(a):
  """Spelling of *a*, prefixed with '_' when it collides with a Python keyword."""
  n = nm(a)
  return "_" + n if keyword.iskeyword(n) else n
def an(py, dt):
  """Spell an Annotated[...] pairing python type *py* with ctypes type name *dt*."""
  return "Annotated[{}, ctypes.c_{}]".format(py, dt)
def gen(name, dll, files, args=[], prolog=[], rules=[], epilog=[], recsym=False, errno=False, anon_names={}, types={}, parse_macros=True, paths=[]):
macros, lines, anoncnt, types, objc, fns = [], [], itertools.count().__next__, {k:(v,True) for k,v in types.items()}, False, set()
def tname(t, suggested_name=None, typedef=None) -> str:
suggested_name = anon_names.get(f"{loc_file(loc(decl:=clang.clang_getTypeDeclaration(t)))}:{loc_line(loc(decl))}", suggested_name)
nonlocal lines, types, anoncnt, objc
tmap = {clang.CXType_Void:"None",clang.CXType_Char_U:an("int","ubyte"),clang.CXType_UChar:an("int","ubyte"),clang.CXType_WChar:an("str","wchar"),
clang.CXType_Char_S:an("bytes","char"),clang.CXType_SChar:an("int","byte"),clang.CXType_Bool:an("bool","bool"),
**{getattr(clang, f'CXType_{k}'):an("float", k.lower()) for k in ["Float", "Double", "LongDouble"]},
**{getattr(clang, f'CXType_{k}'):an("int", f"{'u' if 'U' in k else ''}int{sz}") for sz,k in
[(16, "UShort"), (16, "Short"), (32, "UInt"), (32, "Int"), (64, "ULong"), (64, "Long"), (64, "ULongLong"), (64, "LongLong")]}}
if t.kind in tmap: return tmap[t.kind]
if nm(t) in types and types[nm(t)][1]: return types[nm(t)][0]
if ((f:=t).kind in fps) or (t.kind == clang.CXType_Pointer and (f:=clang.clang_getPointeeType(t)).kind in fps):
return (f"c.CFUNCTYPE[{tname(clang.clang_getResultType(f))}, [" + ', '.join(map(tname, arguments(f))) + "]]")
match t.kind:
case clang.CXType_Pointer:
return "ctypes.c_void_p" if (p:=clang.clang_getPointeeType(t)).kind==clang.CXType_Void else f"c.POINTER[{tname(p)}]"
case clang.CXType_ObjCObjectPointer: return tname(clang.clang_getPointeeType(t)) # TODO: this seems wrong
case clang.CXType_Elaborated: return tname(clang.clang_Type_getNamedType(t), suggested_name)
case clang.CXType_Typedef if nm(t) == nm(canon:=clang.clang_getCanonicalType(t)): return tname(canon)
case clang.CXType_Typedef:
defined, cnm = nm(canon:=clang.clang_getCanonicalType(t)) in types, tname(canon, typedef=nm(t))
types[nm(t)] = cnm if nm(t).startswith("__") else nm(t).replace('::', '_'), True
# RECORDs need to handle typedefs specially to allow for self-reference
if canon.kind != clang.CXType_Record or defined: lines.append(f"{nm(t).replace('::', '_')}: TypeAlias = {cnm}")
return types[nm(t)][0]
case clang.CXType_Record:
# TODO: packed unions
# libclang does not use CXType_Elaborated for function parameters with type qualifiers (eg. void (*)(const struct foo))
if (_nm:=re.sub(r"^const ", "", nm(t))) in types and types[_nm][1]: return types[_nm][0]
# check for forward declaration
if _nm in types: types[_nm] = (tnm:=types[_nm][0]), len(fields(t)) != 0, (ln:=types[_nm][2])
else:
real_nm = ((suggested_name or (f"_anon{'struct' if decl.kind==clang.CXCursor_StructDecl else 'union'}{anoncnt()}"))
if clang.clang_Cursor_isAnonymous(decl) else _nm)
types[_nm] = (tnm:=real_nm.replace(' ', '_').replace('::', '_')), len(fields(t)) != 0, (ln:=len(lines))
lines.append(f"class {tnm}(ctypes.{'Structure' if decl.kind==clang.CXCursor_StructDecl else 'Union'}): pass")
if typedef:
lines.append(f"{typedef.replace('::', '_')}: TypeAlias = {tnm}")
types[typedef] = typedef.replace('::', '_'), True
ff=[(f, tname(clang.clang_getCursorType(f), f"{tnm}_{nm(f)}"), offset) +
((clang.clang_getFieldDeclBitWidth(f), clang.clang_Cursor_getOffsetOfField(f) % 8) *clang.clang_Cursor_isBitField(f))
for f,offset in all_fields(t)]
if ff: lines[ln] = '\n'.join(["@c.record", f"class {tnm}(c.Struct):", f" SIZE = {clang.clang_Type_getSizeOf(t)}",
*[f" {normalize(f)}: Annotated[{', '.join(str(a) for a in args)}]" for f,*args in ff]])
return tnm
case clang.CXType_Enum:
# TODO: C++ and GNU C have forward declared enums
if clang.clang_Cursor_isAnonymous(decl): types[nm(t)] = suggested_name or f"_anonenum{anoncnt()}", True
else: types[nm(t)] = nm(t).replace(' ', '_').replace('::', '_'), True
ety = clang.clang_getEnumDeclIntegerType(decl)
def value(e): return (clang.clang_getEnumConstantDeclUnsignedValue if ety.kind in uints else clang.clang_getEnumConstantDeclValue)(e)
lines.append(f"class {types[nm(t)][0]}({tname(ety)}, c.Enum): pass\n" +
"\n".join(f"{nm(e)} = {types[nm(t)][0]}.define('{nm(e)}', {value(e)})" for e in children(decl)
if e.kind == clang.CXCursor_EnumConstantDecl) + "\n")
return types[nm(t)][0]
case clang.CXType_ConstantArray: return ("c.Array[" + tname(clang.clang_getArrayElementType(t), suggested_name and suggested_name.rstrip('s'))
+ f", Literal[{clang.clang_getArraySize(t)}]]")
case clang.CXType_IncompleteArray:
return f"c.Array[{tname(clang.clang_getArrayElementType(t), suggested_name and suggested_name.rstrip('s'))}, Literal[0]]"
case clang.CXType_ObjCInterface:
is_defn = bool([f.kind for f in children(decl) if f.kind in (clang.CXCursor_ObjCInstanceMethodDecl, clang.CXCursor_ObjCClassMethodDecl)])
if (tnm:=nm(t)) not in types: lines.append(f"class {tnm}(objc.Spec): pass")
types[tnm] = tnm, is_defn
if is_defn:
ims, cms = parse_objc_spec(decl, tnm, clang.CXCursor_ObjCInstanceMethodDecl), parse_objc_spec(decl, tnm, clang.CXCursor_ObjCClassMethodDecl)
bases = [tname(clang.clang_getCursorType(b)) for b in children(decl) if b.kind in specs]
lines.extend([*([f"{tnm}._bases_ = [{', '.join(bases)}]"] if bases else []),
*([f"{tnm}._methods_ = [", *ims, ']'] if ims else []), *([f"{tnm}._classmethods_ = [", *cms, ']'] if cms else [])])
return tnm
case clang.CXType_ObjCSel: return "objc.id_"
case clang.CXType_ObjCId: return (objc:=True, "objc.id_")[1]
case clang.CXType_ObjCObject:
if basetype(t).kind != clang.CXType_ObjCId: raise NotImplementedError(f"generics unsupported: {nm(t)}")
if len(ps:=[proto(p) for p in protocols(t)]) == 0:
types[nm(t)] = "objc.id_", True
return "objc.id_"
if len(ps) == 1:
types[nm(t)] = ps[0], True
return ps[0]
types[nm(t)] = (tnm:=f"_anondynamic{anoncnt()}"), True
lines.append(f"class {tnm}({', '.join(ps)}): pass # {nm(t)}")
return tnm
case _: raise NotImplementedError(f"unsupported type {t.kind}")
  # Parses an objc @interface or @protocol, returning a list of declarations that objc.Spec can parse, for the specified method kind.
  # Each entry is a source line "('name', return_type, [arg_types][, True])" — the trailing True marks ARC retained returns.
  # NB: ivars are unsupported
  def parse_objc_spec(decl:clang.CXCursor, dnm:str, kind) -> list[str]:
    nonlocal lines, types
    ms = []
    for d in filter(lambda d: d.kind == kind, children(decl)):
      # snapshot output state so a method with an unsupported type can be skipped without leaving partial emissions behind
      rollback = lines, types
      try: ms.append(f" ('{nm(d)}', {repr('instancetype') if nm(rt:=clang.clang_getCursorResultType(d))=='instancetype' else tname(rt)}, " +
                     f"[{', '.join('instancetype' if nm(a) == 'instancetype' else tname(clang.clang_getCursorType(a)) for a in arguments(d))}]" +
                     (", True" * (clang.CXCursor_NSReturnsRetained in attrs(d) or (any(nm(d).startswith(s) for s in arc_families) and rt.kind!=clang.CXType_Void)))
                     + "),")
      except NotImplementedError as e:
        print(f"skipping {dnm}.{nm(d)}: {e}")
        lines, types = rollback
    return ms
  # Emits an objc.Spec subclass for an @protocol and returns its python name.
  # libclang doesn't have a "type" for @protocol, so we have to do this here...
  def proto(decl):
    nonlocal lines, types
    # already emitted as a full definition -> reuse it
    if (dnm:=nm(decl)) in types and types[dnm][1]: return types[dnm][0]
    # check if this is a forward declaration (no method declarations means forward-decl)
    is_defn = bool([f.kind for f in children(decl) if f.kind in (clang.CXCursor_ObjCInstanceMethodDecl, clang.CXCursor_ObjCClassMethodDecl)])
    if dnm not in types: lines.append(f"class {dnm}(objc.Spec): pass")
    types[dnm] = dnm, is_defn
    if is_defn:
      # inherited protocols first (self-reference excluded), then instance/class method tables
      bases = [proto(b) for b in children(decl) if b.kind==clang.CXCursor_ObjCProtocolRef and nm(b) != nm(decl)]
      ims, cms = parse_objc_spec(decl, dnm, clang.CXCursor_ObjCInstanceMethodDecl), parse_objc_spec(decl, dnm, clang.CXCursor_ObjCClassMethodDecl)
      lines.extend([*([f"{dnm}._bases_ = [{', '.join(bases)}]"] if bases else []),
                    *([f"{dnm}._methods_ = [", *ims, "]"] if ims else []), *([f"{dnm}._classmethods_ = [", *cms, "]"] if cms else [])])
    return dnm
for f in files:
aa = ctypes.cast((ctypes.c_char_p * len(args))(*[x.encode() for x in args]), ctypes.POINTER(ctypes.POINTER(ctypes.c_char))) if len(args) else None
tu = clang.clang_parseTranslationUnit(idx:=clang.clang_createIndex(False, 0), os.fspath(f).encode(), aa, len(args), None, 0,
clang.CXTranslationUnit_DetailedPreprocessingRecord)
q = list(children(unwrap_cursor(clang.clang_getTranslationUnitCursor(tu))))[::-1]
while q:
c = q.pop()
if loc_file(loc(c)) != str(f) and (not recsym or c.kind not in (clang.CXCursor_FunctionDecl,)): continue
rollback = lines, types
try:
match c.kind:
case clang.CXCursor_FunctionDecl if clang.clang_getCursorLinkage(c) == clang.CXLinkage_External and dll and nm(c) not in fns:
# TODO: we could support name-mangling
fns.add(nm(c))
argus = [f"{normalize(arg) or '_' + str(i)}:{tname(clang.clang_getCursorType(arg))}" for i, arg in enumerate(arguments(c))]
lines.extend(["@dll.bind", f"def {nm(c)}({', '.join(argus)}) -> {tname(clang.clang_getCursorResultType(c))}: ..."])
if clang.CXCursor_NSReturnsRetained in attrs(c): lines.append(f"{nm(c)} = objc.returns_retained({nm(c)})")
case (clang.CXCursor_StructDecl | clang.CXCursor_UnionDecl | clang.CXCursor_TypedefDecl | clang.CXCursor_EnumDecl
| clang.CXCursor_ObjCInterfaceDecl): tname(clang.clang_getCursorType(c))
case clang.CXCursor_MacroDefinition if parse_macros and nm(c) and len(toks:=Tokens(c)) > 1:
if nm(toks[1])=='(' and clang.clang_equalLocations(clang.clang_getRangeEnd(extent(toks[0])), clang.clang_getRangeStart(extent(toks[1]))):
it = iter(toks[1:])
_args = [nm(t) for t in itertools.takewhile(lambda t:nm(t)!=')', it) if clang.clang_getTokenKind(t) == clang.CXToken_Identifier]
if len(body:=list(it)) == 0: continue
macros += [f"{nm(c)} = lambda{' ' * bool(_args)}{','.join(_args)}: {readext(f,loc(body[0]),clang.clang_getRangeEnd(extent(toks[-1])))}"]
else: macros += [f"{nm(c)} = {readext(f, loc(toks[1]), clang.clang_getRangeEnd(extent(toks[-1])))}"]
case clang.CXCursor_VarDecl if clang.clang_getCursorLinkage(c) == clang.CXLinkage_Internal:
ty = clang.clang_getCursorType(c)
if (ty.kind == clang.CXType_ConstantArray and clang.clang_getCanonicalType(clang.clang_getArrayElementType(ty)).kind in ints and
(init:=children(c)[-1]).kind == clang.CXCursor_InitListExpr
and all(re.match(r"\[.*\].*=", readext(f, extent(c))) for c in children(init))):
cs = children(init)
macros += [f"{nm(c)} = {{{','.join(f'{readext(f, extent(next(it:=iter(children(c)))))}:{readext(f, extent(next(it)))}' for c in cs)}}}"]
elif clang.clang_getCanonicalType(ty).kind in ints: macros += [f"{nm(c)} = {readext(f, extent(children(c)[-1]))}"]
else: macros += [f"{nm(c)} = {tname(ty)}({readext(f, extent(children(c)[-1]))})"]
case clang.CXCursor_VarDecl if clang.clang_getCursorLinkage(c) == clang.CXLinkage_External and dll:
lines.append(f"try: {nm(c)} = {tname(clang.clang_getCursorType(c))}.in_dll(dll, '{nm(c)}') # type: ignore\n" +
"except (ValueError,AttributeError): pass")
case clang.CXCursor_ObjCProtocolDecl: proto(c)
case clang.CXCursor_Namespace | clang.CXCursor_LinkageSpec: q.extend(list(children(c))[::-1])
except NotImplementedError as e:
print(f"skipping {nm(c)}: {e}")
lines, types = rollback
clang.clang_disposeTranslationUnit(tu)
clang.clang_disposeIndex(idx)
main = '\n'.join(['# mypy: disable-error-code="empty-body"', "from __future__ import annotations", "import ctypes",
"from typing import Annotated, Literal, TypeAlias", "from tinygrad.runtime.support.c import _IO, _IOW, _IOR, _IOWR",
"from tinygrad.runtime.support import c", *prolog, *(["from tinygrad.runtime.support import objc"]*objc),
*([f"dll = c.DLL('{name}', {dll}{f', {paths}'*bool(paths)}{', use_errno=True'*errno})"] if dll else []), *lines,
"c.init_records()"]) + '\n'
macros = [f"{r} # type: ignore" for m in macros if (r:=functools.reduce(lambda s,r:re.sub(r[0], r[1], s), rules + base_rules, m))]
while True:
try:
exec(main + '\n'.join(macros), {})
break
except (SyntaxError, NameError, TypeError) as e:
macrono = unwrap(e.lineno if isinstance(e, SyntaxError) else unwrap(unwrap(e.__traceback__).tb_next).tb_lineno) - main.count('\n') - 1
assert macrono >= 0 and macrono < len(macros), f"error outside macro range: {e}"
print(f"skipping {macros[macrono]}: {e}")
del macros[macrono]
except Exception as e: raise Exception("parsing failed") from e
return main + '\n'.join(macros + epilog)
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "tinygrad/runtime/support/autogen.py",
"license": "MIT License",
"lines": 254,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
tinygrad/tinygrad:tinygrad/runtime/support/c.py | from __future__ import annotations
import ctypes, functools, os, pathlib, re, sys, sysconfig
from tinygrad.helpers import ceildiv, getenv, unwrap, DEBUG, OSX, WIN
from _ctypes import Array as _CArray, _SimpleCData, _Pointer
from typing import TYPE_CHECKING, get_type_hints, get_args, get_origin, overload, Annotated, Any, Generic, Iterable, ParamSpec, TypeVar
def _do_ioctl(__idir, __base, __nr, __struct, __fd, *args, __payload=None, **kwargs):
  """Issue an ioctl and return its in/out payload struct.

  __fd may be an hcq.FileIOInterface (uses its .ioctl) or a raw OS fd (falls back to fcntl.ioctl).
  The payload is __payload if given, otherwise __struct(*args, **kwargs). Raises RuntimeError on a
  nonzero return code. Not supported on Windows.
  """
  assert not WIN, "ioctl not supported"
  import tinygrad.runtime.support.hcq as hcq, fcntl
  ioctl = __fd.ioctl if isinstance(__fd, hcq.FileIOInterface) else functools.partial(fcntl.ioctl, __fd)
  # request word layout mirrors Linux _IOC(): dir<<30 | size<<16 | base<<8 | nr
  if (rc:=ioctl((__idir<<30)|(ctypes.sizeof(out:=(__payload or __struct(*args, **kwargs)))<<16)|(__base<<8)|__nr, out)):
    raise RuntimeError(f"ioctl returned {rc}")
  return out
# Linux-style ioctl request builders (direction 0=none, 1=write, 2=read, 3=read/write); base may be a char or an int.
def _IO(base, nr): return functools.partial(_do_ioctl, 0, ord(base) if isinstance(base, str) else base, nr, None)
def _IOW(base, nr, typ): return functools.partial(_do_ioctl, 1, ord(base) if isinstance(base, str) else base, nr, del_an(typ))
def _IOR(base, nr, typ): return functools.partial(_do_ioctl, 2, ord(base) if isinstance(base, str) else base, nr, del_an(typ))
def _IOWR(base, nr, typ): return functools.partial(_do_ioctl, 3, ord(base) if isinstance(base, str) else base, nr, del_an(typ))
def del_an(ty):
  """Unwrap a type annotation down to the concrete ctypes type.

  Annotated[X, T, ...] yields its first metadata entry T; an Enum subclass recurses into its
  declared integer base; NoneType maps to None; anything else passes through unchanged.
  """
  if get_origin(ty) is Annotated: return ty.__metadata__[0]
  if isinstance(ty, type) and issubclass(ty, Enum): return del_an(ty.__orig_bases__[0]) # type: ignore
  return None if ty is type(None) else ty
# structs registered via @record, resolved lazily by init_records() so annotations can forward-reference later types
_pending_records = []
T = TypeVar("T")
U = TypeVar("U")
V = TypeVar("V")
P = ParamSpec("P")
if TYPE_CHECKING:
  # Static-only stand-ins: give the type checker generic Array/POINTER/CFUNCTYPE/Enum with precise element types.
  from ctypes import _CFunctionType
  from _ctypes import _CData
  class Array(Generic[T, U], _CData):
    @overload
    def __getitem__(self: Array[_SimpleCData[V], Any], key: int) -> V: ...
    @overload
    def __getitem__(self: Array[T, Any], key: int) -> T: ...
    def __getitem__(self, key) -> Any: ...
    @overload
    def __setitem__(self: Array[_SimpleCData[V], Any], key: int, val: V): ...
    @overload
    def __setitem__(self: Array[T, Any], key: int, val: T): ...
    @overload
    def __setitem__(self: Array[T, Any], key: slice, val: Iterable[T]): ...
    def __setitem__(self, key, val): ...
  class POINTER(Generic[T], _Pointer): ...
  class CFUNCTYPE(Generic[T, P], _CFunctionType): ...
  class Enum(_SimpleCData):
    @classmethod
    def get(cls, val:int, default="unknown") -> str: ...
    @classmethod
    def items(cls) -> Iterable[tuple[int,str]]: ...
    @classmethod
    def define(cls, name:str, val:int) -> int: ...
  CT = TypeVar("CT", bound=_CData)
  def pointer(obj: CT) -> POINTER[CT]: ...
else:
  # Runtime implementations: subscription builds the real ctypes type (e.g. Array[c_int, Literal[4]] -> c_int * 4).
  class _Array:
    def __getitem__(self, key): return del_an(key[0]) * get_args(key[1])[0]
    def __call__(self, ty, l): return del_an(ty) * l
  Array = _Array()
  class POINTER:
    def __class_getitem__(cls, key): return ctypes.POINTER(del_an(key))
  class CFUNCTYPE:
    def __class_getitem__(cls, key): return ctypes.CFUNCTYPE(del_an(key[0]), *(del_an(a) for a in key[1]))
  class Enum:
    # each subclass gets its own value->name table for reverse lookup
    def __init_subclass__(cls): cls._val_to_name_ = {}
    @classmethod
    def get(cls, val, default="unknown"): return cls._val_to_name_.get(val, default)
    @classmethod
    def items(cls): return cls._val_to_name_.items()
    @classmethod
    def define(cls, name:str, val:int) -> int:
      cls._val_to_name_[val] = name
      return val
  def pointer(obj): return ctypes.pointer(obj)
def i2b(i:int, sz:int) -> bytes:
  """Encode integer i as sz bytes in native byte order."""
  return i.to_bytes(sz, sys.byteorder)
def b2i(b:bytes) -> int:
  """Decode native-byte-order bytes back into an integer."""
  return int.from_bytes(b, sys.byteorder)
def mv(st) -> memoryview:
  """Flat unsigned-byte memoryview over any buffer-protocol object."""
  return memoryview(st).cast('B')
class Struct(ctypes.Structure):
  """Base for generated structs: backed by a raw byte blob, accessed through Field property descriptors."""
  def __init__(self, *args, **kwargs):
    ctypes.Structure.__init__(self)
    # keeps python-side references set through Field setters so their ctypes memory outlives this struct
    self._objects_ = {}
    # positional args map onto _real_fields_ order (populated by init_records/init_c_struct_t), then kwargs by name
    for f,v in [*zip((rf[0] for rf in self._real_fields_), args), *kwargs.items()]: setattr(self, f, v)
def record(cls) -> type[Struct]:
  """Class decorator: replace cls with a Struct of cls.SIZE raw bytes.

  Field descriptors are attached later by init_records(), which is why the caller's globals are
  captured here — get_type_hints needs them to resolve forward-referenced annotations.
  """
  struct = type(cls.__name__, (Struct,), {'_fields_': [('_mem_', ctypes.c_byte * cls.SIZE)]})
  _pending_records.append((cls, struct, unwrap(sys._getframe().f_back).f_globals))
  return struct
def init_records() -> None:
  """Resolve every @record class queued in _pending_records into Field property descriptors."""
  for cls, struct, ns in _pending_records:
    setattr(struct, '_real_fields_', [])
    for nm, t in get_type_hints(cls, globalns=ns, include_extras=True).items():
      # Annotated metadata carries the Field args; when the annotation's origin is a plain python
      # scalar type the metadata presumably already starts with the ctypes type — TODO confirm
      if t.__origin__ in (bool, bytes, str, int, float): setattr(struct, nm, Field(*(f:=t.__metadata__)))
      else: setattr(struct, nm, Field(*(f:=(del_an(t.__origin__), *t.__metadata__))))
      struct._real_fields_.append((nm,) + f) # type: ignore
  _pending_records.clear()
class Field(property):
  """Property descriptor reading/writing one (possibly bit-packed) field out of a Struct's raw bytes.

  With bit_width set it behaves as a C bitfield at byte offset `off`, bit offset `bit_off`;
  otherwise it reinterprets sizeof(typ) bytes at `off` as typ.
  """
  def __init__(self, typ, off:int, bit_width=None, bit_off=0):
    if bit_width is not None:
      # bitfield: slice out the covering bytes, then shift/mask within them
      sl, set_mask = slice(off,off+(sz:=ceildiv(bit_width+bit_off, 8))), ~((mask:=(1 << bit_width) - 1) << bit_off)
      # FIXME: signedness
      super().__init__(lambda self: (b2i(mv(self)[sl]) >> bit_off) & mask,
                       lambda self,v: mv(self).__setitem__(sl, i2b((b2i(mv(self)[sl]) & set_mask) | (v << bit_off), sz)))
    else:
      sl = slice(off, off + ctypes.sizeof(typ))
      def set_with_objs(f):
        # setter wrapper: stash the value's ctypes-held python objects so their memory stays alive with the struct
        def wrapper(self, v):
          if hasattr(v, '_objects') and hasattr(self, '_objects_'): self._objects_[off] = {'_self_': v, **(v._objects or {})}
          mv(self).__setitem__(sl, bytes(v if isinstance(v, typ) else f(v)))
        return wrapper
      if issubclass(typ, _CArray):
        # char arrays read back as python bytes (.value); other arrays as the ctypes array view itself
        getter = (lambda self: typ.from_buffer(mv(self)[sl]).value) if typ._type_ is ctypes.c_char else (lambda self: typ.from_buffer(mv(self)[sl]))
        super().__init__(getter, set_with_objs(lambda v: typ(*v)))
      else: super().__init__(lambda self: v.value if isinstance(v:=typ.from_buffer(mv(self)[sl]), _SimpleCData) else v, set_with_objs(typ))
    self.offset = off
@functools.cache
def init_c_struct_t(sz:int, fields: tuple[tuple, ...]):
  """Build (and memoize) an ad-hoc sz-byte Struct subclass from (name, type, offset, ...) field tuples."""
  CStruct = type("CStruct", (Struct,), {'_fields_': [('_mem_', ctypes.c_byte * sz)], '_real_fields_': []})
  for nm,ty,*args in fields:
    setattr(CStruct, nm, Field(*(f:=(del_an(ty), *args))))
    CStruct._real_fields_.append((nm,) + f) # type: ignore
  return CStruct
def init_c_var(ty, creat_cb):
  """Instantiate ty (Annotated-unwrapped via del_an), run creat_cb on it to initialize, return the instance."""
  inst = del_an(ty)()
  creat_cb(inst)
  return inst
class DLL(ctypes.CDLL):
  """ctypes.CDLL with cross-platform library discovery, lazy symbol binding, and a helpful load-failure message."""
  # names of libraries that loaded successfully (shared across all DLL instances)
  _loaded_: set[str] = set()
  @staticmethod
  def findlib(nm:str, paths:list[str], extra_paths=[]):
    """Return a filesystem path for library `nm`, or None if not found.

    Search order: NM_PATH env override, then each candidate in `paths` (absolute paths taken as-is)
    against platform-conventional directories plus extra_paths.
    NOTE(review): extra_paths=[] is a mutable default — benign here since it is only read, never mutated.
    """
    if nm == 'libc' and OSX: return '/usr/lib/libc.dylib'
    # env var override, e.g. LIBC_PATH ('-' normalized to '_')
    if pathlib.Path(path:=getenv(nm.replace('-', '_').upper()+"_PATH", '')).is_file(): return path
    for p in paths:
      libpaths = {"posix": ["/usr/lib64", "/usr/lib", "/usr/local/lib"], "nt": os.environ['PATH'].split(os.pathsep),
                  "darwin": ["/opt/homebrew/lib", f"/System/Library/Frameworks/{p}.framework", f"/System/Library/PrivateFrameworks/{p}.framework"],
                  'linux': ['/lib', '/lib64', f"/lib/{sysconfig.get_config_var('MULTIARCH')}", "/usr/lib/wsl/lib/"]}
      if (pth:=pathlib.Path(p)).is_absolute():
        if pth.is_file(): return p
        else: continue
      for pre in (pathlib.Path(pre) for pre in ([path] if path else []) + libpaths.get(os.name, []) + libpaths.get(sys.platform, []) + extra_paths):
        if not pre.is_dir(): continue
        if WIN or OSX:
          for base in ([f"lib{p}.dylib", f"{p}.dylib", str(p)] if OSX else [f"{p}.dll"]):
            if (l:=pre / base).is_file() or (OSX and 'framework' in str(l) and l.is_symlink()): return str(l)
        else:
          # accept libp.so with any (or no) version suffix
          for l in (l for l in pre.iterdir() if l.is_file() and re.fullmatch(f"lib{p}\\.so\\.?[0-9]*", l.name)):
            # filter out linker scripts
            with open(l, 'rb') as f:
              if f.read(4) == b'\x7FELF': return str(l)
  def __init__(self, nm:str, paths:str|list[str], extra_paths=[], emsg="", **kwargs):
    """Try to load library `nm`; failures are recorded (not raised) and surface on first symbol access."""
    self.nm, self.emsg = nm, emsg
    if (path:= DLL.findlib(nm, paths if isinstance(paths, list) else [paths], extra_paths if isinstance(extra_paths, list) else [extra_paths])):
      if DEBUG >= 3: print(f"loading {nm} from {path}")
      try:
        super().__init__(path, **kwargs)
        self._loaded_.add(self.nm)
      except OSError as e:
        self.emsg = str(e)
        if DEBUG >= 3: print(f"loading {nm} failed: {e}")
    elif DEBUG >= 3: print(f"loading {nm} failed: not found on system")
  def bind(self, fn):
    """Decorator: bind annotated stub `fn` to the same-named library symbol, resolved lazily on first call."""
    restype, argtypes = del_an((hints:=get_type_hints(fn, include_extras=True)).pop('return', None)), tuple(del_an(h) for h in hints.values())
    cfunc = None
    def wrapper(*args):
      nonlocal cfunc
      # resolve and type the symbol only once, on first invocation
      if cfunc is None: (cfunc:=getattr(self, fn.__name__)).argtypes, cfunc.restype = argtypes, restype
      return cfunc(*args)
    return wrapper
  def __getattr__(self, nm):
    # turn symbol lookups on a failed load into a descriptive error instead of a raw ctypes failure
    if self.nm not in self._loaded_:
      raise AttributeError(f"failed to load library {self.nm}: " + (self.emsg or f"try setting {self.nm.upper()+'_PATH'}?"))
    return super().__getattr__(nm)
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "tinygrad/runtime/support/c.py",
"license": "MIT License",
"lines": 164,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
tinygrad/tinygrad:extra/thunder/tiny/tk/group.py | import math
from typing import cast, Callable
from tinygrad import dtypes
from tinygrad.uop.ops import AxisType, UOp, Ops
from tinygrad.dtype import AddrSpace, PtrDType
from tinygrad.helpers import prod
from extra.thunder.tiny.tk import WARP_THREADS
from extra.thunder.tiny.tk.tiles import ALL_TILES, ST, RT, RV, TileLayout, VecLayout
class Group:
def __init__(self, warps:int, ker):
self.warps = warps
self.group_threads = warps * WARP_THREADS
self.ker = ker
# helpers
@property
def laneid(self): return self.ker.threadIdx_x % self.group_threads
@property
def warpid(self): return self.laneid // WARP_THREADS
@property
def groupid(self): return self.ker.threadIdx_x // self.group_threads
  # ops that only work on a single warp
  def clear(self, reg:ALL_TILES, value:float=0):
    """Store `value` into every element of tile `reg`; returns reg sequenced after the store."""
    reg = cast(UOp, reg)
    assert self.warps == 1
    rngs_for_shape = tuple(self.ker.raw_range(dim) for dim in reg.shape)
    reg_store = reg[*rngs_for_shape].store(value).end(*rngs_for_shape)
    self.ker.push_store(reg_store, reg)
    return reg.after(reg_store).reshape(reg.shape)
def zero(self, reg:ALL_TILES): return self.clear(reg, 0)
def ones(self, reg:ALL_TILES): return self.clear(reg, 1)
def neg_inf(self, reg:ALL_TILES): return self.clear(reg, -math.inf)
  def copy(self, dst:ALL_TILES, src:ALL_TILES):
    """Copy every element of `src` into same-shaped `dst`, casting if their base dtypes differ."""
    dst, src = cast(UOp, dst), cast(UOp, src)
    assert self.warps == 1
    assert dst.shape == src.shape
    rngs_for_shape = tuple(self.ker.raw_range(dim) for dim in dst.shape)
    src_load = src[*rngs_for_shape]
    if src.dtype.base != dst.dtype.base:
      src_load = src_load.cast(dst.dtype.base)
    dst_store = dst[*rngs_for_shape].store(src_load).end(*rngs_for_shape)
    self.ker.push_store(dst_store, dst)
    return dst.after(dst_store).reshape(dst.shape)
  def transpose(self, dst:UOp|RT, src:UOp|RT):
    """Tile-level transpose: write src[h, w, i] into dst[w, h, i], casting if base dtypes differ."""
    dst, src = cast(UOp, dst), cast(UOp, src)
    assert self.warps == 1
    for height in self.ker.range(src.shape[-3], track=False):
      for width in self.ker.range(src.shape[-2], track=False):
        for inner in self.ker.range(src.shape[-1], track=False):
          src_load = src[height, width, inner]
          if src.dtype.base != dst.dtype.base:
            src_load = src_load.cast(dst.dtype.base)
          # swap the height/width tile coordinates on the store side
          dst_store = dst[width, height, inner].store(src_load).end(height, width, inner)
    self.ker.push_store(dst_store, dst)
    return dst.after(dst_store).reshape(dst.shape)
  def mma_AB(self, c:UOp|RT, a:UOp|RT, b:UOp|RT):
    """Matrix-multiply-accumulate c = a @ b + c using AMD WMMA (bf16 inputs, f32 accumulator).

    a is indexed [m, k] and b [k, n], so neither operand is transposed. Supports base tiles
    with 16- or 32-wide K (16x16x16 / 16x16x32 WMMA variants).
    """
    c, a, b = cast(UOp, c), cast(UOp, a), cast(UOp, b)
    assert self.warps == 1
    a_base_shape = cast(RT, a).base_shape
    if a_base_shape.cols == 16:
      wmma_arg = ('WMMA_16_16_16___bf16_float', (16, 16, 16), dtypes.bfloat16, dtypes.float, 'AMD', 64, (((4, 2), (3, 2)), ((4, 2), (3, 2)), ((4, 2), (3, 2))), ()) # type: ignore
    elif a_base_shape.cols == 32:
      wmma_arg = ('WMMA_16_16_32___bf16_float', (16, 16, 32), dtypes.bfloat16, dtypes.float, 'AMD', 64, (((4, 2), (3, 2), (9, 2)), ((4, 2), (3, 2), (9, 2)), ((4, 2), (3, 2))), ()) # type: ignore
    else: raise NotImplementedError(f"mma_AB not implemented for {a_base_shape.cols=}")
    for height in self.ker.range(c.shape[-3], track=False):
      for width in self.ker.range(c.shape[-2], track=False):
        for inner in self.ker.range(a.shape[-2], axis_type=AxisType.REDUCE, track=False):
          # pack each lane's per-thread elements into the WMMA operand vectors (4 for K=16, 8 for K=32)
          if a_base_shape.cols == 16:
            a_in = UOp.vectorize(*[a[height, inner, i] for i in range(4)])
            b_in = UOp.vectorize(*[b[inner, width, i] for i in range(4)])
          elif a_base_shape.cols == 32:
            a_in = UOp.vectorize(*[a[height, inner, i] for i in range(8)])
            b_in = UOp.vectorize(*[b[inner, width, i] for i in range(8)])
          else: raise NotImplementedError(f"mma_AB not implemented for {a_base_shape.cols=}")
          # current c values ride along as the accumulator input
          d_in = UOp.vectorize(*[c[height, width, i] for i in range(4)])
          out = UOp(Ops.WMMA, dtypes.float32.vec(4), (a_in, b_in, d_in), arg=wmma_arg)
          c_i = [c[height, width, i].store(out.gep(i)) for i in range(4)]
          c_store = UOp.group(*c_i).end(height, width, inner)
    self.ker.push_store(c_store, c)
    return c.after(c_store).reshape(c.shape)
  def mma_ABt(self, c:UOp|RT, a:UOp|RT, b:UOp|RT):
    """Matrix-multiply-accumulate c = a @ b.T + c using AMD WMMA (bf16 inputs, f32 accumulator).

    Same as mma_AB except b is indexed [n, k], i.e. b is consumed transposed.
    """
    c, a, b = cast(UOp, c), cast(UOp, a), cast(UOp, b)
    assert self.warps == 1
    a_base_shape = cast(RT, a).base_shape
    if a_base_shape.cols == 16:
      wmma_arg = ('WMMA_16_16_16___bf16_float', (16, 16, 16), dtypes.bfloat16, dtypes.float, 'AMD', 64, (((4, 2), (3, 2)), ((4, 2), (3, 2)), ((4, 2), (3, 2))), ()) # type: ignore
    elif a_base_shape.cols == 32:
      wmma_arg = ('WMMA_16_16_32___bf16_float', (16, 16, 32), dtypes.bfloat16, dtypes.float, 'AMD', 64, (((4, 2), (3, 2), (9, 2)), ((4, 2), (3, 2), (9, 2)), ((4, 2), (3, 2))), ()) # type: ignore
    else: raise NotImplementedError(f"mma_ABt not implemented for {a_base_shape.cols=}")
    for height in self.ker.range(c.shape[-3], track=False):
      for width in self.ker.range(c.shape[-2], track=False):
        for inner in self.ker.range(a.shape[-2], axis_type=AxisType.REDUCE, track=False):
          # b[width, inner] (vs b[inner, width] in mma_AB) realizes the transpose
          if a_base_shape.cols == 16:
            a_in = UOp.vectorize(*[a[height, inner, i] for i in range(4)])
            b_in = UOp.vectorize(*[b[width, inner, i] for i in range(4)])
          elif a_base_shape.cols == 32:
            a_in = UOp.vectorize(*[a[height, inner, i] for i in range(8)])
            b_in = UOp.vectorize(*[b[width, inner, i] for i in range(8)])
          else: raise NotImplementedError(f"mma_ABt not implemented for {a_base_shape.cols=}")
          d_in = UOp.vectorize(*[c[height, width, i] for i in range(4)])
          out = UOp(Ops.WMMA, dtypes.float32.vec(4), (a_in, b_in, d_in), arg=wmma_arg)
          c_i = [c[height, width, i].store(out.gep(i)) for i in range(4)]
          c_store = UOp.group(*c_i).end(height, width, inner)
    self.ker.push_store(c_store, c)
    return c.after(c_store).reshape(c.shape)
  def mma_AtB(self, c:UOp|RT, a:UOp|RT, b:UOp|RT):
    """Matrix-multiply-accumulate c = a.T @ b + c using AMD WMMA (bf16 inputs, f32 accumulator).

    Same as mma_AB except a is indexed [k, m] (and the reduction runs over a.shape[-3]),
    i.e. a is consumed transposed.
    """
    c, a, b = cast(UOp, c), cast(UOp, a), cast(UOp, b)
    assert self.warps == 1
    a_base_shape = cast(RT, a).base_shape
    if a_base_shape.cols == 16:
      wmma_arg = ('WMMA_16_16_16___bf16_float', (16, 16, 16), dtypes.bfloat16, dtypes.float, 'AMD', 64, (((4, 2), (3, 2)), ((4, 2), (3, 2)), ((4, 2), (3, 2))), ()) # type: ignore
    elif a_base_shape.cols == 32:
      wmma_arg = ('WMMA_16_16_32___bf16_float', (16, 16, 32), dtypes.bfloat16, dtypes.float, 'AMD', 64, (((4, 2), (3, 2), (9, 2)), ((4, 2), (3, 2), (9, 2)), ((4, 2), (3, 2))), ()) # type: ignore
    else: raise NotImplementedError(f"mma_AtB not implemented for {a_base_shape.cols=}")
    for height in self.ker.range(c.shape[-3], track=False):
      for width in self.ker.range(c.shape[-2], track=False):
        for inner in self.ker.range(a.shape[-3], axis_type=AxisType.REDUCE, track=False):
          # a[inner, height] (vs a[height, inner] in mma_AB) realizes the transpose
          if a_base_shape.cols == 16:
            a_in = UOp.vectorize(*[a[inner, height, i] for i in range(4)])
            b_in = UOp.vectorize(*[b[inner, width, i] for i in range(4)])
          elif a_base_shape.cols == 32:
            a_in = UOp.vectorize(*[a[inner, height, i] for i in range(8)])
            b_in = UOp.vectorize(*[b[inner, width, i] for i in range(8)])
          else: raise NotImplementedError(f"mma_AtB not implemented for {a_base_shape.cols=}")
          d_in = UOp.vectorize(*[c[height, width, i] for i in range(4)])
          out = UOp(Ops.WMMA, dtypes.float32.vec(4), (a_in, b_in, d_in), arg=wmma_arg)
          c_i = [c[height, width, i].store(out.gep(i)) for i in range(4)]
          c_store = UOp.group(*c_i).end(height, width, inner)
    self.ker.push_store(c_store, c)
    return c.after(c_store).reshape(c.shape)
  def mma_AtBt(self, c:UOp|RT, a:UOp|RT, b:UOp|RT):
    """Matrix-multiply-accumulate c = a.T @ b.T + c using AMD WMMA (bf16 inputs, f32 accumulator).

    Both operands consumed transposed: a indexed [k, m], b indexed [n, k].
    """
    c, a, b = cast(UOp, c), cast(UOp, a), cast(UOp, b)
    assert self.warps == 1
    a_base_shape = cast(RT, a).base_shape
    if a_base_shape.cols == 16:
      wmma_arg = ('WMMA_16_16_16___bf16_float', (16, 16, 16), dtypes.bfloat16, dtypes.float, 'AMD', 64, (((4, 2), (3, 2)), ((4, 2), (3, 2)), ((4, 2), (3, 2))), ()) # type: ignore
    elif a_base_shape.cols == 32:
      wmma_arg = ('WMMA_16_16_32___bf16_float', (16, 16, 32), dtypes.bfloat16, dtypes.float, 'AMD', 64, (((4, 2), (3, 2), (9, 2)), ((4, 2), (3, 2), (9, 2)), ((4, 2), (3, 2))), ()) # type: ignore
    else: raise NotImplementedError(f"mma_AtBt not implemented for {a_base_shape.cols=}")
    for height in self.ker.range(c.shape[-3], track=False):
      for width in self.ker.range(c.shape[-2], track=False):
        for inner in self.ker.range(a.shape[-3], axis_type=AxisType.REDUCE, track=False):
          if a_base_shape.cols == 16:
            a_in = UOp.vectorize(*[a[inner, height, i] for i in range(4)])
            b_in = UOp.vectorize(*[b[width, inner, i] for i in range(4)])
          elif a_base_shape.cols == 32:
            a_in = UOp.vectorize(*[a[inner, height, i] for i in range(8)])
            b_in = UOp.vectorize(*[b[width, inner, i] for i in range(8)])
          else: raise NotImplementedError(f"mma_AtBt not implemented for {a_base_shape.cols=}")
          d_in = UOp.vectorize(*[c[height, width, i] for i in range(4)])
          out = UOp(Ops.WMMA, dtypes.float32.vec(4), (a_in, b_in, d_in), arg=wmma_arg)
          c_i = [c[height, width, i].store(out.gep(i)) for i in range(4)]
          c_store = UOp.group(*c_i).end(height, width, inner)
    self.ker.push_store(c_store, c)
    return c.after(c_store).reshape(c.shape)
  def map(self, a:ALL_TILES, op:Callable[[UOp], UOp]|Callable[[UOp, tuple], UOp]):
    """Apply `op` elementwise over `a` in place.

    `op` receives the element value; if it declares two positional parameters it also gets
    the tuple of index ranges (arity detected via op.__code__.co_argcount).
    """
    a = cast(UOp, a)
    assert self.warps == 1
    rngs_for_shape = tuple(self.ker.raw_range(dim) for dim in a.shape)
    if op.__code__.co_argcount == 1:
      to_store = op(a[*rngs_for_shape]) # type: ignore
    else:
      to_store = op(a[*rngs_for_shape], rngs_for_shape) # type: ignore
    a_store = a[*rngs_for_shape].store(to_store).end(*rngs_for_shape)
    self.ker.push_store(a_store, a)
    return a.after(a_store).reshape(a.shape)
  def row_reduce(self, vec:UOp|RV, src:UOp|RT, op:Callable[[UOp, UOp], UOp], init_value:float=0.0):
    """Reduce each row of tile `src` with binary `op`, combining the result into vec[row, 0].

    Per-thread partials (seeded with init_value) are combined across lanes through shared memory.
    The hard-coded 4/3/16 constants presumably encode a 64-lane wave viewed as 16x4 — TODO confirm.
    """
    vec, src = cast(UOp, vec), cast(UOp, src)
    assert self.warps == 1
    red_local = self.ker.alloc((self.group_threads,), src.dtype.base, AddrSpace.LOCAL)
    red_reg = self.ker.alloc((1,), src.dtype.base, AddrSpace.REG)
    for height in self.ker.range(src.shape[-3], track=False):
      i = self.ker.raw_range(red_reg.size)
      # sequence the accumulator after all open loops so it is reinitialized per row
      red_reg = red_reg.after(height, *[tkr._rng for tkr in self.ker.range_stack])
      reg_store = red_reg.flatten()[i].store(init_value).end(i)
      red_reg = red_reg.after(reg_store).reshape(red_reg.shape)
      for width in self.ker.range(src.shape[-2], axis_type=AxisType.REDUCE, track=False):
        for inner in self.ker.range(4, axis_type=AxisType.REDUCE, track=False):
          reg_store = red_reg[0].store(op(red_reg[0], src[height, width, inner])).end(width, inner)
      red_reg = red_reg.after(reg_store).reshape(red_reg.shape)
      # store to shared memory
      red_local_store = red_local[self.laneid].store(red_reg[0])
      red_local = red_local.after(red_local_store.barrier()).reshape(red_local.shape)
      # reduce from shared memory (pull partials from the other lanes at +16/+32/+48)
      for inner in self.ker.range(3, axis_type=AxisType.REDUCE, track=False):
        offset = (self.laneid + (1 + inner) * 16) % self.group_threads
        reg_store = red_reg[0].store(op(red_reg[0], red_local[offset])).end(inner)
      red_reg = red_reg.after(reg_store).reshape(red_reg.shape)
      # reduce with vec (vec carries the running value across calls)
      vec_store = vec[height, 0].store(op(vec[height, 0], red_reg[0])).end(height)
    self.ker.push_store(vec_store, vec)
    return vec.after(vec_store).reshape(vec.shape)
  def col_reduce(self, vec:UOp|RV, src:UOp|RT, op:Callable[[UOp, UOp], UOp], init_value:float=0.0):
    """Reduce each column of tile `src` with binary `op`, combining the result into vec[col, 0].

    Mirror of row_reduce with the height axis as the reduction axis; same shared-memory
    cross-lane combine (4/3/16 constants presumably encode a 64-lane wave as 16x4 — TODO confirm).
    """
    vec, src = cast(UOp, vec), cast(UOp, src)
    assert self.warps == 1
    red_local = self.ker.alloc((self.group_threads,), src.dtype.base, AddrSpace.LOCAL)
    red_reg = self.ker.alloc((1,), src.dtype.base, AddrSpace.REG)
    for width in self.ker.range(src.shape[-2], track=False):
      i = self.ker.raw_range(red_reg.size)
      # sequence the accumulator after all open loops so it is reinitialized per column
      red_reg = red_reg.after(width, *[tkr._rng for tkr in self.ker.range_stack])
      reg_store = red_reg.flatten()[i].store(init_value).end(i)
      red_reg = red_reg.after(reg_store).reshape(red_reg.shape)
      for height in self.ker.range(src.shape[-3], axis_type=AxisType.REDUCE, track=False):
        for inner in self.ker.range(4, axis_type=AxisType.REDUCE, track=False):
          reg_store = red_reg[0].store(op(red_reg[0], src[height, width, inner])).end(height, inner)
      red_reg = red_reg.after(reg_store).reshape(red_reg.shape)
      # store to shared memory
      red_local_store = red_local[self.laneid].store(red_reg[0])
      red_local = red_local.after(red_local_store.barrier()).reshape(red_local.shape)
      # reduce from shared memory (pull partials from the other lanes at +16/+32/+48)
      for inner in self.ker.range(3, axis_type=AxisType.REDUCE, track=False):
        offset = (self.laneid + (1 + inner) * 16) % self.group_threads
        reg_store = red_reg[0].store(op(red_reg[0], red_local[offset])).end(inner)
      red_reg = red_reg.after(reg_store).reshape(red_reg.shape)
      # reduce with vec (vec carries the running value across calls)
      vec_store = vec[width, 0].store(op(vec[width, 0], red_reg[0])).end(width)
    self.ker.push_store(vec_store, vec)
    return vec.after(vec_store).reshape(vec.shape)
  # ops that can work across multiple warps
  def load(self, dst:ALL_TILES, src:ALL_TILES, dst_idxs:tuple[UOp|int,...]=(), idxs:tuple[UOp|int,...]=(), axis:int=0):
    """Load tile `dst` from `src`, dispatching on the (dst, src) address-space pair.

    Supported transfers: shared->register tile, global->shared tile, global->register tile,
    global->register vector. `idxs` selects the source sub-tile, `dst_idxs` the destination
    sub-tile, `axis` the source dimension whose stride covers the tile rows.
    """
    dst, src = cast(UOp, dst), cast(UOp, src)
    assert isinstance(dst.dtype, PtrDType) and isinstance(src.dtype, PtrDType)
    dst_dtype, src_dtype = dst.dtype, src.dtype
    if dst_dtype.addrspace == AddrSpace.REG and src_dtype.addrspace == AddrSpace.LOCAL:
      # shared -> register: each lane gathers its elements through the shared tile's swizzle
      laneid = self.ker.laneid
      rt, st = cast(RT, dst), cast(ST, src)
      elements_per_thread = rt.base_shape.elements_per_thread
      for height in self.ker.range(dst.shape[-3], track=False):
        for width in self.ker.range(dst.shape[-2], track=False):
          for inner in self.ker.range(elements_per_thread, track=False):
            # lane -> (row, col) mapping flips when register and shared layouts disagree
            if rt.layout != st.layout:
              row = rt.base_shape.stride * (laneid // rt.base_shape.cols) + inner
              col = laneid % rt.base_shape.cols
            else:
              row = laneid % rt.base_shape.rows
              col = rt.base_shape.stride * (laneid // rt.base_shape.rows) + inner
            sheight = height
            swidth = width
            if len(idxs) == 2:
              # translate a 2d sub-tile index into shared-tile coordinates
              row_idx = idxs[0] * dst.shape[-3] * rt.base_shape.rows
              col_idx = idxs[1] * dst.shape[-2] * rt.base_shape.cols
              row += row_idx % st.base_shape.rows
              col += col_idx % st.base_shape.cols
              sheight += row_idx // st.base_shape.rows
              swidth += col_idx // st.base_shape.cols
            srow, scol = cast(ST, src).swizzle(row, col)
            src_load = src[*idxs[:-2], sheight, swidth, srow, scol]
            if src.dtype.base != dst.dtype.base:
              src_load = src_load.cast(dst.dtype.base)
            dst_store = dst[*dst_idxs, height, width, inner].store(src_load)
            dst_store = dst_store.end(height, width, inner)
    elif dst_dtype.addrspace == AddrSpace.LOCAL and src_dtype.addrspace == AddrSpace.GLOBAL:
      # global -> shared: all lanes cooperatively copy contiguous chunks, swizzled on the way in
      srcf = src.flatten()
      row_stride = prod(src.shape[axis+1:])
      st = cast(ST, dst)
      idxs = tuple(idx * st.rows if i == axis else idx for i, idx in enumerate(idxs))
      idxs = tuple(idx * st.cols if i == 3 else idx for i, idx in enumerate(idxs))
      src_i = ((idxs[0] * src.shape[-3] + idxs[1]) * src.shape[-2] + idxs[2]) * src.shape[-1] + idxs[3]
      elements_per_thread = st.base_shape.elements_per_thread
      memcpy_per_row = st.cols // elements_per_thread
      total_calls = (dst.shape[-4] * dst.shape[-3] * st.base_shape.num_elements) // (self.group_threads * elements_per_thread)
      for outer in self.ker.range(total_calls, track=False):
        for inner in self.ker.range(elements_per_thread, axis_type=AxisType.UPCAST, track=False):
          load_idx = outer * self.group_threads + self.laneid
          row = load_idx // memcpy_per_row
          col = (load_idx * elements_per_thread) % st.cols + inner
          height = row // st.base_shape.rows
          width = col // st.base_shape.cols
          row = row % st.base_shape.rows
          col = col % st.base_shape.cols
          srow, scol = cast(ST, dst).swizzle(row, col)
          # NOTE(review): src_i is accumulated with += inside the traced loop; assumes each
          # ker.range body is traced exactly once — confirm against the kernel builder
          src_i += height * st.base_shape.rows * row_stride + width * st.base_shape.cols
          src_i += row * row_stride + col
          src_load = srcf[src_i]
          if src.dtype.base != dst.dtype.base:
            src_load = src_load.cast(dst.dtype.base)
          dst_store = dst[*dst_idxs, height, width, srow, scol].store(src_load)
          dst_store = dst_store.end(height, width, outer, inner).barrier()
    elif dst_dtype.addrspace == AddrSpace.REG and src_dtype.addrspace == AddrSpace.GLOBAL and isinstance(dst, RT):
      # global -> register tile: each lane loads its own elements directly from global memory
      srcf = src.flatten()
      row_stride = prod(src.shape[axis+1:])
      laneid = self.ker.laneid
      rt = cast(RT, dst)
      elements_per_thread = rt.base_shape.elements_per_thread
      idxs = tuple(idx * dst.shape[-3] * rt.base_shape.rows if i == axis else idx for i, idx in enumerate(idxs))
      idxs = tuple(idx * dst.shape[-2] * rt.base_shape.cols if i == 3 else idx for i, idx in enumerate(idxs))
      src_i = ((idxs[0] * src.shape[-3] + idxs[1]) * src.shape[-2] + idxs[2]) * src.shape[-1] + idxs[3]
      for height in self.ker.range(dst.shape[-3], track=False):
        for width in self.ker.range(dst.shape[-2], track=False):
          for inner in self.ker.range(elements_per_thread, track=False):
            base_row = height * rt.base_shape.rows
            base_col = width * rt.base_shape.cols
            if rt.layout == TileLayout.COL:
              row = rt.base_shape.stride * (laneid // rt.base_shape.cols) + inner
              col = laneid % rt.base_shape.cols
            else:
              row = laneid % rt.base_shape.rows
              col = rt.base_shape.stride * (laneid // rt.base_shape.rows) + inner
            srow, scol = base_row + row, base_col + col
            src_i += srow * row_stride + scol
            src_load = srcf[src_i]
            if src.dtype.base != dst.dtype.base:
              src_load = src_load.cast(dst.dtype.base)
            dst_store = dst[*dst_idxs, height, width, inner].store(src_load).end(height, width, inner)
    elif dst_dtype.addrspace == AddrSpace.REG and src_dtype.addrspace == AddrSpace.GLOBAL and isinstance(dst, RV):
      # global -> register vector: lanes within a reduction span load the same row segment
      srcf = src.flatten()
      row_stride = prod(src.shape[axis+1:])
      laneid = self.ker.laneid
      rv = cast(RV, dst)
      reductions = rv.base_shape.rows
      assert rv.layout == VecLayout.ORTHO, "only ortho layout supported"
      idxs = tuple(idx * rv.length if i == 3 else idx for i, idx in enumerate(idxs))
      src_i = ((idxs[0] * src.shape[-3] + idxs[1]) * src.shape[-2] + idxs[2]) * src.shape[-1] + idxs[3]
      for outer in self.ker.range(dst.shape[-2], track=False):
        src_i += outer * reductions + (laneid % reductions)
        src_load = srcf[src_i]
        if src.dtype.base != dst.dtype.base:
          src_load = src_load.cast(dst.dtype.base)
        dst_store = dst[outer, 0].store(src_load).end(outer)
    else:
      raise NotImplementedError(f"load from {src_dtype.addrspace} to {dst_dtype.addrspace} not implemented for {type(dst)=}")
    self.ker.push_store(dst_store, dst)
    return dst.after(dst_store).reshape(dst.shape)
def store(self, dst:ALL_TILES, src:ALL_TILES, idxs:tuple[UOp|int,...]=(), src_idxs:tuple[UOp|int,...]=(), axis:int=0):
  """Lower a tile/vector store into the kernel's UOp graph and return the updated dst.

  Mirrors `load` but in the opposite direction: dispatches on the (src, dst)
  address-space pair and emits per-lane index arithmetic plus STORE uops via
  `self.ker`. Supported combinations:

    * REG -> LOCAL   register tile (RT) into a shared tile (ST), going through
                     `ST.swizzle` for the shared-memory coordinates
    * REG -> GLOBAL  register tile (RT) into a flattened global buffer
    * REG -> GLOBAL  register vector (RV) into a flattened global buffer
                     (only VecLayout.ORTHO is supported — asserted below)

  Any other combination raises NotImplementedError.

  Args:
    dst: destination tile/buffer (treated as a UOp with a PtrDType).
    src: source tile/vector (treated as a UOp with a PtrDType).
    idxs: outer indices into dst; rescaled per-branch into flat-buffer offsets.
    src_idxs: outer indices into src, passed through to the register reads.
    axis: dst axis whose index in `idxs` is scaled by the tile row extent;
      also determines `row_stride` (product of the trailing dst dims).

  Returns:
    dst sequenced after the emitted store (`dst.after(...)`), reshaped back to
    dst.shape, so callers can keep threading the buffer through the graph.

  Raises:
    AssertionError: if dst/src are not pointer-typed, or (RV branch) the vector
      layout is not ORTHO.
    NotImplementedError: for unsupported address-space combinations.
  """
  # `cast` here is typing-only: dst/src are tile wrappers viewed as raw UOps.
  dst, src = cast(UOp, dst), cast(UOp, src)
  assert isinstance(dst.dtype, PtrDType) and isinstance(src.dtype, PtrDType)
  dst_dtype, src_dtype = dst.dtype, src.dtype
  if src_dtype.addrspace == AddrSpace.REG and dst_dtype.addrspace == AddrSpace.LOCAL:
    # Register tile -> shared tile. Each lane writes its slice of the base
    # tile; shared coordinates go through the ST swizzle.
    laneid = self.ker.laneid
    st, rt = cast(ST, dst), cast(RT, src)
    elements_per_thread = rt.base_shape.elements_per_thread
    # NOTE(review): range(track=False) loops appear to build symbolic kernel
    # ranges rather than iterate in Python — confirm against Kernel.range.
    for height in self.ker.range(src.shape[-3], track=False):
      for width in self.ker.range(src.shape[-2], track=False):
        for inner in self.ker.range(elements_per_thread, track=False):
          if rt.layout != st.layout:
            # Layouts differ: lane walks columns, `inner` strides rows.
            row = rt.base_shape.stride * (laneid // rt.base_shape.cols) + inner
            col = laneid % rt.base_shape.cols
          else:
            # Layouts match: lane walks rows, `inner` strides columns.
            row = laneid % rt.base_shape.rows
            col = rt.base_shape.stride * (laneid // rt.base_shape.rows) + inner
          srow, scol = cast(ST, dst).swizzle(row, col)
          src_load = src[*src_idxs, height, width, inner]
          if src.dtype.base != dst.dtype.base:
            src_load = src_load.cast(dst.dtype.base)
          dst_store = dst[*idxs[:-2], height, width, srow, scol].store(src_load)
          dst_store = dst_store.end(height, width, inner)
  elif src_dtype.addrspace == AddrSpace.REG and dst_dtype.addrspace == AddrSpace.GLOBAL and isinstance(src, RT):
    # Register tile -> global memory, addressed through a flat view of dst.
    dstf = dst.flatten()
    row_stride = prod(dst.shape[axis+1:])  # elements per row of the store axis
    laneid = self.ker.laneid
    rt = cast(RT, src)
    elements_per_thread = rt.base_shape.elements_per_thread
    # Rescale the outer indices: the `axis` entry by the tile's row extent,
    # index 3 by the tile's column extent (mirrors the layout used by `load`).
    idxs = tuple(idx * src.shape[-3] * rt.base_shape.rows if i == axis else idx for i, idx in enumerate(idxs))
    idxs = tuple(idx * src.shape[-2] * rt.base_shape.cols if i == 3 else idx for i, idx in enumerate(idxs))
    # Row-major flatten of the 4 outer indices into a base flat offset.
    dst_i = ((idxs[0] * dst.shape[-3] + idxs[1]) * dst.shape[-2] + idxs[2]) * dst.shape[-1] + idxs[3]
    for height in self.ker.range(src.shape[-3], track=False):
      for width in self.ker.range(src.shape[-2], track=False):
        for inner in self.ker.range(elements_per_thread, track=False):
          base_row = height * rt.base_shape.rows
          base_col = width * rt.base_shape.cols
          if rt.layout == TileLayout.COL:
            row = rt.base_shape.stride * (laneid // rt.base_shape.cols) + inner
            col = laneid % rt.base_shape.cols
          else:
            row = laneid % rt.base_shape.rows
            col = rt.base_shape.stride * (laneid // rt.base_shape.rows) + inner
          srow, scol = base_row + row, base_col + col
          # Offset the base flat index by this lane's (row, col) position.
          dst_i += srow * row_stride + scol
          src_load = src[*src_idxs, height, width, inner]
          if src.dtype.base != dst.dtype.base:
            src_load = src_load.cast(dst.dtype.base)
          dst_store = dstf[dst_i].store(src_load).end(height, width, inner)
  elif src_dtype.addrspace == AddrSpace.REG and dst_dtype.addrspace == AddrSpace.GLOBAL and isinstance(src, RV):
    # Register vector -> global memory (ortho layout only).
    dstf = dst.flatten()
    # NOTE(review): row_stride is computed but never used in this branch —
    # either dead code or a missing factor in the dst_i update; confirm.
    row_stride = prod(dst.shape[axis+1:])
    laneid = self.ker.laneid
    rv = cast(RV, src)
    reductions = rv.base_shape.rows
    assert rv.layout == VecLayout.ORTHO, "only ortho layout supported"
    # Only index 3 is rescaled here (by the vector length), unlike the RT branch.
    idxs = tuple(idx * rv.length if i == 3 else idx for i, idx in enumerate(idxs))
    dst_i = ((idxs[0] * dst.shape[-3] + idxs[1]) * dst.shape[-2] + idxs[2]) * dst.shape[-1] + idxs[3]
    for outer in self.ker.range(src.shape[-2], track=False):
      # Each lane writes element (laneid % reductions) of chunk `outer`.
      dst_i += outer * reductions + (laneid % reductions)
      src_load = src[outer, 0]
      if src.dtype.base != dst.dtype.base:
        src_load = src_load.cast(dst.dtype.base)
      dst_store = dstf[dst_i].store(src_load).end(outer)
  else:
    raise NotImplementedError(f"store from {src_dtype.addrspace} to {dst_dtype.addrspace} not implemented for {type(src)=}")
  # Register the store with the kernel, then return dst sequenced after it so
  # later ops observe the written data.
  self.ker.push_store(dst_store, dst)
  return dst.after(dst_store).reshape(dst.shape)
| {
"repo_id": "tinygrad/tinygrad",
"file_path": "extra/thunder/tiny/tk/group.py",
"license": "MIT License",
"lines": 390,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.