sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
infiniflow/ragflow:test/testcases/test_sdk_api/test_dataset_mangement/test_delete_datasets.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import uuid
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from common import batch_create_datasets
from configs import HOST_ADDRESS, INVALID_API_TOKEN
from ragflow_sdk import RAGFlow
class TestAuthorization:
    """Dataset deletion must reject requests whose API key is missing or invalid."""

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "invalid_auth, expected_message",
        [
            (None, "Authentication error: API key is invalid!"),
            (INVALID_API_TOKEN, "Authentication error: API key is invalid!"),
        ],
    )
    def test_auth_invalid(self, invalid_auth, expected_message):
        # A client constructed with a bad token must fail before any deletion occurs.
        unauthorized_client = RAGFlow(invalid_auth, HOST_ADDRESS)
        with pytest.raises(Exception) as excinfo:
            unauthorized_client.delete_datasets()
        assert str(excinfo.value) == expected_message
class TestCapability:
    """Capacity/stress tests: bulk and concurrent dataset deletion."""

    @pytest.mark.p3
    def test_delete_dataset_1k(self, client):
        """Deleting 1,000 datasets in a single batch call leaves none behind."""
        datasets = batch_create_datasets(client, 1_000)
        client.delete_datasets(ids=[dataset.id for dataset in datasets])
        datasets = client.list_datasets()
        assert len(datasets) == 0, datasets

    @pytest.mark.p3
    def test_concurrent_deletion(self, client):
        """1,000 single-dataset deletions issued concurrently all complete.

        Fix: the original rebuilt the full 1,000-element id list inside the
        submit loop (`[dataset.id for dataset in datasets][i:i+1]`), an
        accidental O(n^2); the list is now computed once and sliced.
        """
        count = 1_000
        datasets = batch_create_datasets(client, count)
        dataset_ids = [dataset.id for dataset in datasets]  # hoisted loop invariant
        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [executor.submit(client.delete_datasets, ids=dataset_ids[i : i + 1]) for i in range(count)]
            responses = list(as_completed(futures))
            assert len(responses) == count, responses
        datasets = client.list_datasets()
        assert len(datasets) == 0, datasets
class TestDatasetsDelete:
    """Functional coverage for the delete-datasets endpoint."""

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "func, remaining",
        [
            (lambda r: {"ids": r[:1]}, 2),
            (lambda r: {"ids": r}, 0),
        ],
        ids=["single_dataset", "multiple_datasets"],
    )
    def test_ids(self, client, add_datasets_func, func, remaining):
        # Build the deletion payload from the freshly created dataset ids.
        dataset_ids = [dataset.id for dataset in add_datasets_func]
        payload = func(dataset_ids) if callable(func) else None
        client.delete_datasets(**payload)
        datasets = client.list_datasets()
        assert len(datasets) == remaining, str(datasets)

    @pytest.mark.p2
    @pytest.mark.usefixtures("add_dataset_func")
    def test_ids_empty(self, client):
        # An empty id list is a no-op: the single dataset survives.
        client.delete_datasets(**{"ids": []})
        datasets = client.list_datasets()
        assert len(datasets) == 1, str(datasets)

    @pytest.mark.p3
    @pytest.mark.usefixtures("add_datasets_func")
    def test_ids_none(self, client):
        # ids=None is interpreted as "delete everything owned by the caller".
        client.delete_datasets(**{"ids": None})
        datasets = client.list_datasets()
        assert len(datasets) == 0, str(datasets)

    @pytest.mark.p2
    @pytest.mark.usefixtures("add_dataset_func")
    def test_id_not_uuid(self, client):
        with pytest.raises(Exception) as excinfo:
            client.delete_datasets(**{"ids": ["not_uuid"]})
        assert "Invalid UUID1 format" in str(excinfo.value), str(excinfo.value)
        datasets = client.list_datasets()
        assert len(datasets) == 1, str(datasets)

    @pytest.mark.p3
    @pytest.mark.usefixtures("add_dataset_func")
    def test_id_not_uuid1(self, client):
        # A v4 UUID is syntactically valid but not the required UUID1 variant.
        with pytest.raises(Exception) as excinfo:
            client.delete_datasets(**{"ids": [uuid.uuid4().hex]})
        assert "Invalid UUID1 format" in str(excinfo.value), str(excinfo.value)

    @pytest.mark.p2
    @pytest.mark.usefixtures("add_dataset_func")
    def test_id_wrong_uuid(self, client):
        # Well-formed UUID1 owned by nobody: permission error, nothing deleted.
        with pytest.raises(Exception) as excinfo:
            client.delete_datasets(**{"ids": ["d94a8dc02c9711f0930f7fbc369eab6d"]})
        assert "lacks permission for dataset" in str(excinfo.value), str(excinfo.value)
        datasets = client.list_datasets()
        assert len(datasets) == 1, str(datasets)

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "func",
        [
            lambda r: {"ids": ["d94a8dc02c9711f0930f7fbc369eab6d"] + r},
            lambda r: {"ids": r[:1] + ["d94a8dc02c9711f0930f7fbc369eab6d"] + r[1:3]},
            lambda r: {"ids": r + ["d94a8dc02c9711f0930f7fbc369eab6d"]},
        ],
    )
    def test_ids_partial_invalid(self, client, add_datasets_func, func):
        # One foreign id anywhere in the batch must abort the whole deletion.
        if callable(func):
            payload = func([dataset.id for dataset in add_datasets_func])
        with pytest.raises(Exception) as excinfo:
            client.delete_datasets(**payload)
        assert "lacks permission for dataset" in str(excinfo.value), str(excinfo.value)
        datasets = client.list_datasets()
        assert len(datasets) == 3, str(datasets)

    @pytest.mark.p2
    def test_ids_duplicate(self, client, add_datasets_func):
        # Submitting the same ids twice in one request is rejected.
        dataset_ids = [dataset.id for dataset in add_datasets_func]
        with pytest.raises(Exception) as excinfo:
            client.delete_datasets(**{"ids": dataset_ids + dataset_ids})
        assert "Duplicate ids:" in str(excinfo.value), str(excinfo.value)
        datasets = client.list_datasets()
        assert len(datasets) == 3, str(datasets)

    @pytest.mark.p2
    def test_repeated_delete(self, client, add_datasets_func):
        # A second delete of the same ids fails: they are already gone.
        payload = {"ids": [dataset.id for dataset in add_datasets_func]}
        client.delete_datasets(**payload)
        with pytest.raises(Exception) as excinfo:
            client.delete_datasets(**payload)
        assert "lacks permission for dataset" in str(excinfo.value), str(excinfo.value)

    @pytest.mark.p3
    @pytest.mark.usefixtures("add_dataset_func")
    def test_field_unsupported(self, client):
        # Unknown keyword arguments are rejected by the SDK signature itself.
        with pytest.raises(Exception) as excinfo:
            client.delete_datasets(**{"unknown_field": "unknown_field"})
        assert "got an unexpected keyword argument 'unknown_field'" in str(excinfo.value), str(excinfo.value)
        datasets = client.list_datasets()
        assert len(datasets) == 1, str(datasets)
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_sdk_api/test_dataset_mangement/test_delete_datasets.py",
"license": "Apache License 2.0",
"lines": 151,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_sdk_api/test_dataset_mangement/test_list_datasets.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import uuid
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from configs import HOST_ADDRESS, INVALID_API_TOKEN
from ragflow_sdk import RAGFlow
class TestAuthorization:
    """Dataset listing must reject requests whose API key is missing or invalid."""

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "invalid_auth, expected_message",
        [
            (None, "Authentication error: API key is invalid!"),
            (INVALID_API_TOKEN, "Authentication error: API key is invalid!"),
        ],
    )
    def test_auth_invalid(self, invalid_auth, expected_message):
        # Listing with a bad token must surface the authentication error.
        unauthorized_client = RAGFlow(invalid_auth, HOST_ADDRESS)
        with pytest.raises(Exception) as excinfo:
            unauthorized_client.list_datasets()
        assert expected_message in str(excinfo.value)
class TestCapability:
    """Concurrency smoke test for dataset listing."""

    @pytest.mark.p3
    def test_concurrent_list(self, client):
        # 100 simultaneous list calls must all complete.
        count = 100
        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [executor.submit(client.list_datasets) for _ in range(count)]
            completed = list(as_completed(futures))
            assert len(completed) == count, completed
@pytest.mark.usefixtures("add_datasets")
class TestDatasetsList:
    """Listing, pagination, ordering and filtering against five pre-created datasets."""

    @pytest.mark.p2
    def test_params_unset(self, client):
        # No arguments at all: every dataset comes back.
        result = client.list_datasets()
        assert len(result) == 5, str(result)

    @pytest.mark.p2
    def test_params_empty(self, client):
        # Empty kwargs behave the same as no arguments.
        result = client.list_datasets(**{})
        assert len(result) == 5, str(result)

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "params, expected_page_size",
        [
            ({"page": 2, "page_size": 2}, 2),
            ({"page": 3, "page_size": 2}, 1),
            ({"page": 4, "page_size": 2}, 0),
            ({"page": 1, "page_size": 10}, 5),
        ],
        ids=["normal_middle_page", "normal_last_partial_page", "beyond_max_page", "full_data_single_page"],
    )
    def test_page(self, client, params, expected_page_size):
        result = client.list_datasets(**params)
        assert len(result) == expected_page_size, str(result)

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "params, expected_message",
        [
            ({"page": 0}, "Input should be greater than or equal to 1"),
            ({"page": "a"}, "not instance of"),
        ],
        ids=["page_0", "page_a"],
    )
    def test_page_invalid(self, client, params, expected_message):
        with pytest.raises(Exception) as excinfo:
            client.list_datasets(**params)
        assert expected_message in str(excinfo.value), str(excinfo.value)

    @pytest.mark.p2
    def test_page_none(self, client):
        with pytest.raises(Exception) as excinfo:
            client.list_datasets(**{"page": None})
        assert "not instance of" in str(excinfo.value), str(excinfo.value)

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "params, expected_page_size",
        [
            ({"page_size": 1}, 1),
            ({"page_size": 3}, 3),
            ({"page_size": 5}, 5),
            ({"page_size": 6}, 5),
        ],
        ids=["min_valid_page_size", "medium_page_size", "page_size_equals_total", "page_size_exceeds_total"],
    )
    def test_page_size(self, client, params, expected_page_size):
        result = client.list_datasets(**params)
        assert len(result) == expected_page_size, str(result)

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "params, expected_message",
        [
            ({"page_size": 0}, "Input should be greater than or equal to 1"),
            ({"page_size": "a"}, "not instance of"),
        ],
    )
    def test_page_size_invalid(self, client, params, expected_message):
        with pytest.raises(Exception) as excinfo:
            client.list_datasets(**params)
        assert expected_message in str(excinfo.value), str(excinfo.value)

    @pytest.mark.p2
    def test_page_size_none(self, client):
        with pytest.raises(Exception) as excinfo:
            client.list_datasets(**{"page_size": None})
        assert "not instance of" in str(excinfo.value), str(excinfo.value)

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "params",
        [
            {"orderby": "create_time"},
            {"orderby": "update_time"},
        ],
        ids=["orderby_create_time", "orderby_update_time"],
    )
    def test_orderby(self, client, params):
        # Accepted values must not raise; ordering itself is not asserted here.
        client.list_datasets(**params)

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "params",
        [
            {"orderby": ""},
            {"orderby": "unknown"},
            {"orderby": "CREATE_TIME"},
            {"orderby": "UPDATE_TIME"},
            {"orderby": " create_time "},
        ],
        ids=["empty", "unknown", "orderby_create_time_upper", "orderby_update_time_upper", "whitespace"],
    )
    def test_orderby_invalid(self, client, params):
        # Validation is exact: case and surrounding whitespace are rejected.
        with pytest.raises(Exception) as excinfo:
            client.list_datasets(**params)
        assert "Input should be 'create_time' or 'update_time'" in str(excinfo.value), str(excinfo.value)

    @pytest.mark.p3
    def test_orderby_none(self, client):
        with pytest.raises(Exception) as excinfo:
            client.list_datasets(**{"orderby": None})
        assert "not instance of" in str(excinfo.value), str(excinfo.value)

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "params",
        [
            {"desc": True},
            {"desc": False},
        ],
        ids=["desc=True", "desc=False"],
    )
    def test_desc(self, client, params):
        client.list_datasets(**params)

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "params",
        [
            {"desc": 3.14},
            {"desc": "unknown"},
        ],
        ids=["float_value", "invalid_string"],
    )
    def test_desc_invalid(self, client, params):
        with pytest.raises(Exception) as excinfo:
            client.list_datasets(**params)
        assert "not instance of" in str(excinfo.value), str(excinfo.value)

    @pytest.mark.p3
    def test_desc_none(self, client):
        with pytest.raises(Exception) as excinfo:
            client.list_datasets(**{"desc": None})
        assert "not instance of" in str(excinfo.value), str(excinfo.value)

    @pytest.mark.p1
    def test_name(self, client):
        # Exact-name filter returns exactly the matching dataset.
        result = client.list_datasets(**{"name": "dataset_1"})
        assert len(result) == 1, str(result)
        assert result[0].name == "dataset_1", str(result)

    @pytest.mark.p2
    def test_name_wrong(self, client):
        with pytest.raises(Exception) as excinfo:
            client.list_datasets(**{"name": "wrong name"})
        assert "lacks permission for dataset" in str(excinfo.value), str(excinfo.value)

    @pytest.mark.p2
    def test_get_dataset_not_found_raises(self, client, monkeypatch):
        # Force list_datasets to report nothing so get_dataset takes its not-found path.
        monkeypatch.setattr(client, "list_datasets", lambda **_: [])
        with pytest.raises(Exception) as excinfo:
            client.get_dataset(name="missing-name-for-coverage")
        assert "Dataset missing-name-for-coverage not found" in str(excinfo.value), str(excinfo.value)

    @pytest.mark.p2
    def test_name_empty(self, client):
        # An empty name filter is treated as "no filter".
        result = client.list_datasets(**{"name": ""})
        assert len(result) == 5, str(result)

    @pytest.mark.p2
    def test_name_none(self, client):
        result = client.list_datasets(**{"name": None})
        assert len(result) == 5, str(result)

    @pytest.mark.p1
    def test_id(self, client, add_datasets):
        # Filtering by a known id returns exactly that dataset.
        first_id = add_datasets[0].id
        result = client.list_datasets(**{"id": first_id})
        assert len(result) == 1, str(result)
        assert result[0].id == first_id, str(result)

    @pytest.mark.p2
    def test_id_not_uuid(self, client):
        with pytest.raises(Exception) as excinfo:
            client.list_datasets(**{"id": "not_uuid"})
        assert "Invalid UUID1 format" in str(excinfo.value), str(excinfo.value)

    @pytest.mark.p2
    def test_id_not_uuid1(self, client):
        # A v4 UUID is syntactically valid but not the required UUID1 variant.
        with pytest.raises(Exception) as excinfo:
            client.list_datasets(**{"id": uuid.uuid4().hex})
        assert "Invalid UUID1 format" in str(excinfo.value), str(excinfo.value)

    @pytest.mark.p2
    def test_id_wrong_uuid(self, client):
        with pytest.raises(Exception) as excinfo:
            client.list_datasets(**{"id": "d94a8dc02c9711f0930f7fbc369eab6d"})
        assert "lacks permission for dataset" in str(excinfo.value), str(excinfo.value)

    @pytest.mark.p2
    def test_id_empty(self, client):
        with pytest.raises(Exception) as excinfo:
            client.list_datasets(**{"id": ""})
        assert "Invalid UUID1 format" in str(excinfo.value), str(excinfo.value)

    @pytest.mark.p2
    def test_id_none(self, client):
        result = client.list_datasets(**{"id": None})
        assert len(result) == 5, str(result)

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "func, name, expected_num",
        [
            (lambda r: r[0].id, "dataset_0", 1),
            (lambda r: r[0].id, "dataset_1", 0),
        ],
        ids=["name_and_id_match", "name_and_id_mismatch"],
    )
    def test_name_and_id(self, client, add_datasets, func, name, expected_num):
        # Combined id+name filter only matches when both refer to the same dataset.
        query = {"id": func(add_datasets), "name": name} if callable(func) else None
        result = client.list_datasets(**query)
        assert len(result) == expected_num, str(result)

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "dataset_id, name",
        [
            (lambda r: r[0].id, "wrong_name"),
            (uuid.uuid1().hex, "dataset_0"),
        ],
        ids=["name", "id"],
    )
    def test_name_and_id_wrong(self, client, add_datasets, dataset_id, name):
        resolved_id = dataset_id(add_datasets) if callable(dataset_id) else dataset_id
        with pytest.raises(Exception) as excinfo:
            client.list_datasets(**{"id": resolved_id, "name": name})
        assert "lacks permission for dataset" in str(excinfo.value), str(excinfo.value)

    @pytest.mark.p3
    def test_field_unsupported(self, client):
        with pytest.raises(Exception) as excinfo:
            client.list_datasets(**{"unknown_field": "unknown_field"})
        assert "got an unexpected keyword argument" in str(excinfo.value), str(excinfo.value)
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_sdk_api/test_dataset_mangement/test_list_datasets.py",
"license": "Apache License 2.0",
"lines": 287,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_sdk_api/test_dataset_mangement/test_update_dataset.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from concurrent.futures import ThreadPoolExecutor, as_completed
from operator import attrgetter
import pytest
from configs import DATASET_NAME_LIMIT
from hypothesis import HealthCheck, example, given, settings
from ragflow_sdk import DataSet
from utils import encode_avatar
from utils.file_utils import create_image_file
from utils.hypothesis_utils import valid_names
from configs import DEFAULT_PARSER_CONFIG
from utils.engine_utils import get_doc_engine
class TestRquest:
    """Request-validation checks for dataset update.

    NOTE(review): class name looks like a typo for "TestRequest", but it is
    public test-selection interface, so it is kept unchanged.
    """

    @pytest.mark.p2
    def test_payload_empty(self, add_dataset_func):
        # An update carrying no fields must be rejected outright.
        with pytest.raises(Exception) as excinfo:
            add_dataset_func.update({})
        assert "No properties were modified" in str(excinfo.value), str(excinfo.value)
class TestCapability:
    """Concurrency smoke test for dataset update."""

    @pytest.mark.p3
    def test_update_dateset_concurrent(self, add_dataset_func):
        # 100 concurrent renames of the same dataset must all complete.
        dataset = add_dataset_func
        count = 100
        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [executor.submit(dataset.update, {"name": f"dataset_{i}"}) for i in range(count)]
            completed = list(as_completed(futures))
            assert len(completed) == count, completed
class TestDatasetUpdate:
@pytest.mark.p1
@given(name=valid_names())
@example("a" * 128)
@settings(max_examples=20, suppress_health_check=[HealthCheck.function_scoped_fixture])
def test_name(self, client, add_dataset_func, name):
dataset = add_dataset_func
payload = {"name": name}
dataset.update(payload)
assert dataset.name == name, str(dataset)
retrieved_dataset = client.get_dataset(name=dataset.name)
assert retrieved_dataset.name == name, str(retrieved_dataset)
@pytest.mark.p2
@pytest.mark.parametrize(
"name, expected_message",
[
("", "String should have at least 1 character"),
(" ", "String should have at least 1 character"),
("a" * (DATASET_NAME_LIMIT + 1), "String should have at most 128 characters"),
(0, "Input should be a valid string"),
(None, "Input should be a valid string"),
],
ids=["empty_name", "space_name", "too_long_name", "invalid_name", "None_name"],
)
def test_name_invalid(self, add_dataset_func, name, expected_message):
dataset = add_dataset_func
with pytest.raises(Exception) as exception_info:
dataset.update({"name": name})
assert expected_message in str(exception_info.value), str(exception_info.value)
@pytest.mark.p3
def test_name_duplicated(self, add_datasets_func):
datasets = add_datasets_func
name = "dataset_1"
with pytest.raises(Exception) as exception_info:
datasets[0].update({"name": name})
assert f"Dataset name '{name}' already exists" in str(exception_info.value), str(exception_info.value)
@pytest.mark.p3
def test_name_case_insensitive(self, add_datasets_func):
dataset = add_datasets_func[0]
name = "DATASET_1"
with pytest.raises(Exception) as exception_info:
dataset.update({"name": name})
assert f"Dataset name '{name}' already exists" in str(exception_info.value), str(exception_info.value)
@pytest.mark.p2
def test_avatar(self, client, add_dataset_func, tmp_path):
dataset = add_dataset_func
fn = create_image_file(tmp_path / "ragflow_test.png")
avatar_data = f"data:image/png;base64,{encode_avatar(fn)}"
dataset.update({"avatar": avatar_data})
assert dataset.avatar == avatar_data, str(dataset)
retrieved_dataset = client.get_dataset(name=dataset.name)
assert retrieved_dataset.avatar == avatar_data, str(retrieved_dataset)
@pytest.mark.p3
def test_avatar_exceeds_limit_length(self, add_dataset_func):
dataset = add_dataset_func
with pytest.raises(Exception) as exception_info:
dataset.update({"avatar": "a" * 65536})
assert "String should have at most 65535 characters" in str(exception_info.value), str(exception_info.value)
@pytest.mark.p3
@pytest.mark.parametrize(
"avatar_prefix, expected_message",
[
("", "Missing MIME prefix. Expected format: data:<mime>;base64,<data>"),
("data:image/png;base64", "Missing MIME prefix. Expected format: data:<mime>;base64,<data>"),
("invalid_mine_prefix:image/png;base64,", "Invalid MIME prefix format. Must start with 'data:'"),
("data:unsupported_mine_type;base64,", "Unsupported MIME type. Allowed: ['image/jpeg', 'image/png']"),
],
ids=["empty_prefix", "missing_comma", "unsupported_mine_type", "invalid_mine_type"],
)
def test_avatar_invalid_prefix(self, add_dataset_func, tmp_path, avatar_prefix, expected_message):
dataset = add_dataset_func
fn = create_image_file(tmp_path / "ragflow_test.png")
with pytest.raises(Exception) as exception_info:
dataset.update({"avatar": f"{avatar_prefix}{encode_avatar(fn)}"})
assert expected_message in str(exception_info.value), str(exception_info.value)
@pytest.mark.p3
def test_avatar_none(self, client, add_dataset_func):
dataset = add_dataset_func
dataset.update({"avatar": None})
assert dataset.avatar is None, str(dataset)
retrieved_dataset = client.get_dataset(name=dataset.name)
assert retrieved_dataset.avatar is None, str(retrieved_dataset)
@pytest.mark.p2
def test_description(self, client, add_dataset_func):
dataset = add_dataset_func
dataset.update({"description": "description"})
assert dataset.description == "description", str(dataset)
retrieved_dataset = client.get_dataset(name=dataset.name)
assert retrieved_dataset.description == "description", str(retrieved_dataset)
@pytest.mark.p3
def test_description_exceeds_limit_length(self, add_dataset_func):
dataset = add_dataset_func
with pytest.raises(Exception) as exception_info:
dataset.update({"description": "a" * 65536})
assert "String should have at most 65535 characters" in str(exception_info.value), str(exception_info.value)
@pytest.mark.p3
def test_description_none(self, client, add_dataset_func):
dataset = add_dataset_func
dataset.update({"description": None})
assert dataset.description is None, str(dataset)
retrieved_dataset = client.get_dataset(name=dataset.name)
assert retrieved_dataset.description is None, str(retrieved_dataset)
@pytest.mark.p1
@pytest.mark.parametrize(
"embedding_model",
[
"BAAI/bge-small-en-v1.5@Builtin",
"embedding-3@ZHIPU-AI",
],
ids=["builtin_baai", "tenant_zhipu"],
)
def test_embedding_model(self, client, add_dataset_func, embedding_model):
dataset = add_dataset_func
dataset.update({"embedding_model": embedding_model})
assert dataset.embedding_model == embedding_model, str(dataset)
retrieved_dataset = client.get_dataset(name=dataset.name)
assert retrieved_dataset.embedding_model == embedding_model, str(retrieved_dataset)
@pytest.mark.p2
@pytest.mark.parametrize(
"name, embedding_model",
[
("unknown_llm_name", "unknown@ZHIPU-AI"),
("unknown_llm_factory", "embedding-3@unknown"),
("tenant_no_auth_default_tenant_llm", "text-embedding-v3@Tongyi-Qianwen"),
("tenant_no_auth", "text-embedding-3-small@OpenAI"),
],
ids=["unknown_llm_name", "unknown_llm_factory", "tenant_no_auth_default_tenant_llm", "tenant_no_auth"],
)
def test_embedding_model_invalid(self, add_dataset_func, name, embedding_model):
dataset = add_dataset_func
with pytest.raises(Exception) as exception_info:
dataset.update({"name": name, "embedding_model": embedding_model})
error_msg = str(exception_info.value)
if "tenant_no_auth" in name:
assert error_msg == f"Unauthorized model: <{embedding_model}>", error_msg
else:
assert error_msg == f"Unsupported model: <{embedding_model}>", error_msg
@pytest.mark.p2
@pytest.mark.parametrize(
"name, embedding_model",
[
("empty", ""),
("space", " "),
("missing_at", "BAAI/bge-small-en-v1.5Builtin"),
("missing_model_name", "@Builtin"),
("missing_provider", "BAAI/bge-small-en-v1.5@"),
("whitespace_only_model_name", " @Builtin"),
("whitespace_only_provider", "BAAI/bge-small-en-v1.5@ "),
],
ids=["empty", "space", "missing_at", "empty_model_name", "empty_provider", "whitespace_only_model_name", "whitespace_only_provider"],
)
def test_embedding_model_format(self, add_dataset_func, name, embedding_model):
dataset = add_dataset_func
with pytest.raises(Exception) as exception_info:
dataset.update({"name": name, "embedding_model": embedding_model})
error_msg = str(exception_info.value)
if name in ["empty", "space", "missing_at"]:
assert "Embedding model identifier must follow <model_name>@<provider> format" in error_msg, error_msg
else:
assert "Both model_name and provider must be non-empty strings" in error_msg, error_msg
@pytest.mark.p2
def test_embedding_model_none(self, client, add_dataset_func):
dataset = add_dataset_func
dataset.update({"embedding_model": None})
assert dataset.embedding_model == "BAAI/bge-small-en-v1.5@Builtin", str(dataset)
retrieved_dataset = client.get_dataset(name=dataset.name)
assert retrieved_dataset.embedding_model == "BAAI/bge-small-en-v1.5@Builtin", str(retrieved_dataset)
@pytest.mark.p2
@pytest.mark.parametrize(
"permission",
[
"me",
"team",
],
ids=["me", "team"],
)
def test_permission(self, client, add_dataset_func, permission):
dataset = add_dataset_func
dataset.update({"permission": permission})
assert dataset.permission == permission.lower().strip(), str(dataset)
retrieved_dataset = client.get_dataset(name=dataset.name)
assert retrieved_dataset.permission == permission.lower().strip(), str(retrieved_dataset)
@pytest.mark.p2
@pytest.mark.parametrize(
"permission",
[
"",
"unknown",
list(),
"ME",
"TEAM",
" ME ",
],
ids=["empty", "unknown", "type_error", "me_upercase", "team_upercase", "whitespace"],
)
def test_permission_invalid(self, add_dataset_func, permission):
dataset = add_dataset_func
with pytest.raises(Exception) as exception_info:
dataset.update({"permission": permission})
assert "Input should be 'me' or 'team'" in str(exception_info.value), str(exception_info.value)
@pytest.mark.p3
def test_permission_none(self, add_dataset_func):
dataset = add_dataset_func
with pytest.raises(Exception) as exception_info:
dataset.update({"permission": None})
assert "Input should be 'me' or 'team'" in str(exception_info.value), str(exception_info.value)
@pytest.mark.p1
@pytest.mark.parametrize(
"chunk_method",
[
"naive",
"book",
"email",
"laws",
"manual",
"one",
"paper",
"picture",
"presentation",
"qa",
"table",
"tag",
],
ids=["naive", "book", "email", "laws", "manual", "one", "paper", "picture", "presentation", "qa", "table", "tag"],
)
def test_chunk_method(self, client, add_dataset_func, chunk_method):
dataset = add_dataset_func
dataset.update({"chunk_method": chunk_method})
assert dataset.chunk_method == chunk_method, str(dataset)
retrieved_dataset = client.get_dataset(name=dataset.name)
assert retrieved_dataset.chunk_method == chunk_method, str(retrieved_dataset)
@pytest.mark.p2
@pytest.mark.parametrize(
"chunk_method",
[
"",
"unknown",
list(),
],
ids=["empty", "unknown", "type_error"],
)
def test_chunk_method_invalid(self, add_dataset_func, chunk_method):
dataset = add_dataset_func
with pytest.raises(Exception) as exception_info:
dataset.update({"chunk_method": chunk_method})
assert "Input should be 'naive', 'book', 'email', 'laws', 'manual', 'one', 'paper', 'picture', 'presentation', 'qa', 'table' or 'tag'" in str(exception_info.value), str(exception_info.value)
@pytest.mark.p3
def test_chunk_method_none(self, add_dataset_func):
dataset = add_dataset_func
with pytest.raises(Exception) as exception_info:
dataset.update({"chunk_method": None})
assert "Input should be 'naive', 'book', 'email', 'laws', 'manual', 'one', 'paper', 'picture', 'presentation', 'qa', 'table' or 'tag'" in str(exception_info.value), str(exception_info.value)
@pytest.mark.skipif(os.getenv("DOC_ENGINE") == "infinity", reason="#8208")
@pytest.mark.p2
@pytest.mark.parametrize("pagerank", [0, 50, 100], ids=["min", "mid", "max"])
def test_pagerank(self, client, add_dataset_func, pagerank):
if get_doc_engine(client) == "infinity":
pytest.skip("#8208")
dataset = add_dataset_func
dataset.update({"pagerank": pagerank})
assert dataset.pagerank == pagerank, str(dataset)
retrieved_dataset = client.get_dataset(name=dataset.name)
assert retrieved_dataset.pagerank == pagerank, str(retrieved_dataset)
@pytest.mark.skipif(os.getenv("DOC_ENGINE") == "infinity", reason="#8208")
@pytest.mark.p2
def test_pagerank_set_to_0(self, client, add_dataset_func):
if get_doc_engine(client) == "infinity":
pytest.skip("#8208")
dataset = add_dataset_func
dataset.update({"pagerank": 50})
assert dataset.pagerank == 50, str(dataset)
retrieved_dataset = client.get_dataset(name=dataset.name)
assert retrieved_dataset.pagerank == 50, str(retrieved_dataset)
dataset.update({"pagerank": 0})
assert dataset.pagerank == 0, str(dataset)
retrieved_dataset = client.get_dataset(name=dataset.name)
assert retrieved_dataset.pagerank == 0, str(retrieved_dataset)
@pytest.mark.skipif(os.getenv("DOC_ENGINE") != "infinity", reason="#8208")
@pytest.mark.p2
def test_pagerank_infinity(self, client, add_dataset_func):
if get_doc_engine(client) != "infinity":
pytest.skip("#8208")
dataset = add_dataset_func
with pytest.raises(Exception) as exception_info:
dataset.update({"pagerank": 50})
assert "'pagerank' can only be set when doc_engine is elasticsearch" in str(exception_info.value), str(exception_info.value)
@pytest.mark.p2
@pytest.mark.parametrize(
"pagerank, expected_message",
[
(-1, "Input should be greater than or equal to 0"),
(101, "Input should be less than or equal to 100"),
],
ids=["min_limit", "max_limit"],
)
def test_pagerank_invalid(self, add_dataset_func, pagerank, expected_message):
dataset = add_dataset_func
with pytest.raises(Exception) as exception_info:
dataset.update({"pagerank": pagerank})
assert expected_message in str(exception_info.value), str(exception_info.value)
@pytest.mark.p3
def test_pagerank_none(self, add_dataset_func):
dataset = add_dataset_func
with pytest.raises(Exception) as exception_info:
dataset.update({"pagerank": None})
assert "Input should be a valid integer" in str(exception_info.value), str(exception_info.value)
@pytest.mark.p1
@pytest.mark.parametrize(
    "parser_config",
    [
        {"auto_keywords": 0},
        {"auto_keywords": 16},
        {"auto_keywords": 32},
        {"auto_questions": 0},
        {"auto_questions": 5},
        {"auto_questions": 10},
        {"chunk_token_num": 1},
        {"chunk_token_num": 1024},
        {"chunk_token_num": 2048},
        {"delimiter": "\n"},
        {"delimiter": " "},
        {"html4excel": True},
        {"html4excel": False},
        {"layout_recognize": "DeepDOC"},
        {"layout_recognize": "Plain Text"},
        {"tag_kb_ids": ["1", "2"]},
        {"topn_tags": 1},
        {"topn_tags": 5},
        {"topn_tags": 10},
        {"filename_embd_weight": 0.1},
        {"filename_embd_weight": 0.5},
        {"filename_embd_weight": 1.0},
        {"task_page_size": 1},
        {"task_page_size": None},
        {"pages": [[1, 100]]},
        {"pages": None},
        {"graphrag": {"use_graphrag": True}},
        {"graphrag": {"use_graphrag": False}},
        {"graphrag": {"entity_types": ["age", "sex", "height", "weight"]}},
        {"graphrag": {"method": "general"}},
        {"graphrag": {"method": "light"}},
        {"graphrag": {"community": True}},
        {"graphrag": {"community": False}},
        {"graphrag": {"resolution": True}},
        {"graphrag": {"resolution": False}},
        {"raptor": {"use_raptor": True}},
        {"raptor": {"use_raptor": False}},
        {"raptor": {"prompt": "Who are you?"}},
        {"raptor": {"max_token": 1}},
        {"raptor": {"max_token": 1024}},
        {"raptor": {"max_token": 2048}},
        {"raptor": {"threshold": 0.0}},
        {"raptor": {"threshold": 0.5}},
        {"raptor": {"threshold": 1.0}},
        {"raptor": {"max_cluster": 1}},
        {"raptor": {"max_cluster": 512}},
        {"raptor": {"max_cluster": 1024}},
        {"raptor": {"random_seed": 0}},
    ],
    ids=[
        "auto_keywords_min",
        "auto_keywords_mid",
        "auto_keywords_max",
        "auto_questions_min",
        "auto_questions_mid",
        "auto_questions_max",
        "chunk_token_num_min",
        "chunk_token_num_mid",
        "chunk_token_num_max",
        "delimiter",
        "delimiter_space",
        "html4excel_true",
        "html4excel_false",
        "layout_recognize_DeepDOC",
        # Fixed typo: was "layout_recognize_navie" ("naive" misspelled) for the
        # "Plain Text" layout case.
        "layout_recognize_naive",
        "tag_kb_ids",
        "topn_tags_min",
        "topn_tags_mid",
        "topn_tags_max",
        "filename_embd_weight_min",
        "filename_embd_weight_mid",
        "filename_embd_weight_max",
        "task_page_size_min",
        "task_page_size_None",
        "pages",
        "pages_none",
        "graphrag_true",
        "graphrag_false",
        "graphrag_entity_types",
        "graphrag_method_general",
        "graphrag_method_light",
        "graphrag_community_true",
        "graphrag_community_false",
        "graphrag_resolution_true",
        "graphrag_resolution_false",
        "raptor_true",
        "raptor_false",
        "raptor_prompt",
        "raptor_max_token_min",
        "raptor_max_token_mid",
        "raptor_max_token_max",
        "raptor_threshold_min",
        "raptor_threshold_mid",
        "raptor_threshold_max",
        "raptor_max_cluster_min",
        "raptor_max_cluster_mid",
        "raptor_max_cluster_max",
        "raptor_random_seed_min",
    ],
)
def test_parser_config(self, client, add_dataset_func, parser_config):
    """Each valid parser_config field is accepted and persisted.

    Verifies the updated value both on the local dataset object and on a
    fresh fetch from the server. Nested dicts (graphrag/raptor) are checked
    attribute-by-attribute via operator.attrgetter on the dotted path.
    """
    dataset = add_dataset_func
    dataset.update({"parser_config": parser_config})
    # Check the in-memory object first...
    for k, v in parser_config.items():
        if isinstance(v, dict):
            for kk, vv in v.items():
                assert attrgetter(f"{k}.{kk}")(dataset.parser_config) == vv, str(dataset)
        else:
            assert attrgetter(k)(dataset.parser_config) == v, str(dataset)
    # ...then a server round-trip to confirm persistence.
    retrieved_dataset = client.get_dataset(name=dataset.name)
    for k, v in parser_config.items():
        if isinstance(v, dict):
            for kk, vv in v.items():
                assert attrgetter(f"{k}.{kk}")(retrieved_dataset.parser_config) == vv, str(retrieved_dataset)
        else:
            assert attrgetter(k)(retrieved_dataset.parser_config) == v, str(retrieved_dataset)
@pytest.mark.p2
@pytest.mark.parametrize(
    "parser_config, expected_message",
    [
        ({"auto_keywords": -1}, "Input should be greater than or equal to 0"),
        ({"auto_keywords": 33}, "Input should be less than or equal to 32"),
        ({"auto_keywords": 3.14}, "Input should be a valid integer"),
        ({"auto_keywords": "string"}, "Input should be a valid integer"),
        ({"auto_questions": -1}, "Input should be greater than or equal to 0"),
        ({"auto_questions": 11}, "Input should be less than or equal to 10"),
        ({"auto_questions": 3.14}, "Input should be a valid integer"),
        ({"auto_questions": "string"}, "Input should be a valid integer"),
        ({"chunk_token_num": 0}, "Input should be greater than or equal to 1"),
        ({"chunk_token_num": 2049}, "Input should be less than or equal to 2048"),
        ({"chunk_token_num": 3.14}, "Input should be a valid integer"),
        ({"chunk_token_num": "string"}, "Input should be a valid integer"),
        ({"delimiter": ""}, "String should have at least 1 character"),
        ({"html4excel": "string"}, "Input should be a valid boolean"),
        ({"tag_kb_ids": "1,2"}, "Input should be a valid list"),
        ({"tag_kb_ids": [1, 2]}, "Input should be a valid string"),
        ({"topn_tags": 0}, "Input should be greater than or equal to 1"),
        ({"topn_tags": 11}, "Input should be less than or equal to 10"),
        ({"topn_tags": 3.14}, "Input should be a valid integer"),
        ({"topn_tags": "string"}, "Input should be a valid integer"),
        ({"filename_embd_weight": -1}, "Input should be greater than or equal to 0"),
        ({"filename_embd_weight": 1.1}, "Input should be less than or equal to 1"),
        ({"filename_embd_weight": "string"}, "Input should be a valid number"),
        ({"task_page_size": 0}, "Input should be greater than or equal to 1"),
        ({"task_page_size": 3.14}, "Input should be a valid integer"),
        ({"task_page_size": "string"}, "Input should be a valid integer"),
        ({"pages": "1,2"}, "Input should be a valid list"),
        ({"pages": ["1,2"]}, "Input should be a valid list"),
        ({"pages": [["string1", "string2"]]}, "Input should be a valid integer"),
        ({"graphrag": {"use_graphrag": "string"}}, "Input should be a valid boolean"),
        ({"graphrag": {"entity_types": "1,2"}}, "Input should be a valid list"),
        # Fixed expected message: previously "nput should be a valid string"
        # (missing leading "I"); it only passed because of substring matching.
        ({"graphrag": {"entity_types": [1, 2]}}, "Input should be a valid string"),
        ({"graphrag": {"method": "unknown"}}, "Input should be 'light' or 'general'"),
        ({"graphrag": {"method": None}}, "Input should be 'light' or 'general'"),
        ({"graphrag": {"community": "string"}}, "Input should be a valid boolean"),
        ({"graphrag": {"resolution": "string"}}, "Input should be a valid boolean"),
        ({"raptor": {"use_raptor": "string"}}, "Input should be a valid boolean"),
        ({"raptor": {"prompt": ""}}, "String should have at least 1 character"),
        ({"raptor": {"prompt": " "}}, "String should have at least 1 character"),
        ({"raptor": {"max_token": 0}}, "Input should be greater than or equal to 1"),
        ({"raptor": {"max_token": 2049}}, "Input should be less than or equal to 2048"),
        ({"raptor": {"max_token": 3.14}}, "Input should be a valid integer"),
        ({"raptor": {"max_token": "string"}}, "Input should be a valid integer"),
        ({"raptor": {"threshold": -0.1}}, "Input should be greater than or equal to 0"),
        ({"raptor": {"threshold": 1.1}}, "Input should be less than or equal to 1"),
        ({"raptor": {"threshold": "string"}}, "Input should be a valid number"),
        ({"raptor": {"max_cluster": 0}}, "Input should be greater than or equal to 1"),
        ({"raptor": {"max_cluster": 1025}}, "Input should be less than or equal to 1024"),
        ({"raptor": {"max_cluster": 3.14}}, "Input should be a valid integer"),
        ({"raptor": {"max_cluster": "string"}}, "Input should be a valid integer"),
        ({"raptor": {"random_seed": -1}}, "Input should be greater than or equal to 0"),
        ({"raptor": {"random_seed": 3.14}}, "Input should be a valid integer"),
        ({"raptor": {"random_seed": "string"}}, "Input should be a valid integer"),
        ({"delimiter": "a" * 65536}, "Parser config exceeds size limit (max 65,535 characters)"),
    ],
    ids=[
        "auto_keywords_min_limit",
        "auto_keywords_max_limit",
        "auto_keywords_float_not_allowed",
        "auto_keywords_type_invalid",
        "auto_questions_min_limit",
        "auto_questions_max_limit",
        "auto_questions_float_not_allowed",
        "auto_questions_type_invalid",
        "chunk_token_num_min_limit",
        "chunk_token_num_max_limit",
        "chunk_token_num_float_not_allowed",
        "chunk_token_num_type_invalid",
        "delimiter_empty",
        "html4excel_type_invalid",
        "tag_kb_ids_not_list",
        "tag_kb_ids_int_in_list",
        "topn_tags_min_limit",
        "topn_tags_max_limit",
        "topn_tags_float_not_allowed",
        "topn_tags_type_invalid",
        "filename_embd_weight_min_limit",
        "filename_embd_weight_max_limit",
        "filename_embd_weight_type_invalid",
        "task_page_size_min_limit",
        "task_page_size_float_not_allowed",
        "task_page_size_type_invalid",
        "pages_not_list",
        "pages_not_list_in_list",
        "pages_not_int_list",
        "graphrag_type_invalid",
        "graphrag_entity_types_not_list",
        "graphrag_entity_types_not_str_in_list",
        "graphrag_method_unknown",
        "graphrag_method_none",
        "graphrag_community_type_invalid",
        "graphrag_resolution_type_invalid",
        "raptor_type_invalid",
        "raptor_prompt_empty",
        "raptor_prompt_space",
        "raptor_max_token_min_limit",
        "raptor_max_token_max_limit",
        "raptor_max_token_float_not_allowed",
        "raptor_max_token_type_invalid",
        "raptor_threshold_min_limit",
        "raptor_threshold_max_limit",
        "raptor_threshold_type_invalid",
        "raptor_max_cluster_min_limit",
        "raptor_max_cluster_max_limit",
        "raptor_max_cluster_float_not_allowed",
        "raptor_max_cluster_type_invalid",
        "raptor_random_seed_min_limit",
        "raptor_random_seed_float_not_allowed",
        "raptor_random_seed_type_invalid",
        # Renamed from the misleading "parser_config_type_invalid": this case
        # exercises the overall payload size limit, not a type error.
        "parser_config_size_limit",
    ],
)
def test_parser_config_invalid(self, add_dataset_func, parser_config, expected_message):
    """Invalid parser_config payloads must be rejected with the expected validation error."""
    dataset = add_dataset_func
    with pytest.raises(Exception) as exception_info:
        dataset.update({"parser_config": parser_config})
    assert expected_message in str(exception_info.value), str(exception_info.value)
@pytest.mark.p2
def test_parser_config_empty(self, client, add_dataset_func):
    """Updating with an empty parser_config resets it to the defaults."""
    dataset = add_dataset_func
    expected_config = DataSet.ParserConfig(client, DEFAULT_PARSER_CONFIG)
    dataset.update({"parser_config": {}})
    assert str(dataset.parser_config) == str(expected_config), str(dataset)
    fetched = client.get_dataset(name=dataset.name)
    assert str(fetched.parser_config) == str(expected_config), str(fetched)
@pytest.mark.p3
def test_parser_config_none(self, client, add_dataset_func):
    """Updating with parser_config=None also falls back to the defaults."""
    dataset = add_dataset_func
    expected_config = DataSet.ParserConfig(client, DEFAULT_PARSER_CONFIG)
    dataset.update({"parser_config": None})
    assert str(dataset.parser_config) == str(expected_config), str(dataset)
    fetched = client.get_dataset(name=dataset.name)
    assert str(fetched.parser_config) == str(expected_config), str(fetched)
@pytest.mark.p3
def test_parser_config_empty_with_chunk_method_change(self, client, add_dataset_func):
    """Switching chunk_method to 'qa' with an empty parser_config yields the qa defaults."""
    dataset = add_dataset_func
    qa_defaults = {
        "raptor": {"use_raptor": False},
        "graphrag": {"use_graphrag": False},
        "image_context_size": 0,
        "table_context_size": 0,
    }
    expected_config = DataSet.ParserConfig(client, qa_defaults)
    dataset.update({"chunk_method": "qa", "parser_config": {}})
    assert str(dataset.parser_config) == str(expected_config), str(dataset)
    fetched = client.get_dataset(name=dataset.name)
    assert str(fetched.parser_config) == str(expected_config), str(fetched)
@pytest.mark.p3
def test_parser_config_unset_with_chunk_method_change(self, client, add_dataset_func):
    """Switching chunk_method to 'qa' without touching parser_config yields the qa defaults."""
    dataset = add_dataset_func
    qa_defaults = {
        "raptor": {"use_raptor": False},
        "graphrag": {"use_graphrag": False},
        "image_context_size": 0,
        "table_context_size": 0,
    }
    expected_config = DataSet.ParserConfig(client, qa_defaults)
    dataset.update({"chunk_method": "qa"})
    assert str(dataset.parser_config) == str(expected_config), str(dataset)
    fetched = client.get_dataset(name=dataset.name)
    assert str(fetched.parser_config) == str(expected_config), str(fetched)
@pytest.mark.p3
def test_parser_config_none_with_chunk_method_change(self, client, add_dataset_func):
    """Switching chunk_method to 'qa' with parser_config=None yields the qa defaults."""
    dataset = add_dataset_func
    qa_defaults = {
        "raptor": {"use_raptor": False},
        "graphrag": {"use_graphrag": False},
        "image_context_size": 0,
        "table_context_size": 0,
    }
    expected_config = DataSet.ParserConfig(client, qa_defaults)
    dataset.update({"chunk_method": "qa", "parser_config": None})
    assert str(dataset.parser_config) == str(expected_config), str(dataset)
    fetched = client.get_dataset(name=dataset.name)
    assert str(fetched.parser_config) == str(expected_config), str(fetched)
@pytest.mark.p2
@pytest.mark.parametrize(
    "payload",
    [
        {"id": "id"},
        {"tenant_id": "e57c1966f99211efb41e9e45646e0111"},
        {"created_by": "created_by"},
        {"create_date": "Tue, 11 Mar 2025 13:37:23 GMT"},
        {"create_time": 1741671443322},
        {"update_date": "Tue, 11 Mar 2025 13:37:23 GMT"},
        {"update_time": 1741671443339},
        {"document_count": 1},
        {"chunk_count": 1},
        {"token_num": 1},
        {"status": "1"},
        {"unknown_field": "unknown_field"},
    ],
)
def test_field_unsupported(self, add_dataset_func, payload):
    """Read-only or unknown fields must be rejected on update."""
    dataset = add_dataset_func
    with pytest.raises(Exception) as exception_info:
        dataset.update(payload)
    error_text = str(exception_info.value)
    assert "Extra inputs are not permitted" in error_text, error_text
@pytest.mark.p2
def test_field_unset(self, client, add_dataset_func):
    """Renaming a dataset must leave every other field untouched."""
    dataset = add_dataset_func
    original = client.get_dataset(name=dataset.name)
    dataset.update({"name": "default_unset"})
    updated = client.get_dataset(name="default_unset")
    # Same comparisons as before, expressed as a field sweep.
    for field in ("avatar", "description", "embedding_model", "permission", "chunk_method", "pagerank"):
        assert getattr(updated, field) == getattr(original, field), str(updated)
    assert str(updated.parser_config) == str(original.parser_config), str(updated)
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_sdk_api/test_dataset_mangement/test_update_dataset.py",
"license": "Apache License 2.0",
"lines": 696,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_http_api/common.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pathlib import Path
import requests
from configs import HOST_ADDRESS, VERSION
from requests_toolbelt import MultipartEncoder
from utils.file_utils import create_txt_file
# Default header set: every plain (non-multipart) helper sends a JSON body.
HEADERS = {"Content-Type": "application/json"}
# Endpoint templates. Doubled braces ({{...}}) survive the f-string and are
# filled in later with str.format(dataset_id=..., document_id=..., ...).
DATASETS_API_URL = f"/api/{VERSION}/datasets"
FILE_API_URL = f"/api/{VERSION}/datasets/{{dataset_id}}/documents"
FILE_CHUNK_API_URL = f"/api/{VERSION}/datasets/{{dataset_id}}/chunks"
CHUNK_API_URL = f"/api/{VERSION}/datasets/{{dataset_id}}/documents/{{document_id}}/chunks"
CHAT_ASSISTANT_API_URL = f"/api/{VERSION}/chats"
SESSION_WITH_CHAT_ASSISTANT_API_URL = f"/api/{VERSION}/chats/{{chat_id}}/sessions"
SESSION_WITH_AGENT_API_URL = f"/api/{VERSION}/agents/{{agent_id}}/sessions"
AGENT_API_URL = f"/api/{VERSION}/agents"
RETRIEVAL_API_URL = f"/api/{VERSION}/retrieval"
# DATASET MANAGEMENT
def create_dataset(auth, payload=None, *, headers=HEADERS, data=None):
    """POST /datasets — create a dataset; returns the decoded JSON reply."""
    response = requests.post(url=f"{HOST_ADDRESS}{DATASETS_API_URL}", headers=headers, auth=auth, json=payload, data=data)
    return response.json()
def list_datasets(auth, params=None, *, headers=HEADERS):
    """GET /datasets — list datasets, optionally filtered by query params."""
    response = requests.get(url=f"{HOST_ADDRESS}{DATASETS_API_URL}", headers=headers, auth=auth, params=params)
    return response.json()
def update_dataset(auth, dataset_id, payload=None, *, headers=HEADERS, data=None):
    """PUT /datasets/{id} — update a single dataset."""
    endpoint = f"{HOST_ADDRESS}{DATASETS_API_URL}/{dataset_id}"
    response = requests.put(url=endpoint, headers=headers, auth=auth, json=payload, data=data)
    return response.json()
def delete_datasets(auth, payload=None, *, headers=HEADERS, data=None):
    """DELETE /datasets — bulk-delete datasets.

    The expected body is {"ids": [...]}; this is the standard SDK REST
    endpoint for dataset deletion.
    """
    response = requests.delete(url=f"{HOST_ADDRESS}{DATASETS_API_URL}", headers=headers, auth=auth, json=payload, data=data)
    return response.json()
def batch_create_datasets(auth, num):
    """Create *num* datasets named dataset_0..dataset_{num-1}; return their ids."""
    return [create_dataset(auth, {"name": f"dataset_{i}"})["data"]["id"] for i in range(num)]
# FILE MANAGEMENT WITHIN DATASET
def upload_documents(auth, dataset_id, files_path=None):
    """Upload the files at *files_path* into a dataset via multipart POST.

    Every opened file handle is closed in the finally block, even if the
    request (or a later open) fails.
    """
    endpoint = f"{HOST_ADDRESS}{FILE_API_URL}".format(dataset_id=dataset_id)
    if files_path is None:
        files_path = []
    opened = []
    try:
        fields = []
        for fp in files_path:
            path = Path(fp)
            handle = path.open("rb")
            opened.append(handle)
            fields.append(("file", (path.name, handle)))
        encoder = MultipartEncoder(fields=fields)
        response = requests.post(
            url=endpoint,
            headers={"Content-Type": encoder.content_type},
            auth=auth,
            data=encoder,
        )
        return response.json()
    finally:
        for handle in opened:
            handle.close()
def download_document(auth, dataset_id, document_id, save_path):
    """Stream a document's bytes into *save_path*; returns the (closed) response."""
    endpoint = f"{HOST_ADDRESS}{FILE_API_URL}/{document_id}".format(dataset_id=dataset_id)
    response = requests.get(url=endpoint, auth=auth, stream=True)
    try:
        if response.status_code == 200:
            with open(save_path, "wb") as out:
                for chunk in response.iter_content(chunk_size=8192):
                    out.write(chunk)
    finally:
        response.close()
    return response
def list_documents(auth, dataset_id, params=None):
    """GET the documents of a dataset."""
    endpoint = f"{HOST_ADDRESS}{FILE_API_URL}".format(dataset_id=dataset_id)
    response = requests.get(url=endpoint, headers=HEADERS, auth=auth, params=params)
    return response.json()
def update_document(auth, dataset_id, document_id, payload=None):
    """PUT an update to a single document."""
    endpoint = f"{HOST_ADDRESS}{FILE_API_URL}/{document_id}".format(dataset_id=dataset_id)
    response = requests.put(url=endpoint, headers=HEADERS, auth=auth, json=payload)
    return response.json()
def delete_documents(auth, dataset_id, payload=None):
    """DELETE documents from a dataset (ids in the JSON body)."""
    endpoint = f"{HOST_ADDRESS}{FILE_API_URL}".format(dataset_id=dataset_id)
    response = requests.delete(url=endpoint, headers=HEADERS, auth=auth, json=payload)
    return response.json()
def parse_documents(auth, dataset_id, payload=None):
    """POST to start parsing (chunking) the given documents."""
    endpoint = f"{HOST_ADDRESS}{FILE_CHUNK_API_URL}".format(dataset_id=dataset_id)
    response = requests.post(url=endpoint, headers=HEADERS, auth=auth, json=payload)
    return response.json()
def stop_parse_documents(auth, dataset_id, payload=None):
    """DELETE on the chunks endpoint — cancel in-progress document parsing."""
    endpoint = f"{HOST_ADDRESS}{FILE_CHUNK_API_URL}".format(dataset_id=dataset_id)
    response = requests.delete(url=endpoint, headers=HEADERS, auth=auth, json=payload)
    return response.json()
def bulk_upload_documents(auth, dataset_id, num, tmp_path):
    """Create *num* throwaway txt files, upload them, and return the new document ids."""
    paths = [create_txt_file(tmp_path / f"ragflow_test_upload_{i}.txt") for i in range(num)]
    reply = upload_documents(auth, dataset_id, paths)
    return [document["id"] for document in reply["data"]]
# CHUNK MANAGEMENT WITHIN DATASET
def add_chunk(auth, dataset_id, document_id, payload=None):
    """POST a new chunk to a document."""
    endpoint = f"{HOST_ADDRESS}{CHUNK_API_URL}".format(dataset_id=dataset_id, document_id=document_id)
    response = requests.post(url=endpoint, headers=HEADERS, auth=auth, json=payload)
    return response.json()
def list_chunks(auth, dataset_id, document_id, params=None):
    """GET the chunks of a document."""
    endpoint = f"{HOST_ADDRESS}{CHUNK_API_URL}".format(dataset_id=dataset_id, document_id=document_id)
    response = requests.get(url=endpoint, headers=HEADERS, auth=auth, params=params)
    return response.json()
def update_chunk(auth, dataset_id, document_id, chunk_id, payload=None):
    """PUT an update to a single chunk."""
    endpoint = f"{HOST_ADDRESS}{CHUNK_API_URL}/{chunk_id}".format(dataset_id=dataset_id, document_id=document_id)
    response = requests.put(url=endpoint, headers=HEADERS, auth=auth, json=payload)
    return response.json()
def delete_chunks(auth, dataset_id, document_id, payload=None):
    """DELETE chunks from a document (ids in the JSON body)."""
    endpoint = f"{HOST_ADDRESS}{CHUNK_API_URL}".format(dataset_id=dataset_id, document_id=document_id)
    response = requests.delete(url=endpoint, headers=HEADERS, auth=auth, json=payload)
    return response.json()
def retrieval_chunks(auth, payload=None):
    """POST a retrieval query against one or more datasets."""
    response = requests.post(url=f"{HOST_ADDRESS}{RETRIEVAL_API_URL}", headers=HEADERS, auth=auth, json=payload)
    return response.json()
def batch_add_chunks(auth, dataset_id, document_id, num):
    """Add *num* test chunks to a document; return the new chunk ids."""
    return [
        add_chunk(auth, dataset_id, document_id, {"content": f"chunk test {i}"})["data"]["chunk"]["id"]
        for i in range(num)
    ]
# CHAT ASSISTANT MANAGEMENT
def create_chat_assistant(auth, payload=None):
    """POST /chats — create a chat assistant."""
    response = requests.post(url=f"{HOST_ADDRESS}{CHAT_ASSISTANT_API_URL}", headers=HEADERS, auth=auth, json=payload)
    return response.json()
def list_chat_assistants(auth, params=None):
    """GET /chats — list chat assistants."""
    response = requests.get(url=f"{HOST_ADDRESS}{CHAT_ASSISTANT_API_URL}", headers=HEADERS, auth=auth, params=params)
    return response.json()
def update_chat_assistant(auth, chat_assistant_id, payload=None):
    """PUT an update to a single chat assistant."""
    endpoint = f"{HOST_ADDRESS}{CHAT_ASSISTANT_API_URL}/{chat_assistant_id}"
    response = requests.put(url=endpoint, headers=HEADERS, auth=auth, json=payload)
    return response.json()
def delete_chat_assistants(auth, payload=None):
    """DELETE /chats — bulk-delete chat assistants."""
    response = requests.delete(url=f"{HOST_ADDRESS}{CHAT_ASSISTANT_API_URL}", headers=HEADERS, auth=auth, json=payload)
    return response.json()
def batch_create_chat_assistants(auth, num):
    """Create *num* dataset-less chat assistants; return their ids."""
    return [
        create_chat_assistant(auth, {"name": f"test_chat_assistant_{i}", "dataset_ids": []})["data"]["id"]
        for i in range(num)
    ]
# SESSION MANAGEMENT
def create_session_with_chat_assistant(auth, chat_assistant_id, payload=None):
    """POST a new session under a chat assistant."""
    endpoint = f"{HOST_ADDRESS}{SESSION_WITH_CHAT_ASSISTANT_API_URL}".format(chat_id=chat_assistant_id)
    response = requests.post(url=endpoint, headers=HEADERS, auth=auth, json=payload)
    return response.json()
def list_session_with_chat_assistants(auth, chat_assistant_id, params=None):
    """GET the sessions of a chat assistant."""
    endpoint = f"{HOST_ADDRESS}{SESSION_WITH_CHAT_ASSISTANT_API_URL}".format(chat_id=chat_assistant_id)
    response = requests.get(url=endpoint, headers=HEADERS, auth=auth, params=params)
    return response.json()
def update_session_with_chat_assistant(auth, chat_assistant_id, session_id, payload=None):
    """PUT an update to a single chat-assistant session."""
    endpoint = f"{HOST_ADDRESS}{SESSION_WITH_CHAT_ASSISTANT_API_URL}/{session_id}".format(chat_id=chat_assistant_id)
    response = requests.put(url=endpoint, headers=HEADERS, auth=auth, json=payload)
    return response.json()
def delete_session_with_chat_assistants(auth, chat_assistant_id, payload=None):
    """DELETE chat-assistant sessions; an omitted payload sends an empty dict."""
    endpoint = f"{HOST_ADDRESS}{SESSION_WITH_CHAT_ASSISTANT_API_URL}".format(chat_id=chat_assistant_id)
    body = {} if payload is None else payload
    response = requests.delete(url=endpoint, headers=HEADERS, auth=auth, json=body)
    return response.json()
def batch_add_sessions_with_chat_assistant(auth, chat_assistant_id, num):
    """Create *num* sessions under a chat assistant; return their ids."""
    return [
        create_session_with_chat_assistant(auth, chat_assistant_id, {"name": f"session_with_chat_assistant_{i}"})["data"]["id"]
        for i in range(num)
    ]
# DATASET GRAPH AND TASKS
def knowledge_graph(auth, dataset_id, params=None):
    """GET a dataset's knowledge graph."""
    endpoint = f"{HOST_ADDRESS}{DATASETS_API_URL}/{dataset_id}/knowledge_graph"
    response = requests.get(url=endpoint, headers=HEADERS, auth=auth, params=params)
    return response.json()
def delete_knowledge_graph(auth, dataset_id, payload=None):
    """DELETE a dataset's knowledge graph; the JSON body is sent only when given."""
    endpoint = f"{HOST_ADDRESS}{DATASETS_API_URL}/{dataset_id}/knowledge_graph"
    extra = {} if payload is None else {"json": payload}
    response = requests.delete(url=endpoint, headers=HEADERS, auth=auth, **extra)
    return response.json()
def run_graphrag(auth, dataset_id, payload=None):
    """POST to kick off a GraphRAG run for a dataset."""
    endpoint = f"{HOST_ADDRESS}{DATASETS_API_URL}/{dataset_id}/run_graphrag"
    response = requests.post(url=endpoint, headers=HEADERS, auth=auth, json=payload)
    return response.json()
def trace_graphrag(auth, dataset_id, params=None):
    """GET the progress/trace of a dataset's GraphRAG run."""
    endpoint = f"{HOST_ADDRESS}{DATASETS_API_URL}/{dataset_id}/trace_graphrag"
    response = requests.get(url=endpoint, headers=HEADERS, auth=auth, params=params)
    return response.json()
def run_raptor(auth, dataset_id, payload=None):
    """POST to kick off a RAPTOR run for a dataset."""
    endpoint = f"{HOST_ADDRESS}{DATASETS_API_URL}/{dataset_id}/run_raptor"
    response = requests.post(url=endpoint, headers=HEADERS, auth=auth, json=payload)
    return response.json()
def trace_raptor(auth, dataset_id, params=None):
    """GET the progress/trace of a dataset's RAPTOR run."""
    endpoint = f"{HOST_ADDRESS}{DATASETS_API_URL}/{dataset_id}/trace_raptor"
    response = requests.get(url=endpoint, headers=HEADERS, auth=auth, params=params)
    return response.json()
def metadata_summary(auth, dataset_id, params=None):
    """GET the metadata summary of a dataset."""
    endpoint = f"{HOST_ADDRESS}{DATASETS_API_URL}/{dataset_id}/metadata/summary"
    response = requests.get(url=endpoint, headers=HEADERS, auth=auth, params=params)
    return response.json()
def metadata_batch_update(auth, dataset_id, payload=None):
    """POST a batch metadata update for a dataset."""
    endpoint = f"{HOST_ADDRESS}{DATASETS_API_URL}/{dataset_id}/metadata/update"
    response = requests.post(url=endpoint, headers=HEADERS, auth=auth, json=payload)
    return response.json()
# CHAT COMPLETIONS AND RELATED QUESTIONS
def related_questions(auth, payload=None):
    """POST to fetch related questions for a session."""
    endpoint = f"{HOST_ADDRESS}/api/{VERSION}/sessions/related_questions"
    response = requests.post(url=endpoint, headers=HEADERS, auth=auth, json=payload)
    return response.json()
# AGENT MANAGEMENT AND SESSIONS
def create_agent(auth, payload=None):
    """POST /agents — create an agent."""
    response = requests.post(url=f"{HOST_ADDRESS}{AGENT_API_URL}", headers=HEADERS, auth=auth, json=payload)
    return response.json()
def list_agents(auth, params=None):
    """GET /agents — list agents."""
    response = requests.get(url=f"{HOST_ADDRESS}{AGENT_API_URL}", headers=HEADERS, auth=auth, params=params)
    return response.json()
def delete_agent(auth, agent_id):
    """DELETE a single agent by id."""
    endpoint = f"{HOST_ADDRESS}{AGENT_API_URL}/{agent_id}"
    response = requests.delete(url=endpoint, headers=HEADERS, auth=auth)
    return response.json()
def create_agent_session(auth, agent_id, payload=None, params=None):
    """POST a new session under an agent."""
    endpoint = f"{HOST_ADDRESS}{SESSION_WITH_AGENT_API_URL}".format(agent_id=agent_id)
    response = requests.post(url=endpoint, headers=HEADERS, auth=auth, json=payload, params=params)
    return response.json()
def list_agent_sessions(auth, agent_id, params=None):
    """GET the sessions of an agent."""
    endpoint = f"{HOST_ADDRESS}{SESSION_WITH_AGENT_API_URL}".format(agent_id=agent_id)
    response = requests.get(url=endpoint, headers=HEADERS, auth=auth, params=params)
    return response.json()
def delete_agent_sessions(auth, agent_id, payload=None):
    """DELETE agent sessions; an omitted payload sends an empty dict."""
    endpoint = f"{HOST_ADDRESS}{SESSION_WITH_AGENT_API_URL}".format(agent_id=agent_id)
    body = {} if payload is None else payload
    response = requests.delete(url=endpoint, headers=HEADERS, auth=auth, json=body)
    return response.json()
def agent_completions(auth, agent_id, payload=None):
    """POST a completion request to an agent."""
    endpoint = f"{HOST_ADDRESS}{AGENT_API_URL}/{agent_id}/completions"
    response = requests.post(url=endpoint, headers=HEADERS, auth=auth, json=payload)
    return response.json()
def chat_completions(auth, chat_id, payload=None):
    """POST a question to a chat assistant and return the completion JSON.

    Args:
        auth: Authentication object.
        chat_id: Chat assistant ID.
        payload: Dict with "question" (required) and optionally "stream"
            (default False) and "session_id" for conversation context.

    Returns:
        Decoded JSON response with the answer data.
    """
    endpoint = f"{HOST_ADDRESS}/api/{VERSION}/chats/{chat_id}/completions"
    response = requests.post(url=endpoint, headers=HEADERS, auth=auth, json=payload)
    return response.json()
def chat_completions_openai(auth, chat_id, payload=None):
    """POST to the OpenAI-compatible chat completions endpoint.

    Args:
        auth: Authentication object.
        chat_id: Chat assistant ID.
        payload: OpenAI-format dict with "messages" (required, list of
            {"role", "content"} objects) and optionally "stream" (default False).

    Returns:
        Decoded JSON response in the OpenAI chat-completions format,
        including usage information.
    """
    endpoint = f"{HOST_ADDRESS}/api/{VERSION}/chats_openai/{chat_id}/chat/completions"
    response = requests.post(url=endpoint, headers=HEADERS, auth=auth, json=payload)
    return response.json()
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_http_api/common.py",
"license": "Apache License 2.0",
"lines": 297,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_http_api/test_chat_assistant_management/test_create_chat_assistant.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from common import create_chat_assistant
from configs import CHAT_ASSISTANT_NAME_LIMIT, INVALID_API_TOKEN
from libs.auth import RAGFlowHttpApiAuth
from utils import encode_avatar
from utils.file_utils import create_image_file
@pytest.mark.p1
class TestAuthorization:
    """Creating a chat assistant requires a valid API key."""

    @pytest.mark.parametrize(
        "invalid_auth, expected_code, expected_message",
        [
            (None, 0, "`Authorization` can't be empty"),
            (
                RAGFlowHttpApiAuth(INVALID_API_TOKEN),
                109,
                "Authentication error: API key is invalid!",
            ),
        ],
    )
    def test_invalid_auth(self, invalid_auth, expected_code, expected_message):
        """Missing or invalid credentials yield the documented code and message."""
        reply = create_chat_assistant(invalid_auth)
        assert reply["code"] == expected_code
        assert reply["message"] == expected_message
@pytest.mark.usefixtures("clear_chat_assistants")
class TestChatAssistantCreate:
@pytest.mark.p1
@pytest.mark.parametrize(
    "payload, expected_code, expected_message",
    [
        ({"name": "valid_name"}, 0, ""),
        pytest.param({"name": "a" * (CHAT_ASSISTANT_NAME_LIMIT + 1)}, 102, "", marks=pytest.mark.skip(reason="issues/")),
        pytest.param({"name": 1}, 100, "", marks=pytest.mark.skip(reason="issues/")),
        ({"name": ""}, 102, "`name` is required."),
        ({"name": "duplicated_name"}, 102, "Duplicated chat name in creating chat."),
        ({"name": "case insensitive"}, 102, "Duplicated chat name in creating chat."),
    ],
)
def test_name(self, HttpApiAuth, add_chunks, payload, expected_code, expected_message):
    """Name validation: valid names succeed; empty, duplicate, and case-insensitive duplicates fail."""
    payload["dataset_ids"] = []  # issues/
    name = payload["name"]
    # Seed a conflicting assistant for the duplicate scenarios.
    if name == "duplicated_name":
        create_chat_assistant(HttpApiAuth, payload)
    elif name == "case insensitive":
        create_chat_assistant(HttpApiAuth, {"name": name.upper()})
    reply = create_chat_assistant(HttpApiAuth, payload)
    assert reply["code"] == expected_code, reply
    if expected_code == 0:
        assert reply["data"]["name"] == name
    else:
        assert reply["message"] == expected_message
@pytest.mark.p1
@pytest.mark.parametrize(
    "dataset_ids, expected_code, expected_message",
    [
        ([], 0, ""),
        (lambda r: [r], 0, ""),
        (["invalid_dataset_id"], 102, "You don't own the dataset invalid_dataset_id"),
        ("invalid_dataset_id", 102, "You don't own the dataset i"),
    ],
)
def test_dataset_ids(self, HttpApiAuth, add_chunks, dataset_ids, expected_code, expected_message):
    """dataset_ids validation: owned ids succeed; unknown ids or a bare string fail."""
    dataset_id, _, _ = add_chunks
    # A callable parameter receives the real dataset id from the fixture.
    resolved = dataset_ids(dataset_id) if callable(dataset_ids) else dataset_ids
    payload = {"name": "ragflow test", "dataset_ids": resolved}
    reply = create_chat_assistant(HttpApiAuth, payload)
    assert reply["code"] == expected_code, reply
    if expected_code == 0:
        assert reply["data"]["name"] == payload["name"]
    else:
        assert reply["message"] == expected_message
@pytest.mark.p3
def test_avatar(self, HttpApiAuth, tmp_path):
    """A base64-encoded image is accepted as the assistant avatar."""
    image_path = create_image_file(tmp_path / "ragflow_test.png")
    reply = create_chat_assistant(
        HttpApiAuth,
        {"name": "avatar_test", "avatar": encode_avatar(image_path), "dataset_ids": []},
    )
    assert reply["code"] == 0
@pytest.mark.p2
@pytest.mark.parametrize(
    "llm, expected_code, expected_message",
    [
        # Empty dict -> server defaults; the skipped params document known
        # validation gaps (out-of-range / wrong-type values currently accepted).
        ({}, 0, ""),
        ({"model_name": "glm-4"}, 0, ""),
        ({"model_name": "unknown"}, 102, "`model_name` unknown doesn't exist"),
        ({"temperature": 0}, 0, ""),
        ({"temperature": 1}, 0, ""),
        pytest.param({"temperature": -1}, 0, "", marks=pytest.mark.skip),
        pytest.param({"temperature": 10}, 0, "", marks=pytest.mark.skip),
        pytest.param({"temperature": "a"}, 0, "", marks=pytest.mark.skip),
        ({"top_p": 0}, 0, ""),
        ({"top_p": 1}, 0, ""),
        pytest.param({"top_p": -1}, 0, "", marks=pytest.mark.skip),
        pytest.param({"top_p": 10}, 0, "", marks=pytest.mark.skip),
        pytest.param({"top_p": "a"}, 0, "", marks=pytest.mark.skip),
        ({"presence_penalty": 0}, 0, ""),
        ({"presence_penalty": 1}, 0, ""),
        pytest.param({"presence_penalty": -1}, 0, "", marks=pytest.mark.skip),
        pytest.param({"presence_penalty": 10}, 0, "", marks=pytest.mark.skip),
        pytest.param({"presence_penalty": "a"}, 0, "", marks=pytest.mark.skip),
        ({"frequency_penalty": 0}, 0, ""),
        ({"frequency_penalty": 1}, 0, ""),
        pytest.param({"frequency_penalty": -1}, 0, "", marks=pytest.mark.skip),
        pytest.param({"frequency_penalty": 10}, 0, "", marks=pytest.mark.skip),
        pytest.param({"frequency_penalty": "a"}, 0, "", marks=pytest.mark.skip),
        # NOTE(review): requests use the key "max_token" while the default check
        # below reads "max_tokens" — confirm which spelling the API echoes back.
        ({"max_token": 0}, 0, ""),
        ({"max_token": 1024}, 0, ""),
        pytest.param({"max_token": -1}, 0, "", marks=pytest.mark.skip),
        pytest.param({"max_token": 10}, 0, "", marks=pytest.mark.skip),
        pytest.param({"max_token": "a"}, 0, "", marks=pytest.mark.skip),
        pytest.param({"unknown": "unknown"}, 0, "", marks=pytest.mark.skip),
    ],
)
def test_llm(self, HttpApiAuth, add_chunks, llm, expected_code, expected_message):
    """LLM settings passed at creation are echoed back; an empty llm dict gets server defaults."""
    dataset_id, _, _ = add_chunks
    payload = {"name": "llm_test", "dataset_ids": [dataset_id], "llm": llm}
    res = create_chat_assistant(HttpApiAuth, payload)
    assert res["code"] == expected_code
    if expected_code == 0:
        if llm:
            # Every explicitly-set key must round-trip unchanged.
            for k, v in llm.items():
                assert res["data"]["llm"][k] == v
        else:
            # Server-side defaults when no llm settings are supplied.
            assert res["data"]["llm"]["model_name"] == "glm-4-flash@ZHIPU-AI"
            assert res["data"]["llm"]["temperature"] == 0.1
            assert res["data"]["llm"]["top_p"] == 0.3
            assert res["data"]["llm"]["presence_penalty"] == 0.4
            assert res["data"]["llm"]["frequency_penalty"] == 0.7
            assert res["data"]["llm"]["max_tokens"] == 512
    else:
        assert res["message"] == expected_message
@pytest.mark.p2
@pytest.mark.parametrize(
    "prompt, expected_code, expected_message",
    [
        # Empty config -> server defaults (checked in the else branch below).
        ({}, 0, ""),
        ({"similarity_threshold": 0}, 0, ""),
        ({"similarity_threshold": 1}, 0, ""),
        # Out-of-range / wrong-type cases are skipped: server currently accepts them.
        pytest.param({"similarity_threshold": -1}, 0, "", marks=pytest.mark.skip),
        pytest.param({"similarity_threshold": 10}, 0, "", marks=pytest.mark.skip),
        pytest.param({"similarity_threshold": "a"}, 0, "", marks=pytest.mark.skip),
        ({"keywords_similarity_weight": 0}, 0, ""),
        ({"keywords_similarity_weight": 1}, 0, ""),
        pytest.param({"keywords_similarity_weight": -1}, 0, "", marks=pytest.mark.skip),
        pytest.param({"keywords_similarity_weight": 10}, 0, "", marks=pytest.mark.skip),
        pytest.param({"keywords_similarity_weight": "a"}, 0, "", marks=pytest.mark.skip),
        ({"variables": []}, 0, ""),
        ({"top_n": 0}, 0, ""),
        ({"top_n": 1}, 0, ""),
        pytest.param({"top_n": -1}, 0, "", marks=pytest.mark.skip),
        pytest.param({"top_n": 10}, 0, "", marks=pytest.mark.skip),
        pytest.param({"top_n": "a"}, 0, "", marks=pytest.mark.skip),
        ({"empty_response": "Hello World"}, 0, ""),
        ({"empty_response": ""}, 0, ""),
        ({"empty_response": "!@#$%^&*()"}, 0, ""),
        ({"empty_response": "中文测试"}, 0, ""),
        pytest.param({"empty_response": 123}, 0, "", marks=pytest.mark.skip),
        pytest.param({"empty_response": True}, 0, "", marks=pytest.mark.skip),
        pytest.param({"empty_response": " "}, 0, "", marks=pytest.mark.skip),
        ({"opener": "Hello World"}, 0, ""),
        ({"opener": ""}, 0, ""),
        ({"opener": "!@#$%^&*()"}, 0, ""),
        ({"opener": "中文测试"}, 0, ""),
        pytest.param({"opener": 123}, 0, "", marks=pytest.mark.skip),
        pytest.param({"opener": True}, 0, "", marks=pytest.mark.skip),
        pytest.param({"opener": " "}, 0, "", marks=pytest.mark.skip),
        ({"show_quote": True}, 0, ""),
        ({"show_quote": False}, 0, ""),
        # A custom prompt must reference {knowledge} unless `variables` is emptied.
        ({"prompt": "Hello World {knowledge}"}, 0, ""),
        ({"prompt": "{knowledge}"}, 0, ""),
        ({"prompt": "!@#$%^&*() {knowledge}"}, 0, ""),
        ({"prompt": "中文测试 {knowledge}"}, 0, ""),
        ({"prompt": "Hello World"}, 102, "Parameter 'knowledge' is not used"),
        ({"prompt": "Hello World", "variables": []}, 0, ""),
        pytest.param({"prompt": 123}, 100, """AttributeError("\'int\' object has no attribute \'find\'")""", marks=pytest.mark.skip),
        pytest.param({"prompt": True}, 100, """AttributeError("\'int\' object has no attribute \'find\'")""", marks=pytest.mark.skip),
        pytest.param({"unknown": "unknown"}, 0, "", marks=pytest.mark.skip),
    ],
)
def test_prompt(self, HttpApiAuth, add_chunks, prompt, expected_code, expected_message):
    """Create an assistant with a `prompt` config and verify the echoed settings."""
    dataset_id, _, _ = add_chunks
    payload = {"name": "prompt_test", "dataset_ids": [dataset_id], "prompt": prompt}
    res = create_chat_assistant(HttpApiAuth, payload)
    assert res["code"] == expected_code
    if expected_code == 0:
        if prompt:
            for k, v in prompt.items():
                if k == "keywords_similarity_weight":
                    # NOTE(review): the API appears to return the complement
                    # (1 - v) for this field — the assertion encodes that.
                    assert res["data"]["prompt"][k] == 1 - v
                else:
                    assert res["data"]["prompt"][k] == v
        else:
            # Defaults applied when no prompt config is given.
            assert res["data"]["prompt"]["similarity_threshold"] == 0.2
            assert res["data"]["prompt"]["keywords_similarity_weight"] == 0.7
            assert res["data"]["prompt"]["top_n"] == 6
            assert res["data"]["prompt"]["variables"] == [{"key": "knowledge", "optional": False}]
            assert res["data"]["prompt"]["rerank_model"] == ""
            assert res["data"]["prompt"]["empty_response"] == "Sorry! No relevant content was found in the knowledge base!"
            assert res["data"]["prompt"]["opener"] == "Hi! I'm your assistant. What can I do for you?"
            assert res["data"]["prompt"]["show_quote"] is True
            assert (
                res["data"]["prompt"]["prompt"]
                == 'You are an intelligent assistant. Please summarize the content of the dataset to answer the question. Please list the data in the dataset and answer in detail. When all dataset content is irrelevant to the question, your answer must include the sentence "The answer you are looking for is not found in the dataset!" Answers need to consider chat history.\n      Here is the knowledge base:\n      {knowledge}\n      The above is the knowledge base.'
            )
    else:
        assert res["message"] == expected_message
@pytest.mark.p2
def test_create_additional_guards_p2(self, HttpApiAuth):
    """Server-side guards on creation: a caller-supplied tenant_id and an
    unknown rerank model must both be rejected with code 102."""
    # A client must never be able to choose its own tenant.
    response = create_chat_assistant(
        HttpApiAuth,
        {
            "name": "guard-tenant-id",
            "dataset_ids": [],
            "tenant_id": "tenant-should-not-pass",
        },
    )
    assert response["code"] == 102
    assert response["message"] == "`tenant_id` must not be provided."
    # Unknown rerank model in the prompt config is rejected as well.
    response = create_chat_assistant(
        HttpApiAuth,
        {
            "name": "guard-rerank-id",
            "dataset_ids": [],
            "prompt": {"rerank_model": "unknown-rerank-model"},
        },
    )
    assert response["code"] == 102
    assert "`rerank_model` unknown-rerank-model doesn't exist" in response["message"]
class TestChatAssistantCreate2:
    """Creation scenarios that need a dataset with an *unparsed* document."""

    @pytest.mark.p2
    def test_unparsed_document(self, HttpApiAuth, add_document):
        """Binding an assistant to a dataset whose document is not parsed fails."""
        dataset_id, _ = add_document
        response = create_chat_assistant(
            HttpApiAuth,
            {"name": "prompt_test", "dataset_ids": [dataset_id]},
        )
        assert response["code"] == 102
        assert "doesn't own parsed file" in response["message"]
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_http_api/test_chat_assistant_management/test_create_chat_assistant.py",
"license": "Apache License 2.0",
"lines": 243,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_http_api/test_chat_assistant_management/test_delete_chat_assistants.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from common import batch_create_chat_assistants, delete_chat_assistants, list_chat_assistants
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowHttpApiAuth
@pytest.mark.p1
class TestAuthorization:
    """Requests without valid credentials must be rejected before any deletion."""

    @pytest.mark.parametrize(
        "invalid_auth, expected_code, expected_message",
        [
            (None, 0, "`Authorization` can't be empty"),
            (RAGFlowHttpApiAuth(INVALID_API_TOKEN), 109, "Authentication error: API key is invalid!"),
        ],
    )
    def test_invalid_auth(self, invalid_auth, expected_code, expected_message):
        response = delete_chat_assistants(invalid_auth)
        assert response["code"] == expected_code
        assert response["message"] == expected_message
class TestChatAssistantsDelete:
    """DELETE /chats scenarios: payload shapes, invalid/duplicate ids, concurrency, bulk.

    The `add_chat_assistants_func` fixture creates 5 assistants per test.
    """

    @pytest.mark.parametrize(
        "payload, expected_code, expected_message, remaining",
        [
            # `remaining` = assistants expected to survive the delete call.
            pytest.param(None, 0, "", 0, marks=pytest.mark.p3),
            pytest.param({"ids": []}, 0, "", 0, marks=pytest.mark.p3),
            pytest.param({"ids": ["invalid_id"]}, 102, "Assistant(invalid_id) not found.", 5, marks=pytest.mark.p3),
            pytest.param({"ids": ["\n!?。;!?\"'"]}, 102, """Assistant(\n!?。;!?"\') not found.""", 5, marks=pytest.mark.p3),
            pytest.param("not json", 100, "AttributeError(\"'str' object has no attribute 'get'\")", 5, marks=pytest.mark.p3),
            pytest.param(lambda r: {"ids": r[:1]}, 0, "", 4, marks=pytest.mark.p3),
            pytest.param(lambda r: {"ids": r}, 0, "", 0, marks=pytest.mark.p1),
        ],
    )
    def test_basic_scenarios(self, HttpApiAuth, add_chat_assistants_func, payload, expected_code, expected_message, remaining):
        _, _, chat_assistant_ids = add_chat_assistants_func
        if callable(payload):
            # Callable params build the payload from the freshly created ids.
            payload = payload(chat_assistant_ids)
        res = delete_chat_assistants(HttpApiAuth, payload)
        assert res["code"] == expected_code
        if res["code"] != 0:
            assert res["message"] == expected_message
        res = list_chat_assistants(HttpApiAuth)
        assert len(res["data"]) == remaining

    @pytest.mark.parametrize(
        "payload",
        [
            # Invalid id placed first / in the middle / last among valid ids.
            pytest.param(lambda r: {"ids": ["invalid_id"] + r}, marks=pytest.mark.p3),
            pytest.param(lambda r: {"ids": r[:1] + ["invalid_id"] + r[1:5]}, marks=pytest.mark.p1),
            pytest.param(lambda r: {"ids": r + ["invalid_id"]}, marks=pytest.mark.p3),
        ],
    )
    def test_delete_partial_invalid_id(self, HttpApiAuth, add_chat_assistants_func, payload):
        """A mix of valid and invalid ids deletes the valid ones and reports the rest."""
        _, _, chat_assistant_ids = add_chat_assistants_func
        if callable(payload):
            payload = payload(chat_assistant_ids)
        res = delete_chat_assistants(HttpApiAuth, payload)
        assert res["code"] == 0
        assert res["data"]["errors"][0] == "Assistant(invalid_id) not found."
        assert res["data"]["success_count"] == 5
        res = list_chat_assistants(HttpApiAuth)
        assert len(res["data"]) == 0

    @pytest.mark.p3
    def test_repeated_deletion(self, HttpApiAuth, add_chat_assistants_func):
        """Deleting the same ids twice: second call fails with not-found."""
        _, _, chat_assistant_ids = add_chat_assistants_func
        res = delete_chat_assistants(HttpApiAuth, {"ids": chat_assistant_ids})
        assert res["code"] == 0
        res = delete_chat_assistants(HttpApiAuth, {"ids": chat_assistant_ids})
        assert res["code"] == 102
        assert "not found" in res["message"]

    @pytest.mark.p3
    def test_duplicate_deletion(self, HttpApiAuth, add_chat_assistants_func):
        """Duplicated ids in one request: deletes once, reports duplicates as errors."""
        _, _, chat_assistant_ids = add_chat_assistants_func
        res = delete_chat_assistants(HttpApiAuth, {"ids": chat_assistant_ids + chat_assistant_ids})
        assert res["code"] == 0
        assert "Duplicate assistant ids" in res["data"]["errors"][0]
        assert res["data"]["success_count"] == 5
        res = list_chat_assistants(HttpApiAuth)
        assert res["code"] == 0

    @pytest.mark.p3
    def test_concurrent_deletion(self, HttpApiAuth):
        """100 single-id deletions issued concurrently must all succeed."""
        count = 100
        ids = batch_create_chat_assistants(HttpApiAuth, count)
        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [executor.submit(delete_chat_assistants, HttpApiAuth, {"ids": ids[i : i + 1]}) for i in range(count)]
        responses = list(as_completed(futures))
        assert len(responses) == count, responses
        assert all(future.result()["code"] == 0 for future in futures)

    @pytest.mark.p3
    def test_delete_10k(self, HttpApiAuth):
        """Bulk deletion of a large batch in a single request.

        NOTE(review): the name says 10k but only 1_000 assistants are created —
        confirm the intended scale.
        """
        ids = batch_create_chat_assistants(HttpApiAuth, 1_000)
        res = delete_chat_assistants(HttpApiAuth, {"ids": ids})
        assert res["code"] == 0
        res = list_chat_assistants(HttpApiAuth)
        assert len(res["data"]) == 0

    @pytest.mark.p2
    def test_delete_all_errors_no_success_p2(self, HttpApiAuth, add_chat_assistants_func):
        """All-invalid ids: the call fails outright and names every missing id."""
        delete_payload = {"ids": ["missing-1", "missing-2"]}
        res = delete_chat_assistants(HttpApiAuth, delete_payload)
        assert res["code"] == 102
        assert "Assistant(missing-1) not found." in res["message"]
        assert "Assistant(missing-2) not found." in res["message"]

    @pytest.mark.p2
    def test_delete_duplicate_partial_success_p2(self, HttpApiAuth, add_chat_assistants_func):
        """One id listed twice: one deletion succeeds, the duplicate is reported."""
        _, _, chat_assistant_ids = add_chat_assistants_func
        payload = {"ids": [chat_assistant_ids[0], chat_assistant_ids[0]]}
        res = delete_chat_assistants(HttpApiAuth, payload)
        assert res["code"] == 0
        assert res["data"]["success_count"] == 1
        assert "Duplicate assistant ids" in res["data"]["errors"][0]
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_http_api/test_chat_assistant_management/test_delete_chat_assistants.py",
"license": "Apache License 2.0",
"lines": 126,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_http_api/test_chat_assistant_management/test_list_chat_assistants.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from common import delete_datasets, list_chat_assistants
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowHttpApiAuth
from utils import is_sorted
@pytest.mark.p1
class TestAuthorization:
    """Listing chat assistants with missing/invalid credentials must be rejected."""

    @pytest.mark.parametrize(
        "invalid_auth, expected_code, expected_message",
        [
            (None, 0, "`Authorization` can't be empty"),
            (RAGFlowHttpApiAuth(INVALID_API_TOKEN), 109, "Authentication error: API key is invalid!"),
        ],
    )
    def test_invalid_auth(self, invalid_auth, expected_code, expected_message):
        response = list_chat_assistants(invalid_auth)
        assert response["code"] == expected_code
        assert response["message"] == expected_message
@pytest.mark.usefixtures("add_chat_assistants")
class TestChatAssistantsList:
    """GET /chats scenarios: paging, ordering, name/id filters, concurrency.

    The `add_chat_assistants` fixture provides 5 assistants named
    test_chat_assistant_0..4 for every test in this class.
    """

    @pytest.mark.p1
    def test_default(self, HttpApiAuth):
        """No params: all 5 fixture assistants are returned."""
        res = list_chat_assistants(HttpApiAuth)
        assert res["code"] == 0
        assert len(res["data"]) == 5

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "params, expected_code, expected_page_size, expected_message",
        [
            # 5 assistants with page_size=2 -> pages of 2, 2, 1.
            ({"page": None, "page_size": 2}, 0, 2, ""),
            ({"page": 0, "page_size": 2}, 0, 2, ""),
            ({"page": 2, "page_size": 2}, 0, 2, ""),
            ({"page": 3, "page_size": 2}, 0, 1, ""),
            ({"page": "3", "page_size": 2}, 0, 1, ""),
            pytest.param(
                {"page": -1, "page_size": 2},
                100,
                0,
                "1064",
                marks=pytest.mark.skip(reason="issues/5851"),
            ),
            pytest.param(
                {"page": "a", "page_size": 2},
                100,
                0,
                """ValueError("invalid literal for int() with base 10: \'a\'")""",
                marks=pytest.mark.skip(reason="issues/5851"),
            ),
        ],
    )
    def test_page(self, HttpApiAuth, params, expected_code, expected_page_size, expected_message):
        res = list_chat_assistants(HttpApiAuth, params=params)
        assert res["code"] == expected_code
        if expected_code == 0:
            assert len(res["data"]) == expected_page_size
        else:
            assert res["message"] == expected_message

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "params, expected_code, expected_page_size, expected_message",
        [
            ({"page_size": None}, 0, 5, ""),
            ({"page_size": 0}, 0, 0, ""),
            ({"page_size": 1}, 0, 1, ""),
            # page_size larger than the total returns everything.
            ({"page_size": 6}, 0, 5, ""),
            ({"page_size": "1"}, 0, 1, ""),
            pytest.param(
                {"page_size": -1},
                100,
                0,
                "1064",
                marks=pytest.mark.skip(reason="issues/5851"),
            ),
            pytest.param(
                {"page_size": "a"},
                100,
                0,
                """ValueError("invalid literal for int() with base 10: \'a\'")""",
                marks=pytest.mark.skip(reason="issues/5851"),
            ),
        ],
    )
    def test_page_size(
        self,
        HttpApiAuth,
        params,
        expected_code,
        expected_page_size,
        expected_message,
    ):
        res = list_chat_assistants(HttpApiAuth, params=params)
        assert res["code"] == expected_code
        if expected_code == 0:
            assert len(res["data"]) == expected_page_size
        else:
            assert res["message"] == expected_message

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "params, expected_code, assertions, expected_message",
        [
            # Default ordering is create_time descending.
            ({"orderby": None}, 0, lambda r: (is_sorted(r["data"], "create_time", True)), ""),
            ({"orderby": "create_time"}, 0, lambda r: (is_sorted(r["data"], "create_time", True)), ""),
            ({"orderby": "update_time"}, 0, lambda r: (is_sorted(r["data"], "update_time", True)), ""),
            pytest.param(
                {"orderby": "name", "desc": "False"},
                0,
                lambda r: (is_sorted(r["data"], "name", False)),
                "",
                marks=pytest.mark.skip(reason="issues/5851"),
            ),
            pytest.param(
                {"orderby": "unknown"},
                102,
                0,
                "orderby should be create_time or update_time",
                marks=pytest.mark.skip(reason="issues/5851"),
            ),
        ],
    )
    def test_orderby(
        self,
        HttpApiAuth,
        params,
        expected_code,
        assertions,
        expected_message,
    ):
        res = list_chat_assistants(HttpApiAuth, params=params)
        assert res["code"] == expected_code
        if expected_code == 0:
            # `assertions` is a predicate over the response when provided.
            if callable(assertions):
                assert assertions(res)
        else:
            assert res["message"] == expected_message

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "params, expected_code, assertions, expected_message",
        [
            # `desc` accepts both booleans and their string forms.
            ({"desc": None}, 0, lambda r: (is_sorted(r["data"], "create_time", True)), ""),
            ({"desc": "true"}, 0, lambda r: (is_sorted(r["data"], "create_time", True)), ""),
            ({"desc": "True"}, 0, lambda r: (is_sorted(r["data"], "create_time", True)), ""),
            ({"desc": True}, 0, lambda r: (is_sorted(r["data"], "create_time", True)), ""),
            ({"desc": "false"}, 0, lambda r: (is_sorted(r["data"], "create_time", False)), ""),
            ({"desc": "False"}, 0, lambda r: (is_sorted(r["data"], "create_time", False)), ""),
            ({"desc": False}, 0, lambda r: (is_sorted(r["data"], "create_time", False)), ""),
            ({"desc": "False", "orderby": "update_time"}, 0, lambda r: (is_sorted(r["data"], "update_time", False)), ""),
            pytest.param(
                {"desc": "unknown"},
                102,
                0,
                "desc should be true or false",
                marks=pytest.mark.skip(reason="issues/5851"),
            ),
        ],
    )
    def test_desc(
        self,
        HttpApiAuth,
        params,
        expected_code,
        assertions,
        expected_message,
    ):
        res = list_chat_assistants(HttpApiAuth, params=params)
        assert res["code"] == expected_code
        if expected_code == 0:
            if callable(assertions):
                assert assertions(res)
        else:
            assert res["message"] == expected_message

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "params, expected_code, expected_num, expected_message",
        [
            # None/empty name -> no filtering; unknown name -> error, not empty list.
            ({"name": None}, 0, 5, ""),
            ({"name": ""}, 0, 5, ""),
            ({"name": "test_chat_assistant_1"}, 0, 1, ""),
            ({"name": "unknown"}, 102, 0, "The chat doesn't exist"),
        ],
    )
    def test_name(self, HttpApiAuth, params, expected_code, expected_num, expected_message):
        res = list_chat_assistants(HttpApiAuth, params=params)
        assert res["code"] == expected_code
        if expected_code == 0:
            if params["name"] in [None, ""]:
                assert len(res["data"]) == expected_num
            else:
                assert res["data"][0]["name"] == params["name"]
        else:
            assert res["message"] == expected_message

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "chat_assistant_id, expected_code, expected_num, expected_message",
        [
            (None, 0, 5, ""),
            ("", 0, 5, ""),
            # Callable case resolves to a real id from the fixture.
            (lambda r: r[0], 0, 1, ""),
            ("unknown", 102, 0, "The chat doesn't exist"),
        ],
    )
    def test_id(
        self,
        HttpApiAuth,
        add_chat_assistants,
        chat_assistant_id,
        expected_code,
        expected_num,
        expected_message,
    ):
        _, _, chat_assistant_ids = add_chat_assistants
        if callable(chat_assistant_id):
            params = {"id": chat_assistant_id(chat_assistant_ids)}
        else:
            params = {"id": chat_assistant_id}
        res = list_chat_assistants(HttpApiAuth, params=params)
        assert res["code"] == expected_code
        if expected_code == 0:
            if params["id"] in [None, ""]:
                assert len(res["data"]) == expected_num
            else:
                assert res["data"][0]["id"] == params["id"]
        else:
            assert res["message"] == expected_message

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "chat_assistant_id, name, expected_code, expected_num, expected_message",
        [
            # id and name must refer to the same assistant to match.
            (lambda r: r[0], "test_chat_assistant_0", 0, 1, ""),
            (lambda r: r[0], "test_chat_assistant_1", 102, 0, "The chat doesn't exist"),
            (lambda r: r[0], "unknown", 102, 0, "The chat doesn't exist"),
            ("id", "chat_assistant_0", 102, 0, "The chat doesn't exist"),
        ],
    )
    def test_name_and_id(
        self,
        HttpApiAuth,
        add_chat_assistants,
        chat_assistant_id,
        name,
        expected_code,
        expected_num,
        expected_message,
    ):
        _, _, chat_assistant_ids = add_chat_assistants
        if callable(chat_assistant_id):
            params = {"id": chat_assistant_id(chat_assistant_ids), "name": name}
        else:
            params = {"id": chat_assistant_id, "name": name}
        res = list_chat_assistants(HttpApiAuth, params=params)
        assert res["code"] == expected_code
        if expected_code == 0:
            assert len(res["data"]) == expected_num
        else:
            assert res["message"] == expected_message

    @pytest.mark.p3
    def test_concurrent_list(self, HttpApiAuth):
        """100 concurrent list calls must all succeed."""
        count = 100
        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [executor.submit(list_chat_assistants, HttpApiAuth) for i in range(count)]
        responses = list(as_completed(futures))
        assert len(responses) == count, responses
        assert all(future.result()["code"] == 0 for future in futures)

    @pytest.mark.p3
    def test_invalid_params(self, HttpApiAuth):
        """Unknown query params are ignored rather than rejected."""
        params = {"a": "b"}
        res = list_chat_assistants(HttpApiAuth, params=params)
        assert res["code"] == 0
        assert len(res["data"]) == 5

    @pytest.mark.p2
    def test_list_chats_after_deleting_associated_dataset(self, HttpApiAuth, add_chat_assistants):
        """Assistants remain listable after their backing dataset is deleted."""
        dataset_id, _, _ = add_chat_assistants
        res = delete_datasets(HttpApiAuth, {"ids": [dataset_id]})
        assert res["code"] == 0
        res = list_chat_assistants(HttpApiAuth)
        assert res["code"] == 0
        assert len(res["data"]) == 5

    @pytest.mark.p2
    def test_desc_false_parse_branch_p2(self, HttpApiAuth):
        """String "False" for `desc` is parsed and yields ascending order."""
        res = list_chat_assistants(HttpApiAuth, params={"desc": "False", "orderby": "create_time"})
        assert res["code"] == 0
        assert is_sorted(res["data"], "create_time", False)
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_http_api/test_chat_assistant_management/test_list_chat_assistants.py",
"license": "Apache License 2.0",
"lines": 301,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_http_api/test_chat_assistant_management/test_update_chat_assistant.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from common import create_chat_assistant, list_chat_assistants, update_chat_assistant
from configs import CHAT_ASSISTANT_NAME_LIMIT, INVALID_API_TOKEN
from libs.auth import RAGFlowHttpApiAuth
from utils import encode_avatar
from utils.file_utils import create_image_file
@pytest.mark.p1
class TestAuthorization:
    """Updating a chat assistant with missing/invalid credentials must be rejected."""

    @pytest.mark.parametrize(
        "invalid_auth, expected_code, expected_message",
        [
            (None, 0, "`Authorization` can't be empty"),
            (RAGFlowHttpApiAuth(INVALID_API_TOKEN), 109, "Authentication error: API key is invalid!"),
        ],
    )
    def test_invalid_auth(self, invalid_auth, expected_code, expected_message):
        response = update_chat_assistant(invalid_auth, "chat_assistant_id")
        assert response["code"] == expected_code
        assert response["message"] == expected_message
class TestChatAssistantUpdate:
@pytest.mark.parametrize(
    "payload, expected_code, expected_message",
    [
        pytest.param({"name": "valid_name"}, 0, "", marks=pytest.mark.p1),
        # Over-limit and wrong-type names are skipped pending server-side validation.
        pytest.param({"name": "a" * (CHAT_ASSISTANT_NAME_LIMIT + 1)}, 102, "", marks=pytest.mark.skip(reason="issues/")),
        pytest.param({"name": 1}, 100, "", marks=pytest.mark.skip(reason="issues/")),
        pytest.param({"name": ""}, 102, "`name` cannot be empty.", marks=pytest.mark.p3),
        # Duplicate detection is case-insensitive.
        pytest.param({"name": "test_chat_assistant_1"}, 102, "Duplicated chat name in updating chat.", marks=pytest.mark.p3),
        pytest.param({"name": "TEST_CHAT_ASSISTANT_1"}, 102, "Duplicated chat name in updating chat.", marks=pytest.mark.p3),
    ],
)
def test_name(self, HttpApiAuth, add_chat_assistants_func, payload, expected_code, expected_message):
    """Rename an existing assistant and verify the change (or the rejection)."""
    _, _, chat_assistant_ids = add_chat_assistants_func
    res = update_chat_assistant(HttpApiAuth, chat_assistant_ids[0], payload)
    assert res["code"] == expected_code, res
    if expected_code == 0:
        # Re-fetch to confirm the rename was persisted.
        res = list_chat_assistants(HttpApiAuth, {"id": chat_assistant_ids[0]})
        assert res["data"][0]["name"] == payload.get("name")
    else:
        assert res["message"] == expected_message
@pytest.mark.parametrize(
    "dataset_ids, expected_code, expected_message",
    [
        pytest.param([], 0, "", marks=pytest.mark.skip(reason="issues/")),
        # Callable case wraps the fixture's real dataset id.
        pytest.param(lambda r: [r], 0, "", marks=pytest.mark.p1),
        pytest.param(["invalid_dataset_id"], 102, "You don't own the dataset invalid_dataset_id", marks=pytest.mark.p3),
        # A bare string is iterated character-wise server-side, hence "dataset i".
        pytest.param("invalid_dataset_id", 102, "You don't own the dataset i", marks=pytest.mark.p3),
    ],
)
def test_dataset_ids(self, HttpApiAuth, add_chat_assistants_func, dataset_ids, expected_code, expected_message):
    """Rebind an assistant's datasets and verify ownership validation."""
    dataset_id, _, chat_assistant_ids = add_chat_assistants_func
    payload = {"name": "ragflow test"}
    if callable(dataset_ids):
        payload["dataset_ids"] = dataset_ids(dataset_id)
    else:
        payload["dataset_ids"] = dataset_ids
    res = update_chat_assistant(HttpApiAuth, chat_assistant_ids[0], payload)
    assert res["code"] == expected_code, res
    if expected_code == 0:
        res = list_chat_assistants(HttpApiAuth, {"id": chat_assistant_ids[0]})
        assert res["data"][0]["name"] == payload.get("name")
    else:
        assert res["message"] == expected_message
@pytest.mark.p3
def test_avatar(self, HttpApiAuth, add_chat_assistants_func, tmp_path):
    """Updating an assistant with a base64-encoded PNG avatar succeeds."""
    dataset_id, _, chat_assistant_ids = add_chat_assistants_func
    image_path = create_image_file(tmp_path / "ragflow_test.png")
    request_body = {
        "name": "avatar_test",
        "avatar": encode_avatar(image_path),
        "dataset_ids": [dataset_id],
    }
    response = update_chat_assistant(HttpApiAuth, chat_assistant_ids[0], request_body)
    assert response["code"] == 0
@pytest.mark.p3
@pytest.mark.parametrize(
    "llm, expected_code, expected_message",
    [
        ({}, 0, ""),
        ({"model_name": "glm-4"}, 0, ""),
        ({"model_name": "unknown"}, 102, "`model_name` unknown doesn't exist"),
        ({"temperature": 0}, 0, ""),
        ({"temperature": 1}, 0, ""),
        # Out-of-range / wrong-type cases are skipped: server currently accepts them.
        pytest.param({"temperature": -1}, 0, "", marks=pytest.mark.skip),
        pytest.param({"temperature": 10}, 0, "", marks=pytest.mark.skip),
        pytest.param({"temperature": "a"}, 0, "", marks=pytest.mark.skip),
        ({"top_p": 0}, 0, ""),
        ({"top_p": 1}, 0, ""),
        pytest.param({"top_p": -1}, 0, "", marks=pytest.mark.skip),
        pytest.param({"top_p": 10}, 0, "", marks=pytest.mark.skip),
        pytest.param({"top_p": "a"}, 0, "", marks=pytest.mark.skip),
        ({"presence_penalty": 0}, 0, ""),
        ({"presence_penalty": 1}, 0, ""),
        pytest.param({"presence_penalty": -1}, 0, "", marks=pytest.mark.skip),
        pytest.param({"presence_penalty": 10}, 0, "", marks=pytest.mark.skip),
        pytest.param({"presence_penalty": "a"}, 0, "", marks=pytest.mark.skip),
        ({"frequency_penalty": 0}, 0, ""),
        ({"frequency_penalty": 1}, 0, ""),
        pytest.param({"frequency_penalty": -1}, 0, "", marks=pytest.mark.skip),
        pytest.param({"frequency_penalty": 10}, 0, "", marks=pytest.mark.skip),
        pytest.param({"frequency_penalty": "a"}, 0, "", marks=pytest.mark.skip),
        ({"max_token": 0}, 0, ""),
        ({"max_token": 1024}, 0, ""),
        pytest.param({"max_token": -1}, 0, "", marks=pytest.mark.skip),
        pytest.param({"max_token": 10}, 0, "", marks=pytest.mark.skip),
        pytest.param({"max_token": "a"}, 0, "", marks=pytest.mark.skip),
        pytest.param({"unknown": "unknown"}, 0, "", marks=pytest.mark.skip),
    ],
)
def test_llm(self, HttpApiAuth, add_chat_assistants_func, chat_assistant_llm_model_type, llm, expected_code, expected_message):
    """Update an assistant's `llm` config and verify it round-trips via list."""
    dataset_id, _, chat_assistant_ids = add_chat_assistants_func
    # Copy before mutating so the parametrized dict is not shared across cases.
    llm_payload = dict(llm)
    llm_payload.setdefault("model_type", chat_assistant_llm_model_type)
    payload = {"name": "llm_test", "dataset_ids": [dataset_id], "llm": llm_payload}
    res = update_chat_assistant(HttpApiAuth, chat_assistant_ids[0], payload)
    assert res["code"] == expected_code
    if expected_code == 0:
        res = list_chat_assistants(HttpApiAuth, {"id": chat_assistant_ids[0]})
        if llm:
            # Only the caller-supplied fields are checked (model_type is injected).
            for k, v in llm.items():
                assert res["data"][0]["llm"][k] == v
        else:
            # Empty llm payload -> server-side defaults.
            assert res["data"][0]["llm"]["model_name"] == "glm-4-flash@ZHIPU-AI"
            assert res["data"][0]["llm"]["temperature"] == 0.1
            assert res["data"][0]["llm"]["top_p"] == 0.3
            assert res["data"][0]["llm"]["presence_penalty"] == 0.4
            assert res["data"][0]["llm"]["frequency_penalty"] == 0.7
            assert res["data"][0]["llm"]["max_tokens"] == 512
    else:
        assert expected_message in res["message"]
@pytest.mark.p3
@pytest.mark.parametrize(
    "prompt, expected_code, expected_message",
    [
        ({}, 100, "ValueError"),
        ({"similarity_threshold": 0}, 0, ""),
        ({"similarity_threshold": 1}, 0, ""),
        pytest.param({"similarity_threshold": -1}, 0, "", marks=pytest.mark.skip),
        pytest.param({"similarity_threshold": 10}, 0, "", marks=pytest.mark.skip),
        pytest.param({"similarity_threshold": "a"}, 0, "", marks=pytest.mark.skip),
        ({"keywords_similarity_weight": 0}, 0, ""),
        ({"keywords_similarity_weight": 1}, 0, ""),
        pytest.param({"keywords_similarity_weight": -1}, 0, "", marks=pytest.mark.skip),
        pytest.param({"keywords_similarity_weight": 10}, 0, "", marks=pytest.mark.skip),
        pytest.param({"keywords_similarity_weight": "a"}, 0, "", marks=pytest.mark.skip),
        ({"variables": []}, 0, ""),
        ({"top_n": 0}, 0, ""),
        ({"top_n": 1}, 0, ""),
        pytest.param({"top_n": -1}, 0, "", marks=pytest.mark.skip),
        pytest.param({"top_n": 10}, 0, "", marks=pytest.mark.skip),
        pytest.param({"top_n": "a"}, 0, "", marks=pytest.mark.skip),
        ({"empty_response": "Hello World"}, 0, ""),
        ({"empty_response": ""}, 0, ""),
        ({"empty_response": "!@#$%^&*()"}, 0, ""),
        ({"empty_response": "中文测试"}, 0, ""),
        pytest.param({"empty_response": 123}, 0, "", marks=pytest.mark.skip),
        pytest.param({"empty_response": True}, 0, "", marks=pytest.mark.skip),
        pytest.param({"empty_response": " "}, 0, "", marks=pytest.mark.skip),
        ({"opener": "Hello World"}, 0, ""),
        ({"opener": ""}, 0, ""),
        ({"opener": "!@#$%^&*()"}, 0, ""),
        ({"opener": "中文测试"}, 0, ""),
        pytest.param({"opener": 123}, 0, "", marks=pytest.mark.skip),
        pytest.param({"opener": True}, 0, "", marks=pytest.mark.skip),
        pytest.param({"opener": " "}, 0, "", marks=pytest.mark.skip),
        ({"show_quote": True}, 0, ""),
        ({"show_quote": False}, 0, ""),
        ({"prompt": "Hello World {knowledge}"}, 0, ""),
        ({"prompt": "{knowledge}"}, 0, ""),
        ({"prompt": "!@#$%^&*() {knowledge}"}, 0, ""),
        ({"prompt": "中文测试 {knowledge}"}, 0, ""),
        ({"prompt": "Hello World"}, 102, "Parameter 'knowledge' is not used"),
        ({"prompt": "Hello World", "variables": []}, 0, ""),
        pytest.param({"prompt": 123}, 100, """AttributeError("\'int\' object has no attribute \'find\'")""", marks=pytest.mark.skip),
        pytest.param({"prompt": True}, 100, """AttributeError("\'int\' object has no attribute \'find\'")""", marks=pytest.mark.skip),
        pytest.param({"unknown": "unknown"}, 0, "", marks=pytest.mark.skip),
    ],
)
def test_prompt(self, HttpApiAuth, add_chat_assistants_func, prompt, expected_code, expected_message):
    """Update an assistant's `prompt` config and verify it round-trips via list."""
    dataset_id, _, chat_assistant_ids = add_chat_assistants_func
    payload = {"name": "prompt_test", "dataset_ids": [dataset_id], "prompt": prompt}
    res = update_chat_assistant(HttpApiAuth, chat_assistant_ids[0], payload)
    assert res["code"] == expected_code
    if expected_code == 0:
        res = list_chat_assistants(HttpApiAuth, {"id": chat_assistant_ids[0]})
        if prompt:
            for k, v in prompt.items():
                if k == "keywords_similarity_weight":
                    # NOTE(review): the API appears to return the complement
                    # (1 - v) for this field — the assertion encodes that.
                    assert res["data"][0]["prompt"][k] == 1 - v
                else:
                    assert res["data"][0]["prompt"][k] == v
        else:
            # FIX: list_chat_assistants returns a *list* of assistants in
            # res["data"] (see the branch above and test_llm), so the defaults
            # live at res["data"][0]["prompt"][...]. The original code indexed
            # res["data"]["prompt"][0][...], which would raise on a list.
            # This branch is currently unreachable ({} expects code 100), but
            # the indexing is now correct for when that case is enabled.
            assert res["data"][0]["prompt"]["similarity_threshold"] == 0.2
            assert res["data"][0]["prompt"]["keywords_similarity_weight"] == 0.7
            assert res["data"][0]["prompt"]["top_n"] == 6
            assert res["data"][0]["prompt"]["variables"] == [{"key": "knowledge", "optional": False}]
            assert res["data"][0]["prompt"]["rerank_model"] == ""
            assert res["data"][0]["prompt"]["empty_response"] == "Sorry! No relevant content was found in the knowledge base!"
            assert res["data"][0]["prompt"]["opener"] == "Hi! I'm your assistant. What can I do for you?"
            assert res["data"][0]["prompt"]["show_quote"] is True
            assert (
                res["data"][0]["prompt"]["prompt"]
                == 'You are an intelligent assistant. Please summarize the content of the dataset to answer the question. Please list the data in the dataset and answer in detail. When all dataset content is irrelevant to the question, your answer must include the sentence "The answer you are looking for is not found in the dataset!" Answers need to consider chat history.\n      Here is the knowledge base:\n      {knowledge}\n      The above is the knowledge base.'
            )
    else:
        assert expected_message in res["message"]
@pytest.mark.p2
def test_update_mapping_and_validation_branches_p2(self, HttpApiAuth, add_chat_assistants_func, chat_assistant_llm_model_type):
    """Walks the update endpoint's validation branches one request at a time:
    ownership, field mapping, unknown models, name rules, prompt placeholder,
    and raw avatar pass-through."""
    dataset_id, _, chat_assistant_ids = add_chat_assistants_func
    chat_id = chat_assistant_ids[0]
    # Unknown chat id -> ownership error.
    res = update_chat_assistant(HttpApiAuth, "invalid-chat-id", {"name": "anything"})
    assert res["code"] == 102
    assert res["message"] == "You do not own the chat"
    # show_quotation + dataset rebinding is accepted.
    res = update_chat_assistant(HttpApiAuth, chat_id, {"show_quotation": False, "dataset_ids": [dataset_id]})
    assert res["code"] == 0
    # Unknown LLM model is rejected.
    res = update_chat_assistant(
        HttpApiAuth,
        chat_id,
        {"llm": {"model_name": "unknown-llm-model", "model_type": chat_assistant_llm_model_type}},
    )
    assert res["code"] == 102
    assert "`model_name` unknown-llm-model doesn't exist" in res["message"]
    # Unknown rerank model is rejected.
    res = update_chat_assistant(
        HttpApiAuth,
        chat_id,
        {"prompt": {"rerank_model": "unknown-rerank-model"}},
    )
    assert res["code"] == 102
    assert "`rerank_model` unknown-rerank-model doesn't exist" in res["message"]
    # Empty and duplicated names are rejected.
    res = update_chat_assistant(HttpApiAuth, chat_id, {"name": ""})
    assert res["code"] == 102
    assert res["message"] == "`name` cannot be empty."
    res = update_chat_assistant(HttpApiAuth, chat_id, {"name": "test_chat_assistant_1"})
    assert res["code"] == 102
    assert res["message"] == "Duplicated chat name in updating chat."
    # A prompt that declares the knowledge variable but never uses it fails.
    res = update_chat_assistant(
        HttpApiAuth,
        chat_id,
        {"prompt": {"prompt": "No required placeholder", "variables": [{"key": "knowledge", "optional": False}]}},
    )
    assert res["code"] == 102
    assert "Parameter 'knowledge' is not used" in res["message"]
    # An avatar string is stored and returned verbatim.
    res = update_chat_assistant(HttpApiAuth, chat_id, {"avatar": "raw-avatar-value"})
    assert res["code"] == 0
    listed = list_chat_assistants(HttpApiAuth, {"id": chat_id})
    assert listed["code"] == 0
    assert listed["data"][0]["avatar"] == "raw-avatar-value"
@pytest.mark.p2
def test_update_unparsed_dataset_guard_p2(self, HttpApiAuth, add_dataset_func, clear_chat_assistants):
dataset_id = add_dataset_func
create_res = create_chat_assistant(HttpApiAuth, {"name": "update-unparsed-target", "dataset_ids": []})
assert create_res["code"] == 0
chat_id = create_res["data"]["id"]
res = update_chat_assistant(HttpApiAuth, chat_id, {"dataset_ids": [dataset_id]})
assert res["code"] == 102
assert "doesn't own parsed file" in res["message"]
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_http_api/test_chat_assistant_management/test_update_chat_assistant.py",
"license": "Apache License 2.0",
"lines": 271,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_http_api/test_chunk_management_within_dataset/test_add_chunk.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from common import add_chunk, delete_documents, list_chunks
from configs import INVALID_API_TOKEN, INVALID_ID_32
from libs.auth import RAGFlowHttpApiAuth
def validate_chunk_details(dataset_id, document_id, payload, res):
    """Assert that the chunk echoed back in *res* matches what was submitted in *payload*.

    Optional fields (important_keywords, questions) are only checked when present in
    the request payload; questions are compared after the server-side normalization
    (stringify, strip, drop empties).
    """
    returned = res["data"]["chunk"]
    assert returned["dataset_id"] == dataset_id
    assert returned["document_id"] == document_id
    assert returned["content"] == payload["content"]
    if "important_keywords" in payload:
        assert returned["important_keywords"] == payload["important_keywords"]
    if "questions" in payload:
        # Mirror the server's normalization of the submitted questions.
        expected_questions = []
        for question in payload.get("questions", []):
            normalized = str(question).strip()
            if normalized:
                expected_questions.append(normalized)
        assert returned["questions"] == expected_questions
@pytest.mark.p1
class TestAuthorization:
    """Requests with a missing or invalid API key must be rejected before any work is done."""

    @pytest.mark.parametrize(
        "invalid_auth, expected_code, expected_message",
        [
            (None, 0, "`Authorization` can't be empty"),
            (RAGFlowHttpApiAuth(INVALID_API_TOKEN), 109, "Authentication error: API key is invalid!"),
        ],
    )
    def test_invalid_auth(self, invalid_auth, expected_code, expected_message):
        # Placeholder ids are fine: auth is checked before ownership.
        response = add_chunk(invalid_auth, "dataset_id", "document_id")
        assert response["code"] == expected_code
        assert response["message"] == expected_message
class TestAddChunk:
    """Tests for the add_chunk endpoint.

    Fix: `test_questions` previously re-checked the add_chunk response code *after*
    the expected-code assert (redundant) and then asserted the new chunk count
    without verifying the list_chunks response succeeded — inconsistent with its
    sibling tests. The shared add-and-verify flow is now a single helper so all
    three field-validation tests follow the same, correct order.
    """

    def _assert_add_chunk(self, HttpApiAuth, dataset_id, document_id, payload, expected_code, expected_message):
        # Shared driver: record the baseline chunk count, add one chunk, then
        # either verify the echo + count increment (success) or the error message.
        res = list_chunks(HttpApiAuth, dataset_id, document_id)
        if res["code"] != 0:
            assert False, res
        chunks_count = res["data"]["doc"]["chunk_count"]
        res = add_chunk(HttpApiAuth, dataset_id, document_id, payload)
        assert res["code"] == expected_code
        if expected_code == 0:
            validate_chunk_details(dataset_id, document_id, payload, res)
            res = list_chunks(HttpApiAuth, dataset_id, document_id)
            if res["code"] != 0:
                assert False, res
            assert res["data"]["doc"]["chunk_count"] == chunks_count + 1
        else:
            assert res["message"] == expected_message

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "payload, expected_code, expected_message",
        [
            ({"content": None}, 100, """TypeError("unsupported operand type(s) for +: \'NoneType\' and \'str\'")"""),
            ({"content": ""}, 102, "`content` is required"),
            pytest.param(
                {"content": 1},
                100,
                """TypeError("unsupported operand type(s) for +: \'int\' and \'str\'")""",
                marks=pytest.mark.skip,
            ),
            ({"content": "a"}, 0, ""),
            ({"content": " "}, 102, "`content` is required"),
            ({"content": "\n!?。;!?\"'"}, 0, ""),
        ],
    )
    def test_content(self, HttpApiAuth, add_document, payload, expected_code, expected_message):
        """`content` is mandatory and must be a non-blank string."""
        dataset_id, document_id = add_document
        self._assert_add_chunk(HttpApiAuth, dataset_id, document_id, payload, expected_code, expected_message)

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "payload, expected_code, expected_message",
        [
            ({"content": "chunk test", "important_keywords": ["a", "b", "c"]}, 0, ""),
            ({"content": "chunk test", "important_keywords": [""]}, 0, ""),
            (
                {"content": "chunk test", "important_keywords": [1]},
                100,
                "TypeError('sequence item 0: expected str instance, int found')",
            ),
            ({"content": "chunk test", "important_keywords": ["a", "a"]}, 0, ""),
            ({"content": "chunk test", "important_keywords": "abc"}, 102, "`important_keywords` is required to be a list"),
            ({"content": "chunk test", "important_keywords": 123}, 102, "`important_keywords` is required to be a list"),
        ],
    )
    def test_important_keywords(self, HttpApiAuth, add_document, payload, expected_code, expected_message):
        """`important_keywords`, when given, must be a list of strings."""
        dataset_id, document_id = add_document
        self._assert_add_chunk(HttpApiAuth, dataset_id, document_id, payload, expected_code, expected_message)

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "payload, expected_code, expected_message",
        [
            ({"content": "chunk test", "questions": ["a", "b", "c"]}, 0, ""),
            ({"content": "chunk test", "questions": [""]}, 0, ""),
            ({"content": "chunk test", "questions": [1]}, 100, "TypeError('sequence item 0: expected str instance, int found')"),
            ({"content": "chunk test", "questions": ["a", "a"]}, 0, ""),
            ({"content": "chunk test", "questions": "abc"}, 102, "`questions` is required to be a list"),
            ({"content": "chunk test", "questions": 123}, 102, "`questions` is required to be a list"),
        ],
    )
    def test_questions(self, HttpApiAuth, add_document, payload, expected_code, expected_message):
        """`questions`, when given, must be a list of strings."""
        dataset_id, document_id = add_document
        self._assert_add_chunk(HttpApiAuth, dataset_id, document_id, payload, expected_code, expected_message)

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "dataset_id, expected_code, expected_message",
        [
            (INVALID_ID_32, 102, f"You don't own the dataset {INVALID_ID_32}."),
        ],
    )
    def test_invalid_dataset_id(
        self,
        HttpApiAuth,
        add_document,
        dataset_id,
        expected_code,
        expected_message,
    ):
        """A dataset id the caller does not own is rejected with an ownership error."""
        _, document_id = add_document
        res = add_chunk(HttpApiAuth, dataset_id, document_id, {"content": "a"})
        assert res["code"] == expected_code
        assert res["message"] == expected_message

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "document_id, expected_code, expected_message",
        [
            (
                INVALID_ID_32,
                102,
                f"You don't own the document {INVALID_ID_32}.",
            ),
        ],
    )
    def test_invalid_document_id(self, HttpApiAuth, add_document, document_id, expected_code, expected_message):
        """A document id the caller does not own is rejected with an ownership error."""
        dataset_id, _ = add_document
        res = add_chunk(HttpApiAuth, dataset_id, document_id, {"content": "chunk test"})
        assert res["code"] == expected_code
        assert res["message"] == expected_message

    @pytest.mark.p3
    def test_repeated_add_chunk(self, HttpApiAuth, add_document):
        """Adding the same content twice succeeds twice and increments the count each time."""
        payload = {"content": "chunk test"}
        dataset_id, document_id = add_document
        self._assert_add_chunk(HttpApiAuth, dataset_id, document_id, payload, 0, "")
        self._assert_add_chunk(HttpApiAuth, dataset_id, document_id, payload, 0, "")

    @pytest.mark.p3
    def test_add_chunk_to_deleted_document(self, HttpApiAuth, add_document):
        """Adding a chunk to a deleted document fails as if the document were never owned."""
        dataset_id, document_id = add_document
        delete_documents(HttpApiAuth, dataset_id, {"ids": [document_id]})
        res = add_chunk(HttpApiAuth, dataset_id, document_id, {"content": "chunk test"})
        assert res["code"] == 102
        assert res["message"] == f"You don't own the document {document_id}."

    @pytest.mark.skip(reason="issues/6411")
    def test_concurrent_add_chunk(self, HttpApiAuth, add_document):
        """Concurrent adds should all succeed and the final count should reflect every add."""
        count = 50
        dataset_id, document_id = add_document
        res = list_chunks(HttpApiAuth, dataset_id, document_id)
        if res["code"] != 0:
            assert False, res
        chunks_count = res["data"]["doc"]["chunk_count"]
        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [
                executor.submit(
                    add_chunk,
                    HttpApiAuth,
                    dataset_id,
                    document_id,
                    {"content": f"chunk test {i}"},
                )
                for i in range(count)
            ]
        responses = list(as_completed(futures))
        assert len(responses) == count, responses
        assert all(future.result()["code"] == 0 for future in futures)
        res = list_chunks(HttpApiAuth, dataset_id, document_id)
        if res["code"] != 0:
            assert False, res
        assert res["data"]["doc"]["chunk_count"] == chunks_count + count
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_http_api/test_chunk_management_within_dataset/test_add_chunk.py",
"license": "Apache License 2.0",
"lines": 230,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_http_api/test_chunk_management_within_dataset/test_delete_chunks.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from common import batch_add_chunks, delete_chunks, list_chunks
from configs import INVALID_API_TOKEN, INVALID_ID_32
from libs.auth import RAGFlowHttpApiAuth
@pytest.mark.p1
class TestAuthorization:
    """Chunk deletion must be refused when the API key is missing or invalid."""

    @pytest.mark.parametrize(
        "invalid_auth, expected_code, expected_message",
        [
            (None, 0, "`Authorization` can't be empty"),
            (RAGFlowHttpApiAuth(INVALID_API_TOKEN), 109, "Authentication error: API key is invalid!"),
        ],
    )
    def test_invalid_auth(self, invalid_auth, expected_code, expected_message):
        # Placeholder ids are fine: auth is checked before ownership.
        response = delete_chunks(invalid_auth, "dataset_id", "document_id")
        assert response["code"] == expected_code
        assert response["message"] == expected_message
class TestChunksDeletion:
    """Tests for the delete_chunks endpoint.

    NOTE(review): the hard-coded counts (4, 5, "expect 5", remaining 1) assume the
    add_chunks_func fixture seeds exactly 4 chunks on top of a document whose listing
    reports one extra entry — confirm against the fixture before changing them.
    """

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "dataset_id, expected_code, expected_message",
        [
            (INVALID_ID_32, 102, f"You don't own the dataset {INVALID_ID_32}."),
        ],
    )
    def test_invalid_dataset_id(self, HttpApiAuth, add_chunks_func, dataset_id, expected_code, expected_message):
        """Deleting with an unowned dataset id yields an ownership error."""
        _, document_id, chunk_ids = add_chunks_func
        res = delete_chunks(HttpApiAuth, dataset_id, document_id, {"chunk_ids": chunk_ids})
        assert res["code"] == expected_code
        assert res["message"] == expected_message

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "document_id, expected_code, expected_message",
        [
            (INVALID_ID_32, 100, f"""LookupError("Can't find the document with ID {INVALID_ID_32}!")"""),
        ],
    )
    def test_invalid_document_id(self, HttpApiAuth, add_chunks_func, document_id, expected_code, expected_message):
        """Deleting with an unknown document id surfaces the server-side LookupError."""
        dataset_id, _, chunk_ids = add_chunks_func
        res = delete_chunks(HttpApiAuth, dataset_id, document_id, {"chunk_ids": chunk_ids})
        assert res["code"] == expected_code
        assert res["message"] == expected_message

    # Each param is a lambda that injects "invalid_id" at a different position in
    # the real chunk-id list (front / middle / back).
    @pytest.mark.parametrize(
        "payload",
        [
            pytest.param(lambda r: {"chunk_ids": ["invalid_id"] + r}, marks=pytest.mark.p3),
            pytest.param(lambda r: {"chunk_ids": r[:1] + ["invalid_id"] + r[1:4]}, marks=pytest.mark.p1),
            pytest.param(lambda r: {"chunk_ids": r + ["invalid_id"]}, marks=pytest.mark.p3),
        ],
    )
    def test_delete_partial_invalid_id(self, HttpApiAuth, add_chunks_func, payload):
        """A batch containing one bad id deletes the valid ones but reports a mismatch."""
        dataset_id, document_id, chunk_ids = add_chunks_func
        if callable(payload):
            payload = payload(chunk_ids)
        res = delete_chunks(HttpApiAuth, dataset_id, document_id, payload)
        assert res["code"] == 102
        assert res["message"] == "rm_chunk deleted chunks 4, expect 5"
        res = list_chunks(HttpApiAuth, dataset_id, document_id)
        if res["code"] != 0:
            assert False, res
        assert len(res["data"]["chunks"]) == 1
        assert res["data"]["total"] == 1

    @pytest.mark.p3
    def test_repeated_deletion(self, HttpApiAuth, add_chunks_func):
        """A second deletion of the same ids finds nothing left to delete."""
        dataset_id, document_id, chunk_ids = add_chunks_func
        payload = {"chunk_ids": chunk_ids}
        res = delete_chunks(HttpApiAuth, dataset_id, document_id, payload)
        assert res["code"] == 0
        res = delete_chunks(HttpApiAuth, dataset_id, document_id, payload)
        assert res["code"] == 102
        assert res["message"] == "rm_chunk deleted chunks 0, expect 4"

    @pytest.mark.p3
    def test_duplicate_deletion(self, HttpApiAuth, add_chunks_func):
        """Duplicate ids in one request are de-duplicated and reported as errors, not failures."""
        dataset_id, document_id, chunk_ids = add_chunks_func
        res = delete_chunks(HttpApiAuth, dataset_id, document_id, {"chunk_ids": chunk_ids * 2})
        assert res["code"] == 0
        assert "Duplicate chunk ids" in res["data"]["errors"][0]
        assert res["data"]["success_count"] == 4
        res = list_chunks(HttpApiAuth, dataset_id, document_id)
        if res["code"] != 0:
            assert False, res
        assert len(res["data"]["chunks"]) == 1
        assert res["data"]["total"] == 1

    @pytest.mark.p3
    def test_concurrent_deletion(self, HttpApiAuth, add_document):
        """100 single-chunk deletions issued concurrently must all succeed."""
        count = 100
        dataset_id, document_id = add_document
        chunk_ids = batch_add_chunks(HttpApiAuth, dataset_id, document_id, count)
        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [
                executor.submit(
                    delete_chunks,
                    HttpApiAuth,
                    dataset_id,
                    document_id,
                    {"chunk_ids": chunk_ids[i : i + 1]},
                )
                for i in range(count)
            ]
        responses = list(as_completed(futures))
        assert len(responses) == count, responses
        assert all(future.result()["code"] == 0 for future in futures)

    @pytest.mark.p3
    def test_delete_1k(self, HttpApiAuth, add_document):
        """Deleting 1000 chunks in one request empties the document."""
        chunks_num = 1_000
        dataset_id, document_id = add_document
        chunk_ids = batch_add_chunks(HttpApiAuth, dataset_id, document_id, chunks_num)
        # issues/6487
        from time import sleep

        sleep(1)
        res = delete_chunks(HttpApiAuth, dataset_id, document_id, {"chunk_ids": chunk_ids})
        assert res["code"] == 0
        res = list_chunks(HttpApiAuth, dataset_id, document_id)
        if res["code"] != 0:
            assert False, res
        assert len(res["data"]["chunks"]) == 0
        assert res["data"]["total"] == 0

    # `payload` may be a dict, a sentinel bad value, or a lambda over the real ids;
    # `remaining` is the expected chunk count after the call.
    @pytest.mark.parametrize(
        "payload, expected_code, expected_message, remaining",
        [
            pytest.param(None, 100, """TypeError("argument of type \'NoneType\' is not iterable")""", 5, marks=pytest.mark.skip),
            pytest.param({"chunk_ids": ["invalid_id"]}, 102, "rm_chunk deleted chunks 0, expect 1", 5, marks=pytest.mark.p3),
            pytest.param("not json", 100, """UnboundLocalError("local variable \'duplicate_messages\' referenced before assignment")""", 5, marks=pytest.mark.skip(reason="pull/6376")),
            pytest.param(lambda r: {"chunk_ids": r[:1]}, 0, "", 4, marks=pytest.mark.p3),
            pytest.param(lambda r: {"chunk_ids": r}, 0, "", 1, marks=pytest.mark.p1),
            pytest.param({"chunk_ids": []}, 0, "", 0, marks=pytest.mark.p3),
        ],
    )
    def test_basic_scenarios(
        self,
        HttpApiAuth,
        add_chunks_func,
        payload,
        expected_code,
        expected_message,
        remaining,
    ):
        """Happy-path and error-path deletions, verifying the remaining chunk count."""
        dataset_id, document_id, chunk_ids = add_chunks_func
        if callable(payload):
            payload = payload(chunk_ids)
        res = delete_chunks(HttpApiAuth, dataset_id, document_id, payload)
        assert res["code"] == expected_code
        if res["code"] != 0:
            assert res["message"] == expected_message
        res = list_chunks(HttpApiAuth, dataset_id, document_id)
        if res["code"] != 0:
            assert False, res
        assert len(res["data"]["chunks"]) == remaining
        assert res["data"]["total"] == remaining
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_http_api/test_chunk_management_within_dataset/test_delete_chunks.py",
"license": "Apache License 2.0",
"lines": 169,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_http_api/test_chunk_management_within_dataset/test_list_chunks.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from common import batch_add_chunks, list_chunks
from configs import INVALID_API_TOKEN, INVALID_ID_32
from libs.auth import RAGFlowHttpApiAuth
@pytest.mark.p1
class TestAuthorization:
    """Chunk listing must be refused when the API key is missing or invalid."""

    @pytest.mark.parametrize(
        "invalid_auth, expected_code, expected_message",
        [
            (None, 0, "`Authorization` can't be empty"),
            (RAGFlowHttpApiAuth(INVALID_API_TOKEN), 109, "Authentication error: API key is invalid!"),
        ],
    )
    def test_invalid_auth(self, invalid_auth, expected_code, expected_message):
        # Placeholder ids are fine: auth is checked before ownership.
        response = list_chunks(invalid_auth, "dataset_id", "document_id")
        assert response["code"] == expected_code
        assert response["message"] == expected_message
class TestChunksList:
    """Tests for the list_chunks endpoint (pagination, keyword filter, id filter).

    NOTE(review): the expected sizes assume the add_chunks fixture seeds 5 chunks —
    inferred from the 5/2/1 expectations below; confirm against the fixture.
    """

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "params, expected_code, expected_page_size, expected_message",
        [
            ({"page": None, "page_size": 2}, 0, 2, ""),
            pytest.param({"page": 0, "page_size": 2}, 100, 0, "ValueError('Search does not support negative slicing.')", marks=pytest.mark.skip),
            ({"page": 2, "page_size": 2}, 0, 2, ""),
            ({"page": 3, "page_size": 2}, 0, 1, ""),
            ({"page": "3", "page_size": 2}, 0, 1, ""),
            pytest.param({"page": -1, "page_size": 2}, 100, 0, "ValueError('Search does not support negative slicing.')", marks=pytest.mark.skip),
            pytest.param({"page": "a", "page_size": 2}, 100, 0, """ValueError("invalid literal for int() with base 10: \'a\'")""", marks=pytest.mark.skip),
        ],
    )
    def test_page(self, HttpApiAuth, add_chunks, params, expected_code, expected_page_size, expected_message):
        """Pagination by page number; string pages are coerced, bad pages are (skipped) errors."""
        dataset_id, document_id, _ = add_chunks
        res = list_chunks(HttpApiAuth, dataset_id, document_id, params=params)
        assert res["code"] == expected_code
        if expected_code == 0:
            assert len(res["data"]["chunks"]) == expected_page_size
        else:
            assert res["message"] == expected_message

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "params, expected_code, expected_page_size, expected_message",
        [
            ({"page_size": None}, 0, 5, ""),
            pytest.param({"page_size": 0}, 0, 5, ""),
            ({"page_size": 1}, 0, 1, ""),
            ({"page_size": 6}, 0, 5, ""),
            ({"page_size": "1"}, 0, 1, ""),
            pytest.param({"page_size": -1}, 0, 5, "", marks=pytest.mark.skip),
            pytest.param({"page_size": "a"}, 100, 0, """ValueError("invalid literal for int() with base 10: \'a\'")""", marks=pytest.mark.skip),
        ],
    )
    def test_page_size(self, HttpApiAuth, add_chunks, params, expected_code, expected_page_size, expected_message):
        """Page-size handling; oversized/absent sizes fall back to the full 5 chunks."""
        dataset_id, document_id, _ = add_chunks
        res = list_chunks(HttpApiAuth, dataset_id, document_id, params=params)
        assert res["code"] == expected_code
        if expected_code == 0:
            assert len(res["data"]["chunks"]) == expected_page_size
        else:
            assert res["message"] == expected_message

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "params, expected_page_size",
        [
            ({"keywords": None}, 5),
            ({"keywords": ""}, 5),
            ({"keywords": "1"}, 1),
            ({"keywords": "chunk"}, 4),
            # Engine-dependent expectations (issues/6509): infinity matches differently.
            pytest.param({"keywords": "ragflow"}, 1, marks=pytest.mark.skipif(os.getenv("DOC_ENGINE") == "infinity", reason="issues/6509")),
            pytest.param({"keywords": "ragflow"}, 5, marks=pytest.mark.skipif(os.getenv("DOC_ENGINE") != "infinity", reason="issues/6509")),
            ({"keywords": "unknown"}, 0),
        ],
    )
    def test_keywords(self, HttpApiAuth, add_chunks, params, expected_page_size):
        """Keyword filtering narrows the listing; empty/None keywords return everything."""
        dataset_id, document_id, _ = add_chunks
        res = list_chunks(HttpApiAuth, dataset_id, document_id, params=params)
        assert res["code"] == 0
        assert len(res["data"]["chunks"]) == expected_page_size

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "chunk_id, expected_code, expected_page_size, expected_message",
        [
            (None, 0, 5, ""),
            ("", 0, 5, ""),
            pytest.param(lambda r: r[0], 0, 1, "", marks=pytest.mark.skipif(os.getenv("DOC_ENGINE") == "infinity", reason="issues/6499")),
            pytest.param("unknown", 100, 0, """AttributeError("\'NoneType\' object has no attribute \'keys\'")""", marks=pytest.mark.skip),
        ],
    )
    def test_id(
        self,
        HttpApiAuth,
        add_chunks,
        chunk_id,
        expected_code,
        expected_page_size,
        expected_message,
    ):
        """Filtering by chunk id; a lambda param selects a real id from the fixture."""
        dataset_id, document_id, chunk_ids = add_chunks
        if callable(chunk_id):
            params = {"id": chunk_id(chunk_ids)}
        else:
            params = {"id": chunk_id}
        res = list_chunks(HttpApiAuth, dataset_id, document_id, params=params)
        assert res["code"] == expected_code
        if expected_code == 0:
            if params["id"] in [None, ""]:
                assert len(res["data"]["chunks"]) == expected_page_size
            else:
                assert res["data"]["chunks"][0]["id"] == params["id"]
        else:
            assert res["message"] == expected_message

    @pytest.mark.p3
    def test_invalid_params(self, HttpApiAuth, add_chunks):
        """Unknown query parameters are ignored, not rejected."""
        dataset_id, document_id, _ = add_chunks
        params = {"a": "b"}
        res = list_chunks(HttpApiAuth, dataset_id, document_id, params=params)
        assert res["code"] == 0
        assert len(res["data"]["chunks"]) == 5

    @pytest.mark.p3
    def test_concurrent_list(self, HttpApiAuth, add_chunks):
        """100 concurrent listings all succeed and each sees the full 5 chunks."""
        dataset_id, document_id, _ = add_chunks
        count = 100
        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [executor.submit(list_chunks, HttpApiAuth, dataset_id, document_id) for i in range(count)]
        responses = list(as_completed(futures))
        assert len(responses) == count, responses
        assert all(len(future.result()["data"]["chunks"]) == 5 for future in futures)

    @pytest.mark.p1
    def test_default(self, HttpApiAuth, add_document):
        """Default page size caps the listing at 30 while chunk_count reflects all adds."""
        dataset_id, document_id = add_document
        res = list_chunks(HttpApiAuth, dataset_id, document_id)
        chunks_count = res["data"]["doc"]["chunk_count"]
        batch_add_chunks(HttpApiAuth, dataset_id, document_id, 31)
        # issues/6487
        from time import sleep

        sleep(3)
        res = list_chunks(HttpApiAuth, dataset_id, document_id)
        assert res["code"] == 0
        assert len(res["data"]["chunks"]) == 30
        assert res["data"]["doc"]["chunk_count"] == chunks_count + 31

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "dataset_id, expected_code, expected_message",
        [
            (INVALID_ID_32, 102, f"You don't own the dataset {INVALID_ID_32}."),
        ],
    )
    def test_invalid_dataset_id(self, HttpApiAuth, add_chunks, dataset_id, expected_code, expected_message):
        """Listing with an unowned dataset id yields an ownership error."""
        _, document_id, _ = add_chunks
        res = list_chunks(HttpApiAuth, dataset_id, document_id)
        assert res["code"] == expected_code
        assert res["message"] == expected_message

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "document_id, expected_code, expected_message",
        [
            (
                INVALID_ID_32,
                102,
                f"You don't own the document {INVALID_ID_32}.",
            ),
        ],
    )
    def test_invalid_document_id(self, HttpApiAuth, add_chunks, document_id, expected_code, expected_message):
        """Listing with an unowned document id yields an ownership error."""
        dataset_id, _, _ = add_chunks
        res = list_chunks(HttpApiAuth, dataset_id, document_id)
        assert res["code"] == expected_code
        assert res["message"] == expected_message
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_http_api/test_chunk_management_within_dataset/test_list_chunks.py",
"license": "Apache License 2.0",
"lines": 189,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_http_api/test_chunk_management_within_dataset/test_retrieval_chunks.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from concurrent.futures import ThreadPoolExecutor, as_completed
from time import sleep
import pytest
from common import add_chunk, delete_chunks, retrieval_chunks
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowHttpApiAuth
@pytest.mark.p1
class TestAuthorization:
    """Retrieval must be refused when the API key is missing or invalid."""

    @pytest.mark.parametrize(
        "invalid_auth, expected_code, expected_message",
        [
            (None, 0, "`Authorization` can't be empty"),
            (RAGFlowHttpApiAuth(INVALID_API_TOKEN), 109, "Authentication error: API key is invalid!"),
        ],
    )
    def test_invalid_auth(self, invalid_auth, expected_code, expected_message):
        # No payload needed: auth is checked before request parsing.
        response = retrieval_chunks(invalid_auth)
        assert response["code"] == expected_code
        assert response["message"] == expected_message
class TestChunksRetrieval:
    @pytest.mark.p1
    @pytest.mark.parametrize(
        "payload, expected_code, expected_page_size, expected_message",
        [
            ({"question": "chunk", "dataset_ids": None}, 0, 4, ""),
            ({"question": "chunk", "document_ids": None}, 102, 0, "`dataset_ids` is required."),
            ({"question": "chunk", "dataset_ids": None, "document_ids": None}, 0, 4, ""),
            ({"question": "chunk"}, 102, 0, "`dataset_ids` is required."),
        ],
    )
    def test_basic_scenarios(self, HttpApiAuth, add_chunks, payload, expected_code, expected_page_size, expected_message):
        """`dataset_ids` is mandatory; document_ids alone is not enough.

        None placeholders in the params are replaced with the fixture's real ids
        below, so presence of the key (not its value) is what each case exercises.
        """
        dataset_id, document_id, _ = add_chunks
        if "dataset_ids" in payload:
            payload["dataset_ids"] = [dataset_id]
        if "document_ids" in payload:
            payload["document_ids"] = [document_id]
        res = retrieval_chunks(HttpApiAuth, payload)
        assert res["code"] == expected_code
        if expected_code == 0:
            assert len(res["data"]["chunks"]) == expected_page_size
        else:
            assert res["message"] == expected_message
    @pytest.mark.p2
    @pytest.mark.parametrize(
        "payload, expected_code, expected_page_size, expected_message",
        [
            pytest.param(
                {"page": None, "page_size": 2},
                100,
                2,
                """TypeError("int() argument must be a string, a bytes-like object or a real number, not \'NoneType\'")""",
                marks=pytest.mark.skip,
            ),
            pytest.param(
                {"page": 0, "page_size": 2},
                100,
                0,
                "ValueError('Search does not support negative slicing.')",
                marks=pytest.mark.skip,
            ),
            ({"page": 2, "page_size": 2}, 0, 2, ""),
            ({"page": 3, "page_size": 2}, 0, 0, ""),
            ({"page": "3", "page_size": 2}, 0, 0, ""),
            pytest.param(
                {"page": -1, "page_size": 2},
                100,
                0,
                "ValueError('Search does not support negative slicing.')",
                marks=pytest.mark.skip,
            ),
            pytest.param(
                {"page": "a", "page_size": 2},
                100,
                0,
                """ValueError("invalid literal for int() with base 10: \'a\'")""",
                marks=pytest.mark.skip,
            ),
        ],
    )
    def test_page(self, HttpApiAuth, add_chunks, payload, expected_code, expected_page_size, expected_message):
        """Retrieval pagination: valid pages return slices; invalid pages are (skipped) server errors."""
        dataset_id, _, _ = add_chunks
        payload.update({"question": "chunk", "dataset_ids": [dataset_id]})
        res = retrieval_chunks(HttpApiAuth, payload)
        assert res["code"] == expected_code
        if expected_code == 0:
            assert len(res["data"]["chunks"]) == expected_page_size
        else:
            assert res["message"] == expected_message
    @pytest.mark.p3
    @pytest.mark.parametrize(
        "payload, expected_code, expected_page_size, expected_message",
        [
            pytest.param(
                {"page_size": None},
                100,
                0,
                """TypeError("int() argument must be a string, a bytes-like object or a real number, not \'NoneType\'")""",
                marks=pytest.mark.skip,
            ),
            # ({"page_size": 0}, 0, 0, ""),
            pytest.param({"page_size": 1}, 0, 1, "", marks=pytest.mark.skip(reason="issues/10692")),
            ({"page_size": 5}, 0, 4, ""),
            pytest.param({"page_size": "1"}, 0, 1, "", marks=pytest.mark.skip(reason="issues/10692")),
            # ({"page_size": -1}, 0, 0, ""),
            pytest.param(
                {"page_size": "a"},
                100,
                0,
                """ValueError("invalid literal for int() with base 10: \'a\'")""",
                marks=pytest.mark.skip,
            ),
        ],
    )
    def test_page_size(self, HttpApiAuth, add_chunks, payload, expected_code, expected_page_size, expected_message):
        """Retrieval page-size handling; only 4 chunks match "chunk", so size 5 returns 4."""
        dataset_id, _, _ = add_chunks
        payload.update({"question": "chunk", "dataset_ids": [dataset_id]})
        res = retrieval_chunks(HttpApiAuth, payload)
        assert res["code"] == expected_code
        if expected_code == 0:
            assert len(res["data"]["chunks"]) == expected_page_size
        else:
            assert res["message"] == expected_message
    @pytest.mark.p3
    @pytest.mark.parametrize(
        "payload, expected_code, expected_page_size, expected_message",
        [
            ({"vector_similarity_weight": 0}, 0, 4, ""),
            ({"vector_similarity_weight": 0.5}, 0, 4, ""),
            ({"vector_similarity_weight": 10}, 0, 4, ""),
            pytest.param(
                {"vector_similarity_weight": "a"},
                100,
                0,
                """ValueError("could not convert string to float: \'a\'")""",
                marks=pytest.mark.skip,
            ),
        ],
    )
    def test_vector_similarity_weight(self, HttpApiAuth, add_chunks, payload, expected_code, expected_page_size, expected_message):
        """Any numeric weight (even out-of-range 10) is accepted and does not change the match count."""
        dataset_id, _, _ = add_chunks
        payload.update({"question": "chunk", "dataset_ids": [dataset_id]})
        res = retrieval_chunks(HttpApiAuth, payload)
        assert res["code"] == expected_code
        if expected_code == 0:
            assert len(res["data"]["chunks"]) == expected_page_size
        else:
            assert res["message"] == expected_message
    @pytest.mark.p2
    @pytest.mark.parametrize(
        "payload, expected_code, expected_page_size, expected_message",
        [
            ({"top_k": 10}, 0, 4, ""),
            # top_k semantics differ per DOC_ENGINE, hence the paired skipif params.
            pytest.param(
                {"top_k": 1},
                0,
                4,
                "",
                marks=pytest.mark.skipif(os.getenv("DOC_ENGINE") in ["infinity", "opensearch"], reason="Infinity"),
            ),
            pytest.param(
                {"top_k": 1},
                0,
                1,
                "",
                marks=pytest.mark.skipif(os.getenv("DOC_ENGINE") in [None, "opensearch", "elasticsearch"], reason="elasticsearch"),
            ),
            pytest.param(
                {"top_k": -1},
                100,
                4,
                "must be greater than 0",
                marks=pytest.mark.skipif(os.getenv("DOC_ENGINE") in ["infinity", "opensearch"], reason="Infinity"),
            ),
            pytest.param(
                {"top_k": -1},
                100,
                4,
                "3014",
                marks=pytest.mark.skipif(os.getenv("DOC_ENGINE") in [None, "opensearch", "elasticsearch"], reason="elasticsearch"),
            ),
            pytest.param(
                {"top_k": "a"},
                100,
                0,
                """ValueError("invalid literal for int() with base 10: \'a\'")""",
                marks=pytest.mark.skip,
            ),
        ],
    )
    def test_top_k(self, HttpApiAuth, add_chunks, payload, expected_code, expected_page_size, expected_message):
        """top_k validation and engine-specific truncation behavior."""
        dataset_id, _, _ = add_chunks
        payload.update({"question": "chunk", "dataset_ids": [dataset_id]})
        res = retrieval_chunks(HttpApiAuth, payload)
        assert res["code"] == expected_code
        if expected_code == 0:
            assert len(res["data"]["chunks"]) == expected_page_size
        else:
            # Engine error strings vary, so only substring-match here.
            assert expected_message in res["message"]
@pytest.mark.skip
@pytest.mark.parametrize(
    "payload, expected_code, expected_message",
    [
        ({"rerank_id": "BAAI/bge-reranker-v2-m3"}, 0, ""),
        pytest.param({"rerank_id": "unknown"}, 100, "LookupError('Model(unknown) not authorized')", marks=pytest.mark.skip),
    ],
)
def test_rerank_id(self, HttpApiAuth, add_chunks, payload, expected_code, expected_message):
    """Reranker model selection (suite currently skipped as a whole)."""
    dataset_id = add_chunks[0]
    payload.update({"question": "chunk", "dataset_ids": [dataset_id]})
    response = retrieval_chunks(HttpApiAuth, payload)
    assert response["code"] == expected_code
    if expected_code != 0:
        assert expected_message in response["message"]
    else:
        # Success path: reranking must still yield at least one chunk.
        assert len(response["data"]["chunks"]) > 0
@pytest.mark.skip
@pytest.mark.parametrize(
    "payload, expected_code, expected_page_size, expected_message",
    [
        ({"keyword": True}, 0, 5, ""),
        ({"keyword": "True"}, 0, 5, ""),
        ({"keyword": False}, 0, 5, ""),
        ({"keyword": "False"}, 0, 5, ""),
        ({"keyword": None}, 0, 5, ""),
    ],
)
def test_keyword(self, HttpApiAuth, add_chunks, payload, expected_code, expected_page_size, expected_message):
    """Keyword-matching toggle should not change the retrieved page size."""
    dataset_id = add_chunks[0]
    payload.update({"question": "chunk test", "dataset_ids": [dataset_id]})
    response = retrieval_chunks(HttpApiAuth, payload)
    assert response["code"] == expected_code
    if expected_code != 0:
        assert response["message"] == expected_message
    else:
        assert len(response["data"]["chunks"]) == expected_page_size
@pytest.mark.p3
@pytest.mark.parametrize(
    "payload, expected_code, expected_highlight, expected_message",
    [
        ({"highlight": True}, 0, True, ""),
        ({"highlight": "True"}, 0, True, ""),
        ({"highlight": False}, 0, False, ""),
        ({"highlight": "False"}, 0, False, ""),
        pytest.param({"highlight": None}, 0, False, "", marks=pytest.mark.skip(reason="issues/6648")),
    ],
)
def test_highlight(self, HttpApiAuth, add_chunks, payload, expected_code, expected_highlight, expected_message):
    """Truthy `highlight` attaches a 'highlight' field to every returned chunk.

    Fix: only inspect res["data"]["chunks"] on success. The original indexed
    into the chunk list unconditionally, so an error response (which carries
    no chunk list) would raise a KeyError/TypeError before the intended
    message assertion ran. All current params expect code 0, so behavior for
    existing cases is unchanged.
    """
    dataset_id, _, _ = add_chunks
    payload.update({"question": "chunk", "dataset_ids": [dataset_id]})
    res = retrieval_chunks(HttpApiAuth, payload)
    assert res["code"] == expected_code
    if expected_code != 0:
        assert res["message"] == expected_message
        return
    for chunk in res["data"]["chunks"]:
        if expected_highlight:
            assert "highlight" in chunk
        else:
            assert "highlight" not in chunk
@pytest.mark.p3
def test_invalid_params(self, HttpApiAuth, add_chunks):
    """Unknown payload keys are silently ignored; retrieval still succeeds."""
    dataset_id = add_chunks[0]
    response = retrieval_chunks(
        HttpApiAuth,
        {"question": "chunk", "dataset_ids": [dataset_id], "a": "b"},
    )
    assert response["code"] == 0
    assert len(response["data"]["chunks"]) == 4
@pytest.mark.p3
def test_concurrent_retrieval(self, HttpApiAuth, add_chunks):
    """100 parallel retrievals against one dataset must all return code 0."""
    dataset_id = add_chunks[0]
    total = 100
    payload = {"question": "chunk", "dataset_ids": [dataset_id]}
    with ThreadPoolExecutor(max_workers=5) as pool:
        futures = [pool.submit(retrieval_chunks, HttpApiAuth, payload) for _ in range(total)]
    completed = list(as_completed(futures))
    assert len(completed) == total, completed
    assert all(f.result()["code"] == 0 for f in futures)
class TestDeletedChunksNotRetrievable:
    """Regression tests for issue #12520: deleted slices should not appear in retrieval/reference."""

    @pytest.mark.p1
    def test_deleted_chunk_not_in_retrieval(self, HttpApiAuth, add_document):
        """
        Test that a deleted chunk is not returned by the retrieval API.

        Steps:
        1. Add a chunk with unique content
        2. Verify the chunk is retrievable
        3. Delete the chunk
        4. Verify the chunk is no longer retrievable
        """
        dataset_id, document_id = add_document
        # Add a chunk with unique content that we can search for
        unique_content = "UNIQUE_TEST_CONTENT_12520_REGRESSION"
        res = add_chunk(HttpApiAuth, dataset_id, document_id, {"content": unique_content})
        assert res["code"] == 0, f"Failed to add chunk: {res}"
        chunk_id = res["data"]["chunk"]["id"]
        # Wait for indexing to complete
        # NOTE(review): fixed sleep assumes the doc engine indexes within 2s — potential flake source; confirm.
        sleep(2)
        # Verify the chunk is retrievable
        payload = {"question": unique_content, "dataset_ids": [dataset_id]}
        res = retrieval_chunks(HttpApiAuth, payload)
        assert res["code"] == 0, f"Retrieval failed: {res}"
        chunk_ids_before = [c["id"] for c in res["data"]["chunks"]]
        assert chunk_id in chunk_ids_before, f"Chunk {chunk_id} should be retrievable before deletion"
        # Delete the chunk
        res = delete_chunks(HttpApiAuth, dataset_id, document_id, {"chunk_ids": [chunk_id]})
        assert res["code"] == 0, f"Failed to delete chunk: {res}"
        # Wait for deletion to propagate
        sleep(1)
        # Verify the chunk is no longer retrievable
        res = retrieval_chunks(HttpApiAuth, payload)
        assert res["code"] == 0, f"Retrieval failed after deletion: {res}"
        chunk_ids_after = [c["id"] for c in res["data"]["chunks"]]
        assert chunk_id not in chunk_ids_after, f"Chunk {chunk_id} should NOT be retrievable after deletion"

    @pytest.mark.p2
    def test_deleted_chunks_batch_not_in_retrieval(self, HttpApiAuth, add_document):
        """
        Test that multiple deleted chunks are not returned by retrieval.
        """
        dataset_id, document_id = add_document
        # Add multiple chunks with unique content
        chunk_ids = []
        for i in range(3):
            unique_content = f"BATCH_DELETE_TEST_CHUNK_{i}_12520"
            res = add_chunk(HttpApiAuth, dataset_id, document_id, {"content": unique_content})
            assert res["code"] == 0, f"Failed to add chunk {i}: {res}"
            chunk_ids.append(res["data"]["chunk"]["id"])
        # Wait for indexing
        sleep(2)
        # Verify chunks are retrievable (shared prefix matches all three)
        payload = {"question": "BATCH_DELETE_TEST_CHUNK", "dataset_ids": [dataset_id]}
        res = retrieval_chunks(HttpApiAuth, payload)
        assert res["code"] == 0
        retrieved_ids_before = [c["id"] for c in res["data"]["chunks"]]
        for cid in chunk_ids:
            assert cid in retrieved_ids_before, f"Chunk {cid} should be retrievable before deletion"
        # Delete all chunks
        res = delete_chunks(HttpApiAuth, dataset_id, document_id, {"chunk_ids": chunk_ids})
        assert res["code"] == 0, f"Failed to delete chunks: {res}"
        # Wait for deletion to propagate
        sleep(1)
        # Verify none of the chunks are retrievable
        res = retrieval_chunks(HttpApiAuth, payload)
        assert res["code"] == 0
        retrieved_ids_after = [c["id"] for c in res["data"]["chunks"]]
        for cid in chunk_ids:
            assert cid not in retrieved_ids_after, f"Chunk {cid} should NOT be retrievable after deletion"
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_http_api/test_chunk_management_within_dataset/test_retrieval_chunks.py",
"license": "Apache License 2.0",
"lines": 365,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_http_api/test_chunk_management_within_dataset/test_update_chunk.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from concurrent.futures import ThreadPoolExecutor, as_completed
from random import randint
import pytest
from common import delete_documents, update_chunk
from configs import INVALID_API_TOKEN, INVALID_ID_32
from libs.auth import RAGFlowHttpApiAuth
@pytest.mark.p1
class TestAuthorization:
    """Chunk update must reject missing or invalid API keys."""

    @pytest.mark.parametrize(
        "invalid_auth, expected_code, expected_message",
        [
            (None, 0, "`Authorization` can't be empty"),
            (
                RAGFlowHttpApiAuth(INVALID_API_TOKEN),
                109,
                "Authentication error: API key is invalid!",
            ),
        ],
    )
    def test_invalid_auth(self, invalid_auth, expected_code, expected_message):
        response = update_chunk(invalid_auth, "dataset_id", "document_id", "chunk_id")
        assert response["code"] == expected_code
        assert response["message"] == expected_message
class TestUpdatedChunk:
    """PUT chunk-update cases: payload validation, invalid ids, concurrency, deleted parents."""

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "payload, expected_code, expected_message",
        [
            pytest.param({"content": None}, 0, "", marks=pytest.mark.skipif(os.getenv("DOC_ENGINE") == "infinity", reason="issues/6509")),
            pytest.param(
                {"content": ""},
                100,
                """APIRequestFailedError(\'Error code: 400, with error text {"error":{"code":"1213","message":"未正常接收到prompt参数。"}}\')""",
                marks=pytest.mark.skip(reason="issues/6541"),
            ),
            pytest.param(
                {"content": 1},
                100,
                "TypeError('expected string or bytes-like object')",
                marks=pytest.mark.skip,
            ),
            ({"content": "update chunk"}, 0, ""),
            pytest.param(
                {"content": " "},
                100,
                """APIRequestFailedError(\'Error code: 400, with error text {"error":{"code":"1213","message":"未正常接收到prompt参数。"}}\')""",
                marks=pytest.mark.skip(reason="issues/6541"),
            ),
            ({"content": "\n!?。;!?\"'"}, 0, ""),
        ],
    )
    def test_content(self, HttpApiAuth, add_chunks, payload, expected_code, expected_message):
        """Valid and invalid `content` values; non-zero codes carry the exact message."""
        dataset_id, document_id, chunk_ids = add_chunks
        res = update_chunk(HttpApiAuth, dataset_id, document_id, chunk_ids[0], payload)
        assert res["code"] == expected_code
        if expected_code != 0:
            assert res["message"] == expected_message

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "payload, expected_code, expected_message",
        [
            ({"important_keywords": ["a", "b", "c"]}, 0, ""),
            ({"important_keywords": [""]}, 0, ""),
            ({"important_keywords": [1]}, 100, "TypeError('sequence item 0: expected str instance, int found')"),
            ({"important_keywords": ["a", "a"]}, 0, ""),
            ({"important_keywords": "abc"}, 102, "`important_keywords` should be a list"),
            ({"important_keywords": 123}, 102, "`important_keywords` should be a list"),
        ],
    )
    def test_important_keywords(self, HttpApiAuth, add_chunks, payload, expected_code, expected_message):
        """`important_keywords` must be a list of strings."""
        dataset_id, document_id, chunk_ids = add_chunks
        res = update_chunk(HttpApiAuth, dataset_id, document_id, chunk_ids[0], payload)
        assert res["code"] == expected_code
        if expected_code != 0:
            assert res["message"] == expected_message

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "payload, expected_code, expected_message",
        [
            ({"questions": ["a", "b", "c"]}, 0, ""),
            ({"questions": [""]}, 0, ""),
            ({"questions": [1]}, 100, "TypeError('sequence item 0: expected str instance, int found')"),
            ({"questions": ["a", "a"]}, 0, ""),
            ({"questions": "abc"}, 102, "`questions` should be a list"),
            ({"questions": 123}, 102, "`questions` should be a list"),
        ],
    )
    def test_questions(self, HttpApiAuth, add_chunks, payload, expected_code, expected_message):
        """`questions` must be a list of strings."""
        dataset_id, document_id, chunk_ids = add_chunks
        res = update_chunk(HttpApiAuth, dataset_id, document_id, chunk_ids[0], payload)
        assert res["code"] == expected_code
        if expected_code != 0:
            assert res["message"] == expected_message

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "payload, expected_code, expected_message",
        [
            ({"available": True}, 0, ""),
            pytest.param({"available": "True"}, 100, """ValueError("invalid literal for int() with base 10: \'True\'")""", marks=pytest.mark.skip),
            ({"available": 1}, 0, ""),
            ({"available": False}, 0, ""),
            pytest.param({"available": "False"}, 100, """ValueError("invalid literal for int() with base 10: \'False\'")""", marks=pytest.mark.skip),
            ({"available": 0}, 0, ""),
        ],
    )
    def test_available(
        self,
        HttpApiAuth,
        add_chunks,
        payload,
        expected_code,
        expected_message,
    ):
        """`available` accepts booleans and the ints 0/1."""
        dataset_id, document_id, chunk_ids = add_chunks
        res = update_chunk(HttpApiAuth, dataset_id, document_id, chunk_ids[0], payload)
        assert res["code"] == expected_code
        if expected_code != 0:
            assert res["message"] == expected_message

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "dataset_id, expected_code, expected_message",
        [
            pytest.param(INVALID_ID_32, 102, f"You don't own the dataset {INVALID_ID_32}.", marks=pytest.mark.skipif(os.getenv("DOC_ENGINE") == "infinity", reason="infinity")),
            pytest.param(INVALID_ID_32, 102, "Can't find this chunk", marks=pytest.mark.skipif(os.getenv("DOC_ENGINE") in [None, "opensearch", "elasticsearch"], reason="elasticsearch")),
        ],
    )
    def test_invalid_dataset_id(self, HttpApiAuth, add_chunks, dataset_id, expected_code, expected_message):
        """Unknown dataset id: error wording differs per doc engine, so substring match."""
        _, document_id, chunk_ids = add_chunks
        res = update_chunk(HttpApiAuth, dataset_id, document_id, chunk_ids[0])
        assert res["code"] == expected_code
        assert expected_message in res["message"]

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "document_id, expected_code, expected_message",
        [
            (
                INVALID_ID_32,
                102,
                f"You don't own the document {INVALID_ID_32}.",
            ),
        ],
    )
    def test_invalid_document_id(self, HttpApiAuth, add_chunks, document_id, expected_code, expected_message):
        """Unknown document id yields an ownership error."""
        dataset_id, _, chunk_ids = add_chunks
        res = update_chunk(HttpApiAuth, dataset_id, document_id, chunk_ids[0])
        assert res["code"] == expected_code
        assert res["message"] == expected_message

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "chunk_id, expected_code, expected_message",
        [
            (
                INVALID_ID_32,
                102,
                f"Can't find this chunk {INVALID_ID_32}",
            ),
        ],
    )
    def test_invalid_chunk_id(self, HttpApiAuth, add_chunks, chunk_id, expected_code, expected_message):
        """Unknown chunk id yields a not-found error."""
        dataset_id, document_id, _ = add_chunks
        res = update_chunk(HttpApiAuth, dataset_id, document_id, chunk_id)
        assert res["code"] == expected_code
        assert res["message"] == expected_message

    @pytest.mark.p3
    def test_repeated_update_chunk(self, HttpApiAuth, add_chunks):
        """Updating the same chunk twice in a row succeeds both times."""
        dataset_id, document_id, chunk_ids = add_chunks
        res = update_chunk(HttpApiAuth, dataset_id, document_id, chunk_ids[0], {"content": "chunk test 1"})
        assert res["code"] == 0
        res = update_chunk(HttpApiAuth, dataset_id, document_id, chunk_ids[0], {"content": "chunk test 2"})
        assert res["code"] == 0

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "payload, expected_code, expected_message",
        [
            ({"unknown_key": "unknown_value"}, 0, ""),
            ({}, 0, ""),
            pytest.param(None, 100, """TypeError("argument of type \'NoneType\' is not iterable")""", marks=pytest.mark.skip),
        ],
    )
    def test_invalid_params(self, HttpApiAuth, add_chunks, payload, expected_code, expected_message):
        """Unknown keys and empty payloads are ignored; None is rejected (skipped)."""
        dataset_id, document_id, chunk_ids = add_chunks
        res = update_chunk(HttpApiAuth, dataset_id, document_id, chunk_ids[0], payload)
        assert res["code"] == expected_code
        if expected_code != 0:
            assert res["message"] == expected_message

    @pytest.mark.p3
    @pytest.mark.skipif(os.getenv("DOC_ENGINE") == "infinity", reason="issues/6554")
    def test_concurrent_update_chunk(self, HttpApiAuth, add_chunks):
        """50 concurrent updates over 4 chunks (5 workers) must all succeed."""
        count = 50
        dataset_id, document_id, chunk_ids = add_chunks
        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [
                executor.submit(
                    update_chunk,
                    HttpApiAuth,
                    dataset_id,
                    document_id,
                    chunk_ids[randint(0, 3)],  # random target exercises write contention
                    {"content": f"update chunk test {i}"},
                )
                for i in range(count)
            ]
        responses = list(as_completed(futures))
        assert len(responses) == count, responses
        assert all(future.result()["code"] == 0 for future in futures)

    @pytest.mark.p3
    def test_update_chunk_to_deleted_document(self, HttpApiAuth, add_chunks):
        """Updating a chunk of a deleted document fails; wording depends on doc engine."""
        dataset_id, document_id, chunk_ids = add_chunks
        delete_documents(HttpApiAuth, dataset_id, {"ids": [document_id]})
        res = update_chunk(HttpApiAuth, dataset_id, document_id, chunk_ids[0])
        assert res["code"] == 102
        assert res["message"] in [f"You don't own the document {document_id}.", f"Can't find this chunk {chunk_ids[0]}"]
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_http_api/test_chunk_management_within_dataset/test_update_chunk.py",
"license": "Apache License 2.0",
"lines": 228,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_http_api/test_file_management_within_dataset/test_delete_documents.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from common import bulk_upload_documents, delete_documents, list_documents
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowHttpApiAuth
@pytest.mark.p1
class TestAuthorization:
    """Document deletion must reject missing or invalid API keys."""

    @pytest.mark.parametrize(
        "invalid_auth, expected_code, expected_message",
        [
            (None, 0, "`Authorization` can't be empty"),
            (
                RAGFlowHttpApiAuth(INVALID_API_TOKEN),
                109,
                "Authentication error: API key is invalid!",
            ),
        ],
    )
    def test_invalid_auth(self, invalid_auth, expected_code, expected_message):
        response = delete_documents(invalid_auth, "dataset_id")
        assert response["code"] == expected_code
        assert response["message"] == expected_message
class TestDocumentsDeletion:
    """DELETE documents: payload shapes, partial-invalid ids, repeated and duplicate deletion."""

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "payload, expected_code, expected_message, remaining",
        [
            (None, 0, "", 0),
            ({"ids": []}, 0, "", 0),
            ({"ids": ["invalid_id"]}, 102, "Documents not found: ['invalid_id']", 3),
            (
                {"ids": ["\n!?。;!?\"'"]},
                102,
                """Documents not found: [\'\\n!?。;!?"\\\'\']""",
                3,
            ),
            (
                "not json",
                100,
                "AttributeError(\"'str' object has no attribute 'get'\")",
                3,
            ),
            (lambda r: {"ids": r[:1]}, 0, "", 2),
            (lambda r: {"ids": r}, 0, "", 0),
        ],
    )
    def test_basic_scenarios(
        self,
        HttpApiAuth,
        add_documents_func,
        payload,
        expected_code,
        expected_message,
        remaining,
    ):
        """`remaining` is the expected document count after the delete attempt."""
        dataset_id, document_ids = add_documents_func
        # Callable params build the payload from the actually-uploaded ids.
        if callable(payload):
            payload = payload(document_ids)
        res = delete_documents(HttpApiAuth, dataset_id, payload)
        assert res["code"] == expected_code
        if res["code"] != 0:
            assert res["message"] == expected_message
        res = list_documents(HttpApiAuth, dataset_id)
        assert len(res["data"]["docs"]) == remaining
        assert res["data"]["total"] == remaining

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "dataset_id, expected_code, expected_message",
        [
            ("", 100, "<MethodNotAllowed '405: Method Not Allowed'>"),
            (
                "invalid_dataset_id",
                102,
                "You don't own the dataset invalid_dataset_id. ",
            ),
        ],
    )
    def test_invalid_dataset_id(self, HttpApiAuth, add_documents_func, dataset_id, expected_code, expected_message):
        """Empty id routes to a 405; unknown id is an ownership error."""
        _, document_ids = add_documents_func
        res = delete_documents(HttpApiAuth, dataset_id, {"ids": document_ids[:1]})
        assert res["code"] == expected_code
        assert res["message"] == expected_message

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "payload",
        [
            lambda r: {"ids": ["invalid_id"] + r},
            lambda r: {"ids": r[:1] + ["invalid_id"] + r[1:3]},
            lambda r: {"ids": r + ["invalid_id"]},
        ],
    )
    def test_delete_partial_invalid_id(self, HttpApiAuth, add_documents_func, payload):
        """One bad id among valid ones: request reports the bad id but valid docs are removed."""
        dataset_id, document_ids = add_documents_func
        if callable(payload):
            payload = payload(document_ids)
        res = delete_documents(HttpApiAuth, dataset_id, payload)
        assert res["code"] == 102
        assert res["message"] == "Documents not found: ['invalid_id']"
        res = list_documents(HttpApiAuth, dataset_id)
        assert len(res["data"]["docs"]) == 0
        assert res["data"]["total"] == 0

    @pytest.mark.p2
    def test_repeated_deletion(self, HttpApiAuth, add_documents_func):
        """A second delete of the same ids reports them as not found."""
        dataset_id, document_ids = add_documents_func
        res = delete_documents(HttpApiAuth, dataset_id, {"ids": document_ids})
        assert res["code"] == 0
        res = delete_documents(HttpApiAuth, dataset_id, {"ids": document_ids})
        assert res["code"] == 102
        assert "Documents not found" in res["message"]

    @pytest.mark.p2
    def test_duplicate_deletion(self, HttpApiAuth, add_documents_func):
        """Duplicated ids in one request: success with per-duplicate warnings."""
        dataset_id, document_ids = add_documents_func
        res = delete_documents(HttpApiAuth, dataset_id, {"ids": document_ids + document_ids})
        assert res["code"] == 0
        assert "Duplicate document ids" in res["data"]["errors"][0]
        assert res["data"]["success_count"] == 3
        res = list_documents(HttpApiAuth, dataset_id)
        assert len(res["data"]["docs"]) == 0
        assert res["data"]["total"] == 0
@pytest.mark.p3
def test_concurrent_deletion(HttpApiAuth, add_dataset, tmp_path):
    """Delete 100 documents concurrently (one id per request); every call must succeed.

    NOTE(review): unlike the methods above, this test takes no ``self`` — confirm
    it is defined at module level rather than indented inside
    ``TestDocumentsDeletion``; if it sits in the class, pytest would bind the
    instance to the ``HttpApiAuth`` parameter instead of the fixture.
    """
    count = 100
    dataset_id = add_dataset
    document_ids = bulk_upload_documents(HttpApiAuth, dataset_id, count, tmp_path)
    with ThreadPoolExecutor(max_workers=5) as executor:
        futures = [
            executor.submit(
                delete_documents,
                HttpApiAuth,
                dataset_id,
                {"ids": document_ids[i : i + 1]},  # one-element slice keeps the payload a list
            )
            for i in range(count)
        ]
    responses = list(as_completed(futures))
    assert len(responses) == count, responses
    assert all(future.result()["code"] == 0 for future in futures)
@pytest.mark.p3
def test_delete_1k(HttpApiAuth, add_dataset, tmp_path):
    """Bulk path: upload 1,000 documents, then delete them all in a single request.

    NOTE(review): no ``self`` parameter — confirm module-level placement
    (see test_concurrent_deletion).
    """
    documents_num = 1_000
    dataset_id = add_dataset
    document_ids = bulk_upload_documents(HttpApiAuth, dataset_id, documents_num, tmp_path)
    res = list_documents(HttpApiAuth, dataset_id)
    assert res["data"]["total"] == documents_num
    res = delete_documents(HttpApiAuth, dataset_id, {"ids": document_ids})
    assert res["code"] == 0
    res = list_documents(HttpApiAuth, dataset_id)
    assert res["data"]["total"] == 0
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_http_api/test_file_management_within_dataset/test_delete_documents.py",
"license": "Apache License 2.0",
"lines": 163,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_http_api/test_file_management_within_dataset/test_download_document.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from common import bulk_upload_documents, download_document, upload_documents
from configs import INVALID_API_TOKEN, INVALID_ID_32
from libs.auth import RAGFlowHttpApiAuth
from requests import codes
from utils import compare_by_hash
@pytest.mark.p1
class TestAuthorization:
    """Download returns HTTP 200 with an in-band JSON error for bad credentials."""

    @pytest.mark.parametrize(
        "invalid_auth, expected_code, expected_message",
        [
            (None, 0, "`Authorization` can't be empty"),
            (
                RAGFlowHttpApiAuth(INVALID_API_TOKEN),
                109,
                "Authentication error: API key is invalid!",
            ),
        ],
    )
    def test_invalid_auth(self, invalid_auth, tmp_path, expected_code, expected_message):
        target = tmp_path / "ragflow_tes.txt"
        res = download_document(invalid_auth, "dataset_id", "document_id", target)
        assert res.status_code == codes.ok
        # The "downloaded" file actually contains the JSON error envelope.
        with target.open("r") as f:
            body = json.load(f)
        assert body["code"] == expected_code
        assert body["message"] == expected_message
@pytest.mark.p1
@pytest.mark.parametrize(
    "generate_test_files",
    [
        "docx",
        "excel",
        "ppt",
        "image",
        "pdf",
        "txt",
        "md",
        "json",
        "eml",
        "html",
    ],
    indirect=True,
)
def test_file_type_validation(HttpApiAuth, add_dataset, generate_test_files, request):
    """Upload each supported file type, download it back, and compare hashes.

    NOTE(review): no ``self`` parameter — confirm this is a module-level test
    and not accidentally indented inside a class (pytest would then bind the
    instance to the ``HttpApiAuth`` slot).
    """
    dataset_id = add_dataset
    # Resolve the concrete temp file the indirect fixture created for this param.
    fp = generate_test_files[request.node.callspec.params["generate_test_files"]]
    res = upload_documents(HttpApiAuth, dataset_id, [fp])
    document_id = res["data"][0]["id"]
    res = download_document(
        HttpApiAuth,
        dataset_id,
        document_id,
        fp.with_stem("ragflow_test_download"),
    )
    assert res.status_code == codes.ok
    # Round trip must be byte-identical.
    assert compare_by_hash(
        fp,
        fp.with_stem("ragflow_test_download"),
    )
class TestDocumentDownload:
    """Document download: invalid ids and repeated downloads of the same file."""

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "document_id, expected_code, expected_message",
        [
            (
                INVALID_ID_32,
                102,
                f"The dataset not own the document {INVALID_ID_32}.",
            ),
        ],
    )
    def test_invalid_document_id(self, HttpApiAuth, add_documents, tmp_path, document_id, expected_code, expected_message):
        """Unknown document id: HTTP 200 but the file body is a JSON error envelope."""
        dataset_id, _ = add_documents
        res = download_document(
            HttpApiAuth,
            dataset_id,
            document_id,
            tmp_path / "ragflow_test_download_1.txt",
        )
        assert res.status_code == codes.ok
        with (tmp_path / "ragflow_test_download_1.txt").open("r") as f:
            response_json = json.load(f)
        assert response_json["code"] == expected_code
        assert response_json["message"] == expected_message

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "dataset_id, expected_code, expected_message",
        [
            (
                INVALID_ID_32,
                102,
                f"You do not own the dataset {INVALID_ID_32}.",
            ),
        ],
    )
    def test_invalid_dataset_id(self, HttpApiAuth, add_documents, tmp_path, dataset_id, expected_code, expected_message):
        """Unknown dataset id: HTTP 200 but the file body is a JSON error envelope."""
        _, document_ids = add_documents
        res = download_document(
            HttpApiAuth,
            dataset_id,
            document_ids[0],
            tmp_path / "ragflow_test_download_1.txt",
        )
        assert res.status_code == codes.ok
        with (tmp_path / "ragflow_test_download_1.txt").open("r") as f:
            response_json = json.load(f)
        assert response_json["code"] == expected_code
        assert response_json["message"] == expected_message

    @pytest.mark.p3
    def test_same_file_repeat(self, HttpApiAuth, add_documents, tmp_path, ragflow_tmp_dir):
        """Five consecutive downloads of one document all hash-match the original upload."""
        num = 5
        dataset_id, document_ids = add_documents
        for i in range(num):
            res = download_document(
                HttpApiAuth,
                dataset_id,
                document_ids[0],
                tmp_path / f"ragflow_test_download_{i}.txt",
            )
            assert res.status_code == codes.ok
            assert compare_by_hash(
                ragflow_tmp_dir / "ragflow_test_upload_0.txt",
                tmp_path / f"ragflow_test_download_{i}.txt",
            )
@pytest.mark.p3
def test_concurrent_download(HttpApiAuth, add_dataset, tmp_path):
    """Download 20 documents in parallel; each must hash-match its uploaded source.

    NOTE(review): no ``self`` parameter — confirm module-level placement, not
    inside ``TestDocumentDownload``.
    """
    count = 20
    dataset_id = add_dataset
    document_ids = bulk_upload_documents(HttpApiAuth, dataset_id, count, tmp_path)
    with ThreadPoolExecutor(max_workers=5) as executor:
        futures = [
            executor.submit(
                download_document,
                HttpApiAuth,
                dataset_id,
                document_ids[i],
                tmp_path / f"ragflow_test_download_{i}.txt",
            )
            for i in range(count)
        ]
    responses = list(as_completed(futures))
    assert len(responses) == count, responses
    for i in range(count):
        # bulk_upload_documents wrote ragflow_test_upload_{i}.txt into tmp_path.
        assert compare_by_hash(
            tmp_path / f"ragflow_test_upload_{i}.txt",
            tmp_path / f"ragflow_test_download_{i}.txt",
        )
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_http_api/test_file_management_within_dataset/test_download_document.py",
"license": "Apache License 2.0",
"lines": 164,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_http_api/test_file_management_within_dataset/test_list_documents.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from common import list_documents
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowHttpApiAuth
from utils import is_sorted
@pytest.mark.p1
class TestAuthorization:
    """Listing documents requires a valid API key."""

    @pytest.mark.parametrize(
        "invalid_auth, expected_code, expected_message",
        [
            (None, 0, "`Authorization` can't be empty"),
            (
                RAGFlowHttpApiAuth(INVALID_API_TOKEN),
                109,
                "Authentication error: API key is invalid!",
            ),
        ],
    )
    def test_invalid_auth(self, invalid_auth, expected_code, expected_message):
        response = list_documents(invalid_auth, "dataset_id")
        assert response["code"] == expected_code
        assert response["message"] == expected_message
class TestDocumentsList:
@pytest.mark.p1
def test_default(self, HttpApiAuth, add_documents):
    """Without filters, all five uploaded documents are listed."""
    dataset_id = add_documents[0]
    response = list_documents(HttpApiAuth, dataset_id)
    assert response["code"] == 0
    assert len(response["data"]["docs"]) == 5
    assert response["data"]["total"] == 5
@pytest.mark.p3
@pytest.mark.parametrize(
    "dataset_id, expected_code, expected_message",
    [
        ("", 100, "<MethodNotAllowed '405: Method Not Allowed'>"),
        (
            "invalid_dataset_id",
            102,
            "You don't own the dataset invalid_dataset_id. ",
        ),
    ],
)
def test_invalid_dataset_id(self, HttpApiAuth, dataset_id, expected_code, expected_message):
    """Empty id hits a 405 route; unknown id is an ownership error."""
    response = list_documents(HttpApiAuth, dataset_id)
    assert response["code"] == expected_code
    assert response["message"] == expected_message
@pytest.mark.p1
@pytest.mark.parametrize(
    "params, expected_code, expected_page_size, expected_message",
    [
        ({"page": None, "page_size": 2}, 0, 2, ""),
        ({"page": 0, "page_size": 2}, 0, 2, ""),
        ({"page": 2, "page_size": 2}, 0, 2, ""),
        ({"page": 3, "page_size": 2}, 0, 1, ""),
        ({"page": "3", "page_size": 2}, 0, 1, ""),
        pytest.param(
            {"page": -1, "page_size": 2},
            100,
            0,
            "1064",
            marks=pytest.mark.skip(reason="issues/5851"),
        ),
        pytest.param(
            {"page": "a", "page_size": 2},
            100,
            0,
            """ValueError("invalid literal for int() with base 10: \'a\'")""",
            marks=pytest.mark.skip(reason="issues/5851"),
        ),
    ],
)
def test_page(self, HttpApiAuth, add_documents, params, expected_code, expected_page_size, expected_message):
    """Paging over the five uploaded documents; total stays 5 regardless of page."""
    dataset_id, _ = add_documents
    response = list_documents(HttpApiAuth, dataset_id, params=params)
    assert response["code"] == expected_code
    if expected_code != 0:
        assert response["message"] == expected_message
    else:
        assert len(response["data"]["docs"]) == expected_page_size
        assert response["data"]["total"] == 5
@pytest.mark.p1
@pytest.mark.parametrize(
    "params, expected_code, expected_page_size, expected_message",
    [
        ({"page_size": None}, 0, 5, ""),
        ({"page_size": 0}, 0, 0, ""),
        ({"page_size": 1}, 0, 1, ""),
        ({"page_size": 6}, 0, 5, ""),
        ({"page_size": "1"}, 0, 1, ""),
        pytest.param(
            {"page_size": -1},
            100,
            0,
            "1064",
            marks=pytest.mark.skip(reason="issues/5851"),
        ),
        pytest.param(
            {"page_size": "a"},
            100,
            0,
            """ValueError("invalid literal for int() with base 10: \'a\'")""",
            marks=pytest.mark.skip(reason="issues/5851"),
        ),
    ],
)
def test_page_size(self, HttpApiAuth, add_documents, params, expected_code, expected_page_size, expected_message):
    """page_size is clamped to the available five documents."""
    dataset_id, _ = add_documents
    response = list_documents(HttpApiAuth, dataset_id, params=params)
    assert response["code"] == expected_code
    if expected_code != 0:
        assert response["message"] == expected_message
    else:
        assert len(response["data"]["docs"]) == expected_page_size
@pytest.mark.p3
@pytest.mark.parametrize(
    "params, expected_code, assertions, expected_message",
    [
        ({"orderby": None}, 0, lambda r: (is_sorted(r["data"]["docs"], "create_time", True)), ""),
        ({"orderby": "create_time"}, 0, lambda r: (is_sorted(r["data"]["docs"], "create_time", True)), ""),
        ({"orderby": "update_time"}, 0, lambda r: (is_sorted(r["data"]["docs"], "update_time", True)), ""),
        pytest.param({"orderby": "name", "desc": "False"}, 0, lambda r: (is_sorted(r["data"]["docs"], "name", False)), "", marks=pytest.mark.skip(reason="issues/5851")),
        pytest.param({"orderby": "unknown"}, 102, 0, "orderby should be create_time or update_time", marks=pytest.mark.skip(reason="issues/5851")),
    ],
)
def test_orderby(self, HttpApiAuth, add_documents, params, expected_code, assertions, expected_message):
    """Sort-order checks; `assertions` is a callable validator over the response."""
    dataset_id, _ = add_documents
    response = list_documents(HttpApiAuth, dataset_id, params=params)
    assert response["code"] == expected_code
    if expected_code != 0:
        assert response["message"] == expected_message
    elif callable(assertions):
        assert assertions(response)
@pytest.mark.p3
@pytest.mark.parametrize(
    "params, expected_code, assertions, expected_message",
    [
        ({"desc": None}, 0, lambda r: (is_sorted(r["data"]["docs"], "create_time", True)), ""),
        ({"desc": "true"}, 0, lambda r: (is_sorted(r["data"]["docs"], "create_time", True)), ""),
        ({"desc": "True"}, 0, lambda r: (is_sorted(r["data"]["docs"], "create_time", True)), ""),
        ({"desc": True}, 0, lambda r: (is_sorted(r["data"]["docs"], "create_time", True)), ""),
        pytest.param({"desc": "false"}, 0, lambda r: (is_sorted(r["data"]["docs"], "create_time", False)), "", marks=pytest.mark.skip(reason="issues/5851")),
        ({"desc": "False"}, 0, lambda r: (is_sorted(r["data"]["docs"], "create_time", False)), ""),
        ({"desc": False}, 0, lambda r: (is_sorted(r["data"]["docs"], "create_time", False)), ""),
        ({"desc": "False", "orderby": "update_time"}, 0, lambda r: (is_sorted(r["data"]["docs"], "update_time", False)), ""),
        pytest.param({"desc": "unknown"}, 102, 0, "desc should be true or false", marks=pytest.mark.skip(reason="issues/5851")),
    ],
)
def test_desc(self, HttpApiAuth, add_documents, params, expected_code, assertions, expected_message):
    """Ascending/descending toggle; `assertions` is a callable validator over the response."""
    dataset_id, _ = add_documents
    response = list_documents(HttpApiAuth, dataset_id, params=params)
    assert response["code"] == expected_code
    if expected_code != 0:
        assert response["message"] == expected_message
    elif callable(assertions):
        assert assertions(response)
@pytest.mark.p2
@pytest.mark.parametrize(
"params, expected_num",
[
({"keywords": None}, 5),
({"keywords": ""}, 5),
({"keywords": "0"}, 1),
({"keywords": "ragflow_test_upload"}, 5),
({"keywords": "unknown"}, 0),
],
)
def test_keywords(self, HttpApiAuth, add_documents, params, expected_num):
dataset_id, _ = add_documents
res = list_documents(HttpApiAuth, dataset_id, params=params)
assert res["code"] == 0
assert len(res["data"]["docs"]) == expected_num
assert res["data"]["total"] == expected_num
    @pytest.mark.p1
    @pytest.mark.parametrize(
        "params, expected_code, expected_num, expected_message",
        [
            # None/empty name means "no filter": all 5 uploaded docs come back.
            ({"name": None}, 0, 5, ""),
            ({"name": ""}, 0, 5, ""),
            ({"name": "ragflow_test_upload_0.txt"}, 0, 1, ""),
            (
                {"name": "unknown.txt"},
                102,
                0,
                "You don't own the document unknown.txt.",
            ),
        ],
    )
    def test_name(
        self,
        HttpApiAuth,
        add_documents,
        params,
        expected_code,
        expected_num,
        expected_message,
    ):
        """Filter the listing by exact document name."""
        dataset_id, _ = add_documents
        res = list_documents(HttpApiAuth, dataset_id, params=params)
        assert res["code"] == expected_code
        if expected_code == 0:
            if params["name"] in [None, ""]:
                assert len(res["data"]["docs"]) == expected_num
            else:
                # Exact-name filter: the first (only) hit carries the requested name.
                assert res["data"]["docs"][0]["name"] == params["name"]
        else:
            assert res["message"] == expected_message
    @pytest.mark.p1
    @pytest.mark.parametrize(
        "document_id, expected_code, expected_num, expected_message",
        [
            # None/empty id means "no filter".
            (None, 0, 5, ""),
            ("", 0, 5, ""),
            # Callable params are resolved against the uploaded document ids.
            (lambda r: r[0], 0, 1, ""),
            ("unknown.txt", 102, 0, "You don't own the document unknown.txt."),
        ],
    )
    def test_id(
        self,
        HttpApiAuth,
        add_documents,
        document_id,
        expected_code,
        expected_num,
        expected_message,
    ):
        """Filter the listing by document id."""
        dataset_id, document_ids = add_documents
        if callable(document_id):
            params = {"id": document_id(document_ids)}
        else:
            params = {"id": document_id}
        res = list_documents(HttpApiAuth, dataset_id, params=params)
        assert res["code"] == expected_code
        if expected_code == 0:
            if params["id"] in [None, ""]:
                assert len(res["data"]["docs"]) == expected_num
            else:
                assert res["data"]["docs"][0]["id"] == params["id"]
        else:
            assert res["message"] == expected_message
@pytest.mark.p3
@pytest.mark.parametrize(
"document_id, name, expected_code, expected_num, expected_message",
[
(lambda r: r[0], "ragflow_test_upload_0.txt", 0, 1, ""),
(lambda r: r[0], "ragflow_test_upload_1.txt", 0, 0, ""),
(lambda r: r[0], "unknown", 102, 0, "You don't own the document unknown."),
(
"id",
"ragflow_test_upload_0.txt",
102,
0,
"You don't own the document id.",
),
],
)
def test_name_and_id(
self,
HttpApiAuth,
add_documents,
document_id,
name,
expected_code,
expected_num,
expected_message,
):
dataset_id, document_ids = add_documents
if callable(document_id):
params = {"id": document_id(document_ids), "name": name}
else:
params = {"id": document_id, "name": name}
res = list_documents(HttpApiAuth, dataset_id, params=params)
if expected_code == 0:
assert len(res["data"]["docs"]) == expected_num
else:
assert res["message"] == expected_message
@pytest.mark.p3
def test_concurrent_list(self, HttpApiAuth, add_documents):
dataset_id, _ = add_documents
count = 100
with ThreadPoolExecutor(max_workers=5) as executor:
futures = [executor.submit(list_documents, HttpApiAuth, dataset_id) for _ in range(count)]
responses = list(as_completed(futures))
assert len(responses) == count, responses
assert all(future.result()["code"] == 0 for future in futures)
@pytest.mark.p3
def test_invalid_params(self, HttpApiAuth, add_documents):
dataset_id, _ = add_documents
params = {"a": "b"}
res = list_documents(HttpApiAuth, dataset_id, params=params)
assert res["code"] == 0
assert len(res["data"]["docs"]) == 5
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_http_api/test_file_management_within_dataset/test_list_documents.py",
"license": "Apache License 2.0",
"lines": 341,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_http_api/test_file_management_within_dataset/test_parse_documents.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from common import bulk_upload_documents, list_documents, parse_documents
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowHttpApiAuth
from utils import wait_for
@wait_for(30, 1, "Document parsing timeout")
def condition(_auth, _dataset_id, _document_ids=None):
    """Poll until the watched documents report run == "DONE" (all docs when ids is None)."""
    docs = list_documents(_auth, _dataset_id)["data"]["docs"]
    if _document_ids is None:
        # No explicit ids: every document in the dataset must be finished.
        return all(doc["run"] == "DONE" for doc in docs)
    # NOTE(review): a target id absent from the listing counts as done here
    # (same as the original loop) — confirm pagination cannot hide a doc.
    watched = set(_document_ids)
    return all(doc.get("run") == "DONE" for doc in docs if doc["id"] in watched)
def validate_document_details(auth, dataset_id, document_ids):
    """Assert each document in *document_ids* carries complete parse-success metadata."""
    for doc_id in document_ids:
        listing = list_documents(auth, dataset_id, params={"id": doc_id})
        doc = listing["data"]["docs"][0]
        assert doc["run"] == "DONE"
        assert len(doc["process_begin_at"]) > 0  # begin timestamp was recorded
        assert doc["process_duration"] > 0
        assert doc["progress"] > 0
        assert "Task done" in doc["progress_msg"]
@pytest.mark.p1
class TestAuthorization:
    """The parse endpoint rejects missing or invalid API keys."""

    @pytest.mark.parametrize(
        "invalid_auth, expected_code, expected_message",
        [
            # No auth at all.
            (None, 0, "`Authorization` can't be empty"),
            (
                RAGFlowHttpApiAuth(INVALID_API_TOKEN),
                109,
                "Authentication error: API key is invalid!",
            ),
        ],
    )
    def test_invalid_auth(self, invalid_auth, expected_code, expected_message):
        """Calls with bad credentials fail before any dataset lookup happens."""
        res = parse_documents(invalid_auth, "dataset_id")
        assert res["code"] == expected_code
        assert res["message"] == expected_message
class TestDocumentsParse:
    """Happy-path and error-path coverage for the document-parse endpoint."""

    @pytest.mark.parametrize(
        "payload, expected_code, expected_message",
        [
            pytest.param(None, 102, """AttributeError("\'NoneType\' object has no attribute \'get\'")""", marks=pytest.mark.skip),
            pytest.param({"document_ids": []}, 102, "`document_ids` is required", marks=pytest.mark.p1),
            pytest.param({"document_ids": ["invalid_id"]}, 102, "Documents not found: ['invalid_id']", marks=pytest.mark.p3),
            pytest.param({"document_ids": ["\n!?。;!?\"'"]}, 102, """Documents not found: [\'\\n!?。;!?"\\\'\']""", marks=pytest.mark.p3),
            pytest.param("not json", 102, "AttributeError(\"'str' object has no attribute 'get'\")", marks=pytest.mark.skip),
            # Callable payloads are resolved against the uploaded document ids.
            pytest.param(lambda r: {"document_ids": r[:1]}, 0, "", marks=pytest.mark.p1),
            pytest.param(lambda r: {"document_ids": r}, 0, "", marks=pytest.mark.p1),
        ],
    )
    def test_basic_scenarios(self, HttpApiAuth, add_documents_func, payload, expected_code, expected_message):
        """Parse with valid/invalid payloads; on success wait for and validate completion."""
        dataset_id, document_ids = add_documents_func
        if callable(payload):
            payload = payload(document_ids)
        res = parse_documents(HttpApiAuth, dataset_id, payload)
        assert res["code"] == expected_code
        if expected_code != 0:
            assert res["message"] == expected_message
        if expected_code == 0:
            condition(HttpApiAuth, dataset_id, payload["document_ids"])
            validate_document_details(HttpApiAuth, dataset_id, payload["document_ids"])

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "dataset_id, expected_code, expected_message",
        [
            # Empty dataset id turns the URL into a different route -> 405.
            ("", 100, "<MethodNotAllowed '405: Method Not Allowed'>"),
            (
                "invalid_dataset_id",
                102,
                "You don't own the dataset invalid_dataset_id.",
            ),
        ],
    )
    def test_invalid_dataset_id(
        self,
        HttpApiAuth,
        add_documents_func,
        dataset_id,
        expected_code,
        expected_message,
    ):
        """Parsing through a bad dataset id fails with the matching error."""
        _, document_ids = add_documents_func
        res = parse_documents(HttpApiAuth, dataset_id, {"document_ids": document_ids})
        assert res["code"] == expected_code
        assert res["message"] == expected_message

    @pytest.mark.parametrize(
        "payload",
        [
            pytest.param(lambda r: {"document_ids": ["invalid_id"] + r}, marks=pytest.mark.p3),
            pytest.param(lambda r: {"document_ids": r[:1] + ["invalid_id"] + r[1:3]}, marks=pytest.mark.p1),
            pytest.param(lambda r: {"document_ids": r + ["invalid_id"]}, marks=pytest.mark.p3),
        ],
    )
    def test_parse_partial_invalid_document_id(self, HttpApiAuth, add_documents_func, payload):
        """A mixed id list is rejected, yet the valid documents still get parsed."""
        dataset_id, document_ids = add_documents_func
        if callable(payload):
            payload = payload(document_ids)
        res = parse_documents(HttpApiAuth, dataset_id, payload)
        assert res["code"] == 102
        assert res["message"] == "Documents not found: ['invalid_id']"
        condition(HttpApiAuth, dataset_id)
        validate_document_details(HttpApiAuth, dataset_id, document_ids)

    @pytest.mark.p3
    def test_repeated_parse(self, HttpApiAuth, add_documents_func):
        """Re-parsing already-parsed documents succeeds."""
        dataset_id, document_ids = add_documents_func
        res = parse_documents(HttpApiAuth, dataset_id, {"document_ids": document_ids})
        assert res["code"] == 0
        condition(HttpApiAuth, dataset_id)
        res = parse_documents(HttpApiAuth, dataset_id, {"document_ids": document_ids})
        assert res["code"] == 0

    @pytest.mark.p3
    def test_duplicate_parse(self, HttpApiAuth, add_documents_func):
        """Duplicate ids in one request are reported but do not block the unique ones."""
        dataset_id, document_ids = add_documents_func
        res = parse_documents(HttpApiAuth, dataset_id, {"document_ids": document_ids + document_ids})
        assert res["code"] == 0
        assert "Duplicate document ids" in res["data"]["errors"][0]
        assert res["data"]["success_count"] == 3
        condition(HttpApiAuth, dataset_id)
        validate_document_details(HttpApiAuth, dataset_id, document_ids)
@pytest.mark.p3
def test_parse_100_files(HttpApiAuth, add_dataset_func, tmp_path):
    """Bulk-upload 100 files, parse them in one request, and wait for completion."""

    @wait_for(200, 1, "Document parsing timeout")
    def all_done(_auth, _dataset_id, _document_num):
        # page_size must cover the whole upload so no doc escapes the check.
        listing = list_documents(_auth, _dataset_id, {"page_size": _document_num})
        return all(doc["run"] == "DONE" for doc in listing["data"]["docs"])

    num_files = 100
    dataset_id = add_dataset_func
    document_ids = bulk_upload_documents(HttpApiAuth, dataset_id, num_files, tmp_path)
    res = parse_documents(HttpApiAuth, dataset_id, {"document_ids": document_ids})
    assert res["code"] == 0
    all_done(HttpApiAuth, dataset_id, num_files)
    validate_document_details(HttpApiAuth, dataset_id, document_ids)
@pytest.mark.p3
def test_concurrent_parse(HttpApiAuth, add_dataset_func, tmp_path):
    """Fire one parse request per document from 5 worker threads; all must succeed."""

    @wait_for(200, 1, "Document parsing timeout")
    def all_done(_auth, _dataset_id, _document_num):
        listing = list_documents(_auth, _dataset_id, {"page_size": _document_num})
        return all(doc["run"] == "DONE" for doc in listing["data"]["docs"])

    total = 100
    dataset_id = add_dataset_func
    document_ids = bulk_upload_documents(HttpApiAuth, dataset_id, total, tmp_path)
    with ThreadPoolExecutor(max_workers=5) as pool:
        tasks = [
            pool.submit(
                parse_documents,
                HttpApiAuth,
                dataset_id,
                {"document_ids": document_ids[i : i + 1]},
            )
            for i in range(total)
        ]
    finished = list(as_completed(tasks))
    assert len(finished) == total, finished
    for task in tasks:
        assert task.result()["code"] == 0
    all_done(HttpApiAuth, dataset_id, total)
    validate_document_details(HttpApiAuth, dataset_id, document_ids)
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_http_api/test_file_management_within_dataset/test_parse_documents.py",
"license": "Apache License 2.0",
"lines": 187,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_http_api/test_file_management_within_dataset/test_stop_parse_documents.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from concurrent.futures import ThreadPoolExecutor
from time import sleep
import pytest
from common import bulk_upload_documents, list_documents, parse_documents, stop_parse_documents
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowHttpApiAuth
from utils import wait_for
def validate_document_parse_done(auth, dataset_id, document_ids):
    """Assert every document in *document_ids* finished parsing successfully."""
    for doc_id in document_ids:
        listing = list_documents(auth, dataset_id, params={"id": doc_id})
        doc = listing["data"]["docs"][0]
        assert doc["run"] == "DONE"
        assert len(doc["process_begin_at"]) > 0  # begin timestamp recorded
        assert doc["process_duration"] > 0
        assert doc["progress"] > 0
        assert "Task done" in doc["progress_msg"]
def validate_document_parse_cancel(auth, dataset_id, document_ids):
    """Assert every document in *document_ids* was cancelled before making progress."""
    for doc_id in document_ids:
        listing = list_documents(auth, dataset_id, params={"id": doc_id})
        doc = listing["data"]["docs"][0]
        assert doc["run"] == "CANCEL"
        assert len(doc["process_begin_at"]) > 0  # parsing had started
        assert doc["progress"] == 0.0  # but produced no progress
@pytest.mark.p1
class TestAuthorization:
    """The stop-parse endpoint rejects missing or invalid API keys."""

    @pytest.mark.parametrize(
        "invalid_auth, expected_code, expected_message",
        [
            # No auth at all.
            (None, 0, "`Authorization` can't be empty"),
            (
                RAGFlowHttpApiAuth(INVALID_API_TOKEN),
                109,
                "Authentication error: API key is invalid!",
            ),
        ],
    )
    def test_invalid_auth(self, invalid_auth, expected_code, expected_message):
        """Calls with bad credentials fail before any dataset lookup happens."""
        res = stop_parse_documents(invalid_auth, "dataset_id")
        assert res["code"] == expected_code
        assert res["message"] == expected_message
@pytest.mark.skip
class TestDocumentsParseStop:
    """Stop-parse scenarios (class currently skipped wholesale — see marker)."""

    @pytest.mark.parametrize(
        "payload, expected_code, expected_message",
        [
            pytest.param(None, 102, """AttributeError("\'NoneType\' object has no attribute \'get\'")""", marks=pytest.mark.skip),
            pytest.param({"document_ids": []}, 102, "`document_ids` is required", marks=pytest.mark.p1),
            pytest.param({"document_ids": ["invalid_id"]}, 102, "You don't own the document invalid_id.", marks=pytest.mark.p3),
            pytest.param({"document_ids": ["\n!?。;!?\"'"]}, 102, """You don\'t own the document \n!?。;!?"\'.""", marks=pytest.mark.p3),
            pytest.param("not json", 102, "AttributeError(\"'str' object has no attribute 'get'\")", marks=pytest.mark.skip),
            # Callable payloads are resolved against the uploaded document ids.
            pytest.param(lambda r: {"document_ids": r[:1]}, 0, "", marks=pytest.mark.p1),
            pytest.param(lambda r: {"document_ids": r}, 0, "", marks=pytest.mark.p1),
        ],
    )
    def test_basic_scenarios(self, HttpApiAuth, add_documents_func, payload, expected_code, expected_message):
        """Stop parsing for a subset; the rest must run to completion."""

        @wait_for(10, 1, "Document parsing timeout")
        def condition(_auth, _dataset_id, _document_ids):
            # All non-cancelled documents must reach DONE.
            for _document_id in _document_ids:
                res = list_documents(_auth, _dataset_id, {"id": _document_id})
                if res["data"]["docs"][0]["run"] != "DONE":
                    return False
            return True

        dataset_id, document_ids = add_documents_func
        parse_documents(HttpApiAuth, dataset_id, {"document_ids": document_ids})
        if callable(payload):
            payload = payload(document_ids)
        res = stop_parse_documents(HttpApiAuth, dataset_id, payload)
        assert res["code"] == expected_code
        if expected_code != 0:
            assert res["message"] == expected_message
        else:
            # Docs not named in the stop request keep parsing to DONE.
            completed_document_ids = list(set(document_ids) - set(payload["document_ids"]))
            condition(HttpApiAuth, dataset_id, completed_document_ids)
            validate_document_parse_cancel(HttpApiAuth, dataset_id, payload["document_ids"])
            validate_document_parse_done(HttpApiAuth, dataset_id, completed_document_ids)

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "invalid_dataset_id, expected_code, expected_message",
        [
            # Empty dataset id turns the URL into a different route -> 405.
            ("", 100, "<MethodNotAllowed '405: Method Not Allowed'>"),
            (
                "invalid_dataset_id",
                102,
                "You don't own the dataset invalid_dataset_id.",
            ),
        ],
    )
    def test_invalid_dataset_id(
        self,
        HttpApiAuth,
        add_documents_func,
        invalid_dataset_id,
        expected_code,
        expected_message,
    ):
        """Stopping via a bad dataset id fails with the matching error."""
        dataset_id, document_ids = add_documents_func
        parse_documents(HttpApiAuth, dataset_id, {"document_ids": document_ids})
        res = stop_parse_documents(HttpApiAuth, invalid_dataset_id, {"document_ids": document_ids})
        assert res["code"] == expected_code
        assert res["message"] == expected_message

    @pytest.mark.skip
    @pytest.mark.parametrize(
        "payload",
        [
            lambda r: {"document_ids": ["invalid_id"] + r},
            lambda r: {"document_ids": r[:1] + ["invalid_id"] + r[1:3]},
            lambda r: {"document_ids": r + ["invalid_id"]},
        ],
    )
    def test_stop_parse_partial_invalid_document_id(self, HttpApiAuth, add_documents_func, payload):
        """A mixed id list is rejected, yet the valid documents are still cancelled."""
        dataset_id, document_ids = add_documents_func
        parse_documents(HttpApiAuth, dataset_id, {"document_ids": document_ids})
        if callable(payload):
            payload = payload(document_ids)
        res = stop_parse_documents(HttpApiAuth, dataset_id, payload)
        assert res["code"] == 102
        assert res["message"] == "You don't own the document invalid_id."
        validate_document_parse_cancel(HttpApiAuth, dataset_id, document_ids)

    @pytest.mark.p3
    def test_repeated_stop_parse(self, HttpApiAuth, add_documents_func):
        """A second stop request on already-cancelled docs is rejected."""
        dataset_id, document_ids = add_documents_func
        parse_documents(HttpApiAuth, dataset_id, {"document_ids": document_ids})
        res = stop_parse_documents(HttpApiAuth, dataset_id, {"document_ids": document_ids})
        assert res["code"] == 0
        res = stop_parse_documents(HttpApiAuth, dataset_id, {"document_ids": document_ids})
        assert res["code"] == 102
        assert res["message"] == "Can't stop parsing document that has not started or already completed"

    @pytest.mark.p3
    def test_duplicate_stop_parse(self, HttpApiAuth, add_documents_func):
        """Duplicate ids in one stop request are reported; unique ones succeed."""
        dataset_id, document_ids = add_documents_func
        parse_documents(HttpApiAuth, dataset_id, {"document_ids": document_ids})
        res = stop_parse_documents(HttpApiAuth, dataset_id, {"document_ids": document_ids + document_ids})
        assert res["code"] == 0
        assert res["data"]["success_count"] == 3
        assert f"Duplicate document ids: {document_ids[0]}" in res["data"]["errors"]
@pytest.mark.skip(reason="unstable")
def test_stop_parse_100_files(HttpApiAuth, add_dataset_func, tmp_path):
    """Cancel parsing of 100 freshly started documents in one request."""
    num_files = 100
    dataset_id = add_dataset_func
    doc_ids = bulk_upload_documents(HttpApiAuth, dataset_id, num_files, tmp_path)
    parse_documents(HttpApiAuth, dataset_id, {"document_ids": doc_ids})
    sleep(1)  # give parsing a moment to actually start before cancelling
    res = stop_parse_documents(HttpApiAuth, dataset_id, {"document_ids": doc_ids})
    assert res["code"] == 0
    validate_document_parse_cancel(HttpApiAuth, dataset_id, doc_ids)
@pytest.mark.skip(reason="unstable")
def test_concurrent_parse(HttpApiAuth, add_dataset_func, tmp_path):
    """Cancel each of 50 in-flight documents from 5 worker threads."""
    total = 50
    dataset_id = add_dataset_func
    doc_ids = bulk_upload_documents(HttpApiAuth, dataset_id, total, tmp_path)
    parse_documents(HttpApiAuth, dataset_id, {"document_ids": doc_ids})
    with ThreadPoolExecutor(max_workers=5) as pool:
        tasks = [
            pool.submit(
                stop_parse_documents,
                HttpApiAuth,
                dataset_id,
                {"document_ids": doc_ids[i : i + 1]},
            )
            for i in range(total)
        ]
    results = [task.result() for task in tasks]
    assert all(result["code"] == 0 for result in results)
    validate_document_parse_cancel(HttpApiAuth, dataset_id, doc_ids)
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_http_api/test_file_management_within_dataset/test_stop_parse_documents.py",
"license": "Apache License 2.0",
"lines": 179,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_http_api/test_file_management_within_dataset/test_update_document.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from common import list_documents, update_document
from configs import DOCUMENT_NAME_LIMIT, INVALID_API_TOKEN, INVALID_ID_32
from libs.auth import RAGFlowHttpApiAuth
from configs import DEFAULT_PARSER_CONFIG
@pytest.mark.p1
class TestAuthorization:
    """The update-document endpoint rejects missing or invalid API keys."""

    @pytest.mark.parametrize(
        "invalid_auth, expected_code, expected_message",
        [
            # No auth at all.
            (None, 0, "`Authorization` can't be empty"),
            (
                RAGFlowHttpApiAuth(INVALID_API_TOKEN),
                109,
                "Authentication error: API key is invalid!",
            ),
        ],
    )
    def test_invalid_auth(self, invalid_auth, expected_code, expected_message):
        """Calls with bad credentials fail before any dataset lookup happens."""
        res = update_document(invalid_auth, "dataset_id", "document_id")
        assert res["code"] == expected_code
        assert res["message"] == expected_message
class TestDocumentsUpdated:
    """Renaming, metadata updates, chunk-method changes and read-only-field rejection."""

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "name, expected_code, expected_message",
        [
            ("new_name.txt", 0, ""),
            # Longest legal name: limit minus the 4-char ".txt" suffix.
            (
                f"{'a' * (DOCUMENT_NAME_LIMIT - 4)}.txt",
                0,
                "",
            ),
            (
                0,
                100,
                """AttributeError("\'int\' object has no attribute \'encode\'")""",
            ),
            (
                None,
                100,
                """AttributeError("\'NoneType\' object has no attribute \'encode\'")""",
            ),
            (
                "",
                101,
                "The extension of file can't be changed",
            ),
            (
                "ragflow_test_upload_0",
                101,
                "The extension of file can't be changed",
            ),
            (
                "ragflow_test_upload_1.txt",
                102,
                "Duplicated document name in the same dataset.",
            ),
            # Duplicate check is case-sensitive, so the upper-cased twin passes.
            (
                "RAGFLOW_TEST_UPLOAD_1.TXT",
                0,
                "",
            ),
        ],
    )
    def test_name(self, HttpApiAuth, add_documents, name, expected_code, expected_message):
        """Rename a document and confirm the new name is persisted."""
        dataset_id, document_ids = add_documents
        res = update_document(HttpApiAuth, dataset_id, document_ids[0], {"name": name})
        assert res["code"] == expected_code
        if expected_code == 0:
            res = list_documents(HttpApiAuth, dataset_id, {"id": document_ids[0]})
            assert res["data"]["docs"][0]["name"] == name
        else:
            assert res["message"] == expected_message

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "document_id, expected_code, expected_message",
        [
            (
                INVALID_ID_32,
                102,
                "The dataset doesn't own the document.",
            ),
        ],
    )
    def test_invalid_document_id(self, HttpApiAuth, add_documents, document_id, expected_code, expected_message):
        """Updating a document id the dataset does not own is rejected."""
        dataset_id, _ = add_documents
        res = update_document(HttpApiAuth, dataset_id, document_id, {"name": "new_name.txt"})
        assert res["code"] == expected_code
        assert res["message"] == expected_message

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "dataset_id, expected_code, expected_message",
        [
            (
                INVALID_ID_32,
                102,
                "You don't own the dataset.",
            ),
        ],
    )
    def test_invalid_dataset_id(self, HttpApiAuth, add_documents, dataset_id, expected_code, expected_message):
        """Updating through a dataset id the caller does not own is rejected."""
        _, document_ids = add_documents
        res = update_document(HttpApiAuth, dataset_id, document_ids[0], {"name": "new_name.txt"})
        assert res["code"] == expected_code
        assert res["message"] == expected_message

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "meta_fields, expected_code, expected_message",
        [({"test": "test"}, 0, ""), ("test", 102, "meta_fields must be a dictionary")],
    )
    def test_meta_fields(self, HttpApiAuth, add_documents, meta_fields, expected_code, expected_message):
        """meta_fields must be a dict; valid values round-trip through the listing."""
        dataset_id, document_ids = add_documents
        res = update_document(HttpApiAuth, dataset_id, document_ids[0], {"meta_fields": meta_fields})
        # Fix: the response code was never asserted here, unlike every sibling
        # test, so a failed update on the expected-success param was only caught
        # indirectly (or not at all). Assert it explicitly.
        assert res["code"] == expected_code
        if expected_code == 0:
            res = list_documents(HttpApiAuth, dataset_id, {"id": document_ids[0]})
            assert res["data"]["docs"][0]["meta_fields"] == meta_fields
        else:
            assert res["message"] == expected_message

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "chunk_method, expected_code, expected_message",
        [
            ("naive", 0, ""),
            ("manual", 0, ""),
            ("qa", 0, ""),
            ("table", 0, ""),
            ("paper", 0, ""),
            ("book", 0, ""),
            ("laws", 0, ""),
            ("presentation", 0, ""),
            ("picture", 0, ""),
            ("one", 0, ""),
            ("knowledge_graph", 0, ""),
            ("email", 0, ""),
            ("tag", 0, ""),
            ("", 102, "`chunk_method` doesn't exist"),
            (
                "other_chunk_method",
                102,
                "`chunk_method` other_chunk_method doesn't exist",
            ),
        ],
    )
    def test_chunk_method(self, HttpApiAuth, add_documents, chunk_method, expected_code, expected_message):
        """Every supported chunk method is accepted; unknown ones are rejected."""
        dataset_id, document_ids = add_documents
        res = update_document(HttpApiAuth, dataset_id, document_ids[0], {"chunk_method": chunk_method})
        assert res["code"] == expected_code
        if expected_code == 0:
            res = list_documents(HttpApiAuth, dataset_id, {"id": document_ids[0]})
            # An empty chunk_method is normalized to the "naive" default.
            if chunk_method == "":
                assert res["data"]["docs"][0]["chunk_method"] == "naive"
            else:
                assert res["data"]["docs"][0]["chunk_method"] == chunk_method
        else:
            assert res["message"] == expected_message

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "payload, expected_code, expected_message",
        [
            ({"chunk_count": 1}, 102, "Can't change `chunk_count`."),
            pytest.param(
                {"create_date": "Fri, 14 Mar 2025 16:53:42 GMT"},
                102,
                "The input parameters are invalid.",
                marks=pytest.mark.skip(reason="issues/6104"),
            ),
            pytest.param(
                {"create_time": 1},
                102,
                "The input parameters are invalid.",
                marks=pytest.mark.skip(reason="issues/6104"),
            ),
            pytest.param(
                {"created_by": "ragflow_test"},
                102,
                "The input parameters are invalid.",
                marks=pytest.mark.skip(reason="issues/6104"),
            ),
            pytest.param(
                {"dataset_id": "ragflow_test"},
                102,
                "The input parameters are invalid.",
                marks=pytest.mark.skip(reason="issues/6104"),
            ),
            pytest.param(
                {"id": "ragflow_test"},
                102,
                "The input parameters are invalid.",
                marks=pytest.mark.skip(reason="issues/6104"),
            ),
            pytest.param(
                {"location": "ragflow_test.txt"},
                102,
                "The input parameters are invalid.",
                marks=pytest.mark.skip(reason="issues/6104"),
            ),
            pytest.param(
                {"process_begin_at": 1},
                102,
                "The input parameters are invalid.",
                marks=pytest.mark.skip(reason="issues/6104"),
            ),
            pytest.param(
                {"process_duration": 1.0},
                102,
                "The input parameters are invalid.",
                marks=pytest.mark.skip(reason="issues/6104"),
            ),
            pytest.param({"progress": 1.0}, 102, "Can't change `progress`."),
            pytest.param(
                {"progress_msg": "ragflow_test"},
                102,
                "The input parameters are invalid.",
                marks=pytest.mark.skip(reason="issues/6104"),
            ),
            pytest.param(
                {"run": "ragflow_test"},
                102,
                "The input parameters are invalid.",
                marks=pytest.mark.skip(reason="issues/6104"),
            ),
            pytest.param(
                {"size": 1},
                102,
                "The input parameters are invalid.",
                marks=pytest.mark.skip(reason="issues/6104"),
            ),
            pytest.param(
                {"source_type": "ragflow_test"},
                102,
                "The input parameters are invalid.",
                marks=pytest.mark.skip(reason="issues/6104"),
            ),
            pytest.param(
                {"thumbnail": "ragflow_test"},
                102,
                "The input parameters are invalid.",
                marks=pytest.mark.skip(reason="issues/6104"),
            ),
            ({"token_count": 1}, 102, "Can't change `token_count`."),
            pytest.param(
                {"type": "ragflow_test"},
                102,
                "The input parameters are invalid.",
                marks=pytest.mark.skip(reason="issues/6104"),
            ),
            pytest.param(
                {"update_date": "Fri, 14 Mar 2025 16:33:17 GMT"},
                102,
                "The input parameters are invalid.",
                marks=pytest.mark.skip(reason="issues/6104"),
            ),
            pytest.param(
                {"update_time": 1},
                102,
                "The input parameters are invalid.",
                marks=pytest.mark.skip(reason="issues/6104"),
            ),
        ],
    )
    def test_invalid_field(
        self,
        HttpApiAuth,
        add_documents,
        payload,
        expected_code,
        expected_message,
    ):
        """Server-managed/read-only fields cannot be updated through the API."""
        dataset_id, document_ids = add_documents
        res = update_document(HttpApiAuth, dataset_id, document_ids[0], payload)
        assert res["code"] == expected_code
        assert res["message"] == expected_message
class TestUpdateDocumentParserConfig:
@pytest.mark.p2
@pytest.mark.parametrize(
"chunk_method, parser_config, expected_code, expected_message",
[
("naive", {}, 0, ""),
(
"naive",
DEFAULT_PARSER_CONFIG,
0,
"",
),
pytest.param(
"naive",
{"chunk_token_num": -1},
100,
"AssertionError('chunk_token_num should be in range from 1 to 100000000')",
marks=pytest.mark.skip(reason="issues/6098"),
),
pytest.param(
"naive",
{"chunk_token_num": 0},
100,
"AssertionError('chunk_token_num should be in range from 1 to 100000000')",
marks=pytest.mark.skip(reason="issues/6098"),
),
pytest.param(
"naive",
{"chunk_token_num": 100000000},
100,
"AssertionError('chunk_token_num should be in range from 1 to 100000000')",
marks=pytest.mark.skip(reason="issues/6098"),
),
pytest.param(
"naive",
{"chunk_token_num": 3.14},
102,
"",
marks=pytest.mark.skip(reason="issues/6098"),
),
pytest.param(
"naive",
{"chunk_token_num": "1024"},
100,
"",
marks=pytest.mark.skip(reason="issues/6098"),
),
(
"naive",
{"layout_recognize": "DeepDOC"},
0,
"",
),
(
"naive",
{"layout_recognize": "Naive"},
0,
"",
),
("naive", {"html4excel": True}, 0, ""),
("naive", {"html4excel": False}, 0, ""),
pytest.param(
"naive",
{"html4excel": 1},
100,
"AssertionError('html4excel should be True or False')",
marks=pytest.mark.skip(reason="issues/6098"),
),
("naive", {"delimiter": ""}, 0, ""),
("naive", {"delimiter": "`##`"}, 0, ""),
pytest.param(
"naive",
{"delimiter": 1},
100,
"",
marks=pytest.mark.skip(reason="issues/6098"),
),
pytest.param(
"naive",
{"task_page_size": -1},
100,
"AssertionError('task_page_size should be in range from 1 to 100000000')",
marks=pytest.mark.skip(reason="issues/6098"),
),
pytest.param(
"naive",
{"task_page_size": 0},
100,
"AssertionError('task_page_size should be in range from 1 to 100000000')",
marks=pytest.mark.skip(reason="issues/6098"),
),
pytest.param(
"naive",
{"task_page_size": 100000000},
100,
"AssertionError('task_page_size should be in range from 1 to 100000000')",
marks=pytest.mark.skip(reason="issues/6098"),
),
pytest.param(
"naive",
{"task_page_size": 3.14},
100,
"",
marks=pytest.mark.skip(reason="issues/6098"),
),
pytest.param(
"naive",
{"task_page_size": "1024"},
100,
"",
marks=pytest.mark.skip(reason="issues/6098"),
),
("naive", {"raptor": {"use_raptor": {
"use_raptor": True,
"prompt": "Please summarize the following paragraphs. Be careful with the numbers, do not make things up. Paragraphs as following:\n {cluster_content}\nThe above is the content you need to summarize.",
"max_token": 256,
"threshold": 0.1,
"max_cluster": 64,
"random_seed": 0,
},}}, 0, ""),
("naive", {"raptor": {"use_raptor": False}}, 0, ""),
pytest.param(
"naive",
{"invalid_key": "invalid_value"},
100,
"""AssertionError("Abnormal \'parser_config\'. Invalid key: invalid_key")""",
marks=pytest.mark.skip(reason="issues/6098"),
),
pytest.param(
"naive",
{"auto_keywords": -1},
100,
"AssertionError('auto_keywords should be in range from 0 to 32')",
marks=pytest.mark.skip(reason="issues/6098"),
),
pytest.param(
"naive",
{"auto_keywords": 32},
100,
"AssertionError('auto_keywords should be in range from 0 to 32')",
marks=pytest.mark.skip(reason="issues/6098"),
),
pytest.param(
"naive",
{"auto_questions": 3.14},
100,
"",
marks=pytest.mark.skip(reason="issues/6098"),
),
pytest.param(
"naive",
{"auto_keywords": "1024"},
100,
"",
marks=pytest.mark.skip(reason="issues/6098"),
),
pytest.param(
"naive",
{"auto_questions": -1},
100,
"AssertionError('auto_questions should be in range from 0 to 10')",
marks=pytest.mark.skip(reason="issues/6098"),
),
pytest.param(
"naive",
{"auto_questions": 10},
100,
"AssertionError('auto_questions should be in range from 0 to 10')",
marks=pytest.mark.skip(reason="issues/6098"),
),
pytest.param(
"naive",
{"auto_questions": 3.14},
100,
"",
marks=pytest.mark.skip(reason="issues/6098"),
),
pytest.param(
"naive",
{"auto_questions": "1024"},
100,
"",
marks=pytest.mark.skip(reason="issues/6098"),
),
pytest.param(
"naive",
{"topn_tags": -1},
100,
"AssertionError('topn_tags should be in range from 0 to 10')",
marks=pytest.mark.skip(reason="issues/6098"),
),
pytest.param(
"naive",
{"topn_tags": 10},
100,
"AssertionError('topn_tags should be in range from 0 to 10')",
marks=pytest.mark.skip(reason="issues/6098"),
),
pytest.param(
"naive",
{"topn_tags": 3.14},
100,
"",
marks=pytest.mark.skip(reason="issues/6098"),
),
pytest.param(
"naive",
{"topn_tags": "1024"},
100,
"",
marks=pytest.mark.skip(reason="issues/6098"),
),
],
)
    def test_parser_config(
        self,
        HttpApiAuth,
        add_documents,
        chunk_method,
        parser_config,
        expected_code,
        expected_message,
    ):
        """Update a document's chunk_method/parser_config and verify the stored result.

        For accepted updates (expected_code == 0) the document is re-read via
        list_documents and its persisted parser_config is compared against the
        request: an empty request config must fall back to DEFAULT_PARSER_CONFIG,
        otherwise every submitted key must be echoed back unchanged.
        """
        dataset_id, document_ids = add_documents
        res = update_document(
            HttpApiAuth,
            dataset_id,
            document_ids[0],
            {"chunk_method": chunk_method, "parser_config": parser_config},
        )
        assert res["code"] == expected_code
        if expected_code == 0:
            res = list_documents(HttpApiAuth, dataset_id, {"id": document_ids[0]})
            if parser_config == {}:
                # Empty config means "use server defaults".
                assert res["data"]["docs"][0]["parser_config"] == DEFAULT_PARSER_CONFIG
            else:
                # Only the submitted keys are checked; the server may add others.
                for k, v in parser_config.items():
                    assert res["data"]["docs"][0]["parser_config"][k] == v
        if expected_code != 0 or expected_message:
            # NOTE(review): on the success path `res` is the list_documents
            # response here, so expected_message is only meaningful when empty
            # or when the update failed.
            assert res["message"] == expected_message
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_http_api/test_file_management_within_dataset/test_update_document.py",
"license": "Apache License 2.0",
"lines": 528,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_http_api/test_file_management_within_dataset/test_upload_documents.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import string
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
import requests
from common import FILE_API_URL, list_datasets, upload_documents
from configs import DOCUMENT_NAME_LIMIT, HOST_ADDRESS, INVALID_API_TOKEN
from libs.auth import RAGFlowHttpApiAuth
from requests_toolbelt import MultipartEncoder
from utils.file_utils import create_txt_file
@pytest.mark.p1
@pytest.mark.usefixtures("clear_datasets")
class TestAuthorization:
    """The upload endpoint must reject requests with missing or invalid credentials."""

    @pytest.mark.parametrize(
        "invalid_auth, expected_code, expected_message",
        [
            (None, 0, "`Authorization` can't be empty"),
            (RAGFlowHttpApiAuth(INVALID_API_TOKEN), 109, "Authentication error: API key is invalid!"),
        ],
    )
    def test_invalid_auth(self, invalid_auth, expected_code, expected_message):
        # Any dataset id will do: authentication is checked before the id is used.
        response = upload_documents(invalid_auth, "dataset_id")
        assert (response["code"], response["message"]) == (expected_code, expected_message)
class TestDocumentsUpload:
    """Functional tests for uploading documents into a dataset via the HTTP API."""

    @pytest.mark.p1
    def test_valid_single_upload(self, HttpApiAuth, add_dataset_func, tmp_path):
        """A single text file uploads successfully and keeps its name."""
        dataset_id = add_dataset_func
        fp = create_txt_file(tmp_path / "ragflow_test.txt")
        res = upload_documents(HttpApiAuth, dataset_id, [fp])
        assert res["code"] == 0
        assert res["data"][0]["dataset_id"] == dataset_id
        assert res["data"][0]["name"] == fp.name

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "generate_test_files",
        [
            "docx",
            "excel",
            "ppt",
            "image",
            "pdf",
            "txt",
            "md",
            "json",
            "eml",
            "html",
        ],
        indirect=True,
    )
    def test_file_type_validation(self, HttpApiAuth, add_dataset_func, generate_test_files, request):
        """Every supported file type is accepted by the upload endpoint."""
        dataset_id = add_dataset_func
        # The indirect fixture returns a mapping of type -> generated file path.
        fp = generate_test_files[request.node.callspec.params["generate_test_files"]]
        res = upload_documents(HttpApiAuth, dataset_id, [fp])
        assert res["code"] == 0
        assert res["data"][0]["dataset_id"] == dataset_id
        assert res["data"][0]["name"] == fp.name

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "file_type",
        ["exe", "unknown"],
    )
    def test_unsupported_file_type(self, HttpApiAuth, add_dataset_func, tmp_path, file_type):
        """Unsupported extensions are rejected with code 500 and an explicit message."""
        dataset_id = add_dataset_func
        fp = tmp_path / f"ragflow_test.{file_type}"
        fp.touch()
        res = upload_documents(HttpApiAuth, dataset_id, [fp])
        assert res["code"] == 500
        assert res["message"] == f"ragflow_test.{file_type}: This type of file has not been supported yet!"

    @pytest.mark.p2
    def test_missing_file(self, HttpApiAuth, add_dataset_func):
        """Omitting the file part entirely yields code 101."""
        dataset_id = add_dataset_func
        res = upload_documents(HttpApiAuth, dataset_id)
        assert res["code"] == 101
        assert res["message"] == "No file part!"

    @pytest.mark.p3
    def test_empty_file(self, HttpApiAuth, add_dataset_func, tmp_path):
        """A zero-byte file is accepted and reported with size 0."""
        dataset_id = add_dataset_func
        fp = tmp_path / "empty.txt"
        fp.touch()
        res = upload_documents(HttpApiAuth, dataset_id, [fp])
        assert res["code"] == 0
        assert res["data"][0]["size"] == 0

    @pytest.mark.p3
    def test_filename_empty(self, HttpApiAuth, add_dataset_func, tmp_path):
        """A multipart part with an empty filename is rejected with code 101.

        Built by hand with MultipartEncoder because the regular helper always
        sends a real filename.
        """
        dataset_id = add_dataset_func
        fp = create_txt_file(tmp_path / "ragflow_test.txt")
        url = f"{HOST_ADDRESS}{FILE_API_URL}".format(dataset_id=dataset_id)
        with fp.open("rb") as file_obj:
            # Empty string as the filename of the multipart part.
            fields = (("file", ("", file_obj)),)
            m = MultipartEncoder(fields=fields)
            res = requests.post(
                url=url,
                headers={"Content-Type": m.content_type},
                auth=HttpApiAuth,
                data=m,
            )
        assert res.json()["code"] == 101
        assert res.json()["message"] == "No file selected!"

    @pytest.mark.p2
    def test_filename_max_length(self, HttpApiAuth, add_dataset_func, tmp_path):
        """A filename at exactly the length limit is accepted."""
        dataset_id = add_dataset_func
        # -4 reserves room for the ".txt" suffix so the full name hits the limit.
        fp = create_txt_file(tmp_path / f"{'a' * (DOCUMENT_NAME_LIMIT - 4)}.txt")
        res = upload_documents(HttpApiAuth, dataset_id, [fp])
        assert res["code"] == 0
        assert res["data"][0]["name"] == fp.name

    @pytest.mark.p2
    def test_invalid_dataset_id(self, HttpApiAuth, tmp_path):
        """Uploading into a nonexistent dataset yields code 100 with a lookup error."""
        fp = create_txt_file(tmp_path / "ragflow_test.txt")
        res = upload_documents(HttpApiAuth, "invalid_dataset_id", [fp])
        assert res["code"] == 100
        assert res["message"] == """LookupError("Can\'t find the dataset with ID invalid_dataset_id!")"""

    @pytest.mark.p2
    def test_duplicate_files(self, HttpApiAuth, add_dataset_func, tmp_path):
        """Uploading the same file twice in one request renames the duplicate to name(1).ext."""
        dataset_id = add_dataset_func
        fp = create_txt_file(tmp_path / "ragflow_test.txt")
        res = upload_documents(HttpApiAuth, dataset_id, [fp, fp])
        assert res["code"] == 0
        assert len(res["data"]) == 2
        for i in range(len(res["data"])):
            assert res["data"][i]["dataset_id"] == dataset_id
            expected_name = fp.name
            if i != 0:
                # Server disambiguates duplicates as "stem(i).suffix".
                expected_name = f"{fp.stem}({i}){fp.suffix}"
            assert res["data"][i]["name"] == expected_name

    @pytest.mark.p2
    def test_same_file_repeat(self, HttpApiAuth, add_dataset_func, tmp_path):
        """Re-uploading the same file in separate requests also gets the (i) suffix."""
        dataset_id = add_dataset_func
        fp = create_txt_file(tmp_path / "ragflow_test.txt")
        for i in range(3):
            res = upload_documents(HttpApiAuth, dataset_id, [fp])
            assert res["code"] == 0
            assert len(res["data"]) == 1
            assert res["data"][0]["dataset_id"] == dataset_id
            expected_name = fp.name
            if i != 0:
                expected_name = f"{fp.stem}({i}){fp.suffix}"
            assert res["data"][0]["name"] == expected_name

    @pytest.mark.p3
    def test_filename_special_characters(self, HttpApiAuth, add_dataset_func, tmp_path):
        """Punctuation that is legal in filenames is preserved through upload."""
        dataset_id = add_dataset_func
        # Replace characters that are illegal on common filesystems with "_",
        # then use the remaining punctuation as the filename stem.
        illegal_chars = '<>:"/\\|?*'
        translation_table = str.maketrans({char: "_" for char in illegal_chars})
        safe_filename = string.punctuation.translate(translation_table)
        fp = tmp_path / f"{safe_filename}.txt"
        fp.write_text("Sample text content")
        res = upload_documents(HttpApiAuth, dataset_id, [fp])
        assert res["code"] == 0
        assert len(res["data"]) == 1
        assert res["data"][0]["dataset_id"] == dataset_id
        assert res["data"][0]["name"] == fp.name

    @pytest.mark.p1
    def test_multiple_files(self, HttpApiAuth, add_dataset_func, tmp_path):
        """A single request with many files uploads them all."""
        dataset_id = add_dataset_func
        expected_document_count = 20
        fps = []
        for i in range(expected_document_count):
            fp = create_txt_file(tmp_path / f"ragflow_test_{i}.txt")
            fps.append(fp)
        res = upload_documents(HttpApiAuth, dataset_id, fps)
        assert res["code"] == 0
        res = list_datasets(HttpApiAuth, {"id": dataset_id})
        assert res["data"][0]["document_count"] == expected_document_count

    @pytest.mark.p3
    def test_concurrent_upload(self, HttpApiAuth, add_dataset_func, tmp_path):
        """Parallel single-file uploads all succeed and are all counted."""
        dataset_id = add_dataset_func
        count = 20
        fps = []
        for i in range(count):
            fp = create_txt_file(tmp_path / f"ragflow_test_{i}.txt")
            fps.append(fp)
        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [executor.submit(upload_documents, HttpApiAuth, dataset_id, [fp]) for fp in fps]
        responses = list(as_completed(futures))
        assert len(responses) == count, responses
        assert all(future.result()["code"] == 0 for future in futures)
        res = list_datasets(HttpApiAuth, {"id": dataset_id})
        assert res["data"][0]["document_count"] == count
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_http_api/test_file_management_within_dataset/test_upload_documents.py",
"license": "Apache License 2.0",
"lines": 196,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_http_api/test_session_management/test_create_session_with_chat_assistant.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from common import create_session_with_chat_assistant, delete_chat_assistants, list_session_with_chat_assistants
from configs import INVALID_API_TOKEN, SESSION_WITH_CHAT_NAME_LIMIT
from libs.auth import RAGFlowHttpApiAuth
@pytest.mark.p1
class TestAuthorization:
    """Session creation must reject missing or invalid API credentials."""

    @pytest.mark.parametrize(
        "invalid_auth, expected_code, expected_message",
        [
            (None, 0, "`Authorization` can't be empty"),
            (RAGFlowHttpApiAuth(INVALID_API_TOKEN), 109, "Authentication error: API key is invalid!"),
        ],
    )
    def test_invalid_auth(self, invalid_auth, expected_code, expected_message):
        # The assistant id is never reached: auth fails first.
        response = create_session_with_chat_assistant(invalid_auth, "chat_assistant_id")
        assert (response["code"], response["message"]) == (expected_code, expected_message)
@pytest.mark.usefixtures("clear_session_with_chat_assistants")
class TestSessionWithChatAssistantCreate:
    """Tests for creating sessions under a chat assistant.

    Fix: the `if res["code"] != 0: assert False, res` pattern is replaced by a
    direct `assert res["code"] == 0, res` — `assert False` after a condition
    defeats pytest's assertion introspection (flake8-bugbear B011) while being
    behaviorally identical.
    """

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "payload, expected_code, expected_message",
        [
            ({"name": "valid_name"}, 0, ""),
            pytest.param({"name": "a" * (SESSION_WITH_CHAT_NAME_LIMIT + 1)}, 102, "", marks=pytest.mark.skip(reason="issues/")),
            pytest.param({"name": 1}, 100, "", marks=pytest.mark.skip(reason="issues/")),
            ({"name": ""}, 102, "`name` can not be empty."),
            ({"name": "duplicated_name"}, 0, ""),
            ({"name": "case insensitive"}, 0, ""),
        ],
    )
    def test_name(self, HttpApiAuth, add_chat_assistants, payload, expected_code, expected_message):
        """Session name validation, including duplicate and case-insensitive names."""
        _, _, chat_assistant_ids = add_chat_assistants
        # Pre-create a clashing session for the duplicate/case-insensitivity cases.
        if payload["name"] == "duplicated_name":
            create_session_with_chat_assistant(HttpApiAuth, chat_assistant_ids[0], payload)
        elif payload["name"] == "case insensitive":
            create_session_with_chat_assistant(HttpApiAuth, chat_assistant_ids[0], {"name": payload["name"].upper()})
        res = create_session_with_chat_assistant(HttpApiAuth, chat_assistant_ids[0], payload)
        assert res["code"] == expected_code, res
        if expected_code == 0:
            assert res["data"]["name"] == payload["name"]
            assert res["data"]["chat_id"] == chat_assistant_ids[0]
        else:
            assert res["message"] == expected_message

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "chat_assistant_id, expected_code, expected_message",
        [
            ("", 100, "<MethodNotAllowed '405: Method Not Allowed'>"),
            ("invalid_chat_assistant_id", 102, "You do not own the assistant."),
        ],
    )
    def test_invalid_chat_assistant_id(self, HttpApiAuth, chat_assistant_id, expected_code, expected_message):
        """Creating a session under a missing/foreign assistant id is rejected."""
        res = create_session_with_chat_assistant(HttpApiAuth, chat_assistant_id, {"name": "valid_name"})
        assert res["code"] == expected_code
        assert res["message"] == expected_message

    @pytest.mark.p3
    def test_concurrent_create_session(self, HttpApiAuth, add_chat_assistants):
        """Concurrent creations all succeed and the session count grows by exactly `count`."""
        count = 1000
        _, _, chat_assistant_ids = add_chat_assistants
        res = list_session_with_chat_assistants(HttpApiAuth, chat_assistant_ids[0])
        assert res["code"] == 0, res
        sessions_count = len(res["data"])
        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [
                executor.submit(
                    create_session_with_chat_assistant,
                    HttpApiAuth,
                    chat_assistant_ids[0],
                    {"name": f"session with chat assistant test {i}"},
                )
                for i in range(count)
            ]
        responses = list(as_completed(futures))
        assert len(responses) == count, responses
        assert all(future.result()["code"] == 0 for future in futures)
        # page_size must exceed the total so one page holds every session.
        res = list_session_with_chat_assistants(HttpApiAuth, chat_assistant_ids[0], {"page_size": count * 2})
        assert res["code"] == 0, res
        assert len(res["data"]) == sessions_count + count

    @pytest.mark.p3
    def test_add_session_to_deleted_chat_assistant(self, HttpApiAuth, add_chat_assistants):
        """Creating a session under a freshly deleted assistant fails with code 102."""
        _, _, chat_assistant_ids = add_chat_assistants
        res = delete_chat_assistants(HttpApiAuth, {"ids": [chat_assistant_ids[0]]})
        assert res["code"] == 0
        res = create_session_with_chat_assistant(HttpApiAuth, chat_assistant_ids[0], {"name": "valid_name"})
        assert res["code"] == 102
        assert res["message"] == "You do not own the assistant."
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_http_api/test_session_management/test_create_session_with_chat_assistant.py",
"license": "Apache License 2.0",
"lines": 109,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_http_api/test_session_management/test_delete_sessions_with_chat_assistant.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from common import batch_add_sessions_with_chat_assistant, delete_session_with_chat_assistants, list_session_with_chat_assistants
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowHttpApiAuth
@pytest.mark.p1
class TestAuthorization:
    """Session deletion must reject missing or invalid API credentials."""

    @pytest.mark.parametrize(
        "invalid_auth, expected_code, expected_message",
        [
            (None, 0, "`Authorization` can't be empty"),
            (RAGFlowHttpApiAuth(INVALID_API_TOKEN), 109, "Authentication error: API key is invalid!"),
        ],
    )
    def test_invalid_auth(self, invalid_auth, expected_code, expected_message):
        # Authentication is rejected before the assistant id matters.
        response = delete_session_with_chat_assistants(invalid_auth, "chat_assistant_id")
        assert (response["code"], response["message"]) == (expected_code, expected_message)
class TestSessionWithChatAssistantDelete:
    """Tests for deleting sessions under a chat assistant.

    Fix: every `if res["code"] != 0: assert False, res` is replaced with a
    direct `assert res["code"] == 0, res` — same failure condition and message,
    but without the `assert False` anti-idiom (flake8-bugbear B011), which
    hides pytest's assertion introspection.
    """

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "chat_assistant_id, expected_code, expected_message",
        [
            ("", 100, "<MethodNotAllowed '405: Method Not Allowed'>"),
            (
                "invalid_chat_assistant_id",
                102,
                "You don't own the chat",
            ),
        ],
    )
    def test_invalid_chat_assistant_id(self, HttpApiAuth, add_sessions_with_chat_assistant_func, chat_assistant_id, expected_code, expected_message):
        """Deleting via a missing/foreign assistant id is rejected."""
        _, session_ids = add_sessions_with_chat_assistant_func
        res = delete_session_with_chat_assistants(HttpApiAuth, chat_assistant_id, {"ids": session_ids})
        assert res["code"] == expected_code
        assert res["message"] == expected_message

    @pytest.mark.parametrize(
        "payload",
        [
            pytest.param(lambda r: {"ids": ["invalid_id"] + r}, marks=pytest.mark.p3),
            pytest.param(lambda r: {"ids": r[:1] + ["invalid_id"] + r[1:5]}, marks=pytest.mark.p1),
            pytest.param(lambda r: {"ids": r + ["invalid_id"]}, marks=pytest.mark.p3),
        ],
    )
    def test_delete_partial_invalid_id(self, HttpApiAuth, add_sessions_with_chat_assistant_func, payload):
        """Valid ids are deleted even when an invalid id is mixed in; the invalid one is reported."""
        chat_assistant_id, session_ids = add_sessions_with_chat_assistant_func
        if callable(payload):
            payload = payload(session_ids)
        res = delete_session_with_chat_assistants(HttpApiAuth, chat_assistant_id, payload)
        assert res["code"] == 0
        assert res["data"]["errors"][0] == "The chat doesn't own the session invalid_id"
        res = list_session_with_chat_assistants(HttpApiAuth, chat_assistant_id)
        assert res["code"] == 0, res
        assert len(res["data"]) == 0

    @pytest.mark.p3
    def test_repeated_deletion(self, HttpApiAuth, add_sessions_with_chat_assistant_func):
        """Deleting already-deleted sessions fails with code 102."""
        chat_assistant_id, session_ids = add_sessions_with_chat_assistant_func
        payload = {"ids": session_ids}
        res = delete_session_with_chat_assistants(HttpApiAuth, chat_assistant_id, payload)
        assert res["code"] == 0
        res = delete_session_with_chat_assistants(HttpApiAuth, chat_assistant_id, payload)
        assert res["code"] == 102
        assert "The chat doesn't own the session" in res["message"]

    @pytest.mark.p3
    def test_duplicate_deletion(self, HttpApiAuth, add_sessions_with_chat_assistant_func):
        """Duplicate ids in one request are deduplicated: 5 successes plus an error entry."""
        chat_assistant_id, session_ids = add_sessions_with_chat_assistant_func
        res = delete_session_with_chat_assistants(HttpApiAuth, chat_assistant_id, {"ids": session_ids * 2})
        assert res["code"] == 0
        assert "Duplicate session ids" in res["data"]["errors"][0]
        assert res["data"]["success_count"] == 5
        res = list_session_with_chat_assistants(HttpApiAuth, chat_assistant_id)
        assert res["code"] == 0, res
        assert len(res["data"]) == 0

    @pytest.mark.p3
    def test_concurrent_deletion(self, HttpApiAuth, add_chat_assistants):
        """Parallel one-by-one deletions all succeed."""
        count = 100
        _, _, chat_assistant_ids = add_chat_assistants
        session_ids = batch_add_sessions_with_chat_assistant(HttpApiAuth, chat_assistant_ids[0], count)
        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [
                executor.submit(
                    delete_session_with_chat_assistants,
                    HttpApiAuth,
                    chat_assistant_ids[0],
                    {"ids": session_ids[i : i + 1]},
                )
                for i in range(count)
            ]
        responses = list(as_completed(futures))
        assert len(responses) == count, responses
        assert all(future.result()["code"] == 0 for future in futures)

    @pytest.mark.p3
    def test_delete_1k(self, HttpApiAuth, add_chat_assistants):
        """A single request can delete 1,000 sessions."""
        sessions_num = 1_000
        _, _, chat_assistant_ids = add_chat_assistants
        session_ids = batch_add_sessions_with_chat_assistant(HttpApiAuth, chat_assistant_ids[0], sessions_num)
        res = delete_session_with_chat_assistants(HttpApiAuth, chat_assistant_ids[0], {"ids": session_ids})
        assert res["code"] == 0
        res = list_session_with_chat_assistants(HttpApiAuth, chat_assistant_ids[0])
        assert res["code"] == 0, res
        assert len(res["data"]) == 0

    @pytest.mark.parametrize(
        "payload, expected_code, expected_message, remaining",
        [
            pytest.param(None, 0, """TypeError("argument of type \'NoneType\' is not iterable")""", 0, marks=pytest.mark.skip),
            pytest.param({"ids": ["invalid_id"]}, 102, "The chat doesn't own the session invalid_id", 5, marks=pytest.mark.p3),
            pytest.param("not json", 100, """AttributeError("\'str\' object has no attribute \'get\'")""", 5, marks=pytest.mark.skip),
            pytest.param(lambda r: {"ids": r[:1]}, 0, "", 4, marks=pytest.mark.p3),
            pytest.param(lambda r: {"ids": r}, 0, "", 0, marks=pytest.mark.p1),
            pytest.param({"ids": []}, 0, "", 0, marks=pytest.mark.p3),
        ],
    )
    def test_basic_scenarios(
        self,
        HttpApiAuth,
        add_sessions_with_chat_assistant_func,
        payload,
        expected_code,
        expected_message,
        remaining,
    ):
        """Core delete scenarios: partial, full, empty and invalid payloads."""
        chat_assistant_id, session_ids = add_sessions_with_chat_assistant_func
        if callable(payload):
            payload = payload(session_ids)
        res = delete_session_with_chat_assistants(HttpApiAuth, chat_assistant_id, payload)
        assert res["code"] == expected_code
        if res["code"] != 0:
            assert res["message"] == expected_message
        res = list_session_with_chat_assistants(HttpApiAuth, chat_assistant_id)
        assert res["code"] == 0, res
        assert len(res["data"]) == remaining
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_http_api/test_session_management/test_delete_sessions_with_chat_assistant.py",
"license": "Apache License 2.0",
"lines": 154,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_http_api/test_session_management/test_list_sessions_with_chat_assistant.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from common import delete_chat_assistants, list_session_with_chat_assistants
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowHttpApiAuth
from utils import is_sorted
@pytest.mark.p1
class TestAuthorization:
    """Session listing must reject missing or invalid API credentials."""

    @pytest.mark.parametrize(
        "invalid_auth, expected_code, expected_message",
        [
            (None, 0, "`Authorization` can't be empty"),
            (RAGFlowHttpApiAuth(INVALID_API_TOKEN), 109, "Authentication error: API key is invalid!"),
        ],
    )
    def test_invalid_auth(self, invalid_auth, expected_code, expected_message):
        # The assistant id is irrelevant: the request fails at the auth layer.
        response = list_session_with_chat_assistants(invalid_auth, "chat_assistant_id")
        assert (response["code"], response["message"]) == (expected_code, expected_message)
class TestSessionsWithChatAssistantList:
    """Table-driven tests for listing sessions of a chat assistant.

    The list fixture provides 5 pre-created sessions; page/page_size/orderby/
    desc/name/id filters are exercised against that fixed population.
    """

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "params, expected_code, expected_page_size, expected_message",
        [
            ({"page": None, "page_size": 2}, 0, 2, ""),
            pytest.param({"page": 0, "page_size": 2}, 100, 0, "ValueError('Search does not support negative slicing.')", marks=pytest.mark.skip),
            ({"page": 2, "page_size": 2}, 0, 2, ""),
            ({"page": 3, "page_size": 2}, 0, 1, ""),
            ({"page": "3", "page_size": 2}, 0, 1, ""),
            pytest.param({"page": -1, "page_size": 2}, 100, 0, "ValueError('Search does not support negative slicing.')", marks=pytest.mark.skip),
            pytest.param({"page": "a", "page_size": 2}, 100, 0, """ValueError("invalid literal for int() with base 10: \'a\'")""", marks=pytest.mark.skip),
        ],
    )
    def test_page(self, HttpApiAuth, add_sessions_with_chat_assistant, params, expected_code, expected_page_size, expected_message):
        """Pagination by page number over 5 sessions (page 3 of size 2 holds 1)."""
        chat_assistant_id, _ = add_sessions_with_chat_assistant
        res = list_session_with_chat_assistants(HttpApiAuth, chat_assistant_id, params=params)
        assert res["code"] == expected_code
        if expected_code == 0:
            assert len(res["data"]) == expected_page_size
        else:
            assert res["message"] == expected_message

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "params, expected_code, expected_page_size, expected_message",
        [
            ({"page_size": None}, 0, 5, ""),
            ({"page_size": 0}, 0, 0, ""),
            ({"page_size": 1}, 0, 1, ""),
            ({"page_size": 6}, 0, 5, ""),
            ({"page_size": "1"}, 0, 1, ""),
            pytest.param({"page_size": -1}, 0, 5, "", marks=pytest.mark.skip),
            pytest.param({"page_size": "a"}, 100, 0, """ValueError("invalid literal for int() with base 10: \'a\'")""", marks=pytest.mark.skip),
        ],
    )
    def test_page_size(self, HttpApiAuth, add_sessions_with_chat_assistant, params, expected_code, expected_page_size, expected_message):
        """page_size caps the returned rows; sizes above 5 return all 5."""
        chat_assistant_id, _ = add_sessions_with_chat_assistant
        res = list_session_with_chat_assistants(HttpApiAuth, chat_assistant_id, params=params)
        assert res["code"] == expected_code
        if expected_code == 0:
            assert len(res["data"]) == expected_page_size
        else:
            assert res["message"] == expected_message

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "params, expected_code, assertions, expected_message",
        [
            ({"orderby": None}, 0, lambda r: (is_sorted(r["data"], "create_time", True)), ""),
            ({"orderby": "create_time"}, 0, lambda r: (is_sorted(r["data"], "create_time", True)), ""),
            ({"orderby": "update_time"}, 0, lambda r: (is_sorted(r["data"], "update_time", True)), ""),
            ({"orderby": "name", "desc": "False"}, 0, lambda r: (is_sorted(r["data"], "name", False)), ""),
            pytest.param({"orderby": "unknown"}, 102, 0, "orderby should be create_time or update_time", marks=pytest.mark.skip(reason="issues/")),
        ],
    )
    def test_orderby(
        self,
        HttpApiAuth,
        add_sessions_with_chat_assistant,
        params,
        expected_code,
        assertions,
        expected_message,
    ):
        """orderby selects the sort key; default ordering is create_time descending."""
        chat_assistant_id, _ = add_sessions_with_chat_assistant
        res = list_session_with_chat_assistants(HttpApiAuth, chat_assistant_id, params=params)
        assert res["code"] == expected_code
        if expected_code == 0:
            # The sortedness check is supplied per-case as a callable.
            if callable(assertions):
                assert assertions(res)
        else:
            assert res["message"] == expected_message

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "params, expected_code, assertions, expected_message",
        [
            ({"desc": None}, 0, lambda r: (is_sorted(r["data"], "create_time", True)), ""),
            ({"desc": "true"}, 0, lambda r: (is_sorted(r["data"], "create_time", True)), ""),
            ({"desc": "True"}, 0, lambda r: (is_sorted(r["data"], "create_time", True)), ""),
            ({"desc": True}, 0, lambda r: (is_sorted(r["data"], "create_time", True)), ""),
            ({"desc": "false"}, 0, lambda r: (is_sorted(r["data"], "create_time", False)), ""),
            ({"desc": "False"}, 0, lambda r: (is_sorted(r["data"], "create_time", False)), ""),
            ({"desc": False}, 0, lambda r: (is_sorted(r["data"], "create_time", False)), ""),
            ({"desc": "False", "orderby": "update_time"}, 0, lambda r: (is_sorted(r["data"], "update_time", False)), ""),
            pytest.param({"desc": "unknown"}, 102, 0, "desc should be true or false", marks=pytest.mark.skip(reason="issues/")),
        ],
    )
    def test_desc(
        self,
        HttpApiAuth,
        add_sessions_with_chat_assistant,
        params,
        expected_code,
        assertions,
        expected_message,
    ):
        """desc flips the sort direction; string and bool forms are both accepted."""
        chat_assistant_id, _ = add_sessions_with_chat_assistant
        res = list_session_with_chat_assistants(HttpApiAuth, chat_assistant_id, params=params)
        assert res["code"] == expected_code
        if expected_code == 0:
            if callable(assertions):
                assert assertions(res)
        else:
            assert res["message"] == expected_message

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "params, expected_code, expected_num, expected_message",
        [
            ({"name": None}, 0, 5, ""),
            ({"name": ""}, 0, 5, ""),
            ({"name": "session_with_chat_assistant_1"}, 0, 1, ""),
            ({"name": "unknown"}, 0, 0, ""),
        ],
    )
    def test_name(self, HttpApiAuth, add_sessions_with_chat_assistant, params, expected_code, expected_num, expected_message):
        """Filtering by name: empty/None return all; unknown names return nothing."""
        chat_assistant_id, _ = add_sessions_with_chat_assistant
        res = list_session_with_chat_assistants(HttpApiAuth, chat_assistant_id, params=params)
        assert res["code"] == expected_code
        if expected_code == 0:
            if params["name"] == "session_with_chat_assistant_1":
                assert res["data"][0]["name"] == params["name"]
            else:
                assert len(res["data"]) == expected_num
        else:
            assert res["message"] == expected_message

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "session_id, expected_code, expected_num, expected_message",
        [
            (None, 0, 5, ""),
            ("", 0, 5, ""),
            (lambda r: r[0], 0, 1, ""),
            ("unknown", 0, 0, "The chat doesn't exist"),
        ],
    )
    def test_id(self, HttpApiAuth, add_sessions_with_chat_assistant, session_id, expected_code, expected_num, expected_message):
        """Filtering by session id; a callable case picks a real id from the fixture."""
        chat_assistant_id, session_ids = add_sessions_with_chat_assistant
        if callable(session_id):
            params = {"id": session_id(session_ids)}
        else:
            params = {"id": session_id}
        res = list_session_with_chat_assistants(HttpApiAuth, chat_assistant_id, params=params)
        assert res["code"] == expected_code
        if expected_code == 0:
            if params["id"] == session_ids[0]:
                assert res["data"][0]["id"] == params["id"]
            else:
                assert len(res["data"]) == expected_num
        else:
            assert res["message"] == expected_message

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "session_id, name, expected_code, expected_num, expected_message",
        [
            (lambda r: r[0], "session_with_chat_assistant_0", 0, 1, ""),
            (lambda r: r[0], "session_with_chat_assistant_100", 0, 0, ""),
            (lambda r: r[0], "unknown", 0, 0, ""),
            ("id", "session_with_chat_assistant_0", 0, 0, ""),
        ],
    )
    def test_name_and_id(self, HttpApiAuth, add_sessions_with_chat_assistant, session_id, name, expected_code, expected_num, expected_message):
        """id and name filters combine with AND semantics: both must match."""
        chat_assistant_id, session_ids = add_sessions_with_chat_assistant
        if callable(session_id):
            params = {"id": session_id(session_ids), "name": name}
        else:
            params = {"id": session_id, "name": name}
        res = list_session_with_chat_assistants(HttpApiAuth, chat_assistant_id, params=params)
        assert res["code"] == expected_code
        if expected_code == 0:
            assert len(res["data"]) == expected_num
        else:
            assert res["message"] == expected_message

    @pytest.mark.p3
    def test_concurrent_list(self, HttpApiAuth, add_sessions_with_chat_assistant):
        """Parallel list requests all succeed."""
        count = 100
        chat_assistant_id, _ = add_sessions_with_chat_assistant
        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [executor.submit(list_session_with_chat_assistants, HttpApiAuth, chat_assistant_id) for i in range(count)]
        responses = list(as_completed(futures))
        assert len(responses) == count, responses
        assert all(future.result()["code"] == 0 for future in futures)

    @pytest.mark.p3
    def test_invalid_params(self, HttpApiAuth, add_sessions_with_chat_assistant):
        """Unknown query parameters are ignored; the full list is returned."""
        chat_assistant_id, _ = add_sessions_with_chat_assistant
        params = {"a": "b"}
        res = list_session_with_chat_assistants(HttpApiAuth, chat_assistant_id, params=params)
        assert res["code"] == 0
        assert len(res["data"]) == 5

    @pytest.mark.p3
    def test_list_chats_after_deleting_associated_chat_assistant(self, HttpApiAuth, add_sessions_with_chat_assistant):
        """Listing sessions of a deleted assistant fails with code 102."""
        chat_assistant_id, _ = add_sessions_with_chat_assistant
        res = delete_chat_assistants(HttpApiAuth, {"ids": [chat_assistant_id]})
        assert res["code"] == 0
        res = list_session_with_chat_assistants(HttpApiAuth, chat_assistant_id)
        assert res["code"] == 102
        assert "You don't own the assistant" in res["message"]
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_http_api/test_session_management/test_list_sessions_with_chat_assistant.py",
"license": "Apache License 2.0",
"lines": 233,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_http_api/test_session_management/test_update_session_with_chat_assistant.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from concurrent.futures import ThreadPoolExecutor, as_completed
from random import randint
import pytest
from common import delete_chat_assistants, list_session_with_chat_assistants, update_session_with_chat_assistant
from configs import INVALID_API_TOKEN, INVALID_ID_32, SESSION_WITH_CHAT_NAME_LIMIT
from libs.auth import RAGFlowHttpApiAuth
@pytest.mark.p1
class TestAuthorization:
    """Session update must reject missing or invalid API credentials."""

    @pytest.mark.parametrize(
        "invalid_auth, expected_code, expected_message",
        [
            (None, 0, "`Authorization` can't be empty"),
            (RAGFlowHttpApiAuth(INVALID_API_TOKEN), 109, "Authentication error: API key is invalid!"),
        ],
    )
    def test_invalid_auth(self, invalid_auth, expected_code, expected_message):
        # Neither id is ever looked up: the request dies at the auth layer.
        response = update_session_with_chat_assistant(invalid_auth, "chat_assistant_id", "session_id")
        assert (response["code"], response["message"]) == (expected_code, expected_message)
class TestSessionWithChatAssistantUpdate:
    @pytest.mark.parametrize(
        "payload, expected_code, expected_message",
        [
            pytest.param({"name": "valid_name"}, 0, "", marks=pytest.mark.p1),
            pytest.param({"name": "a" * (SESSION_WITH_CHAT_NAME_LIMIT + 1)}, 102, "", marks=pytest.mark.skip(reason="issues/")),
            pytest.param({"name": 1}, 100, "", marks=pytest.mark.skip(reason="issues/")),
            pytest.param({"name": ""}, 102, "`name` can not be empty.", marks=pytest.mark.p3),
            pytest.param({"name": "duplicated_name"}, 0, "", marks=pytest.mark.p3),
            pytest.param({"name": "case insensitive"}, 0, "", marks=pytest.mark.p3),
        ],
    )
    def test_name(self, HttpApiAuth, add_sessions_with_chat_assistant_func, payload, expected_code, expected_message):
        """Rename a session and verify name validation plus persistence.

        For the duplicate/case-insensitive cases a clashing rename is applied
        first so the second update must tolerate the existing name. Accepted
        renames are confirmed by re-listing the session.
        """
        chat_assistant_id, session_ids = add_sessions_with_chat_assistant_func
        if payload["name"] == "duplicated_name":
            update_session_with_chat_assistant(HttpApiAuth, chat_assistant_id, session_ids[0], payload)
        elif payload["name"] == "case insensitive":
            update_session_with_chat_assistant(HttpApiAuth, chat_assistant_id, session_ids[0], {"name": payload["name"].upper()})
        res = update_session_with_chat_assistant(HttpApiAuth, chat_assistant_id, session_ids[0], payload)
        assert res["code"] == expected_code, res
        if expected_code == 0:
            # Re-read to confirm the new name was persisted.
            res = list_session_with_chat_assistants(HttpApiAuth, chat_assistant_id, {"id": session_ids[0]})
            assert res["data"][0]["name"] == payload["name"]
        else:
            assert res["message"] == expected_message
@pytest.mark.p3
@pytest.mark.parametrize(
"chat_assistant_id, expected_code, expected_message",
[
(INVALID_ID_32, 102, "Session does not exist"),
],
)
def test_invalid_chat_assistant_id(self, HttpApiAuth, add_sessions_with_chat_assistant_func, chat_assistant_id, expected_code, expected_message):
_, session_ids = add_sessions_with_chat_assistant_func
res = update_session_with_chat_assistant(HttpApiAuth, chat_assistant_id, session_ids[0], {"name": "valid_name"})
assert res["code"] == expected_code
assert res["message"] == expected_message
@pytest.mark.p3
@pytest.mark.parametrize(
"session_id, expected_code, expected_message",
[
("", 100, "<MethodNotAllowed '405: Method Not Allowed'>"),
("invalid_session_id", 102, "Session does not exist"),
],
)
def test_invalid_session_id(self, HttpApiAuth, add_sessions_with_chat_assistant_func, session_id, expected_code, expected_message):
chat_assistant_id, _ = add_sessions_with_chat_assistant_func
res = update_session_with_chat_assistant(HttpApiAuth, chat_assistant_id, session_id, {"name": "valid_name"})
assert res["code"] == expected_code
assert res["message"] == expected_message
@pytest.mark.p3
def test_repeated_update_session(self, HttpApiAuth, add_sessions_with_chat_assistant_func):
chat_assistant_id, session_ids = add_sessions_with_chat_assistant_func
res = update_session_with_chat_assistant(HttpApiAuth, chat_assistant_id, session_ids[0], {"name": "valid_name_1"})
assert res["code"] == 0
res = update_session_with_chat_assistant(HttpApiAuth, chat_assistant_id, session_ids[0], {"name": "valid_name_2"})
assert res["code"] == 0
@pytest.mark.p3
@pytest.mark.parametrize(
"payload, expected_code, expected_message",
[
pytest.param({"unknown_key": "unknown_value"}, 100, "ValueError", marks=pytest.mark.skip),
({}, 0, ""),
pytest.param(None, 100, "TypeError", marks=pytest.mark.skip),
],
)
def test_invalid_params(self, HttpApiAuth, add_sessions_with_chat_assistant_func, payload, expected_code, expected_message):
chat_assistant_id, session_ids = add_sessions_with_chat_assistant_func
res = update_session_with_chat_assistant(HttpApiAuth, chat_assistant_id, session_ids[0], payload)
assert res["code"] == expected_code
if expected_code != 0:
assert expected_message in res["message"]
@pytest.mark.p3
def test_concurrent_update_session(self, HttpApiAuth, add_sessions_with_chat_assistant_func):
count = 50
chat_assistant_id, session_ids = add_sessions_with_chat_assistant_func
with ThreadPoolExecutor(max_workers=5) as executor:
futures = [
executor.submit(
update_session_with_chat_assistant,
HttpApiAuth,
chat_assistant_id,
session_ids[randint(0, 4)],
{"name": f"update session test {i}"},
)
for i in range(count)
]
responses = list(as_completed(futures))
assert len(responses) == count, responses
assert all(future.result()["code"] == 0 for future in futures)
@pytest.mark.p3
def test_update_session_to_deleted_chat_assistant(self, HttpApiAuth, add_sessions_with_chat_assistant_func):
chat_assistant_id, session_ids = add_sessions_with_chat_assistant_func
delete_chat_assistants(HttpApiAuth, {"ids": [chat_assistant_id]})
res = update_session_with_chat_assistant(HttpApiAuth, chat_assistant_id, session_ids[0], {"name": "valid_name"})
assert res["code"] == 102
assert res["message"] == "You do not own the session"
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_http_api/test_session_management/test_update_session_with_chat_assistant.py",
"license": "Apache License 2.0",
"lines": 135,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:api/apps/plugin_app.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from quart import Response
from api.apps import login_required
from api.utils.api_utils import get_json_result
from agent.plugin import GlobalPluginManager
@manager.route('/llm_tools', methods=['GET'])  # noqa: F821
@login_required
def llm_tools() -> Response:
    """Return the metadata of every LLM tool registered with the plugin manager."""
    metadata = [tool.get_metadata() for tool in GlobalPluginManager.get_llm_tools()]
    return get_json_result(data=metadata)
| {
"repo_id": "infiniflow/ragflow",
"file_path": "api/apps/plugin_app.py",
"license": "Apache License 2.0",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
infiniflow/ragflow:api/apps/auth/github.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from common.http_client import async_request, sync_request
from .oauth import OAuthClient, UserInfo
class GithubOAuthClient(OAuthClient):
    """OAuth client preconfigured for GitHub.

    Besides the standard user-info call, GitHub needs a second request to
    ``/user/emails`` to resolve the primary email address, because the
    ``email`` field of ``/user`` can be null for users with a private email.
    """

    def __init__(self, config):
        """
        Initialize the GithubOAuthClient, overlaying GitHub's fixed OAuth
        endpoints and scope onto the provider's configuration.
        """
        config.update({
            "authorization_url": "https://github.com/login/oauth/authorize",
            "token_url": "https://github.com/login/oauth/access_token",
            "userinfo_url": "https://api.github.com/user",
            "scope": "user:email"
        })
        super().__init__(config)

    @staticmethod
    def _extract_primary_email(email_info):
        """Return the primary email from a ``/user/emails`` payload.

        Raises a descriptive ValueError (re-wrapped by the callers) when no
        entry is flagged primary. The previous code subscripted the result of
        ``next(..., None)`` directly, which raised an opaque TypeError
        ("'NoneType' object is not subscriptable") in that case.
        """
        primary = next((entry for entry in email_info if entry.get("primary")), None)
        if primary is None:
            raise ValueError("no primary email address on GitHub account")
        return primary["email"]

    def fetch_user_info(self, access_token, **kwargs):
        """
        Fetch GitHub user info (synchronous).

        Raises:
            ValueError: if either HTTP request fails, returns an error status,
                or the account exposes no primary email.
        """
        user_info = {}
        try:
            headers = {"Authorization": f"Bearer {access_token}"}
            response = sync_request("GET", self.userinfo_url, headers=headers, timeout=self.http_request_timeout)
            response.raise_for_status()
            user_info.update(response.json())
            email_response = sync_request(
                "GET", self.userinfo_url + "/emails", headers=headers, timeout=self.http_request_timeout
            )
            email_response.raise_for_status()
            user_info["email"] = self._extract_primary_email(email_response.json())
            return self.normalize_user_info(user_info)
        except Exception as e:
            raise ValueError(f"Failed to fetch github user info: {e}")

    async def async_fetch_user_info(self, access_token, **kwargs):
        """Async variant of fetch_user_info using httpx."""
        user_info = {}
        headers = {"Authorization": f"Bearer {access_token}"}
        try:
            response = await async_request(
                "GET",
                self.userinfo_url,
                headers=headers,
                timeout=self.http_request_timeout,
            )
            response.raise_for_status()
            user_info.update(response.json())
            email_response = await async_request(
                "GET",
                self.userinfo_url + "/emails",
                headers=headers,
                timeout=self.http_request_timeout,
            )
            email_response.raise_for_status()
            user_info["email"] = self._extract_primary_email(email_response.json())
            return self.normalize_user_info(user_info)
        except Exception as e:
            raise ValueError(f"Failed to fetch github user info: {e}")

    def normalize_user_info(self, user_info):
        """Map GitHub's user payload onto the provider-agnostic UserInfo record.

        Uses ``or`` fallbacks rather than dict.get() defaults: GitHub returns
        explicit nulls (e.g. ``"name": null``), and dict.get() only applies its
        default when the key is absent, so the old code could produce a None
        nickname/avatar_url.
        """
        email = user_info.get("email")
        username = user_info.get("login") or str(email).split("@")[0]
        nickname = user_info.get("name") or username
        avatar_url = user_info.get("avatar_url") or ""
        return UserInfo(email=email, username=username, nickname=nickname, avatar_url=avatar_url)
| {
"repo_id": "infiniflow/ragflow",
"file_path": "api/apps/auth/github.py",
"license": "Apache License 2.0",
"lines": 79,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:api/utils/validation_utils.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import Counter
import string
from typing import Annotated, Any, Literal
from uuid import UUID
from quart import Request
from pydantic import (
BaseModel,
ConfigDict,
Field,
StringConstraints,
ValidationError,
field_validator,
model_validator,
)
from pydantic_core import PydanticCustomError
from werkzeug.exceptions import BadRequest, UnsupportedMediaType
from api.constants import DATASET_NAME_LIMIT
async def validate_and_parse_json_request(request: Request, validator: type[BaseModel], *, extras: dict[str, Any] | None = None, exclude_unset: bool = False) -> tuple[dict[str, Any] | None, str | None]:
    """Validate a JSON request body against *validator*.

    Validation pipeline: content-type check, JSON parsing, payload structure
    check, then Pydantic model validation. When *extras* is given, its entries
    are merged into the payload before validation and stripped from the parsed
    result afterwards, so server-side values can be validated together with
    client input without leaking back to the caller.

    Args:
        request: Incoming request carrying the JSON payload.
        validator: Pydantic model class describing the expected payload.
        extras: Extra key/value pairs to validate but exclude from the output.
        exclude_unset: Forwarded to ``model_dump`` — drop fields that were not
            explicitly set by the client.

    Returns:
        ``(parsed_dict, None)`` on success, or ``(None, error_message)`` when
        any validation stage fails.
    """
    if request.mimetype != "application/json":
        return None, f"Unsupported content type: Expected application/json, got {request.content_type}"

    try:
        body = await request.get_json() or {}
    except UnsupportedMediaType:
        return None, f"Unsupported content type: Expected application/json, got {request.content_type}"
    except BadRequest:
        return None, "Malformed JSON syntax: Missing commas/brackets or invalid encoding"

    # A syntactically valid JSON scalar/array is still not an acceptable payload.
    if not isinstance(body, dict):
        return None, f"Invalid request payload: expected object, got {type(body).__name__}"

    if extras is not None:
        body.update(extras)
    try:
        model = validator(**body)
    except ValidationError as exc:
        return None, format_validation_error_message(exc)

    parsed = model.model_dump(by_alias=True, exclude_unset=exclude_unset)
    if extras is not None:
        parsed = {key: value for key, value in parsed.items() if key not in extras}
    return parsed, None
def validate_and_parse_request_args(request: Request, validator: type[BaseModel], *, extras: dict[str, Any] | None = None) -> tuple[dict[str, Any] | None, str | None]:
    """Validate a request's query parameters against a Pydantic model.

    Steps: extract the flat query-string dict, merge in *extras* (if any),
    validate with *validator*, then strip the *extras* keys from the parsed
    output. The original ``request.args`` is never mutated — ``to_dict``
    produces a copy.

    Args:
        request: Request object exposing ``args`` (query parameters).
        validator: Pydantic model class to validate against.
        extras: Additional values to include in validation but exclude from
            the returned dict.

    Returns:
        ``(parsed_args, None)`` on success, or ``(None, error_message)`` when
        validation fails.
    """
    raw_args = request.args.to_dict(flat=True)
    if extras is not None:
        raw_args.update(extras)
    try:
        model = validator(**raw_args)
    except ValidationError as exc:
        return None, format_validation_error_message(exc)
    parsed = model.model_dump()
    if extras is not None:
        parsed = {key: value for key, value in parsed.items() if key not in extras}
    return parsed, None
def format_validation_error_message(e: ValidationError) -> str:
    """Render a pydantic ValidationError as one human-readable line per error.

    Each line contains the dotted field path, the error message, and the
    offending input value truncated to 128 characters.

    Args:
        e: The validation error to format.

    Returns:
        Newline-joined lines of the form
        ``Field: <path> - Message: <msg> - Value: <value>``.
    """
    lines = []
    for err in e.errors():
        location = ".".join(str(part) for part in err["loc"])
        value_repr = str(err["input"])
        # Cap very long inputs so log lines stay readable.
        if len(value_repr) > 128:
            value_repr = value_repr[:125] + "..."
        lines.append(f"Field: <{location}> - Message: <{err['msg']}> - Value: <{value_repr}>")
    return "\n".join(lines)
def normalize_str(v: Any) -> Any:
    """Strip and lowercase *v* when it is a string; return anything else untouched.

    Safe for mixed-type pipelines: non-string values (including None, numbers
    and containers) pass through unchanged, so it can be applied blindly to
    user input before case-insensitive comparison.

    Examples:
        >>> normalize_str("  ReadOnly ")
        'readonly'
        >>> normalize_str(42)
        42
    """
    return v.strip().lower() if isinstance(v, str) else v
def validate_uuid1_hex(v: Any) -> str:
    """
    Validate input as a UUID version 1 and return its 32-character hex string.

    Accepts either a UUID object or a UUID-formatted string; hyphens are
    removed in the output.

    Args:
        v (Any): UUID object or UUID string
            (e.g. "00000000-0000-1000-8000-000000000000").

    Returns:
        str: 32-character lowercase hexadecimal string without hyphens.

    Raises:
        PydanticCustomError: code "invalid_UUID1_format" when:
            - the value cannot be parsed as a UUID ("Invalid UUID1 format")
            - the value parses to a UUID of another version
              ("Must be a UUID1 format")

    Examples:
        >>> validate_uuid1_hex("00000000-0000-1000-8000-000000000000")
        '00000000000010008000000000000000'
        >>> validate_uuid1_hex("not-a-uuid")  # raises PydanticCustomError
        >>> validate_uuid1_hex(12345)        # raises PydanticCustomError
        >>> validate_uuid1_hex(UUID(int=0))  # version != 1, raises PydanticCustomError

    Notes:
        - The old docstring's "valid" example ("550e8400-e29b-41d4-...") was a
          version-4 UUID and would actually be rejected by this function.
    """
    # Parse inside the try, but check the version OUTSIDE it:
    # PydanticCustomError subclasses ValueError, so the specific
    # "Must be a UUID1 format" error previously raised inside the try block was
    # swallowed by the except clause below and replaced with the generic
    # parse-failure message, making the version-specific message unreachable.
    try:
        uuid_obj = UUID(v) if isinstance(v, str) else v
        version = uuid_obj.version
        hex_value = uuid_obj.hex
    except (AttributeError, ValueError, TypeError):
        raise PydanticCustomError("invalid_UUID1_format", "Invalid UUID1 format")
    if version != 1:
        raise PydanticCustomError("invalid_UUID1_format", "Must be a UUID1 format")
    return hex_value
class Base(BaseModel):
    """Shared base model: unknown fields are rejected and types are validated strictly."""

    model_config = ConfigDict(extra="forbid", strict=True)
class RaptorConfig(Base):
    """RAPTOR recursive-summarization settings within a dataset's parser config.

    Field semantics are interpreted by the parsing pipeline that consumes
    parser_config; only the bounds declared here are enforced at this layer.
    """

    use_raptor: Annotated[bool, Field(default=False)]  # master switch for the RAPTOR stage
    prompt: Annotated[
        str,
        StringConstraints(strip_whitespace=True, min_length=1),
        Field(
            default="Please summarize the following paragraphs. Be careful with the numbers, do not make things up. Paragraphs as following:\n {cluster_content}\nThe above is the content you need to summarize."
        ),
    ]
    max_token: Annotated[int, Field(default=256, ge=1, le=2048)]
    threshold: Annotated[float, Field(default=0.1, ge=0.0, le=1.0)]
    max_cluster: Annotated[int, Field(default=64, ge=1, le=1024)]
    random_seed: Annotated[int, Field(default=0, ge=0)]
    auto_disable_for_structured_data: Annotated[bool, Field(default=True)]
class GraphragConfig(Base):
    """GraphRAG extraction settings within a dataset's parser config."""

    use_graphrag: Annotated[bool, Field(default=False)]  # master switch for the GraphRAG stage
    entity_types: Annotated[list[str], Field(default_factory=lambda: ["organization", "person", "geo", "event", "category"])]
    method: Annotated[Literal["light", "general"], Field(default="light")]
    community: Annotated[bool, Field(default=False)]
    resolution: Annotated[bool, Field(default=False)]
class AutoMetadataField(Base):
    """Schema for a single auto-metadata field configuration."""

    name: Annotated[str, StringConstraints(strip_whitespace=True, min_length=1, max_length=255), Field(...)]
    type: Annotated[Literal["string", "list", "time"], Field(...)]
    description: Annotated[str | None, Field(default=None, max_length=65535)]
    examples: Annotated[list[str] | None, Field(default=None)]
    # NOTE(review): presumably restricts generated values to `examples` when
    # True — confirm against the consumer of this config.
    restrict_values: Annotated[bool, Field(default=False)]
class AutoMetadataConfig(Base):
    """Top-level auto-metadata configuration attached to a dataset."""

    enabled: Annotated[bool, Field(default=True)]
    fields: Annotated[list[AutoMetadataField], Field(default_factory=list)]  # empty list means no auto-metadata fields configured
class ParserConfig(Base):
    """Per-dataset parsing/chunking options, including the optional RAPTOR and
    GraphRAG stages. Serialized as JSON and size-limited by
    CreateDatasetReq.validate_parser_config_json_length."""

    auto_keywords: Annotated[int, Field(default=0, ge=0, le=32)]
    auto_questions: Annotated[int, Field(default=0, ge=0, le=10)]
    chunk_token_num: Annotated[int, Field(default=512, ge=1, le=2048)]
    delimiter: Annotated[str, Field(default=r"\n", min_length=1)]  # raw string: the literal backslash-n, not a newline
    graphrag: Annotated[GraphragConfig, Field(default_factory=lambda: GraphragConfig(use_graphrag=False))]
    html4excel: Annotated[bool, Field(default=False)]
    layout_recognize: Annotated[str, Field(default="DeepDOC")]
    raptor: Annotated[RaptorConfig, Field(default_factory=lambda: RaptorConfig(use_raptor=False))]
    tag_kb_ids: Annotated[list[str], Field(default_factory=list)]
    topn_tags: Annotated[int, Field(default=1, ge=1, le=10)]
    filename_embd_weight: Annotated[float | None, Field(default=0.1, ge=0.0, le=1.0)]
    task_page_size: Annotated[int | None, Field(default=None, ge=1)]
    pages: Annotated[list[list[int]] | None, Field(default=None)]
class CreateDatasetReq(Base):
    """Request schema for dataset creation.

    Two mutually exclusive configuration modes, enforced by
    `validate_parser_dependency`:
      - classic chunking: `chunk_method` (serialized as `parser_id`)
      - ingestion pipeline: `parse_type` and `pipeline_id` supplied together
    When none of the three is supplied, `chunk_method` defaults to "naive".
    """

    name: Annotated[str, StringConstraints(strip_whitespace=True, min_length=1, max_length=DATASET_NAME_LIMIT), Field(...)]
    avatar: Annotated[str | None, Field(default=None, max_length=65535)]  # base64 data URI, checked by validate_avatar_base64
    description: Annotated[str | None, Field(default=None, max_length=65535)]
    embedding_model: Annotated[str | None, Field(default=None, max_length=255, serialization_alias="embd_id")]  # "<model_name>@<provider>"
    permission: Annotated[Literal["me", "team"], Field(default="me", min_length=1, max_length=16)]
    chunk_method: Annotated[str | None, Field(default=None, serialization_alias="parser_id")]
    parse_type: Annotated[int | None, Field(default=None, ge=0, le=64)]
    pipeline_id: Annotated[str | None, Field(default=None, min_length=32, max_length=32, serialization_alias="pipeline_id")]
    parser_config: Annotated[ParserConfig | None, Field(default=None)]
    auto_metadata_config: Annotated[AutoMetadataConfig | None, Field(default=None)]

    @field_validator("avatar", mode="after")
    @classmethod
    def validate_avatar_base64(cls, v: str | None) -> str | None:
        """
        Validates Base64-encoded avatar string format and MIME type compliance.

        Implements a three-stage validation workflow:
        1. MIME prefix existence check
        2. MIME type format validation
        3. Supported type verification

        Args:
            v (str): Raw avatar field value

        Returns:
            str: Validated Base64 string

        Raises:
            PydanticCustomError: For structural errors in these cases:
                - Missing MIME prefix header
                - Invalid MIME prefix format
                - Unsupported image MIME type

        Example:
            ```python
            # Valid case
            CreateDatasetReq(avatar="data:image/png;base64,iVBORw0KGg...")

            # Invalid cases
            CreateDatasetReq(avatar="image/jpeg;base64,...")  # Missing 'data:' prefix
            CreateDatasetReq(avatar="data:video/mp4;base64,...")  # Unsupported MIME type
            ```
        """
        if v is None:
            return v
        if "," in v:
            prefix, _ = v.split(",", 1)
            if not prefix.startswith("data:"):
                raise PydanticCustomError("format_invalid", "Invalid MIME prefix format. Must start with 'data:'")
            mime_type = prefix[5:].split(";")[0]
            supported_mime_types = ["image/jpeg", "image/png"]
            if mime_type not in supported_mime_types:
                raise PydanticCustomError("format_invalid", "Unsupported MIME type. Allowed: {supported_mime_types}", {"supported_mime_types": supported_mime_types})
            return v
        else:
            raise PydanticCustomError("format_invalid", "Missing MIME prefix. Expected format: data:<mime>;base64,<data>")

    @field_validator("embedding_model", mode="before")
    @classmethod
    def normalize_embedding_model(cls, v: Any) -> Any:
        """Normalize embedding model string by stripping whitespace"""
        if isinstance(v, str):
            return v.strip()
        return v

    @field_validator("embedding_model", mode="after")
    @classmethod
    def validate_embedding_model(cls, v: str | None) -> str | None:
        """
        Validates embedding model identifier format compliance.

        Validation pipeline:
        1. Structural format verification
        2. Component non-empty check
        3. Value normalization

        Args:
            v (str): Raw model identifier

        Returns:
            str: Validated <model_name>@<provider> format

        Raises:
            PydanticCustomError: For these violations:
                - Missing @ separator
                - Empty model_name/provider
                - Invalid component structure

        Examples:
            Valid: "text-embedding-3-large@openai"
            Invalid: "invalid_model" (no @)
            Invalid: "@openai" (empty model_name)
            Invalid: "text-embedding-3-large@" (empty provider)
        """
        if isinstance(v, str):
            if "@" not in v:
                raise PydanticCustomError("format_invalid", "Embedding model identifier must follow <model_name>@<provider> format")
            components = v.split("@", 1)
            if len(components) != 2 or not all(components):
                raise PydanticCustomError("format_invalid", "Both model_name and provider must be non-empty strings")
            model_name, provider = components
            if not model_name.strip() or not provider.strip():
                raise PydanticCustomError("format_invalid", "Model name and provider cannot be whitespace-only strings")
        return v

    # @field_validator("permission", mode="before")
    # @classmethod
    # def normalize_permission(cls, v: Any) -> Any:
    #     return normalize_str(v)

    @field_validator("parser_config", mode="before")
    @classmethod
    def normalize_empty_parser_config(cls, v: Any) -> Any:
        """
        Normalizes empty parser configuration by converting empty dictionaries to None.

        This validator ensures consistent handling of empty parser configurations across
        the application by converting empty dicts to None values.

        Args:
            v (Any): Raw input value for the parser config field

        Returns:
            Any: Returns None if input is an empty dict, otherwise returns the original value

        Example:
            >>> normalize_empty_parser_config({})
            None

            >>> normalize_empty_parser_config({"key": "value"})
            {"key": "value"}
        """
        if v == {}:
            return None
        return v

    @field_validator("parser_config", mode="after")
    @classmethod
    def validate_parser_config_json_length(cls, v: ParserConfig | None) -> ParserConfig | None:
        """
        Validates serialized JSON length constraints for parser configuration.

        Implements a three-stage validation workflow:
        1. Null check - bypass validation for empty configurations
        2. Model serialization - convert Pydantic model to JSON string
        3. Size verification - enforce maximum allowed payload size

        Args:
            v (ParserConfig | None): Raw parser configuration object

        Returns:
            ParserConfig | None: Validated configuration object

        Raises:
            PydanticCustomError: When serialized JSON exceeds 65,535 characters
        """
        if v is None:
            return None
        if (json_str := v.model_dump_json()) and len(json_str) > 65535:
            raise PydanticCustomError("string_too_long", "Parser config exceeds size limit (max 65,535 characters). Current size: {actual}", {"actual": len(json_str)})
        return v

    @field_validator("pipeline_id", mode="after")
    @classmethod
    def validate_pipeline_id(cls, v: str | None) -> str | None:
        """Validate pipeline_id as 32-char lowercase hex string if provided.

        Rules:
        - None or empty string: treat as None (not set)
        - Must be exactly length 32
        - Must contain only hex digits (0-9a-fA-F); normalized to lowercase
        """
        if v is None:
            return None
        if v == "":
            return None
        if len(v) != 32:
            raise PydanticCustomError("format_invalid", "pipeline_id must be 32 hex characters")
        if any(ch not in string.hexdigits for ch in v):
            raise PydanticCustomError("format_invalid", "pipeline_id must be hexadecimal")
        return v.lower()

    @model_validator(mode="after")
    def validate_parser_dependency(self) -> "CreateDatasetReq":
        """
        Mixed conditional validation:
        - If parser_id is omitted (field not set):
            * If both parse_type and pipeline_id are omitted → default chunk_method = "naive"
            * If both parse_type and pipeline_id are provided → allow ingestion pipeline mode
        - If parser_id is provided (valid enum) → parse_type and pipeline_id must be None (disallow mixed usage)

        Raises:
            PydanticCustomError with code 'dependency_error' on violation.
        """
        # chunk_method truly omitted: value is the None default AND the field
        # was never supplied by the client (model_fields_set distinguishes an
        # explicit None from an absent field).
        if self.chunk_method is None and "chunk_method" not in self.model_fields_set:
            # All three absent → default naive
            if self.parse_type is None and self.pipeline_id is None:
                # object.__setattr__ bypasses pydantic's own __setattr__, so the
                # default is applied without triggering re-validation.
                object.__setattr__(self, "chunk_method", "naive")
                return self
            # parser_id omitted: require BOTH parse_type & pipeline_id present (no partial allowed)
            if self.parse_type is None or self.pipeline_id is None:
                missing = []
                if self.parse_type is None:
                    missing.append("parse_type")
                if self.pipeline_id is None:
                    missing.append("pipeline_id")
                raise PydanticCustomError(
                    "dependency_error",
                    "parser_id omitted → required fields missing: {fields}",
                    {"fields": ", ".join(missing)},
                )
            # Both provided → allow pipeline mode
            return self

        # parser_id provided (valid): MUST NOT have parse_type or pipeline_id
        if isinstance(self.chunk_method, str):
            if self.parse_type is not None or self.pipeline_id is not None:
                invalid = []
                if self.parse_type is not None:
                    invalid.append("parse_type")
                if self.pipeline_id is not None:
                    invalid.append("pipeline_id")
                raise PydanticCustomError(
                    "dependency_error",
                    "parser_id provided → disallowed fields present: {fields}",
                    {"fields": ", ".join(invalid)},
                )
        return self

    @field_validator("chunk_method", mode="wrap")
    @classmethod
    def validate_chunk_method(cls, v: Any, handler) -> Any:
        """Wrap validation to unify error messages, including type errors (e.g. list)."""
        allowed = {"naive", "book", "email", "laws", "manual", "one", "paper", "picture", "presentation", "qa", "table", "tag"}
        error_msg = "Input should be 'naive', 'book', 'email', 'laws', 'manual', 'one', 'paper', 'picture', 'presentation', 'qa', 'table' or 'tag'"
        # Omitted field: handler won't be invoked (wrap still gets value); None treated as explicit invalid
        if v is None:
            raise PydanticCustomError("literal_error", error_msg)
        try:
            # Run inner validation (type checking)
            result = handler(v)
        except Exception:
            raise PydanticCustomError("literal_error", error_msg)
        # After handler, enforce enumeration
        if not isinstance(result, str) or result == "" or result not in allowed:
            raise PydanticCustomError("literal_error", error_msg)
        return result
class UpdateDatasetReq(CreateDatasetReq):
    """Dataset-update payload: the creation schema plus the target dataset id
    and pagerank; `name` becomes optional (empty default) for updates."""

    dataset_id: Annotated[str, Field(...)]
    name: Annotated[str, StringConstraints(strip_whitespace=True, min_length=1, max_length=DATASET_NAME_LIMIT), Field(default="")]
    pagerank: Annotated[int, Field(default=0, ge=0, le=100)]

    @field_validator("dataset_id", mode="before")
    @classmethod
    def validate_dataset_id(cls, v: Any) -> str:
        # Dataset ids are UUID1 values; normalize to 32-char hex before any other checks.
        return validate_uuid1_hex(v)
class DeleteReq(Base):
    """Generic bulk-delete payload: an explicit list of ids, or None.

    The field is required but nullable; None semantics (e.g. "delete all")
    are defined by the endpoint consuming this schema.
    """

    ids: Annotated[list[str] | None, Field(...)]

    @field_validator("ids", mode="after")
    @classmethod
    def validate_ids(cls, v_list: list[str] | None) -> list[str] | None:
        """
        Validate and normalize a list of UUID strings, with None pass-through.

        Rules:
        - None input returns None
        - Empty list returns empty list
        - Every entry must be a valid UUID version 1; entries are normalized
          to 32-character lowercase hex (hyphens removed), order preserved
        - Duplicate ids (after normalization) are rejected

        Args:
            v_list (list[str] | None): List of UUID strings, or None.

        Returns:
            list[str] | None: None, or the normalized hex strings.

        Raises:
            PydanticCustomError:
                - "invalid_UUID1_format" when any entry fails UUIDv1 validation
                - "duplicate_uuids" when the same id appears more than once

        Examples:
            >>> validate_ids(None)
            >>> validate_ids([])
            []
            >>> validate_ids(["00000000-0000-1000-8000-000000000000"])
            ['00000000000010008000000000000000']

        Notes:
            - The old docstring's "valid" example ("550e8400-e29b-41d4-...")
              was a version-4 UUID and would actually be rejected.
        """
        if v_list is None:
            return None
        # validate_uuid1_hex already raises PydanticCustomError itself, so the
        # former `try/except PydanticCustomError as e: raise e` wrapper was a
        # no-op and has been removed.
        ids_list = [validate_uuid1_hex(v) for v in v_list]
        duplicates = [item for item, count in Counter(ids_list).items() if count > 1]
        if duplicates:
            duplicates_str = ", ".join(duplicates)
            raise PydanticCustomError("duplicate_uuids", "Duplicate ids: '{duplicate_ids}'", {"duplicate_ids": duplicates_str})
        return ids_list
class DeleteDatasetReq(DeleteReq): ...
class BaseListReq(BaseModel):
model_config = ConfigDict(extra="forbid")
id: Annotated[str | None, Field(default=None)]
name: Annotated[str | None, Field(default=None)]
page: Annotated[int, Field(default=1, ge=1)]
page_size: Annotated[int, Field(default=30, ge=1)]
orderby: Annotated[Literal["create_time", "update_time"], Field(default="create_time")]
desc: Annotated[bool, Field(default=True)]
@field_validator("id", mode="before")
@classmethod
def validate_id(cls, v: Any) -> str:
return validate_uuid1_hex(v)
class ListDatasetReq(BaseListReq): ...
| {
"repo_id": "infiniflow/ragflow",
"file_path": "api/utils/validation_utils.py",
"license": "Apache License 2.0",
"lines": 607,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
jax-ml/jax:jaxlib/mlir/_mlir_libs/normalize_stubs.py | # Copyright 2026 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Normalizes generated .pyi stubs."""
import argparse
import re
def normalize(content: str, *, jaxlib_build: bool = False) -> str:
if jaxlib_build:
# If we are building jaxlib, normalize `mlir.ir` to `jaxlib.mlir.ir`.
content = re.sub(r"\bmlir\.ir", "jaxlib.mlir.ir", content)
# Replace internal module paths with public ones.
content = re.sub(r"mlir\._mlir_libs\._mlir\.ir", "mlir.ir", content)
# Rewrite `import mlir.ir` to `from mlir import ir`.
content = re.sub(
r"import (jaxlib\.)?mlir\.ir", r"from \1mlir import ir", content
)
# Deduplicate consecutive `from mlir import ir` lines.
content = re.sub(
r"(^\s*from (?:jaxlib\.)?mlir import ir\s*\n)"
r"(?:\s*from (?:jaxlib\.)?mlir import ir\s*\n)+",
r"\1",
content,
flags=re.MULTILINE,
)
# Shorten `mlir.ir.<NAME>` to `ir.<NAME>`.
content = re.sub(r"mlir\.ir\.([a-zA-Z0-9_]+)", r"ir.\1", content)
# Add `cls` to `@classmethod def get(*args, **kwargs)` generated by
# old-style MLIR attributes.
content = re.sub(
r"(@classmethod\n(\s+)def get\()\*args",
r"\1cls, *args",
content,
)
return content
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("files", nargs="+", metavar="FILE")
parser.add_argument(
"--jaxlib-build",
action="store_true",
help="Normalize `mlir.ir` to `jaxlib.mlir.ir`.",
)
args = parser.parse_args()
for path in args.files:
with open(path) as f:
content = f.read()
content = normalize(content, jaxlib_build=args.jaxlib_build)
with open(path, "w") as f:
f.write(content)
if __name__ == "__main__":
main()
| {
"repo_id": "jax-ml/jax",
"file_path": "jaxlib/mlir/_mlir_libs/normalize_stubs.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:jaxlib/mlir/_mlir_libs/stubgen_runner.py | # Copyright 2026 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates .pyi stubs for nanobind extensions using nanobind's stubgen."""
import argparse
import importlib.util
import sys
from python.runfiles import Runfiles
# Recreate the MLIR namespace, because dialects might reference it.
from jaxlib.mlir._mlir_libs import _mlir
sys.modules["mlir"] = _mlir
sys.modules["mlir.ir"] = _mlir.ir
sys.modules["mlir.passmanager"] = _mlir.passmanager
sys.modules["mlir.rewrite"] = _mlir.rewrite
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"-m", "--module", required=True, help="Module name to generate stubs for"
)
parser.add_argument("-O", "--output", required=True, help="Output directory")
args = parser.parse_args()
runfiles = Runfiles.Create()
stubgen_path = runfiles.Rlocation("nanobind/src/stubgen.py")
spec = importlib.util.spec_from_file_location("stubgen", stubgen_path)
stubgen = importlib.util.module_from_spec(spec)
sys.modules["stubgen"] = stubgen
spec.loader.exec_module(stubgen)
stubgen.main(["-m", args.module, "-r", "-O", args.output])
if __name__ == "__main__":
main()
| {
"repo_id": "jax-ml/jax",
"file_path": "jaxlib/mlir/_mlir_libs/stubgen_runner.py",
"license": "Apache License 2.0",
"lines": 40,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:jax/_src/interpreters/remat.py | # Copyright 2026 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from functools import partial
from typing import Callable
from jax._src import core
from jax._src import api_util
from jax._src.util import safe_map, safe_zip, unzip2, weakref_lru_cache
from jax._src.interpreters import partial_eval as pe
from jax._src.tree_util import (
FlatTree, Partial, tree_unflatten, tree_leaves_checked)
from jax._src import source_info_util
map = safe_map
zip = safe_zip
# TODO
# [ ] static_argnums and static_argnames (via FlatTree)
# [ ] allow NotAvailable sentinels
# [ ] DCE pass
# [ ] primal-output-to-residual forwarding
def remat_transform(policy, f, *args):
dbg = api_util.debug_info("remat", f, args, {})
with core.take_current_trace() as parent_trace:
jaxpr_trace = pe.DynamicJaxprTrace(None)
trace = RematTrace(parent_trace, jaxpr_trace, core.TraceTag(), policy)
args_ft = FlatTree.flatten_static_argnums_argnames(args, {}, (), ())
new_arg = lambda x: RematTracer(trace, x, jaxpr_trace.new_arg(core.typeof(x), None)) # noqa F821 # type: ignore
in_tracers = args_ft.map(new_arg)
with core.set_current_trace(trace):
args, kwargs = in_tracers.unflatten()
ans_pytree = f(*args, **kwargs)
dbg = dbg.set_result_paths(ans_pytree)
ans_ft = FlatTree.flatten(ans_pytree)
del ans_pytree, args, kwargs
out_ft, out_tracer_ft = ans_ft.map(trace.to_val_tracer_pair).unzip2()
jaxpr, rs = jaxpr_trace.to_jaxpr(list(out_tracer_ft), dbg, source_info_util.current())
in_tree, out_tree = args_ft.tree, out_ft.tree
del trace, in_tracers, out_tracer_ft
def f_rem(rs, *args):
args_flat = tree_leaves_checked(in_tree, (args, {}))
out_flat = core.eval_jaxpr(jaxpr, rs, *args_flat)
return tree_unflatten(out_tree, out_flat)
return out_ft.unflatten(), Partial(f_rem, map(reduce_precision, rs))
class RematTracer(core.Tracer):
def __init__(self, trace, x, jaxpr_tracer):
self._trace = trace # type: ignore
self.val = x
self.tracer = jaxpr_tracer
@property
def aval(self):
return core.typeof(self.val)
class RematTrace(core.Trace):
def __init__(self, parent_trace, jaxpr_trace, tag, policy):
super().__init__()
self.parent_trace = parent_trace
self.jaxpr_trace = jaxpr_trace
self.tag = tag
self.policy = policy
self.requires_low = False
def to_val_tracer_pair(self, x):
if isinstance(x, RematTracer) and x._trace.tag is self.tag:
return (x.val, x.tracer)
else:
raise NotImplementedError # TODO(mattjj)
def process_primitive(self, prim, tracers, params, /):
in_vals, in_vals2 = unzip2(map(self.to_val_tracer_pair, tracers))
if prim in rules:
with core.set_current_trace(self.parent_trace):
out_primal, rem = rules[prim](self.policy, *in_vals, **params)
with core.set_current_trace(self.jaxpr_trace):
out_primal2 = rem(*in_vals2)
else:
with core.set_current_trace(self.parent_trace):
out_primal = prim.bind(*in_vals, **params)
with core.set_current_trace(self.jaxpr_trace):
out_primal2 = prim.bind(*in_vals2, **params)
if prim.multiple_results:
return map(partial(RematTracer, self), out_primal, out_primal2)
else:
return RematTracer(self, out_primal, out_primal2)
def reduce_precision(x):
if (h := reduce_precision_handlers.get(type(t := core.typeof(x)))):
return h(t, x)
return x
rules: dict[core.Primitive, Callable] = {}
reduce_precision_handlers: dict[type, Callable] = {}
def remat_jaxpr(jaxpr, policy):
return _remat_jaxpr(jaxpr, frozenset(policy))
@weakref_lru_cache
def _remat_jaxpr(jaxpr, policy):
dbg = jaxpr.jaxpr.debug_info
fwd_trace = pe.DynamicJaxprTrace(dbg)
rem_trace = pe.DynamicJaxprTrace(dbg, auto_dce=True)
tag = core.TraceTag()
trace = RematTrace(fwd_trace, rem_trace, tag, policy)
rem_trace.tag = tag
src = source_info_util.current()
def new_arg(a):
return RematTracer(trace, fwd_trace.new_arg(a, src), rem_trace.new_arg(a, src)) # type: ignore # noqa: F821
tracers = map(new_arg, jaxpr.in_aval_qdds)
with core.set_current_trace(trace, check_leaks=True):
ans = core.eval_jaxpr(jaxpr.jaxpr, jaxpr.consts, *tracers)
out_primals, out_rem = unzip2(map(trace.to_val_tracer_pair, ans))
del trace, ans, new_arg, tracers
rem_jaxpr_, rem_consts = rem_trace.to_jaxpr(out_rem, dbg.with_unknown_names(), src)
rem_jaxpr = pe.close_jaxpr(pe.convert_constvars_jaxpr(rem_jaxpr_))
rem_trace.invalidate()
rem_consts = map(partial(fwd_trace.to_jaxpr_tracer, source_info=src), rem_consts)
fwd_jaxpr_, fwd_consts = fwd_trace.to_jaxpr(
[*out_primals, *rem_consts], dbg.with_unknown_names(), src)
fwd_trace.invalidate()
fwd_jaxpr = core.ClosedJaxpr(fwd_jaxpr_, fwd_consts)
return fwd_jaxpr, rem_jaxpr, len(rem_consts)
| {
"repo_id": "jax-ml/jax",
"file_path": "jax/_src/interpreters/remat.py",
"license": "Apache License 2.0",
"lines": 123,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:jax/_src/pallas/mpmd.py | # Copyright 2026 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""APIs for defining MPMD kernels in Pallas."""
from __future__ import annotations
from collections.abc import Callable, Mapping, Sequence
import contextlib
import functools
from typing import cast, Any, ParamSpec, TypeVar
from jax._src import api
from jax._src import api_util
from jax._src import config
from jax._src import core as jax_core
from jax._src import linear_util as lu
from jax._src import state
from jax._src import tree_util
from jax._src import util
from jax._src.frozen_dict import FrozenDict
from jax._src.interpreters import mlir
from jax._src.interpreters import partial_eval as pe
from jax._src.pallas import core as pallas_core
from jax._src.pallas import pallas_call
_P = ParamSpec("_P")
_T = TypeVar("_T")
mpmd_map_p = jax_core.Primitive("mpmd_map")
mpmd_map_p.multiple_results = True
@mpmd_map_p.def_impl
def _mpmd_map_impl(*args, **params):
jit_impl = api.jit(functools.partial(mpmd_map_p.bind, **params))
with config.disable_jit(False):
return jit_impl(*args)
@mpmd_map_p.def_effectful_abstract_eval
def _mpmd_map_abstract_eval(
*in_avals,
jaxprs,
out_avals,
input_output_aliases,
interpret,
compiler_params,
**params,
):
del params # Unused.
effs = {*pallas_core.get_interpret_effects(interpret)}
if getattr(compiler_params, "has_side_effects", False):
# TODO(slebedev): Fix internal breakages and add
# ``jax_core.GenericEffect(pallas_call_p)`` here.
effs = jax_core.no_effects
for jaxpr in jaxprs:
if not all(isinstance(aval, state.AbstractRef) for aval in jaxpr.in_avals):
raise TypeError("MPMD kernels must only have Ref inputs")
# TODO(slebedev): Handle pinned buffers as in ``pallas_call``.
outin_aliases = {
out_idx: in_idx for in_idx, out_idx in input_output_aliases.items()
}
out_avals = [
in_avals[outin_aliases[out_idx]] if out_idx in outin_aliases else a
for out_idx, a in enumerate(out_avals)
]
# Make sure we don't return ShapedArrayWithMemorySpace to the outside world.
out_avals = [
aval.unwrap()
if isinstance(aval, pallas_core.ShapedArrayWithMemorySpace)
else aval
for aval in out_avals
]
return out_avals, effs
def _mpmd_map_typecheck_rule(ctx_factory, *in_atoms, meshes, **params):
del ctx_factory # Unused.
ctx = contextlib.ExitStack()
for mesh in meshes:
ctx.enter_context(jax_core.extend_axis_env_nd(mesh.shape.items()))
with ctx:
return _mpmd_map_abstract_eval(
*(x.aval for x in in_atoms), meshes=meshes, **params
)
jax_core.custom_typechecks[mpmd_map_p] = _mpmd_map_typecheck_rule
def _mpmd_map_tpu_lowering(
ctx: mlir.LoweringRuleContext,
*in_nodes,
jaxprs,
grid_mappings,
meshes,
input_output_aliases,
debug,
interpret,
compiler_params,
cost_estimate,
out_avals,
metadata,
name,
):
try:
from jax._src.pallas.mosaic import pallas_call_registration
except ImportError:
raise pallas_call._unsupported_lowering_error("tpu")
return pallas_call_registration.mpmd_map_tpu_lowering_rule(
ctx,
*in_nodes,
jaxprs=jaxprs,
grid_mappings=grid_mappings,
meshes=meshes,
input_output_aliases=input_output_aliases,
debug=debug,
interpret=interpret,
compiler_params=compiler_params,
cost_estimate=cost_estimate,
out_avals=out_avals,
metadata=metadata,
name=name,
)
def _mpmd_map_fallback_lowering(
ctx: mlir.LoweringRuleContext,
*in_nodes,
meshes,
jaxprs,
grid_mappings,
out_avals,
input_output_aliases,
compiler_params,
interpret,
debug,
cost_estimate,
metadata,
name,
):
if len(jaxprs) != 1:
raise NotImplementedError(
"Lowering multiple mesh/function pairs is not currently supported"
)
[jaxpr] = jaxprs
[mesh] = meshes
[grid_mapping] = grid_mappings
if hasattr(mesh, "dimension_semantics"):
compiler_params = compiler_params.replace(
dimension_semantics=mesh.dimension_semantics
)
if hasattr(mesh, "kernel_type"):
compiler_params = compiler_params.replace(kernel_type=mesh.kernel_type)
return pallas_call._pallas_call_lowering(
ctx,
*in_nodes,
jaxpr=jaxpr,
grid_mapping=grid_mapping,
mesh=mesh,
input_output_aliases=tuple(input_output_aliases.items()),
debug=debug,
interpret=interpret,
compiler_params=compiler_params,
cost_estimate=cost_estimate,
out_avals=out_avals,
metadata=metadata,
name=name,
)
@functools.partial(mlir.register_lowering, mpmd_map_p)
def _mpmd_map_lowering(ctx: mlir.LoweringRuleContext, *in_nodes, **params):
return mlir.lower_per_platform(
ctx,
"mpmd_map",
dict(
cpu=_mpmd_map_fallback_lowering,
tpu=_mpmd_map_tpu_lowering,
cuda=_mpmd_map_fallback_lowering,
rocm=_mpmd_map_fallback_lowering,
),
None, # default_rule
jax_core.no_effects,
*in_nodes,
**params,
)
def mpmd_map(
meshes_and_fns: Sequence[tuple[pallas_core.Mesh, Callable[_P, _T]]],
/,
out_shapes: tree_util.PyTree,
*,
scratch_shapes: pallas_core.ScratchShapeTree = (),
compiler_params: Any | None = None,
interpret: bool | Any = False,
debug: bool = False,
cost_estimate: pallas_core.CostEstimate | None = None,
name: str | None = None,
metadata: dict[str, str] | None = None,
) -> Callable[_P, _T]:
return _mpmd_map(
meshes_and_fns,
out_shapes,
input_output_aliases={},
scratch_shapes=scratch_shapes,
compiler_params=compiler_params,
interpret=interpret,
debug=debug,
cost_estimate=cost_estimate,
name=name,
metadata=metadata,
)
def _mpmd_map(
meshes_and_fns: Sequence[tuple[pallas_core.Mesh, Callable[_P, _T]]],
/,
out_shapes: tree_util.PyTree,
*,
input_output_aliases: Mapping[int, int] = {},
scratch_shapes: pallas_core.ScratchShapeTree = (),
compiler_params: Any | None = None,
interpret: bool | Any = False,
debug: bool = False,
cost_estimate: pallas_core.CostEstimate | None = None,
name: str | None = None,
metadata: dict[str, str] | None = None,
) -> Callable[_P, _T]:
"""Like ``pallas_call``, but MPMD and without pipelining."""
if not meshes_and_fns:
raise ValueError("At least one mesh/function pair is required")
flat_out_shapes_with_paths, out_tree = tree_util.tree_flatten_with_path(
out_shapes
)
out_paths, flat_out_shapes = util.unzip2(flat_out_shapes_with_paths)
flat_out_avals = tuple(
map(pallas_core._convert_out_shape_to_aval, flat_out_shapes)
)
out_origins = tuple(f"outputs{tree_util.keystr(p)}" for p in out_paths)
@functools.partial(api.jit, inline=True)
def wrapper(*args):
flat_args_with_paths, in_tree = tree_util.tree_flatten_with_path(args)
in_paths, flat_args = util.unzip2(flat_args_with_paths)
flat_avals = tuple(map(jax_core.typeof, flat_args))
in_origins = tuple(f"args{tree_util.keystr(p)}" for p in in_paths)
# NOTE: ``grid_mapping`` are only needed for us to reuse the ``pallas_call``
# lowering machinery.
meshes = []
jaxprs = []
grid_mappings = []
for mesh, fn in meshes_and_fns:
grid_spec = pallas_core.GridSpec(
grid=tuple(mesh.shape.items()), # pyrefly: ignore[bad-argument-type]
in_specs=in_tree.unflatten(
pallas_core.BlockSpec(
memory_space=aval.memory_space
if isinstance(aval, pallas_core.ShapedArrayWithMemorySpace)
else mesh.default_memory_space,
)
for aval in flat_avals
),
out_specs=out_tree.unflatten(
pallas_core.BlockSpec(
memory_space=aval.memory_space
if isinstance(aval, pallas_core.ShapedArrayWithMemorySpace)
else mesh.default_memory_space,
)
for aval in flat_out_avals
),
scratch_shapes=scratch_shapes,
)
kernel_args, grid_mapping = pallas_core.get_grid_mapping(
grid_spec,
flat_avals,
in_tree,
in_origins,
flat_out_avals,
out_tree,
out_origins,
)
flat_kernel_avals, kernel_in_tree = tree_util.tree_flatten(kernel_args)
debug_info = api_util.debug_info(
"mpmd_map",
fn,
kernel_in_tree.unflatten(flat_kernel_avals),
{},
)
if name is not None:
debug_info = debug_info.replace_func_name(name)
flat_fun, out_tree_thunk = api_util.flatten_fun_nokwargs(
lu.wrap_init(fn, debug_info=debug_info), kernel_in_tree
)
with jax_core.extend_axis_env_nd(mesh.shape.items()):
jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(
flat_fun, flat_kernel_avals
)
fun_out_tree = out_tree_thunk()
if fun_out_tree != tree_util.tree_structure(None):
raise ValueError(
f"The kernel function in mpmd_map {debug_info.func_src_info}"
f" should return None. It returns a PyTree: {fun_out_tree}."
)
if consts:
raise NotImplementedError("MPMD kernels cannot close over constants")
meshes.append(mesh)
jaxprs.append(jaxpr)
grid_mappings.append(grid_mapping)
# TODO(slebedev): The named scope should not be necessary here.
ctx = (
api.named_scope(name) if name is not None else contextlib.nullcontext()
)
with ctx:
flat_outs = mpmd_map_p.bind(
*flat_args,
meshes=tuple(meshes),
jaxprs=tuple(jaxprs),
grid_mappings=tuple(grid_mappings),
out_avals=flat_out_avals,
input_output_aliases=FrozenDict(input_output_aliases),
compiler_params=compiler_params,
interpret=interpret,
debug=debug,
cost_estimate=cost_estimate,
metadata=FrozenDict(metadata) if metadata is not None else None,
name=name,
)
return out_tree.unflatten(flat_outs)
return cast(Callable[_P, _T], wrapper)
| {
"repo_id": "jax-ml/jax",
"file_path": "jax/_src/pallas/mpmd.py",
"license": "Apache License 2.0",
"lines": 318,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:jax/_src/pallas/einshape.py | # Copyright 2026 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Einshape primitive implementation.
Einshape (see https://github.com/google-deepmind/einshape) is a DSL for various
array transformation operations including reshape, squeeze, expand_dims, and
transpose, using an einsum-like notation.
The DSL consists of an LHS equation and an RHS equation, separated by `->`.
Each side assigns names to dimensions, e.g. `ij` would assign `i` and `j` to the
first and second dimensions of an array.
The DSL uses parentheses `()` to indicate grouping of dimensions:
- On the left-hand side (LHS), parentheses indicate that an existing dimension
should be split into multiple dimensions.
- On the right-hand side (RHS), parentheses indicate that multiple dimensions
should be merged into a single dimension.
Dimension reordering in the RHS string (relative to the LHS) specifies a
transpose.
Example equations:
- "n->n": Identity
- "ab->ba": Transposes the 0th and 1st dimensions.
- "nhwc->nchw": Transposes dimensions from (N, H, W, C) to (N, C, H, W).
- "(ab)c->abc": Splits the first dimension into two dimensions (a, b).
- "abc->(ab)c": Merges the first two dimensions (a, b).
- "a(bc)->(ba)c": Splits the second dimension into (b, c), transposes 0 and 1,
then merges dimensions 0 and 1.
When used inside a Pallas kernel on TPU, `einshape` will attempt to perform a
"tile-preserving" transformation. This is a more efficient implementation that
avoids the overhead of general reshapes or transposes by logically reordering
the underlying TPU vector registers. This is possible if the transformation
preserves the data within the vector registers. If this is not possible,
`einshape` will fall back to a general implementation that will likely involve
vector register relayouts.
As an example, for the equation `a(bc)->bac`, the first step which involves
splitting a(bc)->abc is *not* tile preserving (it changes the sublane dimension
from a to b), but after the transpose to `bac` it is tile preserving. Therefore
the overall `einshape` operation is tile preserving.
Not currently supported:
- Expand dimensions: `a->1a`
- Squeeze dimensions: `a1->a`
"""
import collections
from collections.abc import Sequence
import dataclasses
import functools
import math
from typing import Literal, NamedTuple
from jax._src import api
from jax._src import core as jax_core
from jax._src import dispatch
from jax._src import typing as jax_typing
from jax._src import hijax
from jax._src.frozen_dict import FrozenDict
from jax._src.interpreters import mlir
from jax._src.lax import lax
from jax._src.pallas.mosaic import lowering as tpu_lowering
from jax._src.pallas.mosaic import tpu_info
import jax.numpy as jnp
import numpy as np
@dataclasses.dataclass(frozen=True)
class SplitDims:
index: int
sizes: tuple[int, ...]
def transform_shape(self, shape: tuple[int, ...]) -> tuple[int, ...]:
return (*shape[: self.index], *self.sizes, *shape[self.index + 1 :])
@dataclasses.dataclass(frozen=True)
class MergeDims:
index: int
count: int
def transform_shape(self, shape: tuple[int, ...]) -> tuple[int, ...]:
return (
*shape[: self.index],
math.prod(shape[self.index : self.index + self.count]),
*shape[self.index + self.count :],
)
@dataclasses.dataclass(frozen=True)
class Transpose:
permutation: tuple[int, ...]
def transform_shape(self, shape: tuple[int, ...]) -> tuple[int, ...]:
return tuple(shape[i] for i in self.permutation)
# TODO(sharadmv): unify this with other Pallas Transforms
Transform = SplitDims | MergeDims | Transpose
def _parse_side(s: str) -> list[list[str]]:
"""Parses one side of an einshape equation into groups of named dimensions.
Groups are indicated by parentheses. Dimensions outside of parentheses are
treated as groups of size 1.
For example:
"a(bc)d" -> [['a'], ['b', 'c'], ['d']]
"(ab)c" -> [['a', 'b'], ['c']]
Args:
s: One side of an einshape equation string.
Returns:
A list of lists of characters, where each inner list represents a group of
dimensions.
"""
# Remove spaces
s = s.replace(" ", "")
groups = []
i = 0
while i < len(s):
if s[i] == "(":
# Start of a group
j = s.find(")", i)
if j == -1:
raise ValueError(f"Unmatched parenthesis in {s!r}")
group = list(s[i + 1 : j])
groups.append(group)
i = j + 1
elif s[i] == ")":
raise ValueError(f"Unmatched parenthesis in {s!r}")
else:
# distinct dimension
groups.append([s[i]])
i += 1
return groups
def _parse_equation(equation: str) -> tuple[list[list[str]], list[list[str]]]:
"""Parses an einshape equation."""
if equation.count("->") != 1:
raise ValueError("Equation must contain exactly one '->'")
lhs_str, rhs_str = equation.split("->")
return _parse_side(lhs_str), _parse_side(rhs_str)
def get_einshape_transforms(
equation: str,
input_shape: tuple[int, ...],
**sizes: int,
) -> list[Transform]:
"""Parses an einshape equation into a sequence of transforms.
Args:
equation: String of the form "ab(cd)->cabd".
input_shape: The shape of the input array.
**sizes: Integer sizes for dimensions that are split and cannot be inferred.
Returns:
A list of Split, Transpose, and Merge transforms.
"""
lhs, rhs = _parse_equation(equation)
# Validate LHS against input shape
if len(lhs) != len(input_shape):
raise ValueError(
f"Equation LHS has {len(lhs)} groups but input has {len(input_shape)}"
f" dims. LHS: {lhs}, Input shape: {input_shape}"
)
dim_sizes: dict[str, int] = {}
# Populate known sizes from input
for i, group in enumerate(lhs):
shape_val = input_shape[i]
if len(group) == 1:
name = group[0]
if name in dim_sizes and dim_sizes[name] != shape_val:
raise ValueError(
f"Inconsistent size for {name}: {dim_sizes[name]} vs {shape_val}"
)
dim_sizes[name] = shape_val
else:
# We have a merged dimension on LHS, need to split
known_product = 1
unknown_dims = []
for name in group:
if name in sizes:
dim_sizes[name] = sizes[name]
known_product *= sizes[name]
elif name in dim_sizes:
known_product *= dim_sizes[name]
else:
unknown_dims.append(name)
if not unknown_dims:
if known_product != shape_val:
raise ValueError(
f"Size mismatch for group {group}: expected {shape_val}, got"
f" {known_product}"
)
elif len(unknown_dims) == 1:
if shape_val % known_product != 0:
raise ValueError(
f"Cannot split size {shape_val} with known sizes {known_product}"
)
inferred_size = shape_val // known_product
dim_sizes[unknown_dims[0]] = inferred_size
else:
raise ValueError(
f"Ambiguous split for {group} with size {shape_val}. Unknowns:"
f" {unknown_dims}. Provide sizes via kwargs."
)
# Check if all RHS dims are known
flat_rhs = [name for group in rhs for name in group]
for name in flat_rhs:
if name not in dim_sizes:
if name in sizes:
dim_sizes[name] = sizes[name]
else:
raise ValueError(f"Unknown dimension {name} in RHS")
ops: list[Transform] = []
# 1. Decompose LHS
current_idx = 0
for group in lhs:
if len(group) > 1:
atomic_sizes = tuple(dim_sizes[name] for name in group)
ops.append(SplitDims(current_idx, atomic_sizes))
current_idx += len(group)
else:
current_idx += 1
# 2. Transpose
lhs_atomic_order = [name for group in lhs for name in group]
rhs_atomic_order = [name for group in rhs for name in group]
if set(lhs_atomic_order) != set(rhs_atomic_order):
raise NotImplementedError(
"Only reordering/splitting/merging supported (no broadcast yet)."
)
if lhs_atomic_order != rhs_atomic_order:
perm = tuple(lhs_atomic_order.index(name) for name in rhs_atomic_order)
ops.append(Transpose(perm))
# 3. Compose RHS
current_idx = 0
for group in rhs:
if len(group) > 1:
ops.append(MergeDims(current_idx, len(group)))
current_idx += 1
else:
current_idx += 1
return ops
def _einshape(
equation: str,
value: jax_typing.Array,
**sizes: int,
) -> jax_typing.Array:
"""Reshapes and transposes an array according to an einshape equation.
Args:
equation: String of the form "ab(cd)->cabd". Parentheses indicate grouping
of dimensions. On the LHS, grouped dimensions are split. On the RHS,
dimensions are merged.
value: The array to reshape.
**sizes: Integer sizes for dimensions that are split and cannot be inferred.
Returns:
The reshaped and transposed array.
"""
transforms = get_einshape_transforms(equation, value.shape, **sizes)
for transform in transforms:
match transform:
case SplitDims(_, _):
new_shape = transform.transform_shape(value.shape)
value = value.reshape(new_shape)
case MergeDims(_, _):
new_shape = transform.transform_shape(value.shape)
value = value.reshape(new_shape)
case Transpose(permutation):
value = lax.transpose(value, permutation)
return value
einshape_lo_p = jax_core.Primitive("einshape_lo")
def einshape_lo(
equation: str, x: jax_typing.Array, assert_is_tile_preserving: bool, **sizes: int
) -> jax_typing.Array:
return einshape_lo_p.bind(
x,
equation=equation,
sizes=tuple(sizes.items()),
assert_is_tile_preserving=assert_is_tile_preserving,
)
@einshape_lo_p.def_abstract_eval
def _einshape_lo_abstract_eval(
x_aval: jax_core.ShapedArray,
*,
equation: str,
sizes: tuple[tuple[str, int], ...],
assert_is_tile_preserving: bool,
):
del assert_is_tile_preserving
out_sds = api.eval_shape(
functools.partial(_einshape, equation, **dict(sizes)), x_aval # type: ignore
)
return x_aval.update(shape=out_sds.shape, dtype=out_sds.dtype)
def _einshape_lo_lowering(
ctx: mlir.LoweringRuleContext,
x,
*,
equation: str,
sizes: tuple[tuple[str, int], ...],
assert_is_tile_preserving: bool,
):
del assert_is_tile_preserving
def f(x):
return _einshape(equation, x, **dict(sizes))
return mlir.lower_fun(f, multiple_results=False)(ctx, x)
mlir.register_lowering(einshape_lo_p, _einshape_lo_lowering)
dispatch.simple_impl(einshape_lo_p)
class Einshape(hijax.VJPHiPrimitive):
  """Einshape primitive.

  Hi-level primitive wrapping `einshape_lo` so that the operation gets a VJP
  and participates in hijax tracing. Each instance is specialized on the
  input aval, equation, and split sizes.
  """

  def __init__(
      self,
      x_aval: jax_core.ShapedArray,
      *,
      equation: str,
      assert_is_tile_preserving: bool,
      sizes: dict[str, int],
  ):
    self.in_avals = (x_aval,)
    # Derive the output aval by shape-evaluating the reference implementation.
    out_type = api.eval_shape(
        functools.partial(_einshape, equation, **sizes), x_aval  # type: ignore
    )
    self.out_aval = hijax.ShapedArray(out_type.shape, out_type.dtype)
    self.equation = equation
    self.sizes = sizes
    self.assert_is_tile_preserving = assert_is_tile_preserving
    # FrozenDict keeps `params` hashable for primitive comparison/caching.
    self.params = dict(
        x_aval=x_aval,
        equation=equation,
        sizes=FrozenDict(sizes),
        assert_is_tile_preserving=assert_is_tile_preserving,
    )
    super().__init__()

  def expand(self, x: jax_typing.Array) -> jax_typing.Array:  # pyrefly: ignore[bad-override]
    # Expand to the low-level primitive; sizes become kwargs again.
    return einshape_lo(
        self.equation,
        x,
        assert_is_tile_preserving=self.assert_is_tile_preserving,
        **self.sizes,
    )
def einshape(
    equation: str,
    x: jax_typing.Array,
    assert_is_tile_preserving: bool = False,
    **sizes: int,
) -> jax_typing.Array:
  """Reshapes and transposes an array according to an einshape equation.

  Args:
    equation: A string defining the transformation, e.g., "ab(cd)->cabd". -
      Names (e.g., 'a', 'b') represent dimensions. - Parentheses on the LHS,
      like `(cd)`, indicate a dimension that will be split into dimensions `c`
      and `d`. - Parentheses on the RHS, like `(ab)`, indicate dimensions `a`
      and `b` that will be merged.
    x: The input jax_typing.Array to transform.
    assert_is_tile_preserving: If True, assert that the transformation is tile
      preserving. Note that this check only applies inside of Pallas kernels.
    **sizes: Dimension sizes that cannot be inferred from the input shape.
      Required when splitting dimensions unless all but one sub-dimension size
      is known.

  Returns:
    The transformed jax_typing.Array.

  Examples:
    >>> import jax.numpy as jnp
    >>> x = jnp.zeros((10, 20))
    >>> # Split the second dimension (20) into (4, 5)
    >>> y = einshape("a(bc)->abc", x, b=4)
    >>> y.shape
    (10, 4, 5)
    >>> # Transpose and merge the first two dimensions.
    >>> z = einshape("abc->(ba)c", y)
    >>> z.shape
    (40, 5)
  """
  # Route through the Einshape hi-primitive so the op gets a VJP and the
  # Pallas/TPU lowering below.
  return Einshape(
      jax_core.typeof(x),
      equation=equation,
      sizes=sizes,
      assert_is_tile_preserving=assert_is_tile_preserving,
  )(x)
def _default_einshape_kernel(equation: str, x: jax_typing.Array, **sizes: int):
  """Fallback kernel: delegate directly to the reference implementation."""
  return _einshape(equation, x, **sizes)
class Factor(NamedTuple):
  """One multiplicative factor of a dimension, tagged by its physical role."""
  size: int
  # "outer" factors are freely reshapeable; "sublane"/"lane" factors belong to
  # the hardware tile and must stay intact.
  kind: Literal["outer", "sublane", "lane"]
def _array_to_2d_tile_array(
x: jax_typing.Array, tiling: tuple[int, ...]
) -> np.ndarray:
t1, t2 = tiling[-2:]
tiled_shape = tuple(x.shape[i] // tiling[i] for i in range(len(x.shape)))
# Allocate an empty object array to ensure Numpy doesn't coerce JAX tracers
tiles = np.empty(tiled_shape, dtype=object)
for idx in np.ndindex(*tiled_shape):
*leading, i1, i2 = idx
slices = tuple(leading) + (
slice(i1 * t1, (i1 + 1) * t1),
slice(i2 * t2, (i2 + 1) * t2),
)
# Standard Integer indexing inherently drops the outer dims -> returns strict 2D array
tiles[idx] = x[slices]
return tiles
def _2d_tile_array_to_array(tiles: np.ndarray) -> jax_typing.Array:
raw_arrays = np.empty(tiles.shape, dtype=object)
for idx in np.ndindex(*tiles.shape):
raw_arrays[idx] = tiles[idx]
return jnp.block(raw_arrays.tolist())
def _consolidate(factors: list[Factor]) -> list[Factor]:
  """Collapses runs of adjacent 'outer' factors into single factors.

  Only 'outer' factors may fuse; sublane/lane factors are pinned to the
  hardware tile and are kept as-is.
  """
  merged: list[Factor] = []
  for factor in factors:
    fusable = bool(merged) and factor.kind == "outer" and merged[-1].kind == "outer"
    if fusable:
      merged[-1] = Factor(merged[-1].size * factor.size, "outer")
    else:
      merged.append(factor)
  return merged
def _init_dims(shape: tuple[int, ...], t1: int, t2: int) -> list[list[Factor]]:
  """Factors each dim of `shape` into outer and tile (sublane/lane) parts.

  The second-to-last dim carries a trailing sublane factor of size `t1`, the
  last dim a lane factor of size `t2`; everything else is 'outer'.
  """
  last = len(shape) - 1
  dims: list[list[Factor]] = []
  for axis, size in enumerate(shape):
    if axis == last - 1:
      kind, tile = "sublane", t1
    elif axis == last:
      kind, tile = "lane", t2
    else:
      kind, tile = "outer", 1
    assert size % tile == 0
    factors = []
    outer = size // tile
    if outer > 1:
      factors.append(Factor(outer, "outer"))
    # Always record the tile factor for sublane/lane dims, even of size 1,
    # so the tile boundary stays visible to later checks.
    if tile > 1 or kind != "outer":
      factors.append(Factor(tile, kind))  # type: ignore
    dims.append(_consolidate(factors))
  return dims
def _apply_split(
    factors: list[Factor], targets: tuple[int, ...]
) -> list[list[Factor]] | None:
  """Splits one factored dimension into a factor list per target size.

  Returns one factor list for each entry of `targets`, or None when the split
  would cut through a sublane/lane factor (i.e. is not tile preserving) or the
  sizes do not compose.
  """
  factors = _consolidate(factors)
  queue = collections.deque(factors)
  result = []
  for i, needed in enumerate(targets):
    new_dim = []
    current_size = 1
    # Consume factors iteratively until the required shape volume is met
    while current_size < needed:
      if not queue:
        return None
      b = queue.popleft()
      # Case A: Perfect match or consume smaller outer block
      if needed % (current_size * b.size) == 0:
        new_dim.append(b)
        current_size *= b.size
      # Case B: Split a larger block (only allowed over logical outer limits)
      elif (current_size * b.size) % needed == 0:
        if b.kind != "outer":
          return None  # Illegal splitting of hardware tile limit
        take = needed // current_size
        new_dim.append(Factor(take, "outer"))
        # The remainder of the block goes back for the next target.
        queue.appendleft(Factor(b.size // take, "outer"))
        current_size *= take
      else:
        return None
    # Sweep any trailing physical size-1 markers exactly into the right-most split dimension
    if i == len(targets) - 1:
      while queue and queue[0].size == 1:
        new_dim.append(queue.popleft())
    result.append(_consolidate(new_dim))
  # Leftover factors mean the targets did not account for the whole dim.
  if queue:
    return None
  return result
def _tile_preserving_einshape_kernel(
    equation: str, x: jax_typing.Array, **size_vars: int
):
  """Applies an einshape by rearranging whole (sublane, lane) tiles.

  The input is decomposed into a grid of hardware tiles; each transform is
  applied to the grid (an object ndarray) instead of the data, so no
  intra-tile data movement is generated. Assumes the transforms already
  passed `_is_tile_preserving`.
  """
  tiling = tpu_info.infer_tiling(jax_core.typeof(x))
  assert tiling is not None
  t1, t2 = tiling[-2:]
  assert isinstance(t1, int)
  assert isinstance(t2, int)
  dims = _init_dims(x.shape, t1, t2)
  tiles = _array_to_2d_tile_array(x, tiling)  # type: ignore
  transforms = get_einshape_transforms(equation, x.shape, **size_vars)
  def get_outer_shape(dims_list: list[list[Factor]]) -> tuple[int, ...]:
    # Shape of the tile grid: the product of each dim's "outer" factors.
    return tuple(
        math.prod([f.size for f in d if f.kind == "outer"]) for d in dims_list
    )
  for t in transforms:
    match t:
      case Transpose(permutation):
        tiles = np.transpose(tiles, permutation)
        dims = [dims[i] for i in permutation]
      case SplitDims(index, sizes):
        new_dims = _apply_split(dims[index], sizes)
        assert (
            new_dims is not None
        ), "Tile preserving check passed but split failed."
        dims = dims[:index] + new_dims + dims[index + 1 :]
        tiles = tiles.reshape(get_outer_shape(dims))
      case MergeDims(index, count):
        merged = [b for d in dims[index : index + count] for b in d]
        dims = dims[:index] + [_consolidate(merged)] + dims[index + count :]
        tiles = tiles.reshape(get_outer_shape(dims))
  return _2d_tile_array_to_array(tiles)
def _is_tile_preserving(
    shape: tuple[int, ...],
    transforms: Sequence[Transform],
    tiling: tuple[int, int] | None = None,
) -> bool:
  """Returns True if `transforms` can be applied by moving whole tiles.

  Simulates every transform on the factored dims, then checks that the last
  two dims still end with the (sublane, lane) tile factors.
  """
  if not tiling or len(shape) < 2:
    return False
  t1, t2 = tiling
  # Both trailing dims must be an exact multiple of the tile.
  if shape[-2] % t1 != 0 or shape[-1] % t2 != 0:
    return False
  dims = _init_dims(shape, t1, t2)
  for t in transforms:
    match t:
      case SplitDims(index, sizes):
        if (new_dims := _apply_split(dims[index], sizes)) is None:
          return False
        dims[index : index + 1] = new_dims
      case MergeDims(index, count):
        merged = [b for d in dims[index : index + count] for b in d]
        dims[index : index + count] = [_consolidate(merged)]
      case Transpose(permutation):
        dims = [dims[i] for i in permutation]
  if len(dims) < 2:
    return False
  # Check that the last two dimensions are tiled along (sublane, lane).
  y_dim = dims[-2]
  if not y_dim or y_dim[-1] != Factor(t1, "sublane"):
    return False
  x_dim = dims[-1]
  if not x_dim or x_dim[-1] != Factor(t2, "lane"):
    return False
  return True
def _einshape_kernel(
    equation: str,
    x: jax_typing.Array,
    assert_is_tile_preserving: bool,
    **size_vars: int,
):
  """Pallas-side einshape: uses the tile-preserving kernel when possible.

  Falls back to the reference implementation for trivial (single-transform)
  cases or when the transform is not tile preserving.

  Raises:
    ValueError: If `assert_is_tile_preserving` is set and the tile-preserving
      path cannot be taken.
  """
  # `size_vars` is already a plain dict of keyword sizes; no `dict()` copy
  # is needed (fixes a redundant conversion).
  transforms = get_einshape_transforms(equation, x.shape, **size_vars)
  if len(transforms) <= 1:
    # A single reshape/transpose is handled fine by the default path.
    return _default_einshape_kernel(equation, x, **size_vars)
  tiling = tpu_info.infer_tiling(jax_core.ShapedArray(x.shape, x.dtype))
  if tiling is not None and _is_tile_preserving(
      x.shape, transforms, tiling[-2:]  # type: ignore
  ):
    return _tile_preserving_einshape_kernel(equation, x, **size_vars)
  elif assert_is_tile_preserving:
    raise ValueError(
        "Tile preserving check failed for einshape kernel with equation:"
        f" {equation} and shape {x.shape} and tiling {tiling}."
    )
  return _default_einshape_kernel(equation, x, **size_vars)
@tpu_lowering.register_lowering_rule(einshape_lo_p)
def _einshape_lo_lowering_rule(
    ctx: tpu_lowering.LoweringRuleContext,
    x,
    *,
    equation: str,
    sizes: tuple[tuple[str, int], ...],
    assert_is_tile_preserving: bool,
):
  """Pallas TPU lowering: dispatch to the tile-aware einshape kernel."""
  # `sizes` was packed as hashable items on the primitive; expand it back.
  return tpu_lowering.lower_fun(
      lambda x: _einshape_kernel(
          equation,
          x,
          assert_is_tile_preserving=assert_is_tile_preserving,
          **dict(sizes),
      ),
      multiple_results=False,
  )(ctx, x)
| {
"repo_id": "jax-ml/jax",
"file_path": "jax/_src/pallas/einshape.py",
"license": "Apache License 2.0",
"lines": 553,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:tests/pallas/einshape_parse_test.py | # Copyright 2026 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import string
from absl.testing import absltest
from absl.testing import parameterized
import hypothesis
from hypothesis import strategies as st
import jax
from jax._src import test_util as jtu
from jax._src.pallas import einshape
import numpy as np
jax.config.parse_flags_with_absl()
jtu.setup_hypothesis()
class EinshapeParseTest(parameterized.TestCase):
  """Tests for `einshape.get_einshape_transforms` equation parsing."""

  @parameterized.parameters(
      ("ab->ba", (2, 3), [einshape.Transpose((1, 0))]),
      (
          "ab(cd)->cabd",
          (2, 3, 20),
          [einshape.SplitDims(2, (4, 5)), einshape.Transpose((2, 0, 1, 3))],
          {"c": 4},
      ),
      ("abcd->ab(cd)", (2, 3, 4, 5), [einshape.MergeDims(2, 2)]),
      ("a(bc)->abc", (10, 12), [einshape.SplitDims(1, (3, 4))], {"b": 3}),
      (
          "(ab)c->(ba)c",
          (6, 5),
          [
              einshape.SplitDims(0, (2, 3)),
              einshape.Transpose((1, 0, 2)),
              einshape.MergeDims(0, 2),
          ],
          {"a": 2},
      ),
      (
          "ab(cde)->cadeb",
          (2, 3, 4 * 5 * 6),
          [
              einshape.SplitDims(2, (4, 5, 6)),
              einshape.Transpose((2, 0, 3, 4, 1)),
          ],
          {"c": 4, "d": 5},
      ),
  )
  def test_get_einshape_transforms(
      self, equation, input_shape, expected_ops, sizes=None
  ):
    """Checks fixed equations against hand-written expected transform lists."""
    sizes = sizes or {}
    ops = einshape.get_einshape_transforms(equation, input_shape, **sizes)
    self.assertEqual(ops, expected_ops)

  def test_identity(self):
    """An identity equation must produce an empty transform list."""
    ops = einshape.get_einshape_transforms("abc->abc", (2, 3, 4))
    self.assertEqual(ops, [])

  @hypothesis.given(
      st.lists(st.integers(1, 4), min_size=1, max_size=4).map(tuple), st.data()
  )
  @hypothesis.settings(max_examples=50, deadline=None)
  def test_hypothesis_get_einshape_transforms(self, atomic_shape, data):
    """Random equations: replaying the ops must reproduce the RHS shape."""
    names = list(string.ascii_lowercase)[: len(atomic_shape)]
    dim_sizes = dict(zip(names, atomic_shape))
    # Randomly group names for LHS
    lhs_groups = []
    remaining_names = list(names)
    while remaining_names:
      group_size = data.draw(st.integers(1, len(remaining_names)))
      lhs_groups.append(remaining_names[:group_size])
      remaining_names = remaining_names[group_size:]
    # Randomly group names for RHS (after permutation)
    rhs_names = data.draw(st.permutations(names))
    rhs_groups = []
    remaining_rhs_names = list(rhs_names)
    while remaining_rhs_names:
      group_size = data.draw(st.integers(1, len(remaining_rhs_names)))
      rhs_groups.append(remaining_rhs_names[:group_size])
      remaining_rhs_names = remaining_rhs_names[group_size:]

    def format_side(groups):
      # Renders groups in equation syntax, e.g. [[a], [b, c]] -> "a(bc)".
      res = ""
      for g in groups:
        if len(g) == 1:
          res += g[0]
        else:
          res += "(" + "".join(g) + ")"
      return res

    equation = f"{format_side(lhs_groups)}->{format_side(rhs_groups)}"
    # Construct input shape
    lhs_shape = []
    for g in lhs_groups:
      prod = 1
      for n in g:
        prod *= dim_sizes[n]
      lhs_shape.append(prod)
    # We might need to provide some sizes for LHS splits
    kwargs = {}
    for g in lhs_groups:
      if len(g) > 1:
        for n in g[:-1]:
          kwargs[n] = dim_sizes[n]
    ops = einshape.get_einshape_transforms(equation, tuple(lhs_shape), **kwargs)
    # Verify the ops by applying them to a symbolic or dummy shape
    current_shape = list(lhs_shape)
    for op in ops:
      if isinstance(op, einshape.SplitDims):
        self.assertEqual(current_shape[op.index], np.prod(op.sizes))
        current_shape[op.index : op.index + 1] = list(op.sizes)
      elif isinstance(op, einshape.MergeDims):
        merged_size = np.prod(current_shape[op.index : op.index + op.count])
        current_shape[op.index : op.index + op.count] = [merged_size]
      elif isinstance(op, einshape.Transpose):
        current_shape = [current_shape[i] for i in op.permutation]
    # Verify final shape matches RHS
    expected_rhs_shape = []
    for g in rhs_groups:
      prod = 1
      for n in g:
        prod *= dim_sizes[n]
      expected_rhs_shape.append(prod)
    self.assertEqual(tuple(current_shape), tuple(expected_rhs_shape))
if __name__ == "__main__":
absltest.main()
| {
"repo_id": "jax-ml/jax",
"file_path": "tests/pallas/einshape_parse_test.py",
"license": "Apache License 2.0",
"lines": 130,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
jax-ml/jax:tests/pallas/einshape_test.py | # Copyright 2026 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import math
import string
from absl.testing import absltest
from absl.testing import parameterized
import hypothesis as hp
from hypothesis import strategies as st
import jax
from jax._src import dtypes
from jax._src import test_util as jtu
from jax.experimental import pallas as pl
from jax.experimental.pallas import tpu as pltpu
import jax.numpy as jnp
jax.config.parse_flags_with_absl()
jtu.setup_hypothesis()
# JIT-compiled driver that runs `pltpu.einshape` inside a one-core Pallas
# TensorCore kernel: copy in -> einshape in VMEM -> copy out.
@jax.jit(static_argnames=["equation", "sizes"])
def _einshape_kernel(x, equation, sizes):
  # `sizes` arrives as a hashable tuple of items (static jit arg); rebuild
  # the kwargs dict.
  sizes = dict(sizes)
  x_ref = jax.new_ref(x)
  fn = functools.partial(pltpu.einshape, equation, **sizes)
  # Allocate the output ref with the shape/dtype einshape will produce.
  out_ref = jax.new_ref(jnp.empty_like(jax.eval_shape(fn, x)))
  @pl.core_map(mesh=pltpu.create_tensorcore_mesh(num_cores=1, axis_name="x"))
  def _():
    @pl.with_scoped(
        pltpu.VMEM(x_ref.shape, x_ref.dtype),
        pltpu.VMEM(out_ref.shape, out_ref.dtype),
    )
    def inner(x_vmem_ref, out_vmem_ref):
      pltpu.sync_copy(x_ref, x_vmem_ref)
      out_vmem_ref[...] = pltpu.einshape(equation, x_vmem_ref[...], **sizes)
      pltpu.sync_copy(out_vmem_ref, out_ref)
    inner()
  return out_ref[...]
@st.composite
def einshape_strategy(draw, dtype: jnp.dtype, has_shape_constraint: bool):
  """Hypothesis strategy generating random einshape test cases.

  With `has_shape_constraint`, the generated cases keep the trailing
  (sublane, lane) tile factors in place so they are valid inside TPU kernels.
  Returns a dict with the equation, lhs/rhs shapes, required split kwargs,
  the fully-atomic shape, and the LHS->RHS permutation.
  """
  def partition(lst):
    # Partitions a list using hypothesis draws
    if not lst:
      return []
    groups = []
    i = 0
    while i < len(lst):
      g_size = draw(st.integers(1, len(lst) - i))
      groups.append(lst[i : i + g_size])
      i += g_size
    return groups
  if has_shape_constraint:
    # Construct strictly tile-preserving configurations
    num_outer = draw(st.integers(0, 3))
    outer_names = list(string.ascii_lowercase[:num_outer])
    s_name = string.ascii_lowercase[num_outer + 1]
    l_name = string.ascii_lowercase[num_outer + 2]
    dim_sizes = {n: draw(st.integers(1, 3)) for n in outer_names}
    p = 32 // dtypes.itemsize_bits(dtype)
    # sublane dimensions
    dim_sizes[s_name] = draw(st.sampled_from([8 * p, 16 * p, 32 * p]))
    # lane dimension
    dim_sizes[l_name] = draw(st.sampled_from([128, 256, 512]))
    def _generate_groups(names):
      # Slice names into 3 random groups. One will be used for leading
      # dimensions and the other two will be folded into the sublane and lane
      # dimensions respectively.
      # E.g. if _names = [a, b, c, d] and we draw idx1=1, idx2=3, then
      # free = [a], s = [b, c], l = [d]
      idx1 = draw(st.integers(0, len(names)))
      idx2 = draw(st.integers(idx1, len(names)))
      free, s, l = names[:idx1], names[idx1:idx2], names[idx2:]
      # The free group is split up further randomly.
      groups = partition(free) + [
          s + [s_name],
          l + [l_name],
      ]
      return groups
    lhs_groups = _generate_groups(outer_names)
    # Only "outer" dimensions are permuted.
    rhs_outer_names = list(draw(st.permutations(outer_names)))
    rhs_groups = _generate_groups(rhs_outer_names)
  else:
    num_dims = draw(st.integers(1, 6))
    all_names = list(string.ascii_lowercase[:num_dims])
    dim_sizes = {n: draw(st.integers(1, 4)) for n in all_names}
    lhs_flat_names = all_names
    lhs_groups = partition(lhs_flat_names)
    rhs_flat_names = list(draw(st.permutations(all_names)))
    rhs_groups = partition(rhs_flat_names)
  def format_side(groups):
    return "".join(g[0] if len(g) == 1 else f"({''.join(g)})" for g in groups)
  equation = f"{format_side(lhs_groups)}->{format_side(rhs_groups)}"
  # Sizes must be supplied for all but one name of each LHS split group.
  kwargs = {}
  for g in lhs_groups:
    if len(g) > 1:
      drop_idx = draw(st.integers(0, len(g) - 1))
      for i, n in enumerate(g):
        if i != drop_idx:
          kwargs[n] = dim_sizes[n]
  lhs_shape = tuple(math.prod(dim_sizes[n] for n in g) for g in lhs_groups)
  rhs_shape = tuple(math.prod(dim_sizes[n] for n in g) for g in rhs_groups)
  lhs_flat = [n for g in lhs_groups for n in g]
  rhs_flat = [n for g in rhs_groups for n in g]
  atomic_shape = tuple(dim_sizes[n] for n in lhs_flat)
  perm = tuple(lhs_flat.index(n) for n in rhs_flat)
  return {
      "equation": equation,
      "lhs_shape": lhs_shape,
      "rhs_shape": rhs_shape,
      "kwargs": kwargs,
      "atomic_shape": atomic_shape,
      "perm": perm,
  }
class EinshapeTest(jtu.JaxTestCase):
  """Tests `pltpu.einshape` outside of a kernel."""

  # Subclasses set this to True to generate only tile-preserving cases.
  has_shape_constraint: bool = False

  def impl(self, equation, x, **sizes):
    # Indirection point so subclasses can run einshape inside a kernel.
    return pltpu.einshape(equation, x, **sizes)

  @parameterized.product(
      einshape=[
          # TODO(sharadmv): why does this test time out? Mosaic padding bug?
          # ("a(bc)->abc", (2, 3 * 128), {"c": 128}),
          ("ab(cd)->cabd", (2, 4, 128 * 4), {"c": 4}),
          ("abcd->ab(cd)", (2, 3, 4, 128), {}),
          ("abc->a(bc)", (8, 2, 128), {}),
          ("a(bc)->abc", (10, 128 * 4), {"b": 4}),
          ("a(bc)->abc", (10, 128 * 2), {"c": 128}),
          ("a(bc)->abc", (10, 128 * 2), {"b": 2}),
          ("a(bc)->b(ac)", (8, 256), {"c": 128}),
      ],
      dtype=["int32", "bfloat16"],
  )
  def test_einshape_basic(self, einshape, dtype):
    """Checks each fixed equation against a hand-written reshape/transpose."""
    equation, shape, sizes = einshape
    x = jnp.arange(math.prod(shape)).reshape(shape).astype(dtype)
    out = self.impl(equation, x, **sizes)
    match equation:
      case "ab->ab":
        expected = x
      case "abc->(ab)c":
        expected = x.reshape(x.shape[0] * x.shape[1], x.shape[2])
      case "ab(cd)->cabd":
        a, b, cd = x.shape
        c = sizes["c"]
        d = cd // c
        expected = x.reshape(a, b, c, d).transpose(2, 0, 1, 3)
      case "abcd->ab(cd)":
        a, b, c, d = x.shape
        expected = x.reshape(a, b, c * d)
      case "abc->a(bc)":
        a, b, c = x.shape
        expected = x.reshape(a, b * c)
      case "a(bc)->abc":
        a, bc = x.shape
        if "b" in sizes:
          b = sizes["b"]
          c = bc // b
        else:
          c = sizes["c"]
          b = bc // c
        expected = x.reshape(a, b, c)
      case "a(bc)->b(ac)":
        a, bc = x.shape
        c = sizes["c"]
        b = bc // c
        expected = x.reshape(a, b, c).transpose(1, 0, 2).reshape(b, a * c)
      case _:
        raise ValueError(f"Unsupported equation: {equation}")
    self.assertArraysEqual(out, expected)

  def test_error_ambiguous(self):
    """A multi-way split with no sizes given must be rejected."""
    x = jnp.zeros((10, 12))
    with self.assertRaisesRegex(ValueError, "Ambiguous split"):
      self.impl("a(bc)->abc", x)

  def test_error_mismatch(self):
    """A split size that does not divide the dimension must be rejected."""
    x = jnp.zeros((10, 13))
    with self.assertRaisesRegex(ValueError, "Cannot split size"):
      self.impl("a(bc)->abc", x, b=3)

  @hp.given(data=st.data(), dtype=st.sampled_from(["int32", "bfloat16"]))
  @hp.settings(max_examples=200)
  def test_hypothesis_einshape(self, data, dtype):
    """Random equations: einshape must equal reshape -> transpose -> reshape."""
    case = data.draw(einshape_strategy(dtype, self.has_shape_constraint))
    equation = case["equation"]
    lhs_shape = case["lhs_shape"]
    rhs_shape = case["rhs_shape"]
    kwargs = case["kwargs"]
    atomic_shape = case["atomic_shape"]
    perm = case["perm"]
    x = jnp.arange(math.prod(lhs_shape)).reshape(lhs_shape).astype(dtype)
    out = self.impl(equation, x, **kwargs)
    self.assertEqual(out.shape, rhs_shape)
    x_atomic = x.reshape(atomic_shape)
    x_transposed = jax.lax.transpose(x_atomic, perm)
    expected = x_transposed.reshape(rhs_shape)
    self.assertArraysEqual(out, expected)
class EinshapeTPUKernelTest(EinshapeTest):
  """Runs the same einshape tests inside a real Pallas TPU kernel."""

  # Kernel-side cases must keep the hardware (sublane, lane) tile intact.
  has_shape_constraint: bool = True

  def impl(self, equation, x, **sizes):
    # Route through the jitted Pallas kernel driver; sizes become a hashable
    # tuple for the static jit argument.
    return _einshape_kernel(x, equation, tuple(sizes.items()))

  def setUp(self):
    super().setUp()
    if not jtu.is_device_tpu_at_least(4):
      self.skipTest("Skipping test because TPU is not supported.")
    if not jtu.is_cloud_tpu_at_least(2026, 2, 24):
      self.skipTest("Requires a newer libTPU")
    if (self._testMethodName == "test_hypothesis_einshape"
        and jtu.is_device_tpu(7)):
      self.skipTest("test_hypothesis_einshape is failing on TPU 7x")
if __name__ == "__main__":
absltest.main()
| {
"repo_id": "jax-ml/jax",
"file_path": "tests/pallas/einshape_test.py",
"license": "Apache License 2.0",
"lines": 217,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
jax-ml/jax:jax/_src/indexing.py | # Copyright 2026 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import dataclasses
from jax._src import core
from jax._src import tree_util
from jax._src.typing import Array
@tree_util.register_pytree_node_class
@dataclasses.dataclass
class Slice:
  """A slice with a start index and a size.

  Both start index and size can either be static, i.e. known at tracing
  and compilation time, or dynamic.
  """

  # Start index; an Array value makes it dynamic.
  start: int | Array
  # Number of elements; an Array value makes it dynamic.
  size: int | Array
  # Step between elements; always a static, non-negative int.
  stride: int = 1

  def __post_init__(self):
    if self.stride < 0:
      raise ValueError("`stride` must be >= 0.")

  @property
  def is_dynamic_start(self):
    # Dynamic means "not a canonical dimension value" (i.e. a traced value).
    return not core.is_dim(self.start)

  @property
  def is_dynamic_size(self):
    return not core.is_dim(self.size)

  def tree_flatten(self):
    # If `start` is statically known, we treat it as static information
    # (aux data); only dynamic values become pytree leaves. Leaf and aux
    # tuples are kept positionally aligned (None filling the unused slot)
    # so `tree_unflatten` can zip them back together.
    xs = ()
    data = ()
    xs += (self.start,) if self.is_dynamic_start else (None,)
    data += (None,) if self.is_dynamic_start else (self.start,)
    xs += (self.size,) if self.is_dynamic_size else (None,)
    data += (None,) if self.is_dynamic_size else (self.size,)
    data += (self.stride,)
    return xs, data

  @classmethod
  def tree_unflatten(cls, aux_data, children) -> Slice:
    # Each of (start, size) comes from exactly one of children/aux_data.
    start, size = (
        a if a is not None else b for a, b in zip(children, aux_data[:2])
    )
    return cls(start, size, aux_data[2])

  @classmethod
  def from_slice(cls, slc: slice, size: int) -> Slice:
    # Converts a builtin `slice` (interpreted relative to `size`) to a Slice.
    start, step, size = core.canonicalize_slice(slc, size)
    if step < 1:
      raise ValueError(f"slice must have a step >= 1 (found: {step})")
    return cls(start, size, step)
def dslice(
    start: int | Array | None,
    size: int | Array | None = None,
    stride: int | None = None,
) -> slice | Slice:
  """Constructs a ``Slice`` from a start index and a size.

  The semantics of ``dslice`` mirror those of the builtin ``slice`` type:

  * ``dslice(None)`` is ``:``
  * ``dslice(j)`` is ``:j``
  * ``dslice(i, j)`` is ``i:i+j``
  * ``dslice(i, j, stride)`` is ``i:i+j:stride``

  Examples:
    >>> x = jax.numpy.arange(10)
    >>> i = 4
    >>> x[i: i + 2]  # standard indexing requires i to be static
    Array([4, 5], dtype=int32)
    >>> x[jax.ds(i, 2)]  # equivalent which allows i to be dynamic
    Array([4, 5], dtype=int32)

    >>> @jax.jit(static_argnames='size')
    ... def f(x, i, size):
    ...   return x[jax.ds(i, size)]
    ...
    >>> f(x, i, 2)
    Array([4, 5], dtype=int32)
  """
  if start is None:
    # ``dslice(None)`` selects everything, like a bare ``:``.
    return slice(None)
  step = 1 if stride is None else stride
  if not isinstance(step, int):
    raise ValueError("Non-static stride in `dslice`")
  if size is not None:
    return Slice(start, size, step)
  # One-argument form: ``dslice(j)`` means ``:j``, so `start` is the size
  # and must therefore be static.
  if not isinstance(start, int):
    raise ValueError("Non-static `dslice`")
  return Slice(0, start, step)
ds = dslice  # Handy alias.
| {
"repo_id": "jax-ml/jax",
"file_path": "jax/_src/indexing.py",
"license": "Apache License 2.0",
"lines": 97,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:jax/_src/lax/scaled_dot.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from typing import Sequence
import jax
from jax._src import core
from jax._src import dispatch
from jax._src import dtypes
from jax._src import numpy as jnp
from jax._src.interpreters import batching
from jax._src.interpreters import mlir
from jax._src.lax import lax
from jax._src.typing import Array, DTypeLike
def _validate_operand_scale(
side, operand, scale, contracting_dims: Sequence[int]
):
for i, size in enumerate(operand.shape):
if i in contracting_dims:
if size % scale.shape[i] != 0:
raise TypeError(
f"{side} contracting dim {i} of size {size} must be divisible by "
f"its scale's dim size {scale.shape[i]}."
)
s = size // scale.shape[i]
if s < 2:
raise TypeError(
f"The ratio of {side} contracting dim {i} to its scale's dim size"
f" ({s}) must be at least 2."
)
elif scale.shape[i] != size:
raise TypeError(
f"{side} dim {i} of size {size} does not match scale dim size "
f"{scale.shape[i]}."
)
def _scaled_dot_validate_inputs(
    lhs: Array,
    rhs: Array,
    lhs_scale: Array | None,
    rhs_scale: Array | None,
    *,
    dimension_numbers: lax.DotDimensionNumbers,
    preferred_element_type: DTypeLike | None,
):
  """Validates the inputs to scaled_dot.

  Checks that all present tensors share one rank, that batch/contracting dim
  lists match between LHS and RHS, and that each scale is compatible with its
  operand. `preferred_element_type` is accepted for signature parity and not
  validated here.

  Raises:
    TypeError: On any rank or shape inconsistency.
  """
  (lhs_contracting, rhs_contracting), (lhs_batch, rhs_batch) = dimension_numbers
  # All present tensors (operands and scales) must have the same rank.
  ndims = [lhs.ndim, rhs.ndim]
  if lhs_scale is not None:
    ndims.append(lhs_scale.ndim)
  if rhs_scale is not None:
    ndims.append(rhs_scale.ndim)
  if max(ndims) != min(ndims):
    raise TypeError(
        "All input tensors must have the same rank. Got lhs rank:"
        f" {lhs.ndim} rhs rank: {rhs.ndim} lhs_scale rank:"
        f" {lhs_scale.ndim if lhs_scale is not None else 'N/A'} rhs_scale"
        f" rank: {rhs_scale.ndim if rhs_scale is not None else 'N/A'}."
    )
  if len(lhs_batch) != len(rhs_batch):
    raise TypeError(
        "LHS and RHS must have the same number of batch dimensions, got"
        f" {len(lhs_batch)} and {len(rhs_batch)}."
    )
  if len(lhs_contracting) != len(rhs_contracting):
    raise TypeError(
        "LHS and RHS must have the same number of contracting dimensions, got"
        f" {len(lhs_contracting)} and {len(rhs_contracting)}."
    )
  # Paired batch dims must agree in size across operands and scales.
  for i_lhs, i_rhs in zip(lhs_batch, rhs_batch):
    batch_dims_sizes = [
        lhs.shape[i_lhs],
        rhs.shape[i_rhs],
    ]
    if lhs_scale is not None:
      batch_dims_sizes.append(lhs_scale.shape[i_lhs])
    if rhs_scale is not None:
      batch_dims_sizes.append(rhs_scale.shape[i_rhs])
    if max(batch_dims_sizes) != min(batch_dims_sizes):
      raise TypeError(
          "All input tensors must have the same batch dimension size for"
          f" batch dims ({i_lhs}, {i_rhs})."
      )
  # Check contracting dimensions are the same.
  for i, j in zip(lhs_contracting, rhs_contracting):
    if lhs.shape[i] != rhs.shape[j]:
      raise TypeError(
          f"LHS contracting dim {i} of size"
          f" {lhs.shape[i]} does not match RHS"
          f" contracting dim {j} of size"
          f" {rhs.shape[j]}."
      )
  # Per-operand scale shape checks (block scales along contracting dims).
  if lhs_scale is not None:
    _validate_operand_scale("LHS", lhs, lhs_scale, lhs_contracting)
  if rhs_scale is not None:
    _validate_operand_scale("RHS", rhs, rhs_scale, rhs_contracting)
def _scaled_dot_abstract_eval(
    lhs,
    rhs,
    lhs_scale,
    rhs_scale,
    *,
    dimension_numbers: lax.DotDimensionNumbers,
    preferred_element_type: DTypeLike | None = None,
):
  """Shape/dtype rule for `scaled_dot`.

  The output layout follows dot_general: batch dims first, then the kept
  (non-contracting, non-batch) dims of lhs, then those of rhs. The output
  dtype is `preferred_element_type` when given, else bfloat16 (matching the
  bf16 reference implementation).
  """
  _scaled_dot_validate_inputs(
      lhs,
      rhs,
      lhs_scale,
      rhs_scale,
      dimension_numbers=dimension_numbers,
      preferred_element_type=preferred_element_type,
  )
  (lhs_contracting, rhs_contracting), (lhs_batch, rhs_batch) = dimension_numbers
  lhs_shape, rhs_shape = lhs.shape, rhs.shape
  batch_dims_shape = [lhs_shape[i] for i in lhs_batch]
  # range() is already ascending, so no extra sort is needed for kept dims.
  lhs_kept = [
      i
      for i in range(len(lhs_shape))
      if i not in lhs_contracting and i not in lhs_batch
  ]
  rhs_kept = [
      i
      for i in range(len(rhs_shape))
      if i not in rhs_contracting and i not in rhs_batch
  ]
  output_shape = tuple(
      batch_dims_shape
      + [lhs_shape[i] for i in lhs_kept]
      + [rhs_shape[i] for i in rhs_kept]
  )
  if preferred_element_type is not None:
    output_dtype = preferred_element_type
  else:
    output_dtype = dtypes.bfloat16
  return core.ShapedArray(output_shape, output_dtype)
def _scale_broadcast(
scale: Array,
operand_shape: tuple[int, ...],
contracting_dims: Sequence[int],
) -> Array:
for i in contracting_dims:
if scale.shape[i] != operand_shape[i]:
multiplier = operand_shape[i] // scale.shape[i]
new_broadcast_shape = list(scale.shape)
new_broadcast_shape.insert(i + 1, multiplier)
scale = jnp.expand_dims(scale, axis=i + 1)
scale = jnp.broadcast_to(scale, new_broadcast_shape)
new_reshape_shape = list(scale.shape)
new_reshape_shape[i] = new_reshape_shape[i] * new_reshape_shape[i + 1]
new_reshape_shape.pop(i + 1)
scale = scale.reshape(new_reshape_shape)
return scale
# 4. Primal Implementation
@partial(lax.composite, name="xla.scaled_dot")
def _scaled_dot_impl(
    lhs: Array,
    rhs: Array,
    lhs_scale: Array,
    rhs_scale: Array,
    *,
    dimension_numbers: lax.DotDimensionNumbers,
    preferred_element_type: DTypeLike | None = None,
) -> Array:
  """Implementation of scaled_dot that could be replaced by XLA.

  Reference semantics: broadcast each block scale up to its operand's shape,
  multiply operand and scale in bfloat16, then run a plain dot_general.
  """
  (lhs_contracting, rhs_contracting), _ = dimension_numbers
  # Expand block scales so they align element-wise with the operands.
  lhs_scale = _scale_broadcast(lhs_scale, lhs.shape, lhs_contracting)
  lhs = lhs.astype(dtypes.bfloat16)
  lhs_scale = lhs_scale.astype(dtypes.bfloat16)
  lhs_scaled = lhs * lhs_scale
  rhs_scale = _scale_broadcast(rhs_scale, rhs.shape, rhs_contracting)
  rhs = rhs.astype(dtypes.bfloat16)
  rhs_scale = rhs_scale.astype(dtypes.bfloat16)
  rhs_scaled = rhs * rhs_scale
  result = jax.lax.dot_general(
      lhs_scaled,
      rhs_scaled,
      dimension_numbers=dimension_numbers,
      preferred_element_type=preferred_element_type,
  )
  return result
# Primitive registration: shape rule, MLIR lowering via the composite
# reference implementation, and an eager impl dispatching through XLA.
scaled_dot_p = core.Primitive("scaled_dot")
scaled_dot_p.def_abstract_eval(_scaled_dot_abstract_eval)
scaled_dot_lowering = mlir.lower_fun(_scaled_dot_impl, multiple_results=False)
mlir.register_lowering(scaled_dot_p, scaled_dot_lowering)
scaled_dot_p.def_impl(partial(dispatch.apply_primitive, scaled_dot_p))
def _create_dummy_scale(operand, contracting_dims):
shape = list(operand.shape)
for d in contracting_dims:
shape[d] = 1
return jnp.ones(shape, dtype=jnp.bfloat16).astype(dtypes.float8_e8m0fnu)
def _scaled_dot_batching_rule(
    batched_args, batch_dims, *, dimension_numbers, preferred_element_type
):
  """Batching (vmap) rule for `scaled_dot_p`.

  Moves every operand's batch dimension to axis 0, shifts all pre-existing
  dimension numbers right by one, and prepends axis 0 as a new batch
  dimension on both sides.
  """
  lhs, rhs, lhs_scale, rhs_scale = batched_args
  lhs_bd, rhs_bd, lhs_scale_bd, rhs_scale_bd = batch_dims
  # The batch size comes from any argument that actually carries a batch
  # axis (bdim is not None).
  batch_size = next(
      arg.shape[bd] for arg, bd in zip(batched_args, batch_dims)
      if bd is not None
  )
  # Normalize: the batch axis goes to the front of every input. Inputs with
  # no batch axis are broadcast to gain one; inputs batched elsewhere have
  # the axis moved to 0.
  lhs = batching.bdim_at_front(lhs, lhs_bd, batch_size)
  rhs = batching.bdim_at_front(rhs, rhs_bd, batch_size)
  if lhs_scale is not None:
    lhs_scale = batching.bdim_at_front(lhs_scale, lhs_scale_bd, batch_size)
  if rhs_scale is not None:
    rhs_scale = batching.bdim_at_front(rhs_scale, rhs_scale_bd, batch_size)
  (lhs_contract, rhs_contract), (lhs_batch, rhs_batch) = dimension_numbers
  shift = lambda dims: tuple(d + 1 for d in dims)
  # Every existing index moves right by one (a new axis was prepended), and
  # axis 0 becomes an additional batch dimension on both operands.
  new_dimension_numbers = (
      (shift(lhs_contract), shift(rhs_contract)),
      ((0,) + shift(lhs_batch), (0,) + shift(rhs_batch)),
  )
  out = scaled_dot_p.bind(
      lhs,
      rhs,
      lhs_scale,
      rhs_scale,
      dimension_numbers=new_dimension_numbers,
      preferred_element_type=preferred_element_type,
  )
  # The result carries its batch dimension at axis 0.
  return out, 0
# Register the vmap rule for the primitive.
batching.primitive_batchers[scaled_dot_p] = _scaled_dot_batching_rule
def scaled_dot(
    lhs: Array,
    rhs: Array,
    *,
    lhs_scale: Array | None = None,
    rhs_scale: Array | None = None,
    dimension_numbers: lax.DotDimensionNumbers | None = None,
    preferred_element_type: DTypeLike | None = None,
):
  """Computes a scaled dot product.

  Evaluates ``(lhs * lhs_scale) @ (rhs * rhs_scale)`` in
  ``preferred_element_type`` precision, where ``@`` denotes
  `jax.lax.dot_general`.

  A scale must match its operand on every non-contracting dimension. Along a
  contracting dimension, the operand's size must be an integer multiple of
  the scale's size (the "subchannel" size); which subchannel sizes are fast
  depends on the platform.

  .. note::
    This currently isn't differentiable (no transpose rule).

  Args:
    lhs: The left-hand side operand of the dot product.
    rhs: The right-hand side operand of the dot product.
    lhs_scale: Scale factors for `lhs`; should be at least 2x smaller than
      `lhs` along the contracting dimension. Defaults to an all-ones scale.
    rhs_scale: Scale factors for `rhs`; should be at least 2x smaller than
      `rhs` along the contracting dimension. Defaults to an all-ones scale.
    dimension_numbers: A tuple ``((lhs_contracting_dims,
      rhs_contracting_dims), (lhs_batch_dims, rhs_batch_dims))``. When
      omitted, the last dimension of `lhs` contracts with the second-to-last
      dimension of `rhs` and all leading dimensions are batch dimensions —
      i.e. ``(((1,), (0,)), ((), ()))`` for 2D inputs and
      ``(((2,), (1,)), ((0,), (0,)))`` for 3D inputs.
    preferred_element_type: The desired dtype of the output and intermediate
      accumulations, can be `bfloat16` or `float32`. Defaults to `bfloat16`.

  Returns:
    The result of the scaled dot product.
  """
  if dimension_numbers is None:
    # Default: contract the trailing dims, batch over all leading dims.
    rank = lhs.ndim
    if rank < 2:
      raise TypeError("scaled_dot does not support 0-rank and 1-rank operands.")
    batch_dims = tuple(range(rank - 2))
    dimension_numbers = (((rank - 1,), (rank - 2,)), (batch_dims, batch_dims))
  (lhs_contracting, rhs_contracting), _ = dimension_numbers
  # A missing scale defaults to all-ones, i.e. an ordinary dot product.
  if lhs_scale is None:
    lhs_scale = _create_dummy_scale(lhs, lhs_contracting)
  if rhs_scale is None:
    rhs_scale = _create_dummy_scale(rhs, rhs_contracting)
  element_type = dtypes.check_and_canonicalize_user_dtype(
      dtypes.bfloat16 if preferred_element_type is None
      else preferred_element_type,
      "scaled_dot",
  )
  return scaled_dot_p.bind(
      lhs,
      rhs,
      lhs_scale,
      rhs_scale,
      dimension_numbers=dimension_numbers,
      preferred_element_type=element_type,
  )
| {
"repo_id": "jax-ml/jax",
"file_path": "jax/_src/lax/scaled_dot.py",
"license": "Apache License 2.0",
"lines": 355,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:tests/scaled_dot_test.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import os
import sys
from absl.testing import absltest
from absl.testing import parameterized
import jax
from jax._src import config
from jax._src import test_util as jtu
from jax._src.typing import ArrayLike, DTypeLike
import jax.numpy as jnp
# XLA_FLAGS is process-global; only set it when running under absltest.
# Under pytest it would leak into unrelated tests (the test class below also
# skips itself when running under pytest).
if "pytest" not in sys.modules:
  os.environ["XLA_FLAGS"] = (
      "--xla_gpu_experimental_scaled_dot_with_triton=true "
  )
config.parse_flags_with_absl()
def _e8m0fnu(shape):
key = jax.random.key(42)
return jax.random.randint(
key, shape, minval=0, maxval=256, dtype=jnp.int8
).astype(jnp.float8_e8m0fnu)
def _e4m3fn(shape):
key = jax.random.key(42)
return jax.random.normal(key, shape, dtype=jnp.float8_e4m3fn)
def _bf16(shape):
key = jax.random.key(42)
return jax.random.normal(key, shape, dtype=jnp.bfloat16)
def _scale(shape, dtype=jnp.float8_e8m0fnu):
key = jax.random.key(42)
if dtype == jnp.float8_e8m0fnu:
return jax.random.randint(
key, shape, minval=0, maxval=256, dtype=jnp.int8
).astype(jnp.float8_e8m0fnu)
elif dtype == jnp.bfloat16:
return jax.random.normal(key, shape, dtype=jnp.bfloat16)
else:
raise ValueError(f"Unsupported dtype: {dtype}")
def _scaled_dot_2d(
    *args,
    lhs: ArrayLike | None = None,
    rhs: ArrayLike | None = None,
    lhs_scales: ArrayLike | None = None,
    rhs_scales: ArrayLike | None = None,
    preferred_element_type: DTypeLike | None = None,
    scale_dtype: DTypeLike = jnp.float8_e8m0fnu,
):
  """Helper that runs a 2D scaled dot, filling in default operands.

  If the arguments are not provided, the default values are:
    lhs = _e4m3fn((1, 32))
    rhs = _e4m3fn((32, 4))
    lhs_scales = _scale((1, 1), scale_dtype)
    rhs_scales = _scale((1, 4), scale_dtype)

  Returns:
    The result of `jax.lax.scaled_dot`. (Previously the result was
    discarded, which made the helper unusable for value checks.)

  Raises:
    ValueError: If any positional arguments are passed.
  """
  if args:
    raise ValueError(
        "Wrong test setup: all the arguments must be passed as keyword"
        " arguments."
    )
  if lhs is None:
    lhs = _e4m3fn((1, 32))
  if rhs is None:
    rhs = _e4m3fn((32, 4))
  if lhs_scales is None:
    lhs_scales = _scale((1, 1), scale_dtype)
  if rhs_scales is None:
    rhs_scales = _scale((1, 4), scale_dtype)
  return jax.lax.scaled_dot(
      lhs,
      rhs,
      lhs_scale=lhs_scales,
      rhs_scale=rhs_scales,
      preferred_element_type=preferred_element_type,
  )
def _scaled_dot_3d(
    *args,
    lhs: ArrayLike | None = None,
    rhs: ArrayLike | None = None,
    lhs_scales: ArrayLike | None = None,
    rhs_scales: ArrayLike | None = None,
    preferred_element_type: DTypeLike | None = None,
    scale_dtype: DTypeLike = jnp.float8_e8m0fnu,
):
  """Helper that runs a 3D (batched) scaled dot, filling in default operands.

  If the arguments are not provided, the default values are:
    lhs = _e4m3fn((1, 1, 32))
    rhs = _e4m3fn((1, 32, 4))
    lhs_scales = _scale((1, 1, 1), scale_dtype)
    rhs_scales = _scale((1, 1, 4), scale_dtype)

  Returns:
    The result of `jax.lax.scaled_dot`. (Previously the result was
    discarded, which made the helper unusable for value checks.)

  Raises:
    ValueError: If any positional arguments are passed.
  """
  if args:
    raise ValueError(
        "Wrong test setup: all the arguments must be passed as keyword"
        " arguments."
    )
  if lhs is None:
    lhs = _e4m3fn((1, 1, 32))
  if rhs is None:
    rhs = _e4m3fn((1, 32, 4))
  if lhs_scales is None:
    lhs_scales = _scale((1, 1, 1), scale_dtype)
  if rhs_scales is None:
    rhs_scales = _scale((1, 1, 4), scale_dtype)
  return jax.lax.scaled_dot(
      lhs,
      rhs,
      lhs_scale=lhs_scales,
      rhs_scale=rhs_scales,
      preferred_element_type=preferred_element_type,
  )
def _quantize_to_fp8(x: jnp.ndarray, subchannel_size: int = 32):
"""Quantizes a bfloat16 tensor to fp8e4m3fn and returns fp8e8m0fnu scales."""
assert x.dtype == jnp.bfloat16
assert x.ndim == 3
B, M, K = x.shape
assert K % subchannel_size == 0
num_subchannels = K // subchannel_size
# Reshape for subchannel quantization
x_reshaped = x.reshape(B, M, num_subchannels, subchannel_size)
# Find maximum absolute value for scaling
scales = jnp.max(jnp.abs(x_reshaped), axis=-1)
scales = jnp.where(scales == 0.0, 1.0, scales)
scales = scales.astype(jnp.float8_e8m0fnu)
# Apply scales and quantize
inv_scales = 1.0 / scales.astype(jnp.bfloat16)
x_quantized = (x_reshaped * jnp.expand_dims(inv_scales, axis=-1)).astype(
jnp.float8_e4m3fn
)
x_quantized = x_quantized.reshape(B, M, K)
return x_quantized, scales
@jax.jit(static_argnames=["dimension_numbers", "preferred_element_type"])
def scaled_dot_fn(
lhs,
rhs,
*,
lhs_scale=None,
rhs_scale=None,
dimension_numbers=None,
preferred_element_type=None,
):
return jax.lax.scaled_dot(
lhs,
rhs,
lhs_scale=lhs_scale,
rhs_scale=rhs_scale,
dimension_numbers=dimension_numbers,
preferred_element_type=preferred_element_type,
)
class ScaledDotTest(jtu.JaxTestCase):
  """End-to-end tests for `jax.lax.scaled_dot`.

  Covers dtype combinations, subchannel sizes, error reporting for malformed
  operand/scale shapes, jit, batching (vmap), and numeric agreement with a
  plain bf16 `dot_general`.
  """
  def setUp(self):
    super().setUp()
    # These tests rely on XLA_FLAGS being set at module import time, which is
    # deliberately skipped under pytest (see the guard at the top of the file).
    if "pytest" in sys.modules:
      self.skipTest(
          "Skip scaled_dot_test when running under pytest. We don't want to set"
          " XLA_FLAGS in pytest because it could affect other tests."
      )
  def _should_skip_test(self, scale_dtype):
    # f8e8m0fnu scales are skipped on TPU and on GPUs older than compute
    # capability 8.9.
    if jtu.device_under_test() == "tpu" and scale_dtype == jnp.float8_e8m0fnu:
      self.skipTest("Skip. TPU does not support f8e8m0fnu.")
    if (
        jtu.device_under_test() == "gpu"
        and scale_dtype == jnp.float8_e8m0fnu
        and not jtu.is_cuda_compute_capability_at_least("8.9")
    ):
      self.skipTest("Skip. Old GPU does not support f8e8m0fnu.")
  @parameterized.product(
      scale_dtype=[jnp.float8_e8m0fnu, jnp.bfloat16],
  )
  def test_working_example(self, scale_dtype):
    """Smoke test: the docstring-style example runs under jit."""
    self._should_skip_test(scale_dtype)
    B = 32
    M = 1024
    N = 16
    K = 4096
    subchannel_size = 32
    lhs_shape = (B, M, K)
    rhs_shape = (B, K, N)
    lhs_scales_shape = (B, M, K // subchannel_size)
    rhs_scales_shape = (B, K // subchannel_size, N)
    key = jax.random.key(42)
    lhs = jax.random.normal(key, lhs_shape, dtype=jnp.float8_e4m3fn)
    rhs = jax.random.normal(key, rhs_shape, dtype=jnp.float8_e4m3fn)
    lhs_scales = jax.random.normal(key, lhs_scales_shape, dtype=scale_dtype)
    rhs_scales = jax.random.normal(key, rhs_scales_shape, dtype=scale_dtype)
    # Local jitted wrapper (shadows the module-level `scaled_dot_fn`).
    @jax.jit
    def scaled_dot_fn(lhs, rhs, lhs_scale, rhs_scale):
      return jax.lax.scaled_dot(
          lhs,
          rhs,
          lhs_scale=lhs_scale,
          rhs_scale=rhs_scale,
      )
    result_jit = scaled_dot_fn(
        lhs,
        rhs,
        lhs_scale=lhs_scales,
        rhs_scale=rhs_scales,
    )
    self.assertEqual(result_jit.dtype, jnp.bfloat16)
  @parameterized.product(
      dtype=[jnp.float8_e4m3fn, jnp.float8_e5m2],
      scale_dtype=[jnp.float8_e8m0fnu, jnp.bfloat16],
      B=[1],
      M=[1024],
      N=[256],
      K=[128],
  )
  def test_fp8_types(self, dtype, scale_dtype, B, M, N, K):
    """Both supported fp8 operand types produce a bfloat16 result."""
    self._should_skip_test(scale_dtype)
    a = self.rng().randn(B, M, K).astype(dtype)
    b = self.rng().randn(B, K, N).astype(dtype)
    a_scales = jnp.ones((B, M, K // 32), dtype=scale_dtype)
    b_scales = jnp.ones((B, K // 32, N), dtype=scale_dtype)
    r = scaled_dot_fn(a, b, lhs_scale=a_scales, rhs_scale=b_scales)
    self.assertEqual(r.dtype, jnp.bfloat16)
  @parameterized.product(
      S=[1, 2, 3, 4, 5],
      dtype=[jnp.float8_e4m3fn, jnp.float8_e5m2],
      scale_dtype=[jnp.float8_e8m0fnu, jnp.bfloat16],
      B=[1],
      M=[256],
      N=[256],
      K=[128],
  )
  def test_different_subchannel_sizes(self, S, dtype, scale_dtype, B, M, N, K):
    """jit and eager agree for subchannel sizes 32*S."""
    self._should_skip_test(scale_dtype)
    a = self.rng().randn(B, M, K * 32 * S).astype(dtype)
    b = self.rng().randn(B, K * 32 * S, N).astype(dtype)
    a_scales = jnp.ones((B, M, K), dtype=scale_dtype)
    b_scales = jnp.ones((B, K, N), dtype=scale_dtype)
    result_jit = scaled_dot_fn(a, b, lhs_scale=a_scales, rhs_scale=b_scales)
    result = jax.lax.scaled_dot(a, b, lhs_scale=a_scales, rhs_scale=b_scales)
    self.assertAllClose(result_jit, result)
  def test_same_rank_error(self):
    with self.assertRaisesRegex(TypeError, "must have the same rank."):
      _scaled_dot_2d(lhs=_e4m3fn((1, 1, 1)), scale_dtype=jnp.bfloat16)
  def test_batch_dim_mismatch_error(self):
    with self.assertRaisesRegex(TypeError, "same batch dimension size"):
      _scaled_dot_3d(lhs=_e4m3fn((2, 1, 1)), scale_dtype=jnp.bfloat16)
  def test_contracting_dim_mismatch_error(self):
    with self.assertRaisesRegex(
        TypeError,
        "LHS contracting dim .* of size .* does not match RHS contracting dim"
        " .* of size .*.",
    ):
      _scaled_dot_2d(lhs=_e4m3fn((1, 16)), scale_dtype=jnp.bfloat16)
  def test_lhs_contracting_dim_too_small_error(self):
    # Scale as large as the operand along the contracting dim is rejected.
    with self.assertRaisesRegex(
        TypeError,
        "The ratio of LHS contracting dim .* to its scale's dim size .* must be"
        " at least 2.",
    ):
      _scaled_dot_2d(lhs_scales=_bf16((1, 32)), scale_dtype=jnp.bfloat16)
  def test_rhs_contracting_dim_too_small_error(self):
    with self.assertRaisesRegex(
        TypeError,
        "The ratio of RHS contracting dim .* to its scale's dim size .* must be"
        " at least 2.",
    ):
      _scaled_dot_2d(rhs_scales=_bf16((32, 4)), scale_dtype=jnp.bfloat16)
  def test_lhs_scale_dim_mismatch_error(self):
    # Non-contracting dims of operand and scale must match exactly.
    with self.assertRaisesRegex(
        TypeError, "LHS dim .* of size .* does not match scale dim size .*."
    ):
      _scaled_dot_2d(
          lhs=_e4m3fn((4, 32)),
          lhs_scales=_bf16((1, 1)),
          scale_dtype=jnp.bfloat16,
      )
  def test_rhs_scale_dim_mismatch_error(self):
    with self.assertRaisesRegex(
        TypeError, "RHS dim .* of size .* does not match scale dim size .*."
    ):
      _scaled_dot_2d(
          rhs=_e4m3fn((32, 4)),
          rhs_scales=_bf16((1, 1)),
          scale_dtype=jnp.bfloat16,
      )
  def test_too_many_args(self):
    # Scales are keyword-only on the public API.
    with self.assertRaisesRegex(
        TypeError, "takes 2 positional arguments but 3 were given"
    ):
      jax.lax.scaled_dot(_e4m3fn((1, 32)), _e4m3fn((32, 4)), _bf16((1, 1)))
  def test_lhs_bf16(self):
    """Mix precision case.

    Here we check the mix precision case where the lhs is bf16
    and the rhs is e4m3fn with the rhs scale being e8m0fnu.
    """
    self._should_skip_test(jnp.float8_e8m0fnu)
    lhs = _bf16((1, 32))
    rhs = _e4m3fn((32, 4))
    rhs_scale = _e8m0fnu((1, 4))
    result = jax.lax.scaled_dot(
        lhs,
        rhs,
        rhs_scale=rhs_scale,
    )
    result_jit = scaled_dot_fn(lhs, rhs, rhs_scale=rhs_scale)
    self.assertAllClose(result, result_jit)
  def test_rhs_bf16(self):
    """Mix precision case.

    Here we check the mix precision case where the rhs is bf16
    and the lhs is e4m3fn with the lhs scale being e8m0fnu.
    """
    if jtu.device_under_test() == "tpu":
      self.skipTest("Skip. TPU does not support f8e8m0fnu.")
    lhs = _e4m3fn((1, 32))
    rhs = _bf16((32, 4))
    lhs_scale = _e8m0fnu((1, 1))
    result = jax.lax.scaled_dot(lhs, rhs, lhs_scale=lhs_scale)
    result_jit = scaled_dot_fn(lhs, rhs, lhs_scale=lhs_scale)
    self.assertAllClose(result, result_jit)
  def test_f16_and_f32_operand_types(self):
    """Wider (f16/f32) operands with e8m0 scales and f32 accumulation."""
    self._should_skip_test(jnp.float8_e8m0fnu)
    B, M, N, K = 1, 32, 32, 32
    lhs_scale = _e8m0fnu((B, M, 1))
    rhs_scale = _e8m0fnu((B, 1, N))
    # float16
    lhs = self.rng().randn(B, M, K).astype(jnp.float16)
    rhs = self.rng().randn(B, K, N).astype(jnp.float16)
    result = scaled_dot_fn(
        lhs,
        rhs,
        lhs_scale=lhs_scale,
        rhs_scale=rhs_scale,
        preferred_element_type=jnp.float32,
    )
    self.assertEqual(result.dtype, jnp.float32)
    # float32
    lhs = self.rng().randn(B, M, K).astype(jnp.float32)
    rhs = self.rng().randn(B, K, N).astype(jnp.float32)
    result = scaled_dot_fn(
        lhs,
        rhs,
        lhs_scale=lhs_scale,
        rhs_scale=rhs_scale,
        preferred_element_type=jnp.float32,
    )
    self.assertEqual(result.dtype, jnp.float32)
  def test_mixed_precision_scales(self):
    """bf16 scales compared against an explicit scale-then-dot reference."""
    B, M, N, K = 1, 32, 32, 32
    # Case 1: BF16 operands with BF16 scales
    lhs = self.rng().randn(B, M, K).astype(jnp.bfloat16)
    rhs = self.rng().randn(B, K, N).astype(jnp.bfloat16)
    lhs_scale = jnp.full((B, M, 1), 2.0, dtype=jnp.bfloat16)
    rhs_scale = jnp.full((B, 1, N), 0.5, dtype=jnp.bfloat16)
    # Reference
    lhs_ref = lhs * lhs_scale
    rhs_ref = rhs * rhs_scale
    ref = jax.lax.dot_general(
        lhs_ref,
        rhs_ref,
        (((2,), (1,)), ((0,), (0,))),
        preferred_element_type=jnp.float32,
    )
    res = scaled_dot_fn(
        lhs,
        rhs,
        lhs_scale=lhs_scale,
        rhs_scale=rhs_scale,
        preferred_element_type=jnp.float32,
    )
    self.assertAllClose(res, ref, atol=1e-1, rtol=1e-1)
    # Case 2: F32 operands with BF16 scales
    lhs = self.rng().randn(B, M, K).astype(jnp.float32)
    rhs = self.rng().randn(B, K, N).astype(jnp.float32)
    lhs_scale = jnp.full((B, M, 1), 2.0, dtype=jnp.bfloat16)
    rhs_scale = jnp.full((B, 1, N), 0.5, dtype=jnp.bfloat16)
    lhs_ref = lhs * lhs_scale.astype(jnp.float32)
    rhs_ref = rhs * rhs_scale.astype(jnp.float32)
    ref = jax.lax.dot_general(
        lhs_ref,
        rhs_ref,
        (((2,), (1,)), ((0,), (0,))),
        preferred_element_type=jnp.float32,
    )
    res = scaled_dot_fn(
        lhs,
        rhs,
        lhs_scale=lhs_scale,
        rhs_scale=rhs_scale,
        preferred_element_type=jnp.float32,
    )
    self.assertAllClose(res, ref, atol=1e-1, rtol=1e-1)
  def test_jit_passes(self):
    self._should_skip_test(jnp.float8_e8m0fnu)
    result_jit = scaled_dot_fn(
        _e4m3fn((1, 32)),
        _e4m3fn((32, 4)),
        lhs_scale=_e8m0fnu((1, 1)),
        rhs_scale=_e8m0fnu((1, 4)),
    )
    self.assertEqual(result_jit.dtype, jnp.bfloat16)
  @parameterized.product(
      scale_dtype=[jnp.float8_e8m0fnu, jnp.bfloat16],
  )
  def test_multiple_contracting_dims_jit(self, scale_dtype):
    """Explicit dimension_numbers with two contracting dims, jit vs eager."""
    self._should_skip_test(scale_dtype)
    lhs = _e4m3fn((1, 32, 32))
    rhs = _e4m3fn((32, 32, 4))
    lhs_scale = _scale((1, 1, 1), dtype=scale_dtype)
    rhs_scale = _scale((1, 1, 4), dtype=scale_dtype)
    dimension_numbers = (((1, 2), (0, 1)), ((), ()))
    result = jax.lax.scaled_dot(
        lhs,
        rhs,
        lhs_scale=lhs_scale,
        rhs_scale=rhs_scale,
        dimension_numbers=dimension_numbers,
    )
    self.assertEqual(result.dtype, jnp.bfloat16)
    result_jit = scaled_dot_fn(
        lhs,
        rhs,
        lhs_scale=lhs_scale,
        rhs_scale=rhs_scale,
        dimension_numbers=dimension_numbers,
    )
    self.assertAllClose(result, result_jit)
  @parameterized.product(
      scale_dtype=[jnp.float8_e8m0fnu, jnp.bfloat16],
  )
  def test_multiple_batch_dims_jit(self, scale_dtype):
    """Explicit dimension_numbers with two batch dims, jit vs eager."""
    self._should_skip_test(scale_dtype)
    lhs = _e4m3fn((2, 2, 1, 32))
    rhs = _e4m3fn((2, 2, 4, 32))
    lhs_scale = _scale((2, 2, 1, 1), dtype=scale_dtype)
    rhs_scale = _scale((2, 2, 4, 1), dtype=scale_dtype)
    dimension_numbers = (((3,), (3,)), ((0, 1), (0, 1)))
    result_jit = scaled_dot_fn(
        lhs,
        rhs,
        lhs_scale=lhs_scale,
        rhs_scale=rhs_scale,
        dimension_numbers=dimension_numbers,
    )
    self.assertEqual(result_jit.dtype, jnp.bfloat16)
    result = jax.lax.scaled_dot(
        lhs,
        rhs,
        lhs_scale=lhs_scale,
        rhs_scale=rhs_scale,
        dimension_numbers=dimension_numbers,
    )
    self.assertEqual(result.dtype, jnp.bfloat16)
    self.assertAllClose(result_jit, result)
  @parameterized.product(
      scale_dtype=[jnp.float8_e8m0fnu, jnp.bfloat16],
  )
  def test_broadcast_less_than_32(self, scale_dtype):
    """Subchannel size below 32 still broadcasts scales correctly."""
    self._should_skip_test(scale_dtype)
    B, M, N, K = 32, 1024, 16, 128
    subchannel_size = 16  # Ratio < 32
    lhs = _e4m3fn((B, M, K))
    rhs = _e4m3fn((B, K, N))
    lhs_scales = _scale((B, M, K // subchannel_size), dtype=scale_dtype)
    rhs_scales = _scale((B, K // subchannel_size, N), dtype=scale_dtype)
    preferred_type = (
        jnp.float32 if scale_dtype == jnp.bfloat16 else jnp.bfloat16
    )
    result = jax.lax.scaled_dot(
        lhs,
        rhs,
        lhs_scale=lhs_scales,
        rhs_scale=rhs_scales,
        preferred_element_type=preferred_type,
    )
    result_jit = scaled_dot_fn(
        lhs,
        rhs,
        lhs_scale=lhs_scales,
        rhs_scale=rhs_scales,
        preferred_element_type=preferred_type,
    )
    self.assertEqual(result_jit.dtype, preferred_type)
    self.assertAllClose(result, result_jit, atol=1)
  def test_bf16_dot_vs_scaled_dot_numeric_equivalence(self):
    """Quantize-then-scaled-dot stays close to a plain bf16 dot_general."""
    self._should_skip_test(jnp.float8_e8m0fnu)
    B, M, N, K = 32, 256, 16, 512
    x = jnp.abs(self.rng().randn(B, M, K).astype(jnp.bfloat16))
    y = jnp.abs(self.rng().randn(B, K, N).astype(jnp.bfloat16))
    x_fp8, x_scales = _quantize_to_fp8(x)
    # `_quantize_to_fp8` scales along the last axis, so quantize y transposed
    # and transpose the results back.
    y_fp8, y_scales = _quantize_to_fp8(y.transpose((0, 2, 1)))
    y_fp8 = y_fp8.transpose((0, 2, 1))
    y_scales = jnp.transpose(y_scales, (0, 2, 1))
    dimension_numbers = (((2,), (1,)), ((0,), (0,)))
    scaled_dot_result_jit = scaled_dot_fn(
        x_fp8,
        y_fp8,
        lhs_scale=x_scales,
        rhs_scale=y_scales,
        preferred_element_type=jnp.float32,
        dimension_numbers=dimension_numbers,
    )
    @jax.jit
    def bf16_dot(a, b):
      return jax.lax.dot_general(
          a,
          b,
          preferred_element_type=jnp.float32,
          dimension_numbers=dimension_numbers,
      )
    original_dot_result = bf16_dot(x, y)
    self.assertAllClose(
        scaled_dot_result_jit, original_dot_result, atol=1e0, rtol=1e0
    )
  def test_batching_3d_vs_vmap_equivalence(self):
    """vmap over a 2D scaled_dot equals the 3D batched form."""
    self._should_skip_test(jnp.float8_e8m0fnu)
    B, M, N, K = 4, 64, 32, 128
    subchannel_size = 32
    lhs = _e4m3fn((B, M, K))
    rhs = _e4m3fn((B, K, N))
    lhs_scales = _e8m0fnu((B, M, K // subchannel_size))
    rhs_scales = _e8m0fnu((B, K // subchannel_size, N))
    def scaled_dot_batched_fn(lhs, rhs, lhs_scales, rhs_scales):
      scaled_dot_2d = partial(
          jax.lax.scaled_dot,
          dimension_numbers=(((1,), (0,)), ((), ())),
          preferred_element_type=jnp.bfloat16,
      )
      return jax.vmap(scaled_dot_2d, in_axes=(0, 0))(
          lhs, rhs, lhs_scale=lhs_scales, rhs_scale=rhs_scales
      )
    result_vmap = scaled_dot_batched_fn(lhs, rhs, lhs_scales, rhs_scales)
    result_batch_dims = jax.lax.scaled_dot(
        lhs,
        rhs,
        lhs_scale=lhs_scales,
        rhs_scale=rhs_scales,
        dimension_numbers=(((2,), (1,)), ((0,), (0,))),
        preferred_element_type=jnp.bfloat16,
    )
    self.assertAllClose(result_vmap, result_batch_dims, atol=1e-6, rtol=1e-6)
  def test_batching_2d_vs_vmap_equivalence(self):
    """vmap with an unbatched rhs equals explicit rhs broadcasting."""
    self._should_skip_test(jnp.float8_e8m0fnu)
    B, M, N, K = 4, 64, 32, 128
    subchannel_size = 32
    lhs = _e4m3fn((B, M, K))
    rhs = _e4m3fn((K, N))
    lhs_scales = _e8m0fnu((B, M, K // subchannel_size))
    rhs_scales = _e8m0fnu((K // subchannel_size, N))
    def scaled_dot_batched_fn(lhs, rhs, lhs_scales, rhs_scales):
      def scaled_dot_2d(lhs, rhs, lhs_scale, rhs_scale):
        return jax.lax.scaled_dot(
            lhs,
            rhs,
            lhs_scale=lhs_scale,
            rhs_scale=rhs_scale,
            dimension_numbers=(((1,), (0,)), ((), ())),
            preferred_element_type=jnp.bfloat16,
        )
      return jax.vmap(scaled_dot_2d, in_axes=(0, None, 0, None))(
          lhs, rhs, lhs_scales, rhs_scales
      )
    result_vmap = scaled_dot_batched_fn(lhs, rhs, lhs_scales, rhs_scales)
    rhs_repeated = jnp.broadcast_to(rhs, (B, K, N))
    rhs_scales_repeated = jnp.broadcast_to(
        rhs_scales, (B, K // subchannel_size, N)
    )
    result_batch_dims = jax.lax.scaled_dot(
        lhs,
        rhs_repeated,
        lhs_scale=lhs_scales,
        rhs_scale=rhs_scales_repeated,
        dimension_numbers=(((2,), (1,)), ((0,), (0,))),
        preferred_element_type=jnp.bfloat16,
    )
    self.assertAllClose(result_vmap, result_batch_dims, atol=1e-6, rtol=1e-6)
# Entry point for direct execution (absltest; pytest runs skip themselves).
if __name__ == "__main__":
  absltest.main(testLoader=jtu.JaxTestLoader())
| {
"repo_id": "jax-ml/jax",
"file_path": "tests/scaled_dot_test.py",
"license": "Apache License 2.0",
"lines": 581,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
jax-ml/jax:jax/_src/pallas/mosaic_gpu/interpret/shared_memory.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import dataclasses
import threading
from typing import Any
from absl import logging
from jax._src.pallas.mosaic.interpret import shared_memory as memory
from jax._src.pallas.mosaic.interpret import utils as interpret_utils
from jax._src.pallas.mosaic.interpret import vector_clock as vc
class Barrier(memory.Allocation):
# A `Barrier` is very similar to a `Semaphore` (as defined in
# `shared_memory.py`). Two key differences are:
# - A `Barrier` is allocated with a fixed `num_arrivals`, whereas for a
# `Semaphore` a thread/core waiting on the `Semaphore` can freely choose
# which value the `Semaphore` must have for the waiting to complete (in
# this thread/on this core).
# - Unlike a `Semaphore`, a `Barrier` cannot be used to signal threads/cores
# on arbitrary devices (in a mesh). The `Barrier` _lives_ in `SMEM` on one
# (GPU) device, and can therefore be arrived at or waited on only by the
# threads that are running on this device.
# As a consequence of the second point, a `Barrier` stores only a single
# vector clock, which is updated when threads arrive at the `Barrier`. When
# a thread completes waiting on the `Barrier` the thread's vector clock is
# updated with the clock value at which the `Barrier` was last arrived at.
#
# Internally the implementation of a `Barrier` relies on a condition variable
# `self.cv`. Waiting on a `Barrier` is internally implemented as waiting on
# the condition variable (until a barrier arrival has been completed). To
# complement this, when a thread arrives at a `Barrier`, we notify all threads
# that are currently waiting by notifying `self.cv` internally. We also use
# the lock on `self.cv` to protect internal state of the `Barrier` that can be
# modified by multiple threads. Internal state that can be modified by
# multiple threads must then also only be read under the protection of the
# lock on `self.cv`. Attributes that are protected in this way must only be
# read or modified while holding the lock on `self.cv`. The attributes this
  # applies to are annotated with comments "Protected by `self.cv`'s lock" below.
def __init__(
self,
shared_memory: GPUSharedMemory,
ref_count: int,
num_arrivals: int,
enable_logging: bool = False,
):
self.shared_memory = shared_memory
self.ref_count: int = ref_count # Protected by `self.cv`'s lock.
self.num_arrivals: int = num_arrivals # Protected by `self.cv`'s lock.
self.arrivals_count: int = 0 # Protected by `self.cv`'s lock.
self.enable_logging: bool = enable_logging
# We model the `Barrier`'s phase as an integer and, consequently,
# the 'next awaited phase by thread' as an array of integers. Note that on
# real GPU hardware, a barrier's phase is a single bit/boolean that is
# flipped when advancing to the next phase (i.e. when an arrival at the
# barrier has been completed). In the `Barrier` implementation here, we
# increment `self.phase` (by one) when a barrier is completed. Using an
# integer for the `Barrier`s phase (and incrementing it instead of flipping
# a bit) can be helpful for debugging.
self.phase: int = 0 # Protected by `self.cv`'s lock.
self.next_awaited_phase_by_thread: list[int] = [ # Protected by `self.cv`'s lock.
1
] * shared_memory.num_threads_per_device
# Initialize `self.phase_change_observed` to `True` so that the first
# arrival (more precisely, the first time we have arrived
# `self.num_arrivals` times) at the `Barrier` does not raise an error due to
# an unobserved phase change.
self.phase_change_observed: bool = True # Protected by `self.cv`'s lock.
# Invariant: We allow the lock on `self.cv` to be acquired and held in a
# scope where `self.shared_memory.lock` is already held, but *not* the other
# way around. The reasons for this are:
# - From code that holds `self.shared_memory.lock` we need to able to call
# methods of `Barrier` that then acquire the lock on `self.cv`
# internally (to modify internal state of `self` in a thread-safe way).
# This is needed, for example, when `self.shared_memory` deallocates a
# barrier (or at least decreases a barrier's ref count).
# - If we allowed the scopes during which `self.shared_memory.lock` and
# `self.cv` are both held to be nested in both ways, this can lead to
# deadlock.
self.cv = threading.Condition()
if self.shared_memory.detect_races:
self.clock: vc.VectorClock | None = None # Protected by `self.cv`'s lock.
def __repr__(self) -> str:
return (
f"Barrier(num_arrivals={self.num_arrivals},"
f" arrivals_count={self.arrivals_count})"
)
def _log(self, message: str):
if self.enable_logging:
logging.info(message)
  @property
  def detect_races(self) -> bool:
    # Race-detection mode is configured globally on the shared-memory object.
    return self.shared_memory.detect_races
def has_zero_ref_count(self) -> bool:
with self.cv:
return self.ref_count == 0
def deallocate(self):
"""Deallocates the `Barrier`."""
with self.cv:
self.ref_count -= 1
if self.ref_count > 0:
return
passed_waits_by_thread = [
p - 1 for p in self.next_awaited_phase_by_thread
]
for tid, x in enumerate(passed_waits_by_thread):
# Note that `self.phase` counts the number of completed arrivals.
if 0 < x < self.phase:
raise ValueError(
f"Thread {tid} did not observe all phases ({self.phase}) for"
f" barrier (but observed {x} {'phases' if x > 1 else 'phase'})."
)
  def arrive(self, device_id: int, local_thread_id: int, clock):
    """Registers one arrival; the Nth arrival completes the barrier.

    On completion, advances `self.phase`, resets the arrival count, and
    clears `phase_change_observed`. Always merges the arriving thread's
    vector clock into the barrier's clock (when race detection is on) and
    wakes all waiters.

    Raises:
      ValueError: If the barrier completes again before any thread observed
        the previous completion.
    """
    with self.cv:
      self.arrivals_count += 1
      if self.arrivals_count == self.num_arrivals:
        # The raise must happen before the clock merge below, so an erroneous
        # double-completion does not update the barrier's clock.
        if not self.phase_change_observed:
          raise ValueError(
              "Barrier arrival was completed again before previous completion"
              " was observed by a thread."
          )
        # Advance to the next phase and start counting arrivals from zero.
        self.phase += 1
        self.arrivals_count = 0
        self.phase_change_observed = False
        self._log(
            f"Device {device_id}, thread {local_thread_id}: Barrier {id(self)}"
            f" has completed arrival. Phase is now {self.phase}."
        )
      if self.detect_races:
        # Fold the arriving thread's clock into the barrier's clock so that
        # waiters can later pick it up.
        if self.clock is None:
          self.clock = vc.copy_vector_clock(clock)
        else:
          vc.update_vector_clock(self.clock, clock)
      # Wake all waiters so they can re-check the phase.
      self.cv.notify_all()
def wait(self, device_id: int, local_thread_id: int):
with self.cv:
# We are waiting for the barrier to reach exactly the phase that this
# thread is waiting for. This could lead to deadlock (see the comment in
# the body of the `while` loop below). One way to avoid deadlock would be
# to replace `!=` with `>`, which would allow the barrier's phase to run
# ahead without this thread observing exactly the phase it is waiting for
# (but only a later one). Here, we choose to compare with `!=` and avoid
# deadlock by raising an exception inside the `while` loop.
#
# Note also that if instead of modelling the barrier's phase as an
# integer, we had used a boolean (which would be closer to real GPU
# hardware), we would be forced to use `!=` here (since `>` would not be
# an option).
while self.next_awaited_phase_by_thread[local_thread_id] != self.phase:
# If `self.phase` is already past the phase that this thread is waiting
# for, this thread will wait forever. This is because `self.phase` never
# decreases and the only way for
# `self.next_awaited_phase_by_thread[local_thread_id]` to increase is by
# exiting this `while` loop.
if self.next_awaited_phase_by_thread[local_thread_id] < self.phase:
raise ValueError(
f"Thread {local_thread_id} is awaiting phase"
f" {self.next_awaited_phase_by_thread[local_thread_id]}, but"
f" barrier is already at phase {self.phase}. (This means that"
f" Thread {local_thread_id} has not participated in all"
" completions of the barrier.)"
)
self._log(
f"Device {device_id}, thread {local_thread_id}: Waiting for barrier"
f" {id(self)} to reach phase"
f" {self.next_awaited_phase_by_thread[local_thread_id]}. (Current"
f" phase: {self.phase})"
)
self.cv.wait()
self.phase_change_observed = True
self.next_awaited_phase_by_thread[local_thread_id] += 1
self._log(
f"Device {device_id}, thread {local_thread_id}: Finished waiting for"
f" phase {self.phase} of barrier {id(self)}."
)
# Read `self.clock` while still holding the lock on `self.cv`. (If race
# detection is enabled, the clock is needed below to update a vector clock
# that is managed by `self.shared_memory`.)
clock = self.clock if self.detect_races else None
# Note that this block cannot be nested under the `with self.cv` block
# immediately above since this would violate the invariant that
# `self.shared_memory.lock` *cannot* be acquired when `self.cv`'s lock is
# already held. (See the documentation of `self.cv` above.)
if self.detect_races:
global_thread_id = self.shared_memory.get_global_thread_id(
device_id, local_thread_id
)
# Assert before acquiring the lock on `self.shared_memory`.
assert clock is not None
with self.shared_memory.lock:
vc.update_vector_clock(
self.shared_memory.clocks[global_thread_id], clock
)
@dataclasses.dataclass
class GPUSharedMemory(memory.SharedMemory):
logging_mode: interpret_utils.LoggingMode | None = None
def _log(self, message: str):
if (
self.logging_mode is not None
and interpret_utils.LoggingMode.SHARED_MEMORY in self.logging_mode
):
logging.info(message)
@property
def num_threads_per_device(self) -> int:
return self.num_cores_per_device
@property
def num_global_threads(self) -> int:
return self.num_cores
def get_global_thread_id(self, device_id: int, local_thread_id: int) -> int:
"""Computes the global thread ID from the given device and local thread ID."""
return self.get_global_core_id(device_id, local_thread_id)
def allocate_barrier(
self,
device_id: int,
thread_id: int,
key: Any,
ref_count: int,
num_arrivals: int,
):
"""Allocates a barrier with the given key unless it already exists."""
with self.lock:
if key not in self.mem:
barrier = Barrier(
self,
ref_count=ref_count,
num_arrivals=num_arrivals,
enable_logging=(
self.logging_mode is not None
and interpret_utils.LoggingMode.BARRIER in self.logging_mode
),
)
self.mem[key] = barrier
self._log(
f"Device {device_id}, thread {thread_id}: Allocated barrier"
f" {id(barrier)} ({barrier}) with key {key}."
)
def get_barrier_and_increment_clock(
self, key: Any, device_id: int, thread_id: int
) -> tuple[Barrier, vc.VectorClock | None]:
clock = None
with self.lock:
if self.detect_races:
global_thread_id = self.get_global_thread_id(
device_id, thread_id
)
vc.inc_vector_clock(self.clocks[global_thread_id], global_thread_id)
clock = vc.copy_vector_clock(self.clocks[global_thread_id])
barrier = self.mem[key]
if not isinstance(barrier, Barrier):
raise ValueError(
f"Attempting to get barrier from allocation with {key} that is not a"
" `Barrier`."
)
return barrier, clock
def deallocate_barrier(self, device_id: int, thread_id: int, key: Any):
with self.lock:
barrier = self.mem[key]
if not isinstance(barrier, Barrier):
raise ValueError(
f"Attempting to get barrier from allocation with {key} that is not"
" a `Barrier`."
)
self._log(
f"Device {device_id}, thread {thread_id}: Decreasing ref count of"
f" barrier {id(barrier)} with key {key}."
)
barrier.deallocate()
if barrier.has_zero_ref_count():
self._log(
f"Device {device_id}, thread {thread_id}: Deallocating barrier"
f" {id(barrier)} with key {key}."
)
self.mem.pop(key)
def assert_no_barriers_allocated(self):
for key, alloc in self.mem.items():
assert not isinstance(
alloc, Barrier
), f"Barrier remains allocated at key `{key}`."
| {
"repo_id": "jax-ml/jax",
"file_path": "jax/_src/pallas/mosaic_gpu/interpret/shared_memory.py",
"license": "Apache License 2.0",
"lines": 287,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:tests/pallas/pallas_lowering_determinism_test.py | # Copyright 2026 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Pallas lowering determinism."""
import json
import unittest
from absl.testing import absltest
import jax
from jax._src import test_util as jtu
from jax._src.lib import jaxlib_extension_version
from jax._src.lib.mlir import ir
from jax.experimental import pallas
import jax.numpy as jnp
jax.config.parse_flags_with_absl()
@jax.jit
def nested_jit_func(x):
return jax.jit(lambda x: x + 1.0)(x)
def pallas_kernel(x_ref, y_ref):
y_ref[...] = nested_jit_func(x_ref[...])
def pallas_kernel_duplicate(x_ref, y_ref):
y_ref[...] = nested_jit_func(x_ref[...])
@jax.jit
def stable_jit_func(x):
return pallas.pallas_call(
pallas_kernel,
out_shape=jax.ShapeDtypeStruct(x.shape, x.dtype),
name="stable_kernel_name",
)(x)
@jax.jit
def stable_jit_func_duplicate(x):
return pallas.pallas_call(
pallas_kernel_duplicate,
out_shape=jax.ShapeDtypeStruct(x.shape, x.dtype),
name="stable_kernel_name_duplicate",
)(x)
def extract_pallas_body(lowered):
"""Extracts Pallas kernel body from lowered object."""
module = lowered.compiler_ir()
bodies = []
def _find_backend_configs(op):
is_tpu_custom_call = (
"call_target_name" in op.attributes
and "backend_config" in op.attributes
and ir.StringAttr(op.attributes["call_target_name"]).value
== "tpu_custom_call"
)
if is_tpu_custom_call:
backend_config = op.attributes["backend_config"]
config = json.loads(ir.StringAttr(backend_config).value)
if "custom_call_config" in config:
bodies.append(config["custom_call_config"]["body"])
for region in op.regions:
for block in region:
for nested_op in block:
_find_backend_configs(nested_op)
_find_backend_configs(module.operation)
assert len(bodies) == 1
return bodies[0]
class PallasLoweringDeterminismTest(jtu.JaxTestCase):
@jtu.run_on_devices("tpu")
def testCallsiteAgnostic(self):
if jaxlib_extension_version < 399:
self.skipTest("TracebackScope requires jaxlib >= 399")
def get_lowered():
x = jnp.ones((8,), dtype=jnp.float32)
return stable_jit_func.lower(x)
jax.clear_caches()
lowered0 = get_lowered()
body0 = extract_pallas_body(lowered0)
jax.clear_caches()
def wrapper():
return get_lowered()
lowered1 = wrapper()
body1 = extract_pallas_body(lowered1)
self.assertEqual(body0, body1)
@jtu.run_on_devices("tpu")
@unittest.skipIf(
jaxlib_extension_version < 409,
"Order independence requires a recent jaxlib"
)
def testOrderAgnostic(self):
def get_pallas_body(f):
x = jnp.ones((8,), dtype=jnp.float32)
return extract_pallas_body(f.lower(x))
jax.clear_caches()
body_a0 = get_pallas_body(stable_jit_func)
body_b0 = get_pallas_body(stable_jit_func_duplicate)
jax.clear_caches()
body_b1 = get_pallas_body(stable_jit_func_duplicate)
body_a1 = get_pallas_body(stable_jit_func)
self.assertEqual(body_a0, body_a1)
self.assertEqual(body_b0, body_b1)
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| {
"repo_id": "jax-ml/jax",
"file_path": "tests/pallas/pallas_lowering_determinism_test.py",
"license": "Apache License 2.0",
"lines": 104,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
jax-ml/jax:tests/pallas/tpu_trace_value_test.py | # Copyright 2026 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Minimal test for pltpu.trace_value primitive."""
from absl.testing import absltest
import jax
from jax._src import test_util as jtu
from jax.experimental import pallas as pl
from jax.experimental.pallas import tpu as pltpu
from jax.experimental.pallas import tpu_sc as plsc
import jax.numpy as jnp
def simple_kernel_with_trace_value(x_ref, s_ref, o_ref):
"""Simple kernel that emits trace metrics."""
# Emit a constant to xprof trace
pltpu.trace_value("constant_value", jnp.float32(42.42))
scale = s_ref[0]
z = x_ref[...] + jnp.float32(48.0) + scale.astype(jnp.float32).reshape((1, 1))
pltpu.trace_value("scale_value", scale)
o_ref[...] = z
class TraceMetricTest(jtu.JaxTestCase):
def setUp(self):
super().setUp()
if not jtu.is_device_tpu():
self.skipTest("trace_value only supported on TPU.")
def test_simple_trace_metric(self):
"""Test that trace_metric compiles and runs without error."""
x = jnp.ones((8, 128), dtype=jnp.float32)
s = jax.random.randint(jax.random.key(0), (1,), minval=0, maxval=100)
result = pl.pallas_call(
simple_kernel_with_trace_value,
out_shape=jax.ShapeDtypeStruct((8, 128), jnp.float32),
in_specs=[
pl.BlockSpec((8, 128), memory_space=pltpu.VMEM),
pl.BlockSpec((1,), memory_space=pltpu.SMEM),
],
out_specs=pl.BlockSpec((8, 128), memory_space=pltpu.VMEM),
compiler_params=pltpu.CompilerParams(has_side_effects=True),
name="trace_metric_test",
)(x, s)
# Just verify the kernel runs and produces correct output
self.assertEqual(result.shape, (8, 128))
self.assertTrue(
jnp.allclose(result, x + 48.0 + s.astype(jnp.float32).reshape((1, 1)))
)
class SparseCoreTraceValueTest(jtu.JaxTestCase):
def setUp(self):
super().setUp()
if not jtu.is_device_tpu(5, "p") and not jtu.is_device_tpu_at_least(6):
self.skipTest("SparseCore only supported on TPU v5p+")
if not jtu.is_cloud_tpu_at_least(2026, 3, 1):
self.skipTest("Requires a newer libtpu")
def test_trace_value(self):
nl = plsc.get_sparse_core_info().num_lanes
x = jnp.arange(8 * 128, dtype=jnp.int32).reshape(8, 128)
mesh = plsc.VectorSubcoreMesh(
core_axis_name="core", subcore_axis_name="subcore", num_cores=1
)
@pl.kernel(
out_shape=x,
mesh=mesh,
scratch_shapes=(pltpu.VMEM(x.shape, x.dtype),),
compiler_params=pltpu.CompilerParams(),
)
def kernel(x_hbm_ref, o_hbm_ref, tmp_ref):
pltpu.sync_copy(x_hbm_ref, tmp_ref)
pltpu.trace_value("sc_trace_value", tmp_ref[1, :nl][1])
pltpu.sync_copy(tmp_ref, o_hbm_ref)
result = kernel(x)
self.assertArraysEqual(result, x)
if __name__ == "__main__":
jax.config.parse_flags_with_absl()
absltest.main(testLoader=jtu.JaxTestLoader())
| {
"repo_id": "jax-ml/jax",
"file_path": "tests/pallas/tpu_trace_value_test.py",
"license": "Apache License 2.0",
"lines": 82,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
jax-ml/jax:tests/pmap_shmap_merge_test.py | # Copyright 2026 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import math
import unittest
import warnings
from absl.testing import absltest
import jax
from jax._src import config
from jax._src import core
from jax._src import dtypes
from jax._src import stages
from jax._src import test_util as jtu
import jax.numpy as jnp
import numpy as np
config.parse_flags_with_absl()
jtu.request_cpu_devices(8)
# Suppress the deprecation warning from @config.pmap_shmap_merge(True) decorator
# which is triggered at class definition time.
warnings.filterwarnings(
'ignore',
message='Setting `jax_pmap_shmap_merge` is deprecated',
category=DeprecationWarning,
)
class PmapShmapMergeTest(jtu.JaxTestCase):
def setUp(self):
super().setUp()
if jax.device_count() < 2:
raise unittest.SkipTest('test requires at least two devices')
@config.pmap_shmap_merge(True)
def test_store_exception(self):
def f(x):
return x
inp = jnp.ones((jax.device_count(), 1), dtype=jnp.float32)
jax.pmap(f, axis_name='i')(inp)
inp = jnp.ones((jax.device_count(), 1), dtype=jnp.int32)
jax.pmap(f, axis_name='i')(inp)
@config.pmap_shmap_merge(True)
def test_prng_key(self):
keys = jax.random.split(jax.random.key(0), jax.device_count())
out = jax.pmap(lambda x: x)(keys)
self.assertEqual(type(out), type(keys))
out = jax.pmap(lambda x, y: y, in_axes=(0, None))(keys, jax.random.key(0))
self.assertEqual(type(out), type(keys))
out = jax.pmap(lambda x, y: y, in_axes=(0, None), out_axes=None)(
keys, jax.random.key(0)
)
self.assertEqual(type(out), type(keys))
@config.pmap_shmap_merge(True)
def test_lower_with_flattened_args(self):
shape = (jax.device_count(), 3)
inputs = np.reshape(np.arange(math.prod(shape)), shape)
# The shard_map implementation of pmap takes pytree args, but the inner
# jitted_f must take flattened args.
_ = jax.pmap(lambda x: x[0]).lower((inputs, ())).compile() # doesn't crash
@config.pmap_shmap_merge(True)
def test_float0_dtype_input(self):
inputs = np.array([b''] * jax.device_count(), dtype=dtypes.float0)
_ = jax.pmap(lambda x: x)(inputs) # doesn't crash
@config.pmap_shmap_merge(True)
def test_float0_dtype_output(self):
inputs = np.ones(jax.device_count())
_ = jax.pmap(lambda x: jnp.array(b'', dtype=dtypes.float0))(
inputs
) # doesn't crash
@config.pmap_shmap_merge(True)
def test_lowered_args_info(self):
shmap_lowered = jax.pmap(lambda x: x).lower(
(jnp.ones((1,), jnp.float32), ())
)
aval = core.ShapedArray((1,), jnp.float32)
expected_args_info = (
(
(
stages.ArgInfo(aval, donated=False),
(),
),
),
{},
)
self.assertEqual(
shmap_lowered.args_info, expected_args_info
) # doesn't crash
@config.pmap_shmap_merge(True)
def test_wrapped(self):
f = lambda x: x
g = jax.pmap(f)
self.assertTrue(hasattr(g, '__wrapped__'))
self.assertEqual(g.__wrapped__, f)
@config.pmap_shmap_merge(True)
def test_numpy_input_sharding(self):
# Test that pmap correctly handles numpy array inputs by providing
# explicit in_shardings to the underlying jit(shard_map).
# Without explicit in_shardings, jit would default to UnspecifiedValue
# for numpy inputs, causing failures.
np_input = np.arange(jax.device_count(), dtype=np.float32)
result = jax.pmap(lambda x: x * 2)(np_input)
expected = np_input * 2
self.assertAllClose(result, expected)
if __name__ == '__main__':
absltest.main()
| {
"repo_id": "jax-ml/jax",
"file_path": "tests/pmap_shmap_merge_test.py",
"license": "Apache License 2.0",
"lines": 112,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
jax-ml/jax:tests/multiprocess/socket_transfer_test.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for socket transfer."""
import jax
from jax._src import test_multiprocess as jt_multiprocess
from jax.sharding import PartitionSpec as P
import numpy as np
try:
import portpicker # pytype: disable=import-error
except ImportError:
portpicker = None
class SocketTransferTest(jt_multiprocess.MultiProcessTest):
def test_cross_host_transfer_single_device_sharding(self):
x = np.arange(64).reshape(8, 8)
src_pid = 0
dst_pid = 1
src_sharding = jax.sharding.SingleDeviceSharding(
jax.local_devices(process_index=src_pid)[0])
dst_sharding = jax.sharding.SingleDeviceSharding(
jax.local_devices(process_index=dst_pid)[0])
y = jax.device_put(x, src_sharding)
z = jax.device_put(y, dst_sharding)
z.block_until_ready()
if jax.process_index() == dst_pid:
self.assertLen(z.addressable_shards, 1)
np.testing.assert_array_equal(z.addressable_shards[0].data, x)
else:
self.assertEmpty(z.addressable_shards)
def test_cross_host_transfer_named_sharding(self):
x = np.arange(64).reshape(8, 8)
n_local = jax.local_device_count()
src_pid = 0
dst_pid = 1
src_sharding = jax.sharding.NamedSharding(
jax.make_mesh((n_local,), ("x",),
devices=jax.local_devices(process_index=src_pid),
axis_types=(jax.sharding.AxisType.Explicit,)),
P("x"))
dst_sharding = jax.sharding.NamedSharding(
jax.make_mesh((n_local,), ("x",),
devices=jax.local_devices(process_index=dst_pid),
axis_types=(jax.sharding.AxisType.Explicit,)),
P("x"))
y = jax.device_put(x, src_sharding)
z = jax.device_put(y, dst_sharding)
z.block_until_ready()
if jax.process_index() == dst_pid:
self.assertLen(z.addressable_shards, n_local)
for shard in z.addressable_shards:
np.testing.assert_array_equal(shard.data, x[shard.index])
else:
self.assertEmpty(z.addressable_shards)
def test_cross_host_transfer_named_sharding_replicated(self):
x = np.arange(64).reshape(8, 8)
n_dev = jax.device_count() // 2
src_sharding = jax.sharding.NamedSharding(
jax.make_mesh((n_dev,), ("x",), devices=jax.devices()[:n_dev],
axis_types=(jax.sharding.AxisType.Explicit,)),
P()
)
dst_sharding = jax.sharding.NamedSharding(
jax.make_mesh((n_dev,), ("x",), devices=jax.devices()[n_dev:],
axis_types=(jax.sharding.AxisType.Explicit,)),
P()
)
y = jax.device_put(x, src_sharding)
z = jax.device_put(y, dst_sharding)
z.block_until_ready()
for shard in z.addressable_shards:
np.testing.assert_array_equal(shard.data, x[shard.index])
if __name__ == "__main__":
if portpicker is None:
socket_port = 12345
else:
socket_port = portpicker.pick_unused_port()
jax.config.update("jax_force_dcn_cross_host_transfers", True)
jax.config.update(
"jax_cross_host_transfer_socket_address", f"127.0.0.1:{socket_port}")
# Too small for good performance, but set to avoid oom in msan tests.
jax.config.update(
"jax_cross_host_transfer_transfer_size",
64 * 1024,
)
jt_multiprocess.main()
| {
"repo_id": "jax-ml/jax",
"file_path": "tests/multiprocess/socket_transfer_test.py",
"license": "Apache License 2.0",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
jax-ml/jax:jax/_src/pallas/mosaic_gpu/interpret/gpu_callbacks.py | # Copyright 2026 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from collections.abc import Mapping, Sequence
import contextlib
import dataclasses
import functools
import itertools
import threading
import types
from typing import Any, Self
import jax
from jax import numpy as jnp
from jax._src import callback
from jax._src import source_info_util
from jax._src.pallas.mosaic.interpret import utils as interpret_utils
from jax._src.pallas.mosaic.interpret import vector_clock as vc
from jax._src.pallas.mosaic.interpret.race_detection_state import RaceDetectionState
from jax._src.pallas.mosaic_gpu import core as mosaic_gpu_core
from jax._src.pallas.mosaic_gpu.interpret import shared_memory as memory
from jax._src.state import indexing
import numpy as np
IDX_BY_GPU_MEMORY_SPACE: Mapping[mosaic_gpu_core.MemorySpace, int]
IDX_BY_GPU_MEMORY_SPACE = types.MappingProxyType(
{v: i for i, v in enumerate(mosaic_gpu_core.MemorySpace)}
)
GPU_MEMORY_SPACE_BY_IDX: Mapping[int, mosaic_gpu_core.MemorySpace]
GPU_MEMORY_SPACE_BY_IDX = types.MappingProxyType(
dict(enumerate(mosaic_gpu_core.MemorySpace))
)
def get_memory_space_idx(space: mosaic_gpu_core.MemorySpace | None) -> int:
if space is None:
return IDX_BY_GPU_MEMORY_SPACE[mosaic_gpu_core.MemorySpace.SMEM]
return IDX_BY_GPU_MEMORY_SPACE[space]
def is_smem_memory_space(space: mosaic_gpu_core.MemorySpace | None) -> bool:
if space is None:
return True
return space == mosaic_gpu_core.MemorySpace.SMEM
def is_gmem_memory_space(space: mosaic_gpu_core.MemorySpace | None) -> bool:
return space == mosaic_gpu_core.MemorySpace.GMEM
_shared_memory: memory.GPUSharedMemory | None = None
_shared_memory_init_lock = threading.Lock()
_races: RaceDetectionState | None = None
def _get_shared_memory() -> memory.GPUSharedMemory:
assert _shared_memory is not None
return _shared_memory
def _clear_shared_memory():
global _shared_memory
with _shared_memory_init_lock:
_shared_memory = None
def get_races() -> RaceDetectionState:
assert _races is not None
return _races
def reset_gpu_interpret_mode_state():
"""Resets all global, shared state used by GPU interpret mode.
GPU interpret mode uses global, shared state for simulating memory buffers,
for race detection, etc., when interpreting a kernel. Normally, this shared
state is cleaned up after a kernel is interpreted.
But if an exception is thrown while interpreting a kernel, the shared state
is not cleaned up, allowing the simulated GPU state to be examined for
debugging purposes. In this case, the shared state must be reset before
any further kernels are interpreted.
"""
global _shared_memory, _races
with _shared_memory_init_lock:
_shared_memory = None
_races = None
# Below we define pairs of _callback_ functions. Each pair consists of
#
# (1) a module-private function, e.g. `_initialize_shared_memory`, and
# (2) a thin wrapper around the this module-private function, e.g.
# `call_initialize_shared_memory`.
#
# The module-private function (1) runs in the Python ("host") process and
# manages interaction of the interpreted Pallas kernel with the memory system,
# represented by the module-global `SharedMemory` object `_shared_memory`.
#
# The wrapper function (2) is to be called from the interpreted Pallas kernel
# (that is simulating a "device", or thread). It serves as the interface between
# the "device" kernel and the "host" memory system and merely passes arguments
# on to the corresponding function (1). Importantly, when the wrapper receives
# an argument that is a Jax (device) array, this argument is received as a Numpy
# (host) array by the corresponding function (1), due to the
# `callback.io_callback` mechanism.
def _initialize_shared_memory(
num_devices: Any,
num_threads: Any,
*,
interpret_params: interpret_utils.InterpretGPUParams,
):
global _shared_memory, _races
num_devices = int(num_devices)
num_threads = int(num_threads)
num_total_threads = num_devices * num_threads
with _shared_memory_init_lock:
if _shared_memory is None:
vector_clock_size = interpret_params.get_vector_clock_size(num_devices)
_races = RaceDetectionState(num_cores=num_total_threads)
_shared_memory = memory.GPUSharedMemory(
num_devices=num_devices,
# We re-use the `SharedMemory`'s capability to model multiple cores
# per (TPU) device for modeling the multiple threads on a single GPU
# device.
num_cores_per_device=num_threads,
out_of_bounds_reads=interpret_params.out_of_bounds_reads,
# TODO(nrink): Support different DMA execution modes on GPU.
dma_execution_mode="eager",
uninitialized_memory=interpret_params.uninitialized_memory,
detect_races=interpret_params.detect_races,
vector_clock_size=vector_clock_size,
clocks=[
vc.make_vector_clock(vector_clock_size)
for _ in range(num_total_threads)
],
barrier=threading.Barrier(num_devices, action=lambda: None),
clean_up_barrier=threading.Barrier(
num_devices, action=_clear_shared_memory
),
logging_mode=interpret_params.logging_mode,
)
# The naming of the `num_cores` property of `SharedMemory` originates from the
# support for multipl cores in a (Megacore) TPU device. As commented above, on
# GPU we model multiple threads per device as _cores_ in the
# (TPU-/Megacore-)inspired terminology of`SharedMemory`.
assert _shared_memory.num_cores == num_total_threads
def call_initialize_shared_memory(
*,
num_devices: int,
num_threads: int,
interpret_params: interpret_utils.InterpretGPUParams,
):
callback.io_callback(
functools.partial(
_initialize_shared_memory,
interpret_params=interpret_params,
),
(),
num_devices,
num_threads,
ordered=True,
)
def _clean_up_shared_memory():
shared_memory = _get_shared_memory()
shared_memory.clean_up_barrier.wait()
def call_clean_up_shared_memory():
callback.io_callback(_clean_up_shared_memory, (), ordered=True)
def _update_clocks_for_device_barrier(device_id: int):
shared_memory = _get_shared_memory()
shared_memory.update_clocks_for_device_barrier(device_id)
def call_update_clocks_for_device_barrier(device_id: int):
callback.io_callback(
_update_clocks_for_device_barrier, (), device_id, ordered=True
)
@dataclasses.dataclass(frozen=True, kw_only=True)
class HostAllocationRequest:
"""Request for an allocation on a device/thread and in a memory space."""
memory_space_id: int
device_id: int
# Defaults to zero for `AllocationRequest`s that do not specify a thread ID.
thread_id: int = 0
# The reference count is needed only for allocations that are explicitly
# deallocated (with _deallocate_buffer below). This currently only applies to
# allocations made by a `run_scoped` primitive.
initial_ref_count: int = 1
def __iter__(self):
# We make `self` iterable to ease conversion into Numpy and Jax arrays (cf.
# methods `as_array` and `as_jax_array` below). Note that for this purpose
# it would suffice to have any method that return a suitable iterator,
# instead of implementing the special `__iter__` method. Not implementing
# `__iter__` would mean that objects of this class cannot (accidentally) be
# iterated over by clients of the class.
return iter((
self.memory_space_id,
self.device_id,
self.thread_id,
self.initial_ref_count,
))
@classmethod
def shape_and_dtype(cls) -> jax.ShapeDtypeStruct:
num_fields = len(dataclasses.fields(cls))
return jax.ShapeDtypeStruct((num_fields,), jnp.int32)
@property
def as_array(self) -> np.ndarray:
return np.array(list(self), dtype=np.int32)
@property
def as_jax_array(self) -> jnp.ndarray:
return jnp.array(list(self), dtype=jnp.int32)
@classmethod
def from_array(cls, request: np.ndarray | jnp.ndarray) -> Self:
if request.shape != cls.shape_and_dtype().shape:
raise ValueError(
f"Expected shape {cls.shape_and_dtype().shape} but got"
f" {request.shape}"
)
if not interpret_utils.is_int(request.dtype):
raise ValueError(f"Expected integer dtype but got {request.dtype}")
arg_names = [f.name for f in dataclasses.fields(cls)]
values = map(int, request)
return cls(**dict(zip(arg_names, values)))
def make_allocation_request_array(
*,
memory_space_id: int,
device_id: int,
thread_id: int = 0,
initial_ref_count: int = 1,
) -> jnp.ndarray:
return HostAllocationRequest(
memory_space_id=memory_space_id,
device_id=device_id,
thread_id=thread_id,
initial_ref_count=initial_ref_count,
).as_jax_array
@dataclasses.dataclass(frozen=True, kw_only=True)
class HostAllocationKey(HostAllocationRequest):
"""Key for an allocation in shared memory."""
buffer_id: int
def __iter__(self):
# Note that implementing `__iter__` here affects the bahviour of the
# `as_array` and `as_jax_array` methods of the base class. This is intended.
yield from super().__iter__()
yield self.buffer_id
def _allocate_buffer_for_all_threads(
device_id: Any, allocation_request: Any, value: Any
) -> np.ndarray:
"""Allocates a buffer for the given `allocation_request`.
While only a single buffer is allocated, we increment the next buffer ID on
`_shared_memory` for all threads. (This is analogous to the behavior when
interpreting TPU kernels with multiple cores per TPU device.)
Args:
allocation_request: Array that converts into an `HostAllocationRequest` with
`thread_id` set to zero. This requirement can be thought of as associating
the allocated buffer (that is shared across all threads) with the zeroth
thread.
value: Array of values to initialize the allocated buffer with.
Returns:
`AllocationKey` to refer to the allocated buffer.
Raises:
ValueError: If the `thread_id` in `allocation_request` is not zero.
"""
device_id = int(device_id)
allocation_request = HostAllocationRequest.from_array(allocation_request)
if allocation_request.thread_id != 0:
raise ValueError(
"`thread_id` must be zero when allocating a buffer for all threads"
)
value = np.array(value)
shared_memory = _get_shared_memory()
key: HostAllocationKey | None = None
buffer_id: int | None = None
for thread_id in range(shared_memory.num_cores_per_device):
buffer_id_for_thread_id = shared_memory.get_next_buffer_id(
device_id, thread_id
)
if not buffer_id:
buffer_id = buffer_id_for_thread_id
else:
# We keep the buffer ids in sync across all threads. This implies, in
# particular, that every instance of the assignment to `key` below assigns
# an `AllocationKey` object with the same attributes.
assert buffer_id == buffer_id_for_thread_id
key = HostAllocationKey(
memory_space_id=allocation_request.memory_space_id,
device_id=allocation_request.device_id,
thread_id=0,
initial_ref_count=allocation_request.initial_ref_count,
buffer_id=buffer_id,
)
ref_count = allocation_request.initial_ref_count
# We rely on the fact that `allocate_buffer` will not allocate a new buffer
# if one with the same key already exists.
shared_memory.allocate_buffer(key, ref_count=ref_count, value=value)
# We expect the `for`-loop above to have executed its body at least once.
assert key is not None
return key.as_array
def call_allocate_buffer_for_all_threads(
device_id: int,
allocation_request: jnp.ndarray,
value: jnp.ndarray,
) -> jnp.ndarray:
return callback.io_callback(
_allocate_buffer_for_all_threads,
HostAllocationKey.shape_and_dtype(),
device_id,
allocation_request,
value,
ordered=True,
)
def _allocate_buffer(
    device_id: Any,
    thread_id: Any,
    allocation_request: Any,
    value: Any,
) -> np.ndarray:
  """Allocates a per-thread buffer for the given `allocation_request`.

  Args:
    device_id: Device to allocate on (converted to int).
    thread_id: Thread to allocate for (converted to int).
    allocation_request: Array that converts into a `HostAllocationRequest`.
    value: Array of values to initialize the allocated buffer with.

  Returns:
    `AllocationKey` (encoded as an array) to refer to the allocated buffer.
  """
  device_id, thread_id = int(device_id), int(thread_id)
  request = HostAllocationRequest.from_array(allocation_request)
  init_value = np.array(value)
  shared_memory = _get_shared_memory()
  next_buffer_id = shared_memory.get_next_buffer_id(device_id, thread_id)
  allocation_key = HostAllocationKey(
      memory_space_id=request.memory_space_id,
      device_id=request.device_id,
      thread_id=request.thread_id,
      initial_ref_count=request.initial_ref_count,
      buffer_id=next_buffer_id,
  )
  shared_memory.allocate_buffer(
      allocation_key,
      ref_count=request.initial_ref_count,
      value=init_value,
  )
  return allocation_key.as_array
def call_allocate_buffer(
    device_id: int,
    thread_id: int,
    allocation_request: jnp.ndarray,
    value: jnp.ndarray,
) -> jnp.ndarray:
  """Traceable wrapper around `_allocate_buffer` (ordered host callback)."""
  result_shape = HostAllocationKey.shape_and_dtype()
  host_args = (device_id, thread_id, allocation_request, value)
  return callback.io_callback(
      _allocate_buffer, result_shape, *host_args, ordered=True
  )
def _deallocate_buffer(allocation_key: Any):
  """Drops one reference to the buffer identified by `allocation_key`.

  The underlying buffer is freed once its reference count reaches zero.
  """
  key = HostAllocationKey.from_array(allocation_key)
  _get_shared_memory().deallocate_buffer(key)
def call_deallocate_buffer(allocation_key: jnp.ndarray):
  """Traceable wrapper around `_deallocate_buffer` (ordered host callback)."""
  callback.io_callback(_deallocate_buffer, None, allocation_key, ordered=True)
def _handle_out_of_bounds_read(
    ret: np.ndarray | None,
    full_read_shape: tuple[int, ...],
    shape: Sequence[int],
    dtype: np.dtype,
    allocation_key: HostAllocationKey,
    read_range: tuple[int | slice, ...],
    shared_memory: memory.GPUSharedMemory,
    source_info,
    input_name: str | None,
    block_indices: tuple[int, ...] | None,
    grid_loop_idx: tuple[int, ...] | None,
) -> np.ndarray:
  """Handles out-of-bounds read based on shared_memory configuration.

  Args:
    ret: The partial (in-bounds) result of the read, or None if nothing was
      read.
    full_read_shape: Shape the read would have if it were fully in-bounds.
    shape: Shape of the buffer (or input) that was read; used in errors.
    dtype: Element dtype of the read.
    allocation_key: Key of the buffer that was read; used in errors.
    read_range: The requested read range; used in errors.
    shared_memory: Its `out_of_bounds_reads` setting selects between raising
      and returning "uninitialized" values.
    source_info: Optional source info for attributing the error to user code.
    input_name: Name of the `pallas_call` input being read, if this read
      copies an input block; selects the error-message wording.
    block_indices: Block indices of the input read; only used in the error
      message when `input_name` is set.
    grid_loop_idx: Grid iteration of the input read; only used in the error
      message when `input_name` is set.

  Returns:
    An array of shape `full_read_shape` whose out-of-bounds positions are
    filled with the configured "uninitialized" value.

  Raises:
    IndexError: If `shared_memory.out_of_bounds_reads` is "raise".
  """
  if shared_memory.out_of_bounds_reads == "raise":
    # Attribute the error to the user's source location when available.
    if source_info is None:
      ctx = contextlib.nullcontext()
    else:
      ctx = source_info_util.user_context(
          traceback=source_info.traceback, name_stack=source_info.name_stack
      )  # type: ignore[assignment]
    with ctx:
      if input_name is None:
        raise IndexError(
            f"Out-of-bounds read of {allocation_key}:"
            f" reading [{read_range}] but buffer has shape {shape}."
        )
      else:
        # Different error message when we are reading a block of an input,
        # to copy it to a buffer before invoking the kernel body.
        raise IndexError(
            f"Out-of-bounds block index {block_indices} for {allocation_key},"
            f' input "{input_name}" in iteration {grid_loop_idx}:'
            f" reading [{read_range}] but input has shape {shape}."
        )
  # out_of_bounds_reads == "uninitialized"
  uninit_array = np.full(
      full_read_shape,
      interpret_utils.get_uninitialized_value(
          dtype, shared_memory.uninitialized_memory
      ),
      dtype=dtype,
  )
  if ret is None:
    return uninit_array
  else:
    # Overlay the in-bounds values that were actually read onto the
    # uninitialized background; `ret` occupies the leading corner.
    uninit_array[tuple(slice(s) for s in ret.shape)] = ret
    return uninit_array
def _is_dynamic(indexer: indexing.NDIndexer) -> bool:
return any(
isinstance(idx, indexing.Slice)
and (idx.is_dynamic_start or idx.is_dynamic_size)
for idx in indexer.indices
)
def _validate_transforms(transforms):
  """Rejects transforms that are unsupported in GPU interpret mode.

  Raises:
    ValueError: For dynamic indexing or any non-`NDIndexer` transform.
  """
  for transform in transforms:
    if isinstance(transform, indexing.NDIndexer):
      if _is_dynamic(transform):
        raise ValueError(
            "Dynamic indexing not supported in GPU interpret mode"
        )
    else:
      raise ValueError(f"Unsupported transform: {transform}")
def _get(
    device_id: Any,
    thread_id: Any,
    allocation_key: Any,
    transforms,
    block_indices=None,
    grid_loop_idx=None,
    clock=None,
    source_info=None,
    input_name=None,
) -> np.ndarray:
  """Performs a read from the buffer for `allocation_key` from the given device and thread.

  Args:
    device_id: Device performing the read (converted to int).
    thread_id: Thread performing the read (converted to int).
    allocation_key: Array that converts into a `HostAllocationKey`.
    transforms: Indexing transforms restricting the read; dynamic indexing is
      rejected by `_validate_transforms`.
    block_indices: Set only when reading a block of a `pallas_call` input;
      used in out-of-bounds error messages.
    grid_loop_idx: Grid iteration; set only together with `input_name`.
    clock: Optional vector clock for race detection; defaults to the clock
      returned by the shared-memory read.
    source_info: Optional source info for attributing errors/races.
    input_name: Name of the `pallas_call` input being read, if any.

  Returns:
    The values read. Out-of-bounds reads either raise or are padded with
    "uninitialized" values, per the shared-memory configuration.
  """
  device_id = int(device_id)
  thread_id = int(thread_id)
  allocation_key = HostAllocationKey.from_array(allocation_key)
  _validate_transforms(transforms)
  # TODO(nrink): Support tiling and swizzling transforms.
  transforms = jax.tree.map(int, transforms)
  if input_name is not None:
    # NOTE: input_name, block_indices, and grid_loop_idx are set only if this
    # function is being called to read a block from a pallas_call input (at the
    # start of one iteration of the kernel body).
    assert block_indices is not None
    block_indices = tuple(int(x) for x in block_indices)
    assert grid_loop_idx is not None
    grid_loop_idx = tuple(int(x) for x in grid_loop_idx)
  shared_memory = _get_shared_memory()
  global_core_id = shared_memory.get_global_core_id(device_id, thread_id)
  read_range = interpret_utils.to_range(transforms)
  # `ret` may be None (or smaller than requested) for out-of-bounds reads.
  ret, (shape, dtype), clock_ = shared_memory.get_buffer_content(
      allocation_key, read_range, global_core_id
  )
  clock = clock if clock is not None else clock_
  # Compute the shape of the read value, assuming the read is fully in-bounds.
  # TODO(jburnim): We already know this shape in the Jaxpr where we insert a
  # callback to `get`. Should we just pass the shape to `get`?
  # TODO(jburnim): Move to a helper function?
  new_full_read_shape: list[int] = []
  assert len(read_range) <= len(shape)
  for dim_size, idx_or_slice in itertools.zip_longest(
      shape, read_range, fillvalue=None
  ):
    assert isinstance(dim_size, int)
    if idx_or_slice is None:
      # Trailing dimension not covered by the read range: read it in full.
      new_full_read_shape.append(dim_size)
    elif isinstance(idx_or_slice, int):
      # Integer index: this dimension is squeezed out of the result.
      continue
    else:
      # NOTE(review): floor division assumes (stop - start) is a multiple of
      # step (or step == 1); otherwise this undercounts the slice length by
      # one — confirm against `interpret_utils.to_range`.
      dim_size = (idx_or_slice.stop - idx_or_slice.start) // idx_or_slice.step
      assert isinstance(dim_size, int)
      new_full_read_shape.append(dim_size)
  full_read_shape = tuple(new_full_read_shape)
  del new_full_read_shape
  if (ret is None) or (full_read_shape != ret.shape):
    # The read was (partially) out of bounds: raise or pad with
    # "uninitialized" values, per the shared-memory configuration.
    ret = _handle_out_of_bounds_read(
        ret,
        full_read_shape,
        shape,
        dtype,
        allocation_key,
        read_range,
        shared_memory,
        source_info,
        input_name,
        block_indices,
        grid_loop_idx,
    )
  if shared_memory.detect_races:
    get_races().check_read(
        device_id,
        thread_id,
        clock,
        allocation_key,
        read_range,
        source_info=source_info,
    )
  return ret
def call_get(
    *,
    result_shape_and_dtype,
    device_id: int,
    thread_id: int,
    allocation_key: jnp.ndarray,
    transforms,
    block_indices=None,
    grid_loop_idx=None,
    clock=None,
    source_info=None,
    input_name=None,
) -> jnp.ndarray:
  """Traceable wrapper around `_get` (ordered host callback)."""
  host_fn = functools.partial(
      _get, source_info=source_info, input_name=input_name
  )
  host_args = (
      device_id,
      thread_id,
      allocation_key,
      transforms,
      block_indices,
      grid_loop_idx,
      clock,
  )
  return callback.io_callback(
      host_fn, result_shape_and_dtype, *host_args, ordered=True
  )
def _swap(
    device_id: Any,
    thread_id: Any,
    allocation_key_as_array: Any,
    transforms,
    val,
    mask,
    *,
    source_info=None,
):
  """Performs a swap into the buffer for `allocation_key_as_array` from the given device and thread.

  Args:
    device_id: Device performing the swap (converted to int).
    thread_id: Thread performing the swap (converted to int).
    allocation_key_as_array: Array that converts into a `HostAllocationKey`.
    transforms: Indexing transforms restricting the swap; dynamic indexing is
      rejected by `_validate_transforms`.
    val: New values to write.
    mask: Optional boolean mask (same shape as `val`) selecting which
      positions are written.
    source_info: Optional source info for attributing races.

  Returns:
    The previous buffer content of the swapped range.

  Raises:
    ValueError: If the swap is (partially) out of bounds.
  """
  device_id = int(device_id)
  thread_id = int(thread_id)
  allocation_key = HostAllocationKey.from_array(allocation_key_as_array)
  _validate_transforms(transforms)
  # TODO(nrink): Support tiling and swizzling transforms.
  transforms = jax.tree.map(int, transforms)
  val = np.array(val)
  mask = np.array(mask) if mask is not None else None
  if mask is not None:
    assert mask.shape == val.shape
  shared_memory = _get_shared_memory()
  global_core_id = shared_memory.get_global_core_id(device_id, thread_id)
  read_write_range = interpret_utils.to_range(transforms)
  # `ret is None` signals that the swap was (partially) out of bounds.
  ret, (shape, _), clock = shared_memory.swap_buffer_content(
      allocation_key, read_write_range, val, mask, global_core_id
  )
  if ret is None:
    if mask is None:
      raise ValueError(
          f"Out-of-bounds swap of {allocation_key}:"
          f" swapping [{read_write_range}] but buffer has shape"
          f" {shape} ."
      )
    else:
      # TODO(jburnim): Include indices of out-of-bounds locations where mask
      # is True.
      raise ValueError(
          f"Out-of-bounds masked swap of {allocation_key}: swapping"
          f" [{read_write_range}] but buffer has shape {shape} . "
      )
  if shared_memory.detect_races:
    get_races().check_write(
        device_id,
        thread_id,
        clock,
        allocation_key,
        read_write_range,
        source_info=source_info,
    )
  return ret
def call_swap(
    *,
    result_shape_and_dtype,
    device_id: int,
    thread_id: int,
    allocation_key: jnp.ndarray,
    transforms,
    val,
    mask,
    source_info=None,
):
  """Traceable wrapper around `_swap` (ordered host callback)."""
  host_fn = functools.partial(_swap, source_info=source_info)
  host_args = (
      device_id,
      thread_id,
      allocation_key,
      transforms,
      val,
      mask,
  )
  return callback.io_callback(
      host_fn, result_shape_and_dtype, *host_args, ordered=True
  )
def _allocate_barriers(
    device_id: Any,
    thread_id: Any,
    num_arrivals: Any,
    num_barriers: Any,
    ref_count: Any,
) -> np.ndarray:
  """Allocates `num_barriers` barriers and returns their stacked keys.

  Returns:
    An int32 array of shape `(num_barriers, *key_shape)`, one allocation key
    per barrier.
  """
  device_id, thread_id = int(device_id), int(thread_id)
  num_arrivals, num_barriers = int(num_arrivals), int(num_barriers)
  ref_count = int(ref_count)
  shared_memory = _get_shared_memory()
  smem_space_id = IDX_BY_GPU_MEMORY_SPACE[mosaic_gpu_core.SMEM]
  keys = []
  for _ in range(num_barriers):
    # Advance `shared_memory`'s internal buffer-id counter for every thread
    # that calls into this function.
    barrier_id = shared_memory.get_next_buffer_id(device_id, thread_id)
    key = HostAllocationKey(
        memory_space_id=smem_space_id,
        device_id=device_id,
        # Barriers are shared between threads, so all barrier allocations are
        # associated with the 0th thread.
        thread_id=0,
        initial_ref_count=ref_count,
        buffer_id=barrier_id,
    )
    shared_memory.allocate_barrier(
        device_id,
        thread_id,
        key,
        ref_count=ref_count,
        num_arrivals=num_arrivals,
    )
    keys.append(key.as_array)
  assert len(keys) == num_barriers
  return np.array(keys, dtype=np.int32)
def call_allocate_barriers(
    device_id: int,
    thread_id: int,
    num_arrivals: int,
    num_barriers: int,
    ref_count: int,
) -> jnp.ndarray:
  """Traceable wrapper around `_allocate_barriers` (ordered host callback).

  Returns an array stacking `num_barriers` allocation keys.
  """
  key_shape_and_dtype = HostAllocationKey.shape_and_dtype()
  stacked_result = jax.ShapeDtypeStruct(
      (num_barriers, *key_shape_and_dtype.shape), key_shape_and_dtype.dtype
  )
  host_args = (device_id, thread_id, num_arrivals, num_barriers, ref_count)
  return callback.io_callback(
      _allocate_barriers, stacked_result, *host_args, ordered=True
  )
def _deallocate_barrier(
    device_id: Any, thread_id: Any, allocation_key: np.ndarray
):
  """Deallocates each barrier whose key is a row of `allocation_key`."""
  device_id, thread_id = int(device_id), int(thread_id)
  # `allocation_key` stacks one barrier key per row.
  assert len(allocation_key.shape) == 2
  shared_memory = _get_shared_memory()
  for row in allocation_key:
    shared_memory.deallocate_barrier(
        device_id, thread_id, HostAllocationKey.from_array(row)
    )
def call_deallocate_barrier(
    device_id: int, thread_id: int, allocation_key: jnp.ndarray
):
  """Traceable wrapper around `_deallocate_barrier` (ordered host callback)."""
  host_args = (device_id, thread_id, allocation_key)
  callback.io_callback(_deallocate_barrier, None, *host_args, ordered=True)
def _barrier_wait(device_id: int, thread_id: int, allocation_key: np.ndarray):
  """Blocks the calling (device, thread) pair on the barrier."""
  device_id, thread_id = int(device_id), int(thread_id)
  key = HostAllocationKey.from_array(allocation_key)
  barrier, _ = _get_shared_memory().get_barrier_and_increment_clock(
      key, device_id, thread_id
  )
  barrier.wait(device_id, thread_id)
def call_barrier_wait(
    device_id: int, thread_id: int, allocation_key: jnp.ndarray
):
  """Traceable wrapper around `_barrier_wait` (ordered host callback)."""
  host_args = (device_id, thread_id, allocation_key)
  callback.io_callback(_barrier_wait, None, *host_args, ordered=True)
def _barrier_arrive(device_id: int, thread_id: int, allocation_key: np.ndarray):
  """Registers an arrival of (device, thread) at the barrier."""
  device_id, thread_id = int(device_id), int(thread_id)
  key = HostAllocationKey.from_array(allocation_key)
  barrier, clock = _get_shared_memory().get_barrier_and_increment_clock(
      key, device_id, thread_id
  )
  barrier.arrive(device_id, thread_id, clock)
def call_barrier_arrive(
    device_id: int, thread_id: int, allocation_key: jnp.ndarray
):
  """Traceable wrapper around `_barrier_arrive` (ordered host callback)."""
  host_args = (device_id, thread_id, allocation_key)
  callback.io_callback(_barrier_arrive, None, *host_args, ordered=True)
def _assert_no_barriers_allocated():
  """Asserts that the shared-memory state holds no allocated barriers."""
  # Internal consistency check: by the time this runs, the interpreter should
  # have deallocated every barrier it allocated.
  _get_shared_memory().assert_no_barriers_allocated()
def call_assert_no_barriers_allocated():
  """Traceable wrapper around `_assert_no_barriers_allocated` (ordered host callback)."""
  callback.io_callback(_assert_no_barriers_allocated, (), ordered=True)
| {
"repo_id": "jax-ml/jax",
"file_path": "jax/_src/pallas/mosaic_gpu/interpret/gpu_callbacks.py",
"license": "Apache License 2.0",
"lines": 724,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:jax/_src/pallas/mosaic_gpu/interpret/interpret_pallas_call.py | # Copyright 2026 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Mapping, Sequence, Set
import dataclasses
import math
from typing import Any
import jax
from jax._src import callback
from jax._src import core as jax_core
from jax._src import effects
from jax._src.pallas import core as pallas_core
from jax._src.pallas.mosaic.interpret import thread_map
from jax._src.pallas.mosaic.interpret import utils as interpret_utils
from jax._src.pallas.mosaic_gpu import core as mosaic_gpu_core
from jax._src.pallas.mosaic_gpu.interpret import gpu_callbacks
from jax._src.pallas.mosaic_gpu.interpret import jaxpr_interpret
from jax._src.typing import Array
from jax._src.util import (safe_zip, split_list)
from jax.experimental.pallas import mosaic_gpu as plgpu
InterpretParams = interpret_utils.InterpretGPUParams
def get_interpret_effects() -> Set[effects.Effect]:
  """Returns the JAX effects produced by interpret-mode host callbacks."""
  ordered_io_effect = callback._OrderedIOEffect  # pylint: disable=protected-access
  return {ordered_io_effect}
def get_races() -> gpu_callbacks.RaceDetectionState:
  """Returns the global race-detection state tracked by `gpu_callbacks`."""
  return gpu_callbacks.get_races()
def reset_gpu_interpret_mode_state():
  """Resets the global GPU interpret-mode state held in `gpu_callbacks`."""
  gpu_callbacks.reset_gpu_interpret_mode_state()
def _get_grid_bounds(grid_mapping: pallas_core.GridMapping) -> tuple[int, ...]:
if grid_mapping.num_dynamic_grid_bounds > 0:
raise NotImplementedError(
"Dynamic grid bounds not (yet) supported in GPU interpret mode."
)
result = []
for x in grid_mapping.grid:
# We have already tested for the absence of dynamic grid bounds. So all
# entries in the grid should be ints.
assert isinstance(x, int)
result.append(x)
return tuple(result)
def _get_grid_dims_and_num_threads(
    grid_mapping: pallas_core.GridMapping, mesh: plgpu.Mesh | None
) -> tuple[tuple[int, ...], int]:
  """Derives grid dimensions and thread count from `mesh` (or `grid_mapping`).

  Without a mesh, the grid comes from `grid_mapping` and one thread is used.
  With a `plgpu.Mesh`, the mesh's grid and thread count are used.

  Raises:
    NotImplementedError: For meshes with non-trivial clusters, or when the
      grid in `grid_mapping` does not match the reconstructed grid size.
    ValueError: If `mesh` has an unsupported type.
  """
  if not mesh:
    grid_dims = _get_grid_bounds(grid_mapping)
    num_threads = 1
  elif isinstance(mesh, plgpu.Mesh):
    cluster = mesh.cluster
    if cluster is not None and math.prod(cluster) != 1:
      raise NotImplementedError(
          f"Invalid cluster {cluster} in mesh: GPU interpret mode does not"
          " support (non-trivial) clusters."
      )
    grid_dims = tuple(mesh.grid)
    num_threads = int(mesh.num_threads or 1)
  else:
    raise ValueError(f"Unsupported mesh type: {type(mesh)}")
  # Sanity check: the grid in `grid_mapping` must cover exactly as many points
  # as the (grid, threads) combination derived above.
  reconstructed_grid = grid_dims + (num_threads,)
  if math.prod(_get_grid_bounds(grid_mapping)) != math.prod(reconstructed_grid):
    raise NotImplementedError(
        f"Invalid grid {grid_mapping.grid} in grid_mapping: expected grid to"
        f" have the same size as {reconstructed_grid}"
    )
  return grid_dims, num_threads
def _allocate_buffers_for_inputs(
    device_id: int,
    invars: Sequence[Any],
    inputs: Sequence[jax.Array],
) -> list[jax.Array]:
  """Allocates `GMEM` buffers for the `inputs` of a `pallas_call`.

  Returns:
    One allocation key per input, in input order.
  """
  # TODO(nrink): This code is a simplified version to the corresponding TPU
  # interpreter code. Eventually, we should merge the two.
  buffer_keys = []
  for invar, input_value in safe_zip(invars, inputs):
    assert invar.aval.dtype == input_value.dtype
    # All array operands of a `pallas_call`/`core_map` (i.e. everything that
    # is not a semaphore, barrier, etc.) are placed in `GMEM`. These arrays
    # (or slices thereof) may need to be copied into `SMEM` before executing
    # the kernel.
    request = gpu_callbacks.make_allocation_request_array(
        device_id=device_id,
        memory_space_id=gpu_callbacks.get_memory_space_idx(
            mosaic_gpu_core.MemorySpace.GMEM
        ),
    )
    buffer_keys.append(
        gpu_callbacks.call_allocate_buffer_for_all_threads(
            device_id, request, input_value
        )
    )
  return buffer_keys
@dataclasses.dataclass(frozen=True)
class AllocationKeyAndValue:
  """Pairs a buffer's allocation key with the value associated with it."""

  # Allocation key identifying the buffer (encoded as an array).
  key: jax.Array
  # Value associated with the buffer; also determines `shape`.
  value: jax.Array

  @property
  def shape(self) -> tuple[int, ...]:
    """Shape of `value`."""
    return self.value.shape
def _allocate_buffers_for_outputs(
    device_id: int,
    num_threads: int,
    input_output_aliases: tuple[tuple[int, int], ...],
    grid_mapping: pallas_core.GridMapping,
    input_buffer_keys: Sequence[jax.Array],
    input_vals: Sequence[jax.Array],
    interpret_params: interpret_utils.InterpretGPUParams,
) -> list[AllocationKeyAndValue]:
  """Allocates `GMEM` buffers for `pallas_call` outputs, respecting aliased inputs.

  Args:
    device_id: Device to allocate on.
    num_threads: Initial reference count for newly allocated output buffers.
    input_output_aliases: Pairs of (input index, output index) that must share
      a buffer.
    grid_mapping: Supplies output block mappings and block shapes.
    input_buffer_keys: Keys of the already-allocated input buffers.
    input_vals: Values of the `pallas_call` inputs (reused for aliased
      outputs).
    interpret_params: Supplies "uninitialized" fill values and padding.

  Returns:
    One `AllocationKeyAndValue` per `pallas_call` output.
  """
  # TODO(nrink): This code is a simplified version to the corresponding TPU
  # interpreter code. Eventually, we should merge the two.
  assert len(input_buffer_keys) == len(input_vals)
  # Maps output index -> aliased input index.
  oi_alias_map = {v: k for k, v in input_output_aliases}
  output_buffer_keys_and_values = []
  block_shapes = [
      pallas_core._get_block_shape(bm.block_shape)  # pylint: disable=protected-access
      for bm in grid_mapping.block_mappings
  ]
  num_inputs = grid_mapping.num_inputs
  num_outputs = grid_mapping.num_outputs
  # Block mappings are ordered inputs first, then outputs.
  output_block_shapes = block_shapes[num_inputs : num_inputs + num_outputs]
  for output_idx, bm in enumerate(grid_mapping.block_mappings_output):
    if output_idx in oi_alias_map:
      aliased_input_idx = oi_alias_map[output_idx]
      # Reuse the `GMEM` buffer for the aliased `pallas_call`/`core_map` input.
      output_buffer_keys_and_values.append(
          AllocationKeyAndValue(
              key=input_buffer_keys[aliased_input_idx],
              value=input_vals[aliased_input_idx],
          )
      )
    else:
      out_val = interpret_params.get_uninitialized_array(
          bm.array_aval.shape, bm.array_aval.dtype
      )
      # Presumably pads `out_val` up to the output's block shape before the
      # backing buffer is allocated — see `pad_to_block_dimension`.
      padded_val = interpret_params.pad_to_block_dimension(
          out_val, output_block_shapes[output_idx]
      )
      allocation_request = gpu_callbacks.make_allocation_request_array(
          device_id=device_id,
          # All outputs of a `pallas_call`/`core_map` that are arrays (i.e. that
          # are not semaphores, barriers etc.) are placed in `GMEM`. Results
          # from executing the kernel (or slices thereof) may need to be copied
          # from `SMEM` into the `GMEM` output buffers that are allocated here.
          memory_space_id=gpu_callbacks.get_memory_space_idx(
              mosaic_gpu_core.MemorySpace.GMEM
          ),
          initial_ref_count=num_threads,
      )
      output_buffer_key = gpu_callbacks.call_allocate_buffer_for_all_threads(
          device_id, allocation_request, padded_val
      )
      output_buffer_keys_and_values.append(
          AllocationKeyAndValue(key=output_buffer_key, value=out_val)
      )
  return output_buffer_keys_and_values
def _get_kernel_buffers(
    device_id: int,
    num_threads: int,
    grid_mapping: pallas_core.GridMapping,
    invars: Sequence[Any],
    input_buffer_keys: Sequence[jax.Array],
    output_buffer_keys: Sequence[jax.Array],
    interpret_params: interpret_utils.InterpretGPUParams,
) -> list[jax.Array]:
  """Collects buffers to be passed to the kernel from `pallas_call` input/output buffers.

  Kernel arguments in `GMEM` reuse the already-allocated input/output
  buffers. All other arguments get freshly allocated buffers filled with
  "uninitialized" values.

  Args:
    device_id: Device to allocate on.
    num_threads: Initial reference count for newly allocated buffers.
    grid_mapping: Supplies input/output counts.
    invars: Kernel jaxpr input variables.
    input_buffer_keys: Keys of the already-allocated GMEM input buffers.
    output_buffer_keys: Keys of the already-allocated GMEM output buffers.
    interpret_params: Supplies the "uninitialized" fill values.

  Returns:
    One allocation key per kernel input variable.
  """
  # TODO(nrink): This code is a simplified version to the corresponding TPU
  # interpreter code. Eventually, we should merge the two.
  kernel_buffer_keys = []
  for i, var in enumerate(invars):
    # Invars are ordered inputs first, then outputs, then scratch.
    output_idx = i - grid_mapping.num_inputs
    is_input = i < grid_mapping.num_inputs
    is_output = (output_idx >= 0) and (output_idx < grid_mapping.num_outputs)
    aval = var.aval
    # TODO(nrink): Support allocation of semaphores.
    if gpu_callbacks.is_gmem_memory_space(aval.memory_space):
      # Use the already-allocated GMEM input or output buffer.
      #
      # TODO(jburnim): For kernel args in GMEM, check that block shape equals
      # the shape of the corresponding `pallas_call` input, and that the
      # index_map is trivial.
      assert is_input ^ is_output
      if is_input:
        kernel_buffer_keys.append(input_buffer_keys[i])
      if is_output:
        kernel_buffer_keys.append(output_buffer_keys[output_idx])
    else:
      allocation_request = gpu_callbacks.make_allocation_request_array(
          device_id=device_id,
          memory_space_id=gpu_callbacks.get_memory_space_idx(aval.memory_space),
          initial_ref_count=num_threads,
      )
      init_val = interpret_params.get_uninitialized_array(
          aval.shape, aval.dtype
      )
      kernel_buffer_keys.append(
          gpu_callbacks.call_allocate_buffer_for_all_threads(
              device_id, allocation_request, init_val
          )
      )
  return kernel_buffer_keys
def _get_outputs(
    device_id: int, output_buffers: Sequence[AllocationKeyAndValue]
) -> Sequence[Array]:
  """Reads and returns values from the allocated output buffers."""
  return [
      gpu_callbacks.call_get(
          result_shape_and_dtype=buffer.value,
          device_id=device_id,
          thread_id=0,
          allocation_key=buffer.key,
          transforms=(),  # Read the entire buffer.
      )
      for buffer in output_buffers
  ]
def _load_and_store_between_allocation_keys(
    *,
    device_id: int,
    thread_id: int,
    share_and_dtype: Any,
    load_allocation_key: jax.Array,
    store_allocation_key: jax.Array,
    transform,
):
  """Copies data between two allocated buffers.

  Reads the region selected by `transform` from the buffer under
  `load_allocation_key` and writes it to the same region of the buffer under
  `store_allocation_key`.
  """
  # NOTE(review): `share_and_dtype` looks like a typo for `shape_and_dtype`,
  # but it is part of the keyword-only interface, so it is kept as-is.
  loaded_value = gpu_callbacks.call_get(
      result_shape_and_dtype=share_and_dtype,
      device_id=device_id,
      thread_id=thread_id,
      allocation_key=load_allocation_key,
      transforms=transform,
  )
  gpu_callbacks.call_swap(
      result_shape_and_dtype=share_and_dtype,
      device_id=device_id,
      thread_id=thread_id,
      allocation_key=store_allocation_key,
      transforms=transform,
      val=loaded_value,
      mask=None,
  )
def _copy_from_gmem_buffers(
    device_id: int,
    thread_id: int,
    avals: Sequence[Any],
    gmem_buffer_keys: Sequence[jax.Array],
    target_buffer_keys: Sequence[jax.Array],
    transforms):
  """Copies data from GMEM buffers into the corresponding non-GMEM buffers."""
  triples = zip(avals, gmem_buffer_keys, target_buffer_keys, strict=True)
  for aval, src_key, dst_key in triples:
    # A GMEM-to-GMEM copy would be redundant, so it is skipped.
    if gpu_callbacks.is_gmem_memory_space(aval.memory_space):
      continue
    _load_and_store_between_allocation_keys(
        device_id=device_id,
        thread_id=thread_id,
        share_and_dtype=aval,
        load_allocation_key=src_key,
        store_allocation_key=dst_key,
        transform=transforms,
    )
def _copy_to_gmem_buffers(
    device_id: int,
    thread_id: int,
    avals: Sequence[Any],
    source_buffer_keys: Sequence[jax.Array],
    gmem_buffer_keys: Sequence[jax.Array],
    transforms):
  """Copies data from non-GMEM buffers into the corresponding GMEM buffers."""
  triples = zip(avals, source_buffer_keys, gmem_buffer_keys, strict=True)
  for aval, src_key, dst_key in triples:
    # A GMEM-to-GMEM copy would be redundant, so it is skipped.
    if gpu_callbacks.is_gmem_memory_space(aval.memory_space):
      continue
    _load_and_store_between_allocation_keys(
        device_id=device_id,
        thread_id=thread_id,
        share_and_dtype=aval,
        load_allocation_key=src_key,
        store_allocation_key=dst_key,
        transform=transforms,
    )
def interpret_pallas_call(
    *args,
    jaxpr: jax_core.Jaxpr,
    debug: bool,
    input_output_aliases: tuple[tuple[int, int], ...],
    grid_mapping: pallas_core.GridMapping,
    mesh: plgpu.Mesh | None,
    compiler_params: Mapping[str, Any],
    cost_estimate: pallas_core.CostEstimate,
    out_avals: tuple[jax_core.AbstractValue, ...],
    interpret_params: interpret_utils.InterpretGPUParams,
    metadata: Mapping[str, str] | None,
    **kwargs,
) -> Sequence[Array]:
  """Interprets a GPU `pallas_call` on the host.

  Allocates host-side `GMEM` buffers for inputs and outputs, runs the kernel
  jaxpr for every grid point and thread via the jaxpr interpreter, and reads
  back the output buffers.

  Args:
    *args: Dynamic grid bounds, scalar operands, and array inputs of the
      `pallas_call` (only array inputs are currently supported).
    jaxpr: The kernel jaxpr to interpret.
    debug: Unused.
    input_output_aliases: Pairs of (input index, output index) that share a
      buffer.
    grid_mapping: Grid and block mappings of the `pallas_call`.
    mesh: Optional `plgpu.Mesh` supplying grid dimensions and thread count.
    compiler_params: Passed through to the jaxpr interpreter.
    cost_estimate: Unused.
    out_avals: Unused.
    interpret_params: Interpret-mode configuration.
    metadata: Unused.
    **kwargs: Unused.

  Returns:
    The values of the `pallas_call` outputs.

  Raises:
    NotImplementedError: For non-trivial `BlockSpec`s, dynamic grid bounds, or
      scalar arguments.
  """
  # TODO(nrink): A more fleshed out implementation of the GPU interpreter may
  # need to use some of these `del`ed arguments.
  del debug, cost_estimate, metadata, out_avals, kwargs
  # TODO(nrink): Support non-trivial `BlockSpec`s (i.e. with non-trivial
  # `index_map`s).
  #
  # Raise instead of `assert` so the check also runs under `python -O` and
  # matches the NotImplementedError style used for the other unsupported
  # features below.
  if not all(bm.has_trivial_window() for bm in grid_mapping.block_mappings):
    raise NotImplementedError(
        "Non-trivial BlockSpecs not (yet) supported in GPU interpret mode"
    )
  grid_dims, num_threads = _get_grid_dims_and_num_threads(
      grid_mapping, mesh
  )
  device_info = jaxpr_interpret.DeviceInfo()
  interpret_params = dataclasses.replace(
      interpret_params, num_cores_or_threads=num_threads
  )
  gpu_callbacks.call_initialize_shared_memory(
      num_devices=device_info.num_devices,
      num_threads=num_threads,
      interpret_params=interpret_params,
  )
  dynamic_grid_args, scalars, inputs = split_list(
      args,
      [grid_mapping.num_dynamic_grid_bounds, grid_mapping.num_index_operands],
  )
  if dynamic_grid_args:
    raise NotImplementedError("Dynamic grid bounds not (yet) supported on GPU")
  if scalars:
    raise NotImplementedError("Scalar arguments not (yet) supported on GPU")
  assert grid_mapping.num_index_operands == 0
  input_buffer_keys = _allocate_buffers_for_inputs(
      device_info.device_id,
      jaxpr.invars[: grid_mapping.num_inputs],
      inputs,
  )
  output_buffers = _allocate_buffers_for_outputs(
      device_info.device_id,
      num_threads,
      input_output_aliases,
      grid_mapping,
      input_buffer_keys,
      inputs,
      interpret_params,
  )
  kernel_buffer_keys = _get_kernel_buffers(
      device_info.device_id,
      num_threads,
      grid_mapping,
      jaxpr.invars,
      input_buffer_keys,
      [buffer.key for buffer in output_buffers],
      interpret_params,
  )
  # TODO(nrink): The two assignments below have been taken from the
  # corresponding TPU interpreter code. Confirm that they make sense here (i.e.
  # for GPU kernels).
  kernel_input_buffer_keys, kernel_output_buffer_keys, _ = split_list(
      kernel_buffer_keys, [grid_mapping.num_inputs, grid_mapping.num_outputs]
  )
  input_vars, output_vars = split_list(
      jaxpr.invars[grid_mapping.slice_block_ops], [grid_mapping.num_inputs]
  )

  def _kernel(thread_id, grid_point_coords):
    # Runs one (thread, grid point) instance: copy inputs in, interpret the
    # kernel jaxpr, copy outputs back out.
    #
    # Note that the copying from `GMEM` buffers here could introduce races when
    # multiple threads copy to the same kernel input buffer. For this to happen,
    # (a) there must be multiple threads and (b) the targeted kernel input
    # buffer must not be in `GMEM` (since we omit copies from `GMEM` to `GMEM`).
    # Currently, the ways in which a Pallas GPU kernel can be invoked do not
    # allow for (a) and (b) to be true at the same time: (a) requires that the
    # kernel is *not* invoked through a `pallas_call` but (b) can only be caused
    # if `BlockSpec`s are used when invoking the kernels, which requires that
    # the kernel be invoked through a `pallas_call`.
    #
    # TODO(nrink): Support copying of slices/blocks only, based on the
    # `BlockSpec`s. (Currently only trivial `BlockSpec`s are supported.)
    _copy_from_gmem_buffers(
        device_id=device_info.device_id,
        thread_id=thread_id,
        avals=[var.aval for var in input_vars],
        gmem_buffer_keys=input_buffer_keys,
        target_buffer_keys=kernel_input_buffer_keys,
        transforms=(),
    )
    jaxpr_interpreter = jaxpr_interpret.JaxprInterpreter(
        grid_point_coords=grid_point_coords,
        thread_id=thread_id,
        mesh=mesh,
        device_info=device_info,
        compiler_params=compiler_params,
        interpret_params=interpret_params,
    )
    jaxpr_interpreter.interpret(jaxpr, *kernel_buffer_keys)
    # Note that a comment about potential races that is analogous to the comment
    # before the call to `_copy_from_gmem_buffers` above applies here too.
    #
    # TODO(nrink): Support copying of slices/blocks only, based on the
    # `BlockSpec`s. (Currently only trivial `BlockSpec`s are supported.)
    _copy_to_gmem_buffers(
        device_id=device_info.device_id,
        thread_id=thread_id,
        avals=[var.aval for var in output_vars],
        source_buffer_keys=kernel_output_buffer_keys,
        gmem_buffer_keys=[buffer.key for buffer in output_buffers],
        transforms=(),
    )

  num_grid_loop_iterations = math.prod(grid_dims)

  def _grid_loop_body(loop_idx: int, _: None):
    # Runs all threads for the grid point identified by `loop_idx`, then
    # synchronizes vector clocks across threads.
    grid_point_coords = interpret_utils.get_indices(
        grid_dims, loop_idx
    )
    thread_map.thread_map(_kernel, num_threads, grid_point_coords)
    # TODO(nrink): Should we only create happens-before here from thread 0 to
    # the other threads? Currently we update the vector clocks for all threads by
    # looking at the vector clock of all (other) threads. It should suffice, but
    # this needs to be confirmed, to update the vector clocks for all threads by
    # looking only at the vector clock of thread 0 (and at the vector clock for
    # the thread itself).
    gpu_callbacks.call_update_clocks_for_device_barrier(device_info.device_id)

  # TODO(nrink): For now we execute the grid by sequentially looping over the
  # points in the grid. This may need to be refined to be more faithful to the
  # semantics of grid execution on a real GPU. (The other extreme would be to
  # execute all grid points fully concurrently, e.g. in individual threads.)
  jax.lax.fori_loop(0, num_grid_loop_iterations, _grid_loop_body, None)
  # TODO(nrink): Should we only create happens-before here from the other
  # threads to thread 0? Analogous to the comment above, it should suffice, but
  # this needs to be confirmed, to update only the vector clock of thread 0 (and
  # not the vector clocks for all other threads).
  gpu_callbacks.call_update_clocks_for_device_barrier(device_info.device_id)
  outputs = _get_outputs(device_info.device_id, output_buffers)
  # We assert that no barriers remain allocated. This is an internal consistency
  # check because the interpreter should take care of deallocating all barriers
  # that it has allocated. It is important that the interpreter deallocates all
  # barriers because barrier deallocation also checks that the barrier was used
  # correctly by the kernel/threads. (Specifically, it is checked that if a
  # thread has observed any completed barrier arrival, it has in fact observed
  # all completed arrivals).
  gpu_callbacks.call_assert_no_barriers_allocated()
  gpu_callbacks.call_clean_up_shared_memory()
  return outputs
| {
"repo_id": "jax-ml/jax",
"file_path": "jax/_src/pallas/mosaic_gpu/interpret/interpret_pallas_call.py",
"license": "Apache License 2.0",
"lines": 439,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:jax/_src/pallas/mosaic_gpu/interpret/jaxpr_interpret.py | # Copyright 2026 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Callable, Mapping, Sequence
import dataclasses
import functools
import math
from typing import Any
import jax
from jax import lax
from jax._src import core as jax_core
from jax._src import source_info_util
from jax._src.pallas import primitives
from jax._src.pallas.mosaic.interpret import utils as interpret_utils
from jax._src.pallas.mosaic_gpu import core as mosaic_gpu_core
from jax._src.pallas.mosaic_gpu import primitives as gpu_primitives
from jax._src.pallas.mosaic_gpu.interpret import gpu_callbacks
from jax._src.state import indexing
from jax._src.state import primitives as state_primitives
from jax._src.state import types as state_types
from jax._src.util import (safe_zip, split_list)
from jax.experimental.pallas import mosaic_gpu as plgpu
import jax.numpy as jnp
@dataclasses.dataclass(init=False, frozen=True)
class DeviceInfo:
  """Information about the device that is being interpreted."""

  # The indices along each axis of the device being interpreted.
  axis_indices: Mapping[jax_core.AxisName, int]
  # The size of each axis in the mesh of all (SPMD) devices.
  axis_sizes: Mapping[jax_core.AxisName, int]

  def __init__(self):
    """Populates axis sizes and indices from the ambient JAX axis environment."""
    # Since this class is frozen, we must use `object.__setattr__` to set the
    # attributes.
    object.__setattr__(self, "axis_sizes", jax_core.get_axis_env().axis_sizes)
    object.__setattr__(
        self,
        "axis_indices",
        {k: lax.axis_index(k) for k in self.axis_sizes.keys()},
    )

  @functools.cached_property
  def device_id(self) -> int:
    """Computes the logical ID of the device being interpreted."""
    return interpret_utils.device_coords_to_logical_id(
        tuple(self.axis_indices.values()), self.axis_sizes, self.axis_indices
    )

  @functools.cached_property
  def num_devices(self) -> int:
    """Computes the number of (SPMD) devices."""
    return math.prod(self.axis_sizes.values())
def _raise_if_unsupported_memory_space(
space: mosaic_gpu_core.MemorySpace | None,
):
# TODO(nrink): Support more memory spaces.
if space is not None and space not in [
mosaic_gpu_core.MemorySpace.GMEM,
mosaic_gpu_core.MemorySpace.SMEM,
]:
raise NotImplementedError(f"Unsupported memory space: {space}")
# TODO(nrink): Try unifying this function with `_extract_barrier_slice_base`
# from `jax._src.pallas.mosaic_gpu.primitives`.
def _get_index_for_barrier_allocation_key(
    transforms,
) -> indexing.DimIndexer | None:
  """Extracts the single index from `transforms`, or `None` if unindexed.

  Only a lone `NDIndexer` carrying exactly one index is accepted; anything
  else raises.
  """
  if not transforms:
    return None
  is_single_transform = hasattr(transforms, "__len__") and len(transforms) == 1
  if not is_single_transform:
    raise NotImplementedError(
        f"Indexing barrier with {transforms} not supported in GPU interpret"
        " mode"
    )
  transform = transforms[0]
  if not isinstance(transform, indexing.NDIndexer):
    raise ValueError(f"Expected an `NDIndexer`, but got {transform}")
  indices = transform.indices
  if len(indices) != 1:
    raise ValueError(
        f"Expected a singleton index, but got {indices}"
    )
  return indices[0]
# Sentinel value written in place of results of skipped floating-point
# computations (see `InterpretParams.skip_floating_point_ops` handling in
# `JaxprInterpreter.interpret`).
_SENTINEL = jnp.inf
@dataclasses.dataclass(frozen=True, kw_only=True)
class JaxprInterpreter:
  """Interprets a jaxpr by replacing memory operations with (GPU) callbacks."""

  # Coordinates of the grid point this interpreter instance executes.
  grid_point_coords: tuple[int, ...]
  # Index of the (Pallas) thread this interpreter instance simulates.
  thread_id: int
  # Mesh of the interpreted kernel, or `None` if the kernel has no mesh.
  mesh: plgpu.Mesh | None
  # Static information about the (SPMD) device being interpreted.
  device_info: DeviceInfo
  # Compiler parameters forwarded from the kernel invocation.
  compiler_params: Mapping[str, Any]
  # User-provided parameters controlling interpret-mode behavior.
  interpret_params: interpret_utils.InterpretParams

  @functools.cached_property
  def num_threads(self) -> int:
    """Number of Pallas threads; defaults to 1 when the mesh does not say."""
    if self.mesh is None or self.mesh.num_threads is None:
      return 1
    else:
      return int(self.mesh.num_threads)

  def _interpret_axis_index_p(self, eqn):
    """Resolves `lax.axis_index` against thread, grid, or device-mesh axes.

    Lookup order: the mesh's thread axis, then its grid axes, then the
    surrounding device mesh. Raises `ValueError` for unknown axis names.
    """
    assert eqn.primitive is lax.axis_index_p
    axis_name = eqn.params["axis_name"]
    if self.mesh is not None:
      if axis_name == self.mesh.thread_name:
        return jnp.int32(self.thread_id)
      elif axis_name in self.mesh.grid_names:
        return jnp.int32(
            self.grid_point_coords[self.mesh.grid_names.index(axis_name)]
        )
    if axis_name in self.device_info.axis_indices:
      return jnp.int32(self.device_info.axis_indices[axis_name])
    raise ValueError(
        f"Unable to determine axis index for axis name {axis_name}"
    )

  def _interpret_get_p(self, eqn, get_invals: Callable[[], Sequence[Any]]):
    """Interprets `get_p` (a ref read) via a callback into simulated memory."""
    assert eqn.primitive is state_primitives.get_p
    assert isinstance(eqn.outvars[0].aval, jax_core.ShapedArray)
    invals = get_invals()
    # `invals[0]` is the allocation key standing in for the ref; the remaining
    # invals are the flattened indexing transforms.
    return gpu_callbacks.call_get(
        result_shape_and_dtype=eqn.outvars[0].aval,
        device_id=self.device_info.device_id,
        thread_id=self.thread_id,
        allocation_key=invals[0],
        transforms=jax.tree.unflatten(eqn.params["tree"], invals[1:]),
        source_info=eqn.source_info,
    )

  def _interpret_swap_p(self, eqn, get_invals: Callable[[], Sequence[Any]]):
    """Interprets `swap_p` (a ref write returning the old value)."""
    assert eqn.primitive is state_primitives.swap_p
    assert isinstance(eqn.outvars[0].aval, jax_core.ShapedArray)
    invals = get_invals()
    # `invals[0]` is the allocation key, `invals[1]` the value to store, and
    # the remaining invals are the flattened indexing transforms.
    return gpu_callbacks.call_swap(
        result_shape_and_dtype=eqn.outvars[0].aval,
        device_id=self.device_info.device_id,
        thread_id=self.thread_id,
        allocation_key=invals[0],
        transforms=jax.tree.unflatten(eqn.params["tree"], invals[2:]),
        val=invals[1],
        mask=None,
    )

  def _interpret_run_scoped_p(
      self, eqn, get_invals: Callable[[], Sequence[Any]]
  ):
    """Interprets `run_scoped`: allocate scratch, run the body, deallocate."""

    def _allocate_for_aval(aval, same_allocations_for_all_threads: bool):
      # Allocates a barrier or buffer matching `aval` and returns its
      # allocation key.
      _raise_if_unsupported_memory_space(aval.memory_space)
      match aval:
        case state_types.AbstractRef(
            inner_aval=inner, memory_space=memory_space, kind=_
        ):
          match inner:
            case jax_core.ShapedArray(shape=shape, dtype=dtype):
              if isinstance(dtype, mosaic_gpu_core.BarrierType):
                # Allocating a barrier is meaningful only if the barrier is
                # shared between all threads. Hence we assert on
                # `same_allocations_for_all_threads`.
                assert same_allocations_for_all_threads
                assert len(shape) == 1
                return gpu_callbacks.call_allocate_barriers(
                    device_id=self.device_info.device_id,
                    thread_id=self.thread_id,
                    num_arrivals=dtype.num_arrivals,
                    num_barriers=shape[0],
                    ref_count=self.num_threads,
                )
              else:
                memory_space_idx = gpu_callbacks.get_memory_space_idx(
                    memory_space
                )
                # Shared allocations are keyed on thread 0 and ref-counted by
                # the number of threads; private ones belong to this thread.
                allocation_request = (
                    gpu_callbacks.make_allocation_request_array(
                        device_id=self.device_info.device_id,
                        memory_space_id=memory_space_idx,
                        thread_id=(
                            0
                            if same_allocations_for_all_threads
                            else self.thread_id
                        ),
                        initial_ref_count=(
                            self.num_threads
                            if same_allocations_for_all_threads
                            else 1
                        ),
                    )
                )
                return gpu_callbacks.call_allocate_buffer(
                    self.device_info.device_id,
                    self.thread_id,
                    allocation_request,
                    self.interpret_params.get_uninitialized_array(shape, dtype),
                )
            case _:
              raise ValueError(f"Unsupported inner aval: {inner}")

    def _deallocate_for_aval(allocation, aval):
      # Releases the barrier or buffer previously produced by
      # `_allocate_for_aval`.
      match aval:
        case state_types.AbstractRef(inner_aval=inner, memory_space=_, kind=_):
          match inner:
            case jax_core.ShapedArray(shape=_, dtype=dtype):
              if isinstance(dtype, mosaic_gpu_core.BarrierType):
                gpu_callbacks.call_deallocate_barrier(
                    device_id=self.device_info.device_id,
                    thread_id=self.thread_id,
                    allocation_key=allocation,
                )
              else:
                _raise_if_unsupported_memory_space(aval.memory_space)
                gpu_callbacks.call_deallocate_buffer(allocation)
              # TODO(nrink): For semaphores, check that they have value zero at
              # the end of their lifetimes. (If semaphores are never explicitly
              # deallocated, this check could take place at the end of kernel
              # interpretation.)
            case _:
              assert False, (
                  f"Unsupported inner aval: {inner} (should have been"
                  " caught before)"
              )

    assert eqn.primitive is primitives.run_scoped_p
    collective_axes = eqn.params["collective_axes"]
    # Note that on GPU, `SMEM` buffers and barriers can only be allocated
    # collectively (i.e. corresponding to `same_allocations=True`). In the
    # interpreter we are a little more lenient and allow non-collective
    # allocations for `SMEM` buffers.
    same_allocations = False
    if self.num_threads == 1:
      # When there is only one thread, we set `same_allocations` to `True`
      # regardless of whether `collective_axes` is set or not. Since the
      # allocation of barriers asserts on `same_allocations`, setting
      # `same_allocations = True` here ensures that barriers can be allocated
      # when only a single thread is present and `collective_axes` is empty.
      same_allocations = True
    elif collective_axes:
      if (
          self.mesh is None
          or len(collective_axes) != 1
          or collective_axes[0] != self.mesh.thread_name
      ):
        raise NotImplementedError(
            "When interpreting `run_scoped` in a GPU kernel, non-empty"
            " `collective_axes` is currently only supported when it contains a"
            " single axis that agrees with the thread axis (i.e. `thread_name`)"
            " of the mesh."
        )
      same_allocations = True
    # Allocate a buffer or barrier for each element of
    # `eqn.params['jaxpr'].invars`. It is assumed that each thread runs the same
    # sequence of `run_scoped`s.
    invars = eqn.params["jaxpr"].invars
    allocs = []
    for v in invars:
      allocs.append(_allocate_for_aval(v.aval, same_allocations))
    out = self.interpret(eqn.params["jaxpr"], *get_invals(), *allocs)
    for a, v in safe_zip(allocs, invars):
      _deallocate_for_aval(a, v.aval)
    return out

  def _interpret_cond_p(self, eqn, get_invals: Callable[[], Sequence[Any]]):
    """Interprets `cond_p` by dispatching to the branch jaxprs via `switch`."""
    invals = get_invals()
    # `invals[0]` selects the branch; the rest are the branch operands.
    return lax.switch(
        invals[0],
        [
            functools.partial(self.interpret, branch_jaxpr.jaxpr)
            for branch_jaxpr in eqn.params["branches"]
        ],
        *invals[1:],
    )

  def _interpret_scan_p(self, eqn, get_invals: Callable[[], Sequence[Any]]):
    """Interprets `scan_p` by scanning the interpreted body jaxpr."""
    consts, init_carry, xs = split_list(
        get_invals(),
        [eqn.params["num_consts"], eqn.params["num_carry"]],
    )

    def _scan_body(c, a):
      # Re-split the body's flat outputs into (carry, per-step outputs).
      return split_list(
          self.interpret(eqn.params["jaxpr"].jaxpr, *consts, *c, *a),
          [eqn.params["num_carry"]],
      )

    carry, out = lax.scan(
        _scan_body, init_carry, xs=xs, length=eqn.params.get("length", None)
    )
    return carry + out

  def _interpret_barrier_primitive(
      self,
      eqn,
      get_invals: Callable[[], Sequence[Any]],
      barrier_callback: Callable[..., None],
  ):
    """Shared interpretation for barrier arrive/wait primitives.

    Resolves (optional) indexing into the barrier array and invokes
    `barrier_callback` on the selected barrier's allocation key.
    """
    invals = get_invals()
    # `invals[0]` corresponds to the barrier this primitive operates on. Since
    # we are interpreting, `invals[0]` will in fact contain the allocation key
    # (which is a Jax array) for the barrier.
    allocation_key_as_array = invals[0]
    # Assert to check internal consistency: `allocation_key_as_array` should be
    # a 2-dim array (and the size of the first dimension equals the
    # `num_barriers` parameter from when the barrier was allocated).
    assert len(allocation_key_as_array.shape) == 2
    num_barriers = allocation_key_as_array.shape[0]
    # TODO(nrink): The working out of `transforms` and `index` below may need
    # tidying up. Specifically, GPU interpret mode should correctly support
    # legal ways to index into barriers. (Here, 'legal' is to be read as
    # 'allowed by the Pallas GPU semantics'.)
    transforms = jax.tree.unflatten(
        eqn.params["transforms_treedef"], invals[1:]
    )
    index = _get_index_for_barrier_allocation_key(transforms)
    if index is None:
      if num_barriers != 1:
        raise ValueError(
            "Attempting to operate on barrier without indexing, but"
            f" `num_barriers = {num_barriers}`"
        )
      allocation_key_as_array = allocation_key_as_array[0]
    else:
      allocation_key_as_array = allocation_key_as_array[index]
    barrier_callback(
        device_id=self.device_info.device_id,
        thread_id=self.thread_id,
        allocation_key=allocation_key_as_array,
    )
    assert eqn.primitive.multiple_results
    return []

  def _interpret_barrier_arrive_p(
      self, eqn, get_invals: Callable[[], Sequence[Any]]
  ):
    """Interprets `barrier_arrive_p` via the arrive callback."""
    return self._interpret_barrier_primitive(
        eqn, get_invals, gpu_callbacks.call_barrier_arrive
    )

  def _interpret_barrier_wait_p(
      self, eqn, get_invals: Callable[[], Sequence[Any]]
  ):
    """Interprets `barrier_wait_p` via the wait callback."""
    return self._interpret_barrier_primitive(
        eqn, get_invals, gpu_callbacks.call_barrier_wait
    )

  def _interpret_arithmetic_primitive(
      self, eqn, get_invals: Callable[[], Sequence[Any]]
  ):
    """Binds `eqn.primitive`, or skips it entirely for all-float outputs.

    When `skip_floating_point_ops` is enabled and every output is
    floating-point, returns output avals as placeholders instead of computing.
    """
    if self.interpret_params.skip_floating_point_ops and all(
        interpret_utils.is_float(ovar.aval.dtype) for ovar in eqn.outvars
    ):
      # Skip `eqn.primitive.bind` since `eqn.primitive` only produces
      # floating-point values. It is safe to populate `out` with avals
      # since mapping `env.write_many` over `out` (in `self.interpret`) below
      # only relies on the shape and dtype (for writing `Placeholder`s).
      out = [ovar.aval for ovar in eqn.outvars]
      if not eqn.primitive.multiple_results:
        out = out[0]
      return out
    else:
      subfuns, bind_params = eqn.primitive.get_bind_params(eqn.params)
      return eqn.primitive.bind(*subfuns, *get_invals(), **bind_params)

  def interpret(self, jaxpr, *args):
    """Evaluates `jaxpr` equation by equation, dispatching on the primitive.

    Args:
      jaxpr: the jaxpr to interpret.
      *args: values for `jaxpr.constvars` followed by `jaxpr.invars`.

    Returns:
      The values of `jaxpr.outvars`.
    """
    sentinel_for_floating_point_values = (
        _SENTINEL if self.interpret_params.skip_floating_point_ops else None
    )
    env = interpret_utils.JaxprEnv(
        vars=jaxpr.constvars + jaxpr.invars,
        values=args,
        sentinel_for_floating_point_values=sentinel_for_floating_point_values,
    )
    for eqn in jaxpr.eqns:
      with source_info_util.user_context(
          eqn.source_info.traceback,
          name_stack=eqn.source_info.name_stack,
      ):
        # We defer reading the values for `eqn.invars` into each of the branches
        # of the match statement below. This is because the case for arithmetic
        # primitives may not need to do any reads
        # (if `self.interpret_params.skip_floating_point_ops` is True). If this
        # is the case, we want to avoid materializing the read array into the
        # jaxpr when this function is traced.
        deferred_invals = functools.partial(env.read_many, eqn.invars)
        match eqn.primitive:
          case lax.axis_index_p:
            out = self._interpret_axis_index_p(eqn)
          case primitives.program_id_p:
            # Currently we only support grids and clusters with a single device.
            # Hence, zero is the only valid program id.
            out = jnp.int32(0)
          case state_primitives.get_p:
            out = self._interpret_get_p(eqn, deferred_invals)
          case primitives.load_p:
            raise NotImplementedError("load_p is not supported on GPU yet")
          case state_primitives.swap_p:
            out = self._interpret_swap_p(eqn, deferred_invals)
          case primitives.swap_p:
            raise NotImplementedError("swap_p is not supported on GPU yet")
          case primitives.run_scoped_p:
            out = self._interpret_run_scoped_p(eqn, deferred_invals)
          case lax.cond_p:
            out = self._interpret_cond_p(eqn, deferred_invals)
          case lax.scan_p:
            out = self._interpret_scan_p(eqn, deferred_invals)
          case gpu_primitives.barrier_wait_p:
            out = self._interpret_barrier_wait_p(eqn, deferred_invals)
          case gpu_primitives.barrier_arrive_p:
            out = self._interpret_barrier_arrive_p(eqn, deferred_invals)
          case _:
            out = self._interpret_arithmetic_primitive(eqn, deferred_invals)
        out = out if eqn.primitive.multiple_results else [out]
        env.write_many(eqn.outvars, out)
    return env.read_many(jaxpr.outvars)
| {
"repo_id": "jax-ml/jax",
"file_path": "jax/_src/pallas/mosaic_gpu/interpret/jaxpr_interpret.py",
"license": "Apache License 2.0",
"lines": 400,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:tests/pallas/gpu_pallas_interpret_test.py | # Copyright 2026 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from typing import Any
from absl.testing import absltest
import jax
from jax._src import test_util as jtu
from jax._src.pallas.mosaic_gpu.interpret import interpret_pallas_call as mosaic_interpret
from jax.experimental import pallas as pl
from jax.experimental.pallas import mosaic_gpu as plgpu
import jax.numpy as jnp
import numpy as np
jax.config.parse_flags_with_absl()
def _maybe_reverse(arg: tuple[Any], reverse: bool) -> tuple[Any]:
if reverse:
return tuple(reversed(arg))
else:
return arg
# TODO(nrink): Figure out how to safely run different instance of GPU
# interpret mode in parallel, and then remove this decorator.
@jtu.thread_unsafe_test_class()
class InterpretTest(jtu.JaxTestCase):
def setUp(self):
super().setUp()
if not jtu.test_device_matches(['cpu']):
self.skipTest('CPU-only test')
self.num_devices = jax.device_count()
if self.num_devices > 1:
self.skipTest(f'requires 1 device, found {self.num_devices}')
  def test_interpret_pallas_call(self):
    """A trivial `pl.pallas_call` in interpret mode runs and is race-free."""
    def _kernel(o_ref):
      o_ref[0] = 42

    @jax.jit
    def kernel():
      return pl.pallas_call(
          _kernel,
          out_shape=jax.ShapeDtypeStruct((1,), jnp.int32),
          interpret=mosaic_interpret.InterpretParams(detect_races=True),
      )()

    np.testing.assert_equal(kernel(), np.array([42], dtype=jnp.int32))
    self.assertFalse(mosaic_interpret.get_races().races_found)
  @jtu.parameterized.parameters(range(1, 17))
  def test_interpret_core_map(self, num_threads: int):
    """Each thread under `pl.core_map` writes its own output slot; no races."""
    @pl.run_state
    def kernel(o_ref):
      mesh = plgpu.Mesh(num_threads=num_threads, thread_name='x')

      @pl.core_map(
          mesh,
          interpret=mosaic_interpret.InterpretParams(detect_races=True),
      )
      def _():
        thread_idx = jax.lax.axis_index('x')
        o_ref[thread_idx] = thread_idx

    y = kernel(jnp.zeros((num_threads,), jnp.int32))
    np.testing.assert_equal(y, np.arange(num_threads, dtype=jnp.int32))
    self.assertFalse(mosaic_interpret.get_races().races_found)
  def test_interpret_core_map_with_race(self):
    """Two threads writing the same scalar ref is reported as a race."""
    @pl.run_state
    def kernel(o_ref):
      mesh = plgpu.Mesh(num_threads=2, thread_name='x')

      @pl.core_map(
          mesh,
          interpret=mosaic_interpret.InterpretParams(detect_races=True),
      )
      def _():
        thread_idx = jax.lax.axis_index('x')
        # Both threads write the whole ref — a deliberate data race.
        o_ref[...] = thread_idx

    kernel(jnp.zeros((), jnp.int32))
    self.assertTrue(mosaic_interpret.get_races().races_found)
  @jtu.parameterized.parameters(range(1, 17))
  def test_interpret_kernel(self, num_threads):
    """`plgpu.kernel` in interpret mode: per-thread writes, no races."""
    @functools.partial(
        plgpu.kernel,
        out_shape=jax.ShapeDtypeStruct((num_threads,), jnp.int32),
        num_threads=num_threads,
        thread_name='x',
        interpret=mosaic_interpret.InterpretParams(detect_races=True),
    )
    def _kernel(o_ref):
      thread_idx = jax.lax.axis_index('x')
      o_ref[thread_idx] = thread_idx

    np.testing.assert_equal(jax.jit(_kernel)(), np.arange(num_threads))
    self.assertFalse(mosaic_interpret.get_races().races_found)
  def test_skip_floating_point_ops(self):
    """With `skip_floating_point_ops`, outputs are sentinel `inf`s and the
    lowered HLO contains no `dot_general`."""
    def matmul_kernel(x_ref, y_ref, z_ref):
      # TODO(nrink): Matrix multiplication with `@` is not supported for real
      # GPU kernels (but the GPU kernel interpreter allows this). Replace this
      # with a `wgmma` or `tcgen05_mma` once these are supported by the GPU
      # kernel interpreter.
      z_ref[...] = x_ref[...] @ y_ref[...]

    def matmul(x: jax.Array, y: jax.Array):
      return pl.pallas_call(
          matmul_kernel,
          out_shape=jax.ShapeDtypeStruct((x.shape[0], y.shape[1]), x.dtype),
          interpret=mosaic_interpret.InterpretParams(
              skip_floating_point_ops=True
          ),
      )(x, y)

    k1, k2 = jax.random.split(jax.random.key(0))
    x = jax.random.normal(k1, (1024, 1024))
    y = jax.random.normal(k2, (1024, 1024))
    z = jax.jit(matmul)(x, y)
    np.testing.assert_array_equal(z, jnp.full_like(z, jnp.inf))
    lowered = jax.jit(matmul).lower(x, y).as_text(dialect='stablehlo')
    self.assertNotIn('dot_general', lowered)
  @jtu.parameterized.parameters(
      (1, 1, 1),
      (2, 1, 2),
      (2, 2, 1),
      (4, 1, 4),
      (4, 2, 2),
      (4, 4, 1),
      (8, 1, 8),
      (8, 2, 4),
      (8, 4, 2),
      (8, 8, 1),
      (16, 1, 16),
      (16, 2, 8),
      (16, 4, 4),
      (16, 8, 2),
      (16, 16, 1),
  )
  def test_matmul_example(self, num_threads, num_row_blocks, num_col_blocks):
    """Blocked matmul where each thread owns one (row, col) output block."""
    assert num_threads == num_row_blocks * num_col_blocks

    @jax.jit
    def matmul(x: jax.Array, y: jax.Array):
      num_rows_per_block = x.shape[0] // num_row_blocks
      num_cols_per_block = y.shape[1] // num_col_blocks

      @functools.partial(
          plgpu.kernel,
          out_shape=jax.ShapeDtypeStruct(
              (
                  x.shape[0],
                  y.shape[1],
              ),
              x.dtype,
          ),
          num_threads=num_threads,
          thread_name='t',
          interpret=mosaic_interpret.InterpretParams(
              detect_races=True, num_cores_or_threads=num_threads
          ),
      )
      def _matmul_kernel(x_ref, y_ref, o_ref):
        # Map the flat thread index to a (row block, col block) pair.
        thread_idx = jax.lax.axis_index('t')
        row_block_idx = thread_idx // num_col_blocks
        row_slice = pl.ds(
            row_block_idx * num_rows_per_block, num_rows_per_block
        )
        col_block_idx = jax.lax.rem(thread_idx, jnp.int32(num_col_blocks))
        col_slice = pl.ds(
            col_block_idx * num_cols_per_block, num_cols_per_block
        )
        # TODO(nrink): Matrix multiplication with `@` is not supported for real
        # GPU kernels (but the GPU kernel interpreter allows this). Replace this
        # with a `wgmma` or `tcgen05_mma` once these are supported by the GPU
        # kernel interpreter.
        o_ref[row_slice, col_slice] = x_ref[row_slice, :] @ y_ref[:, col_slice]

      return _matmul_kernel(x, y)

    k1, k2 = jax.random.split(jax.random.key(0))
    x = jax.random.normal(k1, (1024, 1024))
    y = jax.random.normal(k2, (1024, 1024))
    z = matmul(x, y)
    np.testing.assert_allclose(z, x @ y, atol=1e-3)
    self.assertFalse(mosaic_interpret.get_races().races_found)
  @jtu.parameterized.parameters(False, True)
  def test_run_scoped(self, with_race):
    """`run_scoped` scratch: per-thread allocations are race-free; a
    collectively-shared allocation written by both threads races."""
    mesh = plgpu.Mesh(num_threads=2, thread_name='n')

    @jax.jit
    def f(x):
      def inner(o_ref):
        @pl.core_map(
            mesh,
            interpret=mosaic_interpret.InterpretParams(
                detect_races=True,
            ),
        )  # type: ignore[wrong-arg-types]
        def _():
          def body(ref):
            @pl.when(jax.lax.axis_index('n') == 0)
            def _():
              ref[...] = jnp.zeros_like(ref[...])
              o_ref[0, ...] = ref[...]

            @pl.when(jax.lax.axis_index('n') == 1)
            def _():
              ref[...] = jnp.ones_like(ref[...])
              o_ref[1, ...] = ref[...]

          # With `collective_axes=('n',)` both threads share one scratch
          # buffer, so the unsynchronized writes above become a race.
          pl.run_scoped(
              body,
              plgpu.GMEM(o_ref.shape[1:], dtype=o_ref.dtype),
              collective_axes=('n',) if with_race else (),
          )

      y = pl.run_state(inner)(x)
      return y

    y = f(jnp.zeros((2, 16, 128)))
    if with_race:
      # Due to the presence of a race, we cannot expect `y` to have a
      # well-defined value. Hence, we do not assert anything about `y`.
      self.assertTrue(mosaic_interpret.get_races().races_found)
    else:
      np.testing.assert_array_equal(
          y, np.broadcast_to(np.arange(2).reshape(2, 1, 1), y.shape)
      )
      self.assertFalse(mosaic_interpret.get_races().races_found)
  # Test adapted from
  # https://docs.jax.dev/en/latest/pallas/gpu/reference.html#using-multiple-pallas-threads-per-cuda-block
  def test_producer_consumer_threads_with_barrier(self):
    """Producer thread fills SMEM, signals a barrier; consumer waits, reads."""
    x = jnp.arange(128, dtype=jnp.float32)

    @functools.partial(
        plgpu.kernel,
        out_shape=x,
        scratch_shapes=dict(
            smem_ref=plgpu.SMEM(x.shape, x.dtype),
            barrier_ref=plgpu.Barrier(),
        ),
        num_threads=2,
        thread_name='t',
        interpret=mosaic_interpret.InterpretParams(detect_races=True),
    )
    def _kernel(x_ref, out_ref, smem_ref, barrier_ref):
      thread_id = jax.lax.axis_index('t')

      @pl.when(thread_id == 0)
      def producer_thread():
        smem_ref[...] = x_ref[...] + 1
        plgpu.barrier_arrive(barrier_ref)

      @pl.when(thread_id == 1)
      def consumer_thread():
        plgpu.barrier_wait(barrier_ref)
        out_ref[...] = smem_ref[...] + 1

    y = _kernel(x)
    np.testing.assert_array_equal(y, x + 2)
    self.assertFalse(mosaic_interpret.get_races().races_found)
  @jtu.parameterized.parameters(range(2, 17))
  def test_single_barrier_with_multiple_arrival(self, num_threads):
    """One barrier requiring `num_threads - 1` arrivals gates a reduction."""
    @functools.partial(
        plgpu.kernel,
        out_shape=jax.ShapeDtypeStruct((), jnp.int32),
        scratch_shapes=dict(
            smem_ref=plgpu.SMEM((num_threads - 1,), jnp.int32),
            barrier_ref=plgpu.Barrier(
                num_arrivals=num_threads - 1, num_barriers=1
            ),
        ),
        num_threads=num_threads,
        thread_name='t',
        interpret=mosaic_interpret.InterpretParams(detect_races=True),
    )
    def _kernel(out_ref, smem_ref, barrier_ref):
      thread_id = jax.lax.axis_index('t')

      @pl.when(thread_id == 0)
      def _():
        # Thread 0 waits for all other threads before reducing.
        plgpu.barrier_wait(barrier_ref)
        out_ref[...] = sum(smem_ref[...])

      @pl.when(thread_id > 0)
      def _():
        smem_ref[thread_id - 1] = thread_id
        plgpu.barrier_arrive(barrier_ref)

    y = _kernel()
    self.assertEqual(y, sum(range(num_threads)))
    self.assertFalse(mosaic_interpret.get_races().races_found)
  @jtu.parameterized.parameters(range(2, 17))
  def test_multiple_barriers_with_single_arrival(self, num_threads):
    """`num_threads - 1` single-arrival barriers, one per producer thread."""
    @functools.partial(
        plgpu.kernel,
        out_shape=jax.ShapeDtypeStruct((), jnp.int32),
        scratch_shapes=dict(
            smem_ref=plgpu.SMEM((num_threads - 1,), jnp.int32),
            barrier_ref=plgpu.Barrier(
                num_arrivals=1, num_barriers=num_threads - 1
            ),
        ),
        num_threads=num_threads,
        thread_name='t',
        interpret=mosaic_interpret.InterpretParams(detect_races=True),
    )
    def _kernel(out_ref, smem_ref, barrier_ref):
      thread_id = jax.lax.axis_index('t')

      @pl.when(thread_id == 0)
      def _():
        # Thread 0 waits on each producer's barrier before reducing.
        for b in range(num_threads - 1):
          plgpu.barrier_wait(barrier_ref.at[b])
        out_ref[...] = sum(smem_ref[...])

      @pl.when(thread_id > 0)
      def _():
        smem_ref[thread_id - 1] = thread_id
        plgpu.barrier_arrive(barrier_ref.at[thread_id - 1])

    y = _kernel()
    self.assertEqual(y, sum(range(num_threads)))
    self.assertFalse(mosaic_interpret.get_races().races_found)
  # Test adapted from
  # https://docs.jax.dev/en/latest/pallas/gpu/reference.html#explicit-arrival-cross-thread-synchronization
  #
  # `buffer_size` has to be at least 2 so that the sizes of the `produced` and
  # `consumed` barriers are at least 2 (for each barrier). Otherwise the
  # indexing `produced.at[_]`/`consumed.at[_]` will not work.
  @jtu.parameterized.product(
      skip_floating_point_ops=[False, True],
      input_size=[1, 2, 4, 16, 64, 128],
      buffer_size=[2, 4, 8, 16],
  )
  def test_barrier_for_buffering(
      self, skip_floating_point_ops, input_size, buffer_size, seed=0
  ):
    """Producer/consumer pipeline through a circular SMEM queue, with
    `produced`/`consumed` barrier pairs per queue slot."""
    k = jax.random.key(seed)
    x = jax.random.normal(k, (input_size,), dtype=jnp.float32)

    @functools.partial(
        plgpu.kernel,
        out_shape=x,
        scratch_shapes=dict(
            queue=plgpu.SMEM((buffer_size,), jnp.float32),
            produced=plgpu.Barrier(num_arrivals=1, num_barriers=buffer_size),
            consumed=plgpu.Barrier(num_arrivals=1, num_barriers=buffer_size),
        ),
        num_threads=2,
        thread_name='t',
        interpret=mosaic_interpret.InterpretParams(
            detect_races=True, skip_floating_point_ops=skip_floating_point_ops
        ),
    )
    def _kernel(x_ref, out_ref, queue, produced, consumed):
      thread_id = jax.lax.axis_index('t')
      _get_slot = lambda i: jax.lax.rem(i, buffer_size)

      def _thread0_body(i, _):
        # Producer: wait until the slot has been consumed, then refill it.
        slot = _get_slot(i)

        @pl.when(i >= buffer_size)
        def _await_consumed():
          plgpu.barrier_wait(consumed.at[slot])

        queue[slot] = 3.0 * x_ref[i]
        plgpu.barrier_arrive(produced.at[slot])

      pl.when(thread_id == 0)(
          lambda: jax.lax.fori_loop(0, input_size, _thread0_body, None)
      )

      def _thread1_body(i, _):
        # Consumer: wait until the slot is produced, then drain it.
        slot = _get_slot(i)
        plgpu.barrier_wait(produced.at[slot])
        out_ref[i] = queue[slot] + 42.0
        plgpu.barrier_arrive(consumed.at[slot])

      pl.when(thread_id == 1)(
          lambda: jax.lax.fori_loop(0, input_size, _thread1_body, None)
      )

      # TODO(nrink): This epilogue is needed to satisfy the requirement that all
      # completed barrier arrivals must have been waited for by the end of
      # kernel execution. Relax this requirement to match what is required when
      # the kernel is executed on real GPU hardware, where having unawaited
      # completed arrivals is acceptable immediately before returning from the
      # kernel.
      def _thread0_epilogue(i, _):
        slot = _get_slot(i)

        @pl.when(i < buffer_size)
        def _await_consumed():
          plgpu.barrier_wait(consumed.at[slot])

      pl.when(thread_id == 0)(
          lambda: jax.lax.fori_loop(0, input_size, _thread0_epilogue, None)
      )

    y = _kernel(x)
    self.assertFalse(mosaic_interpret.get_races().races_found)
    if skip_floating_point_ops:
      np.testing.assert_array_equal(y, jnp.full_like(y, jnp.inf))
    else:
      np.testing.assert_array_equal(y, 3.0 * x + 42.0)
    # TODO(nrink): Add a variant of this test case that does not correctly use
    # the `consumed` barrier and therefore has a race. Test that the race is
    # detected.
def test_indexing_singleton_barrier_ok(self):
@functools.partial(
plgpu.kernel,
out_shape=jax.ShapeDtypeStruct((), jnp.int32),
scratch_shapes=(plgpu.Barrier(),),
num_threads=1,
thread_name='t',
interpret=mosaic_interpret.InterpretParams(),
)
def _kernel(out_ref, barrier_ref):
thread_id = jax.lax.axis_index('t')
plgpu.barrier_arrive(barrier_ref.at[thread_id])
out_ref[...] = 42
y = _kernel()
self.assertEqual(y, 42)
  def test_not_indexing_multiple_barriers_raises(self):
    """Operating on a multi-barrier array without indexing raises."""
    @functools.partial(
        plgpu.kernel,
        out_shape=jax.ShapeDtypeStruct((), jnp.int32),
        scratch_shapes=(plgpu.Barrier(num_barriers=2),),
        num_threads=1,
        thread_name='t',
        interpret=mosaic_interpret.InterpretParams(),
    )
    def _kernel(out_ref, barrier_ref):
      plgpu.barrier_arrive(barrier_ref)
      out_ref[...] = 42

    with self.assertRaisesRegex(
        ValueError,
        r'Attempting to operate on barrier without indexing, but `num_barriers'
        r' = 2`',
    ):
      _kernel()
    # The failed run leaves global interpreter state behind; reset it so
    # subsequent tests start clean.
    mosaic_interpret.reset_gpu_interpret_mode_state()
  def test_wait_for_barrier_twice(self):
    """A barrier slot can be waited on across multiple phases."""
    @functools.partial(
        plgpu.kernel,
        out_shape=jax.ShapeDtypeStruct((), jnp.int32),
        scratch_shapes=dict(
            barrier_ref=plgpu.Barrier(num_arrivals=1, num_barriers=2)
        ),
        num_threads=3,
        thread_name='t',
        interpret=mosaic_interpret.InterpretParams(detect_races=True),
    )
    def _kernel(out_ref, barrier_ref):
      thread_id = jax.lax.axis_index('t')

      @pl.when(thread_id == 0)
      def _():
        out_ref[...] = 1
        plgpu.barrier_wait(barrier_ref.at[0])
        out_ref[...] = 2
        plgpu.barrier_arrive(barrier_ref.at[1])
        plgpu.barrier_wait(barrier_ref.at[0])
        out_ref[...] = 3

      @pl.when(thread_id == 1)
      def _():
        plgpu.barrier_arrive(barrier_ref.at[0])

      @pl.when(thread_id == 2)
      def _():
        plgpu.barrier_wait(barrier_ref.at[1])
        plgpu.barrier_arrive(barrier_ref.at[0])

    y = _kernel()
    self.assertEqual(y, 3)
    self.assertFalse(mosaic_interpret.get_races().races_found)
  def test_completing_barrier_twice_in_same_thread_raises(self):
    """Completing a barrier twice before anyone waits raises."""
    @functools.partial(
        plgpu.kernel,
        out_shape=jax.ShapeDtypeStruct((), jnp.int32),
        scratch_shapes=dict(barrier_ref=plgpu.Barrier(num_arrivals=1)),
        interpret=mosaic_interpret.InterpretParams(),
    )
    def _kernel(out_ref, barrier_ref):
      plgpu.barrier_arrive(barrier_ref)
      plgpu.barrier_arrive(barrier_ref)
      out_ref[...] = 42

    with self.assertRaisesRegex(
        Exception,
        r'Barrier arrival was completed again before previous completion was'
        r' observed by a thread.',
    ):
      _kernel()
    # Clean up interpreter state left behind by the failed run.
    mosaic_interpret.reset_gpu_interpret_mode_state()
  def test_completing_barrier_twice_in_different_threads_raises(self):
    """Two threads each completing the same barrier with no wait raises."""
    @functools.partial(
        plgpu.kernel,
        out_shape=jax.ShapeDtypeStruct((2,), jnp.int32),
        scratch_shapes=dict(barrier_ref=plgpu.Barrier(num_arrivals=1)),
        interpret=mosaic_interpret.InterpretParams(),
        num_threads=2,
        thread_name='t',
    )
    def _kernel(out_ref, barrier_ref):
      thread_id = jax.lax.axis_index('t')
      plgpu.barrier_arrive(barrier_ref)
      out_ref[thread_id] = thread_id

    with self.assertRaisesRegex(
        Exception,
        r'Barrier arrival was completed again before previous completion was'
        r' observed by a thread.',
    ):
      _kernel()
    # Clean up interpreter state left behind by the failed run.
    mosaic_interpret.reset_gpu_interpret_mode_state()
  @jtu.parameterized.product(
      num_arriving_threads=list(range(1, 17)),
      num_observing_threads=list(range(1, 17)),
      num_threads=[16],
  )
  def test_barrier_wait_in_multiple_threads_ok(
      self, num_arriving_threads, num_observing_threads, num_threads
  ):
    """A barrier's completion may be observed by any number of waiters."""
    @functools.partial(
        plgpu.kernel,
        out_shape=jax.ShapeDtypeStruct((num_threads,), jnp.int32),
        scratch_shapes=dict(
            barrier_ref=plgpu.Barrier(
                num_arrivals=num_arriving_threads, num_barriers=1
            )
        ),
        num_threads=num_threads,
        thread_name='t',
        interpret=mosaic_interpret.InterpretParams(),
    )
    def _kernel(out_ref, barrier_ref):
      thread_id = jax.lax.axis_index('t')

      @pl.when(thread_id < num_arriving_threads)
      def _():
        out_ref[thread_id] = thread_id
        plgpu.barrier_arrive(barrier_ref)

      @pl.when(thread_id >= num_threads - num_observing_threads)
      def _():
        out_ref[thread_id] = thread_id
        plgpu.barrier_wait(barrier_ref)

    _kernel()
  def test_more_barrier_completions_than_waits_raises(self):
    """Leaving a completed barrier phase unobserved at kernel end raises."""
    @functools.partial(
        plgpu.kernel,
        out_shape=jax.ShapeDtypeStruct((), jnp.int32),
        scratch_shapes=dict(barrier_ref=plgpu.Barrier(num_arrivals=1)),
        num_threads=2,
        thread_name='t',
        interpret=mosaic_interpret.InterpretParams(),
    )
    def _kernel(out_ref, barrier_ref):
      thread_id = jax.lax.axis_index('t')

      @pl.when(thread_id == 0)
      def _():
        plgpu.barrier_arrive(barrier_ref)

      @pl.when(thread_id == 1)
      def _():
        plgpu.barrier_wait(barrier_ref)
        # Second completion is never awaited — two phases, one observed.
        plgpu.barrier_arrive(barrier_ref)

      out_ref[...] = 42

    with self.assertRaisesRegex(
        Exception,
        r'Thread 1 did not observe all phases \(2\) for barrier \(but observed 1'
        r' phase\).',
    ):
      _kernel()
    # Clean up interpreter state left behind by the failed run.
    mosaic_interpret.reset_gpu_interpret_mode_state()
  def test_not_waiting_for_all_barrier_completions_in_thread_raises(self):
    """A thread waiting on an already-passed barrier phase raises."""
    @functools.partial(
        plgpu.kernel,
        out_shape=jax.ShapeDtypeStruct((), jnp.int32),
        scratch_shapes=dict(
            barrier_ref=plgpu.Barrier(num_arrivals=1, num_barriers=2)
        ),
        num_threads=3,
        thread_name='t',
        interpret=mosaic_interpret.InterpretParams(),
    )
    def _kernel(out_ref, barrier_ref):
      thread_id = jax.lax.axis_index('t')

      @pl.when(thread_id == 0)
      def _():
        plgpu.barrier_arrive(barrier_ref.at[0])

      @pl.when(thread_id == 1)
      def _():
        # This thread observes only the first completed arrival at
        # `barrier_ref.at[0]`
        plgpu.barrier_wait(barrier_ref.at[0])
        plgpu.barrier_arrive(barrier_ref.at[0])
        plgpu.barrier_arrive(barrier_ref.at[1])

      @pl.when(thread_id == 2)
      def _():
        plgpu.barrier_wait(barrier_ref.at[1])
        # This thread observes only the second completed arrival at
        # `barrier_ref.at[0]`
        plgpu.barrier_wait(barrier_ref.at[0])

      out_ref[...] = 42

    with self.assertRaisesRegex(
        Exception,
        r'Thread 2 is awaiting phase 1, but barrier is already at phase 2.',
    ):
      _kernel()
    # Clean up interpreter state left behind by the failed run.
    mosaic_interpret.reset_gpu_interpret_mode_state()
  @jtu.parameterized.product(
      num_blocks_w=[1, 2, 3],
      num_blocks_x=[1, 2, 3],
      num_blocks_y=[1, 2, 3],
      num_threads=[1, 2, 3],
  )
  def test_grid_iteration(self, num_blocks_w, num_blocks_x, num_blocks_y, num_threads):
    """Every (grid point, thread) combination writes a distinct offset."""
    def _kernel(a_gmem, out_gmem):
      w = jax.lax.axis_index('w')
      x = jax.lax.axis_index('x')
      y = jax.lax.axis_index('y')
      z = jax.lax.axis_index('z')
      # Row-major linearization of the (w, x, y, z) coordinates.
      offset = (
          w * num_blocks_x * num_blocks_y * num_threads
          + x * num_blocks_y * num_threads
          + y * num_threads
          + z
      )
      out_gmem[w, x, y, z] = a_gmem[w, x, y, z] + offset

    a = 42 * jnp.ones(
        (num_blocks_w, num_blocks_x, num_blocks_y, num_threads),
        dtype=jnp.int32,
    )
    kernel = plgpu.kernel(
        _kernel,
        out_shape=jax.ShapeDtypeStruct(a.shape, a.dtype),
        grid=(num_blocks_w, num_blocks_x, num_blocks_y),
        grid_names=('w', 'x', 'y'),
        num_threads=num_threads,
        thread_name='z',
        interpret=mosaic_interpret.InterpretParams(detect_races=True),
    )
    y = kernel(a)
    expected = a + jnp.arange(
        num_blocks_w * num_blocks_x * num_blocks_y * num_threads,
        dtype=a.dtype,
    ).reshape(a.shape)
    np.testing.assert_array_equal(y, expected)
    self.assertFalse(mosaic_interpret.get_races().races_found)
  @jtu.parameterized.product(
      tile_x=[1, 2, 4],
      tile_y=[1, 2, 4],
      tile_z=[1, 2, 4],
      swap_grid_axes=[True, False],
  )
  def test_add_over_grid(self, tile_x, tile_y, tile_z, swap_grid_axes):
    """Tiled elementwise add over a 2D grid with threads on the z axis."""
    dtype = jnp.int32
    x, y, z = 4, 4, 4
    assert x % tile_x == 0
    assert y % tile_y == 0
    assert z % tile_z == 0
    a = jnp.arange(x * y * z, dtype=dtype).reshape((x, y, z))
    b = jnp.ones((x, y, z), dtype=dtype)
    x_iters = x // tile_x
    y_iters = y // tile_y
    z_iters = z // tile_z

    def _kernel(a_gmem, b_gmem, out_gmem):
      xi = jax.lax.axis_index('x')
      yi = jax.lax.axis_index('y')
      zi = jax.lax.axis_index('z')
      xi_slice = pl.ds(xi * tile_x, tile_x)
      yi_slice = pl.ds(yi * tile_y, tile_y)
      zi_slice = pl.ds(zi * tile_z, tile_z)
      out_gmem[xi_slice, yi_slice, zi_slice] = (
          a_gmem[xi_slice, yi_slice, zi_slice]
          + b_gmem[xi_slice, yi_slice, zi_slice]
      )

    kernel = plgpu.kernel(
        _kernel,
        out_shape=jax.ShapeDtypeStruct((x, y, z), dtype),
        # Swapping grid axes must not change the result.
        grid=_maybe_reverse((x_iters, y_iters), swap_grid_axes),
        grid_names=_maybe_reverse(('x', 'y'), swap_grid_axes),
        num_threads=z_iters,
        thread_name='z',
        interpret=mosaic_interpret.InterpretParams(detect_races=True),
    )
    expected = a + b
    y = kernel(a, b)
    np.testing.assert_array_equal(y, expected)
    self.assertFalse(mosaic_interpret.get_races().races_found)
@jtu.parameterized.product(
tile_m=[1, 2, 4],
tile_k=[1, 2, 4],
tile_n=[1, 2, 4],
swap_grid_axes=[True, False],
)
def test_matmul_over_grid_with_barrier_and_smem(
self, tile_m, tile_k, tile_n, swap_grid_axes
):
dtype = jnp.int32
m, k, n = 4, 4, 4
assert m % tile_m == 0
assert k % tile_k == 0
assert n % tile_n == 0
a = jnp.arange(16, dtype=dtype).reshape((m, k))
b = jnp.ones((k, n), dtype=dtype)
m_iters = m // tile_m
k_iters = k // tile_k
n_iters = n // tile_n
def _kernel(a_gmem, b_gmem, out_gmem, acc_smem, barrier):
mi = jax.lax.axis_index('m')
ki = jax.lax.axis_index('k')
ni = jax.lax.axis_index('n')
mi_slice = pl.ds(mi * tile_m, tile_m)
ki_slice = pl.ds(ki * tile_k, tile_k)
ni_slice = pl.ds(ni * tile_n, tile_n)
# We map the reduced dimension, i.e. `k`, to the thread dimension. This
# allows us to do the accumulation into an `SMEM` buffer, i.e. `acc_smem`.
# (Note that a fresh `SMEM` buffer is allocated for each grid point, but
# for each fixed grid point, a single `SMEM` buffer is shared across the
# threads.) We then need to use barriers to sequentialize access to the
# `SMEM` buffer between the threads. (Note that the specific sequential
# order of the updates to `acc_smem` does not matter, so long as there are
# no races.)
@pl.when(ki == 0)
def _():
acc_smem[...] = jnp.zeros(acc_smem.shape, dtype=acc_smem.dtype)
plgpu.barrier_arrive(barrier.at[0])
plgpu.barrier_wait(barrier.at[ki])
# TODO(nrink): Matrix multiplication with `@` is not supported for real
# GPU kernels (but the GPU kernel interpreter allows this). Replace this
# with a `wgmma` or `tcgen05_mma` once these are supported by the GPU
# kernel interpreter.
acc_smem[...] += a_gmem[mi_slice, ki_slice] @ b_gmem[ki_slice, ni_slice]
plgpu.barrier_arrive(barrier.at[(ki + 1) % k_iters])
@pl.when(ki == 0)
def _():
plgpu.barrier_wait(barrier.at[0])
out_gmem[mi_slice, ni_slice] = acc_smem[...]
kernel = plgpu.kernel(
_kernel,
out_shape=jax.ShapeDtypeStruct((m, n), dtype),
scratch_shapes=dict(
acc_smem=plgpu.SMEM((tile_m, tile_n), dtype),
barrier=plgpu.Barrier(num_barriers=k_iters),
),
grid=_maybe_reverse((m_iters, n_iters), swap_grid_axes),
grid_names=_maybe_reverse(('m', 'n'), swap_grid_axes),
num_threads=k_iters,
thread_name='k',
interpret=mosaic_interpret.InterpretParams(detect_races=True),
)
expected = a @ b
y = kernel(a, b)
np.testing.assert_array_equal(y, expected)
self.assertFalse(mosaic_interpret.get_races().races_found)
def test_matmul_over_grid_with_race(self, tile_m=4, tile_k=2, tile_n=4):
dtype = jnp.int32
m, k, n = 4, 4, 4
assert m % tile_m == 0
assert k % tile_k == 0
assert n % tile_n == 0
a = jnp.arange(16, dtype=dtype).reshape((m, k))
b = jnp.ones((k, n), dtype=dtype)
m_iters = m // tile_m
k_iters = k // tile_k
n_iters = n // tile_n
def _kernel(a_gmem, b_gmem, _, acc_smem):
mi = jax.lax.axis_index('m')
ki = jax.lax.axis_index('k')
ni = jax.lax.axis_index('n')
mi_slice = pl.ds(mi * tile_m, tile_m)
ki_slice = pl.ds(ki * tile_k, tile_k)
ni_slice = pl.ds(ni * tile_n, tile_n)
# The two threads race to update `acc_smem`. We do not bother with
# initializing `acc_mem` to zero, nor with copying the final result out to
# `GMEM`, as is done in the correct (i.e. race-free) test above (i.e. in
# `test_matmul_over_grid_with_barrier_and_smem`). This would only
# introduce additional races.
#
# TODO(nrink): Matrix multiplication with `@` is not supported for real
# GPU kernels (but the GPU kernel interpreter allows this). Replace this
# with a `wgmma` or `tcgen05_mma` once these are supported by the GPU
# kernel interpreter.
acc_smem[...] += a_gmem[mi_slice, ki_slice] @ b_gmem[ki_slice, ni_slice]
kernel = plgpu.kernel(
_kernel,
out_shape=jax.ShapeDtypeStruct((m, n), dtype),
scratch_shapes=dict(
acc_smem=plgpu.SMEM((tile_m, tile_n), dtype),
),
grid=(m_iters, n_iters),
grid_names=('m', 'n'),
num_threads=k_iters,
thread_name='k',
interpret=mosaic_interpret.InterpretParams(detect_races=True),
)
kernel(a, b)
self.assertTrue(mosaic_interpret.get_races().races_found)
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
| {
"repo_id": "jax-ml/jax",
"file_path": "tests/pallas/gpu_pallas_interpret_test.py",
"license": "Apache License 2.0",
"lines": 763,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
jax-ml/jax:tests/multiprocess/thread_guard_test.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test thread guard for multiprocess arrays."""
import concurrent.futures
import jax
from jax._src import test_multiprocess as jt_multiprocess
import jax.numpy as jnp
@jax.jit
def f(x):
y = jnp.square(x)
z = jnp.cos(y)
return jnp.sum(z)
@jax.jit
def g(x):
y = jnp.square(x)
z = jnp.sin(y)
return z + 1
class ThreadGuardTest(jt_multiprocess.MultiProcessTest):
# Use a single test method since the thread guard affects global state and
# tests can't run in parallel.
def test_thread_guard(self):
mesh = jax.make_mesh(
(jax.device_count(),), ('i',),
axis_types=(jax.sharding.AxisType.Explicit,), devices=jax.devices())
sharding = jax.sharding.NamedSharding(mesh, jax.sharding.PartitionSpec('i'))
x = jnp.ones((jax.device_count(),))
arr = jax.device_put(x, sharding)
# Test slow JIT path.
with self.assertRaisesRegex(
(RuntimeError, ValueError), 'thread guard was set'):
with (jax.thread_guard(True),
concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor):
y = executor.submit(f, arr)
jax.block_until_ready(y.result())
# Test fast JIT path.
x = g(arr)
x = g(x)
with self.assertRaisesRegex(
(RuntimeError, ValueError), 'thread guard was set'):
with (jax.thread_guard(True),
concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor):
y = executor.submit(g, x)
jax.block_until_ready(y.result())
# Test local devices only.
mesh = jax.make_mesh(
(jax.local_device_count(),), ('i',),
axis_types=(jax.sharding.AxisType.Explicit,),
devices=jax.local_devices())
sharding = jax.sharding.NamedSharding(mesh, jax.sharding.PartitionSpec('i'))
x = jnp.ones((jax.local_device_count(),))
x = jax.device_put(x, sharding)
with jax.thread_guard(True):
z = g(x)
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
y = executor.submit(f, x).result()
out = y + z
jax.block_until_ready(out) # No cross-process arrays, so no error.
# Test nested thread guard context managers.
with jax.thread_guard(True):
y = g(arr)
with jax.thread_guard(True):
z = g(y) # No error when context manager is redundantly nested.
jax.block_until_ready(z)
with jax.thread_guard(False):
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
w = executor.submit(f, z).result()
jax.block_until_ready(w) # No error, thread guard deactivated.
# Thread guard is re-activated by the outer context manager.
with self.assertRaisesRegex(
(RuntimeError, ValueError), 'thread guard was set'):
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
v = executor.submit(g, w)
jax.block_until_ready(v.result())
# No error on the call in a different thread outside the context manager.
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
y = executor.submit(f, x).result()
jax.block_until_ready(y)
# Test thread guard in a subthread.
def f_with_thread_guard_should_raise(x):
with (jax.thread_guard(True),
concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor):
return executor.submit(f, x).result()
with self.assertRaisesRegex(
(RuntimeError, ValueError), 'thread guard was set'):
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
y = executor.submit(f_with_thread_guard_should_raise, arr).result()
jax.block_until_ready(y)
# Test nested thread guard context managers in different threads raises.
def f_with_thread_guard(x):
with jax.thread_guard(True):
return f(x)
with self.assertRaisesRegex(
(RuntimeError, ValueError),
'Nested thread guards in different threads are not supported'):
with jax.thread_guard(True):
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
y = executor.submit(f_with_thread_guard, arr).result()
jax.block_until_ready(y)
if __name__ == '__main__':
jt_multiprocess.main()
| {
"repo_id": "jax-ml/jax",
"file_path": "tests/multiprocess/thread_guard_test.py",
"license": "Apache License 2.0",
"lines": 111,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
jax-ml/jax:jax/_src/stateful_rng.py | # Copyright 2026 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Stateful, implicitly-updated PRNG implementation based on mutable refs.
"""
from __future__ import annotations
import dataclasses
import operator
from collections.abc import Sequence
from jax._src import api_util
from jax._src import core
from jax._src import dtypes
from jax._src import numpy as jnp
from jax._src import random
from jax._src import ref
from jax._src import tree_util
from jax._src import typing
from jax._src.state import primitives as ref_primitives
from jax._src.state import types as state_types
from jax._src.typing import Array, ArrayLike, DTypeLike
import numpy as np
def _canonicalize_size(size: int | Sequence[int] | None, *args: ArrayLike) -> tuple[int, ...]:
if size is None:
return np.broadcast_shapes(*(np.shape(arg) for arg in args))
elif isinstance(size, (int, np.number)):
return (operator.index(size),)
else:
return tuple(map(operator.index, size))
@tree_util.register_dataclass
@dataclasses.dataclass(frozen=True)
class StatefulPRNG:
"""Stateful JAX random generator.
This should be instantiated using the :func:`jax.experimental.random.stateful_rng` function.
Attributes:
_base_key: a typed JAX PRNG key object (see :func:`jax.random.key`).
_counter: a scalar integer wrapped in a :class:`jax.Ref`.
Examples:
>>> from jax.experimental import random
>>> rng = random.stateful_rng(42)
>>> rng
StatefulPRNG(_base_key=Array((), dtype=key<fry>) overlaying:
[ 0 42], _counter=Ref(0, dtype=int32, weak_type=True))
"""
_base_key: Array
_counter: core.Ref
def __post_init__(self):
if self._base_key is api_util.SENTINEL:
return
if not (isinstance(self._base_key, Array)
and dtypes.issubdtype(self._base_key.dtype, dtypes.prng_key)):
raise ValueError(f"Expected base_key to be a typed PRNG key; got {self._base_key}")
# TODO(jakevdp): how to validate a traced mutable array?
if not (isinstance(self._counter, core.Ref) or
(isinstance(self._counter, core.Tracer)
and isinstance(self._counter.aval, state_types.AbstractRef))):
raise ValueError(f"Expected counter to be a scalar integer ref; got {self._counter}")
def key(self, shape: int | Sequence[int] = ()) -> Array:
"""Generate a new JAX PRNGKey, updating the internal state.
Args:
shape: an optional shape if returning multiple keys.
Returns:
A new, independent PRNG key with the same impl/dtype as
``self._base_key``.
Examples:
>>> from jax.experimental import random
>>> rng = random.stateful_rng(0)
>>> rng.key()
Array((), dtype=key<fry>) overlaying:
[1797259609 2579123966]
>>> rng.key()
Array((), dtype=key<fry>) overlaying:
[ 928981903 3453687069]
"""
if self._base_key.shape:
# TODO(jakevdp): better error message.
raise ValueError("cannot operate on split stateful generator")
key = random.fold_in(self._base_key, ref_primitives.ref_get(self._counter))
ref_primitives.ref_addupdate(self._counter, ..., 1) # pytype: disable=wrong-arg-types # pytype bug?
shape_tuple = _canonicalize_size(shape)
return random.split(key, shape_tuple) if shape_tuple else key
def random(
self,
size: int | Sequence[int] | None = None,
dtype: DTypeLike = float,
):
"""Return random floats in the half-open interval [0.0, 1.0)."""
# TODO(jakevdp): write docstring
return random.uniform(self.key(), shape=_canonicalize_size(size), dtype=dtype)
def uniform(
self,
low: ArrayLike = 0,
high: ArrayLike = 1,
size: int | Sequence[int] | None = None,
*,
dtype: DTypeLike = float,
) -> Array:
"""Draw uniformly distributed pseudorandom values."""
# TODO(jakevdp): write docstring
return random.uniform(self.key(), _canonicalize_size(size, low, high),
minval=low, maxval=high, dtype=dtype)
def normal(
self,
loc: ArrayLike = 0,
scale: ArrayLike = 1,
size: int | Sequence[int] | None = None,
*,
dtype: DTypeLike = float,
) -> Array:
"""Draw normally-distributed pseudorandom values."""
# TODO(jakevdp): write docstring
norm = random.normal(self.key(), _canonicalize_size(size, loc, scale), dtype)
return (jnp.asarray(loc) + jnp.asarray(scale) * norm).astype(dtype)
def integers(
self,
low: ArrayLike,
high: ArrayLike | None = None,
size: int | Sequence[int] | None = None,
*,
dtype: DTypeLike = int,
) -> Array:
"""Draw pseudorandom integers."""
# TODO(jakevdp): write docstring
if high is None:
low, high = 0, low
return random.randint(self.key(), _canonicalize_size(size, low, high),
minval=low, maxval=high, dtype=dtype)
def split(self, num: int | Sequence[int]) -> StatefulPRNG:
"""Create independent child generators suitable for use in :func:`jax.vmap`.
Args:
num: integer or sequence of integers specifying the split shape
Returns:
a single StatefulPRNG object with split contents, suitable for use
with :func:`jax.vmap`
Examples:
>>> import jax
>>> from jax.experimental import random
>>> rng = random.stateful_rng(123)
>>> x = jax.numpy.zeros(3)
>>> def f(rng, x):
... return x + rng.uniform()
>>> jax.vmap(f)(rng.split(3), x)
Array([0.35525954, 0.21937883, 0.5336956 ], dtype=float32)
See also:
- :meth:`jax.experimental.random.StatefulPRNG.spawn`: This is similar to ``split``, but
returns a Python list of :class:`StatefulPRNG`` objects.
"""
return StatefulPRNG(
_base_key=self.key(num),
_counter=ref.new_ref(jnp.zeros(num, dtype=int))
)
def spawn(self, n_children: int) -> list['StatefulPRNG']:
"""Create a list of independent child generators.
Args:
n_children: non-negative integer.
Returns:
A list of length ``n_children`` containing new independent ``StatefulPRNG`` instances
spawned from the original instance.
Examples:
>>> from jax.experimental import random
>>> rng = random.stateful_rng(123)
>>> child_rngs = rng.spawn(2)
>>> [r.integers(0, 10, 2) for r in child_rngs]
[Array([4, 5], dtype=int32), Array([2, 1], dtype=int32)]
See also:
- :meth:`jax.experimental.random.StatefulPRNG.split`: this is similar to spawn, but returns
a single mapped :class:`jax.experimental.random.StatefulPRNG`` which can be passed to
:func:`jax.vmap`.
"""
return [self.__class__(key, ref.new_ref(0)) for key in self.key(n_children)]
def stateful_rng(seed: typing.ArrayLike | None = None, *,
impl: random.PRNGSpecDesc | None = None) -> StatefulPRNG:
"""
Experimental stateful RNG with implicitly-updated state.
This implements a stateful PRNG API similar to :func:`numpy.random.default_rng`.
It is compatible with JAX transformations like :func:`~jax.jit` and others,
with a few exceptions mentioned in the Notes below.
.. note::
This stateful PRNG API is a convenience wrapper around JAX's classic
stateless, explicitly updated PRNG, described in :mod:`jax.random`.
For performance-critical applications, it is recommended to use
:func:`jax.random.key` with explicit random state semantics.
For a discussion of design considerations for this API, refer to
:ref:`stateful-randomness-jep`.
Args:
seed: an optional 64- or 32-bit integer used as the value of the key.
This must be specified if the generator is instantiated within transformed
code; when used at the top level of the program, it may be omitted in
which case the RNG will be seeded using the default NumPy seeding.
impl: optional string specifying the PRNG implementation (e.g.
``'threefry2x32'``)
Returns:
A :class:`~jax.experimental.random.StatefulPRNG` object, with methods for generating
random values.
Notes:
The :class:`~jax.experimental.random.StatefulPRNG` object created by this method uses
:func:`~jax.Ref` objects to allow implicit updates of state, and thus
inherits some of its limitiations. For example:
- :class:`StatefulPRNG` objects cannot be among the return values of functions
wrapped in JIT or other JAX transformations. This means in particular
they cannot be used as `carry` values for :func:`jax.lax.scan`,
:func:`jax.lax.while_loop`, and other JAX control flow.
- :class:`StatefulPRNG` objects cannot be used together with
:func:`jax.checkpoint` or :func:`jax.remat`; in these cases it's best to
use the :meth:`StatefulPRNG.key` method to produce a standard JAX PRNG key.
Examples:
>>> from jax.experimental import random
>>> rng = random.stateful_rng(42)
Repeated draws implicitly update the key:
>>> rng.uniform()
Array(0.5302608, dtype=float32)
>>> rng.uniform()
Array(0.72766423, dtype=float32)
This also works under transformations like :func:`jax.jit`:
>>> import jax
>>> jit_uniform = jax.jit(rng.uniform)
>>> jit_uniform()
Array(0.6672406, dtype=float32)
>>> jit_uniform()
Array(0.3890121, dtype=float32)
Keys can be generated directly if desired:
>>> rng.key()
Array((), dtype=key<fry>) overlaying:
[2954079971 3276725750]
>>> rng.key()
Array((), dtype=key<fry>) overlaying:
[2765691542 824333390]
"""
if seed is None:
if not core.trace_ctx.is_top_level():
raise TypeError(
"When used within transformed code, jax.experimental.random.stateful_rng()"
" requires an explicit seed to be set.")
entropy = np.random.SeedSequence().entropy
assert isinstance(entropy, int)
seed = np.int64(entropy & np.iinfo(np.int64).max)
assert seed is not None
return StatefulPRNG(
_base_key=random.key(seed, impl=impl),
_counter=ref.new_ref(0)
)
| {
"repo_id": "jax-ml/jax",
"file_path": "jax/_src/stateful_rng.py",
"license": "Apache License 2.0",
"lines": 251,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:jax/experimental/random.py | # Copyright 2026 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Experimental random APIs."""
from jax._src.stateful_rng import (
stateful_rng as stateful_rng,
StatefulPRNG as StatefulPRNG,
)
| {
"repo_id": "jax-ml/jax",
"file_path": "jax/experimental/random.py",
"license": "Apache License 2.0",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:tests/stateful_rng_test.py | # Copyright 2026 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest
import numpy as np
import jax
import jax.numpy as jnp
from jax.experimental import random as exp_random
from jax._src import config
from jax._src import test_util as jtu
config.parse_flags_with_absl()
class StatefulRNGTest(jtu.JaxTestCase):
def test_stateful_rng_instantiation(self, seed=547389):
rng = exp_random.stateful_rng(seed)
key = jax.random.key(seed)
self.assertEqual(key, rng._base_key)
self.assertEqual(rng._counter.shape, ())
self.assertEqual(0, rng._counter[...])
def test_stateful_rng_counter_increment(self, seed=7865943):
rng = exp_random.stateful_rng(seed)
original_key = rng._base_key
self.assertEqual(0, rng._counter[...])
_ = jax.jit(rng.key)() # implicit update
self.assertEqual(original_key, rng._base_key) # base key does not change
self.assertEqual(1, rng._counter[...]) # counter is incremented
def test_stateful_rng_invalid_instantiation(self):
valid_key = jax.random.key(0)
valid_counter = jax.new_ref(0)
invalid_key = jax.numpy.array([0, 1], dtype='uint32')
invalid_counter = 0
with self.assertRaisesRegex(ValueError, "Expected base_key to be a typed PRNG key"):
exp_random.StatefulPRNG(invalid_key, valid_counter)
with self.assertRaisesRegex(ValueError, "Expected counter to be a scalar integer ref"):
exp_random.StatefulPRNG(valid_key, invalid_counter)
def testRepeatedKeys(self, seed=578543):
prng = exp_random.stateful_rng(seed)
self.assertNotEqual(prng.key(), prng.key())
def testShapedKeys(self, seed=7589432):
prng = exp_random.stateful_rng(seed)
keys1 = prng.key(10)
self.assertEqual(keys1.shape, (10,))
self.assertTrue(jax.dtypes.issubdtype(keys1.dtype, jax.dtypes.prng_key))
keys2 = prng.key(10)
self.assertEqual(keys1.shape, (10,))
self.assertTrue(jax.dtypes.issubdtype(keys2.dtype, jax.dtypes.prng_key))
self.assertFalse((keys1 == keys2).any())
def testRepeatedDraws(self, seed=328090):
prng = exp_random.stateful_rng(seed)
vals1 = prng.uniform(size=10)
vals2 = prng.uniform(size=10)
self.assertTrue((vals1 != vals2).all())
def testRepeatedDrawsJIT(self, seed=328090):
prng = exp_random.stateful_rng(seed)
@jax.jit
def get_values(prng):
return prng.uniform(size=10)
vals1 = get_values(prng)
vals2 = get_values(prng)
self.assertTrue((vals1 != vals2).all())
@jtu.sample_product(
size=[None, 2, (5, 2)],
dtype=jtu.dtypes.floating,
)
def testRandom(self, size, dtype):
rng = exp_random.stateful_rng(578943)
vals = rng.random(size, dtype)
shape = np.broadcast_shapes(size or ())
self.assertEqual(vals.shape, shape)
self.assertEqual(vals.dtype, dtype)
self.assertTrue((vals < 1).all())
self.assertTrue((vals >= 0).all())
@jtu.sample_product(
low=[0, 1, np.array([0, 1])],
high=[2, 3, np.array([2, 3])],
size=[None, 2, (5, 2)],
dtype=jtu.dtypes.floating,
)
@jax.numpy_dtype_promotion('standard')
@jax.numpy_rank_promotion('allow')
def testUniform(self, low, high, size, dtype):
rng = exp_random.stateful_rng(473289)
vals = rng.uniform(low, high, size, dtype=dtype)
shape = np.broadcast_shapes(np.shape(low), np.shape(high), size or ())
self.assertEqual(vals.shape, shape)
self.assertEqual(vals.dtype, dtype)
self.assertTrue((vals < high).all())
self.assertTrue((vals >= low).all())
@jtu.sample_product(
loc=[0, 1, np.array([0, 1])],
scale=[2, 3, np.array([2, 3])],
size=[None, 2, (5, 2)],
dtype=jtu.dtypes.floating,
)
@jax.numpy_dtype_promotion('standard')
@jax.numpy_rank_promotion('allow')
def testNormal(self, loc, scale, size, dtype):
rng = exp_random.stateful_rng(473289)
vals = rng.normal(loc, scale, size, dtype=dtype)
shape = np.broadcast_shapes(np.shape(loc), np.shape(scale), size or ())
self.assertEqual(vals.shape, shape)
self.assertEqual(vals.dtype, dtype)
@jtu.sample_product(
low=[0, 1, np.array([0, 1])],
high=[10, 15, np.array([10, 15])],
size=[None, 2, (5, 2)],
dtype=jtu.dtypes.integer,
)
@jax.numpy_dtype_promotion('standard')
@jax.numpy_rank_promotion('allow')
def testIntegers(self, low, high, size, dtype):
rng = exp_random.stateful_rng(473289)
vals = rng.integers(low, high, size, dtype=dtype)
shape = np.broadcast_shapes(np.shape(low), np.shape(high), size or ())
self.assertEqual(vals.shape, shape)
self.assertEqual(vals.dtype, dtype)
self.assertTrue((vals < high).all())
self.assertTrue((vals >= low).all())
def testSpawn(self):
rng = exp_random.stateful_rng(758943)
rngs = rng.spawn(4)
for child_rng in rngs:
self.assertNotEqual(rng._base_key, child_rng._base_key)
self.assertEqual(0, child_rng._counter[...])
@jtu.sample_product(shape=[4, (5,), (2, 3)])
def testSplit(self, shape):
rng = exp_random.stateful_rng(758943)
rng_split = rng.split(shape)
expected_shape = (shape,) if isinstance(shape, int) else shape
self.assertEqual(rng_split._base_key.dtype, rng._base_key.dtype)
self.assertEqual(rng_split._base_key.shape, expected_shape)
self.assertIsInstance(rng_split._counter, jax.Ref)
self.assertEqual(rng_split._counter.shape, expected_shape)
def testVmapMapped(self):
seed = 758943
N = 4
x = np.arange(N, dtype=float)
def f(rng, x):
return x + rng.uniform()
rng = exp_random.stateful_rng(seed)
expected = x + jnp.array([rng.uniform() for rng in rng.spawn(N)])
rng = exp_random.stateful_rng(seed)
actual = jax.vmap(f)(rng.split(N), x)
self.assertArraysEqual(actual, expected)
def testVmapUnmapped(self):
seed = 758943
x = np.arange(4, dtype=float)
rng = exp_random.stateful_rng(seed)
def f(rng, x):
return x + rng.uniform()
with self.assertRaisesRegex(Exception, "performing an addupdate operation with vmapped value"):
jax.vmap(f, in_axes=(None, 0))(rng, x)
def testScanClosure(self):
seed = 432932
def f1(seed):
rng = exp_random.stateful_rng(seed)
def scan_f(_, __):
return None, rng.uniform()
return jax.lax.scan(scan_f, None, length=10)[1]
def f2(seed):
rng = exp_random.stateful_rng(seed)
return jax.numpy.array([rng.uniform() for i in range(10)])
self.assertArraysAllClose(f1(seed), f2(seed))
def testDefaultSeed(self):
rng = exp_random.stateful_rng()
x = rng.uniform(size=10)
self.assertEqual(x.shape, (10,))
def testDefaultSeedErrorUnderJIT(self):
def f():
return exp_random.stateful_rng().uniform(size=10)
with self.assertRaisesRegex(TypeError, "When used within transformed code"):
jax.jit(f)()
def testDefaultSeedErrorUnderGrad(self):
def f(x):
return x + exp_random.stateful_rng().uniform()
with self.assertRaisesRegex(TypeError, "When used within transformed code"):
jax.grad(f)(1.0)
def testDefaultSeedErrorUnderVmap(self):
def f(x):
return x + exp_random.stateful_rng().uniform()
with self.assertRaisesRegex(TypeError, "When used within transformed code"):
jax.vmap(f)(jnp.arange(5.0))
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| {
"repo_id": "jax-ml/jax",
"file_path": "tests/stateful_rng_test.py",
"license": "Apache License 2.0",
"lines": 193,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
jax-ml/jax:tests/profiler_session_test.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
from absl.testing import absltest
from absl.testing import parameterized
import jax
from jax._src import test_util as jtu
import jax.numpy as jnp
_TEST_SESSION_ID = 'my_custom_session_123'
@jtu.thread_unsafe_test_class()
class ProfilerSessionTest(jtu.JaxTestCase):
def setUp(self):
super().setUp()
# Ensure that any running profiler is stopped before starting the test.
# This is in setUp rather than tearDown to defend against previous tests
# that may have crashed or failed to clean up properly.
try:
jax.profiler.stop_trace()
except RuntimeError:
pass
@parameterized.named_parameters(
dict(testcase_name='without_session_id', session_id=None),
dict(testcase_name='with_empty_session_id', session_id=''),
dict(testcase_name='with_custom_session_id', session_id=_TEST_SESSION_ID),
)
def test_programmatic_profiling(self, session_id: str | None):
tmpdir = pathlib.Path(self.create_tempdir())
options = jax.profiler.ProfileOptions()
if session_id is not None:
options.session_id = session_id
with jax.profiler.trace(tmpdir, profiler_options=options):
jax.pmap(lambda x: jax.lax.psum(x + 1, 'i'), axis_name='i')(
jnp.ones(jax.local_device_count())
).block_until_ready()
profile_plugin_dir = tmpdir / 'plugins' / 'profile'
self.assertTrue(profile_plugin_dir.exists(), f'Not found at {profile_plugin_dir}')
subdirs = [x.name for x in profile_plugin_dir.iterdir() if x.is_dir()]
self.assertLen(subdirs, 1)
if session_id is None or not session_id:
self.assertNotIn(_TEST_SESSION_ID, subdirs)
self.assertNotIn('', subdirs)
target_dir = subdirs[0]
else:
self.assertIn(session_id, subdirs)
target_dir = session_id
session_dir = profile_plugin_dir / target_dir
pb_files = list(session_dir.glob('*.xplane.pb'))
self.assertNotEmpty(pb_files, f'No .xplane.pb files found in {session_dir}')
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
| {
"repo_id": "jax-ml/jax",
"file_path": "tests/profiler_session_test.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
jax-ml/jax:jax/_src/pallas/mosaic/interpret/thread_map.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from concurrent import futures
import functools
import jax
from jax._src import callback
import jax.core as jax_core
import jax.numpy as jnp
def _run_jaxpr(jaxpr, consts, *args):
def _run(jaxpr, consts, *args):
jax_core.eval_jaxpr(jaxpr, consts, *args)
traced = jax.jit(_run, static_argnums=(0,)).trace(jaxpr, consts, *args)
traced.lower().compile()(consts, *args)
return
def _thread_map_callback(jaxpr, num_threads, consts, invals):
num_threads = int(num_threads)
threads = []
with futures.ThreadPoolExecutor(max_workers=num_threads) as executor:
for i in range(num_threads):
# `jaxpr` is the traced representation of a function whose first argument
# is the thread ID. Hence,
# - prepend the thread ID onto the `invals`; and
# - flatten the arguments that are to be passed through to the
# evaluation of `jaxpr`.
args = (jnp.int32(i), *invals)
flat_args, _ = jax.tree.flatten(args)
threads.append(executor.submit(_run_jaxpr, jaxpr, consts, *flat_args))
exceptions = []
for i in range(num_threads):
try:
threads[i].result()
except Exception as e:
exceptions.append(e)
if exceptions:
# TODO(jburnim): Use ExceptionGroup once JAX requires Python 3.11.
# raise ExceptionGroup('Exceptions raised during _thread_map', exceptions)
raise exceptions[0]
def _call_threadmap_callback(jaxpr, num_threads, consts, invals):
  """Stages `_thread_map_callback` into the computation as an ordered io_callback.

  The callback produces no results (empty result shape); it is ordered so
  thread-map executions are not reordered relative to other effects.
  """
  # NOTE: At runtime, _thread_map_callback will lower and compile the
  # given jaxpr. (JAX's caches should ensure the jaxpr is only lowered and
  # compiled once.)
  #
  # TODO(jburnim): Would it be worth trying to lower/compile the jaxpr at
  # lowering/compilation time? E.g., by using a custom primitive here, could
  # we lower/compile jaxpr at lowering time, and then pass the compiled
  # function to the callback?
  return callback.io_callback(
      functools.partial(_thread_map_callback, jaxpr),
      (),  # No results.
      num_threads,
      consts,
      invals,
      ordered=True,
  )
def thread_map(f, num_threads, *args):
  """Executes `f(thread_id, *args)` for `num_threads` threads."""
  if num_threads == 1:
    # Single thread: call `f` inline; no callback machinery needed.
    f(jnp.int32(0), *args)
    return

  def _wrapped(core_or_thread_index, *rest):
    f(core_or_thread_index, *rest)
    return ()

  closed = jax.make_jaxpr(_wrapped)(jnp.int32(0), *args)
  _call_threadmap_callback(closed.jaxpr, num_threads, closed.consts, args)
| {
"repo_id": "jax-ml/jax",
"file_path": "jax/_src/pallas/mosaic/interpret/thread_map.py",
"license": "Apache License 2.0",
"lines": 75,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:jax/_src/pallas/mosaic/interpret/utils.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Sequence
import dataclasses
import enum
import math
import threading
from typing import Any, Literal
from jax import lax
from jax._src import core as jax_core
from jax._src.pallas import primitives
from jax._src.util import safe_map
import jax.numpy as jnp
import numpy as np
def get_uninitialized_value(
    dtype, uninitialized_memory: Literal["nan", "zero"]
):
  """Returns the scalar used to fill uninitialized buffers of `dtype`.

  In "nan" mode, floating dtypes get NaN, integer dtypes get their maximum
  value, and booleans get True. In "zero" mode every dtype gets 0. Any other
  combination is unsupported.

  Raises:
    NotImplementedError: For unsupported mode/dtype combinations.
  """
  if uninitialized_memory == "nan":
    if jnp.issubdtype(dtype, jnp.floating):
      return np.nan
    if jnp.issubdtype(dtype, jnp.integer):
      return jnp.iinfo(dtype).max
    if jnp.issubdtype(dtype, jnp.bool):
      return True
  if uninitialized_memory == "zero":
    return 0
  raise NotImplementedError(uninitialized_memory + " + " + str(dtype))
@dataclasses.dataclass(frozen=True, kw_only=True)
class InterpretParams:
  """Parameters for kernel interpret mode.

  Interpret mode is a way to run Pallas kernels on CPU, while simulating
  TPU/GPU shared memory, communication, and synchronization operations.

  Attributes:
    detect_races: If True, a dynamic, happens-before race detector will be
      used to detect data races during kernel interpretation. If any races
      are detected, a message will be printed and `races.races_found` will
      be set to True.
      Default: False.
    out_of_bounds_reads: If "raise", an exception will be raised on any
      out-of-bounds read of a buffer. If "uninitialized", any parts of the
      read that are out-of-bounds will return the value used to fill
      uninitialized memory, which can be configured via
      `uninitialized_memory`.
      Default: "raise".
    skip_floating_point_ops: If True, operations that produce only floating
      point values will not be interpreted; instead, their results will be
      replaced with arrays all of `jnp.inf`. Additionally any floating point
      operands to any operation will be replaced with (arrays of) `jnp.inf`.
      Default: False.
    uninitialized_memory: If "nan", allocated buffers are initialized to
      contain all NaNs (or to their maximum possible value for integers).
      If "zero", allocated buffers are initialized to all zeros.
      Default: "nan".
    num_cores_or_threads: The number of cores per device (TPU) or threads per
      block (GPU). Note that for interpreting GPU kernels, we currently only
      support a single block in the grid. (So the number of threads per block
      on the GPU can be thought of as the number of threads that runs
      concurrently on the GPU.)
      Default: 1.
    vector_clock_size: The number of entries in the vector clocks. This
      should be an integer bigger than the total number of cores, i.e.
      bigger than `number of devices * num_cores_per_device`. If `None`, the
      vector clock size that is used in the interpreter will default to
      twice the total number of cores.
      Default: None.
  """
  detect_races: bool = False
  out_of_bounds_reads: Literal["raise", "uninitialized"] = "raise"
  skip_floating_point_ops: bool = False
  uninitialized_memory: Literal["nan", "zero"] = "nan"
  num_cores_or_threads: int = 1
  vector_clock_size: int | None = None

  def __post_init__(self):
    # Validate eagerly so a bad configuration fails at construction time.
    if self.num_cores_or_threads < 1:
      raise ValueError(
          "Number of cores or threads must be at least 1, but got"
          f" {self.num_cores_or_threads}."
      )

  def get_vector_clock_size(self, num_devices) -> int:
    """Returns the number of entries to use in vector clocks.

    Raises:
      ValueError: If an explicit `vector_clock_size` was configured that is
        not strictly larger than the total number of cores/threads.
    """
    num_cores_or_threads = num_devices * self.num_cores_or_threads
    if self.vector_clock_size is not None:
      if num_cores_or_threads >= self.vector_clock_size:
        raise ValueError(
            f"Vector clock size ({self.vector_clock_size}) must be greater than"
            f" the total number of cores/threads ({num_cores_or_threads})."
        )
      return self.vector_clock_size
    else:
      # Default to twice the total number of cores/threads.
      return 2 * num_cores_or_threads

  def get_uninitialized_array(self, shape, dtype):
    """Returns an array of `shape`/`dtype` filled with the uninitialized value."""
    return jnp.full(
        shape,
        get_uninitialized_value(dtype, self.uninitialized_memory),
        dtype,
    )

  def pad_to_block_dimension(self, value, block_shape):
    """Pads values so the shape evenly divides into block dimensions.

    For example, if values has a shape of (33, 2, 5) with a block_shape of
    (32, 2, 4), this function will pad the value of shape to (64, 2, 8).

    Args:
      value: Array to be padded.
      block_shape: Block shapes to use for padding. If None, no padding will
        be performed.

    Returns:
      A padded array.
    """
    # Round each dimension up to the next multiple of the block size.
    padded_shape = tuple(
        ((v - 1) // b + 1) * b for v, b in zip(value.shape, block_shape)
    )
    if padded_shape != value.shape:
      # Pad the trailing edge of each dimension, filling with the configured
      # uninitialized value.
      pad_width = tuple((0, a - b) for a, b in zip(padded_shape, value.shape))
      pad_value = self.get_uninitialized_array((), value.dtype)
      value = jnp.pad(value, pad_width, constant_values=pad_value)
    return value
class LoggingMode(enum.Flag):
  """Logging mode for GPU interpret mode.

  Members may be OR-ed together (this is an `enum.Flag`) to enable logging
  in several components at once.

  Attrs:
    BARRIER: Enable logging inside barrier object.
    SHARED_MEMORY: Enable logging in the shared memory object.
  """

  BARRIER = enum.auto()
  SHARED_MEMORY = enum.auto()
@dataclasses.dataclass(frozen=True, kw_only=True)
class InterpretGPUParams(InterpretParams):
  """Parameters for GPU interpret mode.

  GPU interpret mode is a way to run Pallas GPU kernels on CPU, while
  simulating a GPU's shared memory spaces (GMEM, SMEM, etc.), threads and
  synchronization operations (e.g. barriers). This mode is intended for
  debugging and testing.

  To run a kernel under GPU interpret mode, pass an instance of
  ``InterpretGPUParams`` as an argument for the ``interpret`` parameter of
  :func:`pallas_call`, :func:`core_map` or :func:`kernel`.

  NOTE: If an exception is raised while interpreting a kernel, you must call
  :func:`reset_gpu_interpret_mode_state` before using GPU interpret mode
  again in the same process.

  Attrs:
    logging_mode: Logging mode for GPU interpret mode, or None to disable
      logging.
  """

  logging_mode: LoggingMode | None = None
class Counter:
  """A thread-safe, monotonically increasing counter."""

  def __init__(self, initial_value: int):
    # Next value to hand out; guarded by `lock`.
    self.value = initial_value
    self.lock = threading.Lock()

  def get_next(self):
    """Atomically returns the current value and advances it by one."""
    with self.lock:
      result, self.value = self.value, self.value + 1
      return result
# TODO(sharadmv): De-dup this w/ the impl in primitives.py.
def _device_id_dict_to_mesh(device_id_dict, axis_sizes, axis_indices):
physical_axis_dict = {}
axis_names = axis_sizes.keys()
for axis, idx in device_id_dict.items():
if isinstance(axis, tuple) and any(a in axis_names for a in axis):
if not all(a in axis_names for a in axis):
raise NotImplementedError(
f"{axis} mixes JAX mesh and Pallas mesh grid axes"
)
axes_dimensions = [axis_sizes[name] for name in axis]
for axis_index, axis_name in enumerate(axis):
axis_size = axis_sizes[axis_name]
inner_mesh_size = math.prod(axes_dimensions[axis_index + 1 :])
minor_divisor = inner_mesh_size
# Fast path for power of 2s
if inner_mesh_size & (inner_mesh_size - 1) == 0:
shift_len = (inner_mesh_size & -inner_mesh_size).bit_length() - 1
partial_device_idx = idx >> shift_len
else:
partial_device_idx = idx // minor_divisor
if axis_size & (axis_size - 1) == 0:
device_idx = partial_device_idx & (axis_size - 1)
else:
device_idx = partial_device_idx % axis_size
physical_axis_dict[axis_name] = device_idx
else:
physical_axis_dict[axis] = idx
device_id = []
for axis in axis_names:
if axis in physical_axis_dict:
device_id.append(physical_axis_dict[axis])
else:
device_id.append(axis_indices[axis])
non_mesh_axes = {
k: v for k, v in physical_axis_dict.items() if k not in axis_names
}
return tuple(device_id), non_mesh_axes
def device_coords_to_logical_id(device_coords, axis_sizes, axis_indices):
  """Flattens per-axis device coordinates into a single logical device ID.

  `device_coords` may be a dict (resolved via `_device_id_dict_to_mesh`),
  a tuple of per-axis indices, or a scalar for a 1-D mesh. The logical ID
  is the row-major linearization over `axis_sizes`.
  """
  if isinstance(device_coords, dict):
    device_coords, non_mesh_axes = _device_id_dict_to_mesh(
        device_coords, axis_sizes, axis_indices
    )
    if non_mesh_axes:
      raise NotImplementedError(non_mesh_axes)
  if not isinstance(device_coords, tuple):
    device_coords = (device_coords,)
  assert len(device_coords) == len(axis_sizes)
  # Row-major linearization in Horner form: equivalent to summing
  # coord_i * prod(sizes[i+1:]).
  logical_id = 0
  for coord, size in zip(device_coords, axis_sizes.values()):
    logical_id = logical_id * size + coord
  return logical_id
def _device_id_to_logical(device_id, device_id_type, axis_sizes, axis_indices):
if device_id is None:
return None
if device_id_type == primitives.DeviceIdType.MESH:
return device_coords_to_logical_id(device_id, axis_sizes, axis_indices)
elif device_id_type == primitives.DeviceIdType.LOGICAL:
return device_id
else:
raise ValueError(f"Unsupported device ID type: {device_id_type}")
def is_int(dtype):
  """Whether `dtype` is an integer dtype (per `jnp.issubdtype`)."""
  return jnp.issubdtype(dtype, jnp.integer)
def is_float(dtype):
  """Whether `dtype` is a floating-point dtype (per `jnp.issubdtype`)."""
  return jnp.issubdtype(dtype, jnp.floating)
@dataclasses.dataclass(frozen=True)
class Placeholder:
  """Placeholder for use in `JaxprEnv` below instead of storing a concrete value."""
  # Shape and dtype of the array this placeholder stands in for; `JaxprEnv.read`
  # uses them to materialize a sentinel-filled array on demand.
  shape: tuple[int, ...]
  dtype: jnp.dtype
class JaxprEnv:
  """An environment for interpreting jaxprs, mapping variables to values."""

  def __init__(
      self,
      *,
      vars: Sequence[jax_core.Var] | None = None,
      values: Sequence[Any] | None = None,
      sentinel_for_floating_point_values: Any = None,
  ):
    """Creates an environment, optionally pre-populated with `vars`/`values`.

    Args:
      vars: Variables to bind initially, paired positionally with `values`.
      values: Initial values for `vars`.
      sentinel_for_floating_point_values: If set, floating-point values are
        not stored concretely: writes record only a (shape, dtype)
        `Placeholder`, and reads materialize an array filled with this
        sentinel. NOTE(review): a falsy sentinel (e.g. 0.0) disables this
        substitution in `write` -- presumably intentional; verify callers.
    """
    self._sentinel_for_floating_point_values = (
        sentinel_for_floating_point_values
    )
    self._env: dict[jax_core.Var, Any] = {}
    if vars is None and values is None:
      return
    vars = vars or []
    values = values or []
    self.write_many(vars, values)

  def read(self, var):
    """Returns the value bound to `var`; literals return their own value.

    Placeholders are materialized as arrays full of the sentinel value.
    """
    if isinstance(var, jax_core.Literal):
      result = var.val
    else:
      result = self._env[var]
    if isinstance(result, Placeholder):
      result = lax.full(
          result.shape, self._sentinel_for_floating_point_values, result.dtype
      )
    return result

  def read_many(self, vars):
    """Reads each variable in `vars`, preserving order."""
    return safe_map(self.read, vars)

  def write(self, var, value):
    """Binds `var` to `value` (or to a `Placeholder` for floats, if enabled)."""
    if self._sentinel_for_floating_point_values and is_float(value.dtype):
      value = Placeholder(value.shape, value.dtype)
    self._env[var] = value

  def write_many(self, vars, values):
    """Binds each var in `vars` to the corresponding entry of `values`."""
    safe_map(self.write, vars, values)
def _transform_slice_or_index(slice_or_idx):
if isinstance(slice_or_idx, int):
return slice_or_idx
else:
start = int(slice_or_idx.start)
size = int(slice_or_idx.size)
stride = int(slice_or_idx.stride)
return slice(start, start + size * stride, stride)
def _compose_slice_or_index(slice_or_idx1, slice_or_idx2):
  """Composes two indexing tuples of `slice`s and `int`s.

  Conceptually `x[compose(s1, s2)] == x[s1][s2]`: an `int` in `slice_or_idx1`
  consumes a dimension (so nothing from `slice_or_idx2` applies to it), an
  `int` in `slice_or_idx2` selects a single position within the matching
  slice of `slice_or_idx1`, and slice-of-slice offsets and scales
  start/stop/step. All slices are assumed to have concrete, non-None
  start/stop/step (as produced by `_transform_slice_or_index`).
  """
  ret = []
  # i walks slice_or_idx1, j walks slice_or_idx2. An int in the first tuple
  # advances only i, because that dimension is absent from the second view.
  i = 0
  j = 0
  while True:
    if i == len(slice_or_idx1):
      # First index exhausted: remaining dims of the second apply unchanged.
      ret.extend(slice_or_idx2[j:])
      return tuple(ret)
    elif j == len(slice_or_idx2):
      # Second index exhausted: remaining dims of the first apply unchanged.
      ret.extend(slice_or_idx1[i:])
      return tuple(ret)
    elif isinstance(slice_or_idx1[i], int):
      ret.append(slice_or_idx1[i])
      i += 1
    elif isinstance(slice_or_idx2[j], int):
      # Select a single position inside the slice from the first index.
      ret.append(
          slice_or_idx1[i].start + slice_or_idx2[j] * slice_or_idx1[i].step
      )
      i += 1
      j += 1
    else:
      # Slice-of-slice: offset by the outer start, multiply the steps.
      ret.append(
          slice(
              slice_or_idx1[i].start
              + slice_or_idx2[j].start * slice_or_idx1[i].step,
              slice_or_idx1[i].start
              + slice_or_idx2[j].stop * slice_or_idx1[i].step,
              slice_or_idx1[i].step * slice_or_idx2[j].step,
          )
      )
      i += 1
      j += 1
def to_range(transforms) -> tuple[slice | int, ...]:
ret = ()
for transform in transforms:
# For now, assume only NDIndexer transforms.
ret = _compose_slice_or_index(
ret, tuple(_transform_slice_or_index(i) for i in transform.indices)
)
return ret
def get_next_indices(grid, indices):
  """Advances `indices` by one step in row-major order over `grid`.

  The last dimension varies fastest; a dimension that reaches its size
  wraps to zero and carries into the next-more-major dimension.
  """
  out = []
  carry = True
  for dim_size, index in reversed(list(zip(grid, indices))):
    bumped = jnp.where(carry, index + 1, index)
    carry = dim_size == bumped
    out.append(jnp.where(carry, 0, bumped))
  out.reverse()
  return tuple(out)
def get_indices(grid, loop_index):
  """Unravels a flat row-major `loop_index` into per-dimension indices."""
  unraveled = []
  # Peel off dimensions minor-to-major, then restore major-to-minor order.
  for dim_size in reversed(grid):
    unraveled.append(loop_index % dim_size)
    loop_index = loop_index // dim_size
  unraveled.reverse()
  return tuple(unraveled)
| {
"repo_id": "jax-ml/jax",
"file_path": "jax/_src/pallas/mosaic/interpret/utils.py",
"license": "Apache License 2.0",
"lines": 331,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:docs/_static/fault_tolerance/cancel_collectives.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
# Configure XLA/NCCL so a failed collective does not terminate the process
# and in-flight collectives can be aborted.
os.environ['XLA_FLAGS'] = ' '.join([
    '--xla_gpu_nccl_terminate_on_error=false',
    '--xla_gpu_nccl_async_execution=true',
    '--xla_gpu_nccl_blocking_communicators=false',
])
# Abort in-flight collectives when a participant failure is detected.
os.environ['XLA_PYTHON_CLIENT_ABORT_COLLECTIVES_ON_FAILURE'] = '1'
# NOTE(review): presumably the TFRT GPU client is required for the
# abort-on-failure path above -- confirm against the fault-tolerance docs.
os.environ['XLA_PYTHON_CLIENT_USE_TFRT_GPU_CLIENT'] = '1'

from absl import app
from absl import flags
from collections.abc import Sequence
import jax
import jax.numpy as jnp
import time

# Per-process command-line configuration for multi-controller JAX.
_PROCESS_ID = flags.DEFINE_integer("i", -1, "Process id")
_NUM_PROCESSES = flags.DEFINE_integer("n", -1, "Number of processes")


def main(_: Sequence[str]) -> None:
  """Repeatedly sums an array sharded over all devices.

  Documentation example: with the abort-on-failure configuration above, a
  collective that loses a participant is cancelled instead of hanging.
  """
  jax.config.update("jax_enable_recoverability", True)
  jax.distributed.initialize(
      coordinator_address="localhost:9000",
      num_processes=_NUM_PROCESSES.value,
      process_id=_PROCESS_ID.value,
      local_device_ids=[_PROCESS_ID.value],
      heartbeat_timeout_seconds=10,
  )
  print(f'{jax.devices()=}')
  print(f'{jax.local_devices()=}')
  # Don't do this. Use live_devices instead.
  from jax.experimental.multihost_utils import _live_devices
  _live_devices(jax._src.distributed.global_state.client, jax.devices())
  n = jax.device_count()
  jax.set_mesh(jax.make_mesh((n,), ("i",)))
  x = jax.device_put(jnp.arange(n), jax.P("i"))
  while True:
    # Summing a sharded array requires a cross-device collective.
    print(jnp.sum(x))
    time.sleep(1)


if __name__ == "__main__":
  app.run(main)
| {
"repo_id": "jax-ml/jax",
"file_path": "docs/_static/fault_tolerance/cancel_collectives.py",
"license": "Apache License 2.0",
"lines": 51,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:docs/_static/fault_tolerance/collectives.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
# Keep the process alive when a NCCL collective fails instead of aborting.
os.environ['XLA_FLAGS'] = '--xla_gpu_nccl_terminate_on_error=false'

from absl import app
from absl import flags
from collections.abc import Sequence
import jax
import jax.numpy as jnp
import time

# Per-process command-line configuration for multi-controller JAX.
_PROCESS_ID = flags.DEFINE_integer("i", -1, "Process id")
_NUM_PROCESSES = flags.DEFINE_integer("n", -1, "Number of processes")


def main(_: Sequence[str]) -> None:
  """Repeatedly sums an array sharded over all devices (docs example)."""
  jax.config.update("jax_enable_recoverability", True)
  jax.distributed.initialize(
      coordinator_address="localhost:9000",
      num_processes=_NUM_PROCESSES.value,
      process_id=_PROCESS_ID.value,
      local_device_ids=[_PROCESS_ID.value],
      heartbeat_timeout_seconds=10,
  )
  print(f'{jax.devices()=}')
  print(f'{jax.local_devices()=}')
  n = jax.device_count()
  jax.set_mesh(jax.make_mesh((n,), ("i",)))
  x = jax.device_put(jnp.arange(n), jax.P("i"))
  while True:
    # Summing a sharded array requires a cross-device collective.
    print(jnp.sum(x))
    time.sleep(1)


if __name__ == "__main__":
  app.run(main)
| {
"repo_id": "jax-ml/jax",
"file_path": "docs/_static/fault_tolerance/collectives.py",
"license": "Apache License 2.0",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:docs/_static/fault_tolerance/data_parallelism.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
# Configure XLA/NCCL so a failed collective does not terminate the process
# and in-flight collectives can be aborted.
os.environ['XLA_FLAGS'] = ' '.join([
    '--xla_gpu_nccl_terminate_on_error=false',
    '--xla_gpu_nccl_async_execution=true',
    '--xla_gpu_nccl_blocking_communicators=false',
])
# Abort in-flight collectives when a participant failure is detected.
os.environ['XLA_PYTHON_CLIENT_ABORT_COLLECTIVES_ON_FAILURE'] = '1'
os.environ['XLA_PYTHON_CLIENT_USE_TFRT_GPU_CLIENT'] = '1'

from absl import app
from absl import flags
from collections.abc import Sequence
from jax.experimental.multihost_utils import live_devices
import jax
import jax.numpy as jnp
import time

# Per-process command-line configuration for multi-controller JAX.
_PROCESS_ID = flags.DEFINE_integer("i", -1, "Process id")
_NUM_PROCESSES = flags.DEFINE_integer("n", -1, "Number of processes")


def replicated(x: jax.Array, devices: list[jax.Device]):
  """Return x replicated across the provided devices.

  Note that replicated(x) doesn't actually move any data. It simply creates a
  logically replicated array with x as the local replica.
  """
  n = len(devices)
  mesh = jax.make_mesh((n, ), ("i", ), devices=devices)
  spec = jax.sharding.PartitionSpec(None)
  sharding = jax.sharding.NamedSharding(mesh, spec)
  # Only this process's devices receive a shard; each reuses the local
  # replica's data.
  shards = [
      jax.device_put(x.addressable_shards[0].data, d) for d in devices
      if d.process_index == jax.process_index()
  ]
  return jax.make_array_from_single_device_arrays(x.shape, sharding, shards)


def sharded(x: jax.Array, devices: list[jax.Device]):
  """Return x sharded across the provided devices.

  Note that sharded(x) doesn't actually move any data. It simply creates a
  logically sharded array. x should have the same shape as the global array.
  """
  n = len(devices)
  mesh = jax.make_mesh((n, ), ("i", ), devices=devices)
  spec = jax.sharding.PartitionSpec("i")
  sharding = jax.sharding.NamedSharding(mesh, spec)
  # Map each device to the index range of the global array it should hold.
  m = sharding.addressable_devices_indices_map(x.shape)
  # NOTE(review): iterates jax.local_devices() rather than filtering
  # `devices` by process (as `replicated` does) -- confirm all local devices
  # are always members of `devices` here.
  shards = [jax.device_put(x[m[d]], d) for d in jax.local_devices()]
  return jax.make_array_from_single_device_arrays(x.shape, sharding, shards)


def main(_: Sequence[str]) -> None:
  """Data-parallel linear-regression training that tolerates lost devices."""
  # Parse command line arguments and initialize multi-controller JAX.
  jax.config.update("jax_enable_recoverability", True)
  jax.distributed.initialize(coordinator_address="localhost:8000",
                             process_id=_PROCESS_ID.value,
                             num_processes=_NUM_PROCESSES.value,
                             local_device_ids=[_PROCESS_ID.value],
                             heartbeat_timeout_seconds=10)
  print(f'{jax.devices()=}')
  print(f'{jax.local_devices()=}')

  # Initialize the model's weights.
  keys = iter(jax.random.split(jax.random.key(seed=42), num=3))
  weights = jax.random.normal(next(keys), shape=(1, ))

  # We'll learn a trivial linear model: a*x.
  def predict(weights, X):
    return weights * X

  # We'll use mean squared error loss.
  def loss(weights, X, Y):
    return jnp.mean((predict(weights, X) - Y)**2)

  # Initialize the (noisy) training data with a=10.
  X = jax.random.permutation(next(keys), jnp.arange(-300., 300.))
  Y = 10 * X + jax.random.normal(next(keys), X.shape)

  # Hyperparameters.
  loss_and_grad = jax.jit(jax.value_and_grad(loss))
  learning_rate = 1e-6
  device_batch_size = 10

  step = 0
  while True:
    try:
      with live_devices(jax.devices()) as devices:
        print(f'=== Running step {step} with live devices = {devices} ===')
        # Replicate the model weights.
        weights = replicated(weights, devices)
        # Shard the batch.
        batch_size = device_batch_size * len(devices)
        start = (step * batch_size) % len(X)
        stop = start + batch_size
        X_batch = sharded(X[start:stop], devices)
        Y_batch = sharded(Y[start:stop], devices)
        # Compute gradients and update weights.
        l, grad = loss_and_grad(weights, X_batch, Y_batch)
        new_weights = jax.block_until_ready(weights - learning_rate * grad)
    except Exception as e:
      # A failed step leaves `weights` and `step` unchanged; retry next loop.
      print(f'Step {step} failed: {e}')
    else:
      print(f'Step {step} succeeded: loss = {l}')
      step += 1
      weights = new_weights
    time.sleep(1)


if __name__ == "__main__":
  app.run(main)
| {
"repo_id": "jax-ml/jax",
"file_path": "docs/_static/fault_tolerance/data_parallelism.py",
"license": "Apache License 2.0",
"lines": 107,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:docs/_static/fault_tolerance/data_parallelism_with_recovery.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
# Configure XLA/NCCL so a failed collective does not terminate the process
# and in-flight collectives can be aborted.
os.environ['XLA_FLAGS'] = ' '.join([
    '--xla_gpu_nccl_terminate_on_error=false',
    '--xla_gpu_nccl_async_execution=true',
    '--xla_gpu_nccl_blocking_communicators=false',
])
# Abort in-flight collectives when a participant failure is detected.
os.environ['XLA_PYTHON_CLIENT_ABORT_COLLECTIVES_ON_FAILURE'] = '1'
os.environ['XLA_PYTHON_CLIENT_USE_TFRT_GPU_CLIENT'] = '1'

from absl import app
from absl import flags
from collections.abc import Sequence
from jax.experimental.multihost_utils import live_devices
from jax.experimental import shard_map
import jax
import jax.numpy as jnp
import numpy as np
import time

# Per-process command-line configuration for multi-controller JAX.
_PROCESS_ID = flags.DEFINE_integer("i", -1, "Process id")
_NUM_PROCESSES = flags.DEFINE_integer("n", -1, "Number of processes")


def replicated(x: jax.Array, devices: list[jax.Device]):
  """Return x replicated across the provided devices.

  Note that replicated(x) doesn't actually move any data. It simply creates a
  logically replicated array with x as the local replica.
  """
  n = len(devices)
  mesh = jax.make_mesh((n, ), ("i", ), devices=devices)
  spec = jax.sharding.PartitionSpec(None)
  sharding = jax.sharding.NamedSharding(mesh, spec)
  # Only this process's devices receive a shard; each reuses the local
  # replica's data.
  shards = [
      jax.device_put(x.addressable_shards[0].data, d) for d in devices
      if d.process_index == jax.process_index()
  ]
  return jax.make_array_from_single_device_arrays(x.shape, sharding, shards)


def sharded(x: jax.Array, devices: list[jax.Device]):
  """Return x sharded across the provided devices.

  Note that sharded(x) doesn't actually move any data. It simply creates a
  logically sharded array. x should have the same shape as the global array.
  """
  n = len(devices)
  mesh = jax.make_mesh((n, ), ("i", ), devices=devices)
  spec = jax.sharding.PartitionSpec("i")
  sharding = jax.sharding.NamedSharding(mesh, spec)
  # Map each device to the index range of the global array it should hold.
  m = sharding.addressable_devices_indices_map(x.shape)
  # NOTE(review): iterates jax.local_devices() rather than filtering
  # `devices` by process (as `replicated` does) -- confirm all local devices
  # are always members of `devices` here.
  shards = [jax.device_put(x[m[d]], d) for d in jax.local_devices()]
  return jax.make_array_from_single_device_arrays(x.shape, sharding, shards)


def send(x: jax.Array, from_device: jax.Device, to_device: jax.Device):
  """Sends x from one device to another."""
  assert isinstance(x, jax.Array)
  # A two-device psum over a replicated value serves as a send/recv pair:
  # the matching `recv` contributes zeros, so the sum equals x.
  devices = [from_device, to_device]
  psum = lambda x: jax.lax.psum(x, "i")
  mesh = jax.make_mesh((2, ), ("i", ), devices=devices)
  spec = jax.sharding.PartitionSpec(None)
  x = replicated(x, [from_device, to_device])
  shard_map.shard_map(psum, mesh=mesh, in_specs=spec, out_specs=spec)(x)


def recv(x: jax.Array, from_device: jax.Device, to_device: jax.Device):
  """Receives x from a matching send."""
  assert isinstance(x, jax.Array)
  # NOTE(review): the `to_device` parameter is overwritten with this
  # process's first local device -- presumably "receive onto my device";
  # confirm callers always intend that.
  to_device = jax.local_devices()[0]
  devices = [from_device, to_device]
  psum = lambda x: jax.lax.psum(x, "i")
  mesh = jax.make_mesh((2, ), ("i", ), devices=devices)
  spec = jax.sharding.PartitionSpec(None)
  # The receiver contributes zeros, so the psum yields the sender's value.
  x = jnp.zeros_like(x)
  x = replicated(x, [from_device, to_device])
  return shard_map.shard_map(psum, mesh=mesh, in_specs=spec, out_specs=spec)(x)


def allgather(x: float, devices: list[jax.Device]) -> list[float]:
  """Performs an AllGather across the provided devices."""
  n = len(devices)
  mesh = jax.make_mesh((n, ), ("i", ), devices=devices)
  spec = jax.sharding.PartitionSpec('i')
  p = lambda x: jax.lax.all_gather(x, "i", tiled=True)
  f = jax.shard_map(p, mesh=mesh, in_specs=spec, out_specs=spec)
  return jax.block_until_ready(f(np.array([x] * len(devices)))).addressable_shards[0].data


def main(_: Sequence[str]) -> None:
  """Data-parallel training with explicit recovery of re-joining devices."""
  # Parse command line arguments and initialize multi-controller JAX.
  jax.config.update("jax_enable_recoverability", True)
  jax.distributed.initialize(coordinator_address="localhost:8000",
                             process_id=_PROCESS_ID.value,
                             num_processes=_NUM_PROCESSES.value,
                             local_device_ids=[_PROCESS_ID.value],
                             heartbeat_timeout_seconds=10)
  print(f'{jax.devices()=}')
  print(f'{jax.local_devices()=}')

  # Initialize the model's weights.
  keys = iter(jax.random.split(jax.random.key(seed=42), num=3))
  weights = jax.random.normal(next(keys), shape=(1, ))

  # We'll learn a trivial linear model: a*x.
  def predict(weights, X):
    return weights * X

  # We'll use mean squared error loss.
  def loss(weights, X, Y):
    return jnp.mean((predict(weights, X) - Y)**2)

  # Initialize the (noisy) training data with a=10.
  X = jax.random.permutation(next(keys), jnp.arange(-300., 300.))
  Y = 10 * X + jax.random.normal(next(keys), X.shape)

  # Hyperparameters.
  loss_and_grad = jax.jit(jax.value_and_grad(loss))
  learning_rate = 1e-6
  device_batch_size = 10

  step = 0
  while True:
    try:
      with live_devices(jax.devices()) as devices:
        print(f'=== Running step {step} with live devices = {devices} ===')
        # Handle recovering devices. A device is recovering if its step doesn't
        # match process 0's step. We assume process 0 never fails.
        print('all gathering steps...')
        steps = allgather(step, devices)
        print(f'{steps=}')
        recovering = [d for d, s in zip(devices, steps) if s != steps[0]]
        for d in recovering:
          # Process 0 sends weights and step to the recovering devices.
          if jax.process_index() == 0:
            print('sending...')
            send(weights, jax.devices()[0], d)
            send(jnp.array([step]), jax.devices()[0], d)
          elif d.process_index == jax.process_index():
            print('receiving...')
            weights = recv(weights, jax.devices()[0], d)
            step = recv(jnp.array([step]), jax.devices()[0], d)[0]
        # Replicate the model weights.
        weights = replicated(weights, devices)
        # Shard the batch.
        batch_size = device_batch_size * len(devices)
        start = (step * batch_size) % len(X)
        stop = start + batch_size
        X_batch = sharded(X[start:stop], devices)
        Y_batch = sharded(Y[start:stop], devices)
        # Compute gradients and update weights.
        l, grad = loss_and_grad(weights, X_batch, Y_batch)
        new_weights = jax.block_until_ready(weights - learning_rate * grad)
    except Exception as e:
      # A failed step leaves `weights` and `step` unchanged; retry next loop.
      print(f'Step {step} failed: {e}')
    else:
      print(f'Step {step} succeeded: loss = {l}')
      step += 1
      weights = new_weights
    time.sleep(1)


if __name__ == "__main__":
  app.run(main)
| {
"repo_id": "jax-ml/jax",
"file_path": "docs/_static/fault_tolerance/data_parallelism_with_recovery.py",
"license": "Apache License 2.0",
"lines": 153,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:docs/_static/fault_tolerance/dont_fail.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
# Keep the process alive when a NCCL collective fails instead of aborting.
os.environ['XLA_FLAGS'] = '--xla_gpu_nccl_terminate_on_error=false'

from absl import app
from absl import flags
from collections.abc import Sequence
import jax
import time

# Per-process command-line configuration for multi-controller JAX.
_PROCESS_ID = flags.DEFINE_integer("i", -1, "Process id")
_NUM_PROCESSES = flags.DEFINE_integer("n", -1, "Number of processes")


def main(_: Sequence[str]) -> None:
  """Initializes distributed JAX, then idles, printing a timestamp forever."""
  jax.config.update("jax_enable_recoverability", True)
  jax.distributed.initialize(
      coordinator_address="localhost:9000",
      num_processes=_NUM_PROCESSES.value,
      process_id=_PROCESS_ID.value,
      local_device_ids=[_PROCESS_ID.value],
      heartbeat_timeout_seconds=10,
  )
  print(f'{jax.devices()=}')
  print(f'{jax.local_devices()=}')
  while True:
    print(time.time())
    time.sleep(1)


if __name__ == "__main__":
  app.run(main)
| {
"repo_id": "jax-ml/jax",
"file_path": "docs/_static/fault_tolerance/dont_fail.py",
"license": "Apache License 2.0",
"lines": 38,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:docs/_static/fault_tolerance/live_devices.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
# Configure XLA/NCCL so a failed collective does not terminate the process
# and in-flight collectives can be aborted.
os.environ['XLA_FLAGS'] = ' '.join([
    '--xla_gpu_nccl_terminate_on_error=false',
    '--xla_gpu_nccl_async_execution=true',
    '--xla_gpu_nccl_blocking_communicators=false',
])
# Abort in-flight collectives when a participant failure is detected.
os.environ['XLA_PYTHON_CLIENT_ABORT_COLLECTIVES_ON_FAILURE'] = '1'
os.environ['XLA_PYTHON_CLIENT_USE_TFRT_GPU_CLIENT'] = '1'

from absl import app
from absl import flags
from collections.abc import Sequence
from jax.experimental.multihost_utils import live_devices
import jax
import jax.numpy as jnp
import time

# Per-process command-line configuration for multi-controller JAX.
_PROCESS_ID = flags.DEFINE_integer("i", -1, "Process id")
_NUM_PROCESSES = flags.DEFINE_integer("n", -1, "Number of processes")


def main(_: Sequence[str]) -> None:
  """Repeatedly sums a sharded array over whichever devices are still live.

  Each iteration re-forms the mesh from `live_devices`, so the loop keeps
  making progress after a process failure.
  """
  jax.config.update("jax_enable_recoverability", True)
  jax.distributed.initialize(
      coordinator_address="localhost:9000",
      num_processes=_NUM_PROCESSES.value,
      process_id=_PROCESS_ID.value,
      local_device_ids=[_PROCESS_ID.value],
      heartbeat_timeout_seconds=10,
  )
  print(f'{jax.devices()=}')
  print(f'{jax.local_devices()=}')
  while True:
    try:
      with live_devices(jax.devices()) as devices:
        print(f'{devices=}')
        n = len(devices)
        jax.set_mesh(jax.make_mesh((n,), ("i",), devices=devices))
        x = jax.device_put(jnp.arange(n), jax.P("i"))
        print(jnp.sum(x))
    except Exception as e:
      print('FAIL:', e)
    else:
      print('PASS')
    time.sleep(1)


if __name__ == "__main__":
  app.run(main)
| {
"repo_id": "jax-ml/jax",
"file_path": "docs/_static/fault_tolerance/live_devices.py",
"license": "Apache License 2.0",
"lines": 56,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:docs/_static/fault_tolerance/while_loop.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl import app
from absl import flags
from collections.abc import Sequence
import jax
import time
_PROCESS_ID = flags.DEFINE_integer("i", -1, "Process id")
_NUM_PROCESSES = flags.DEFINE_integer("n", -1, "Number of processes")
def main(_: Sequence[str]) -> None:
  """Initialize the distributed runtime and print a timestamp every second.

  Minimal multi-process example: each process connects to the coordinator at
  localhost:9000 and then loops forever, which makes it easy to observe what
  happens when one of the processes is killed.
  """
  jax.distributed.initialize(
      coordinator_address="localhost:9000",
      num_processes=_NUM_PROCESSES.value,
      process_id=_PROCESS_ID.value,
      local_device_ids=[_PROCESS_ID.value],  # one local device per process
      heartbeat_timeout_seconds=10,  # declare a peer dead after 10s of silence
  )
  print(f'{jax.devices()=}')
  print(f'{jax.local_devices()=}')
  while True:
    print(time.time())
    time.sleep(1)
if __name__ == "__main__":
app.run(main)
| {
"repo_id": "jax-ml/jax",
"file_path": "docs/_static/fault_tolerance/while_loop.py",
"license": "Apache License 2.0",
"lines": 35,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:tests/export_serialization_back_compat_test.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for backwards compatibility of serialization of JAX exports.
Whenever we change the serialization format for jax.export.Exported
(see file jax.export.serialization), we should first save a serialization
of the current format and add a test that it can be deserialized and it has
the expected behavior.
To add a new test:
* Create a new test method, with a function to be serialized that exercises
the feature you want to test, and a call to self.export_and_serialize.
You can follow the model of the tests below, which are parameterized by
the testdata. Use only `None` for the testdata parameter to signal that
you want to use a current serialization and not a saved one.
* Run the test. This will save the serialized data in
TEST_UNDECLARED_OUTPUTS_DIR (or "/tmp/back_compat_testdata" if not set).
* Copy the test data defined in the output file, to the file
jax._src.internal_test_util.export_back_compat_test_data.export_{name}.py.
* Add a new import statement to this file to import that module
This process will ensure that the saved serialized export can be read by
future code version (backward compatibility of the deserializer). To check
forward compatibility you'd have to check out an older version of the code
and cherry pick a new version of the directory
`jax._src.internal_test_util.export_back_compat_test_data`.
"""
import logging
import os
import re
from typing import Any
from absl.testing import absltest
import numpy as np
# ruff: noqa: F401
try:
import flatbuffers
CAN_SERIALIZE = True
except (ModuleNotFoundError, ImportError):
CAN_SERIALIZE = False
import jax
from jax._src import config
from jax._src import core
from jax._src.export import _export
from jax._src.export.serialization import _SERIALIZATION_VERSION
from jax.sharding import PartitionSpec as P
from jax._src import test_util as jtu
from jax._src.internal_test_util.export_back_compat_test_data import export_with_specified_sharding
from jax._src.internal_test_util.export_back_compat_test_data import export_with_unspecified_sharding
from jax._src.internal_test_util.export_back_compat_test_data import export_with_memory_space
config.parse_flags_with_absl()
jtu.request_cpu_devices(8)
class CompatTest(jtu.JaxTestCase):
  """Checks that previously-saved serialized Exporteds still deserialize.

  Each test runs once against the current serializer ("current" case) and
  once per saved testdata version; see the module docstring for how new
  testdata is generated and checked in.
  """

  def setUp(self):
    # Fix: chain to JaxTestCase.setUp, as the other test classes in this
    # repo do; it performs per-test initialization that was being skipped.
    super().setUp()
    if not CAN_SERIALIZE:
      self.skipTest("Serialization not available")

  def export_and_serialize(self, fun, *args,
                           vjp_order=0,
                           platforms=None,
                           **kwargs) -> bytearray:
    """Export and serialize a function.

    The test data is saved in TEST_UNDECLARED_OUTPUTS_DIR (or
    "/tmp/back_compat_testdata" if not set) and should be copied as explained
    in the module docstring.
    """
    exp = _export.export(fun, platforms=platforms)(*args, **kwargs)
    serialized = exp.serialize(vjp_order=vjp_order)
    # Template of the dict entry to paste into the saved-testdata module.
    updated_testdata = f"""
# Paste to the test data file (see export_serialization_back_compat_test.py module docstring)
    dict(
        serialization_version={_SERIALIZATION_VERSION},
        exported_serialized={serialized!r},
    ),
"""
    # Replace the word that should not appear.
    # NOTE(review): the pattern "google." has an unescaped '.', so it matches
    # "google" followed by ANY character — presumably intentional (it also
    # scrubs e.g. "google3"); confirm before tightening.
    updated_testdata = re.sub(r"google.", "googlex", updated_testdata)
    output_dir = os.getenv("TEST_UNDECLARED_OUTPUTS_DIR",
                           "/tmp/back_compat_testdata")
    if not os.path.exists(output_dir):
      os.makedirs(output_dir)
    output_file_basename = f"export_{self._testMethodName.replace('test_', '')}.py"
    output_file = os.path.join(output_dir, output_file_basename)
    logging.info("Writing the updated serialized Exported at %s", output_file)
    with open(output_file, "w") as f:
      f.write(updated_testdata)
    return serialized

  @jtu.parameterized_filterable(
      kwargs=[
          dict(testdata=testdata,
               testcase_name=("current" if testdata is None
                              else f"v{testdata['serialization_version']}"))
          for testdata in [None, *export_with_specified_sharding.serializations]
      ]
  )
  def test_with_specified_sharding(self, testdata: dict[str, Any] | None):
    if jtu.device_under_test() != "cpu":
      self.skipTest("Testing only the CPU serialization")
    a = np.arange(16 * 4, dtype=np.float32).reshape((16, 4))
    mesh = jtu.create_mesh((2,), "x")
    with jax.set_mesh(mesh):
      # Both input and output shardings are specified explicitly.
      @jax.jit(in_shardings=(jax.sharding.NamedSharding(mesh, P("x", None),),),
               out_shardings=jax.sharding.NamedSharding(mesh, P(None, "x")))
      def f(b):
        return b * 2.
      a = jax.device_put(a, jax.sharding.NamedSharding(mesh, P("x", None)))
      if testdata is None:
        serialized = self.export_and_serialize(f, a)
      else:
        serialized = testdata["exported_serialized"]
      out = _export.deserialize(serialized).call(a)
      self.assertAllClose(out, a * 2.)
      # Output sharded on the second axis, per out_shardings P(None, "x").
      self.assertEqual(out.addressable_shards[0].index, (slice(None), slice(0, 2)))
      self.assertEqual(out.addressable_shards[1].index, (slice(None), slice(2, 4)))

  @jtu.parameterized_filterable(
      kwargs=[
          dict(testdata=testdata,
               testcase_name=("current" if testdata is None
                              else f"v{testdata['serialization_version']}"))
          for testdata in [None, *export_with_unspecified_sharding.serializations]
      ]
  )
  def test_with_unspecified_sharding(self, testdata: dict[str, Any] | None):
    if jtu.device_under_test() != "cpu":
      self.skipTest("Testing only the CPU serialization")
    a = np.arange(16 * 4, dtype=np.float32).reshape((16, 4))
    # Output sharding is not specified
    mesh = jtu.create_mesh((2,), "x")
    with jax.set_mesh(mesh):
      @jax.jit(in_shardings=(jax.sharding.NamedSharding(mesh, P("x", None),),))
      def f(b):
        return b * 2.
      a = jax.device_put(a, jax.sharding.NamedSharding(mesh, P("x", None)))
      if testdata is None:
        serialized = self.export_and_serialize(f, a)
      else:
        serialized = testdata["exported_serialized"]
      out = _export.deserialize(serialized).call(a)
      self.assertAllClose(out, a * 2.)
      # With no out_shardings, the input's P("x", None) layout is kept.
      self.assertEqual(out.addressable_shards[0].index, (slice(0, 8), slice(None)))
      self.assertEqual(out.addressable_shards[1].index, (slice(8, 16), slice(None)))

  @jtu.parameterized_filterable(
      kwargs=[
          dict(testdata=testdata,
               testcase_name=("current" if testdata is None
                              else f"v{testdata['serialization_version']}"))
          for testdata in [None, *export_with_memory_space.serializations]
      ]
  )
  def test_with_memory_space(self, testdata: dict[str, Any] | None):
    # This test is based on export_test.py::test_memory_space_from_arg
    mesh = jtu.create_mesh((2,), "x")
    with jax.set_mesh(mesh):
      shd = jax.sharding.NamedSharding(mesh, P("x", None),
                                       memory_kind="pinned_host")
      a = jax.device_put(np.ones((2, 3), dtype=np.float32), shd)
      f = jax.jit(lambda x: x)
      if testdata is None:
        serialized = self.export_and_serialize(
            f, a, platforms=("tpu", "cuda", "rocm"))
      else:
        if jtu.is_device_rocm() and testdata["serialization_version"] < 6:
          self.skipTest("ROCm serialization testdata is only available for "
                        "serialization formats v6 and onwards.")
        serialized = testdata["exported_serialized"]
      exported = _export.deserialize(serialized)
      # The pinned_host memory kind must survive the serialization round trip.
      self.assertEqual(exported.in_avals[0].memory_space, core.MemorySpace.Host)
      self.assertEqual(exported.out_avals[0].memory_space, core.MemorySpace.Host)
      if jtu.device_under_test() in ("tpu", "gpu"):
        b = exported.call(a)
        self.assertEqual(b.aval.memory_space, core.MemorySpace.Host)
        self.assertEqual(b.sharding.memory_kind, a.sharding.memory_kind)
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| {
"repo_id": "jax-ml/jax",
"file_path": "tests/export_serialization_back_compat_test.py",
"license": "Apache License 2.0",
"lines": 180,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
jax-ml/jax:jax/_src/pallas/pallas_test_util.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pallas test utilities."""
import sys
from jax._src import test_util as jtu
from jax._src.pallas import pallas_call
from jax.experimental import pallas as pl
use_mosaic_gpu = pallas_call._PALLAS_USE_MOSAIC_GPU.value
@jtu.with_config(jax_traceback_filtering="off")
class PallasTest(jtu.JaxTestCase):
  """Base class for Pallas tests; subclasses set INTERPRET for CPU runs."""

  INTERPRET: bool = False

  def setUp(self):
    interpret = self.INTERPRET
    on_cpu = jtu.test_device_matches(['cpu'])
    if interpret and not on_cpu:
      self.skipTest('Only run interpret tests on CPU.')
    if not interpret:
      # Running on accelerator
      if on_cpu:
        self.skipTest("On CPU the test works only in interpret mode")
      on_cuda = jtu.test_device_matches(["cuda"])
      if on_cuda and not jtu.is_cuda_compute_capability_at_least("8.0"):
        self.skipTest("Only works on GPU with capability >= sm80")
      if (on_cuda and use_mosaic_gpu
          and not jtu.is_cuda_compute_capability_at_least("9.0")):
        self.skipTest("Mosaic GPU requires capability >= sm90")
    if sys.platform == "win32":
      self.skipTest("Only works on non-Windows platforms")
    super().setUp()

  def pallas_call(self, *args, **kwargs):
    """pl.pallas_call with this class's interpret mode pre-applied."""
    return pl.pallas_call(*args, **kwargs, interpret=self.INTERPRET)
class PallasTPUTest(PallasTest):
  """A test case that only runs on TPUs or in interpret mode on CPU."""

  def setUp(self):
    on_tpu = jtu.test_device_matches(['tpu'])
    if not (on_tpu or self.INTERPRET):
      self.skipTest('Test requires TPUs')
    super().setUp()
| {
"repo_id": "jax-ml/jax",
"file_path": "jax/_src/pallas/pallas_test_util.py",
"license": "Apache License 2.0",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:tests/multiprocess/colocated_python_test.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multihost tests for jax.Array."""
import jax
from jax._src import test_multiprocess as jt_multiprocess
from jax._src import test_util as jtu
from jax.experimental import colocated_python
import numpy as np
try:
import cloudpickle # noqa
HAS_CLOUDPICKLE = True
except (ModuleNotFoundError, ImportError):
HAS_CLOUDPICKLE = False
class ColocatedPythonTestMultiHost(jt_multiprocess.MultiProcessTest):
  """Multihost tests for colocated Python execution on CPU devices."""

  def setUp(self):
    super().setUp()
    # colocated_python serializes Python callables with cloudpickle.
    if not HAS_CLOUDPICKLE:
      self.skipTest(
          "ColocatedPythonTestMultiHost depends on cloudpickle library"
      )
    jtu.request_cpu_devices(jax.local_device_count())

  def test_colocated_cpu_devices(self):
    # Use a 2 x N/2 mesh when the device count is even, else a degenerate
    # 1 x N mesh, so the test works for any device count.
    if jax.device_count() % 2 == 0:
      mesh_shape = (2, jax.device_count() // 2)
    else:
      mesh_shape = (1, jax.device_count())
    mesh = jax.make_mesh(mesh_shape, ("x", "y"),
                         axis_types=(jax.sharding.AxisType.Explicit,) * 2)
    # Mapping a whole mesh and mapping its flat device list should agree.
    cpu_mesh1 = colocated_python.colocated_cpu_devices(mesh)
    cpu_devices = colocated_python.colocated_cpu_devices(mesh.devices.flat)
    cpu_mesh2 = jax.make_mesh(mesh_shape, ("x", "y"),
                              axis_types=(jax.sharding.AxisType.Explicit,) * 2,
                              devices=cpu_devices)
    self.assertEqual(cpu_mesh1, cpu_mesh2)

  def test_simple_function(self):
    @colocated_python.colocated_python
    def add_one(x):
      # Runs on the colocated CPU devices: add 1 shard-by-shard, then
      # reassemble the global array from the per-device pieces.
      return jax.make_array_from_single_device_arrays(
          x.shape, x.sharding, [s.data + 1 for s in x.addressable_shards])

    mesh = jax.make_mesh((jax.device_count(),), ("x",),
                         axis_types=(jax.sharding.AxisType.Explicit,))
    cpu_mesh = colocated_python.colocated_cpu_devices(mesh)
    cpu_sharding = jax.NamedSharding(cpu_mesh, jax.P("x"))
    x = np.arange(cpu_mesh.size)
    x = jax.device_put(x, cpu_sharding)
    out = add_one(x)
    # Replicate the result so every process can fetch the full array.
    out = jax.jit(lambda x: x,
                  out_shardings=jax.NamedSharding(cpu_mesh, jax.P()))(out)
    out = jax.device_get(out)
    np.testing.assert_equal(out, np.arange(cpu_mesh.size) + 1)
if __name__ == "__main__":
jt_multiprocess.main()
| {
"repo_id": "jax-ml/jax",
"file_path": "tests/multiprocess/colocated_python_test.py",
"license": "Apache License 2.0",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
jax-ml/jax:docs/array_refs.py | # ---
# Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# jupyter:
# jupytext:
# cell_metadata_filter: -all
# formats: ipynb,md:myst,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.16.4
# ---
# # `Ref`: mutable arrays for data plumbing and memory control
#
# JAX `Array`s are immutable, representing mathematical values. Immutability can
# make code easier to reason about, and is useful for optimized compilation,
# parallelization, rematerialization, and transformations like autodiff.
#
# But immutability is constraining too:
# * **expressiveness** --- plumbing out intermediate data or maintaining state,
# e.g. for normalization statistics or metrics, can feel heavyweight;
# * **performance** --- it's more difficult to reason about performance, like
# memory lifetimes and in-place updates.
#
# `Ref`s can help! They represent mutable arrays that can be read and written
# in-place. These array references are compatible with JAX transformations, like
# `jax.jit` and `jax.grad`:
# +
import jax
import jax.numpy as jnp
x_ref = jax.new_ref(jnp.zeros(3)) # new array ref, with initial value [0., 0., 0.]
@jax.jit
def f():
x_ref[1] += 1. # indexed add-update
print(x_ref) # Ref([0., 0., 0.])
f()
f()
print(x_ref) # Ref([0., 2., 0.])
# -
# The indexing syntax follows NumPy's. For a `Ref` called `x_ref`, we can
# read its entire value into an `Array` by writing `x_ref[...]`, and write its
# entire value using `x_ref[...] = A` for some `Array`-valued expression `A`:
# +
def g(x):
x_ref = jax.new_ref(0.)
x_ref[...] = jnp.sin(x)
return x_ref[...]
print(jax.grad(g)(1.0)) # 0.54
# -
# `Ref` is a distinct type from `Array`, and it comes with some important
# constraints and limitations. In particular, indexed reading and writing is just
# about the *only* thing you can do with a `Ref`. References can't be passed
# where `Array`s are expected:
x_ref = jax.new_ref(1.0)
try:
jnp.sin(x_ref) # error! can't do math on refs
except Exception as e:
print(e)
# To do math, you need to read the ref's value first, like `jnp.sin(x_ref[...])`.
#
# So what _can_ you do with `Ref`? Read on for the details, and some useful
# recipes.
#
# ### API
#
# If you've ever used
# [Pallas](https://docs.jax.dev/en/latest/pallas/quickstart.html), then `Ref`
# should look familiar. A big difference is that you can create new `Ref`s
# yourself directly using `jax.new_ref`:
# +
from jax import Array, Ref
def array_ref(init_val: Array) -> Ref:
    """Introduce a new reference with given initial value."""
    # Illustrative signature stub for the docs; the real entry point is
    # jax.new_ref (see the surrounding prose).
# -
# `jax.freeze` is its antithesis, invalidating the given ref (so that accessing it
# afterwards is an error) and producing its final value:
def freeze(ref: Ref) -> Array:
    """Invalidate given reference and produce its final value."""
    # Illustrative signature stub for the docs; the real entry point is
    # jax.freeze.
# In between creating and destroying them, you can perform indexed reads and
# writes on refs. You can read and write using the functions `jax.ref.get` and
# `jax.ref.swap`, but usually you'd just use NumPy-style array indexing syntax:
# +
import types
Index = int | slice | Array | types.EllipsisType
Indexer = Index | tuple[Index, ...]
def get(ref: Ref, idx: Indexer) -> Array:
    """Returns `ref[idx]` for NumPy-style indexer `idx`."""
    # Illustrative signature stub for the docs (real: jax.ref.get).

def swap(ref: Ref, idx: Indexer, val: Array) -> Array:
    """Performs `newval, ref[idx] = ref[idx], val` and returns `newval`."""
    # Illustrative signature stub for the docs (real: jax.ref.swap).
# -
# Here, `Indexer` can be any NumPy indexing expression:
# +
x_ref = jax.new_ref(jnp.arange(12.).reshape(3, 4))
# int indexing
row = x_ref[0]
x_ref[1] = row
# tuple indexing
val = x_ref[1, 2]
x_ref[2, 3] = val
# slice indexing
col = x_ref[:, 1]
x_ref[0, :3] = col
# advanced int array indexing
vals = x_ref[jnp.array([0, 0, 1]), jnp.array([1, 2, 3])]
x_ref[jnp.array([1, 2, 1]), jnp.array([0, 0, 1])] = vals
# -
# As with `Array`s, indexing mostly follows NumPy behavior, except for
# out-of-bounds indexing which [behaves in the usual way for JAX
# `Array`s](https://docs.jax.dev/en/latest/notebooks/Common_Gotchas_in_JAX.html#out-of-bounds-indexing).
#
# ### Pure and impure functions
#
# A function that takes a ref as an argument (either explicitly or by lexical
# closure) is considered _impure_. For example:
# +
# takes ref as an argument => impure
@jax.jit
def impure1(x_ref, y_ref):
x_ref[...] = y_ref[...]
# closes over ref => impure
y_ref = jax.new_ref(0)
@jax.jit
def impure2(x):
y_ref[...] = x
# -
# If a function only uses refs internally, it is still considered _pure_. Purity
# is in the eye of the caller. For example:
# internal refs => still pure
@jax.jit
def pure1(x):
ref = jax.new_ref(x)
ref[...] = ref[...] + ref[...]
return ref[...]
# Pure functions, even those that use refs internally, are familiar: for example,
# they work with transformations like `jax.grad`, `jax.vmap`, `jax.shard_map`, and
# others in the usual way.
#
# Impure functions are sequenced in Python program order.
#
# ### Restrictions
#
# `Ref`s are second-class, in the sense that there are restrictions on their
# use:
#
# * **Can't return refs** from `jit`\-decorated functions or the bodies of
# higher-order primitives like `jax.lax.scan`, `jax.lax.while_loop`, or
# `jax.lax.cond`
# * **Can't pass a ref as an argument more than once** to `jit`\-decorated
# functions or higher-order primitives
# * **Can only `freeze` in creation scope**
# * **No higher-order refs** (refs-to-refs)
#
# For example, these are errors:
# +
x_ref = jax.new_ref(0.)
# can't return refs
@jax.jit
def err1(x_ref):
x_ref[...] = 5.
return x_ref # error!
try:
err1(x_ref)
except Exception as e:
print(e)
# can't pass a ref as an argument more than once
@jax.jit
def err2(x_ref, y_ref):
...
try:
err2(x_ref, x_ref) # error!
except Exception as e:
print(e)
# can't pass and close over the same ref
@jax.jit
def err3(y_ref):
y_ref[...] = x_ref[...]
try:
err3(x_ref) # error!
except Exception as e:
print(e)
# can only freeze in creation scope
@jax.jit
def err4(x_ref):
jax.freeze(x_ref)
try:
err4(x_ref) # error!
except Exception as e:
print(e)
# -
# These restrictions exist to rule out aliasing, where two refs might refer to the
# same mutable memory, making programs harder to reason about and transform.
# Weaker restrictions would also suffice, so some of these restrictions may be
# lifted as we improve JAX's ability to verify that no aliasing is present.
#
# There are also restrictions stemming from undefined semantics, e.g. in the
# presence of parallelism or rematerialization:
#
# * **Can't `vmap` or `shard_map` a function that closes over refs**
# * **Can't apply `jax.remat`/`jax.checkpoint` to an impure function**
#
# For example, here are ways you can and can't use `vmap` with impure functions:
# +
# vmap over ref args is okay
def dist(x, y, out_ref):
assert x.ndim == y.ndim == 1
assert out_ref.ndim == 0
out_ref[...] = jnp.sum((x - y) ** 2)
vecs = jnp.arange(12.).reshape(3, 4)
out_ref = jax.new_ref(jnp.zeros((3, 3)))
jax.vmap(jax.vmap(dist, (0, None, 0)), (None, 0, 0))(vecs, vecs, out_ref) # ok!
print(out_ref)
# +
# vmap with a closed-over ref is not
x_ref = jax.new_ref(0.)
def err5(x):
x_ref[...] = x
try:
jax.vmap(err5)(jnp.arange(3.)) # error!
except Exception as e:
print(e)
# -
# The latter is an error because it's not clear which value `x_ref` should be
# after we run `jax.vmap(err5)`.
#
# ### `Ref`s and automatic differentiation
#
# Autodiff can be applied to pure functions as before, even if they use array refs
# internally. For example:
# +
@jax.jit
def pure2(x):
ref = jax.new_ref(x)
ref[...] = ref[...] + ref[...]
return ref[...]
print(jax.grad(pure1)(3.0)) # 2.0
# -
# Autodiff can also be applied to functions that take array refs as arguments, if
# those arguments are only used for plumbing and not involved in differentiation:
# +
# error
def err6(x, some_plumbing_ref):
y = x + x
some_plumbing_ref[...] += y
return y
# fine
def foo(x, some_plumbing_ref):
y = x + x
some_plumbing_ref[...] += jax.lax.stop_gradient(y)
return y
# -
# You can combine plumbing refs with `custom_vjp` to plumb data out of the
# backward pass of a differentiated function:
# +
# First, define the helper `stash_grads`:
@jax.custom_vjp
def stash_grads(grads_ref, x):
    """Identity on `x`; on the backward pass, stores x's cotangent in grads_ref."""
    return x

def stash_grads_fwd(grads_ref, x):
    # Returning the ref from the fwd rule marks it as shared with the bwd rule
    # (see the prose below: fwd rules may only return refs they received).
    return x, grads_ref

def stash_grads_bwd(grads_ref, g):
    grads_ref[...] = g  # side effect: stash the incoming gradient
    return None, g  # no gradient w.r.t. the ref; pass g through for x

stash_grads.defvjp(stash_grads_fwd, stash_grads_bwd)
# +
# Now, use `stash_grads` to stash intermediate gradients:
def f(x, grads_ref):
x = jnp.sin(x)
x = stash_grads(grads_ref, x)
return x
grads_ref = jax.new_ref(0.)
f(1., grads_ref)
print(grads_ref)
# -
# Notice `stash_grads_fwd` is returning a `Ref` here. That's a special
# allowance for `custom_vjp` fwd rules: it's really syntax for indicating which
# ref arguments should be shared by both the fwd and bwd rules. So any refs
# returned by a fwd rule must be arguments to that fwd rule.
#
# ### `Ref`s and performance
#
# At the top level, when calling `jit`\-decorated functions, `Ref`s obviate
# the need for donation, since they are effectively always donated:
# +
@jax.jit
def sin_inplace(x_ref):
x_ref[...] = jnp.sin(x_ref[...])
x_ref = jax.new_ref(jnp.arange(3.))
print(x_ref.unsafe_buffer_pointer(), x_ref)
sin_inplace(x_ref)
print(x_ref.unsafe_buffer_pointer(), x_ref)
# -
# Here `sin_inplace` operates in-place, updating the buffer backing `x_ref` so
# that its address stays the same.
#
# Under a `jit`, you should expect array references to point to fixed buffer
# addresses, and for indexed updates to be performed in-place.
#
# **Temporary caveat:** dispatch from Python to impure `jit`\-compiled functions
# that take `Ref` inputs is currently slower than dispatch to pure
# `jit`\-compiled functions, since it takes a less optimized path.
#
# ### `foreach`, a new way to write `scan`
#
# As you may know, `jax.lax.scan` is a loop construct with a built-in fixed access
# pattern for scanned-over inputs and outputs. The access pattern is built in for
# autodiff reasons: if we were instead to slice into immutable inputs directly,
# reverse-mode autodiff would end up creating one-hot gradients and summing them
# up, which can be asymptotically inefficient. See [Sec 5.3.3 of the Dex
# paper](https://arxiv.org/pdf/2104.05372).
#
# But reading slices of `Ref`s doesn't have this efficiency problem: when we
# apply reverse-mode autodiff, we always generate in-place accumulation
# operations. As a result, we no longer need to be constrained by `scan`'s fixed
# access pattern. We can write more flexible loops, e.g. with non-sequential
# access.
#
# Moreover, having mutation available allows for some syntax tricks, like in this
# recipe for a `foreach` decorator:
# +
import jax
import jax.numpy as jnp
from jax.lax import scan
def foreach(*args):
    """Decorator form of scan: run the decorated body over `args` elementwise.

    `@foreach(xs)` executes the loop immediately and binds the decorated name
    to the stacked per-element results (the scan's ys).
    """
    def decorator(body):
        def step(carry, elts):
            return None, body(*elts)
        _, ys = scan(step, None, args)
        return ys
    return decorator
# +
r = jax.new_ref(0)
xs = jnp.arange(10)
@foreach(xs)
def ys(x):
r[...] += x
return x * 2
print(r) # Ref(45, dtype=int32)
print(ys) # [ 0 2 4 6 8 10 12 14 16 18]
# -
# Here, the loop runs immediately, updating `r` in-place and binding `ys` to be
# the mapped result.
| {
"repo_id": "jax-ml/jax",
"file_path": "docs/array_refs.py",
"license": "Apache License 2.0",
"lines": 351,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:docs/sphinxext/source_include.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import ast
from pathlib import Path
from docutils import nodes
from sphinx.util.docutils import SphinxDirective
from sphinx.util.logging import getLogger
logger = getLogger(__name__)
# (The parse_lines_spec and get_tagged_block functions are unchanged)
def parse_lines_spec(spec: str) -> list[int]:
    """Parse a spec like "1,4-6,9" into an explicit list of line numbers.

    Ranges ("a-b") are inclusive on both ends; an empty spec yields [].
    """
    result: list[int] = []
    if not spec:
        return result
    for token in spec.split(","):
        token = token.strip()
        if "-" in token:
            lo, hi = token.split("-", 1)
            result.extend(range(int(lo), int(hi) + 1))
        else:
            result.append(int(token))
    return result
def get_tagged_block(filepath, tag, lines_spec=None):
    """Return the text between a pair of `# tag: <tag>` marker lines.

    Args:
        filepath: path to the source file (str or Path).
        tag: tag name; matched literally even if it contains regex
            metacharacters.
        lines_spec: optional sub-selection of the block's lines — a Python
            list literal ("[0, 2]"), slice syntax ("1:4", "::2"), or a single
            index ("3"). Selected lines are dedented to the first line's
            indent.

    Returns:
        The (possibly sub-selected) block text; on any failure, an error
        message string (the directive renders whatever is returned).
    """
    try:
        full_path = Path(filepath)
        if not full_path.exists():
            raise FileNotFoundError(f"Source file not found at {full_path}")
        content_full = full_path.read_text()
        # Fix: re.escape the tag so names like "a+b" or "step[1]" match
        # literally instead of being interpreted as regex syntax.
        escaped_tag = re.escape(tag)
        regex_pattern = rf"# tag: {escaped_tag}\n(.*?)\s*# tag: {escaped_tag}"
        pattern = re.compile(regex_pattern, re.DOTALL)
        match = pattern.search(content_full)
        if not match:
            raise ValueError(f"Tag '{tag}' not found in '{filepath}'")
        content = match.group(1).strip("\n")
        if lines_spec is None:
            return content
        line_list = content.split("\n")
        if lines_spec.startswith("[") and lines_spec.endswith("]"):
            # Explicit list of line indices, e.g. "[0, 2, 5]".
            indexer = ast.literal_eval(lines_spec)
            final_lines = [line_list[i] for i in indexer]
        elif ":" in lines_spec:
            # Slice syntax, e.g. "1:4" or "::2"; empty parts become None.
            parts_str = (lines_spec.split(":") + ["", "", ""])[:3]
            indexer = slice(*(int(p.strip()) if p.strip() else None for p in parts_str))
            final_lines = line_list[indexer]
        else:
            # Single line index.
            indexer = int(lines_spec)
            final_lines = [line_list[indexer]]
        if not final_lines:
            return ""
        # Dedent the selection relative to its first line.  (The inner
        # try/except IndexError here was dead code — string slicing cannot
        # raise IndexError — so it has been removed; the outer handler
        # still catches everything.)
        indent_level = len(final_lines[0]) - len(final_lines[0].lstrip())
        return "\n".join(line[indent_level:] for line in final_lines)
    except Exception as e:
        # Never raise into Sphinx: log and render the error text instead.
        logger.warning("Error processing tagged_block: %s", e)
        return f"Error processing tagged_block for tag '{tag}' in '{filepath}'."
class TaggedBlockDirective(SphinxDirective):
    """`tagged-block <file> <tag> [lines]`: render a tagged source snippet.

    Resolves the file relative to the Sphinx source dir, extracts the text
    between the `# tag:` markers, and emits it as a Python literal block.
    The optional `:hl_lines:` option highlights the given lines.
    """

    has_content = False
    required_arguments = 2
    optional_arguments = 1
    option_spec = {
        "hl_lines": str,
    }

    def run(self):
        rel_path, tag = self.arguments[0], self.arguments[1]
        lines_spec = self.arguments[2] if len(self.arguments) > 2 else None
        filepath = Path(self.env.srcdir) / rel_path
        code = get_tagged_block(filepath, tag, lines_spec)
        block = nodes.literal_block(code, code)
        block["language"] = "python"
        if "hl_lines" in self.options:
            block["highlight_args"] = {
                "hl_lines": parse_lines_spec(self.options["hl_lines"])
            }
        return [block]
def setup(app):
    """Sphinx extension entry point: register the `tagged-block` directive."""
    app.add_directive("tagged-block", TaggedBlockDirective)
    metadata = {
        "version": "0.1",
        # Declaring parallel safety fixes the "parallel reading" warning.
        "parallel_read_safe": True,
        "parallel_write_safe": True,
    }
    return metadata
| {
"repo_id": "jax-ml/jax",
"file_path": "docs/sphinxext/source_include.py",
"license": "Apache License 2.0",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:docs/the-training-cookbook.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools as ft
import itertools as it
import time
from dataclasses import dataclass
from typing import Iterator
import jax
import jax.numpy as jnp
import numpy as np
from jax.sharding import AxisType
ode = """
We are the music makers
And we are the dreamers of dreams
Wandering by lone sea-breakers
And sitting by desolate streams;
World-losers and world-forsakers
On whom the pale moon gleams
Yet we are the movers and shaker
Of the world for ever, it seems
"""
# tag: config
@jax.tree_util.register_static
@dataclass(kw_only=True, frozen=True)
class Config:
  """Static training configuration: hyperparameters and sharding specs.

  Registered as a static pytree node, so a Config can be passed through
  jax.jit without being traced.  __post_init__ installs the global mesh.
  """
  # Device mesh: a single "fsdp" axis over 8 devices.
  mesh_axis_names: tuple[str, ...] = ("fsdp",)
  mesh_shape: tuple[int, ...] = (8,)
  # Training loop.
  seq_length: int = 128
  num_train_steps: int = 10**6
  host_batch_size: int = 16
  # Adam-style optimizer hyperparameters.
  learning_rate: float = 1e-4
  beta_1: float = 0.9
  beta_2: float = 0.999
  eps: float = 1e-8
  eps_root: float = 0.0
  # Model shape and initialization.
  param_seed: int = 12738
  num_layers: int = 4
  embed_dim: int = 512
  mlp_dim: int = 512 * 4
  vocab_size: int = 2**8 # uint8 ascii encoding
  num_heads: int = 8
  head_dim: int = 128
  dtype: str = "bfloat16"
  # Parameter PartitionSpecs (axes named over the mesh; None = replicated).
  embed: jax.P = jax.P(None, None)
  pos_embed: jax.P = jax.P(None, None)
  att_qkv: jax.P = jax.P(None, "fsdp", None, None)
  att_out: jax.P = jax.P("fsdp", None, None)
  mlp_in: jax.P = jax.P("fsdp", None)
  mlp_out: jax.P = jax.P(None, "fsdp")
  in_kernel: jax.P = jax.P(None, None)
  in_bias: jax.P = jax.P(None)
  out_kernel: jax.P = jax.P("fsdp", None)
  out_bias: jax.P = jax.P(None)
  # Activation PartitionSpecs (leading batch dim sharded over "fsdp").
  act_ids: jax.P = jax.P("fsdp")
  act_seq: jax.P = jax.P("fsdp", None, None)
  act_att: jax.P = jax.P("fsdp", None, None, None)
  act_hidden: jax.P = jax.P("fsdp", None, None)

  def __post_init__(self):
    # Side effect: build the device mesh and install it process-globally.
    mesh = jax.make_mesh(self.mesh_shape, self.mesh_axis_names, len(self.mesh_shape) * (AxisType.Explicit,))
    jax.sharding.set_mesh(mesh)
# tag: config
@jax.tree_util.register_pytree_with_keys_class
class dot_dict(dict):
    """A dict with attribute-style access, registered as a JAX pytree.

    Flattening sorts keys so the leaf order is deterministic; the sorted key
    tuple is the (static) aux data used to rebuild the dict.
    """

    def __setattr__(self, name, value):
        self[name] = value

    def __getattr__(self, name):
        return self[name]

    def tree_flatten_with_keys(self):
        names = tuple(sorted(self))
        keyed_children = tuple((jax.tree_util.DictKey(n), self[n]) for n in names)
        return keyed_children, names

    @classmethod
    def tree_unflatten(cls, aux, children):
        return cls(zip(aux, children))
# tag: get-param-state
def init_param_state(config: Config) -> dot_dict:
  """Build the initial model parameters as a nested dot_dict.

  Kernels use He-normal init; biases and the positional embedding start at
  zero.  Every leaf is created directly with its sharding spec from `config`,
  so parameters are born sharded on the mesh.
  """
  root_key = jax.random.key(config.param_seed)
  # Infinite stream of fresh keys: fold successive integers into root_key.
  key = map(ft.partial(jax.random.fold_in, root_key), it.count())
  zero_init = jax.nn.initializers.constant(0.0)
  he_init = jax.nn.initializers.he_normal(1, 1)
  dtype = config.dtype
  params = dot_dict(
      pos_embed=zero_init(next(key), (config.seq_length, config.embed_dim), dtype, config.pos_embed),
      layers=dot_dict(),
  )
  params.embedding = he_init(next(key), (config.vocab_size, config.embed_dim), dtype, config.embed)
  params.linear_in = dot_dict(
      kernel=he_init(next(key), (1, config.embed_dim), dtype, config.in_kernel),
      bias=zero_init(next(key), (config.embed_dim,), dtype, config.in_bias),
  )
  # NOTE(review): linear_out has no bias, so config.out_bias is unused here —
  # confirm that is intentional.
  params.linear_out = dot_dict(
      kernel=he_init(next(key), (config.embed_dim, config.vocab_size), dtype, config.out_kernel),
  )
  for layer in range(config.num_layers):
    qkv_shape = (3, config.embed_dim, config.num_heads, config.head_dim)
    out_shape = (config.num_heads, config.head_dim, config.embed_dim)
    params.layers[layer] = dot_dict(
        attention=dot_dict(
            qkv=he_init(next(key), qkv_shape, dtype, config.att_qkv),
            out=he_init(next(key), out_shape, dtype, config.att_out),
        ),
        mlp=dot_dict(
            in_kernel=he_init(next(key), (config.embed_dim, config.mlp_dim), dtype, config.mlp_in),
            out_kernel=he_init(next(key), (config.mlp_dim, config.embed_dim), dtype, config.mlp_out),
        ),
    )
  return params # tag: get-param-state
# tag: model-apply
def model_apply(config: Config, params: dot_dict, tokens: jax.Array) -> jax.Array:
out = params.embedding.at[tokens].get(out_sharding=config.act_seq)
out += params.pos_embed
del tokens
for layer in range(config.num_layers):
block = params.layers[layer]
att_skip = out # 1 billion dollars in venture capital funding please
qkv = jnp.einsum("bsd,3dkh->bs3kh", out, block.attention.qkv, out_sharding=config.act_att)
out = jax.nn.dot_product_attention(qkv[:, :, 0, :], qkv[:, :, 1, :], qkv[:, :, 2, :], is_causal=True)
out = jnp.einsum("bskh,khd->bsd", out, block.attention.out, out_sharding=config.act_seq)
out += att_skip
out *= jax.lax.rsqrt(jnp.linalg.norm(out, axis=-1, keepdims=True) + 1e-6)
mlp_skip = out # machine learning circa 1986
out = jnp.einsum("bsd,dh->bsh", out, block.mlp.in_kernel, out_sharding=config.act_hidden)
out = jax.nn.gelu(out)
out = jnp.einsum("bsh,hd->bsd", out, block.mlp.out_kernel, out_sharding=config.act_seq)
out += mlp_skip
out *= jax.lax.rsqrt(jnp.linalg.norm(out, axis=-1, keepdims=True) + 1e-6)
logits = jnp.einsum("bsd,dl->bsl", out, params.linear_out.kernel, out_sharding=config.act_seq)
return logits # tag: model-apply
# tag: get-adam-state
def init_adam_state(param: jax.Array) -> dot_dict:
adam_state = dot_dict(mu=jnp.zeros_like(param), nu=jnp.zeros_like(param), count=jnp.array(0))
return adam_state # tag: get-adam-state
# tag: adam-apply
def adam_update(config: Config, param: jax.Ref, grad: jax.Array, adam_state: dot_dict):
adam_state.mu[...] = (1 - config.beta_1) * adam_state.mu[...] + config.beta_1 * grad
adam_state.nu[...] = (1 - config.beta_2) * adam_state.nu[...] + config.beta_2 * grad**2
adam_state.count[...] += 1
mu_hat = adam_state.mu[...] / (1 - config.beta_1 ** adam_state.count[...])
nu_hat = adam_state.nu[...] / (1 - config.beta_2 ** adam_state.count[...])
param[...] -= config.learning_rate * mu_hat / (jnp.sqrt(nu_hat + config.eps_root) + config.eps)
# tag: adam-apply
# tag: get-train-state
@jax.jit
def init_train_state(config: Config) -> dot_dict:
train_state = dot_dict()
train_state.params = init_param_state(config)
train_state.opt = jax.tree.map(init_adam_state, train_state.params)
return train_state # tag: get-train-state
# tag: train-step
@jax.jit
def train_step(config: Config, train_state: dot_dict, batch: dict) -> dict:
def loss_fn(params):
logits = model_apply(config, params, batch["observed_ids"])
labels = jax.nn.one_hot(batch["target_ids"], config.vocab_size)
return -(labels * jax.nn.log_softmax(logits)).mean()
params = jax.tree.map(jax.ref.get, train_state.params)
loss, grad = jax.value_and_grad(loss_fn)(params)
jax.tree.map(ft.partial(adam_update, config), train_state.params, grad, train_state.opt)
metrics = {"train_loss": loss}
return metrics # tag: train-step
# tag: record-writer
class RecordWriter:
prev_metrics = None
def __call__(self, cur_metrics: dict):
self.prev_metrics, log_metrics = cur_metrics, self.prev_metrics
if log_metrics is None:
return
print(*it.starmap("{}: {}".format, log_metrics.items()), sep="\t")
# tag: record-writer
# tag: get-dataset
def get_dataset(config: Config, single_batch=ode) -> Iterator[dict[str, np.ndarray]]:
while True:
observed_array = np.frombuffer(single_batch.encode("ascii"), dtype=np.uint8)
target_array = np.roll(observed_array, -1)
time.sleep(0.5)
yield { # repeat the sequence across the batch size to simulate multiple data points
"observed_ids": np.tile(observed_array[: config.seq_length], (config.host_batch_size, 1)),
"target_ids": np.tile(target_array[: config.seq_length], (config.host_batch_size, 1)),
}
# tag: get-dataset
# tag: get-dataset-on-device
def get_dataset_on_device(config: Config) -> Iterator[dict[str, jax.Array]]:
datset = get_dataset(config)
sharding = jax.P(config.mesh_axis_names)
return map(ft.partial(jax.make_array_from_process_local_data, sharding), datset)
# tag: get-dataset-on-device
# tag: train-loop
def train_loop(config: Config):
record_writer = RecordWriter()
train_state = init_train_state(config)
train_state = jax.tree.map(jax.ref.new_ref, train_state)
batch = iter(get_dataset_on_device(config))
for step in range(config.num_train_steps):
metrics = train_step(config, train_state, next(batch))
record_writer({"step": step} | metrics)
# tag: train-loop
if __name__ == "__main__":
jax.config.update("jax_platform_name", "cpu")
jax.config.update("jax_num_cpu_devices", 8)
train_loop(config=Config())
| {
"repo_id": "jax-ml/jax",
"file_path": "docs/the-training-cookbook.py",
"license": "Apache License 2.0",
"lines": 208,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:jax/_src/buffer_callback.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Callable, Sequence
import functools
from typing import Any
import numpy as np
from jax._src import core
from jax._src import dispatch
from jax._src import effects
from jax._src import ffi
from jax._src import tree_util
from jax._src import util
from jax._src.interpreters import ad
from jax._src.interpreters import batching
from jax._src.interpreters import mlir
from jax._src.lib import ffi as ffi_lib
export = util.set_module("jax.experimental.buffer_callback")
Buffer = export(ffi_lib.Buffer)
ExecutionStage = export(ffi_lib.ExecutionStage)
ExecutionContext = export(ffi_lib.ExecutionContext)
def buffer_callback(
callback: Callable[..., None],
result_shape_dtypes: object,
*,
has_side_effect: bool = False,
vmap_method: str | None = None,
input_output_aliases: dict[int, int] | None = None,
command_buffer_compatible: bool = False,
):
"""An experimental callback that operates in place on device buffers.
Only supported on CPU and GPU backends.
Note that the plan is for this to eventually be replaced by a consolidated
callback API built using JAX mutable arrays, but for now this provides a
mechanism for prototyping computational kernels using other Python libraries
including Numpy, PyTorch, Cupy, and others.
Let's start with a simple example:
>>> def py_add_one_inplace(ctx, out, x):
... np.asarray(out)[...] = np.asarray(x) + 1
...
>>> x = jnp.array(41, dtype=jnp.int32)
>>> out_type = jax.ShapeDtypeStruct(x.shape, x.dtype)
>>> add_one = buffer_callback(py_add_one_inplace, out_type)
>>> add_one(x) # doctest: +SKIP
Array(42, dtype=int32)
In this example, we're executing a numpy computation via JAX, and this could
have been implemented using :func:`jax.pure_callback`, but in this case, the
output is being populated in-place. This means that JAX doesn't need to copy
the output arrays upon returning from the callback. Note that even though the
callback function operates on mutable buffers, JAX still sees this as an
operation that consumes and produces regular immutable JAX arrays.
Unlike the other JAX callback APIs, ``buffer_callback`` requires that the
user-defined Python function have the following signature:
.. code-block:: python
def callback(ctx: ExecutionContext, out, *args) -> None:
...
where ``ctx`` is an instance of
:class:`~jax.experimental.buffer_callback.ExecutionContext`, which mainly
provides access to XLA's computation stream when running on GPU, ``out`` is a
pytree of mutable :class:`~jax.experimental.buffer_callback.Buffer` objects,
and the ``args`` arguments have the same pytree structure as the inputs, but
each leaf is :class:`~jax.experimental.buffer_callback.Buffer`. This callback
should not return any values, and it should overwrite the ``out`` buffers in
place to output values back to JAX.
It's important to note that this Python function can't really be called
except via ```buffer_callback`` itself, because it's not (yet!) possible to
construct mutable JAX buffers directly in Python.
The bespoke :class:`~jax.experimental.buffer_callback.Buffer` type is an
array-like object that supports the ``__array__`` protocol on CPU, the
``__cuda_array_interface__`` protocol on GPU, and the ``__dlpack__`` protocol
on both CPU and GPU.
Args:
callback: A Python function with the signature and behavior described above.
result_shape_dtypes: A pytree whose leaves have ``shape`` and ``dtype``
attributes, with a structure that matches the expected output of the
callback function at runtime. :class:`jax.ShapeDtypeStruct` is often used
to define leaf values.
has_side_effect: Whether the callback has side effects.
vmap_method: A string specifying how the callback transforms under
:func:`~jax.vmap` as described in the docs for :func:`~jax.pure_callback`.
input_output_aliases: a dictionary mapping the index of some inputs to
the index of the output that aliases them. These indices are in the
flattened inputs and outputs.
command_buffer_compatible: if ``True``, the callback will be traced into
the command buffer. This means that the Python code should only be
executed once, and then the operations will be replayed for every
subsequent call.
Returns:
A new callable that accepts :class:`jax.Array` inputs (and pytrees thereof),
and pytree of :class:`jax.Array` objects whose structure matches that
of ``result_shape_dtypes``.
See Also:
- :func:`jax.pure_callback`: callback designed for pure host functions.
- :func:`jax.experimental.io_callback`: callback designed for impure host
functions.
- :func:`jax.debug.callback`: callback designed for general-purpose
debugging.
- :func:`jax.debug.print`: callback designed for printing.
"""
flat_shape_dtypes, out_tree = tree_util.tree_flatten(result_shape_dtypes)
flat_result_avals = tuple(
core.ShapedArray(x.shape, x.dtype) for x in flat_shape_dtypes
)
def wrapped_callback(*args, **kwargs):
flat_args, in_tree = tree_util.tree_flatten((args, kwargs))
in_avals = [core.get_aval(x) for x in flat_args]
static_input_output_aliases: tuple[tuple[int, int], ...] = ()
if input_output_aliases is not None:
for i_idx, o_idx in sorted(input_output_aliases.items()):
i_idx, o_idx = int(i_idx), int(o_idx)
if i_idx >= len(args):
raise ValueError(
f"input_output_aliases contains the mapping '{i_idx}:{o_idx}' "
f"with input index {i_idx} outside the range [0, "
f"{len(args)}).")
if o_idx >= len(flat_result_avals):
raise ValueError(
f"input_output_aliases contains the mapping '{i_idx}:{o_idx}' "
f"with output index {o_idx} outside the range [0, "
f"{len(flat_result_avals)}).")
in_aval = in_avals[i_idx]
out_aval = flat_result_avals[o_idx]
if not ffi._check_compatible_avals(in_aval, out_aval):
raise ValueError(
f"input_output_aliases contains the mapping '{i_idx}:{o_idx}' "
f"referring to an input with abstract value {in_aval} and an "
f"output with a different abstract value {out_aval}.")
static_input_output_aliases += ((i_idx, o_idx),)
out_flat = buffer_callback_p.bind(
*flat_args,
callback=callback,
result_avals=flat_result_avals,
in_tree=in_tree,
out_tree=out_tree,
vmap_method=vmap_method,
has_side_effect=has_side_effect,
input_output_aliases=static_input_output_aliases,
command_buffer_compatible=command_buffer_compatible,
)
return tree_util.tree_unflatten(out_tree, out_flat)
return wrapped_callback
buffer_callback_p = core.Primitive("buffer_callback")
buffer_callback_p.multiple_results = True
dispatch.simple_impl(buffer_callback_p)
class BufferCallbackEffect(effects.Effect):
def __str__(self):
return "BufferCallback"
_BufferCallbackEffect = BufferCallbackEffect()
effects.lowerable_effects.add_type(BufferCallbackEffect)
effects.control_flow_allowed_effects.add_type(BufferCallbackEffect)
@buffer_callback_p.def_effectful_abstract_eval
def _buffer_callback_abstract_eval(
*args,
result_avals: tuple[core.ShapedArray, ...],
has_side_effect: bool,
**_,
):
del args
effects = {_BufferCallbackEffect} if has_side_effect else core.no_effects
return result_avals, effects
def _buffer_callback_jvp_rule(*args, **kwargs):
del args, kwargs
raise ValueError(
"Buffer callbacks do not support JVP. "
"Please use `jax.custom_jvp` to use callbacks while taking gradients.")
ad.primitive_jvps[buffer_callback_p] = _buffer_callback_jvp_rule
def _buffer_callback_transpose_rule(*args, **kwargs):
del args, kwargs
raise ValueError(
"Buffer callbacks do not support transpose. "
"Please use `jax.custom_vjp` to use callbacks while taking gradients.")
ad.primitive_transposes[buffer_callback_p] = _buffer_callback_transpose_rule
batching.primitive_batchers[buffer_callback_p] = functools.partial(
ffi.ffi_batching_rule, buffer_callback_p
)
def _buffer_callback_lowering(
ctx: mlir.LoweringRuleContext,
*args: Any,
callback,
in_tree: Any,
out_tree: Any,
has_side_effect: bool,
input_output_aliases: Sequence[tuple[int, int]],
command_buffer_compatible: bool,
**_,
):
if len(ctx.module_context.platforms) > 1:
raise NotImplementedError("multi-platform lowering for buffer_callback")
platform = ctx.module_context.platforms[0]
target_name = {
"cpu": "xla_buffer_python_cpu_callback",
"cuda": "xla_buffer_python_gpu_callback",
"rocm": "xla_buffer_python_gpu_callback",
}.get(platform)
if target_name is None:
raise ValueError(f"`buffer_callback` not supported on {platform} backend.")
if command_buffer_compatible and platform in ("cuda", "rocm"):
target_name += "_cmd_buffer"
def wrapped_callback(exec_ctx, *args: Any):
args_in, args_out = util.split_list(args, [in_tree.num_leaves])
py_args_in, py_kwargs_in = tree_util.tree_unflatten(in_tree, args_in)
py_args_out = tree_util.tree_unflatten(out_tree, args_out)
if callback(exec_ctx, py_args_out, *py_args_in, **py_kwargs_in) is not None:
raise ValueError("buffer_callback callback must not return any values.")
return ()
ctx.module_context.add_host_callback(wrapped_callback)
index = np.uint64(len(ctx.module_context.host_callbacks) - 1)
rule = ffi.ffi_lowering(
target_name,
has_side_effect=has_side_effect,
operand_output_aliases=dict(input_output_aliases),
)
return rule(ctx, *args, index=index)
mlir.register_lowering(buffer_callback_p, _buffer_callback_lowering)
| {
"repo_id": "jax-ml/jax",
"file_path": "jax/_src/buffer_callback.py",
"license": "Apache License 2.0",
"lines": 224,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:jax/_src/frozen_dict.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, TypeVar
from collections.abc import Iterator, Mapping
K = TypeVar("K")
V = TypeVar("V")
class FrozenDict(Mapping[K, V]):
def __init__(self, d: Mapping[K, V]):
self._d = dict(d.items())
def __repr__(self) -> str:
return f"FrozenDict({self._d!r})"
def __str__(self) -> str:
return f"FrozenDict({self._d})"
def __getitem__(self, key: K) -> V:
return self._d[key]
def __hash__(self) -> int:
# This assumes that the values are hashable.
return hash(frozenset(self._d.items()))
def __eq__(self, other: Any) -> bool:
if not isinstance(other, FrozenDict):
return False
return self._d == other._d
def __iter__(self) -> Iterator[K]:
return iter(self._d)
def __len__(self) -> int:
return len(self._d)
def get(self, key: K) -> V | None: # type: ignore
return self._d.get(key, None)
| {
"repo_id": "jax-ml/jax",
"file_path": "jax/_src/frozen_dict.py",
"license": "Apache License 2.0",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:jax/_src/hashable_array.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the
import numpy as np
class HashableArray:
__slots__ = ["val"]
val: np.ndarray
def __init__(self, val):
self.val = np.array(val, copy=True)
self.val.setflags(write=False)
def __repr__(self):
return f"HashableArray({self.val!r})"
def __str__(self):
return f"HashableArray({self.val})"
def __hash__(self):
return hash((self.val.shape, self.val.dtype, self.val.tobytes()))
def __eq__(self, other):
return isinstance(other, HashableArray) and np.array_equal(
self.val, other.val
)
| {
"repo_id": "jax-ml/jax",
"file_path": "jax/_src/hashable_array.py",
"license": "Apache License 2.0",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:jax/_src/hijax.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from dataclasses import dataclass
from functools import partial
import inspect
import itertools as it
from typing import Any, Hashable, Callable
from jax._src import api
from jax._src import config
from jax._src import core
from jax._src import dtypes
from jax._src import effects
from jax._src.api_util import resolve_kwargs, infer_argnums_and_argnames
from jax._src.core import typeof
from jax._src.interpreters import ad
from jax._src.interpreters import batching
from jax._src.interpreters import partial_eval as pe
from jax._src.interpreters import remat
from jax._src.custom_derivatives import CustomVJPPrimal, _temporary_dtype_exception
from jax._src.errors import UnexpectedTracerError
from jax._src.state.types import AbstractRef
from jax._src import ad_util
from jax._src.util import safe_zip, safe_map, split_list, unzip2
from jax._src.tree_util import (
tree_map, tree_flatten, tree_unflatten, tree_leaves, tree_leaves_checked,
broadcast_prefix, register_static, tree_structure, tree_map_with_path,
keystr)
map, unsafe_map = safe_map, map
zip, unsafe_zip = safe_zip, zip
PyTreeOfAvals = Any
PyTreeDef = Any
LoVal = Any
HiVal = Any
# Hijax extension API
Ty = core.AbstractValue
LoType = core.AbstractValue
QDD = core.QuasiDynamicData
ShapedArray = core.ShapedArray
class HiPrimitive(core.Primitive):
def __init__(self, name):
self.name = name
ad.primitive_jvps[self] = self.jvp
ad.primitive_transposes[self] = self.transpose
def is_high(self, *avals, **params) -> bool:
return True
def is_effectful(self, params) -> bool: # type: ignore
return False # default immutable
# type checking and forward type propagation
def abstract_eval(self, *arg_avals, **params):
assert False, "must override"
# lowering implements the primitive in terms of lojax inputs/outputs/ops
def to_lojax(self, *lotypes_wrapped_in_hitypes, **params): # pyrefly: ignore[bad-override]
assert False, f"must override for {self}"
# autodiff interface
def jvp(self, primals, tangents, **params):
assert False, "must override"
# transposition is only required if the primitive is linear in some inputs
def transpose(self, *args, **params):
assert False, "must override"
class HiType(core.AbstractValue):
is_high = True
has_qdd = False # immutable
# type equality
def __hash__(self): assert False, "must override"
def __eq__(self, other): assert False, "must override"
# lowering from hijax type to lojax types
def lo_ty(self) -> list[core.AbstractValue]:
assert False, "must override"
# define lowering from hijax value to lojax values and back (like pytrees)
def lower_val(self, hi_val: HiVal) -> list[LoVal]: # TODO(mattjj); not lovals
assert False, "must override"
def raise_val(self, *lo_vals: LoVal) -> HiVal:
assert False, "must override"
# autodiff interface
def to_tangent_aval(self) -> HiType:
assert False, "must override"
def to_cotangent_aval(self) -> HiType:
return self.to_tangent_aval()
# the next two are required if this type is itself a tangent type
def vspace_zero(self) -> HiVal:
assert False, "must override"
def vspace_add(self, x: HiVal, y: HiVal) -> HiVal:
assert False, "must override"
# vmap interface (also needed for scan)
def dec_rank(self, size: int | None, spec: MappingSpec) -> HiType:
assert False, "must override"
def inc_rank(self, size: int | None, spec: MappingSpec) -> HiType:
assert False, "must override"
# scan interface
def leading_axis_spec(self) -> MappingSpec:
assert False, "must override"
# shard_map interface
def shard(self, mesh, manual_axes: frozenset, check_vma: bool, spec: HipSpec
) -> HiType:
assert False, "must override"
def unshard(self, mesh, check_vma: bool, spec: HipSpec) -> HiType:
assert False, "must override"
class MutableHiType(core.AbstractValue):
is_high = True
has_qdd = True # mutable and potentially type-changing
type_state = core.aval_method(core.cur_qdd)
# type equality
def __hash__(self): assert False, "must override"
def __eq__(self, other): assert False, "must override"
# define lowering from (mutable) hijax type to (immutable) lojax types
def lo_ty_qdd(self, state: QDD, /) -> list[core.AbstractValue]: # pytype: disable=signature-mismatch # pyrefly: ignore[bad-override]
assert False, "must override"
def lo_ty(self):
assert False, "mutable hitypes should use lo_ty_qdd instead"
# define lowering from hijax value to lojax values and back, depending on qdd
def new_from_loval(self, state: QDD, *vals: LoVal) -> HiVal:
assert False, "must override"
def read_loval(self, state: QDD, val: HiVal) -> list[LoVal]:
assert False, "must override"
# define how to mutate/set the mutable hijax value given immutable lojax vals
def update_from_loval(self, state: QDD, val: HiVal, *lo_vals: LoVal) -> None:
assert False, "must override"
# autodiff interface
def to_tangent_aval(self) -> HiType:
assert False, "must override"
# Subclasses should override if the cotangent type is a function of primal
# type. For example, CT unreduced = reduced and vice-versa.
def to_cotangent_aval(self) -> HiType:
return self.to_tangent_aval()
def register_hitype(val_cls, typeof_fn) -> None:
core.pytype_aval_mappings[val_cls] = typeof_fn
dtypes.canonicalize_value_handlers[val_cls] = lambda x: x
def hijax_method(f):
return core.aval_method(f)
# Boxes
## Box API
def new_box():
(), treedef = tree_flatten(None)
return new_box_p.bind(treedef=treedef)
def box_get(box):
tys = core.cur_qdd(box)
leaf_vals = box_get_p.bind(box, avals=tuple(tys.leaf_avals))
return tree_unflatten(tys.treedef, leaf_vals)
def box_set(box, val):
leaves, treedef = tree_flatten(val)
box_set_p.bind(box, *leaves, treedef=treedef)
## Box implementation
@dataclass(frozen=True)
class BoxTypeState(QDD):
leaf_avals: tuple[core.AbstractValue, ...]
treedef: PyTreeDef
def to_tangent_qdd(self):
leaf_avals = tuple(a.to_tangent_aval() for a in self.leaf_avals)
return BoxTypeState(leaf_avals, self.treedef)
def normalize(self):
leaf_types = tuple(a.normalize() for a in self.leaf_avals)
return BoxTypeState(leaf_types, self.treedef)
class BoxTy(MutableHiType):
has_qdd = True
# forwarded to value
get = core.aval_method(box_get)
set = core.aval_method(box_set)
# aval interface: hashability and str_short
def __hash__(self): return hash(BoxTy)
def __eq__(self, other): return isinstance(other, BoxTy)
def str_short(self, short_dtypes=False, **_) -> str: # type: ignore
return 'BoxTy'
# mutable interface
def lo_ty_qdd(self, box_state):
return [lo_ty for t in box_state.leaf_avals for lo_ty in t.lo_ty()]
def new_from_loval(self, box_state: BoxTypeState, *lo_vals) -> Box: # type: ignore
lo_vals_ = iter(lo_vals)
hi_vals = [hi_ty.raise_val(*it.islice(lo_vals_, len(hi_ty.lo_ty()))) # type: ignore
for hi_ty in box_state.leaf_avals]
assert next(lo_vals_, None) is None
return Box._new(tree_unflatten(box_state.treedef, hi_vals)) # will be mutated
def read_loval(self, box_state: BoxTypeState, box) -> list: # type: ignore
leaf_vals, treedef = tree_flatten(box_get(box))
assert treedef == box_state.treedef
return [lo_val for hi_ty, hi_val in zip(box_state.leaf_avals, leaf_vals)
for lo_val in hi_ty.lower_val(hi_val)] # type: ignore
def update_from_loval(self, box_state: BoxTypeState, box, *lo_vals) -> None: # type: ignore
lo_vals_ = iter(lo_vals)
hi_vals = [hi_ty.raise_val(*it.islice(lo_vals_, len(hi_ty.lo_ty()))) # type: ignore
for hi_ty in box_state.leaf_avals]
assert next(lo_vals_, None) is None
box_set(box, tree_unflatten(box_state.treedef, hi_vals))
def to_tangent_aval(self):
return BoxTy()
# Override isinstance checks under tracing
class _BoxMeta(type):
def __instancecheck__(self, instance):
return (super().__instancecheck__(instance) or
isinstance(instance, core.Tracer) and
isinstance(core.typeof(instance), BoxTy))
class Box(metaclass=_BoxMeta): # noqa: F811
_val = None # always clobbered by __new__, but pytype likes this
# We want `Box(x)` to bind a primitive, so we override __new__ and provide a
# raw `_new` method below.
def __new__(cls, init_val=None):
(), treedef = tree_flatten(None)
box = new_box_p.bind(treedef=treedef)
box.set(init_val)
return box
@classmethod
def _new(cls, init_val):
new = super().__new__(cls)
new._val = init_val
return new
def get(self):
return box_get(self)
def set(self, val):
box_set(self, val)
def cur_qdd(self):
return self.type_state()
@property
def ty(self):
return BoxTy()
def type_state(self):
leaves, treedef = tree_flatten(self._val)
leaf_avals = tuple(map(core.typeof, leaves))
return BoxTypeState(leaf_avals, treedef)
register_hitype(Box, lambda b: b.ty)
class BoxEffect(effects.Effect): ...
box_effect = BoxEffect()
effects.control_flow_allowed_effects.add_type(BoxEffect)
effects.custom_derivatives_allowed_effects.add_type(BoxEffect)
class NewBox(HiPrimitive):
def is_high(self, *, treedef) -> bool: return True # type: ignore
def abstract_eval(self, *, treedef): # pyrefly: ignore[bad-override]
leaves, treedef = tree_flatten(None)
qdd = BoxTypeState(tuple(leaves), treedef)
return core.AvalQDD(BoxTy(), qdd), {box_effect}
def to_lojax(_, *, treedef): # pyrefly: ignore[bad-override]
return Box._new(None)
def jvp(_, primals, tangents, *, treedef): # pyrefly: ignore[bad-override]
assert False # TODO
def transpose(_, *args, treedef): # pyrefly: ignore[bad-override]
assert False # TODO
new_box_p = NewBox('new_box')
class BoxSet(HiPrimitive):
multiple_results = True
def is_high(self, *leaf_avals, treedef) -> bool: return True # type: ignore
def abstract_eval(self, box_ty, *leaf_avals, treedef): # pyrefly: ignore[bad-override]
box_ty.mutable_qdd.update(BoxTypeState(leaf_avals, treedef))
return [], {box_effect} # TODO better typechecking...
def to_lojax(_, box, *leaves, treedef): # pyrefly: ignore[bad-override]
box._val = tree_unflatten(treedef, leaves)
return []
def jvp(_, primals, tangents, *, treedef): # pyrefly: ignore[bad-override]
box, *vals = primals
box_dot, *val_dots = tangents
if type(box_dot) is ad_util.Zero:
raise Exception("can't differentiate Box.set operation, "
"did you forget jax.lax.stop_gradient?")
box_set_p.bind(box, *vals, treedef=treedef)
box_set_p.bind(box_dot, *val_dots, treedef=treedef)
return [], []
def transpose(_, *args, treedef): # pyrefly: ignore[bad-override]
assert False # TODO
box_set_p = BoxSet('box_set')
class BoxGet(HiPrimitive):
multiple_results = True
def abstract_eval(self, box_ty, *, avals): # pyrefly: ignore[bad-override]
return avals, {box_effect}
def to_lojax(_, box, *, avals): # pyrefly: ignore[bad-override]
return tree_leaves(box._val)
def jvp(_, primals, tangents, *, avals): # pyrefly: ignore[bad-override]
(box,), (box_dot,) = primals, tangents
return (
box_get_p.bind(box, avals=avals),
box_get_p.bind(box_dot, avals=tuple(a.to_tangent_aval() for a in avals))
)
def transpose(_, *args): # pyrefly: ignore[bad-override]
assert False # TODO
box_get_p = BoxGet('box_get')
# === new-style hijax primitive implementation ===
class VJPHiPrimitive:
in_avals: tuple[PyTreeOfAvals, ...]
out_aval: PyTreeOfAvals
params: dict[str, Hashable]
def __init__(self):
if not hasattr(self, 'in_avals'):
raise AttributeError("subclass __init__ should set `self.in_avals`")
if not hasattr(self, 'out_aval'):
raise AttributeError("subclass __init__ should set `self.out_aval`")
if not hasattr(self, 'params'):
raise AttributeError("subclass __init__ should set `self.params`")
if (type(self).vjp_bwd is not VJPHiPrimitive.vjp_bwd and
type(self).vjp_bwd_retval is not VJPHiPrimitive.vjp_bwd_retval):
raise AttributeError(f"subclass {type(self)} should not override both "
"`vjp_bwd` and `vjp_bwd_retval`")
self.in_avals_flat, self.in_tree = tree_flatten(self.in_avals)
self.out_avals_flat, self.out_tree = tree_flatten(self.out_aval)
self.__dict__.update(self.params)
# Operation implementation in terms of lojax primitives
def expand(self, *args):
raise NotImplementedError(f"subclass {type(self)} must implement `expand`")
# reverse-mode AD interface
def vjp_fwd(self, nzs_in, /, *args):
raise NotImplementedError(f"for grad support, subclass {type(self)} must "
"implement `vjp_fwd`")
def vjp_bwd(self, res, outgrad, *arg_accums):
args_grad = self.vjp_bwd_retval(res, outgrad)
tree_map(lambda acc, leaf_grad: acc.accum(leaf_grad), arg_accums, args_grad)
def vjp_bwd_retval(self, res, outgrad, /):
# Classic API: returns values instead of using accumulators
raise NotImplementedError(f"for grad support, subclass {type(self)} must "
"implement `vjp_bwd` or `vjp_bwd_retval`")
# optional forward-mode AD interfaces
def jvp(self, primals, tangents):
raise NotImplementedError(f"for jvp support, subclass {type(self)} must "
"implement `jvp`")
def lin(self, nzs_in, *primals):
raise NotImplementedError(f"for linearize support, subclass {type(self)} "
"must implement `lin` and `linearized`")
def linearized(self, residuals, *tangents):
raise NotImplementedError(f"for linearize support, subclass {type(self)} "
"must implement `lin` and `linearized`")
# vmap interface
def batch(self, axis_data, args, dims):
out_dim = self.batch_dim_rule(axis_data, dims)
return VmapOf(self, axis_data, dims, out_dim)(*args), out_dim
def batch_dim_rule(self, axis_data, dims, /):
raise NotImplementedError(f"for vmap support, subclass {type(self)} must "
"implement `batch` or `batch_dim_rule`")
# optional dce control
def dce(self, used_outs):
used_outs_flat = tree_leaves_checked(self.out_tree, used_outs)
if not any(used_outs_flat):
return False, False, None
else:
return True, True, self
# optional remat control
def remat(self, _policy, *args):
return self(*args), self # full remat by default
def __call__(self, *args):
args_flat = tree_leaves_checked(self.in_tree, args)
ans_flat = call_hi_primitive_p.bind(*args_flat, _prim=self)
return tree_unflatten(self.out_tree, ans_flat)
def check(self, *arg_tys):
return # subclass can optionally override this to add checking logic
def staging(self, trace, source_info, *args):
args_flat = tree_leaves_checked(self.in_tree, args)
ans_flat = trace.default_process_primitive(
call_hi_primitive_p, args_flat, dict(_prim=self), source_info)
return tree_unflatten(self.out_tree, ans_flat)
def __repr__(self):
return f"{self.__class__.__name__}[{self.params}]"
def __hash__(self):
return hash((self.__class__.__name__, tuple(self.params.items())))
def __eq__(self, other):
return type(self) is type(other) and self.params == other.params
class VmapOf(VJPHiPrimitive):
prim: core.Primitive
axis_data: batching.AxisData
in_dims: Any
out_dim: Any
def __init__(self, prim, axis_data, in_dims, out_dim):
unmap = lambda a, d: core.unmapped_aval(axis_data.size, d, a,
axis_data.explicit_mesh_axis)
self.in_avals = tree_map(unmap, prim.in_avals, in_dims)
self.out_aval = tree_map(unmap, prim.out_aval, out_dim)
self.params = dict(prim=prim, axis_data=axis_data, in_dims=in_dims,
out_dim=out_dim)
super().__init__()
@property
def _vmap_params(self):
return dict(axis_size=self.axis_data.size, axis_name=self.axis_data.name, # type: ignore
spmd_axis_name=self.axis_data.spmd_name or self.axis_data.explicit_mesh_axis) # type: ignore
def expand(self, *args):
return api.vmap(self.prim.expand, in_axes=self.in_dims, out_axes=self.out_dim, # type: ignore
**self._vmap_params)(*args)
def jvp(self, primals, tangents):
# TODO probably gonna get non-pytree-prefix errors because of sym zeros...
return api.vmap(self.prim.jvp, in_axes=(self.in_dims, self.in_dims), # type: ignore
out_axes=(self.out_dim, self.out_dim), # type: ignore
**self._vmap_params)(primals, tangents) # type: ignore
def vjp_fwd(self, in_nzs, *args):
store = lambda: None
def fwd(*args):
primal_out, res, *maybe_out_nzs = self.prim.vjp_fwd(in_nzs, *args) # type: ignore
store.out_nzs = maybe_out_nzs # pyrefly: ignore[missing-attribute]
return primal_out, res
(primal_out, res), (_, res_axes) = api.vmap(
fwd, in_axes=self.in_dims, out_axes=(self.out_dim, batching.infer), # type: ignore
**self._vmap_params)(*args)
return primal_out, (res, Static(res_axes)), *store.out_nzs # type: ignore
def vjp_bwd_retval(self, res_, g):
# TODO probably gonna get non-pytree-prefix errors because of sym zeros...
res, res_axes = res_[0], res_[1].val
in_dims = tree_map(lambda x: batching.sum_axis if x is None else x, self.in_dims, # type: ignore
is_leaf=lambda x: x is None)
g = tree_map(partial(map_zero, self.axis_data), self.out_dim, g, is_leaf=lambda x: x is None) # type: ignore
out = api.vmap(self.prim.vjp_bwd_retval, in_axes=(res_axes, self.out_dim), # type: ignore
out_axes=in_dims, **self._vmap_params, sum_match=True)(res, g)
return tree_map(partial(unmap_zero, self.axis_data), self.in_dims, out, is_leaf=lambda x: x is None) # type: ignore
def batch_dim_rule(self, axis_data, in_dims):
fix = lambda d, d_: d if (d is None or d_ is None) else d - (d_ < d) # type: ignore
in_dims_ = tree_map(fix, in_dims, self.in_dims, is_leaf=lambda x: x is None) # type: ignore
out_dim = self.prim.batch_dim_rule(axis_data, in_dims_) # type: ignore
return tree_map(lambda d, d_: d + (d_ < d), out_dim, self.out_dim) # type: ignore
def map_zero(axis_data, d, ct):
if isinstance(ct, ad_util.Zero):
return ad_util.Zero(core.mapped_aval(axis_data.size, d, ct.aval))
return ct
def unmap_zero(axis_data, d, ct):
if isinstance(ct, ad_util.Zero):
return ad_util.Zero(core.unmapped_aval(axis_data.size, d, ct.aval,
axis_data.explicit_mesh_axis))
return ct
call_hi_primitive_p = core.Primitive("call_hi_primitive")
call_hi_primitive_p.multiple_results = True
call_hi_primitive_p.is_high = lambda *args, _prim: True # type: ignore
@call_hi_primitive_p.def_abstract_eval
def _call_hi_primitive_abstract_eval(*_args, _prim):
return _prim.out_avals_flat
def _call_hi_primitive_staging(trace, source_info, *args_flat, _prim):
trace.frame.is_high = True
args = tree_unflatten(_prim.in_tree, args_flat)
ans = _prim.staging(trace, source_info, *args)
return tree_leaves_checked(_prim.out_tree, ans)
pe.custom_staging_rules[call_hi_primitive_p] = _call_hi_primitive_staging
def _call_hi_primitive_to_lojax(*args_flat, _prim):
args = tree_unflatten(_prim.in_tree, args_flat)
ans = _prim.expand(*args)
return tree_leaves_checked(_prim.out_tree, ans)
call_hi_primitive_p.to_lojax = _call_hi_primitive_to_lojax
def _call_hi_primitive_batcher(axis_data, args_flat, dims_flat, _prim):
args = tree_unflatten(_prim.in_tree, args_flat)
dims = tree_unflatten(_prim.in_tree, dims_flat)
ans, dims = _prim.batch(axis_data, args, dims)
ans_flat = tree_leaves_checked(_prim.out_tree, ans)
dims_flat = _prim.out_tree.flatten_up_to(dims)
return ans_flat, dims_flat
batching.fancy_primitive_batchers[call_hi_primitive_p] = _call_hi_primitive_batcher
def _call_hi_primitive_linearize(is_vjp, nz_in_flat, *args_flat, _prim):
args = tree_unflatten(_prim.in_tree, args_flat)
nzs_in = tree_unflatten(_prim.in_tree, nz_in_flat)
if is_vjp:
ans, residuals, *maybe_nzs_out = _prim.vjp_fwd(nzs_in, *args)
linearized = partial(fake_linear_op, _prim, nz_in_flat)
else:
ans, residuals, *maybe_nzs_out = _prim.lin(nzs_in, *args)
linearized = partial(flatten_user_linearized, _prim)
ans_flat = tree_leaves_checked(_prim.out_tree, ans)
nzs_out = maybe_nzs_out[0] if maybe_nzs_out else True
nzs_out_flat = broadcast_prefix(nzs_out, ans)
return ans_flat, nzs_out_flat, residuals, linearized
ad.primitive_linearizations[call_hi_primitive_p] = _call_hi_primitive_linearize
def fake_linear_op(prim, nz_in_flat, rs, *tangents):
residuals_flat, residuals_tree = tree_flatten(rs)
assert nz_in_flat == [not isinstance(t, ad_util.Zero) for t in tangents]
nz_tangents = tree_leaves(tangents)
return call_hi_primitive_linearized_p.bind(
*residuals_flat, *nz_tangents, residuals_tree=residuals_tree, _prim=prim,
nz_in_flat=tuple(nz_in_flat))
def flatten_user_linearized(prim, residuals, *tangents_flat):
tangents = tree_unflatten(prim.in_tree, tangents_flat)
tangents_out = prim.linearized(residuals, *tangents)
tangents_out_flat = tree_leaves_checked(prim.out_tree, tangents_out)
return tangents_out_flat
call_hi_primitive_linearized_p = core.Primitive("call_hi_primitive_linearized")
call_hi_primitive_linearized_p.multiple_results = True
call_hi_primitive_linearized_p.is_high = lambda *args, _prim, **_: True # type: ignore
@call_hi_primitive_linearized_p.def_abstract_eval
def _call_hi_primitive_linearized_abstract_eval(*_args, _prim, residuals_tree, nz_in_flat):
return [t.to_tangent_aval() for t in _prim.out_avals_flat] # TODO(dougalm): handle nonzeros
def _call_hi_primitive_linearized_transpose(cts_flat, *args, _prim,
residuals_tree, nz_in_flat):
residuals_flat, accums_flat = split_list(args, [residuals_tree.num_leaves])
residuals = tree_unflatten(residuals_tree, residuals_flat)
accums_flat_ = iter(accums_flat)
accums_flat = [next(accums_flat_) if nz else ad.NullAccum() for nz in nz_in_flat]
assert next(accums_flat_, None) is None
accums = tree_unflatten(_prim.in_tree, accums_flat)
cts = tree_unflatten(_prim.out_tree, cts_flat)
none = _prim.vjp_bwd(residuals, cts, *accums)
assert none is None
ad.fancy_transposes[call_hi_primitive_linearized_p] = _call_hi_primitive_linearized_transpose
def _call_hi_primitive_jvp(primals, tangents, *, _prim):
primals = tree_unflatten(_prim.in_tree, primals)
tangents = tree_unflatten(_prim.in_tree, tangents)
out_primals, out_tangents = _prim.jvp(primals, tangents)
out_primals_flat = tree_leaves_checked(_prim.out_tree, out_primals)
out_tangents_flat = _prim.out_tree.flatten_up_to(out_tangents)
return out_primals_flat, out_tangents_flat
ad.primitive_jvps[call_hi_primitive_p] = _call_hi_primitive_jvp
def _call_hi_primitive_dce(used_outs_flat, eqn):
_prim = eqn.params['_prim']
used_out = tree_unflatten(_prim.out_tree, used_outs_flat)
used_ins, produced_outs, new_prim = _prim.dce(used_out)
if new_prim is None:
return [False] * len(eqn.invars), None
used_ins_flat = broadcast_prefix(used_ins, _prim.in_avals)
produced_outs_flat = broadcast_prefix(produced_outs, _prim.out_aval)
new_invars = [x for x, u in zip(eqn.invars, used_ins_flat) if u]
new_outvars = [v for v, u in zip(eqn.outvars, produced_outs_flat) if u]
new_eqn = eqn.replace(invars=new_invars, outvars=new_outvars,
params=dict(_prim=new_prim))
return used_ins_flat, new_eqn
pe.dce_rules[call_hi_primitive_p] = _call_hi_primitive_dce
call_hi_primitive_linearized_p.to_lojax = ad.raise_custom_vjp_error_on_jvp
batching.fancy_primitive_batchers[call_hi_primitive_linearized_p] = ad.raise_custom_vjp_error_on_jvp
def _call_hi_primitive_remat(policy, *args_flat, _prim):
args = tree_unflatten(_prim.in_tree, args_flat)
out, rem_ = _prim.remat(policy, *args)
def rem(*args_flat):
args = tree_unflatten(_prim.in_tree, args_flat)
out = rem_(*args)
return tree_leaves_checked(_prim.out_tree, out)
return tree_leaves_checked(_prim.out_tree, out), rem
remat.rules[call_hi_primitive_p] = _call_hi_primitive_remat
class CustomVJPTraced(VJPHiPrimitive):
traced: Any
fwd: Any
bwd: Any
symbolic_zeros: Any
static_argnums: Any
opt_remat: bool
def __init__(self, traced, fwd, bwd, in_avals, sym_zeros, static_argnums, opt_remat):
self.in_avals = in_avals
self.out_aval = traced.out_avals
self.params = dict(traced=traced, fwd=fwd, bwd=bwd, symbolic_zeros=sym_zeros,
static_argnums=static_argnums, opt_remat=opt_remat)
super().__init__()
def expand(self, *args):
args = [x for x in args if not isinstance(x, Static)]
return self.traced(*args) # type: ignore
def vjp_fwd(self, in_nzs, *args):
in_nzs = tuple(x.val if isinstance(x, Static) else x for x in in_nzs)
args = tuple(x.val if isinstance(x, Static) else x for x in args)
if self.symbolic_zeros: # type: ignore
args = tree_map(CustomVJPPrimal, args, in_nzs)
out, res = self.fwd(*args) # type: ignore
if ((tree := tree_structure(out)) != self.out_tree):
raise TypeError(_vjp_primal_fwd_tree_mismatch_err(self, tree))
tree_map_with_path(_vjp_fwd_aval_mismatch_err, self.out_aval, out)
if self.symbolic_zeros: # type: ignore
out_pairs_flat = tree_leaves_checked(self.out_tree, out)
out_flat, out_nzs_flat = unzip2(
(x.value, x.perturbed) if isinstance(x, CustomVJPPrimal) else
(x, True) for x in out_pairs_flat)
out_nzs = tree_unflatten(self.out_tree, out_nzs_flat)
out = tree_unflatten(self.out_tree, out_flat)
return out, res, out_nzs
else:
return out, res
def vjp_bwd_retval(self, res, out_ct):
static_args = tuple(x.val for x in self.in_avals if isinstance(x, Static))
in_avals_ = tuple(x for x in self.in_avals if not isinstance(x, Static))
leaf = lambda x: isinstance(x, ad_util.Zero)
if self.symbolic_zeros: # type: ignore
out_ct = tree_map(ad_util.replace_internal_symbolic_zeros, out_ct, is_leaf=leaf)
else:
out_ct = tree_map(ad_util.instantiate, out_ct, is_leaf=leaf)
in_cts = self.bwd(*static_args, res, out_ct) # type: ignore
if isinstance(in_cts, list):
in_cts = tuple(in_cts)
if not isinstance(in_cts, tuple):
raise TypeError(f"Custom VJP bwd rule {self.bwd} must produce a tuple " # type: ignore
f"but got {type(in_cts)}.") # type: ignore
if len(in_cts) != len(self.in_tree.children()) - len(self.static_argnums): # type: ignore
raise ValueError(f"Custom VJP bwd rule {self.bwd} must produce a tuple " # type: ignore
"of length equal to the primal args tuple, but got "
f"length {len(in_cts)}") # type: ignore
in_cts = broadcast_prefix(in_cts, in_avals_, is_leaf=lambda x: x is None)
in_cts = tree_unflatten(self.in_tree, map(_replace_none, self.in_avals_flat, in_cts))
tree_map_with_path(_vjp_bwd_aval_mismatch_err, self.in_avals, in_cts)
if self.symbolic_zeros: # type: ignore
in_cts = tree_map(ad_util.replace_rule_output_symbolic_zeros, in_cts)
return in_cts
def jvp(self, primals, tangents):
if self.symbolic_zeros: raise NotImplementedError # type: ignore
zero = lambda x: isinstance(x, ad_util.Zero)
tangents = tree_map(ad_util.instantiate, tangents, is_leaf=zero)
if self.opt_remat: # type: ignore
fwd_traced = api.jit(partial(self.vjp_fwd, (True,) * len(primals))).trace(*primals)
primals_out, residuals = OptRemat(self, fwd_traced)(*primals) # type: ignore
else:
primals_out, residuals, *_ = self.vjp_fwd((True,) * len(primals), *primals)
tangents_out_flat = fake_linear_op(self, [True] * len(tangents), residuals, *tangents)
tangents_out = tree_unflatten(self.out_tree, tangents_out_flat)
return primals_out, tangents_out
def batch_dim_rule(self, axis_data, in_dims):
in_dims_flat = self.in_tree.flatten_up_to(in_dims)
_, out_dims = batching.batch_jaxpr2(self.traced.jaxpr, axis_data, tuple(in_dims_flat)) # type: ignore
return tree_unflatten(self.out_tree, out_dims)
def _vjp_primal_fwd_tree_mismatch_err(self, tree):
return (f"Custom VJP fwd rule {self.fwd.__name__} for function {self.traced.fun_name} " # type: ignore
"must produce a pair (list or tuple of length two) where the first "
"element represents the primal output "
"(equal to the output of the custom_vjp-decorated function "
f"{self.traced.fun_name}) and the " # type: ignore
"second element represents residuals (i.e. values stored from the "
"forward pass for use on the backward pass), but "
f"instead the fwd rule output's first element had container/pytree "
"structure:\n"
f""" {str(tree ).replace("'", "")}\n""" # type: ignore
f"while the custom_vjp-decorated function {self.traced.fun_name} had output " # type: ignore
"container/pytree structure:\n"
f""" {str(self.out_tree).replace("'", "")}.""") # type: ignore
def _vjp_fwd_aval_mismatch_err(path, primal_aval, fwd_val):
if not core.typematch(ty := typeof(fwd_val), primal_aval):
raise TypeError(f"at {keystr(path)}, got fwd output type {ty.str_short()} "
f"which doesn't match primal output type {primal_aval.str_short()}")
def _vjp_bwd_aval_mismatch_err(path, primal_aval, ct):
if config.disable_bwd_checks.value:
return
if isinstance(ct, ad_util.Zero):
return
if isinstance(primal_aval, AbstractRef):
primal_aval = primal_aval.inner_aval
expected = primal_aval.to_cotangent_aval()
ct_aval = ct.aval if isinstance(ct, ad_util.SymbolicZero) else typeof(ct)
if (not core.typematch(expected, ct_aval) and
not _temporary_dtype_exception(expected, ct_aval) and
getattr(expected, 'dtype', None) is not dtypes.float0):
result = f"at output{keystr(path)} " if path else ""
raise ValueError(
f"{result}the bwd rule produced an output of type {ct_aval.str_short()}"
f" which doesn't match expected type {expected.str_short()}")
def _replace_none(primal_in_aval, maybe_ct):
if maybe_ct is None:
return ad_util.Zero(primal_in_aval.to_cotangent_aval())
else:
return maybe_ct
class custom_vjp3:
fwd: Callable | None = None
bwd: Callable | None = None
def __init__(self, f, nondiff_argnums=(), nondiff_argnames=()):
self.f = f
self.static_argnums = _set_up_nondiff(f, nondiff_argnums, nondiff_argnames)
def defvjp(self, fwd, bwd, *, symbolic_zeros=False, optimize_remat=False):
self.fwd = fwd
self.bwd = bwd
self.symz = symbolic_zeros
self.opt_remat = optimize_remat
return self
def __call__(self, *args, **kwargs):
if not self.fwd or not self.bwd:
msg = f"No VJP defined for custom_vjp function {self.f.__name__} using defvjp."
raise AttributeError(msg)
args = resolve_kwargs(self.f, args, kwargs)
if any(isinstance(args[i], core.Tracer) for i in self.static_argnums):
raise UnexpectedTracerError("custom_vjp inputs marked with nondiff_argnums "
"must be static, not Tracers")
traced = api.jit(self.f, static_argnums=(*self.static_argnums,)).trace(*args)
if any(isinstance(x, core.Tracer) for x in traced._consts):
t = next(x for x in traced._consts if isinstance(x, core.Tracer))
raise UnexpectedTracerError(
f"custom_vjp-decorated function {self.f} closed over a {type(t).__name__} "
f"of type {t.aval.str_short()}, but custom_vjp functions can't close "
f"over Tracers. Rewrite {self.f} to take it as an explicit input.")
raise Exception # TODO(mattjj):error tracer type, value type, primal name
args = tuple(Static(x) if i in self.static_argnums else x for i, x in enumerate(args))
in_avals = tree_map(typeof, args)
prim = CustomVJPTraced(traced, self.fwd, self.bwd, in_avals, self.symz, # type: ignore
self.static_argnums, self.opt_remat) # type: ignore
return prim(*args)
class OptRemat(VJPHiPrimitive):
orig: CustomVJPTraced
traced_fwd: Any
def __init__(self, orig, traced_fwd):
self.in_avals = orig.in_avals
self.out_aval = traced_fwd.out_avals
self.params = dict(orig=orig, traced_fwd=traced_fwd)
super().__init__()
def expand(self, *primals):
return self.traced_fwd(*primals)
def dce(self, used_outs):
used_primals, used_res = used_outs
if any(tree_leaves(used_res)):
return True, (True, True), self # if any res used, no dce at all
elif any(tree_leaves(used_primals)):
return True, (True, False), self.orig # if only primals used, undo AD
else:
return False, (False, False), None
# TODO(mattjj): jvp and transpose? does anyone rely on them?
def _set_up_nondiff(f, argnums_, argnames) -> frozenset[int]:
argnums = set(argnums_)
if argnames:
sig = inspect.signature(f) # needed for static_argnames
argnums |= set(infer_argnums_and_argnames(sig, None, argnames)[0])
return frozenset(argnums)
@register_static
@dataclass(frozen=True)
class Static:
val: Any
class MappingSpec: pass
class HipSpec:
def to_lo(self): assert False, "must override"
| {
"repo_id": "jax-ml/jax",
"file_path": "jax/_src/hijax.py",
"license": "Apache License 2.0",
"lines": 699,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:jax/_src/literals.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Sequence, Literal
from jax._src.lib import _jax
import numpy as np
# TypedInt, TypedFloat, and TypedComplex are subclasses of int, float, and
# complex that carry a JAX dtype. Canonicalization forms these types from int,
# float, and complex. Repeated canonicalization, including under different
# jax_enable_x64 modes, preserves the dtype.
class TypedInt(int):
dtype: np.dtype
def __new__(cls, value: int, dtype: np.dtype):
v = super(TypedInt, cls).__new__(cls, value)
v.dtype = dtype
return v
def __repr__(self):
return f'TypedInt({int(self)}, dtype={self.dtype.name})'
def __getnewargs__(self):
return (int(self), self.dtype)
class TypedFloat(float):
dtype: np.dtype
def __new__(cls, value: float, dtype: np.dtype):
v = super(TypedFloat, cls).__new__(cls, value)
v.dtype = dtype
return v
def __repr__(self):
return f'TypedFloat({float(self)}, dtype={self.dtype.name})'
def __str__(self):
return str(float(self))
def __getnewargs__(self):
return (float(self), self.dtype)
class TypedComplex(complex):
dtype: np.dtype
def __new__(cls, value: complex, dtype: np.dtype):
v = super(TypedComplex, cls).__new__(cls, value)
v.dtype = dtype
return v
def __repr__(self):
return f'TypedComplex({complex(self)}, dtype={self.dtype.name})'
def __getnewargs__(self):
return (complex(self), self.dtype)
_jax.set_typed_int_type(TypedInt)
_jax.set_typed_float_type(TypedFloat)
_jax.set_typed_complex_type(TypedComplex)
typed_scalar_types: set[type] = {TypedInt, TypedFloat, TypedComplex}
class TypedNdArray:
"""A TypedNdArray is a host-side array used by JAX during tracing.
To most intents and purposes a TypedNdArray is a thin wrapper around a numpy
array and should act like it. The primary differences are that a TypedNdArray
carries a JAX type:
* its type is not canonicalized by JAX, irrespective of the jax_enable_x64
mode
* it can be weakly typed.
"""
__slots__ = ('val', 'weak_type')
val: np.ndarray
weak_type: bool
def __init__(self, val: np.ndarray, weak_type: bool):
self.val = val
self.weak_type = weak_type
@property
def dtype(self) -> np.dtype:
return self.val.dtype
@property
def shape(self) -> tuple[int, ...]:
return self.val.shape
@property
def strides(self) -> Sequence[int]:
return self.val.strides
@property
def ndim(self) -> int:
return self.val.ndim
@property
def size(self) -> int:
return self.val.size
def __len__(self) -> int:
return self.val.__len__()
def __repr__(self):
prefix = 'TypedNdArray('
if self.weak_type:
dtype_str = f'dtype={self.val.dtype.name}, weak_type=True)'
else:
dtype_str = f'dtype={self.val.dtype.name})'
line_width = np.get_printoptions()['linewidth']
if self.size == 0:
s = f'[], shape={self.val.shape}'
else:
s = np.array2string(
self.val,
prefix=prefix,
suffix=',',
separator=', ',
max_line_width=line_width,
)
last_line_len = len(s) - s.rfind('\n') + 1
sep = ' '
if last_line_len + len(dtype_str) + 1 > line_width:
sep = ' ' * len(prefix)
return f'{prefix}{s},{sep}{dtype_str}'
def __array__(self, dtype=None, copy=None):
# You might think that we can do the following here:
# return self.val.__array__(dtype=dtype, copy=copy)
# Unfortunately __array__ appears to be buggy on NumPy < 2.3 and interprets
# the "dtype=None" as "the default float type".
# TODO(phawkins): revert to the above form once NumPy 2.3 is the minimum
# supported version.
return np.asarray(self.val, dtype=dtype, copy=copy) # pytype: disable=wrong-keyword-args
def __add__(self, other):
return self.val.__add__(other)
def __sub__(self, other):
return self.val.__sub__(other)
def __mul__(self, other):
return self.val.__mul__(other)
def __floordiv__(self, other):
return self.val.__floordiv__(other)
def __truediv__(self, other):
return self.val.__truediv__(other)
def __mod__(self, other):
return self.val.__mod__(other)
def __pow__(self, other):
return self.val.__pow__(other)
def __radd__(self, other):
return self.val.__radd__(other)
def __rsub__(self, other):
return self.val.__rsub__(other)
def __rmul__(self, other):
return self.val.__rmul__(other)
def __rtruediv__(self, other):
return self.val.__rtruediv__(other)
def __rfloordiv__(self, other):
return self.val.__rfloordiv__(other)
def __rmod__(self, other):
return self.val.__rmod__(other)
def __rpow__(self, other):
return self.val.__rpow__(other)
def __getitem__(self, index):
return self.val.__getitem__(index)
def __bool__(self):
return self.val.__bool__()
def __int__(self):
return self.val.__int__()
def __float__(self):
return self.val.__float__()
def __complex__(self):
return self.val.__complex__()
def __index__(self):
return self.val.__index__()
def __lt__(self, other):
return self.val.__lt__(other)
def __le__(self, other):
return self.val.__le__(other)
def __eq__(self, other):
return self.val.__eq__(other)
def __ne__(self, other):
return self.val.__ne__(other)
def __gt__(self, other):
return self.val.__gt__(other)
def __ge__(self, other):
return self.val.__ge__(other)
def __abs__(self):
return self.val.__abs__()
def reshape(self, *args, **kw):
return self.val.reshape(*args, **kw)
def item(self, *args):
return self.val.item(*args)
@property
def T(self):
return self.val.T
@property
def mT(self):
return self.val.mT
def clip(self, *args, **kwargs):
return self.val.clip(*args, **kwargs)
def astype(self, dtype, order: str = 'K', casting: str = 'unsafe', subok: bool = True, copy: bool = True):
return self.val.astype( # type: ignore[no-matching-overload, call-overload]
dtype, order=order, casting=casting, subok=subok, copy=copy
)
def tobytes(self, order: Literal['A', 'C', 'F', 'K'] | None = 'C'):
return self.val.tobytes(order=order)
_jax.set_typed_ndarray_type(TypedNdArray)
| {
"repo_id": "jax-ml/jax",
"file_path": "jax/_src/literals.py",
"license": "Apache License 2.0",
"lines": 191,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:jax/_src/memory.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
class Space(enum.Enum):
Device = enum.auto()
Host = enum.auto()
Any = enum.auto()
def __repr__(self):
return f"MemorySpace.{self.name}"
| {
"repo_id": "jax-ml/jax",
"file_path": "jax/_src/memory.py",
"license": "Apache License 2.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:jax/_src/numpy/array_constructors.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import logging
from typing import Any
import numpy as np
from jax._src import api
from jax._src import config
from jax._src import core
from jax._src import dtypes
from jax._src import literals
from jax._src import tree_util
from jax._src import xla_bridge
from jax._src.lax import lax
from jax._src.lib import xla_client as xc
from jax._src.numpy import util
from jax._src.typing import Array, ArrayLike, DTypeLike
from jax._src.sharding import Sharding
from jax._src.sharding_impls import NamedSharding, PartitionSpec as P
logger = logging.getLogger(__name__)
export = util.set_module('jax.numpy')
for pkg_name in ['jax_cuda13_plugin', 'jax_cuda12_plugin', 'jaxlib.cuda']:
try:
cuda_plugin_extension = importlib.import_module(
f'{pkg_name}.cuda_plugin_extension'
)
except ImportError:
cuda_plugin_extension = None # type: ignore
else:
break
# Dynamically find and load ROCm plugin extension
rocm_plugin_extension = None
try:
from importlib.metadata import distributions
for dist in distributions():
name = dist.metadata.get('Name', '')
if name.startswith('jax-rocm') and name.endswith('-plugin'):
module_name = name.replace('-', '_')
try:
rocm_plugin_extension = importlib.import_module(
f'{module_name}.rocm_plugin_extension'
)
break
except ImportError:
continue
except Exception as e:
logger.debug("ROCm plugin discovery failed: %s", e)
def _supports_buffer_protocol(obj):
try:
memoryview(obj)
except TypeError:
return False
else:
return True
def _make_string_array(
object: np.ndarray,
dtype: DTypeLike | None = None,
ndmin: int = 0,
device: xc.Device | Sharding | None = None,
) -> Array:
if not isinstance(object, np.ndarray):
raise TypeError(
"Currently, string arrays can only be made from NumPy"
f" arrays. Got: {type(object)}."
)
if dtype is not None and (
dtypes.is_string_dtype(object.dtype) != dtypes.is_string_dtype(dtype)
):
raise TypeError(
f"Cannot make an array with dtype {dtype} from an object with dtype"
f" {object.dtype}."
)
if ndmin > object.ndim:
raise TypeError(
f"ndmin {ndmin} cannot be greater than object's ndims"
f" {object.ndim} for string arrays."
)
# Just do a device_put since XLA does not support string as a data type.
return api.device_put(x=object, device=device)
@export
def array(object: Any, dtype: DTypeLike | None = None, copy: bool = True,
          order: str | None = "K", ndmin: int = 0,
          *, device: xc.Device | Sharding | None = None,
          out_sharding: NamedSharding | P | None = None) -> Array:
  """Convert an object to a JAX array.
  JAX implementation of :func:`numpy.array`.
  Args:
    object: an object that is convertible to an array. This includes JAX
      arrays, NumPy arrays, Python scalars, Python collections like lists
      and tuples, objects with a ``__jax_array__`` method, and objects
      supporting the Python buffer protocol.
    dtype: optionally specify the dtype of the output array. If not
      specified it will be inferred from the input.
    copy: specify whether to force a copy of the input. Default: True.
    order: not implemented in JAX
    ndmin: integer specifying the minimum number of dimensions in the
      output array.
    device: optional :class:`~jax.Device` or :class:`~jax.sharding.Sharding`
      to which the created array will be committed.
    out_sharding: (optional) :class:`~jax.sharding.PartitionSpec` or :class:`~jax.NamedSharding`
      representing the sharding of the created array (see `explicit sharding`_ for more details).
      This argument exists for consistency with other array creation routines across JAX.
      Specifying both ``out_sharding`` and ``device`` will result in an error.
  Returns:
    A JAX array constructed from the input.
  See also:
    - :func:`jax.numpy.asarray`: like `array`, but by default only copies
      when necessary.
    - :func:`jax.numpy.from_dlpack`: construct a JAX array from an object
      that implements the dlpack interface.
    - :func:`jax.numpy.frombuffer`: construct a JAX array from an object
      that implements the buffer interface.
  Examples:
    Constructing JAX arrays from Python scalars:
    >>> jnp.array(True)
    Array(True, dtype=bool)
    >>> jnp.array(42)
    Array(42, dtype=int32, weak_type=True)
    >>> jnp.array(3.5)
    Array(3.5, dtype=float32, weak_type=True)
    >>> jnp.array(1 + 1j)
    Array(1.+1.j, dtype=complex64, weak_type=True)
    Constructing JAX arrays from Python collections:
    >>> jnp.array([1, 2, 3])  # list of ints -> 1D array
    Array([1, 2, 3], dtype=int32)
    >>> jnp.array([(1, 2, 3), (4, 5, 6)])  # list of tuples of ints -> 2D array
    Array([[1, 2, 3],
           [4, 5, 6]], dtype=int32)
    >>> jnp.array(range(5))
    Array([0, 1, 2, 3, 4], dtype=int32)
    Constructing JAX arrays from NumPy arrays:
    >>> jnp.array(np.linspace(0, 2, 5))
    Array([0. , 0.5, 1. , 1.5, 2. ], dtype=float32)
    Constructing a JAX array via the Python buffer interface, using Python's
    built-in :mod:`array` module.
    >>> from array import array
    >>> pybuffer = array('i', [2, 3, 5, 7])
    >>> jnp.array(pybuffer)
    Array([2, 3, 5, 7], dtype=int32)
  .. _explicit sharding: https://docs.jax.dev/en/latest/notebooks/explicit-sharding.html
  """
  if order is not None and order != "K":
    raise NotImplementedError("Only implemented for order='K'")
  # check if the given dtype is compatible with JAX
  if dtype is not None:
    dtype = dtypes.check_and_canonicalize_user_dtype(dtype, "array")
  # Here we make a judgment call: we only return a weakly-typed array when the
  # input object itself is weakly typed. That ensures asarray(x) is a no-op
  # whenever x is weak, but avoids introducing weak types with something like
  # array([1, 2, 3])
  weak_type = dtype is None and dtypes.is_weakly_typed(object)
  # For a traced input with no explicit placement, inherit the tracer's
  # sharding (when its mesh is non-empty); otherwise resolve device/out_sharding.
  if device is None and out_sharding is None and isinstance(object, core.Tracer):
    sharding = object.aval.sharding
    sharding = None if sharding.mesh.empty else sharding
  else:
    sharding = util.choose_device_or_out_sharding(device, out_sharding, "jnp.array")
  # Use device_put to avoid a copy for ndarray inputs.
  if (not copy and isinstance(object, np.ndarray) and
      (dtype is None or dtype == object.dtype) and (ndmin <= object.ndim) and
      device is None):
    if dtype is not None:
      # If there is an explicit dtype, we've already canonicalized things and
      # device_put should not canonicalize again.
      object = literals.TypedNdArray(object, weak_type=False)
    # Keep the output uncommitted.
    return api.device_put(object)
  # String arrays need separate handling because XLA does not support string
  # as a data type.
  if dtypes.is_string_dtype(dtype) or (
      hasattr(object, "dtype") and dtypes.is_string_dtype(object.dtype)
  ):
    return _make_string_array(
        object=object, dtype=dtype, ndmin=ndmin, device=device
    )
  # For Python scalar literals, call coerce_to_array to catch any overflow
  # errors. We don't use dtypes.is_python_scalar because we don't want this
  # triggering for traced values. We do this here because it matters whether or
  # not dtype is None. We don't assign the result because we want the raw object
  # to be used for type inference below.
  if isinstance(object, (bool, int, float, complex)):
    _ = dtypes.coerce_to_array(object, dtype)
  elif not isinstance(object, Array):
    # Check if object supports any of the data exchange protocols
    # (except dlpack, see data-apis/array-api#301). If it does,
    # consume the object as jax array and continue (but not return) so
    # that other array() arguments get processed against the input
    # object.
    #
    # Notice that data exchange protocols define dtype in the
    # corresponding data structures and it may not be available as
    # object.dtype. So, we'll resolve the protocols here before
    # evaluating object.dtype.
    if hasattr(object, '__jax_array__'):
      object = object.__jax_array__()
    elif hasattr(object, '__cuda_array_interface__'):
      cai = object.__cuda_array_interface__
      backend = xla_bridge.get_backend()
      # Pick the matching GPU plugin extension (ROCm vs CUDA) to map the
      # buffer's device pointer to a device ordinal; None if unavailable.
      if 'rocm' in backend.platform_version.lower():
        gpu_plugin_extension = rocm_plugin_extension
      elif 'cuda' in backend.platform_version.lower():
        gpu_plugin_extension = cuda_plugin_extension
      else:
        gpu_plugin_extension = None
      if gpu_plugin_extension is None:
        device_id = None
      else:
        device_id = gpu_plugin_extension.get_device_ordinal(cai["data"][0])
      object = xc._xla.cuda_array_interface_to_buffer(
          cai=cai, gpu_backend=backend, device_id=device_id)
  # To handle nested lists & tuples, flatten the tree and process each leaf.
  leaves, treedef = tree_util.tree_flatten(
      object, is_leaf=lambda x: not isinstance(x, (list, tuple)))
  if any(leaf is None for leaf in leaves):
    raise ValueError("None is not a valid value for jnp.array")
  # Resolve __jax_array__ on individual leaves of a nested input.
  leaves = [
      leaf
      if (leaf_jax_array := getattr(leaf, "__jax_array__", None)) is None
      else leaf_jax_array()
      for leaf in leaves
  ]
  if dtype is None:
    # Use lattice_result_type rather than result_type to avoid canonicalization.
    # Otherwise, weakly-typed inputs would have their dtypes canonicalized.
    try:
      dtype = (
          dtypes.lattice_result_type(*leaves)[0]
          if leaves
          else dtypes.default_float_dtype()
      )
    except TypeError:
      # This happens if, e.g. one of the entries is a memoryview object.
      # This is rare, so we only handle it if the normal path fails.
      leaves = [_convert_to_array_if_dtype_fails(leaf) for leaf in leaves]
      dtype = dtypes.lattice_result_type(*leaves)[0]
  object = treedef.unflatten(leaves)
  out: ArrayLike
  if all(not isinstance(leaf, Array) for leaf in leaves):
    # TODO(jakevdp): falling back to numpy here fails to overflow for lists
    # containing large integers; see discussion in
    # https://github.com/jax-ml/jax/pull/6047. More correct would be to call
    # coerce_to_array on each leaf, but this may have performance implications.
    out = np.asarray(object, dtype=dtype)
  elif isinstance(object, Array):
    assert object.aval is not None
    out = lax._array_copy(object) if copy else object
  elif isinstance(object, (list, tuple)):
    if object:
      arrs = (array(elt, dtype=dtype, copy=False) for elt in object)
      arrays_out = [lax.expand_dims(arr, [0]) for arr in arrs]
      # lax.concatenate can be slow to compile for wide concatenations, so form a
      # tree of concatenations as a workaround especially for op-by-op mode.
      # (https://github.com/jax-ml/jax/issues/653).
      k = 16
      while len(arrays_out) > k:
        arrays_out = [lax.concatenate(arrays_out[i:i+k], 0)
                      for i in range(0, len(arrays_out), k)]
      out = lax.concatenate(arrays_out, 0)
    else:
      out = np.array([], dtype=dtype)
  elif _supports_buffer_protocol(object):
    object = memoryview(object)
    # TODO(jakevdp): update this once we support NumPy 2.0 semantics for the copy arg.
    out = np.array(object) if copy else np.asarray(object)
  else:
    raise TypeError(f"Unexpected input type for array: {type(object)}")
  # Cast to the final dtype, weak type, and sharding in a single conversion.
  out_array: Array = lax._convert_element_type(
      out, dtype, weak_type=weak_type, sharding=sharding)
  if ndmin > np.ndim(out_array):
    out_array = lax.expand_dims(out_array, range(ndmin - np.ndim(out_array)))
  return out_array
def _get_platform(
device_or_sharding: xc.Device | Sharding | None | str) -> str:
"""Get device_or_sharding platform or look up config.default_device.value."""
if isinstance(device_or_sharding, xc.Device):
return device_or_sharding.platform
elif isinstance(device_or_sharding, Sharding):
return list(device_or_sharding.device_set)[0].platform
elif isinstance(device_or_sharding, str):
return device_or_sharding
elif device_or_sharding is None:
if config.default_device.value is None:
return xla_bridge.default_backend()
else:
return _get_platform(config.default_device.value)
else:
raise ValueError(f"`{device_or_sharding = }` was passed to"
"`canonicalize_or_get_default_platform`, only xc.Device,"
" Sharding, None or str values are supported.")
def _convert_to_array_if_dtype_fails(x: ArrayLike) -> ArrayLike:
try:
dtypes.dtype(x)
except TypeError:
return np.asarray(x)
else:
return x
@export
def asarray(a: Any, dtype: DTypeLike | None = None, order: str | None = None,
            *, copy: bool | None = None,
            device: xc.Device | Sharding | None = None,
            out_sharding: NamedSharding | P | None = None) -> Array:
  """Convert an object to a JAX array.
  JAX implementation of :func:`numpy.asarray`.
  Args:
    a: an object that is convertible to an array. This includes JAX
      arrays, NumPy arrays, Python scalars, Python collections like lists
      and tuples, objects with a ``__jax_array__`` method, and objects
      supporting the Python buffer protocol.
    dtype: optionally specify the dtype of the output array. If not
      specified it will be inferred from the input.
    order: not implemented in JAX
    copy: optional boolean specifying the copy mode. If True, then always
      return a copy. If False, then error if a copy is necessary. Default is
      None, which will only copy when necessary.
    device: optional :class:`~jax.Device` or :class:`~jax.sharding.Sharding`
      to which the created array will be committed.
    out_sharding: (optional) :class:`~jax.sharding.PartitionSpec` or
      :class:`~jax.NamedSharding` representing the sharding of the created
      array; forwarded to :func:`jax.numpy.array`. Specifying both
      ``out_sharding`` and ``device`` will result in an error.
  Returns:
    A JAX array constructed from the input.
  See also:
    - :func:`jax.numpy.array`: like `asarray`, but defaults to `copy=True`.
    - :func:`jax.numpy.from_dlpack`: construct a JAX array from an object
      that implements the dlpack interface.
    - :func:`jax.numpy.frombuffer`: construct a JAX array from an object
      that implements the buffer interface.
  Examples:
    Constructing JAX arrays from Python scalars:
    >>> jnp.asarray(True)
    Array(True, dtype=bool)
    >>> jnp.asarray(42)
    Array(42, dtype=int32, weak_type=True)
    >>> jnp.asarray(3.5)
    Array(3.5, dtype=float32, weak_type=True)
    >>> jnp.asarray(1 + 1j)
    Array(1.+1.j, dtype=complex64, weak_type=True)
    Constructing JAX arrays from Python collections:
    >>> jnp.asarray([1, 2, 3])  # list of ints -> 1D array
    Array([1, 2, 3], dtype=int32)
    >>> jnp.asarray([(1, 2, 3), (4, 5, 6)])  # list of tuples of ints -> 2D array
    Array([[1, 2, 3],
           [4, 5, 6]], dtype=int32)
    >>> jnp.asarray(range(5))
    Array([0, 1, 2, 3, 4], dtype=int32)
    Constructing JAX arrays from NumPy arrays:
    >>> jnp.asarray(np.linspace(0, 2, 5))
    Array([0. , 0.5, 1. , 1.5, 2. ], dtype=float32)
    Constructing a JAX array via the Python buffer interface, using Python's
    built-in :mod:`array` module.
    >>> from array import array
    >>> pybuffer = array('i', [2, 3, 5, 7])
    >>> jnp.asarray(pybuffer)
    Array([2, 3, 5, 7], dtype=int32)
  """
  # For copy=False, the array API specifies that we raise a ValueError if the input supports
  # the buffer protocol but a copy is required. Since array() supports the buffer protocol
  # via numpy, this is only the case when the default device is not 'cpu'
  if (copy is False and not isinstance(a, Array)
      and _get_platform(device) != "cpu"
      and _supports_buffer_protocol(a)):
    raise ValueError(f"jnp.asarray: cannot convert object of type {type(a)} to JAX Array "
                     f"on platform={_get_platform(device)} with "
                     "copy=False. Consider using copy=None or copy=True instead.")
  if dtype is not None:
    dtype = dtypes.check_and_canonicalize_user_dtype(dtype, "asarray")
  # copy=None ("copy only when necessary") is forwarded as copy=False, which
  # array() treats as "avoid a copy when it safely can".
  return array(a, dtype=dtype, copy=bool(copy), order=order, device=device,
               out_sharding=out_sharding)
| {
"repo_id": "jax-ml/jax",
"file_path": "jax/_src/numpy/array_constructors.py",
"license": "Apache License 2.0",
"lines": 375,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:jax/_src/pallas/fuser/custom_fusion_lib.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from collections.abc import Callable, Sequence
import dataclasses
import functools
from typing import Any, Protocol
from jax._src import api_util
from jax._src import core
from jax._src import custom_api_util
from jax._src import linear_util as lu
from jax._src.traceback_util import api_boundary
from jax._src import tree_util
from jax._src import util
from jax._src.interpreters import mlir
from jax._src.interpreters import partial_eval as pe
from jax._src.pallas.mosaic import lowering as mosaic_lowering
from jax._src.pallas import core as pallas_core
from jax._src.pallas.fuser import block_spec as block_spec_lib
# Primitive that carries a custom_fusion's traced jaxpr through JAX tracing.
custom_fusion_p = core.Primitive('custom_fusion')
custom_fusion_p.multiple_results = True
# Signatures for user-provided block-spec propagation rules: pulling maps
# output block-index transforms back to input transforms; pushing maps input
# block specs forward to output block specs.
CustomPullBlockSpecRuleFn = Callable[[tuple[block_spec_lib.BlockIndexTransform, ...]],
                                     Sequence[block_spec_lib.BlockIndexTransform]]
CustomPushBlockSpecRuleFn = Callable[[tuple[pallas_core.BlockSpec, ...]],
                                     tuple[pallas_core.BlockSpec, ...]]
@dataclasses.dataclass(frozen=True)
class CustomEvalContext:
  """Context passed to a user eval rule: the kernel's output block specs and
  the current output block indices."""
  # Block specs of the fusion's outputs.
  out_block_specs: tuple[pallas_core.BlockSpec, ...]
  # Current block indices for each output.
  out_block_indices: tuple[Any, ...]
class CustomEvalRuleFn(Protocol):
  """Structural type of a user-provided eval rule for a custom_fusion."""
  def __call__(
      self,
      ctx: CustomEvalContext,
      *args: Any,
  ) -> Sequence[Any]:
    ...
@custom_api_util.register_custom_decorator_type
class custom_fusion:
  """Decorator giving a function custom Pallas-fuser behavior.

  After decoration, rules are attached via :meth:`def_eval_rule`,
  :meth:`def_pull_block_spec`, optionally :meth:`def_push_block_spec`, and
  optionally :meth:`def_pallas_impl` (an alternative implementation used when
  the fusion runs inside a Pallas kernel). Calling the decorated function
  traces it (and its pallas_impl, if any) to jaxprs and binds
  ``custom_fusion_p`` with the rules as parameters.
  """
  # The user's original function.
  fun: Callable[..., Any]
  # Required rules; asserted non-None in __call__.
  eval_rule: CustomEvalRuleFn | None = None
  pull_block_spec_rule: CustomPullBlockSpecRuleFn | None = None
  # Optional if this custom_fusion is only used as an input fusion.
  push_block_spec_rule: CustomPushBlockSpecRuleFn | None = None
  # Optional alternative implementation to use instead of `fun` for when this
  # custom fusion is run inside a Pallas kernel.
  pallas_impl: Callable[..., Any] | None = None
  def __init__(self, fun: Callable[..., Any]):
    functools.update_wrapper(self, fun)
    self.fun = fun
  def def_pallas_impl(self, pallas_impl):
    """Registers an alternative implementation for use inside Pallas kernels."""
    self.pallas_impl = pallas_impl
    return pallas_impl
  def def_pull_block_spec(
      self, pull_block_spec_rule: CustomPullBlockSpecRuleFn):
    """Registers the rule that pulls output block specs back to the inputs."""
    self.pull_block_spec_rule = pull_block_spec_rule
    return pull_block_spec_rule
  def def_push_block_spec(
      self, push_block_spec_rule: CustomPushBlockSpecRuleFn):
    """Registers the rule that pushes input block specs to the outputs."""
    self.push_block_spec_rule = push_block_spec_rule
    return push_block_spec_rule
  def def_eval_rule(self, eval_rule: CustomEvalRuleFn):
    """Registers the rule used to evaluate the fusion inside a kernel."""
    self.eval_rule = eval_rule
    return eval_rule
  @functools.partial(api_boundary,
                     repro_api_name="jax.pallas.custom_fusion.__call__")
  def __call__(self, *args, **kwargs):
    """Traces `fun` (and `pallas_impl`, if set) and binds custom_fusion_p."""
    debug_fun = api_util.debug_info("custom_fusion fun", self.fun, args, kwargs)
    # TODO(jburnim): Better error messages here.
    assert self.eval_rule is not None
    assert self.pull_block_spec_rule is not None
    try:
      args = api_util.resolve_kwargs(self.fun, args, kwargs)
    except TypeError as e:
      raise TypeError(
          "The input arguments to the custom_fusion-decorated function "
          f"{debug_fun.func_name} could not be resolved to positional-only "
          f"arguments. Binding failed with the error:\n{e}"
      ) from e
    # flatten and get jaxpr
    args_flat, in_tree = tree_util.tree_flatten(args)
    in_avals = [core.get_aval(x) for x in args_flat]
    flat_fun, out_tree = api_util.flatten_fun_nokwargs(
        lu.wrap_init(self.fun, debug_info=debug_fun.with_unknown_names()),
        in_tree)
    jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(flat_fun, in_avals)
    # if a Pallas implementation was provided, get its jaxpr
    if self.pallas_impl is not None:
      debug_pallas_impl = api_util.debug_info(
          "custom_fusion pallas_impl", self.pallas_impl, args, kwargs)
      flat_pallas_impl, pallas_out_tree = api_util.flatten_fun_nokwargs(
          lu.wrap_init(self.pallas_impl, debug_info=debug_pallas_impl),
          in_tree)
      # TODO(jburnim): Error if out_tree() and kernel_out_tree() are different?
      del pallas_out_tree
      pallas_jaxpr, _, pallas_consts = (
          pe.trace_to_jaxpr_dynamic(flat_pallas_impl, in_avals))
    else:
      pallas_jaxpr = None
      pallas_consts = []
    # debug_info for rules
    out_flat = custom_fusion_p.bind(
        *consts,
        *pallas_consts,
        *args_flat,
        jaxpr=jaxpr,
        num_consts=len(consts),
        eval_rule=self.eval_rule,
        pull_block_spec_rule=self.pull_block_spec_rule,
        push_block_spec_rule=self.push_block_spec_rule,
        pallas_jaxpr=pallas_jaxpr,
        pallas_num_consts=len(pallas_consts),
        in_tree=in_tree,
        out_tree=out_tree(),
        kernel_out_tree=out_tree())
    return tree_util.tree_unflatten(out_tree(), out_flat)
@custom_fusion_p.def_impl
def _custom_fusion_impl(
    *args,
    jaxpr: core.Jaxpr,
    num_consts: int,
    pallas_num_consts: int,
    **_):
  """Concrete evaluation: runs the primary jaxpr (pallas consts are dropped)."""
  consts, _, args = util.split_list(args, [num_consts, pallas_num_consts]) # type: ignore[assignment]
  return core.eval_jaxpr(jaxpr, consts, *args)
# Outside a Pallas kernel, lower by inlining the implementation above.
mlir.register_lowering(custom_fusion_p, mlir.lower_fun(
    _custom_fusion_impl, multiple_results=True))
@custom_fusion_p.def_effectful_abstract_eval
def _custom_fusion_effectful_abstract_eval(
    *args,
    jaxpr: core.Jaxpr,
    pallas_jaxpr: core.Jaxpr | None,
    **_):
  """Abstract evaluation: output avals/effects come from the primary jaxpr.

  Raises:
    NotImplementedError: if either jaxpr has effects (not yet supported).
  """
  del args
  # TODO(jburnim): Error if pallas_jaxpr has different number of outputs, or
  # different shapes and types of outputs?
  if jaxpr.effects:
    # Fixed: these messages were missing the f-prefix, so the {...} fields
    # were printed literally instead of being interpolated.
    raise NotImplementedError(
        f"custom_fusion-decorated function {jaxpr.debug_info.func_src_info} "
        f"has effects, which is not yet supported: {jaxpr.effects}")
  if pallas_jaxpr is not None and pallas_jaxpr.effects:
    raise NotImplementedError(
        f"custom_fusion-decorated function {jaxpr.debug_info.func_src_info} "
        "has a pallas_impl with effects, which is not yet supported: "
        f"{pallas_jaxpr.effects}")
  return jaxpr.out_avals, jaxpr.effects
@block_spec_lib.register_eval_rule(custom_fusion_p)
def _custom_fusion_eval_rule(
    ctx: block_spec_lib.KernelEvalContext,
    *args,
    eval_rule: CustomEvalRuleFn,
    num_consts: int,
    pallas_num_consts: int,
    **_):
  """Evaluates a custom_fusion via its user-supplied eval rule."""
  # The leading consts (primary + pallas) are not passed to the user rule.
  skip = num_consts + pallas_num_consts
  eval_ctx = CustomEvalContext(
      out_block_specs=ctx.out_block_specs,
      out_block_indices=ctx.get_out_block_indices(),
  )
  return eval_rule(eval_ctx, *args[skip:])
# TODO(jburnim): Lowering rules for SC and Mosaic GPU.
@mosaic_lowering.register_lowering_rule(custom_fusion_p)
def _custom_fusion_mosaic_lowering_rule(
    ctx: mosaic_lowering.LoweringRuleContext,
    *args,
    jaxpr: core.Jaxpr,
    num_consts: int,
    pallas_jaxpr: core.Jaxpr | None,
    pallas_num_consts: int,
    **_):
  """Lowers a custom_fusion inside a Mosaic TPU kernel.

  Uses the registered `pallas_impl` jaxpr (and its consts) when one was
  provided; otherwise falls back to the primary jaxpr.
  """
  consts, pallas_consts, args = util.split_list(
      args, [num_consts, pallas_num_consts])
  if pallas_jaxpr is None:
    pallas_jaxpr = jaxpr
    pallas_consts = consts
  lowering_context = ctx.lowering_context.replace(block_shapes=ctx.block_shapes)
  return mosaic_lowering.jaxpr_subcomp(
      lowering_context, pallas_jaxpr, *pallas_consts, *args)
@block_spec_lib.register_pull_block_spec_rule(custom_fusion_p)  # type: ignore[arg-type]
def _custom_fusion_pull_block_spec_rule(
    ctx: block_spec_lib.PullRuleContext,
    out_block_transforms: tuple[block_spec_lib.BlockIndexTransform, ...],
    *,
    pull_block_spec_rule: CustomPullBlockSpecRuleFn,
    **_,
) -> Sequence[block_spec_lib.BlockIndexTransform]:
  """Delegates block-spec pulling to the user-registered pull rule."""
  del ctx  # Unused.
  return pull_block_spec_rule(out_block_transforms)
@block_spec_lib.register_push_block_spec_rule(custom_fusion_p)  # type: ignore[arg-type]
def _custom_fusion_push_block_spec_rule(
    ctx: block_spec_lib.PushRuleContext,
    *block_specs: pallas_core.BlockSpec,
    push_block_spec_rule: CustomPushBlockSpecRuleFn,
    **_
) -> tuple[pallas_core.BlockSpec, ...]:
  """Delegates block-spec pushing to the user-registered push rule."""
  del ctx  # Unused.
  # TODO(jburnim): Better error message if push_block_spec_rule is None.
  return push_block_spec_rule(block_specs)
@block_spec_lib.register_usage_rule(custom_fusion_p)  # type: ignore[arg-type]
def _custom_fusion_usage_rule(
    ctx: block_spec_lib.UsageRuleContext,
    used_out: Sequence[set[block_spec_lib.Usage]],
    *,
    jaxpr: core.Jaxpr,
    **_
) -> Sequence[set[block_spec_lib.Usage]]:
  """Propagates output usage backward through the fusion's primary jaxpr."""
  del ctx  # Unused.
  # TODO(jburnim): Error if jaxpr.jaxpr gives different usage than pallas_jaxpr?
  usage_of = block_spec_lib.compute_usage(jaxpr, used_out)
  return [usage_of(invar) for invar in jaxpr.invars]
| {
"repo_id": "jax-ml/jax",
"file_path": "jax/_src/pallas/fuser/custom_fusion_lib.py",
"license": "Apache License 2.0",
"lines": 219,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:jax/_src/pallas/mosaic/interpret/interpret_pallas_call.py | # Copyright 2024 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Callable
import contextlib
import dataclasses
import enum
import functools
import itertools
import math
import threading
from typing import Any, Literal
import jax
from jax import lax
from jax._src import callback
from jax._src import config
from jax._src import core as jax_core
from jax._src import frozen_dict
from jax._src import pjit
from jax._src import source_info_util
from jax._src.interpreters import mlir
from jax._src.tree_util import FlatTree
from jax._src.pallas import core as pallas_core
from jax._src.pallas import primitives
from jax._src.pallas.mosaic import core as mosaic_core
from jax._src.pallas.mosaic import primitives as mosaic_primitives
from jax._src.pallas.mosaic.interpret import shared_memory as memory
from jax._src.pallas.mosaic.interpret import vector_clock as vc
from jax._src.pallas.mosaic.interpret.race_detection_state import RaceDetectionState
from jax._src.pallas.mosaic.interpret.thread_map import thread_map
import jax._src.pallas.mosaic.interpret.utils as interpret_utils
from jax._src import state
from jax._src.state import discharge as state_discharge
from jax._src.state import indexing
from jax._src.state import primitives as state_primitives
from jax._src.typing import Array
from jax._src.util import (
safe_map,
safe_zip,
split_list
)
from jax._src.interpreters import partial_eval as pe
import jax.numpy as jnp
import numpy as np
# Shadow the builtins with length-checked variants, keeping the originals
# available as unsafe_map/unsafe_zip.
map, unsafe_map = safe_map, map
zip, unsafe_zip = safe_zip, zip
@dataclasses.dataclass(frozen=True, kw_only=True)
class InterpretParams(interpret_utils.InterpretParams):
  """Parameters for TPU interpret mode.
  TPU interpret mode is a way to run Pallas TPU kernels on CPU, while simulating
  a TPU's shared memory (HBM, VMEM, etc.), communication (remote and local
  DMAs), and synchronization operations (semaphores, barriers, etc.). This mode
  is intended for debugging and testing.
  To run a kernel under TPU interpret mode, pass an instance of
  ``InterpretParams`` as an argument for the ``interpret`` parameter of
  :func:`jax.experimental.pallas.pallas_call` or
  :func:`jax.experimental.pallas.core_map`.
  NOTE: If an exception is raised while interpreting a kernel, you must call
  :func:`reset_tpu_interpret_mode_state` before using TPU interpret mode
  again in the same process.
  Attributes:
    dma_execution_mode: If "eager", DMAs are executed as soon as they are
      issued. If "on_wait", DMA reads or writes are only executed when a device
      is waiting on a DMA semaphore that will be signaled when the read or write
      is complete.
      Default: "on_wait".
    random_seed: Seed for random number generator used during interpretation.
      Currently random numbers are used to randomize the grid coordinates along
      dimensions with 'parallel' semantics.
      Default: None.
    grid_point_recorder: Callback that is invoked by the interpreter for each
      grid point in the order in which the grid points are traversed. The
      callback is invoked with two arguments: - A tuple of grid coordinates. -
      The local core ID of the core that is processing the grid point. This
      callback is intended for inspecting - the randomization of coordinates
      along grid dimensions with 'parallel' semantics and - the mapping of grid
      points to local (i.e. per-device) cores.
      Default: None.
    allow_hbm_allocation_in_run_scoped: If `True`, allows the allocation of HBM
      buffers (which are then shared across the cores in a device) in
      `run_scoped`. While this behavior can be enabled in the interpreter,
      allocating HBM buffers with `run_scoped` is not supported when executing
      Pallas kernels on a real TPU.
      Default: `False`.
  """
  dma_execution_mode: Literal["eager", "on_wait"] = "on_wait"
  random_seed: int | None = None
  grid_point_recorder: (
      Callable[[tuple[np.int32, ...], np.int32], None] | None
  ) = None
  allow_hbm_allocation_in_run_scoped: bool = False
  @property
  def num_cores_per_device(self) -> int:
    """Number of simulated cores per device (stored as num_cores_or_threads)."""
    return self.num_cores_or_threads
@contextlib.contextmanager
def force_tpu_interpret_mode(params: InterpretParams = InterpretParams()):
  """Context manager that forces TPU interpret mode under its dynamic context.
  TPU interpret mode is a way to run Pallas TPU kernels on CPU, while simulating
  a TPU's shared memory (HBM, VMEM, etc.), communication (remote and local
  DMAs), and synchronization operations (semaphores, barriers, etc.). This mode
  is intended for debugging and testing. See :class:`InterpretParams` for
  additional information.
  Args:
    params: an instance of :class:`InterpretParams`. Any call to
      :func:`jax.experimental.pallas.pallas_call` or
      :func:`jax.experimental.pallas.core_map` that is traced under this context
      manager will be run with ``interpret=params``. When ``params`` is not
      ``None``, this will cause those calls to run with TPU interpret mode.
  """
  # Install `params` thread-locally and always restore the previous value,
  # even if the body raises.
  prev = config.pallas_tpu_interpret_mode_context_manager.swap_local(params)
  try:
    yield
  finally:
    config.pallas_tpu_interpret_mode_context_manager.set_local(prev)
def set_tpu_interpret_mode(params: InterpretParams = InterpretParams()):
  """Globally enables TPU interpret mode with the given params.

  Unlike :func:`force_tpu_interpret_mode`, this sets the global (not
  thread-local) config value and does not restore the previous setting.
  """
  config.pallas_tpu_interpret_mode_context_manager.set_global(params) # type: ignore[arg-type]
# TODO(jburnim): Do we want to support multiple instances of SharedMemory?
# Maybe for running multiple distinct interpreted computations in parallel?
# Global simulated-memory instance, guarded by _shared_memory_init_lock.
_shared_memory: memory.SharedMemory | None = None
_shared_memory_init_lock = threading.Lock()
# Global race-detection state and DMA-id counter; created alongside the
# shared memory in _initialize_shared_memory.
races: RaceDetectionState | None = None
dma_id_counter: interpret_utils.Counter | None = None
def reset_tpu_interpret_mode_state():
  """Resets all global, shared state used by TPU interpret mode.
  TPU interpret mode uses global, shared state for simulating memory buffers
  and semaphores, for race detection, etc., when interpreting a kernel.
  Normally, this shared state is cleaned up after a kernel is interpreted.
  But if an exception is thrown while interpreting a kernel, the shared state
  is not cleaned up, allowing the simulated TPU state to be examined for
  debugging purposes. In this case, the shared state must be reset before
  any further kernels are interpreted.
  """
  global _shared_memory, races, dma_id_counter
  # Hold the init lock so a reset cannot race with a concurrent
  # _initialize_shared_memory from another interpreter thread.
  with _shared_memory_init_lock:
    _shared_memory = None
    races = None
    dma_id_counter = None
def _get_shared_memory() -> memory.SharedMemory:
  """Returns the global SharedMemory; it must already be initialized."""
  shared_memory = _shared_memory
  assert shared_memory is not None
  return shared_memory
def _clear_shared_memory():
  """Drops the global SharedMemory instance (under the init lock)."""
  global _shared_memory
  with _shared_memory_init_lock:
    _shared_memory = None
def _initialize_shared_memory(
    device_id, num_devices, num_cores_per_device, *, interpret_params
):
  """Initializes the global SharedMemory and race-detection state (once).

  Called from every simulated device; only the first caller under the lock
  actually allocates the state, later callers just validate the core count.
  """
  global _shared_memory, races, dma_id_counter
  del device_id
  num_devices = int(num_devices)
  num_cores_per_device = int(num_cores_per_device)
  num_cores = num_devices * num_cores_per_device
  with _shared_memory_init_lock:
    # First arrival creates the shared state; subsequent devices reuse it.
    if _shared_memory is None:
      vector_clock_size = interpret_params.get_vector_clock_size(num_devices)
      races = RaceDetectionState(num_cores=num_cores)
      dma_id_counter = interpret_utils.Counter(100)
      _shared_memory = memory.SharedMemory(
          num_devices=num_devices,
          num_cores_per_device=num_cores_per_device,
          out_of_bounds_reads=interpret_params.out_of_bounds_reads,
          dma_execution_mode=interpret_params.dma_execution_mode,
          uninitialized_memory=interpret_params.uninitialized_memory,
          detect_races=interpret_params.detect_races,
          vector_clock_size=vector_clock_size,
          clocks=[
              vc.make_vector_clock(vector_clock_size) for _ in range(num_cores)
          ],
          # `barrier` also synchronizes all vector clocks when tripped;
          # `clean_up_barrier` drops the shared memory after the last device.
          barrier=threading.Barrier(
              num_devices, action=_update_clocks_for_global_barrier
          ),
          clean_up_barrier=threading.Barrier(
              num_devices, action=_clear_shared_memory
          ),
      )
  # All devices must agree on the simulated core count.
  assert _shared_memory.num_cores == num_cores
def _update_clocks_for_device_barrier(device_id):
  """Synchronizes the vector clocks for the cores on the given device."""
  _get_shared_memory().update_clocks_for_device_barrier(device_id)
def _update_clocks_for_global_barrier():
  """Synchronizes all vector clocks."""
  mem = _get_shared_memory()
  mem.update_clocks(0, mem.num_cores)
def _barrier(device_id):
  """Blocks until all devices arrive; a no-op for a single device."""
  del device_id  # Unused.
  shared_memory = _get_shared_memory()
  if shared_memory.num_devices <= 1:
    return
  shared_memory.barrier.wait()
def _clean_up_shared_memory(device_id):
  """Waits on the clean-up barrier, whose action drops the shared memory."""
  del device_id  # Unused.
  _get_shared_memory().clean_up_barrier.wait()
def _check_for_revisiting(device_id, local_core_id, loop_idx, output_blocks):
  """Raises if a (device, core) revisits an output block non-consecutively.

  Each output block must be visited by a contiguous run of grid iterations;
  returning to a block after moving away from it raises RuntimeError.

  Args:
    device_id: scalar id of the device being checked.
    local_core_id: scalar id of the core on that device.
    loop_idx: current grid coordinates.
    output_blocks: per-output block indices (must be convertible to ints;
      advanced indexers are rejected).
  """
  device_id = int(device_id)
  local_core_id = int(local_core_id)
  loop_idx = tuple(int(x) for x in loop_idx)
  try:
    output_blocks = jax.tree.map(int, output_blocks)
  except Exception as e:
    # Fixed: this was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; catch Exception and chain the original cause.
    raise ValueError('Advanced indexers are not supported on TPU') from e
  output_ranges = [
      interpret_utils.to_range(b) if b is not None else None
      for b in output_blocks
  ]
  shared_memory = _get_shared_memory()
  past_output_ranges = shared_memory.output_ranges[(device_id, local_core_id)]
  if not past_output_ranges:
    past_output_ranges.append((loop_idx, output_ranges))
    return
  for i in range(len(output_ranges)):
    if output_ranges[i] is None:
      continue
    # Same block as the immediately preceding iteration: a consecutive visit.
    if past_output_ranges[-1][1][i] == output_ranges[i]:
      continue
    # TODO(jburnim): Do something constant time instead of linear here.
    past_idxs = [
        j
        for j, ors in enumerate(past_output_ranges)
        if ors[1][i] == output_ranges[i]
    ]
    if past_idxs:
      raise RuntimeError(
          f'Revisited block {output_ranges[i]} of output {i} in iteration '
          f'{loop_idx}. The block was previously visited in iterations '
          f'{past_output_ranges[past_idxs[0]][0]} through '
          f'{past_output_ranges[past_idxs[-1]][0]} .'
      )
  past_output_ranges.append((loop_idx, output_ranges))
def _validate(device_id):
  """Reports (via print) any semaphore with a non-zero count at kernel exit."""
  device_id = int(device_id)
  shared_memory = _get_shared_memory()
  # NOTE(review): the method name is misspelled upstream; the call must match.
  nonzero = shared_memory.get_sempahores_with_nonzero_count(device_id)
  if not nonzero:
    return
  sem, global_core_id = nonzero[0]
  # TODO(jburnim): Make this raise an error, but in a way that doesn't
  # cause other devices to hang later in `_clean_up_shared_memory`.
  print(
      f'Semaphore {sem.id} has non-zero count for {device_id} (global core'
      f' {global_core_id}) at kernel exit:'
      f' {sem.count_by_core[global_core_id]}'
  )
def _allocate_buffer(
    device_id: Array,
    local_core_id: Array | None,
    memory_space: Array,
    val: Array,
):
  """Allocates a memory buffer on the device with id `device_id` and core with id `local_core_id`.

  Args:
    device_id: Singleton array holding the device id where the buffer will be
      allocated.
    local_core_id: None or singleton array holding the core id where the buffer
      will be allocated. If None, a buffer will be allocated on each cores on
      the device.
    memory_space: Singleton array indicating the memory space to allocate the
      buffer in. If the corresponding memory space is "any" (i.e. HBM), at most
      one buffer will be allocated and it will belong to (local) core id 0.
    val: Array of values to initialize the allocated buffer with.

  Returns:
    Integer id for the allocated buffer.
  """
  device_id: int = int(device_id)  # pyrefly: ignore[redefinition]
  memory_space_str = TPU_MEMORY_SPACE_NAMES[int(memory_space)]
  del memory_space
  shared_memory = _get_shared_memory()
  if local_core_id is None:
    # Allocate on every core of the device; core 0's buffer id is returned.
    local_core_id_int = 0
    local_core_ids = tuple(range(shared_memory.num_cores_per_device))
  else:
    local_core_id_int = int(local_core_id)
    local_core_ids = (local_core_id_int,)
  del local_core_id
  local_core_id_to_buffer_id: dict[int, int] = {}
  for lci in local_core_ids:
    buffer_id = shared_memory.get_next_buffer_id(device_id, lci)
    if memory_space_str in ['any', 'hbm']:
      # If allocating in HBM, only actually allocate a buffer once. The first
      # local core (i.e. thread) that gets here allocates the buffer, but the
      # buffer is still keyed in the shared memory with core ID 0. However,
      # since the buffer is shared across all cores, we initialize the buffer's
      # `ref_count` with the number of cores per device. This ensures that the
      # buffer is not deallocated until all cores have exited the scope of the
      # allocation (e.g. have exited the body of a `run_scoped`).
      key = (memory_space_str, buffer_id, device_id, 0)
      ref_count = shared_memory.num_cores_per_device
    else:
      key = (memory_space_str, buffer_id, device_id, lci)
      ref_count = 1
    if len(local_core_id_to_buffer_id) > 0:
      # If we are allocating more than one buffer, we must make additional
      # copies of `val` so that each buffer is a distinct ndarray.
      val = val.copy()
    shared_memory.allocate_buffer(
        key, ref_count=ref_count, value=np.array(val)
    )
    local_core_id_to_buffer_id[lci] = buffer_id
  # The buffer ids should always be kept in sync across all cores.
  assert all(
      buffer_id == local_core_id_to_buffer_id[local_core_id_int]
      for buffer_id in local_core_id_to_buffer_id.values()
  )
  # TODO(jburnim): Raise an error if buffer_id is too big for int16.
  return np.int16(local_core_id_to_buffer_id[local_core_id_int])
def _local_core_id_or_zero_if_hbm(local_core_id: int, memory_space: str) -> int:
if memory_space in ['any', 'hbm']:
return 0
return local_core_id
def _deallocate_buffer(device_id, local_core_id, memory_space, buffer_id):
  """Releases the buffer identified by (memory space, buffer id, device, core).

  HBM/'any' buffers are keyed to core 0 regardless of the calling core.
  """
  dev = int(device_id)
  space_name = TPU_MEMORY_SPACE_NAMES[int(memory_space)]
  owner_core = _local_core_id_or_zero_if_hbm(int(local_core_id), space_name)
  mem = _get_shared_memory()
  mem.deallocate_buffer((space_name, int(buffer_id), dev, owner_core))
def _allocate_semaphores(
    device_id: Array, local_core_id: Array | None, shape: Array
):
  """Allocates semaphores on the device with id `device_id` and core with id `local_core_id`.

  The number of semaphores allocated is given by the product of the entries in
  `shape`.

  Since for each semaphore id there is really only one global `Semaphore`
  object, 'allocation' of semaphores per device and core here means that the
  internal counter of semaphore ids that is held by `SharedMemory` is
  incremented for the device and core (or for all cores on the device if
  argument `local_core_id` is None, see below).

  Args:
    device_id: Singleton array holding the id for the device where the
      semaphores will be allocated.
    local_core_id: None or singleton array holding the id for the core where the
      semaphores will be allocated. If None, semaphores will be allocated on all
      cores on the device.
    shape: Shape of the semaphore array to allocate.

  Returns:
    Array of semaphore ids.
  """
  device_id: int = int(device_id)  # pyrefly: ignore[redefinition]
  shape: tuple[int, ...] = tuple(map(int, shape))  # pyrefly: ignore[redefinition]
  num_semaphores = math.prod(shape)
  shared_memory = _get_shared_memory()
  if local_core_id is None:
    # Allocate on all cores; core 0's semaphore id is the one returned.
    local_core_id_int = 0
    global_core_ids = shared_memory.get_global_core_ids(device_id)
  else:
    local_core_id_int = int(local_core_id)
    global_core_ids = (
        shared_memory.get_global_core_id(device_id, local_core_id_int),
    )
  del local_core_id
  global_core_id_to_semaphore_id = {}
  for gci in global_core_ids:
    semaphore_id = shared_memory.allocate_semaphores(gci, num_semaphores)
    global_core_id_to_semaphore_id[gci] = semaphore_id
  global_core_id = shared_memory.get_global_core_id(
      device_id, local_core_id_int
  )
  # The semaphore ids should always be kept in sync across all cores.
  assert all(
      semaphore_id == global_core_id_to_semaphore_id[global_core_id]
      for semaphore_id in global_core_id_to_semaphore_id.values()
  )
  # NOTE: For now, we use a relatively uncommon datatype (int16) for
  # semaphore (and buffer) IDs, so these values are more easily identifiable
  # in kernels.
  #
  # TODO(jburnim): Raise an error if any IDs are too big for int16.
  semaphore_id = global_core_id_to_semaphore_id[global_core_id]
  return np.arange(
      semaphore_id, semaphore_id + num_semaphores, dtype=np.int16
  ).reshape(shape)
# Bidirectional tables between memory-space enum values and the small integer
# indices used to pass memory spaces through io_callbacks:
#   TPU_MEMORY_SPACE_IDXS: enum value (or None) -> integer index.
#   TPU_MEMORY_SPACE_NAMES: integer index -> memory-space name string.
TPU_MEMORY_SPACE_IDXS: dict[
    mosaic_core.MemorySpace | pallas_core.MemorySpace | None, int
] = {v: i for i, v in enumerate(mosaic_core.MemorySpace)}
TPU_MEMORY_SPACE_NAMES = {
    i: v.value for i, v in enumerate(mosaic_core.MemorySpace)
}
# Inject ANY as the last memory space.
TPU_MEMORY_SPACE_NAMES[len(TPU_MEMORY_SPACE_IDXS)] = (
    pallas_core.MemorySpace.ANY.value
)
TPU_MEMORY_SPACE_IDXS[pallas_core.MemorySpace.ANY] = len(TPU_MEMORY_SPACE_IDXS)
# Default to VMEM when no memory space is specified.
TPU_MEMORY_SPACE_IDXS[None] = TPU_MEMORY_SPACE_IDXS[
    mosaic_core.MemorySpace.VMEM
]
def get_barrier_semaphore(device_id, collective_id):
  """Returns the fixed semaphore id associated with `collective_id`.

  Ensures a semaphore with that exact id exists in the shared memory first.
  The device id argument is unused.
  """
  del device_id
  sem_id = int(collective_id)
  _get_shared_memory().guarantee_semaphore_with_fixed_id(sem_id)
  return np.int16(sem_id)
def _to_int(x: int | Array | None) -> int | None:
"""Converts a value to an integer, or returns None if the value is None."""
if x is None:
return None
return int(x)
def get(
    device_id,
    local_core_id,
    memory_space,
    buffer_id,
    transforms,
    block_indices=None,
    grid_loop_idx=None,
    *,
    src_device_id=None,
    src_local_core_id=None,
    clock=None,
    source_info=None,
    input_name=None,
) -> np.ndarray:
  """Reads a (transformed) buffer from the simulated shared memory.

  Args:
    device_id: Id of the device on which the read happens.
    local_core_id: Id of the core (on `device_id`) doing the read.
    memory_space: Index into `TPU_MEMORY_SPACE_NAMES` identifying the buffer's
      memory space.
    buffer_id: Id of the buffer to read from.
    transforms: Indexing transforms for the read; every leaf must convert to
      an int (advanced indexers are rejected).
    block_indices: Block indices, set only when reading a block of a
      pallas_call input; used in error messages.
    grid_loop_idx: Grid loop index, set only together with `block_indices`.
    src_device_id: Optional id (e.g. a DMA id) to attribute the read to for
      race detection; defaults to `device_id`.
    src_local_core_id: Optional core id for race detection; defaults to
      `local_core_id`.
    clock: Optional vector clock for race detection; defaults to the clock
      returned by the shared memory.
    source_info: Optional source info for pointing errors at user code.
    input_name: Name of the pallas_call input being read, if any.

  Returns:
    The values read, as an ndarray. Out-of-bounds portions are filled with
    uninitialized-memory values unless the interpret params request raising.

  Raises:
    ValueError: If `transforms` contains advanced indexers.
    IndexError: If the read is out of bounds and
      `shared_memory.out_of_bounds_reads == 'raise'`.
  """
  device_id = int(device_id)
  local_core_id = int(local_core_id)
  memory_space = TPU_MEMORY_SPACE_NAMES[int(memory_space)]
  buffer_id = int(buffer_id)
  try:
    transforms = jax.tree.map(int, transforms)
  except Exception as e:
    # Was a bare `except:`, which would also have swallowed
    # KeyboardInterrupt/SystemExit. Chain the original failure for debugging.
    raise ValueError('Advanced indexers are not supported on TPU') from e
  src_device_id = _to_int(src_device_id)
  src_local_core_id = _to_int(src_local_core_id)
  if input_name is not None:
    # NOTE: input_name, block_indices, and grid_loop_idx are set only if this
    # function is being called to read a block from a pallas_call input (at the
    # start of one iteration of the kernel body).
    assert block_indices is not None
    block_indices = tuple(int(x) for x in block_indices)
    assert grid_loop_idx is not None
    grid_loop_idx = tuple(int(x) for x in tuple(grid_loop_idx))
  shared_memory = _get_shared_memory()
  local_core_id_for_buffer = _local_core_id_or_zero_if_hbm(
      local_core_id, memory_space
  )
  global_core_id = shared_memory.get_global_core_id(device_id, local_core_id)
  key = (memory_space, buffer_id, device_id, local_core_id_for_buffer)
  read_range = interpret_utils.to_range(transforms)
  ret, (shape, dtype), clock_ = shared_memory.get_buffer_content(
      key, read_range, global_core_id
  )
  clock = clock if clock is not None else clock_
  # Compute the shape of the read value, assuming the read is fully in-bounds.
  # TODO(jburnim): We already know this shape in the Jaxpr where we insert a
  # callback to `get`. Should we just pass the shape to `get`?
  # TODO(jburnim): Move to a helper function?
  full_read_shape: list[int] = []
  assert len(read_range) <= len(shape)
  for dim_size, idx_or_slice in itertools.zip_longest(
      shape, read_range, fillvalue=None
  ):
    assert isinstance(dim_size, int)
    if idx_or_slice is None:
      # Dimension is not indexed at all: read it whole.
      full_read_shape.append(dim_size)
    elif isinstance(idx_or_slice, int):
      # Integer indices drop the dimension.
      continue
    else:
      dim_size = (idx_or_slice.stop - idx_or_slice.start) // idx_or_slice.step
      assert isinstance(dim_size, int)
      full_read_shape.append(dim_size)
  if (ret is None) or (tuple(full_read_shape) != ret.shape):
    # The read was (at least partially) out of bounds.
    if shared_memory.out_of_bounds_reads == 'raise':
      if source_info is None:
        ctx = contextlib.nullcontext()
      else:
        ctx = source_info_util.user_context(
            traceback=source_info.traceback, name_stack=source_info.name_stack
        )  # type: ignore[assignment]
      with ctx:
        if input_name is None:
          raise IndexError(
              'Out-of-bounds read of'
              f' ({device_id} {local_core_id} {memory_space} {buffer_id}):'
              f' reading [{read_range}] but buffer has shape {shape}.'
          )
        else:
          # Different error message when we are reading a block of an input,
          # to copy it to a buffer before invoking the kernel body.
          raise IndexError(
              f'Out-of-bounds block index {block_indices} for'
              f' input "{input_name}" in iteration {grid_loop_idx}'
              f' on device {device_id} (core {local_core_id}):'
              f' reading [{read_range}] but input has shape {shape}.'
          )
    # out_of_bounds_reads == "uninitialized": pad the in-bounds portion of the
    # read with uninitialized-memory fill values.
    uninit_array = np.full(
        full_read_shape,
        interpret_utils.get_uninitialized_value(
            dtype, shared_memory.uninitialized_memory
        ),
        dtype=dtype,
    )
    if ret is None:
      ret = uninit_array
    else:
      uninit_array[tuple(slice(s) for s in ret.shape)] = ret
      ret = uninit_array
  if shared_memory.detect_races:
    if src_device_id is None:
      src_device_id = device_id
    if src_local_core_id is None:
      src_local_core_id = local_core_id
    assert races is not None
    races.check_read(
        src_device_id,
        src_local_core_id,
        clock,
        (memory_space, buffer_id, device_id, local_core_id_for_buffer),
        read_range,
        source_info=source_info,
    )
  return ret
def store(
    device_id,
    local_core_id,
    memory_space,
    buffer_id,
    transforms,
    val,
    block_indices=None,
    grid_loop_idx=None,
    *,
    src_device_id=None,
    src_local_core_id=None,
    clock=None,
    source_info=None,
    output_name=None,
):
  """Writes `val` into a (transformed) buffer in the simulated shared memory.

  Args:
    device_id: Id of the device on which the write happens.
    local_core_id: Id of the core (on `device_id`) doing the write.
    memory_space: Index into `TPU_MEMORY_SPACE_NAMES` identifying the buffer's
      memory space.
    buffer_id: Id of the buffer to write to.
    transforms: Indexing transforms for the write; every leaf must convert to
      an int (advanced indexers are rejected).
    val: Values to write.
    block_indices: Block indices, set only when storing a block of a
      pallas_call output; used in error messages.
    grid_loop_idx: Grid loop index, set only together with `block_indices`.
    src_device_id: Optional id (e.g. a DMA id) to attribute the write to for
      race detection; defaults to `device_id`.
    src_local_core_id: Optional core id for race detection; defaults to
      `local_core_id`.
    clock: Optional vector clock for race detection; defaults to the clock
      returned by the shared memory.
    source_info: Optional source info for pointing errors at user code.
    output_name: Name of the pallas_call output being written, if any.

  Raises:
    ValueError: If `transforms` contains advanced indexers, or if a plain
      (non-output) write is out of bounds.
    IndexError: If an out-of-bounds block of a pallas_call output is written.
  """
  device_id = int(device_id)
  local_core_id = int(local_core_id)
  memory_space = TPU_MEMORY_SPACE_NAMES[int(memory_space)]
  buffer_id = int(buffer_id)
  try:
    transforms = jax.tree.map(int, transforms)
  except Exception as e:
    # Was a bare `except:`, which would also have swallowed
    # KeyboardInterrupt/SystemExit. Chain the original failure for debugging.
    raise ValueError('Advanced indexers are not supported on TPU') from e
  val = np.array(val)
  src_device_id = _to_int(src_device_id)
  src_local_core_id = _to_int(src_local_core_id)
  if output_name is not None:
    # NOTE: output_name, block_indices, and grid_loop_idx are set only if this
    # function is being called to store a block into a pallas_call output (at
    # the end of one iteration of the kernel body).
    assert block_indices is not None
    block_indices = tuple(int(x) for x in block_indices)
    assert grid_loop_idx is not None
    grid_loop_idx = tuple(int(x) for x in tuple(grid_loop_idx))
  shared_memory = _get_shared_memory()
  local_core_id_for_buffer = _local_core_id_or_zero_if_hbm(
      local_core_id, memory_space
  )
  global_core_id = shared_memory.get_global_core_id(device_id, local_core_id)
  key = (memory_space, buffer_id, device_id, local_core_id_for_buffer)
  write_range = interpret_utils.to_range(transforms)
  in_bounds, (shape, _), clock_ = shared_memory.store_buffer_content(
      key, write_range, val, global_core_id
  )
  clock = clock if clock is not None else clock_
  if not in_bounds:
    if output_name is None:
      raise ValueError(
          'Out-of-bounds write of'
          f' ({device_id} {local_core_id} {memory_space} {buffer_id}):'
          f' writing [{write_range}] but buffer has shape {shape} .'
      )
    else:
      # Different error message when we are copying a kernel buffer to a
      # block of an output (just after a kernel invocation).
      raise IndexError(
          f'Out-of-bounds block index {block_indices} for'
          f' output "{output_name}" in iteration {grid_loop_idx}'
          f' on device {device_id} (core {local_core_id}):'
          # Fix: this message previously said "reading" for a write.
          f' writing [{write_range}] but output has shape {shape}.'
      )
  if shared_memory.detect_races:
    if src_device_id is None:
      src_device_id = device_id
    if src_local_core_id is None:
      src_local_core_id = local_core_id
    assert races is not None
    races.check_write(
        src_device_id,
        src_local_core_id,
        clock,
        (memory_space, buffer_id, device_id, local_core_id_for_buffer),
        write_range,
        source_info=source_info,
    )
def swap(
    device_id,
    local_core_id,
    memory_space,
    buffer_id,
    transforms,
    val,
    mask,
    *,
    source_info=None,
):
  """Swaps `val` into a (transformed) buffer and returns the old values.

  When `mask` is given, only positions where `mask` is True are swapped (mask
  and value shapes must match).

  Args:
    device_id: Id of the device on which the swap happens.
    local_core_id: Id of the core (on `device_id`) doing the swap.
    memory_space: Index into `TPU_MEMORY_SPACE_NAMES` identifying the buffer's
      memory space.
    buffer_id: Id of the buffer to swap into.
    transforms: Indexing transforms for the swap; every leaf must convert to
      an int (advanced indexers are rejected).
    val: New values to write.
    mask: Optional boolean mask selecting which positions to swap.
    source_info: Optional source info for race-detection reporting.

  Returns:
    The previous buffer contents over the swapped range.

  Raises:
    ValueError: If `transforms` contains advanced indexers, or if the swap is
      out of bounds.
  """
  device_id = int(device_id)
  local_core_id = int(local_core_id)
  memory_space = TPU_MEMORY_SPACE_NAMES[int(memory_space)]
  buffer_id = int(buffer_id)
  try:
    transforms = jax.tree.map(int, transforms)
  except Exception as e:
    # Was a bare `except:`, which would also have swallowed
    # KeyboardInterrupt/SystemExit. Chain the original failure for debugging.
    raise ValueError('Advanced indexers are not supported on TPU') from e
  val = np.array(val)
  mask = np.array(mask) if mask is not None else None
  if mask is not None:
    assert mask.shape == val.shape
  shared_memory = _get_shared_memory()
  local_core_id_for_buffer = _local_core_id_or_zero_if_hbm(
      local_core_id, memory_space
  )
  global_core_id = shared_memory.get_global_core_id(device_id, local_core_id)
  key = (memory_space, buffer_id, device_id, local_core_id_for_buffer)
  read_write_range = interpret_utils.to_range(transforms)
  ret, (shape, _), clock = shared_memory.swap_buffer_content(
      key, read_write_range, val, mask, global_core_id
  )
  if ret is None:
    # `None` from swap_buffer_content signals an out-of-bounds access.
    if mask is None:
      raise ValueError(
          'Out-of-bounds swap of'
          f' ({device_id} {local_core_id} {memory_space} {buffer_id}):'
          f' swapping [{read_write_range}] but buffer has shape'
          f' {shape} .'
      )
    else:
      # TODO(jburnim): Include indices of out-of-bounds locations where mask
      # is True.
      raise ValueError(
          'Out-of-bounds masked swap of'
          f' ({device_id} {local_core_id} {memory_space} {buffer_id}): swapping'
          f' [{read_write_range}] but buffer has shape {shape} . '
      )
  if shared_memory.detect_races:
    assert races is not None
    races.check_write(
        device_id,
        local_core_id,
        clock,
        (memory_space, buffer_id, device_id, local_core_id_for_buffer),
        read_write_range,
        source_info=source_info,
    )
  return ret
class DmaState(enum.Enum):
  """Lifecycle of a simulated DMA: started -> source read -> dest written."""
  STARTED = 0
  READ = 1
  COMPLETED = 2
@dataclasses.dataclass
class DMA:
  """A simulated asynchronous copy between two (possibly remote) buffers.

  Created by `dma_start`, and executed in two phases: reading from the source
  buffer (signalling `src_sem`, if present, afterwards) and writing to the
  destination buffer (signalling `dst_sem` afterwards). The phases may run
  eagerly or later as semaphore tasks, depending on the DMA execution mode.
  """
  id: int
  # Source buffer coordinates and indexing transforms.
  src_device_id: int
  src_local_core_id: int
  src_memory_space: int
  src_buffer_id: int
  src_transforms: tuple[Any, ...]
  # Destination buffer coordinates and indexing transforms.
  dst_device_id: int
  dst_local_core_id: int
  dst_memory_space: int
  dst_buffer_id: int
  dst_transforms: tuple[Any, ...]
  # Semaphore signalled after the read phase (None when the caller supplied no
  # source semaphore), and semaphore signalled after the write phase.
  src_sem: memory.Semaphore | None
  dst_sem: memory.Semaphore
  # Virtual device id under which this DMA advances its vector clock for race
  # detection.
  virtual_device_id: int
  clock: vc.VectorClock
  source_info: source_info_util.SourceInfo | None = None
  state: DmaState = DmaState.STARTED
  # Data read from the source; held only between the read and write phases.
  data: np.ndarray | None = None
  # Guards `state` and `data` against concurrent phase execution.
  lock: threading.Lock = dataclasses.field(default_factory=threading.Lock)

  @property
  def data_size(self) -> int:
    """Size in bytes of the data read from the source buffer."""
    assert self.data is not None
    return self.data.itemsize * self.data.size

  @property
  def detect_races(self) -> bool:
    """Whether race detection is enabled (delegated to the semaphore)."""
    return self.dst_sem.detect_races

  @property
  def src_global_core_id(self) -> int:
    """Global core id of the source (device, core) pair."""
    return self.dst_sem.get_global_core_id(
        self.src_device_id, self.src_local_core_id
    )

  @property
  def dst_global_core_id(self) -> int:
    """Global core id of the destination (device, core) pair."""
    return self.dst_sem.get_global_core_id(
        self.dst_device_id, self.dst_local_core_id
    )

  def execute_read(self):
    """Executes the reading part of this DMA.

    Note that the caller must not hold the lock on the shared memory (because
    `get` is called in this method).
    """
    # Must acquire the lock on `self` because:
    #  - `self.state` is inspected and modified in this method.
    #  - `self.data` is assigned in this method.
    with self.lock:
      if self.state != DmaState.STARTED:
        # Read already done (or whole DMA already completed): nothing to do.
        return

      if self.detect_races:
        vc.inc_vector_clock(self.clock, self.virtual_device_id)
      self.data = get(
          self.src_device_id,
          self.src_local_core_id,
          self.src_memory_space,
          self.src_buffer_id,
          self.src_transforms,
          clock=vc.copy_vector_clock(self.clock),
          src_device_id=self.id,
          src_local_core_id=0,
          source_info=self.source_info,
      )

      if self.detect_races:
        vc.inc_vector_clock(self.clock, self.virtual_device_id)
      # Signal the send semaphore.
      if self.src_sem is not None:
        self.src_sem.signal(
            self.data_size, self.src_global_core_id, clock=self.clock
        )
      self.state = DmaState.READ

  def execute_write(self):
    """Executes the writing part of this DMA.

    Note that the caller must not hold the lock on the shared memory (because
    `store` is called in this method).
    """
    # Must acquire the lock on `self` because:
    #  - `self.state` is inspected and modified in this method.
    #  - `self.data` is assigned in this method.
    with self.lock:
      assert self.state in (DmaState.READ, DmaState.COMPLETED)
      if self.state == DmaState.COMPLETED:
        return

      assert self.data is not None
      if self.detect_races:
        vc.inc_vector_clock(self.clock, self.virtual_device_id)
      store(
          self.dst_device_id,
          self.dst_local_core_id,
          self.dst_memory_space,
          self.dst_buffer_id,
          self.dst_transforms,
          self.data,
          clock=vc.copy_vector_clock(self.clock),
          src_device_id=self.id,
          src_local_core_id=0,
          source_info=self.source_info,
      )

      if self.detect_races:
        vc.inc_vector_clock(self.clock, self.virtual_device_id)
      self.dst_sem.signal(
          self.data_size, self.dst_global_core_id, clock=self.clock
      )
      # Drop the buffered data once it has been written out.
      self.data = None
      self.state = DmaState.COMPLETED

  def execute_read_and_write(self):
    """Executes this DMA, both the reading and writing parts.

    Note that the caller must not hold the lock on the shared memory.
    """
    self.execute_read()
    self.execute_write()
def dma_start(
    device_id,
    src_local_core_id,
    src_memory_space,
    src_id,
    src_transforms,
    dst_memory_space,
    dst_id,
    dst_transforms,
    dst_sem_id,
    src_sem_id,
    dst_device_id,
    source_info=None,
):
  """Starts a simulated DMA copy from a source buffer to a destination buffer.

  In 'on_wait' DMA execution mode, the copy is deferred: read/write phases are
  registered as tasks on the involved semaphores and run when those semaphores
  are waited on. In 'eager' mode, the copy is performed immediately.

  A `None` `dst_sem_id`-side `src_sem_id` means no send semaphore; a `None`
  `dst_device_id` means a device-local copy. The destination core is always
  the same local core id as the source core.
  """
  shared_memory = _get_shared_memory()

  device_id = int(device_id)
  src_local_core_id = int(src_local_core_id)
  src_global_core_id = shared_memory.get_global_core_id(
      device_id, src_local_core_id
  )
  src_memory_space, src_id = int(src_memory_space), int(src_id)
  src_transforms = jax.tree.map(int, src_transforms)
  dst_memory_space, dst_id = int(dst_memory_space), int(dst_id)
  dst_transforms = jax.tree.map(int, dst_transforms)
  dst_sem_id = int(dst_sem_id)
  src_sem_id = int(src_sem_id) if src_sem_id is not None else None
  if dst_device_id is not None:
    dst_device_id = int(dst_device_id)
  else:
    dst_device_id = device_id
  dst_global_core_id = shared_memory.get_global_core_id(
      dst_device_id, src_local_core_id  # Same core on destination device as on source.
  )

  (src_sem, dst_sem), clock = shared_memory.get_semaphores_and_increment_clock(
      (src_sem_id, dst_sem_id), src_global_core_id
  )
  assert dst_sem is not None

  assert dma_id_counter is not None
  id = dma_id_counter.get_next()
  dma = DMA(
      id,
      device_id,
      src_local_core_id,
      src_memory_space,
      src_id,
      src_transforms,
      dst_device_id,
      src_local_core_id,  # Same core on destination device as on source.
      dst_memory_space,
      dst_id,
      dst_transforms,
      src_sem,
      dst_sem,
      virtual_device_id=shared_memory.get_random_virtual_device_id(),
      clock=clock,  # pyrefly: ignore[bad-argument-type]
      source_info=source_info,
  )

  if shared_memory.dma_execution_mode == 'on_wait':
    if src_sem_id is None:
      # No send semaphore: the whole copy runs when the receive semaphore is
      # waited on.
      shared_memory.append_semaphore_task(
          dst_sem_id, dst_global_core_id, dma.execute_read_and_write
      )
    else:
      shared_memory.append_semaphore_task(
          src_sem_id, src_global_core_id, dma.execute_read
      )
      shared_memory.append_semaphore_task(
          dst_sem_id,
          dst_global_core_id,
          # This task for the waiting semaphore with ID `dst_sem_id` may be
          # executed before the corresponding DMA task for the sending semaphore
          # that does the DMA read. We therefore have to append a read-and-write
          # task here, instead of just a write task. If the reading for the DMA
          # has already been executed, the DMA's state will indicate this and
          # the read-write-task appended here will do the write only.
          # (Alternatively, we could have the DMA write task wait on the
          # `send_semphore`. This issue with this approach is that we do not
          # know the number of bytes transferred that `send_semaphore` should be
          # waiting for until after the reader task is done.)
          dma.execute_read_and_write,
      )
    return

  assert shared_memory.dma_execution_mode == 'eager'
  dma.execute_read_and_write()
def dma_wait(device_id, local_core_id, sem_id, size):
  """Waits on a DMA receive semaphore for `size` (bytes, per `dma_start`)."""
  mem = _get_shared_memory()
  global_core_id = mem.get_global_core_id(int(device_id), int(local_core_id))
  (sem,), _ = mem.get_semaphores_and_increment_clock(
      [int(sem_id)], global_core_id
  )
  assert sem is not None
  # `has_tasks=True`: deferred DMA tasks attached to this semaphore may need
  # to run to satisfy the wait.
  sem.wait(int(size), global_core_id, has_tasks=True)
def semaphore_signal(
    device_id,
    local_core_id,
    sem_id,
    inc,
    target_device_id,
    target_local_core_id,
):
  """Signals a semaphore by `inc` on behalf of a (possibly remote) target.

  A `None` target device defaults to the signalling device; a `None` target
  core defaults to local core 0.
  """
  mem = _get_shared_memory()
  device_id = int(device_id)
  src_global_core_id = mem.get_global_core_id(device_id, int(local_core_id))
  target_device_id = (
      device_id if target_device_id is None else int(target_device_id)
  )
  if target_local_core_id is None:
    target_local_core_id = 0
  (sem,), clock = mem.get_semaphores_and_increment_clock(
      [int(sem_id)], src_global_core_id
  )
  assert sem is not None
  sem.signal(
      int(inc),
      mem.get_global_core_id(target_device_id, target_local_core_id),
      clock,
  )
def semaphore_wait(device_id, local_core_id, sem_id, value):
  """Waits on the given semaphore for `value` on the calling core."""
  mem = _get_shared_memory()
  global_core_id = mem.get_global_core_id(int(device_id), int(local_core_id))
  (sem,), _ = mem.get_semaphores_and_increment_clock(
      [int(sem_id)], global_core_id
  )
  assert sem is not None
  sem.wait(int(value), global_core_id)
# Short aliases for the memory spaces referenced repeatedly below.
_SEMAPHORE = mosaic_core.MemorySpace.SEMAPHORE
_HBM = mosaic_core.MemorySpace.HBM
_ANY = pallas_core.MemorySpace.ANY
def _forward_any_to_hbm(memory_space):
  """Maps the generic ANY memory space to HBM; all others pass through."""
  return _HBM if memory_space is _ANY else memory_space
# Placeholder for floating-point values when `skip_floating_point_ops` is
# enabled (see `_interpret_jaxpr`).
_SENTINEL = jnp.inf
def _get_memory_space_and_raise_if_hbm(aval, primitive_name, message=None):
  """Returns the aval's memory space, raising if it resolves to HBM.

  HBM (and ANY, which maps to HBM) buffers must not be dereferenced directly
  by `primitive_name`; `message` overrides the default error text.
  """
  memory_space = _forward_any_to_hbm(aval.memory_space)
  if memory_space is not _HBM:
    return memory_space
  if message is None:
    message = (
        f'{primitive_name}: Buffers with a memory space of HBM or ANY cannot'
        ' be referenced directly. Instead, use `pltpu.sync_copy` or'
        ' `pltpu.async_copy`.'
    )
  raise ValueError(message)
def _interpret_jaxpr(
    jaxpr,
    *args,
    axis_sizes,
    mesh,
    axis_indices,
    device_id,
    local_core_id,
    mosaic_params,
    interpret_params
):
  """Evaluates `jaxpr` (a Pallas kernel body) one equation at a time.

  Memory, semaphore, and DMA primitives are routed through ordered
  io_callbacks into the simulated shared memory; higher-order control-flow
  primitives (cond, scan, while, pjit, run_scoped) are interpreted
  recursively; every other primitive is bound directly -- or skipped entirely
  when `interpret_params.skip_floating_point_ops` is set and the equation
  produces only floating-point outputs.

  Args:
    jaxpr: The jaxpr to interpret.
    *args: Values for the jaxpr's constvars followed by its invars.
    axis_sizes: Mesh axis sizes, used to resolve logical device ids.
    mesh: The mesh when interpreting a core_map, or None.
    axis_indices: Replacement values for `lax.axis_index` calls, so the body
      can run on other threads without recreating the axis environment.
    device_id: Id of the simulated device running this jaxpr.
    local_core_id: Id of the core (on `device_id`) running this jaxpr.
    mosaic_params: Compiler params for the Mosaic TPU backend.
    interpret_params: Interpret-mode parameters.

  Returns:
    The values of the jaxpr's outvars.
  """
  sentinel_for_floating_point_values = (
      _SENTINEL if interpret_params.skip_floating_point_ops else None
  )
  env = interpret_utils.JaxprEnv(
      vars=jaxpr.constvars + jaxpr.invars,
      values=args,
      sentinel_for_floating_point_values=sentinel_for_floating_point_values,
  )

  # TODO(jburnim): Clean up and finish this evaluation loop. For example:
  #  - Replace the big if-statement with a dictionary of rules.
  #  - Handle other higher-order primitives?
  _interpret = functools.partial(
      _interpret_jaxpr,
      axis_sizes=axis_sizes,
      mesh=mesh,
      axis_indices=axis_indices,
      device_id=device_id,
      local_core_id=local_core_id,
      mosaic_params=mosaic_params,
      interpret_params=interpret_params,
  )
  for eqn in jaxpr.eqns:
    with source_info_util.user_context(
        eqn.source_info.traceback, name_stack=eqn.source_info.name_stack):
      prim = eqn.primitive

      # We defer reading the values for `eqn.invars` into each of the branches
      # of the if-elif-else statement below. This is because the else branch may
      # not need to do any reads if `interpret_params.skip_floating_point_ops`
      # is True. If this is the case, we want to avoid materializing the read
      # array into the jaxpr when this function is traced.
      deferred_invals = functools.partial(env.read_many, eqn.invars)

      if prim is primitives.load_p:
        (ref, transforms, mask, _) = jax.tree.unflatten(
            eqn.params['args_tree'], deferred_invals())
        if mask is not None:
          raise NotImplementedError('masked load_p')
        memory_space = _get_memory_space_and_raise_if_hbm(
            eqn.invars[0].aval, 'load_p'
        )
        out = callback.io_callback(
            functools.partial(get, source_info=eqn.source_info),
            eqn.outvars[0].aval,
            device_id,
            local_core_id,
            TPU_MEMORY_SPACE_IDXS[memory_space],
            ref,
            transforms,
            ordered=True,
        )

      elif prim is primitives.swap_p:
        (ref, transforms, val, mask) = jax.tree.unflatten(
            eqn.params['args_tree'], deferred_invals())
        memory_space = _get_memory_space_and_raise_if_hbm(
            eqn.invars[0].aval, 'swap_p'
        )
        out = callback.io_callback(
            functools.partial(swap, source_info=eqn.source_info),
            eqn.outvars[0].aval,
            device_id,
            local_core_id,
            TPU_MEMORY_SPACE_IDXS[memory_space],
            ref,
            transforms,
            val,
            mask,
            ordered=True,
        )

      elif prim is primitives.delay_p:
        # TODO(jburnim): Implement this properly?
        out = []

      elif prim is mosaic_primitives.prng_seed_p:
        # TODO(jburnim): Implement this properly?
        out = []

      elif prim is mosaic_primitives.prng_random_bits_p:
        # TODO(jburnim): Implement this properly?
        out = jnp.zeros(eqn.params['shape'], jnp.int32)

      elif ((prim is lax.axis_index_p)
            and (mesh is not None) and (eqn.params['axis_name'] in mesh.shape)):
        # We are interpreting a core_map, and this lax.axis_index call is
        # querying our index along the core axis, so return our core ID.
        out = local_core_id

      elif ((prim is lax.axis_index_p)
            and (eqn.params['axis_name'] in axis_indices)):
        # We replace lax.axis_index calls in the kernel body, so that the
        # kernel body jaxpr can be run on other threads (via an io_callback)
        # without having to recreate the axis environment in those threads.
        out = axis_indices[eqn.params['axis_name']]

      elif prim is lax.cond_p:
        def _make_branch(jaxpr):
          return lambda *args: _interpret(jaxpr, *args)
        invals = deferred_invals()
        out = lax.switch(
            invals[0],
            [_make_branch(branch_jaxpr.jaxpr)
             for branch_jaxpr in eqn.params['branches']],
            *invals[1:])

      elif prim is lax.scan_p:
        consts, init_carry, xs = split_list(
            deferred_invals(),
            [eqn.params['num_consts'], eqn.params['num_carry']],
        )
        def _scan_body(c, a):
          return split_list(
              _interpret(eqn.params['jaxpr'].jaxpr, *consts, *c, *a),
              [eqn.params['num_carry']])
        carry, out = lax.scan(_scan_body, init_carry, xs=xs,
                              length=eqn.params.get('length', None))
        out = carry + out

      elif prim is lax.while_p:
        cond_consts, body_consts, init_vals = split_list(
            deferred_invals(),
            [eqn.params['cond_nconsts'], eqn.params['body_nconsts']],
        )
        out = lax.while_loop(
            lambda args: _interpret(
                eqn.params['cond_jaxpr'].jaxpr, *cond_consts, *args)[0],
            lambda args: _interpret(
                eqn.params['body_jaxpr'].jaxpr, *body_consts, *args),
            init_vals)

      elif prim is pjit.jit_p:
        def f(*args, jaxpr):
          return _interpret(jaxpr.jaxpr, *jaxpr.consts, *args)
        invals = deferred_invals()
        args_ft = FlatTree.flatten((invals, {}))
        avals_ft = args_ft.map(jax_core.shaped_abstractify)
        # Re-trace the jitted computation through the interpreter so the new
        # jaxpr contains the interpreted (callback-based) body.
        new_jaxpr, _ = pe.trace_to_jaxpr(
            functools.partial(f, jaxpr=eqn.params['jaxpr']), avals_ft,
            eqn.params['jaxpr'].jaxpr.debug_info)
        out = pjit.jit_p.bind(*invals, **(eqn.params | {'jaxpr': new_jaxpr}))

      elif prim is primitives.run_scoped_p:
        if eqn.params['collective_axes']:
          raise NotImplementedError(
              'run_scoped_p with collective axes is not supported'
          )
        # Allocate a buffer or semaphore for each element of
        # eqn.params['jaxpr'].invars. It is assumed that each core
        # runs the same sequence of `run_scoped`s.
        allocs = []
        for v in eqn.params['jaxpr'].invars:
          if v.aval.memory_space is _SEMAPHORE:
            allocs.append(
                callback.io_callback(
                    _allocate_semaphores,
                    jax.ShapeDtypeStruct(v.aval.shape, jnp.int16),
                    device_id,
                    local_core_id,
                    v.aval.shape,
                    ordered=True,
                )
            )
          else:
            if not interpret_params.allow_hbm_allocation_in_run_scoped:
              memory_space = _get_memory_space_and_raise_if_hbm(
                  v.aval, 'run_scoped_p', "Cannot allocate HBM in `run_scoped`."
              )
            else:
              memory_space = _forward_any_to_hbm(v.aval.memory_space)
            allocs.append(
                callback.io_callback(
                    _allocate_buffer,
                    jax.ShapeDtypeStruct((), jnp.int16),
                    device_id,
                    local_core_id,
                    TPU_MEMORY_SPACE_IDXS[memory_space],
                    interpret_params.get_uninitialized_array(
                        v.aval.shape, v.aval.dtype
                    ),
                    ordered=True,
                )
            )

        out = _interpret(eqn.params['jaxpr'], *deferred_invals(), *allocs)

        for a, v in zip(allocs, eqn.params['jaxpr'].invars):
          if v.aval.memory_space is _SEMAPHORE:
            # TODO(jburnim): De-allocate semaphores.
            # callback.io_callback(
            #     _deallocate_semaphores,
            #     None,
            #     device_id,
            #     a,
            #     ordered=True)
            pass
          else:
            callback.io_callback(
                _deallocate_buffer,
                None,
                device_id,
                local_core_id,
                # An exception would have been raised before `_allocate_buffer`
                # above if `memory_space` were HBM (i.e. either `pltpu.HBM` or
                # `pl.ANY`) and if this was disallowed by `interpret_params`.
                TPU_MEMORY_SPACE_IDXS[_forward_any_to_hbm(v.aval.memory_space)],
                a,
                ordered=True,
            )

      elif prim is state_primitives.get_p:
        memory_space = _get_memory_space_and_raise_if_hbm(
            eqn.invars[0].aval, 'get_p'
        )
        invals = deferred_invals()
        out = callback.io_callback(
            functools.partial(get, source_info=eqn.source_info),
            eqn.outvars[0].aval,
            device_id,
            local_core_id,
            TPU_MEMORY_SPACE_IDXS[memory_space],
            invals[0],
            jax.tree.unflatten(eqn.params['tree'], invals[1:]),
            ordered=True,
        )

      elif prim is state_primitives.swap_p:
        memory_space = _get_memory_space_and_raise_if_hbm(
            eqn.invars[0].aval, 'swap_p'
        )
        invals = deferred_invals()
        out = callback.io_callback(
            functools.partial(swap, source_info=eqn.source_info),
            eqn.outvars[0].aval,
            device_id,
            local_core_id,
            TPU_MEMORY_SPACE_IDXS[memory_space],
            invals[0],
            jax.tree.unflatten(eqn.params['tree'], invals[2:]),
            invals[1],
            None,
            ordered=True,
        )

      elif prim is mosaic_primitives.dma_start_p:
        (
            src,
            src_transforms,
            dst,
            dst_transforms,
            dst_sem,
            dst_sem_transforms,
            src_sem,
            src_sem_transforms,
            target_device_id,
        ) = jax.tree.unflatten(eqn.params['tree'], deferred_invals())
        target_device_id = interpret_utils._device_id_to_logical(
            target_device_id, eqn.params['device_id_type'], axis_sizes,
            axis_indices)
        (orig_src_ref, _, orig_dst_ref, *_
         ) = jax.tree.unflatten(eqn.params['tree'], eqn.invars)
        src_memory_space = _forward_any_to_hbm(
            getattr(orig_src_ref.aval, 'memory_space', None)
        )
        if src_memory_space is None:
          # This is brittle. There are examples where a ref with memory_space
          # set to `None` appears as one of the `constvars` of a `run_scoped`,
          # and the corresponding input to the `run_scoped` is a buffer in VMEM
          # (and not in HBM).
          #
          # Note that pairing the buffer id, i.e. `src`, here with an incorrect
          # memory space will result in a (very visible) `KeyError` for now.
          # (This is because the buffer id alone suffices to uniquely identify
          # the buffer held by the `SharedMemory` object. The memory space can
          # be considered merely additional information (useful for debugging)
          # that is added to the key that the `SharedMemory` object uses
          # internally to look up a buffer.)
          #
          # TODO(nrink): It would be more robust if the buffer id, i.e. `src`,
          # did already encode enough information to identify the correct
          # buffer, without the need to explicitly pass the memory space to the
          # `dma_start` callback below.
          src_memory_space = mosaic_core.MemorySpace.HBM
        dst_memory_space = _forward_any_to_hbm(
            getattr(orig_dst_ref.aval, 'memory_space', None)
        )
        if dst_memory_space is None:
          # TODO(nrink): See comment for `src_memory_space` above.
          dst_memory_space = mosaic_core.MemorySpace.HBM
        callback.io_callback(
            functools.partial(dma_start, source_info=eqn.source_info),
            (),
            device_id,
            local_core_id,
            TPU_MEMORY_SPACE_IDXS[src_memory_space],
            src,
            src_transforms,
            TPU_MEMORY_SPACE_IDXS[dst_memory_space],
            dst,
            dst_transforms,
            state_discharge.transform_array(dst_sem, dst_sem_transforms),
            state_discharge.transform_array(src_sem, src_sem_transforms),
            target_device_id,
            ordered=True,
        )
        out = []

      elif prim is mosaic_primitives.dma_wait_p:
        (
            src,
            src_transforms,
            dst,
            dst_transforms,
            dst_sem,
            dst_sem_transforms,
            src_sem,
            src_sem_transforms,
            target_device_id,
        ) = jax.tree.unflatten(eqn.params['tree'], deferred_invals())
        # The wait amount is the byte size of the transferred (source) block.
        src_ref_aval = state.transform_type(src_transforms, eqn.invars[0].aval)
        assert isinstance(src_ref_aval, state.AbstractRef)
        read_shape = src_ref_aval.shape
        read_dtype = src_ref_aval.dtype
        callback.io_callback(
            dma_wait,
            (),
            device_id,
            local_core_id,
            state_discharge.transform_array(dst_sem, dst_sem_transforms),
            math.prod(read_shape) * read_dtype.itemsize,
            ordered=True,
        )
        out = []

      elif prim is mosaic_primitives.get_barrier_semaphore_p:
        out = callback.io_callback(
            get_barrier_semaphore,
            jax.ShapeDtypeStruct((), jnp.int16),
            device_id,
            mosaic_params.collective_id,
            ordered=True,
        )

      elif prim is primitives.semaphore_signal_p:
        sem, sem_transforms, inc, target_device_id, core_index = (
            jax.tree.unflatten(eqn.params['args_tree'], deferred_invals()))
        target_device_id = interpret_utils._device_id_to_logical(
            target_device_id, eqn.params['device_id_type'], axis_sizes,
            axis_indices)
        callback.io_callback(
            semaphore_signal,
            (),
            device_id,
            local_core_id,
            state_discharge.transform_array(sem, sem_transforms),
            inc,
            target_device_id,
            core_index,
            ordered=True,
        )
        out = []

      elif prim is primitives.semaphore_wait_p:
        sem, sem_transforms, value, decrement = (
            jax.tree.unflatten(eqn.params['args_tree'], deferred_invals()))
        if not decrement:
          raise NotImplementedError('Non-decrementing wait is not supported.')
        callback.io_callback(
            semaphore_wait,
            (),
            device_id,
            local_core_id,
            state_discharge.transform_array(sem, sem_transforms),
            value,
            ordered=True,
        )
        out = []

      elif prim is primitives.atomic_rmw_p:
        raise NotImplementedError('atomic_rmw_p')

      elif prim is primitives.atomic_cas_p:
        raise NotImplementedError('atomic_cas_p')

      else:
        if interpret_params.skip_floating_point_ops and all(
            interpret_utils.is_float(ovar.aval.dtype) for ovar in eqn.outvars
        ):
          # Skip `prim.bind` since `prim` only produces floating-point values.
          # It is safe to populate `out` with avals since mapping `write` over
          # `out` below only relies on the shape and dtype (for writing
          # `Placeholder`s).
          out = [ovar.aval for ovar in eqn.outvars]
          if not prim.multiple_results:
            out = out[0]
        else:
          subfuns, bind_params = eqn.primitive.get_bind_params(eqn.params)
          out = prim.bind(*subfuns, *deferred_invals(), **bind_params)

      out = out if prim.multiple_results else [out]
      env.write_many(eqn.outvars, out)

  return env.read_many(jaxpr.outvars)
def _compute_start_indices(
    block_mapping, loop_idx, *args,
    axis_sizes, mesh, axis_indices, device_id, local_core_id,
    mosaic_params, interpret_params):
  """Runs a block mapping's index-map jaxpr and scales its outputs.

  Returns a pair `(block_indices, start_indices)`: `block_indices` are the raw
  outputs of the index map, and `start_indices` is a rank-1 int32 array of
  per-dimension element offsets for the block.
  """
  index_map = block_mapping.index_map_jaxpr
  block_indices = _interpret_jaxpr(
      index_map.jaxpr,
      *index_map.consts,
      *loop_idx,
      *args,
      axis_sizes=axis_sizes,
      mesh=mesh,
      axis_indices=axis_indices,
      device_id=device_id,
      local_core_id=local_core_id,
      mosaic_params=mosaic_params,
      interpret_params=interpret_params,
  )

  def _to_element_offset(block_index, dim):
    # Squeezed and Element dims already index elements directly; Blocked dims
    # count whole blocks and must be scaled by the block size.
    if isinstance(dim, (pallas_core.Squeezed, pallas_core.Element)):
      return block_index
    if isinstance(dim, pallas_core.Blocked):
      return block_index * dim.block_size
    raise ValueError(f"Unsupported block dim type: {type(dim)}")

  start_indices = jnp.array(
      [_to_element_offset(i, d)
       for i, d in zip(block_indices, block_mapping.block_shape)],
      dtype=jnp.int32,
  )
  return block_indices, start_indices
def _get_parallel_dim_semantics(
mosaic_params: mosaic_core.CompilerParams, num_dimensions_in_grid: int,
) -> tuple[bool, ...]:
"""Returns a tuple indicating which grid dimensions have parallel semantics.
Args:
mosaic_params: The compiler params for the Mosaic TPU backend.
num_dimensions_in_grid: The number of dimensions in the grid.
Returns:
A tuple of booleans where the entry at index `i` is `True` precisely if the
`i`-th dimension in the grid has parallel semantics.
Raises:
ValueError: If the dimensions with parallel semantics do not form a prefix
of the grid.
"""
if mosaic_params.dimension_semantics is None:
return (False,) * num_dimensions_in_grid
result = tuple(ds in ('parallel', mosaic_core.PARALLEL)
for ds in mosaic_params.dimension_semantics)
for ds0, ds1 in zip(result[:-1], result[1:]):
if ds1 and not ds0:
raise ValueError(
'Dimensions with parallel semantics must form a prefix of the grid.'
)
return result
def _get_parallel_subgrid_size(
parallel_semantics_per_dim: tuple[bool, ...], grid: tuple[int, ...]
) -> int:
"""Returns the size of the subgrid along the parallel dimensions."""
return math.prod(
dim_size if parallel_dim else 1
for dim_size, parallel_dim in zip(grid, parallel_semantics_per_dim)
)
# Per-dimension grid coordinates: for each grid dimension, either an array of
# (possibly permuted) coordinates or an empty array (see
# _get_randomized_grid_coordinates for the encoding).
_GridPointCoordinatesPerDim = tuple[Array, ...]
def _get_randomized_grid_coordinates(
    grid: tuple[int, ...],
    mosaic_params: mosaic_core.CompilerParams,
    random_seed: int | None,
) -> _GridPointCoordinatesPerDim:
  """Returns a tuple of randomized coordinates for each 'parallel' dimension in `grid`.

  For a dimension with 'parallel' semantics at position `d` in the grid, the
  returned tuple contains a random permutation of the sequence `[0,...,
  grid[d] - 1]` at index `d`. For each dimension with 'arbitrary' semantics,
  the resulting tuple contains an empty array. (Inserting an empty array for an
  'arbitrary' dimension at position `d` in the grid, instead of the sequence
  `[0,..., grid[d] - 1]`, allows `grid[d]` to be a dynamic value, i.e. a value
  not known at Jax trace time.)

  Args:
    grid: Tuple of sizes of the dimensions in the grid.
    mosaic_params: The compiler params for the Mosaic TPU backend; its
      `dimension_semantics` determines which grid dimensions are parallel.
    random_seed: The seed to use for randomizing coordinates in parallel
      dimensions. `None` is treated the same as seed 0.
  """
  parallel_semantics_per_dim = _get_parallel_dim_semantics(
      mosaic_params, len(grid)
  )
  key = jax.random.key(random_seed or 0)
  grid_point_coordinates = []
  for dim_size, parallel_dim in zip(grid, parallel_semantics_per_dim):
    if parallel_dim:
      # The size of a dimension with `parallel` semantics must be known at Jax
      # trace time. This ensures that the arguments to `jnp.arange` and
      # `jax.random.permutation` below are valid.
      dim_size = jax_core.concrete_or_error(None, dim_size)
      coordinates_along_dim = jnp.arange(dim_size, dtype=jnp.int32)
      key, subkey = jax.random.split(key)
      coordinates_along_dim = jax.random.permutation(
          subkey, coordinates_along_dim
      )
      grid_point_coordinates.append(coordinates_along_dim)
    else:
      grid_point_coordinates.append(jnp.array((), dtype=jnp.int32))
  return tuple(grid_point_coordinates)
# TODO(sharadmv, jburnim): add support for memory space constraints
remove_memory_space_p = jax_core.Primitive('remove_memory_space')
@remove_memory_space_p.def_abstract_eval
def _remove_memory_space_abstract_eval(x):
if isinstance(x, pallas_core.ShapedArrayWithMemorySpace):
if (
x.memory_space is None
or x.memory_space is pallas_core.MemorySpace.ANY
or x.memory_space is mosaic_core.MemorySpace.HBM
):
return jax_core.ShapedArray(x.shape, x.dtype)
raise NotImplementedError(f'Unsupported memory space: {x.memory_space}')
return x
@remove_memory_space_p.def_impl
def _remove_memory_space_impl(x):
return x
def _remove_memory_space_lowering(_, x):
return [x]
mlir.register_lowering(remove_memory_space_p, _remove_memory_space_lowering)
def _get_grid_point(
loop_indices: tuple[Array, ...],
grid_point_coordinates: _GridPointCoordinatesPerDim,
) -> Array:
"""Indexes each entry in `grid_point_coordinates` with the corresponding entry in `loop_indices`.
If an entry in `grid_point_coordinates` is an empty array, the corresponding
entry in the returned array is the corresponding entry in `loop_indices`.
Otherwise, the returned array contains the entry in `grid_point_coordinates`
indexed with the corresponding entry in `loop_indices`.
Args:
loop_indices: A tuple of loop indices.
grid_point_coordinates: A tuple of coordinate arrays for each dimension in
the grid. Dimensions with 'arbitrary' semantics are represented by empty
arrays. Dimensions with 'parallel' semantics are represented by arrays of
randomized coordinates.
Returns:
A 1-dimensional array containing the coordinates for the grid point
corresponding to the specified `loop_indices`.
"""
grid_point = []
for li, coords in zip(loop_indices, grid_point_coordinates):
grid_point.append(li if jnp.size(coords) == 0 else coords[li])
return jnp.array(grid_point, dtype=np.int32)
def get_interpret_effects():
  """Returns the JAX effects introduced by interpret-mode `io_callback`s."""
  return {callback._OrderedIOEffect}
def interpret_pallas_call(
    *args,
    jaxpr: jax_core.Jaxpr,
    debug: bool,
    input_output_aliases: tuple[tuple[int, int], ...],
    grid_mapping: pallas_core.GridMapping,
    mesh: pallas_core.Mesh | None,
    compiler_params: pallas_core.CompilerParams | None,
    cost_estimate: pallas_core.CostEstimate,
    out_avals: tuple[jax_core.AbstractValue, ...],
    interpret_params: InterpretParams,
    metadata: frozen_dict.FrozenDict[str, str] | None,
    name: str | None,
):
  """Simulates a Mosaic TPU pallas_call on the host.

  Allocates simulated HBM buffers for the pallas_call inputs and outputs,
  interprets the kernel jaxpr once per grid point on each simulated core
  (dividing the 'parallel' prefix of the grid between cores, which run as
  threads via `thread_map`), and finally reads the results back out of the
  simulated output buffers.  All interaction with the simulated shared memory
  happens through ordered `io_callback`s so that the interpreter's side
  effects are sequenced correctly.

  Args:
    args: Flat operands: *dynamic_grid_sizes, *index (scalar-prefetch)
      operands, *inputs.
    jaxpr: The kernel jaxpr to interpret.
    debug: Unused here.
    input_output_aliases: Pairs (operand index, output index) of aliased
      buffers.
    grid_mapping: Grid and block mapping info for the pallas_call.
    mesh: Optional mesh; a TensorCoreMesh triggers per-core grid division.
    compiler_params: Optional Mosaic compiler params.
    cost_estimate: Unused here.
    out_avals: Unused here.
    interpret_params: Interpret-mode configuration.
    metadata: Unused here (TODO below).
    name: Unused here.

  Returns:
    The list of pallas_call output values read back from the simulated
    output buffers.
  """
  del debug, cost_estimate, out_avals, name
  del metadata  # TODO(sharadmv): Add metadata to HLO.
  if isinstance(mesh, mosaic_core.TensorCoreMesh):
    # As a convenience for users, if we are interpreting a pl.core_map over a
    # TensorCoreMesh, we automatically set the number of cores per device so
    # that users don't have to specify it in the InterpretParams.
    assert len(mesh.shape) == 1
    # NOTE(review): the rest of this function reads
    # `interpret_params.num_cores_per_device`; verify that
    # `num_cores_or_threads` is really the intended field name here.
    interpret_params = dataclasses.replace(
        interpret_params, num_cores_or_threads=mesh.devices.shape[0]
    )
  if compiler_params is None:
    mosaic_params = mosaic_core.CompilerParams()
  else:
    assert isinstance(compiler_params, mosaic_core.CompilerParams)
    mosaic_params = compiler_params  # type: ignore[assignment]
  del compiler_params

  # Strip memory-space annotations from the operands' avals.
  args = [remove_memory_space_p.bind(a) for a in args]

  # args contains: *dynamic_grid_sizes, *index, *inputs. (No consts?)
  dynamic_grid_args, scalars, input_args = split_list(
      args,
      [grid_mapping.num_dynamic_grid_bounds, grid_mapping.num_index_operands],
  )
  dynamic_grid_args_iter = iter(dynamic_grid_args)
  grid = tuple(
      a if a is not pallas_core.dynamic_grid_dim
      else next(dynamic_grid_args_iter)
      for a in grid_mapping.grid
  )
  assert next(dynamic_grid_args_iter, None) is None

  # Identify this (simulated) device within the surrounding mesh axes.
  axis_sizes = jax_core.get_axis_env().axis_sizes
  num_devices = functools.reduce(
      jnp.multiply, axis_sizes.values(), jnp.int32(1))
  axis_indices = {k: lax.axis_index(k) for k in axis_sizes.keys()}
  device_id = interpret_utils.device_coords_to_logical_id(
      tuple(axis_indices.values()), axis_sizes, axis_indices
  )
  callback.io_callback(
      functools.partial(
          _initialize_shared_memory, interpret_params=interpret_params
      ),
      (),
      device_id,
      num_devices,
      interpret_params.num_cores_per_device,
      ordered=True,
  )

  # Pad input arguments.
  is_squeeze_dim = [
      tuple(isinstance(b, pallas_core.Squeezed) for b in bm.block_shape)
      for bm in grid_mapping.block_mappings
  ]
  block_shapes = [
      pallas_core._get_block_shape(bm.block_shape)
      for bm in grid_mapping.block_mappings
  ]
  num_inputs = grid_mapping.num_inputs
  input_args = [
      interpret_params.pad_to_block_dimension(a, bs)
      for a, bs in zip(input_args, block_shapes[:num_inputs])
  ]

  # Allocate HBM buffers for pallas_call inputs.
  #
  # TODO(jburnim): As an optimization, skip allocating buffers for inputs that
  # are neither aliased nor passed to the kernel in HBM?
  input_buffer_ids = []
  for i, var in enumerate(
      jaxpr.invars[grid_mapping.num_index_operands:][:grid_mapping.num_inputs]):
    assert var.aval.dtype == input_args[i].dtype  # pyrefly: ignore[missing-attribute]
    input_buffer_ids.append(
        callback.io_callback(
            _allocate_buffer,
            jax.ShapeDtypeStruct((), jnp.int16),
            device_id,
            None,  # local_core_id
            TPU_MEMORY_SPACE_IDXS[mosaic_core.MemorySpace.HBM],
            input_args[i],
            ordered=True,
        )
    )

  # Allocate buffers in HBM for pallas_call outputs.
  oi_alias_map = {v: k - len(scalars) for k, v in input_output_aliases}
  # NOTE(review): this check looks like it should inspect
  # oi_alias_map.values() (the input indices, which go negative when a scalar
  # is aliased); the keys are output indices and are never negative — verify.
  if any(i < 0 for i in oi_alias_map.keys()):
    raise ValueError('Aliasing of scalar prefetch arguments is not currently '
                     'supported in TPU interpret mode.')
  output_buffer_ids = []
  output_buffer_shapes = []
  output_vals = []
  num_outputs = grid_mapping.num_outputs
  output_block_shapes = block_shapes[num_inputs : num_inputs + num_outputs]
  for i, bm in enumerate(grid_mapping.block_mappings_output):
    if i in oi_alias_map:
      # Reuse the HBM buffer for the aliased pallas_call input.
      output_buffer_ids.append(input_buffer_ids[oi_alias_map[i]])
      output_buffer_shapes.append(input_args[oi_alias_map[i]].shape)
      output_vals.append(input_args[oi_alias_map[i]])
    else:
      out_val = interpret_params.get_uninitialized_array(
          bm.array_aval.shape, bm.array_aval.dtype
      )
      padded_val = interpret_params.pad_to_block_dimension(
          out_val, output_block_shapes[i]
      )
      output_buffer_ids.append(
          callback.io_callback(
              _allocate_buffer,
              jax.ShapeDtypeStruct((), jnp.int16),
              device_id,
              None,  # local_core_id
              TPU_MEMORY_SPACE_IDXS[mosaic_core.MemorySpace.HBM],
              padded_val,
              ordered=True,
          )
      )
      output_buffer_shapes.append(padded_val.shape)
      output_vals.append(out_val)

  # Allocate buffers for non-HBM kernel arguments (e.g., scalars, inputs,
  # outputs, scratch).
  scalar_buffer_ids = []
  for var, val in zip(jaxpr.invars[grid_mapping.slice_index_ops], scalars):
    assert var.aval.shape == val.shape
    assert var.aval.dtype == val.dtype
    scalar_buffer_ids.append(
        callback.io_callback(
            _allocate_buffer,
            jax.ShapeDtypeStruct((), jnp.int16),
            device_id,
            None,  # local_core_id,
            TPU_MEMORY_SPACE_IDXS[mosaic_core.MemorySpace.SMEM],
            val,
            ordered=True,
        )
    )
  kernel_buffer_ids = scalar_buffer_ids.copy()
  for i, var in enumerate(jaxpr.invars[grid_mapping.num_index_operands:]):
    output_idx = i - grid_mapping.num_inputs
    is_input = i < grid_mapping.num_inputs
    is_output = (output_idx >= 0) and (output_idx < grid_mapping.num_outputs)
    aval = var.aval
    memory_space = _forward_any_to_hbm(aval.memory_space)  # pyrefly: ignore[missing-attribute]
    if memory_space is _SEMAPHORE:
      kernel_buffer_ids.append(
          callback.io_callback(
              _allocate_semaphores,
              jax.ShapeDtypeStruct(aval.shape, jnp.int16),  # pyrefly: ignore[missing-attribute]
              device_id,
              None,  # local_core_id
              aval.shape,  # pyrefly: ignore[missing-attribute]
              ordered=True,
          )
      )
    elif memory_space is _HBM:
      # Use the already-allocated HBM input or output buffer.
      #
      # TODO(jburnim): For kernel args in HBM, check that block shape equals the
      # shape of the corresponding pallas_call input, and that the index_map
      # is trivial.
      assert is_input ^ is_output
      if is_input:
        kernel_buffer_ids.append(input_buffer_ids[i])
      if is_output:
        kernel_buffer_ids.append(output_buffer_ids[output_idx])
    else:
      kernel_buffer_ids.append(
          callback.io_callback(
              _allocate_buffer,
              jax.ShapeDtypeStruct((), jnp.int16),
              device_id,
              None,  # local_core_id,
              TPU_MEMORY_SPACE_IDXS[memory_space],
              interpret_params.get_uninitialized_array(
                  var.aval.shape, var.aval.dtype  # pyrefly: ignore[missing-attribute]
              ),
              ordered=True,
          )
      )

  if mosaic_params.collective_id is None:
    # The kernel doesn't specify its own barrier semaphore, so we do a global
    # barrier before running the first iteration of the kernel.
    callback.io_callback(_barrier, (), device_id, ordered=True)

  _, input_ids, kernel_output_ids, _ = split_list(
      kernel_buffer_ids,
      [grid_mapping.num_index_operands, num_inputs, grid_mapping.num_outputs])
  input_vars, output_vars = split_list(
      jaxpr.invars[grid_mapping.slice_block_ops], [num_inputs])

  if grid:
    num_iterations = functools.reduce(jnp.multiply, grid)  # type: ignore[arg-type]
  else:
    # Base case is always one iteration when grid is ()
    num_iterations = 1

  if isinstance(mesh, mosaic_core.TensorCoreMesh):
    # We are interpreting a pl.core_map over a TensorCoreMesh, so we use a
    # fixed division of the grid between cores, instead of a random division.
    randomized_grid_coordinates = (jnp.array((), dtype=jnp.int32),) * len(grid)
  else:
    randomized_grid_coordinates = _get_randomized_grid_coordinates(
        grid, mosaic_params, interpret_params.random_seed  # type: ignore[arg-type]
    )

  # Split the 'parallel' prefix of the grid evenly between simulated cores;
  # each core then runs all 'arbitrary' iterations for its parallel points.
  parallel_dim_semantics = _get_parallel_dim_semantics(
      mosaic_params, len(grid)
  )
  parallel_subgrid_size = _get_parallel_subgrid_size(
      parallel_dim_semantics, grid  # type: ignore[arg-type]
  )
  num_points_in_parallel_subgrid_per_core = (
      parallel_subgrid_size + interpret_params.num_cores_per_device - 1
  ) // interpret_params.num_cores_per_device  # We round up here.
  num_iterations_per_point_in_parallel_subgrid = (
      # This is evenly divisible.
      num_iterations // parallel_subgrid_size  # type: ignore[operator]
  )
  num_iterations_per_core = (
      num_points_in_parallel_subgrid_per_core
      * num_iterations_per_point_in_parallel_subgrid
  )

  def _get_local_grid_env(grid_point):
    """Builds the grid environment (axis index/size pairs) for `grid_point`."""
    if grid_mapping.local_grid_env is not None:
      return grid_mapping.local_grid_env(grid_point, grid)
    else:
      return tuple(
          pallas_core.GridAxis(idx, b)  # pyrefly: ignore[bad-argument-type]
          for dim, (idx, b) in enumerate(zip(grid_point, grid))
          if dim not in grid_mapping.vmapped_dims
      )

  def _execute_grid_for_core(core_index):
    """Sequentially runs this core's share of the grid iterations."""
    # NOTE: We assume here that all parallel dimensions appear before all
    # arbitrary dimensions in the grid. (We will have raised an error earlier
    # if this is not the case.)
    #
    # TODO(jburnim): Are we overusing nested local functions here?
    initial_iteration_idx = core_index * num_iterations_per_core
    loop_bound = jnp.minimum(
        (core_index + 1) * num_iterations_per_core, num_iterations)

    def _body(
        carry: tuple[
            jnp.int32,
            tuple[jnp.int32, ...],
            jnp.ndarray,
            tuple[jnp.ndarray, ...],
            tuple[jnp.ndarray, ...],
            tuple[jnp.ndarray, ...],
        ],
    ) -> tuple[
        jnp.int32,
        tuple[jnp.int32, ...],
        jnp.ndarray,
        tuple[jnp.ndarray, ...],
        tuple[jnp.ndarray, ...],
        tuple[jnp.ndarray, ...],
    ]:
      """Performs one execution of the kernel body.

      Execution of `jaxpr` is preceded by reading kernel input buffers and
      followed by writing kernel output buffers.

      Args:
        carry: (iteration_idx, loop_idx, grid_point, prev_start_indices,
          cur_block_indices, cur_start_indices).
          - iteration_idx: the iteration index.
          - loop_idx: internal indices for looping over the grid.
          - grid_point: the current positions along all axes of the grid.
          - prev_start_indices: a rank-1 array that contains the start indices
            for the slices of inputs and outputs processed in the previous loop
            iteration.
          - cur_block_indices: the raw index-map outputs for the current
            iteration; forwarded to the get/store callbacks.
          - cur_start_indices: a rank-1 array that contains the start indices
            for the slices of inputs and outputs processed in the current loop
            iteration.

        Note that by carrying the previous *and* current start indices between
        loop iterations, it suffices to compute only one list of start indices,
        i.e. `next_start_indices` (see below), per iteration.

      Returns:
        The carry for the next iteration.
      """
      (
          iteration_idx,
          loop_idx,
          grid_point,
          prev_start_indices,
          cur_block_indices,
          cur_start_indices,
      ) = carry
      if interpret_params.grid_point_recorder is not None:
        callback.io_callback(
            interpret_params.grid_point_recorder,
            (),
            grid_point,
            core_index,
        )
      with pallas_core.grid_env(_get_local_grid_env(grid_point)):
        # Precompute the next iteration's grid point and start indices so the
        # carry can hold both current and next values.
        next_loop_idx = interpret_utils.get_next_indices(grid, loop_idx)
        next_grid_point = _get_grid_point(
            next_loop_idx, randomized_grid_coordinates
        )
        next_block_indices, next_start_indices = zip(*[
            _compute_start_indices(
                bm,
                next_grid_point,
                *scalar_buffer_ids,
                axis_sizes=axis_sizes,
                mesh=mesh,
                axis_indices=axis_indices,
                device_id=device_id,
                local_core_id=core_index,
                mosaic_params=mosaic_params,
                interpret_params=interpret_params,
            )
            for bm in grid_mapping.block_mappings
        ])
        if jaxpr.debug_info.arg_names is not None:
          input_names, output_names = split_list(
              jaxpr.debug_info.arg_names[grid_mapping.slice_block_ops], [num_inputs])
        else:
          input_names = ["unknown",] * grid_mapping.num_inputs
          output_names = ["unknown",] * grid_mapping.num_outputs

        # Copy slices of the input to the kernel buffers.
        def _store_slice_to_kernel_input(index, input_var):
          """Reads this iteration's input slice from HBM into a kernel buffer."""
          # Copy from the HBM buffer for the pallas_call input to the kernel
          # input buffer.
          # TODO(jburnim): Just use input_args[j] when the input is not aliased?
          transform = indexing.NDIndexer(
              indices=tuple(
                  indexing.Slice(st, sz) if not iid else st
                  for st, sz, iid in zip(
                      cur_start_indices[index],
                      block_shapes[index],
                      is_squeeze_dim[index],
                  )
              ),
              shape=input_args[index].shape,
              int_indexer_shape=(),
          )
          sliced_val = callback.io_callback(
              # TODO(jburnim): Pass source_info from the pallas_call, in case this
              # read is involved in a data race.
              functools.partial(get, input_name=input_names[index]),
              jax.ShapeDtypeStruct(input_var.aval.shape, input_var.aval.dtype),
              device_id,
              core_index,
              TPU_MEMORY_SPACE_IDXS[mosaic_core.MemorySpace.HBM],
              input_buffer_ids[index],
              (transform,),
              cur_block_indices[index],
              grid_point,
              ordered=True,
          )
          callback.io_callback(
              # TODO(jburnim): Pass source_info from the pallas_call, in case this
              # store is involved in a data race.
              store,
              (),
              device_id,
              core_index,
              TPU_MEMORY_SPACE_IDXS[
                  _forward_any_to_hbm(input_var.aval.memory_space)
              ],
              input_ids[index],
              (),
              sliced_val,
              ordered=True,
          )

        for j, var in enumerate(input_vars):
          if _forward_any_to_hbm(var.aval.memory_space) is _HBM:
            if var.aval.shape != block_shapes[j]:
              raise ValueError(
                  f'Kernel input {j} in HBM but does not have trivial'
                  ' BlockSpec.'
              )
            continue
          assert len(cur_start_indices[j].shape) == 1
          assert len(prev_start_indices[j].shape) == 1
          # Only re-copy the input slice when it changed (or on the first
          # iteration for this core).
          jax.lax.cond(
              (iteration_idx == initial_iteration_idx)
              | jax.lax.reduce_or(
                  cur_start_indices[j] != prev_start_indices[j], axes=(0,)
              ),
              functools.partial(_store_slice_to_kernel_input, j, var),
              lambda: None,
          )

        # Invoke the kernel.
        _interpret_jaxpr(
            jaxpr,
            *kernel_buffer_ids,
            axis_sizes=axis_sizes,
            mesh=mesh,
            axis_indices=axis_indices,
            device_id=device_id,
            local_core_id=core_index,
            mosaic_params=mosaic_params,
            interpret_params=interpret_params,
        )

        # Copy from the kernel buffers to slices of the output in HBM.
        def _store_to_output_buffer(index, output_var, transform):
          """Writes a kernel output buffer back to its HBM output slice."""
          kernel_output_val = callback.io_callback(
              # TODO(jburnim): Pass source_info from the pallas_call, in case this
              # get is involved in a data race.
              get,
              output_var.aval,
              device_id,
              core_index,
              TPU_MEMORY_SPACE_IDXS[
                  _forward_any_to_hbm(output_var.aval.memory_space)
              ],
              kernel_output_ids[index],
              (),
              ordered=True,
          )
          callback.io_callback(
              # TODO(jburnim): Pass source_info from the pallas_call, in case this
              # store is involved in a data race.
              functools.partial(store, output_name=output_names[index]),
              (),
              device_id,
              core_index,
              TPU_MEMORY_SPACE_IDXS[mosaic_core.MemorySpace.HBM],
              output_buffer_ids[index],
              (transform,),
              kernel_output_val,
              cur_block_indices[num_inputs + index],
              grid_point,
              ordered=True,
          )

        output_slices : list[Any] = []
        for j, var in enumerate(output_vars):
          if _forward_any_to_hbm(var.aval.memory_space) is _HBM:
            if var.aval.shape != block_shapes[num_inputs + j]:
              raise ValueError(
                  f'Kernel output {j} in HBM but does not have trivial'
                  ' BlockSpec.'
              )
            output_slices.append(None)
            continue
          assert len(cur_start_indices[num_inputs + j].shape) == 1
          assert len(next_start_indices[num_inputs + j].shape) == 1
          transform = indexing.NDIndexer(
              indices=tuple(
                  indexing.ds(st, sz) if not iid else st  # type: ignore[misc]
                  for st, sz, iid in zip(
                      cur_start_indices[num_inputs + j],
                      block_shapes[num_inputs + j],
                      is_squeeze_dim[num_inputs + j],
                  )
              ),
              shape=output_vals[j].shape,
              int_indexer_shape=(),
          )
          if j in oi_alias_map:
            # Suppress revisiting check for output buffers that are aliased to
            # input buffers.
            output_slices.append(None)
          else:
            output_slices.append((transform,))
          # Only flush the output slice when the next iteration moves to a
          # different slice (or this is the last iteration for this core).
          jax.lax.cond(
              (iteration_idx + 1 == loop_bound)
              | jax.lax.reduce_or(
                  cur_start_indices[num_inputs + j]
                  != next_start_indices[num_inputs + j],
                  axes=(0,),
              ),
              functools.partial(_store_to_output_buffer, j, var, transform),
              lambda: None,
          )
        callback.io_callback(
            _check_for_revisiting,
            (),
            device_id,
            core_index,
            loop_idx,
            output_slices,
            ordered=True,
        )
      return (
          iteration_idx + 1,
          next_loop_idx,
          next_grid_point,
          cur_start_indices,
          tuple(next_block_indices),
          tuple(next_start_indices),
      )

    initial_loop_idx = interpret_utils.get_indices(grid, initial_iteration_idx)
    initial_grid_point = _get_grid_point(
        initial_loop_idx, randomized_grid_coordinates)
    with pallas_core.grid_env(_get_local_grid_env(initial_grid_point)):
      initial_block_indices, initial_start_indices = zip(*[
          _compute_start_indices(
              bm,
              initial_grid_point,
              *scalar_buffer_ids,
              axis_sizes=axis_sizes,
              mesh=mesh,
              axis_indices=axis_indices,
              device_id=device_id,
              local_core_id=core_index,
              mosaic_params=mosaic_params,
              interpret_params=interpret_params,
          )
          for bm in grid_mapping.block_mappings
      ])
    _ = lax.while_loop(
        lambda carry: carry[0] < loop_bound,
        _body,
        (
            initial_iteration_idx,
            initial_loop_idx,
            initial_grid_point,
            initial_start_indices,  # Previous start indices are ignored on the first iteration.
            initial_block_indices,
            initial_start_indices,
        ),
    )

  # TODO(jburnim): Should we only create happens-before here from core 0 to
  # the other cores?
  callback.io_callback(
      _update_clocks_for_device_barrier, (), device_id, ordered=True
  )
  thread_map(_execute_grid_for_core, interpret_params.num_cores_per_device)
  # TODO(jburnim): Should we only create happens-before here from the other
  # # cores to core 0?
  callback.io_callback(
      _update_clocks_for_device_barrier, (), device_id, ordered=True
  )

  # Read the output from the allocated output buffers.
  ret = [
      callback.io_callback(
          # TODO(jburnim): Pass source_info from the pallas_call, in case this
          # get is involved in a data race.
          get,
          val,
          device_id,
          0,  # local_core_id
          TPU_MEMORY_SPACE_IDXS[mosaic_core.MemorySpace.HBM],
          output_buffer_id,
          (
              indexing.NDIndexer.from_indices_shape(
                  tuple(indexing.ds(0, s) for s in val.shape),
                  output_buffer_shape,
              ),
          ),
          ordered=True,
      )
      for val, output_buffer_id, output_buffer_shape in zip(
          output_vals, output_buffer_ids, output_buffer_shapes
      )
  ]
  callback.io_callback(_validate, (), device_id, ordered=True)
  # For now, when we're done with a pallas_call, we delete the shared memory.
  # We use a barrier to ensure that all devices are done running the kernel.
  #
  # TODO(jburnim): Get rid of this barrier. And figure out how this should
  # work if we want to invoke successive pallas_calls that use the same
  # shared memory.
  callback.io_callback(
      _clean_up_shared_memory, (), device_id, ordered=True
  )
  return ret
| {
"repo_id": "jax-ml/jax",
"file_path": "jax/_src/pallas/mosaic/interpret/interpret_pallas_call.py",
"license": "Apache License 2.0",
"lines": 2036,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:jax/_src/pallas/mosaic/interpret/race_detection_state.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import dataclasses
import itertools
import threading
from jax._src import source_info_util
from jax._src.pallas.mosaic.interpret import vector_clock as vc
def _is_empty_slice(slice_or_idx: slice | int):
if isinstance(slice_or_idx, int) or (slice_or_idx == slice(None)):
return False
# NOTE: All slices here will have known size.
start = int(slice_or_idx.start) if slice_or_idx.start is not None else 0
stop = int(slice_or_idx.stop)
return start < stop
def _slices_overlap(slice_or_idx1: slice | int, slice_or_idx2: slice | int):
if isinstance(slice_or_idx1, int):
slice_or_idx1 = slice(slice_or_idx1, slice_or_idx1 + 1)
if isinstance(slice_or_idx2, int):
slice_or_idx2 = slice(slice_or_idx2, slice_or_idx2 + 1)
if slice_or_idx1 == slice(None):
return _is_empty_slice(slice_or_idx2)
if slice_or_idx2 == slice(None):
return _is_empty_slice(slice_or_idx1)
# TODO(jburnim): Handle non-zero steps.
assert (slice_or_idx1.step == 1) or (slice_or_idx1.step is None)
assert (slice_or_idx2.step == 1) or (slice_or_idx2.step is None)
assert slice_or_idx1.start is not None
assert slice_or_idx1.stop is not None
assert slice_or_idx2.start is not None
assert slice_or_idx2.stop is not None
# NOTE: We are only comparing slices with known stops (and sizes).
# Do we need to handle zero-length slices?
return (slice_or_idx1.start <= slice_or_idx2.start < slice_or_idx1.stop) | (
slice_or_idx2.start <= slice_or_idx1.start < slice_or_idx2.stop
)
def _ranges_overlap(
range1: tuple[slice | int, ...], range2: tuple[slice | int, ...]
) -> bool:
return all(
_slices_overlap(r1, r2)
for r1, r2 in itertools.zip_longest(range1, range2, fillvalue=slice(None))
)
@dataclasses.dataclass
class RaceDetectionState:
  """Global state for happens-before data-race detection.

  Records every read and write to every simulated buffer together with the
  vector clock of the accessing core.  Each new access is compared against the
  previously recorded accesses to the same buffer: two accesses race when
  their vector clocks are unordered and their index ranges overlap.  Detected
  races are printed and `races_found` is set.
  """

  # Total number of simulated cores.
  num_cores: int

  # (memory_space, buffer_id, device_id, local_core_id) ->
  #   [(device_id, local_core_id, VectorClock, range, source frame)]
  reads: dict = dataclasses.field(
      default_factory=lambda: collections.defaultdict(list)
  )

  # (memory_space, buffer_id, device_id, local_core_id) ->
  #   [(device_id, local_core_id, VectorClock, range, source frame)]
  writes: dict = dataclasses.field(
      default_factory=lambda: collections.defaultdict(list)
  )

  # Guards mutation of `reads`, `writes`, and `races_found`.
  lock: threading.Lock = dataclasses.field(default_factory=threading.Lock)

  # True once any race has been detected.
  races_found: bool = False

  def check_read(
      self, device_id, local_core_id, clock, buffer_key, rnge, source_info=None
  ):
    """Records a read of `buffer_key[rnge]` and checks it against past writes.

    Prints a diagnostic and sets `races_found` if the read is unordered (by
    vector clock) with, and overlaps, a previously recorded write.  Only the
    first conflicting write is reported.

    Args:
      device_id: ID of the device (or DMA) performing the read.
      local_core_id: Core on that device performing the read.
      clock: Vector clock of the reader at the time of the read.
      buffer_key: (memory_space, buffer_id, device_id, local_core_id) key
        identifying the buffer read.
      rnge: Tuple of ints/slices giving the elements read.
      source_info: Optional JAX source info used to label the access.
    """
    if source_info is not None:
      user_frame = source_info_util.summarize(source_info)
    else:
      user_frame = 'pallas_call'
    # Snapshot the number of existing writes under the lock; the scan below
    # runs outside the lock against that snapshot.
    with self.lock:
      writes = self.writes[buffer_key]
      num_writes = len(writes)
      self.reads[buffer_key].append(
          (device_id, local_core_id, clock, rnge, user_frame)
      )
    for i in range(num_writes):
      (
          write_device_id,
          write_local_core_id,
          write_clock,
          write_range,
          write_frame,
      ) = writes[i]
      if vc.ordered(write_clock, clock):
        continue
      if not _ranges_overlap(rnge, write_range):
        continue
      # TODO(jburnim): When printing device IDs for reads/writes, distinguish
      # between real device IDs vs. DMA IDs.
      print(
          f'RACE DETECTED\n  read of {buffer_key}[{rnge}] from {device_id},'
          f' {local_core_id}, {user_frame}\n    clock: {clock}\n  write of'
          f' {buffer_key}[{write_range}] from {write_device_id},'
          f' {write_local_core_id} {write_frame}\n    clock: {write_clock}\n'
      )
      with self.lock:
        self.races_found = True
      return

  def check_write(
      self, device_id, local_core_id, clock, buffer_key, rnge, source_info=None
  ):
    """Records a write to `buffer_key[rnge]` and checks it against past accesses.

    Like `check_read`, but compares the new write against both previously
    recorded writes (write-write races) and previously recorded reads
    (read-write races).

    Args:
      device_id: ID of the device (or DMA) performing the write.
      local_core_id: Core on that device performing the write.
      clock: Vector clock of the writer at the time of the write.
      buffer_key: (memory_space, buffer_id, device_id, local_core_id) key
        identifying the buffer written.
      rnge: Tuple of ints/slices giving the elements written.
      source_info: Optional JAX source info used to label the access.
    """
    if source_info is not None:
      user_frame = source_info_util.summarize(source_info)
    else:
      user_frame = 'pallas_call'
    # Snapshot the existing access counts under the lock; the scans below run
    # outside the lock against those snapshots.
    with self.lock:
      writes = self.writes[buffer_key]
      reads = self.reads[buffer_key]
      num_writes = len(writes)
      num_reads = len(reads)
      self.writes[buffer_key].append((device_id, local_core_id, clock, rnge, user_frame))
    # TODO(jburnim): For performance, we should also probably remove any
    # conflicting reads and writes that happened-before the current write.
    for i in range(num_writes):
      (
          write_device_id,
          write_local_core_id,
          write_clock,
          write_range,
          write_frame,
      ) = writes[i]
      if vc.ordered(write_clock, clock):
        continue
      if not _ranges_overlap(rnge, write_range):
        continue
      # TODO(jburnim): When printing device IDs for reads/writes, distinguish
      # between real device IDs vs. DMA IDs.
      print(
          f'RACE DETECTED\n  write of {buffer_key}[{rnge}] from {device_id},'
          f' {local_core_id}, {user_frame}\n    clock: {clock}\n  write of'
          f' {buffer_key}[{write_range}] from {write_device_id},'
          f' {write_local_core_id}, {write_frame}\n    clock: {write_clock}\n'
      )
      with self.lock:
        self.races_found = True
      break
    for i in range(num_reads):
      read_device_id, read_local_core_id, read_clock, read_range, read_frame = (
          reads[i]
      )
      if vc.ordered(read_clock, clock):
        continue
      if not _ranges_overlap(rnge, read_range):
        continue
      # TODO(jburnim): When printing device IDs for reads/writes, distinguish
      # between real device IDs vs. DMA IDs.
      print(
          f'RACE DETECTED\n  write of {buffer_key}[{rnge}] from {device_id},'
          f' {local_core_id}, {user_frame}\n    clock: {clock}\n  read of'
          f' {buffer_key}[{read_range}] from {read_device_id},'
          f' {read_local_core_id}, {read_frame}\n    clock: {read_clock}\n'
      )
      with self.lock:
        self.races_found = True
      return
| {
"repo_id": "jax-ml/jax",
"file_path": "jax/_src/pallas/mosaic/interpret/race_detection_state.py",
"license": "Apache License 2.0",
"lines": 160,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:jax/_src/pallas/mosaic/interpret/shared_memory.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import collections
from collections.abc import Sequence
import dataclasses
import gc
import threading
from typing import Any, Callable, Literal
from jax._src.pallas.mosaic.interpret import vector_clock as vc
import numpy as np
class Semaphore:
  def __init__(
      self,
      shared_memory: SharedMemory,
      semaphore_id: int,
  ):
    """Creates a semaphore with one independent count per global core.

    Args:
      shared_memory: The owning SharedMemory; supplies the number of cores
        and the race-detection / DMA-execution settings.
      semaphore_id: Integer ID of this semaphore.
    """
    self.shared_memory = shared_memory
    self.id: int = semaphore_id
    # TODO(jburnim): Use one Condition variable per device. (Which will be
    # easier to do when we're using single integer device IDs.)
    self.cv = threading.Condition()
    # One count per global core; indexed by global core ID.
    self.count_by_core = np.zeros(self.shared_memory.num_cores, dtype=np.int32)
    if self.shared_memory.detect_races:
      # We associate a vector clock with each count in self.counts. Whenever
      # self.count_by_core[i] is signaled, self.clocks[i] is updated with the
      # vector clock of the signaling core. Whenever core i successfully waits
      # on self.count_by_core[i], the vector clock of core i is updated with
      # self.clocks[i].
      #
      # TODO(jburnim): Model happens-before more precisely for the case where
      # semaphores are over-signaled.
      self.clocks: list[vc.VectorClock | None] = [
          None
      ] * self.shared_memory.num_cores
  @property
  def num_cores(self) -> int:
    """Total number of simulated cores, from the owning shared memory."""
    return self.shared_memory.num_cores

  @property
  def detect_races(self) -> bool:
    """Whether race detection is enabled for the owning shared memory."""
    return self.shared_memory.detect_races

  @property
  def dma_execution_mode(self) -> str:
    """DMA execution mode of the owning shared memory (e.g. 'on_wait')."""
    return self.shared_memory.dma_execution_mode

  def get_global_core_id(self, device_id: int, local_core_id: int) -> int:
    """Returns the global core ID for (device_id, local_core_id).

    Delegates to the owning SharedMemory's mapping.
    """
    return self.shared_memory.get_global_core_id(device_id, local_core_id)
def signal(self, inc, global_core_id, clock):
"""Signal the semaphore on `(device_id, core_id)` by `inc`.
Args:
inc: A positive integer. The amount by which to increment the semaphore
on the target device.
global_core_id: The ID of the target core.
clock: The vector clock of the signaling device at the time of the signal.
"""
global_core_id = int(global_core_id)
with self.cv:
self.count_by_core[global_core_id] += inc
if self.detect_races:
if (global_clock := self.clocks[global_core_id]) is None:
self.clocks[global_core_id] = vc.copy_vector_clock(clock)
else:
vc.update_vector_clock(global_clock, clock)
self.cv.notify_all()
def read(self, global_core_id):
with self.cv:
return self.count_by_core[global_core_id]
def wait(self, value, global_core_id, *, has_tasks=False):
global_core_id = int(global_core_id)
# TODO(jburnim):
# - If the count is larger than value, raise an error?
# - If the count is equal to value, but there DMAs waiting to signal us,
# raise an error?
# Simple implementation for semaphores that have no tasks that can signal
# them.
clock = None
if not has_tasks:
with self.cv:
while self.count_by_core[global_core_id] < value:
self.cv.wait()
self.count_by_core[global_core_id] -= value
if self.detect_races:
assert self.clocks[global_core_id] is not None
clock = vc.copy_vector_clock(self.clocks[global_core_id])
if self.detect_races:
with self.shared_memory.lock:
assert clock is not None
vc.update_vector_clock(
self.shared_memory.clocks[global_core_id], clock
)
return
# TODO(nrink): Update the comment below to generalize from DMAs and DMA
# semaphores. We now have the concept of 'tasks' that can signal a
# semaphore. At the moment, DMAs are the only tasks that occur; and what is
# allowed to be a task may still change (because it should probably be more
# restricted than allowing tasks to be arbitrary callables, as is currently
# done).
#
# For DMA semaphores (when shared_memory.dma_execution_mode=='on_wait'),
# while our count is not large enough we will select and partially execute
# pending DMAs until our count is large enough.
#
# This approach will tend to run DMAs as late as possible, as well as
# out-of-order. This approach also lets us avoid the complexity of spinning
# up separate threads to handle executing DMAs.
while True:
clock = None
with self.cv:
if self.count_by_core[global_core_id] >= value:
self.count_by_core[global_core_id] -= value
if self.detect_races:
assert self.clocks[global_core_id] is not None
clock = vc.copy_vector_clock(self.clocks[global_core_id])
else:
return
if clock is not None:
with self.shared_memory.lock:
vc.update_vector_clock(
self.shared_memory.clocks[global_core_id], clock
)
return
with self.shared_memory.lock:
task_queue = self.shared_memory.tasks_by_sem[(self.id, global_core_id)]
if len(task_queue) > 0:
task = task_queue.pop()
else:
continue
task()
# A `SemaphoreTask` is called when a semaphore is waiting to be signalled on a
# specific core. A `SemaphoreTask` will typically capture the `Semaphore` object
# that is waiting, so that when the task is called, it can signal the semaphore
# (by calling `Semaphore.signal` from within the task). When a `SemaphoreTask`
# object is called, it can be assumed that the call stack of the task will
# *not* hold the lock on the shared memory in the captured `Semaphore` object.
# This allows the task to use methods from `SharedMemory` to access and modify
# the global shared memory object.
# Type alias: a task is a nullary callable with no meaningful return value.
SemaphoreTask = Callable[[], None]
@dataclasses.dataclass(init=False)
class Allocation:
  """Marker base class for entries stored in `SharedMemory.mem`."""
  ...
@dataclasses.dataclass
class Buffer(Allocation):
  """A reference-counted memory buffer backed by a NumPy array."""
  # The array holding the buffer's contents.
  content: np.ndarray
  _: dataclasses.KW_ONLY
  # Number of outstanding references; the buffer may be freed once it is zero.
  ref_count: int = 1
  def decrease_ref_count(self):
    """Decrements the reference count, which must currently be positive."""
    # We should never decrease the `ref_count` to below zero.
    assert self.ref_count > 0
    self.ref_count -= 1
  def has_zero_ref_count(self) -> bool:
    """Returns True if no references to this buffer remain."""
    return self.ref_count == 0
  def size(self) -> int:
    """Returns the size of the buffer contents in bytes."""
    return self.content.itemsize * self.content.size
@dataclasses.dataclass(frozen=True)
class ShapeAndDtype:
  """An immutable (shape, dtype) pair supporting tuple-style unpacking."""
  # The shape of an array.
  shape: Sequence[int]
  # The NumPy dtype of an array.
  dtype: np.dtype
  def __iter__(self):
    # Enables `shape, dtype = shape_and_dtype` destructuring.
    yield self.shape
    yield self.dtype
@dataclasses.dataclass
class SharedMemory:
  """Global state shared by all simulated devices/cores in interpret mode.

  Holds all memory buffers, all semaphores, pending semaphore tasks, and
  (when race detection is enabled) per-core vector clocks. All mutating
  accesses are serialized via `self.lock`; per-semaphore state is guarded by
  each `Semaphore`'s own condition variable.
  """
  num_devices: int
  num_cores_per_device: int
  out_of_bounds_reads: str
  dma_execution_mode: str
  uninitialized_memory: Literal["nan", "zero"]
  detect_races: bool
  vector_clock_size: int
  clocks: list[vc.VectorClock]
  barrier: threading.Barrier
  clean_up_barrier: threading.Barrier
  # (memory_space, buffer_id, device_id, local_core_id) -> Allocation
  mem: dict[tuple[str, int, int, int], Allocation] = dataclasses.field(
      default_factory=dict
  )
  # semaphore_id -> Semaphore
  sem: dict[int, Semaphore] = dataclasses.field(default_factory=dict)
  # (semaphore_id, global_core_id)
  # -> tasks that will signal the semaphore on the core with the given ID and
  # that should therefore be considered for execution when the semaphore is
  # waiting (to be signalled).
  tasks_by_sem: dict[tuple[int, int], list[SemaphoreTask]] = dataclasses.field(
      default_factory=lambda: collections.defaultdict(list)
  )
  lock: threading.Lock = dataclasses.field(default_factory=threading.Lock)
  # (device_id, local_core_id) -> next buffer ID
  next_buffer_id: dict[tuple[int, int], int] = dataclasses.field(
      default_factory=lambda: collections.defaultdict(lambda: 100)
  )
  # global_core_id -> next semaphore ID
  next_semaphore_id: dict[int, int] = dataclasses.field(
      default_factory=lambda: collections.defaultdict(lambda: 2000)
  )
  # Bytes freed since the last periodic garbage collection.
  deallocated_bytes: int = 0
  # (device_id, local_core_id) -> [(grid_index, [range])]
  output_ranges: dict[tuple[int, int], list] = dataclasses.field(
      default_factory=lambda: collections.defaultdict(list)
  )
  # semaphore_id -> Semaphore, where the semaphore_id is user-specified.
  fixed_id_sem: dict[int, Semaphore] = dataclasses.field(
      default_factory=dict
  )
  @property
  def num_cores(self) -> int:
    """Total number of cores across all devices."""
    return self.num_devices * self.num_cores_per_device
  def get_global_core_id(self, device_id: int, local_core_id: int) -> int:
    """Computes the global core ID from the given device and local core ID."""
    return device_id * self.num_cores_per_device + local_core_id
  def get_global_core_ids(self, device_id: int) -> Sequence[int]:
    """Computes the global core IDs for all cores in the given device."""
    return tuple(
        self.get_global_core_id(device_id, core_id)
        for core_id in range(self.num_cores_per_device)
    )
  def append_semaphore_task(
      self,
      semaphore_id: int,
      global_core_id: int,
      task: SemaphoreTask,
  ):
    """Appends a task to be executed if the semaphore with the given sempahore ID is waiting to be signalled on the core with the given global core ID."""
    with self.lock:
      self.tasks_by_sem[(semaphore_id, global_core_id)].append(task)
  def get_random_virtual_device_id(self) -> int:
    """Returns a random virtual core ID in [num_cores, vector_clock_size)."""
    # Virtual device IDs are needed for DMAs. Conceptually, each DMA runs on its
    # own, independent device. Representing this precisely would require vector
    # clocks to have sizes linear in the number of DMAs.
    #
    # Instead, we use approximate vector clocks of fixed size. We assign each
    # DMA a virtual core ID in the range
    #
    # [num_cores, self.vector_clock_size - 1],
    #
    # and each operation of a DMA increments the corresponding coordinate in its
    # vector clock. (So the "virtual" part of a vector clock is effectively
    # counting, for each virtual core, the number of DMAs that happened-before
    # the vector clock and were assigned to that virtual core.)
    #
    # If two approximate clocks are unordered, then their corresponding events
    # are not ordered by the happens-before relation. So this approximation will
    # not introduce any false positives in detecting data races. But we may fail
    # to detect some true data races because there can be cases where two
    # approximate clocks are ordered, and we will treat the corresponding events
    # as ordered by the happens-before relation, but the corresponding events
    # are not actually ordered.
    return np.random.randint(self.num_cores, self.vector_clock_size)
  def print(self, device_id: int):
    """Debug helper: prints the memory map, from device 0 only."""
    device_id = int(device_id)
    if device_id == 0:
      with self.lock:
        print(self.mem)
  def get_semaphores_and_increment_clock(
      self, sem_ids: Sequence[int | None], global_core_id: int
  ) -> tuple[list[Semaphore | None], vc.VectorClock | None]:
    """Returns the semaphores with the given `sem_ids` and increments the vector clock for the core with `global_core_id`.
    If race detection is enabled, this method increments the vector clock for
    the core with the given `global_core_id` (while holding the lock on `self`).
    We do this so that we can associate a (vector clock) time with the shared
    memory operation of looking up the semaphores, which in turn can be used as
    a proxy for the time when the returned semaphores are used by the client of
    the `SharedMemory` class without acquiring the lock on `self`. (For the
    purpose of encapsulation, we prefer to think of `self.lock` as a private
    attribute of the `SharedMemory` class; hence clients of the class should not
    attempt to acquire this lock explicitly.)
    Args:
      sem_ids: The IDs of the semaphores to return or None.
      global_core_id: The ID of the core whose vector clock should be
        incremented (if race detection is enabled).
    Returns:
      - The semaphores with the given `sem_ids` or None if the corresponding
        entry in `sem_ids` is None.
      - The incremented vector clock for the core with the given
        `global_core_id`, or None if race detection is not enabled.
    """
    clock = None
    with self.lock:
      if self.detect_races:
        vc.inc_vector_clock(self.clocks[global_core_id], global_core_id)
        clock = vc.copy_vector_clock(self.clocks[global_core_id])
      sems = []
      for sem_id in sem_ids:
        if sem_id is None:
          sem = None
        elif sem_id in self.fixed_id_sem:
          if sem_id in self.sem:
            # TODO(nrink): For now we make it the responsibility of the client to
            # ensure that fixed-ID semaphores do not collide with internal
            # semaphore IDs.
            raise ValueError(
                f'Semaphore {sem_id} occurs as both fixed-id and internal.'
            )
          sem = self.fixed_id_sem[sem_id]
        else:
          sem = self.sem[sem_id]
        sems.append(sem)
      return sems, clock
  def get_sempahores_with_nonzero_count(
      self, device_id: int
  ) -> list[tuple[Semaphore, int]]:
    """Returns tuples (semaphore, global_core_id) for all semaphores with a nonzero count for the core with `global_core_id`."""
    # NOTE(review): "sempahores" typo kept in the method name for caller
    # compatibility.
    result = []
    with self.lock:
      # Union of the internal and fixed-ID semaphore tables.
      for _, sem in self.sem.items() | self.fixed_id_sem.items():
        with sem.cv:
          for gci in self.get_global_core_ids(device_id):
            if sem.count_by_core[gci] != 0:
              result.append((sem, gci))
    return result
  def get_next_buffer_id(self, device_id: int, local_core_id: int) -> int:
    """Returns the next buffer ID for the given device and local core ID."""
    with self.lock:
      buffer_id = self.next_buffer_id[(device_id, local_core_id)]
      self.next_buffer_id[(device_id, local_core_id)] = buffer_id + 1
    return buffer_id
  def allocate_buffer(
      self,
      key: Any,
      ref_count: int,
      value: np.ndarray,
  ):
    """Allocates a memory buffer with the given key unless it already exists."""
    with self.lock:
      if key not in self.mem:
        self.mem[key] = Buffer(value, ref_count=ref_count)
  def deallocate_buffer(self, key: Any):
    """Decreases the ref count for the buffer with `key` and deallocates the buffer if the ref count is zero."""
    with self.lock:
      buff = self.mem[key]
      if not isinstance(buff, Buffer):
        raise ValueError(
            f"Attempting to deallocate allocation with key `{key}` that is not"
            " a `Buffer`."
        )
      buff.decrease_ref_count()
      if buff.has_zero_ref_count():
        self.mem.pop(key)
        self.deallocated_bytes += buff.size()
        del buff
      should_collect = self.deallocated_bytes > 100_000_000
      if should_collect:
        self.deallocated_bytes = 0
    if should_collect:
      # Periodic garbage collection here prevents OOMs -- although it's not clear
      # why arrays are not getting freed without this. Runs outside the lock so
      # other cores are not blocked during collection.
      gc.collect()
  def allocate_semaphores(self, key: Any, num_semaphores: int) -> int:
    """Returns the next semaphore ID and ensures that the next `num_semaphores` are allocated."""
    with self.lock:
      semaphore_id = self.next_semaphore_id[key]
      self.next_semaphore_id[key] = semaphore_id + num_semaphores
      for i in range(semaphore_id, semaphore_id + num_semaphores):
        if i not in self.sem:
          self.sem[i] = Semaphore(shared_memory=self, semaphore_id=i)
    return semaphore_id
  def guarantee_semaphore_with_fixed_id(self, semaphore_id: int):
    """Ensures that a semaphore with the given `semaphore_id` exists.
    If the semaphore with the given ID does not exist, it is allocated. Note
    that semaphores that are allocated with this method live in their own
    address space (internally, they are mapped in a separate dictionary) from
    the sempahores allocated with the `allocate_sempahores` method above.
    This methods is intended to be used for barrier semaphores, where the
    _collective_ semaphore ID is specified by the interpreter (i.e. by the
    client of the `SharedMemory` class). This simulates sempahores that exist
    prior to any Pallas kernels being run.
    Args:
      semaphore_id: The ID of the semaphore to ensure exists, i.e. is allocated.
    """
    with self.lock:
      if semaphore_id not in self.fixed_id_sem:
        self.fixed_id_sem[semaphore_id] = Semaphore(
            semaphore_id=semaphore_id, shared_memory=self
        )
  def get_buffer_content(
      self, key: Any, rnge: tuple[slice | int, ...], global_core_id: int
  ) -> tuple[np.ndarray | None, ShapeAndDtype, vc.VectorClock | None]:
    """Reads contents of a memory buffer.
    Args:
      key: The key of the buffer to read.
      rnge: The range to read within the buffer.
      global_core_id: The global core ID of the core reading the buffer.
    Returns:
      - The contents of the read range of the buffer, or None if reading out of
        bounds.
      - The shape and dtype of the full content array of the buffer.
      - The incremented vector clock for the core with the given global core ID,
        or None if race detection is not enabled.
    """
    clock = None
    with self.lock:
      if self.detect_races:
        vc.inc_vector_clock(self.clocks[global_core_id], global_core_id)
        clock = vc.copy_vector_clock(self.clocks[global_core_id])
      buff = self.mem[key]
      if not isinstance(buff, Buffer):
        raise ValueError(
            f"Attempting to get contents of allocation with key `{key}` that is"
            " not a `Buffer`."
        )
      array = buff.content
      try:
        result = array[rnge].copy()
      except IndexError:
        # Out-of-bounds integer indexing: report the failure by returning None.
        # (Previously a bare `except:`, which also swallowed KeyboardInterrupt
        # and unrelated errors.)
        result = None
      shape_and_dtype = ShapeAndDtype(array.shape, array.dtype)
      return result, shape_and_dtype, clock
  def store_buffer_content(
      self,
      key: Any,
      rnge: tuple[slice | int, ...],
      value: np.ndarray,
      global_core_id: int,
  ) -> tuple[bool, ShapeAndDtype, vc.VectorClock | None]:
    """Stores contents into a memory buffer.
    Args:
      key: The key of the buffer to store into.
      rnge: The range within the buffer contents that `value` is written to.
      value: The array to store into the buffer.
      global_core_id: The global core ID of the core writing into the buffer.
    Returns:
      - True of the store was in bounds, False otherwise.
      - The shape and dtype of the full content array of the buffer.
      - The incremented vector clock for the core with the given global core ID,
        or None if race detection is not enabled.
    """
    clock = None
    with self.lock:
      if self.detect_races:
        vc.inc_vector_clock(self.clocks[global_core_id], global_core_id)
        clock = vc.copy_vector_clock(self.clocks[global_core_id])
      buff = self.mem[key]
      if not isinstance(buff, Buffer):
        raise ValueError(
            f"Attempting to store into allocation with key `{key}` that is not"
            " a `Buffer`."
        )
      array = buff.content
      shape_and_dtype = ShapeAndDtype(array.shape, array.dtype)
      assert array.dtype == value.dtype  # TODO(jburnim): Catch this statically.
      # TODO(jburnim): Better error message if this raises?
      in_bounds_shape = array[rnge].shape
      if in_bounds_shape == value.shape:
        is_in_bounds = True
        array[rnge] = value
      else:
        # Shape mismatch indicates a (partially) out-of-bounds store; skip it.
        is_in_bounds = False
      return is_in_bounds, shape_and_dtype, clock
  def swap_buffer_content(
      self,
      key: Any,
      rnge: tuple[slice | int, ...],
      value: np.ndarray,
      mask: np.ndarray | None,
      global_core_id: int,
  ) -> tuple[np.ndarray | None, ShapeAndDtype, vc.VectorClock | None]:
    """Swaps contents of a memory buffer.
    Args:
      key: The key of the buffer to swap into.
      rnge: The range within the buffer contents that `value` is swapped into.
      value: The array to be written into the buffer.
      mask: The mask to apply to the swap operation.
      global_core_id: The global core ID of the core writing into the buffer.
    Returns:
      - The contents of the range of the buffer (prior to the swap), or None if
        accessing buffer contents bounds.
      - The shape and dtype of the full content array of the buffer.
      - The incremented vector clock for the core with the given global core ID,
        or None if race detection is not enabled.
    """
    clock = None
    with self.lock:
      if self.detect_races:
        vc.inc_vector_clock(self.clocks[global_core_id], global_core_id)
        clock = vc.copy_vector_clock(self.clocks[global_core_id])
      buff = self.mem[key]
      if not isinstance(buff, Buffer):
        raise ValueError(
            f"Attempting to swap into allocation with `key` {key} that is not a"
            " `Buffer`."
        )
      array = buff.content
      shape_and_dtype = ShapeAndDtype(array.shape, array.dtype)
      assert array.dtype == value.dtype  # TODO(jburnim): Catch this statically.
      # TODO(jburnim): Better error message if this raises?
      raw_result = array[rnge]
      in_bounds_shape = raw_result.shape
      if mask is None:
        if in_bounds_shape == value.shape:
          array[rnge] = value
          return raw_result.copy(), shape_and_dtype, clock
        else:
          return None, shape_and_dtype, clock
      else:
        in_bounds_mask = np.full(mask.shape, True)
        # NOTE(review): each iteration slices axis 0, so for rank>1 masks the
        # per-dimension out-of-bounds marking looks suspect -- verify intent.
        for i in range(len(in_bounds_shape)):
          in_bounds_mask[in_bounds_shape[i] :] = False
        if (~in_bounds_mask & mask).any():
          # Some masked-on element falls out of bounds: fail the whole swap.
          return None, shape_and_dtype, clock
        else:
          in_bounds_idx = tuple(slice(i) for i in in_bounds_shape)
          result = value.copy()
          result[in_bounds_idx] = np.where(
              mask[in_bounds_idx], raw_result, value[in_bounds_idx]
          )
          array[rnge] = np.where(
              mask[in_bounds_idx], value[in_bounds_idx], raw_result
          )
          return result.copy(), shape_and_dtype, clock
  def update_clocks(self, low_global_core_id, high_global_core_id):
    """Synchronizes the vector clocks for the cores with ids in the range between the two arguments."""
    # Despite only updating the vector clocks for some cores, we still need to
    # hold the global lock to ensure that no other devices are concurrently
    # accessing the same vector clocks.
    with self.lock:
      # First fold all clocks into the low core's clock, then broadcast the
      # merged clock back out, so all cores in the range end up equal.
      for c in self.clocks[low_global_core_id + 1 : high_global_core_id]:
        vc.update_vector_clock(self.clocks[low_global_core_id], c)
      for c in self.clocks[low_global_core_id + 1 : high_global_core_id]:
        vc.update_vector_clock(c, self.clocks[low_global_core_id])
  def update_clocks_for_device_barrier(self, device_id):
    """Synchronizes the vector clocks for the cores on the given device."""
    low_core_id = device_id * self.num_cores_per_device
    high_core_id = (device_id + 1) * self.num_cores_per_device
    self.update_clocks(low_core_id, high_core_id)
| {
"repo_id": "jax-ml/jax",
"file_path": "jax/_src/pallas/mosaic/interpret/shared_memory.py",
"license": "Apache License 2.0",
"lines": 532,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:jax/_src/pallas/mosaic/interpret/vector_clock.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
# A vector clock is a 1-D integer array with one counter per (virtual) core.
VectorClock = np.ndarray
def make_vector_clock(vector_clock_size: int) -> VectorClock:
  """Creates an all-zero vector clock with `vector_clock_size` entries."""
  clock = np.zeros(vector_clock_size, dtype=np.int32)
  return clock
def copy_vector_clock(x: VectorClock | None) -> VectorClock | None:
if x is None:
return None
return x.copy()
def update_vector_clock(x: VectorClock, y: VectorClock):
  """Updates `x` in place to the element-wise maximum of `x` and `y`."""
  np.maximum(x, y, out=x)
def lt(x: VectorClock, y: VectorClock) -> bool:
  """Returns True iff `x` happened strictly before `y` (x <= y everywhere, < somewhere)."""
  no_component_greater = np.all(x <= y)
  some_component_less = np.any(x < y)
  return bool(no_component_greater and some_component_less)
def ordered(x: VectorClock, y: VectorClock) -> bool:
  """Returns True iff `x` and `y` are related by happens-before in either direction."""
  if lt(x, y):
    return True
  return lt(y, x)
def inc_vector_clock(x: VectorClock, global_core_id: int):
  """Increments the component of `x` for `global_core_id`, in place.

  Raises:
    ValueError: If `global_core_id` is not a valid index into `x`.
  """
  # Reject negative IDs too: a negative index would silently wrap around and
  # increment the wrong component. (Also fixes the error message, which used
  # to say `device_id=`, and drops a redundant assert that was unreachable
  # after the raising check.)
  if not 0 <= global_core_id < len(x):
    raise ValueError(f'global_core_id={global_core_id} is out of range for x={x}')
  x[global_core_id] += 1
| {
"repo_id": "jax-ml/jax",
"file_path": "jax/_src/pallas/mosaic/interpret/vector_clock.py",
"license": "Apache License 2.0",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:jax/_src/pallas/mosaic/sc_core.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains SparseCore-specific Pallas abstractions."""
from __future__ import annotations
import collections
from collections.abc import Sequence
import dataclasses
from typing import Any, TypeAlias
import jax
from jax._src import core as jax_core
from jax._src import state
from jax._src import tree_util
from jax._src.pallas import core as pallas_core
from jax._src.pallas.mosaic import core as tpu_core
from jax._src.pallas.mosaic import tpu_info
import jax.numpy as jnp
# A tiling: a sequence of tiles, each given as a sequence of per-dimension
# tile sizes.
Tiling: TypeAlias = Sequence[Sequence[int]]
@dataclasses.dataclass(frozen=True)
class MemoryRef(pallas_core.MemoryRef):
  """A MemoryRef for SparseCore."""
  # Optional tiling; each tile's rank is validated against `shape` in __init__.
  tiling: Tiling | None = None
  def __init__(
      self,
      shape: Sequence[int],
      dtype: jax.typing.DTypeLike,
      memory_space: tpu_core.MemorySpace,
      tiling: Tiling | None = None,
  ):
    super().__init__(jax_core.ShapedArray(shape, dtype), memory_space)
    # A tile cannot have more dimensions than the array it tiles.
    for tile in tiling or ():
      if len(tile) > len(shape):
        raise ValueError(
            f"Tile rank must not exceed shape rank: {tile=} vs {shape=}"
        )
    # Frozen dataclass: must bypass the frozen __setattr__ to set the field.
    object.__setattr__(self, "tiling", tiling)
  def get_ref_aval(self) -> state.TransformedRef | state.AbstractRef:
    """Returns a SparseCore `AbstractRef` carrying this ref's tiling."""
    # TODO(sharadmv): Clean this up. ShapedArrayWithMemorySpace fails when we
    # try to apply JAX ops to it.
    return AbstractRef(self.inner_aval, self.memory_space, self.tiling)
class AbstractRef(state.AbstractRef):
  """An AbstractRef for SparseCore."""
  # Optional tiling for the referenced memory (see `MemoryRef`).
  tiling: Tiling | None = None
  def __init__(
      self,
      aval: jax_core.AbstractValue,
      memory_space: tpu_core.MemorySpace,
      tiling: Tiling | None,
  ):
    super().__init__(aval, memory_space)
    self.tiling = tiling
  def update(  # type: ignore[override]
      self,
      inner_aval: Any | None = None,
      memory_space: Any | None = None,
      tiling: Tiling | None = None,
  ) -> AbstractRef:
    """Returns a copy with any given fields replaced; None keeps the current value."""
    return AbstractRef(
        inner_aval if inner_aval is not None else self.inner_aval,
        memory_space if memory_space is not None else self.memory_space,
        tiling if tiling is not None else self.tiling,
    )
@dataclasses.dataclass
class BlockSpec(pallas_core.BlockSpec):
  """A BlockSpec for SparseCore.
  Attributes:
    indexed_by: The optional index of a parameter to use as the indexer. If set,
      the pipeline emitter will issue an indirect stream indexing into the
      value of this parameter as part of the pipeline.
    indexed_dim: The dimension to index into. Optional unless ``indexed_by`` is
      set.
  See also:
    :class:`jax.experimental.pallas.BlockSpec`
  """
  indexed_by: int | None = None
  indexed_dim: int | None = None
  def __post_init__(self):
    # The two indirection fields only make sense as a pair.
    if (self.indexed_by is None) != (self.indexed_dim is None):
      raise ValueError(
          "indexed_by and indexed_dim must both be set or both unset"
      )
  def to_block_mapping(
      self,
      origin: pallas_core.OriginStr,
      array_aval: jax_core.ShapedArray,
      *,
      index_map_avals: Sequence[jax_core.AbstractValue],
      index_map_tree: tree_util.PyTreeDef,
      grid: pallas_core.GridMappingGrid,
      vmapped_dims: tuple[int, ...],
      debug: bool = False,
  ) -> BlockMapping:
    """Converts this spec to a SparseCore `BlockMapping`, preserving the indirection fields."""
    bm = super().to_block_mapping(
        origin,
        array_aval,
        index_map_avals=index_map_avals,
        index_map_tree=index_map_tree,
        grid=grid,
        vmapped_dims=vmapped_dims,
        debug=debug,
    )
    # Re-wrap the base mapping as a SparseCore BlockMapping: copy every base
    # field and attach the indirection metadata.
    return BlockMapping(
        **{f.name: getattr(bm, f.name) for f in dataclasses.fields(bm)},
        indexed_by=self.indexed_by,
        indexed_dim=self.indexed_dim,
    )
@dataclasses.dataclass(frozen=True)
class BlockMapping(pallas_core.BlockMapping):
  """A BlockMapping for SparseCore carrying optional indirect-indexing info."""
  # Index of the parameter used as the indexer, or None (see `BlockSpec`).
  indexed_by: int | None = None
  # The dimension to index into, or None (see `BlockSpec`).
  indexed_dim: int | None = None
def get_sparse_core_info() -> tpu_info.SparseCoreInfo:
  """Returns the SparseCore information for the current device."""
  info = tpu_info.get_tpu_info().sparse_core
  if info:
    return info
  # No SparseCore available: fall back to an all-zero info record.
  return tpu_info.SparseCoreInfo(
      num_cores=0, num_subcores=0, num_lanes=0, dma_granule_size_bytes=0,
  )
@dataclasses.dataclass(frozen=True, kw_only=True)
class ScalarSubcoreMesh:
  """A 1-D mesh over SparseCore scalar subcores (one axis over cores)."""
  # Name of the single (core) mesh axis.
  axis_name: str
  # Number of SparseCores to map over.
  num_cores: int
  @property
  def kernel_type(self) -> tpu_core.CoreType:
    return tpu_core.CoreType.SC_SCALAR_SUBCORE
  @property
  def default_memory_space(self) -> tpu_core.MemorySpace:
    return tpu_core.MemorySpace.HBM
  @property
  def shape(self):
    # Single-axis mesh: axis_name -> num_cores.
    return collections.OrderedDict({self.axis_name: self.num_cores})
  @property
  def dimension_semantics(self) -> Sequence[str]:
    return ["core_parallel"]
  def discharges_effect(self, effect):
    # This mesh discharges no effects itself.
    del effect  # Unused.
    return False
def _scalar_subcore_mesh_discharge_rule(
    in_avals,
    out_avals,
    *args,
    mesh,
    jaxpr,
    compiler_params,
    interpret,
    debug,
    cost_estimate,
    name,
    metadata,
):
  """Discharge rule for `core_map` over a `ScalarSubcoreMesh`.
  Validates the mesh against the current chip, rewrites scalar arguments to be
  passed by ref (copied to SMEM), and defers to the default mesh discharge
  rule.
  """
  if not isinstance(mesh, ScalarSubcoreMesh):
    raise TypeError(f"Mesh must be a ScalarSubcoreMesh, got {type(mesh)}")
  assert len(mesh.shape) == 1
  sc_info = get_sparse_core_info()
  # Cannot map over more SparseCores than the chip provides.
  if mesh.num_cores > (num_expected := sc_info.num_cores):
    raise ValueError(
        f"Mesh has {mesh.num_cores} cores, but the current TPU chip has only"
        f" {num_expected} SparseCores"
    )
  if compiler_params is None:
    compiler_params = tpu_core.CompilerParams()
  if compiler_params.dimension_semantics is not None:
    raise ValueError("ScalarSubcoreMesh does not support dimension_semantics=")
  # Rewrite scalar args as refs; `is_scalar_const` records which positions
  # were rewritten so their (meaningless) ref outputs can be dropped below.
  jaxpr, in_avals, out_avals, args, is_scalar_const = tpu_core.pass_scalars_as_refs(
      jaxpr, args, in_avals, out_avals, mesh,
      # TODO(sharadmv): Delete this once we can pass into SMEM directly on
      # SparseCore.
      copy_to_smem=True,
  )
  refs_out, out = pallas_core.default_mesh_discharge_rule(
      in_avals,
      out_avals,
      *args,
      mesh=mesh,
      jaxpr=jaxpr,
      compiler_params=compiler_params,
      interpret=interpret,
      debug=debug,
      cost_estimate=cost_estimate,
      name=name,
      metadata=metadata,
  )
  # Positions that were scalar constants get no ref output.
  refs_out = [
      a if not is_scalar else None
      for is_scalar, a in zip(is_scalar_const, refs_out)
  ]
  return refs_out, out
# Register the discharge rule so `core_map` over a `ScalarSubcoreMesh` lowers
# via `_scalar_subcore_mesh_discharge_rule`.
pallas_core._core_map_mesh_rules[ScalarSubcoreMesh] = (
    _scalar_subcore_mesh_discharge_rule
)
def _get_num_cores() -> int:
  """Returns the number of cores for the current SparseCore."""
  info = get_sparse_core_info()
  return info.num_cores
def _get_num_subcores() -> int:
  """Returns the number of subcores for the current SparseCore."""
  info = get_sparse_core_info()
  return info.num_subcores
@dataclasses.dataclass(frozen=True, kw_only=True)
class VectorSubcoreMesh:
  """A 2-D mesh over SparseCore vector subcores (cores x subcores)."""
  # Name of the mesh axis spanning SparseCores.
  core_axis_name: str
  # Name of the mesh axis spanning subcores within each core.
  subcore_axis_name: str
  # Defaults to all SparseCores on the current chip.
  num_cores: int = dataclasses.field(default_factory=_get_num_cores)
  # Always the hardware subcore count; not settable by callers (init=False).
  num_subcores: int = dataclasses.field(
      default_factory=_get_num_subcores, init=False
  )
  def __post_init__(self):
    # Validate the requested mesh against the actual hardware.
    sc_info = get_sparse_core_info()
    if self.num_cores > (num_expected := sc_info.num_cores):
      raise ValueError(
          f"Mesh has {self.num_cores} cores, but the current TPU chip has only"
          f" {num_expected} SparseCores"
      )
    if self.num_subcores != sc_info.num_subcores:
      raise ValueError(
          f"Mesh has {self.num_subcores} subcores, but the current TPU chip has"
          f" only {sc_info.num_subcores} subcores"
      )
  @property
  def kernel_type(self) -> tpu_core.CoreType:
    return tpu_core.CoreType.SC_VECTOR_SUBCORE
  @property
  def default_memory_space(self) -> tpu_core.MemorySpace:
    return tpu_core.MemorySpace.HBM
  @property
  def shape(self):
    # Axis order: cores first, then subcores.
    return collections.OrderedDict({
        self.core_axis_name: self.num_cores,
        self.subcore_axis_name: self.num_subcores,
    })
  @property
  def dimension_semantics(self) -> Sequence[str]:
    return ["core_parallel", "subcore_parallel"]
  def discharges_effect(self, effect):
    # This mesh discharges no effects itself.
    del effect  # Unused.
    return False
def _vector_subcore_mesh_discharge_rule(
    in_avals,
    out_avals,
    *args,
    mesh,
    jaxpr,
    compiler_params,
    interpret,
    debug,
    cost_estimate,
    name,
    metadata,
):
  """Discharge rule for `core_map` over a `VectorSubcoreMesh`.
  Validates the mesh against the current chip and defers to the default mesh
  discharge rule.
  """
  if not isinstance(mesh, VectorSubcoreMesh):
    raise TypeError(f"Mesh must be a VectorSubcoreMesh, got {type(mesh)}")
  assert len(mesh.shape) == 2
  # Keep the full info object (consistent with
  # _scalar_subcore_mesh_discharge_rule) instead of binding `sc_info` to the
  # bare core count, which was misleadingly named.
  sc_info = get_sparse_core_info()
  # Cannot map over more SparseCores than the chip provides.
  if mesh.num_cores > (num_expected := sc_info.num_cores):
    raise ValueError(
        f"Mesh has {mesh.num_cores} cores, but the current TPU chip has only"
        f" {num_expected} SparseCores"
    )
  if compiler_params is None:
    compiler_params = tpu_core.CompilerParams()
  if compiler_params.dimension_semantics is not None:
    raise ValueError("VectorSubcoreMesh does not support dimension_semantics=")
  return pallas_core.default_mesh_discharge_rule(
      in_avals,
      out_avals,
      *args,
      mesh=mesh,
      jaxpr=jaxpr,
      compiler_params=compiler_params,
      interpret=interpret,
      debug=debug,
      cost_estimate=cost_estimate,
      name=name,
      metadata=metadata,
  )
# Register the discharge rule so `core_map` over a `VectorSubcoreMesh` lowers
# via `_vector_subcore_mesh_discharge_rule`.
pallas_core._core_map_mesh_rules[VectorSubcoreMesh] = (
    _vector_subcore_mesh_discharge_rule
)
def supported_shapes(dtype: jax.typing.DTypeLike) -> Sequence[tuple[int, ...]]:
  """Returns all supported array shapes for the given dtype on SparseCore."""
  lanes = get_sparse_core_info().num_lanes
  width = jnp.dtype(dtype).itemsize
  # Only dtypes of at most 32 bits are representable.
  if width > 4:
    raise ValueError(f"Unsupported dtype: {dtype}")
  # How many elements pack into a 32-bit word.
  packing = 4 // width
  if packing == 1:
    return [(lanes,)]
  return [(lanes * packing,), (packing, lanes)]
| {
"repo_id": "jax-ml/jax",
"file_path": "jax/_src/pallas/mosaic/sc_core.py",
"license": "Apache License 2.0",
"lines": 294,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:jax/_src/pallas/mosaic/sc_lowering.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lowering for Pallas TPU SparseCore."""
from collections.abc import Sequence
import dataclasses
import functools
import itertools
from typing import Any, Callable, cast, NoReturn
from jax._src import api_util
from jax._src import core as jax_core
from jax._src import debugging
from jax._src import lax
from jax._src import linear_util as lu
from jax._src import mesh as mesh_lib
from jax._src import numpy as jnp
from jax._src import source_info_util
from jax._src import state
from jax._src import tree_util
from jax._src import util
from jax._src.interpreters import mlir
from jax._src.interpreters import partial_eval as pe
from jax._src.lib.mlir import ir
from jax._src.lib.mlir.dialects import arith
from jax._src.lib.mlir.dialects import func
from jax._src.lib.mlir.dialects import memref
from jax._src.lib.mlir.dialects import vector
from jax._src.pallas import core as pallas_core
from jax._src.pallas import primitives as pallas_primitives
from jax._src.pallas.mosaic import core as tpu_core
from jax._src.pallas.mosaic import lowering as tc_lowering
from jax._src.pallas.mosaic import primitives as tpu_primitives
from jax._src.pallas.mosaic import sc_core
from jax._src.pallas.mosaic import tpu_info
from jax._src.state import discharge as state_discharge
from jax._src.state import indexing
from jax._src.state import primitives as state_primitives
from jax.experimental.mosaic.dialects import tpu
# Shadow the builtins with length-checked variants, keeping the originals
# available as unsafe_*.
map, unsafe_map = util.safe_map, map
zip, unsafe_zip = util.safe_zip, zip
MemorySpace = tpu_core.MemorySpace
# Helpers shared with the TensorCore lowering.
ShapedAbstractValue = tc_lowering.ShapedAbstractValue
LoweringContext = tc_lowering.LoweringContext
LoweringRuleContext = tc_lowering.LoweringRuleContext
_transform_ref = tc_lowering._transform_ref
_dtype_to_ir_type = tc_lowering._dtype_to_ir_type
# pylint: disable=protected-access
def dynamic_shape_replacement_fn(x):
  """Identity placeholder: SparseCore does not replace dynamic shapes."""
  return x
def _block_spec_from_block_mapping(
    bm: pallas_core.BlockMapping,
    which_parallel: Sequence[bool],
    default_memory_space: MemorySpace,
) -> pallas_core.BlockSpec:
  """Rebuilds a ``BlockSpec`` from ``bm`` for use with ``emit_pipeline``.

  The emitted pipeline only iterates over the *sequential* grid axes, so the
  returned spec's index map re-inserts the parallel axes' program ids before
  evaluating the original index-map jaxpr.

  Args:
    bm: the block mapping to convert.
    which_parallel: per-grid-axis flags; True marks a parallel axis.
    default_memory_space: memory space used when ``bm`` does not specify one.

  Returns:
    A ``sc_core.BlockSpec`` equivalent to ``bm`` over the sequential grid.
  """
  eval_index_map = functools.partial(
      jax_core.eval_jaxpr,
      bm.index_map_jaxpr.jaxpr,
      bm.index_map_jaxpr.consts,
  )
  def index_map(*indices):
    # Inject the parallel indices into the sequential ones coming from
    # `emit_pipeline`.
    new_indices = util.merge_lists(
        which_parallel,
        indices,
        [
            # ``accumulate`` yields the 1-based count of parallel axes seen
            # so far, so ``axis - 1`` is the 0-based parallel grid axis.
            pallas_primitives.program_id(axis - 1)
            for axis, is_parallel in zip(
                itertools.accumulate(which_parallel), which_parallel
            )
            if is_parallel
        ],
    )
    return eval_index_map(*new_indices)
  memory_space = bm.transformed_block_aval.memory_space
  if memory_space is None:
    memory_space = default_memory_space
  if isinstance(bm, sc_core.BlockMapping):
    # Preserve the SparseCore-specific indexing metadata.
    return sc_core.BlockSpec(
        bm.block_shape,
        index_map,
        indexed_by=bm.indexed_by,
        indexed_dim=bm.indexed_dim,
        memory_space=memory_space,
    )
  return sc_core.BlockSpec(bm.block_shape, index_map, memory_space=memory_space)
def _trace_index_map_to_jaxpr(
    index_map: Callable[..., Any],
    debug_info: jax_core.DebugInfo,
    index_map_tree: Any,
    index_map_avals: Sequence[jax_core.AbstractValue],
) -> jax_core.ClosedJaxpr:
  """Traces ``index_map`` into a ``ClosedJaxpr``.

  Args:
    index_map: the index-map callable to trace.
    debug_info: debug info to attach to the wrapped function.
    index_map_tree: pytree structure of the index map's arguments.
    index_map_avals: flat abstract values of the arguments.

  Returns:
    The traced index map with its constants as a closed jaxpr.
  """
  flat_fun, _ = api_util.flatten_fun(
      lu.wrap_init(index_map, debug_info=debug_info), index_map_tree
  )
  index_map_jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(
      flat_fun, index_map_avals
  )
  return jax_core.ClosedJaxpr(index_map_jaxpr, consts)
def lower_pipelined_jaxpr_to_module(
    lowering_context: mlir.LoweringRuleContext,
    grid_mapping: pallas_core.GridMapping,
    jaxpr: jax_core.Jaxpr,
    *,
    dimension_semantics: Sequence[tpu_core.DimensionSemantics] | None,
    kernel_type: tpu_core.CoreType,
    mesh: mesh_lib.Mesh | None = None,
    dynamic_shape_replacement_enabled: bool = False,
    use_tc_tiling: bool | None = None,
) -> ir.Module:
  """Creates a fresh MLIR module and lowers the pipelined ``jaxpr`` into it.

  Thin wrapper around ``lower_pipelined_jaxpr_into_module``; see that
  function for the meaning of the arguments.

  Returns:
    The newly created module containing the lowered kernel.
  """
  module = ir.Module.create()
  lower_pipelined_jaxpr_into_module(
      lowering_context,
      module,
      grid_mapping,
      jaxpr,
      name=mlir.sanitize_name(jaxpr.debug_info.func_name),
      dimension_semantics=dimension_semantics,
      kernel_type=kernel_type,
      mesh=mesh,
      dynamic_shape_replacement_enabled=dynamic_shape_replacement_enabled,
      use_tc_tiling=use_tc_tiling,
  )
  return module
def lower_pipelined_jaxpr_into_module(
    lowering_context: mlir.LoweringRuleContext,
    module: ir.Module,
    grid_mapping: pallas_core.GridMapping,
    jaxpr: jax_core.Jaxpr,
    *,
    name: str,
    dimension_semantics: Sequence[tpu_core.DimensionSemantics] | None,
    kernel_type: tpu_core.CoreType,
    mesh: mesh_lib.Mesh | None = None,
    dynamic_shape_replacement_enabled: bool = False,
    use_tc_tiling: bool | None = None,
) -> None:
  """Lowers a pipelined Pallas kernel into ``module`` in place.

  The sequential grid axes are pipelined via ``pltpu.emit_pipeline`` while
  the parallel axes become the grid of the emitted Mosaic function.
  Semaphore operands are split out and passed through without pipelining.

  Args:
    lowering_context: the surrounding MLIR lowering context.
    module: the MLIR module to append the kernel function(s) to (mutated).
    grid_mapping: the Pallas grid mapping for ``jaxpr``.
    jaxpr: the kernel body to lower.
    name: name of the emitted MLIR function.
    dimension_semantics: per-axis semantics; defaults to all "arbitrary".
    kernel_type: which SparseCore subcore to target.
    mesh: optional device mesh.
    dynamic_shape_replacement_enabled: unsupported; must be False.
    use_tc_tiling: if not None, selects COMPACT (True) or SPARSE_CORE
      (False) tiling for the pipeline.

  Raises:
    NotImplementedError: for dynamic shape replacement or unsupported block
      dimension types.
  """
  if dynamic_shape_replacement_enabled:
    raise NotImplementedError(
        "Dynamic shape replacement is not supported for SparseCore."
    )
  grid = grid_mapping.grid
  block_mappings = grid_mapping.block_mappings
  if dimension_semantics is None:
    dimension_semantics = ("arbitrary",) * len(grid)  # type: ignore
  dimension_semantics: Sequence[tpu_core.LiteralDimensionSemantics] = tuple(  # pyrefly: ignore[redefinition] # pytype: disable=annotation-type-mismatch
      map(tc_lowering._canonicalize_dimension_semantic, dimension_semantics)  # type: ignore[arg-type]
  )
  is_semaphore = []
  for bm in grid_mapping.block_mappings:
    for bd in bm.block_shape:
      if not isinstance(bd, (pallas_core.Squeezed, pallas_core.Blocked)):
        raise NotImplementedError(
            "Unsupported block dimension type: "
            f"{type(bd)} for block shape: {bm.block_shape}"
        )
    if isinstance(bm, sc_core.BlockMapping) and bm.indexed_by is not None:
      # TODO(slebedev): Remove this branch once ``pltpu.emit_pipeline`` supports
      # gathers/scatters.
      lower_jaxpr_into_module(
          lowering_context,
          module,
          grid_mapping,
          jaxpr,
          name=name,
          dimension_semantics=dimension_semantics,
          kernel_type=kernel_type,
          mesh=mesh,
      )
      return
    is_semaphore.append(bm.block_aval.memory_space is MemorySpace.SEMAPHORE)
  # Split out semaphores, because they do not need to be pipelined.
  block_mappings, sem_block_mappings = util.partition_list(
      is_semaphore, block_mappings
  )
  in_block_mappings, out_block_mappings = util.split_list(
      block_mappings,
      [grid_mapping.num_inputs - sum(is_semaphore[: grid_mapping.num_inputs])],
  )
  assert len(dimension_semantics) == len(grid)
  which_parallel = [ds != "arbitrary" for ds in dimension_semantics]
  sequential_grid = tuple(
      d for axis, d in enumerate(grid) if not which_parallel[axis]
  )
  parallel_grid = tuple(
      d for axis, d in enumerate(grid) if which_parallel[axis]
  )
  from jax._src.pallas.mosaic import pipeline  # pytype: disable=import-error
  def pipeline_fn(*refs_and_scratch):
    # Emits the sequential-axis pipeline; this function is traced below.
    refs, scratch_refs = util.split_list(refs_and_scratch, [len(is_semaphore)])
    refs, sem_refs = util.partition_list(is_semaphore, refs)
    def body_fn(indices, *refs):
      # Parallel axes get their program id at lowering time (None here).
      program_ids_template = util.merge_lists(
          which_parallel, indices, [None] * sum(which_parallel)
      )
      assert len(refs) + len(sem_refs) + len(scratch_refs) == len(jaxpr.invars)
      return pallas_primitives._jaxpr_call(
          jaxpr,
          *util.merge_lists(is_semaphore, refs, sem_refs),
          *scratch_refs,
          program_ids=program_ids_template,
      )
    tiling = None
    if use_tc_tiling is not None:
      tiling = (
          tpu_info.Tiling.COMPACT
          if use_tc_tiling
          else tpu_info.Tiling.SPARSE_CORE
      )
    make_block_spec = functools.partial(
        _block_spec_from_block_mapping,
        which_parallel=which_parallel,
        default_memory_space=MemorySpace.SMEM
        if kernel_type is tpu_core.CoreType.SC_SCALAR_SUBCORE
        else MemorySpace.VMEM,
    )
    pipeline.emit_pipeline(
        body_fn,
        grid=sequential_grid,
        in_specs=map(make_block_spec, in_block_mappings),
        out_specs=map(make_block_spec, out_block_mappings),
        tiling=tiling,
        _explicit_indices=True,
    )(*refs)
    return ()  # ``wrap_init`` does not support functions returning None.
  with grid_mapping.trace_env():
    new_jaxpr, _, new_consts = pe.trace_to_jaxpr_dynamic(
        lu.wrap_init(
            pipeline_fn, debug_info=jaxpr.debug_info.with_unknown_names()
        ),
        # Pipelined operands are rebuilt as whole-array HBM refs; semaphores
        # keep their transformed block avals; scratch avals pass through.
        util.merge_lists(
            is_semaphore,
            [
                MemorySpace.HBM(
                    bm.array_aval.shape, bm.array_aval.dtype
                ).get_ref_aval()
                for bm in block_mappings
            ],
            [bm.transformed_block_aval for bm in sem_block_mappings],
        )
        + jaxpr.in_avals[grid_mapping.slice_scratch_ops],
    )
  assert not new_consts
  parallel_index_map_avals, parallel_index_map_tree = tree_util.tree_flatten(
      ((jax_core.ShapedArray((), jnp.int32),) * len(parallel_grid), {})
  )
  parallel_block_mappings = []
  for bm in block_mappings:
    debug_info = bm.index_map_jaxpr.jaxpr.debug_info
    if debug_info.arg_names is not None:
      # Keep only the arg names of the parallel axes.
      debug_info = debug_info._replace(
          arg_names=tuple(
              name
              for name, is_parallel in zip(
                  debug_info.arg_names, which_parallel
              )
              if is_parallel
          )
      )
    with pallas_core.tracing_grid_env(
        parallel_grid, grid_mapping.vmapped_dims
    ):
      # The whole array is one block, so the index map is constant zero.
      # The lambda is traced immediately, so capturing ``bm`` is safe.
      new_index_map_jaxpr = _trace_index_map_to_jaxpr(
          lambda *args: (0,) * len(bm.block_shape),
          debug_info,
          parallel_index_map_tree,
          parallel_index_map_avals,
      )
    parallel_block_mappings.append(
        bm.replace(
            index_map_jaxpr=new_index_map_jaxpr,
            block_shape=tuple(map(pallas_core.Blocked, bm.array_aval.shape)),
            transformed_block_aval=MemorySpace.HBM(
                bm.array_aval.shape, bm.array_aval.dtype
            ).get_ref_aval(),
        )
    )
  grid_mapping = grid_mapping.replace(
      grid=parallel_grid,
      index_map_avals=parallel_index_map_avals,
      index_map_tree=parallel_index_map_tree,
      block_mappings=tuple(
          util.merge_lists(
              is_semaphore, parallel_block_mappings, sem_block_mappings
          )
      ),
  )
  dimension_semantics = [  # pytype: disable=annotation-type-mismatch
      ds
      for axis, ds in enumerate(dimension_semantics)
      if which_parallel[axis]
  ]
  with grid_mapping.trace_env():
    lower_jaxpr_into_module(
        lowering_context,
        module,
        grid_mapping,
        new_jaxpr,
        name=name,
        dimension_semantics=dimension_semantics,  # pytype: disable=wrong-arg-types
        kernel_type=kernel_type,
        mesh=mesh,
    )
  # NOTE: the module is mutated in place; the previous ``return module`` was
  # inconsistent with the ``-> None`` annotation and with the early-return
  # path above, and its value was ignored by the only caller.
def lower_jaxpr_into_module(
    lowering_context: mlir.LoweringRuleContext,
    module: ir.Module,
    grid_mapping: pallas_core.GridMapping,
    jaxpr: jax_core.Jaxpr,
    *,
    name: str,
    dimension_semantics: Sequence[tpu_core.DimensionSemantics] | None,
    kernel_type: tpu_core.CoreType,
    mesh: mesh_lib.Mesh | None = None,
    dynamic_shape_replacement_enabled: bool = False,
) -> None:
  """Lowers a Jaxpr into ``module`` as a Mosaic SparseCore kernel function.

  The kernel function and one transform function per block mapping are
  appended to ``module`` in place; nothing is returned (the previous
  ``-> ir.Module`` annotation did not match any return statement).

  Raises:
    NotImplementedError: if dynamic shape replacement is requested.
  """
  if dynamic_shape_replacement_enabled:
    raise NotImplementedError(
        "Dynamic shape replacement is not supported for SparseCore."
    )
  backend = lowering_context.module_context.get_backend(optional=True)
  mosaic_grid_mapping = MosaicGridMapping(
      jaxpr,
      grid_mapping,
      dimension_semantics,
      mesh=mesh,
      kernel_type=kernel_type,
  )
  sym_tab = ir.SymbolTable(module.operation)
  func_op = lower_jaxpr_to_func(
      jaxpr,
      name=name,
      kernel_type=kernel_type,
      mosaic_grid_mapping=mosaic_grid_mapping,
      forward_compatible=lowering_context.is_forward_compat(),
      backend=backend,
  )
  module.body.append(func_op)
  sym_tab.insert(func_op)
  assert mosaic_grid_mapping.grid is not None
  assert all(isinstance(d, int) for d in mosaic_grid_mapping.grid)
  func_op.attributes["iteration_bounds"] = ir.DenseI64ArrayAttr.get(
      cast(tuple[int, ...], mosaic_grid_mapping.grid)
  )
  func_op.attributes["dimension_semantics"] = (
      mosaic_grid_mapping.get_dimension_semantics()
  )
  if not mosaic_grid_mapping.grid:
    # No need for "window_params" if the grid is empty.
    return
  # Emit one transform (index-map) function per block mapping and record it
  # in the kernel's "window_params" attribute.
  window_params = []
  for i, bm in enumerate(grid_mapping.block_mappings):
    func_name = f"{name}_transform_{i}"
    mlir_func = tc_lowering.lower_jaxpr_to_transform_func(
        bm.index_map_jaxpr.jaxpr,
        bm.block_aval,
        name=func_name,
        mosaic_grid_mapping=mosaic_grid_mapping,
        kernel_type=kernel_type,
        forward_compatible=lowering_context.is_forward_compat(),
        backend=backend,
        dynamic_shape_replacement_fn=dynamic_shape_replacement_fn,
    )
    assert mlir_func.verify(), mlir_func
    module.body.append(mlir_func)
    assert func_name not in sym_tab
    sym_tab.insert(mlir_func)
    block_shape = list(pallas_core._get_block_shape(bm.block_shape))
    block_params = dict(
        window_bounds=ir.DenseI64ArrayAttr.get(block_shape),
        transform_indices=ir.FlatSymbolRefAttr.get(func_name),
    )
    window_params.append(ir.DictAttr.get(block_params))
  func_op.attributes["window_params"] = ir.ArrayAttr.get(window_params)
@dataclasses.dataclass(init=False)
class MosaicGridMapping(tc_lowering.MosaicGridMapping):
  """Abstracts a grid mapping for Mosaic SparseCore."""
  def __init__(
      self,
      jaxpr: jax_core.Jaxpr,
      grid_mapping: pallas_core.GridMapping,
      dimension_semantics: Sequence[tpu_core.DimensionSemantics] | None,
      mesh: mesh_lib.Mesh | None,
      kernel_type: tpu_core.CoreType,
    ):
    """Validates SparseCore-specific constraints, then defers to the TC base.

    Raises:
      NotImplementedError: if any scratch operand is an
        ``sc_core.AbstractRef`` (a ``plsc.MemoryRef``).
    """
    if any(
        isinstance(var.aval, sc_core.AbstractRef)
        for var in jaxpr.invars[grid_mapping.slice_scratch_ops]
    ):
      # TODO(slebedev): Support tiling annotations for kernel operands.
      raise NotImplementedError(
          "`plsc.MemoryRef`s are not supported as scratch operands to the"
          " kernel. Allocate them in the kernel body via `pl.run_scoped`."
      )
    super().__init__(
        jaxpr,
        grid_mapping,
        dimension_semantics,
        mesh,
        dynamic_shape_replacement_fn=dynamic_shape_replacement_fn,
        kernel_type=kernel_type,
    )
def lower_jaxpr_to_func(
    jaxpr: jax_core.Jaxpr,
    *,
    name: str,
    kernel_type: tpu_core.CoreType,
    mosaic_grid_mapping: MosaicGridMapping,
    forward_compatible: bool,
    backend: Any | None,
) -> func.FuncOp:
  """Lowers a Jaxpr to a Mosaic SparseCore function.

  Builds a ``func.FuncOp`` whose arguments are the grid indices followed by
  the kernel operands and scratch buffers, attaches SparseCore-specific
  attributes, and verifies the result.

  Raises:
    NotImplementedError: if scalar prefetch operands are present.
    ValueError: if the built function fails MLIR verification.
  """
  num_grid = len(mosaic_grid_mapping.grid_types)
  num_scalar_prefetch = len(mosaic_grid_mapping.scalar_prefetch_types)
  if num_scalar_prefetch:
    raise NotImplementedError("Scalar prefetch not supported.")
  num_scratch = len(mosaic_grid_mapping.scratch_types)
  arg_types = [
      *mosaic_grid_mapping.grid_types,
      *mosaic_grid_mapping.scalar_prefetch_types,
      *mosaic_grid_mapping.operand_types,
      *mosaic_grid_mapping.scratch_types,
  ]
  arg_block_shapes = [
      *mosaic_grid_mapping.scalar_prefetch_block_shapes,
      *mosaic_grid_mapping.operand_block_shapes,
      *mosaic_grid_mapping.scratch_block_shapes,
  ]
  def body_func(*args: ir.Value):
    # The MLIR-level function body: split the arguments, build a fresh
    # lowering context and lower the kernel jaxpr into it.
    grid_indices, scalar_prefetch, operands_and_scratch = util.split_list(
        args, [num_grid, num_scalar_prefetch]
    )
    grid_indices = mosaic_grid_mapping.get_grid_indices(
        grid_indices, maybe_include_mapped_dims=False
    )
    # Drop vmapped dims: they are not visible to the user program.
    jaxpr_indices = tuple(
        idx
        for i, idx in enumerate(grid_indices)
        if i not in mosaic_grid_mapping.vmapped_dims
    )
    lowering_context = LoweringContext(
        mosaic_grid_mapping.grid,  # type: ignore
        mosaic_grid_mapping.grid_names,
        mosaic_grid_mapping.vmapped_dims,
        jaxpr_indices,
        arg_block_shapes,
        source_info_util.NameStack(),
        mesh_context=mosaic_grid_mapping.mesh_info,
        traceback_caches=mlir.TracebackCaches(),
        kernel_type=kernel_type,
        forward_compatible=forward_compatible,
        backend=backend,
        dynamic_shape_replacement_fn=dynamic_shape_replacement_fn,
    )
    return tc_lowering.jaxpr_subcomp(
        lowering_context, jaxpr, *scalar_prefetch, *operands_and_scratch
    )
  body: Any = func.FuncOp.from_py_func(*arg_types, name=name)(body_func)
  func_op = cast(func.FuncOp, body.func_op)
  func_op.attributes["tpu.core_type"] = ir.Attribute.parse(
      f"#tpu.core_type<{kernel_type.name.lower()}>"
  )
  func_op.attributes["scratch_operands"] = ir.IntegerAttr.get(
      ir.IntegerType.get_signless(64), num_scratch
  )
  # Per-argument attributes: empty for grid indices and scratch, and
  # SparseCore metadata for the operands in between.
  arg_attrs = [ir.DictAttr.get({})] * num_grid
  for arg, bm in zip(
      func_op.arguments[num_grid : len(func_op.arguments) - num_scratch],
      mosaic_grid_mapping.block_mappings,
  ):
    d = {}
    if (
        str(arg.type.memory_space) == "#tpu.memory_space<hbm>"
        or str(arg.type.memory_space) == "#tpu.memory_space<semaphore_mem>"
    ):
      d["sc.persistent"] = ir.UnitAttr.get()
    if isinstance(bm, sc_core.BlockMapping) and bm.indexed_by is not None:
      d["sc.indexed_by"] = mlir.i32_attr(bm.indexed_by)
      d["sc.indexed_dim"] = mlir.i32_attr(bm.indexed_dim)
    arg_attrs.append(ir.DictAttr.get(d))
  arg_attrs.extend([ir.DictAttr.get({})] * num_scratch)
  func_op.arg_attrs = ir.ArrayAttr.get(arg_attrs)
  try:
    func_op.verify()
  except Exception as e:
    raise ValueError(
        f"Body failed to verify: {func_op}.\nThis is an internal error."
        " Please report a bug at:"
        " https://github.com/jax-ml/jax/issues/new?assignees=sharadmv."
    ) from e
  return func_op
# By default rules registered through this partial apply to both SparseCore
# subcores; individual rules below narrow ``kernel_types`` where needed.
register_lowering_rule = functools.partial(
    tc_lowering.register_lowering_rule,
    kernel_types=(
        tpu_core.CoreType.SC_SCALAR_SUBCORE,
        tpu_core.CoreType.SC_VECTOR_SUBCORE,
    ),
)
@register_lowering_rule(state_primitives.get_p)
def _get_lowering_rule(ctx: LoweringRuleContext, ref, *flat_transforms, tree):
  """Lowers ``get_p`` by delegating to the load rule with no mask."""
  # A plain ``ref[...]`` read is simply an unmasked load.
  return _load_lowering_rule(ctx, ref, None, *flat_transforms, tree=tree)
def _load_lowering_rule(
    ctx: LoweringRuleContext, ref, mask, *flat_transforms, tree
):
  """Lowers a (possibly masked) load from ``ref``.

  Scalar (0-d) results become ``memref.load``; vector results become
  ``tpu.vector_load``.

  Raises:
    NotImplementedError: for loads from HBM/VMEM_SHARED, strided slices,
      masked scalar loads, non-scalar loads from SMEM, or unsupported
      output shapes/dtypes.
  """
  ref_aval, *_flat_index_avals = ctx.avals_in
  assert isinstance(ref_aval, state.AbstractRef)
  [out_aval] = ctx.avals_out
  assert isinstance(out_aval, jax_core.ShapedArray)
  if (
      (ref_memory_space := ref_aval.memory_space) is MemorySpace.HBM or
      ref_memory_space is MemorySpace.VMEM_SHARED
  ):
    raise NotImplementedError(
        f"Get does not support loading from {ref_memory_space.name}."
        " Copy the data to a core-local memory space, e.g. VMEM,"
        " via `pltpu.async_copy`."
    )
  transforms = list(tree_util.tree_unflatten(tree, flat_transforms))
  if not transforms or not isinstance(transforms[-1], indexing.NDIndexer):
    # Ensure the transform chain ends with an indexer so that ``starts``
    # can always be split off below.
    tref_aval = state.transform_type(transforms, ref_aval)
    assert isinstance(tref_aval, state.AbstractRef)
    transforms.append(indexing.NDIndexer.make_trivial_indexer(tref_aval.shape))
  *prev_transforms, indexer = transforms
  ref_block_shape, *_ = ctx.block_shapes
  ref, ref_block_shape = _transform_ref(
      ref, ref_aval, ref_block_shape, prev_transforms
  )
  starts, sizes, strides, _, _ = tc_lowering._indexer_to_start_size_stride(
      indexer, ref_block_shape, cast_to_index=True
  )
  del sizes  # Currently unused.
  if not all(s == 1 for s in strides):
    # Fixed: this message was missing the f-prefix and printed the literal
    # text "{strides}".
    raise NotImplementedError(
        f"Get only supports slices with stride 1, got {strides}"
    )
  if not out_aval.ndim:
    if mask is not None:
      raise NotImplementedError("Get does not support masked scalar loads")
    return memref.load(ref, starts)
  if ref_memory_space is MemorySpace.SMEM:
    raise NotImplementedError("Get can only load scalars from SMEM")
  else:
    _check_aval_is_supported("Get", out_aval)
  vec_type = ir.VectorType.get(
      out_aval.shape, _dtype_to_ir_type(ref_aval.dtype)
  )
  return tpu.vector_load(vec_type, ref, indices=starts, strides=[], mask=mask)
@register_lowering_rule(state_primitives.swap_p)
def _swap_lowering_rule(
    ctx: LoweringRuleContext, ref, val, *flat_transforms, tree
):
  """Lowers ``swap_p`` as an unmasked, non-accumulating store."""
  # ``swap`` is a store that returns the previous value: no mask, no add.
  return _store_lowering_rule(
      ctx, ref, val, None, *flat_transforms, tree=tree, add=False
  )
def _store_lowering_rule(
    ctx: LoweringRuleContext, ref, val, mask, *flat_transforms, tree, add
):
  """Lowers a (possibly masked, possibly accumulating) store to ``ref``.

  Returns the previous value at the stored location (swap semantics):
  ``memref.load``/``memref.store`` for scalars, ``tpu.vector_load``/
  ``tpu.vector_store`` otherwise.

  Raises:
    NotImplementedError: for stores to HBM/VMEM_SHARED, strided slices,
      masked or accumulating scalar stores, non-scalar stores to SMEM, or
      unsupported value shapes/dtypes.
  """
  ref_aval, _, *_flat_index_avals = ctx.avals_in
  assert isinstance(ref_aval, state.AbstractRef)
  [out_aval] = ctx.avals_out
  assert isinstance(out_aval, jax_core.ShapedArray)
  if (
      (ref_memory_space := ref_aval.memory_space) is MemorySpace.HBM or
      ref_memory_space is MemorySpace.VMEM_SHARED
  ):
    raise NotImplementedError(
        f"Swap does not support storing to {ref_memory_space.name}."
        " Copy the data to a core-local memory space, e.g. VMEM,"
        " via `pltpu.async_copy`."
    )
  transforms = list(tree_util.tree_unflatten(tree, flat_transforms))
  if not transforms or not isinstance(transforms[-1], indexing.NDIndexer):
    # Ensure the transform chain ends with an indexer so that ``starts``
    # can always be split off below.
    tref_aval = state.transform_type(transforms, ref_aval)
    assert isinstance(tref_aval, state.AbstractRef)
    transforms.append(indexing.NDIndexer.make_trivial_indexer(tref_aval.shape))
  *prev_transforms, indexer = transforms
  ref_block_shape, *_ = ctx.block_shapes
  ref, ref_block_shape = _transform_ref(
      ref, ref_aval, ref_block_shape, prev_transforms
  )
  starts, sizes, strides, _, _ = tc_lowering._indexer_to_start_size_stride(
      indexer, ref_block_shape, cast_to_index=True
  )
  del sizes  # Currently unused.
  if not all(s == 1 for s in strides):
    # Fixed: this message was missing the f-prefix and printed the literal
    # text "{strides}".
    raise NotImplementedError(
        f"Swap only supports slices with stride 1, got {strides}"
    )
  if not out_aval.ndim:
    if mask is not None:
      raise NotImplementedError("Swap does not support masked scalar stores")
    if add:
      # TODO(slebedev): We can use memref.atomic_rmw here, but the SC compiler
      # doesn't support it yet.
      raise NotImplementedError("Swap does not support atomic scalar adds")
    old_val = memref.load(ref, starts)
    memref.store(val, ref, starts)
    return old_val
  if ref_memory_space is MemorySpace.SMEM:
    raise NotImplementedError("Swap can only store scalars to SMEM")
  else:
    _check_aval_is_supported("Swap", out_aval)
  old_val = tpu.vector_load(vec_type := ir.VectorType.get(
      out_aval.shape, _dtype_to_ir_type(ref_aval.dtype)
  ), ref, starts, strides=[], mask=mask)
  del vec_type  # Only needed to type the load above.
  tpu.vector_store(val, ref, starts, strides=[], mask=mask, add=add)
  return old_val
@register_lowering_rule(lax.iota_p,
                        kernel_types=[tpu_core.CoreType.SC_VECTOR_SUBCORE])
def _iota_lowering_rule_sc(ctx: LoweringRuleContext, dtype, shape, dimension,
                           sharding):
  """Lowers ``lax.iota`` to ``tpu.iota`` on the SC vector subcore.

  Only a single full vector of ``num_lanes`` elements is supported; the
  ``dtype`` and ``sharding`` parameters are unused here (the output aval
  carries the dtype).
  """
  sc_info = sc_core.get_sparse_core_info()
  if shape != (sc_info.num_lanes,):
    raise ValueError(
        f"Unsupported iota shape for SC vector subcore. Got {shape}, supported "
        f"shape is {(sc_info.num_lanes,)}."
    )
  [out_aval] = ctx.avals_out
  out_type = ir.VectorType.get(
      [sc_info.num_lanes], _dtype_to_ir_type(out_aval.dtype)
  )
  return tpu.iota(out_type, dimensions=[dimension])
def _check_aval_is_supported(caller: str, aval: jax_core.ShapedArray) -> None:
  """Raises ``NotImplementedError`` unless ``aval`` has a shape SparseCore
  supports for its dtype; ``caller`` names the operation in the message."""
  ok_shapes = sc_core.supported_shapes(aval.dtype)
  if aval.shape in ok_shapes:
    return
  if not ok_shapes:
    raise NotImplementedError(f"{caller} does not support {aval.dtype} arrays")
  shapes_list = ", ".join(map(repr, ok_shapes))
  raise NotImplementedError(
      f"{caller} only supports {aval.dtype} arrays of shapes"
      f" [{shapes_list}], got {aval.shape}"
  )
@register_lowering_rule(debugging.debug_print_p)
def _debug_print_lowering_rule(
    ctx: LoweringRuleContext,
    *args,
    fmt: str,
    ordered,
    partitioned,
    in_tree,
    static_args,
    np_printoptions,
    has_placeholders,
    logging_record,
):
  """Lowers ``pl.debug_print`` to ``tpu.log``/``tpu.log_buffer``.

  Only unordered prints without placeholders and with at most one input are
  supported. ``logging_record`` is accepted but unused here.
  """
  del partitioned, np_printoptions, in_tree, static_args
  def fail(reason: str) -> NoReturn:
    # Centralizes the error message for the unsupported variants.
    raise NotImplementedError(
        f"pl.debug_print() {reason} when lowering to SparseCore"
    )
  if ordered:
    fail("does not support ordered print")
  if has_placeholders:
    fail("does not support placeholders")
  match args:
    case []:
      tpu.log(inputs=[], tag=fmt)
    case [arg] if isinstance(arg.type, ir.MemRefType):
      # A ref argument: log the whole buffer.
      tpu.log_buffer(arg, ctx.avals_in[0].shape, fmt)  # pytype: disable=attribute-error
    case [arg]:
      tpu.log(inputs=[arg], tag=fmt)
    case _:
      fail("does not support multiple inputs")
  return []
def _memref_memory_space(ref: ir.Value) -> MemorySpace:
  """Returns the Pallas ``MemorySpace`` of the memref value ``ref``.

  Raises:
    LookupError: if the memref's memory-space attribute is not recognized.
  """
  space = str(ir.MemRefType(ref.type).memory_space)
  if space == "#tpu.memory_space<hbm>":
    return MemorySpace.HBM
  if space == "#tpu.memory_space<vmem>":
    return MemorySpace.VMEM
  if space == "#tpu.memory_space<vmem_shared>":
    return MemorySpace.VMEM_SHARED
  if space == "#tpu.memory_space<smem>":
    return MemorySpace.SMEM
  raise LookupError(f"Unknown memory space: {ref.type}")
def _prepare_dma_refs(
    src_ref,
    src_transforms,
    dst_ref,
    dst_transforms,
    src_aval,
    dst_aval,
    is_add: bool = False,
):
  """Prepares the DMA source and destination references.

  Applies the ref transforms to both sides and, for HBM/VMEM_SHARED <->
  VMEM copies, splits a leading dynamic indexer off into
  ``indirect_offsets`` (gather when the source is indexed, scatter when the
  destination is).

  Returns:
    ``(src_ref, dst_ref, indirect_offsets)``; ``indirect_offsets`` is
    ``None`` for a regular (non-indirect) DMA.

  Raises:
    ValueError: if the wrong side of the copy is indexed, or ``is_add`` is
      set for an unsupported memory-space combination.
    NotImplementedError: for indirect copies between unsupported memory
      spaces, or ``is_add`` without indirect offsets.
  """
  src_memory_space = _memref_memory_space(src_ref)
  dst_memory_space = _memref_memory_space(dst_ref)
  match src_memory_space, dst_memory_space:
    case MemorySpace.HBM | MemorySpace.VMEM_SHARED, MemorySpace.VMEM:
      # Gather: only the source may carry indirect offsets.
      if _has_indirect_offsets(dst_transforms):
        raise ValueError(
            "Only the source ref can be indexed when doing a gather via"
            " `pltpu.async_copy`"
        )
      dst_ref, _ = _transform_ref(
          dst_ref, dst_aval, dst_aval.shape, dst_transforms
      )
      # The destination shape determines the expected indexer shape.
      dst_ref_shape = ir.MemRefType(dst_ref.type).shape
      indirect_offsets, src_transforms = _extract_indirect_offsets(
          src_transforms, tuple(dst_ref_shape)
      )
      src_ref, _ = _transform_ref(
          src_ref, src_aval, src_aval.shape, src_transforms
      )
      indirect_offsets_ref_str = "src_ref"
    case MemorySpace.VMEM, MemorySpace.HBM | MemorySpace.VMEM_SHARED:
      # Scatter: only the destination may carry indirect offsets.
      if _has_indirect_offsets(src_transforms):
        raise ValueError(
            "Only the destination ref can be indexed when doing a scatter via"
            " `pltpu.async_copy`"
        )
      src_ref, _ = _transform_ref(
          src_ref, src_aval, src_aval.shape, src_transforms
      )
      # The source shape determines the expected indexer shape.
      src_ref_shape = ir.MemRefType(src_ref.type).shape
      indirect_offsets, dst_transforms = _extract_indirect_offsets(
          dst_transforms, tuple(src_ref_shape)
      )
      dst_ref, _ = _transform_ref(
          dst_ref, dst_aval, dst_aval.shape, dst_transforms
      )
      indirect_offsets_ref_str = "dst_ref"
    case _:  # Indirect DMA is not supported.
      if (
          # fmt: off
          _has_indirect_offsets(src_transforms) or
          _has_indirect_offsets(dst_transforms)
          # fmt: on
      ):
        raise NotImplementedError(
            "Scatter/gather via `pltpu.async_copy` from"
            f" {src_memory_space.name} to {dst_memory_space.name} is not"
            " supported"
        )
      if is_add:
        raise ValueError(
            "DMAs with `add=True` are only supported between VMEM and "
            f"HBM/VMEM_SHARED. "
            f"Got (src, dst)={(src_aval.memory_space, dst_aval.memory_space)}"
        )
      src_ref, _ = _transform_ref(
          src_ref, src_aval, src_aval.shape, src_transforms
      )
      dst_ref, _ = _transform_ref(
          dst_ref, dst_aval, dst_aval.shape, dst_transforms
      )
      indirect_offsets = None
      indirect_offsets_ref_str = ""
  # Accumulating DMAs require indirect offsets on the indexed side.
  if is_add and indirect_offsets is None:
    raise NotImplementedError(
        "DMAs with `add=True` must (for now) specify offsets of the"
        " majormost dimension. You can do this by writing"
        " `pltpu.async_copy(..., {ref}={ref}.at[jnp.arange(vec_dim)], ...)`"
        " or `pltpu.async_copy(..., {ref}={ref}.at[indices_ref],"
        " ...)`.".format(ref=indirect_offsets_ref_str)
    )
  return src_ref, dst_ref, indirect_offsets
# TODO(slebedev): Use the TC rule once we align the ``LoweringRuleContext``
# with the TC lowering.
@register_lowering_rule(tpu_primitives.dma_start_p)
def _dma_start_lowering_rule(
    ctx: LoweringRuleContext,
    *args,
    tree,
    device_id_type: pallas_primitives.DeviceIdType,
    priority: int,
    add: bool,
):
  """Lowers ``dma_start_p`` to a regular or indirect DMA enqueue.

  Indirect DMAs (scatter/gather) are used when ``_prepare_dma_refs``
  extracts dynamic offsets; they do not support remote device ids or
  priorities.
  """
  (
      src_ref,
      src_transforms,
      dst_ref,
      dst_transforms,
      sem,
      sem_transforms,
      src_sem,
      src_sem_transforms,
      device_id,
  ) = tpu_primitives._dma_unflatten(tree, args)
  src_aval, _, dst_aval, _, sem_aval, _, src_sem_aval, _, _ = (
      tpu_primitives._dma_unflatten(tree, ctx.avals_in)
  )
  src_ref, dst_ref, indirect_offsets = _prepare_dma_refs(
      src_ref, src_transforms, dst_ref, dst_transforms, src_aval, dst_aval, add
  )
  if add and indirect_offsets is None:
    # TODO: Support regular DMA with add=True.
    # NOTE(review): ``_prepare_dma_refs`` already raises for this
    # combination, so this branch appears unreachable — confirm before
    # relying on it.
    raise NotImplementedError(
        "DMAs with `add=True` must (for now) specify offsets of the majormost "
        "dimension. You can do this by writing "
        "`pltpu.async_copy(..., dst_ref=ref.at[jnp.arange(vec_dim)], ...)` or "
        "`pltpu.async_copy(..., dst_ref=ref.at[iota_ref], ...)`."
    )
  sem, _ = _transform_ref(sem, sem_aval, sem_aval.shape, sem_transforms)
  if src_sem is not None:
    src_sem, _ = _transform_ref(
        src_sem, src_sem_aval, src_sem_aval.shape, src_sem_transforms
    )
  # If not ``None``, we lower to an indirect DMA instead.
  if indirect_offsets is None:
    if device_id is not None:
      device_id, _ = tc_lowering._device_id_to_logical(
          ctx, device_id, device_id_type
      )
    tpu.enqueue_dma(
        src_ref,
        dst_ref,
        sem,
        source_semaphore=src_sem,
        device_id=device_id,
        priority=priority,
    )
    return []
  if device_id is not None:
    raise NotImplementedError(
        "Scatter/gather to or from a remote device via `pltpu.async_copy` is"
        " not supported"
    )
  del priority  # Unused by indirect DMAs.
  tpu.enqueue_indirect_dma(src_ref, dst_ref, indirect_offsets, sem, add=add)
  return []
# TODO(slebedev): Use the TC rule once we align the ``LoweringRuleContext``
# with the TC lowering.
@register_lowering_rule(tpu_primitives.dma_wait_p)
def _dma_wait_lowering_rule(
    ctx: LoweringRuleContext,
    *args,
    tree,
    device_id_type: pallas_primitives.DeviceIdType,
):
  """Lowers ``dma_wait_p``; mirrors the ref preparation of ``dma_start``.

  The refs are re-prepared here so the wait matches the corresponding
  enqueue (regular vs. indirect).
  """
  (
      src_ref,
      src_transforms,
      dst_ref,
      dst_transforms,
      sem,
      sem_transforms,
      _,
      _,
      device_id,
  ) = tpu_primitives._dma_unflatten(tree, args)
  src_aval, _, dst_aval, _, sem_aval, _, _, _, _ = (
      tpu_primitives._dma_unflatten(tree, ctx.avals_in)
  )
  src_ref, dst_ref, indirect_offsets = _prepare_dma_refs(
      src_ref, src_transforms, dst_ref, dst_transforms, src_aval, dst_aval,
  )
  sem, _ = _transform_ref(sem, sem_aval, sem_aval.shape, sem_transforms)
  # If not ``None``, we lower to an indirect DMA instead of a regular DMA.
  if indirect_offsets is None:
    if device_id is not None:
      device_id, _ = tc_lowering._device_id_to_logical(
          ctx, device_id, device_id_type
      )
    tpu.wait_dma2(sem, src_ref, dst_ref, device_id=device_id)
    return []
  if device_id is not None:
    raise NotImplementedError(
        "Scatter/gather to or from a remote device via `pltpu.async_copy` is"
        " not supported"
    )
  tpu.wait_indirect_dma(sem, src_ref, dst_ref)
  return []
def _extract_indirect_offsets_from_indexer(
    indexer: indexing.NDIndexer, expected_shape: tuple[int, ...] | None = None
) -> ir.Value | None:
  """Extracts dynamic (vector- or ref-valued) leading offsets from ``indexer``.

  Returns ``None`` when the leading index is not dynamic, i.e. this indexer
  does not describe a scatter/gather. Only the leading index may be
  dynamic; the trailing indices must be trivial.

  Raises:
    NotImplementedError: on shape mismatch with ``expected_shape``,
      non-int32 ref-valued indices, offsets outside VMEM, or non-trivial
      trailing indices.
  """
  offsets_ref: Any  # Make mypy happy.
  match indexer.indices:
    case [ir.Value() as offsets, *_] if (
        # fmt: off
        isinstance(offsets.type, ir.MemRefType) or
        isinstance(offsets.type, ir.VectorType)
    ):  # fmt: on
      # The offsets were already lowered to an MLIR vector/memref value.
      shape = (*offsets.type.shape, *indexer.shape[offsets.type.rank :])
      if expected_shape is not None and shape != expected_shape:
        raise NotImplementedError(
            "The indexer shape in scatter/gather via `pltpu.async_copy` does"
            f" not match the expected shape. Want: {expected_shape}, got:"
            f" {shape}."
        )
    case [state.TransformedRef() as offsets_ref, *_]:
      # The offsets live in a ref; apply its transforms to get the value.
      offsets_type = ir.MemRefType(offsets_ref.ref.type)
      if offsets_type.element_type != ir.IntegerType.get_signless(32):
        raise NotImplementedError(
            "Only int32 indices are supported by scatter/gather via"
            " `pltpu.async_copy` with a dynamically-shaped indexer"
        )
      offsets_ref_aval = state.AbstractRef(
          inner_aval=jax_core.ShapedArray(
              dtype=jnp.dtype("int32"),
              shape=tuple(offsets_type.shape),
          ),
          memory_space=None,
      )
      offsets, _ = _transform_ref(
          offsets_ref.ref,
          offsets_ref_aval,
          offsets_type.shape,  # The shape before the indexing.
          offsets_ref.transforms,
      )
    case _:
      return None
  if isinstance(offsets.type, ir.MemRefType):
    offsets_memory_space = _memref_memory_space(offsets)
    if offsets_memory_space is not MemorySpace.VMEM:
      raise NotImplementedError(
          "Indices for scatter/gather via `pltpu.async_copy` must be in VMEM,"
          f" got {offsets_memory_space.name}"
      )
  # The trailing (non-major) indices must be trivial.
  if not state_discharge._is_trivial_indexer(
      indexing.NDIndexer(indexer.indices[1:], indexer.shape[1:], ())
  ):
    # TODO(slebedev): Consider lifting this restriction.
    raise NotImplementedError(
        "Only indexing along the major dimension is supported in scatter/gather"
        " via `pltpu.async_copy`"
    )
  return offsets
def _extract_indirect_offsets(
    transforms: Sequence[state.Transform], expected_shape: tuple[int, ...]
) -> tuple[ir.Value | None, Sequence[state.Transform]]:
  """Finds the indirect-offset indexer in ``transforms``, if any.

  Returns the extracted offsets together with the transforms preceding the
  indexer, or ``(None, transforms)`` when no dynamic indexer is present.
  The indexer must be the last transform.
  """
  last_pos = len(transforms) - 1
  for pos, transform in enumerate(transforms):
    if not isinstance(transform, indexing.NDIndexer):
      continue
    found = _extract_indirect_offsets_from_indexer(transform, expected_shape)
    if found is None:
      continue
    if pos != last_pos:
      raise NotImplementedError(
          "The indexed ref in scatter/gather via `pltpu.async_copy` cannot have"
          " any transforms following the indexer"
      )
    return found, transforms[:pos]
  return None, transforms
def _has_indirect_offsets(transforms: Sequence[state.Transform]) -> bool:
  """Returns True if any ``NDIndexer`` in ``transforms`` carries dynamic
  (vector/memref) offsets, i.e. describes a scatter/gather."""
  # NOTE: the annotation previously said ``Sequence[ir.Value]``, but the
  # elements are ref transforms (cf. ``_extract_indirect_offsets`` above).
  return any(
      _extract_indirect_offsets_from_indexer(indexer) is not None
      for indexer in transforms
      if isinstance(indexer, indexing.NDIndexer)
  )
@register_lowering_rule(pallas_primitives.run_scoped_p)
def _run_scoped_lowering_rule(
    ctx: LoweringRuleContext, *consts, jaxpr, collective_axes
):
  """Lowers ``run_scoped`` via the TC rule with the SparseCore allocator."""
  tc_rule = tc_lowering._run_scoped_lowering_rule
  return tc_rule(
      ctx,
      *consts,
      jaxpr=jaxpr,
      collective_axes=collective_axes,
      alloc_fn=_alloc_value,
  )
@register_lowering_rule(pallas_primitives.jaxpr_call_p)
def _jaxpr_call_lowering_rule(
    ctx: LoweringRuleContext,
    *flat_args,
    jaxpr: jax_core.Jaxpr,
    ref_treedefs,
    program_ids_treedef,
):
  """Lowers ``jaxpr_call_p`` by inlining ``jaxpr`` into the current function.

  Unflattens the ref arguments per ``ref_treedefs``, applies any attached
  ref transforms, substitutes missing program ids with the caller's grid
  indices, and lowers the callee jaxpr with the adjusted context.
  """
  args = []
  # Split the flat avals/refs/block shapes into one group per treedef.
  flat_ref_avals, _ = util.split_list(
      ctx.avals_in, [sum(treedef.num_leaves for treedef in ref_treedefs)]
  )
  flat_ref_avals = util.split_list(
      flat_ref_avals,
      [treedef.num_leaves for treedef in ref_treedefs[: len(ref_treedefs) - 1]],
  )
  flat_refs, flat_program_ids = util.split_list(
      flat_args, [sum(treedef.num_leaves for treedef in ref_treedefs)]
  )
  flat_refs = util.split_list(
      flat_refs,
      [treedef.num_leaves for treedef in ref_treedefs[: len(ref_treedefs) - 1]],
  )
  flat_block_shapes, _ = util.split_list(
      ctx.block_shapes, [sum(treedef.num_leaves for treedef in ref_treedefs)]
  )
  flat_block_shapes = util.split_list(
      flat_block_shapes,
      [treedef.num_leaves for treedef in ref_treedefs[: len(ref_treedefs) - 1]],
  )
  ref_block_shapes = []
  for treedef, flat_ref, flat_ref_aval, flat_block_shape in zip(
      ref_treedefs, flat_refs, flat_ref_avals, flat_block_shapes
  ):
    ref = treedef.unflatten(flat_ref)
    ref_aval = treedef.unflatten(flat_ref_aval)
    block_shape = treedef.unflatten(flat_block_shape)
    if isinstance(ref, tuple):
      # We ignore other transforms here, because they are already embedded
      # in the jaxpr.
      ref, transforms = ref
      ref_aval, _ = ref_aval
      block_shape, _ = block_shape
      assert isinstance(ref_aval, state.AbstractRef)
      ref, block_shape = _transform_ref(ref, ref_aval, block_shape, transforms)
    ref_block_shapes.append(block_shape)
    args.append(ref)
  user_grid_indices = ctx.lowering_context.user_grid_indices
  assert user_grid_indices is not None
  program_ids = program_ids_treedef.unflatten(flat_program_ids)
  # ``None`` entries stand for axes handled by the surrounding grid.
  for axis, pid in enumerate(program_ids):
    if pid is None:
      program_ids[axis] = user_grid_indices[axis]
  new_lowering_ctx = dataclasses.replace(
      ctx.lowering_context,
      block_shapes=tuple(ref_block_shapes),  # type: ignore
      user_grid_indices=program_ids,
  )
  return tc_lowering.jaxpr_subcomp(new_lowering_ctx, jaxpr, *args)
@register_lowering_rule(jax_core.empty_ref_p)
def _empty_ref_lowering_rule(ctx: LoweringRuleContext, ty, memory_space):
  """Allocates an uninitialized ref for ``empty_ref_p``."""
  del ty, memory_space  # The output aval already carries this information.
  out_aval, = ctx.avals_out
  return _alloc_value(out_aval, ctx=ctx)  # pytype: disable=wrong-arg-types
@register_lowering_rule(
    lax.sort_p, kernel_types=[tpu_core.CoreType.SC_VECTOR_SUBCORE]
)
def _sort_lowering_rule(
    ctx: LoweringRuleContext, *xs, dimension, is_stable, num_keys
):
  """Lowers ``lax.sort`` to ``tpu.sort`` on the SC vector subcore.

  Supports exactly one key operand plus any number of value operands, all
  of shape ``(num_lanes,)``. Returns the sorted key followed by the values
  permuted accordingly.
  """
  del is_stable  # Unused, always stable.
  if dimension not in (0, -1):
    raise ValueError(f"Unsupported dimension: {dimension}")
  if num_keys != 1:
    raise NotImplementedError("Multiple sort keys not supported")
  sc_info = sc_core.get_sparse_core_info()
  supported_shape = (sc_info.num_lanes,)
  for i, aval in enumerate(ctx.avals_in):
    if aval.shape != supported_shape:
      raise NotImplementedError(
          f"Unsupported shape for operand {i} of SC sort: Got {aval.shape}, "
          f"expected {supported_shape}"
      )
  keys = xs[0]
  values = xs[1:]
  # All-true mask: every lane participates in the sort.
  mask_type = ir.VectorType.get(
      [sc_info.num_lanes], ir.IntegerType.get_signless(1))
  mask = arith.constant(mask_type, ir.DenseElementsAttr.get_splat(
      mask_type, ir.BoolAttr.get(True)))
  if not values:
    # Keys only: sort the keys against themselves.
    _, sorted_keys, _ = tpu.sort(
        mask_type, keys.type, keys.type, keys, keys, mask=mask
    )
    return (sorted_keys,)
  results: list[ir.Value] = []
  for value in values:
    _, sorted_keys, sorted_value = tpu.sort(
        mask_type, keys.type, value.type, keys, value, mask=mask
    )
    if not results:
      # Emit the sorted keys once, from the first key/value sort.
      results.append(sorted_keys)
    results.append(sorted_value)
  return tuple(results)
@register_lowering_rule(
    lax.gather_p, kernel_types=[tpu_core.CoreType.SC_VECTOR_SUBCORE]
)
def _gather_lowering_rule(
    ctx: LoweringRuleContext,
    x,
    indices,
    *,
    dimension_numbers,
    slice_sizes,
    unique_indices,
    indices_are_sorted,
    mode,
    fill_value,
):
  """Lowers ``lax.gather_p`` to ``tpu.dynamic_gather`` on the SC vector subcore.

  Only the 1D gather pattern produced by ``jnp.take_along_axis`` (size-1
  slices, in-bounds indices) is supported.
  """
  in_aval, indices_aval = ctx.avals_in
  out_aval, = ctx.avals_out
  if len(in_aval.shape) != 1:
    raise NotImplementedError("Only 1D gather is supported")
  # All three shapes must agree. Note: the previous chained comparison
  # (`a != b != c`) only checked adjacent pairs, so a mismatch was silently
  # accepted whenever either pair happened to be equal.
  if not (in_aval.shape == indices_aval.shape[:-1] == out_aval.shape):
    raise ValueError(
        "Shape mismatch in input, indices and output:"
        f" {in_aval.shape}, {indices_aval.shape[:-1]}, {out_aval.shape}"
    )
  # During lowering jnp.take_along_axis to lax.gather, we append extra dimension
  # to the end of the indices array. We should reshape it back to the original
  # shape before lowering to Mosaic and rely on MLIR canonicalization to remove
  # the reshapes.
  assert indices_aval.shape == in_aval.shape + (1,)
  recovered_indices = vector.shape_cast(
      ir.VectorType.get(in_aval.shape, indices.type.element_type),
      indices,
  )
  # Note: current support for lax.gather is still very limited.
  del fill_value
  if slice_sizes == (1,) and mode == lax.GatherScatterMode.PROMISE_IN_BOUNDS:
    if dimension_numbers == lax.GatherDimensionNumbers(
        offset_dims=(),
        collapsed_slice_dims=(0,),
        start_index_map=(0,),
        operand_batching_dims=(),
        start_indices_batching_dims=(),
    ):
      return tpu.dynamic_gather(x, recovered_indices, [0])
  raise NotImplementedError("Unsupported gather")
@register_lowering_rule(
    lax.rev_p, kernel_types=[tpu_core.CoreType.SC_VECTOR_SUBCORE]
)
def _rev_lowering_rule(ctx: LoweringRuleContext, x, dimensions):
  """Lowers ``lax.rev_p`` as a dynamic gather with reversed lane indices."""
  del ctx  # Unused.
  if dimensions != (0,):
    raise NotImplementedError(f"Invalid dimensions for SC lax.rev: {dimensions}")
  int32 = ir.IntegerType.get_signless(32)
  num_lanes = sc_core.get_sparse_core_info().num_lanes
  # Broadcast the largest lane index; (num_lanes - 1) - iota then yields the
  # reversed index vector [n-1, n-2, ..., 0].
  last_idx = arith.constant(int32, ir.IntegerAttr.get(int32, num_lanes - 1))
  last_idx_vec = vector.broadcast(
      ir.VectorType.get((num_lanes,), last_idx.type), last_idx)
  reversed_indices = arith.subi(
      last_idx_vec, tpu.iota(last_idx_vec.type, dimensions=[0]))
  return tpu.dynamic_gather(x, reversed_indices, dimensions=[0])
def _default_tile_strides(
    tiling: sc_core.Tiling, shape: Sequence[int]
) -> Sequence[int]:
  """Returns default (row-major) tile strides for a given shape and tiling."""
  assert tiling
  primary_tile, *_ = tiling
  untiled_rank = len(shape) - len(primary_tile)
  strides = [0] * len(shape)
  running = 1
  # Walk dimensions minor-to-major, accumulating the stride as we go.
  for dim in range(len(shape) - 1, -1, -1):
    assert shape[dim] != ir.ShapedType.get_dynamic_size()
    strides[dim] = running
    if dim >= untiled_rank:
      # Tiled dimension: the stride advances in whole tiles (ceil division).
      tile = primary_tile[dim - untiled_rank]
      running *= -(-shape[dim] // tile)
    else:
      running *= shape[dim]
  return strides
def _alloc_value(
    aval: jax_core.AbstractValue | tc_lowering.ShapedAbstractValue, *, ctx: LoweringRuleContext
) -> ir.Value:
  """Allocates an IR value for ``aval``.

  Tiled SparseCore refs get a dedicated ``memref.alloca`` carrying an explicit
  ``#tpu.tiled`` layout; everything else defers to the TensorCore allocator.
  """
  if isinstance(aval, sc_core.AbstractRef) and aval.tiling is not None:
    # Render the tiling as e.g. "(8,128)(1,1)" for the #tpu.tiled attribute.
    tiling = "".join(f"({','.join(map(str, tile))})" for tile in aval.tiling)
    strides = _default_tile_strides(aval.tiling, aval.shape)
    out_type = ir.MemRefType.get(
        aval.shape,
        _dtype_to_ir_type(aval.dtype, is_kernel_boundary=True),
        layout=ir.Attribute.parse(f"#tpu.tiled<{tiling},{strides}>"),
        memory_space=tc_lowering._memory_space_to_mosaic_attribute(
            aval.memory_space,
            kernel_type=ctx.lowering_context.kernel_type,
        ),
    )
    return memref.alloca(out_type, [], [])
  return tc_lowering._alloc_value(aval, ctx=ctx)
| {
"repo_id": "jax-ml/jax",
"file_path": "jax/_src/pallas/mosaic/sc_lowering.py",
"license": "Apache License 2.0",
"lines": 1164,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:jax/_src/pallas/mosaic/sc_primitives.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pallas primitives for SparseCore."""
from collections.abc import Callable, Sequence
import enum
import functools
from typing import overload, TypeAlias, TypeVar
import jax
from jax import api_util
from jax import lax
from jax._src import core as jax_core
from jax._src import dtypes
from jax._src import effects
from jax._src import linear_util as lu
from jax._src.api_util import check_no_transformed_refs_args
from jax._src.interpreters import partial_eval as pe
from jax._src.lib.mlir import ir
from jax._src.lib.mlir.dialects import arith
from jax._src.lib.mlir.dialects import scf
from jax._src.lib.mlir.dialects import vector
from jax._src.pallas import core as pallas_core
from jax._src.pallas import primitives as pallas_primitives
from jax._src.pallas.mosaic import core as tpu_core
from jax._src.pallas.mosaic import lowering as tc_lowering
from jax._src.pallas.mosaic import sc_core
from jax._src.pallas.mosaic import sc_lowering
from jax._src.state import indexing
from jax._src.state import primitives as state_primitives
from jax._src.state import types as state_types
from jax.experimental.mosaic.dialects import tpu
import jax.numpy as jnp
# Shorthand for the TensorCore helper that coerces Python values to MLIR values.
_ensure_ir_value = tc_lowering._ensure_mlir_value
TransformedRef: TypeAlias = state_types.TransformedRef
# The public API accepts refs either bare or already wrapped with transforms.
Ref: TypeAlias = state_types.AbstractRef | TransformedRef
_T = TypeVar("_T")
# Primitive for (masked) loads from a ref: (ref, *transforms, mask?) -> array.
load_p = jax_core.Primitive("load")
load_p.is_effectful = lambda params: True  # type: ignore
@load_p.def_effectful_abstract_eval
def _load_abstract_eval(ref, *args, has_mask, tree):
  """Abstract eval for ``load_p``: shape/dtype checks plus a read effect."""
  # The trailing positional arg is the mask (when present); the rest are the
  # flattened ref transforms described by ``tree``.
  flat_transforms = args[:-1] if has_mask else args
  tref = state_types.TransformedRef(
      ref, jax.tree.unflatten(tree, flat_transforms))
  if has_mask:
    mask = args[-1]
    if mask.dtype != jnp.bool:
      raise TypeError(f"Mask must be a boolean array, got {mask.dtype}")
    if mask.shape != tref.shape:
      raise ValueError(f"Mask must have shape {tref.shape}, got {mask.shape}")
  return (
      jax_core.ShapedArray(tref.shape, ref.dtype), {state_types.ReadEffect(0)})
@sc_lowering.register_lowering_rule(load_p)
def _load_lowering_rule(
    ctx: sc_lowering.LoweringRuleContext, ref, *args, has_mask, tree
):
  """Forwards ``load_p`` to the SC load lowering, splitting off the mask."""
  mask = args[-1] if has_mask else None
  flat_transforms = list(args[:-1] if has_mask else args)
  return sc_lowering._load_lowering_rule(
      ctx, ref, mask, *flat_transforms, tree=tree
  )
def load_expanded(ref: Ref, *, mask: jax.Array) -> jax.Array:
  """Performs an expanded masked load from a ref.

  Elements from ``ref`` are placed into positions where ``mask`` is ``True``.
  The elements are taken from ``ref`` sequentially, meaning that the i-th
  ``True`` value in ``mask`` corresponds to accessing ``ref[i]``. The result is
  expanded into the shape of the ``mask``.

  For example, if the mask is ``[True, False, True, True]``, the result is
  ``[ref[0], <?>, ref[1], ref[2]]``, where ``<?>`` is an undefined value.
  (NOTE(review): the example previously read ``ref[2], ref[3]``, which
  contradicted the sequential-access rule above and the dual semantics
  documented on ``store_compressed``; confirm against the hardware op.)

  Args:
    ref: The ref to load from.
    mask: A boolean mask specifying which elements to load into.

  Returns:
    The loaded array, with the same shape as the mask. No assumptions can be
    made about the elements at the indices where the mask is ``False``.
  """
  if not isinstance(ref, Ref):
    raise TypeError(f"ref must be an AbstractRef or TransformedRef, got {ref}")
  if not isinstance(ref, TransformedRef):
    ref = ref.at[...]  # type: ignore
  assert isinstance(ref, TransformedRef)
  flat_transforms, tree = jax.tree.flatten(ref.transforms)
  return load_p.bind(ref.ref, *flat_transforms, mask, has_mask=True, tree=tree)
# Primitive for (masked) stores / accumulations into a ref.
swap_p = jax_core.Primitive("swap")
swap_p.is_effectful = lambda params: True  # type: ignore
@swap_p.def_effectful_abstract_eval
def _swap_abstract_eval(ref, x, *args, has_mask, tree, add):
  """Abstract eval for ``swap_p``: validates operands; returns the stored value.

  The trailing positional arg is the mask when ``has_mask`` is set; the rest
  are the flattened ref transforms described by ``tree``.
  """
  flat_transforms = args[:-1] if has_mask else args
  tref = state_types.TransformedRef(
      ref, jax.tree.unflatten(tree, flat_transforms))
  if has_mask:
    mask = args[-1]
    if mask.dtype != jnp.bool:
      raise TypeError(f"Mask must be a boolean array, got {mask.dtype}")
    if mask.shape != tref.shape:
      raise ValueError(f"Mask must have shape {tref.shape}, got {mask.shape}")
  if ref.dtype != x.dtype:
    raise TypeError(
        f"Ref and value must have the same dtype, got {ref.dtype} and {x.dtype}"
    )
  if tref.shape != x.shape:
    raise ValueError(f"Value must have shape {tref.shape}, got {x.shape}")
  # add=True performs a read-modify-write, so it also reads the ref.
  effects: set[jax_core.Effect] = {state_types.WriteEffect(0)}
  if add:
    effects.add(state_types.ReadEffect(0))
  return x, effects
@sc_lowering.register_lowering_rule(swap_p)
def _swap_lowering_rule(
    ctx: sc_lowering.LoweringRuleContext, ref, x, *args, has_mask, tree, add
):
  """Forwards ``swap_p`` to the SC store lowering, splitting off the mask."""
  mask = args[-1] if has_mask else None
  flat_transforms = list(args[:-1] if has_mask else args)
  return sc_lowering._store_lowering_rule(
      ctx, ref, x, mask, *flat_transforms, tree=tree, add=add
  )
def store_compressed(ref: Ref, x: jax.Array, *, mask: jax.Array) -> None:
  """Performs a compressed masked store to a ref.

  The elements of ``x`` at ``True`` positions of ``mask`` are packed densely
  into the front of ``ref``: the i-th ``True`` lane writes to ``ref[i]``. For
  mask ``[True, False, True, True]`` the values ``x[0]``, ``x[2]`` and
  ``x[3]`` are stored to ``ref[0]``, ``ref[1]`` and ``ref[2]``.

  Args:
    ref: The ref to store into.
    x: The array to store. Must have the same shape as ``ref``.
    mask: A boolean mask specifying which elements from ``x`` to store.
  """
  if not isinstance(ref, (state_types.AbstractRef, TransformedRef)):
    raise TypeError(f"ref must be an AbstractRef or TransformedRef, got {ref}")
  if not isinstance(ref, TransformedRef):
    ref = ref.at[...]  # type: ignore
  assert isinstance(ref, TransformedRef)
  flat_transforms, tree = jax.tree.flatten(ref.transforms)
  swap_p.bind(
      ref.ref, x, *flat_transforms, mask, has_mask=True, tree=tree, add=False)
  return None
def addupdate(ref: Ref, x: jax.Array) -> None:
  """Performs an atomic add to a ref.

  Args:
    ref: The ref to update.
    x: The array of addends. Must have the same shape as ``ref``.
  """
  if not isinstance(ref, (state_types.AbstractRef, TransformedRef)):
    raise TypeError(f"ref must be an AbstractRef or TransformedRef, got {ref}")
  if not isinstance(ref, TransformedRef):
    ref = ref.at[...]  # type: ignore
  assert isinstance(ref, TransformedRef)
  flat_transforms, tree = jax.tree.flatten(ref.transforms)
  swap_p.bind(ref.ref, x, *flat_transforms, has_mask=False, tree=tree, add=True)
  return None
def addupdate_compressed(ref: Ref, x: jax.Array, *, mask: jax.Array) -> None:
  """Performs a masked atomic add to a ref.

  See ``store_compressed`` for details on how the mask is used.

  Args:
    ref: The ref to update.
    x: The array of addends. Must have the same shape as ``ref``.
    mask: A boolean mask selecting which elements of ``x`` participate.
  """
  if not isinstance(ref, (state_types.AbstractRef, TransformedRef)):
    raise TypeError(f"ref must be an AbstractRef or TransformedRef, got {ref}")
  if not isinstance(ref, TransformedRef):
    ref = ref.at[...]  # type: ignore
  assert isinstance(ref, TransformedRef)
  flat_transforms, tree = jax.tree.flatten(ref.transforms)
  swap_p.bind(
      ref.ref, x, *flat_transforms, mask, has_mask=True, tree=tree, add=True)
  return None
def _indexed_shape(ref: Ref, indices: Sequence[jax.Array]) -> tuple[int, ...]:
if len(indices) != ref.ndim:
raise ValueError(f"The number of indices does not match {ref.ndim=}")
prev_idx: jax.Array | None = None
for idx in indices:
if idx.ndim != 1:
raise ValueError(
f"Indices must be a 1-D array, got an index with shape {idx.shape}"
)
if prev_idx is not None and idx.size != prev_idx.size:
raise ValueError(
"Indices must have the same size, got {prev_idx.size} and {idx.size}"
)
prev_idx = idx
assert prev_idx is not None
return (prev_idx.size,)
# Primitive for indexed vector loads (gather) from VMEM.
gather_p = jax_core.Primitive("gather")
gather_p.is_effectful = lambda params: True  # type: ignore
@gather_p.def_effectful_abstract_eval
def _gather_abstract_eval(*flat_args, tree):
  """Abstract eval for ``gather_p``: dtype/shape checks plus a read effect."""
  ref, transforms, indices, mask = tree.unflatten(flat_args)
  if transforms:
    ref = state_types.TransformedRef(ref, transforms)
  if ref.dtype not in (jnp.int32, jnp.float32):
    raise TypeError(f"ref.dtype={ref.dtype} must be int32 or float32")
  # The result is 1-D with one element per index.
  out_aval = jax_core.ShapedArray(_indexed_shape(ref, indices), ref.dtype)
  sc_lowering._check_aval_is_supported("Gather", out_aval)
  if mask is not None and mask.shape != out_aval.shape:
    raise ValueError(
        f"{mask.shape=} does not match the expected shape {out_aval.shape}"
    )
  return out_aval, {state_types.ReadEffect(0)}
@sc_lowering.register_lowering_rule(gather_p)
def _gather_lowering_rule(
    ctx: sc_lowering.LoweringRuleContext, *flat_args, tree
):
  """Lowers ``gather_p`` to ``tpu.vector_load_idx``."""
  ref, transforms, indices, mask = tree.unflatten(flat_args)
  ref_aval, *_ = tree.unflatten(ctx.avals_in)
  if ref_aval.memory_space not in (tpu_core.MemorySpace.VMEM, None):
    raise ValueError(
        f"Gather only supports loading from VMEM, got {ref_aval.memory_space}"
    )
  if transforms:
    # Apply any ref transforms (e.g. slicing) before the indexed load.
    ref_block_shape, *_ = ctx.block_shapes
    ref, _ = tc_lowering._transform_ref(
        ref, ref_aval, ref_block_shape, transforms
    )
  [out_aval] = ctx.avals_out
  vec_type = ir.VectorType.get(
      out_aval.shape, sc_lowering._dtype_to_ir_type(ref_aval.dtype)
  )
  return tpu.vector_load_idx(vec_type, ref, indices, mask=mask)
def load_gather(
    ref: Ref, indices: Sequence[jax.Array], *, mask: jax.Array | None = None
) -> jax.Array:
  """Gathers an array from a ref.

  Args:
    ref: The ref in ``VMEM`` to gather from.
    indices: One 1D index array per dimension of ``ref``; all index arrays
      must have the same size.
    mask: Optional boolean array selecting which elements to load. ``None``
      loads every element.

  Returns:
    The gathered array.
  """
  base_ref, transforms = state_primitives.get_ref_and_transforms(
      ref, None, "load_gather"
  )
  operands = (base_ref, transforms, indices, mask)
  flat_args, tree = jax.tree.flatten(operands)
  return gather_p.bind(*flat_args, tree=tree)
# Primitive for indexed vector stores (scatter) into VMEM.
scatter_p = jax_core.Primitive("scatter")
scatter_p.is_effectful = lambda params: True  # type: ignore
scatter_p.multiple_results = True
@scatter_p.def_effectful_abstract_eval
def _scatter_abstract_eval(*flat_args, tree, add):
  """Abstract eval for ``scatter_p``: validates operands; produces no outputs."""
  ref, transforms, indices, x, mask = jax.tree.unflatten(tree, flat_args)
  if transforms:
    ref = state_types.TransformedRef(ref, transforms)
  if ref.dtype not in (jnp.int32, jnp.float32):
    raise TypeError(f"ref.dtype={ref.dtype} must be int32 or float32")
  expected_shape = _indexed_shape(ref, indices)
  if x.shape != expected_shape:
    raise ValueError(
        f"{x.shape=} does not match expected shape {expected_shape}"
    )
  if x.dtype != ref.dtype:
    raise TypeError(f"val.dtype={x.dtype} != ref.dtype={ref.dtype}")
  if mask is not None and mask.shape != expected_shape:
    raise ValueError(
        f"{mask.shape=} does not match expected shape {expected_shape}"
    )
  # add=True performs a read-modify-write, so it also reads the ref.
  effects: set[jax_core.Effect] = {state_types.WriteEffect(0)}
  if add:
    effects.add(state_types.ReadEffect(0))
  return (), effects
@sc_lowering.register_lowering_rule(scatter_p)
def _scatter_lowering_rule(
    ctx: sc_lowering.LoweringRuleContext, *flat_args, tree, add
):
  """Lowers ``scatter_p`` to ``tpu.vector_store_idx``."""
  ref, transforms, indices, x, mask = jax.tree.unflatten(tree, flat_args)
  ref_aval, *_ = tree.unflatten(ctx.avals_in)
  if ref_aval.memory_space not in (tpu_core.MemorySpace.VMEM, None):
    raise ValueError(
        f"Scatter only supports storing to VMEM, got {ref_aval.memory_space}"
    )
  if transforms:
    # Apply any ref transforms (e.g. slicing) before the indexed store.
    ref_block_shape, *_ = ctx.block_shapes
    ref, _ = tc_lowering._transform_ref(
        ref, ref_aval, ref_block_shape, transforms
    )
  tpu.vector_store_idx(x, ref, indices, mask=mask, add=add)
  return ()
def store_scatter(
    ref: Ref,
    indices: Sequence[jax.Array],
    x: jax.Array,
    *,
    mask: jax.Array | None = None,
) -> None:
  """Scatters an array to a ref.

  Args:
    ref: The ref in ``VMEM`` to scatter to.
    indices: A sequence of 1D arrays, one for each dimension of ``ref``. Each
      array specifies an index for that dimension. All arrays must have the same
      size.
    x: The array to store.
    mask: An optional boolean array, which specifies which elements to store. If
      ``None``, all elements are stored.
  """
  if not indices:
    raise ValueError("Indices must not be empty")
  ref, transforms = state_primitives.get_ref_and_transforms(
      ref, None, "store_scatter"
  )
  flat_args, tree = jax.tree.flatten((ref, transforms, indices, x, mask))
  _ = scatter_p.bind(*flat_args, tree=tree, add=False)
  return None
def addupdate_scatter(
    ref: Ref,
    indices: Sequence[jax.Array],
    x: jax.Array,
    *,
    mask: jax.Array | None = None,
) -> None:
  """Scatters an array to a ref, atomically adding to existing values.

  Args:
    ref: The ref in ``VMEM`` to scatter to.
    indices: A sequence of 1D arrays, one for each dimension of ``ref``. Each
      array specifies an index for that dimension. All arrays must have the
      same size.
    x: The array of addends.
    mask: An optional boolean array, which specifies which elements to update.
      If ``None``, all elements are updated.
  """
  if not indices:
    raise ValueError("Indices must not be empty")
  # Bug fix: the debug name passed here used to be "store_scatter",
  # copy-pasted from the sibling function; use this function's own name so
  # error messages point at the right API.
  ref, transforms = state_primitives.get_ref_and_transforms(
      ref, None, "addupdate_scatter"
  )
  flat_args, tree = jax.tree.flatten((ref, transforms, indices, x, mask))
  _ = scatter_p.bind(*flat_args, tree=tree, add=True)
  return None
bitcast_p = jax_core.Primitive("bitcast")
@bitcast_p.def_abstract_eval
def _bitcast_abstract_eval(x, dtype):
old_bitwidth = dtypes.itemsize_bits(x.dtype)
new_bitwidth = dtypes.itemsize_bits(dtype)
if old_bitwidth == new_bitwidth:
return jax_core.ShapedArray(x.shape, dtype)
if x.ndim == 0:
raise ValueError(
"Cannot bitcast a ()-shaped array to a dtype with a different bitwidth:"
f" {old_bitwidth=} vs {new_bitwidth=}"
)
new_last_dim, rem = divmod(x.shape[-1] * old_bitwidth, new_bitwidth)
if rem:
raise ValueError(
f"Cannot bitcast from {x.dtype} ({old_bitwidth} bits) to"
f" {dtype} ({new_bitwidth} bits), because {x.shape[-1]=} *"
f" {old_bitwidth} is not divisible by {new_bitwidth}"
)
return jax_core.ShapedArray((*x.shape[:-1], new_last_dim), dtype)
@sc_lowering.register_lowering_rule(bitcast_p)
def _bitcast_lowering_rule(ctx: sc_lowering.LoweringRuleContext, x, *, dtype):
  """Lowers ``bitcast_p`` to ``vector.bitcast``."""
  del dtype  # The target dtype is already baked into the output aval.
  out_aval, = ctx.avals_out
  return vector.bitcast(ctx.aval_to_ir_type(out_aval), x)
def bitcast(x: jax.Array, dtype: jax.typing.DTypeLike) -> jax.Array:
  """Reinterprets the bits of ``x`` as ``dtype``.

  Unlike ``lax.bitcast_convert_type``, the result keeps the rank of the
  input: only the minormost dimension grows or shrinks to account for a
  difference in the element bitwidth.
  """
  if x.dtype == dtype:
    return x  # Nothing to reinterpret.
  return bitcast_p.bind(x, dtype=jnp.dtype(dtype))
class MemoryEffect(jax_core.Effect):
  """Effect used to keep memory-ordering ops (the subcore barrier) alive."""
  pass
effects.control_flow_allowed_effects.add_type(MemoryEffect)
effects.lowerable_effects.add_type(MemoryEffect)
# Singleton instance attached to barrier_p's abstract eval below.
_memory_effect = MemoryEffect()
barrier_p = jax_core.Primitive("barrier")
barrier_p.multiple_results = True
@barrier_p.def_effectful_abstract_eval
def _barrier_abstract_eval():
  # No operands, no outputs -- only the memory effect prevents DCE.
  return (), {_memory_effect}
@sc_lowering.register_lowering_rule(barrier_p)
def _barrier_lowering_rule(ctx: sc_lowering.LoweringRuleContext):
  ix = ir.IndexType.get()
  # NOTE(review): the operand is presumably a barrier id; always 0 here.
  tpu.barrier(arith.constant(ix, ir.IntegerAttr.get(ix, 0)))
  return ()
def subcore_barrier():
  """Blocks until all subcores on the same core reach this instruction.

  The barrier must be used with the vector subcore, either via
  :class:`jax.experimental.pallas.tpu_sc.VectorSubcoreMesh` or by passing::

    pltpu.CompilerParams(
        kernel_type=pltpu.CoreType.SC_VECTOR_SUBCORE,
        dimension_semantics=[..., "subcore_parallel", ...])

  to ``pallas_call``.
  """
  barrier_p.bind()
# NOTE(review): the primitive name "unique" differs from the Python-level
# scan_count naming -- presumably historical; confirm before renaming.
scan_count_p = jax_core.Primitive("unique")
scan_count_p.multiple_results = True
@scan_count_p.def_abstract_eval
def _scan_count_abstract_eval(x, mask):
  """Abstract eval for ``scan_count_p``: (int32 counts, last-occurrence mask)."""
  if x.dtype not in (jnp.uint32, jnp.int32, jnp.float32):
    raise NotImplementedError(
        f"x.dtype={x.dtype} must be uint32, int32 or float32")
  if not jnp.issubdtype(mask.dtype, jnp.bool):
    raise TypeError(f"mask.dtype={mask.dtype} is not a boolean dtype")
  if x.shape != mask.shape:
    raise ValueError(f"x.shape={x.shape} != mask.shape={mask.shape}")
  # Counts are always int32; the second output reuses the input mask aval.
  return jax_core.ShapedArray(x.shape, jnp.int32), mask
@sc_lowering.register_lowering_rule(scan_count_p)
def _scan_count_lowering_rule(ctx: sc_lowering.LoweringRuleContext, x, mask):
  """Lowers ``scan_count_p`` to ``tpu.scan_count``."""
  del ctx  # Unused.
  # Reverse, because the MLIR op returns the mask first.
  return tpu.scan_count(mask, x)[::-1]
def scan_count(
    x: jax.Array, mask: jax.Array | None = None
) -> tuple[jax.Array, jax.Array]:
  """Computes the running duplicate occurrence count of the array.

  Args:
    x: An array of integers or floats.
    mask: An optional boolean array selecting which elements of ``x`` are
      eligible for counting. ``None`` makes every element eligible.

  Returns:
    A tuple of two arrays:
      * the running duplicate occurrence count of ``x``;
      * the mask indicating the last occurrence of each duplicate that was
        counted.
  """
  if mask is None:
    mask = lax.full(x.shape, True)
  return scan_count_p.bind(x, mask)
# Masked cumulative-scan primitives. All three share a single abstract eval
# because each returns an array with the same aval as its input.
masked_cummax_p = jax_core.Primitive("masked_cummax")
masked_cummax_p.multiple_results = False
masked_cummin_p = jax_core.Primitive("masked_cummin")
masked_cummin_p.multiple_results = False
masked_cumsum_p = jax_core.Primitive("masked_cumsum")
masked_cumsum_p.multiple_results = False
@masked_cummax_p.def_abstract_eval
@masked_cummin_p.def_abstract_eval
@masked_cumsum_p.def_abstract_eval
def _masked_cummax_abstract_eval(x, mask):
  """Shared abstract eval for the masked cumulative ops."""
  if x.dtype not in (jnp.uint32, jnp.int32, jnp.float32):
    raise NotImplementedError(
        f"x.dtype={x.dtype} must be uint32, int32 or float32")
  if not jnp.issubdtype(mask.dtype, jnp.bool):
    raise TypeError(f"mask.dtype={mask.dtype} is not a boolean dtype")
  if x.shape != mask.shape:
    raise ValueError(f"x.shape={x.shape} != mask.shape={mask.shape}")
  return x
def _masked_cumop_lowering_rule(ctx: sc_lowering.LoweringRuleContext, x, mask,
                                *, reduction_kind: str):
  """Lowers a masked cumulative op to ``tpu.scan`` with ``reduction_kind``."""
  sign_bit_vec = None
  # tpu.scan comparisons assume unsigned int predicates, so we compare
  # with the sign bit flipped.
  if ctx.avals_in[0].dtype == jnp.int32 and reduction_kind in ("max", "min"):
    u32 = ir.IntegerType.get_signless(32)
    sign_bit_vec = vector.broadcast(
        x.type, arith.constant(u32, ir.IntegerAttr.get(u32, 0x80000000)))
    x = arith.xori(x, sign_bit_vec)
  result = tpu.scan(
      x.type, x, ir.Attribute.parse(f"#tpu.reduction_kind<{reduction_kind}>"),
      mask=mask)
  if sign_bit_vec is not None:  # Flip the sign bit back
    return arith.xori(result, sign_bit_vec)
  return result
# The three masked cumulative primitives differ only in the reduction kind.
sc_lowering.register_lowering_rule(masked_cummax_p)(
    functools.partial(_masked_cumop_lowering_rule, reduction_kind="max"))
sc_lowering.register_lowering_rule(masked_cummin_p)(
    functools.partial(_masked_cumop_lowering_rule, reduction_kind="min"))
sc_lowering.register_lowering_rule(masked_cumsum_p)(
    functools.partial(_masked_cumop_lowering_rule, reduction_kind="sum"))
def _reduce_op_lowering_rule(ctx: sc_lowering.LoweringRuleContext, x, axes,
                             *, reduction_kind, out_sharding=None):
  """Lowers a full reduction as a cumulative scan plus a last-lane extract."""
  del out_sharding  # Unused.
  if axes != (0,):
    raise NotImplementedError(
        f"reductions require axes to be (0,) on SparseCore, but got {axes}.")
  vec_dim = ctx.avals_in[0].shape[0]
  # All-true mask: every lane participates in the scan.
  i1t = ir.IntegerType.get_signless(1)
  c1 = arith.constant(i1t, ir.IntegerAttr.get(i1t, 1))
  x_shp = ctx.avals_in[0].shape
  c1v = vector.broadcast(ir.VectorType.get(x_shp, c1.type), c1)
  # The last lane of the scan holds the reduction over the whole vector.
  return vector.extract(
      _masked_cumop_lowering_rule(ctx, x, c1v, reduction_kind=reduction_kind),
      [], [vec_dim - 1])
sc_lowering.register_lowering_rule(
    lax.reduce_max_p, kernel_types=[tpu_core.CoreType.SC_VECTOR_SUBCORE])(
        functools.partial(_reduce_op_lowering_rule, reduction_kind="max"))
sc_lowering.register_lowering_rule(
    lax.reduce_min_p, kernel_types=[tpu_core.CoreType.SC_VECTOR_SUBCORE])(
        functools.partial(_reduce_op_lowering_rule, reduction_kind="min"))
sc_lowering.register_lowering_rule(
    lax.reduce_sum_p, kernel_types=[tpu_core.CoreType.SC_VECTOR_SUBCORE])(
        functools.partial(_reduce_op_lowering_rule, reduction_kind="sum"))
def cummax(x: jax.Array, *, mask: jax.Array | None = None) -> jax.Array:
  """Returns the cumulative max of the array along its innermost axis.

  Elements of `x` pass straight through to the result until the first valid
  (`mask[i] == True`) element is seen. To substitute a default value for those
  leading elements instead, apply `x = jnp.where(mask, x, default_value)`
  before or after calling this function.

  Args:
    x: A rank-1 array of integers or floats.
    mask: Optional boolean array marking which elements of `x` participate in
      the max. `None` means all elements participate.
  """
  if x.ndim != 1:
    raise NotImplementedError(f"cummax: x={x.aval} must be rank 1")
  full_mask = lax.full(x.shape, True) if mask is None else mask
  return masked_cummax_p.bind(x, full_mask)
def cummin(x: jax.Array, *, mask: jax.Array | None = None) -> jax.Array:
  """Returns the cumulative min of the array along its innermost axis.

  Elements of `x` pass straight through to the result until the first valid
  (`mask[i] == True`) element is seen. To substitute a default value for those
  leading elements instead, apply `x = jnp.where(mask, x, default_value)`
  before or after calling this function.

  Args:
    x: A rank-1 array of integers or floats.
    mask: Optional boolean array marking which elements of `x` participate in
      the min. `None` means all elements participate.
  """
  if x.ndim != 1:
    raise NotImplementedError(f"cummin: x={x.aval} must be rank 1")
  full_mask = lax.full(x.shape, True) if mask is None else mask
  return masked_cummin_p.bind(x, full_mask)
@sc_lowering.register_lowering_rule(lax.cumsum_p)
def _cumsum_lowering_rule(ctx: sc_lowering.LoweringRuleContext, x, axis,
                          reverse):
  """Lowers ``lax.cumsum_p`` to ``tpu.scan`` with an all-true mask."""
  if axis != 0:
    raise NotImplementedError(f"SC cumsum: axis={axis} must be 0.")
  if len(ctx.avals_in[0].shape) != 1:
    raise NotImplementedError(f"SC cumsum: x={ctx.avals_in[0]} must be rank 1")
  if reverse:
    raise NotImplementedError("SC cumsum: reverse=True is not yet supported")
  # All-true mask: every lane participates in the sum.
  i1t = ir.IntegerType.get_signless(1)
  c1 = arith.constant(i1t, ir.IntegerAttr.get(i1t, 1))
  c1v = vector.broadcast(ir.VectorType.get(x.type.shape, c1.type), c1)
  return tpu.scan(
      x.type, x, ir.Attribute.parse("#tpu.reduction_kind<sum>"), mask=c1v)
def cumsum(x: jax.Array, *, mask: jax.Array | None = None) -> jax.Array:
  """Returns the cumulative sum of the array along its innermost axis.

  This differs from `jnp.cumsum` in that it takes an additional `mask`
  argument.

  Args:
    x: A rank-1 array of integers or floats.
    mask: Optional boolean array selecting which elements of `x` are summed.
      `None` sums every element.
  """
  if x.ndim != 1:
    raise NotImplementedError(f"cumsum: x={x.aval} must be rank 1")
  full_mask = lax.full(x.shape, True) if mask is None else mask
  return masked_cumsum_p.bind(x, full_mask)
# Primitive for masked key/value sort of one full vector register.
masked_sort_p = jax_core.Primitive("masked_sort")
masked_sort_p.multiple_results = True
@masked_sort_p.def_abstract_eval
def _masked_sort_abstract_eval(keys, values, *maybe_mask, descending):
  """Abstract eval for ``masked_sort_p``: every operand must fill one vector."""
  del descending  # Unused.
  supported_shape = (sc_core.get_sparse_core_info().num_lanes,)
  if keys.dtype not in (jnp.int32, jnp.float32):
    raise NotImplementedError(
        f"sort_key_val: keys dtype {keys.dtype} should be int32 or float32")
  if keys.shape != supported_shape:
    raise ValueError(f"keys shape {keys.shape} must be {supported_shape}")
  if jnp.dtype(values.dtype).itemsize != 4:
    raise NotImplementedError(
        f"sort_key_val: values dtype {values.dtype} should be 32 bits")
  if values.shape != supported_shape:
    raise ValueError(f"values shape {values.shape} must be {supported_shape}")
  if maybe_mask:
    [mask] = maybe_mask
    if not jnp.issubdtype(mask.dtype, jnp.bool):
      raise TypeError(f"mask dtype {mask.dtype} is not boolean")
    if mask.shape != supported_shape:
      raise ValueError(f"mask shape {mask.shape} must be {supported_shape}")
  # Outputs mirror the inputs: keys, values, and the mask if one was given.
  return keys, values, *maybe_mask
@sc_lowering.register_lowering_rule(masked_sort_p)
def _masked_sort_lowering_rule(
    ctx: sc_lowering.LoweringRuleContext, keys, values, *maybe_mask, descending):
  """Lowers ``masked_sort_p`` to ``tpu.sort``."""
  del ctx  # Unused.
  if maybe_mask:
    [mask] = maybe_mask
  else:
    # No user mask: sort every lane under an all-true mask.
    mask_type = ir.VectorType.get(
        [sc_core.get_sparse_core_info().num_lanes],
        ir.IntegerType.get_signless(1))
    mask = arith.constant(mask_type, ir.DenseElementsAttr.get_splat(
        mask_type, ir.BoolAttr.get(True)))
  out_mask, sorted_keys, sorted_values = tpu.sort(
      mask.type, keys.type, values.type, keys, values, mask=mask,
      descending=descending
  )
  # The sorted mask is only returned when the caller supplied one.
  if maybe_mask:
    return sorted_keys, sorted_values, out_mask
  return sorted_keys, sorted_values
def sort_key_val(
    keys: jax.Array, values: jax.Array, *,
    mask: jax.Array | None = None, descending: bool = False
) -> Sequence[jax.Array]:
  """Sorts keys and values, pushing invalid elements to the last positions.

  Args:
    keys: An array of integers or floats.
    values: An array of values corresponding to the keys.
    mask: An optional array of booleans, which specifies which elements of
      `keys` and `values` are valid. If `None`, all elements are valid.
    descending: Whether to sort in descending order.

  Returns:
    sorted_keys, sorted_values, [output_mask]: The sorted keys and values, and,
    if a mask was given, the corresponding mask for output keys and values.
  """
  maybe_mask = () if mask is None else (mask,)
  return masked_sort_p.bind(keys, values, *maybe_mask, descending=descending)
# Primitive carrying a loop body jaxpr whose iterations may run in parallel.
parallel_loop_p = jax_core.Primitive("parallel_loop")
parallel_loop_p.is_effectful = lambda params: bool(params["jaxpr"].effects)  # type: ignore
parallel_loop_p.multiple_results = True
@parallel_loop_p.def_effectful_abstract_eval
def _parallel_loop_abstract_eval(*args, jaxpr, tree, **params):
  """Abstract eval for ``parallel_loop_p``: outputs are the carried values."""
  del params  # Unused.
  # The tree packs (lower, upper, step, consts, carries).
  _, _, _, _, carries = tree.unflatten(args)
  if any(isinstance(c, (Ref, TransformedRef)) for c in carries):
    raise TypeError(f"Carried values may not be refs, but got: {carries}")
  updated_effects = set()
  for eff in jaxpr.effects:
    if isinstance(eff, effects.JaxprInputEffect):
      # Offset for the parallel_loop eqn to account for start, stop, and step
      # args passed to parallel_loop_p.bind.
      eff = eff.replace(input_index=eff.input_index + 3)
    updated_effects.add(eff)
  return carries, updated_effects
@sc_lowering.register_lowering_rule(parallel_loop_p)
def _parallel_loop_lowering_rule(
    ctx: sc_lowering.LoweringRuleContext,
    *flat_args,
    tree,
    unroll,
    jaxpr,
):
  """Lowers ``parallel_loop_p`` to an ``scf.for`` tagged for parallel access."""
  lower, upper, step, consts, carry = tree.unflatten(flat_args)
  for_op = scf.ForOp(
      _ensure_ir_value(lower, pallas_core.index_map_grid_aval),
      _ensure_ir_value(upper, pallas_core.index_map_grid_aval),
      _ensure_ir_value(step, pallas_core.index_map_grid_aval),
      carry,
  )
  # These attributes let the backend reorder/overlap iterations and unroll.
  for_op.attributes["sc.parallel_access"] = ir.UnitAttr.get()
  for_op.attributes["sc.loop_unroll_factor"] = ir.IntegerAttr.get(
      ir.IntegerType.get_signless(64), unroll
  )
  with ir.InsertionPoint(for_op.body):
    _, _, _, consts_block_shapes, *_ = tree.unflatten(ctx.block_shapes)
    # Body jaxpr args are (consts..., induction var, carries...); only the
    # consts carry block shapes.
    lowering_ctx = ctx.lowering_context.replace(
        block_shapes=[*consts_block_shapes, None] + [None] * len(carry),
    )
    carry_out = tc_lowering.jaxpr_subcomp(
        lowering_ctx,
        pe.convert_constvars_jaxpr(jaxpr),
        *consts,
        for_op.induction_variable,
        *for_op.inner_iter_args,
    )
    scf.yield_(carry_out)
  return for_op.results
# Overload: without a carry the body takes only the loop index and returns
# None; the decorator also returns None.
@overload
def parallel_loop(
    lower: jax.typing.ArrayLike,
    upper: jax.typing.ArrayLike,
    step: jax.typing.ArrayLike = ...,
    *,
    unroll: int = ...,
    carry: None = None,
) -> Callable[[Callable[[jax.Array], None]], None]:
  ...
# Overload: with a carry the body takes (index, carry) and returns the next
# carry; the decorator returns the final carry.
@overload
def parallel_loop(
    lower: jax.typing.ArrayLike,
    upper: jax.typing.ArrayLike,
    step: jax.typing.ArrayLike = ...,
    *,
    unroll: int = ...,
    carry: _T,
) -> Callable[[Callable[[jax.Array, _T], _T]], _T]:
  ...
def parallel_loop(lower, upper, step=1, *, unroll=1, carry=None):
  """A parallel loop decorator.
  The decorated function forms the loop body. It is called with the current
  loop index as the argument and optionally, a single additional carry argument.
  The loop iterations must be independent, meaning that operations in one
  iteration cannot depend on the side effects, especially Ref writes, of any
  other iteration. This allows the compiler to execute instructions from
  different iterations concurrently, potentially reordering them for better
  performance.
  Cross-iteration dependencies traceable via carried values are allowed. Refs
  may not be carried.
  Safe usage of carried value::
    @parallel_loop(0, 64, step=8, carry=jnp.int32(1))
    def body(i, j):
      # Writes are independent across iterations.
      x_ref[pl.ds(i, 8)] = j + jnp.arange(8)
      return j + 1
  Any pytree can be carried. The final value is returned by the decorator::
    def body(i, my_tree: MyTree):
      # Writes are independent across iterations.
      x_ref[pl.ds(i, 8)] = my_tree.transform(jnp.arange(8))
      return my_tree.step(i)
    final_value = parallel_loop(0, 64, step=8, carry=MyTree())(body)
  Undefined result::
    @parallel_loop(0, 64, step=4, carry=jnp.int32(1))
    def body(i, j):
      # Because the step size is 4, the array written is of size 8, and loop
      # iterations may be reordered, the values in indices 4-59 of x_ref are
      # unspecified after the loop. (The values in 0-3 and 60-63 are only
      # written by the first and last iterations, so are well-defined.)
      x_ref[pl.ds(i, 8)] = j + jnp.arange(8)
      return j + 1
  Unsafe read of "previous" iteration's write (don't do this)::
    @parallel_loop(0, 64, 8, carry=jnp.int32(1))
    def body(i, j):
      # Unsafe because it depends on the side-effect of "previous" iterations,
      # which may be executed in parallel or reordered.
      mask = x_ref[pl.ds(0, 8)] < j
      x_ref[pl.ds(0, 8)] += jnp.where(mask, j + jnp.arange(8), 0)
      return j + 1
  Args:
    lower: The starting value of the loop index.
    upper: The exclusive upper bound of the loop index.
    step: The increment of the loop index. Defaults to 1.
    unroll: The unroll factor of the loop.
    carry: Optional carried state of the loop.
  Returns:
    A decorator that executes the given function in a parallel loop.
  """
  def decorator(body):
    # Flatten the carry pytree so tracing sees a flat list of leaves.
    flat_carries, carry_tree = jax.tree.flatten(carry)
    def wrapped(idx, *carries):
      # Normalizes the user body to a flat-carry calling convention.
      if carry is None:
        body(idx)
        return []
      result = body(idx, carry_tree.unflatten(carries))
      result, result_tree = jax.tree.flatten(result)
      # The body must thread the carry through with an unchanged structure.
      if result_tree != carry_tree:
        raise ValueError(
            "parallel_loop: body result should have same structure as carry:"
            f" {result_tree} != {carry_tree}"
        )
      return result
    # Trace the wrapped body: one index aval followed by the carry leaf avals.
    flat_avals = [
        pallas_core.index_map_grid_aval,
        *(c.aval for c in flat_carries),
    ]
    debug_info = api_util.debug_info("parallel_loop", body, flat_avals, {})
    # Refs may not be carried (see docstring above).
    check_no_transformed_refs_args(lambda: debug_info, flat_carries)
    jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(
        lu.wrap_init(wrapped, debug_info=debug_info), flat_avals
    )
    carry_tree.unflatten(jaxpr.outvars)  # Verify same structure.
    disallowed_effects = effects.control_flow_allowed_effects.filter_not_in(
        jaxpr.effects
    )
    if disallowed_effects:
      raise NotImplementedError(
          f"Effects not supported in parallel_loop: {disallowed_effects}"
      )
    # Bind everything (bounds, step, closed-over consts, carry leaves) flat;
    # ``tree`` tells the lowering how to split the flat args back apart.
    flat_args, tree = jax.tree.flatten(
        (lower, upper, step, consts, flat_carries)
    )
    flat_result = parallel_loop_p.bind(
        *flat_args, tree=tree, unroll=unroll, jaxpr=jaxpr
    )
    if carry is None:
      return None
    return carry_tree.unflatten(flat_result)
  return decorator
class PackFormat(enum.Enum):
  """Layout of two input vectors within a packed output vector."""
  #: [a0, a1], [b0, b1] -> [[a0, a1], [b0, b1]]
  COMPRESSED = "compressed"
  #: [a0, a1], [b0, b1] -> [a0, b0, a1, b1]
  INTERLEAVED = "interleaved"
def _format_to_ir_attribute(format: PackFormat) -> ir.Attribute:
  """Builds the ``#tpu.pack_format<...>`` MLIR attribute for ``format``."""
  text = f"#tpu.pack_format<{format.value}>"
  return ir.Attribute.parse(text)
pack_p = jax_core.Primitive("pack")


@pack_p.def_abstract_eval
def _pack_abstract_eval(a, b, *, format, preferred_element_type):
  """Shape/dtype rule for ``pack_p``: two equal 1-D inputs -> packed output."""
  if a.shape != b.shape:
    raise ValueError(
        f"Packed arrays must have the same shape, got {a.shape} and {b.shape}"
    )
  if a.ndim != 1:
    raise ValueError(f"Packed arrays must be 1-D, got {a.ndim}")
  if a.dtype != b.dtype:
    raise TypeError(
        f"Packed arrays must have the same dtype, got {a.dtype} and {b.dtype}"
    )
  if preferred_element_type is None:
    # Default: pack into the half-width dtype of matching kind.
    if a.dtype == jnp.float32:
      packed_dtype = jnp.bfloat16
    elif a.dtype == jnp.int32:
      packed_dtype = jnp.int16
    else:
      # TODO(slebedev): Support more types.
      raise NotImplementedError(
          f"Only packing of float32 and int32 is supported, got {a.dtype}"
      )
  else:
    # An explicit output dtype must be exactly half as wide as the input.
    packed_bw = dtypes.itemsize_bits(a.dtype) // 2
    if dtypes.itemsize_bits(preferred_element_type) != packed_bw:
      raise ValueError(
          f"preferred_element_type= must have bitwidth {packed_bw}, got"
          f" {dtypes.itemsize_bits(preferred_element_type)}"
      )
    packed_dtype = preferred_element_type
  if format is PackFormat.INTERLEAVED:
    packed_shape = (2 * a.size,)
  elif format is PackFormat.COMPRESSED:
    packed_shape = (a.size, 2)
  else:
    raise TypeError(f"Unexpected format: {format}")
  return jax_core.ShapedArray(packed_shape, packed_dtype)
@sc_lowering.register_lowering_rule(pack_p)
def _pack_lowering_rule(
    ctx: sc_lowering.LoweringRuleContext,
    a,
    b,
    *,
    format,
    preferred_element_type,
):
  """Lowers ``pack_p`` to the ``tpu.pack_subelements`` MLIR op."""
  # The output dtype was already resolved by the abstract eval rule, so only
  # the result type and pack format need to be forwarded here.
  del preferred_element_type  # Unused.
  [out_aval] = ctx.avals_out
  return tpu.pack_subelements(
      ctx.aval_to_ir_type(out_aval),
      [a, b],
      # Presumably ``a`` takes subelement position 0 and ``b`` position 1 —
      # confirm against the tpu dialect's pack_subelements semantics.
      [0, 1],
      _format_to_ir_attribute(format),
  )
def pack(
    a: jax.Array,
    b: jax.Array,
    /,
    *,
    format: PackFormat,
    preferred_element_type: jax.typing.DTypeLike | None = None,
) -> jax.Array:
  """Packs two arrays according to the given format.

  .. warning:: This API is temporary and will be removed once the SparseCore
    compiler is able to do packing/unpacking automatically.

  Args:
    a: The first array to pack.
    b: The second array to pack.
    format: The packing format to use.
    preferred_element_type: Optional. The preferred element type of the packed
      array. If specified, must have half the bitwidth of the input array
      types.

  Returns:
    The packed array.
  """
  # Normalize the dtype once so the primitive always sees a canonical value.
  dtype = (
      None
      if preferred_element_type is None
      else jnp.dtype(preferred_element_type)
  )
  return pack_p.bind(a, b, format=format, preferred_element_type=dtype)
unpack_p = jax_core.Primitive("unpack")
unpack_p.multiple_results = True
@unpack_p.def_abstract_eval
def _unpack_abstract_eval(ab, *, format, preferred_element_type):
match format:
case PackFormat.INTERLEAVED:
if ab.ndim != 1 or ab.size % 2 != 0:
raise ValueError(
"Interleaved unpack requires a 1-D array with an even size, got"
f" {ab.shape}"
)
case PackFormat.COMPRESSED:
if ab.ndim != 2 or ab.shape[1] != 2:
raise ValueError(
"Compressed unpack requires an array with shape (N, 2), got"
f" {ab.shape}"
)
if preferred_element_type is None:
match ab.dtype:
case jnp.bfloat16:
unpacked_dtype = jnp.float32
case jnp.int16:
unpacked_dtype = jnp.int32
case _:
# TODO(slebedev): Support more types.
raise NotImplementedError(
f"Only unpacking of bloat16 and int16 is supported, got {ab.dtype}"
)
else:
unpacked_bw = dtypes.itemsize_bits(ab.dtype) * 2
if dtypes.itemsize_bits(preferred_element_type) != unpacked_bw:
raise ValueError(
f"preferred_element_type= must have bitwidth {unpacked_bw}, got"
f" {dtypes.itemsize_bits(preferred_element_type)}"
)
unpacked_dtype = preferred_element_type
return (jax_core.ShapedArray((ab.size // 2,), unpacked_dtype),) * 2
@sc_lowering.register_lowering_rule(unpack_p)
def _unpack_lowering_rule(
    ctx: sc_lowering.LoweringRuleContext, ab, *, format, preferred_element_type
):
  """Lowers ``unpack_p`` to two ``tpu.unpack_subelements`` MLIR ops."""
  # The output dtype was already resolved by the abstract eval rule.
  del preferred_element_type  # Unused.
  # Both result avals are identical, so one IR type serves both ops.
  out_aval, _ = ctx.avals_out
  out_type = ctx.aval_to_ir_type(out_aval)
  # Indices 0 and 1 select the two packed halves — presumably the first and
  # second input of the matching pack; confirm against the tpu dialect.
  return (
      tpu.unpack_subelements(out_type, ab, 0, _format_to_ir_attribute(format)),
      tpu.unpack_subelements(out_type, ab, 1, _format_to_ir_attribute(format)),
  )
def unpack(
    ab: jax.Array,
    /,
    *,
    format: PackFormat,
    preferred_element_type: jax.typing.DTypeLike | None = None,
) -> tuple[jax.Array, jax.Array]:
  """Unpacks two arrays according to the given format.

  .. warning:: This API is temporary and will be removed once the SparseCore
    compiler is able to do packing/unpacking automatically.

  Args:
    ab: The array to unpack.
    format: The packing format to use.
    preferred_element_type: Optional. The preferred element type of the
      unpacked arrays. If specified, must have double the bitwidth of the
      input array type.

  Returns:
    The unpacked arrays.
  """
  # Normalize the dtype once so the primitive always sees a canonical value.
  dtype = None
  if preferred_element_type is not None:
    dtype = jnp.dtype(preferred_element_type)
  return unpack_p.bind(ab, format=format, preferred_element_type=dtype)
def _mask_all_reduce_abstract_eval(x, *, reduce):
if x.dtype != jnp.bool:
raise TypeError(f"Mask all-reduce only supports bool arrays, got {x.dtype}")
match x.shape:
case (minor_dim,):
return jax_core.ShapedArray((minor_dim // reduce,), jnp.int32)
case _:
raise ValueError("Mask all-reduce only supports 1D arrays")
def _mask_all_reduce_lowering_rule(
    ctx: sc_lowering.LoweringRuleContext, x, *, reduce, kind: str
):
  """Shared lowering for the mask all-reduce primitives.

  Args:
    ctx: The lowering rule context.
    x: The boolean mask vector value.
    reduce: Unused here; the output shape already reflects it via avals_out.
    kind: The ``tpu.reduction_kind`` name, e.g. "sum" or "find_first_set".
  """
  [out_aval] = ctx.avals_out
  return tpu.all_reduce(
      ir.VectorType.get(
          out_aval.shape,
          ir.IntegerType.get_signless(32),
      ),
      x,
      0,  # reduce over the only (minor) dimension
      ir.Attribute.parse(f"#tpu.reduction_kind<{kind}>"),
  )
all_reduce_population_count_p = jax_core.Primitive(
    "all_reduce_population_count"
)
all_reduce_population_count_p.def_abstract_eval(_mask_all_reduce_abstract_eval)
# Lowered as a "sum" tpu.all_reduce: summing a bool mask counts its set bits.
sc_lowering.register_lowering_rule(all_reduce_population_count_p)(
    functools.partial(_mask_all_reduce_lowering_rule, kind="sum")
)


def all_reduce_population_count(x: jax.Array, *, reduce: int = 1) -> jax.Array:
  """Computes the number of nonzero elements in the array.

  Args:
    x: A 1D array of bools.
    reduce: The factor to reduce the output shape by.

  Returns:
    An array with each element containing the number of true elements in ``x``.
  """
  return all_reduce_population_count_p.bind(x, reduce=reduce)
all_reduce_ffs_p = jax_core.Primitive("all_reduce_ffs")
all_reduce_ffs_p.def_abstract_eval(_mask_all_reduce_abstract_eval)
# Lowered as a "find_first_set" tpu.all_reduce over the bool mask.
sc_lowering.register_lowering_rule(all_reduce_ffs_p)(
    functools.partial(_mask_all_reduce_lowering_rule, kind="find_first_set")
)


def all_reduce_ffs(x: jax.Array, *, reduce: int = 1) -> jax.Array:
  """Computes the index of the first true element in the array.

  Args:
    x: A 1D array of bools.
    reduce: The factor to reduce the output shape by.

  Returns:
    An array with each element containing the index of the first true element in
    ``x`` or ``x.size`` if there are no true elements.
  """
  return all_reduce_ffs_p.bind(x, reduce=reduce)
fetch_and_add_p = jax_core.Primitive("sc_fetch_and_add")
fetch_and_add_p.multiple_results = False


@fetch_and_add_p.def_effectful_abstract_eval
def _fetch_and_add_abstract_eval(*args):
  """Type rule for ``fetch_and_add``: scalar int32 SMEM ref plus scalar value.

  The flat args are (ref, value, *indices, subcore_id); the result has the
  value's aval, and the primitive both reads and writes the ref (operand 0).
  """
  ref, amount, *idxs, core_id = args
  # Guard clauses, checked in order: ref dtype, ref memory space, value shape
  # and dtype, index scalars, subcore id scalar.
  if ref.dtype != jnp.int32:
    raise NotImplementedError(
        f"Only int32 refs are supported, but got {ref.dtype}"
    )
  if ref.memory_space != tpu_core.MemorySpace.SMEM:
    raise ValueError(
        f"Only refs in SMEM memory space are supported, but got {ref}"
    )
  if amount.dtype != ref.dtype or amount.shape:
    raise ValueError(
        "The value must be a scalar of the same type as the ref"
        f" ({ref.dtype}), but got {amount}."
    )
  if any(i.dtype != jnp.int32 or i.shape for i in idxs):
    raise ValueError(
        f"All indices must be scalars of type int32, but got {idxs}."
    )
  if core_id.dtype != jnp.int32 or core_id.shape:
    raise ValueError(
        f"subcore_id= must be a scalar of type int32, but got {core_id}."
    )
  return amount, {state_types.ReadEffect(0), state_types.WriteEffect(0)}
@sc_lowering.register_lowering_rule(fetch_and_add_p)
def _fetch_and_add_lowering_rule(ctx: sc_lowering.LoweringRuleContext, *args):
  """Lowers ``fetch_and_add_p`` to the ``tpu.fetch_and_add_sync`` MLIR op."""
  del ctx  # Unused.
  # Flat layout matches the abstract eval rule: (ref, value, *indices, id).
  x_ref, value, *indices, subcore_id = args
  # The op targets the SC vector subcore identified by ``subcore_id``.
  core_type = ir.Attribute.parse("#tpu.core_type<sc_vector_subcore>")
  return tpu.fetch_and_add_sync(
      x_ref, indices, value, core_type=core_type, core_id=subcore_id
  )
def fetch_and_add(
    x_ref: jax.Ref | state_types.TransformedRef,
    value: jax.typing.ArrayLike,
    *,
    subcore_id: jax.typing.ArrayLike,
) -> jax.Array:
  """Adds value to the ``x_ref`` on another subcore.
  Be careful to ensure subcores are synchronized between initializing the SMEM
  (on the target subcore) and adding to it, potentially using
  ``plsc.subcore_barrier()``.
  Args:
    x_ref: A scalar SMEM ref.
    value: The value to add to ``x_ref`` on ``subcore_id``.
    subcore_id: The ID of the vector subcore to use.
  Returns:
    The value of ``x_ref`` on the specified subcore before adding ``value``.
  """
  if x_ref.size != 1:
    raise ValueError(f"Expected a scalar ref, but got {x_ref.shape=}.")
  # Split any indexing transforms off the underlying ref so the primitive can
  # be passed plain scalar indices.
  x_ref, transforms = pallas_primitives._get_ref_and_transforms(x_ref)
  match transforms:
    case []:
      # Untransformed ref: address element (0, ..., 0).
      indices = [jnp.int32(0)] * x_ref.ndim
    case [indexing.NDIndexer(indices=indices) as indexer]:
      # A single indexer is accepted as long as it contains no slices.
      if any(isinstance(i, indexing.Slice) for i in indexer.indices):
        raise ValueError(
            "fetch_and_add only supports refs indexed with non-slice indices,"
            f" but got {indices}"
        )
    case _:
      raise ValueError(
          "fetch_and_add requires a scalar ref with a single non-slice"
          f" indexer, but got {transforms}"
      )
  return fetch_and_add_p.bind(x_ref, value, *indices, subcore_id)
| {
"repo_id": "jax-ml/jax",
"file_path": "jax/_src/pallas/mosaic/sc_primitives.py",
"license": "Apache License 2.0",
"lines": 1057,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:jax/_src/pallas/mosaic/tpu_info.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exposes TPU hardware information."""
import dataclasses
import enum
from typing import Callable, cast
from jax import numpy as jnp
from jax._src import core as jax_core
from jax._src import dtypes
from jax._src import util as jax_util
from jax._src.pallas import utils as pallas_utils
from jax._src.pallas.mosaic import core
class ChipVersionBase:
  """Marker base class for TPU chip-version enums."""


class ChipVersion(ChipVersionBase, enum.Enum):
  """TPU chip version.

  The following table summarizes the differences between TPU versions:
  +---------+-------------------------------+-----------+------------------+
  | Version | Physical TensorCores per chip | Lite chip | Megacore support |
  +=========+===============================+===========+==================+
  | v2      | 2                             | No        | No               |
  +---------+-------------------------------+-----------+------------------+
  | v3      | 2                             | No        | No               |
  +---------+-------------------------------+-----------+------------------+
  | v4i     | 1                             | Yes       | No               |
  +---------+-------------------------------+-----------+------------------+
  | v4      | 2                             | No        | Yes              |
  +---------+-------------------------------+-----------+------------------+
  | v5e     | 1                             | Yes       | No               |
  +---------+-------------------------------+-----------+------------------+
  | v5p     | 2                             | No        | Yes              |
  +---------+-------------------------------+-----------+------------------+
  | v6e     | 1                             | Yes       | No               |
  +---------+-------------------------------+-----------+------------------+
  | 7       | 2                             | No        | No               |
  +---------+-------------------------------+-----------+------------------+
  | 7x      | 2                             | No        | No               |
  +---------+-------------------------------+-----------+------------------+
  """

  TPU_V2 = "v2"
  TPU_V3 = "v3"
  TPU_V4I = "v4i"
  TPU_V4 = "v4"
  TPU_V5E = "v5e"
  TPU_V5P = "v5p"
  TPU_V6E = "v6e"
  TPU_7 = "7"
  TPU_7X = "7x"

  def __str__(self) -> str:
    return self.value

  @property
  def is_lite(self) -> bool:
    # The lite chips are exactly the single-TensorCore generations.
    return self in (
        ChipVersion.TPU_V4I,
        ChipVersion.TPU_V5E,
        ChipVersion.TPU_V6E,
    )

  @property
  def _num_physical_tensor_cores_per_chip(self) -> int:
    # Lite chips have one physical TensorCore; all others have two.
    return 1 if self.is_lite else 2

  @property
  def num_physical_tensor_cores_per_chip(self) -> int:
    return self._num_physical_tensor_cores_per_chip

  @property
  def supports_megacore(self) -> bool:
    # Only v4 and v5p can fuse their two TensorCores into a Megacore.
    return self in (ChipVersion.TPU_V4, ChipVersion.TPU_V5P)
def chip_version_from_device_kind(device_kind: str) -> ChipVersion | None:
  """Maps a device-kind string to a ``ChipVersion``.

  Returns ``None`` for device kinds that are not recognized TPU chips.
  """
  known_kinds = {
      "TPU v2": ChipVersion.TPU_V2,
      "TPU v3": ChipVersion.TPU_V3,
      "TPU v4": ChipVersion.TPU_V4,
      "TPU v4 lite": ChipVersion.TPU_V4I,
      # Some runtimes report alternate spellings for the same chip.
      "TPU v5e": ChipVersion.TPU_V5E,
      "TPU v5 lite": ChipVersion.TPU_V5E,
      "TPU v5": ChipVersion.TPU_V5P,
      "TPU v5p": ChipVersion.TPU_V5P,
      "TPU v6e": ChipVersion.TPU_V6E,
      "TPU v6 lite": ChipVersion.TPU_V6E,
      "TPU7": ChipVersion.TPU_7,
      "TPU7x": ChipVersion.TPU_7X,
  }
  return known_kinds.get(device_kind)
@dataclasses.dataclass(frozen=True, kw_only=True)
class SparseCoreInfo:
  """SparseCore-specific information."""

  # Number of SparseCores (presumably per chip — confirm against hardware docs).
  num_cores: int
  # Number of subcores within each SparseCore.
  num_subcores: int
  # Number of vector lanes (per subcore, presumably — confirm).
  num_lanes: int
  # DMA transfer granularity in bytes.
  dma_granule_size_bytes: int
@dataclasses.dataclass(frozen=True, kw_only=True)
class TpuInfo:
  """TPU hardware information.
  Note that all information is per-TensorCore so you would need to multiply
  by `num_cores` to obtain the total for the chip.
  """
  # Chip identity.
  chip_version: ChipVersionBase
  generation: int
  # Number of TensorCores per logical device in this configuration.
  num_cores: int
  # Vector unit geometry.
  num_lanes: int
  num_sublanes: int
  mxu_column_size: int
  # Per-core memory capacities in bytes (0 means not present / not available).
  vmem_capacity_bytes: int
  cmem_capacity_bytes: int
  smem_capacity_bytes: int
  hbm_capacity_bytes: int
  mem_bw_bytes_per_second: int
  # Peak throughput per dtype (0 means not available on this generation).
  bf16_ops_per_second: int
  int8_ops_per_second: int
  fp8_ops_per_second: int
  int4_ops_per_second: int
  # SparseCore details, or None when the chip has no SparseCore info recorded.
  sparse_core: SparseCoreInfo | None = None
  @property
  def is_lite(self) -> bool:
    # Delegates to the ChipVersion enum; the cast narrows ChipVersionBase.
    return cast(ChipVersion, self.chip_version).is_lite
  @property
  def is_split_chip(self) -> bool:
    """Returns True if the chip is a multi-core chip being used in single-core mode.
    Some TPU generations (e.g. v4, v5p) have multiple TensorCores per chip.
    These chips can be used in two modes:
    1. "Megacore" mode, where the cores are combined into a single logical
       device (if supported).
    2. "Split" mode, where each core is treated as an independent logical
       device.
    This property returns True if the chip is in "split" mode (case 2).
    """
    return self.num_cores == 1 and (
        cast(ChipVersion, self.chip_version).num_physical_tensor_cores_per_chip
        > 1
    )
  @property
  def is_megacore(self) -> bool:
    """Returns True if the chip is configured in Megacore mode.
    Megacore mode means the two physical TensorCores are combined into a single
    logical device.
    """
    return self.num_cores > 1
  def is_matmul_supported(
      self,
      lhs_dtype: dtypes.DTypeLike,
      rhs_dtype: dtypes.DTypeLike,
  ) -> bool:
    """Returns whether the chip natively supports matmul on the given input dtypes (no casting needed)."""
    lhs_dtype = dtypes.dtype(lhs_dtype)
    rhs_dtype = dtypes.dtype(rhs_dtype)
    # Short local aliases keep the per-generation tables below readable.
    F32 = jnp.float32
    BF16 = jnp.bfloat16
    S8 = jnp.int8
    U8 = jnp.uint8
    F8E4M3B11FNUZ = jnp.float8_e4m3b11fnuz
    F8E4M3FN = jnp.float8_e4m3fn
    F8E5M2 = jnp.float8_e5m2
    S4 = jnp.int4
    U4 = jnp.uint4
    match self.generation:
      case 2 | 3:
        return lhs_dtype == rhs_dtype == F32
      case 4:
        return lhs_dtype in (F32, BF16) and rhs_dtype in (F32, BF16, S8)
      case 5 | 6:
        # Mixed float/int operands are not supported: both sides must be in
        # the same family (float-ish, 8-bit int, or 4-bit int).
        return (
            (
                lhs_dtype in (F32, BF16, F8E5M2, F8E4M3B11FNUZ)
                and rhs_dtype in (F32, BF16, F8E5M2, F8E4M3B11FNUZ)
            )
            or (lhs_dtype in (U8, S8) and rhs_dtype in (U8, S8))
            or (lhs_dtype in (U4, S4) and rhs_dtype in (U4, S4))
        )
      case 7:
        return (lhs_dtype in (F32, BF16) and rhs_dtype in (F32, BF16)) or (
            lhs_dtype in (F32, BF16, F8E5M2, F8E4M3FN)
            and rhs_dtype in (F8E5M2, F8E4M3FN)
        )
      case _:
        return False
  def get_sublane_tiling(self, dtype: jnp.dtype) -> int:
    """Returns the sublane tiling for the given itemsize.
    Note that this is a heuristic and depends on the settings of the XLA flags.
    """
    bitwidth = dtypes.itemsize_bits(dtype)
    if self.generation < 7:
      # Caveat: before TPU7x, by default XLA does not use large 2nd minor tiling
      # but it can be enabled by setting the flag
      # xla_tpu_enable_large_2nd_minor_layout_for_x16.
      if bitwidth == 16 or bitwidth == 32:
        return self.num_sublanes
      else:
        # Large 2nd minor tiling is enabled for other types.
        return self.num_sublanes * (32 // bitwidth)
    # XLA allows large 2nd minor tiling by default starting with TPU7x.
    if self.generation == 7:
      return self.num_sublanes * (32 // bitwidth)
    raise NotImplementedError("TPU generation is not supported")
def is_tpu_device() -> bool:
  """Returns whether the current device kind maps to a known TPU chip."""
  device_kind = core.get_device_kind()
  return chip_version_from_device_kind(device_kind) is not None
# Fallback providers of TpuInfo keyed by device kind, consulted by
# ``get_tpu_info`` for device kinds not covered by ``ChipVersion``.
registry: dict[str, Callable[[], TpuInfo]] = {}
def _get_tpu_info_impl(chip_version: ChipVersion, num_cores: int) -> TpuInfo:
  """Returns the TPU hardware info for the given chip version and core count.
  Note that all information is *per-TensorCore* so you would need to multiply by
  `num_cores` to obtain the total for the chip.
  Args:
    chip_version: The TPU chip version.
    num_cores: The number of TensorCores per chip for this configuration. This
      is influenced by the TPU version and whether Megacore is enabled.
  """
  # Common parameters for all TensorCores
  NUM_LANES = 128
  NUM_SUBLANES = 8
  MXU_COLUMN_SIZE_GEN_LT_6 = 128
  MXU_COLUMN_SIZE_GEN_GE_6 = 256
  # Chip-wide quantities below are divided by this to obtain per-core values.
  tensor_cores_per_chip = chip_version.num_physical_tensor_cores_per_chip
  match chip_version:
    case ChipVersion.TPU_V2:
      return TpuInfo(
          chip_version=chip_version,
          generation=2,
          num_cores=num_cores,
          num_lanes=NUM_LANES,
          num_sublanes=NUM_SUBLANES,
          mxu_column_size=MXU_COLUMN_SIZE_GEN_LT_6,
          vmem_capacity_bytes=16 * 1024 * 1024,  # 16 MiB per core
          cmem_capacity_bytes=0,
          smem_capacity_bytes=16 * 1024,  # 16 KiB per core
          hbm_capacity_bytes=int(16_000_000_000 // tensor_cores_per_chip),
          mem_bw_bytes_per_second=int(7.16e11 // tensor_cores_per_chip),
          bf16_ops_per_second=int(4.6e13 // tensor_cores_per_chip),
          int8_ops_per_second=0,  # Not Available
          fp8_ops_per_second=0,  # Not Available
          int4_ops_per_second=0,  # Not Available
      )
    case ChipVersion.TPU_V3:
      return TpuInfo(
          chip_version=chip_version,
          generation=3,
          num_cores=num_cores,
          num_lanes=NUM_LANES,
          num_sublanes=NUM_SUBLANES,
          mxu_column_size=MXU_COLUMN_SIZE_GEN_LT_6,
          vmem_capacity_bytes=16 * 1024 * 1024,  # 16 MiB per core
          cmem_capacity_bytes=0,
          smem_capacity_bytes=16 * 1024,  # 16 KiB per core
          hbm_capacity_bytes=34_400_000_000 // tensor_cores_per_chip,
          mem_bw_bytes_per_second=int(8.25e11 // tensor_cores_per_chip),
          bf16_ops_per_second=int(1.40e14 // tensor_cores_per_chip),
          int8_ops_per_second=0,  # Not Available
          fp8_ops_per_second=0,  # Not Available
          int4_ops_per_second=0,  # Not Available
      )
    case ChipVersion.TPU_V4I:
      # Single-core (lite) chip, so chip-wide values need no division.
      return TpuInfo(
          chip_version=chip_version,
          generation=4,
          num_cores=num_cores,
          num_lanes=NUM_LANES,
          num_sublanes=NUM_SUBLANES,
          mxu_column_size=MXU_COLUMN_SIZE_GEN_LT_6,
          vmem_capacity_bytes=16 * 1024 * 1024,  # 16 MiB per core
          cmem_capacity_bytes=134_000_000,
          smem_capacity_bytes=1024 * 1024,  # 1 MiB per core
          hbm_capacity_bytes=8_590_000_000,
          mem_bw_bytes_per_second=int(6.14e11),
          bf16_ops_per_second=int(1.37e14),
          int8_ops_per_second=0,  # Not Available
          fp8_ops_per_second=0,  # Not Available
          int4_ops_per_second=0,  # Not Available
      )
    case ChipVersion.TPU_V4:
      return TpuInfo(
          chip_version=chip_version,
          generation=4,
          num_cores=num_cores,
          num_lanes=NUM_LANES,
          num_sublanes=NUM_SUBLANES,
          mxu_column_size=MXU_COLUMN_SIZE_GEN_LT_6,
          vmem_capacity_bytes=16 * 1024 * 1024,  # 16 MiB per core
          cmem_capacity_bytes=134_000_000 // tensor_cores_per_chip,
          smem_capacity_bytes=1024 * 1024,  # 1 MiB per core
          hbm_capacity_bytes=34_400_000_000 // tensor_cores_per_chip,
          mem_bw_bytes_per_second=int(1.23e12 // tensor_cores_per_chip),
          bf16_ops_per_second=int(2.75e14 // tensor_cores_per_chip),
          int8_ops_per_second=0,  # Not Available
          fp8_ops_per_second=0,  # Not Available
          int4_ops_per_second=0,  # Not Available
      )
    case ChipVersion.TPU_V5E:
      return TpuInfo(
          chip_version=chip_version,
          generation=5,
          num_cores=num_cores,
          num_lanes=NUM_LANES,
          num_sublanes=NUM_SUBLANES,
          mxu_column_size=MXU_COLUMN_SIZE_GEN_LT_6,
          vmem_capacity_bytes=128 * 1024 * 1024,  # 128 MiB per core
          cmem_capacity_bytes=0,
          smem_capacity_bytes=1024 * 1024,  # 1 MiB per core
          hbm_capacity_bytes=17_200_000_000,
          mem_bw_bytes_per_second=int(8.20e11),
          bf16_ops_per_second=int(1.97e14),
          int8_ops_per_second=int(3.94e14),
          fp8_ops_per_second=0,  # Not Available
          int4_ops_per_second=int(7.88e14),
      )
    case ChipVersion.TPU_V5P:
      return TpuInfo(
          chip_version=chip_version,
          generation=5,
          num_cores=num_cores,
          num_lanes=NUM_LANES,
          num_sublanes=NUM_SUBLANES,
          mxu_column_size=MXU_COLUMN_SIZE_GEN_LT_6,
          vmem_capacity_bytes=64 * 1024 * 1024,  # 64 MiB per core
          cmem_capacity_bytes=0,
          smem_capacity_bytes=1024 * 1024,  # 1 MiB per core
          hbm_capacity_bytes=103_000_000_000 // tensor_cores_per_chip,
          mem_bw_bytes_per_second=int(2.46e12 // tensor_cores_per_chip),
          bf16_ops_per_second=int(4.59e14 // tensor_cores_per_chip),
          int8_ops_per_second=int(9.18e14 // tensor_cores_per_chip),
          fp8_ops_per_second=0,  # Not Available
          int4_ops_per_second=int(1.84e15 // tensor_cores_per_chip),
          sparse_core=SparseCoreInfo(
              num_cores=4,
              num_subcores=16,
              num_lanes=8,
              dma_granule_size_bytes=32,
          ),
      )
    case ChipVersion.TPU_V6E:
      return TpuInfo(
          chip_version=chip_version,
          generation=6,
          num_cores=num_cores,
          num_lanes=NUM_LANES,
          num_sublanes=NUM_SUBLANES,
          mxu_column_size=MXU_COLUMN_SIZE_GEN_GE_6,
          vmem_capacity_bytes=128 * 1024 * 1024,  # 128 MiB per core
          cmem_capacity_bytes=0,
          smem_capacity_bytes=1024 * 1024,  # 1 MiB per core
          hbm_capacity_bytes=34_400_000_000,
          mem_bw_bytes_per_second=int(1.64e12),
          bf16_ops_per_second=int(9.20e14),
          int8_ops_per_second=int(1.84e15),
          fp8_ops_per_second=int(9.20e14),
          int4_ops_per_second=int(3.68e15),
          sparse_core=SparseCoreInfo(
              num_cores=2,
              num_subcores=16,
              num_lanes=8,
              dma_granule_size_bytes=32,
          ),
      )
    case ChipVersion.TPU_7 | ChipVersion.TPU_7X:
      return TpuInfo(
          chip_version=chip_version,
          generation=7,
          num_cores=num_cores,
          num_lanes=128,
          num_sublanes=8,
          mxu_column_size=256,
          vmem_capacity_bytes=64 * 1024 * 1024,  # 64 MiB per core
          cmem_capacity_bytes=0,
          smem_capacity_bytes=1024 * 1024,  # 1 MiB per core
          hbm_capacity_bytes=206_000_000_000 // tensor_cores_per_chip,
          mem_bw_bytes_per_second=int(7.40e12 // tensor_cores_per_chip),
          bf16_ops_per_second=int(2.31e15 // tensor_cores_per_chip),
          int8_ops_per_second=0,  # Not Available
          fp8_ops_per_second=int(4.60e15 // tensor_cores_per_chip),
          int4_ops_per_second=0,  # Not Available
          sparse_core=SparseCoreInfo(
              num_cores=2,
              num_subcores=16,
              num_lanes=16,
              dma_granule_size_bytes=64,
          ),
      )
    case _:
      raise ValueError(f"Unsupported TPU chip version: {chip_version}")
@jax_util.cache(trace_context_in_key=True)
def get_tpu_info() -> TpuInfo:
  """Returns the TPU hardware info for the current device.

  Note that all information is *per-TensorCore* so you would need to multiply
  by `num_cores` to obtain the total for the chip.
  """
  device_kind = core.get_device_kind()
  version = chip_version_from_device_kind(device_kind)
  if version is not None:
    return _get_tpu_info_impl(version, core.get_num_device_cores())
  # Unknown device kinds may still be served by an externally registered
  # provider.
  provider = registry.get(device_kind)
  if provider is not None:
    return provider()
  raise ValueError(f"Unsupported TPU device kind: {device_kind}")
@jax_util.cache(trace_context_in_key=True)
def get_tpu_info_for_chip(
    chip_version: ChipVersion, num_tensor_cores_per_logical_device: int
) -> TpuInfo:
  """Returns the TPU hardware info for the given TPU chip version.

  Note that all information is *per-TensorCore* so you would need to multiply
  by `num_tensor_cores_per_logical_device` to obtain the total for the chip.

  Args:
    chip_version: The TPU chip version.
    num_tensor_cores_per_logical_device: The number of TensorCores per logical
      device in the requested configuration. Should be 1 for single-core chips
      (TPU_V4I, TPU_V5E, TPU_V6E). For dual-core chips that support Megacore
      (TPU_V4, TPU_V5P), this can be 2 (Megacore mode) or 1 (split mode). For
      dual-core chips that do not support Megacore (TPU_V2, TPU_V3, TPU_7X),
      this must be 1.
  """
  # Lite chips and non-Megacore dual-core chips only ever expose one
  # TensorCore per logical device.
  single_core_only = chip_version.is_lite or chip_version in {
      ChipVersion.TPU_V2,
      ChipVersion.TPU_V3,
      ChipVersion.TPU_7,
      ChipVersion.TPU_7X,
  }
  if single_core_only and num_tensor_cores_per_logical_device != 1:
    raise ValueError(
        "Lite chips and dual-core chips that do not support Megacore must "
        "have num_tensor_cores_per_logical_device=1, but got"
        f" {num_tensor_cores_per_logical_device}."
    )
  return _get_tpu_info_impl(chip_version, num_tensor_cores_per_logical_device)
# TODO(sharadmv): Generalize Tiling to capture the various options
# (compact 2nd minor, large 2nd minor, regular tiling)
class Tiling(enum.Enum):
  """Tiling strategies used when inferring memref layouts."""

  COMPACT = enum.auto()
  SPARSE_CORE = enum.auto()

  @property
  def shape(self) -> tuple[int, ...]:
    """The base tile shape for this tiling strategy."""
    # TODO(slebedev): Use ``get_tpu_info()`` instead of hardcoding the values.
    if self is Tiling.COMPACT:
      return (8, 128)
    if self is Tiling.SPARSE_CORE:
      return (8,)
    raise NotImplementedError
def _get_tiling_factor(src: int, max_tiling: int, packing: int) -> int:
  """Doubles the base tiling until it reaches ``min(src, max_tiling)``.

  This roughly mirrors ``getTilingFactor`` in infer-memref-layout.
  """
  # Pre-v4 generations start from twice the packing factor.
  base = 2 * packing if get_tpu_info().generation < 4 else packing
  bound = min(src, max_tiling)
  factor = base
  while factor < bound:
    factor *= 2
  return factor
def infer_tiling(
    ty: jax_core.AbstractValue,
    tiling: Tiling | None = None,
) -> tuple[int | None, ...] | None:
  """Compute a tiling for the given shape and type.
  For an n-dimensional shape, returns the tiling for the last
  ``len(tiling.shape)`` dimensions and 1 for the leading dims. For example:
  - 2D tiling: (256, 256) -> (8, 128) and (2, 3, 128, 128) -> (1, 1, 8, 128).
  - 1D tiling: (16,) -> (8,) and (2, 3, 8) -> (1, 1, 8).
  Types are not required to have a dtype, so for such types we return None for
  all dimensions because their tiling is unknown.
  """
  assert hasattr(ty, "shape")
  shape = ty.shape
  if not hasattr(ty, "dtype"):
    return (None,) * len(shape)
  # ``packing`` is how many elements fit in a 32-bit word.
  # NOTE(review): dtypes wider than 32 bits would make this 0 — presumably
  # such dtypes never reach this code path; confirm upstream.
  if ty.dtype == jnp.dtype("int4"):
    packing = 8
  else:
    packing = 4 // ty.dtype.itemsize
  if tiling is None:
    tiling = Tiling.COMPACT
  tiling_rank = len(tiling.shape)
  # Special case: a 1-D shape under COMPACT tiling is tiled as if folded into
  # (sublanes, lanes), then flattened back to a single dimension.
  if len(shape) == 1 and tiling == Tiling.COMPACT:
    sublane_count, lane_count = tiling.shape
    src_sublane = pallas_utils.cdiv(shape[0], lane_count)
    max_tiling = max(sublane_count, packing)
    factor = _get_tiling_factor(src_sublane, max_tiling, packing)
    return (factor * lane_count,)
  if len(shape) < tiling_rank:
    raise ValueError(
        f"Shape must have at least {tiling_rank} dimensions: {shape=}"
    )
  # Only the trailing ``tiling_rank`` dims are tiled; leading dims get 1.
  leading_dims, final_dims = shape[:-tiling_rank], shape[-tiling_rank:]
  match tiling:  # pyrefly: ignore[non-exhaustive-match] # pyrefly#2080
    case Tiling.COMPACT:
      second_minor, _ = final_dims
      factor = _get_tiling_factor(second_minor, tiling.shape[0], packing)
      return (*(1,) * len(leading_dims), factor, tiling.shape[1])
    case Tiling.SPARSE_CORE:
      [tile_size] = tiling.shape
      return (*(1,) * len(leading_dims), tile_size * packing)
| {
"repo_id": "jax-ml/jax",
"file_path": "jax/_src/pallas/mosaic/tpu_info.py",
"license": "Apache License 2.0",
"lines": 517,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:jax/_src/pallas/mosaic_gpu/helpers.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for Pallas Mosaic GPU kernels."""
from collections.abc import Callable, Hashable, Sequence
import dataclasses
import functools
import math
from typing import TypeVar, overload
import jax
from jax import numpy as jnp
from jax import lax
from jax._src import dtypes
from jax._src.pallas.mosaic_gpu import primitives as gpu_primitives
from jax._src.pallas.mosaic_gpu import core as gpu_core
from jax._src.pallas import primitives as pallas_primitives
import numpy as np
_T = TypeVar("_T")
@dataclasses.dataclass(frozen=True, eq=False)
class NDLoopInfo:
  """Container dataclass for loop iteration information.
  Attributes:
    index: The grid indices corresponding to the current loop iteration.
    local_index: The local iteration index.
    num_local_steps: The total number of local iterations to run. None
      if unknown.
  """
  # eq=False: no value-based __eq__ is generated; instances compare and hash
  # by identity.
  index: tuple[jax.Array, ...]
  local_index: jax.Array | int
  num_local_steps: jax.Array | int | None
# Overload: no carry — the body takes only the NDLoopInfo and the loop
# returns None.
@overload
def nd_loop(
    grid: Sequence[int],
    *,
    collective_axes: Sequence[Hashable] | Hashable,
    tiling: Sequence[int] | None = None,
    init_carry: None = None
) -> Callable[[Callable[[NDLoopInfo], None]], None]:
  ...
# Overload: with init_carry — the body takes (NDLoopInfo, carry) and returns
# the updated carry; the loop returns the final carry value.
@overload
def nd_loop(
    grid: Sequence[int],
    *,
    collective_axes: Sequence[Hashable] | Hashable,
    tiling: Sequence[int] | None = None,
    init_carry: _T
) -> Callable[[Callable[[NDLoopInfo, _T], _T]], _T]:
  ...
def nd_loop(grid, *, collective_axes, tiling=None, init_carry=None):
  """A loop over a multi-dimensional grid partitioned along the given axes.

  The body of the loop takes a single argument ``loop_info``, which is an
  ``NDLoopInfo`` object containing index and iteration information. However,
  if a carry is specified, the body will expect a second keyword argument
  ``carry`` containing the loop carry.

  For example, if ``collective_axes`` is ``"x"`` with :func:`lax.axis_size`
  equal to 4 and the grid is (2, 3), the implementation would produce the
  following iteration order

    +-----------+--------+------------+
    | loop step | index  | axis index |
    +===========+========+============+
    | 0         | (0, 0) | 0          |
    +-----------+--------+------------+
    | 1         | (0, 1) | 1          |
    +-----------+--------+------------+
    | 2         | (0, 2) | 2          |
    +-----------+--------+------------+
    | 3         | (1, 0) | 3          |
    +-----------+--------+------------+
    | 4         | (1, 1) | 0          |
    +-----------+--------+------------+
    | 5         | (1, 2) | 1          |
    +-----------+--------+------------+

  which comes from partitioning the flat iteration space into chunks in an
  interleaved fashion wrt the ``"x"`` axis index.

  Note that in the example the total number of loop steps is not divisible
  by the axis size of ``"x"``, and thus for some ``"x"`` axis indices the
  loop will do one iteration less.

    +------------+------------------+
    | axis index | indices          |
    +============+==================+
    | 0          | (0, 0), (1, 1)   |
    +------------+------------------+
    | 1          | (0, 1), (1, 2)   |
    +------------+------------------+
    | 2          | (0, 2)           |
    +------------+------------------+
    | 3          | (1, 0)           |
    +------------+------------------+

  If ``init_carry`` is passed then ``nd_loop()`` will expect the body to
  take and return the carry. If it's ``None`` then no carry argument is
  expected.

  Args:
    grid: The shape of the iteration space.
    collective_axes: The axis (or axes) whose members collectively cover the
      iteration space.
    tiling: Optional per-dimension tile sizes. Each tile size must evenly
      divide the corresponding grid dimension; iteration then proceeds tile
      by tile.
    init_carry: Optional initial loop carry.

  See also:
    - :func:`jax.experimental.pallas.loop`: A loop over a single dimension.
  """
  axis_index = lax.axis_index(collective_axes)
  axis_size = lax.axis_size(collective_axes)
  if tiling:
    if len(grid) != len(tiling):
      raise ValueError(f"{tiling=} and {grid=} must have same length.")
    if any(dim % tile != 0 for dim, tile in zip(grid, tiling, strict=True)):
      raise ValueError(f"Tiling {tiling} does not divide grid {grid}.")
    # Expand each grid dimension dim into (dim // tile, tile): the major half
    # selects a tile, the minor half walks within it, so unraveling the flat
    # step below yields a tile-by-tile traversal.
    tile_grid = tuple(
        dim // tile for dim, tile in zip(grid, tiling, strict=True))
    grid = (*tile_grid, *tiling)
  grid_size = math.prod(grid)
  def decorator(body):
    def wrapper(wave_step, carry):
      nonlocal body
      # Flat steps are dealt out to axis members round-robin: member i
      # executes steps i, i + axis_size, i + 2 * axis_size, ...
      step = wave_step * axis_size + axis_index
      # The loop below is conceptually ``jnp.unravel_index``, but it uses
      # ``lax`` APIs instead of ``jax.numpy`` to minimize the number of
      # primitives used.
      index = []
      for grid_dim in reversed(grid):
        grid_dim = lax.convert_element_type(grid_dim, step.dtype)
        index.append(lax.rem(step, grid_dim))
        step = lax.div(step, grid_dim)
      index.reverse()
      if tiling:
        # Recompute index as if the grid was not tiled.
        tile_indices, subtile_indices = index[:len(tiling)], index[len(tiling):]
        untiled_index = []
        for sub_idx, tile_idx, tile_dim in zip(
            subtile_indices, tile_indices, tiling, strict=True):
          untiled_index.append(sub_idx + tile_idx * tile_dim)
        index = untiled_index
      loop_info = NDLoopInfo(
          index=tuple(index),
          local_index=wave_step,
          # NOTE: ``upper`` is assigned below in ``decorator`` before
          # ``lax.fori_loop`` first invokes this wrapper, so the forward
          # reference resolves through the closure at call time.
          num_local_steps=upper
      )
      if init_carry is None:
        body(loop_info)
      else:
        return body(loop_info, carry=carry)
    # Each member runs grid_size // axis_size steps, plus one extra step for
    # the first grid_size % axis_size members to cover the remainder.
    upper = lax.div(grid_size, axis_size) + lax.convert_element_type(
        axis_index < grid_size % axis_size, axis_index.dtype
    )
    return lax.fori_loop(0, upper, wrapper, init_carry)
  return decorator
def format_tcgen05_sparse_metadata(meta):
  """Formats the sparse metadata for tcgen05.mma into the expected format.

  See https://docs.nvidia.com/cuda/parallel-thread-execution/#tcgen05-sparse-matrices-sparsity-selector-kind-f16-m128-256
  for the documentation of the required layouts. The array can be copied into
  SMEM, from where ``plgpu.async_copy_sparse_metadata_to_tmem`` can be used to
  copy it over to TMEM.

  Args:
    meta: a uint2 array of shape (M, K // 4, 2) holding the sparsity
      selectors.

  Returns:
    The metadata rearranged into shape (m // 128, k // 64, 128, 64), where
    ``k`` is twice the second input dimension.

  Raises:
    ValueError: if the dtype is not uint2 or the shape is not (M, K // 4, 2).
  """
  if meta.dtype != dtypes.uint2:
    raise ValueError(f"Expected metadata dtype to be uint2, got: {meta.dtype}")
  if meta.ndim != 3:
    raise ValueError(
        "Expected metadata to be 3-dimensional (M, K // 4, 2), but it is"
        f" {meta.ndim}D"
    )
  m, k, _2 = meta.shape
  if _2 != 2:
    raise ValueError(
        "Expected the trailing dimension of the metadata to be 2, got:"
        f" {meta.shape[-1]}"
    )
  # Fold the trailing pair dimension into K.
  k *= 2
  # Tile into the (8, 2, 8) x (4, 2, 8) pattern required by the layout in the
  # PTX docs linked above, then flatten each tile into a 128x64 block.
  return (
      meta.reshape(m // 128, 8, 2, 8, k // 64, 4, 2, 8)
      .transpose(0, 4, 1, 6, 3, 5, 2, 7)
      .reshape(m // 128, k // 64, 128, 64)
  )
def find_swizzle(minor_dim_bits: int, what: str = ""):
  """Returns the largest swizzle that can be applied to a memory region.

  Swizzling is usually necessary when dealing with 2D data in SMEM, especially
  if the reference is used as an MMA operand. The returned swizzle is usually
  applied as ``plgpu`` transform:

    transforms = (
        plgpu.TilingTransform((8, 8 * swizzle // elem_bits)),
        plgpu.SwizzleTransform(swizzle))
    )

  Args:
    minor_dim_bits: The number of bits in the minor (last) dimension of the
      memory region. Usually computed as ``dim_size * jnp.finfo(dtype).bits``.
    what: A string describing the operand for which the swizzle is being
      computed. Improves the error message if specified.

  Raises:
    ValueError: if no swizzle applies, i.e. the minor dimension does not span
      a multiple of 16 bytes.
  """
  # Candidates are byte counts; the input is measured in bits. Try the
  # widest swizzle first.
  for candidate_bytes in (128, 64, 32, 16):
    if minor_dim_bits % (candidate_bytes * 8) == 0:
      return candidate_bytes
  suffix = " for " + what if what else ""
  raise ValueError(
      f"No valid out swizzle{suffix}: minor dimension has"
      f" {minor_dim_bits} bits, which is not a multiple of 128 (16 bytes)"
  )
def planar_snake(
    lin_idx: jax.Array, shape: tuple[int, int], minor_dim: int, tile_width: int
):
  """Converts a linear index into an index into shape, trying to optimize locality.

  The minor dimension is split into tiles of ``tile_width`` elements. Within a
  tile the linear index advances along the minor dimension first; consecutive
  tiles traverse the major dimension in opposite directions, so iteration
  "snakes around" when moving from one tile to the next.

  For a shape of (8, 8), ``minor_dim=0`` and ``tile_width=2``, the iteration
  order is::

    0  2  4  6  8 10 12 14
    1  3  5  7  9 11 13 15
    30 28 26 24 22 20 18 16
    31 29 27 25 23 21 19 17
    32 34 36 38 40 42 44 46
    33 35 37 39 41 43 45 47
    62 60 58 56 54 52 50 48
    63 61 59 57 55 53 51 49

  Each pair of rows above is one tile, and indices increase along columns in
  even tiles while decreasing in odd ones.
  """
  width = np.int32(tile_width)
  major_size = np.int32(shape[1 - minor_dim])
  minor_size = np.int32(shape[minor_dim])
  tile_idx = lax.div(lin_idx, width * major_size)
  even_tile = lax.rem(tile_idx, np.int32(2)) == 0

  def coords(flat_idx, tile_minor_len):
    # Tiles are (tile_width, major_size) when minor_dim == 0, and
    # (major_size, tile_width) otherwise.
    minor_off = lax.rem(flat_idx, tile_minor_len)
    major_off = lax.rem(lax.div(flat_idx, tile_minor_len), major_size)
    minor = tile_idx * width + minor_off
    # Odd tiles run the major dimension in reverse.
    major = lax.select(even_tile, major_off, major_size - 1 - major_off)
    return (minor, major) if minor_dim == 0 else (major, minor)

  # If tile_width does not divide the minor dimension, the trailing tile is
  # narrower; compute its coordinates separately and pick per element.
  num_full_tiles = shape[minor_dim] // width
  full_minor_extent = num_full_tiles * width
  num_full_elems = num_full_tiles * width * major_size
  in_full_tile = lin_idx < num_full_elems
  return jax.tree.map(
      functools.partial(jax.lax.select, in_full_tile),
      coords(lin_idx, width),
      coords(lin_idx - num_full_elems, minor_size - full_minor_extent),
  )
@overload
def dynamic_scheduling_loop(
grid_names: Sequence[Hashable],
*,
thread_axis: Hashable | None = None,
init_carry: None = None
) -> Callable[[Callable[[NDLoopInfo], None]], None]:
...
@overload
def dynamic_scheduling_loop(
grid_names: Sequence[Hashable],
*,
thread_axis: Hashable | None = None,
init_carry: _T
) -> Callable[[Callable[[NDLoopInfo, _T], _T]], _T]:
...
def dynamic_scheduling_loop(
grid_names,
thread_axis = None,
init_carry = None):
"""A loop over program instances using dynamic work scheduling.
This loop will iterate through available program instances until all
work has been scheduled. The kernel should be instantiated with a grid
equal to the logical amount of work to be done (as opposed to a persistent
kernel where the grid is set to the number of cores). Each core running
this loop will continuously query the next available block of work and
the loop will terminate when the entire grid has been scheduled.
Example usage::
@plgpu.dynamic_scheduling_loop(grid_names)
def body(loop_info):
work(loop_info.index) # do work...
Args:
grid_names: The names of the axes in the grid.
thread_axis: The name of the thread axis. This must be passed in if
the kernel uses multiple threads.
init_carry: An optional initial carry for the loop. If passed in, the
body function should expect a ``carry`` keyword argument and return
the next carry value.
"""
if thread_axis is not None:
num_threads = lax.axis_size(thread_axis)
else:
num_threads = 1
user_carry = init_carry
def decorator(body):
grid_idx = tuple(lax.axis_index(axis_name) for axis_name in grid_names)
success = True
def _scoped(try_cancel_buffer, try_cancel_barrier, cancel_used_barrier):
gpu_primitives.barrier_arrive(cancel_used_barrier)
def try_cancel_cond(carry):
_, success, _, _ = carry
return success
def try_cancel_body(carry):
grid_idx, _, wave_step, user_carry = carry
slot = lax.rem(wave_step, jnp.int32(2))
gpu_primitives.barrier_wait(cancel_used_barrier)
gpu_primitives.try_cluster_cancel(
try_cancel_buffer.at[slot], try_cancel_barrier
)
loop_info = NDLoopInfo(
index=grid_idx,
local_index=wave_step,
num_local_steps=None,
)
if user_carry is None:
body(loop_info)
else:
user_carry = body(loop_info, carry=user_carry)
gpu_primitives.barrier_wait(try_cancel_barrier)
grid_idx, success = gpu_primitives.query_cluster_cancel(
try_cancel_buffer.at[slot],
grid_names=grid_names)
gpu_primitives.barrier_arrive(cancel_used_barrier)
return (grid_idx, success, wave_step + jnp.int32(1), user_carry)
init_carry = (grid_idx, success, jnp.int32(0), user_carry)
final_carry = lax.while_loop(
try_cancel_cond,
try_cancel_body,
init_carry,
)
gpu_primitives.barrier_wait(cancel_used_barrier)
if user_carry is not None:
return final_carry[-1]
return pallas_primitives.run_scoped(
_scoped,
try_cancel_buffer=gpu_core.TryClusterCancelResult(2),
try_cancel_barrier=gpu_core.Barrier(num_arrivals=num_threads),
cancel_used_barrier=gpu_core.Barrier(num_arrivals=num_threads),
collective_axes=thread_axis,
)
return decorator
| {
"repo_id": "jax-ml/jax",
"file_path": "jax/_src/pallas/mosaic_gpu/helpers.py",
"license": "Apache License 2.0",
"lines": 337,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:jax/_src/pallas/mosaic_gpu/torch.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch interop for Mosaic GPU."""
from __future__ import annotations
import ctypes
from collections import defaultdict
import functools
import itertools
from typing import Callable, TypeGuard, Mapping
import weakref
import jax
import jax.numpy as jnp
from jax._src import util
from jax._src.lib.mlir import ir
from jax._src.lib.mlir import passmanager
from jax._src.lib.mlir.dialects import func
from jax._src.lib.mlir.dialects import hlo
import jax.experimental.mosaic.gpu as mgpu
from jax.experimental.mosaic.gpu import core as mgpu_core
def as_torch_kernel(fn):
  """Makes a Mosaic GPU kernel callable with PyTorch tensors.

  Args:
    fn: A JAX function that invokes a Mosaic GPU kernel. Note that
      the implementation currently only supports functions that contain a
      single Mosaic GPU kernel invocation, without any other JAX API calls,
      e.g. from :mod:`jax.numpy`.

  Returns:
    A wrapper function that accepts PyTorch tensors as inputs and returns
    PyTorch tensors as outputs. The output tensors are allocated on the
    same device as the input tensors.

  Example::

    @functools.partial(
        pl.pallas_call, out_shape=jax.ShapeDtypeStruct([128], jnp.int32)
    )
    def add_kernel(x_ref, y_ref, o_ref):
      o_ref[...] = x_ref[...] + y_ref[...]

    x = torch.arange(128, dtype=torch.int32, device="cuda")
    y = x * x
    out = plgpu.as_torch_kernel(add_kernel)(x, y)
  """

  def _as_struct(tensor):
    # torch dtypes stringify as e.g. "torch.int32"; keep only the suffix so
    # it can be interpreted as a JAX dtype name.
    dtype_name = str(tensor.dtype).split(".")[-1]
    return jax.ShapeDtypeStruct(tensor.shape, dtype_name)

  @functools.wraps(fn)
  def wrapper(*args):
    in_structs = jax.tree.map(_as_struct, args)
    return _compile_fn(fn, in_structs)(*args)

  return wrapper
def _find_mgpu_call_in_module(module: ir.Module):
  """Locates the Mosaic GPU call inside the module's single ``main`` function."""
  mains = [
      op
      for op in module.body.operations
      if isinstance(op, func.FuncOp) and op.name.value == "main"
  ]
  # TODO(apaszke): Add support for jax.jit, which will call another function
  # from main.
  if len(mains) != 1:
    raise ValueError("Expected a single function in the kernel module")
  [body_block] = mains[0].body.blocks
  return _find_mgpu_call(body_block, list(body_block.arguments))
def _mlir_to_torch_dtype(torch, mlir_dtype: ir.Type):
  """Maps an MLIR element type to the corresponding torch dtype."""
  # Floating-point types have fixed-name torch counterparts.
  for mlir_cls, torch_name in (
      (ir.F32Type, "float32"),
      (ir.F16Type, "float16"),
      (ir.BF16Type, "bfloat16"),
  ):
    if mlir_dtype == mlir_cls.get():
      return getattr(torch, torch_name)
  # Integers map by signedness and width (signless is treated as signed).
  if isinstance(mlir_dtype, ir.IntegerType):
    int_type = ir.IntegerType(mlir_dtype)
    prefix = "int" if (int_type.is_signed or int_type.is_signless) else "uint"
    return getattr(torch, f"{prefix}{int_type.width}")
  raise NotImplementedError(f"Unsupported MLIR type: {mlir_dtype}")
def _find_mgpu_call(block: ir.Block, args: list[ir.Value]):
  """Interprets *block* and extracts the single Mosaic GPU custom call.

  Walks the ops in the block and, for each supported op, records a thunk that
  reproduces its effect on PyTorch tensors at call time. Returns a tuple
  ``(mgpu_call, prepare_args, prepare_outputs, get_outputs)`` where the three
  callables respectively materialize the kernel operands, allocate (or alias)
  the kernel outputs, and select the user-visible results.

  NOTE(review): the ``args`` parameter appears unused — the code reads
  ``block.arguments`` directly below; confirm whether it can be dropped at
  the call site.
  """
  import torch  # type: ignore[import-not-found] # pytype: disable=import-error
  mgpu_call: hlo.CustomCallOp | None = None
  get_outputs = None
  # Thunks run in op order at call time to populate the value environment.
  to_evaluate: list[Callable] = []
  # Environment entries known statically (from constants).
  init_env = {}
  # Assigns a fresh integer name to each MLIR value on first lookup so that
  # values can be referenced after the MLIR objects are gone.
  name_source = itertools.count()
  value_names: Mapping[ir.Value, int] = defaultdict(lambda: next(name_source))
  for op in block.operations:
    if _is_custom_call(op, "AllocateBuffer"):
      # Shape/dtype/name are captured as default arguments to avoid the
      # late-binding closure pitfall (``op`` changes every iteration).
      def allocate_torch_buffer(
          env,
          device,
          _shape=op.result.type.shape,
          _dtype=_mlir_to_torch_dtype(torch, op.result.type.element_type),
          _result_name=value_names[op.result],
      ):
        env[_result_name] = torch.empty(_shape, dtype=_dtype, device=device)
      to_evaluate.append(allocate_torch_buffer)
    elif _is_custom_call(op, "mosaic_gpu_v2"):
      if mgpu_call is not None:
        raise ValueError("Multiple Mosaic GPU kernels found in the module")
      mgpu_call = op
    elif op.name == "func.return" or op.name == "sdy.return":
      if mgpu_call is None:
        raise ValueError("No Mosaic GPU call found in the module")
      if get_outputs is not None:
        raise ValueError("Multiple return ops found in the module")
      # The function may return any subset/permutation of kernel results;
      # record which kernel result feeds each returned value.
      mgpu_results = list(mgpu_call.results)
      try:
        out_indices = [mgpu_results.index(o) for o in op.operands]
      except ValueError:
        raise ValueError("The function can only return kernel results") from None
      def get_outputs(*results, _out_indices=out_indices):
        return tuple(results[i] for i in _out_indices)
    elif op.name == "stablehlo.constant":
      result_type = ir.ShapedType(op.result.type)
      if result_type.shape:
        raise ValueError(f"Only scalar constants are supported, got {op}")
      if not op.value.is_splat:
        raise ValueError(f"Only splat constants are supported, got {op}")
      if result_type.element_type == ir.IntegerType.get_signless(32):
        init_env[value_names[op.result]] = ir.IntegerAttr(
            op.value.get_splat_value()
        ).value
      else:
        raise NotImplementedError(f"Only i32 constants are supported, got {op}")
    elif op.name == "stablehlo.broadcast_in_dim":
      if op.broadcast_dimensions:
        raise ValueError("Only scalar broadcasts are supported")
      target_shape = tuple(op.result.type.shape)
      result_name = value_names[op.result]
      operand_name = value_names[op.operand]
      # NOTE(review): the operand is assumed to be i32 (the only constant
      # type supported above) — confirm for non-constant operands.
      dtype = torch.int32
      def run_broadcast(
          env,
          device,
          _target_shape=target_shape,
          _dtype=dtype,
          _operand_name=operand_name,
          _result_name=result_name,
      ):
        env[_result_name] = torch.broadcast_to(
            torch.as_tensor(env[_operand_name], dtype=_dtype, device=device),
            _target_shape,
        )
      to_evaluate.append(run_broadcast)
    else:
      raise ValueError(f"Unsupported operation found in the kernel module: {op}")
  if mgpu_call is None:
    raise ValueError("No Mosaic GPU call found in the module")
  if get_outputs is None:
    raise ValueError("No return op found in the module")
  block_arg_names = [value_names[arg] for arg in block.arguments]
  mgpu_arg_names = [value_names[arg] for arg in mgpu_call.operands]
  def prepare_args(*user_args, device):
    # Seed the environment with constants and the user inputs, run the
    # recorded thunks, then read the kernel operands off by name.
    env = dict(init_env)
    for name, arg in zip(block_arg_names, user_args, strict=True):
      env[name] = arg
    for thunk in to_evaluate:
      thunk(env, device)
    return tuple(env[name] for name in mgpu_arg_names)
  # Maps each kernel output to the operand index it aliases, or None when a
  # fresh output tensor must be allocated.
  output_input_aliases: list[int | None] = [None] * len(mgpu_call.results)
  for alias in mgpu_call.output_operand_aliases or []:
    alias = hlo.OutputOperandAlias(alias)
    if alias.operand_tuple_indices:
      raise NotImplementedError("Tupled operand indices not supported")
    if len(alias.output_tuple_indices) > 1:
      raise NotImplementedError("Expected one element in output_tuple_indices")
    [output_index] = alias.output_tuple_indices or (0,)
    output_input_aliases[output_index] = alias.operand_index
  output_types = [
      (result.type.shape, _mlir_to_torch_dtype(torch, result.type.element_type))
      for result in mgpu_call.results
  ]
  def prepare_outputs(*all_args, device):
    outputs = []
    for ty, alias in zip(output_types, output_input_aliases, strict=True):
      if alias is not None:
        outputs.append(all_args[alias])
        continue
      outputs.append(torch.empty(ty[0], dtype=ty[1], device=device))
    return outputs
  return mgpu_call, prepare_args, prepare_outputs, get_outputs
def _is_custom_call(op: ir.Operation, name: str) -> TypeGuard[hlo.CustomCallOp]:
  """True iff *op* is a custom call whose target is *name*."""
  if not isinstance(op, hlo.CustomCallOp):
    return False
  return op.call_target_name.value == name
@util.weakref_lru_cache
def _compile_fn(fn, in_structs):
  """Lowers *fn* to a Mosaic GPU kernel and wraps it for PyTorch callers.

  Returns an ``apply(*tensors)`` callable that validates the argument
  structure/shapes/dtypes, materializes the kernel operands and outputs on
  the CUDA device, launches the compiled kernel through a raw device-pointer
  list, and re-assembles the user-visible outputs. Results are cached per
  ``(fn, in_structs)`` via ``weakref_lru_cache``.

  Raises:
    RuntimeError: if PyTorch cannot be imported.
    ValueError: if the traced module does not contain exactly one Mosaic GPU
      call, or (from ``apply``) if arguments mismatch the traced structure.
  """
  try:
    import torch  # type: ignore[import-not-found] # pytype: disable=import-error
  except ImportError:
    raise RuntimeError("Can't compile for PyTorch: import torch failed") from None
  traced = jax.jit(fn).trace(*in_structs)
  main_module = traced.lower().compiler_ir()
  with main_module.context:
    # jax.jit outlines its bodies which we undo for the interpreter.
    mgpu.dialect.register_inliner_extensions(main_module.context)
    inliner_pass = passmanager.PassManager.parse(
        "builtin.module(inline{default-pipeline=})"
    )
    inliner_pass.run(main_module.operation)
    mgpu_call, prepare_args, prepare_outputs, get_outputs = _find_mgpu_call_in_module(
        main_module
    )
    if not isinstance(in_structs, tuple):
      in_structs = (in_structs,)
    if isinstance(traced.out_info, tuple):
      out_structs = traced.out_info
      unwrap_output_tuple = False
    else:
      # A single (non-tuple) output is wrapped for uniform handling and
      # unwrapped again on the way out.
      out_structs = (traced.out_info,)
      unwrap_output_tuple = True
    flat_arg_types, expected_arg_treedef = jax.tree.flatten(in_structs)
    _, out_treedef = jax.tree.flatten(out_structs)
    backend_config = mgpu_call.attributes["mhlo.backend_config"]
    module_asm = backend_config["module"].value_bytes
  launch, unload = mgpu_core._compile_as_torch_gpu_kernel(module_asm)
  def as_torch_dtype(dtype):
    # torch contains NumPy-compatible dtypes in its top namespace
    return getattr(torch, jnp.dtype(dtype).name)
  def apply(*user_args):
    flat_user_args, arg_treedef = jax.tree.flatten(user_args)
    if arg_treedef != expected_arg_treedef:
      raise ValueError(
          f"Invalid argument structure: expected {expected_arg_treedef}, got"
          f" {arg_treedef}, ({user_args=})"
      )
    for arg, expected_ty in zip(flat_user_args, flat_arg_types):
      if arg.shape != expected_ty.shape:
        raise ValueError(
            f"Argument shape mismatch: expected {expected_ty.shape}, got"
            f" {arg.shape}"
        )
      if arg.dtype != as_torch_dtype(expected_ty.dtype):
        raise ValueError(
            "Argument dtype mismatch: expected"
            f" {as_torch_dtype(expected_ty.dtype)}, got {arg.dtype}"
        )
    # We run all the ops that are necessary to prepare the arguments
    device = torch.device("cuda")
    flat_args = prepare_args(*flat_user_args, device=device)
    flat_outs = prepare_outputs(*flat_args, device=device)
    # Construct a device pointer list like in the XLA calling convention
    buffers = (ctypes.c_void_p * (len(flat_args) + len(flat_outs)))()
    for i, arg in enumerate(itertools.chain(flat_args, flat_outs)):
      buffers[i] = arg.data_ptr()
    launch(buffers, device)
    user_outs = get_outputs(*flat_outs)
    out = jax.tree.unflatten(out_treedef, user_outs)
    return out[0] if unwrap_output_tuple else out
  # Unload the compiled code when the Python function is destroyed.
  # Bug fix: the callback previously evaluated to ``unload`` without calling
  # it, so the compiled kernel was never actually unloaded.
  # pyrefly: ignore[missing-attribute]
  apply.destructor = weakref.ref(apply, lambda _weak_ref: unload())
  return apply
| {
"repo_id": "jax-ml/jax",
"file_path": "jax/_src/pallas/mosaic_gpu/torch.py",
"license": "Apache License 2.0",
"lines": 263,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:jax/_src/pallas/pipelining/internal.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Internal APIs and data structures for the custom pipelining API."""
from collections.abc import Hashable, Sequence
import dataclasses
from jax._src import core as jax_core
from jax._src.state import types as state_types
ReadEffect = state_types.ReadEffect
WriteEffect = state_types.WriteEffect
RefEffect = state_types.ReadEffect | state_types.WriteEffect
BufferIndex = int | str
def filter_write_effects(effects: set[RefEffect]) -> set[WriteEffect]:
return {effect for effect in effects if isinstance(effect, WriteEffect)}
def filter_read_effects(effects: set[RefEffect]) -> set[ReadEffect]:
return {effect for effect in effects if isinstance(effect, ReadEffect)}
def filter_tokens(effects: set[RefEffect]) -> set[RefEffect]:
return {effect for effect in effects if isinstance(effect.input_index, str)}
@dataclasses.dataclass(frozen=True)
class SchedulingProperties:
  """Scheduling metadata attached to a pipeline stage."""
  # How many iterations of this stage may be in flight at once.
  max_in_flight: int
  # Whether this stage issues an asynchronous operation.
  is_async_start: bool
  # Whether this stage completes an asynchronous operation.
  is_async_done: bool

  def __post_init__(self):
    # A single stage cannot both issue and complete an async operation.
    if not (self.is_async_start and self.is_async_done):
      return
    raise ValueError("Async start and async done are mutually exclusive.")
@dataclasses.dataclass(frozen=True)
class PipelineStage:
  """An internal representation of a pipeline stage."""
  # The traced stage body.
  jaxpr: jax_core.ClosedJaxpr
  # The Ref effects the body has on its buffer arguments.
  effects: set[RefEffect]
  # Scheduling constraints for this stage.
  properties: SchedulingProperties
  # Human-readable stage name, used by __str__/__repr__.
  name: str

  def get_read_idxs(self) -> set[BufferIndex]:
    """Returns the buffer indices that this stage reads from."""
    reads = filter_read_effects(self.effects)
    return {eff.input_index for eff in reads}

  def get_write_idxs(self) -> set[BufferIndex]:
    """Returns the buffer indices that this stage writes to."""
    writes = filter_write_effects(self.effects)
    return {eff.input_index for eff in writes}

  def __str__(self):
    return self.name

  def __repr__(self):
    return f"{self.name}[effs={self.effects}]"
@dataclasses.dataclass(frozen=True)
class NDLoopStruct:
  """Groups the pipeline stages with the grid they iterate over."""
  # The pipeline stages.
  stages: Sequence[PipelineStage]
  # The loop grid shape (one extent per dimension).
  grid: Sequence[int]
def make_token(obj: Hashable) -> str:
  """Returns a fake input ID used to thread data dependencies.

  The token is derived from ``hash(obj)``, so the same object always maps to
  the same token string.
  """
  return "token_%d" % hash(obj)
| {
"repo_id": "jax-ml/jax",
"file_path": "jax/_src/pallas/pipelining/internal.py",
"license": "Apache License 2.0",
"lines": 67,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:jax/_src/pallas/pipelining/pipeline_test_util.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test utilities for the custom pipeline scheduling API."""
import dataclasses
from typing import Any, Sequence
from jax._src import debugging
from jax._src.pallas.pipelining import schedulers
from jax._src.pallas.pipelining import internal
def print_stage(
    ctx: schedulers.PipelineContext, stage: internal.PipelineStage, *args
):
  """Evaluation function that prints the stage name and iteration number."""
  del args  # Buffer arguments are unused by this debug hook.
  # The ``%s`` is resolved eagerly with the stage name; the ``{}`` is left
  # for debug_print to fill with the (possibly traced) iteration index.
  fmt = "[itr={}] %s" % stage
  debugging.debug_print(fmt, ctx.linearized_index, ordered=True)
@dataclasses.dataclass(frozen=True)
class AnyOrder:
  """A helper class to mark the order of elements as unimportant."""
  elements: Sequence[Any]


def compare_lists(result, expected):
  """Returns if two lists are equal while respecting ``AnyOrder`` elements.

  An ``AnyOrder`` entry in ``expected`` matches its elements in any order at
  that position of ``result``.

  Args:
    result: The sequence produced by the code under test.
    expected: The reference sequence; may contain ``AnyOrder`` entries.

  Returns:
    True iff the two sequences match exactly.
  """
  result_ptr = 0
  expected_ptr = 0
  any_order_set: set[Any] | None = None
  while result_ptr < len(result) and expected_ptr < len(expected):
    cur_result = result[result_ptr]
    cur_expected = expected[expected_ptr]
    if isinstance(cur_expected, AnyOrder):
      if any_order_set is None:
        any_order_set = set(cur_expected.elements)
      if cur_result not in any_order_set:
        return False
      any_order_set.remove(cur_result)
      result_ptr += 1
      if not any_order_set:
        # The whole AnyOrder group has been matched; move past it.
        any_order_set = None
        expected_ptr += 1
    elif cur_result == cur_expected:
      result_ptr += 1
      expected_ptr += 1
    else:
      return False
  # Bug fix: previously any leftover elements on either side were ignored,
  # so e.g. compare_lists([1], [1, 2]) returned True. Equality requires both
  # sequences to be fully consumed, and a partially-matched AnyOrder counts
  # as unconsumed expected elements.
  if any_order_set:
    return False
  return result_ptr == len(result) and expected_ptr == len(expected)
| {
"repo_id": "jax-ml/jax",
"file_path": "jax/_src/pallas/pipelining/pipeline_test_util.py",
"license": "Apache License 2.0",
"lines": 56,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:jax/_src/pallas/pipelining/schedule_api.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# mypy: ignore-errors
# pyrefly: ignore-errors
# pylint: disable=missing-function-docstring
# pylint: disable=g-doc-args
# pytype: disable=wrong-keyword-args
"""Internal API for the Pallas pipelining scheduler."""
import dataclasses
from typing import Any, Optional, Sequence
import jax
from jax._src import api_util
from jax._src import core as jax_core
from jax._src import linear_util as lu
from jax._src.interpreters import partial_eval as pe
from jax._src.state import types as state_types
from jax._src.pallas.pipelining import schedulers
from jax._src.pallas.pipelining import internal
# Re-exported so users of this module don't need to import ``schedulers``.
PipelineContext = schedulers.PipelineContext
def stage(max_in_flight: int):
  """Wrapper for creating a pipeline stage.

  Args:
    max_in_flight: Maximum number of iterations of the stage that may be in
      flight at once.

  Returns:
    A decorator turning a stage body function into a ``SyncStage``.
  """
  def decorate(func) -> SyncStage:
    return SyncStage(func, max_in_flight)
  return decorate
class SyncStage:
  """Constructs a synchronous pipeline stage."""

  def __init__(self, func, max_in_flight: int):
    self.func = func
    self.max_in_flight = max_in_flight

  def trace(
      self, abstract_refs, state_avals, grid
  ) -> internal.PipelineStage:
    """Traces the wrapped function and packages it as a PipelineStage."""
    closed_jaxpr, ref_effects = trace_fun(
        self.func, abstract_refs, state_avals, grid
    )
    props = internal.SchedulingProperties(
        max_in_flight=self.max_in_flight,
        is_async_start=False,
        is_async_done=False,
    )
    stage_name = getattr(self.func, "__name__", str(self.func))
    return internal.PipelineStage(
        jaxpr=closed_jaxpr,
        effects=set(ref_effects),
        properties=props,
        name=stage_name,
    )
class AsyncStage:
  """Constructs an asynchronous pipeline stage."""

  def __init__(self, max_in_flight: int):
    self.start_func = None
    self.end_func = None
    self.max_in_flight = max_in_flight

  def def_start(self, func):
    """Registers the function that issues the async operation."""
    self.start_func = func
    return self

  def def_end(self, func):
    """Registers the function that completes the async operation."""
    self.end_func = func
    return self

  def trace(
      self, abstract_refs, state_avals, grid
  ) -> tuple[internal.PipelineStage, internal.PipelineStage]:
    """Traces both halves and links them through a synthetic token."""
    start_jaxpr, start_effs = trace_fun(
        self.start_func, abstract_refs, state_avals, grid
    )
    end_jaxpr, end_effs = trace_fun(
        self.end_func, abstract_refs, state_avals, grid
    )
    # The token threads a write->read dependency from the start half to the
    # done half so the scheduler keeps them ordered.
    token = internal.make_token(self)
    start_effs = {*start_effs, internal.WriteEffect(token)}
    end_effs = {*end_effs, internal.ReadEffect(token)}

    def _as_stage(jaxpr, effs, func, *, is_start):
      # One-liner: wraps a traced half into a PipelineStage with the right
      # async start/done flags.
      return internal.PipelineStage(
          jaxpr=jaxpr,
          effects=effs,
          properties=internal.SchedulingProperties(
              max_in_flight=self.max_in_flight,
              is_async_start=is_start,
              is_async_done=not is_start,
          ),
          name=getattr(func, "__name__", str(func)),
      )

    return (
        _as_stage(start_jaxpr, start_effs, self.start_func, is_start=True),
        _as_stage(end_jaxpr, end_effs, self.end_func, is_start=False),
    )
# Union of the two user-facing stage-builder flavors.
Stage = SyncStage | AsyncStage
def trace_fun(
    fun, ref_avals, state_avals, grid
) -> tuple[jax_core.ClosedJaxpr, Sequence[internal.RefEffect]]:
  """Trace a stage body function to a Jaxpr.

  Args:
    fun: the stage body; traced with ``(ctx, *refs)`` flattened inputs.
    ref_avals: abstract values for the Refs passed to the stage.
    state_avals: abstract values for the state carried in the pipeline
      context.
    grid: the pipeline grid shape.

  Returns:
    A ``(closed_jaxpr, ref_effects)`` pair; the effect indices are rebased
    (consts and context avals subtracted) so index 0 refers to the first Ref
    argument.
  """
  ctx_aval = PipelineContext.aval_pytree(grid, state_avals)
  num_ctx_avals = len(jax.tree.leaves(ctx_aval))
  flat_avals, in_tree = jax.tree.flatten((ctx_aval, *ref_avals))
  debug_info = api_util.debug_info("trace_fun", fun, flat_avals, {})
  flat_fn, out_tree_thunk = api_util.flatten_fun_nokwargs(
      lu.wrap_init(fun, debug_info=debug_info), in_tree
  )
  # The output tree is not needed; stages communicate through Ref effects.
  del out_tree_thunk
  jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(flat_fn, flat_avals)
  ref_effects = [
      eff for eff in jaxpr.effects if isinstance(eff, state_types.RefEffect)
  ]
  # Subtract off the consts and state_avals, since this is variable per stage.
  n_const = len(consts)
  ref_effects = [
      type(eff)(input_index=eff.input_index - n_const - num_ctx_avals)
      for eff in ref_effects
  ]
  return jax_core.ClosedJaxpr(jaxpr, consts), ref_effects
def apply_ref_filter(
    stages: Sequence[internal.PipelineStage],
    ref_filter: Any,
    grid, state_avals
) -> Sequence[internal.PipelineStage]:
  """Removes any effects belonging to Refs that do not pass the filter."""
  if ref_filter is None:
    return stages
  ctx_aval = PipelineContext.aval_pytree(grid, state_avals)
  num_ctx_avals = len(jax.tree.leaves(ctx_aval))
  filtered_stages = []
  for cur in stages:
    # Indices of Refs (rebased past the context avals) that pass the filter.
    kept_idxs = {
        i - num_ctx_avals
        for i, aval in enumerate(cur.jaxpr.jaxpr.in_avals)
        if ref_filter(aval)
    }
    # Token effects (string indices) are always preserved.
    tokens = list(internal.filter_tokens(cur.effects))
    surviving = [
        eff for eff in cur.effects if eff.input_index in kept_idxs
    ] + tokens
    filtered_stages.append(
        dataclasses.replace(cur, effects=set(surviving)))
  return filtered_stages
def convert_accum_effects_to_writes(stages: Sequence[internal.PipelineStage]
                                    ) -> Sequence[internal.PipelineStage]:
  """Replaces all accumulate effects with simple writes."""
  # After tracing, an accumulation such as ref[...] += y shows up as both a
  # ReadEffect and a WriteEffect on the same Ref; drop the read so that only
  # the write remains.
  converted = []
  for cur in stages:
    writes = internal.filter_write_effects(cur.effects)
    pure_reads = {
        eff
        for eff in internal.filter_read_effects(cur.effects)
        if state_types.WriteEffect(eff.input_index) not in writes
    }
    converted.append(
        dataclasses.replace(cur, effects=pure_reads | writes))
  return converted
def remove_duplicate_writes_between_async_stages(
    stages: Sequence[internal.PipelineStage],
) -> Sequence[internal.PipelineStage]:
  """Removes duplicate writes between the async start and done stages.

  This is done because the scheduler doesn't support multiple writes to
  the same Ref in different stages. We instead write to a token in the
  async_start stage that's read by the async_done and all direct consumers.
  """
  new_stages = []
  for stage_ in stages:
    if stage_.properties.is_async_start:
      start_read_effs = internal.filter_read_effects(stage_.effects)
      start_write_effs = internal.filter_write_effects(stage_.effects)
      # Each async_start writes exactly one token.
      write_token = internal.filter_tokens(start_write_effs)
      assert len(write_token) == 1, stage_.effects
      write_token = tuple(write_token)[0]
      read_token = state_types.ReadEffect(write_token.input_index)
      # The matching async_done is the unique async_done stage that reads
      # this start's token.
      done_stage = [
          x
          for x in stages
          if x.properties.is_async_done and read_token in x.effects
      ]
      assert len(done_stage) == 1
      done_stage = done_stage[0]
      end_write_effs = internal.filter_write_effects(done_stage.effects)
      # Drop any writes from the start stage that its done stage also
      # performs, so each Ref keeps a single writing stage.
      start_write_effs = start_write_effs - end_write_effs
      start_effs = (*start_read_effs, *start_write_effs)
      new_stages.append(dataclasses.replace(stage_, effects=set(start_effs)))
    else:
      new_stages.append(stage_)
  return new_stages
def thread_token_deps_to_consumers(stages: "Sequence[internal.PipelineStage]"
                                   ) -> "Sequence[internal.PipelineStage]":
  """Threads the async token to consumers of async op.

  This ensures that the async_start op does not start too soon and potentially
  clobber buffers that the consumers are reading from.

  Args:
    stages: The pipeline stages to transform.

  Returns:
    New stages in which every consumer of an async_done's outputs also reads
    that async op's token.
  """
  # Copy each effect set so the caller's stages are never mutated in place.
  # (The previous version aliased stage_.effects and called .add() on it,
  # which silently modified the input stages as a side effect.)
  effects = [set(stage_.effects) for stage_ in stages]
  for stage_ in stages:
    if stage_.properties.is_async_done:
      write_tokens = internal.filter_tokens(
          internal.filter_write_effects(stage_.effects)
      )
      read_tokens = internal.filter_tokens(
          internal.filter_read_effects(stage_.effects)
      )
      # An async_done never writes a token and reads exactly the one token
      # produced by its async_start.
      assert not write_tokens, stage_.effects
      assert len(read_tokens) == 1, stage_.effects
      read_token_effect = tuple(read_tokens)[0]
      write_idxs = stage_.get_write_idxs()
      # Any stage that reads a buffer written by this async_done must also
      # read the async token so the scheduler orders it after the done.
      for i, other_stage in enumerate(stages):
        if any(
            write_idx in other_stage.get_read_idxs() for write_idx in write_idxs
        ):
          effects[i].add(read_token_effect)
  return [dataclasses.replace(stage_, effects=effects[i])
          for i, stage_ in enumerate(stages)]
def schedule_pipeline(
    stages: Sequence[Stage],
    grid: Sequence[int],
    args: Sequence[Any],
    ref_filter: Optional[Any] = None,
    initial_state: schedulers.PipelineState | None = None,
    scheduler: schedulers.PipelineScheduler = schedulers.static_nd_loop_scheduler,
    **scheduler_kwargs,
):
  """Schedules stages and emits the code for a pipeline.

  Args:
    stages: A sequence of pipeline stages.
    grid: The loop grid size.
    args: A sequence of arguments to the pipeline. These will be passed
      directly to each stage.
    ref_filter: An optional function to filter out Refs during tracing so
      that they do not affect the pipeline schedule.
    initial_state: An optional pipeline state that will be passed as a
      carry into each stage.
    scheduler: Which scheduling function to use.
    **scheduler_kwargs: Additional arguments to pass to the scheduler.

  Returns:
    A function that can be called with ``args`` and runs the pipeline.
  """
  _, ref_tree = jax.tree.flatten(args)
  def _get_aval(x):
    # Prefer the Ref aval when available, so stages trace against Refs
    # rather than plain values.
    if hasattr(x, "get_ref_aval"):
      return x.get_ref_aval()
    return jax_core.get_aval(x)
  avals = jax.tree.map(_get_aval, args)
  # Make state avals.
  state_avals = jax.tree.map(_get_aval, initial_state)
  traced_stages = []
  for stage in stages:
    if isinstance(stage, SyncStage):
      traced_stages.append(stage.trace(avals, state_avals, grid))
    elif isinstance(stage, AsyncStage):
      # An async stage traces into a separate start/done stage pair.
      start_stage, end_stage = stage.trace(avals, state_avals, grid)
      traced_stages.append(start_stage)
      traced_stages.append(end_stage)
    else:
      raise ValueError(f"Unsupported stage type: {type(stage)}")
  # Run several "passes" to clean up effects before scheduling.
  traced_stages = apply_ref_filter(traced_stages, ref_filter, grid, state_avals)
  traced_stages = convert_accum_effects_to_writes(traced_stages)
  traced_stages = remove_duplicate_writes_between_async_stages(traced_stages)
  traced_stages = thread_token_deps_to_consumers(traced_stages)
  loop_struct = internal.NDLoopStruct(stages=traced_stages, grid=grid)
  def pipeline(*args):
    # NOTE(review): ref_tree was built by flattening the `args` container
    # passed at schedule time; callers should pass the same container type
    # here so the structures compare equal — confirm.
    flat_args, args_tree = jax.tree.flatten(args)
    if args_tree != ref_tree:
      raise ValueError(
          f"Args tree and ref tree do not match.\n{args_tree=}\n{ref_tree=}"
      )
    scheduler(
        loop_struct,
        args=flat_args,
        initial_state=initial_state,
        **scheduler_kwargs,
    )
  return pipeline
| {
"repo_id": "jax-ml/jax",
"file_path": "jax/_src/pallas/pipelining/schedule_api.py",
"license": "Apache License 2.0",
"lines": 288,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:jax/_src/pallas/pipelining/schedulers.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# mypy: ignore-errors
# pyrefly: ignore-errors
# pytype: disable=invalid-annotation
# pytype: disable=wrong-arg-types
# pytype: disable=bad-return-type
# pylint: disable=missing-function-docstring
# pylint: disable=g-doc-args
"""Pipeline scheduler implementations."""
import collections
from collections.abc import Callable, Mapping, Sequence
import copy
import dataclasses
import functools
import math
import operator
from typing import Any, cast, Protocol
import jax
from jax import lax
from jax import numpy as jnp
from jax._src import core as jax_core
import numpy as np
from jax._src.pallas.pipelining import internal
PipelineState = Any
PipelineScheduler = Callable[
[internal.NDLoopStruct, Sequence[Any], Any, Any], None]
def compute_grid_indices(linear_index: jax.Array, grid_size: Sequence[int]):
  """Computes the grid indices for a given linear index."""
  # Row-major decomposition: each dimension's index is the quotient of the
  # remaining linear index by the product of all faster (later) dimensions.
  suffix_sizes = [math.prod(grid_size[k + 1:]) for k in range(len(grid_size))]
  remaining = linear_index
  result = []
  for block in suffix_sizes:
    result.append(remaining // block)
    remaining = lax.rem(remaining, block)
  return result
def increment_grid(indices: Sequence[int | jax.Array],
grid: Sequence[int],
dynamic: bool = False):
"""Increments the grid indices by 1."""
next_indices = []
carry: bool | jax.Array = True
for idx, size in reversed(list(zip(indices, grid, strict=True))):
if dynamic:
idx = cast(jax.Array, idx)
next_idx = lax.select(carry, idx + 1, idx)
carry = next_idx == size
next_indices.append(
lax.select(carry, jnp.asarray(0, dtype=idx.dtype), next_idx)
)
else:
next_idx = idx + 1 if carry else idx
carry = next_idx == size
next_indices.append(0 if carry else next_idx)
return tuple(reversed(next_indices)), carry
@functools.partial(jax.tree_util.register_dataclass,
                   data_fields=["loop_index",
                                "linearized_index",
                                "pipeline_state"],
                   meta_fields=[])
@dataclasses.dataclass(frozen=True)
class PipelineContext:
  """Container class containing pipeline state information.

  Attributes:
    loop_index: The current grid indices to run for the current stage.
    linearized_index: The linearized ``loop_index``.
    pipeline_state: The global pipeline carry state.
  """
  loop_index: tuple[jax.Array, ...]
  linearized_index: jax.Array
  pipeline_state: PipelineState

  @classmethod
  def aval_pytree(cls, grid, state_avals) -> "PipelineContext":
    """Returns an abstract-value skeleton of a context for tracing.

    One scalar int32 aval per grid dimension for the loop index, a scalar
    int32 for the linearized index, and the caller-provided state avals.
    """
    return PipelineContext(
        loop_index=(jax_core.ShapedArray((), jnp.int32),) * len(grid),
        linearized_index=jax_core.ShapedArray((), jnp.int32),
        pipeline_state=state_avals)
def check_pipeline(stages: "Sequence[internal.PipelineStage]"):
  """Runs sanity checks on the pipeline.

  Verifies that (a) every read is preceded by some stage writing that buffer,
  (b) each buffer is written by at most one stage, and (c) no buffer is
  written after its final read.

  Args:
    stages: The linearized pipeline stages, in execution order.

  Raises:
    ValueError: If any of the invariants above is violated.
  """
  last_write = collections.defaultdict(lambda: None)
  last_read = collections.defaultdict(lambda: None)
  for i, stage in enumerate(stages):
    for read_idx in stage.get_read_idxs():
      if last_write[read_idx] is None:
        raise ValueError(
            f"Read before write. {stage} attempted to read ref {read_idx}"
            " without a prior stage writing to it.")
      last_read[read_idx] = i
    for write_idx in stage.get_write_idxs():
      if last_write[write_idx] is not None:
        raise ValueError(
            f"Write conflict. {stage} writes to ref {write_idx} but it was"
            f" already written to by stage {stages[last_write[write_idx]]}."
            " The current scheduler only allows one stage to write to each"
            " buffer.")
      last_write[write_idx] = i
  all_idxs = last_write.keys() | last_read.keys()
  for i in all_idxs:
    # A buffer that is written but never read (e.g. a pipeline output) has
    # last_read[i] == None; comparing `int > None` would raise a TypeError
    # on Python 3, so skip the write-after-final-read check for it.
    if last_read[i] is None:
      continue
    if last_write[i] > last_read[i]:
      raise ValueError(f"Ref {i} is written to after its final read.")
@functools.partial(jax.tree_util.register_dataclass,
                   data_fields=["stage_counters"],
                   meta_fields=["which_stage_writes", "which_stages_read"])
@dataclasses.dataclass(frozen=True)
class Scoreboard:
  """A scoreboard used to book-keep data dependencies.

  Attributes:
    which_stage_writes: A mapping from buffer index to the stage index that
      writes to it.
    which_stages_read: A mapping from buffer index to the stages that read
      from it.
    stage_counters: A list of length num_stages that tracks the number of times
      each stage has run.
  """
  which_stage_writes: Mapping[internal.BufferIndex, int]
  which_stages_read: Mapping[internal.BufferIndex, Sequence[int]]
  stage_counters: list[jax.Array | int]

  @classmethod
  def create(cls, stages: Sequence[internal.PipelineStage]):
    """Builds a scoreboard from the stages, with all counters at zero."""
    which_stage_writes = collections.defaultdict(lambda: None)
    which_stage_reads = collections.defaultdict(set)
    stage_counters = [0] * len(stages)
    for i, stage in enumerate(stages):
      for write_idx in stage.get_write_idxs():
        which_stage_writes[write_idx] = i
      for read_idx in stage.get_read_idxs():
        which_stage_reads[read_idx].add(i)
    return cls(which_stage_writes, which_stage_reads, stage_counters)

  def get_stage_counter(self, stage_idx: int) -> jax.Array | int:
    """Returns the current stage counter for the given stage index."""
    return self.stage_counters[stage_idx]

  def get_writing_stage(self, buffer_idx: internal.BufferIndex) -> int:
    """Returns the stage index that writes to the given buffer index."""
    return self.which_stage_writes[buffer_idx]

  def increment_stage_counter(self, stage_idx: int) -> None:
    """Increments the stage counter for the given stage index."""
    self.stage_counters[stage_idx] += 1

  def copy(self) -> "Scoreboard":
    """Returns a copy of the scoreboard with independent stage counters.

    The dependency maps are shared between copies; only the (mutable)
    counter list is duplicated.
    """
    new_stage_counters = copy.copy(self.stage_counters)
    return Scoreboard(self.which_stage_writes, self.which_stages_read,
                      new_stage_counters)
@functools.partial(jax.tree_util.register_dataclass,
                   data_fields=["indices"],
                   meta_fields=["grid", "offsets", "dynamic"])
@dataclasses.dataclass(frozen=True)
class GridCarry:
  """Helper class for managing the pipeline grid indices.

  Attributes:
    grid: The size of the grid.
    offsets: A mapping from the stage index to the integer offset from the
      slowest scheduled stage.
    dynamic: Whether grid indices should be calculated dynamically.
    indices: A mapping from offset to the grid indices.
  """
  grid: Sequence[int]
  offsets: Sequence[int]
  dynamic: bool
  indices: Sequence[Sequence[int | jax.Array]]

  @classmethod
  def init(cls, grid, offsets, dynamic=False) -> 'GridCarry':
    """Creates a carry with grid indices precomputed for offsets 0..max."""
    max_offset = max(offsets)
    cur_indices = tuple([0] * len(grid))
    indices = [cur_indices]
    # Statically unroll the first max_offset grid increments so each stage
    # offset maps directly to a set of grid indices.
    for _ in range(1, max_offset + 1):
      next_indices, _ = increment_grid(cur_indices, grid)
      indices.append(next_indices)
      cur_indices = next_indices
    return cls(grid, offsets, dynamic, tuple(indices))

  def next(self) -> "GridCarry":
    """Returns a carry advanced by one grid step."""
    next_indices, _ = increment_grid(
        self.indices[-1], self.grid, dynamic=self.dynamic
    )
    # Slide the window: drop the oldest entry, append the newly computed one.
    new_indices = (*self.indices[1:], next_indices)
    return GridCarry(self.grid, self.offsets, self.dynamic, new_indices)

  def get_indices_for_stage(self, stage_idx: int) -> Sequence[int | jax.Array]:
    """Returns the grid indices the given stage uses this iteration."""
    return self.indices[self.offsets[stage_idx]]
def check_args_ready(
    stage: internal.PipelineStage,
    scoreboard: Scoreboard,
    new_scoreboard: Scoreboard,
    current_stage_counter: int | jax.Array,
    dynamic=False,
) -> bool | jax.Array:
  """Returns whether all arguments to the stage have already been computed."""
  combine = jnp.logical_and if dynamic else operator.and_
  # Async start stages can start immediately after the preceding stage, so
  # they consult the in-progress (new) scoreboard; all other stages wait on
  # the committed one. The choice is constant across this stage's args.
  board = new_scoreboard if stage.properties.is_async_start else scoreboard
  ready = True
  for arg_idx in stage.get_read_idxs():
    producer_idx = board.get_writing_stage(arg_idx)
    producer_counter = board.get_stage_counter(producer_idx)
    # The producer must have run strictly more times than this stage.
    ready = combine(ready, producer_counter > current_stage_counter)
  return ready
def check_async_done(stage: internal.PipelineStage,
                     scoreboard: Scoreboard,
                     num_itrs: int | jax.Array,
                     current_stage_counter: int | jax.Array,
                     dynamic=False) -> bool | jax.Array:
  """Returns whether the async done stage can run.

  Args:
    stage: The async_done stage being considered.
    scoreboard: The current scheduling scoreboard.
    num_itrs: Total number of pipeline iterations.
    current_stage_counter: How many times this stage has already run.
    dynamic: Whether the checks are computed on traced (jnp) values.
  """
  and_op = jnp.logical_and if dynamic else operator.and_
  # For async done stages, we need to insert delays so that they
  # happen as late as possible.
  # First condition is that there are a full number of async starts
  # in flight.
  max_in_flight = stage.properties.max_in_flight
  can_run = True
  token_read_effs = internal.filter_tokens(
      internal.filter_read_effects(stage.effects))
  read_tokens = {effect.input_index for effect in token_read_effs}
  # An async_done reads exactly one token.
  assert len(read_tokens) == 1, stage.effects
  read_token = tuple(read_tokens)[0]
  # The matching async_start is the stage that writes the token this
  # async_done reads.
  async_start_stage_idx = scoreboard.which_stage_writes[read_token]
  async_start_counter = scoreboard.get_stage_counter(
      async_start_stage_idx)
  async_done_counter = current_stage_counter
  min_op = jnp.minimum if dynamic else min
  # Near the end of the pipeline fewer than max_in_flight starts remain,
  # so cap the requirement at num_itrs.
  start_full = (async_start_counter >=
                min_op(async_done_counter + max_in_flight, num_itrs))
  can_run = and_op(can_run, start_full)
  # Second condition - the consumers of this stage's outputs will
  # actually need the results on the next iteration.
  for write_idx in stage.get_write_idxs():
    which_stages_read = scoreboard.which_stages_read[write_idx]
    for read_stage_idx in which_stages_read:
      read_itr = scoreboard.stage_counters[read_stage_idx]
      can_run = and_op(can_run, (current_stage_counter <= read_itr))
  return can_run
def check_async_start(
    stage: internal.PipelineStage,
    scoreboard: Scoreboard,
    current_stage_counter: int | jax.Array,
    dynamic=False,
) -> bool | jax.Array:
  """Returns whether the async start stage can run."""
  combine = jnp.logical_and if dynamic else operator.and_
  token_write_effs = internal.filter_tokens(
      internal.filter_write_effects(stage.effects)
  )
  # Every async_start produces exactly one token.
  assert len(token_write_effs) == 1, stage.effects
  token_write_idx = tuple(token_write_effs)[0].input_index
  dependent_stages = scoreboard.which_stages_read[token_write_idx]
  max_in_flight = stage.properties.max_in_flight
  # Do not issue more than max_in_flight async_starts ahead of any consumer
  # of this op's token.
  ready = True
  for dep_stage_idx in dependent_stages:
    dep_counter = scoreboard.stage_counters[dep_stage_idx]
    ready = combine(ready, current_stage_counter < dep_counter + max_in_flight)
  return ready
class EvalStageFunc(Protocol):
  """Callable signature for evaluating a single pipeline stage.

  Implementations receive the current pipeline context, the stage to run,
  and the flat pipeline arguments, and return the next pipeline state.
  """

  def __call__(
      self,
      ctx: PipelineContext,
      stage: internal.PipelineStage,
      args: Sequence[Any],
  ) -> PipelineState:
    ...
def eval_stage(ctx: PipelineContext, stage: internal.PipelineStage, args
               ) -> PipelineState:
  """Evaluates a single stage.

  Args:
    ctx: The pipeline context (loop indices and carried state) for this step.
    stage: The traced stage whose jaxpr is evaluated.
    args: Flat pipeline arguments appended after the context leaves.

  Returns:
    The next pipeline state, or the incoming state unchanged when the
    stage's jaxpr produces no outputs.
  """
  flat_ctx = jax.tree.leaves(ctx)
  state_tree = jax.tree.structure(ctx.pipeline_state)
  next_state = jax_core.eval_jaxpr(
      stage.jaxpr.jaxpr, stage.jaxpr.consts, *flat_ctx, *args
  )
  # An empty output list means this stage does not update the carry.
  if next_state:
    return jax.tree.unflatten(state_tree, next_state)
  return ctx.pipeline_state
def linearize_stages(stages: Sequence[internal.PipelineStage]
                     ) -> Sequence[internal.PipelineStage]:
  """Computes a linearization of the pipeline stages."""
  ordered = []
  written = set()
  available_stages = stages
  while available_stages:
    remaining = []
    progressed = False
    for stage in available_stages:
      # A stage is schedulable once every buffer it reads already has its
      # producer placed in the ordering. Updating `written` within the pass
      # lets later stages in the same pass depend on earlier ones.
      if all(idx in written for idx in stage.get_read_idxs()):
        ordered.append(stage)
        written.update(stage.get_write_idxs())
        progressed = True
      else:
        remaining.append(stage)
    available_stages = remaining
    if not progressed:
      # A full pass placed nothing: there is a cycle or a missing producer.
      raise ValueError(
          "Failed to linearize pipeline stages. Could not linearize"
          f" {available_stages=}")
  return ordered
def make_ctx(stage: internal.PipelineStage,
             stage_idx: int,
             scoreboard: Scoreboard,
             pipeline_state: PipelineState,
             grid_carry: GridCarry | None = None,
             grid: Sequence[int] | None = None,
             offset: int | jax.Array = 0) -> PipelineContext:
  """Builds the PipelineContext for one stage at its current iteration."""
  del stage  # Unused; kept so all call sites share one signature.
  linear_step = scoreboard.stage_counters[stage_idx] + offset
  if grid_carry is None:
    # No precomputed carry: derive the grid indices from the linear step.
    loop_idx = compute_grid_indices(linear_step, grid)
  else:
    loop_idx = grid_carry.get_indices_for_stage(stage_idx)
  return PipelineContext(loop_index=loop_idx,
                         linearized_index=linear_step,
                         pipeline_state=pipeline_state)
# TODO(justinfu): Implement a second version that rolls more of the pipeline
# into the loop body to reduce code size.
def static_nd_loop_scheduler(
    nd_loop: internal.NDLoopStruct,
    args: Sequence[Any],
    initial_state: PipelineState | None = None,
    eval_fn: EvalStageFunc | None = None,
):
  """Schedules and emits the pipeline into a single instruction stream.

  This scheduler is static in the sense that most of the control logic is
  implemented in Python and run at JAX tracing time. This reduce scalar
  core pressure as the scoreboarding logic does not have to be computed
  at runtime.

  The emitted program has three phases: a statically-unrolled prologue that
  fills the pipeline, a fori_loop steady state where every stage runs each
  iteration, and a statically-unrolled epilogue that drains it.

  Args:
    nd_loop: The loop structure (stages and grid) to schedule.
    args: Flat pipeline arguments passed to every stage.
    initial_state: Optional carry threaded through every stage.
    eval_fn: How to evaluate one stage; defaults to ``eval_stage``.
  """
  if eval_fn is None:
    eval_fn = eval_stage
  stages = linearize_stages(nd_loop.stages)
  num_stages = len(stages)
  num_itrs = np.prod(nd_loop.grid)
  check_pipeline(stages)
  scoreboard = Scoreboard.create(stages)
  def can_run_stage(
      stage: internal.PipelineStage,
      scoreboard: Scoreboard,
      new_scoreboard: Scoreboard,
      current_stage_counter: int | jax.Array,
  ) -> bool | jax.Array:
    # A stage runs when its inputs are produced and, for async stages, the
    # in-flight/delay constraints are satisfied.
    can_run = True
    # Check args ready.
    can_run = can_run & check_args_ready(
        stage, scoreboard, new_scoreboard, current_stage_counter)
    # Check dependents
    if stage.properties.is_async_start:
      can_run = can_run & check_async_start(
          stage, scoreboard, current_stage_counter,
      )
    if stage.properties.is_async_done:
      can_run = can_run & check_async_done(
          stage, scoreboard, num_itrs, current_stage_counter)
    return can_run
  def compute_offsets(scoreboard: Scoreboard) -> Sequence[int] | None:
    # Simulate the pipeline until every stage has run at least once; the
    # resulting counters (minus the slowest stage) are the steady-state
    # lookahead offsets.
    while any(scoreboard.stage_counters[i] < 1 for i in range(num_stages)):
      new_scoreboard = scoreboard.copy()
      for stage_idx, stage in enumerate(stages):
        current_stage_counter = scoreboard.stage_counters[stage_idx]
        can_run = can_run_stage(
            stage, scoreboard, new_scoreboard, current_stage_counter
        )
        if can_run:
          new_scoreboard.increment_stage_counter(stage_idx)
      if scoreboard.stage_counters == new_scoreboard.stage_counters:
        raise ValueError("Scheduling error. No stages ran.")
      scoreboard = new_scoreboard
    min_stage = min(scoreboard.stage_counters)
    offsets = [
        scoreboard.stage_counters[i] - min_stage for i in range(num_stages)
    ]
    if max(offsets) > num_itrs:
      # Bail out, since we won't be running the main loop.
      return None
    return offsets
  # Main loop stage iteration offsets.
  # This is a list of integers containing the number of iterations each
  # stage is ahead of the slowest stage.
  offsets = compute_offsets(scoreboard)
  # Static prologue
  # This runs the pipeline up until the steady state.
  pipeline_state = initial_state
  with jax.named_scope("pipeline_prologue"):
    while any(
        scoreboard.stage_counters[i] < (offsets[i] if offsets else 1)
        for i in range(num_stages)
    ):
      new_scoreboard = scoreboard.copy()
      for stage_idx, stage in enumerate(stages):
        current_stage_counter = scoreboard.stage_counters[stage_idx]
        if offsets:
          # Run each stage only up to its steady-state lookahead.
          can_run = current_stage_counter < offsets[stage_idx]
        else:
          # No steady state: the whole pipeline runs in prologue/epilogue.
          can_run = current_stage_counter < num_itrs
        can_run = can_run & can_run_stage(
            stage, scoreboard, new_scoreboard, current_stage_counter
        )
        if can_run:
          pipeline_state = eval_fn(
              make_ctx(
                  stage, stage_idx, scoreboard, pipeline_state,
                  grid=nd_loop.grid,
              ),
              stage,
              args,
          )
          new_scoreboard.increment_stage_counter(stage_idx)
      if scoreboard.stage_counters == new_scoreboard.stage_counters:
        raise ValueError("Scheduling error. No stages ran.")
      scoreboard = new_scoreboard
  if offsets:
    assert all(
        scoreboard.stage_counters[i] == offsets[i] for i in range(num_stages)
    ), (
        f"Scheduling error. Scoreboard {scoreboard.stage_counters} does not"
        f" match computed offsets {offsets}"
    )
  # Dynamic loop body.
  # This runs the steady state of the pipeline where all stages run with
  # no control flow.
  @jax.named_scope("pipeline_steady_state")
  def loop_body(itr: jax.Array, carry: tuple[PipelineState, GridCarry]):
    pipeline_state, grid_carry = carry
    stages_left = list(stages)
    old_scoreboard = scoreboard.copy()
    # In steady state every stage runs exactly once per iteration; the
    # scoreboard only orders the emission within the iteration.
    while any(stages_left):
      new_scoreboard = old_scoreboard.copy()
      for stage_idx, stage in enumerate(stages_left):
        if stage is None:
          continue
        current_stage_counter = old_scoreboard.stage_counters[stage_idx]
        can_run = can_run_stage(
            stage, old_scoreboard, new_scoreboard, current_stage_counter
        )
        if can_run:
          pipeline_state = eval_fn(
              make_ctx(
                  stage,
                  stage_idx,
                  old_scoreboard,
                  pipeline_state,
                  grid_carry=grid_carry,
                  offset=itr,
              ),
              stage,
              args,
          )
          new_scoreboard.increment_stage_counter(stage_idx)
          stages_left[stage_idx] = None
      old_scoreboard = new_scoreboard
    return (pipeline_state, grid_carry.next())
  num_loop_itrs = int(max(num_itrs - max(scoreboard.stage_counters), 0))
  if offsets:
    grid_carry = GridCarry.init(
        offsets=offsets, grid=nd_loop.grid, dynamic=True)
    init_carry = (pipeline_state, grid_carry)
    final_carry = jax.lax.fori_loop(0, num_loop_itrs, loop_body, init_carry)
    (pipeline_state, _) = final_carry
    # Update the static scoreboard to reflect the fact that each stage ran
    # num_loop_itrs times.
    for stage_idx in range(len(stages)):
      scoreboard.stage_counters[stage_idx] += num_loop_itrs
  # Static epilogue
  with jax.named_scope("pipeline_epilogue"):
    while any(
        scoreboard.stage_counters[i] < num_itrs for i in range(num_stages)
    ):
      new_scoreboard = scoreboard.copy()
      for stage_idx, stage in enumerate(stages):
        current_stage_counter = scoreboard.stage_counters[stage_idx]
        can_run = current_stage_counter < num_itrs
        can_run = can_run & can_run_stage(
            stage, scoreboard, new_scoreboard, current_stage_counter
        )
        if can_run:
          pipeline_state = eval_fn(
              make_ctx(
                  stage, stage_idx, scoreboard, pipeline_state,
                  grid=nd_loop.grid,
              ),
              stage,
              args,
          )
          new_scoreboard.increment_stage_counter(stage_idx)
      if scoreboard.stage_counters == new_scoreboard.stage_counters:
        raise ValueError("Scheduling error. No stages ran.")
      scoreboard = new_scoreboard
| {
"repo_id": "jax-ml/jax",
"file_path": "jax/_src/pallas/pipelining/schedulers.py",
"license": "Apache License 2.0",
"lines": 496,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:jax/_src/pmap.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from functools import partial
from typing import Any, Callable, NamedTuple
import warnings
from jax._src import api
from jax._src.api_util import (
argnums_partial, donation_vector, fun_signature, fun_sourceinfo)
from jax._src import array
from jax._src import config
from jax._src import core
from jax._src import dtypes
from jax._src import linear_util as lu
from jax._src import pjit as pjit_lib
from jax._src import prng
from jax._src import sharding_impls
from jax._src import stages
from jax._src import traceback_util
from jax._src import util
from jax._src import xla_bridge as xb
from jax._src.lib import jaxlib_extension_version
from jax._src.interpreters import pxla
from jax._src.lax import lax
from jax._src.lib import xla_client as xc
from jax._src.mesh import Mesh
from jax._src.shard_map import _axes_to_pspec, shard_map
from jax._src.tree_util import (
broadcast_flattened_prefix_with_treedef, broadcast_prefix,
prefix_errors, tree_flatten, tree_map, tree_unflatten)
import numpy as np
map, unsafe_map = util.safe_map, map
zip, unsafe_zip = util.safe_zip, zip
# jaxlib extension version >= 400 supports _rewrap_with_aval_and_sharding
_SUPPORTS_REWRAP = jaxlib_extension_version >= 400
traceback_util.register_exclusion(__file__)
# Implementing pmap in terms of shard_map
def pmap(f, axis_name=None, *, in_axes=0, out_axes=0,
         static_broadcasted_argnums=(), devices=None, backend=None,
         axis_size=None, donate_argnums=()):
  """Implements ``pmap`` in terms of ``shard_map`` and ``jit``.

  Mirrors the classic ``jax.pmap`` signature; builds (and caches) a
  shard_map-transformed, jitted version of ``f`` per configuration.
  """
  if devices is not None:
    if not devices:
      raise ValueError("'devices' argument to pmap must be non-empty, or None.")
    devices = tuple(devices)
  axis_name, static_broadcasted_tuple, donate_tuple = api._shared_code_pmap(  # pylint: disable=protected-access
      f, axis_name, static_broadcasted_argnums, donate_argnums, in_axes,
      out_axes)
  if isinstance(axis_name, core._TempAxisName):  # pylint: disable=protected-access
    axis_name = repr(axis_name)
  wrapped_fun = _pmap_wrap_init(f, static_broadcasted_tuple)
  out_axes_flat, out_axes_tree = tree_flatten(out_axes)
  out_axes_flat = tuple(out_axes_flat)

  def infer_params(*args, **kwargs):
    # Resolves everything needed for one call: dynamic args, flat in_axes,
    # mesh devices, and the cached shard_map/jit artifacts.
    process_count = xb.process_count(backend)
    trace_state_clean = core.trace_state_clean()
    dyn_f, dyn_argnums, dyn_args = _get_dyn_args(
        wrapped_fun, static_broadcasted_tuple, args)
    dyn_args_flat, dyn_args_tree = tree_flatten((dyn_args, kwargs))
    in_axes_flat = _get_in_axes_flat(
        in_axes, dyn_argnums, dyn_args, kwargs, len(dyn_args_flat),
        dyn_args_tree)
    local_axis_size = _mapped_axis_size(dyn_args_flat, in_axes_flat)
    donated_invars = _get_donated_invars(
        donate_tuple, dyn_args_tree, len(dyn_args_flat))
    mesh_devices = _get_mesh_devices(
        devices, backend, local_axis_size, axis_size, trace_state_clean)
    cached = _cached_shard_map(
        dyn_f, dyn_args_tree, in_axes_flat, out_axes_flat, out_axes_tree,
        donated_invars, mesh_devices, axis_name)
    # Explicit in/out shardings are only used when not under a trace.
    jitted_f = (cached.jitted_f_with_shardings if trace_state_clean
                else cached.jitted_f)
    if process_count > 1:
      # Multi-host: convert host-local arrays to global arrays before calling.
      dyn_args_flat = host_local_array_to_global_array(
          dyn_args_flat, cached, trace_state_clean, donated_invars
      )
    return (cached, jitted_f, dyn_args_flat, dyn_args_tree, donate_tuple,
            process_count, trace_state_clean)

  @util.wraps(f)
  def wrapped(*args, **kwargs):
    cached, jitted_f, dyn_args_flat, _, _, process_count, trace_state_clean = (
        infer_params(*args, **kwargs))
    out = jitted_f(*dyn_args_flat)
    if process_count > 1:
      # Convert global outputs back to the host-local view pmap callers expect.
      out = global_array_to_host_local_array(out, cached, trace_state_clean)
    return out

  def lower(*args, **kwargs):
    _, jitted_f, args_flat, in_tree, donated_tuple, _, _ = infer_params(
        *args, **kwargs
    )
    abstract_args = list(map(core.shaped_abstractify, args_flat))
    args_info = stages.make_args_info(in_tree, abstract_args, donated_tuple)
    lowered = jitted_f.trace(*args_flat).lower()
    # NOTE(dsuo): Calling .compile()(*inputs) will fail because our jitted function
    # has no notion of host-local <> global conversion.
    return stages.Lowered(
        lowered._lowering,  # pylint: disable=protected-access
        args_info,
        lowered.out_tree,
        no_kwargs=lowered._no_kwargs,  # pylint: disable=protected-access
    )

  wrapped.lower = lower
  return wrapped
class CachedShardMap(NamedTuple):
  """Core cached pmap result.

  One instance is produced per distinct pmap configuration by
  ``_cached_shard_map`` and reused across calls.

  Attributes:
    pmapped: The shard_map-transformed function.
    in_specs_flat: Flattened input PartitionSpecs for array conversion.
    local_devices: List of devices in the local mesh.
    in_local_shardings: NamedSharding for each input using local mesh.
    in_global_shardings: NamedSharding for each input using global mesh.
    mesh: The global Mesh for this pmap invocation.
    out_specs: Output PartitionSpecs as a pytree prefix.
    out_local_shardings_thunk: Cached thunk returning (local, global) sharding
      pairs for output pspecs.
    donate_argnums: Indices of donated arguments.
    out_global_shardings: Output NamedShardings as a pytree.
    jitted_f: Pre-cached jit wrapper without explicit shardings.
    jitted_f_with_shardings: Pre-cached jit wrapper with in/out shardings.
  """
  pmapped: Callable[..., Any]
  in_specs_flat: tuple[sharding_impls.PartitionSpec, ...]
  local_devices: list[xc.Device]
  in_local_shardings: list[sharding_impls.NamedSharding]
  in_global_shardings: list[sharding_impls.NamedSharding]
  mesh: Mesh
  out_specs: Any  # pytree of PartitionSpecs
  out_local_shardings_thunk: Callable[
      [sharding_impls.PartitionSpec],
      tuple[sharding_impls.NamedSharding, sharding_impls.NamedSharding],
  ]
  donate_argnums: list[int]
  out_global_shardings: Any  # pytree of NamedShardings
  jitted_f: Any
  jitted_f_with_shardings: Any
@lu.cache
def _cached_shard_map(fun, in_tree, in_axes_flat, out_axes_flat, out_axes_tree,
                      donated_invars, mesh_devices, axis_name):
  """Builds and caches the shard_map/jit machinery for one pmap config.

  Keyed (via ``lu.cache``) on the wrapped function and the hashable
  configuration arguments; returns a ``CachedShardMap`` reused across calls.
  """
  mesh = Mesh(mesh_devices, (axis_name,))
  out_axes = tree_unflatten(out_axes_tree, list(out_axes_flat))
  in_specs = tuple(map(partial(_axes_to_pspec, axis_name), in_axes_flat))
  out_specs = tree_map(
      partial(_axes_to_pspec, axis_name), out_axes, is_leaf=lambda x: x is None
  )

  def _fun(*flat_args):
    # Squeeze away each mapped input axis before calling the user function,
    # then re-insert the mapped output axes afterwards.
    args = tree_map(
        lambda x, ax: x if ax is None else lax.squeeze(x, [ax]),
        flat_args,
        in_axes_flat,
    )
    args, kwargs = tree_unflatten(in_tree, args)
    out = fun.call_wrapped(*args, **kwargs)
    out_flat, out_tree = tree_flatten(out)
    out_axes_flat = broadcast_prefix(out_axes, out, is_leaf=lambda x: x is None)
    out_flat = tree_map(
        lambda x, ax: x if ax is None else lax.expand_dims(x, [ax]),
        out_flat,
        out_axes_flat,
    )
    return tree_unflatten(out_tree, out_flat)

  _pmapped = shard_map(_fun, mesh=mesh, in_specs=in_specs, out_specs=out_specs,
                       check_vma=False, axis_names=set(mesh.axis_names))
  # Donation is now safe in multi-host mode because host_local_array_to_global_array
  # copies donated arrays instead of rewrapping them (which would share buffers).
  donate_argnums = [i for i, val in enumerate(donated_invars) if val]
  # out_specs is a pytree, so use tree_map to convert to shardings
  get_sharding = (
      lambda spec: sharding_impls.NamedSharding(mesh, spec)
      if spec is not None else spec)
  out_global_shardings = tree_map(
      get_sharding, out_specs, is_leaf=lambda x: x is None)

  @util.cache()
  def out_local_shardings_thunk(pspec):
    # Lazily build (local, global) sharding pairs per output pspec.
    return (
        sharding_impls.NamedSharding(mesh.local_mesh, pspec),
        sharding_impls.NamedSharding(mesh, pspec),
    )

  local_devices = list(mesh.local_mesh.devices.flat)
  in_local_shardings = [
      sharding_impls.NamedSharding(mesh.local_mesh, p) for p in in_specs]
  in_global_shardings = [
      sharding_impls.NamedSharding(mesh, p) for p in in_specs]
  # Two jit wrappers: one with explicit shardings (used outside a trace) and
  # one without (used under a trace).
  jitted_f = api.jit(_pmapped, donate_argnums=donate_argnums)
  jitted_f_with_shardings = api.jit(
      _pmapped,
      donate_argnums=donate_argnums,
      in_shardings=tuple(in_global_shardings),
      out_shardings=out_global_shardings,
  )
  return CachedShardMap(
      pmapped=_pmapped,
      in_specs_flat=in_specs,
      local_devices=local_devices,
      in_local_shardings=in_local_shardings,
      in_global_shardings=in_global_shardings,
      mesh=mesh,
      out_specs=out_specs,
      out_local_shardings_thunk=out_local_shardings_thunk,
      donate_argnums=donate_argnums,
      out_global_shardings=out_global_shardings,
      jitted_f=jitted_f,
      jitted_f_with_shardings=jitted_f_with_shardings,
  )
def _mapped_axis_size(args, in_axes):
"""Infer axis size from the first mapped argument.
shard_map already does a check on all arguments, so just look at first arg.
Args:
args: Flat list of arguments.
in_axes: Flat tuple of axis indices (int or None for each arg).
Returns:
The size of the mapped axis.
Raises:
ValueError: If no args have a mapped axis.
"""
if args and in_axes:
# Fast path: check first arg/axis (most common case).
if in_axes[0] is not None and hasattr(args[0], "shape"):
return int(args[0].shape[in_axes[0]])
# Slow path: scan for first mapped arg.
if isinstance(in_axes, tuple):
for arg, ax in zip(args, in_axes):
if ax is not None and hasattr(arg, "shape"):
return int(arg.shape[ax])
raise ValueError("pmap requires at least one argument with a mapped axis.")
def _pmap_wrap_init(f, static_broadcasted_tuple):
  """Create a wrapped function with DebugInfo for pmap.

  Args:
    f: The function to wrap.
    static_broadcasted_tuple: Tuple of static argument indices.

  Returns:
    A lu.WrappedFun ready for pmap.
  """
  # Derive argument names from f's signature, dropping static argnums so the
  # debug info matches the dynamic arguments that are actually traced.
  signature = fun_signature(f)
  if signature is None:
    arg_names = None
  else:
    static = frozenset(static_broadcasted_tuple)
    kept = []
    for idx, param_name in enumerate(signature.parameters):
      if idx not in static:
        kept.append(param_name)
    arg_names = tuple(kept)
  debug = lu.DebugInfo("pmap", fun_sourceinfo(f), arg_names, None)
  return lu.wrap_init(f, debug_info=debug)
def _get_dyn_args(wrapped_f, static_broadcasted_tuple, args):
"""Extract dynamic args and argnums after handling static args.
Args:
wrapped_f: The wrapped function.
static_broadcasted_tuple: Tuple of static argument indices.
args: Positional arguments.
Returns:
dyn_f: function with static args bound
dyn_argnums: list of dynamic arg indices (or None if no static args)
dyn_args: dynamic positional arguments (after static removed)
Raises:
ValueError: If static_broadcasted_argnums exceeds number of args.
"""
if static_broadcasted_tuple:
if max(static_broadcasted_tuple) >= len(args):
raise ValueError(
"pmapped function has"
f" static_broadcasted_argnums={static_broadcasted_tuple} but was"
f" called with only {len(args)} positional"
f" argument{'s' if len(args) > 1 else ''}. All static broadcasted"
" arguments must be passed positionally."
)
dyn_argnums = [
i for i in range(len(args)) if i not in static_broadcasted_tuple
]
wrapped_f, dyn_args = argnums_partial(wrapped_f, dyn_argnums, args)
else:
dyn_argnums = None
dyn_args = args
return wrapped_f, dyn_argnums, dyn_args
def _get_in_axes_flat(
    in_axes, dyn_argnums, dyn_args, kwargs, num_flat_args, in_tree
):
  """Compute flat in_axes tuple from in_axes prefix and args structure.

  Args:
    in_axes: The original in_axes specification.
    dyn_argnums: The indices of dynamic (non-static) positional args, or None if
      no static args.
    dyn_args: The dynamic positional args (after static args removed).
    kwargs: The keyword arguments.
    num_flat_args: Total number of flat args.
    in_tree: The PyTreeDef of (dyn_args, kwargs).

  Returns:
    Flat tuple of axis indices (int or None for each flat arg).

  Raises:
    ValueError: If in_axes is not a valid prefix of the args structure.
  """
  # Compute dyn_in_axes from in_axes and dyn_argnums
  # (when static args were removed, the per-arg in_axes entries for those
  # positions must be dropped too, so indices line up with dyn_args).
  if dyn_argnums is not None and isinstance(in_axes, tuple):
    dyn_in_axes = tuple(in_axes[i] for i in dyn_argnums)
  else:
    dyn_in_axes = in_axes
  # Fast path: avoid broadcast_prefix for common simple cases
  in_axes_flat = None
  if isinstance(dyn_in_axes, int):
    if dyn_in_axes == 0:
      # Most common case: all args mapped on axis 0, including kwargs (which also get 0)
      in_axes_flat = (0,) * num_flat_args
    elif not kwargs:
      # No kwargs: broadcast single in_axes to all positional leaves
      in_axes_flat = (dyn_in_axes,) * num_flat_args
  elif dyn_in_axes is None and not kwargs:
    # Unusual case: no mapping, no kwargs
    in_axes_flat = (None,) * num_flat_args
  elif (
      not kwargs
      and isinstance(dyn_in_axes, tuple)
      and all(isinstance(ax, int) or ax is None for ax in dyn_in_axes)
  ):
    # No kwargs: check if it's a simple flat tuple matching positional args
    if len(dyn_in_axes) == len(dyn_args) and num_flat_args == len(dyn_args):
      # Each positional arg is a leaf (no nested structure)
      in_axes_flat = dyn_in_axes
  # Slow path: use broadcast_flattened_prefix_with_treedef for complex cases
  if in_axes_flat is None:
    try:
      # Flatten in_axes prefix tree (treating None as leaf).  The (x, 0) pair
      # mirrors in_tree's (dyn_args, kwargs) structure: kwargs always map on
      # their leading axis, hence the constant 0 in the kwargs slot.
      flat_in_axes_prefix, in_axes_tree = tree_flatten(
          (dyn_in_axes, 0), is_leaf=lambda x: x is None
      )
      in_axes_flat = tuple(
          broadcast_flattened_prefix_with_treedef(
              flat_in_axes_prefix, in_axes_tree, in_tree
          )
      )
    except ValueError:
      # Build a user-friendly prefix-mismatch error from the first offending
      # leaf path, then append pmap-specific guidance.
      e, *_ = prefix_errors((dyn_in_axes, 0), (dyn_args, kwargs))
      ex = e("pmap in_axes")
      (msg,) = ex.args
      msg += (
          "\n\nThe 'full pytree' here is the tuple of arguments passed "
          "positionally to the pmapped function, and the value of `in_axes` "
          "must be a tree prefix of that tuple. But it was not a prefix."
      )
      if kwargs:
        msg += (
            "\n\nWhen some arguments are passed by keyword to the pmapped "
            "function, they are not included in the comparison to `in_axes`. "
            "Instead, each argument passed by keyword is mapped over its "
            "leading axis. See the description of `in_axes` in the `pmap` "
            "docstring: "
            "https://docs.jax.dev/en/latest/_autosummary/jax.pmap.html#jax.pmap"
        )
      msg += (
          "\n\nCheck that the value of the `in_axes` argument to `pmap` "
          "is a tree prefix of the tuple of arguments passed positionally to "
          "the pmapped function."
      )
      # `from None` suppresses the internal flattening traceback.
      raise ValueError(msg) from None
  return in_axes_flat
def _get_donated_invars(donate_tuple, in_tree, num_flat_args):
"""Compute donation vector for arguments.
Args:
donate_tuple: Tuple of donated argument indices.
in_tree: PyTreeDef of input structure.
num_flat_args: Number of flat arguments.
Returns:
Tuple of bools indicating which flat args are donated.
"""
if donate_tuple and not config.debug_nans.value:
return donation_vector(donate_tuple, (), in_tree)
else:
return (False,) * num_flat_args
@util.cache()
def _get_mesh_devices(devices, backend, local_axis_size, axis_size,
                      trace_state_clean):
  """Compute effective mesh devices based on context.

  Args:
    devices: The mesh devices tuple.
    backend: The backend to use.
    local_axis_size: The local axis size (per-process).
    axis_size: User-specified global axis size (optional).
    trace_state_clean: True if in execution mode (not tracing).

  Returns:
    Tuple of effective mesh devices sliced appropriately.

  Raises:
    ValueError: If axis_size doesn't match inferred size in single-process.
  """
  process_count = xb.process_count(backend)
  # Validate explicit axis_size in single-process mode
  if (process_count == 1 and axis_size is not None and
      axis_size != local_axis_size):
    raise ValueError(
        f"Specified axis_size {axis_size} doesn't match received "
        f"axis_size {local_axis_size}.")
  # Compute global_axis_size: an explicit axis_size wins; otherwise in
  # multi-process mode every process contributes local_axis_size devices.
  if axis_size is not None:
    global_axis_size = axis_size
  elif process_count > 1:
    global_axis_size = local_axis_size * process_count
    # Validate all processes have the same number of local devices
    assert all(
        len(xb.local_devices(pi, backend)) == xb.local_device_count(backend)
        for pi in range(process_count))
  else:
    global_axis_size = local_axis_size
  # Determine mesh devices: caller-supplied list takes precedence.
  if devices is not None:
    mesh_devices = devices
  elif process_count > 1:
    # Multi-process: group devices by process (host) for optimal collective
    # performance. This matches the old pmap's device ordering which uses
    # local_devices(process_index) in a nested loop, ensuring devices from
    # the same host are contiguous in the mesh.
    # pylint: disable=g-complex-comprehension
    mesh_devices = tuple(
        d
        for process_index in range(process_count)
        for d in xb.local_devices(process_index, backend)
    )
    # pylint: enable=g-complex-comprehension
  elif backend is not None:
    mesh_devices = tuple(xb.devices(backend=backend))
  else:
    mesh_devices = tuple(xb.devices())
  if not trace_state_clean and process_count > 1:
    # Tracing in multihost: use local devices
    return tuple(xb.local_devices(backend=backend)[:local_axis_size])
  else:
    # Execution (or single process): truncate to the global axis size.
    return mesh_devices[:global_axis_size]
@util.cache()
def _local_to_global_aval(shape, dtype, sharding):
  """Return the global aval for a host-local (shape, dtype) under `sharding`."""
  prepared_spec = sharding_impls.prepare_axis_resources(sharding.spec, "pspec")
  axis_mapping = sharding_impls.get_array_mapping(prepared_spec)
  return pxla.mesh_local_to_global(
      sharding.mesh, axis_mapping, core.ShapedArray(shape, dtype))
@util.cache()
def _global_to_local_aval(shape, dtype, sharding):
  """Return the host-local aval for a global (shape, dtype) under `sharding`."""
  prepared_spec = sharding_impls.prepare_axis_resources(sharding.spec, "pspec")
  axis_mapping = sharding_impls.get_array_mapping(prepared_spec)
  return pxla.mesh_global_to_local(
      sharding.mesh, axis_mapping, core.ShapedArray(shape, dtype))
@util.cache()
def _local_device_indices(local_sharding, shape):
  """Cached per-device index slices used to split an array of `shape`."""
  index_map = local_sharding.devices_indices_map(shape)
  return tuple(index_map.values())
@util.cache()
def _is_sharding_equivalent(sharding_a, sharding_b, ndim):
  """Cached equivalence check between two shardings for rank-`ndim` arrays."""
  return sharding_a.is_equivalent_to(sharding_b, ndim)
@util.cache()
def _get_out_shardings(out_tree, pspecs, out_shardings_thunk):
  """Flatten output pspecs and look up their (local, global) shardings.

  Returns a pair of tuples (local_shardings, global_shardings), transposed
  from the per-pspec pairs produced by `out_shardings_thunk`.
  """
  out_pspecs_flat = pjit_lib.flatten_axis_resources(
      "output pspecs", out_tree, pspecs, tupled_args=True
  )
  # Note: with zero outputs this yields () rather than ((), ()), matching the
  # zip(*...) transpose semantics callers rely on.
  sharding_pairs = [out_shardings_thunk(pspec) for pspec in out_pspecs_flat]
  return tuple(zip(*sharding_pairs))
def host_local_array_to_global_array(
    dyn_args_flat, cached, trace_state_clean, donated_invars
):
  """Convert host-local arrays to global arrays for multihost pmap.

  Args:
    dyn_args_flat: Flat list of input arrays.
    cached: CachedPmap tuple with mesh and sharding info.
    trace_state_clean: True if in execution mode (not tracing).
    donated_invars: Tuple of bools indicating which args are donated. For
      donated args that require the slow path, we delete the original to free
      memory.

  Returns:
    Converted global arrays.
  """
  if not trace_state_clean:
    # Tracing: defer to the general multihost utility rather than the fast
    # paths below.
    import jax.experimental.multihost_utils as mhu  # pytype: disable=import-error
    return list(
        mhu.host_local_array_to_global_array(
            tuple(dyn_args_flat), cached.mesh, cached.in_specs_flat
        )
    )
  in_local_shardings = cached.in_local_shardings
  in_global_shardings = cached.in_global_shardings
  # Tracers/abstract values cannot be resharded; pass them through untouched.
  if dyn_args_flat and isinstance(
      dyn_args_flat[0], (core.Tracer, core.AbstractValue)
  ):
    return dyn_args_flat
  # Note: the list is mutated in place, element by element.
  for i, arr in enumerate(dyn_args_flat):
    local_sharding = in_local_shardings[i]
    global_sharding = in_global_shardings[i]
    donated = donated_invars[i]
    prng_impl = None
    typ = type(arr)
    # Already a global (not fully addressable) array: nothing to do.
    if typ is array.ArrayImpl and not arr.is_fully_addressable:
      continue
    if typ is not array.ArrayImpl:
      if typ is prng.PRNGKeyArray:
        # Unwrap the key's backing uint array; re-wrapped at loop end.
        prng_impl = arr.dtype._impl  # pylint: disable=protected-access
        arr = arr._base_array  # pylint: disable=protected-access
      dtype = arr.dtype
      # float0 has no runtime representation; stand in bool zeros.
      if dtype == dtypes.float0:
        arr = np.zeros(arr.shape, dtype=bool)
      arr = np.asarray(arr)
      if dtype != dtypes.canonicalize_dtype(dtype):
        arr = dtypes.canonicalize_value(arr)
    shape, dtype = arr.shape, arr.dtype
    typ = type(arr)
    global_aval = _local_to_global_aval(shape, dtype, global_sharding)
    if typ == array.ArrayImpl and _is_sharding_equivalent(
        arr.sharding, local_sharding, len(arr.shape)
    ):
      # Fast path: rewrap without copy (shares buffers with original).
      # For donated args, jit's donation will invalidate the shared buffers,
      # which is the expected behavior - original arrays become invalid.
      if _SUPPORTS_REWRAP:
        dyn_args_flat[i] = arr._rewrap_with_aval_and_sharding(  # pylint: disable=protected-access
            global_aval, global_sharding
        )
      else:
        # Fallback for older jaxlib: use batched_device_put with shard data.
        arrays = [x.data for x in arr.addressable_shards]
        dyn_args_flat[i] = pxla.batched_device_put(
            global_aval,
            global_sharding,
            arrays,
            list(local_sharding._device_assignment),
        )  # pylint: disable=protected-access
    else:
      # Slow path: slice and device_put (creates new buffers).
      # For donated args, we must explicitly delete the original to free memory.
      arrays = [
          arr[idx] for idx in _local_device_indices(local_sharding, shape)
      ]
      dyn_args_flat[i] = pxla.batched_device_put(
          global_aval,
          global_sharding,
          arrays,
          list(local_sharding._device_assignment),
      )  # pylint: disable=protected-access
      if donated and typ is array.ArrayImpl:
        warnings.warn(
            "Donated pmap argument required resharding. This causes a brief "
            "2x memory spike before the original is freed. For optimal "
            "donation, ensure inputs are correctly sharded before pmap.",
            stacklevel=4,
        )
        arr.delete()
    if prng_impl is not None:
      # Restore the PRNG key wrapper around the converted base array.
      dyn_args_flat[i] = prng.PRNGKeyArray(prng_impl, dyn_args_flat[i])
  return dyn_args_flat
def global_array_to_host_local_array(out, cached, trace_state_clean):
  """Convert global arrays to host-local arrays for multihost pmap output.

  Args:
    out: The output pytree from jitted function.
    cached: CachedPmap tuple with mesh and sharding info.
    trace_state_clean: True if in execution mode (not tracing).

  Returns:
    Host-local output pytree.
  """
  if not trace_state_clean:
    # Tracing: defer to the general multihost utility rather than the fast
    # paths below.
    import jax.experimental.multihost_utils as mhu  # pytype: disable=import-error
    return mhu.global_array_to_host_local_array(
        out, cached.mesh, cached.out_specs
    )
  out_flat, out_tree = tree_flatten(out)
  out_local_shardings, out_global_shardings = _get_out_shardings(
      out_tree, cached.out_specs, cached.out_local_shardings_thunk
  )
  # Tracers/abstract values cannot be resharded; return the pytree unchanged.
  if out_flat and isinstance(out_flat[0], (core.Tracer, core.AbstractValue)):
    return out
  # Note: out_flat is mutated in place, then unflattened at the end.
  for i, arr in enumerate(out_flat):
    local_sharding = out_local_shardings[i]
    global_sharding = out_global_shardings[i]
    prng_impl = None
    typ = type(arr)
    # Already fully addressable on this host: nothing to convert.
    if typ is array.ArrayImpl and arr.is_fully_addressable:
      continue
    if typ is not array.ArrayImpl:
      if typ is prng.PRNGKeyArray:
        # Unwrap the key's backing uint array; re-wrapped at loop end.
        prng_impl = arr.dtype._impl  # pylint: disable=protected-access
        arr = arr._base_array  # pylint: disable=protected-access
      try:
        _ = arr.shape
      except AttributeError:
        # Shape-less Python scalars etc.: materialize as a numpy array.
        arr = np.array(arr)
      dtype = arr.dtype
      # float0 has no runtime representation; stand in bool zeros.
      if dtype == dtypes.float0:
        arr = np.zeros(arr.shape, dtype=bool)
      if dtype != dtypes.canonicalize_dtype(dtype):
        arr = dtypes.canonicalize_value(arr)
    shape, dtype = arr.shape, arr.dtype
    typ = type(arr)
    local_aval = _global_to_local_aval(shape, dtype, global_sharding)
    if typ == array.ArrayImpl:
      # Reshard first if the array is not already in the expected global
      # sharding, then rewrap its buffers with the local aval/sharding.
      if not _is_sharding_equivalent(arr.sharding, global_sharding, len(shape)):
        arr = api.device_put(arr, global_sharding)
      if _SUPPORTS_REWRAP:
        out_flat[i] = arr._rewrap_with_aval_and_sharding(  # pylint: disable=protected-access
            local_aval, local_sharding
        )
      else:
        # Fallback for older jaxlib: construct ArrayImpl directly.
        out_flat[i] = array.ArrayImpl(
            local_aval, local_sharding, arr._arrays, committed=True
        )  # pylint: disable=protected-access
    else:
      # Non-ArrayImpl (e.g. numpy): slice per device and put the shards.
      arrays = [
          arr[idx] for idx in _local_device_indices(local_sharding, shape)
      ]
      out_flat[i] = pxla.batched_device_put(
          local_aval,
          local_sharding,
          arrays,
          list(local_sharding._device_assignment),
      )  # pylint: disable=protected-access
    if prng_impl is not None:
      # Restore the PRNG key wrapper around the converted base array.
      out_flat[i] = prng.PRNGKeyArray(prng_impl, out_flat[i])
  return tree_unflatten(out_tree, out_flat)
| {
"repo_id": "jax-ml/jax",
"file_path": "jax/_src/pmap.py",
"license": "Apache License 2.0",
"lines": 626,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:jax/_src/ref.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
from jax._src import core
def new_ref(init_val: Any, *, memory_space: Any = None) -> core.Ref:
  """Create a mutable array reference initialized to ``init_val``.

  For more discussion, see the `Ref guide`_.

  Args:
    init_val: A :class:`jax.Array` representing the initial state
      of the buffer.
    memory_space: An optional memory space attribute for the Ref.

  Returns:
    A :class:`jax.ref.Ref` containing a reference to a mutable buffer.

  .. _Ref guide: https://docs.jax.dev/en/latest/array_refs.html
  """
  # Thin public wrapper; all the work happens in core.new_ref.
  return core.new_ref(init_val, memory_space=memory_space)
| {
"repo_id": "jax-ml/jax",
"file_path": "jax/_src/ref.py",
"license": "Apache License 2.0",
"lines": 27,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:jax/_src/scipy/stats/gumbel_l.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from jax._src import lax
from jax._src import numpy as jnp
from jax._src.lax.lax import _const as _lax_const
from jax._src.numpy.util import promote_args_inexact
from jax._src.typing import Array, ArrayLike
from jax._src.scipy.special import xlogy, xlog1py
def logpdf(x: ArrayLike,
loc: ArrayLike = 0,
scale: ArrayLike = 1) -> Array:
r"""
Gumbel Distribution (Left Skewed) log probability distribution function.
JAX implementation of :obj:`scipy.stats.gumbel_l` ``logpdf``.
.. math::
f_{pdf}(x; \mu, \beta) = \frac{1}{\beta} \exp\left( \frac{x - \mu}{\beta} - \exp\left( \frac{x - \mu}{\beta} \right) \right)
Args:
x: ArrayLike, value at which to evaluate log(pdf)
loc: ArrayLike, distribution offset (:math:`\mu`) (defaulted to 0)
scale: ArrayLike, distribution scaling (:math:`\beta`) (defaulted to 1)
Returns:
array of logpdf values
See Also:
- :func:`jax.scipy.stats.gumbel_l.pdf`
- :func:`jax.scipy.stats.gumbel_l.logcdf`
- :func:`jax.scipy.stats.gumbel_l.cdf`
- :func:`jax.scipy.stats.gumbel_l.ppf`
- :func:`jax.scipy.stats.gumbel_l.logsf`
- :func:`jax.scipy.stats.gumbel_l.sf`
"""
x, loc, scale = promote_args_inexact("gumbel_l.logpdf", x, loc, scale)
ok = lax.gt(scale, _lax_const(scale, 0))
# logpdf = -log(scale) + z - exp(z)
z = lax.div(lax.sub(x, loc), scale)
neg_log_scale = xlogy(-1, scale)
t2 = lax.sub(z, lax.exp(z))
log_pdf = lax.add(neg_log_scale, t2)
return jnp.where(ok, log_pdf, np.nan)
def pdf(x: ArrayLike, loc: ArrayLike = 0, scale: ArrayLike = 1) -> Array:
  r"""Gumbel (left-skewed) probability density function.

  JAX implementation of :obj:`scipy.stats.gumbel_l` ``pdf``.

  .. math::
     f_{pdf}(x; \mu, \beta) = \frac{1}{\beta} \exp\left( \frac{x - \mu}{\beta} - \exp\left( \frac{x - \mu}{\beta} \right) \right)

  Args:
    x: ArrayLike, value at which to evaluate pdf
    loc: ArrayLike, distribution offset (:math:`\mu`) (defaulted to 0)
    scale: ArrayLike, distribution scaling (:math:`\beta`) (defaulted to 1)

  Returns:
    array of pdf values

  See Also:
    - :func:`jax.scipy.stats.gumbel_l.logpdf`
    - :func:`jax.scipy.stats.gumbel_l.logcdf`
    - :func:`jax.scipy.stats.gumbel_l.cdf`
    - :func:`jax.scipy.stats.gumbel_l.ppf`
    - :func:`jax.scipy.stats.gumbel_l.logsf`
    - :func:`jax.scipy.stats.gumbel_l.sf`
  """
  # Delegate to logpdf so parameter validation lives in one place.
  log_density = logpdf(x, loc, scale)
  return lax.exp(log_density)
def logcdf(x: ArrayLike,
loc: ArrayLike = 0,
scale: ArrayLike = 1) -> Array:
r"""
Gumbel Distribution (Left Skewed) log cumulative density function.
JAX implementation of :obj:`scipy.stats.gumbel_l` ``logcdf``.
.. math::
f_{cdf}(x; \mu, \beta) = 1 - \exp\left( -\exp\left( \frac{x - \mu}{\beta} \right) \right)
Args:
x: ArrayLike, value at which to evaluate log(cdf)
loc: ArrayLike, distribution offset (:math:`\mu`) (defaulted to 0)
scale: ArrayLike, distribution scaling (:math:`\beta`) (defaulted to 1)
Returns:
array of logcdf values
See Also:
- :func:`jax.scipy.stats.gumbel_l.logpdf`
- :func:`jax.scipy.stats.gumbel_l.pdf`
- :func:`jax.scipy.stats.gumbel_l.cdf`
- :func:`jax.scipy.stats.gumbel_l.ppf`
- :func:`jax.scipy.stats.gumbel_l.logsf`
- :func:`jax.scipy.stats.gumbel_l.sf`
"""
x, loc, scale = promote_args_inexact("gumbel_l.logcdf", x, loc, scale)
ok = lax.gt(scale, _lax_const(scale, 0))
z = lax.div(lax.sub(x, loc), scale)
neg_exp_z = lax.neg(lax.exp(z))
# xlog1p fails here, that's why log1p is used here
# even log1p fails for some cases when using float64 mode
# so we're using this formula which is stable
log_cdf = lax.log(-lax.expm1(neg_exp_z))
return jnp.where(ok, log_cdf, np.nan)
def cdf(x: ArrayLike, loc: ArrayLike = 0, scale: ArrayLike = 1) -> Array:
  r"""Gumbel (left-skewed) cumulative density function.

  JAX implementation of :obj:`scipy.stats.gumbel_l` ``cdf``.

  .. math::
     f_{cdf}(x; \mu, \beta) = 1 - \exp\left( -\exp\left( \frac{x - \mu}{\beta} \right) \right)

  Args:
    x: ArrayLike, value at which to evaluate cdf
    loc: ArrayLike, distribution offset (:math:`\mu`) (defaulted to 0)
    scale: ArrayLike, distribution scaling (:math:`\beta`) (defaulted to 1)

  Returns:
    array of cdf values

  See Also:
    - :func:`jax.scipy.stats.gumbel_l.logpdf`
    - :func:`jax.scipy.stats.gumbel_l.pdf`
    - :func:`jax.scipy.stats.gumbel_l.logcdf`
    - :func:`jax.scipy.stats.gumbel_l.ppf`
    - :func:`jax.scipy.stats.gumbel_l.logsf`
    - :func:`jax.scipy.stats.gumbel_l.sf`
  """
  # Delegate to logcdf so the numerically stable form is reused.
  log_cdf = logcdf(x, loc, scale)
  return lax.exp(log_cdf)
def ppf(p: ArrayLike, loc: ArrayLike = 0, scale: ArrayLike = 1) -> Array:
r"""
Gumbel Distribution (Left Skewed) percent point function (inverse of CDF)
JAX implementation of :obj:`scipy.stats.gumbel_l` ``ppf``.
.. math::
F_{ppf}}(p; \mu, \beta) = \mu + \beta \log\left( -\log(1 - p) \right)
Args:
p: ArrayLike, probability value (quantile) at which to evaluate ppf
loc: ArrayLike, distribution offset (:math:`\mu`) (defaulted to 0)
scale: ArrayLike, distribution scaling (:math:`\beta`) (defaulted to 1)
Returns:
array of ppf values
See Also:
- :func:`jax.scipy.stats.gumbel_l.logpdf`
- :func:`jax.scipy.stats.gumbel_l.pdf`
- :func:`jax.scipy.stats.gumbel_l.logcdf`
- :func:`jax.scipy.stats.gumbel_l.cdf`
- :func:`jax.scipy.stats.gumbel_l.logsf`
- :func:`jax.scipy.stats.gumbel_l.sf`
"""
p, loc, scale = promote_args_inexact("gumbel_l.ppf", p, loc, scale)
ok = lax.bitwise_and(lax.gt(p, _lax_const(p, 0)),
lax.lt(p, _lax_const(p, 1)))
# quantile = loc + (scale)*log(-log(1 - p))
t1 = xlog1py(-1, lax.neg(p))
# xlogp failed here too, that's why log is used
t = lax.mul(scale, lax.log(t1))
quantile = lax.add(loc, t)
return jnp.where(ok, quantile, np.nan)
def sf(x: ArrayLike, loc: ArrayLike = 0, scale: ArrayLike = 1) -> Array:
  r"""Gumbel (left-skewed) survival function.

  JAX implementation of :obj:`scipy.stats.gumbel_l` ``sf``.

  .. math::
     f_{sf}(x; \mu, \beta) = 1 - f_{cdf}(x, \mu, \beta)

  Args:
    x: ArrayLike, value at which to evaluate survival function
    loc: ArrayLike, distribution offset (:math:`\mu`) (defaulted to 0)
    scale: ArrayLike, distribution scaling (:math:`\beta`) (defaulted to 1)

  Returns:
    array of sf values (1 - cdf)

  See Also:
    - :func:`jax.scipy.stats.gumbel_l.logpdf`
    - :func:`jax.scipy.stats.gumbel_l.pdf`
    - :func:`jax.scipy.stats.gumbel_l.logcdf`
    - :func:`jax.scipy.stats.gumbel_l.cdf`
    - :func:`jax.scipy.stats.gumbel_l.logsf`
  """
  # Delegate to logsf, where the stable closed form lives.
  log_sf = logsf(x, loc, scale)
  return jnp.exp(log_sf)
def logsf(x: ArrayLike, loc: ArrayLike = 0, scale: ArrayLike = 1) -> Array:
r"""
Gumbel Distribution (Left Skewed) log survival function.
JAX implementation of :obj:`scipy.stats.gumbel_l` ``logsf``.
.. math::
f_{sf}(x; \mu, \beta) = 1 - f_{cdf}(x, \mu, \beta)
Args:
x: ArrayLike, value at which to evaluate log survival function
loc: ArrayLike, distribution offset (:math:`\mu`) (defaulted to 0)
scale: ArrayLike, distribution scaling (:math:`\beta`) (defaulted to 1)
Returns:
array of logsf values
See Also:
- :func:`jax.scipy.stats.gumbel_l.logpdf`
- :func:`jax.scipy.stats.gumbel_l.pdf`
- :func:`jax.scipy.stats.gumbel_l.logcdf`
- :func:`jax.scipy.stats.gumbel_l.cdf`
- :func:`jax.scipy.stats.gumbel_l.sf`
"""
x, loc, scale = promote_args_inexact("gumbel_l.logsf", x, loc, scale)
ok = lax.gt(scale, _lax_const(scale, 0))
# logsf = -exp(z)
z = lax.div(lax.sub(x, loc), scale)
log_sf = lax.neg(lax.exp(z))
return jnp.where(ok, log_sf, np.nan)
| {
"repo_id": "jax-ml/jax",
"file_path": "jax/_src/scipy/stats/gumbel_l.py",
"license": "Apache License 2.0",
"lines": 197,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:jax/_src/scipy/stats/gumbel_r.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from jax._src import lax
from jax._src import numpy as jnp
from jax._src.lax.lax import _const as _lax_const
from jax._src.numpy.util import promote_args_inexact
from jax._src.typing import Array, ArrayLike
from jax._src.scipy.special import xlogy
from jax._src.nn.functions import log1mexp
def logpdf(x: ArrayLike,
loc: ArrayLike = 0,
scale: ArrayLike = 1) -> Array:
r"""
Gumbel Distribution (Right Skewed) log probability distribution function.
JAX implementation of :obj:`scipy.stats.gumbel_l` ``logpdf``.
.. math::
f_{pdf}(x; \mu, \beta) = \frac{1}{\beta} \exp\left( -\frac{x - \mu}{\beta} - \exp\left( -\frac{x - \mu}{\beta} \right) \right)
Args:
x: ArrayLike, value at which to evaluate log(pdf)
loc: ArrayLike, distribution offset (:math:`\mu`) (defaulted to 0)
scale: ArrayLike, distribution scaling (:math:`\beta`) (defaulted to 1)
Returns:
array of logpdf values
See Also:
- :func:`jax.scipy.stats.gumbel_r.pdf`
- :func:`jax.scipy.stats.gumbel_r.logcdf`
- :func:`jax.scipy.stats.gumbel_r.cdf`
- :func:`jax.scipy.stats.gumbel_r.ppf`
- :func:`jax.scipy.stats.gumbel_r.sf`
- :func:`jax.scipy.stats.gumbel_r.logsf`
"""
x, loc, scale = promote_args_inexact("gumbel_r.logpdf", x, loc, scale)
ok = lax.gt(scale, _lax_const(scale, 0))
z = lax.div(lax.sub(x, loc), scale)
# logpdf = -log(beta) - (z + exp(-z))
neg_log_scale = xlogy(-1, scale)
t2 = lax.neg(lax.add(z, lax.exp(lax.neg(z))))
log_pdf = lax.add(neg_log_scale, t2)
return jnp.where(ok, log_pdf, np.nan)
def pdf(x: ArrayLike, loc: ArrayLike = 0, scale: ArrayLike = 1) -> Array:
  r"""Gumbel (right-skewed) probability density function.

  JAX implementation of :obj:`scipy.stats.gumbel_r` ``pdf``.

  .. math::
     f_{pdf}(x; \mu, \beta) = \frac{1}{\beta} \exp\left( -\frac{x - \mu}{\beta} - \exp\left( -\frac{x - \mu}{\beta} \right) \right)

  Args:
    x: ArrayLike, value at which to evaluate pdf
    loc: ArrayLike, distribution offset (:math:`\mu`) (defaulted to 0)
    scale: ArrayLike, distribution scaling (:math:`\beta`) (defaulted to 1)

  Returns:
    array of pdf values

  See Also:
    - :func:`jax.scipy.stats.gumbel_r.logpdf`
    - :func:`jax.scipy.stats.gumbel_r.logcdf`
    - :func:`jax.scipy.stats.gumbel_r.cdf`
    - :func:`jax.scipy.stats.gumbel_r.ppf`
    - :func:`jax.scipy.stats.gumbel_r.sf`
    - :func:`jax.scipy.stats.gumbel_r.logsf`
  """
  # Delegate to logpdf so parameter validation lives in one place.
  log_density = logpdf(x, loc, scale)
  return lax.exp(log_density)
def logcdf(x: ArrayLike,
loc: ArrayLike = 0,
scale: ArrayLike = 1) -> Array:
r"""
Gumbel Distribution (Right Skewed) log cumulative density function.
JAX implementation of :obj:`scipy.stats.gumbel_r` ``logcdf``.
.. math::
f_{cdf}(x; \mu, \beta) = \exp\left( -\exp\left( -\frac{x - \mu}{\beta} \right) \right)
Args:
x: ArrayLike, value at which to evaluate log(cdf)
loc: ArrayLike, distribution offset (:math:`\mu`) (defaulted to 0)
scale: ArrayLike, distribution scaling (:math:`\beta`) (defaulted to 1)
Returns:
array of logcdf values
See Also:
- :func:`jax.scipy.stats.gumbel_r.logpdf`
- :func:`jax.scipy.stats.gumbel_r.pdf`
- :func:`jax.scipy.stats.gumbel_r.cdf`
- :func:`jax.scipy.stats.gumbel_r.ppf`
- :func:`jax.scipy.stats.gumbel_r.sf`
- :func:`jax.scipy.stats.gumbel_r.logsf`
"""
x, loc, scale = promote_args_inexact("gumbel_r.logcdf", x, loc, scale)
ok = lax.gt(scale, _lax_const(scale, 0))
z = lax.div(lax.sub(x, loc), scale)
# log cdf = -exp(-z)
log_cdf = lax.neg(lax.exp(lax.neg(z)))
return jnp.where(ok, log_cdf, np.nan)
def cdf(x: ArrayLike, loc: ArrayLike = 0, scale: ArrayLike = 1) -> Array:
  r"""Gumbel (right-skewed) cumulative density function.

  JAX implementation of :obj:`scipy.stats.gumbel_r` ``cdf``.

  .. math::
     f_{cdf}(x; \mu, \beta) = \exp\left( -\exp\left( -\frac{x - \mu}{\beta} \right) \right)

  Args:
    x: ArrayLike, value at which to evaluate cdf
    loc: ArrayLike, distribution offset (:math:`\mu`) (defaulted to 0)
    scale: ArrayLike, distribution scaling (:math:`\beta`) (defaulted to 1)

  Returns:
    array of cdf values

  See Also:
    - :func:`jax.scipy.stats.gumbel_r.logpdf`
    - :func:`jax.scipy.stats.gumbel_r.pdf`
    - :func:`jax.scipy.stats.gumbel_r.logcdf`
    - :func:`jax.scipy.stats.gumbel_r.ppf`
    - :func:`jax.scipy.stats.gumbel_r.sf`
    - :func:`jax.scipy.stats.gumbel_r.logsf`
  """
  # Delegate to logcdf so parameter validation lives in one place.
  log_cdf = logcdf(x, loc, scale)
  return lax.exp(log_cdf)
def ppf(p: ArrayLike, loc: ArrayLike = 0, scale: ArrayLike = 1) -> Array:
r"""
Gumbel Distribution (Right Skewed) percent point function.
JAX implementation of :obj:`scipy.stats.gumbel_r` ``ppf``.
.. math::
F(p; \mu, \beta) = \mu - \beta \log\left( -\log(p) \right)
Args:
p: ArrayLike, probability value (quantile) at which to evaluate ppf
loc: ArrayLike, distribution offset (:math:`\mu`) (defaulted to 0)
scale: ArrayLike, distribution scaling (:math:`\beta`) (defaulted to 1)
Returns:
array of ppf values
See Also:
- :func:`jax.scipy.stats.gumbel_r.logpdf`
- :func:`jax.scipy.stats.gumbel_r.pdf`
- :func:`jax.scipy.stats.gumbel_r.logcdf`
- :func:`jax.scipy.stats.gumbel_r.cdf`
- :func:`jax.scipy.stats.gumbel_r.sf`
- :func:`jax.scipy.stats.gumbel_r.logsf`
"""
p, loc, scale = promote_args_inexact("gumbel_r.ppf", p, loc, scale)
# 0 < p < 1
ok = lax.bitwise_and(lax.gt(p, _lax_const(p, 0)),
lax.lt(p, _lax_const(p, 1)))
# quantile = loc - (scale)*log(-log(p))
t1 = xlogy(-1, p)
t = lax.mul(scale, lax.log(t1))
quantile = lax.sub(loc, t)
return jnp.where(ok, quantile, np.nan)
def sf(x: ArrayLike, loc: ArrayLike = 0, scale: ArrayLike = 1) -> Array:
r"""
Gumbel Distribution (Right Skewed) survival function.
JAX implementation of :obj:`scipy.stats.gumbel_r` ``sf``.
.. math::
f_{sf}(x; \mu, \beta) = 1 - F_{cdf}(x; \mu, \beta)
Args:
x: ArrayLike, value at which to evaluate survival function
loc: ArrayLike, distribution offset (:math:`\mu`) (defaulted to 0)
scale: ArrayLike, distribution scaling (:math:`\beta`) (defaulted to 1)
Returns:
array of sf values (1 - cdf)
See Also:
- :func:`jax.scipy.stats.gumbel_r.logpdf`
- :func:`jax.scipy.stats.gumbel_r.pdf`
- :func:`jax.scipy.stats.gumbel_r.logcdf`
- :func:`jax.scipy.stats.gumbel_r.cdf`
- :func:`jax.scipy.stats.gumbel_r.logsf`
"""
x, loc, scale = promote_args_inexact("gumbel_r.sf", x, loc, scale)
ok = lax.gt(scale, _lax_const(scale, 0))
# sf = 1 - exp(-exp(-z))
neg_z = lax.div(lax.sub(loc, x), scale)
t1 = lax.exp(lax.neg(lax.exp(neg_z)))
_sf = lax.sub(_lax_const(x, 1), t1)
return jnp.where(ok, _sf, np.nan)
def logsf(x: ArrayLike, loc: ArrayLike = 0, scale: ArrayLike = 1) -> Array:
r"""
Gumbel Distribution (Right Skewed) log survival function.
JAX implementation of :obj:`scipy.stats.gumbel_r` ``logsf``.
Args:
x: ArrayLike, value at which to evaluate log survival function
loc: ArrayLike, distribution offset (:math:`\mu`) (defaulted to 0)
scale: ArrayLike, distribution scaling (:math:`\beta`) (defaulted to 1)
Returns:
array of logsf values
See Also:
- :func:`jax.scipy.stats.gumbel_r.logpdf`
- :func:`jax.scipy.stats.gumbel_r.pdf`
- :func:`jax.scipy.stats.gumbel_r.logcdf`
- :func:`jax.scipy.stats.gumbel_r.cdf`
- :func:`jax.scipy.stats.gumbel_r.sf`
"""
x, loc, scale = promote_args_inexact("gumbel_r.logsf", x, loc, scale)
ok = lax.gt(scale, _lax_const(scale, 0))
# logsf = log(1 - exp(-exp(-z)))
neg_z = lax.div(lax.sub(loc, x), scale)
log_sf = log1mexp(lax.exp(neg_z))
return jnp.where(ok, log_sf, np.nan)
| {
"repo_id": "jax-ml/jax",
"file_path": "jax/_src/scipy/stats/gumbel_r.py",
"license": "Apache License 2.0",
"lines": 199,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:jax/_src/test_multiprocess.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper for running multi-process tests."""
import functools
import os
import pathlib
import re
import signal
import subprocess
import sys
import time
from absl import app
import absl.flags
from absl.testing import absltest
from absl.testing import parameterized
from jax._src import distributed
from jax._src import xla_bridge as xb
from jax._src import test_util as jtu
from jax._src.config import config
from jax._src.lib import cuda_versions
from jax._src.lib import _jax
try:
import portpicker # pytype: disable=import-error
except ImportError:
portpicker = None
# --- Flags for users: worker count, devices per worker, extra worker args. ---
NUM_PROCESSES = absl.flags.DEFINE_integer(
    "num_processes", None, "Number of processes to use."
)
_GPUS_PER_PROCESS = absl.flags.DEFINE_integer(
    "gpus_per_process",
    0,
    "Number of GPUs per worker process.",
)
_TPU_CHIPS_PER_PROCESS = absl.flags.DEFINE_integer(
    "tpu_chips_per_process",
    0,
    "Number of TPU chips per worker process.",
)
CPU_COLLECTIVES_IMPLEMENTATION = absl.flags.DEFINE_string(
    "cpu_collectives_implementation",
    "",
    "CPU collectives implementation to use. Uses default if empty.",
)
EXTRA_TEST_ARGS = absl.flags.DEFINE_multi_string(
    "extra_test_args", [], "Extra flags to pass to worker process."
)
# For internal use.
# The controller passes these flags to the workers it spawns (see `_main`).
MULTIPROCESS_TEST_WORKER_ID = absl.flags.DEFINE_integer(
    "multiprocess_test_worker_id",
    -1,
    "Worker id. Set by main test process; should not be set by users.",
)
_MULTIPROCESS_TEST_CONTROLLER_ADDRESS = absl.flags.DEFINE_string(
    "multiprocess_test_controller_address",
    "",
    "Address of the JAX controller. Set by the main test process; should not be"
    " set by users.",
)
_DEVICE_IDS = absl.flags.DEFINE_list(
    "device_ids",
    None,
    "List of device ids to use. Set by main test process; should not be set by"
    " users.",
)
_ENABLE_MEGASCALE = absl.flags.DEFINE_bool(
    "enable_megascale", False, "If true, enable Megascale runtime."
)
# --- Timeouts (seconds); bump these when running under sanitizers. ---
_HEARTBEAT_TIMEOUT = absl.flags.DEFINE_integer(
    "heartbeat_timeout",
    5,
    "Timeout in seconds for heartbeat checks. Set to a higher number when"
    " running under sanitizers.",
)
_SHUTDOWN_TIMEOUT = absl.flags.DEFINE_integer(
    "shutdown_timeout",
    15,
    "JAX shutdown timeout duration in seconds for each subprocess worker. If "
    "your test is timing out, try increasing this value.",
)
_BARRIER_TIMEOUT = absl.flags.DEFINE_integer(
    "barrier_timeout",
    10,
    "Barrier timeout in seconds. Set to a higher number when running under"
    " sanitizers.",
)
_INITIALIZATION_TIMEOUT = absl.flags.DEFINE_integer(
    "initialization_timeout",
    10,
    "Coordination service initialization timeout in seconds. Set to a higher"
    " number when running under sanitizers.",
)
_DUMP_HLO = absl.flags.DEFINE_bool(
    "dump_hlo",
    False,
    "If true, dump per-process HLO to undeclared outputs. They will show up in"
    " sponge artifacts under the directory 'jax_%process_idx%_hlo_dump'.",
)

# Set by tests that intentionally exercise failing workers: when not None, a
# nonzero worker exit is accepted iff its stderr matches this regex (checked
# in the final loop of `_main`).
expect_failures_with_regex = None
def main(shard_main=None):
  """Entry point for multi-process tests.

  Args:
    shard_main: optional callable each worker runs instead of
      ``absltest.main`` (forwarded to ``_main``).
  """
  config.config_with_absl()
  app.run(functools.partial(_main, shard_main=shard_main))
class GracefulKiller:
  """Records (via ``kill_now``) whether SIGINT or SIGTERM has been caught."""
  # Based on https://stackoverflow.com/a/31464349
  # Class-level default; the first caught signal shadows it with an instance
  # attribute set to True.
  kill_now = False

  def __init__(self):
    for sig in (signal.SIGINT, signal.SIGTERM):
      signal.signal(sig, self.exit_gracefully)

  def exit_gracefully(self, sig_num, unused_stack_frame):
    print(f"Caught signal: {signal.Signals(sig_num).name} ({sig_num})")
    self.kill_now = True
def _main(argv, shard_main):
  """Controller/worker entry point.

  If --multiprocess_test_worker_id >= 0 this process is a worker: it
  initializes jax.distributed and runs the tests. Otherwise it is the
  controller: it launches --num_processes worker subprocesses, waits for
  them (or for a SIGTERM/SIGINT), dumps their logs, and asserts that all
  workers exited cleanly.
  """
  # TODO(emilyaf): Enable multiprocess tests on Windows.
  if sys.platform == "win32":
    print("Multiprocess tests are not supported on Windows.")
    return
  num_processes = NUM_PROCESSES.value
  if MULTIPROCESS_TEST_WORKER_ID.value >= 0:
    # Worker path: connect to the controller's coordination service and run
    # the test suite in-process.
    local_device_ids = _DEVICE_IDS.value
    if local_device_ids is not None:
      local_device_ids = [int(device_id) for device_id in local_device_ids]
    distributed.initialize(
        _MULTIPROCESS_TEST_CONTROLLER_ADDRESS.value,
        num_processes=num_processes,
        process_id=MULTIPROCESS_TEST_WORKER_ID.value,
        local_device_ids=local_device_ids,
        heartbeat_timeout_seconds=_HEARTBEAT_TIMEOUT.value,
        shutdown_timeout_seconds=_SHUTDOWN_TIMEOUT.value,
        initialization_timeout=_INITIALIZATION_TIMEOUT.value,
    )
    if shard_main is not None:
      return shard_main()
    return absltest.main(testLoader=jtu.JaxTestLoader())

  # Controller path from here on.
  if not argv[0].endswith(".py"):  # Skip the interpreter path if present.
    argv = argv[1:]
  if num_processes is None:
    raise ValueError("num_processes must be set")
  gpus_per_process = _GPUS_PER_PROCESS.value
  tpu_chips_per_process = _TPU_CHIPS_PER_PROCESS.value
  num_tpu_chips = num_processes * tpu_chips_per_process
  # Map the requested chip count onto a supported TPU topology (host bounds x
  # chips-per-host bounds).
  if num_tpu_chips == 0:
    tpu_host_bounds = ""
    tpu_chips_per_host_bounds = ""
  elif num_tpu_chips == 1:
    assert tpu_chips_per_process == 1
    tpu_host_bounds = "1,1,1"
    tpu_chips_per_host_bounds = "1,1,1"
  elif num_tpu_chips == 4:
    if tpu_chips_per_process == 1:
      tpu_host_bounds = "2,2,1"
      tpu_chips_per_host_bounds = "1,1,1"
    elif tpu_chips_per_process == 2:
      tpu_host_bounds = "2,1,1"
      tpu_chips_per_host_bounds = "1,2,1"
    elif tpu_chips_per_process == 4:
      tpu_host_bounds = "1,1,1"
      tpu_chips_per_host_bounds = "2,2,1"
    else:
      raise ValueError(
          "Invalid number of TPU chips per worker {}".format(
              tpu_chips_per_process
          )
      )
  elif num_tpu_chips == 8:
    if tpu_chips_per_process == 1:
      tpu_host_bounds = "4,2,1"
      tpu_chips_per_host_bounds = "1,1,1"
    elif tpu_chips_per_process == 4:
      # Note: this branch assumes we are using 2x4 v6e LitePod, and will not
      # work with 4x2 v5e LitePod.
      tpu_host_bounds = "1,2,1"
      tpu_chips_per_host_bounds = "2,2,1"
    elif tpu_chips_per_process == 8:
      tpu_host_bounds = "1,1,1"
      tpu_chips_per_host_bounds = "2,4,1"
    else:
      # TODO(phawkins): implement other cases.
      raise ValueError(
          "Invalid number of TPU chips per worker {}".format(
              tpu_chips_per_process
          )
      )
  else:
    raise ValueError(f"Invalid number of TPU chips {num_tpu_chips}")

  # One slice-builder port per worker; fixed ports if portpicker is absent.
  if portpicker is None:
    slicebuilder_ports = [10000 + i for i in range(num_processes)]
  else:
    slicebuilder_ports = [
        portpicker.pick_unused_port() for _ in range(num_processes)
    ]
  slicebuilder_addresses = ",".join(
      f"localhost:{port}" for port in slicebuilder_ports
  )
  megascale_coordinator_port = None
  if gpus_per_process > 0:
    # Get the number of GPUs visible to this process without initializing the runtime
    if cuda_versions is not None:
      local_device_count = cuda_versions.cuda_device_count()
      if num_processes * gpus_per_process > local_device_count:
        print(
            f"Cannot run {num_processes} processes with {gpus_per_process} GPU(s) "
            f"each on a system with only {local_device_count} local GPU(s), "
            f"starting {local_device_count // gpus_per_process} instead - test "
            "cases will likely be skipped!"
        )
        num_processes = local_device_count // gpus_per_process
  # Port for the JAX coordination service hosted by worker 0.
  if portpicker is None:
    jax_port = 9876
  else:
    # TODO(emilyaf): Use a port server if there are flaky port collisions due
    # to pick_unused_port() racing among tests.
    jax_port = portpicker.pick_unused_port()
  subprocesses = []
  output_filenames = []
  output_files = []
  sys_path = os.pathsep.join(sys.path)
  for i in range(num_processes):
    device_ids = None
    env = os.environ.copy()
    # Note: Fix for rules_python >= 1.7.0 (Strict Hermeticity):
    # The parent process sees dependencies via sys.path, but modern rules_python
    # does not export this to PYTHONPATH by default. We must manually propagate
    # it so child workers can locate dependencies.
    path_parts = [sys_path, env.get("PYTHONPATH", "")]
    env["PYTHONPATH"] = os.pathsep.join(p for p in path_parts if p)
    # Re-exec this same binary as a worker via /proc/self/exe, forwarding the
    # original argv plus worker-identification flags.
    args = [
        "/proc/self/exe",
        *argv,
        f"--num_processes={num_processes}",
        f"--multiprocess_test_worker_id={i}",
        f"--multiprocess_test_controller_address=localhost:{jax_port}",
        f"--heartbeat_timeout={_HEARTBEAT_TIMEOUT.value}",
        f"--shutdown_timeout={_SHUTDOWN_TIMEOUT.value}",
        f"--barrier_timeout={_BARRIER_TIMEOUT.value}",
        f"--initialization_timeout={_INITIALIZATION_TIMEOUT.value}",
        "--logtostderr",
    ]
    if num_tpu_chips > 0:
      # Partition the TPU chips contiguously across workers.
      device_ids = range(
          i * tpu_chips_per_process, (i + 1) * tpu_chips_per_process)
      env["CLOUD_TPU_TASK_ID"] = str(i)
      env["TPU_CHIPS_PER_PROCESS_BOUNDS"] = tpu_chips_per_host_bounds
      env["TPU_PROCESS_BOUNDS"] = tpu_host_bounds
      env["TPU_PROCESS_ADDRESSES"] = slicebuilder_addresses
      env["TPU_PROCESS_PORT"] = str(slicebuilder_ports[i])
      env["TPU_VISIBLE_CHIPS"] = ",".join(map(str, device_ids))
      env["ALLOW_MULTIPLE_LIBTPU_LOAD"] = "1"
    if gpus_per_process > 0:
      # Partition the GPUs contiguously across workers.
      device_ids = range(i * gpus_per_process, (i + 1) * gpus_per_process)
      args.append(f"--jax_cuda_visible_devices={','.join(map(str, device_ids))}")
    if device_ids is not None:
      args.append(f"--device_ids={','.join(map(str, device_ids))}")
    cpu_collectives_impl = CPU_COLLECTIVES_IMPLEMENTATION.value
    if cpu_collectives_impl:
      args.append(
          f"--jax_cpu_collectives_implementation={cpu_collectives_impl}"
      )
    if _ENABLE_MEGASCALE.value or cpu_collectives_impl == "megascale":
      if portpicker is None:
        megascale_port = 9877
      else:
        megascale_port = portpicker.pick_unused_port()
      # The first worker's port doubles as the megascale coordinator address
      # for everyone.
      if megascale_coordinator_port is None:
        megascale_coordinator_port = megascale_port
      args += [
          f"--megascale_coordinator_address=localhost:{megascale_coordinator_port}",
          f"--megascale_port={megascale_port}",
      ]
    args += EXTRA_TEST_ARGS.value
    # Per-worker stdout/stderr go to undeclared test outputs for debugging.
    undeclared_outputs = os.environ.get("TEST_UNDECLARED_OUTPUTS_DIR", "/tmp")
    stdout_name = f"{undeclared_outputs}/jax_{i}_stdout.log"
    stderr_name = f"{undeclared_outputs}/jax_{i}_stderr.log"
    if _DUMP_HLO.value:
      hlo_dump_path = f"{undeclared_outputs}/jax_{i}_hlo_dump/"
      os.makedirs(hlo_dump_path, exist_ok=True)
      env["XLA_FLAGS"] = f"--xla_dump_to={hlo_dump_path}"
    stdout = open(stdout_name, "wb")
    stderr = open(stderr_name, "wb")
    print(f"Launching process {i}:")
    print(f" stdout: {stdout_name}")
    print(f" stderr: {stderr_name}")
    proc = subprocess.Popen(args, env=env, stdout=stdout, stderr=stderr)
    subprocesses.append(proc)
    output_filenames.append((stdout_name, stderr_name))
    output_files.append((stdout, stderr))
  print(" All launched, running ".center(80, "="), flush=True)

  # Wait for all the children to finish or for a SIGTERM from bazel. If we get
  # SIGTERM, we still want to collect their logs, so kill them and continue.
  killer = GracefulKiller()
  running_procs = dict(enumerate(subprocesses))
  while not killer.kill_now and running_procs:
    time.sleep(0.1)
    for i, proc in list(running_procs.items()):
      if proc.poll() is not None:
        print(f"Process {i} finished.", flush=True)
        running_procs.pop(i)
  if killer.kill_now and running_procs:
    print("Caught termination, terminating remaining children.", flush=True)
    # Send a SIGTERM to each child process, to let it know it should terminate.
    for i, proc in running_procs.items():
      proc.terminate()
      print(f"Process {i} terminated.", flush=True)
    # We give the child process(es) a few seconds for their own cleanup, and
    # keep the rest (up to 15s) for copying the children logs into our own.
    time.sleep(5)
    # Send a SIGKILL (a "hard" kill) to each child process. This is CRITICAL:
    # without it, this process may end up waiting a long time on the proc.wait()
    # below, and never get to saving the children logs, making test timeouts
    # very hard to debug.
    for i, proc in running_procs.items():
      proc.kill()
      print(f"Process {i} killed.")
    print("Killed all child processes.", flush=True)

  # Collect exit codes and log contents from every worker.
  retvals = []
  stdouts = []
  stderrs = []
  for proc, fds, (stdout, stderr) in zip(
      subprocesses, output_files, output_filenames
  ):
    retvals.append(proc.wait())
    for fd in fds:
      fd.close()
    stdouts.append(pathlib.Path(stdout).read_text(errors="replace"))
    stderrs.append(pathlib.Path(stderr).read_text(errors="replace"))
  print(" All finished ".center(80, "="), flush=True)
  print(" Summary ".center(80, "="))
  for i, (retval, stdout, stderr) in enumerate(zip(retvals, stdouts, stderrs)):
    # Pull the "Ran N tests in Xs" footer out of the absltest stderr.
    m = re.search(r"Ran \d+ tests? in [\d.]+s\n\n.*", stderr, re.MULTILINE)
    result = m.group().replace("\n\n", "; ") if m else "Test crashed?"
    print(
        f"Process {i}, ret: {retval}, len(stdout): {len(stdout)}, "
        f"len(stderr): {len(stderr)}; {result}"
    )
  print(" Detailed logs ".center(80, "="))
  for i, (retval, stdout, stderr) in enumerate(zip(retvals, stdouts, stderrs)):
    print(f" Process {i}: return code: {retval} ".center(80, "="))
    if stdout:
      print(f" Process {i} stdout ".center(80, "-"))
      print(stdout)
    if stderr:
      print(f" Process {i} stderr ".center(80, "-"))
      print(stderr)
  print(" Done detailed logs ".center(80, "="), flush=True)
  # Fail the controller if any worker failed, unless the failure was expected
  # (matched by `expect_failures_with_regex`).
  for i, (retval, stderr) in enumerate(zip(retvals, stderrs)):
    if retval != 0:
      if expect_failures_with_regex is not None:
        assert re.search(
            expect_failures_with_regex, stderr
        ), f"process {i} failed, expected regex: {expect_failures_with_regex}"
      else:
        assert retval == 0, f"process {i} failed, return value: {retval}"
class MultiProcessTest(parameterized.TestCase):
  """Base class for tests run under the multi-process harness.

  Uses coordination-service barriers so all processes enter and leave each
  test case together, giving the whole job a shared pass/fail fate.
  """

  def setUp(self):
    """Start tests together."""
    super().setUp()
    if xb.process_count() == 1:
      self.skipTest("Test requires multiple processes.")
    assert xb.process_count() == NUM_PROCESSES.value, (
        xb.process_count(),
        NUM_PROCESSES.value,
    )
    # Make sure all processes are at the same test case.
    client = distributed.global_state.client
    if client is None:
      raise TypeError("client cannot be None")
    try:
      client.wait_at_barrier(
          f"{self._testMethodName}_start", _BARRIER_TIMEOUT.value * 1000)
    except _jax.JaxRuntimeError as e:
      msg, *_ = e.args
      # A deadline here means another process never reached this barrier,
      # i.e. init or an earlier test failed in that process.
      # NOTE(review): non-DEADLINE_EXCEEDED runtime errors are swallowed
      # here — presumably intentional, but worth confirming.
      if msg.startswith("DEADLINE_EXCEEDED"):
        raise RuntimeError(
            f"Init or some test executed earlier than {self._testMethodName} "
            "failed. Check logs from earlier tests to debug further. We "
            "recommend debugging that specific failed test with "
            "`--test_filter` before running the full test suite again."
        ) from e

  def tearDown(self):
    """End tests together."""
    client = distributed.global_state.client
    if client is None:
      raise TypeError("client cannot be None")
    # Ensure a shared fate for tests where a subset of processes run different
    # test assertions (i.e. some processes may pass and some processes fail -
    # but the overall test should fail).
    try:
      client.wait_at_barrier(
          f"{self._testMethodName}_end", _BARRIER_TIMEOUT.value * 1000)
    except _jax.JaxRuntimeError as e:
      msg, *_ = e.args
      if msg.startswith("DEADLINE_EXCEEDED"):
        raise RuntimeError(
            f"Test {self._testMethodName} failed in another process. We "
            "recommend debugging that specific failed test with "
            "`--test_filter` before running the full test suite again."
        ) from e
    super().tearDown()
| {
"repo_id": "jax-ml/jax",
"file_path": "jax/_src/test_multiprocess.py",
"license": "Apache License 2.0",
"lines": 408,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
jax-ml/jax:jax/_src/xla_metadata_lib.py | # Copyright 2024 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
from jax._src import config
from jax._src.lib import xla_client
# Shorthand for the XLA client's config extension (source of `unset`, used as
# a sentinel in `update_metadata`).
config_ext = xla_client._xla.config
class XlaMetadata:
  """Immutable, hashable wrapper around a dict of XLA metadata attributes.

  The hash is precomputed from the sorted items so instances are cheap to
  use as dict/cache keys.
  """
  __slots__ = ['val', 'hash']

  val: dict[str, Any]

  def __init__(self, val):
    self.val = val
    # Sorted items give equal hashes for dicts with equal contents,
    # regardless of insertion order.
    self.hash = hash(tuple(sorted(self.val.items())))

  def __hash__(self):
    return self.hash

  def __eq__(self, other):
    # Bug fix: the previous implementation accessed `other.val` for any
    # non-None operand, raising AttributeError for foreign types (e.g.
    # `metadata == 42`). Returning NotImplemented lets `==`/`!=` fall back
    # to identity comparison for non-XlaMetadata operands.
    if not isinstance(other, XlaMetadata):
      return NotImplemented
    return self.val == other.val
def filter_nones(d: dict) -> dict:
  """Return a copy of ``d`` with every None-valued entry removed."""
  filtered = {}
  for key, value in d.items():
    if value is not None:
      filtered[key] = value
  return filtered
def update_metadata(a, b: dict[str, Any]):
  """Merge ``b`` into metadata ``a`` and return a new XlaMetadata.

  ``a`` may be None, the config `unset` sentinel, or an existing XlaMetadata;
  entries whose merged value is None are dropped. If ``b`` is empty, ``a`` is
  returned unchanged.
  """
  if not b:
    return a
  if a is None or a is config_ext.unset:
    merged = {}
  else:
    merged = dict(a.val)
  merged.update(b)
  return XlaMetadata(filter_nones(merged))
def current_xla_metadata() -> dict[str, Any] | None:
metadata = config.xla_metadata_context_manager.value
return None if metadata is None else metadata.val
| {
"repo_id": "jax-ml/jax",
"file_path": "jax/_src/xla_metadata_lib.py",
"license": "Apache License 2.0",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:jax/experimental/array_serialization/pytree_serialization.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Serializations routines for pytrees including array and non-array serialization.
"""
from __future__ import annotations
from os import PathLike
import os
import re
from typing import Any
from uuid import uuid4, UUID
import json
import asyncio
import threading
from concurrent.futures import ThreadPoolExecutor
import shutil
import logging
import jax
from jax._src import distributed
from jax._src.api_util import flatten_axes
from jax._src.layout import Format
from jax.experimental import multihost_utils
from jax.experimental.array_serialization import tensorstore_impl as ts_impl
import jax.experimental.array_serialization.pytree_serialization_utils as utils
from jax._src import path as pathlib
import numpy as np
logger = logging.getLogger(__name__)

# Serializes concurrent `save` calls within this process.
_THREADING_SAVE_LOCK = threading.Lock()

_REMOTE_URL_PREFIXES = ['gs://', 's3://']
# On-disk layout of a checkpoint directory.
_PYTREEDEF_FILE = "pytreedef.json"
_ARCHIVE_NAME = "archive.zip"
_USE_OCDBT = True  # a lot of the code relies on this being True
# Max byte length of a checkpoint path in the multiprocess all-gather.
_MAX_PATH_LENGTH = 4096
_ARRAY_STORE_DIRNAME = "array_store"
# Textual encoding of array leaves, e.g. "Array(float32[2, 3])"; the regex
# must parse what the format string produces.
_ARRAY_TYPE_FORMAT = "Array({dtype}[{shape}])"
_ARRAY_TYPE_REGEX = r"Array\(([a-zA-Z0-9_]+)\[([0-9, ]*)\]\)"
# Worker threads in the shared serialization executor.
_MAX_CONCURRENCY = 32
# Barrier timeout (seconds) in `_sync_on_key`.
_TIMEOUT_SEC = 30

PyTreeT = Any

__all__ = ["save", "load", "load_pytreedef",
           "nonblocking_load", "nonblocking_save"]
def _get_unique_sync_key() -> str | None:
"""Generate a thread-local key for ensuring all host finish (de)serializing"""
if jax.process_count() == 1:
return None
# broadcast a thread-local unique barrier name
sync_key_unique = multihost_utils.broadcast_one_to_all(
np.frombuffer(uuid4().bytes, dtype=np.int32))
sync_key_id = UUID(bytes=np.array(sync_key_unique).tobytes())
return f"jax_sync_key_{str(sync_key_id)}"
def _is_str_same_on_all_hosts(path: str | PathLike[str]) -> bool:
"""All-gather the location of the checkpoint and check if it's the same."""
if jax.process_count() <= 1:
return False
path_b = str(path).encode("utf-8")
if len(path_b) > _MAX_PATH_LENGTH:
raise ValueError(f"Path exceeds maximum length of {_MAX_PATH_LENGTH} in"
" multiprocess case.")
path_array = np.concatenate([
np.frombuffer(path_b, dtype=np.uint8), np.zeros(
_MAX_PATH_LENGTH - len(path_b), dtype=np.uint8)])
path_array = multihost_utils.process_allgather(path_array)
return bool(np.all(path_array[0] == path_array[1:]))
def _sync_on_key(key: str | None, extra_tag: str = "") -> None:
if key is None:
return
full_key = f"{key}-{extra_tag}" if extra_tag else key
if (client := distributed.global_state.client) is not None:
client.wait_at_barrier(full_key, timeout_in_ms=_TIMEOUT_SEC * 1000)
def _is_array_like(x):
return isinstance(x, (jax.Array, np.ndarray))
def _leaf_to_desc(leaf) -> str:
  """Encode a pytree leaf as a JSON-friendly description string.

  None becomes "null", arrays become e.g. "Array(float32[2, 3])", and any
  other leaf is described by its type name.
  """
  if leaf is None:
    return "null"
  if _is_array_like(leaf):
    shape_str = ", ".join(map(str, leaf.shape))
    return _ARRAY_TYPE_FORMAT.format(dtype=leaf.dtype.name, shape=shape_str)
  return type(leaf).__name__
def _desc_to_leaf(leaf_desc: str | None) -> str | None | jax.ShapeDtypeStruct:
  """Decode a leaf description produced by ``_leaf_to_desc``.

  Returns None for None, a ShapeDtypeStruct for array descriptions of the
  form "Array(dtype[d0, d1, ...])", and the raw string for any other leaf
  description.
  """
  if leaf_desc is None:
    return None
  # Run the regex once (the original matched twice: once to test, once to
  # extract the groups).
  shape_dtype_match = re.match(_ARRAY_TYPE_REGEX, leaf_desc)
  if shape_dtype_match is None:
    return leaf_desc
  dtype_str, shape_str = shape_dtype_match.groups()
  # The shape group only admits digits, commas and spaces, so a plain split
  # suffices; empty segments cover the scalar "[]" case.
  shape = [int(dim.strip()) for dim in shape_str.split(",") if dim.strip()]
  return jax.ShapeDtypeStruct(shape, jax.numpy.dtype(dtype_str))
def _is_remote_path(path: str | PathLike[str]):
  """Check whether a path is remote by examining the prefix."""
  # pathlib.Path collapses "//" into "/", so compare against the prefix with
  # its final slash dropped (e.g. "gs:/").
  text = str(path)
  for prefix in _REMOTE_URL_PREFIXES:
    if text.startswith(prefix[:-1]):
      return True
  return False
def _norm_path(path: str | PathLike[str]) -> Any:
  """Normalize to a Path; expand '~' and resolve only for local paths."""
  normalized = pathlib.Path(path)
  if not _is_remote_path(path):
    normalized = normalized.expanduser().resolve()
  return normalized
def _rm_dir(root: Any) -> None:
  """Recursively delete a directory, local or remote."""
  if not _is_remote_path(root):
    shutil.rmtree(root)
  else:
    # Remote (etils epath) paths implement their own rmtree.
    root.rmtree()  # pytype: disable=attribute-error
def _set_up_destination(root: str | PathLike[str], overwrite: bool,
                        pytree_repr: dict[str, Any], distinct_locations: bool,
                        sync_key: str | None) -> dict[str, Any]:
  """Inspect the destination, set it up for writing, potentially read existing data.

  With ``overwrite=True`` a non-empty destination is deleted (after checking
  it only holds checkpoint files); with ``overwrite=False`` a non-empty
  destination raises. Returns ``pytree_repr`` unchanged.

  Raises:
    RuntimeError: destination contains files not produced by this module.
    ValueError: destination non-empty and ``overwrite`` is False.
  """
  root = _norm_path(root)
  if overwrite:
    if root.exists() and len(list(root.iterdir())) > 0:
      # check that we're only deleting things that come from JAX
      # refuse to rm directories containing additional entries
      extra_member_paths = [
          path for path in list(root.iterdir()) if path.name not in
          (_PYTREEDEF_FILE, _ARCHIVE_NAME, _ARRAY_STORE_DIRNAME)]
      if len(extra_member_paths) != 0:
        raise RuntimeError(
            "Refusing to work on a directory that is not a previous checkpoint."
            f" Unrecognized paths: {extra_member_paths}. Remove them manually"
            f" if you're sure you want to use {root} as the checkpoint"
            " directory.")
    # Only one process deletes a shared location; every process deletes its
    # own distinct location.
    if (jax.process_index() == 0 or distinct_locations) and root.exists():
      _rm_dir(root)
    # Barrier: no process proceeds until the deletion is done everywhere.
    _sync_on_key(sync_key, "overwrite")
    return pytree_repr
  else:
    if (root.exists() and len(list(root.iterdir())) > 0):  # not empty
      raise ValueError(f"Files already exist at path: `{root}`, but you"
                       f" specified `{overwrite=}`")
    return pytree_repr
def _prepare_directory(root: str | PathLike[str], overwrite: bool,
                       pytreedef_repr: dict[str, Any], distinct_locations: bool,
                       sync_key: str | None):
  """Prepare the directory: check destination, potentially read existing data
  and overwrite.

  Raises:
    RuntimeError: If the destination directory cannot be created.
  """
  root = _norm_path(root)
  # prepare the destination directory, overwrite destination directory or error
  pytreedef_repr = _set_up_destination(
      root, overwrite, pytreedef_repr, distinct_locations, sync_key)
  # Only create local directories; remote stores create paths implicitly.
  if not _is_remote_path(root) and (distinct_locations
                                    or jax.process_index() == 0):
    root.mkdir(exist_ok=True)  # do not make parents, that's too much
  if not root.exists() or not root.is_dir():
    raise RuntimeError(f"Could not create destination directory at {root}")
  # Barrier: wait until the directory exists before any process writes to it.
  _sync_on_key(sync_key, "mkdir")
  return pytreedef_repr
def _write_arrays(array_store_path: Any, arrs: list[Any],
                  arr_leaf_ids: list[int], ts_specs: list[Any | None],
                  distinct_locations: bool):
  """Serialize arrays to tensorstore, one subpath per flat-leaf id.

  Args:
    array_store_path: base directory of the array store.
    arrs: the array leaves to write.
    arr_leaf_ids: flat-leaf index of each array; leaf i lands under
      ``array_store_path / str(i)``.
    ts_specs: per-array tensorstore spec overrides (entries may be None).
    distinct_locations: whether each process writes to its own location.
  """
  paths = [array_store_path / str(leaf_id) for leaf_id in arr_leaf_ids]
  # In the shared-location multiprocess case, each process writes to its own
  # per-process kvstore; `_finalize_array_store` combines them later.
  process_idx = None
  if not distinct_locations and jax.process_count() > 1:
    process_idx = jax.process_index()
  default_ts_specs = [ts_impl.get_tensorstore_spec(path, ocdbt=_USE_OCDBT,
                                                   process_idx=process_idx,
                                                   arr=arr)
                      for (path, arr) in zip(paths, arrs)]
  # User-provided specs override fields of the defaults.
  ts_specs = [ts_impl.merge_nested_ts_specs(default_ts_spec, ts_spec)
              for (default_ts_spec, ts_spec) in zip(default_ts_specs, ts_specs)]
  # sanity check the ts specs
  if len(ts_specs) > 0:  # verify the base path is shared for all arrays
    expected_path = ts_specs[0]["kvstore"]["base"]["path"]  # shared base path
    for ts_spec, arr in zip(ts_specs, arrs):
      ts_impl.verify_tensorstore_spec(ts_spec, arr, expected_path,
                                      ocdbt=_USE_OCDBT, check_metadata=True)

  async def _serialize_arrays():
    await asyncio.gather(*[
        ts_impl.async_serialize(arr, ts_spec, primary_host=None)
        for (arr, ts_spec) in zip(arrs, ts_specs)])

  asyncio.run(_serialize_arrays())
def _finalize_array_store(kvstore_path, distinct_locations: bool):
  """When multiple processes are writing, they must write to a per-process
  location followed by combining them via no-copy links to the final location.
  """
  # only in multiprocess case and only process 0
  if distinct_locations or jax.process_count() == 1 or jax.process_index() != 0:
    return
  # The dummy key only serves to build kvstore specs; the "path" component is
  # popped below so the combine operates on the stores themselves.
  dummy_key_path = os.path.join(kvstore_path, "dummy_key")
  combined_kvstore = ts_impl.get_tensorstore_spec(
      dummy_key_path, ocdbt=True, process_idx=None)["kvstore"]
  children_kvstores = [ts_impl.get_tensorstore_spec(
      dummy_key_path, ocdbt=True, process_idx=i)["kvstore"]
      for i in range(jax.process_count())]
  _ = combined_kvstore.pop("path")
  _ = [kvstore.pop("path") for kvstore in children_kvstores]
  asyncio.run(ts_impl.combine_kvstores(combined_kvstore, children_kvstores))
def _write_pytreedef(directory: Any, pytree_repr: dict[str, Any],
                     distinct_locations: bool):
  """Write the serialized pytree structure as JSON to the destination."""
  # Shared location: only process 0 writes; distinct locations: everyone does.
  should_write = distinct_locations or jax.process_index() == 0
  if not should_write:
    return
  destination = _norm_path(directory) / _PYTREEDEF_FILE
  destination.write_text(json.dumps(pytree_repr, indent=2))
def _tree_broadcast(a, b, is_leaf=lambda x: x is None):
  """Broadcast the prefix tree `a` to the full tree `b`

  Uses `flatten_axes` for better error messages on mismatched arity but allowing
  for custom is_leaf in the `a` and `b` trees.
  """
  # Replace each leaf of `a` with its index, broadcast the index tree against
  # `b`'s structure, then map the indices back to the original leaves.
  a_leaves, a_struct = jax.tree.flatten(a, is_leaf=is_leaf)
  a_idx2leaf_map = dict(enumerate(a_leaves))
  a_idx = jax.tree.unflatten(a_struct, a_idx2leaf_map.keys())
  # NOTE(review): flatten_axes appears to yield a flat per-leaf list, so the
  # return value is leaf-aligned with `b` rather than `b`-structured; callers
  # in this file re-flatten the result, which is consistent with that —
  # confirm before relying on the structure.
  a_idx_broadcast = flatten_axes("tree_broadcast",
                                 jax.tree.structure(b, is_leaf=is_leaf), a_idx)
  return jax.tree.map(lambda i: a_idx2leaf_map[i], a_idx_broadcast)
# Shared thread pool for pytreedef/array reads and writes (bounded fan-out).
_serialization_executor = ThreadPoolExecutor(max_workers=_MAX_CONCURRENCY)
def save(data: PyTreeT, directory: str | PathLike[str], *,
         overwrite: bool = True, ts_specs: PyTreeT | None = None) -> None:
  """Saves the given data structure to the provided directory path.

  This function provides functionality to serialize and save a data structure
  comprising JAX arrays, along with its structure to a given directory. It
  leverages `PyTree` for flattening and reconstructing the data structure.

  This is a simple experimental array serialization API, for anything more
  complex and for all checkpointing prefer: https://github.com/google/orbax

  Args:
    data: The data structure to be saved. Arbitrary composition of JAX arrays,
      including nested structures.
    directory: The directory path where the data will be saved. A local path or
      a remote URL (e.g., gs://, s3://). For remote URLs, `etils` is required.
    overwrite: If True, any existing directory with the same name will be
      overwritten.
    ts_specs: Optional tensorstore specs to use for serialization. If None,
      defaults to using the default tensorstore specs.

  Example:
    >>> data = {"a": jnp.array([1, 2]), "b": None}
    >>> save(data, directory)
  """
  # Serialize concurrent saves in this process so their multi-host barriers
  # cannot interleave.
  with _THREADING_SAVE_LOCK:
    return _save(data, directory, overwrite=overwrite, ts_specs=ts_specs)
def _save(data: PyTreeT, directory: str | PathLike[str], *,
          overwrite: bool = True, ts_specs: PyTreeT | None = None) -> None:
  """Lock-free body of `save`; see `save` for the public contract."""
  sync_key = _get_unique_sync_key()  # get a synchronization key for multi-host

  if _is_remote_path(directory) and not pathlib.epath_installed:
    raise RuntimeError("For saving to remote URLs (e.g., gs, s3) you need the"
                       " `etils` module installed. You can install it using"
                       " `pip install etils`.")
  # Broadcast the (prefix) ts_specs to the full structure of `data`.
  ts_specs = _tree_broadcast(ts_specs, data,
                             is_leaf=ts_impl.is_tensorstore_spec_leaf)
  data_flat, pytreedef = jax.tree.flatten(data, is_leaf=lambda x: x is None)
  if not all(x is None or _is_array_like(x) for x in data_flat):
    raise ValueError("For serialization, all leaves must be either None or"
                     " jax.Array-like objects.")
  distinct_locations = not _is_str_same_on_all_hosts(directory)
  if jax.process_count() > 1 and distinct_locations:
    raise ValueError(
        "Saving to different locations on different hosts is not supported,"
        " because it is extremely fragile. Consider using a single location.")
  root = _norm_path(directory)
  # 1. serialize the pytree #################################
  pytreedef_repr = utils.serialize_pytreedef(pytreedef)
  pytreedef_repr[utils._LEAF_IDS_KEY] = jax.tree.map(_leaf_to_desc, data_flat)
  pytreedef_repr = _prepare_directory(
      root, overwrite, pytreedef_repr, distinct_locations, sync_key)
  futures = []
  futures.append(_serialization_executor.submit(
      _write_pytreedef, root, pytreedef_repr, distinct_locations))
  # 2. serialize arrays #####################################
  array_store_path = root / _ARRAY_STORE_DIRNAME
  arrs = [data for data in data_flat if _is_array_like(data)]
  arr_leaf_ids = [i for i, data in enumerate(data_flat) if _is_array_like(data)]
  # Keep only the ts_specs corresponding to array leaves.
  ts_specs_flat = jax.tree.leaves(ts_specs,
                                  is_leaf=ts_impl.is_tensorstore_spec_leaf)
  ts_specs_flat = [ts_specs_flat[i] for i in arr_leaf_ids]
  futures.append(_serialization_executor.submit(
      _write_arrays, array_store_path, arrs, arr_leaf_ids, ts_specs_flat,
      distinct_locations))
  # 3. wait for all futures to complete #####################
  _ = [fut.result() for fut in futures]
  _sync_on_key(sync_key, "array_serialization")
  # 4. finalize the array writing ###########################
  if len(arr_leaf_ids) > 0 and _USE_OCDBT:
    _serialization_executor.submit(  # call from a thread to not nest asyncio
        _finalize_array_store, array_store_path, distinct_locations).result()
  # we are done with all async ops here, we can block ####
  _sync_on_key(sync_key, "end")
def _read_arrays(array_store_path: str | PathLike[str], arr_leaf_ids: list[int],
                 ts_specs: list[Any], shardings: list[Any]):
  """Deserialize the arrays with the given leaf ids; returns {leaf_id: array}."""
  # array_store_path = root / _LEAF_DATA_DIR / _ARRAY_STORE_DIRNAME
  arr_store_path = _norm_path(array_store_path)
  arr_paths = [arr_store_path / str(leaf_id) for leaf_id in arr_leaf_ids]
  # byte limiter to limit number of parallel reads, resizes to largest read
  byte_limiter = ts_impl._LimitInFlightBytes(10 * 1024 ** 3)  # 10 GB
  default_ts_specs = [ts_impl.get_tensorstore_spec(path, ocdbt=_USE_OCDBT,
                                                   process_idx=None)
                      for path in arr_paths]
  # User-provided specs override fields of the defaults.
  ts_specs = [ts_impl.merge_nested_ts_specs(default_ts_spec, ts_spec)
              for (default_ts_spec, ts_spec) in zip(default_ts_specs, ts_specs)]
  if len(ts_specs) > 0:  # verify the base path is shared for all arrays
    expected_path = ts_specs[0]["kvstore"]["base"]["path"]  # shared base path
    for ts_spec in ts_specs:
      ts_impl.verify_tensorstore_spec(ts_spec, arr=None, path=expected_path,
                                      ocdbt=_USE_OCDBT, check_metadata=False)

  async def _deserialize_arrays():
    return await asyncio.gather(*[
        ts_impl.async_deserialize(sharding, ts_spec, byte_limiter=byte_limiter)
        for (sharding, ts_spec) in zip(shardings, ts_specs)])

  return dict(zip(arr_leaf_ids, asyncio.run(_deserialize_arrays())))
def load_pytreedef(directory: str | PathLike[str]) -> PyTreeT:
  """Loads a pytree from the given directory.

  This is a simple experimental array serialization API, for anything more
  complex and for all checkpointing prefer: https://github.com/google/orbax

  Args:
    directory: Directory path to load from.

  Returns:
    The loaded pytree with arrays represented as jax.ShapeDtypeStruct's.
  """
  assert not _is_remote_path(directory) or pathlib.epath_installed, (
      "For checkpointing using remote URLs (e.g., gs, s3) you need `etils`"
      " module installed. You can install it using `pip install etils`.")
  pytreedef_path = _norm_path(directory) / _PYTREEDEF_FILE
  raw_tree = json.loads(pytreedef_path.read_text())
  # Decode each leaf description, then rebuild the tree structure around them.
  decoded_leaves = [_desc_to_leaf(d) for d in raw_tree[utils._LEAF_IDS_KEY]]
  treedef = utils.deserialize_pytreedef(raw_tree)
  return jax.tree.unflatten(treedef, decoded_leaves)
def load(directory: str | PathLike[str], shardings: PyTreeT, *,
         mask: PyTreeT | None = None, ts_specs: PyTreeT | None = None
         ) -> PyTreeT:
  """Loads and reconstructs a data structure from a directory.

  This is a simple experimental array serialization API, for anything more
  complex and for all checkpointing prefer: https://github.com/google/orbax

  Args:
    directory: Directory path where the data is stored.
    shardings: Sharding strategy for array objects, either a Sharding or a
      ShapeDtypeStruct with a Sharding/Format.
    mask: boolean prefix tree for partial loading, will return None for False
      leaves.
    ts_specs: Optional tensorstore specs to use for deserialization. If None,
      defaults to using the default tensorstore specs.

  Returns:
    Reconstructed data.

  Example:
    >>> save(data, directory)
    >>> restored_data = load(directory, SingleDeviceSharding(jax.devices()[0]))
  """
  assert not _is_remote_path(directory) or pathlib.epath_installed, (
    "For checkpointing using remote URLs (e.g., gs, s3) you need `etils`"
    " module installed. You can install it using `pip install etils`.")
  root = _norm_path(directory)
  assert root.is_dir(), f"Checkpoint directory {root} does not exist"
  # None marks a pruned (masked-out) leaf, so treat None as a leaf everywhere
  is_leaf = lambda x: x is None
  # deserialize PyTreeDef
  pytree = load_pytreedef(directory)
  # broadcast the (prefix) shardings and tensorstore specs to the full pytree
  shardings = _tree_broadcast(shardings, pytree)
  ts_specs = _tree_broadcast(ts_specs, pytree,
                             is_leaf=ts_impl.is_tensorstore_spec_leaf)
  if mask is not None:
    # replace every subtree whose mask prefix-leaf is falsy with None leaves
    _prefix_mask = lambda m, x: jax.tree.map(lambda _: None, x) if not m else x
    pytree = jax.tree.map(_prefix_mask, mask, pytree)
  pytreedef = jax.tree.structure(pytree, is_leaf=is_leaf)
  leaf_ids_flat = jax.tree.leaves(pytree, is_leaf=is_leaf)
  shardings_flat = jax.tree.leaves(shardings, is_leaf=is_leaf)
  if any(isinstance(shardings, Format) for shardings in shardings_flat):
    raise NotImplementedError(
        "Deserialization with `Format` instead of `Sharding` is not currently"
        " supported. Pass ShapeDtypeStruct(shape, dtype, sharding=format)"
        " instead.")
  ts_specs_flat = jax.tree.leaves(ts_specs,
                                  is_leaf=ts_impl.is_tensorstore_spec_leaf)
  # deserialize array objects
  # flat positions of the leaves that survived masking (non-None)
  arr_leaf_ids = [i for i, leaf_id in enumerate(leaf_ids_flat)
                  if leaf_id is not None]
  shardings_flat = [shardings_flat[i] for i in arr_leaf_ids]
  ts_specs_flat = [ts_specs_flat[i] for i in arr_leaf_ids]
  # read on the serialization executor so reads can overlap other work
  arrs_fut = _serialization_executor.submit(
      _read_arrays, root / _ARRAY_STORE_DIRNAME, arr_leaf_ids, ts_specs_flat,
      shardings_flat)
  arrs = arrs_fut.result()
  # masked-out positions get None; the rest get their deserialized arrays
  filled_values = [arrs.get(i, None) for i, _ in enumerate(leaf_ids_flat)]
  return jax.tree.unflatten(pytreedef, filled_values)
def nonblocking_save(data: PyTreeT, directory: str | PathLike[str], *,
                     overwrite: bool = True, ts_specs: PyTreeT | None = None
                     ) -> utils.PyTreeFuture:
  """Nonblocking alias of save, return an awaitable future with a pytree stub.

  This is a simple experimental array serialization API, for anything more
  complex and for all checkpointing prefer: https://github.com/google/orbax

  Examples:
    >>> fut = nonblocking_save(data, directory)
    >>> print(fut.pytree)  # a pytree of jax.ShapeDtypeStruct's
    >>> print(fut.result())  # None, blocking until the serialization is done
  """
  # kick off the (blocking) save on the serialization executor right away
  future = utils.PyTreeFuture(_serialization_executor.submit(
      save, data, directory, overwrite=overwrite, ts_specs=ts_specs))

  def _as_stub(leaf):
    # arrays become shape/dtype stubs; non-array leaves pass through unchanged
    if _is_array_like(leaf):
      return jax.ShapeDtypeStruct(leaf.shape, leaf.dtype)
    return leaf

  # attach a nice looking pytree describing the data being written
  future.pytree = jax.tree.map(_as_stub, data)
  return future
def nonblocking_load(directory: str | PathLike[str], shardings: PyTreeT, *,
                     mask: PyTreeT | None = None,
                     ts_specs: PyTreeT | None = None) -> utils.PyTreeFuture:
  """Nonblocking alias of load, return an awaitable future with a pytree stub.

  This is a simple experimental array serialization API, for anything more
  complex and for all checkpointing prefer: https://github.com/google/orbax

  Examples:
    >>> fut = nonblocking_load(directory)
    >>> print(fut.pytree)  # a pytree of jax.ShapeDtypeStruct
    >>> print(fut.result())  # the fully populated pytree
  """
  # TODO(rdyro): the awaitable future output is a workaround
  # it should return the fully populated pytree instead of just
  # jax.ShapeDtypeStruct for arrays by constructing them asynchronously
  # start the (blocking) load in the background on the serialization executor
  future = utils.PyTreeFuture(_serialization_executor.submit(
      load, directory, shardings, mask=mask, ts_specs=ts_specs))
  # expose the tree structure immediately via the stub pytree
  future.pytree = load_pytreedef(directory)
  return future
| {
"repo_id": "jax-ml/jax",
"file_path": "jax/experimental/array_serialization/pytree_serialization.py",
"license": "Apache License 2.0",
"lines": 419,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:jax/experimental/array_serialization/pytree_serialization_utils.py | # Copyright 2021 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities for representing pytreedefs in a serializable format.
"""
import base64
import logging
from types import ModuleType
from concurrent.futures import Future
from typing import Any, TypeVar
import jax
from jax._src.export.serialization import (flatbuffers, _serialize_pytreedef,
_deserialize_pytreedef_to_pytree,
ser_flatbuf)
from jax.export import register_pytree_node_serialization # pylint: disable=unused-import
T = TypeVar("T")
PickleModule = ModuleType
logger = logging.getLogger(__name__)
_READABLE_PYTREE_SERIALIZATION = True
_TREE_REPR_KEY = "__jax_pytreedef_repr"
_LEAF_IDS_KEY = "__jax_leaf_ids"
_NOT_REGISTERED_MESSAGE = (
" * If you want to register a custom leaf, register it via"
" `register_pytree_leaf_serialization` first.\n"
" * If you want to register a custom node, register is via"
" `register_pytree_node_serialization`")
__all__ = ["serialize_pytreedef", "deserialize_pytreedef",
"register_pytree_node_serialization"]
class PyTreeFuture(Future[Any]):
  """A wrapper around a Future that makes it look like an async function."""

  def __init__(self, future: Future[Any]):
    self._future = future
    # pytree stub describing the data behind this future; filled in by callers
    self.pytree = None

  def done(self):
    """Returns True once the wrapped future has completed."""
    return self._future.done()

  def result(self, *args, **kw):
    """Blocks until the wrapped future completes; returns its result."""
    return self._future.result(*args, **kw)

  def __await__(self):
    # cooperatively yield until the underlying future resolves
    while not self.done():
      yield
    return self.result()

  def __repr__(self):
    return f"PyTreeFuture(done={self.done()}, pytree={self.pytree})"
def serialize_pytreedef(node) -> dict[str, Any]:
  """Serializes a PyTreeDef into a JSON-friendly dict representation."""
  fb_builder = flatbuffers.Builder(65536)
  fb_builder.Finish(_serialize_pytreedef(fb_builder, node))
  # flatbuffer bytes are base64-encoded so the repr can live in a JSON file
  encoded_tree = base64.b64encode(fb_builder.Output()).decode("utf-8")
  return {_TREE_REPR_KEY: encoded_tree,
          _LEAF_IDS_KEY: list(range(node.num_leaves))}
def deserialize_pytreedef(pytreedef_repr: dict[str, Any]):
  """Reconstructs a PyTreeDef from its serialized dict representation."""
  raw_bytes = base64.b64decode(pytreedef_repr[_TREE_REPR_KEY])
  flatbuf_root = ser_flatbuf.PyTreeDef.GetRootAs(raw_bytes)
  # rebuild a pytree from the flatbuffer, then take its structure
  return jax.tree.structure(_deserialize_pytreedef_to_pytree(flatbuf_root))
| {
"repo_id": "jax-ml/jax",
"file_path": "jax/experimental/array_serialization/pytree_serialization_utils.py",
"license": "Apache License 2.0",
"lines": 67,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:jax/experimental/buffer_callback.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from jax._src.buffer_callback import (
Buffer as Buffer,
ExecutionContext as ExecutionContext,
ExecutionStage as ExecutionStage,
buffer_callback as buffer_callback,
)
| {
"repo_id": "jax-ml/jax",
"file_path": "jax/experimental/buffer_callback.py",
"license": "Apache License 2.0",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:jax/experimental/fused.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from jax._src import core
from jax._src import linear_util as lu
from jax._src import dispatch
from jax._src.core import typeof
from jax._src.tree_util import tree_flatten, tree_unflatten
from jax._src.util import safe_map, safe_zip, weakref_lru_cache, unzip2
from jax._src.api_util import debug_info, flatten_fun_nokwargs
from jax._src.interpreters import ad
from jax._src.interpreters import batching
from jax._src.interpreters import mlir
from jax._src.interpreters import partial_eval as pe
from jax._src.lib.mlir import ir
map, unsafe_map = safe_map, map
zip, unsafe_zip = safe_zip, zip
def fused(*, out_spaces):
  """Decorator that binds the wrapped function as a single fused_call.

  Args:
    out_spaces: memory spaces for the outputs of the decorated function.
  """
  def decorator(fun):
    def fused_fun(*args):
      dbg = debug_info('fused', fun, args, {})
      flat_args, in_tree = tree_flatten(args)
      # trace with inputs placed in the Any memory space
      avals = [typeof(a).update(memory_space=core.MemorySpace.Any)
               for a in flat_args]
      closed_jaxpr, out_tree = _trace_to_jaxpr(fun, in_tree, tuple(avals), dbg)
      flat_outs = fused_p.bind(*flat_args, jaxpr=closed_jaxpr,
                               out_spaces=out_spaces)
      return tree_unflatten(out_tree, flat_outs)
    return fused_fun
  return decorator
@weakref_lru_cache
def _trace_to_jaxpr(fun, in_tree, in_avals, dbg):
  """Traces `fun` to a ClosedJaxpr (weakly cached on `fun`)."""
  wrapped = lu.wrap_init(fun, debug_info=dbg)
  wrapped, out_tree = flatten_fun_nokwargs(wrapped, in_tree)
  traced_jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(wrapped, in_avals)
  return core.ClosedJaxpr(traced_jaxpr, consts), out_tree()
# fused_call: a call-like primitive whose outputs carry explicit memory spaces.
fused_p = core.Primitive('fused_call')
fused_p.multiple_results = True

@fused_p.def_abstract_eval
def _fused_abstract_eval(*in_avals, out_spaces, jaxpr):
  # output avals are the inner jaxpr's outputs tagged with the requested spaces
  return [a.update(memory_space=s)
          for a, s in zip(jaxpr.out_avals, out_spaces)]

dispatch.simple_impl(fused_p)
def _fused_lowering(ctx, *args, out_spaces, jaxpr):
  # Lowering rule (cuda): emit the jaxpr as a called computation wrapped in a
  # "fused" custom call whose backend config carries the output memory spaces
  # and fusion directives.
  const_args_and_avals = core.jaxpr_const_args(jaxpr.jaxpr)
  const_args, const_arg_avals = unzip2(const_args_and_avals)
  # materialize jaxpr constants as IR constants, prepended to the operands
  const_arg_values = [
      mlir.ir_constant(c, const_lowering=ctx.const_lowering, aval=aval)
      for c, aval in const_args_and_avals]
  in_avals = [*const_arg_avals, *ctx.avals_in]
  func_op, _, _ = mlir.lower_called_computation(
      "fused", jaxpr, ctx.module_context, len(const_args), in_avals,
      ctx.avals_out, ctx.tokens_in)
  out_spaces_ = [ir.StringAttr.get(str(s)) for s in out_spaces]
  fused = mlir.custom_call(
      "fused",
      result_types=func_op.type.results,
      operands=mlir.flatten_ir_values([*const_arg_values, *args]),
      called_computations=[func_op.name.value],
      # inlineable=False + MUST_FUSE=True instructs the compiler to fuse the
      # called computation as a unit
      backend_config=dict(out_spaces=ir.ArrayAttr.get(out_spaces_),
                          inlineable=ir.BoolAttr.get(False),
                          MUST_FUSE=ir.BoolAttr.get(True)),
  )
  return fused.results

mlir.register_lowering(fused_p, _fused_lowering, platform="cuda")
def _fused_batcher(axis_data, vals_in, dims_in, *, jaxpr, out_spaces):
  # Batching rule: batch the inner jaxpr; output memory spaces are unchanged.
  batched_jaxpr, dims_out = batching.batch_jaxpr2(jaxpr, axis_data, dims_in)
  outs = fused_p.bind(*vals_in, jaxpr=batched_jaxpr, out_spaces=out_spaces)
  return outs, dims_out

batching.fancy_primitive_batchers[fused_p] = _fused_batcher
def _fused_jvp(primals, tangents, *, jaxpr, out_spaces):
  # JVP rule: jvp the inner jaxpr, binding only the nonzero input tangents.
  nzs = [not isinstance(t, ad.Zero) for t in tangents]
  jaxpr_jvp, out_nzs = ad.jvp_jaxpr(jaxpr, nzs, False)
  nz_tangents = [t for t in tangents if not isinstance(t, ad.Zero)]
  # outputs are primals followed by nonzero tangents; each tangent output
  # reuses the memory space of its corresponding primal output
  spaces_jvp = (*out_spaces, *[s for s, nz in zip(out_spaces, out_nzs) if nz])
  outs = fused_p.bind(*primals, *nz_tangents, jaxpr=jaxpr_jvp,
                      out_spaces=spaces_jvp)
  primals_out, nz_tangents_out = outs[:len(out_nzs)], outs[len(out_nzs):]
  nz_outs = iter(nz_tangents_out)
  # scatter the nonzero tangents back to full length, ad.Zero elsewhere
  tangents_out = [next(nz_outs) if nz else ad.Zero(aval.to_tangent_aval())
                  for aval, nz in zip(jaxpr.out_avals, out_nzs)]
  assert next(nz_outs, None) is None  # all nonzero tangents consumed
  return primals_out, tangents_out

ad.primitive_jvps[fused_p] = _fused_jvp
def _fused_lin(_is_vjp, nzs, *primals, jaxpr, out_spaces):
  # Linearization rule: obtain the linear part by jvp'ing the inner jaxpr and
  # then DCE'ing away the primal outputs.
  # TODO(mattjj): why did i do jvp + dce here, not ad.linearize_jaxpr?
  jaxpr_jvp, out_nzs = ad.jvp_jaxpr(jaxpr, nzs, False)
  # keep only the (nonzero) tangent outputs of the jvp jaxpr
  lin_outs = [False] * len(out_nzs) + [True] * sum(out_nzs)
  jaxpr_lin_, used_inputs = pe.dce_jaxpr(jaxpr_jvp.jaxpr, lin_outs, False)
  jaxpr_lin = pe.close_jaxpr(jaxpr_lin_)
  # tangent outputs reuse the memory spaces of their primal counterparts
  spaces_lin = tuple(s for s, nz in zip(out_spaces, out_nzs) if nz)
  # the primal computation runs the original jaxpr unchanged
  primals_out = fused_p.bind(*primals, jaxpr=jaxpr, out_spaces=out_spaces)
  tangent_avals_out = [a.to_tangent_aval() for a in jaxpr.out_avals]
  def fused_lin(primals, *tangents):
    # applies the DCE'd linear jaxpr to the nonzero tangents
    nz_tangents = [t for t in tangents if not isinstance(t, ad.Zero)]
    inputs = [x for x, u in zip([*primals, *nz_tangents], used_inputs) if u]
    nz_outs = fused_p.bind(*inputs, jaxpr=jaxpr_lin, out_spaces=spaces_lin)
    nz_outs_ = iter(nz_outs)
    # scatter nonzero outputs back to full length, ad.Zero elsewhere
    outs = [next(nz_outs_) if nz else ad.Zero(a)
            for nz, a in zip(out_nzs, tangent_avals_out)]
    assert next(nz_outs_, None) is None
    return outs
  return primals_out, out_nzs, primals, fused_lin

ad.primitive_linearizations[fused_p] = _fused_lin
def _fused_transpose(cts_in, *primals_in, jaxpr, out_spaces):
  # Transpose rule: transpose the inner jaxpr and bind it as another fused
  # call; cotangent outputs inherit the memory space of their primal input.
  in_flat, in_tree = tree_flatten((primals_in, cts_in))
  in_avals = [typeof(x).update(memory_space=core.MemorySpace.Any)
              for x in in_flat]
  trans_jaxpr, out_tree = _transpose_jaxpr(jaxpr, in_tree, (*in_avals,))
  in_spaces = [x.aval.memory_space if isinstance(x, ad.UndefinedPrimal)
               else typeof(x).memory_space for x in primals_in]
  # keep spaces only for inputs that actually get a (non-None) cotangent
  cts_out_ = tree_unflatten(out_tree, trans_jaxpr.out_avals)
  trans_spaces = tuple(s for x, s in zip(cts_out_, in_spaces) if x)
  cts_out = fused_p.bind(*in_flat, jaxpr=trans_jaxpr, out_spaces=trans_spaces)
  return tree_unflatten(out_tree, cts_out)
@weakref_lru_cache
def _transpose_jaxpr(jaxpr, in_tree, in_avals):
  """Builds the transposed jaxpr of `jaxpr` via ad.backward_pass (cached)."""
  cell = lambda: None  # mutable namespace to smuggle out_tree out of tracing
  def transposed(*in_flat):
    primals_in, cts_in = tree_unflatten(in_tree, in_flat)
    # relax memory spaces on undefined primals and zero cotangents to Any
    primals_in = tuple(
        ad.UndefinedPrimal(p.aval.update(memory_space=core.MemorySpace.Any))
        if type(p) is ad.UndefinedPrimal else p for p in primals_in)
    cts_in = [ad.Zero(ct.aval.update(memory_space=core.MemorySpace.Any))
              if type(ct) is ad.Zero else ct for ct in cts_in]
    out = ad.backward_pass(jaxpr.jaxpr, False, jaxpr.consts, primals_in, cts_in)
    # represent zero cotangents as None so callers can filter them out
    out = [ct if not isinstance(ct, ad.Zero) else None for ct in out]
    cts_out, cell.out_tree = tree_flatten(out)  # type: ignore
    return cts_out
  dbg = jaxpr.jaxpr.debug_info.with_unknown_names()
  trans_jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(
      lu.wrap_init(transposed, debug_info=dbg), in_avals)
  return core.ClosedJaxpr(trans_jaxpr, consts), cell.out_tree  # type: ignore

ad.primitive_transposes[fused_p] = _fused_transpose
| {
"repo_id": "jax-ml/jax",
"file_path": "jax/experimental/fused.py",
"license": "Apache License 2.0",
"lines": 143,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:jax/experimental/hijax.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ruff: noqa
from jax._src.ad_util import (
Zero as Zero,
)
from jax._src.core import (
AbstractValue as AbstractValue,
AvalQDD as AvalQDD,
ShapedArray as ShapedArray,
aval_method as aval_method,
aval_property as aval_property,
AvalMutableQDD as AvalMutableQDD,
)
from jax._src.interpreters.ad import (
instantiate_zeros as instantiate_zeros,
is_undefined_primal as is_undefined_primal,
)
from jax._src.effects import (
control_flow_allowed_effects as control_flow_allowed_effects,
)
from jax._src.hijax import (
HiPrimitive as HiPrimitive,
HipSpec as HipSpec,
HiType as HiType,
MutableHiType as MutableHiType,
VJPHiPrimitive as VJPHiPrimitive,
register_hitype as register_hitype,
)
from jax._src.state import (
AbstractRef as AbstractRef,
TransformedRef as TransformedRef
)
| {
"repo_id": "jax-ml/jax",
"file_path": "jax/experimental/hijax.py",
"license": "Apache License 2.0",
"lines": 44,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
jax-ml/jax:jax/experimental/jax2tf/tests/multiprocess/jax2tf_multiprocess_test.py | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multihost test for JAX2TF."""
import jax
from jax import numpy as jnp
from jax._src import pjit
from jax._src import test_multiprocess as jt_multiprocess
from jax._src import test_util as jtu
from jax.experimental import multihost_utils
from jax.sharding import PartitionSpec as P
import unittest
import warnings
# tensorflow is an optional dependency: import it (and the jax2tf helpers that
# need it) guarded, stubbing everything out if unavailable so that this module
# still imports and the tests can be skipped.
try:
  # TODO(b/470156950): Remove this once a proper fix is in place
  with warnings.catch_warnings():
    warnings.filterwarnings("ignore",
                            category=FutureWarning,
                            message=".*np.object.*")
    import tensorflow as tf
  from jax.experimental import jax2tf
  from jax.experimental.jax2tf.tests import tf_test_util
  JaxToTfTestCase = tf_test_util.JaxToTfTestCase
except ImportError:
  tf = None
  jax2tf = None  # type: ignore[assignment]
  tf_test_util = None  # type: ignore[assignment]
  # fall back to the plain JAX test base so the class definition still works
  JaxToTfTestCase = jtu.JaxTestCase  # type: ignore[misc]
@unittest.skipIf(tf is None, "Test requires tensorflow.")
class Jax2TfMultiProcessTest(JaxToTfTestCase, jt_multiprocess.MultiProcessTest):
  """Multi-process tests for exporting pjit-sharded functions via jax2tf."""

  def test_multi_process_pjit_export(self):
    """Pjitted function can be exported."""
    # random weights and input for a 16x16 @ 16x1 matmul
    key_w, key_x = jax.random.split(jax.random.PRNGKey(1234), 2)
    w = jax.random.uniform(key_w, [16, 16], dtype=jnp.float32)
    x = jax.random.uniform(key_x, [16, 1], dtype=jnp.float32)
    # NOTE(review): indentation below reconstructed — the mesh context is
    # assumed to span the rest of the test body; confirm against upstream.
    with jtu.create_mesh((4, 2), ("x", "y")):
      pjit_matmul = pjit.pjit(jnp.matmul, in_shardings=(P("x", "y"), None))
      # gather the fully-addressable reference result on every process
      jax_result = multihost_utils.process_allgather(
          pjit_matmul(w, x), tiled=True)
      tf_model = tf.Module()
      tf_model.w = tf.Variable(w)
      tf_closure = tf.function(
          lambda x: {"y": jax2tf.convert(pjit_matmul)(tf_model.w, x)},
          autograph=False,
      ).get_concrete_function(
          tf.TensorSpec.from_tensor(tf.constant(x), name="x")
      )
      # only process 0 writes the SavedModel, reloads it, and compares the
      # TF result against the gathered JAX reference
      if jax.process_index() == 0:
        export_dir = self.create_tempdir().full_path
        tf.saved_model.save(
            tf_model,
            export_dir,
            signatures={"serving_default": tf_closure},
        )
        loaded = tf.saved_model.load(export_dir)
        tf_result = loaded.signatures["serving_default"](x=x)["y"]
        self.assertAllClose(tf_result.numpy(), jax_result)
if __name__ == "__main__":
jt_multiprocess.main()
| {
"repo_id": "jax-ml/jax",
"file_path": "jax/experimental/jax2tf/tests/multiprocess/jax2tf_multiprocess_test.py",
"license": "Apache License 2.0",
"lines": 69,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.