Dataset columns:
  language    stringclasses (1 value)
  repo        stringclasses (346 values)
  path        stringlengths (6 to 201)
  class_span  dict
  source      stringlengths (21 to 2.38M)
  target      stringlengths (1 to 96)
language: python
repo: walkccc__LeetCode
path: solutions/2223. Sum of Scores of Built Strings/2223.py
class_span: { "start": 0, "end": 465 }
source:

class ____:
  def sumScores(self, s: str) -> int:
    n = len(s)
    # https://cp-algorithms.com/string/z-function.html#implementation
    z = [0] * n
    # [l, r] := the indices of the rightmost segment match
    l = 0
    r = 0

    for i in range(1, n):
      if i < r:
        z[i] = min(r - i, z[i - l])
      while i + z[i] < n and s[z[i]] == s[i + z[i]]:
        z[i] += 1
      if i + z[i] > r:
        l = i
        r = i + z[i]

    return sum(z) + n

target: Solution
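A quick way to sanity-check this record: the example from the problem statement ("babab") should score 9. A minimal sketch, assuming the class above is defined under its target name, Solution:

# Uses the class above under its target name, Solution.
# For s = "babab", z = [0, 0, 3, 0, 1], so sum(z) + n = 4 + 5 = 9.
print(Solution().sumScores("babab"))  # 9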

language: python
repo: getsentry__sentry
path: src/sentry/digests/types.py
class_span: { "start": 1401, "end": 1536 }
source:

class ____(NamedTuple):
    event: Event | GroupEvent
    rules: list[Rule]
    notification_uuid: str | None

target: NotificationWithRuleObjects

language: python
repo: kamyu104__LeetCode-Solutions
path: Python/smallest-missing-genetic-value-in-each-subtree.py
class_span: { "start": 29, "end": 1035 }
source:

class ____(object):
    def smallestMissingValueSubtree(self, parents, nums):
        """
        :type parents: List[int]
        :type nums: List[int]
        :rtype: List[int]
        """
        def iter_dfs(adj, nums, i, lookup):
            stk = [i]
            while stk:
                i = stk.pop()
                if nums[i] in lookup:
                    continue
                lookup.add(nums[i])
                for j in adj[i]:
                    stk.append(j)

        result = [1]*len(parents)
        i = next((i for i in xrange(len(nums)) if nums[i] == 1), -1)
        if i == -1:
            return result
        adj = [[] for _ in xrange(len(parents))]
        for j in xrange(1, len(parents)):
            adj[parents[j]].append(j)
        lookup = set()
        miss = 1
        while i >= 0:
            iter_dfs(adj, nums, i, lookup)
            while miss in lookup:
                miss += 1
            result[i] = miss
            i = parents[i]
        return result

target: Solution
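Note this record targets Python 2 (xrange, the object base class). A minimal check under that assumption, using the example from LeetCode 2003: node 0's subtree contains genes {1, 2, 3, 4}, so its smallest missing value is 5, while every other subtree lacks gene 1:

# Python 2; uses the class above under its target name, Solution.
print(Solution().smallestMissingValueSubtree([-1, 0, 0, 2], [1, 2, 3, 4]))
# [5, 1, 1, 1]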

language: python
repo: pydantic__pydantic
path: tests/test_validate_call.py
class_span: { "start": 38495, "end": 38853 }
source:

class ____(BaseModel):
    @classmethod
    @validate_call(config={'defer_build': True})
    def cls_meth(cls, x: int) -> 'DeferBuildClass':
        return DeferBuildClass()


def test_validate_call_defer_build() -> None:
    DeferBuildClass.cls_meth(x=1)

    with pytest.raises(ValidationError):
        DeferBuildClass.cls_meth(x='not_an_int')

target: DeferBuildClass
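For readers unfamiliar with the decorator under test, a minimal standalone sketch of pydantic's validate_call (the function name here is illustrative):

from pydantic import ValidationError, validate_call

@validate_call
def double(x: int) -> int:
    return x * 2

print(double("2"))  # 4: "2" is coerced to int before the call runs
try:
    double("not_an_int")
except ValidationError as exc:
    print(exc.error_count())  # 1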

language: python
repo: ray-project__ray
path: python/ray/data/tests/unit/test_datatype.py
class_span: { "start": 31461, "end": 32551 }
source:

class ____:
    """Test that pattern-matching types cannot be converted to concrete NumPy dtypes."""

    @pytest.mark.parametrize(
        "pattern_type_factory",
        [
            lambda: DataType.list(),
            lambda: DataType.large_list(),
            lambda: DataType.struct(),
            lambda: DataType.map(),
            lambda: DataType.tensor(),
            lambda: DataType.variable_shaped_tensor(),
            lambda: DataType.temporal(),
        ],
    )
    def test_pattern_matching_to_numpy_dtype_raises(self, pattern_type_factory):
        """Test that calling to_numpy_dtype on pattern-matching types raises an error.

        Pattern-matching types represent abstract type categories (e.g., "any list")
        and cannot be converted to concrete NumPy dtypes.
        """
        dt = pattern_type_factory()
        assert dt.is_pattern_matching()
        with pytest.raises(ValueError, match="Cannot convert pattern-matching type"):
            dt.to_numpy_dtype()


if __name__ == "__main__":
    pytest.main(["-v", __file__])

target: TestPatternMatchingToNumpyDtype

language: python
repo: apache__airflow
path: providers/snowflake/tests/unit/snowflake/hooks/test_snowflake.py
class_span: { "start": 3511, "end": 49275 }
source:

class ____:
    @pytest.mark.parametrize(
        ("connection_kwargs", "expected_uri", "expected_conn_params"),
        [
            (
                BASE_CONNECTION_KWARGS,
                (
                    "snowflake://user:pw@airflow.af_region/db/public?"
                    "application=AIRFLOW&authenticator=snowflake&role=af_role&warehouse=af_wh"
                ),
                {
                    "account": "airflow",
                    "application": "AIRFLOW",
                    "authenticator": "snowflake",
                    "database": "db",
                    "password": "pw",
                    "region": "af_region",
                    "role": "af_role",
                    "schema": "public",
                    "session_parameters": None,
                    "user": "user",
                    "warehouse": "af_wh",
                },
            ),
            (
                {
                    **BASE_CONNECTION_KWARGS,
                    "extra": {
                        "extra__snowflake__database": "db",
                        "extra__snowflake__account": "airflow",
                        "extra__snowflake__warehouse": "af_wh",
                        "extra__snowflake__region": "af_region",
                        "extra__snowflake__role": "af_role",
                    },
                },
                (
                    "snowflake://user:pw@airflow.af_region/db/public?"
                    "application=AIRFLOW&authenticator=snowflake&role=af_role&warehouse=af_wh"
                ),
                {
                    "account": "airflow",
                    "application": "AIRFLOW",
                    "authenticator": "snowflake",
                    "database": "db",
                    "password": "pw",
                    "region": "af_region",
                    "role": "af_role",
                    "schema": "public",
                    "session_parameters": None,
                    "user": "user",
                    "warehouse": "af_wh",
                },
            ),
            (
                {
                    **BASE_CONNECTION_KWARGS,
                    "extra": {
                        "extra__snowflake__database": "db",
                        "extra__snowflake__account": "airflow",
                        "extra__snowflake__warehouse": "af_wh",
                        "extra__snowflake__region": "af_region",
                        "extra__snowflake__role": "af_role",
                        "extra__snowflake__insecure_mode": "True",
                        "extra__snowflake__json_result_force_utf8_decoding": "True",
                        "extra__snowflake__client_request_mfa_token": "True",
                        "extra__snowflake__client_store_temporary_credential": "True",
                    },
                },
                (
                    "snowflake://user:pw@airflow.af_region/db/public?"
                    "application=AIRFLOW&authenticator=snowflake&role=af_role&warehouse=af_wh"
                ),
                {
                    "account": "airflow",
                    "application": "AIRFLOW",
                    "authenticator": "snowflake",
                    "database": "db",
                    "password": "pw",
                    "region": "af_region",
                    "role": "af_role",
                    "schema": "public",
                    "session_parameters": None,
                    "user": "user",
                    "warehouse": "af_wh",
                    "insecure_mode": True,
                    "json_result_force_utf8_decoding": True,
                    "client_request_mfa_token": True,
                    "client_store_temporary_credential": True,
                },
            ),
            (
                {
                    **BASE_CONNECTION_KWARGS,
                    "extra": {
                        "extra__snowflake__database": "db",
                        "extra__snowflake__account": "airflow",
                        "extra__snowflake__warehouse": "af_wh",
                        "extra__snowflake__region": "af_region",
                        "extra__snowflake__role": "af_role",
                        "extra__snowflake__insecure_mode": "False",
                        "extra__snowflake__json_result_force_utf8_decoding": "False",
                        "extra__snowflake__client_request_mfa_token": "False",
                        "extra__snowflake__client_store_temporary_credential": "False",
                    },
                },
                (
                    "snowflake://user:pw@airflow.af_region/db/public?"
                    "application=AIRFLOW&authenticator=snowflake&role=af_role&warehouse=af_wh"
                ),
                {
                    "account": "airflow",
                    "application": "AIRFLOW",
                    "authenticator": "snowflake",
                    "database": "db",
                    "password": "pw",
                    "region": "af_region",
                    "role": "af_role",
                    "schema": "public",
                    "session_parameters": None,
                    "user": "user",
                    "warehouse": "af_wh",
                },
            ),
            (
                {
                    **BASE_CONNECTION_KWARGS,
                    "extra": {
                        **BASE_CONNECTION_KWARGS["extra"],
                        "region": "",
                    },
                },
                (
                    "snowflake://user:pw@airflow/db/public?"
                    "application=AIRFLOW&authenticator=snowflake&role=af_role&warehouse=af_wh"
                ),
                {
                    "account": "airflow",
                    "application": "AIRFLOW",
                    "authenticator": "snowflake",
                    "database": "db",
                    "password": "pw",
                    "region": "",
                    "role": "af_role",
                    "schema": "public",
                    "session_parameters": None,
                    "user": "user",
                    "warehouse": "af_wh",
                },
            ),
            (
                {
                    **BASE_CONNECTION_KWARGS,
                    "password": ";/?:@&=+$, ",
                },
                (
                    "snowflake://user:;%2F?%3A%40&=+$, @airflow.af_region/db/public?"
                    "application=AIRFLOW&authenticator=snowflake&role=af_role&warehouse=af_wh"
                ),
                {
                    "account": "airflow",
                    "application": "AIRFLOW",
                    "authenticator": "snowflake",
                    "database": "db",
                    "password": ";/?:@&=+$, ",
                    "region": "af_region",
                    "role": "af_role",
                    "schema": "public",
                    "session_parameters": None,
                    "user": "user",
                    "warehouse": "af_wh",
                },
            ),
            (
                {
                    **BASE_CONNECTION_KWARGS,
                    "extra": {
                        **BASE_CONNECTION_KWARGS["extra"],
                        "extra__snowflake__insecure_mode": False,
                        "extra__snowflake__json_result_force_utf8_decoding": True,
                        "extra__snowflake__client_request_mfa_token": False,
                        "extra__snowflake__client_store_temporary_credential": False,
                    },
                },
                (
                    "snowflake://user:pw@airflow.af_region/db/public?"
                    "application=AIRFLOW&authenticator=snowflake&role=af_role&warehouse=af_wh"
                ),
                {
                    "account": "airflow",
                    "application": "AIRFLOW",
                    "authenticator": "snowflake",
                    "database": "db",
                    "password": "pw",
                    "region": "af_region",
                    "role": "af_role",
                    "schema": "public",
                    "session_parameters": None,
                    "user": "user",
                    "warehouse": "af_wh",
                    "json_result_force_utf8_decoding": True,
                },
            ),
            (
                {
                    **BASE_CONNECTION_KWARGS,
                    "extra": {
                        **BASE_CONNECTION_KWARGS["extra"],
                        "ocsp_fail_open": True,
                    },
                },
                (
                    "snowflake://user:pw@airflow.af_region/db/public?"
                    "application=AIRFLOW&authenticator=snowflake&role=af_role&warehouse=af_wh"
                ),
                {
                    "account": "airflow",
                    "application": "AIRFLOW",
                    "authenticator": "snowflake",
                    "database": "db",
                    "password": "pw",
                    "region": "af_region",
                    "role": "af_role",
                    "schema": "public",
                    "session_parameters": None,
                    "user": "user",
                    "warehouse": "af_wh",
                    "ocsp_fail_open": True,
                },
            ),
            (
                {
                    **BASE_CONNECTION_KWARGS,
                    "extra": {
                        **BASE_CONNECTION_KWARGS["extra"],
                        "ocsp_fail_open": False,
                    },
                },
                (
                    "snowflake://user:pw@airflow.af_region/db/public?"
                    "application=AIRFLOW&authenticator=snowflake&role=af_role&warehouse=af_wh"
                ),
                {
                    "account": "airflow",
                    "application": "AIRFLOW",
                    "authenticator": "snowflake",
                    "database": "db",
                    "password": "pw",
                    "region": "af_region",
                    "role": "af_role",
                    "schema": "public",
                    "session_parameters": None,
                    "user": "user",
                    "warehouse": "af_wh",
                    "ocsp_fail_open": False,
                },
            ),
        ],
    )
    def test_hook_should_support_prepare_basic_conn_params_and_uri(
        self, connection_kwargs, expected_uri, expected_conn_params
    ):
        with mock.patch.dict("os.environ", AIRFLOW_CONN_TEST_CONN=Connection(**connection_kwargs).get_uri()):
            assert SnowflakeHook(snowflake_conn_id="test_conn").get_uri() == expected_uri
            assert SnowflakeHook(snowflake_conn_id="test_conn")._get_conn_params == expected_conn_params

    def test_get_conn_params_should_support_private_auth_in_connection(
        self, base64_encoded_encrypted_private_key: Path
    ):
        connection_kwargs: Any = {
            **BASE_CONNECTION_KWARGS,
            "password": _PASSWORD,
            "extra": {
                "database": "db",
                "account": "airflow",
                "warehouse": "af_wh",
                "region": "af_region",
                "role": "af_role",
                "private_key_content": base64_encoded_encrypted_private_key,
            },
        }
        with mock.patch.dict("os.environ", AIRFLOW_CONN_TEST_CONN=Connection(**connection_kwargs).get_uri()):
            assert "private_key" in SnowflakeHook(snowflake_conn_id="test_conn")._get_conn_params

    @pytest.mark.parametrize("include_params", [True, False])
    def test_hook_param_beats_extra(self, include_params):
        """When both hook params and extras are supplied, hook params should beat extras."""
        hook_params = dict(
            account="account",
            warehouse="warehouse",
            database="database",
            region="region",
            role="role",
            authenticator="authenticator",
            session_parameters="session_parameters",
        )
        extras = {k: f"{v}_extra" for k, v in hook_params.items()}
        with mock.patch.dict(
            "os.environ",
            AIRFLOW_CONN_TEST_CONN=Connection(conn_type="any", extra=json.dumps(extras)).get_uri(),
        ):
            assert hook_params != extras
            assert SnowflakeHook(
                snowflake_conn_id="test_conn", **(hook_params if include_params else {})
            )._get_conn_params == {
                "user": None,
                "password": "",
                "application": "AIRFLOW",
                "schema": "",
                **(hook_params if include_params else extras),
            }

    @pytest.mark.parametrize("include_unprefixed", [True, False])
    def test_extra_short_beats_long(self, include_unprefixed):
        """When both prefixed and unprefixed values are found in extra (e.g.
        extra__snowflake__account and account), we should prefer the short name."""
        extras = dict(
            account="account",
            warehouse="warehouse",
            database="database",
            region="region",
            role="role",
        )
        extras_prefixed = {f"extra__snowflake__{k}": f"{v}_prefixed" for k, v in extras.items()}
        with mock.patch.dict(
            "os.environ",
            AIRFLOW_CONN_TEST_CONN=Connection(
                conn_type="any",
                extra=json.dumps({**(extras if include_unprefixed else {}), **extras_prefixed}),
            ).get_uri(),
        ):
            assert list(extras.values()) != list(extras_prefixed.values())
            assert SnowflakeHook(snowflake_conn_id="test_conn")._get_conn_params == {
                "user": None,
                "password": "",
                "application": "AIRFLOW",
                "schema": "",
                "authenticator": "snowflake",
                "session_parameters": None,
                **(extras if include_unprefixed else dict(zip(extras.keys(), extras_prefixed.values()))),
            }

    def test_get_conn_params_should_support_private_auth_with_encrypted_key(
        self, encrypted_temporary_private_key
    ):
        connection_kwargs = {
            **BASE_CONNECTION_KWARGS,
            "password": _PASSWORD,
            "extra": {
                "database": "db",
                "account": "airflow",
                "warehouse": "af_wh",
                "region": "af_region",
                "role": "af_role",
                "private_key_file": str(encrypted_temporary_private_key),
            },
        }
        with mock.patch.dict("os.environ", AIRFLOW_CONN_TEST_CONN=Connection(**connection_kwargs).get_uri()):
            assert "private_key" in SnowflakeHook(snowflake_conn_id="test_conn")._get_conn_params

    def test_get_conn_params_should_support_private_auth_with_unencrypted_key(
        self, unencrypted_temporary_private_key
    ):
        connection_kwargs = {
            **BASE_CONNECTION_KWARGS,
            "password": None,
            "extra": {
                "database": "db",
                "account": "airflow",
                "warehouse": "af_wh",
                "region": "af_region",
                "role": "af_role",
                "private_key_file": str(unencrypted_temporary_private_key),
            },
        }
        with mock.patch.dict("os.environ", AIRFLOW_CONN_TEST_CONN=Connection(**connection_kwargs).get_uri()):
            assert "private_key" in SnowflakeHook(snowflake_conn_id="test_conn")._get_conn_params
        connection_kwargs["password"] = ""
        with mock.patch.dict("os.environ", AIRFLOW_CONN_TEST_CONN=Connection(**connection_kwargs).get_uri()):
            assert "private_key" in SnowflakeHook(snowflake_conn_id="test_conn")._get_conn_params
        connection_kwargs["password"] = _PASSWORD
        with (
            mock.patch.dict("os.environ", AIRFLOW_CONN_TEST_CONN=Connection(**connection_kwargs).get_uri()),
            pytest.raises(TypeError, match="Password was given but private key is not encrypted."),
        ):
            SnowflakeHook(snowflake_conn_id="test_conn")._get_conn_params

    def test_get_conn_params_should_fail_on_invalid_key(self):
        connection_kwargs = {
            **BASE_CONNECTION_KWARGS,
            "password": None,
            "extra": {
                "database": "db",
                "account": "airflow",
                "warehouse": "af_wh",
                "region": "af_region",
                "role": "af_role",
                "private_key_file": "/dev/urandom",
            },
        }
        with (
            mock.patch.dict("os.environ", AIRFLOW_CONN_TEST_CONN=Connection(**connection_kwargs).get_uri()),
            pytest.raises(ValueError, match="The private_key_file path points to an empty or invalid file."),
        ):
            SnowflakeHook(snowflake_conn_id="test_conn").get_conn()

    @mock.patch("requests.post")
    @mock.patch(
        "airflow.providers.snowflake.hooks.snowflake.SnowflakeHook._get_conn_params",
        new_callable=PropertyMock,
    )
    def test_get_conn_params_should_support_oauth(self, mock_get_conn_params, requests_post):
        requests_post.return_value = Mock(
            status_code=200,
            json=lambda: {
                "access_token": "supersecretaccesstoken",
                "expires_in": 600,
                "refresh_token": "secrettoken",
                "token_type": "Bearer",
                "username": "test_user",
            },
        )
        connection_kwargs = {
            **BASE_CONNECTION_KWARGS,
            "login": "test_client_id",
            "password": "test_client_secret",
            "extra": {
                "database": "db",
                "account": "airflow",
                "warehouse": "af_wh",
                "region": "af_region",
                "role": "af_role",
                "refresh_token": "secrettoken",
                "authenticator": "oauth",
            },
        }
        mock_get_conn_params.return_value = connection_kwargs
        with mock.patch.dict("os.environ", AIRFLOW_CONN_TEST_CONN=Connection(**connection_kwargs).get_uri()):
            hook = SnowflakeHook(snowflake_conn_id="test_conn")
            conn_params = hook._get_conn_params
            conn_params_keys = conn_params.keys()
            conn_params_extra = conn_params.get("extra", {})
            conn_params_extra_keys = conn_params_extra.keys()
            assert "authenticator" in conn_params_extra_keys
            assert conn_params_extra["authenticator"] == "oauth"
            assert "user" not in conn_params_keys
            assert "password" in conn_params_keys
            assert "refresh_token" in conn_params_extra_keys
            # Mandatory fields to generate account_identifier `https://<account>.<region>`
            assert "region" in conn_params_extra_keys
            assert "account" in conn_params_extra_keys

    @mock.patch("requests.post")
    @mock.patch(
        "airflow.providers.snowflake.hooks.snowflake.SnowflakeHook._get_conn_params",
        new_callable=PropertyMock,
    )
    def test_get_conn_params_should_support_oauth_with_token_endpoint(
        self, mock_get_conn_params, requests_post
    ):
        requests_post.return_value = Mock(
            status_code=200,
            json=lambda: {
                "access_token": "supersecretaccesstoken",
                "expires_in": 600,
                "refresh_token": "secrettoken",
                "token_type": "Bearer",
                "username": "test_user",
            },
        )
        connection_kwargs = {
            **BASE_CONNECTION_KWARGS,
            "login": "test_client_id",
            "password": "test_client_secret",
            "extra": {
                "database": "db",
                "account": "airflow",
                "warehouse": "af_wh",
                "region": "af_region",
                "role": "af_role",
                "refresh_token": "secrettoken",
                "authenticator": "oauth",
                "token_endpoint": "https://www.example.com/oauth/token",
            },
        }
        mock_get_conn_params.return_value = connection_kwargs
        with mock.patch.dict("os.environ", AIRFLOW_CONN_TEST_CONN=Connection(**connection_kwargs).get_uri()):
            hook = SnowflakeHook(snowflake_conn_id="test_conn")
            conn_params = hook._get_conn_params
            conn_params_keys = conn_params.keys()
            conn_params_extra = conn_params.get("extra", {})
            conn_params_extra_keys = conn_params_extra.keys()
            assert "authenticator" in conn_params_extra_keys
            assert conn_params_extra["authenticator"] == "oauth"
            assert conn_params_extra["token_endpoint"] == "https://www.example.com/oauth/token"
            assert "user" not in conn_params_keys
            assert "password" in conn_params_keys
            assert "refresh_token" in conn_params_extra_keys
            # Mandatory fields to generate account_identifier `https://<account>.<region>`
            assert "region" in conn_params_extra_keys
            assert "account" in conn_params_extra_keys

    @mock.patch("requests.post")
    @mock.patch(
        "airflow.providers.snowflake.hooks.snowflake.SnowflakeHook._get_conn_params",
        new_callable=PropertyMock,
    )
    def test_get_conn_params_should_support_oauth_with_client_credentials(
        self, mock_get_conn_params, requests_post
    ):
        requests_post.return_value = Mock(
            status_code=200,
            json=lambda: {
                "access_token": "supersecretaccesstoken",
                "expires_in": 600,
                "refresh_token": "secrettoken",
                "token_type": "Bearer",
                "username": "test_user",
            },
        )
        connection_kwargs = {
            **BASE_CONNECTION_KWARGS,
            "login": "test_client_id",
            "password": "test_client_secret",
            "extra": {
                "database": "db",
                "account": "airflow",
                "warehouse": "af_wh",
                "region": "af_region",
                "role": "af_role",
                "authenticator": "oauth",
                "token_endpoint": "https://www.example.com/oauth/token",
                "grant_type": "client_credentials",
            },
        }
        mock_get_conn_params.return_value = connection_kwargs
        with mock.patch.dict("os.environ", AIRFLOW_CONN_TEST_CONN=Connection(**connection_kwargs).get_uri()):
            hook = SnowflakeHook(snowflake_conn_id="test_conn")
            conn_params = hook._get_conn_params
            conn_params_keys = conn_params.keys()
            conn_params_extra = conn_params.get("extra", {})
            conn_params_extra_keys = conn_params_extra.keys()
            assert "authenticator" in conn_params_extra_keys
            assert conn_params_extra["authenticator"] == "oauth"
            assert conn_params_extra["grant_type"] == "client_credentials"
            assert "user" not in conn_params_keys
            assert "password" in conn_params_keys
            assert "refresh_token" not in conn_params_extra_keys
            # Mandatory fields to generate account_identifier `https://<account>.<region>`
            assert "region" in conn_params_extra_keys
            assert "account" in conn_params_extra_keys

    def test_get_conn_params_should_support_oauth_with_azure_conn_id(self, mocker):
        azure_conn_id = "azure_test_conn"
        mock_azure_token = "azure_test_token"
        connection_kwargs = {
            "extra": {
                "database": "db",
                "account": "airflow",
                "region": "af_region",
                "warehouse": "af_wh",
                "authenticator": "oauth",
                "azure_conn_id": azure_conn_id,
            },
        }
        mock_connection_class = mocker.patch("airflow.providers.snowflake.hooks.snowflake.Connection")
        mock_azure_base_hook = mock_connection_class.get.return_value.get_hook.return_value
        mock_azure_base_hook.get_token.return_value.token = mock_azure_token
        with mock.patch.dict("os.environ", AIRFLOW_CONN_TEST_CONN=Connection(**connection_kwargs).get_uri()):
            hook = SnowflakeHook(snowflake_conn_id="test_conn")
            conn_params = hook._get_conn_params
            # Check AzureBaseHook initialization and get_token call args
            mock_connection_class.get.assert_called_once_with(azure_conn_id)
            mock_azure_base_hook.get_token.assert_called_once_with(SnowflakeHook.default_azure_oauth_scope)
            assert "authenticator" in conn_params
            assert conn_params["authenticator"] == "oauth"
            assert "token" in conn_params
            assert conn_params["token"] == mock_azure_token
            assert "user" not in conn_params
            assert "password" not in conn_params
            assert "refresh_token" not in conn_params
            # Mandatory fields to generate account_identifier `https://<account>.<region>`
            assert "region" in conn_params
            assert "account" in conn_params

    def test_should_add_partner_info(self):
        with mock.patch.dict(
            "os.environ",
            AIRFLOW_CONN_TEST_CONN=Connection(**BASE_CONNECTION_KWARGS).get_uri(),
            AIRFLOW_SNOWFLAKE_PARTNER="PARTNER_NAME",
        ):
            assert (
                SnowflakeHook(snowflake_conn_id="test_conn")._get_conn_params["application"]
                == "PARTNER_NAME"
            )

    def test_get_conn_should_call_connect(self):
        with (
            mock.patch.dict(
                "os.environ", AIRFLOW_CONN_TEST_CONN=Connection(**BASE_CONNECTION_KWARGS).get_uri()
            ),
            mock.patch("airflow.providers.snowflake.hooks.snowflake.connector") as mock_connector,
        ):
            hook = SnowflakeHook(snowflake_conn_id="test_conn")
            conn = hook.get_conn()
            mock_connector.connect.assert_called_once_with(**hook._get_conn_params)
            assert mock_connector.connect.return_value == conn

    def test_get_sqlalchemy_engine_should_support_pass_auth(self):
        with (
            mock.patch.dict(
                "os.environ", AIRFLOW_CONN_TEST_CONN=Connection(**BASE_CONNECTION_KWARGS).get_uri()
            ),
            mock.patch("airflow.providers.snowflake.hooks.snowflake.create_engine") as mock_create_engine,
        ):
            hook = SnowflakeHook(snowflake_conn_id="test_conn")
            conn = hook.get_sqlalchemy_engine()
            mock_create_engine.assert_called_once_with(
                "snowflake://user:pw@airflow.af_region/db/public"
                "?application=AIRFLOW&authenticator=snowflake&role=af_role&warehouse=af_wh"
            )
            assert mock_create_engine.return_value == conn

    def test_get_sqlalchemy_engine_should_support_insecure_mode(self):
        connection_kwargs = deepcopy(BASE_CONNECTION_KWARGS)
        connection_kwargs["extra"]["extra__snowflake__insecure_mode"] = "True"
        with (
            mock.patch.dict("os.environ", AIRFLOW_CONN_TEST_CONN=Connection(**connection_kwargs).get_uri()),
            mock.patch("airflow.providers.snowflake.hooks.snowflake.create_engine") as mock_create_engine,
        ):
            hook = SnowflakeHook(snowflake_conn_id="test_conn")
            conn = hook.get_sqlalchemy_engine()
            mock_create_engine.assert_called_once_with(
                "snowflake://user:pw@airflow.af_region/db/public"
                "?application=AIRFLOW&authenticator=snowflake&role=af_role&warehouse=af_wh",
                connect_args={"insecure_mode": True},
            )
            assert mock_create_engine.return_value == conn

    def test_get_sqlalchemy_engine_should_support_json_result_force_utf8_decoding(self):
        connection_kwargs = deepcopy(BASE_CONNECTION_KWARGS)
        connection_kwargs["extra"]["extra__snowflake__json_result_force_utf8_decoding"] = "True"
        with (
            mock.patch.dict("os.environ", AIRFLOW_CONN_TEST_CONN=Connection(**connection_kwargs).get_uri()),
            mock.patch("airflow.providers.snowflake.hooks.snowflake.create_engine") as mock_create_engine,
        ):
            hook = SnowflakeHook(snowflake_conn_id="test_conn")
            conn = hook.get_sqlalchemy_engine()
            mock_create_engine.assert_called_once_with(
                "snowflake://user:pw@airflow.af_region/db/public"
                "?application=AIRFLOW&authenticator=snowflake&role=af_role&warehouse=af_wh",
                connect_args={"json_result_force_utf8_decoding": True},
            )
            assert mock_create_engine.return_value == conn

    def test_get_sqlalchemy_engine_should_support_session_parameters(self):
        connection_kwargs = deepcopy(BASE_CONNECTION_KWARGS)
        connection_kwargs["extra"]["session_parameters"] = {"TEST_PARAM": "AA", "TEST_PARAM_B": 123}
        with (
            mock.patch.dict("os.environ", AIRFLOW_CONN_TEST_CONN=Connection(**connection_kwargs).get_uri()),
            mock.patch("airflow.providers.snowflake.hooks.snowflake.create_engine") as mock_create_engine,
        ):
            hook = SnowflakeHook(snowflake_conn_id="test_conn")
            conn = hook.get_sqlalchemy_engine()
            mock_create_engine.assert_called_once_with(
                "snowflake://user:pw@airflow.af_region/db/public"
                "?application=AIRFLOW&authenticator=snowflake&role=af_role&warehouse=af_wh",
                connect_args={"session_parameters": {"TEST_PARAM": "AA", "TEST_PARAM_B": 123}},
            )
            assert mock_create_engine.return_value == conn

    def test_get_sqlalchemy_engine_should_support_private_key_auth(self, unencrypted_temporary_private_key):
        connection_kwargs = deepcopy(BASE_CONNECTION_KWARGS)
        connection_kwargs["password"] = ""
        connection_kwargs["extra"]["private_key_file"] = str(unencrypted_temporary_private_key)
        with (
            mock.patch.dict("os.environ", AIRFLOW_CONN_TEST_CONN=Connection(**connection_kwargs).get_uri()),
            mock.patch("airflow.providers.snowflake.hooks.snowflake.create_engine") as mock_create_engine,
        ):
            hook = SnowflakeHook(snowflake_conn_id="test_conn")
            conn = hook.get_sqlalchemy_engine()
            assert "private_key" in mock_create_engine.call_args.kwargs["connect_args"]
            assert mock_create_engine.return_value == conn

    def test_get_sqlalchemy_engine_should_support_ocsp_fail_open(self):
        connection_kwargs = deepcopy(BASE_CONNECTION_KWARGS)
        connection_kwargs["extra"]["ocsp_fail_open"] = "False"
        with (
            mock.patch.dict("os.environ", AIRFLOW_CONN_TEST_CONN=Connection(**connection_kwargs).get_uri()),
            mock.patch("airflow.providers.snowflake.hooks.snowflake.create_engine") as mock_create_engine,
        ):
            hook = SnowflakeHook(snowflake_conn_id="test_conn")
            conn = hook.get_sqlalchemy_engine()
            mock_create_engine.assert_called_once_with(
                "snowflake://user:pw@airflow.af_region/db/public"
                "?application=AIRFLOW&authenticator=snowflake&role=af_role&warehouse=af_wh",
                connect_args={"ocsp_fail_open": False},
            )
            assert mock_create_engine.return_value == conn

    def test_hook_parameters_should_take_precedence(self):
        with mock.patch.dict(
            "os.environ", AIRFLOW_CONN_TEST_CONN=Connection(**BASE_CONNECTION_KWARGS).get_uri()
        ):
            hook = SnowflakeHook(
                snowflake_conn_id="test_conn",
                account="TEST_ACCOUNT",
                warehouse="TEST_WAREHOUSE",
                database="TEST_DATABASE",
                region="TEST_REGION",
                role="TEST_ROLE",
                schema="TEST_SCHEMA",
                authenticator="TEST_AUTH",
                session_parameters={"AA": "AAA"},
            )
            assert hook._get_conn_params == {
                "account": "TEST_ACCOUNT",
                "application": "AIRFLOW",
                "authenticator": "TEST_AUTH",
                "database": "TEST_DATABASE",
                "password": "pw",
                "region": "TEST_REGION",
                "role": "TEST_ROLE",
                "schema": "TEST_SCHEMA",
                "session_parameters": {"AA": "AAA"},
                "user": "user",
                "warehouse": "TEST_WAREHOUSE",
            }
            assert hook.get_uri() == (
                "snowflake://user:pw@TEST_ACCOUNT.TEST_REGION/TEST_DATABASE/TEST_SCHEMA"
                "?application=AIRFLOW&authenticator=TEST_AUTH&role=TEST_ROLE&warehouse=TEST_WAREHOUSE"
            )

    @pytest.mark.parametrize(
        ("sql", "expected_sql", "expected_query_ids"),
        [
            ("select * from table", ["select * from table"], ["uuid"]),
            (
                "select * from table;select * from table2",
                ["select * from table;", "select * from table2"],
                ["uuid1", "uuid2"],
            ),
            (["select * from table;"], ["select * from table;"], ["uuid1"]),
            (
                ["select * from table;", "select * from table2;"],
                ["select * from table;", "select * from table2;"],
                ["uuid1", "uuid2"],
            ),
        ],
    )
    @mock.patch("airflow.providers.snowflake.hooks.snowflake.SnowflakeHook.get_conn")
    def test_run_storing_query_ids_extra(self, mock_conn, sql, expected_sql, expected_query_ids):
        hook = SnowflakeHook()
        conn = mock_conn.return_value
        cur = mock.MagicMock(rowcount=0)
        conn.cursor.return_value = cur
        type(cur).sfqid = mock.PropertyMock(side_effect=expected_query_ids)
        mock_params = {"mock_param": "mock_param"}
        hook.run(sql, parameters=mock_params)

        cur.execute.assert_has_calls([mock.call(query, mock_params) for query in expected_sql])
        assert hook.query_ids == expected_query_ids
        cur.close.assert_called()

    @mock.patch("airflow.providers.common.sql.hooks.sql.DbApiHook.get_first")
    def test_connection_success(self, mock_get_first):
        with mock.patch.dict(
            "os.environ", AIRFLOW_CONN_SNOWFLAKE_DEFAULT=Connection(**BASE_CONNECTION_KWARGS).get_uri()
        ):
            hook = SnowflakeHook()
            mock_get_first.return_value = [{"1": 1}]
            status, msg = hook.test_connection()
            assert status is True
            assert msg == "Connection successfully tested"
            mock_get_first.assert_called_once_with("select 1")

    @mock.patch(
        "airflow.providers.common.sql.hooks.sql.DbApiHook.get_first",
        side_effect=Exception("Connection Errors"),
    )
    def test_connection_failure(self, mock_get_first):
        with mock.patch.dict(
            "os.environ", AIRFLOW_CONN_SNOWFLAKE_DEFAULT=Connection(**BASE_CONNECTION_KWARGS).get_uri()
        ):
            hook = SnowflakeHook()
            status, msg = hook.test_connection()
            assert status is False
            assert msg == "Connection Errors"
            mock_get_first.assert_called_once_with("select 1")

    def test_empty_sql_parameter(self):
        hook = SnowflakeHook()
        for empty_statement in ([], "", "\n"):
            with pytest.raises(ValueError, match="List of SQL statements is empty"):
                hook.run(sql=empty_statement)

    def test_get_openlineage_default_schema_with_no_schema_set(self):
        connection_kwargs = {
            **BASE_CONNECTION_KWARGS,
            "schema": "PUBLIC",
        }
        with mock.patch.dict("os.environ", AIRFLOW_CONN_TEST_CONN=Connection(**connection_kwargs).get_uri()):
            hook = SnowflakeHook(snowflake_conn_id="test_conn")
            assert hook.get_openlineage_default_schema() == "PUBLIC"

    @mock.patch("airflow.providers.common.sql.hooks.sql.DbApiHook.get_first")
    def test_get_openlineage_default_schema_with_schema_set(self, mock_get_first):
        with mock.patch.dict(
            "os.environ", AIRFLOW_CONN_TEST_CONN=Connection(**BASE_CONNECTION_KWARGS).get_uri()
        ):
            hook = SnowflakeHook(snowflake_conn_id="test_conn")
            assert hook.get_openlineage_default_schema() == BASE_CONNECTION_KWARGS["schema"]
            mock_get_first.assert_not_called()

            hook_with_schema_param = SnowflakeHook(snowflake_conn_id="test_conn", schema="my_schema")
            assert hook_with_schema_param.get_openlineage_default_schema() == "my_schema"
            mock_get_first.assert_not_called()

    @mock.patch("airflow.providers.snowflake.utils.openlineage.emit_openlineage_events_for_snowflake_queries")
    def test_get_openlineage_database_specific_lineage_with_no_query_ids(self, mock_emit):
        hook = SnowflakeHook(snowflake_conn_id="test_conn")
        assert hook.query_ids == []

        result = hook.get_openlineage_database_specific_lineage(None)
        mock_emit.assert_not_called()
        assert result is None

    @mock.patch("airflow.providers.snowflake.utils.openlineage.emit_openlineage_events_for_snowflake_queries")
    def test_get_openlineage_database_specific_lineage_with_single_query_id(self, mock_emit):
        from airflow.providers.common.compat.openlineage.facet import ExternalQueryRunFacet
        from airflow.providers.openlineage.extractors import OperatorLineage

        hook = SnowflakeHook(snowflake_conn_id="test_conn")
        hook.query_ids = ["query1"]
        hook.get_connection = mock.MagicMock()
        hook.get_openlineage_database_info = lambda x: mock.MagicMock(authority="auth", scheme="scheme")

        ti = mock.MagicMock()
        result = hook.get_openlineage_database_specific_lineage(ti)
        mock_emit.assert_called_once_with(
            **{
                "hook": hook,
                "query_ids": ["query1"],
                "query_source_namespace": "scheme://auth",
                "task_instance": ti,
                "query_for_extra_metadata": True,
            }
        )
        assert result == OperatorLineage(
            run_facets={
                "externalQuery": ExternalQueryRunFacet(externalQueryId="query1", source="scheme://auth")
            }
        )

    @mock.patch("airflow.providers.snowflake.utils.openlineage.emit_openlineage_events_for_snowflake_queries")
    def test_get_openlineage_database_specific_lineage_with_multiple_query_ids(self, mock_emit):
        hook = SnowflakeHook(snowflake_conn_id="test_conn")
        hook.query_ids = ["query1", "query2"]
        hook.get_connection = mock.MagicMock()
        hook.get_openlineage_database_info = lambda x: mock.MagicMock(authority="auth", scheme="scheme")

        ti = mock.MagicMock()
        result = hook.get_openlineage_database_specific_lineage(ti)
        mock_emit.assert_called_once_with(
            **{
                "hook": hook,
                "query_ids": ["query1", "query2"],
                "query_source_namespace": "scheme://auth",
                "task_instance": ti,
                "query_for_extra_metadata": True,
            }
        )
        assert result is None

    @mock.patch("importlib.metadata.version", return_value="1.99.0")
    def test_get_openlineage_database_specific_lineage_with_old_openlineage_provider(self, mock_version):
        hook = SnowflakeHook(snowflake_conn_id="test_conn")
        hook.query_ids = ["query1", "query2"]
        hook.get_connection = mock.MagicMock()
        hook.get_openlineage_database_info = lambda x: mock.MagicMock(authority="auth", scheme="scheme")

        expected_err = (
            "OpenLineage provider version `1.99.0` is lower than required `2.3.0`, "
            "skipping function `emit_openlineage_events_for_snowflake_queries` execution"
        )
        with pytest.raises(AirflowOptionalProviderFeatureException, match=expected_err):
            hook.get_openlineage_database_specific_lineage(mock.MagicMock())

    @pytest.mark.skipif(sys.version_info >= (3, 12), reason="Snowpark Python doesn't support Python 3.12 yet")
    @mock.patch("snowflake.snowpark.Session.builder")
    def test_get_snowpark_session(self, mock_session_builder):
        from airflow import __version__ as airflow_version
        from airflow.providers.snowflake import __version__ as provider_version

        mock_session = mock.MagicMock()
        mock_session_builder.configs.return_value.create.return_value = mock_session

        with mock.patch.dict(
            "os.environ", AIRFLOW_CONN_TEST_CONN=Connection(**BASE_CONNECTION_KWARGS).get_uri()
        ):
            hook = SnowflakeHook(snowflake_conn_id="test_conn")
            session = hook.get_snowpark_session()
            assert session == mock_session
            mock_session_builder.configs.assert_called_once_with(hook._get_conn_params)
            # Verify that update_query_tag was called with the expected tag dictionary
            mock_session.update_query_tag.assert_called_once_with(
                {
                    "airflow_version": airflow_version,
                    "airflow_provider_version": provider_version,
                }
            )

    @mock.patch("airflow.providers.snowflake.hooks.snowflake.HTTPBasicAuth")
    @mock.patch("requests.post")
    @mock.patch(
        "airflow.providers.snowflake.hooks.snowflake.SnowflakeHook._get_conn_params",
        new_callable=PropertyMock,
    )
    def test_get_oauth_token(self, mock_conn_param, requests_post, mock_auth):
        """Test get_oauth_token method makes the right http request"""
        basic_auth = {"Authorization": "Basic usernamepassword"}
        mock_conn_param.return_value = CONN_PARAMS_OAUTH
        requests_post.return_value.status_code = 200
        mock_auth.return_value = basic_auth
        hook = SnowflakeHook(snowflake_conn_id="mock_conn_id")
        hook.get_oauth_token(conn_config=CONN_PARAMS_OAUTH)
        requests_post.assert_called_once_with(
            f"https://{CONN_PARAMS_OAUTH['account']}.snowflakecomputing.com/oauth/token-request",
            data={
                "grant_type": "refresh_token",
                "refresh_token": CONN_PARAMS_OAUTH["refresh_token"],
                "redirect_uri": "https://localhost.com",
            },
            headers={"Content-Type": "application/x-www-form-urlencoded"},
            auth=basic_auth,
        )

    @mock.patch("airflow.providers.snowflake.hooks.snowflake.HTTPBasicAuth")
    @mock.patch("requests.post")
    @mock.patch(
        "airflow.providers.snowflake.hooks.snowflake.SnowflakeHook._get_conn_params",
        new_callable=PropertyMock,
    )
    def test_get_oauth_token_with_token_endpoint(self, mock_conn_param, requests_post, mock_auth):
        """Test get_oauth_token method makes the right http request"""
        basic_auth = {"Authorization": "Basic usernamepassword"}
        token_endpoint = "https://example.com/oauth/token"
        mock_conn_param.return_value = CONN_PARAMS_OAUTH
        requests_post.return_value.status_code = 200
        mock_auth.return_value = basic_auth
        hook = SnowflakeHook(snowflake_conn_id="mock_conn_id")
        hook.get_oauth_token(conn_config=CONN_PARAMS_OAUTH, token_endpoint=token_endpoint)
        requests_post.assert_called_once_with(
            token_endpoint,
            data={
                "grant_type": "refresh_token",
                "refresh_token": CONN_PARAMS_OAUTH["refresh_token"],
                "redirect_uri": "https://localhost.com",
            },
            headers={"Content-Type": "application/x-www-form-urlencoded"},
            auth=basic_auth,
        )

    def test_get_azure_oauth_token(self, mocker):
        """Test get_azure_oauth_token method gets token from provided connection id"""
        azure_conn_id = "azure_test_conn"
        mock_azure_token = "azure_test_token"
        mock_connection_class = mocker.patch("airflow.providers.snowflake.hooks.snowflake.Connection")
        mock_azure_base_hook = mock_connection_class.get.return_value.get_hook.return_value
        mock_azure_base_hook.get_token.return_value.token = mock_azure_token
        hook = SnowflakeHook(snowflake_conn_id="mock_conn_id")
        token = hook.get_azure_oauth_token(azure_conn_id)
        # Check AzureBaseHook initialization and get_token call args
        mock_connection_class.get.assert_called_once_with(azure_conn_id)
        mock_azure_base_hook.get_token.assert_called_once_with(SnowflakeHook.default_azure_oauth_scope)
        assert token == mock_azure_token

    def test_get_azure_oauth_token_expect_failure_on_older_azure_provider_package(self, mocker):
        class MockAzureBaseHookOldVersion:
            """Simulate an old version of AzureBaseHook where sdk_client is required."""

            def __init__(self, sdk_client, conn_id="azure_default"):
                pass

        azure_conn_id = "azure_test_conn"
        mock_connection_class = mocker.patch("airflow.providers.snowflake.hooks.snowflake.Connection")
        mock_connection_class.get.return_value.get_hook = MockAzureBaseHookOldVersion
        hook = SnowflakeHook(snowflake_conn_id="mock_conn_id")
        with pytest.raises(
            AirflowOptionalProviderFeatureException,
            match=(
                "Getting azure token is not supported.*"
                "Please upgrade apache-airflow-providers-microsoft-azure>="
            ),
        ):
            hook.get_azure_oauth_token(azure_conn_id)
        # Check AzureBaseHook initialization
        mock_connection_class.get.assert_called_once_with(azure_conn_id)

target: TestPytestSnowflakeHook
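All of the tests in this record share one setup pattern: serialize an Airflow Connection to a URI, expose it as an AIRFLOW_CONN_<CONN_ID> environment variable, and let the hook resolve it by id. A condensed sketch of that pattern (connection values are illustrative, mirroring the file's BASE_CONNECTION_KWARGS):

from unittest import mock

from airflow.models import Connection
from airflow.providers.snowflake.hooks.snowflake import SnowflakeHook

conn = Connection(
    conn_type="snowflake",
    login="user",
    password="pw",
    schema="public",
    extra={"account": "airflow", "warehouse": "af_wh", "region": "af_region", "role": "af_role"},
)
# AIRFLOW_CONN_TEST_CONN maps to snowflake_conn_id="test_conn"
with mock.patch.dict("os.environ", AIRFLOW_CONN_TEST_CONN=conn.get_uri()):
    hook = SnowflakeHook(snowflake_conn_id="test_conn")
    print(hook._get_conn_params["account"])  # "airflow"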

language: python
repo: joblib__joblib
path: joblib/memory.py
class_span: { "start": 5257, "end": 8111 }
source:

class ____(Logger):
    """Object representing a cached value.

    Attributes
    ----------
    location: str
        The location of joblib cache. Depends on the store backend used.

    func: function or str
        function whose output is cached. The string case is intended only for
        instantiation based on the output of repr() on another instance.
        (namely eval(repr(memorized_instance)) works).

    argument_hash: str
        hash of the function arguments.

    backend: str
        Type of store backend for reading/writing cache files.
        Default is 'local'.

    mmap_mode: {None, 'r+', 'r', 'w+', 'c'}
        The memmapping mode used when loading from cache numpy arrays. See
        numpy.load for the meaning of the different values.

    verbose: int
        verbosity level (0 means no message).

    timestamp, metadata: string
        for internal use only.
    """

    def __init__(
        self,
        location,
        call_id,
        backend="local",
        mmap_mode=None,
        verbose=0,
        timestamp=None,
        metadata=None,
    ):
        Logger.__init__(self)
        self._call_id = call_id
        self.store_backend = _store_backend_factory(backend, location, verbose=verbose)
        self.mmap_mode = mmap_mode

        if metadata is not None:
            self.metadata = metadata
        else:
            self.metadata = self.store_backend.get_metadata(self._call_id)
        self.duration = self.metadata.get("duration", None)
        self.verbose = verbose
        self.timestamp = timestamp

    @property
    def func(self):
        return self.func_id

    @property
    def func_id(self):
        return self._call_id[0]

    @property
    def args_id(self):
        return self._call_id[1]

    def get(self):
        """Read value from cache and return it."""
        try:
            return self.store_backend.load_item(
                self._call_id,
                timestamp=self.timestamp,
                metadata=self.metadata,
                verbose=self.verbose,
            )
        except ValueError as exc:
            new_exc = KeyError(
                "Error while trying to load a MemorizedResult's value. "
                "It seems that this folder is corrupted : {}".format(
                    os.path.join(self.store_backend.location, *self._call_id)
                )
            )
            raise new_exc from exc

    def clear(self):
        """Clear value from cache"""
        self.store_backend.clear_item(self._call_id)

    def __repr__(self):
        return '{}(location="{}", func="{}", args_id="{}")'.format(
            self.__class__.__name__, self.store_backend.location, *self._call_id
        )

    def __getstate__(self):
        state = self.__dict__.copy()
        state["timestamp"] = None
        return state

target: MemorizedResult
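For context, instances of this class are what a Memory-cached function returns from call_and_shelve; a minimal sketch of that round trip (the cache directory name is illustrative):

from joblib import Memory

memory = Memory("./joblib_cache", verbose=0)

@memory.cache
def square(x):
    return x * x

shelved = square.call_and_shelve(4)  # returns a MemorizedResult
print(shelved.get())   # 16, loaded back from the store backend
shelved.clear()        # drops only this cached value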

language: python
repo: PrefectHQ__prefect
path: src/integrations/prefect-databricks/prefect_databricks/models/jobs.py
class_span: { "start": 24543, "end": 24716 }
source:

class ____(RootModel[Union[GitSource, Any, Any, Any]]):
    """
    See source code for the fields' description.
    """

    model_config = ConfigDict(frozen=True)

target: GitSource1
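RootModel wrappers like this validate a single bare value rather than named fields. A minimal sketch of the pattern with a simpler union (the type and values are illustrative, not Prefect's):

from typing import Union

from pydantic import ConfigDict, RootModel

class IntOrStr(RootModel[Union[int, str]]):
    model_config = ConfigDict(frozen=True)

print(IntOrStr(42).root)     # 42
print(IntOrStr("ref").root)  # 'ref'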

language: python
repo: cython__cython
path: Cython/Distutils/extension.py
class_span: { "start": 213, "end": 3551 }
source:

class ____(_Extension.Extension):
    # When adding arguments to this constructor, be sure to update
    # user_options.extend in build_ext.py.
    def __init__(self, name, sources,
                 include_dirs=None,
                 define_macros=None,
                 undef_macros=None,
                 library_dirs=None,
                 libraries=None,
                 runtime_library_dirs=None,
                 extra_objects=None,
                 extra_compile_args=None,
                 extra_link_args=None,
                 export_symbols=None,
                 #swig_opts=None,
                 depends=None,
                 language=None,
                 cython_include_dirs=None,
                 cython_directives=None,
                 cython_create_listing=False,
                 cython_line_directives=False,
                 cython_cplus=False,
                 cython_c_in_temp=False,
                 cython_gen_pxi=False,
                 cython_gdb=False,
                 no_c_in_traceback=False,
                 cython_compile_time_env=None,
                 **kw):
        # Translate pyrex_X to cython_X for backwards compatibility.
        had_pyrex_options = False
        for key in list(kw):
            if key.startswith('pyrex_'):
                had_pyrex_options = True
                kw['cython' + key[5:]] = kw.pop(key)
        if had_pyrex_options:
            Extension.__init__(
                self, name, sources,
                include_dirs=include_dirs,
                define_macros=define_macros,
                undef_macros=undef_macros,
                library_dirs=library_dirs,
                libraries=libraries,
                runtime_library_dirs=runtime_library_dirs,
                extra_objects=extra_objects,
                extra_compile_args=extra_compile_args,
                extra_link_args=extra_link_args,
                export_symbols=export_symbols,
                #swig_opts=swig_opts,
                depends=depends,
                language=language,
                no_c_in_traceback=no_c_in_traceback,
                **kw)
            return

        _Extension.Extension.__init__(
            self, name, sources,
            include_dirs=include_dirs,
            define_macros=define_macros,
            undef_macros=undef_macros,
            library_dirs=library_dirs,
            libraries=libraries,
            runtime_library_dirs=runtime_library_dirs,
            extra_objects=extra_objects,
            extra_compile_args=extra_compile_args,
            extra_link_args=extra_link_args,
            export_symbols=export_symbols,
            #swig_opts=swig_opts,
            depends=depends,
            language=language,
            **kw)

        self.cython_include_dirs = cython_include_dirs or []
        self.cython_directives = cython_directives or {}
        self.cython_create_listing = cython_create_listing
        self.cython_line_directives = cython_line_directives
        self.cython_cplus = cython_cplus
        self.cython_c_in_temp = cython_c_in_temp
        self.cython_gen_pxi = cython_gen_pxi
        self.cython_gdb = cython_gdb
        self.no_c_in_traceback = no_c_in_traceback
        self.cython_compile_time_env = cython_compile_time_env

# class Extension

read_setup_file = _Extension.read_setup_file

target: Extension
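In a setup script this subclass is used like a plain distutils Extension, with the extra cython_* options passed through to the compiler. A minimal sketch under that assumption (module and file names are illustrative):

from setuptools import setup
from Cython.Distutils import Extension, build_ext

ext = Extension(
    "fastmod",
    sources=["fastmod.pyx"],
    cython_directives={"boundscheck": False},  # forwarded to the Cython compile
    cython_cplus=False,
)

setup(name="fastmod", ext_modules=[ext], cmdclass={"build_ext": build_ext})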

language: python
repo: airbytehq__airbyte
path: airbyte-integrations/connectors/source-github/source_github/github_schema.py
class_span: { "start": 1303456, "end": 1303758 }
source:

class ____(
    sgqlc.types.Type, Node, AuditEntry, EnterpriseAuditEntryData, OrganizationAuditEntryData, RepositoryAuditEntryData
):
    """Audit log entry for a private_repository_forking.enable event."""

    __schema__ = github_schema
    __field_names__ = ()

target: PrivateRepositoryForkingEnableAuditEntry

language: python
repo: google__jax
path: tests/api_test.py
class_span: { "start": 50636, "end": 177034 }
source:

class ____(jtu.JaxTestCase):

  def test_grad_item(self):
    def f(x):
      if x.astype(bool).item():
        return x ** 2
      else:
        return x
    out = jax.grad(f)(2.0)
    self.assertEqual(out, 4)

  def test_jit_item(self):
    def f(x):
      return x.item()
    x = jnp.array(1.0)
    self.assertEqual(f(x), x)
    with self.assertRaisesRegex(core.ConcretizationTypeError, "Abstract tracer value"):
      jax.jit(f)(x)

  @parameterized.named_parameters(
      ('grad', jax.grad),
      ('jacfwd', jax.jacfwd),
      ('jacref', jax.jacrev),
  )
  def test_grad_wrap(self, transform):
    # Ensures that transforms wrap transformed functions with the correct signature.
    @jit(static_argnames=['flag'])
    @transform
    def my_function(x, flag):
      return x if flag else jnp.zeros_like(x)
    self.assertEqual(my_function(1.0, False), 0.0)
    self.assertEqual(my_function(1.0, True), 1.0)

  def test_grad_bad_input(self):
    def f(x):
      return x

    self.assertRaisesRegex(
        TypeError, ".* 'foo' of type <.*'str'> is not a valid JAX type",
        lambda: grad(f)("foo"))

  def test_grad_argnums(self):
    def f(x, y, z, flag=False):
      assert flag
      return 1.0 * x + 2.0 * y + 3.0 * z

    assert grad(f)(1.0, 1.0, 1.0, flag=True) == 1.0
    assert grad(f, argnums=1)(1.0, 1.0, 1.0, flag=True) == 2.0
    assert grad(f, argnums=(2, 0))(1.0, 1.0, 1.0, flag=True) == (3.0, 1.0)

  def test_value_and_grad_argnums(self):
    def f(x, y, z, flag=False):
      assert flag
      return 1.0 * x + 2.0 * y + 3.0 * z

    y = f(1.0, 1.0, 1.0, flag=True)
    assert api.value_and_grad(f)(1.0, 1.0, 1.0, flag=True) == (y, 1.0)
    assert api.value_and_grad(f, argnums=1)(1.0, 1.0, 1.0, flag=True) == (y, 2.0)
    assert api.value_and_grad(f, argnums=(2, 0))(1.0, 1.0, 1.0, flag=True) == (y, (3.0, 1.0))

  @jtu.thread_unsafe_test()  # Concurrent cache eviction means we may retrace.
  def test_grad_of_jit(self):
    side = []

    @jit
    def f(x):
      side.append(None)
      return x * x

    assert grad(f)(1.0) == 2.0
    assert len(side) == 1
    assert grad(f)(2.0) == 4.0
    assert len(side) == 1

  @jtu.thread_unsafe_test()  # Concurrent cache eviction means we may retrace.
  def test_jit_of_grad(self):
    side = []

    @jit
    def f(x):
      side.append(None)
      return x * x

    g = jit(grad(f))
    assert g(1.0) == 2.0
    assert len(side) == 1
    assert g(2.0) == 4.0
    assert len(side) == 1

  @jtu.thread_unsafe_test()  # Concurrent cache eviction means we may retrace.
  def test_fwd_and_bwd(self):
    def f(x, W):
      return x @ W
    x = W = cot_out = jnp.ones((4,4))
    expected_y, f_vjp = api.vjp(f, x, W)
    expected_cot_x, expected_cot_W = f_vjp(cot_out)
    fwd, bwd = api.fwd_and_bwd(f, argnums=(0,1))
    y, residuals = fwd(x, W)
    cot_x, cot_W = bwd(residuals, cot_out)
    self.assertArraysAllClose(y, expected_y)
    self.assertArraysAllClose(cot_x, expected_cot_x)
    self.assertArraysAllClose(cot_W, expected_cot_W)
    with jax.no_tracing():
      y, residuals = fwd(x, W)
      cot_x, cot_W = bwd(residuals, cot_out)  # no recompilation

  @parameterized.named_parameters(
      {"testcase_name": f"_{transform.__name__}", "transform": transform}
      for transform in [grad, jacfwd, jacrev])
  def test_ad_weak_types(self, transform):
    out = transform(lambda x: x)(1.0)
    self.assertTrue(dtypes.is_weakly_typed(out))

  def test_bad_input(self):
    def f(x):
      return x

    with self.assertRaisesRegex(TypeError, ".* 'foo' of type <.*'str'> is not a valid JAX type"):
      grad(f)("foo")

    err_str = ("Error interpreting argument to .* as an abstract array. The problematic "
               "value is of type .* and was passed to the function at path x.")
    with self.assertRaisesRegex(TypeError, err_str):
      jit(f)("foo")

  def test_grad_tuple_output(self):
    jtu.check_raises(lambda: grad(lambda x: (x,x))(1.0), TypeError,
                     "Gradient only defined for scalar-output functions. ")

  def test_grad_unit_output(self):
    jtu.check_raises(lambda: grad(lambda x: ())(np.zeros(3)), TypeError,
                     "Gradient only defined for scalar-output functions. ")

  def test_grad_nonscalar_output(self):
    jtu.check_raises(lambda: grad(lambda x: x)(np.zeros(3)), TypeError,
                     "Gradient only defined for scalar-output functions. ")

  def test_unwrapped_numpy(self):
    def f(x):
      return np.exp(x)

    with self.assertRaisesRegex(Exception, "The numpy.ndarray conversion .*"):
      grad(f)(np.zeros(3))

  def test_binop_mismatch(self):
    def f(x, y):
      return x + y

    jtu.check_raises(
        lambda: f(jnp.zeros(3), jnp.zeros(4)),
        TypeError,
        "add got incompatible shapes for broadcasting: (3,), (4,).")

    jtu.check_raises(
        lambda: grad(f)(np.zeros(3), np.zeros(4)),
        TypeError,
        "add got incompatible shapes for broadcasting: (3,), (4,).")

  def test_dot_mismatch(self):
    def f(x, y):
      return jnp.dot(x, y)

    self.assertRaisesRegex(
        TypeError, ("dot_general requires contracting dimensions to have "
                    "the same shape, got \\(3L?,\\) and \\(4L?,\\)."),
        lambda: grad(f)(np.zeros(3), np.zeros(4)))

  def test_abstract_error_message(self):
    for castfun in [float, complex, int]:
      def f(x):
        return castfun(x)

      self.assertRaisesRegex(
          TypeError,
          f"[Tt]ry using `x.astype\\({castfun.__name__}\\)`",
          lambda: jit(f)(1.0))

  def test_switch_value_jit(self):
    def f(x):
      y = x > 0
      if y:
        return x
      else:
        return -x

    assert grad(f)(1.0) == 1.0
    assert grad(f)(-1.0) == -1.0
    with self.assertRaisesRegex(core.ConcretizationTypeError, "Attempted boolean conversion"):
      jit(f)(1)

  def test_list_index_err(self):
    L = [1, 2, 3]

    def f(n):
      return L[n]

    assert jit(f, static_argnums=(0,))(0) == L[0]
    self.assertRaisesRegex(
        TypeError,
        r"The __index__\(\) method was called on traced array.*",
        lambda: jit(f)(0))

  def test_range_err(self):
    def f(x, n):
      for i in range(n):
        x = x + i
      return x

    assert jit(f, static_argnums=(1,))(0, 5) == 10
    self.assertRaisesRegex(
        TypeError,
        r"The __index__\(\) method was called on traced array.*",
        lambda: jit(f)(0, 5))

  def test_cast_int(self):
    f = lambda x: int(x)
    self.assertRaisesRegex(
        TypeError,
        "('(?:JaxprTracer|DynamicJaxprTracer)' object cannot be interpreted as an integer"
        "|Abstract tracer value encountered where concrete value is expected.*)",
        lambda: jit(f)(0))

  def test_casts(self):
    for castfun in [hex, oct]:
      f = lambda x: castfun(x)
      self.assertRaisesRegex(
          TypeError,
          r"The __index__\(\) method was called on traced array.*",
          lambda: jit(f)(0))

  def test_unimplemented_interpreter_rules(self):
    foo_p = core.Primitive('foo')

    def foo(x):
      return foo_p.bind(x)

    jtu.check_raises(lambda: foo(1.0), NotImplementedError,
                     "Evaluation rule for 'foo' not implemented")

    jtu.check_raises(lambda: jit(foo)(1.0), NotImplementedError,
                     "Abstract evaluation for 'foo' not implemented")

    jtu.check_raises(lambda: grad(foo)(1.0), NotImplementedError,
                     "Differentiation rule for 'foo' not implemented")

    foo_p.def_abstract_eval(lambda x: x)

    jtu.check_raises_regexp(lambda: jit(foo)(1.0), NotImplementedError,
                            ".* rule for primitive 'foo' not found.*")

    foo_p.def_impl(lambda x: x)
    ad.defjvp(foo_p, lambda g, x: foo(g))

    jtu.check_raises(lambda: grad(foo)(1.0), NotImplementedError,
                     "Transpose rule (for reverse-mode differentiation) for 'foo' not implemented")

  def test_wrong_output_abstract_eval(self):
    foo_p = core.Primitive('foo')

    def foo(x):
      return foo_p.bind(x)

    foo_p.def_abstract_eval(lambda x: [x])  # Shouldn't return a list.
    foo_p.def_impl(lambda x: x)
    jitted = jit(lambda x: foo(x))
    jtu.check_raises(lambda: jitted(1.0), ValueError,
                     "foo.abstract_eval() method should return a tuple or")

    foo2_p = core.Primitive('foo2')
    foo2_p.multiple_results = True

    def foo2(x):
      return foo2_p.bind(x),

    foo2_p.def_abstract_eval(lambda x: x)  # Should return a list.
    foo2_p.def_impl(lambda x: [x])
    jitted = jit(lambda x: foo2(x))
    jtu.check_raises(lambda: jitted(1.0), ValueError,
                     "foo2.abstract_eval() method should return a tuple or")

  def test_is_subclass(self):
    self.assertFalse(issubclass(np.ndarray, jax.Array))

  def test_is_instance(self):
    def f(x):
      self.assertIsInstance(x, jax.Array)
      self.assertNotIsInstance(x, np.ndarray)
      return x + 2
    jit(f)(3)
    jax.vmap(f)(np.arange(3))

  def test_device_put_and_get(self):
    x = np.arange(12.).reshape((3, 4)).astype("float32")
    dx = api.device_put(x)
    _check_instance(self, dx)
    self.assertIsInstance(dx, jax.Array)
    self.assertNotIsInstance(dx, np.ndarray)
    x2 = api.device_get(dx)
    self.assertNotIsInstance(x2, jax.Array)
    self.assertIsInstance(x2, np.ndarray)
    self.assertArraysEqual(x2, x)

    y = [x, (2 * x, 3 * x)]
    dy = api.device_put(y)
    y2 = api.device_get(dy)
    self.assertIsInstance(y2, list)
    self.assertIsInstance(y2[0], np.ndarray)
    self.assertArraysEqual(y2[0], x)
    self.assertIsInstance(y2[1], tuple)
    self.assertIsInstance(y2[1][0], np.ndarray)
    self.assertArraysEqual(y2[1][0], 2 * x)
    self.assertIsInstance(y2[1][1], np.ndarray)
    self.assertArraysEqual(y2[1][1], 3 * x)

  def test_device_put_sharding(self):
    mesh = jax.sharding.Mesh(jax.devices(), ('x',))
    s = jax.NamedSharding(mesh, P('x'))
    x = jnp.arange(len(jax.devices()))
    y = jax.device_put(x, s)
    self.assertEqual(y.sharding, s)
    self.assertArraysAllClose(y, x)

    # this might hit a special fast path
    z = jax.device_put(y, s)
    self.assertEqual(z.sharding, s)
    self.assertArraysAllClose(z, x)
    self.assertIs(z, y)  # no copy

    w = jax.device_put(z)
    self.assertIs(w, z)

    u = jax.device_put(y, jax.devices()[0])
    self.assertArraysAllClose(u, y)
    self.assertEqual(u.devices(), {jax.devices()[0]})

  def test_device_put_sharding_tree(self):
    if jax.device_count() < 2:
      raise unittest.SkipTest("Test requires >= 2 devices")

    mesh = jax.sharding.Mesh(np.array(jax.devices()[:2]).reshape((2, 1)), ("x", "y"))
    s1 = jax.NamedSharding(mesh, P("x"))
    s2 = jax.NamedSharding(mesh, P("y"))
    s3 = jax.NamedSharding(mesh, P("x", "y"))

    x = jnp.arange(2)
    y = jnp.arange(2) + 10
    z = (jnp.arange(2) + 100).reshape((2, 1))

    out = jax.device_put((x, (y, z)), device=(s1, (s2, s3)))
    self.assertEqual(out[0].sharding, s1)
    self.assertEqual(out[1][0].sharding, s2)
    self.assertEqual(out[1][1].sharding, s3)
    self.assertArraysAllClose(out[0], x)
    self.assertArraysAllClose(out[1][0], y)
    self.assertArraysAllClose(out[1][1], z)

  def test_device_put_sharding_tree_prefix(self):
    if jax.device_count() < 2:
      raise unittest.SkipTest("Test requires >= 2 devices")

    mesh = jax.sharding.Mesh(np.array(jax.devices()[:2]).reshape((2, 1)), ("x", "y"))
    s1 = jax.sharding.NamedSharding(mesh, P("x"))
    s2 = jax.sharding.NamedSharding(mesh, P("y"))

    x = jnp.arange(2)
    y = jnp.arange(2) + 10
    z = jnp.arange(2) + 100

    out = jax.device_put((x, (y, z)), device=(s1, s2))
    self.assertEqual(out[0].sharding, s1)
    self.assertEqual(out[1][0].sharding, s2)
    self.assertEqual(out[1][1].sharding, s2)
    self.assertArraysAllClose(out[0], x)
    self.assertArraysAllClose(out[1][0], y)
    self.assertArraysAllClose(out[1][1], z)

  def test_device_put_sharding_mismatched_tree_same_leaf_count(self):
    if jax.device_count() < 2:
      raise unittest.SkipTest("Test requires >= 2 devices")

    mesh = jax.sharding.Mesh(np.array(jax.devices()[:2]).reshape((2, 1)), ("x", "y"))
    s1 = jax.sharding.NamedSharding(mesh, P("x"))
    s2 = jax.sharding.NamedSharding(mesh, P("y"))

    x = jnp.arange(2)
    y = jnp.arange(2) + 10
    z = jnp.arange(2) + 100

    with self.assertRaisesRegex(
        ValueError,
        "device_put device specification must be a tree prefix of the "
        r"corresponding value, got specification \(\(NamedSharding\(.*\), "
        r"NamedSharding\(.*\)\), NamedSharding\(.*\)\) for value tree "
        r"PyTreeDef\(\(\*, \(\*, \*\)\)\)."
    ):
      jax.device_put((x, (y, z)), device=((s1, s2), s2))

  def test_device_put_sharding_mismatched_tree_different_leaf_count(self):
    if jax.device_count() < 2:
      raise unittest.SkipTest("Test requires >= 2 devices")

    mesh = jax.sharding.Mesh(np.array(jax.devices()[:2]).reshape((2, 1)), ("x", "y"))
    s1 = jax.sharding.NamedSharding(mesh, P("x"))
    s2 = jax.sharding.NamedSharding(mesh, P("y"))

    x = jnp.arange(2)
    y = jnp.arange(2) + 10
    z = jnp.arange(2) + 100

    with self.assertRaisesRegex(
        ValueError,
        "device_put device specification must be a tree prefix of the "
        r"corresponding value, got specification \(NamedSharding\(.*\), "
        r"NamedSharding\(.*\)\) for value tree PyTreeDef\(\(\*, \*, \*\)\)."
    ):
      jax.device_put((x, y, z), device=(s1, s2))

  def test_internal_device_put_with_device(self):
    # Hitting the cache for a single-device jitted execution while using a numpy
    # array calls internal `DevicePutWithDevice`.
    f = jax.jit(lambda x: x + 1)
    f(np.arange(8))
    with jtu.count_internal_device_puts() as counts:
      f(np.arange(8))
    self.assertEqual(counts(), {"device_put_with_device": 1})

  def test_internal_device_put_fully_replicated(self):
    if jax.device_count() < 2:
      raise unittest.SkipTest("Test requires >= 2 devices")

    # Creating an array from a numpy array with a fully-replicated sharding
    # calls internal `DevicePutWithSharding`, taking the fully-replicated sub
    # case.
    mesh = jax.sharding.Mesh(np.array(jax.devices()[:2]), "x")
    sharding = jax.NamedSharding(mesh, P())
    with jtu.count_internal_device_puts() as counts:
      jax.device_put(np.arange(8), sharding)
    self.assertEqual(
        counts(),
        {"device_put_with_sharding": 1, "device_put_fully_replicated": 1},
    )

  def test_internal_device_put_batched(self):
    if jax.device_count() < 2:
      raise unittest.SkipTest("Test requires >= 2 devices")

    # Creating an array from a numpy array with a non-fully-replicated sharding
    # calls internal `DevicePutWithSharding`, performing batched creation of a
    # multi-shard array.
    mesh = jax.sharding.Mesh(np.array(jax.devices()[:2]), "x")
    sharding = jax.NamedSharding(mesh, P("x"))
    with jtu.count_internal_device_puts() as counts:
      jax.device_put(np.arange(8), sharding)
    self.assertEqual(
        counts(), {"device_put_with_sharding": 1, "device_put_batched": 1}
    )

  def test_internal_device_put_assembled(self):
    if jax.device_count() < 2:
      raise unittest.SkipTest("Test requires >= 2 devices")

    # Creating an array from per-device JAX arrays calls internal
    # `DevicePutWithSharding`, performing per-shard array adoption followed by
    # assembly.
    mesh = jax.sharding.Mesh(np.array(jax.devices()[:2]), "x")
    sharding = jax.NamedSharding(mesh, P("x"))
    arr = np.arange(8)
    per_device_arrs = {
        # Use uncommitted arrays that are not aligned with the destination
        # sharding so that we trigger `BatchedDevicePut`.
        sharding_impls.hashed_index(index): jnp.array(arr[index])
        for _, index in sharding.devices_indices_map(arr.shape).items()
    }
    data_callback = lambda index: per_device_arrs[
        sharding_impls.hashed_index(index)
    ]
    with jtu.count_internal_device_puts() as counts:
      jax.make_array_from_callback(arr.shape, sharding, data_callback)
    self.assertEqual(
        counts(), {"device_put_with_sharding": 1, "device_put_assembled": 1}
    )

  def test_device_put_custom_type_not_accepting_none_leaves(self):
    class CustomNode(list):
      pass

    def unflatten(unused_aux_data, children):
      self.assertIsNotNone(children[0])
      return CustomNode(children)

    tree_util.register_pytree_node(CustomNode, lambda x: (x, None), unflatten)
    jax.device_put(CustomNode([0.1]))

  def test_device_put_literals(self):
    self.assertEqual(
        np.dtype(np.int32),
        jax.device_put(literals.TypedInt(1, np.dtype(np.int32))).dtype)
    self.assertEqual(
        np.dtype(np.int64),
        jax.device_put(literals.TypedInt(1, np.dtype(np.int64))).dtype)
    self.assertEqual(
        np.dtype(np.float32),
        jax.device_put(literals.TypedFloat(1, np.dtype(np.float32))).dtype)
    self.assertEqual(
        np.dtype(np.float64),
        jax.device_put(literals.TypedFloat(1, np.dtype(np.float64))).dtype)
    self.assertEqual(
        np.dtype(np.complex64),
        jax.device_put(literals.TypedComplex(1, np.dtype(np.complex64))).dtype)
    if jtu.device_under_test() != "tpu":
      # The TPU compiler does not support complex128.
      self.assertEqual(
          np.dtype(np.complex128),
          jax.device_put(literals.TypedComplex(1, np.dtype(np.complex128))).dtype)
    self.assertEqual(
        np.dtype(np.int32),
        jax.device_put(literals.TypedNdArray(np.array([1], dtype=np.int32),
                                             weak_type=False)).dtype)
    self.assertEqual(
        np.dtype(np.int64),
        jax.device_put(literals.TypedNdArray(np.array([1], dtype=np.int64),
                                             weak_type=False)).dtype)

  def test_vmap_inconsistent_sizes_constructs_proper_error_message(self):
    def f(x1, x2, g):
      return g(x1, x2)

    with self.assertRaisesRegex(
        ValueError,
        "vmap got inconsistent sizes for array axes to be mapped:"
    ):
      jax.vmap(f, (0, 0, None))(jnp.ones(2), jnp.ones(3), jnp.add)

  def test_vmap_inconsistent_sizes_constructs_proper_error_message_kwargs(self):
    # regression test for https://github.com/jax-ml/jax/issues/24406
    def f(x1, x2, a3):
      return x1 + x2 + a3

    with self.assertRaisesRegex(
        ValueError,
        "vmap got inconsistent sizes for array axes to be mapped:\n"
        r" \* most axes \(2 of them\) had size 2, e.g. axis 0 of argument x1 of type float32\[2\];\n"
        r" \* one axis had size 1: axis 0 of kwargs\['a3'\] of type float32\[1\]",
    ):
      jax.vmap(f)(
          jnp.ones(2, dtype=jnp.float32),
          a3=jnp.ones(1, dtype=jnp.float32),
          x2=jnp.ones(2, dtype=jnp.float32)
      )

  def test_vmap_inconsistent_sizes_constructs_proper_error_message_starargs(self):
    # regression test for https://github.com/jax-ml/jax/issues/26908
    def f(x, *args):
      return x - sum(args)

    with self.assertRaisesRegex(
        ValueError,
        "vmap got inconsistent sizes for array axes to be mapped:"
    ):
      jax.vmap(f)(jnp.ones(4), jnp.ones(2), jnp.ones(2))

  def test_device_get_scalar(self):
    x = np.arange(12.).reshape((3, 4)).astype("float32")
    x = api.device_put(x)
    _check_instance(self, x)
    self.assertIsInstance(x.sharding, jax.sharding.SingleDeviceSharding)
    for s in x.addressable_shards:
      self.assertArraysEqual(s.data, x)
      self.assertEqual(s.replica_id, 0)
      self.assertEqual(s.index, (slice(None), slice(None)))

    y = [x, 2]
    y2 = api.device_get(y)
    self.assertIsInstance(y2, list)
    self.assertIsInstance(y2[0], np.ndarray)
    self.assertArraysEqual(y2[0], x)
    self.assertIsInstance(y2[1], int)
    self.assertEqual(y2[1], 2)

  @parameterized.parameters([(3,)], [(2, 0)])
  def test_device_put_across_devices(self, shape):
    if len(jax.local_devices()) < 2:
      raise unittest.SkipTest("this test requires multiple devices")
    d1, d2 = jax.local_devices()[:2]
    data = self.rng().randn(*shape).astype(np.float32)
    x = api.device_put(data, device=d1)
    self.assertEqual(x.devices(), {d1})

    y = api.device_put(x, device=d2)
    self.assertEqual(y.devices(), {d2})

    np.testing.assert_array_equal(data, np.array(y))
    # Make sure these don't crash
    api.device_put(x)
    api.device_put(y)

  @jtu.skip_on_devices("cpu")
  def test_device_put_across_platforms(self):
    default_device = jax.devices()[0]
    cpu_device = jax.devices("cpu")[0]

    np_arr = np.array([1,2,3])
    scalar = 1
    device_arr = jnp.array([1,2,3])
    assert device_arr.devices() == {default_device}

    for val in [np_arr, device_arr, scalar]:
      x = api.device_put(val, device=cpu_device)
      self.assertEqual(x.devices(), {cpu_device})

  def test_device_put_on_single_device_donated_buffer_fails(self):
    @jax.jit(donate_argnums=0)
    def f(inp1):
      return inp1 * 2

    x = jnp.zeros((10,), jnp.float32)
    f(x)

    with self.assertRaises(RuntimeError):
      result = jax.device_put(x, jax.devices()[0])
      result.block_until_ready()

    with self.assertRaises(RuntimeError):
      result = jax.device_put(x, jax.devices()[-1])
      result.block_until_ready()

  def test_device_put_on_multi_device_donated_buffer_fails(self):
    @jax.jit(donate_argnums=0)
    def f(inp1):
      return inp1 * 2

    mesh1 = jax.sharding.Mesh(jax.devices(), ("x",))
    s1 = jax.NamedSharding(mesh1, P("x"))
    mesh2 = jax.sharding.Mesh(tuple(reversed(jax.devices())), ("x",))
    s2 = jax.NamedSharding(mesh2, P("x"))
    x = jax.device_put(np.arange(len(jax.devices()), dtype=jnp.float32), s1)
    f(x)

    with self.assertRaises(RuntimeError):
      result = jax.device_put(x, s1)
      result.block_until_ready()

    with self.assertRaises(RuntimeError):
      result = jax.device_put(x, s2)
      result.block_until_ready()

  @jax.default_matmul_precision("float32")
  def test_jacobian(self):
    R = self.rng().randn
    A = R(4, 3)
    x = R(3)

    f = lambda x: jnp.dot(A, x)
    self.assertAllClose(jacfwd(f)(x), A)
    self.assertAllClose(jacrev(f)(x), A)

    f = lambda x: jnp.tanh(jnp.dot(A, x))
    self.assertAllClose(jacfwd(f)(x), jacrev(f)(x))

  @jax.default_matmul_precision("float32")
  def test_hessian(self):
    R = self.rng().randn
    A = R(4, 4)
    x = R(4)

    f = lambda x: jnp.dot(x, jnp.dot(A, x))
    self.assertAllClose(hessian(f)(x), A + A.T)

  @jax.default_matmul_precision("float32")
  def test_hessian_holomorphic(self):
    R = self.rng().randn
    A = R(4, 4)
    x = R(4).astype('complex64') * (1 + 2j)

    f = lambda x: jnp.dot(x, jnp.dot(A.astype(x.dtype), x))
    self.assertAllClose(hessian(f, holomorphic=True)(x), (A + A.T).astype(x.dtype))

  @jax.default_matmul_precision("float32")
  def test_hessian_aux(self):
    R = self.rng().randn
    A = R(4, 4)
    x = R(4)

    f = lambda x: (jnp.dot(x, jnp.dot(A, x)), x)
    h, aux = hessian(f, has_aux=True)(x)
    self.assertAllClose(h, A + A.T)
    self.assertAllClose(aux, x)

  def test_std_basis(self):
    basis = api._std_basis(jnp.zeros(3))
    assert getattr(basis, "shape", None) == (3, 3)
    self.assertAllClose(basis, np.eye(3))

    basis = api._std_basis(jnp.zeros((3, 3)))
    assert getattr(basis, "shape", None) == (9, 3, 3)
    self.assertAllClose(basis, np.eye(9).reshape(9, 3, 3))

    basis = api._std_basis([0., (jnp.zeros(3), jnp.zeros((3, 4)))])
    assert isinstance(basis, list) and len(basis) == 2
    assert getattr(basis[0], "shape", None) == (16,)
    assert isinstance(basis[1], tuple) and len(basis[1]) == 2
    assert getattr(basis[1][0], "shape", None) == (16, 3)
    assert getattr(basis[1][1], "shape", None) == (16, 3, 4)

  @jtu.skip_on_devices("tpu")
  def test_jacobian_on_pytrees(self):
    for jacfun in [jacfwd, jacrev]:
      ans = jacfun(lambda x, y: (x, y))(0., 1.)
      expected = (1., 0.)
      self.assertAllClose(ans, expected, check_dtypes=False)

      ans = jacfun(lambda x, y: (x, y), 1)(0., 1.)
      expected = (0., 1.)
      self.assertAllClose(ans, expected, check_dtypes=False)

      ans = jacfun(lambda x, y: (x, y), (0, 1))(0., 1.)
      expected = ((1., 0.), (0., 1.),)
      self.assertAllClose(ans, expected, check_dtypes=False)

      ans = jacfun(lambda x: x[:2])((1., 2., 3.))
      expected = ((1., 0., 0.), (0., 1., 0.))
      self.assertAllClose(ans, expected, check_dtypes=False)

      R = self.rng().randn
      x = jnp.array(R(2))
      y = jnp.array(R(3))
      ans = jacfun(lambda x, y: {'x': x, 'xy': jnp.outer(x, y)})(x, y)
      expected = {'x': np.eye(2),
                  'xy': np.kron(np.eye(2), y[:, None]).reshape(2, 3, 2)}
      self.assertAllClose(ans, expected, check_dtypes=False)

  @jtu.skip_on_devices("tpu")
  def test_hessian_on_pytrees(self):
    ans = hessian(lambda x: jnp.array(x)**2)((1., 2.))
    expected = ((np.array([2., 0.]), np.array([0., 0.])),
                (np.array([0., 0.]), np.array([0., 2.])))
    self.assertAllClose(ans, expected, check_dtypes=False)

  @jtu.skip_on_devices("tpu")
  def test_issue1372(self):
    def quad(x):
      return jnp.dot(x, x)

    def f(x, u):
      return quad(x) + quad(u)

    x, u = jnp.ones(5), jnp.ones(2)

    rev = jacrev
    fwd = jacfwd

    # Diagonal entries
    self.assertEqual(rev(rev(f, 0), 0)(x, u).shape, (5, 5))
    self.assertEqual(rev(fwd(f, 0), 0)(x, u).shape, (5, 5))
    self.assertEqual(fwd(rev(f, 0), 0)(x, u).shape, (5, 5))
    self.assertEqual(fwd(fwd(f, 0), 0)(x, u).shape, (5, 5))
    self.assertEqual(rev(rev(f, 1), 1)(x, u).shape, (2, 2))
    self.assertEqual(rev(fwd(f, 1), 1)(x, u).shape, (2, 2))
    self.assertEqual(fwd(rev(f, 1), 1)(x, u).shape, (2, 2))
    self.assertEqual(fwd(fwd(f, 1), 1)(x, u).shape, (2, 2))

    # Off-diagonal entries by reverse-mode on the outside
    self.assertEqual(rev(rev(f, 1), 0)(x, u).shape, (2, 5))
    self.assertEqual(rev(fwd(f, 1), 0)(x, u).shape, (2, 5))
    self.assertEqual(rev(rev(f, 0), 1)(x, u).shape, (5, 2))
    self.assertEqual(rev(fwd(f, 0), 1)(x, u).shape, (5, 2))

    # Off-diagonal entries by forward-mode on the outside
    self.assertEqual(fwd(rev(f, 1), 0)(x, u).shape, (2, 5))
    self.assertEqual(fwd(fwd(f, 1), 0)(x, u).shape, (2, 5))
    self.assertEqual(fwd(rev(f, 0), 1)(x, u).shape, (5, 2))
    self.assertEqual(fwd(fwd(f, 0), 1)(x, u).shape, (5, 2))

  def test_large_device_constant(self):
    ans = jit(lambda x: 2 * x)(jnp.ones(int(2e6)))
# doesn't crash self.assertAllClose(ans, np.ones(int(2e6)) * 2., check_dtypes=False) def test_grad_and_aux_basic(self): g, aux = grad(lambda x: (x**3, [x**2]), has_aux=True)(3.) self.assertAllClose(g, grad(lambda x: x**3)(3.)) self.assertAllClose(aux, [9.], check_dtypes=False) def test_grad_and_aux_error(self): with self.assertRaisesRegex(TypeError, "two-element tuple"): grad(lambda x: (1, 2, 3), has_aux=True)(1.) with self.assertRaisesRegex(TypeError, "two-element tuple"): grad(lambda x: x, has_aux=True)(1.) with self.assertRaisesRegex(TypeError, "two-element tuple"): grad(lambda x: (x,), has_aux=True)(1.) def test_grad_and_aux_nested(self): def f(x): g, aux = grad(lambda x: (x**3, [x**3]), has_aux=True)(x) return aux[0] f2 = lambda x: x**3 self.assertEqual(grad(f)(4.), grad(f2)(4.)) self.assertEqual(jit(grad(f))(4.), grad(f2)(4.)) self.assertEqual(jit(grad(jit(f)))(4.), grad(f2)(4.)) def f(x): g, aux = grad(lambda x: (x**3, [x**3]), has_aux=True)(x) return aux[0] * jnp.sin(x) f2 = lambda x: x**3 * jnp.sin(x) self.assertEqual(grad(f)(4.), grad(f2)(4.)) self.assertEqual(jit(grad(f))(4.), grad(f2)(4.)) self.assertEqual(jit(grad(jit(f)))(4.), grad(f2)(4.)) def test_grad_and_aux_constant(self): g, aux = grad(lambda x: (x**3, [4.]), has_aux=True)(4.) self.assertEqual(g, grad(lambda x: x**3)(4.)) self.assertEqual(aux, [4.]) g, aux = grad(lambda x: (x**3, [x**2, 4.]), has_aux=True)(4.) self.assertEqual(g, grad(lambda x: x**3)(4.)) self.assertEqual(aux, [4.**2, 4.]) def test_grad_and_aux_no_tracers(self): # see https://github.com/jax-ml/jax/issues/1950 def f(x): aux = dict(identity=x, p1=x+1) return x ** 2, aux _, aux = jax.grad(f, has_aux=True)(3.) self.assertIsInstance(aux, dict) for val in aux.values(): self.assertNotIsInstance(val, core.Tracer) def test_jacfwd_and_aux_basic(self): jac, aux = jacfwd(lambda x: (x**3, [x**2]), has_aux=True)(3.) self.assertAllClose(jac, jacfwd(lambda x: x**3)(3.)) self.assertAllClose(aux, [9.], check_dtypes=False) def test_jacrev_and_aux_basic(self): jac, aux = jacrev(lambda x: (x**3, [x**2]), has_aux=True)(3.) 
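    # With has_aux=True, jacrev (like jacfwd and grad) differentiates only the
    # first element of the returned pair; the second element is passed through
    # unchanged as auxiliary output, which the asserts below rely on.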
    self.assertAllClose(jac, jacrev(lambda x: x**3)(3.))
    self.assertAllClose(aux, [9.], check_dtypes=False)

  def test_jacfwd_and_aux_nested(self):
    def f(x):
      jac, aux = jacfwd(lambda x: (x**3, [x**3]), has_aux=True)(x)
      return aux[0]

    f2 = lambda x: x**3

    self.assertEqual(jacfwd(f)(4.), jacfwd(f2)(4.))
    self.assertEqual(jit(jacfwd(f))(4.), jacfwd(f2)(4.))
    self.assertEqual(jit(jacfwd(jit(f)))(4.), jacfwd(f2)(4.))

    def f(x):
      jac, aux = jacfwd(lambda x: (x**3, [x**3]), has_aux=True)(x)
      return aux[0] * jnp.sin(x)

    f2 = lambda x: x**3 * jnp.sin(x)

    self.assertEqual(jacfwd(f)(4.), jacfwd(f2)(4.))
    self.assertEqual(jit(jacfwd(f))(4.), jacfwd(f2)(4.))
    self.assertEqual(jit(jacfwd(jit(f)))(4.), jacfwd(f2)(4.))

  def test_jacrev_and_aux_nested(self):
    def f(x):
      jac, aux = jacrev(lambda x: (x**3, [x**3]), has_aux=True)(x)
      return aux[0]

    f2 = lambda x: x**3

    self.assertEqual(jacrev(f)(4.), jacrev(f2)(4.))
    self.assertEqual(jit(jacrev(f))(4.), jacrev(f2)(4.))
    self.assertEqual(jit(jacrev(jit(f)))(4.), jacrev(f2)(4.))

    def f(x):
      jac, aux = jacrev(lambda x: (x**3, [x**3]), has_aux=True)(x)
      return aux[0] * jnp.sin(x)

    f2 = lambda x: x**3 * jnp.sin(x)

    self.assertEqual(jacrev(f)(4.), jacrev(f2)(4.))
    self.assertEqual(jit(jacrev(f))(4.), jacrev(f2)(4.))
    self.assertEqual(jit(jacrev(jit(f)))(4.), jacrev(f2)(4.))

  def test_jvp_and_aux_basic(self):
    fun = lambda x: (x**3, [x**2])
    primals, tangents, aux = api.jvp(fun, (3.,), (4.,), has_aux=True)
    expected_primals, expected_tangents = api.jvp(lambda x: x**3, (3.,), (4.,))
    self.assertAllClose(primals, expected_primals, check_dtypes=True)
    self.assertAllClose(tangents, expected_tangents, check_dtypes=True)
    self.assertEqual(aux, [3.**2])

  def test_jvp_mismatched_arguments(self):
    self.assertRaisesRegex(
        TypeError,
        ("primal and tangent arguments to jax.jvp must have the same tree "
         "structure"),
        lambda: api.jvp(lambda x, y: x * y, (np.float32(2),), ()))
    # Primals and tangents must both be tuples or both be lists, not a mix.
    self.assertRaisesRegex(
        TypeError,
        ("primal and tangent arguments to jax.jvp must have the same tree "
         "structure"),
        lambda: api.jvp(lambda x, y: x * y, (np.float32(2),), [np.float32(2)]))
    self.assertRaisesRegex(
        TypeError,
        "primal and tangent arguments to jax.jvp do not match.",
        lambda: api.jvp(lambda x: -x, (np.float16(2),), (np.float32(4),)))
    # Primals and tangents must also have matching shapes; mismatches raise
    # an error.
    fun = lambda x: x+1
    with self.assertRaisesRegex(
        ValueError, "jvp called with different primal and tangent shapes"):
      api.jvp(fun, (jnp.array([1.,2.,3.]),), (jnp.array([1.,2.,3.,4.]),))
    with self.assertRaisesRegex(
        ValueError, "jvp called with different primal and tangent shapes"):
      api.jvp(fun, (jnp.float32(10.),), (jnp.array([1.,2.,3.], dtype=jnp.float32),))
    with self.assertRaisesRegex(
        ValueError, "jvp called with different primal and tangent shapes"):
      api.jvp(fun, (jnp.array([1.,2.,3.], dtype=jnp.float32),), (jnp.float32(20.),))
    with self.assertRaisesRegex(
        ValueError, "jvp called with different primal and tangent shapes"):
      api.jvp(fun, (jnp.array([1.,2.,3.]),), (20.,))

  def test_jvp_non_tuple_arguments(self):
    def f(x, y):
      return x + y
    self.assertRaisesRegex(
        TypeError,
        "primal and tangent arguments to jax.jvp must be tuples or lists; found float and tuple.",
        lambda: api.jvp(f, 0., (1.,)))
    self.assertRaisesRegex(
        TypeError,
        "primal and tangent arguments to jax.jvp must be tuples or lists; found tuple and ndarray.",
        lambda: api.jvp(f, (0.,), np.array([1., 2.])))

  def test_vjp_mismatched_arguments(self):
    _, pullback = api.vjp(lambda x, y: x * y, np.float32(3), np.float32(4))
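    # The pullback returned by vjp expects a single cotangent whose tree
    # structure and dtype match the primal output; the calls below violate
    # that on purpose and must raise.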
self.assertRaisesRegex( ValueError, "unexpected tree structure", lambda: pullback((np.float32(7), np.float32(100)))) self.assertRaisesRegex( ValueError, "unexpected JAX type", lambda: pullback(np.float16(42))) def test_vjp_bad_cotangent_shape(self): x = np.ones((2, 5), dtype=np.float32) y = np.ones((5, 3), dtype=np.float32) def f_jax(x, y): return jnp.matmul(x, y) res, pullback = jax.vjp(f_jax, x, y) with self.assertRaisesRegex(ValueError, "unexpected JAX type"): pullback(np.ones((2, 4), dtype=np.float32)) def test_jvp_jit_cached(self): """Bug in caching in presence of JVP and JIT.""" def func(x): def inner(y): return y * x # Must have two calls to the inner jit (the second one hits the cache) res1 = api.jit(inner)(4.) res2 = api.jit(inner)(5.) return res1 + res2 self.assertAllClose((45., 9.), api.jvp(func, (5.,), (1.,))) def test_linear_transpose_abstract(self): x = types.SimpleNamespace(shape=(3,), dtype=np.dtype(np.float32)) y = jnp.arange(3, dtype=np.float32) transpose_fun = api.linear_transpose(lambda x: 2 * x, x) z, = transpose_fun(y) self.assertArraysEqual(2 * y, z, check_dtypes=True) def test_linear_transpose_integer(self): f = lambda x: 2 * x transpose = api.linear_transpose(f, 1) actual, = transpose(3) expected = 6 self.assertEqual(actual, expected) def test_linear_transpose_dce(self): # https://github.com/jax-ml/jax/issues/15660 f = jit(lambda x: (2 * x, x > 0)) g = lambda x: f(x)[0] api.linear_transpose(g, 1.)(1.) def test_linear_transpose_error(self): with self.assertRaisesRegex( TypeError, "linear_transpose only supports"): api.linear_transpose(lambda x: 2. * x, 1) transpose_fun = api.linear_transpose(lambda x: [x, x], 1.0) with self.assertRaisesRegex(TypeError, "cotangent tree does not match"): transpose_fun(1.0) transpose_fun = api.linear_transpose(lambda x: jnp.stack([x, x]), 1.0) with self.assertRaisesRegex(TypeError, "cotangent type does not match"): transpose_fun(1.0) transpose_fun = api.linear_transpose(lambda x: 1j * x, 1.0) with self.assertRaisesRegex(TypeError, "cotangent type does not match"): transpose_fun(1.0) transpose_fun = api.linear_transpose(lambda x: x, 1.0) with self.assertRaisesRegex(TypeError, "cotangent type does not match"): transpose_fun(1j) def test_linear_transpose_complex(self): f = lambda x: (1 + 2j) * x transpose = api.linear_transpose(f, 1j) actual, = transpose(3 + 4j) expected = -5 + 10j self.assertEqual(actual, expected) def test_linear_transpose_zeros(self): f = lambda x: x[0] transpose = api.linear_transpose(f, [1., 2.]) actual, = transpose(3.) expected = [3., 0.] self.assertEqual(actual, expected) def test_complex_grad_raises_error(self): self.assertRaises(TypeError, lambda: grad(lambda x: jnp.sin(x))(1 + 2j)) def test_holomorphic_grad(self): out = grad(lambda x: jnp.sin(x), holomorphic=True)(1 + 2j) expected = 2.0327230070196656 - 3.0518977991518j self.assertAllClose(out, expected, check_dtypes=False) def test_nonholomorphic_grad(self): zs = 0.5j * np.arange(5) + np.arange(5) def f(z): return jnp.sum(jnp.cos(jnp.abs(z))) ans = grad(f)(zs) expected = np.array([ 0. 
+ 0.j, -0.80430663 + 0.40215331j, -0.70368982 + 0.35184491j, 0.1886467 - 0.09432335j, 0.86873727 - 0.43436864j]) self.assertAllClose(ans, expected, check_dtypes=False, atol=jtu.default_gradient_tolerance, rtol=jtu.default_gradient_tolerance) def test_complex_output_jacrev_raises_error(self): self.assertRaises(TypeError, lambda: jacrev(lambda x: jnp.sin(x))(1 + 2j)) def test_nonholomorphic_jacrev(self): # code based on https://github.com/jax-ml/jax/issues/603 zs = 0.5j * np.arange(5) + np.arange(5) def f(z): return jnp.cos(jnp.linalg.norm(2 * z)) ans = jacrev(f)(zs) expected = grad(f)(zs) self.assertAllClose(ans, expected) @jax.numpy_dtype_promotion('standard') # Test explicitly exercises implicit dtype promotion. def test_heterogeneous_jacfwd(self): # See https://github.com/jax-ml/jax/issues/7157 # See https://github.com/jax-ml/jax/issues/7780 x = np.array([2.0], dtype=np.float16) y = np.array([3.0], dtype=np.float32) a = (x, y) def f(tup): jtu._check_dtypes_match(tup, a) x, y = tup return x, y, x + y actual = jacfwd(f)(a) desired = ((np.array(1., dtype=np.float16), np.array(0., dtype=np.float16)), (np.array(0., dtype=np.float32), np.array(1., dtype=np.float32)), (np.array(1., dtype=np.float32), np.array(1., dtype=np.float32))) jtu._check_dtypes_match(actual, desired) jtu.check_eq(actual, desired) @jax.numpy_dtype_promotion('standard') # Test explicitly exercises implicit dtype promotion. def test_heterogeneous_jacrev(self): # See https://github.com/jax-ml/jax/issues/7157 # See https://github.com/jax-ml/jax/issues/7780 x = np.array([2.0], dtype=np.float16) y = np.array([3.0], dtype=np.float32) a = (x, y) def f(tup): jtu._check_dtypes_match(tup, a) x, y = tup return x, y, x + y actual = jacrev(f)(a) desired = ((np.array(1., dtype=np.float16), np.array(0., dtype=np.float32)), (np.array(0., dtype=np.float16), np.array(1., dtype=np.float32)), (np.array(1., dtype=np.float16), np.array(1., dtype=np.float32))) jtu._check_dtypes_match(actual, desired) jtu.check_eq(actual, desired) def test_heterogeneous_grad(self): # See https://github.com/jax-ml/jax/issues/7157 x = np.array(1.0+1j) y = np.array(2.0) a = (x, y) def f(tup): jtu._check_dtypes_match(tup, a) x, y = tup return jnp.square(jnp.abs(x)) + y actual = grad(f)(a) desired = (np.array(2 - 2j), np.array(1.)) jtu._check_dtypes_match(actual, desired) jtu.check_eq(actual, desired) def test_complex_input_jacfwd_raises_error(self): self.assertRaises(TypeError, lambda: jacfwd(lambda x: jnp.sin(x))(1 + 2j)) def test_legacy_devicearray_repr(self): dx = device_put(3.) str(dx.item()) # doesn't crash def test_devicearray_repr(self): x = device_put(jnp.zeros(3)) _check_instance(self, x) repr(x) # doesn't crash x = device_put(jnp.full(3, 1 + 1j)) _check_instance(self, x) repr(x) # doesn't crash def test_devicearray_delete(self): x = device_put(1.) x.delete() self.assertRaisesRegex(RuntimeError, "Array has been deleted.", lambda: repr(x)) def test_devicearray_block_until_ready(self): x = device_put(1.) y = x.block_until_ready() # Tests mostly that block_until_ready() does not produce an error. self.assertTrue(y is x) def test_block_until_ready_function(self): # Just tests that we don't error... 
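    # jax.block_until_ready traverses the pytree and blocks on every leaf that
    # implements block_until_ready(); other leaves (like np.ndarray here) are
    # returned unchanged.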
pytree = (device_put(1.), np.ones(3)) pytree = jax.block_until_ready(pytree) self.assertAllClose(pytree[0], jnp.array(1.), check_dtypes=False) self.assertAllClose(pytree[1], np.ones(3), check_dtypes=False) def test_block_until_ready_numpy_arrays(self): pytree = (np.ones(1), np.ones(2)) pytree = jax.block_until_ready(pytree) self.assertAllClose(pytree[0], np.ones(1), check_dtypes=False) self.assertAllClose(pytree[1], np.ones(2), check_dtypes=False) def test_block_until_ready_mixed(self): pytree = (device_put(1.), device_put(2.), np.ones(3), 4) pytree = jax.block_until_ready(pytree) self.assertAllClose(pytree[0], jnp.array(1.), check_dtypes=False) self.assertAllClose(pytree[1], jnp.array(2.), check_dtypes=False) self.assertAllClose(pytree[2], np.ones(3), check_dtypes=False) self.assertEqual(pytree[3], 4) def test_copy_to_host_async(self): x = device_put(1.) y = jax.copy_to_host_async(x) # Tests mostly that copy_to_host_async() does not produce an error. self.assertIs(y, x) self.assertEqual(np.asarray(y), 1.) def test_copy_to_host_async_non_array(self): # Just tests that we don't error... o = object() mock_array = unittest.mock.Mock() mock_array.copy_to_host_async.return_value = None x = [o, 1, 2, 3, mock_array] y = jax.copy_to_host_async(x) self.assertIs(y, x) self.assertEqual(y, [o, 1, 2, 3, mock_array]) mock_array.copy_to_host_async.assert_called_once() def test_copy_to_host_async_does_not_hide_attribute_error(self): x = unittest.mock.Mock() x.copy_to_host_async.side_effect = AttributeError("foo") with self.assertRaisesRegex(AttributeError, "foo"): jax.copy_to_host_async(x) @jtu.thread_unsafe_test() # Weakref destruction seems unpredictable with threads def test_devicearray_weakref_friendly(self): x = device_put(1.) y = weakref.ref(x) self.assertEqual(y(), 1.) del x self.assertIsNone(y()) def test_namedtuple_transparency(self): # See https://github.com/jax-ml/jax/issues/446 Point = collections.namedtuple("Point", ["x", "y"]) def f(pt): return jnp.sqrt(pt.x ** 2 + pt.y ** 2) pt = Point(1., 2.) f(pt) # doesn't crash g = api.grad(f)(pt) self.assertIsInstance(g, Point) f_jit = api.jit(f) self.assertAllClose(f(pt), f_jit(pt), check_dtypes=False) def test_namedtuple_subclass_transparency(self): # See https://github.com/jax-ml/jax/issues/806 Point = collections.namedtuple("Point", ["x", "y"]) class ZeroPoint(Point): def is_zero(self): return (self.x == 0) and (self.y == 0) pt = ZeroPoint(0., 0.) def f(pt): return 0. 
if pt.is_zero() else jnp.sqrt(pt.x ** 2 + pt.y ** 2)

    f(pt)  # doesn't crash
    g = api.grad(f)(pt)
    self.assertIsInstance(g, ZeroPoint)

  @parameterized.parameters(1, 2, 3)
  def test_shape_dtype_struct(self, i):
    s = api.ShapeDtypeStruct(shape=(i, 2, 3), dtype=jnp.float32)
    self.assertEqual(s.shape, (i, 2, 3))
    self.assertEqual(s.dtype, jnp.float32)
    self.assertEqual(s.ndim, 3)
    self.assertEqual(s.size, i * 2 * 3)
    self.assertLen(s, i)
    for f in (str, repr):
      self.assertEqual(
          f(s), f"ShapeDtypeStruct(shape=({i}, 2, 3), dtype=float32)")

  def test_shape_dtype_struct_scalar(self):
    s = api.ShapeDtypeStruct(shape=(), dtype=jnp.float32)
    self.assertEmpty(s.shape)
    self.assertEqual(s.size, 1)
    self.assertEqual(s.ndim, 0)
    with self.assertRaisesRegex(TypeError, "len[(][)] of unsized object"):
      _ = len(s)

  def test_shape_dtype_struct_hash(self):
    s1 = api.ShapeDtypeStruct(shape=(2, 3), dtype=jnp.float32)
    s2 = api.ShapeDtypeStruct(shape=(2, 3), dtype=jnp.float32)
    s3 = api.ShapeDtypeStruct(shape=(2, 4), dtype=jnp.float32)
    self.assertEqual(hash(s1), hash(s2))
    self.assertNotEqual(hash(s1), hash(s3))

  def test_shape_dtype_struct_invalid_shape(self):
    with self.assertRaisesRegex(TypeError, "'int' object is not iterable"):
      api.ShapeDtypeStruct(shape=4, dtype='float32')

  def test_shape_dtype_struct_dtype_none(self):
    with self.assertRaisesRegex(ValueError, "dtype must be specified"):
      api.ShapeDtypeStruct(shape=(), dtype=None)

  def test_eval_shape(self):
    def fun(x, y):
      return jnp.tanh(jnp.dot(x, y) + 3.)

    x = jnp.ones((2, 3))
    y = jnp.ones((3, 4))
    out_shape = api.eval_shape(fun, x, y)

    self.assertEqual(out_shape.shape, (2, 4))

  def test_eval_shape_constants(self):
    def fun():
      x = jnp.ones((2, 3))
      y = jnp.ones((3, 4))
      return jnp.tanh(jnp.dot(x, y) + 3.)

    out_shape = api.eval_shape(fun)

    self.assertEqual(out_shape.shape, (2, 4))

  def test_eval_shape_tuple_unpacking(self):
    def fun(x, y):
      a, b = x
      return a + b + y

    x = (jnp.ones(2), jnp.ones(2))
    y = 3.
    out_shape = api.eval_shape(fun, x, y)

    self.assertEqual(out_shape.shape, (2,))

  def test_eval_shape_tuple_itemgetting(self):
    def fun(x, y):
      return x[0] + x[1] + y

    x = (jnp.ones(2), jnp.ones(2))
    y = 3.
    out_shape = api.eval_shape(fun, x, y)

    self.assertEqual(out_shape.shape, (2,))

  def test_eval_shape_output_dict(self):
    def fun(x, y):
      return {'hi': x[0] + x[1] + y}

    x = (jnp.ones(2), jnp.ones(2))
    y = 3.
    out_shape = api.eval_shape(fun, x, y)
    out_shape = jax.tree.map(np.shape, out_shape)

    self.assertEqual(out_shape, {'hi': (2,)})

  def test_eval_shape_shape_error(self):
    def fun(x, y):
      return jnp.tanh(jnp.dot(x, y) + 3.)

    x = jnp.ones((3, 3))
    y = jnp.ones((4, 4))

    self.assertRaises(TypeError, lambda: api.eval_shape(fun, x, y))

  def test_eval_shape_trace_cache_share(self):
    def f(x):
      return x

    inp = np.arange(8)

    with jtu.count_jit_tracing_cache_miss() as count:
      jax.eval_shape(f, inp)
      jax.jit(f)(inp)
    self.assertEqual(count(), 1)

  @jtu.thread_unsafe_test()  # jit cache misses aren't thread safe
  def test_jit_infer_params_cache(self):
    def f(x):
      return x
    f_jit = jax.jit(f)

    def g(x):
      x = f_jit(x)  # noqa: F821
      x = f_jit(x)  # noqa: F821
      return x
    g_jit = jax.jit(g)

    inp = np.arange(8)
    with jtu.count_jit_infer_params_cache_miss() as count:
      g_jit(inp)
    self.assertDictEqual(count, {f: 1, g: 1})

    cache_size = pjit_lib._infer_params_cached.cache_info().currsize
    del count, f, f_jit, g, g_jit
    # Cache should only keep a weak reference to f and g.
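    # After the del above those weak references are collectable, so the cache
    # size must shrink below the snapshot taken before the deletion.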
self.assertLess(pjit_lib._infer_params_cached.cache_info().currsize, cache_size, msg=pjit_lib._infer_params_cached.cache_keys()) def test_eval_shape_out_shardings(self): s = jax.sharding.SingleDeviceSharding(jax.devices()[0]) @jax.jit(out_shardings=s) def f(x): return x * 2 inp = np.arange(8) out = f.eval_shape(inp) self.assertEqual(out.sharding, s) self.assertEqual(out.shape, (inp * 2).shape) def test_eval_shape_duck_typing(self): def fun(A, b, x): return jnp.dot(A, x) + b class MyArgArray: def __init__(self, shape, dtype): self.shape = shape self.dtype = np.dtype(dtype) A = MyArgArray((3, 4), jnp.float32) b = MyArgArray((1, 5), jnp.float32) x = MyArgArray((4, 5), jnp.float32) out_shape = api.eval_shape(fun, A, b, x) self.assertEqual(out_shape.shape, (3, 5)) def test_eval_shape_duck_typing2(self): # https://github.com/jax-ml/jax/issues/5683 class EasyDict(dict): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.__dict__ = self x = EasyDict(shape=(3,), dtype=np.dtype('float32')) out_shape = api.eval_shape(lambda x: x, x) # doesn't crash self.assertEqual(out_shape.shape, (3,)) def test_issue_871(self): T = jnp.array([[1., 2.], [3., 4.], [5., 6.]]) x = jnp.array([1, 2, 3]) msg = ("linearized function called on tangent values inconsistent with " "the original primal values") y, f_jvp = api.linearize(jnp.sum, x) with self.assertRaisesRegex(ValueError, msg): f_jvp(T) y, f_jvp = api.linearize(api.jit(jnp.sum), x) with self.assertRaisesRegex(ValueError, msg): f_jvp(T) def test_grad_of_int_errors(self): # Errors without allow_int=True dfn = grad(lambda x: x ** 2) self.assertRaisesRegex( TypeError, (r"grad requires real- or complex-valued inputs \(input dtype that is a " r"sub-dtype of np.inexact\), but got int.*."), lambda: dfn(3)) def test_jvp_of_int_identity(self): primals = (1,) tangents = (np.zeros(shape=(), dtype=float0),) _, out = api.jvp(lambda x: x, primals, tangents) self.assertEqual(out, np.zeros(shape=(), dtype=float0)) def test_jvp_of_int_add(self): primals = (2,) tangents = (np.zeros(shape=(), dtype=float0),) _, out_tangent = api.jvp(lambda x: x+1, primals, tangents) self.assertEqual(out_tangent, np.zeros(shape=(), dtype=float0)) def test_jit_jvp_of_int(self): primals = (2,) tangents = (np.zeros(shape=(), dtype=float0),) _, out_tangent = api.jvp(jax.jit(lambda x: x+1), primals, tangents) self.assertEqual(out_tangent, np.zeros(shape=(), dtype=float0)) def test_jvp_of_convert_element_type(self): fun = lambda x: x.astype(np.int32) + 1 primal, tangent = jax.jvp(fun, (2.,), (1.,)) self.assertAllClose(primal, np.int32(3)) self.assertEqual(tangent, np.zeros((), dtype=float0)) def test_vjp_of_int_index(self): primal, fn_vjp = api.vjp(lambda x, i: x[i], np.ones(2)*2, 1) tangent_x, tangent_i = fn_vjp(1.) self.assertEqual(primal, 2.) 
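    # There is no meaningful derivative with respect to the integer index, so
    # its cotangent uses the zero-sized float0 dtype rather than a float zero.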
self.assertAllClose(tangent_x, jnp.array([0., 1.])) self.assertEqual(tangent_i, np.zeros(shape=(), dtype=float0)) def test_vjp_of_int_shapes(self): out, fn_vjp = api.vjp( lambda x: lax.reshape(x, (2, 2)), np.ones((4, 1), dtype=int)) tangent, = fn_vjp(np.zeros((2, 2), dtypes.float0)) self.assertArraysEqual(tangent, np.zeros(shape=(4, 1), dtype=float0)) def test_jit_vjp_of_int(self): primal, fn_vjp = api.vjp(lambda x, y: x+y, 2, 1) tangent_x, tangent_i = jax.jit(fn_vjp)(np.zeros((), dtypes.float0)) self.assertEqual(primal, 3) self.assertEqual(tangent_x, np.zeros(shape=(), dtype=float0)) self.assertEqual(tangent_i, np.zeros(shape=(), dtype=float0)) def test_vjp_of_int_fulllike(self): # Regression test for tangent and cotangent mismatch in convert_element_type # transpose rule wrt a ConstVar f = lax.full_like out, vjp = api.vjp(f, jnp.zeros((2, 2)), 1) self.assertAllClose(out, jnp.ones((2, 2))) tangent_x, tangent_y = vjp(out) self.assertAllClose(tangent_x, jnp.zeros((2, 2))) self.assertEqual(tangent_y, np.zeros(shape=(), dtype=float0)) def test_grad_of_int(self): # Need real-valued output, but testing integer input. out = api.grad(lambda x: x+0., allow_int=True)(1) self.assertEqual(out, np.zeros(shape=(), dtype=float0)) def test_grad_of_bool(self): def cond(pred): return lax.cond(pred, lambda _: 1., lambda _: 2., 1.) value, grd = api.value_and_grad(cond, allow_int=True)(True) self.assertEqual(value, 1.) self.assertEqual(grd, np.zeros(shape=(), dtype=float0)) def test_grad_of_bool_vjp3(self): def cond(pred): return lax.cond(pred, lambda _: 1., lambda _: 2., 1.) value, f_vjp = api.vjp3(cond, True) grd, = f_vjp(1.) self.assertEqual(value, 1.) self.assertEqual(grd, np.zeros(shape=(), dtype=float0)) def test_grad_of_int_index(self): grad_x, grad_i = api.grad(lambda x, i: x[i], argnums=(0, 1), allow_int=True)(np.ones(2), 1) self.assertAllClose(grad_x, jnp.array([0., 1.])) self.assertEqual(grad_i, np.zeros(shape=(), dtype=float0)) def test_jit_grad_of_int(self): grad_f = api.grad(lambda x, i: x[i], argnums=(0, 1), allow_int=True) grad_x, grad_i = jax.jit(grad_f)(np.ones(2), 1) self.assertAllClose(grad_x, jnp.array([0., 1.])) self.assertEqual(grad_i, np.zeros(shape=(), dtype=float0)) def test_float0_reshape(self): # dtype-agnostic operations are supported float0_array = jax.grad(lambda x: jnp.sum(x+0.), allow_int=True)(np.ones((2, 4), dtype=int)) self.assertArraysEqual(float0_array.reshape((4, 2)), np.zeros((4, 2), dtype=float0)) self.assertArraysEqual(float0_array.transpose(), np.zeros((4, 2), dtype=float0)) def test_float0_error(self): # float0 is incompatible with other dtypes float0_array = jax.grad(lambda x: x+0., allow_int=True)(1) self.assertEqual(float0_array.dtype, dtypes.float0) error_text = "float0s do not support any operations by design" with self.assertRaisesRegex(TypeError, error_text): # dispatch via Array.__add__ and hence jax.numpy _ = float0_array + jnp.zeros(()) with self.assertRaisesRegex(TypeError, error_text): # dispatch via lax _ = lax.add(float0_array, jnp.zeros(())) def test_grad_complex_result_errors(self): dfn = grad(lambda x: x ** 2 + 1j) self.assertRaisesRegex( TypeError, (r"grad requires real-valued outputs \(output dtype that is a " r"sub-dtype of np.floating\), but got complex.*"), lambda: dfn(3.)) def test_holomorphic_grad_of_float_errors(self): dfn = grad(lambda x: x ** 2, holomorphic=True) self.assertRaisesRegex( TypeError, (r"grad with holomorphic=True requires inputs with complex dtype, " r"but got float.*"), lambda: dfn(3.)) def 
test_holomorphic_jacrev_of_float_errors(self): dfn = jacrev(lambda x: x ** 2, holomorphic=True) self.assertRaisesRegex( TypeError, (r"jacrev with holomorphic=True requires inputs with complex dtype, " r"but got float.*"), lambda: dfn(3.)) def test_holomorphic_jacfwd_of_float_errors(self): dfn = jacfwd(lambda x: x ** 2, holomorphic=True) self.assertRaisesRegex( TypeError, (r"jacfwd with holomorphic=True requires inputs with complex dtype, " r"but got float.*"), lambda: dfn(3.)) def test_jacfwd_of_complex_errors(self): dfn = jacfwd(lambda x: x ** 2) self.assertRaisesRegex( TypeError, (r"jacfwd requires real-valued inputs \(input dtype that is a " r"sub-dtype of np.floating\), but got complex.*"), lambda: dfn(3. + 1j)) def test_compiler_ir(self): # TODO(phawkins): merge these tests with the `xla_computation` tests. def e(x): return jnp.sin(jnp.cos(x)) hlo = api.jit(e).lower(2.).compiler_ir(dialect="hlo").as_hlo_text() self.assertIn(' cosine', hlo) self.assertIn(' sine', hlo) stablehlo = str(api.jit(e).lower(2.).compiler_ir(dialect="stablehlo")) self.assertIn("stablehlo.cosine", stablehlo) self.assertIn("stablehlo.sine", stablehlo) def test_constants_not_in_lowering_jit(self): if not config.use_simplified_jaxpr_constants.value: self.skipTest("Works only with simplified Jaxpr consts") const_size = 100 const = jax.random.uniform(jax.random.key(0), (const_size,), dtype=np.float32) @jax.jit def f(): return jax.jit(lambda: const + 1.)() with jtu.collect_lowered_jaxprs() as collection: res = f() res = f() self.assertAllClose(const + 1., res) for j, j_module in collection: self.assertNotRegex(str(j_module), f"stablehlo.constant dense.*tensor<{const_size}x") def test_constants_not_in_lowering_scan(self): if not config.use_simplified_jaxpr_constants.value: self.skipTest("Works only with simplified Jaxpr consts") const_size = 100 const = jax.random.uniform(jax.random.key(0), (const_size,), dtype=np.float32) def f(): def scan_body(carry, x): return const, None # Closed over and return return lax.scan(jax.jit(scan_body), jnp.zeros((const_size,), dtype=np.float32), # ignored jnp.zeros((8, const_size), dtype=np.float32)) with jtu.collect_lowered_jaxprs() as collection: res, _ = f() res, _ = f() self.assertAllClose(const, res) for j, j_module in collection: self.assertNotRegex(str(j_module), f"stablehlo.constant dense.*tensor<{const_size}x") def test_constants_not_in_lowering_cond(self): if not config.use_simplified_jaxpr_constants.value: self.skipTest("Works only with simplified Jaxpr consts") const_size = 100 const = jax.random.uniform(jax.random.key(0), (const_size,), dtype=np.float32) def f(x): return lax.cond(x >= 0., jax.jit(lambda: const), lambda: const) with jtu.collect_lowered_jaxprs() as collection: res = f(42.) f(43.) self.assertAllClose(const, res) for j, j_module in collection: self.assertNotRegex(str(j_module), f"stablehlo.constant dense.*tensor<{const_size}x") def test_concurrent_device_get_and_put(self): def f(x): for _ in range(100): y = jax.device_put(x) x = jax.device_get(y) return x xs = [self.rng().randn(i) for i in range(10)] # Make sure JAX backend is initialised on the main thread since some JAX # backends install signal handlers. 
jax.device_put(0) with concurrent.futures.ThreadPoolExecutor() as executor: futures = [executor.submit(partial(f, x)) for x in xs] ys = [f.result() for f in futures] for x, y in zip(xs, ys): self.assertAllClose(x, y) def test_dtype_from_builtin_types(self): for dtype in [bool, int, float, complex]: with self.assertNoWarnings(): x = jnp.array(0, dtype=dtype) self.assertEqual(x.dtype, dtypes.canonicalize_dtype(dtype)) @jtu.sample_product( explicit_x64_dtypes=[ config.ExplicitX64Mode.WARN, config.ExplicitX64Mode.ERROR, config.ExplicitX64Mode.ALLOW, ], enable_x64=[True, False], ) def test_dtype_warning(self, explicit_x64_dtypes, enable_x64): # cf. issue #1230 @config.explicit_x64_dtypes(explicit_x64_dtypes) @config.enable_x64(enable_x64) def check(warn, nowarn): if ( config.enable_x64.value or config.explicit_x64_dtypes.value == config.ExplicitX64Mode.ALLOW ): if config.enable_x64.value: with self.assertNoWarnings(): warn() elif config.explicit_x64_dtypes.value == config.ExplicitX64Mode.WARN: with self.assertWarnsRegex(UserWarning, "Explicitly requested dtype"): warn() else: with self.assertRaisesRegex(ValueError, "Explicitly requested dtype"): warn() with self.assertNoWarnings(): nowarn() check(lambda: jnp.array([1, 2, 3], dtype="float64"), lambda: jnp.array([1, 2, 3], dtype="float32")) check(lambda: jnp.array([1, 2, 3], dtype="float64"), lambda: jnp.array([1, 2, 3], dtype=float)) check(lambda: jnp.ones(3, dtype=np.float64), lambda: jnp.ones(3)) check(lambda: jnp.ones(3, dtype=np.float64), lambda: jnp.ones(3, dtype=float)) check(lambda: jnp.ones_like(3, dtype=np.int64), lambda: jnp.ones_like(3, dtype=np.int32)) check(lambda: jnp.zeros(3, dtype="int64"), lambda: jnp.zeros(3, dtype="int32")) check(lambda: jnp.zeros_like(3, dtype="float64"), lambda: jnp.zeros_like(3, dtype="float32")) check(lambda: jnp.full((2, 3), 1, dtype="int64"), lambda: jnp.full((2, 3), 1)) check(lambda: jnp.ones(3).astype("float64"), lambda: jnp.ones(3).astype("float32")) check(lambda: jnp.eye(3, dtype=np.float64), lambda: jnp.eye(3)) check(lambda: jnp.arange(3, dtype=np.float64), lambda: jnp.arange(3, dtype=np.float32)) check(lambda: jnp.linspace(0, 3, dtype=np.float64), lambda: jnp.linspace(0, 3, dtype=np.float32)) check(lambda: jnp.tri(2, dtype="float64"), lambda: jnp.tri(2, dtype="float32")) check(lambda: jnp.arange(1).astype("float64"), lambda: jnp.arange(1).astype(float)) check(lambda: jnp.arange(1.0).astype("int64"), lambda: jnp.arange(1.0).astype(int)) def test_error_for_invalid_dtype(self): err_str = ("Error interpreting argument to .* as an abstract array. The problematic " r"value is of type .* and was passed to the function at path args\[1\].") with jax.enable_checks(False): with self.assertRaisesRegex(TypeError, err_str): lax.add(jnp.array(7), np.array("hello")) # TODO(dougalm): re-enable checks at the beginning of `bind`. We just # need to know which arguments to a generic primitive are ordinary operands vs functions. # with jax.enable_checks(True): # with self.assertRaises(AssertionError): # lax.add(jnp.array(7), np.array("hello")) def test_vmap_preserves_docstr(self): def superfun(a): """Does things with stuff.""" self.assertRegex(api.vmap(superfun).__doc__, "\n".join([ "Vectorized version of superfun.*", "", "Original documentation:", "", superfun.__doc__, ])) def test_vmap_in_axes_list(self): # https://github.com/jax-ml/jax/issues/2367 dictionary = {'a': 5., 'b': jnp.ones(2)} x = jnp.zeros(3) y = jnp.arange(3.) 
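    # in_axes may be given as a tuple or as an equivalent list; both prefixes
    # below broadcast the dict (None) and map x and y along axis 0.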
def f(dct, x, y): return dct['a'] + dct['b'] + x + y out1 = api.vmap(f, (None, 0, 0))(dictionary, x, y) out2 = api.vmap(f, [None, 0, 0])(dictionary, x, y) self.assertAllClose(out1, out2) def test_vmap_in_axes_non_tuple_error(self): # https://github.com/jax-ml/jax/issues/18548 with self.assertRaisesRegex( TypeError, re.escape("vmap in_axes must be an int, None, or a tuple of entries corresponding " "to the positional arguments passed to the function, but got {'a': 0}.")): jax.vmap(lambda x: x['a'], in_axes={'a': 0}) def test_vmap_in_axes_wrong_length_tuple_error(self): # https://github.com/jax-ml/jax/issues/18548 with self.assertRaisesRegex( ValueError, re.escape("vmap in_axes must be an int, None, or a tuple of entries corresponding to the " "positional arguments passed to the function, but got len(in_axes)=2, len(args)=1")): jax.vmap(lambda x: x['a'], in_axes=(0, {'a': 0}))({'a': jnp.zeros((3, 3))}) def test_vmap_in_axes_tree_prefix_error(self): # https://github.com/jax-ml/jax/issues/795 value_tree = jnp.ones(3) self.assertRaisesRegex( ValueError, "vmap in_axes specification must be a tree prefix of the corresponding " r"value, got specification \(\[0\],\) for value tree " + re.escape(f"{jax.tree.structure((value_tree,))}."), lambda: api.vmap(lambda x: x, in_axes=([0],))(value_tree) ) def test_vmap_in_axes_leaf_types(self): with self.assertRaisesRegex( TypeError, r"vmap in_axes must be an int, None, or .*"): api.vmap(lambda x: x, in_axes=(jnp.array([1., 2.]),))(jnp.array([1., 2.])) def test_vmap_out_axes_leaf_types(self): with self.assertRaisesRegex( TypeError, r"vmap out_axes must be an int, None, or .*"): api.vmap(lambda x: x, out_axes=(jnp.array([1., 2.]),))(jnp.array([1., 2.])) def test_vmap_unbatched_object_passthrough_issue_183(self): # https://github.com/jax-ml/jax/issues/183 fun = lambda f, x: f(x) vfun = api.vmap(fun, (None, 0)) ans = vfun(lambda x: x + 1, jnp.arange(3)) self.assertAllClose(ans, np.arange(1, 4), check_dtypes=False) def test_vmap_mismatched_keyword(self): # https://github.com/jax-ml/jax/issues/10193 @jax.vmap def f(x, y): return x + y with self.assertRaisesRegex( ValueError, "vmap got inconsistent sizes for array axes to be mapped:\n" r" \* one axis had size 1: axis 0 of argument x of type int32\[1\];" "\n" r" \* one axis had size 2: axis 0 of kwargs\['y'\] of type int32\[2\]"): f(jnp.array([1], 'int32'), y=jnp.array([1, 2], 'int32')) def test_vmap_mismatched_axis_sizes_error_message_issue_705(self): # https://github.com/jax-ml/jax/issues/705 def h(a, b): return jnp.sum(a) + jnp.sum(b) X = self.rng().randn(10, 4).astype('float32') U = self.rng().randn(10, 2).astype('float32') with self.assertRaisesRegex( ValueError, "vmap got inconsistent sizes for array axes to be mapped:\n" r" \* one axis had size 10: axis 0 of argument a of type float32\[10,4\];""\n" r" \* one axis had size 2: axis 1 of argument b of type float32\[10,2\]"): api.vmap(h, in_axes=(0, 1))(X, U) with self.assertRaisesRegex( ValueError, "vmap got inconsistent sizes for array axes to be mapped:\n" r" \* most axes \(2 of them\) had size 10, e.g. axis 0 of argument x " r"of type float32\[10,4\];" "\n" r" \* one axis had size 2: axis 1 of argument y of type float32\[10,2\]"): api.vmap(lambda x, y, z: None, in_axes=(0, 1, 0))(X, U, X) with self.assertRaisesRegex( ValueError, "vmap got inconsistent sizes for array axes to be mapped:\n" r" \* most axes \(2 of them\) had size 2, e.g. 
axis 1 of argument b\[0\] " r"of type float32\[10,2\];" "\n" r" \* one axis had size 10: axis 0 of argument a of type float32\[10,4\]"): api.vmap(h, in_axes=(0, 1))(X, [U, U]) error = (r"vmap was requested to map its argument along axis 0, which " r"implies that its rank should be at least 1, but is only 0 " r"\(its shape is \(\)\)") with self.assertRaisesRegex(ValueError, error): # The mapped inputs cannot be scalars api.vmap(lambda x: x)(1.) with self.assertRaisesRegex( ValueError, "vmap must have at least one non-None value in in_axes"): # If the output is mapped, there must be a non-None in_axes api.vmap(lambda x: x, in_axes=None)(jnp.array([1., 2.])) error = (r"vmap was requested to map its argument along axis 1, which " r"implies that its rank should be at least 2, but is only 1 " r"\(its shape is \(2,\)\)") with self.assertRaisesRegex(ValueError, error): api.vmap(lambda x: x, in_axes=1)(jnp.array([1., 2.])) # Error is: TypeError: only integer scalar arrays can be converted to a scalar index with self.assertRaisesRegex( ValueError, "vmap out_axes specification must be a tree prefix of the " "corresponding value.*"): api.vmap(lambda x: x, in_axes=0, out_axes=(2, 3))(jnp.array([1., 2.])) with self.assertRaisesRegex( ValueError, r"vmap has mapped output \(axis_name='foo'\) but out_axes is None"): # If the output is mapped (user-named axis), then there must be some # out_axes specified. api.vmap(lambda x: x, out_axes=None, axis_name="foo")(jnp.array([1., 2.])) with self.assertRaisesRegex( ValueError, "at vmap out_axes"): # If the output is mapped (unnamed axis), then there must be some out_axes # specified. api.vmap(lambda x: x, out_axes=None)(jnp.array([1., 2.])) def test_vmap_structured_in_axes(self): A, B, C, D = 2, 3, 4, 5 K = 6 # batch size x = np.ones((K, A, B)) # batch axis in different locations y = np.ones((B, K, C)) z = np.ones((C, D, K)) def foo(tree_arg): x, (y, z) = tree_arg return jnp.dot(x, jnp.dot(y, z)) tree = (x, (y, z)) vfoo = api.vmap(foo, in_axes=((0, (1, 2)),)) self.assertEqual(vfoo(tree).shape, (6, 2, 5)) Point = collections.namedtuple("Point", ["x", "y"]) tree = (x, Point(y, z)) vfoo = api.vmap(foo, in_axes=((0, Point(1, 2)),)) self.assertEqual(vfoo(tree).shape, (6, 2, 5)) def foo(tree_arg): x, dct = tree_arg y, z = dct['a'], dct['b'] return jnp.dot(x, jnp.dot(y, z)) tree = (x, {'a': y, 'b': z}) vfoo = api.vmap(foo, in_axes=((0, {'a': 1, 'b': 2}),)) self.assertEqual(vfoo(tree).shape, (6, 2, 5)) tree = (x, collections.OrderedDict([('a', y), ('b', z)])) vfoo = api.vmap( foo, in_axes=((0, collections.OrderedDict([('a', 1), ('b', 2)])),)) self.assertEqual(vfoo(tree).shape, (6, 2, 5)) def test_vmap_in_axes_bool_error(self): # https://github.com/jax-ml/jax/issues/6372 with self.assertRaisesRegex(TypeError, "must be an int"): api.vmap(lambda x: x, in_axes=False)(jnp.zeros(3)) def test_pmap_in_axes_bool_error(self): # https://github.com/jax-ml/jax/issues/6372 with self.assertRaisesRegex(TypeError, "must be an int"): api.pmap(lambda x: x, in_axes=False)(jnp.zeros(1)) def test_vmap_empty_arguments(self): with self.assertRaisesRegex( ValueError, "vmap wrapped function must be passed at least one argument " r"containing an array, got empty \*args=\(\{\},\) and \*\*kwargs=\{\}"): api.vmap(lambda x: x)({}) def test_pmap_empty_arguments(self): with self.assertRaisesRegex( ValueError, "pmap wrapped function must be passed at least one argument " r"containing an array, got empty \*args=\(\{\},\) and \*\*kwargs=\{\}"): api.pmap(lambda x: x)({}) @jtu.thread_unsafe_test() # counting 
compilations isn't thread-safe def test_pmap_global_cache(self): def f(x, y): return x, y x = np.ones((1, 1, 1), dtype=np.float32) # All defaults with jtu.assert_num_jit_and_pmap_compilations(1): for _ in range(2): api.pmap(f)(x, x) # With axis name with jtu.assert_num_jit_and_pmap_compilations(1): for _ in range(2): api.pmap(f, 'i')(x, x) # With in_axes and out_axes for x_in, y_in, x_out, y_out in it.product(*((0, 1, 2) for _ in range(4))): with jtu.assert_num_jit_and_pmap_compilations(1): for _ in range(2): api.pmap(f, 'i', in_axes=(x_in, y_in), out_axes=(x_out, y_out))(x, x) # Forward-mode AD on the outside with jtu.assert_num_jit_and_pmap_compilations(1): for _ in range(2): api.jvp(api.pmap(f), (x, x), (x, x)) # Reverse-mode AD on the outside. One compilation for forward, one for backward. with jtu.assert_num_jit_and_pmap_compilations(2): for _ in range(2): api.vjp(api.pmap(f), x, x)[1]((x, x)) def test_device_array_repr(self): rep = jnp.ones(()) + 1. self.assertStartsWith(repr(rep), 'Array') def test_device_array_hash(self): rep = jnp.ones((1,)) + 1. _check_instance(self, rep) self.assertNotIsInstance(rep, collections.abc.Hashable) with self.assertRaisesRegex(TypeError, 'unhashable type'): hash(rep) def test_grad_without_enough_args_error_message(self): # https://github.com/jax-ml/jax/issues/1696 def f(x, y): return x + y df = api.grad(f, argnums=0) self.assertRaisesRegex( TypeError, "differentiating with respect to argnums=0 requires at least 1 " "positional arguments to be passed by the caller, but got only 0 " "positional arguments.", lambda: partial(df, x=0.)(y=1.)) def test_grad_object_array_error(self): x = np.array([1, 2, 3], dtype=object) with self.assertRaisesRegex(TypeError, ".*is not a valid JAX type"): jax.grad(lambda x: x)(x) @jtu.thread_unsafe_test() # logging isn't thread-safe def test_jit_compilation_time_logging(self): @api.jit def f(x): return x * 2 # make sure some initial warnings & cached operations already happen. f(jnp.ones(2)) prev_level = logging.get_verbosity() try: logging.set_verbosity('DEBUG') with self.assertLogs(level=logging.DEBUG) as l: f(2.) finally: logging.set_verbosity(prev_level) self.assertGreaterEqual(len(l.output), 3) # 3 lines self.assertTrue(any('Finished tracing' in line for line in l.output)) self.assertTrue(any('Compiling jit(f)' in line for line in l.output)) self.assertTrue(any('Finished XLA compilation' in line for line in l.output)) def test_grad_of_jit_compilation_caching(self): if not hasattr(self, "assertLogs"): raise unittest.SkipTest("test requires assertLogs (python 3)") # make sure some initial warnings & cached operations already happen. api.grad(api.jit(lambda x: x))(1.0) @api.jit def f(x): return jnp.sin(x) prev_level = logging.get_verbosity() try: logging.set_verbosity('DEBUG') with self.assertLogs(level=logging.DEBUG) as l: ans1 = api.grad(f)(2.) ans2 = api.grad(f)(3.) finally: logging.set_verbosity(prev_level) self.assertGreaterEqual(len(l.output), 2 * 3) # one for fwd, one for bwd, 3 lines each self.assertAllClose(ans1, np.cos(2.), check_dtypes=False) self.assertAllClose(ans2, np.cos(3.), check_dtypes=False) def test_grad_of_jit_compilation_caching2(self): # Like the above test, but instead of logging use our compile counters. # make sure some initial convert element type operations are pre-cached. api.grad(api.jit(lambda x: x))(1.0) @api.jit def f(x): return jnp.sin(x) with jtu.count_jit_and_pmap_lowerings() as count: # noqa: F841 _ = jax.grad(f)(3.) 
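      # Differentiating through jit lowers twice: once for the forward
      # (primal) computation and once for the transposed backward pass.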
self.assertEqual(count(), 2) # one for fwd, one for bwd with jtu.count_jit_and_pmap_lowerings() as count: # noqa: F841 _ = jax.grad(f)(3.) _ = jax.grad(f)(4.) self.assertEqual(count(), 0) # cache hits on both fwd and bwd def test_grad_does_not_unflatten_tree_with_none(self): # https://github.com/jax-ml/jax/issues/7546 class CustomNode(list): pass def unflatten(unused_aux_data, children): self.assertIsNotNone(children[0]) return CustomNode(children) tree_util.register_pytree_node(CustomNode, lambda x: (x, None), unflatten) grad(lambda x: x[0])(CustomNode([0.])) def test_trivial_computations(self): x = jnp.array([1, 2, 3]) y = api.jit(lambda x: x)(x) self.assertNotEqual(x.unsafe_buffer_pointer(), y.unsafe_buffer_pointer()) z1, z2 = api.jit(lambda x: (x, x))(x) self.assertNotEqual(z1.unsafe_buffer_pointer(), z2.unsafe_buffer_pointer()) x1, x2 = jnp.array([1, 2]), jnp.array([2, 3]) z1, z2, z3 = api.jit(lambda x, y: (y, 1, x))(x1, x2) self.assertNotEqual(z1.unsafe_buffer_pointer(), x2.unsafe_buffer_pointer()) self.assertNotEqual(z3.unsafe_buffer_pointer(), x1.unsafe_buffer_pointer()) self.assertEqual(z2, 1) @jtu.thread_unsafe_test() # monkey-patching mlir.jaxpr_subcomp isn't thread-safe def test_nested_jit_hoisting(self): @api.jit def f(x, y): z = 2 * x return y + z, 3 @api.jit def g(x): return f(2, x) mlir_jaxpr_subcomp = mlir.jaxpr_subcomp jaxprs = [] def mlir_jaxpr_subcomp_and_collect(c, jaxpr, *args, **kwargs): jaxprs.append(jaxpr) return mlir_jaxpr_subcomp(c, jaxpr, *args, **kwargs) try: mlir.jaxpr_subcomp = mlir_jaxpr_subcomp_and_collect ans = g(3) finally: mlir.jaxpr_subcomp = mlir_jaxpr_subcomp self.assertEqual(ans, (7, 3)) self.assertLen(jaxprs, 2) outer_jaxpr, inner_jaxpr = jaxprs self.assertLen(outer_jaxpr.eqns, 1) prim_name = 'jit' jaxpr_param = 'jaxpr' self.assertEqual(outer_jaxpr.eqns[0].primitive.name, f'{prim_name}') subjaxpr_1 = outer_jaxpr.eqns[0].params[f"{jaxpr_param}"] self.assertEqual(str(subjaxpr_1), str(inner_jaxpr)) self.assertLen(inner_jaxpr.eqns, 2) self.assertEqual(inner_jaxpr.eqns[-2].primitive.name, 'mul') self.assertEqual(inner_jaxpr.eqns[-1].primitive.name, 'add') @jtu.thread_unsafe_test() # count_primitive_compiles isn't thread-safe def test_primitive_compilation_cache(self): with jtu.count_primitive_compiles() as count: lax.add(1, 2) lax.add(2, 3) self.assertEqual(count(), 1) def test_arange_jit(self): # see https://github.com/jax-ml/jax/issues/553 def fun(x): r = jnp.arange(x.shape[0])[x] return r jit(fun)(jnp.array([0, 1, 2], dtype=jnp.int32)) # doesn't crash def helper_save_tracer(self, x): self._saved_tracer = x return x def test_escaped_tracers_different_top_level_traces(self): api.jit(self.helper_save_tracer)(0.) with self.assertRaisesRegex( UnexpectedTracerError, "Encountered an unexpected tracer"): api.jit(lambda x: self._saved_tracer)(0.) def test_escaped_tracers_cant_lift_sublevels(self): api.jit(self.helper_save_tracer)(0.) with self.assertRaisesRegex( UnexpectedTracerError, re.compile( "Encountered an unexpected tracer", re.DOTALL)): api.jit(lambda x: x)(self._saved_tracer) @unittest.skip # TODO(dougalm): rethink what this should do under stackless def test_escaped_tracers_tracer_from_higher_level(self): api.grad(self.helper_save_tracer)(0.) with self.assertRaises(UnexpectedTracerError): api.grad(lambda x: x)(self._saved_tracer) def test_escaped_tracers_incompatible_sublevel(self): def func1(x): api.jit(self.helper_save_tracer)(0.) 
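      # helper_save_tracer stashed the tracer from the inner jit trace on
      # self; using it below, outside that trace, must raise
      # UnexpectedTracerError.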
# Use the tracer return x + self._saved_tracer with self.assertRaisesRegex( UnexpectedTracerError, re.compile("Encountered an unexpected tracer", re.DOTALL)): api.jit(func1)(2.) def test_escaped_tracers_cant_lift(self): def func1(x): api.grad(self.helper_save_tracer)(0.) return x + self._saved_tracer with self.assertRaisesRegex( UnexpectedTracerError, re.compile("unexpected tracer")): api.grad(func1)(2.) def test_escaped_tracers_not_among_input_tracers(self): def func1(x): api.grad(self.helper_save_tracer)(x) # Use the tracer return x + self._saved_tracer msg = "Encountered an unexpected tracer" with self.assertRaisesRegex( UnexpectedTracerError, re.compile(msg, re.DOTALL)): api.jit(func1)(2.0) def test_escaped_tracer_omnistaging(self): count = 1 @jit def f(): nonlocal count count = jnp.add(count, 1) f() # leaked a tracer! but currently undetected def f(x, c): jnp.add(count, 1) return None, None @jit def g(): lax.scan(f, None, None, length=2) with self.assertRaisesRegex(UnexpectedTracerError, "was created on line"): g() def test_escaped_tracer_omnistaging_top_trace(self): count = 1 def f(_, __): nonlocal count count = jnp.add(count, 1) return None, None lax.scan(f, None, None, length=2) # leaked a tracer! (of level 1!) with self.assertRaisesRegex(UnexpectedTracerError, "was created on line"): # The following call will try and raise the ones array to the count tracer # level, which is no longer live. jax.jit(jnp.add)(jnp.ones(()), count) def test_escaped_tracer_shape_dtype(self): with self.assertRaisesRegex(core.UnexpectedTracerError, r"int32\[4,3\]"): jax.jit(self.helper_save_tracer)(jnp.ones((4, 3), dtype=jnp.int32)) _ = self._saved_tracer+1 def test_pmap_static_kwarg_error_message(self): # https://github.com/jax-ml/jax/issues/3007 def f(a, b): return a + b g = jax.pmap(f, static_broadcasted_argnums=(1,)) msg = (r"pmapped function has static_broadcasted_argnums=\(1,\) but was " r"called with only 1 positional argument. All static broadcasted " r"arguments must be passed positionally.") with self.assertRaisesRegex(ValueError, msg): g(jnp.ones((1, 1)), b=1) def test_vmap_unmapped_last(self): @partial(jax.vmap, out_axes=-1) def f(x): return np.zeros((2,)) f(np.zeros((5,))) # TODO(jakevdp): re-enable this if possible. @unittest.skipIf(True, "broken by convert_element_type change.") def test_xla_constant_dedup(self): y = np.array([7, 14], dtype=np.float32) def f(x): return x + y + y x = np.array([1, 2], dtype=np.float32) hlo_lines = jax.jit(f).lower(x).as_text('hlo').split('\n') hlo_lines = {s.strip() for s in hlo_lines} self.assertIn('constant.1 = f32[2]{0} constant({7, 14})', hlo_lines) self.assertNotIn('constant.2 = f32[2]{0} constant({7, 14})', hlo_lines) def test_eval_context(self): @jit def f(): with core.eval_context(): assert jnp.add(1, 1) == 2 f() # doesn't crash def test_linearize_aux(self): def fn(x): return x * 2 - 3, x > 0 f, lin_fn, aux = api.linearize(fn, 3.4, has_aux=True) tang = lin_fn(5.) self.assertAllClose(f, 3.8) self.assertAllClose(tang, 10.) self.assertEqual(aux, True) def test_linearize_aval_error(self): # https://github.com/jax-ml/jax/issues/4622 f = lambda x: x # these should not error _, f_jvp = api.linearize(f, 1.) f_jvp(1.) _, f_jvp = api.linearize(f, np.ones(2, np.int32)) f_jvp(np.zeros(2, float0)) # these should error _, f_jvp = api.linearize(f, 1.) 
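    # A Python int is not a valid tangent for a float primal (and an int array
    # is not valid where a float0 tangent is required), so both calls fail.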
with self.assertRaisesRegex(ValueError, "tangent values inconsistent"): f_jvp(1) _, f_jvp = api.linearize(f, np.ones(2, np.int32)) with self.assertRaisesRegex(ValueError, "tangent values inconsistent"): f_jvp(np.ones(2, np.int32)) def test_grad_of_token_consuming_primitive(self): # https://github.com/jax-ml/jax/issues/5463 tokentest_p = core.Primitive("tokentest") tokentest_p.def_impl(partial(dispatch.apply_primitive, tokentest_p)) tokentest_p.def_abstract_eval(lambda x, y: x) mlir.register_lowering(tokentest_p, lambda ctx, x, y: [x]) ad.defjvp(tokentest_p, (lambda g, x, token: x), None) token = jax.lax.create_token(123) arr = jnp.ones((3, 2)) res, vjp_fun = jax.vjp(lambda x: tokentest_p.bind(x, token), arr) # Should not crash. vjp_fun(arr) def test_jit_returning_token(self): x = jax.jit(jax.lax.create_token)(1.0) self.assertIsInstance(x, core.Token) def test_jit_capturing_token(self): tok = jax.lax.create_token() _, y = jax.jit(lambda x: (x + 2, tok))(7) self.assertIsInstance(y, core.Token) def test_leak_checker_catches_a_jit_leak(self): with jax.checking_leaks(): lst = [] @jit def f(x): lst.append(x) return x with self.assertRaisesRegex(Exception, r"Leaked"): f(3) def test_leak_checker_catches_a_pmap_leak(self): with jax.checking_leaks(): lst = [] @api.pmap def f(x): lst.append(x) return x with self.assertRaisesRegex(Exception, r"Leaked"): f(np.ones(1)) @unittest.skip('TODO(dougalm): re-enable once we fix tests that were showing tracer leaks') def test_leak_checker_catches_a_grad_leak(self): with jax.checking_leaks(): lst = [] def f(x): lst.append(x) return x with self.assertRaisesRegex(Exception, r"Leaked trace"): api.grad(f)(3.) def test_leak_checker_avoids_false_positives(self): with jax.checking_leaks(): api.vmap(lambda x: x)(np.arange(3.)) # doesn't crash @jit def f(x): return x f(3) # doesn't crash api.vmap(f)(np.arange(3)) # doesn't crash api.grad(f)(3.) 
# doesn't crash @api.pmap def f(x): return x f(np.ones(1)) # doesn't crash api.vmap(f)(np.ones((1, 1))) # doesn't crash def test_leak_checker_catches_a_scan_leak(self): with jax.checking_leaks(): lst = [] to_scan = lambda c, x: (lst.append(c) or jnp.sin(c), None) with self.assertRaisesRegex(Exception, r"Leaked trace"): lax.scan(to_scan, 1., np.arange(3.)) def test_leak_checker_avoids_false_positives_scan(self): with jax.checking_leaks(): to_scan = lambda c, x: (jnp.sin(c), None) lax.scan(to_scan, 1., np.arange(3.)) # doesn't crash def test_leak_checker_avoids_false_positives_scan_jvp(self): with jax.checking_leaks(): to_scan = lambda c, x: (c, None) def f(x): lax.scan(to_scan, x, None, length=1) api.jvp(f, (3.,), (1.,)) # doesn't crash def test_leak_checker_avoids_false_positives_scan_vmap(self): with jax.checking_leaks(): to_scan = lambda c, _: (1., None) @api.vmap def f(x): lax.scan(to_scan, x, None, length=1) f(np.arange(5.)) # doesn't crash def test_leak_checker_avoids_false_positives_scan_vmap_2(self): with jax.checking_leaks(): to_scan = lambda c, _: (c, None) @api.vmap def f(x): lax.scan(to_scan, x, None, length=1) f(np.arange(5.)) # doesn't crash def test_leak_checker_catches_a_sublevel_leak(self): with jax.checking_leaks(): @jit def f(x): lst = [] @jit def g(x): lst.append(x) return x x = g(x) return x msg = r'Leaked trace DynamicJaxprTrace' with self.assertRaisesRegex(Exception, f"{msg}"): f(3) def test_leak_checker_avoids_false_positive_custom_jvp(self): # see https://github.com/jax-ml/jax/issues/5636 with jax.checking_leaks(): @jax.custom_jvp def t(y): return y def t_jvp(p, t): pass t.defjvp(t_jvp) @jit def s(y): return t(y) s(3) # doesn't crash def test_leak_checker_internal_error(self): def apply_fn(inp): fn = jax.checkpoint(lambda x: jax.nn.relu(1.0 * x)) return jax.vjp(fn, inp) with jax.check_tracer_leaks(): jax.jit(apply_fn)(1.0) # don't crash def test_leak_checker_reference_chain(self): class A: def __init__(self, dct): self.dct = dct a = A({}) x = jnp.arange(3) def sketch(x): def foo(): return x a.dct['hi'] = [foo] return x # TODO(mattjj): full test msg below fails (harmlessly) on CI, investigate msg = ( r"This BatchTracer with object id [0-9]+ was created on line:\n" r" .*\n" r"<BatchTracer [0-9]+> is referred to by" ) # msg = ( # r"This BatchTracer with object id [0-9]+ was created on line:\n" # r" .*\n" # r"<BatchTracer [0-9]+> is referred to by <function [0-9]+> \(foo\) " # r"closed-over variable x\n" # r"<function [0-9]+> is referred to by <list [0-9]+>\[0\]\n" # r"<list [0-9]+> is referred to by <dict [0-9]+>\['hi'\]\n" # r"<dict [0-9]+> is referred to by <A [0-9]+>\.dct\n" # ) with jax.check_tracer_leaks(): with self.assertRaisesRegex(Exception, msg): jax.vmap(sketch)(x) def test_default_backend(self): first_local_device = jax.local_devices()[0] self.assertEqual(first_local_device.platform, jax.default_backend()) @jtu.skip_on_devices("cpu") def test_default_device(self): system_default_devices = jnp.add(1, 1).devices() self.assertLen(system_default_devices, 1) test_device = jax.devices("cpu")[-1] # Sanity check creating array using system default device self.assertEqual(jnp.ones(1).devices(), system_default_devices) # Create array with default_device set with jax.default_device(test_device): # Hits cached primitive path self.assertEqual(jnp.ones(1).devices(), {test_device}) # Uncached self.assertEqual(jnp.zeros((1, 2)).devices(), {test_device}) # Test that we can reset to system default device self.assertEqual(jnp.ones(1).devices(), system_default_devices) def 
test_dunder_jax_array(self): # https://github.com/jax-ml/jax/pull/4725 @partial(jax.tree_util.register_dataclass, data_fields=['jax_val'], meta_fields=[]) class AlexArray: def __init__(self, jax_val): self.jax_val = jax_val def __jax_array__(self): return self.jax_val dtype = property(lambda self: self.jax_val.dtype) shape = property(lambda self: self.jax_val.shape) x = AlexArray(jnp.array([1., 2., 3.])) y = jax.jit(lambda x: x)(x) self.assertIsInstance(x, AlexArray) self.assertArraysEqual(jnp.asarray(x), jnp.asarray(y)) y = jnp.sin(x) self.assertAllClose(y, jnp.sin(jnp.array([1., 2., 3.]))) y = api.grad(api.jit(lambda x: jnp.sin(x).sum()))(x) self.assertIsInstance(y, AlexArray) self.assertAllClose(jnp.asarray(y), jnp.cos(jnp.array([1., 2., 3.]))) x = AlexArray(jnp.array([[1., 2., 3.]])) y = api.pmap(jnp.sin)(x) self.assertAllClose(y, jnp.sin(jnp.array([[1., 2., 3.]]))) x = jnp.array(1) a = AlexArray(x) for f in [jnp.isscalar, jnp.size, jnp.shape, jnp.dtype]: self.assertEqual(f(x), f(a)) x = AlexArray(jnp.array(1)) a1 = jnp.array(x) self.assertAllClose(1, a1) a2 = jnp.array(((x, x), [x, x])) self.assertAllClose(np.array(((1, 1), (1, 1))), a2) def test_dunder_jax_array_warnings(self): class AlexArray: def __init__(self, jax_val): self.jax_val = jax_val def __jax_array__(self): return self.jax_val f = jax.jit(lambda x: x) a = AlexArray(jnp.arange(4)) msg = ( r"Triggering __jax_array__\(\) during abstractification is no longer" r" supported." ) with self.assertRaisesRegex(ValueError, msg): f(a) @jtu.thread_unsafe_test() # count_jit_tracing_cache_miss() isn't thread-safe def test_eval_shape_weak_type(self): # https://github.com/jax-ml/jax/issues/23302 arr = jax.numpy.array(1) def f(x): return jax.numpy.array(x) with jtu.count_jit_tracing_cache_miss() as count: jax.eval_shape(f, 1) out = jax.eval_shape(f, 1) self.assertEqual(count(), 1) self.assertTrue(out.weak_type) self.assertEqual(out.weak_type, arr.weak_type) def test_dunder_jax_array_bug(self): @jax.tree_util.register_pytree_node_class class A: x: jax.Array def __init__(self, x: jax.Array): self.x = x def tree_flatten(self): return ((self.x,), None) @classmethod def tree_unflatten(cls, _, children): x, = children return cls(x) def __jax_array__(self) -> jax.Array: return self.x ndim = property(operator.attrgetter('x.ndim')) dtype = property(operator.attrgetter('x.dtype')) shape = property(operator.attrgetter('x.shape')) a = A(jnp.ones((3, 3))) jnp.asarray(a) # don't crash f = jax.jit(jnp.matmul) f(a, a) # don't crash def test_constant_handler_mro(self): # https://github.com/jax-ml/jax/issues/6129 class Foo(enum.IntEnum): bar = 1 @api.pmap def f(_): return Foo.bar ans = f(jnp.arange(1)) # doesn't crash expected = jnp.arange(1) + 1 self.assertAllClose(ans, expected, check_dtypes=False) @parameterized.named_parameters([ {"testcase_name": f"{dtype.__name__}", "dtype": dtype} for dtype in jtu.dtypes.all]) def test_constant_handlers(self, dtype): # https://github.com/jax-ml/jax/issues/9380 @jax.jit def f(): return jnp.exp(dtype(0)) f() # doesn't error def test_vmap_make_jaxpr_close_over_tracer(self): def run(inp): def f(x, y): return x + y g = lambda x: f(x, inp) jaxpr = jax.make_jaxpr(g)(1) return jax.core.eval_jaxpr(jaxpr.jaxpr, jaxpr.consts, 1) jax.vmap(run)(jnp.arange(2)) # doesn't crash def test_large_python_ints(self): with self.assertRaises(OverflowError): jnp.multiply(2 ** 100, 3.) 
out = lax.convert_element_type(2 ** 100, jnp.float32) # doesn't crash self.assertArraysEqual(out, np.float32(2 ** 100)) def test_dot_precision_context_manager(self): x = jnp.zeros((2, 2)) with jax.default_matmul_precision(None): jnp.dot(x, x) # doesn't crash jaxpr = jax.make_jaxpr(jnp.dot)(x, x) # self.assertIn('precision=None', str(jaxpr)) self.assertIs(jaxpr.jaxpr.eqns[0].params['precision'], None) with jax.default_matmul_precision("bfloat16"): x @ x # doesn't crash jaxpr = jax.make_jaxpr(op.matmul)(x, x) self.assertIn('Precision.DEFAULT', str(jaxpr)) with jax.default_matmul_precision("tensorfloat32"): jnp.dot(x, x) # doesn't crash jaxpr = jax.make_jaxpr(jnp.dot)(x, x) self.assertIn('Precision.HIGH', str(jaxpr)) with jax.default_matmul_precision("float32"): jnp.dot(x, x) # doesn't crash jaxpr = jax.make_jaxpr(jnp.dot)(x, x) self.assertIn('Precision.HIGHEST', str(jaxpr)) dot = partial(jnp.dot, precision=lax.Precision.HIGHEST) with jax.default_matmul_precision("tensorfloat32"): dot(x, x) # doesn't crash jaxpr = jax.make_jaxpr(dot)(x, x) self.assertIn('Precision.HIGHEST', str(jaxpr)) def test_dot_precision_flag(self): x = jnp.zeros((2, 2)) with config.default_matmul_precision("tensorfloat32"): jnp.dot(x, x) # doesn't crash jaxpr = jax.make_jaxpr(jnp.dot)(x, x) self.assertIn('Precision.HIGH', str(jaxpr)) with config.default_matmul_precision("tensorfloat32"): jnp.dot(x, x) # doesn't crash jaxpr = jax.make_jaxpr(jnp.dot)(x, x) self.assertIn('Precision.HIGH', str(jaxpr)) @jtu.thread_unsafe_test() # Updating global configs is not thread-safe. def test_dot_precision_forces_retrace(self): num_traces = 0 def g(x): nonlocal num_traces num_traces += 1 return jnp.dot(x, x) def f_cond(x): return lax.cond(True, g, g, x) @jax.jit def f_jit(x): nonlocal num_traces num_traces += 1 return jnp.dot(x, x) for f in [f_jit, f_cond]: # Use _read() to read the flag value rather than threadlocal value. precision = config._read("jax_default_matmul_precision") try: num_traces = 0 x = jnp.zeros((2, 2)) f(x) self.assertEqual(num_traces, 1) f(x) self.assertEqual(num_traces, 1) with jax.default_matmul_precision("tensorfloat32"): f(x) self.assertEqual(num_traces, 2) config.update("jax_default_matmul_precision", "float32") f(x) self.assertGreaterEqual(num_traces, 2) nt = num_traces f(x) self.assertEqual(num_traces, nt + 1) f(x) self.assertEqual(num_traces, nt + 1) finally: config.update("jax_default_matmul_precision", precision) def test_backward_pass_ref_dropping(self): refs = [] @jax.custom_vjp def f(x): return x def f_fwd(x): return x, None def f_rev(_, g): assert len(refs) != 2 or refs[0]() is None zero = np.zeros(()) refs.append(weakref.ref(zero)) return (zero,) f.defvjp(f_fwd, f_rev) api.grad(lambda x: f(f(f(x))))(1.) def test_jit_inline(self): @api.jit(inline=False) def f(x): return x * 2 jaxpr = api.make_jaxpr(f)(3) self.assertIn('jit', str(jaxpr)) @api.jit(inline=True) def f(x): return x * 2 jaxpr = api.make_jaxpr(f)(3) self.assertNotIn('jit', str(jaxpr)) # Repro for https://github.com/jax-ml/jax/issues/7229. def test_compute_with_large_transfer(self): def f(x, delta): return x + jnp.asarray(delta, x.dtype) # A large and potentially unaligned array to trigger non-zero-copy and # async device array copy. 
xs = self.rng().uniform(0., 1., size=(10, 131, 111, 3)).astype(np.float32) for x in xs: delta = self.rng().uniform(-0.5, 0.5, size=()) jitted_f = api.jit(f) np.testing.assert_allclose(jitted_f(x, delta), f(x, delta)) def test_vjp_fun_jit(self): # test that the function returned by vjp can be returned # from and passed to jitted functions f = lambda x: 2. * x @jit(static_argnums=0) def linearize_vjp(f, x): _, vjp_fun = api.vjp(f, x) return vjp_fun linearized = linearize_vjp(f, 1.) actual = jit(lambda f, x: f(x))(linearized, 3.) expected = (6.,) self.assertEqual(actual, expected) def test_linearize_fun_jit(self): # test that the function returned by linearize can be returned # from and passed to jitted functions f = lambda x: 2. * x @jit(static_argnums=0) def linearize(f, x): _, jvp_fun = api.linearize(f, x) return jvp_fun linearized = linearize(f, 1.) actual = jit(lambda f, x: f(x))(linearized, 3.) expected = 6. self.assertEqual(actual, expected) def test_linear_transpose_fun_jit(self): # test that the function returned by linear_transpose can be returned # from and passed to jitted functions f = lambda x: 2. * x @jit(static_argnums=0) def transpose(f, x): return api.linear_transpose(f, x) transposed = transpose(f, 1.) actual = jit(lambda f, x: f(x))(transposed, 3.) expected = (6.,) self.assertEqual(actual, expected) def test_lax_real_empty(self): out = jax.lax.empty((2, 2), dtype=jnp.float32) self.assertEqual(out.shape, (2, 2)) self.assertEqual(out.dtype, jnp.float32) @jtu.run_on_devices('gpu', 'tpu') def test_lax_empty_vmap(self): inp = np.arange(8, dtype=jnp.int32).reshape(4, 2) def f(x): return jax.lax.empty(x.shape, x.dtype) f = jax.jit(jax.vmap(f)) f(inp) # doesn't crash lowered_text = f.lower(inp).as_text() self.assertIn('@AllocateBuffer() : () -> tensor<4x2xi32>', lowered_text) def test_leaked_tracer_issue_7613(self): # from https://github.com/jax-ml/jax/issues/7613 import numpy.random as npr x = jnp.ones((1, 50)) A = jnp.array(npr.randn(50, 50), dtype=x.dtype) @jax.jit def loss(A, x): h = jax.nn.sigmoid(A * x) return jnp.sum((h - x)**2) with jax.checking_leaks(): _ = jax.grad(loss)(A, x) # doesn't crash def test_vmap_caching(self): # https://github.com/jax-ml/jax/issues/7621 f = lambda x: jnp.square(x).mean() jf = jax.jit(f) x = jax.random.uniform(jax.random.key(0), shape=(8, 4)) with jtu.count_jit_and_pmap_lowerings() as count: # noqa: F841 for _ in range(5): jax.hessian(jf)(x).block_until_ready() n = count() # The exact number of compilations may vary depending on the number of # jit decorators in the function above, but it should not grow after an # initial warmup phase. for _ in range(5): jax.hessian(jf)(x).block_until_ready() self.assertEqual(count(), n) def test_jnp_array_doesnt_device_put(self): with jtu.count_device_put() as count: api.make_jaxpr(lambda: jnp.array(3))() self.assertEqual(count(), 0) @jtu.thread_unsafe_test() # Updating global configs is not thread-safe. def test_rank_promotion_forces_retrace(self): num_traces = 0 def g(x): nonlocal num_traces num_traces += 1 return x + x def f_cond(x): return lax.cond(True, g, g, x) @jax.jit def f_jit(x): nonlocal num_traces num_traces += 1 return x + x for f in [f_jit, f_cond]: # Use _read() to read the flag value rather than threadlocal value. 
allow_promotion = jax.numpy_rank_promotion.get_global() try: config.update("jax_numpy_rank_promotion", "allow") num_traces = 0 @jax.jit def f(x): nonlocal num_traces num_traces += 1 return x + x x = jnp.zeros((2, 2)) f(x) self.assertEqual(num_traces, 1) f(x) self.assertEqual(num_traces, 1) with jax.numpy_rank_promotion("warn"): f(x) self.assertEqual(num_traces, 2) config.update("jax_numpy_rank_promotion", "raise") f(x) self.assertGreaterEqual(num_traces, 2) nt = num_traces f(x) self.assertEqual(num_traces, nt) f(x) self.assertEqual(num_traces, nt) finally: config.update("jax_numpy_rank_promotion", allow_promotion) def test_frexp_sharded(self): mesh = jtu.create_mesh((1,), 'x') x = jax.device_put(np.ones(8), jax.NamedSharding(mesh, jax.P('x'))) jax.jacrev(lambda x: jnp.frexp(x)[0])(x) # doesn't crash def test_grad_negative_argnums(self): def f(x, y): return x.sum() * y.sum() x = jax.random.normal(jax.random.key(0), (16, 16)) y = jax.random.normal(jax.random.key(1), (16, 16)) g = jax.grad(f, argnums=-1) g(x, y) # doesn't crash def test_jit_negative_static_argnums(self): @jax.jit(static_argnums=-1) def g(x, y): assert isinstance(y, int) return x * y for i in range(3): # Loop verifies we exercise both Python and C++ dispatch self.assertEqual(2 * i, g(2, i), msg=i) def test_make_jaxpr_static_argnums_order(self): # https://github.com/jax-ml/jax/issues/28065 def f(a, b, c): x = a + c y = b * c z = x - y return z for static_argnums in [(1, 0), (0, 1)]: val = jax.jit(f, static_argnums=static_argnums)(1, 2, 3) self.assertEqual(val, -2) jaxpr = jax.make_jaxpr(f, static_argnums=static_argnums)(1, 2, 3) self.assertEqual(jaxpr.eqns[0].invars[0].val, 1) self.assertEqual(jaxpr.eqns[1].invars[0].val, 2) def test_fastpath_cache_confusion(self): # https://github.com/jax-ml/jax/issues/12542 @jax.jit def a(x): return () @jax.jit def b(x): return a(x) @jax.jit def g(x): return x, x @jax.jit def h(x): return g(x) jaxpr = jax.make_jaxpr(h)(7) core.eval_jaxpr(jaxpr.jaxpr, jaxpr.consts, 7) b(8) # don't crash def test_fastpath_cache_confusion2(self): @jax.jit def a(): # note nullary function, still staged out though return () @jax.jit def b(x): return a() @jax.jit def g(x): return x, x @jax.jit def h(x): return g(x) jaxpr = jax.make_jaxpr(h)(7) core.eval_jaxpr(jaxpr.jaxpr, jaxpr.consts, 7) b(8) # don't crash def test_vjp_multiple_arguments_error_message(self): # https://github.com/jax-ml/jax/issues/13099 def foo(x): return (x, x) _, f_vjp = jax.vjp(foo, 1.0) with self.assertRaisesRegex(TypeError, "applied to foo"): f_vjp(1.0, 1.0) def test_make_jaxpr_weakref(self): class Foo(NamedTuple): x: int def __call__(self, y): return self.x + y jax.make_jaxpr(Foo(1))(3) # don't crash def test_make_jaxpr_name(self): def foo(x, y, z): return x + y + z jfoo = jax.make_jaxpr(foo) self.assertEqual(jfoo.__name__, f"make_jaxpr({foo.__name__})") self.assertEqual(jfoo.__qualname__, f"make_jaxpr({foo.__qualname__})") self.assertEqual(jfoo.__module__, "jax") @jtu.thread_unsafe_test() # Concurrent cache eviction means we may retrace def test_inner_jit_function_retracing(self): # https://github.com/jax-ml/jax/issues/7155 inner_count = outer_count = 0 @jax.jit def inner_fn(state): nonlocal inner_count inner_count += 1 return 2*state @jax.jit def outer_fn(x): nonlocal outer_count outer_count += 1 old_x = x for _ in range(10): x = inner_fn(x) x = x + old_x return x state = jnp.arange(5, dtype=jnp.uint32) outer_fn(state) outer_fn(state) self.assertEqual(inner_count, 1) self.assertEqual(outer_count, 1) inner_fn(state) 
self.assertEqual(inner_count, 1) # not retraced when top-level def test_grad_conj_symbolic_zeros(self): # https://github.com/jax-ml/jax/issues/15400 f = lambda x: jax.jit(lambda x, y: (x, y))(x, jax.lax.conj(x))[0] out = jax.grad(f)(3.0) # doesn't crash self.assertAllClose(out, 1., check_dtypes=False) @jtu.thread_unsafe_test() def test_cache_clear_pmap(self): if config.pmap_shmap_merge.value: self.skipTest("Already tested by pjit tests under pmap_shmap_merge=True.") @jax.pmap def f(i): return i * 2 f(np.arange(1, dtype='float32')).block_until_ready() self.assertEqual(f._cache_size, 1) jax.clear_caches() self.assertEqual(f._cache_size, 0) def test_invalid_value_device_put(self): with self.assertRaisesRegex(ValueError, r".*Received invalid value.*"): jax.device_put(jnp.arange(8), 'cpu') def test_num_cpu_devices_called_after_initialization(self): jax.devices() with self.assertRaisesRegex( RuntimeError, "jax_num_cpu_devices config should be updated before backends are " "initialized"): config.update('jax_num_cpu_devices', 2) @jtu.thread_unsafe_test() # logging is not thread-safe def test_clear_cache(self): @jax.jit def add(x): return x * 2 inp = jnp.arange(8) with config.log_compiles(True): with self.assertLogs(level='WARNING') as cm: add(inp) jax.clear_caches() add(inp) tracing_add_count = 0 for m in cm.output: if 'Finished tracing + transforming add for pjit' in m: tracing_add_count += 1 self.assertEqual(tracing_add_count, 2) @jtu.thread_unsafe_test() # logging is not thread-safe def test_cache_miss_explanations_skip_internals(self): if is_persistent_cache_enabled(): self.skipTest('With persistent cache, we see the cache misses') with config.explain_cache_misses(True): with self.assertNoLogs(level='WARNING'): for i in range(2): jnp.sin(jnp.arange(i + 1, dtype=np.float32)) @jtu.thread_unsafe_test() # logging is not thread-safe def test_cache_miss_explanations_first_miss(self): @jax.jit def f(x): return x x = jnp.float32(1.) expected_log_len = 1 if not is_persistent_cache_enabled() else 3 # print on first miss, not on hit with config.explain_cache_misses(True): with self.assertLogs(level="WARNING") as cm: f(x) f(x) self.assertLen(cm.output, expected_log_len) msg = cm.output[0] self.assertIn("TRACING CACHE MISS", msg) self.assertIn("never seen function", msg) self.assertNotIn("explanation unavailable!", msg) @jtu.thread_unsafe_test() # logging is not thread-safe def test_cache_miss_explanations_other_in_tree(self): @jax.jit def f(*args, **kwargs): return args[0] f(0., 1., y=(2., 2.1)) with config.explain_cache_misses(True): with self.assertLogs(level="WARNING") as cm: # Same number of leaves but different trees f(0., (1., 1.1), y=2.) self.assertLen(cm.output, 1) msg = cm.output[0] self.assertIn("different input pytree", msg) self.assertNotIn("explanation unavailable!", msg) @jtu.thread_unsafe_test() # logging is not thread-safe def test_cache_miss_explanations_other_arg_passed_as_kwarg(self): @jax.jit def f(x, y): return jnp.sin(x) + y f(0., 1.) # kwarg change with config.explain_cache_misses(True): with self.assertLogs(level="WARNING") as cm: f(0., y=1.) 
self.assertLen(cm.output, 1) msg = cm.output[0] self.assertIn("different number of args and kwargs, but same total number", msg) self.assertIn("now 1 args and kwargs with keys ['y']", msg) self.assertIn("before 1 args and kwargs with keys []", msg) self.assertNotIn("explanation unavailable!", msg) @jtu.thread_unsafe_test() # logging is not thread-safe def test_cache_miss_explanations_other_static_argnums(self): @jax.jit(static_argnums=(0, 2)) def f(x, y, z): return y f(1., 2., "foo") with config.explain_cache_misses(True): with self.assertLogs(level="WARNING") as cm: f(1., 2., "bar") self.assertLen(cm.output, 1) msg = cm.output[0] self.assertIn("different value of static args", msg) self.assertIn("now 1.0, 'bar' and before 1.0, 'foo'", msg) self.assertNotIn("explanation unavailable!", msg) @jtu.thread_unsafe_test() # logging is not thread-safe def test_cache_miss_explanations_other_static_argnames(self): @jax.jit(static_argnames="foo") def f(*, foo): return 1 f(foo="foo") with config.explain_cache_misses(True): with self.assertLogs(level="WARNING") as cm: f(foo="bar") self.assertLen(cm.output, 1) msg = cm.output[0] self.assertIn("different value of static kwargs", msg) self.assertIn("now {foo: 'bar'} and before {foo: 'foo'}", msg) self.assertNotIn('explanation unavailable!', msg) @jtu.thread_unsafe_test() # logging is not thread-safe def test_cache_miss_explanations_other_dtype(self): @jax.jit def f(x, y): return x f(np.float32(0), np.float32(1)) with config.explain_cache_misses(True): with self.assertLogs(level='WARNING') as cm: f(np.float32(0), np.int32(1)) self.assertLen(cm.output, 1) msg = cm.output[0] self.assertIn("different input types", msg) self.assertIn("at y, now i32[] and before f32[]", msg) self.assertNotIn("explanation unavailable!", msg) @jtu.thread_unsafe_test() # logging is not thread-safe def test_cache_miss_explanations_other_weak_type(self): @jax.jit def f(x, y): return jnp.sin(x) + y y = jnp.arange(4, dtype="float32") f(jnp.float32(0.), y) # weak type change (assuming no x64) if config.enable_x64.value: self.skipTest("Work only for 32 bit mode") with config.explain_cache_misses(True): with self.assertLogs(level="WARNING") as cm: f(0., y) expected_log_len = 1 if not is_persistent_cache_enabled() else 3 self.assertLen(cm.output, expected_log_len) msg = cm.output[0] self.assertIn("different input types", msg) self.assertIn("at x, now f32[]{weak_type=True} and before f32[]{weak_type=False}", msg) self.assertIn("https://docs.jax.dev/en/latest/type_promotion.html#weak-types", msg) self.assertNotIn("explanation unavailable!", msg) @jtu.thread_unsafe_test() # logging is not thread-safe def test_cache_miss_explanations_other_shape(self): @jax.jit def f(x, y): return jnp.sin(x) + y f(np.float32(0), np.arange(1, dtype=np.float32)) with config.explain_cache_misses(True): with self.assertLogs(level='WARNING') as cm: f(np.float32(0), np.arange(2, dtype=np.float32)) expected_log_len = 1 if not is_persistent_cache_enabled() else 3 self.assertLen(cm.output, expected_log_len) msg = cm.output[0] self.assertIn("different input types", msg) self.assertIn("at y, now f32[2] and before f32[1]", msg) self.assertNotIn("explanation unavailable!", msg) @jtu.thread_unsafe_test() # logging is not thread-safe def test_cache_miss_explanations_other_shape_explain_closest(self): @jax.jit def f(x): return x f(np.ones((1, 2), dtype=np.float32)) f(np.ones((10, 20, 30), dtype=np.float32)) f(np.ones((1, 2, 3), dtype=np.float32)) with config.explain_cache_misses(True): with self.assertLogs(level='WARNING') 
as cm: f(np.ones((10, 2, 30), dtype=np.float32)) expected_log_len = 1 if not is_persistent_cache_enabled() else 3 self.assertLen(cm.output, expected_log_len) msg = cm.output[0] self.assertIn("key with different input types", msg) self.assertIn("at x, now f32[10,2,30] and before f32[10,20,30]", msg) self.assertNotIn("explanation unavailable!", msg) @jtu.thread_unsafe_test() # logging is not thread-safe def test_cache_miss_explanations_other_tracing_config(self): @jax.jit def f(x, y): return jnp.sin(x) + y f(0., 1.) # tracing config change with config.explain_cache_misses(True): with self.assertLogs(level="WARNING") as cm: with jax.numpy_rank_promotion("warn"): with jax.default_matmul_precision("high"): f(0., 1.) expected_log_len = 1 if not is_persistent_cache_enabled() else 3 self.assertTrue(1 <= len(cm.output) <= expected_log_len) msg = cm.output[0] self.assertIn("key with different tracing context", msg) self.assertIn("now warn and before", msg) self.assertIn("now high and before", msg) self.assertNotIn("explanation unavailable!", msg) @jtu.thread_unsafe_test() # logging is not thread-safe def test_cache_miss_explanations_multiple_changes(self): @jax.jit def f(x): return jnp.sin(x) call_1 = f(np.arange(4, dtype=np.float32)) with jax.numpy_rank_promotion("warn"): call_2 = f(np.arange(8, dtype=np.float32)) with config.explain_cache_misses(True): with self.assertLogs(level='WARNING') as cm: # Matches call_2 in shape but not context, and call_1 in context but # not in shape. f(np.arange(8, dtype=np.float32)) self.assertLen(cm.output, 1) msg = cm.output[0] self.assertIn("key with different input types", msg) self.assertIn("at x, now f32[8] and before f32[4]", msg) self.assertIn("key with different tracing context", msg) self.assertNotIn("explanation unavailable!", msg) @jtu.thread_unsafe_test() # logging is not thread-safe def test_cache_miss_explanations_new_function_in_loop(self): @jax.jit def f(x, y): return jnp.sin(x) * y['hi'] x = jnp.float32(1.) with config.explain_cache_misses(True): with self.assertLogs(level='WARNING') as cm: for _ in range(2): jax.jit(lambda x: 2 * x)(3) if is_persistent_cache_enabled(): # number of warnings depends on the backend self.assertTrue(4 <= len(cm.output) <= 6) msg = cm.output[3] self.assertIn('another function defined on the same line', msg) else: self.assertLen(cm.output, 2) _, msg = cm.output self.assertIn('another function defined on the same line', msg) def test_cache_miss_explanations_no_source_info(self): # ``operator.add`` is a built-in function and does not have source info. 
with config.explain_cache_misses(True): jax.jit(operator.add)(42, 24) def test_cache_miss_explanations_are_thread_safe(self): @jax.jit def f(i): return jnp.sum(i) saw_exception = False def thread(i0): nonlocal saw_exception try: for i in range(i0, 100, 10): if saw_exception: break with config.explain_cache_misses(True): f(jnp.zeros(i)) except Exception: saw_exception = True raise t = [threading.Thread(target=thread, args=(i,)) for i in range(10)] for i in t: i.start() for i in t: i.join() self.assertFalse(saw_exception) @parameterized.named_parameters([ {"testcase_name": f"{np.dtype(dtype)}", "dtype": dtype} for dtype in jtu.dtypes.custom_floats]) def test_jit_custom_floats(self, dtype): f = lambda x: x + 1 args_maker = lambda: [jnp.ones((), dtype=dtype)] self._CompileAndCheck(f, args_maker) def test_jvp_asarray_returns_array(self): # https://github.com/jax-ml/jax/issues/15676 p, t = jax.jvp(jax.numpy.asarray, (1.,), (2.,)) _check_instance(self, p) _check_instance(self, t) def test_scalar_conversion_errors(self): array_int = jnp.arange(10, dtype=int) scalar_float = jnp.float32(0) scalar_int = jnp.int32(0) empty_int = jnp.arange(0, dtype='int32') array1_float = jnp.arange(1, dtype='float32') assertIntError = partial(self.assertRaisesRegex, TypeError, "Only integer scalar arrays can be converted to a scalar index.") for func in [operator.index, hex, oct]: assertIntError(func, array_int) assertIntError(func, empty_int) assertIntError(func, scalar_float) assertIntError(jax.jit(func), array_int) assertIntError(jax.jit(func), empty_int) assertIntError(jax.jit(func), scalar_float) self.assertRaises(TracerIntegerConversionError, jax.jit(func), scalar_int) _ = func(scalar_int) # no error assertScalarError = partial(self.assertRaisesRegex, TypeError, "Only scalar arrays can be converted to Python scalars.") for func in [int, float, complex]: assertScalarError(func, array_int) assertScalarError(jax.jit(func), array_int) self.assertRaises(ConcretizationTypeError, jax.jit(func), scalar_int) _ = func(scalar_int) # no error assertScalarError(func, array1_float) assertEmptyBoolError = partial( self.assertRaisesRegex, ValueError, "The truth value of an empty array is ambiguous.") assertEmptyBoolError(bool, empty_int) assertEmptyBoolError(jax.jit(bool), empty_int) assertBoolError = partial( self.assertRaisesRegex, ValueError, "The truth value of an array with more than one element is ambiguous.") assertBoolError(bool, array_int) assertBoolError(jax.jit(bool), array_int) self.assertRaises(TracerBoolConversionError, jax.jit(bool), scalar_int) _ = bool(scalar_int) # no error @jtu.run_on_devices('cpu') def test_asarray_no_copy_np(self): x = np.random.uniform(0, 1, (1000, 2000)).astype("float32") out = jnp.asarray(x) x_ptr = x.__array_interface__["data"][0] # This is because the PJRT CPU client shares memory if it is 16-byte aligned. if (x_ptr & 15) != 0: self.assertTrue(np.shares_memory(out, x)) def test_mesh_creation_error_message(self): with self.assertRaisesRegex(ValueError, "ndim of its first argument"): jax.sharding.Mesh(jax.devices(), ("x", "y")) @jtu.thread_unsafe_test() # weakref gc doesn't seem predictable def test_jit_boundmethod_reference_cycle(self): class A: def __init__(self): self._foo = jax.jit(self.foo) def foo(self): pass a = weakref.ref(A()) gc.collect() assert a() is None def test_forwarding_bug(self): # Test for issue #20267. def f(x): @jax.jit def inner(a, x): return a, jnp.exp(x) return inner(0.0, x)[0] jax.grad(f)(1.) 
# don't crash @parameterized.parameters(it.product(range(4), repeat=3)) @jtu.run_on_devices("cpu") def test_jit_forwarding_correctness(self, seed, num_input_fwd, num_output_fwd): num_args = 3 rng = np.random.RandomState(seed) in_perm = rng.permutation(num_args) out_perm = rng.permutation(num_args) @jax.jit def f(inputs): inputs = [inputs[i] for i in in_perm] outputs = inputs[:num_input_fwd] + [ jnp.exp(inputs[i]) if i < num_output_fwd else jnp.sin(inputs[i]) for i in range(num_args - num_input_fwd)] return [outputs[i] for i in out_perm] jtu.check_grads(f, (list(jnp.arange(float(num_args))),), order=1, modes=['rev'], atol=1e-3, rtol=1e-3) @jtu.run_on_devices("cpu") def test_inner_jit_forwarding_happens(self): if not config.dynamic_shapes.value: self.skipTest("Only works for dynamic shapes") jaxpr = jax.make_jaxpr(lambda: jax.jit(lambda x: x)(3))() self.assertLen(jaxpr.jaxpr.outvars, 1) self.assertIsInstance(jaxpr.jaxpr.outvars[0], core.Literal) self.assertEqual(jaxpr.jaxpr.outvars[0].val, 3) @parameterized.parameters(range(8)) @jtu.run_on_devices("cpu") def test_inner_jit_forwarding_correctness(self, num_input_fwd): if not config.dynamic_shapes.value: self.skipTest("Only works for dynamic shapes") num_args = 8 rng = np.random.RandomState(0) @jax.jit def f(inputs): inputs = [inputs[i] for i in rng.permutation(num_args)] outputs = (inputs[:num_input_fwd] + [jnp.sin(inputs[i]) for i in range(num_args - num_input_fwd)]) return [outputs[i] for i in rng.permutation(num_args)] f2 = jax.jit(f) inputs = list(jnp.arange(float(num_args))) expected = f(inputs) ans = f2(inputs) for a, b in zip(ans, expected): self.assertAllClose(a, b) @unittest.skip # TODO(dougalm): figure out with Matt what to do with this feature def test_inner_jit_forwarded_consts_stay_const(self): out = jax.jit(lambda: int(jax.jit(lambda x: x)(3)))() # don't crash self.assertEqual(out, 3) def test_lowering_platform_aot(self): @jax.jit def f(x): return x * 2 f.trace(jnp.arange(8)).lower(lowering_platforms=('tpu',)) # doesn't crash def test_no_double_dots_in_error_message(self): @jax.jit def f(x): return 1 if x > 0 else 0 with self.assertRaisesRegex(TracerBoolConversionError, r"with shape bool\[\]\.[^\.]"): f(0) def test_inlined_literals_with_error(self): @jax.jit def f(): @jax.jit(inline=True) def g(): return jnp.sin(1.) if g() > 0: return 1. return 0. 
with self.assertRaisesRegex(TracerBoolConversionError, "Attempted boolean"): f() def test_inline_return_twice(self): # https://github.com/jax-ml/jax/issues/22944 @jax.jit def add_one(x: int) -> int: return x + 1 def add_one_and_dupe(x: int) -> tuple[int, int]: y = add_one(x) return (y, y) jit_add_one_dupe = jax.jit(add_one_and_dupe, inline=True) jax.eval_shape(jit_add_one_dupe, 0) # don't crash def test_use_direct_linearize(self): def check_invariant_to_use_direct_linearize(f): with config.use_direct_linearize(False): ans1 = f() with config.use_direct_linearize(True): ans2 = f() self.assertEqual(ans1, ans2) def sin_of_sin(x): return lax.sin(jax.jit(lax.sin)(x)) check_invariant_to_use_direct_linearize(lambda: jax.grad(sin_of_sin)(1.0)) def test_deferred_primal_with_direct_linearize(self): def my_sin_lin(nzs, x): nz, = nzs return (my_sin_p.bind(x, accuracy=None), nz, x, lambda x, t: lax.mul(t, lax.cos(x))) my_sin_p = core.Primitive("my_sin_p") my_sin_p.def_impl(lax.sin) my_sin_p.def_abstract_eval(lambda x: x) ad_internal.primitive_linearizations[my_sin_p] = my_sin_lin with config.use_direct_linearize(True): jax.grad(my_sin_p.bind)(1.0) # doesn't crash def test_ensure_compile_time_eval_no_leaks(self): # https://github.com/jax-ml/jax/issues/25847 with jax.ensure_compile_time_eval(): jnp.linalg.solve(jnp.eye(3), jnp.ones(3)) # doesn't crash def test_returned_non_jaxtype(self): class TestEnum(enum.Enum): A = enum.auto() @jax.tree_util.register_dataclass @dataclasses.dataclass class TestClass3: test_enum_field: TestEnum = dataclasses.field(metadata=dict(static=True)) test_data_field: int def test_jax_function(test_class: TestClass3) -> TestEnum: return test_class.test_enum_field jitted_test_function = jax.jit(test_jax_function) with self.assertRaisesRegex(TypeError, "returned a value of type"): jitted_test_function( TestClass3( test_data_field=1, test_enum_field=TestEnum.A, ) ) def test_make_jaxpr_deduplicates_consts(self): # We don't promise this behavior in the public API, but we've had it for a # long time. This test checks we don't *unintentionally* break it. # We are careful to choose a type that would not be canonicalized here, # otherwise the jnp.array(...) calls will induce constant duplication. c = np.ones(3).astype(np.float32) def find_constants(jaxpr: core.ClosedJaxpr): for j in it.chain([jaxpr], core.subjaxprs(jaxpr)): for eq in j.eqns: for inv in eq.invars: if isinstance(inv, core.Literal) and np.shape(inv.val): yield inv.val def uniq(lst): def key(a): if isinstance(a, literals.TypedNdArray): return np.asarray(a) else: return a return {id(key(v)): v for v in lst}.values() @jax.make_jaxpr def f(): return jnp.array(c), jnp.sum(c), c, jnp.array(c), jnp.sum(c), c if config.use_simplified_jaxpr_constants.value: consts = uniq(find_constants(f())) else: consts = f().consts self.assertLen(consts, 1) d = np.zeros(3) # TODO(mattjj,phawkins): we broke this on purpose, as it probably isn't # load-bearing (see above comment). If we wanted to fix it, we might share # the constid cache across jaxpr traces, or we might hash on const value. # @jax.make_jaxpr # def g(): # return jax.lax.cond(True, # lambda: (c, jnp.sum(c), c), # lambda: (c, jnp.sum(d), d)) # if config.use_simplified_jaxpr_constants.value: # consts = uniq(find_constants(g())) # else: # consts = g().consts # self.assertLen(consts, 2) # TODO(mattjj,dougalm): this test was flakey on CI; figure out how to enable? 
# @jtu.run_on_devices('cpu') # def test_implicit_dce_linearize(self): # def foo(x): # const = np.zeros((300,)) # x * const # r = weakref.ref(const) # del const # assert r() is None, "oops, the constant wasn't DCE'd" # return x # with config.use_direct_linearize(True): # _ = jax.grad(foo)(3.) @jtu.run_on_devices('cpu') def test_implicit_dce_linearize_jaxpr(self): def foo(x): const = np.zeros((300,)) x * const r = weakref.ref(const) del const return x with config.use_direct_linearize(True): _, f_vjp = jax.vjp(foo, 3.) self.assertNotIn('mul', str(f_vjp)) @jtu.thread_unsafe_test() # make_user_context() is not thread-safe at the moment def test_user_trace_context_hooks(self): my_config = jax.make_user_context() @jax.jit def f(x): return x with jtu.count_jit_tracing_cache_miss() as tracing_count: f(1.) with my_config(2): f(1.) with my_config(3): f(1.) with my_config(4): f(1.) self.assertEqual(tracing_count(), 4) # TODO(mattjj,dougalm): re-enable if we set auto_dce=True by default # @jtu.run_on_devices('cpu') # def test_implicit_dce(self): # @api.jit # def foo(x): # const = np.zeros((300,)) # r = weakref.ref(const) # jnp.sin(const) + const # del const # assert r() is None, "oops, the constant wasn't DCE'd" # return x + x # foo(1.0) def test_dce_sink_vmap(self): def f(x): jax.lax.dce_sink(x) return x jax.vmap(f)(jnp.arange(3.)) # don't crash
APITest
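A minimal, editorial sketch (not part of the test file above) of the __jax_array__ protocol that test_dunder_jax_array exercises, assuming a jax version where eager jnp calls still honor the protocol; note the companion test shows that triggering it under jit during abstractification now raises:

import jax.numpy as jnp

class Wrapped:
  def __init__(self, val):
    self.val = val  # the underlying jax.Array

  def __jax_array__(self):
    # eager jnp functions call this to unwrap the object into an array
    return self.val

w = Wrapped(jnp.array([1.0, 2.0, 3.0]))
print(jnp.sin(w))  # same result as jnp.sin(jnp.array([1., 2., 3.]))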
python
getsentry__sentry
src/sentry/integrations/repository/base.py
{ "start": 233, "end": 786 }
class ____(NotificationMessageValidationError):
    """
    Raised when a NotificationMessage has an error alongside a message identifier.

    A NotificationMessage can only have a message identifier when it is successful;
    if error details exist, the NotificationMessage was NOT successful and therefore
    must not carry a message identifier.
    """

    message = (
        "cannot create a new notification message with message identifier when an error exists"
    )


@dataclass(frozen=True)
MessageIdentifierWithErrorValidationError
python
walkccc__LeetCode
solutions/243. Shortest Word Distance/243.py
{ "start": 0, "end": 523 }
class ____:
  def shortestDistance(
      self,
      wordsDict: list[str],
      word1: str,
      word2: str,
  ) -> int:
    ans = len(wordsDict)
    index1 = -1  # wordsDict[index1] == word1
    index2 = -1  # wordsDict[index2] == word2

    for i, word in enumerate(wordsDict):
      if word == word1:
        index1 = i
        if index2 != -1:
          ans = min(ans, index1 - index2)
      if word == word2:
        index2 = i
        if index1 != -1:
          ans = min(ans, index2 - index1)

    return ans
Solution
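A quick usage sketch for the one-pass Solution above: index1 and index2 track the most recent occurrence of each word, and the answer tightens whenever either index advances.

words = ["practice", "makes", "perfect", "coding", "makes"]
print(Solution().shortestDistance(words, "coding", "practice"))  # 3
print(Solution().shortestDistance(words, "makes", "coding"))     # 1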
python
google__python-fire
fire/test_components.py
{ "start": 8601, "end": 9616 }
class ____: """A class with a static method and a class method.""" CLASS_STATE = 1 def __init__(self, instance_state): self.instance_state = instance_state @staticmethod def static_fn(args): return args @classmethod def class_fn(cls, args): return args + cls.CLASS_STATE def function_with_varargs(arg1, arg2, arg3=1, *varargs): # pylint: disable=keyword-arg-before-vararg """Function with varargs. Args: arg1: Position arg docstring. arg2: Position arg docstring. arg3: Flags docstring. *varargs: Accepts unlimited positional args. Returns: The unlimited positional args. """ del arg1, arg2, arg3 # Unused. return varargs def function_with_keyword_arguments(arg1, arg2=3, **kwargs): del arg2 # Unused. return arg1, kwargs def fn_with_code_in_docstring(): """This has code in the docstring. Example: x = fn_with_code_in_docstring() indentation_matters = True Returns: True. """ return True
HasStaticAndClassMethods
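A quick, editorial sanity check of how the helpers above bind their arguments:

print(function_with_varargs(1, 2, 3, 4, 5))         # (4, 5): extra positionals land in *varargs
print(function_with_keyword_arguments(1, extra=2))  # (1, {'extra': 2})
print(HasStaticAndClassMethods.class_fn(1))         # 2: adds CLASS_STATE to args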
python
getsentry__sentry
src/sentry/sentry_metrics/querying/visitors/query_expression.py
{ "start": 11235, "end": 16941 }
class ____(QueryExpressionVisitor[tuple[UnitMetadata, QueryExpression]]): """ Visitor that recursively transforms the `QueryExpression` components to have the same unit. """ UNITLESS_AGGREGATES = {"count", "count_unique"} def __init__(self): self._unit_family = None def _visit_formula(self, formula: Formula) -> tuple[UnitMetadata, QueryExpression]: last_metadata: WithUnit | None = None future_units = [] has_all_timeseries_params = True has_all_futures = True parameters = [] for index, parameter in enumerate(formula.parameters): if not isinstance(parameter, Timeseries): has_all_timeseries_params = False unit_metadata, query_expression = self.visit(parameter) if isinstance(unit_metadata, WithNoUnit): return unit_metadata, formula elif isinstance(unit_metadata, WithFutureUnit): future_units.append((index, query_expression)) parameters.append(query_expression) elif isinstance(unit_metadata, WithUnit): has_all_futures = False if ( last_metadata is not None and unit_metadata.unit_family != last_metadata.unit_family ): return WithNoUnit(), formula last_metadata = unit_metadata parameters.append(query_expression) # If we have only future unit types, we know that the formula will be a future itself. # TODO: we might want to execute in-memory the formulas with all scalars to avoid making bigger queries. if has_all_futures: return WithFutureUnit(), formula # If we have no metadata here, it means that all parameters of the formula can't be normalized. if last_metadata is None: return WithNoUnit(), formula has_coefficient_operators = formula.function_name in COEFFICIENT_OPERATORS # If we have all timeseries as parameters of a formula and the function belongs to `*` or `/` we will # not perform any normalization. if has_coefficient_operators and has_all_timeseries_params: return WithNoUnit(), formula # We convert all scalars in the formula using the last seen scaling factor. Since we are always working with # two operands, this means that if we found at least one numeric scalar, the scaling factor will belong to the # other operand. # It's important to note that we are not doing any scalar normalization if we have a coefficient operator, since # we don't want to scale both operands. # Example: # a * 2 with a scaling factor of 1000 must become a * 1000 * 2 and not a * 1000 * 2 * 1000 if not has_coefficient_operators and future_units and last_metadata.unit is not None: for index, future_unit in future_units: parameters[index] = self._normalize_future_units(last_metadata.unit, future_unit) # We want to find the reference unit of the unit family in the formula. formula_reference_unit = get_reference_unit_for_unit_family(last_metadata.unit_family) if formula_reference_unit is None: return WithNoUnit(), formula # The new formula unit is the reference unit, since we know that all of its operands have been converted to # the reference unit at this point. 
return WithUnit( unit_family=last_metadata.unit_family, reference_unit=formula_reference_unit.name, unit=formula_reference_unit, from_formula=True, ), formula.set_parameters(parameters) def _visit_timeseries(self, timeseries: Timeseries) -> tuple[UnitMetadata, QueryExpression]: extracted_unit = self._extract_unit(timeseries=timeseries) if extracted_unit is not None: unit_family_and_unit = get_unit_family_and_unit(extracted_unit) if unit_family_and_unit is not None: unit_family, reference_unit, unit = unit_family_and_unit return WithUnit( unit_family=unit_family, reference_unit=reference_unit, unit=unit ), unit.apply_on_query_expression(timeseries) return WithNoUnit(), timeseries def _visit_int(self, int_number: float) -> tuple[UnitMetadata, QueryExpression]: return WithFutureUnit(), int_number def _visit_float(self, float_number: float) -> tuple[UnitMetadata, QueryExpression]: return WithFutureUnit(), float_number def _visit_string(self, string: str) -> tuple[UnitMetadata, QueryExpression]: return WithNoUnit(), string def _extract_unit(self, timeseries: Timeseries) -> str | None: """ Extracts the unit from the timeseries, by parsing its MRI. """ if timeseries.aggregate in self.UNITLESS_AGGREGATES: return None parsed_mri = parse_mri(timeseries.metric.mri) if parsed_mri is not None: if parsed_mri.entity == "c": return None return parsed_mri.unit return None def _normalize_future_units(self, unit: Unit, value: QueryExpression) -> QueryExpression: """ Normalizes all future units, which in our case are just numeric scalars, using a common unit. This assumes that such numbers are used in the context of the unit and as such they need to be scaled by a certain factor to be normalized to the reference unit. """ return NumericScalarsNormalizationVisitor(unit).visit(value)
UnitsNormalizationVisitor
python
huggingface__transformers
src/transformers/models/qwen3_next/modular_qwen3_next.py
{ "start": 9957, "end": 18289 }
class ____(Qwen3MoeAttention): def __init__(self, config: Qwen3NextConfig, layer_idx: int): super().__init__(config, layer_idx) self.q_proj = nn.Linear( config.hidden_size, config.num_attention_heads * self.head_dim * 2, bias=config.attention_bias ) del self.sliding_window def forward( self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> tuple[torch.Tensor, Optional[torch.Tensor]]: input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.head_dim) query_states, gate = torch.chunk( self.q_proj(hidden_states).view(*input_shape, -1, self.head_dim * 2), 2, dim=-1 ) gate = gate.reshape(*input_shape, -1) query_states = self.q_norm(query_states.view(hidden_shape)).transpose(1, 2) key_states = self.k_norm(self.k_proj(hidden_states).view(hidden_shape)).transpose(1, 2) value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2) cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_values is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, **kwargs, ) attn_output = attn_output.reshape(*input_shape, -1).contiguous() attn_output = attn_output * torch.sigmoid(gate) attn_output = self.o_proj(attn_output) return attn_output, attn_weights def torch_causal_conv1d_update( hidden_states, conv_state, weight, bias=None, activation=None, ): _, hidden_size, seq_len = hidden_states.shape state_len = conv_state.shape[-1] hidden_states_new = torch.cat([conv_state, hidden_states], dim=-1).to(weight.dtype) conv_state.copy_(hidden_states_new[:, :, -state_len:]) out = F.conv1d(hidden_states_new, weight.unsqueeze(1), bias, padding=0, groups=hidden_size) out = F.silu(out[:, :, -seq_len:]) out = out.to(hidden_states.dtype) return out def l2norm(x: torch.FloatTensor, dim: int = -1, eps: float = 1e-6): """This function is intended to align with the l2norm implementation in the FLA library.""" inv_norm = torch.rsqrt((x * x).sum(dim=dim, keepdim=True) + eps) return x * inv_norm def torch_chunk_gated_delta_rule( query, key, value, g, beta, chunk_size=64, initial_state=None, output_final_state=False, use_qk_l2norm_in_kernel=False, ): initial_dtype = query.dtype if use_qk_l2norm_in_kernel: query = l2norm(query, dim=-1, eps=1e-6) key = l2norm(key, dim=-1, eps=1e-6) query, key, value, beta, g = [ x.transpose(1, 2).contiguous().to(torch.float32) for x in (query, key, value, beta, g) ] batch_size, num_heads, sequence_length, k_head_dim = key.shape v_head_dim = value.shape[-1] pad_size = (chunk_size - sequence_length % chunk_size) % chunk_size query = F.pad(query, (0, 0, 0, pad_size)) key = F.pad(key, (0, 0, 0, pad_size)) value = F.pad(value, (0, 0, 0, pad_size)) beta = F.pad(beta, (0, pad_size)) g = F.pad(g, (0, 
pad_size)) total_sequence_length = sequence_length + pad_size scale = 1 / (query.shape[-1] ** 0.5) query = query * scale v_beta = value * beta.unsqueeze(-1) k_beta = key * beta.unsqueeze(-1) # reshape to chunks query, key, value, k_beta, v_beta = [ x.reshape(x.shape[0], x.shape[1], -1, chunk_size, x.shape[-1]) for x in (query, key, value, k_beta, v_beta) ] g = g.reshape(g.shape[0], g.shape[1], -1, chunk_size) mask = torch.triu(torch.ones(chunk_size, chunk_size, dtype=torch.bool, device=query.device), diagonal=0) # chunk decay g = g.cumsum(dim=-1) decay_mask = ((g.unsqueeze(-1) - g.unsqueeze(-2)).tril().exp().float()).tril() attn = -((k_beta @ key.transpose(-1, -2)) * decay_mask).masked_fill(mask, 0) for i in range(1, chunk_size): row = attn[..., i, :i].clone() sub = attn[..., :i, :i].clone() attn[..., i, :i] = row + (row.unsqueeze(-1) * sub).sum(-2) attn = attn + torch.eye(chunk_size, dtype=attn.dtype, device=attn.device) value = attn @ v_beta k_cumdecay = attn @ (k_beta * g.exp().unsqueeze(-1)) last_recurrent_state = ( torch.zeros(batch_size, num_heads, k_head_dim, v_head_dim).to(value) if initial_state is None else initial_state.to(value) ) core_attn_out = torch.zeros_like(value) mask = torch.triu(torch.ones(chunk_size, chunk_size, dtype=torch.bool, device=query.device), diagonal=1) # for each chunk for i in range(0, total_sequence_length // chunk_size): q_i, k_i, v_i = query[:, :, i], key[:, :, i], value[:, :, i] attn = (q_i @ k_i.transpose(-1, -2) * decay_mask[:, :, i]).masked_fill_(mask, 0) v_prime = (k_cumdecay[:, :, i]) @ last_recurrent_state v_new = v_i - v_prime attn_inter = (q_i * g[:, :, i, :, None].exp()) @ last_recurrent_state core_attn_out[:, :, i] = attn_inter + attn @ v_new last_recurrent_state = ( last_recurrent_state * g[:, :, i, -1, None, None].exp() + (k_i * (g[:, :, i, -1, None] - g[:, :, i]).exp()[..., None]).transpose(-1, -2) @ v_new ) if not output_final_state: last_recurrent_state = None core_attn_out = core_attn_out.reshape(core_attn_out.shape[0], core_attn_out.shape[1], -1, core_attn_out.shape[-1]) core_attn_out = core_attn_out[:, :, :sequence_length] core_attn_out = core_attn_out.transpose(1, 2).contiguous().to(initial_dtype) return core_attn_out, last_recurrent_state def torch_recurrent_gated_delta_rule( query, key, value, g, beta, initial_state, output_final_state, use_qk_l2norm_in_kernel=False ): initial_dtype = query.dtype if use_qk_l2norm_in_kernel: query = l2norm(query, dim=-1, eps=1e-6) key = l2norm(key, dim=-1, eps=1e-6) query, key, value, beta, g = [ x.transpose(1, 2).contiguous().to(torch.float32) for x in (query, key, value, beta, g) ] batch_size, num_heads, sequence_length, k_head_dim = key.shape v_head_dim = value.shape[-1] scale = 1 / (query.shape[-1] ** 0.5) query = query * scale core_attn_out = torch.zeros(batch_size, num_heads, sequence_length, v_head_dim).to(value) last_recurrent_state = ( torch.zeros(batch_size, num_heads, k_head_dim, v_head_dim).to(value) if initial_state is None else initial_state.to(value) ) for i in range(sequence_length): q_t = query[:, :, i] k_t = key[:, :, i] v_t = value[:, :, i] g_t = g[:, :, i].exp().unsqueeze(-1).unsqueeze(-1) beta_t = beta[:, :, i].unsqueeze(-1) last_recurrent_state = last_recurrent_state * g_t kv_mem = (last_recurrent_state * k_t.unsqueeze(-1)).sum(dim=-2) delta = (v_t - kv_mem) * beta_t last_recurrent_state = last_recurrent_state + k_t.unsqueeze(-1) * delta.unsqueeze(-2) core_attn_out[:, :, i] = (last_recurrent_state * q_t.unsqueeze(-1)).sum(dim=-2) if not output_final_state: last_recurrent_state 
= None core_attn_out = core_attn_out.transpose(1, 2).contiguous().to(initial_dtype) return core_attn_out, last_recurrent_state
Qwen3NextAttention
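An editorial consistency sketch for the two reference kernels above: torch_chunk_gated_delta_rule and torch_recurrent_gated_delta_rule implement the same gated delta-rule recurrence, so on identical inputs their outputs should agree up to float tolerance. Inputs are (batch, seq, heads, dim) before the internal transpose; the shapes and tolerances here are illustrative assumptions:

import torch

B, T, H, Dk, Dv = 1, 8, 2, 4, 4
q = torch.randn(B, T, H, Dk)
k = torch.randn(B, T, H, Dk)
v = torch.randn(B, T, H, Dv)
g = -torch.rand(B, T, H)    # log-decay gates; negative so exp(g) <= 1
beta = torch.rand(B, T, H)  # per-step write strengths in (0, 1)

out_chunk, _ = torch_chunk_gated_delta_rule(
    q, k, v, g, beta, chunk_size=4, use_qk_l2norm_in_kernel=True)
out_rec, _ = torch_recurrent_gated_delta_rule(
    q, k, v, g, beta, None, False, use_qk_l2norm_in_kernel=True)
print((out_chunk - out_rec).abs().max())  # expected to be small (~1e-5)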
python
ansible__ansible
test/lib/ansible_test/_internal/become.py
{ "start": 2516, "end": 3072 }
class ____(Become): """Become using 'sudo'.""" @property def method(self) -> str: """The name of the Ansible become plugin that is equivalent to this.""" return 'sudo' def prepare_command(self, command: list[str]) -> list[str]: """Return the given command, if any, with privilege escalation.""" become = ['sudo', '-in'] if command: become.extend(['sh', '-c', shlex.join(command)]) return become SUPPORTED_BECOME_METHODS = {cls.name(): cls for cls in get_subclasses(Become)}
Sudo
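In effect, the prepare_command above wraps the argv in a login sudo shell; shlex.join re-quotes the command for the sh -c payload:

import shlex

become = ['sudo', '-in']
command = ['echo', 'hi there']
become.extend(['sh', '-c', shlex.join(command)])
print(become)  # ['sudo', '-in', 'sh', '-c', "echo 'hi there'"]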
python
jazzband__django-model-utils
tests/models.py
{ "start": 2229, "end": 2415 }
class ____(InheritanceManagerTestParent): non_related_field_using_descriptor_2 = models.FileField(upload_to="test") normal_field_2 = models.TextField()
InheritanceManagerTestChild2
python
kamyu104__LeetCode-Solutions
Python/find-three-consecutive-integers-that-sum-to-a-given-number.py
{ "start": 36, "end": 230 }
class ____(object): def sumOfThree(self, num): """ :type num: int :rtype: List[int] """ return [num//3-1, num//3, num//3+1] if num%3 == 0 else []
Solution
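The arithmetic behind the one-liner above: the three consecutive integers x-1, x, x+1 sum to 3x, so a solution exists iff num % 3 == 0, with x = num // 3.

print(Solution().sumOfThree(33))  # [10, 11, 12]
print(Solution().sumOfThree(4))   # []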
python
tensorflow__tensorflow
tensorflow/python/data/ops/options.py
{ "start": 18752, "end": 20075 }
class ____(options_lib.OptionsBase): """Represents options for tf.data service. You can set the service options of a dataset through the `experimental_service` property of `tf.data.Options`; the property is an instance of `tf.data.experimental.ServiceOptions`. ```python options = tf.data.Options() options.experimental_service.pinned = True dataset = dataset.with_options(options) ``` """ pinned = options_lib.create_option( name="pinned", ty=bool, docstring=( "If true, the tf.data service client allocates data to pinned memory," " which facilitates more efficient copying from host memory to GPU" " memory downstream. For gRPC, compression must be disabled for this" " to take effect. For alternative data transfer protocols, this may" " or may not take effect, depending on the implementation." ), ) def _to_proto(self): pb = dataset_options_pb2.ServiceOptions() if self.pinned is not None: pb.pinned = self.pinned return pb def _from_proto(self, pb): if pb.WhichOneof("optional_pinned") is not None: self.pinned = pb.pinned @deprecation.deprecated_endpoints("data.experimental.ThreadingOptions") @tf_export("data.experimental.ThreadingOptions", "data.ThreadingOptions")
ServiceOptions
python
pandas-dev__pandas
pandas/core/indexers/objects.py
{ "start": 3509, "end": 5227 }
class ____(BaseIndexer): """Creates window boundaries that are of fixed length.""" def get_window_bounds( self, num_values: int = 0, min_periods: int | None = None, center: bool | None = None, closed: str | None = None, step: int | None = None, ) -> tuple[np.ndarray, np.ndarray]: """ Computes the bounds of a window. Parameters ---------- num_values : int, default 0 number of values that will be aggregated over window_size : int, default 0 the number of rows in a window min_periods : int, default None min_periods passed from the top level rolling API center : bool, default None center passed from the top level rolling API closed : str, default None closed passed from the top level rolling API step : int, default None step passed from the top level rolling API win_type : str, default None win_type passed from the top level rolling API Returns ------- A tuple of ndarray[int64]s, indicating the boundaries of each window """ if center or self.window_size == 0: offset = (self.window_size - 1) // 2 else: offset = 0 end = np.arange(1 + offset, num_values + 1 + offset, step, dtype="int64") start = end - self.window_size if closed in ["left", "both"]: start -= 1 if closed in ["left", "neither"]: end -= 1 end = np.clip(end, 0, num_values) start = np.clip(start, 0, num_values) return start, end
FixedWindowIndexer
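A worked, editorial sketch of the bounds math above. FixedWindowIndexer lives in pandas.core.indexers.objects (an internal module, per the path in this row), so the import is not public API; with window_size=3 over 5 values the trailing windows are [0:1], [0:2], [0:3], [1:4], [2:5]:

from pandas.core.indexers.objects import FixedWindowIndexer  # internal module

indexer = FixedWindowIndexer(window_size=3)
start, end = indexer.get_window_bounds(num_values=5)
print(start)  # [0 0 0 1 2]
print(end)    # [1 2 3 4 5]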
python
tensorflow__tensorflow
tensorflow/python/kernel_tests/distributions/kullback_leibler_test.py
{ "start": 1159, "end": 5254 }
class ____(test.TestCase):

  def testRegistration(self):

    class MyDist(normal.Normal):
      pass

    # Register KL to a lambda that spits out the name parameter
    @kullback_leibler.RegisterKL(MyDist, MyDist)
    def _kl(a, b, name=None):  # pylint: disable=unused-argument,unused-variable
      return name

    a = MyDist(loc=0.0, scale=1.0)
    self.assertEqual("OK", kullback_leibler.kl_divergence(a, a, name="OK"))

  @test_util.run_deprecated_v1
  def testDomainErrorExceptions(self):

    class MyDistException(normal.Normal):
      pass

    # Register KL to a lambda that spits out the name parameter
    @kullback_leibler.RegisterKL(MyDistException, MyDistException)
    def _kl(a, b, name=None):  # pylint: disable=unused-argument,unused-variable
      return array_ops.identity([float("nan")])

    with self.cached_session():
      a = MyDistException(loc=0.0, scale=1.0, allow_nan_stats=False)
      kl = kullback_leibler.kl_divergence(a, a, allow_nan_stats=False)
      with self.assertRaisesOpError(
          "KL calculation between .* and .* returned NaN values"):
        self.evaluate(kl)
      with self.assertRaisesOpError(
          "KL calculation between .* and .* returned NaN values"):
        a.kl_divergence(a).eval()
      a = MyDistException(loc=0.0, scale=1.0, allow_nan_stats=True)
      kl_ok = kullback_leibler.kl_divergence(a, a)
      self.assertAllEqual([float("nan")], self.evaluate(kl_ok))
      self_kl_ok = a.kl_divergence(a)
      self.assertAllEqual([float("nan")], self.evaluate(self_kl_ok))
      cross_ok = a.cross_entropy(a)
      self.assertAllEqual([float("nan")], self.evaluate(cross_ok))

  def testRegistrationFailures(self):

    class MyDist(normal.Normal):
      pass

    with self.assertRaisesRegex(TypeError, "must be callable"):
      kullback_leibler.RegisterKL(MyDist, MyDist)("blah")

    # First registration is OK
    kullback_leibler.RegisterKL(MyDist, MyDist)(lambda a, b: None)

    # Second registration fails
    with self.assertRaisesRegex(ValueError, "has already been registered"):
      kullback_leibler.RegisterKL(MyDist, MyDist)(lambda a, b: None)

  def testExactRegistrationsAllMatch(self):
    for (k, v) in _DIVERGENCES.items():
      self.assertEqual(v, _registered_kl(*k))

  def _testIndirectRegistration(self, fn):

    class Sub1(normal.Normal):

      def entropy(self):
        return ""

    class Sub2(normal.Normal):

      def entropy(self):
        return ""

    class Sub11(Sub1):

      def entropy(self):
        return ""

    # pylint: disable=unused-argument,unused-variable
    @kullback_leibler.RegisterKL(Sub1, Sub1)
    def _kl11(a, b, name=None):
      return "sub1-1"

    @kullback_leibler.RegisterKL(Sub1, Sub2)
    def _kl12(a, b, name=None):
      return "sub1-2"

    @kullback_leibler.RegisterKL(Sub2, Sub1)
    def _kl21(a, b, name=None):
      return "sub2-1"
    # pylint: enable=unused-argument,unused-variable

    sub1 = Sub1(loc=0.0, scale=1.0)
    sub2 = Sub2(loc=0.0, scale=1.0)
    sub11 = Sub11(loc=0.0, scale=1.0)

    self.assertEqual("sub1-1", fn(sub1, sub1))
    self.assertEqual("sub1-2", fn(sub1, sub2))
    self.assertEqual("sub2-1", fn(sub2, sub1))
    self.assertEqual("sub1-1", fn(sub11, sub11))
    self.assertEqual("sub1-1", fn(sub11, sub1))
    self.assertEqual("sub1-2", fn(sub11, sub2))
    self.assertEqual("sub1-1", fn(sub11, sub1))
    self.assertEqual("sub1-2", fn(sub11, sub2))
    self.assertEqual("sub2-1", fn(sub2, sub11))
    self.assertEqual("sub1-1", fn(sub1, sub11))

  def testIndirectRegistrationKLFun(self):
    self._testIndirectRegistration(kullback_leibler.kl_divergence)

  def testIndirectRegistrationKLSelf(self):
    self._testIndirectRegistration(
        lambda p, q: p.kl_divergence(q))

  def testIndirectRegistrationCrossEntropy(self):
    self._testIndirectRegistration(
        lambda p, q: p.cross_entropy(q))

  def testFunctionCrossEntropy(self):
    self._testIndirectRegistration(kullback_leibler.cross_entropy)


if __name__ == "__main__":
  test.main()
KLTest
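A hedged usage sketch to accompany the test above: this is how the double-dispatch registry is meant to be used by client code. `MyNormal` and the closed-form body are illustrative, not part of the test file, and `math_ops` is assumed to be the already-imported TF math module.

class MyNormal(normal.Normal):
  pass

@kullback_leibler.RegisterKL(MyNormal, normal.Normal)
def _kl_my_normal_normal(a, b, name=None):
  # KL(N(m1, s1) || N(m2, s2)) between two univariate normals in closed form.
  var_ratio = math_ops.square(a.scale / b.scale)
  mean_term = math_ops.square((a.loc - b.loc) / b.scale)
  return 0.5 * (var_ratio + mean_term - 1.0 - math_ops.log(var_ratio))

p = MyNormal(loc=0.0, scale=1.0)
q = normal.Normal(loc=1.0, scale=2.0)
kl = kullback_leibler.kl_divergence(p, q)  # dispatches on (MyNormal, Normal)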
python
scikit-learn__scikit-learn
sklearn/ensemble/_forest.py
{ "start": 58108, "end": 73657 }
class ____(ForestRegressor):
    """
    A random forest regressor.

    A random forest is a meta estimator that fits a number of decision tree
    regressors on various sub-samples of the dataset and uses averaging to
    improve the predictive accuracy and control over-fitting.
    Trees in the forest use the best split strategy, i.e. equivalent to passing
    `splitter="best"` to the underlying
    :class:`~sklearn.tree.DecisionTreeRegressor`.
    The sub-sample size is controlled with the `max_samples` parameter if
    `bootstrap=True` (default), otherwise the whole dataset is used to build
    each tree.

    This estimator has native support for missing values (NaNs). During
    training, the tree grower learns at each split point whether samples with
    missing values should go to the left or right child, based on the potential
    gain. When predicting, samples with missing values are assigned to the left
    or right child consequently. If no missing values were encountered for a
    given feature during training, then samples with missing values are mapped
    to whichever child has the most samples.

    For a comparison between tree-based ensemble models see the example
    :ref:`sphx_glr_auto_examples_ensemble_plot_forest_hist_grad_boosting_comparison.py`.

    Read more in the :ref:`User Guide <forest>`.

    Parameters
    ----------
    n_estimators : int, default=100
        The number of trees in the forest.

        .. versionchanged:: 0.22
           The default value of ``n_estimators`` changed from 10 to 100
           in 0.22.

    criterion : {"squared_error", "absolute_error", "friedman_mse", "poisson"}, \
            default="squared_error"
        The function to measure the quality of a split. Supported criteria
        are "squared_error" for the mean squared error, which is equal to
        variance reduction as feature selection criterion and minimizes the L2
        loss using the mean of each terminal node, "friedman_mse", which uses
        mean squared error with Friedman's improvement score for potential
        splits, "absolute_error" for the mean absolute error, which minimizes
        the L1 loss using the median of each terminal node, and "poisson" which
        uses reduction in Poisson deviance to find splits.
        Training using "absolute_error" is significantly slower
        than when using "squared_error".

        .. versionadded:: 0.18
           Mean Absolute Error (MAE) criterion.

        .. versionadded:: 1.0
           Poisson criterion.

    max_depth : int, default=None
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.

    min_samples_split : int or float, default=2
        The minimum number of samples required to split an internal node:

        - If int, then consider `min_samples_split` as the minimum number.
        - If float, then `min_samples_split` is a fraction and
          `ceil(min_samples_split * n_samples)` are the minimum
          number of samples for each split.

        .. versionchanged:: 0.18
           Added float values for fractions.

    min_samples_leaf : int or float, default=1
        The minimum number of samples required to be at a leaf node.
        A split point at any depth will only be considered if it leaves at
        least ``min_samples_leaf`` training samples in each of the left and
        right branches.  This may have the effect of smoothing the model,
        especially in regression.

        - If int, then consider `min_samples_leaf` as the minimum number.
        - If float, then `min_samples_leaf` is a fraction and
          `ceil(min_samples_leaf * n_samples)` are the minimum
          number of samples for each node.

        .. versionchanged:: 0.18
           Added float values for fractions.

    min_weight_fraction_leaf : float, default=0.0
        The minimum weighted fraction of the sum total of weights (of all
        the input samples) required to be at a leaf node. Samples have
        equal weight when sample_weight is not provided.

    max_features : {"sqrt", "log2", None}, int or float, default=1.0
        The number of features to consider when looking for the best split:

        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a fraction and
          `max(1, int(max_features * n_features_in_))` features are considered
          at each split.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None or 1.0, then `max_features=n_features`.

        .. note::
            The default of 1.0 is equivalent to bagged trees and more
            randomness can be achieved by setting smaller values, e.g. 0.3.

        .. versionchanged:: 1.1
            The default of `max_features` changed from `"auto"` to 1.0.

        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.

    max_leaf_nodes : int, default=None
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.

    min_impurity_decrease : float, default=0.0
        A node will be split if this split induces a decrease of the impurity
        greater than or equal to this value.

        The weighted impurity decrease equation is the following::

            N_t / N * (impurity - N_t_R / N_t * right_impurity
                                - N_t_L / N_t * left_impurity)

        where ``N`` is the total number of samples, ``N_t`` is the number of
        samples at the current node, ``N_t_L`` is the number of samples in the
        left child, and ``N_t_R`` is the number of samples in the right child.

        ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
        if ``sample_weight`` is passed.

        .. versionadded:: 0.19

    bootstrap : bool, default=True
        Whether bootstrap samples are used when building trees. If False, the
        whole dataset is used to build each tree.

    oob_score : bool or callable, default=False
        Whether to use out-of-bag samples to estimate the generalization score.
        By default, :func:`~sklearn.metrics.r2_score` is used.
        Provide a callable with signature `metric(y_true, y_pred)` to use a
        custom metric. Only available if `bootstrap=True`.

        For an illustration of out-of-bag (OOB) error estimation, see the
        example :ref:`sphx_glr_auto_examples_ensemble_plot_ensemble_oob.py`.

    n_jobs : int, default=None
        The number of jobs to run in parallel. :meth:`fit`, :meth:`predict`,
        :meth:`decision_path` and :meth:`apply` are all parallelized over the
        trees. ``None`` means 1 unless in a :obj:`joblib.parallel_backend`
        context. ``-1`` means using all processors. See :term:`Glossary
        <n_jobs>` for more details.

    random_state : int, RandomState instance or None, default=None
        Controls both the randomness of the bootstrapping of the samples used
        when building trees (if ``bootstrap=True``) and the sampling of the
        features to consider when looking for the best split at each node
        (if ``max_features < n_features``).
        See :term:`Glossary <random_state>` for details.

    verbose : int, default=0
        Controls the verbosity when fitting and predicting.

    warm_start : bool, default=False
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just fit a whole
        new forest. See :term:`Glossary <warm_start>` and
        :ref:`tree_ensemble_warm_start` for details.

    ccp_alpha : non-negative float, default=0.0
        Complexity parameter used for Minimal Cost-Complexity Pruning. The
        subtree with the largest cost complexity that is smaller than
        ``ccp_alpha`` will be chosen. By default, no pruning is performed. See
        :ref:`minimal_cost_complexity_pruning` for details. See
        :ref:`sphx_glr_auto_examples_tree_plot_cost_complexity_pruning.py`
        for an example of such pruning.

        .. versionadded:: 0.22

    max_samples : int or float, default=None
        If bootstrap is True, the number of samples to draw from X
        to train each base estimator.

        - If None (default), then draw `X.shape[0]` samples.
        - If int, then draw `max_samples` samples.
        - If float, then draw `max(round(n_samples * max_samples), 1)` samples.
          Thus, `max_samples` should be in the interval `(0.0, 1.0]`.

        .. versionadded:: 0.22

    monotonic_cst : array-like of int of shape (n_features), default=None
        Indicates the monotonicity constraint to enforce on each feature.
          - 1: monotonically increasing
          - 0: no constraint
          - -1: monotonically decreasing

        If monotonic_cst is None, no constraints are applied.

        Monotonicity constraints are not supported for:
          - multioutput regressions (i.e. when `n_outputs_ > 1`),
          - regressions trained on data with missing values.

        Read more in the :ref:`User Guide <monotonic_cst_gbdt>`.

        .. versionadded:: 1.4

    Attributes
    ----------
    estimator_ : :class:`~sklearn.tree.DecisionTreeRegressor`
        The child estimator template used to create the collection of fitted
        sub-estimators.

        .. versionadded:: 1.2
           `base_estimator_` was renamed to `estimator_`.

    estimators_ : list of DecisionTreeRegressor
        The collection of fitted sub-estimators.

    feature_importances_ : ndarray of shape (n_features,)
        The impurity-based feature importances.
        The higher, the more important the feature.
        The importance of a feature is computed as the (normalized)
        total reduction of the criterion brought by that feature.  It is also
        known as the Gini importance.

        Warning: impurity-based feature importances can be misleading for
        high cardinality features (many unique values). See
        :func:`sklearn.inspection.permutation_importance` as an alternative.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    n_outputs_ : int
        The number of outputs when ``fit`` is performed.

    oob_score_ : float
        Score of the training dataset obtained using an out-of-bag estimate.
        This attribute exists only when ``oob_score`` is True.

    oob_prediction_ : ndarray of shape (n_samples,) or (n_samples, n_outputs)
        Prediction computed with out-of-bag estimate on the training set.
        This attribute exists only when ``oob_score`` is True.

    estimators_samples_ : list of arrays
        The subset of drawn samples (i.e., the in-bag samples) for each base
        estimator. Each subset is defined by an array of the indices selected.

        .. versionadded:: 1.4

    See Also
    --------
    sklearn.tree.DecisionTreeRegressor : A decision tree regressor.
    sklearn.ensemble.ExtraTreesRegressor : Ensemble of extremely randomized
        tree regressors.
    sklearn.ensemble.HistGradientBoostingRegressor : A Histogram-based Gradient
        Boosting Regression Tree, very fast for big datasets
        (n_samples >= 10_000).

    Notes
    -----
    The default values for the parameters controlling the size of the trees
    (e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
    unpruned trees which can potentially be very large on some data sets. To
    reduce memory consumption, the complexity and size of the trees should be
    controlled by setting those parameter values.

    The features are always randomly permuted at each split. Therefore,
    the best found split may vary, even with the same training data,
    ``max_features=n_features`` and ``bootstrap=False``, if the improvement
    of the criterion is identical for several splits enumerated during the
    search of the best split. To obtain a deterministic behaviour during
    fitting, ``random_state`` has to be fixed.

    The default value ``max_features=1.0`` uses ``n_features``
    rather than ``n_features / 3``. The latter was originally suggested in
    [1]_, whereas the former was more recently justified empirically in [2]_.

    References
    ----------
    .. [1] :doi:`L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32,
       2001. <10.1023/A:1010933404324>`

    .. [2] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized
       trees", Machine Learning, 63(1), 3-42, 2006.

    Examples
    --------
    >>> from sklearn.ensemble import RandomForestRegressor
    >>> from sklearn.datasets import make_regression
    >>> X, y = make_regression(n_features=4, n_informative=2,
    ...                        random_state=0, shuffle=False)
    >>> regr = RandomForestRegressor(max_depth=2, random_state=0)
    >>> regr.fit(X, y)
    RandomForestRegressor(...)
    >>> print(regr.predict([[0, 0, 0, 0]]))
    [-8.32987858]
    """

    _parameter_constraints: dict = {
        **ForestRegressor._parameter_constraints,
        **DecisionTreeRegressor._parameter_constraints,
    }
    _parameter_constraints.pop("splitter")

    def __init__(
        self,
        n_estimators=100,
        *,
        criterion="squared_error",
        max_depth=None,
        min_samples_split=2,
        min_samples_leaf=1,
        min_weight_fraction_leaf=0.0,
        max_features=1.0,
        max_leaf_nodes=None,
        min_impurity_decrease=0.0,
        bootstrap=True,
        oob_score=False,
        n_jobs=None,
        random_state=None,
        verbose=0,
        warm_start=False,
        ccp_alpha=0.0,
        max_samples=None,
        monotonic_cst=None,
    ):
        super().__init__(
            estimator=DecisionTreeRegressor(),
            n_estimators=n_estimators,
            estimator_params=(
                "criterion",
                "max_depth",
                "min_samples_split",
                "min_samples_leaf",
                "min_weight_fraction_leaf",
                "max_features",
                "max_leaf_nodes",
                "min_impurity_decrease",
                "random_state",
                "ccp_alpha",
                "monotonic_cst",
            ),
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start,
            max_samples=max_samples,
        )

        self.criterion = criterion
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.max_leaf_nodes = max_leaf_nodes
        self.min_impurity_decrease = min_impurity_decrease
        self.ccp_alpha = ccp_alpha
        self.monotonic_cst = monotonic_cst
RandomForestRegressor
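A short, self-contained illustration (not from the source file) of the out-of-bag attributes documented above; the dataset shape and hyperparameters are arbitrary choices.

from sklearn.datasets import make_regression
from sklearn.ensemble import RandomForestRegressor

X, y = make_regression(n_samples=500, n_features=8, noise=10.0, random_state=0)
# bootstrap=True (the default) is required for out-of-bag estimation.
regr = RandomForestRegressor(n_estimators=200, oob_score=True, random_state=0)
regr.fit(X, y)
print(regr.oob_score_)            # R^2 estimated on out-of-bag samples
print(regr.feature_importances_)  # impurity-based importances (sum to 1)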
python
Pylons__pyramid
src/pyramid/view.py
{ "start": 25896, "end": 29709 }
class ____:
    """Request methods mixin for BaseRequest having to do with executing
    views."""

    def invoke_exception_view(
        self, exc_info=None, request=None, secure=True, reraise=False
    ):
        """Executes an exception view related to the request it's called upon.
        The arguments it takes are these:

        ``exc_info``

            If provided, should be a 3-tuple in the form provided by
            ``sys.exc_info()``.  If not provided,
            ``sys.exc_info()`` will be called to obtain the current
            interpreter exception information.  Default: ``None``.

        ``request``

            If the request to be used is not the same one as the instance
            that this method is called upon, it may be passed here.
            Default: ``None``.

        ``secure``

            If the exception view should not be rendered if the current
            user does not have the appropriate permission, this should be
            ``True``. Default: ``True``.

        ``reraise``

            A boolean indicating whether the original error should be
            reraised if a :term:`response` object could not be created. If
            ``False`` then an :class:`pyramid.httpexceptions.HTTPNotFound``
            exception will be raised. Default: ``False``.

        If a response is generated then ``request.exception`` and
        ``request.exc_info`` will be left at the values used to render the
        response. Otherwise the previous values for ``request.exception`` and
        ``request.exc_info`` will be restored.

        .. versionadded:: 1.7

        .. versionchanged:: 1.9
           The ``request.exception`` and ``request.exc_info`` properties will
           reflect the exception used to render the response where previously
           they were reset to the values prior to invoking the method.

           Also added the ``reraise`` argument.

        """
        if request is None:
            request = self
        registry = getattr(request, 'registry', None)
        if registry is None:
            registry = get_current_registry()

        if registry is None:
            raise RuntimeError("Unable to retrieve registry")

        if exc_info is None:
            exc_info = sys.exc_info()

        exc = exc_info[1]
        attrs = request.__dict__
        context_iface = providedBy(exc)

        # clear old generated request.response, if any; it may
        # have been mutated by the view, and its state is not
        # sane (e.g. caching headers)
        with hide_attrs(request, 'response', 'exc_info', 'exception'):
            attrs['exception'] = exc
            attrs['exc_info'] = exc_info
            # we use .get instead of .__getitem__ below due to
            # https://github.com/Pylons/pyramid/issues/700
            request_iface = attrs.get('request_iface', IRequest)

            manager.push({'request': request, 'registry': registry})

            try:
                response = _call_view(
                    registry,
                    request,
                    exc,
                    context_iface,
                    '',
                    view_types=None,
                    view_classifier=IExceptionViewClassifier,
                    secure=secure,
                    request_iface=request_iface.combined,
                )
            except Exception:
                if reraise:
                    reraise_(*exc_info)
                raise
            finally:
                manager.pop()

        if response is None:
            if reraise:
                reraise_(*exc_info)
            raise HTTPNotFound

        # successful response, overwrite exception/exc_info
        attrs['exception'] = exc
        attrs['exc_info'] = exc_info
        return response
ViewMethodsMixin
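A hedged usage sketch for the method above; `handler` stands in for a hypothetical downstream request handler (e.g. inside a tween), and is not part of the source file.

def tween(request):
    try:
        return handler(request)  # hypothetical downstream handler
    except Exception:
        # Look up and invoke whichever exception view matches the in-flight
        # exception; re-raise the original error if none produced a response.
        return request.invoke_exception_view(reraise=True)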
python
python-openxml__python-docx
src/docx/oxml/text/parfmt.py
{ "start": 10747, "end": 11632 }
class ____(BaseOxmlElement):
    """`<w:tab>` element, representing an individual tab stop.

    Overloaded to use for a tab-character in a run, which also uses the w:tab
    tag but only needs a __str__ method.
    """

    val: WD_TAB_ALIGNMENT = RequiredAttribute(  # pyright: ignore[reportAssignmentType]
        "w:val", WD_TAB_ALIGNMENT
    )
    leader: WD_TAB_LEADER | None = OptionalAttribute(  # pyright: ignore[reportAssignmentType]
        "w:leader", WD_TAB_LEADER, default=WD_TAB_LEADER.SPACES
    )
    pos: Length = RequiredAttribute(  # pyright: ignore[reportAssignmentType]
        "w:pos", ST_SignedTwipsMeasure
    )

    def __str__(self) -> str:
        """Text equivalent of a `w:tab` element appearing in a run.

        Allows text of run inner-content to be accessed consistently across
        all text inner-content.
        """
        return "\t"
CT_TabStop
python
sympy__sympy
sympy/physics/quantum/matrixcache.py
{ "start": 333, "end": 3587 }
class ____:
    """A cache for small matrices in different formats.

    This class takes small matrices in the standard ``sympy.Matrix`` format,
    and then converts these to both ``numpy.matrix`` and
    ``scipy.sparse.csr_matrix`` matrices. These matrices are then stored for
    future recovery.
    """

    def __init__(self, dtype='complex'):
        self._cache = {}
        self.dtype = dtype

    def cache_matrix(self, name, m):
        """Cache a matrix by its name.

        Parameters
        ----------
        name : str
            A descriptive name for the matrix, like "identity2".
        m : list of lists
            The raw matrix data as a SymPy Matrix.
        """
        try:
            self._sympy_matrix(name, m)
        except ImportError:
            pass
        try:
            self._numpy_matrix(name, m)
        except ImportError:
            pass
        try:
            self._scipy_sparse_matrix(name, m)
        except ImportError:
            pass

    def get_matrix(self, name, format):
        """Get a cached matrix by name and format.

        Parameters
        ----------
        name : str
            A descriptive name for the matrix, like "identity2".
        format : str
            The format desired ('sympy', 'numpy', 'scipy.sparse')
        """
        m = self._cache.get((name, format))
        if m is not None:
            return m
        raise NotImplementedError(
            'Matrix with name %s and format %s is not available.' %
            (name, format)
        )

    def _store_matrix(self, name, format, m):
        self._cache[(name, format)] = m

    def _sympy_matrix(self, name, m):
        self._store_matrix(name, 'sympy', to_sympy(m))

    def _numpy_matrix(self, name, m):
        m = to_numpy(m, dtype=self.dtype)
        self._store_matrix(name, 'numpy', m)

    def _scipy_sparse_matrix(self, name, m):
        # TODO: explore different sparse formats. But sparse.kron will use
        # coo in most cases, so we use that here.
        m = to_scipy_sparse(m, dtype=self.dtype)
        self._store_matrix(name, 'scipy.sparse', m)


sqrt2_inv = Pow(2, Rational(-1, 2), evaluate=False)

# Save the common matrices that we will need
matrix_cache = MatrixCache()
matrix_cache.cache_matrix('eye2', Matrix([[1, 0], [0, 1]]))
matrix_cache.cache_matrix('op11', Matrix([[0, 0], [0, 1]]))  # |1><1|
matrix_cache.cache_matrix('op00', Matrix([[1, 0], [0, 0]]))  # |0><0|
matrix_cache.cache_matrix('op10', Matrix([[0, 0], [1, 0]]))  # |1><0|
matrix_cache.cache_matrix('op01', Matrix([[0, 1], [0, 0]]))  # |0><1|
matrix_cache.cache_matrix('X', Matrix([[0, 1], [1, 0]]))
matrix_cache.cache_matrix('Y', Matrix([[0, -I], [I, 0]]))
matrix_cache.cache_matrix('Z', Matrix([[1, 0], [0, -1]]))
matrix_cache.cache_matrix('S', Matrix([[1, 0], [0, I]]))
matrix_cache.cache_matrix('T', Matrix([[1, 0], [0, exp(I*pi/4)]]))
matrix_cache.cache_matrix('H', sqrt2_inv*Matrix([[1, 1], [1, -1]]))
matrix_cache.cache_matrix('Hsqrt2', Matrix([[1, 1], [1, -1]]))
matrix_cache.cache_matrix(
    'SWAP', Matrix([[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]]))
matrix_cache.cache_matrix('ZX', sqrt2_inv*Matrix([[1, 1], [1, -1]]))
matrix_cache.cache_matrix('ZY', Matrix([[I, 0], [0, -I]]))
MatrixCache
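An illustrative lookup against the module-level `matrix_cache` populated above; the numpy and scipy.sparse entries exist only when those optional imports succeeded inside cache_matrix().

H_sym = matrix_cache.get_matrix('H', 'sympy')        # sympy.Matrix
H_np = matrix_cache.get_matrix('H', 'numpy')         # dense numpy data
H_sp = matrix_cache.get_matrix('H', 'scipy.sparse')  # sparse matrix
# Unknown (name, format) pairs raise NotImplementedError instead of
# recomputing, so new matrices must be registered via cache_matrix() first.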
python
prompt-toolkit__python-prompt-toolkit
src/prompt_toolkit/completion/base.py
{ "start": 5333, "end": 6602 }
class ____(metaclass=ABCMeta):
    """
    Base class for completer implementations.
    """

    @abstractmethod
    def get_completions(
        self, document: Document, complete_event: CompleteEvent
    ) -> Iterable[Completion]:
        """
        This should be a generator that yields :class:`.Completion` instances.

        If the generation of completions is something expensive (that takes a
        lot of time), consider wrapping this `Completer` class in a
        `ThreadedCompleter`. In that case, the completer algorithm runs in a
        background thread and completions will be displayed as soon as they
        arrive.

        :param document: :class:`~prompt_toolkit.document.Document` instance.
        :param complete_event: :class:`.CompleteEvent` instance.
        """
        while False:
            yield

    async def get_completions_async(
        self, document: Document, complete_event: CompleteEvent
    ) -> AsyncGenerator[Completion, None]:
        """
        Asynchronous generator for completions. (Probably, you won't have to
        override this.)

        Asynchronous generator of :class:`.Completion` objects.
        """
        for item in self.get_completions(document, complete_event):
            yield item
Completer
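A minimal concrete subclass, sketched to show the generator contract described above; the word list is arbitrary.

from prompt_toolkit.completion import Completer, Completion

class ColorCompleter(Completer):
    words = ["red", "green", "blue"]

    def get_completions(self, document, complete_event):
        prefix = document.get_word_before_cursor()
        for word in self.words:
            if word.startswith(prefix):
                # A negative start_position replaces the typed prefix.
                yield Completion(word, start_position=-len(prefix))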
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/operators/dlp.py
{ "start": 99159, "end": 103590 }
class ____(GoogleCloudBaseOperator):
    """
    Re-identifies content that has been de-identified.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:CloudDLPReidentifyContentOperator`

    :param project_id: (Optional) Google Cloud project ID where the
        DLP Instance exists. If set to None or missing, the default project_id
        from the Google Cloud connection is used.
    :param reidentify_config: (Optional) Configuration for the re-identification of
        the content item.
    :param inspect_config: (Optional) Configuration for the inspector.
    :param item: (Optional) The item to re-identify. Will be treated as text.
    :param inspect_template_name: (Optional) Optional template to use. Any configuration
        directly specified in inspect_config will override those set in the template.
    :param reidentify_template_name: (Optional) Optional template to use. References an
        instance of DeidentifyTemplate. Any configuration directly specified in
        reidentify_config or inspect_config will override those set in the template.
    :param retry: (Optional) A retry object used to retry requests.
        If None is specified, requests will not be retried.
    :param timeout: (Optional) The amount of time, in seconds, to wait for the request
        to complete. Note that if retry is specified, the timeout applies to each
        individual attempt.
    :param metadata: (Optional) Additional metadata that is provided to the method.
    :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity,
        with first account from the list granting this role to the originating account
        (templated).
    """

    template_fields: Sequence[str] = (
        "project_id",
        "reidentify_config",
        "inspect_config",
        "item",
        "inspect_template_name",
        "reidentify_template_name",
        "gcp_conn_id",
        "impersonation_chain",
    )

    def __init__(
        self,
        *,
        project_id: str = PROVIDE_PROJECT_ID,
        reidentify_config: dict | DeidentifyConfig | None = None,
        inspect_config: dict | InspectConfig | None = None,
        item: dict | ContentItem | None = None,
        inspect_template_name: str | None = None,
        reidentify_template_name: str | None = None,
        retry: Retry | _MethodDefault = DEFAULT,
        timeout: float | None = None,
        metadata: Sequence[tuple[str, str]] = (),
        gcp_conn_id: str = "google_cloud_default",
        impersonation_chain: str | Sequence[str] | None = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.project_id = project_id
        self.reidentify_config = reidentify_config
        self.inspect_config = inspect_config
        self.item = item
        self.inspect_template_name = inspect_template_name
        self.reidentify_template_name = reidentify_template_name
        self.retry = retry
        self.timeout = timeout
        self.metadata = metadata
        self.gcp_conn_id = gcp_conn_id
        self.impersonation_chain = impersonation_chain

    def execute(self, context: Context):
        hook = CloudDLPHook(
            gcp_conn_id=self.gcp_conn_id,
            impersonation_chain=self.impersonation_chain,
        )
        response = hook.reidentify_content(
            project_id=self.project_id,
            reidentify_config=self.reidentify_config,
            inspect_config=self.inspect_config,
            item=self.item,
            inspect_template_name=self.inspect_template_name,
            reidentify_template_name=self.reidentify_template_name,
            retry=self.retry,
            timeout=self.timeout,
            metadata=self.metadata,
        )
        return ReidentifyContentResponse.to_dict(response)
CloudDLPReidentifyContentOperator
python
django__django
tests/utils_tests/test_http.py
{ "start": 10759, "end": 11424 }
class ____(unittest.TestCase):
    def test_parsing(self):
        self.assertEqual(
            parse_etags(r'"" , "etag", "e\\tag", W/"weak"'),
            ['""', '"etag"', r'"e\\tag"', 'W/"weak"'],
        )
        self.assertEqual(parse_etags("*"), ["*"])
        # Ignore RFC 2616 ETags that are invalid according to RFC 9110.
        self.assertEqual(parse_etags(r'"etag", "e\"t\"ag"'), ['"etag"'])

    def test_quoting(self):
        self.assertEqual(quote_etag("etag"), '"etag"')  # unquoted
        self.assertEqual(quote_etag('"etag"'), '"etag"')  # quoted
        self.assertEqual(quote_etag('W/"etag"'), 'W/"etag"')  # quoted, weak
ETagProcessingTests
python
getsentry__sentry
tests/sentry/sentry_apps/tasks/test_sentry_apps.py
{ "start": 70311, "end": 74332 }
class ____(TestCase):
    def setUp(self) -> None:
        self.sentry_app = self.create_sentry_app(
            organization=self.organization, events=["issue.created"]
        )
        self.install = self.create_sentry_app_installation(
            organization=self.organization, slug=self.sentry_app.slug
        )

    @patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
    def test_cron_issue_without_feature_flag(
        self, mock_record: MagicMock, safe_urlopen: MagicMock
    ) -> None:
        """Test that CRON issues don't send webhooks without the feature flag"""
        event = self.store_event(
            data={
                "event_id": "a" * 32,
                "message": "monitor check-in failure",
                "timestamp": before_now(minutes=1).isoformat(),
            },
            project_id=self.project.id,
        )
        assert event.group is not None

        # Set to CRON category (type_id = 4001, MonitorIncidentType)
        with assume_test_silo_mode(SiloMode.REGION):
            event.group.update(type=4001)

        with self.tasks():
            post_process_group(
                is_new=True,
                is_regression=False,
                is_new_group_environment=False,
                cache_key=write_event_to_cache(event),
                group_id=event.group_id,
                project_id=self.project.id,
                eventstream_type=EventStreamEventType.Generic.value,
            )

        assert not safe_urlopen.called

    @with_feature("organizations:expanded-sentry-apps-webhooks")
    @patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
    def test_cron_issue_with_feature_flag(
        self, mock_record: MagicMock, safe_urlopen: MagicMock
    ) -> None:
        event = self.store_event(
            data={
                "event_id": "b" * 32,
                "message": "monitor check-in failure",
                "timestamp": before_now(minutes=1).isoformat(),
            },
            project_id=self.project.id,
        )
        assert event.group is not None

        with assume_test_silo_mode(SiloMode.REGION):
            event.group.update(type=4001)

        with self.tasks():
            post_process_group(
                is_new=True,
                is_regression=False,
                is_new_group_environment=False,
                cache_key=write_event_to_cache(event),
                group_id=event.group_id,
                project_id=self.project.id,
                eventstream_type=EventStreamEventType.Generic.value,
            )

        assert safe_urlopen.called
        ((args, kwargs),) = safe_urlopen.call_args_list
        data = json.loads(kwargs["data"])

        assert data["action"] == "created"
        assert data["installation"]["uuid"] == self.install.uuid
        assert data["data"]["issue"]["id"] == str(event.group.id)
        assert_success_metric(mock_record)

    @patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
    def test_error_issue_always_sends_webhook(
        self, mock_record: MagicMock, safe_urlopen: MagicMock
    ) -> None:
        event = self.store_event(data={}, project_id=self.project.id)
        assert event.group is not None

        with self.tasks():
            post_process_group(
                is_new=True,
                is_regression=False,
                is_new_group_environment=False,
                cache_key=write_event_to_cache(event),
                group_id=event.group_id,
                project_id=self.project.id,
                eventstream_type=EventStreamEventType.Error.value,
            )

        assert safe_urlopen.called
        ((args, kwargs),) = safe_urlopen.call_args_list
        data = json.loads(kwargs["data"])

        assert data["action"] == "created"
        assert data["installation"]["uuid"] == self.install.uuid
        assert data["data"]["issue"]["id"] == str(event.group.id)
        assert_success_metric(mock_record)
TestExpandedSentryAppsWebhooks
python
tensorflow__tensorflow
tensorflow/python/debug/lib/check_numerics_callback_test.py
{ "start": 2657, "end": 4914 }
class ____(test_util.TensorFlowTestCase):

  def tearDown(self):
    check_numerics_callback.disable_check_numerics()
    super(CheckNumericsCallbackTest, self).tearDown()

  def testCallingDisableCheckNumericsWithoutEnablingFirstIsTolerated(self):
    check_numerics_callback.disable_check_numerics()

  def testNoCatchEagerOpExecution(self):
    """Test running multiple steps of eager execution without Inf/NaN."""
    check_numerics_callback.enable_check_numerics()
    x = constant_op.constant([2.0, 3.0])
    y = constant_op.constant([1.0, 0.0])
    self.assertAllClose((x + y) * (x - y), [3.0, 9.0])

  @test_util.run_in_graph_and_eager_modes
  def testDatasetMapHealthyResults(self):
    check_numerics_callback.enable_check_numerics()
    tensor = constant_op.constant(
        [0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0])

    def map_fn(x):
      return math_ops.log(math_ops.square(x) + 1)

    dataset = dataset_ops.Dataset.from_tensor_slices(tensor).batch(2).map(
        map_fn)

    @def_function.function
    def get_batches():
      iterator = iter(dataset)
      return [next(iterator), next(iterator)]

    batches = self.evaluate(get_batches())
    self.assertLen(batches, 2)
    self.assertAllClose(batches[0], np.log([1.25, 2]))
    self.assertAllClose(batches[1], np.log([3.25, 5]))

  @test_util.run_in_graph_and_eager_modes
  def testGraphModeUsesCorrectPathLengthAndStackHeightLimits(self):
    check_numerics_callback.enable_check_numerics(
        stack_height_limit=123, path_length_limit=1200)

    @def_function.function
    def add_fn(x, y):
      return x + y

    fake_get_check_numerics_error_message = test.mock.MagicMock(
        return_value="dummy_message")
    with test.mock.patch.object(check_numerics_callback,
                                "get_check_numerics_error_message",
                                fake_get_check_numerics_error_message):
      x = constant_op.constant(2.0)
      y = constant_op.constant(3.0)
      self.assertAllClose(self.evaluate(add_fn(x, y)), 5.0)
      (_, call_kwargs) = fake_get_check_numerics_error_message.call_args
      self.assertEqual(call_kwargs["stack_height_limit"], 123)
      self.assertEqual(call_kwargs["path_length_limit"], 1200)
CheckNumericsCallbackTest
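For context, a sketch of the public API these tests exercise; tf.debugging.enable_check_numerics is the documented alias for the callback being enabled above.

import tensorflow as tf

tf.debugging.enable_check_numerics()
x = tf.constant([1.0, 0.0])
y = tf.math.log(x)  # log(0) -> -inf, so this raises with an annotated message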
python
celery__celery
celery/worker/components.py
{ "start": 2860, "end": 5497 }
class ____(bootsteps.StartStopStep):
    """Bootstep managing the worker pool.

    Describes how to initialize the worker pool, and starts and stops
    the pool during worker start-up/shutdown.

    Adds attributes:

        * autoscale
        * pool
        * max_concurrency
        * min_concurrency
    """

    requires = (Hub,)

    def __init__(self, w, autoscale=None, **kwargs):
        w.pool = None
        w.max_concurrency = None
        w.min_concurrency = w.concurrency
        self.optimization = w.optimization
        if isinstance(autoscale, str):
            max_c, _, min_c = autoscale.partition(',')
            autoscale = [int(max_c), min_c and int(min_c) or 0]
        w.autoscale = autoscale
        if w.autoscale:
            w.max_concurrency, w.min_concurrency = w.autoscale
        super().__init__(w, **kwargs)

    def close(self, w):
        if w.pool:
            w.pool.close()

    def terminate(self, w):
        if w.pool:
            w.pool.terminate()

    def create(self, w):
        semaphore = None
        max_restarts = None
        if w.app.conf.worker_pool in GREEN_POOLS:  # pragma: no cover
            warnings.warn(UserWarning(W_POOL_SETTING))
        threaded = not w.use_eventloop or IS_WINDOWS
        procs = w.min_concurrency
        w.process_task = w._process_task
        if not threaded:
            semaphore = w.semaphore = LaxBoundedSemaphore(procs)
            w._quick_acquire = w.semaphore.acquire
            w._quick_release = w.semaphore.release
            max_restarts = 100
            if w.pool_putlocks and w.pool_cls.uses_semaphore:
                w.process_task = w._process_task_sem
        allow_restart = w.pool_restarts
        pool = w.pool = self.instantiate(
            w.pool_cls, w.min_concurrency,
            initargs=(w.app, w.hostname),
            maxtasksperchild=w.max_tasks_per_child,
            max_memory_per_child=w.max_memory_per_child,
            timeout=w.time_limit,
            soft_timeout=w.soft_time_limit,
            putlocks=w.pool_putlocks and threaded,
            lost_worker_timeout=w.worker_lost_wait,
            threads=threaded,
            max_restarts=max_restarts,
            allow_restart=allow_restart,
            forking_enable=True,
            semaphore=semaphore,
            sched_strategy=self.optimization,
            app=w.app,
        )
        _set_task_join_will_block(pool.task_join_will_block)
        return pool

    def info(self, w):
        return {'pool': w.pool.info if w.pool else 'N/A'}

    def register_with_event_loop(self, w, hub):
        w.pool.register_with_event_loop(hub)
Pool
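The autoscale string is split with str.partition, so a missing minimum falls back to 0; a small demonstration of that parsing logic:

max_c, _, min_c = "10,3".partition(',')
assert [int(max_c), min_c and int(min_c) or 0] == [10, 3]

max_c, _, min_c = "10".partition(',')
assert [int(max_c), min_c and int(min_c) or 0] == [10, 0]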
python
apache__airflow
helm-tests/tests/helm_tests/airflow_core/test_api_server.py
{ "start": 31933, "end": 33746 }
class ____:
    """Tests api-server service account."""

    def test_should_add_component_specific_labels(self):
        docs = render_chart(
            values={
                "apiServer": {
                    "serviceAccount": {"create": True},
                    "labels": {"test_label": "test_label_value"},
                },
            },
            show_only=["templates/api-server/api-server-serviceaccount.yaml"],
        )

        assert "test_label" in jmespath.search("metadata.labels", docs[0])
        assert jmespath.search("metadata.labels", docs[0])["test_label"] == "test_label_value"

    def test_default_automount_service_account_token(self):
        docs = render_chart(
            values={
                "apiServer": {
                    "serviceAccount": {"create": True},
                },
            },
            show_only=["templates/api-server/api-server-serviceaccount.yaml"],
        )
        assert jmespath.search("automountServiceAccountToken", docs[0]) is True

    def test_overridden_automount_service_account_token(self):
        docs = render_chart(
            values={
                "apiServer": {
                    "serviceAccount": {"create": True, "automountServiceAccountToken": False},
                },
            },
            show_only=["templates/api-server/api-server-serviceaccount.yaml"],
        )
        assert jmespath.search("automountServiceAccountToken", docs[0]) is False

    def test_can_be_disabled(self):
        """
        API Server should be able to be disabled if the users desires.
        """
        docs = render_chart(
            values={"apiServer": {"enabled": False}},
            show_only=["templates/api-server/api-server-serviceaccount.yaml"],
        )

        assert len(docs) == 0
TestAPIServerServiceAccount
python
pytorch__pytorch
test/dynamo/cpython/3_13/test_iter.py
{ "start": 2744, "end": 2913 }
class ____:
    def __init__(self, i):
        self.i = i

    def __next__(self):
        return next(self.i)

    def __iter__(self):
        return self
IteratorProxyClass
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/call12.py
{ "start": 483, "end": 767 }
class ____(TypedDict):
    c: int


func1(a=(v4 := 1), **B(b=(v4 + 1)))

# This should generate an error.
func1(**A(a=(v5 + 1)), b=(v5 := 1))

func1(**A(a=(v5 := 1)), b=(v5 + 1))

func1(b=(v6 + 1), *[(v6 := 1)], **C(c=(v6 + 2)))


def func2(a: int, b: int):
    pass


func2(b=1, *(2,))
C
python
Netflix__metaflow
metaflow/user_configs/config_parameters.py
{ "start": 7728, "end": 13538 }
class ____(collections.abc.Mapping):
    """
    Small wrapper that allows the evaluation of a Config() value in a delayed
    manner. This is used when we want to use config.* values in decorators for
    example. It also allows the following "delayed" access on an obj that is
    a DelayEvaluation
      - obj.x.y.z (ie: accessing members of DelayEvaluator; accesses will be
        delayed until the DelayEvaluator is evaluated)
      - **obj (ie: unpacking the DelayEvaluator as a dictionary). Note that
        this requires special handling in whatever this is being unpacked
        into, specifically the handling of _unpacked_delayed_*
    """

    def __init__(self, ex: str, saved_globals: Optional[Dict[str, Any]] = None):
        self._config_expr = ex
        self._globals = saved_globals
        if ID_PATTERN.match(self._config_expr):
            # This is a variable only so allow things like
            # config_expr("config").var
            self._is_var_only = True
            self._access = []
        else:
            self._is_var_only = False
            self._access = None
        self._cached_expr = None

    def __copy__(self):
        c = DelayEvaluator(self._config_expr)
        c._access = self._access.copy() if self._access is not None else None
        # Globals are not copied -- always kept as a reference
        return c

    def __deepcopy__(self, memo):
        c = DelayEvaluator(self._config_expr)
        c._access = (
            copy.deepcopy(self._access, memo) if self._access is not None else None
        )
        # Globals are not copied -- always kept as a reference
        return c

    def __iter__(self):
        yield "%s%d" % (UNPACK_KEY, id(self))

    def __getitem__(self, key):
        if isinstance(key, str) and key == "%s%d" % (UNPACK_KEY, id(self)):
            return self
        if self._access is None:
            raise KeyError(key)
        # Make a copy so that we can support something like
        # foo = delay_evaluator["blah"]
        # bar = delay_evaluator["baz"]
        # and don't end up with a access list that contains both "blah" and
        # "baz"
        c = self.__copy__()
        c._access.append(key)
        c._cached_expr = None
        return c

    def __len__(self):
        return 1

    def __getattr__(self, name):
        if self._access is None:
            raise AttributeError(name)
        c = self.__copy__()
        c._access.append(name)
        c._cached_expr = None
        return c

    def __call__(self, ctx=None, deploy_time=False):
        from ..flowspec import FlowStateItems  # Prevent circular import

        # Two additional arguments are only used by DeployTimeField which will
        # call this function with those two additional arguments. They are
        # ignored.
        flow_cls = getattr(current_flow, "flow_cls", None)
        if flow_cls is None:
            # We are not executing inside a flow (ie: not the CLI)
            raise MetaflowException(
                "Config object can only be used directly in the FlowSpec "
                "defining them (or their flow decorators)."
            )
        if self._cached_expr is not None:
            to_eval_expr = self._cached_expr
        elif self._access is not None:
            # Build the final expression by adding all the fields in access
            # as . fields
            access_list = [self._config_expr]
            for a in self._access:
                if isinstance(a, str):
                    access_list.append(a)
                elif isinstance(a, DelayEvaluator):
                    # Supports things like config[other_config.selector].var
                    access_list.append(a())
                else:
                    raise MetaflowException(
                        "Field '%s' of type '%s' is not supported" % (str(a), type(a))
                    )
            to_eval_expr = self._cached_expr = ".".join(access_list)
        else:
            to_eval_expr = self._cached_expr = self._config_expr
        # Evaluate the expression setting the config values as local variables
        try:
            return eval(
                to_eval_expr,
                self._globals or globals(),
                {
                    k: v if plain_flag or v is None else ConfigValue(v)
                    for k, (v, plain_flag) in flow_cls._flow_state[
                        FlowStateItems.CONFIGS
                    ].items()
                },
            )
        except NameError as e:
            raise MetaflowException(
                "Config expression '%s' could not be evaluated: %s"
                % (to_eval_expr, str(e))
            ) from e


def config_expr(expr: str) -> DelayEvaluator:
    """
    Function to allow you to use an expression involving a config parameter in
    places where it may not be directly accessible or if you want a more
    complicated expression than just a single variable.

    You can use it as follows:
      - When the config is not directly accessible:

            @project(name=config_expr("config").project.name)
            class MyFlow(FlowSpec):
                config = Config("config")
                ...

      - When you want a more complex expression:

            class MyFlow(FlowSpec):
                config = Config("config")

                @environment(vars={"foo": config_expr("config.bar.baz.lower()")})
                @step
                def start(self):
                    ...

    Parameters
    ----------
    expr : str
        Expression using the config values.
    """
    # Get globals where the expression is defined so that the user can use
    # something like `config_expr("my_func()")` in the expression.
    parent_globals = inspect.currentframe().f_back.f_globals
    return DelayEvaluator(expr, saved_globals=parent_globals)
DelayEvaluator
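An illustrative trace of the delayed-access mechanics described in the class docstring (evaluation itself is only legal inside a flow definition):

ev = config_expr("config").trainer.learning_rate  # nothing evaluated yet
# ev is a DelayEvaluator copy whose _access list is now
# ["trainer", "learning_rate"]; calling ev() later evaluates the string
# "config.trainer.learning_rate" against the flow's Config values.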
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 996503, "end": 997211 }
class ____(sgqlc.types.relay.Connection):
    """The connection type for Team."""

    __schema__ = github_schema
    __field_names__ = ("edges", "nodes", "page_info", "total_count")
    edges = sgqlc.types.Field(sgqlc.types.list_of("TeamEdge"), graphql_name="edges")
    """A list of edges."""

    nodes = sgqlc.types.Field(sgqlc.types.list_of("Team"), graphql_name="nodes")
    """A list of nodes."""

    page_info = sgqlc.types.Field(sgqlc.types.non_null(PageInfo), graphql_name="pageInfo")
    """Information to aid in pagination."""

    total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
    """Identifies the total count of items in the connection."""
TeamConnection
python
apache__airflow
airflow-core/src/airflow/api_fastapi/core_api/datamodels/task_instances.py
{ "start": 6870, "end": 7791 }
class ____(StrictBaseModel):
    """Request body for Clear Task Instances endpoint."""

    new_state: TaskInstanceState | None = None
    note: Annotated[str, StringConstraints(max_length=1000)] | None = None
    include_upstream: bool = False
    include_downstream: bool = False
    include_future: bool = False
    include_past: bool = False

    @field_validator("new_state", mode="before")
    @classmethod
    def validate_new_state(cls, ns: str | None) -> str:
        """Validate new_state."""
        valid_states = [
            vs.name.lower()
            for vs in (TaskInstanceState.SUCCESS, TaskInstanceState.FAILED, TaskInstanceState.SKIPPED)
        ]
        if ns is None:
            raise ValueError("'new_state' should not be empty")
        ns = ns.lower()
        if ns not in valid_states:
            raise ValueError(f"'{ns}' is not one of {valid_states}")
        return ns
PatchTaskInstanceBody
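A hedged sketch of the validator's behavior; it runs in "before" mode, so raw strings are lower-cased prior to enum coercion:

from pydantic import ValidationError

body = PatchTaskInstanceBody(new_state="SUCCESS")  # normalized to "success"
assert body.new_state == TaskInstanceState.SUCCESS

try:
    PatchTaskInstanceBody(new_state="running")  # not an allowed target state
except ValidationError as exc:
    print(exc)  # reports that 'running' is not one of the valid states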
python
apache__airflow
providers/amazon/src/airflow/providers/amazon/aws/operators/ecs.py
{ "start": 3048, "end": 6913 }
class ____(EcsBaseOperator):
    """
    Creates an AWS ECS cluster.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:EcsCreateClusterOperator`

    :param cluster_name: The name of your cluster. If you don't specify a name for your
        cluster, you create a cluster that's named default.
    :param create_cluster_kwargs: Extra arguments for Cluster Creation.
    :param wait_for_completion: If True, waits for creation of the cluster to complete. (default: True)
    :param waiter_delay: The amount of time in seconds to wait between attempts,
        if not set then the default waiter value will be used.
    :param waiter_max_attempts: The maximum number of attempts to be made,
        if not set then the default waiter value will be used.
    :param deferrable: If True, the operator will wait asynchronously for the job to complete.
        This implies waiting for completion. This mode requires aiobotocore module to be installed.
        (default: False)
    """

    template_fields: Sequence[str] = aws_template_fields(
        "cluster_name",
        "create_cluster_kwargs",
        "wait_for_completion",
        "deferrable",
    )

    def __init__(
        self,
        *,
        cluster_name: str,
        create_cluster_kwargs: dict | None = None,
        wait_for_completion: bool = True,
        waiter_delay: int = 15,
        waiter_max_attempts: int = 60,
        deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.cluster_name = cluster_name
        self.create_cluster_kwargs = create_cluster_kwargs or {}
        self.wait_for_completion = wait_for_completion
        self.waiter_delay = waiter_delay
        self.waiter_max_attempts = waiter_max_attempts
        self.deferrable = deferrable

    def execute(self, context: Context):
        self.log.info(
            "Creating cluster %r using the following values: %s",
            self.cluster_name,
            self.create_cluster_kwargs,
        )
        result = self.client.create_cluster(clusterName=self.cluster_name, **self.create_cluster_kwargs)
        cluster_details = result["cluster"]
        cluster_state = cluster_details.get("status")

        if cluster_state == EcsClusterStates.ACTIVE:
            # In some circumstances the ECS Cluster is created immediately,
            # and there is no reason to wait for completion.
            self.log.info("Cluster %r in state: %r.", self.cluster_name, cluster_state)
        elif self.deferrable:
            self.defer(
                trigger=ClusterActiveTrigger(
                    cluster_arn=cluster_details["clusterArn"],
                    waiter_delay=self.waiter_delay,
                    waiter_max_attempts=self.waiter_max_attempts,
                    aws_conn_id=self.aws_conn_id,
                    region_name=self.region_name,
                ),
                method_name="_complete_exec_with_cluster_desc",
                # timeout is set to ensure that if a trigger dies, the timeout does not restart
                # 60 seconds is added to allow the trigger to exit gracefully (i.e. yield TriggerEvent)
                timeout=timedelta(seconds=self.waiter_max_attempts * self.waiter_delay + 60),
            )
        elif self.wait_for_completion:
            waiter = self.hook.get_waiter("cluster_active")
            waiter.wait(
                clusters=[cluster_details["clusterArn"]],
                WaiterConfig=prune_dict(
                    {
                        "Delay": self.waiter_delay,
                        "MaxAttempts": self.waiter_max_attempts,
                    }
                ),
            )

        return cluster_details
EcsCreateClusterOperator
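A hedged DAG snippet showing typical wiring for the operator above; the DAG id, schedule, and cluster name are placeholders:

import pendulum
from airflow import DAG
from airflow.providers.amazon.aws.operators.ecs import EcsCreateClusterOperator

with DAG("ecs_demo", start_date=pendulum.datetime(2024, 1, 1), schedule=None):
    create_cluster = EcsCreateClusterOperator(
        task_id="create_cluster",
        cluster_name="demo-cluster",
        wait_for_completion=True,  # block until the cluster reaches ACTIVE
    )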
python
PyCQA__pylint
tests/functional/i/invalid/invalid_index_returned.py
{ "start": 1159, "end": 1257 }
class ____:
    """ Uninferable return value """

    __index__ = lambda self: Missing
AmbigousIndex
python
apache__thrift
lib/py/src/protocol/TBinaryProtocol.py
{ "start": 8547, "end": 9157 }
class ____(TProtocolFactory):
    def __init__(self,
                 string_length_limit=None,
                 container_length_limit=None,
                 fallback=True):
        self.string_length_limit = string_length_limit
        self.container_length_limit = container_length_limit
        self._fallback = fallback

    def getProtocol(self, trans):
        return TBinaryProtocolAccelerated(
            trans,
            string_length_limit=self.string_length_limit,
            container_length_limit=self.container_length_limit,
            fallback=self._fallback)
TBinaryProtocolAcceleratedFactory
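Hypothetical client wiring for the factory above; the host, port, and generated service stub are placeholders:

from thrift.transport import TSocket, TTransport

socket = TSocket.TSocket("localhost", 9090)
transport = TTransport.TBufferedTransport(socket)
protocol = TBinaryProtocolAcceleratedFactory(
    string_length_limit=10_000_000).getProtocol(transport)
# client = MyService.Client(protocol)  # generated stub, assumed to exist
transport.open()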
python
numba__numba
numba/cuda/deviceufunc.py
{ "start": 13132, "end": 17625 }
class ____(_BaseUFuncBuilder):
    def __init__(
        self,
        func,
        sig,
        identity=None,
        cache=False,
        targetoptions=None,
        writable_args=(),
    ):
        if targetoptions is None:
            targetoptions = {}
        if cache:
            raise TypeError("caching is not supported")
        if writable_args:
            raise TypeError("writable_args are not supported")

        # Allow nopython flag to be set.
        if not targetoptions.pop('nopython', True):
            raise TypeError("nopython flag must be True")
        # Are there any more target options?
        if targetoptions:
            opts = ', '.join([repr(k) for k in targetoptions.keys()])
            fmt = "The following target options are not supported: {0}"
            raise TypeError(fmt.format(opts))

        self.py_func = func
        self.identity = parse_identity(identity)
        self.signature = sig
        self.inputsig, self.outputsig = parse_signature(self.signature)

        # Maps from a tuple of input_dtypes to (output_dtypes, kernel)
        self.kernelmap = OrderedDict()

    @property
    def pyfunc(self):
        return self.py_func

    def add(self, sig=None):
        indims = [len(x) for x in self.inputsig]
        outdims = [len(x) for x in self.outputsig]
        args, return_type = sigutils.normalize_signature(sig)

        # It is only valid to specify types.none as a return type, or to not
        # specify the return type (where the "Python None" is the return type)
        valid_return_type = return_type in (types.none, None)
        if not valid_return_type:
            raise TypeError('guvectorized functions cannot return values: '
                            f'signature {sig} specifies {return_type} return '
                            'type')

        funcname = self.py_func.__name__
        src = expand_gufunc_template(self._kernel_template, indims, outdims,
                                     funcname, args)

        glbls = self._get_globals(sig)

        exec(src, glbls)
        fnobj = glbls['__gufunc_{name}'.format(name=funcname)]

        outertys = list(_determine_gufunc_outer_types(args, indims + outdims))
        kernel = self._compile_kernel(fnobj, sig=tuple(outertys))

        nout = len(outdims)
        dtypes = [np.dtype(str(t.dtype)) for t in outertys]
        indtypes = tuple(dtypes[:-nout])
        outdtypes = tuple(dtypes[-nout:])

        self.kernelmap[indtypes] = outdtypes, kernel

    def _compile_kernel(self, fnobj, sig):
        raise NotImplementedError

    def _get_globals(self, sig):
        raise NotImplementedError


def _determine_gufunc_outer_types(argtys, dims):
    for at, nd in zip(argtys, dims):
        if isinstance(at, types.Array):
            yield at.copy(ndim=nd + 1)
        else:
            if nd > 0:
                raise ValueError("gufunc signature mismatch: ndim>0 for scalar")
            yield types.Array(dtype=at, ndim=1, layout='A')


def expand_gufunc_template(template, indims, outdims, funcname, argtypes):
    """Expand gufunc source template
    """
    argdims = indims + outdims
    argnames = ["arg{0}".format(i) for i in range(len(argdims))]
    checkedarg = "min({0})".format(', '.join(["{0}.shape[0]".format(a)
                                              for a in argnames]))
    inputs = [_gen_src_for_indexing(aref, adims, atype)
              for aref, adims, atype in zip(argnames, indims, argtypes)]
    outputs = [_gen_src_for_indexing(aref, adims, atype)
               for aref, adims, atype in zip(argnames[len(indims):], outdims,
                                             argtypes[len(indims):])]
    argitems = inputs + outputs
    src = template.format(name=funcname, args=', '.join(argnames),
                          checkedarg=checkedarg,
                          argitems=', '.join(argitems))
    return src


def _gen_src_for_indexing(aref, adims, atype):
    return "{aref}[{sliced}]".format(aref=aref,
                                     sliced=_gen_src_index(adims, atype))


def _gen_src_index(adims, atype):
    if adims > 0:
        return ','.join(['__tid__'] + [':'] * adims)
    elif isinstance(atype, types.Array) and atype.ndim - 1 == adims:
        # Special case for 0-nd in shape-signature but
        # 1d array in type signature.
        # Slice it so that the result has the same dimension.
        return '__tid__:(__tid__ + 1)'
    else:
        return '__tid__'
DeviceGUFuncVectorize
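A small demonstration of the three indexing cases handled by _gen_src_index above; numba.core.types is the assumed import path:

from numba.core import types

arr1d = types.Array(dtype=types.float64, ndim=1, layout='A')
assert _gen_src_index(2, arr1d) == '__tid__,:,:'            # nd core dims
assert _gen_src_index(0, types.float64) == '__tid__'        # true scalar
assert _gen_src_index(0, arr1d) == '__tid__:(__tid__ + 1)'  # 0-d sig, 1-d type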
python
ray-project__ray
python/ray/train/tests/dummy_preprocessor.py
{ "start": 62, "end": 683 }
class ____(Preprocessor):
    _is_fittable = False

    def __init__(self, transform=None):
        self.id = uuid.uuid4()
        if transform is None:
            self.transform = lambda b: b
        else:
            self.transform = transform

    def transform_batch(self, batch):
        self._batch_transformed = True
        return self.transform(batch)

    def _transform_pandas(self, df):
        return df

    @property
    def has_preprocessed(self):
        return hasattr(self, "_batch_transformed")

    def __eq__(self, other_preprocessor):
        return self.id == other_preprocessor.id
DummyPreprocessor
python
spyder-ide__spyder
spyder/plugins/plots/widgets/figurebrowser.py
{ "start": 41385, "end": 45795 }
class ____(QWidget):
    """
    A widget that consists of a FigureCanvas, a side toolbar, and a context
    menu that is used to show preview of figures in the ThumbnailScrollBar.
    """

    sig_canvas_clicked = Signal(object)
    """
    This signal is emitted when the figure canvas is clicked.

    Parameters
    ----------
    figure_thumbnail: spyder.plugins.plots.widget.figurebrowser.FigureThumbnail
        The clicked figure thumbnail.
    """

    sig_remove_figure_requested = Signal(object)
    """
    This signal is emitted to request the removal of a figure thumbnail.

    Parameters
    ----------
    figure_thumbnail: spyder.plugins.plots.widget.figurebrowser.FigureThumbnail
        The figure thumbnail to remove.
    """

    sig_save_figure_requested = Signal(object, str)
    """
    This signal is emitted to request the saving of a figure thumbnail.

    Parameters
    ----------
    figure_thumbnail: spyder.plugins.plots.widget.figurebrowser.FigureThumbnail
        The figure thumbnail to save.
    format: str
        The image format to use when saving the image. One of "image/png",
        "image/jpeg" and "image/svg+xml".
    """

    sig_context_menu_requested = Signal(QPoint)
    """
    This signal is emitted to request a context menu.

    Parameters
    ----------
    point: QPoint
        The QPoint in global coordinates where the menu was requested.
    """

    def __init__(self, parent=None, background_color=None, auto_fit=True):
        super().__init__(parent)
        self.auto_fit = auto_fit
        self.scalefactor = None
        self.vscrollbar_value = 0
        self.hscrollbar_value = 0

        self.canvas = FigureCanvas(
            parent=self, background_color=background_color
        )
        self.canvas.sig_context_menu_requested.connect(
            self.sig_context_menu_requested)
        self.canvas.installEventFilter(self)
        self.setup_gui()

    def setup_gui(self):
        """Setup the main layout of the widget."""
        layout = QGridLayout(self)
        layout.setContentsMargins(0, 0, 0, 0)
        layout.addWidget(self.canvas, 0, 0, Qt.AlignCenter)
        layout.setSizeConstraint(QLayout.SizeConstraint.SetFixedSize)

    def highlight_canvas(self, highlight):
        """
        Set a colored frame around the FigureCanvas if highlight is True.
        """
        if highlight:
            # See spyder-ide/spyder#21598 for choice of styling.
            self.canvas.setStyleSheet(
                "FigureCanvas{border: 3px solid %s;}"
                % SpyderPalette.COLOR_ACCENT_3
            )
        else:
            self.canvas.setStyleSheet("FigureCanvas{}")

    def scale_canvas_size(self, max_canvas_size):
        """
        Scale this thumbnail canvas size, while respecting its associated
        figure dimension ratio.
        """
        fwidth = self.canvas.fwidth
        fheight = self.canvas.fheight
        if fheight != 0:
            if fwidth / fheight > 1:
                canvas_width = max_canvas_size
                canvas_height = canvas_width / fwidth * fheight
            else:
                canvas_height = max_canvas_size
                canvas_width = canvas_height / fheight * fwidth
            self.canvas.setFixedSize(int(canvas_width), int(canvas_height))
        self.layout().setColumnMinimumWidth(0, max_canvas_size)

    def eventFilter(self, widget, event):
        """
        A filter that is used to send a signal when the figure canvas is
        clicked.
        """
        if event.type() == QEvent.MouseButtonPress:
            if event.button() == Qt.LeftButton:
                self.sig_canvas_clicked.emit(self)
        return super().eventFilter(widget, event)

    def mouseMoveEvent(self, event):
        """
        Enable drags to reorganize thumbnails with the mouse in the scrollbar.

        Solution taken from:
        https://www.pythonguis.com/faq/pyqt-drag-drop-widgets/
        """
        if event.buttons() == Qt.LeftButton:
            # Create drag
            drag = QDrag(self)
            mime = QMimeData()
            drag.setMimeData(mime)

            # Show pixmap of the thumbnail while it's being moved.
            pixmap = QPixmap(self.size())
            self.render(pixmap)
            drag.setPixmap(pixmap)

            # Execute drag's event loop
            drag.exec_(Qt.MoveAction)
FigureThumbnail
python
getsentry__sentry
src/sentry/sentry_apps/models/platformexternalissue.py
{ "start": 302, "end": 1511 }
class ____(Model):
    __relocation_scope__ = RelocationScope.Excluded

    group = FlexibleForeignKey("sentry.Group", db_constraint=False, db_index=False)
    project = FlexibleForeignKey("sentry.Project", null=True, db_constraint=False)
    # external service that's linked to the sentry issue
    service_type = models.CharField(max_length=64)
    display_name = models.TextField()
    web_url = models.URLField()
    date_added = models.DateTimeField(default=timezone.now)

    class Meta:
        app_label = "sentry"
        db_table = "sentry_platformexternalissue"
        unique_together = (("group", "service_type"),)

    __repr__ = sane_repr("group_id", "service_type", "display_name", "web_url")

    @classmethod
    def get_annotations_for_group_list(cls, group_list):
        external_issues = cls.objects.filter(
            group_id__in=[group.id for group in group_list]
        )

        # group annotations by group id
        annotations_by_group_id = defaultdict(list)
        for ei in external_issues:
            annotation = {"url": ei.web_url, "displayName": ei.display_name}
            annotations_by_group_id[ei.group_id].append(annotation)

        return annotations_by_group_id
PlatformExternalIssue
python
Delgan__loguru
tests/test_add_option_enqueue.py
{ "start": 827, "end": 8480 }
class ____:
    def write(self, message):
        if "fail" in message.record["extra"]:
            raise RuntimeError("You asked me to fail...")
        print(message, end="")


def test_enqueue():
    x = []

    def sink(message):
        time.sleep(0.1)
        x.append(message)

    logger.add(sink, format="{message}", enqueue=True)
    logger.debug("Test")
    assert len(x) == 0
    logger.complete()
    assert len(x) == 1
    assert x[0] == "Test\n"


def test_enqueue_with_exception():
    x = []

    def sink(message):
        time.sleep(0.1)
        x.append(message)

    logger.add(sink, format="{message}", enqueue=True)

    try:
        1 / 0  # noqa: B018
    except ZeroDivisionError:
        logger.exception("Error")

    assert len(x) == 0
    logger.complete()
    assert len(x) == 1
    lines = x[0].splitlines()

    assert lines[0] == "Error"
    assert lines[-1] == "ZeroDivisionError: division by zero"


def test_caught_exception_queue_put(writer, capsys):
    logger.add(writer, enqueue=True, catch=True, format="{message}")

    logger.info("It's fine")
    logger.bind(broken=NotPicklable()).info("Bye bye...")
    logger.info("It's fine again")
    logger.remove()

    out, err = capsys.readouterr()
    lines = err.strip().splitlines()
    assert writer.read() == "It's fine\nIt's fine again\n"
    assert out == ""
    assert lines[0] == "--- Logging error in Loguru Handler #0 ---"
    assert re.match(r"Record was: \{.*Bye bye.*\}", lines[1])
    assert "PicklingError: You shall not serialize me!" in err
    assert lines[-1] == "--- End of logging error ---"


def test_caught_exception_queue_get(writer, capsys):
    logger.add(writer, enqueue=True, catch=True, format="{message}")

    logger.info("It's fine")
    logger.bind(broken=NotUnpicklable()).info("Bye bye...")
    logger.info("It's fine again")
    logger.remove()

    out, err = capsys.readouterr()
    lines = err.strip().splitlines()
    assert writer.read() == "It's fine\nIt's fine again\n"
    assert out == ""
    assert lines[0] == "--- Logging error in Loguru Handler #0 ---"
    assert lines[1] == "Record was: None"
    assert "UnpicklingError: You shall not de-serialize me!" in err
    assert lines[-1] == "--- End of logging error ---"


def test_caught_exception_sink_write(capsys):
    logger.add(NotWritable(), enqueue=True, catch=True, format="{message}")

    logger.info("It's fine")
    logger.bind(fail=True).info("Bye bye...")
    logger.info("It's fine again")
    logger.remove()

    out, err = capsys.readouterr()
    lines = err.strip().splitlines()
    assert out == "It's fine\nIt's fine again\n"
    assert lines[0] == "--- Logging error in Loguru Handler #0 ---"
    assert re.match(r"Record was: \{.*Bye bye.*\}", lines[1])
    assert "RuntimeError: You asked me to fail..." in err
    assert lines[-1] == "--- End of logging error ---"


def test_not_caught_exception_queue_put(writer, capsys):
    logger.add(writer, enqueue=True, catch=False, format="{message}")

    logger.info("It's fine")

    with pytest.raises(pickle.PicklingError, match=r"You shall not serialize me!"):
        logger.bind(broken=NotPicklable()).info("Bye bye...")

    logger.remove()

    out, err = capsys.readouterr()
    assert writer.read() == "It's fine\n"
    assert out == ""
    assert err == ""


def test_not_caught_exception_queue_get(writer, capsys):
    logger.add(writer, enqueue=True, catch=False, format="{message}")

    with default_threading_excepthook():
        logger.info("It's fine")
        logger.bind(broken=NotUnpicklable()).info("Bye bye...")
        logger.info("It's fine again")
        logger.remove()

    out, err = capsys.readouterr()
    lines = err.strip().splitlines()
    assert writer.read() == "It's fine\nIt's fine again\n"
    assert out == ""
    assert lines[0] == "--- Logging error in Loguru Handler #0 ---"
    assert lines[1] == "Record was: None"
    assert "UnpicklingError: You shall not de-serialize me!" in err
    assert lines[-1] == "--- End of logging error ---"


def test_not_caught_exception_sink_write(capsys):
    logger.add(NotWritable(), enqueue=True, catch=False, format="{message}")

    with default_threading_excepthook():
        logger.info("It's fine")
        logger.bind(fail=True).info("Bye bye...")
        logger.info("It's fine again")
        logger.remove()

    out, err = capsys.readouterr()
    lines = err.strip().splitlines()
    assert out == "It's fine\nIt's fine again\n"
    assert lines[0] == "--- Logging error in Loguru Handler #0 ---"
    assert re.match(r"Record was: \{.*Bye bye.*\}", lines[1])
    assert "RuntimeError: You asked me to fail..." in err
    assert lines[-1] == "--- End of logging error ---"


def test_not_caught_exception_sink_write_then_complete(capsys):
    logger.add(NotWritable(), enqueue=True, catch=False, format="{message}")

    with default_threading_excepthook():
        logger.bind(fail=True).info("Bye bye...")
        logger.complete()
        logger.complete()  # Called twice to ensure it's re-usable.
        logger.remove()

    out, err = capsys.readouterr()
    lines = err.strip().splitlines()
    assert out == ""
    assert lines[0] == "--- Logging error in Loguru Handler #0 ---"
    assert re.match(r"Record was: \{.*Bye bye.*\}", lines[1])
    assert "RuntimeError: You asked me to fail..." in err
    assert lines[-1] == "--- End of logging error ---"


def test_not_caught_exception_queue_get_then_complete(writer, capsys):
    logger.add(writer, enqueue=True, catch=False, format="{message}")

    with default_threading_excepthook():
        logger.bind(broken=NotUnpicklable()).info("Bye bye...")
        logger.complete()
        logger.complete()
        logger.remove()

    out, err = capsys.readouterr()
    lines = err.strip().splitlines()
    assert writer.read() == ""
    assert out == ""
    assert lines[0] == "--- Logging error in Loguru Handler #0 ---"
    assert lines[1] == "Record was: None"
    assert "UnpicklingError: You shall not de-serialize me!" in err
    assert lines[-1] == "--- End of logging error ---"


def test_wait_for_all_messages_enqueued(capsys):
    def slow_sink(message):
        time.sleep(0.01)
        sys.stderr.write(message)

    logger.add(slow_sink, enqueue=True, catch=False, format="{message}")

    for i in range(10):
        logger.info(i)

    logger.complete()

    out, err = capsys.readouterr()
    assert out == ""
    assert err == "".join("%d\n" % i for i in range(10))


@pytest.mark.parametrize("exception_value", [NotPicklable(), NotPicklableTypeError()])
def test_logging_not_picklable_exception(exception_value):
    exception = None

    def sink(message):
        nonlocal exception
        exception = message.record["exception"]

    logger.add(sink, enqueue=True, catch=False)

    try:
        raise ValueError(exception_value)
    except Exception:
        logger.exception("Oups")

    logger.remove()

    type_, value, traceback_ = exception
    assert type_ is ValueError
    assert value is None
    assert traceback_ is None


@pytest.mark.parametrize("exception_value", [NotUnpicklable(), NotUnpicklableTypeError()])
def test_logging_not_unpicklable_exception(exception_value):
    exception = None

    def sink(message):
        nonlocal exception
        exception = message.record["exception"]

    logger.add(sink, enqueue=True, catch=False)

    try:
        raise ValueError(exception_value)
    except Exception:
        logger.exception("Oups")

    logger.remove()

    type_, value, traceback_ = exception
    assert type_ is ValueError
    assert value is None
    assert traceback_ is None
NotWritable
python
pandas-dev__pandas
pandas/io/sql.py
{ "start": 54379, "end": 56341 }
class ____(BaseEngine): def __init__(self) -> None: import_optional_dependency( "sqlalchemy", extra="sqlalchemy is required for SQL support." ) def insert_records( self, table: SQLTable, con, frame, name: str, index: bool | str | list[str] | None = True, schema=None, chunksize: int | None = None, method=None, **engine_kwargs, ) -> int | None: from sqlalchemy import exc try: return table.insert(chunksize=chunksize, method=method) except exc.StatementError as err: # GH34431 # https://stackoverflow.com/a/67358288/6067848 msg = r"""(\(1054, "Unknown column 'inf(e0)?' in 'field list'"\))(?# )|inf can not be used with MySQL""" err_text = str(err.orig) if re.search(msg, err_text): raise ValueError("inf cannot be used with MySQL") from err raise err def get_engine(engine: str) -> BaseEngine: """return our implementation""" if engine == "auto": engine = get_option("io.sql.engine") if engine == "auto": # try engines in this order engine_classes = [SQLAlchemyEngine] error_msgs = "" for engine_class in engine_classes: try: return engine_class() except ImportError as err: error_msgs += "\n - " + str(err) raise ImportError( "Unable to find a usable engine; " "tried using: 'sqlalchemy'.\n" "A suitable version of " "sqlalchemy is required for sql I/O " "support.\n" "Trying to import the above resulted in these errors:" f"{error_msgs}" ) if engine == "sqlalchemy": return SQLAlchemyEngine() raise ValueError("engine must be one of 'auto', 'sqlalchemy'")
SQLAlchemyEngine
python
pytorch__pytorch
test/distributed/pipelining/test_transformer.py
{ "start": 365, "end": 714 }
class ____(torch.nn.Module): def __init__(self, d_hid): super().__init__() self.net1 = torch.nn.Linear(d_hid, d_hid) self.relu = torch.nn.ReLU() self.net2 = torch.nn.Linear(d_hid, d_hid) def forward(self, x): x = self.net1(x) x = self.relu(x) x = self.net2(x) return x
MLPModule
python
doocs__leetcode
solution/1000-1099/1013.Partition Array Into Three Parts With Equal Sum/Solution.py
{ "start": 0, "end": 310 }
class ____: def canThreePartsEqualSum(self, arr: List[int]) -> bool: s, mod = divmod(sum(arr), 3) if mod: return False cnt = t = 0 for x in arr: t += x if t == s: cnt += 1 t = 0 return cnt >= 3
Solution
python
getsentry__sentry
src/sentry/snuba/sessions_v2.py
{ "start": 6738, "end": 6932 }
class ____(Protocol): def get_snuba_columns(self) -> list[str]: ... def get_snuba_groupby(self) -> list[str]: ... def get_keys_for_row(self, row) -> list[tuple[str, str]]: ...
_GroupBy
python
pypa__warehouse
tests/unit/legacy/api/test_pypi.py
{ "start": 2197, "end": 3288 }
class ____: def test_browse(self, db_request): classifier = ClassifierFactory.create(classifier="foo :: bar") db_request.params = {"c": str(classifier.id)} db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/the/path") result = pypi.browse(db_request) assert isinstance(result, HTTPMovedPermanently) assert result.headers["Location"] == "/the/path" assert result.status_code == 301 assert db_request.route_path.calls == [ pretend.call("search", _query={"c": classifier.classifier}) ] def test_browse_no_id(self): request = pretend.stub(params={}) with pytest.raises(HTTPNotFound): pypi.browse(request) def test_browse_bad_id(self, db_request): db_request.params = {"c": "99999"} with pytest.raises(HTTPNotFound): pypi.browse(db_request) def test_browse_invalid_id(self, request): request = pretend.stub(params={"c": '7"'}) with pytest.raises(HTTPNotFound): pypi.browse(request)
TestBrowse
python
numba__llvmlite
llvmlite/binding/value.py
{ "start": 12652, "end": 12881 }
class ____(_AttributeIterator): def _dispose(self): self._capi.LLVMPY_DisposeAttributeListIter(self) def _next(self): return ffi.ret_bytes(ffi.lib.LLVMPY_AttributeListIterNext(self))
_AttributeListIterator
python
dagster-io__dagster
python_modules/dagster-pipes/dagster_pipes/__init__.py
{ "start": 15408, "end": 16082 }
class ____(ABC): @abstractmethod @contextmanager def load_context(self, params: PipesParams) -> Iterator[PipesContextData]: """A `@contextmanager` that loads context data injected by the orchestration process. This method should read and yield the context data from the location specified by the passed in `PipesParams`. Args: params (PipesParams): The params provided by the context injector in the orchestration process. Yields: PipesContextData: The context data. """ T_MessageChannel = TypeVar("T_MessageChannel", bound="PipesMessageWriterChannel")
PipesContextLoader
python
google__jax
tests/name_stack_test.py
{ "start": 8826, "end": 21810 }
class ____(jtu.JaxTestCase): def test_while_loop_body_should_not_have_name_stack(self): @jax.named_scope('foo') def f(x): @jax.named_scope('bar') def body(x): return x + 1 @jax.named_scope('bar_cond') def cond(x): return x < 5 return lax.while_loop(cond, body, x) jaxpr = jax.make_jaxpr(f)(0) self.assertEqual(str(jaxpr.eqns[0].source_info.name_stack), 'foo') self.assertEqual(str( jaxpr.eqns[0].params['body_jaxpr'].eqns[0].source_info.name_stack), 'bar') self.assertEqual(str( jaxpr.eqns[0].params['cond_jaxpr'].eqns[0].source_info.name_stack), 'bar_cond') hlo_text = _get_hlo(f)(1.) self.assertIn('foo/while/body/bar', hlo_text) self.assertIn('foo/while/cond/bar_cond', hlo_text) def test_vmap_of_while_loop_should_transform_name_stack(self): @jax.vmap @jax.named_scope('foo') def f(x): @jax.named_scope('bar') def body(x): return x + 1 @jax.named_scope('bar_cond') def cond(x): return x < 5 return lax.while_loop(cond, body, x) jaxpr = jax.make_jaxpr(f)(jnp.arange(2)) self.assertEqual(str(jaxpr.eqns[0].source_info.name_stack), 'vmap(foo)') self.assertEqual(str( jaxpr.eqns[0].params['body_jaxpr'].eqns[0].source_info.name_stack), 'bar') self.assertEqual(str( jaxpr.eqns[0].params['cond_jaxpr'].eqns[0].source_info.name_stack), 'bar_cond') hlo_text = _get_hlo(f)(jnp.arange(2.)) self.assertIn('vmap(foo)/while/body/bar/add', hlo_text) self.assertIn('vmap(foo)/while/cond/bar_cond/lt', hlo_text) def test_jvp_of_while_loop_transforms_name_stack(self): @jax.named_scope('foo') def f(x): @jax.named_scope('bar') def body(x): return x + 1. @jax.named_scope('bar_cond') def cond(x): return x < 5. return lax.while_loop(cond, body, x) g = lambda x, t: jax.jvp(f, (x,), (t,)) jaxpr = jax.make_jaxpr(g)(1., 1.) self.assertEqual(str(jaxpr.eqns[0].source_info.name_stack), 'jvp(foo)') self.assertEqual(str( jaxpr.eqns[0].params['body_jaxpr'].eqns[0].source_info.name_stack), 'bar') self.assertEqual(str( jaxpr.eqns[0].params['cond_jaxpr'].eqns[0].source_info.name_stack), 'bar_cond') hlo_text = _get_hlo(g)(1., 1.) self.assertIn('jvp(foo)/while/body/bar/add', hlo_text) self.assertIn('jvp(foo)/while/cond/bar_cond/lt', hlo_text) def test_vmap_of_jvp_of_while_loop_transforms_name_stack(self): @jax.named_scope('foo') def f(x): @jax.named_scope('bar') def body(x): return x + 1. @jax.named_scope('bar_cond') def cond(x): return x < 5. 
return lax.while_loop(cond, body, x) g = jax.vmap(lambda x, t: jax.jvp(f, (x,), (t,))) jaxpr = jax.make_jaxpr(g)(jnp.arange(2.), jnp.ones(2)) self.assertEqual(str(jaxpr.eqns[0].source_info.name_stack), 'vmap(jvp(foo))') self.assertEqual(str( jaxpr.eqns[0].params['body_jaxpr'].eqns[0].source_info.name_stack), 'bar') self.assertEqual(str( jaxpr.eqns[0].params['cond_jaxpr'].eqns[0].source_info.name_stack), 'bar_cond') hlo_text = _get_hlo(g)(jnp.arange(2.), jnp.ones(2)) self.assertIn('vmap(jvp(foo))/while/body/bar/add', hlo_text) self.assertIn('vmap(jvp(foo))/while/body_pred/bar_cond', hlo_text) def test_cond_body_should_not_have_name_stack(self): @jax.named_scope('foo') def f(x, y): @jax.named_scope('true') def true_fn(x): return x + 1 @jax.named_scope('false') def false_fn(x): return x - 1 return lax.cond(y, true_fn, false_fn, x) jaxpr = jax.make_jaxpr(f)(0, True) for eqn in jaxpr.eqns: self.assertEqual(str(eqn.source_info.name_stack), 'foo') if eqn.primitive is lax.cond_p: self.assertEqual(str( eqn.params['branches'][0].eqns[0].source_info.name_stack), 'false') self.assertEqual(str( eqn.params['branches'][1].eqns[0].source_info.name_stack), 'true') hlo_text = _get_hlo(f)(1, True) self.assertIn('foo/cond/branch_0_fun/false/sub', hlo_text) self.assertIn('foo/cond/branch_1_fun/true/add', hlo_text) def test_vmap_of_cond_should_transform_name_stack(self): @jax.named_scope('foo') @functools.partial(jax.vmap, in_axes=(0, None)) def f(x, y): @jax.named_scope('true') def true_fn(x): return x + 1 @jax.named_scope('false') def false_fn(x): return x - 1 return lax.cond(y, true_fn, false_fn, x) jaxpr = jax.make_jaxpr(f)(jnp.arange(2), True) for eqn in jaxpr.eqns: self.assertIn('foo', str(eqn.source_info.name_stack)) if eqn.primitive is lax.cond_p: self.assertEqual(str( eqn.params['branches'][0].eqns[0].source_info.name_stack), 'false') self.assertEqual(str( eqn.params['branches'][1].eqns[0].source_info.name_stack), 'true') hlo_text = _get_hlo(f)(jnp.arange(2.), True) self.assertIn('foo/vmap()/cond/branch_0_fun/false/sub', hlo_text) self.assertIn('foo/vmap()/cond/branch_1_fun/true/add', hlo_text) def test_jvp_of_cond_transforms_name_stack(self): @jax.named_scope('foo') def f(x, y): @jax.named_scope('true') def true_fn(x): return x + 1 @jax.named_scope('false') def false_fn(x): return x - 1 return lax.cond(y, true_fn, false_fn, x) f_ = lambda x: jax.jit(f)(x, True) g = lambda x, t: jax.jvp(f_, (x,), (t,)) jaxpr = jax.make_jaxpr(g)(jnp.arange(2.), jnp.ones(2)) jaxpr_param = 'jaxpr' call_jaxpr = jaxpr.jaxpr.eqns[0].params[jaxpr_param] self.assertEqual(str(call_jaxpr.eqns[1].source_info.name_stack), 'foo') self.assertEqual(str( call_jaxpr.eqns[1].params['branches'][0].eqns[0].source_info.name_stack), 'false') self.assertEqual(str( call_jaxpr.eqns[1].params['branches'][1].eqns[0].source_info.name_stack), 'true') hlo_text = _get_hlo(g)(jnp.arange(2.), jnp.ones(2)) self.assertIn('jvp(jit(f))', hlo_text) self.assertIn('foo/cond/branch_0_fun/false/sub', hlo_text) self.assertIn('foo/cond/branch_1_fun/true/add', hlo_text) def test_vmap_of_jvp_of_cond_transforms_name_stack(self): @jax.named_scope('foo') def f(x, y): @jax.named_scope('true') def true_fn(x): return x + 1 @jax.named_scope('false') def false_fn(x): return x - 1 return lax.cond(y, true_fn, false_fn, x) f_ = lambda x: jax.jit(f)(x, True) g = jax.vmap(lambda x, t: jax.jvp(f_, (x,), (t,))) jaxpr = jax.make_jaxpr(g)(jnp.arange(2.), jnp.ones(2)) jaxpr_param = 'jaxpr' call_jaxpr = jaxpr.jaxpr.eqns[0].params[jaxpr_param] self.assertEqual(str( 
call_jaxpr.eqns[1].params['branches'][0].eqns[0].source_info.name_stack), 'false') self.assertEqual(str( call_jaxpr.eqns[1].params['branches'][1].eqns[0].source_info.name_stack), 'true') hlo_text = _get_hlo(g)(jnp.arange(2.), jnp.ones(2)) self.assertIn('vmap(jvp(jit(f)))', hlo_text) self.assertIn('foo/cond/branch_0_fun/false/sub', hlo_text) self.assertIn('foo/cond/branch_1_fun/true/add', hlo_text) def test_grad_of_cond_transforms_name_stack(self): @jax.value_and_grad @jax.named_scope('foo') def f(x, y): @jax.named_scope('true') def true_fn(x): return x * x * 2. @jax.named_scope('false') def false_fn(x): return x / jnp.square(x) return lax.cond(y, true_fn, false_fn, x) jaxpr = jax.make_jaxpr(f)(1., True) self.assertEqual(str(jaxpr.eqns[1].source_info.name_stack), 'jvp(foo)') self.assertEqual(str(jaxpr.eqns[2].source_info.name_stack), 'transpose(jvp(foo))') hlo_text = _get_hlo(f)(1., True) self.assertIn( 'jvp(foo)/cond/branch_0_fun/false/div', hlo_text) self.assertIn( 'jvp(foo)/cond/branch_1_fun/true/mul', hlo_text) self.assertIn( 'transpose(jvp(foo))/cond/branch_0_fun/false/div', hlo_text) self.assertIn( 'transpose(jvp(foo))/cond/branch_1_fun/true/mul', hlo_text) def test_vmap_of_grad_of_cond_transforms_name_stack(self): @functools.partial(jax.vmap, in_axes=(0, None)) @jax.value_and_grad @jax.named_scope('foo') def f(x, y): @jax.named_scope('true') def true_fn(x): return x * x * 2. @jax.named_scope('false') def false_fn(x): return x / x / 2. return lax.cond(y, true_fn, false_fn, x) jaxpr = jax.make_jaxpr(f)(jnp.arange(2.), True) self.assertEqual(str(jaxpr.eqns[1].source_info.name_stack), 'vmap(jvp(foo))') self.assertEqual(str(jaxpr.eqns[2].source_info.name_stack), 'vmap(transpose(jvp(foo)))') hlo_text = _get_hlo(f)(jnp.arange(2.), True) self.assertIn( 'vmap(jvp(foo))/cond/branch_0_fun/false/div', hlo_text) self.assertIn( 'vmap(jvp(foo))/cond/branch_1_fun/true/mul', hlo_text) self.assertIn( 'vmap(transpose(jvp(foo)))/cond/branch_0_fun/false/div', hlo_text) self.assertIn( 'vmap(transpose(jvp(foo)))/cond/branch_1_fun/true/mul', hlo_text) def test_scan_body_should_not_have_name_stack(self): @jax.named_scope('foo') def f(x): @jax.named_scope('scan_body') def body(carry, x): return carry + x, carry + x return lax.scan(body, x, jnp.arange(5, dtype='float32')) jaxpr = jax.make_jaxpr(f)(jnp.float32(1)) self.assertEqual(str(jaxpr.eqns[0].source_info.name_stack), 'foo') self.assertEqual(str( jaxpr.eqns[1].params['jaxpr'].eqns[0].source_info.name_stack), 'scan_body') hlo_text = _get_hlo(f)(1.)
self.assertIn('foo/while/body', hlo_text) self.assertIn('scan_body/add', hlo_text) def test_vmap_of_scan_should_transform_stack(self): @jax.vmap @jax.named_scope('foo') def f(x): @jax.named_scope('scan_body') def body(carry, x): return carry + x, carry + x return lax.scan(body, x, jnp.arange(8.)) jaxpr = jax.make_jaxpr(f)(jnp.arange(2.)) self.assertEqual(str(jaxpr.eqns[0].source_info.name_stack), 'vmap(foo)') self.assertEqual(str( jaxpr.eqns[1].params['jaxpr'].eqns[0].source_info.name_stack), 'scan_body') hlo_text = _get_hlo(f)(jnp.arange(2.)) self.assertIn('vmap(foo)/while/body', hlo_text) self.assertIn('scan_body/add', hlo_text) def test_jvp_of_scan_should_transform_stack(self): @jax.named_scope('foo') def f(x): @jax.named_scope('scan_body') def body(carry, x): return carry + x, carry + x return lax.scan(body, x, jnp.arange(8, dtype='float32')) g = lambda x, t: jax.jvp(f, (x,), (t,)) jaxpr = jax.make_jaxpr(g)(jnp.float32(1), jnp.float32(1)) self.assertEqual(str(jaxpr.eqns[0].source_info.name_stack), 'jvp(foo)') self.assertEqual(str( jaxpr.eqns[1].params['jaxpr'].eqns[0].source_info.name_stack), 'scan_body') hlo_text = _get_hlo(g)(1., 1.) self.assertIn('jvp(foo)/while/body', hlo_text) self.assertIn('scan_body/add', hlo_text) def test_grad_of_scan_should_transform_stack(self): @jax.value_and_grad @jax.named_scope('foo') def f(x): @jax.named_scope('scan_body') def body(carry, x): return 2 * carry * x, carry + x return lax.scan(body, x, jnp.arange(8., dtype='float32'))[0] jaxpr = jax.make_jaxpr(f)(jnp.float32(2)) self.assertEqual(str(jaxpr.eqns[1].source_info.name_stack), 'jvp(foo)') self.assertEqual(str(jaxpr.eqns[2].source_info.name_stack), 'transpose(jvp(foo))') self.assertEqual(str( jaxpr.eqns[1].params['jaxpr'].eqns[0].source_info.name_stack), 'scan_body') hlo_text = _get_hlo(f)(1.) self.assertIn('jvp(foo)/while/body', hlo_text) self.assertIn('scan_body/mul', hlo_text) self.assertIn('transpose(jvp(foo))/while/body/', hlo_text) def test_vmap_of_grad_of_scan_should_transform_stack(self): @jax.vmap @jax.value_and_grad @jax.named_scope('foo') def f(x): @jax.named_scope('scan_body') def body(carry, x): return carry * x, carry + x return lax.scan(body, x, jnp.arange(8.))[0] jaxpr = jax.make_jaxpr(f)(jnp.arange(2.)) self.assertEqual(str(jaxpr.eqns[1].source_info.name_stack), 'vmap(jvp(foo))') self.assertEqual(str(jaxpr.eqns[2].source_info.name_stack), 'vmap(transpose(jvp(foo)))') self.assertEqual(str( jaxpr.eqns[1].params['jaxpr'].eqns[0].source_info.name_stack), 'scan_body') hlo_text = _get_hlo(f)(jnp.arange(2.)) self.assertIn('vmap(jvp(foo))/while/body', hlo_text) self.assertIn('scan_body/mul', hlo_text) self.assertIn('vmap(transpose(jvp(foo)))/while/body', hlo_text) if __name__ == '__main__': absltest.main(testLoader=jtu.JaxTestLoader())
NameStackControlFlowTest
python
kamyu104__LeetCode-Solutions
Python/max-points-on-a-line.py
{ "start": 171, "end": 1060 }
class ____(object): def maxPoints(self, points): """ :type points: List[Point] :rtype: int """ max_points = 0 for i, start in enumerate(points): slope_count, same = collections.defaultdict(int), 1 for j in xrange(i + 1, len(points)): end = points[j] if start.x == end.x and start.y == end.y: same += 1 else: slope = float("inf") if start.x - end.x != 0: slope = (start.y - end.y) * 1.0 / (start.x - end.x) slope_count[slope] += 1 current_max = same for slope in slope_count: current_max = max(current_max, slope_count[slope] + same) max_points = max(max_points, current_max) return max_points
Solution
python
pytorch__pytorch
torch/utils/_sympy/printers.py
{ "start": 14045, "end": 25505 }
class ____(ExprPrinter): def _print_Integer(self, expr: sympy.Expr) -> str: suffix = "LL" if sys.platform in ["darwin", "win32"] else "L" i = int(expr) if i > INDEX_TYPE_MAX or i < INDEX_TYPE_MIN: raise OverflowError(f"{i} too big to convert to {INDEX_TYPE}") elif i == INDEX_TYPE_MIN: if i != (-1) << 63: raise AssertionError("unexpected minimum index type value") # Writing -9223372036854775808L makes the value overflow # as it is parsed as -(9223372036854775808L) by the C/C++ compiler return f"(-1{suffix} << 63)" return f"{i}{suffix}" def _print_Where(self, expr: sympy.Expr) -> str: c, p, q = ( self.parenthesize(arg, PRECEDENCE["Atom"] - 0.5) for arg in expr.args ) return f"{c} ? {p} : {q}" def _print_Piecewise(self, expr: sympy.Expr) -> str: # Convert Piecewise(expr_cond_pairs) to nested ternary operators # Piecewise((e1, c1), (e2, c2), ..., (eN, cN)) # becomes: c1 ? e1 : (c2 ? e2 : (... : eN)) result: str | None = None for expr_i, cond_i in reversed(expr.args): expr_str = self.parenthesize(expr_i, PRECEDENCE["Atom"] - 0.5) if cond_i == True: # noqa: E712 # This is the default case result = expr_str else: cond_str = self.parenthesize(cond_i, PRECEDENCE["Atom"] - 0.5) if result is None: result = expr_str else: result = f"{cond_str} ? {expr_str} : {result}" return f"({result})" if result else "0" def _print_ModularIndexing(self, expr: sympy.Expr) -> str: x, div, mod = expr.args x = self.doprint(x) if div != 1: div = self.doprint(div) if expr.is_integer: x = f"c10::div_floor_integer(static_cast<int64_t>({x}), static_cast<int64_t>({div}))" else: x = f"c10::div_floor_floating(static_cast<double>({x}), static_cast<double>({div}))" mod = self.doprint(mod) return f"(static_cast<{INDEX_TYPE}>({x}) % static_cast<{INDEX_TYPE}>({mod}))" def _print_FloorDiv(self, expr: sympy.Expr) -> str: x, div = expr.args x = self.doprint(x) div = self.doprint(div) if expr.is_integer: return f"c10::div_floor_integer(static_cast<int64_t>({x}), static_cast<int64_t>({div}))" return f"c10::div_floor_floating(static_cast<double>({x}), static_cast<double>({div}))" def _print_floor(self, expr: sympy.Expr) -> str: if len(expr.args) != 1: raise AssertionError("floor expects exactly one argument") r = f"std::floor({self._print(expr.args[0])})" return f"static_cast<{INDEX_TYPE}>({r})" if expr.is_integer else r def _print_FloorToInt(self, expr: sympy.Expr) -> str: if len(expr.args) != 1: raise AssertionError("FloorToInt expects exactly one argument") r = f"std::floor({self._print(expr.args[0])})" return f"static_cast<{INDEX_TYPE}>({r})" if expr.is_integer else r def _print_TruncToInt(self, expr: sympy.Expr) -> str: if len(expr.args) != 1: raise AssertionError("TruncToInt expects exactly one argument") r = f"std::trunc({self._print(expr.args[0])})" return f"static_cast<{INDEX_TYPE}>({r})" def _print_TruncToFloat(self, expr: sympy.Expr) -> str: if len(expr.args) != 1: raise AssertionError("TruncToFloat expects exactly one argument") return f"std::trunc({self._print(expr.args[0])})" def _print_ToFloat(self, expr: sympy.Expr) -> str: if len(expr.args) != 1: raise AssertionError("ToFloat expects exactly one argument") return f"static_cast<double>({self._print(expr.args[0])})" def _print_PythonMod(self, expr: sympy.Expr) -> str: x, div = expr.args x = self.doprint(x) div = self.doprint(div) return f"c10::div_mod({x}, {div})" def _print_IntTrueDiv(self, expr: sympy.Expr) -> str: lhs, rhs = expr.args # TODO: This is only accurate up to 2**53 return f"static_cast<double>({self._print(lhs)}) / static_cast<double>({self._print(rhs)})" # 
TODO: PowByNatural: we need to implement our own int-int pow. Do NOT # use std::pow, that operates on floats def _print_PowByNatural(self, expr: sympy.Expr) -> str: # Implement the special-case of 2**x for now base, exp = expr.args if base == 2: return f"(1 << ({self._print(exp)}))" raise NotImplementedError( f"_print_PowByNatural not implemented for {type(self)}" ) def _print_FloatPow(self, expr: sympy.Expr) -> str: base, exp = expr.args return f"std::pow({self._print(base)}, {self._print(exp)})" def _print_Pow(self, expr: sympy.Expr) -> str: # Uses float constants to perform FP div base, exp = expr.args if exp == 0.5 or exp == -0.5: base = self._print(base) return f"std::sqrt({base})" if exp == 0.5 else f"1.0/std::sqrt({base})" if exp.is_integer: exp = int(exp) if exp > 0: r = self.stringify([base] * exp, "*", PRECEDENCE["Mul"]) elif exp < -1: r = ( "1.0/(" + self.stringify([base] * abs(exp), "*", PRECEDENCE["Mul"]) + ")" ) elif exp == -1: r = "1.0/" + self._print(base) else: # exp == 0 r = "1.0" return f"static_cast<{INDEX_TYPE}>({r})" if expr.is_integer else r else: # TODO: float vs double return f"std::pow({base}, {float(exp)})" def _print_Rational(self, expr: sympy.Expr) -> str: # Uses float constants to perform FP div if expr.q == 1: r = f"{expr.p}" else: r = f"{expr.p}.0/{expr.q}.0" return f"static_cast<{INDEX_TYPE}>({r})" if expr.is_integer else r def _print_ceiling(self, expr: sympy.Expr) -> str: if len(expr.args) != 1: raise AssertionError("ceiling expects exactly one argument") r = f"std::ceil({self._print(expr.args[0])})" return f"static_cast<{INDEX_TYPE}>({r})" if expr.is_integer else r def _print_CeilToInt(self, expr: sympy.Expr) -> str: if len(expr.args) != 1: raise AssertionError("CeilToInt expects exactly one argument") r = f"std::ceil({self._print(expr.args[0])})" return f"static_cast<{INDEX_TYPE}>({r})" if expr.is_integer else r def _print_Min(self, expr: sympy.Expr) -> str: args = [self._print(a) for a in expr.args] if len(args) == 2: return f"std::min(static_cast<{INDEX_TYPE}>({args[0]}), static_cast<{INDEX_TYPE}>({args[1]}))" else: # Initializer list overload il = "{" + ", ".join(args) + "}" return f"std::min<{INDEX_TYPE}>({il})" def _print_Max(self, expr: sympy.Expr) -> str: args = [self._print(a) for a in expr.args] if len(args) == 2: return f"std::max(static_cast<{INDEX_TYPE}>({args[0]}), static_cast<{INDEX_TYPE}>({args[1]}))" else: # Initializer list overload il = "{" + ", ".join(args) + "}" return f"std::max<{INDEX_TYPE}>({il})" def _print_Abs(self, expr: sympy.Expr) -> str: if len(expr.args) != 1: raise AssertionError("Abs expects exactly one argument") return f"std::abs({self._print(expr.args[0])})" def _print_OpaqueUnaryFn_cos(self, expr: sympy.Expr) -> str: if len(expr.args) != 1: raise AssertionError("cos expects exactly one argument") return f"std::cos({self._print(expr.args[0])})" def _print_OpaqueUnaryFn_cosh(self, expr: sympy.Expr) -> str: if len(expr.args) != 1: raise AssertionError("cosh expects exactly one argument") return f"std::cosh({self._print(expr.args[0])})" def _print_OpaqueUnaryFn_acos(self, expr: sympy.Expr) -> str: if len(expr.args) != 1: raise AssertionError("acos expects exactly one argument") return f"std::acos({self._print(expr.args[0])})" def _print_OpaqueUnaryFn_sin(self, expr: sympy.Expr) -> str: if len(expr.args) != 1: raise AssertionError("sin expects exactly one argument") return f"std::sin({self._print(expr.args[0])})" def _print_OpaqueUnaryFn_sinh(self, expr: sympy.Expr) -> str: if len(expr.args) != 1: raise
AssertionError("sinh expects exactly one argument") return f"std::sinh({self._print(expr.args[0])})" def _print_OpaqueUnaryFn_asin(self, expr: sympy.Expr) -> str: if len(expr.args) != 1: raise AssertionError("asin expects exactly one argument") return f"std::asin({self._print(expr.args[0])})" def _print_OpaqueUnaryFn_tan(self, expr: sympy.Expr) -> str: if len(expr.args) != 1: raise AssertionError("tan expects exactly one argument") return f"std::tan({self._print(expr.args[0])})" def _print_OpaqueUnaryFn_tanh(self, expr: sympy.Expr) -> str: if len(expr.args) != 1: raise AssertionError("tanh expects exactly one argument") return f"std::tanh({self._print(expr.args[0])})" def _print_OpaqueUnaryFn_atan(self, expr: sympy.Expr) -> str: if len(expr.args) != 1: raise AssertionError("atan expects exactly one argument") return f"std::atan({self._print(expr.args[0])})" def _print_OpaqueUnaryFn_sqrt(self, expr: sympy.Expr) -> str: return f"std::sqrt({self._print(expr.args[0])})" def _print_OpaqueUnaryFn_log2(self, expr: sympy.Expr) -> str: return f"std::log2({self._print(expr.args[0])})" def _print_RoundToInt(self, expr: sympy.Expr) -> str: if len(expr.args) != 1: raise AssertionError("RoundToInt expects exactly one argument") # TODO: dispatch to llrint depending on index type return f"std::lrint({self._print(expr.args[0])})" def _print_RoundDecimal(self, expr: sympy.Expr) -> str: if len(expr.args) != 2: raise AssertionError("RoundDecimal expects exactly two arguments") number, ndigits = expr.args if number.is_integer: # ndigits < 0 should have been filtered by the sympy function if ndigits >= 0: raise AssertionError("ndigits must be negative for integer inputs") raise ValueError( f"For integer inputs, only non-negative ndigits are currently supported, but got {ndigits}." ) number_str = self.parenthesize(number, PRECEDENCE["Mul"]) return f"static_cast<double>(std::nearbyint(1e{ndigits} * {number_str}) * 1e{-ndigits})" def _print_BooleanTrue(self, expr: sympy.Expr) -> str: return "true" def _print_BooleanFalse(self, expr: sympy.Expr) -> str: return "false" def _print_Infinity(self, expr: sympy.Expr) -> str: return "std::numeric_limits<double>::infinity()" def _print_NegativeInfinity(self, expr: sympy.Expr) -> str: return f"-{self._print_Infinity(expr)}"
CppPrinter
python
allegroai__clearml
clearml/backend_api/services/v2_23/frames.py
{ "start": 444715, "end": 446662 }
class ____(Response): """ Response of frames.get_snippets_query_for_dataview endpoint. :param query: The Elasticsearch query filter that should bring the snippet frames according to the provided dataview :type query: dict :param kibana_link: The link to the Kibana discovery page with the dataview query :type kibana_link: str """ _service = "frames" _action = "get_snippets_query_for_dataview" _version = "2.23" _schema = { "definitions": {}, "properties": { "kibana_link": { "description": "The link to the Kibana discovery page with the dataview query", "type": ["string", "null"], }, "query": { "description": ( "The Elasticsearch query filter that should bring the snippet frames " "according to the provided dataview" ), "type": ["object", "null"], }, }, } def __init__(self, query=None, kibana_link=None, **kwargs): super(GetSnippetsQueryForDataviewResponse, self).__init__(**kwargs) self.query = query self.kibana_link = kibana_link @schema_property("query") def query(self): return self._property_query @query.setter def query(self, value): if value is None: self._property_query = None return self.assert_isinstance(value, "query", (dict,)) self._property_query = value @schema_property("kibana_link") def kibana_link(self): return self._property_kibana_link @kibana_link.setter def kibana_link(self, value): if value is None: self._property_kibana_link = None return self.assert_isinstance(value, "kibana_link", six.string_types) self._property_kibana_link = value
GetSnippetsQueryForDataviewResponse
python
huggingface__transformers
src/transformers/models/cpmant/modeling_cpmant.py
{ "start": 20802, "end": 21431 }
class ____(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states @auto_docstring
CpmAntOutput
python
joke2k__faker
tests/providers/test_address.py
{ "start": 67468, "end": 70661 }
class ____(TestEnPh): """Test fil_PH address provider methods""" @classmethod def setup_class(cls): cls.building_number_pattern: Pattern = re.compile( r"(?:[1-9]|[1-9]\d{1,3})(?:[A-J]|\s[A-J]|-[A-J]|\sUnit\s[A-J])?", ) cls.address_pattern: Pattern = re.compile( r"(?P<street_address>.*), (?P<lgu>.*?), (?P<postcode>\d{4}) (?P<province>.*?)", ) cls.metro_manila_postcodes = EnPhAddressProvider.metro_manila_postcodes cls.luzon_province_postcodes = EnPhAddressProvider.luzon_province_postcodes cls.visayas_province_postcodes = EnPhAddressProvider.visayas_province_postcodes cls.mindanao_province_postcodes = EnPhAddressProvider.mindanao_province_postcodes cls.postcodes = EnPhAddressProvider.postcodes cls.provinces = EnPhAddressProvider.provinces cls.province_lgus = EnPhAddressProvider.province_lgus cls.metro_manila_lgus = EnPhAddressProvider.metro_manila_lgus def test_metro_manila_postcode(self, faker, num_samples): for _ in range(num_samples): assert int(faker.metro_manila_postcode()) in self.metro_manila_postcodes def test_luzon_province_postcode(self, faker, num_samples): for _ in range(num_samples): assert int(faker.luzon_province_postcode()) in self.luzon_province_postcodes def test_visayas_province_postcode(self, faker, num_samples): for _ in range(num_samples): assert int(faker.visayas_province_postcode()) in self.visayas_province_postcodes def test_mindanao_province_postcode(self, faker, num_samples): for _ in range(num_samples): assert int(faker.mindanao_province_postcode()) in self.mindanao_province_postcodes def test_postcode(self, faker, num_samples): for _ in range(num_samples): assert int(faker.postcode()) in self.postcodes def test_building_number(self, faker, num_samples): for _ in range(num_samples): assert self.building_number_pattern.fullmatch(faker.building_number()) def test_floor_unit_number(self, faker, num_samples): for _ in range(num_samples): number = faker.floor_unit_number() assert 2 <= int(number[:-2]) <= 99 assert 1 <= int(number[-2:]) <= 40 def test_ordinal_floor_number(self, faker, num_samples): for _ in range(num_samples): floor_number = faker.ordinal_floor_number() assert floor_number[-2:] in ["th", "st", "nd", "rd"] def test_address(self, faker, num_samples): for _ in range(num_samples): address = faker.address() match = self.address_pattern.fullmatch(address) street_address = match.group("street_address") lgu = match.group("lgu") postcode = match.group("postcode") province = match.group("province") assert match assert street_address assert lgu in self.province_lgus or lgu in self.metro_manila_lgus assert int(postcode) in self.postcodes assert province in self.provinces or province == "Metro Manila"
TestFilPh
python
psf__black
tests/data/cases/torture.py
{ "start": 330, "end": 1448 }
class ____: def foo(self): for _ in range(10): aaaaaaaaaaaaaaaaaaa = bbbbbbbbbbbbbbb.cccccccccc( # pylint: disable=no-member xxxxxxxxxxxx ) def test(self, othr): return (1 == 2 and (name, description, self.default, self.selected, self.auto_generated, self.parameters, self.meta_data, self.schedule) == (name, description, othr.default, othr.selected, othr.auto_generated, othr.parameters, othr.meta_data, othr.schedule)) assert ( a_function(very_long_arguments_that_surpass_the_limit, which_is_eighty_eight_in_this_case_plus_a_bit_more) == {"x": "this need to pass the line limit as well", "b": "but only by a little bit"} ) # output importA ( () << 0 ** 101234234242352525425252352352525234890264906820496920680926538059059209922523523525 ) # assert sort_by_dependency( { "1": {"2", "3"}, "2": {"2a", "2b"}, "3": {"3a", "3b"}, "2a": set(), "2b": set(), "3a": set(), "3b": set(), } ) == ["2a", "2b", "2", "3a", "3b", "3", "1"] importA 0 0 ^ 0 #
A
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_chart_gradient11.py
{ "start": 315, "end": 1489 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("chart_gradient11.xlsx") def test_create_file(self): """Test the creation of a simple XlsxWriter file.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() chart = workbook.add_chart({"type": "column"}) chart.axis_ids = [66738048, 66748416] data = [ [1, 2, 3, 4, 5], [2, 4, 6, 8, 10], [3, 6, 9, 12, 15], ] worksheet.write_column("A1", data[0]) worksheet.write_column("B1", data[1]) worksheet.write_column("C1", data[2]) chart.add_series( { "values": "=Sheet1!$A$1:$A$5", "gradient": {"colors": ["#DDEBCF", "#DDEBCF", "#9CB86E", "#156B13"]}, } ) chart.add_series({"values": "=Sheet1!$B$1:$B$5"}) chart.add_series({"values": "=Sheet1!$C$1:$C$5"}) worksheet.insert_chart("E9", chart) workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
huggingface__transformers
src/transformers/models/edgetam_video/modeling_edgetam_video.py
{ "start": 62601, "end": 65494 }
class ____(ModelOutput): r""" iou_scores (`torch.FloatTensor` of shape `(batch_size, point_batch_size, num_masks)`): The Intersection over Union (IoU) scores of the predicted masks. pred_masks (`torch.FloatTensor` of shape `(batch_size, point_batch_size, num_masks, height, width)`): The predicted low-resolution masks. This is an alias for `low_res_masks`. These masks need to be post-processed by the processor to be brought to the original image size. object_score_logits (`torch.FloatTensor` of shape `(batch_size, point_batch_size, 1)`): Logits for the object score, indicating if an object is present. image_embeddings (`tuple(torch.FloatTensor)`): The features from the FPN, which are used by the mask decoder. This is a tuple of `torch.FloatTensor` where each tensor has shape `(batch_size, channels, height, width)`. vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of each stage) of shape `(batch_size, height, width, hidden_size)`. Hidden-states of the vision model at the output of each stage. vision_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the vision model. mask_decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the mask decoder. high_res_masks (`torch.FloatTensor` of shape `(batch_size, point_batch_size, num_masks, image_size, image_size)`, *optional*): The predicted masks, upscaled to the original image size. Only used for EdgeTamVideoModel. object_pointer (`torch.FloatTensor` of shape `(batch_size, point_batch_size, hidden_size)`, *optional*): A tensor representing the object pointer, used for tracking in videos. Only used for EdgeTamVideoModel. """ iou_scores: Optional[torch.FloatTensor] = None pred_masks: Optional[torch.FloatTensor] = None object_score_logits: Optional[torch.FloatTensor] = None image_embeddings: tuple[torch.FloatTensor, ...] = None vision_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None vision_attentions: Optional[tuple[torch.FloatTensor, ...]] = None mask_decoder_attentions: Optional[tuple[torch.FloatTensor, ...]] = None high_res_masks: Optional[torch.FloatTensor] = None object_pointer: Optional[torch.FloatTensor] = None @dataclass @auto_docstring(custom_intro="Base class for the Sam2 model's output.")
EdgeTamVideoImageSegmentationOutput
python
charliermarsh__ruff
crates/ty_python_semantic/resources/corpus/88_regression_issue_17792.py
{ "start": 107, "end": 170 }
class ____: ... def f(arg: C): pass x, _ = f(1) assert x
C
python
python__mypy
mypy/typeanal.py
{ "start": 110803, "end": 115123 }
class ____(SyntheticTypeVisitor[None]): """Type visitor that looks for type variable types and self types.""" def __init__(self, api: SemanticAnalyzerCoreInterface, scope: TypeVarLikeScope) -> None: self.api = api self.scope = scope self.type_var_likes: list[tuple[str, TypeVarLikeExpr]] = [] self.has_self_type = False self.include_callables = True def _seems_like_callable(self, type: UnboundType) -> bool: if not type.args: return False return isinstance(type.args[0], (EllipsisType, TypeList, ParamSpecType)) def visit_unbound_type(self, t: UnboundType) -> None: name = t.name node = self.api.lookup_qualified(name, t) if node and node.fullname in SELF_TYPE_NAMES: self.has_self_type = True if ( node and isinstance(node.node, TypeVarLikeExpr) and self.scope.get_binding(node) is None ): if (name, node.node) not in self.type_var_likes: self.type_var_likes.append((name, node.node)) elif not self.include_callables and self._seems_like_callable(t): if find_self_type( t, lambda name: self.api.lookup_qualified(name, t, suppress_errors=True) ): self.has_self_type = True return elif node and node.fullname in LITERAL_TYPE_NAMES: return elif node and node.fullname in ANNOTATED_TYPE_NAMES and t.args: # Don't query the second argument to Annotated for TypeVars self.process_types([t.args[0]]) elif t.args: self.process_types(t.args) def visit_type_list(self, t: TypeList) -> None: self.process_types(t.items) def visit_callable_argument(self, t: CallableArgument) -> None: t.typ.accept(self) def visit_any(self, t: AnyType) -> None: pass def visit_uninhabited_type(self, t: UninhabitedType) -> None: pass def visit_none_type(self, t: NoneType) -> None: pass def visit_erased_type(self, t: ErasedType) -> None: pass def visit_deleted_type(self, t: DeletedType) -> None: pass def visit_type_var(self, t: TypeVarType) -> None: self.process_types([t.upper_bound, t.default] + t.values) def visit_param_spec(self, t: ParamSpecType) -> None: self.process_types([t.upper_bound, t.default, t.prefix]) def visit_type_var_tuple(self, t: TypeVarTupleType) -> None: self.process_types([t.upper_bound, t.default]) def visit_unpack_type(self, t: UnpackType) -> None: self.process_types([t.type]) def visit_parameters(self, t: Parameters) -> None: self.process_types(t.arg_types) def visit_partial_type(self, t: PartialType) -> None: pass def visit_instance(self, t: Instance) -> None: self.process_types(t.args) def visit_callable_type(self, t: CallableType) -> None: # FIX generics self.process_types(t.arg_types) t.ret_type.accept(self) def visit_tuple_type(self, t: TupleType) -> None: self.process_types(t.items) def visit_typeddict_type(self, t: TypedDictType) -> None: self.process_types(list(t.items.values())) def visit_raw_expression_type(self, t: RawExpressionType) -> None: pass def visit_literal_type(self, t: LiteralType) -> None: pass def visit_union_type(self, t: UnionType) -> None: self.process_types(t.items) def visit_overloaded(self, t: Overloaded) -> None: for it in t.items: it.accept(self) def visit_type_type(self, t: TypeType) -> None: t.item.accept(self) def visit_ellipsis_type(self, t: EllipsisType) -> None: pass def visit_placeholder_type(self, t: PlaceholderType) -> None: return self.process_types(t.args) def visit_type_alias_type(self, t: TypeAliasType) -> None: self.process_types(t.args) def process_types(self, types: list[Type] | tuple[Type, ...]) -> None: # Redundant type check helps mypyc. if isinstance(types, list): for t in types: t.accept(self) else: for t in types: t.accept(self)
FindTypeVarVisitor
python
getsentry__sentry
tests/sentry/seer/autofix/test_autofix_utils.py
{ "start": 9453, "end": 15729 }
class ____(TestCase): """Test the is_issue_eligible_for_seer_automation function.""" def setUp(self): super().setUp() self.organization = self.create_organization(name="test-org") self.project = self.create_project(organization=self.organization) self.group = self.create_group(project=self.project) def test_returns_false_for_unsupported_issue_categories(self): """Test returns False for unsupported issue categories like REPLAY and FEEDBACK.""" from sentry.issues.grouptype import FeedbackGroup, ReplayRageClickType # Create groups with unsupported categories replay_group = self.create_group(project=self.project, type=ReplayRageClickType.type_id) feedback_group = self.create_group(project=self.project, type=FeedbackGroup.type_id) assert is_issue_eligible_for_seer_automation(replay_group) is False assert is_issue_eligible_for_seer_automation(feedback_group) is False def test_returns_true_for_supported_issue_categories(self): """Test returns True for supported issue categories when all conditions are met.""" with self.feature("organizations:gen-ai-features"): with patch( "sentry.seer.seer_setup.get_seer_org_acknowledgement_for_scanner" ) as mock_ack: with patch("sentry.quotas.backend.has_available_reserved_budget") as mock_budget: mock_ack.return_value = True mock_budget.return_value = True self.project.update_option("sentry:seer_scanner_automation", True) # Test supported categories - using default error group result = is_issue_eligible_for_seer_automation(self.group) assert result is True def test_returns_false_when_gen_ai_features_not_enabled(self): """Test returns False when organizations:gen-ai-features feature flag is not enabled.""" result = is_issue_eligible_for_seer_automation(self.group) assert result is False def test_returns_false_when_ai_features_hidden(self): """Test returns False when sentry:hide_ai_features option is enabled.""" with self.feature("organizations:gen-ai-features"): self.organization.update_option("sentry:hide_ai_features", True) result = is_issue_eligible_for_seer_automation(self.group) assert result is False def test_returns_false_when_scanner_automation_disabled_and_not_always_trigger(self): """Test returns False when scanner automation is disabled and issue type doesn't always trigger.""" with self.feature("organizations:gen-ai-features"): self.project.update_option("sentry:seer_scanner_automation", False) result = is_issue_eligible_for_seer_automation(self.group) assert result is False @patch("sentry.seer.seer_setup.get_seer_org_acknowledgement_for_scanner") def test_returns_false_when_org_not_acknowledged(self, mock_get_acknowledgement): """Test returns False when organization has not acknowledged Seer for scanner.""" with self.feature("organizations:gen-ai-features"): self.project.update_option("sentry:seer_scanner_automation", True) mock_get_acknowledgement.return_value = False result = is_issue_eligible_for_seer_automation(self.group) assert result is False mock_get_acknowledgement.assert_called_once_with(self.organization) @patch("sentry.seer.seer_setup.get_seer_org_acknowledgement_for_scanner") @patch("sentry.quotas.backend.has_available_reserved_budget") def test_returns_false_when_no_budget_available( self, mock_has_budget, mock_get_acknowledgement ): """Test returns False when organization has no available budget for scanner.""" with self.feature("organizations:gen-ai-features"): self.project.update_option("sentry:seer_scanner_automation", True) mock_get_acknowledgement.return_value = True mock_has_budget.return_value = False result = 
is_issue_eligible_for_seer_automation(self.group) assert result is False mock_has_budget.assert_called_once_with( org_id=self.organization.id, data_category=DataCategory.SEER_SCANNER ) @patch("sentry.seer.seer_setup.get_seer_org_acknowledgement_for_scanner") @patch("sentry.quotas.backend.has_available_reserved_budget") def test_returns_true_when_all_conditions_met(self, mock_has_budget, mock_get_acknowledgement): """Test returns True when all eligibility conditions are met.""" with self.feature("organizations:gen-ai-features"): self.project.update_option("sentry:seer_scanner_automation", True) mock_get_acknowledgement.return_value = True mock_has_budget.return_value = True result = is_issue_eligible_for_seer_automation(self.group) assert result is True mock_get_acknowledgement.assert_called_once_with(self.organization) mock_has_budget.assert_called_once_with( org_id=self.organization.id, data_category=DataCategory.SEER_SCANNER ) @patch("sentry.seer.seer_setup.get_seer_org_acknowledgement_for_scanner") @patch("sentry.quotas.backend.has_available_reserved_budget") def test_returns_true_when_issue_type_always_triggers( self, mock_has_budget, mock_get_acknowledgement ): """Test returns True when issue type has always_trigger_seer_automation even if scanner automation is disabled.""" with self.feature("organizations:gen-ai-features"): # Disable scanner automation self.project.update_option("sentry:seer_scanner_automation", False) mock_get_acknowledgement.return_value = True mock_has_budget.return_value = True # Mock the group's issue_type to always trigger with patch.object(self.group.issue_type, "always_trigger_seer_automation", True): result = is_issue_eligible_for_seer_automation(self.group) assert result is True
TestIsIssueEligibleForSeerAutomation
python
getsentry__sentry
src/sentry/models/releases/util.py
{ "start": 922, "end": 1074 }
class ____: operator: str version_parts: Sequence[int | str] package: str | Sequence[str] | None = None negated: bool = False
SemverFilter
python
openai__openai-python
src/openai/types/responses/response_incomplete_event.py
{ "start": 231, "end": 516 }
class ____(BaseModel): response: Response """The response that was incomplete.""" sequence_number: int """The sequence number of this event.""" type: Literal["response.incomplete"] """The type of the event. Always `response.incomplete`."""
ResponseIncompleteEvent
python
charliermarsh__ruff
crates/ruff_python_parser/resources/valid/statement/class.py
{ "start": 425, "end": 485 }
class ____[T: str](): ... # TypeVar with bound and default
Test
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/overloadOverlap1.py
{ "start": 7928, "end": 8156 }
class ____(Protocol): def __call__(self, *args: Any) -> Any: ... @overload def func29(func: CBProto29) -> None: ... @overload def func29(func: Callable[..., Any]) -> None: ... def func29(func: Any) -> None: ...
CBProto29
python
sqlalchemy__sqlalchemy
test/orm/test_defaults.py
{ "start": 694, "end": 6706 }
class ____(fixtures.MappedTest): __requires__ = ("row_triggers",) __sparse_driver_backend__ = True @classmethod def define_tables(cls, metadata): dt = Table( "dt", metadata, Column( "id", Integer, primary_key=True, test_needs_autoincrement=True ), Column("col1", String(20)), Column( "col2", String(20), server_default=sa.schema.FetchedValue() ), Column( "col3", String(20), sa.schema.FetchedValue(for_update=True) ), Column( "col4", String(20), sa.schema.FetchedValue(), sa.schema.FetchedValue(for_update=True), ), implicit_returning=False, ) dialect_name = testing.db.dialect.name for ins in ( sa.DDL( "CREATE TRIGGER dt_ins AFTER INSERT ON dt " "FOR EACH ROW BEGIN " "UPDATE dt SET col2='ins', col4='ins' " "WHERE dt.id = NEW.id; END" ).execute_if(dialect="sqlite"), sa.DDL( "CREATE TRIGGER dt_ins ON dt AFTER INSERT AS " "UPDATE dt SET col2='ins', col4='ins' " "WHERE dt.id IN (SELECT id FROM inserted);" ).execute_if(dialect="mssql"), sa.DDL( "CREATE TRIGGER dt_ins BEFORE INSERT " "ON dt " "FOR EACH ROW " "BEGIN " ":NEW.col2 := 'ins'; :NEW.col4 := 'ins'; END;" ).execute_if(dialect="oracle"), sa.DDL( "CREATE TRIGGER dt_ins BEFORE INSERT " "ON dt " "FOR EACH ROW " "EXECUTE PROCEDURE my_func_ins();" ).execute_if(dialect="postgresql"), sa.DDL( "CREATE TRIGGER dt_ins BEFORE INSERT ON dt " "FOR EACH ROW BEGIN " "SET NEW.col2='ins'; SET NEW.col4='ins'; END" ).execute_if( callable_=lambda ddl, target, bind, **kw: bind.engine.name not in ("oracle", "mssql", "sqlite", "postgresql") ), ): my_func_ins = sa.DDL( "CREATE OR REPLACE FUNCTION my_func_ins() " "RETURNS TRIGGER AS $$ " "BEGIN " "NEW.col2 := 'ins'; NEW.col4 := 'ins'; " "RETURN NEW; " "END; $$ LANGUAGE PLPGSQL" ).execute_if(dialect="postgresql") event.listen(dt, "after_create", my_func_ins) event.listen(dt, "after_create", ins) if dialect_name == "postgresql": event.listen( dt, "before_drop", sa.DDL("DROP TRIGGER dt_ins ON dt") ) else: event.listen(dt, "before_drop", sa.DDL("DROP TRIGGER dt_ins")) for up in ( sa.DDL( "CREATE TRIGGER dt_up AFTER UPDATE ON dt " "FOR EACH ROW BEGIN " "UPDATE dt SET col3='up', col4='up' " "WHERE dt.id = OLD.id; END" ).execute_if(dialect="sqlite"), sa.DDL( "CREATE TRIGGER dt_up ON dt AFTER UPDATE AS " "UPDATE dt SET col3='up', col4='up' " "WHERE dt.id IN (SELECT id FROM deleted);" ).execute_if(dialect="mssql"), sa.DDL( "CREATE TRIGGER dt_up BEFORE UPDATE ON dt " "FOR EACH ROW BEGIN " ":NEW.col3 := 'up'; :NEW.col4 := 'up'; END;" ).execute_if(dialect="oracle"), sa.DDL( "CREATE TRIGGER dt_up BEFORE UPDATE ON dt " "FOR EACH ROW " "EXECUTE PROCEDURE my_func_up();" ).execute_if(dialect="postgresql"), sa.DDL( "CREATE TRIGGER dt_up BEFORE UPDATE ON dt " "FOR EACH ROW BEGIN " "SET NEW.col3='up'; SET NEW.col4='up'; END" ).execute_if( callable_=lambda ddl, target, bind, **kw: bind.engine.name not in ("oracle", "mssql", "sqlite", "postgresql") ), ): my_func_up = sa.DDL( "CREATE OR REPLACE FUNCTION my_func_up() " "RETURNS TRIGGER AS $$ " "BEGIN " "NEW.col3 := 'up'; NEW.col4 := 'up'; " "RETURN NEW; " "END; $$ LANGUAGE PLPGSQL" ).execute_if(dialect="postgresql") event.listen(dt, "after_create", my_func_up) event.listen(dt, "after_create", up) if dialect_name == "postgresql": event.listen(dt, "before_drop", sa.DDL("DROP TRIGGER dt_up ON dt")) else: event.listen(dt, "before_drop", sa.DDL("DROP TRIGGER dt_up")) @classmethod def setup_classes(cls): class Default(cls.Comparable): pass @classmethod def setup_mappers(cls): Default, dt = cls.classes.Default, cls.tables.dt cls.mapper_registry.map_imperatively(Default, dt) def test_insert(self): 
Default = self.classes.Default d1 = Default(id=1) eq_(d1.col1, None) eq_(d1.col2, None) eq_(d1.col3, None) eq_(d1.col4, None) session = fixture_session() session.add(d1) session.flush() eq_(d1.col1, None) eq_(d1.col2, "ins") eq_(d1.col3, None) # don't care which trigger fired assert d1.col4 in ("ins", "up") def test_update(self): Default = self.classes.Default d1 = Default(id=1) session = fixture_session() session.add(d1) session.flush() d1.col1 = "set" session.flush() eq_(d1.col1, "set") eq_(d1.col2, "ins") eq_(d1.col3, "up") eq_(d1.col4, "up")
TriggerDefaultsTest
python
pytorch__pytorch
torch/_dynamo/source.py
{ "start": 11496, "end": 12215 }
class ____(ChainedSource): def reconstruct(self, codegen: "PyCodegen") -> None: codegen(self.base) codegen.extend_output(codegen.create_load_attrs("__dict__")) def guard_source(self) -> GuardSource: return self.base.guard_source() def name(self) -> str: # type(ob).__dict__ can return a proxy of the dict. But in the C++ # guard accessor, we use type->tp_dict which is a dict. So, # forcefully pass a dict object to ensure that the GuardManager # registers that it's working on a dict object. return f"dict({self.base.name()}.__dict__)" # Represents obj.__mro__ where obj is a type object @dataclasses.dataclass(frozen=True)
TypeDictSource
python
jmcnamara__XlsxWriter
xlsxwriter/test/styles/test_initialisation.py
{ "start": 295, "end": 816 }
class ____(unittest.TestCase): """ Test initialisation of the Styles class and call a method. """ def setUp(self): self.fh = StringIO() self.styles = Styles() self.styles._set_filehandle(self.fh) def test_xml_declaration(self): """Test Styles xml_declaration()""" self.styles._xml_declaration() exp = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>\n""" got = self.fh.getvalue() self.assertEqual(exp, got)
TestInitialisation
python
zarr-developers__zarr-python
src/zarr/core/buffer/core.py
{ "start": 986, "end": 3044 }
class ____(Protocol): """Protocol for the nd-array-like type that underlies NDBuffer""" @property def dtype(self) -> np.dtype[Any]: ... @property def ndim(self) -> int: ... @property def size(self) -> int: ... @property def shape(self) -> tuple[int, ...]: ... def __len__(self) -> int: ... def __getitem__(self, key: slice) -> Self: ... def __setitem__(self, key: slice, value: Any) -> None: ... def __array__(self) -> npt.NDArray[Any]: ... def reshape( self, shape: tuple[int, ...] | Literal[-1], *, order: Literal["A", "C", "F"] = ... ) -> Self: ... def view(self, dtype: npt.DTypeLike) -> Self: ... def astype( self, dtype: npt.DTypeLike, order: Literal["K", "A", "C", "F"] = ..., *, copy: bool = ..., ) -> Self: ... def fill(self, value: Any) -> None: ... def copy(self) -> Self: ... def transpose(self, axes: SupportsIndex | Sequence[SupportsIndex] | None) -> Self: ... def ravel(self, order: Literal["K", "A", "C", "F"] = ...) -> Self: ... def all(self) -> bool: ... def __eq__(self, other: object) -> Self: # type: ignore[override] """Element-wise equal Notes ----- Type checkers such as mypy complain because the return type isn't a bool like its supertype "object", which violates the Liskov substitution principle. This is true, but since NumPy's ndarray is defined as an element-wise equal, our hands are tied. """ ScalarType = int | float | complex | bytes | str | bool | np.generic NDArrayLikeOrScalar = ScalarType | NDArrayLike def check_item_key_is_1d_contiguous(key: Any) -> None: """Raises error if `key` isn't a 1d contiguous slice""" if not isinstance(key, slice): raise TypeError( f"Item key has incorrect type (expected slice, got {key.__class__.__name__})" ) if not (key.step is None or key.step == 1): raise ValueError("slice must be contiguous")
NDArrayLike
python
numba__numba
numba/core/typing/builtins.py
{ "start": 13895, "end": 13964 }
class ____(UnorderedCmpOp): pass @infer_global(operator.ne)
CmpOpEq
python
pypa__virtualenv
src/virtualenv/run/plugin/base.py
{ "start": 736, "end": 2096 }
class ____(PluginLoader): def __init__(self, interpreter, parser, name, possible) -> None: self.interpreter = interpreter self.name = name self._impl_class = None self.possible = possible self.parser = parser.add_argument_group(title=name) self.add_selector_arg_parse(name, list(self.possible)) @classmethod def options(cls, key): if cls._OPTIONS is None: cls._OPTIONS = cls.entry_points_for(key) return cls._OPTIONS def add_selector_arg_parse(self, name, choices): raise NotImplementedError def handle_selected_arg_parse(self, options): selected = getattr(options, self.name) if selected not in self.possible: msg = f"No implementation for {self.interpreter}" raise RuntimeError(msg) self._impl_class = self.possible[selected] self.populate_selected_argparse(selected, options.app_data) return selected def populate_selected_argparse(self, selected, app_data): self.parser.description = f"options for {self.name} {selected}" self._impl_class.add_parser_arguments(self.parser, self.interpreter, app_data) def create(self, options): return self._impl_class(options, self.interpreter) __all__ = [ "ComponentBuilder", "PluginLoader", ]
ComponentBuilder
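A toy version of the selector flow above, assuming nothing beyond the standard library: each plugin family owns an argparse group, and the selected name indexes a dict of candidate implementations:

```python
import argparse

POSSIBLE = {"fast": "FastImpl", "safe": "SafeImpl"}  # hypothetical plugins

parser = argparse.ArgumentParser()
group = parser.add_argument_group(title="creator")
group.add_argument("--creator", choices=list(POSSIBLE), default="fast")

options = parser.parse_args(["--creator", "safe"])
impl = POSSIBLE[options.creator]  # mirrors handle_selected_arg_parse
print(impl)  # SafeImpl
```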
python
django__django
tests/gis_tests/geoapp/feeds.py
{ "start": 1278, "end": 1345 }
class ____(TestGeoRSS2): feed_type = feeds.W3CGeoFeed
TestW3CGeo2
python
huggingface__transformers
tests/models/maskformer/test_modeling_maskformer_swin.py
{ "start": 6129, "end": 15098 }
class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {} test_resize_embeddings = False test_torch_exportable = True def setUp(self): self.model_tester = MaskFormerSwinModelTester(self) self.config_tester = ConfigTester( self, config_class=MaskFormerSwinConfig, has_text_modality=False, embed_dim=37, common_properties=["image_size", "patch_size", "num_channels"], ) @require_torch_multi_gpu @unittest.skip( reason=( "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with" " `nn.DataParallel`" ) ) def test_multi_gpu_data_parallel_forward(self): pass def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_backbone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*config_and_inputs) @unittest.skip(reason="Swin does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Swin does not support feedforward chunking") def test_feed_forward_chunking(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions") def test_attention_outputs(self): pass def check_hidden_states_output(self, inputs_dict, config, model_class, image_size): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) # Swin has a different seq_length patch_size = ( config.patch_size if isinstance(config.patch_size, collections.abc.Iterable) else (config.patch_size, config.patch_size) ) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim], ) def test_hidden_states_output(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() image_size = ( self.model_tester.image_size if isinstance(self.model_tester.image_size, collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True self.check_hidden_states_output(inputs_dict, config, model_class, image_size) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True self.check_hidden_states_output(inputs_dict, config, model_class, image_size) def test_hidden_states_output_with_padding(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.patch_size = 3 image_size = ( self.model_tester.image_size if 
isinstance(self.model_tester.image_size, collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) patch_size = ( config.patch_size if isinstance(config.patch_size, collections.abc.Iterable) else (config.patch_size, config.patch_size) ) padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width)) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width)) @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints") def test_model_from_pretrained(self): pass @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin") def test_gradient_checkpointing_backward_compatibility(self): pass def test_model_outputs_equivalence(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(t): t[t != t] = 0 return t def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}): with torch.no_grad(): tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs) dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple() def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (list, tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object): recursive_check(tuple_iterable_value, dict_iterable_value) elif isinstance(tuple_object, dict): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values(), dict_object.values() ): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5 ), msg=( "Tuple and dict output are not equal. Difference:" f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:" f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has" f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}." ), ) recursive_check(tuple_output, dict_output) for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) @require_torch
MaskFormerSwinModelTest
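One detail worth calling out in `test_hidden_states_output_with_padding`: the padded-size formula always adds at least one pixel, padding a full extra patch when the size is already a multiple, which is exactly what the test needs to exercise the padding path. A self-contained check:

```python
def padded(size: int, patch: int) -> int:
    # padded_height/padded_width computation from the test above
    return size + patch - (size % patch)

assert padded(30, 3) == 33  # already a multiple -> a whole extra patch
assert padded(31, 3) == 33  # otherwise rounds up to the next multiple
assert padded(32, 3) == 33
```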
python
Textualize__textual
src/textual/_on.py
{ "start": 442, "end": 3336 }
class ____(Exception): """A selector was applied to an attribute that isn't a widget.""" def on( message_type: type[Message], selector: str | None = None, **kwargs: str ) -> Callable[[DecoratedType], DecoratedType]: """Decorator to declare that the method is a message handler. The decorator accepts an optional CSS selector that will be matched against a widget exposed by a `control` property on the message. Example: ```python # Handle the press of buttons with ID "#quit". @on(Button.Pressed, "#quit") def quit_button(self) -> None: self.app.quit() ``` Keyword arguments can be used to match additional selectors for attributes listed in [`ALLOW_SELECTOR_MATCH`][textual.message.Message.ALLOW_SELECTOR_MATCH]. Example: ```python # Handle the activation of the tab "#home" within the `TabbedContent` "#tabs". @on(TabbedContent.TabActivated, "#tabs", pane="#home") def switch_to_home(self) -> None: self.log("Switching back to the home tab.") ... ``` Args: message_type: The message type (i.e. the class). selector: An optional [selector](/guide/CSS#selectors). If supplied, the handler will only be called if `selector` matches the widget from the `control` attribute of the message. **kwargs: Additional selectors for other attributes of the message. """ selectors: dict[str, str] = {} if selector is not None: selectors["control"] = selector if kwargs: selectors.update(kwargs) parsed_selectors: dict[str, tuple[SelectorSet, ...]] = {} for attribute, css_selector in selectors.items(): if attribute == "control": if message_type.control == Message.control: raise OnDecoratorError( "The message class must have a 'control' to match with the on decorator" ) elif attribute not in message_type.ALLOW_SELECTOR_MATCH: raise OnDecoratorError( f"The attribute {attribute!r} can't be matched; have you added it to " + f"{message_type.__name__}.ALLOW_SELECTOR_MATCH?" ) try: parsed_selectors[attribute] = parse_selectors(css_selector) except TokenError: raise OnDecoratorError( f"Unable to parse selector {css_selector!r} for {attribute}; check for syntax errors" ) from None def decorator(method: DecoratedType) -> DecoratedType: """Store message and selector in function attribute, return callable unaltered.""" if not hasattr(method, "_textual_on"): setattr(method, "_textual_on", []) getattr(method, "_textual_on").append((message_type, parsed_selectors)) return method return decorator
OnNoWidget
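The decorator itself only stashes metadata; dispatch happens elsewhere. A stripped-down sketch of that storage mechanism (`fake_on` is a toy, not Textual's API):

```python
def fake_on(message_type, selectors):
    # Mirrors the closing `decorator` above: append metadata, return unchanged.
    def decorator(method):
        if not hasattr(method, "_textual_on"):
            method._textual_on = []
        method._textual_on.append((message_type, selectors))
        return method
    return decorator

@fake_on("Button.Pressed", {"control": "#quit"})
def quit_button() -> None:
    pass

print(quit_button._textual_on)  # [('Button.Pressed', {'control': '#quit'})]
```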
python
airbytehq__airbyte
airbyte-ci/connectors/metadata_service/orchestrator/orchestrator/utils/object_helpers.py
{ "start": 1399, "end": 1758 }
class ____(EnumMeta): """A metaclass for creating enums with case-insensitive keys.""" def __getitem__(cls, item): try: return super().__getitem__(item) except: for key in cls._member_map_: if key.casefold() == item.casefold(): return super().__getitem__(key)
CaseInsensitveKeys
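Usage sketch for the metaclass, with the bare `except` narrowed to `KeyError` and a re-raise added when nothing matches (the enum below is invented for illustration):

```python
from enum import Enum, EnumMeta

class CaseInsensitiveKeys(EnumMeta):
    def __getitem__(cls, item):
        try:
            return super().__getitem__(item)
        except KeyError:
            for key in cls._member_map_:
                if key.casefold() == item.casefold():
                    return super().__getitem__(key)
            raise

class SourceType(Enum, metaclass=CaseInsensitiveKeys):
    GITHUB = "github"

assert SourceType["github"] is SourceType.GITHUB
assert SourceType["GitHub"] is SourceType.GITHUB
```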
python
keon__algorithms
tests/test_stack.py
{ "start": 4291, "end": 4732 }
class ____(unittest.TestCase): def test_OrderedStack(self): stack = OrderedStack() self.assertTrue(stack.is_empty()) stack.push(1) stack.push(4) stack.push(3) stack.push(6) # bottom -> 1 3 4 6 self.assertEqual(6, stack.pop()) self.assertEqual(4, stack.peek()) self.assertEqual(3, stack.size()) if __name__ == "__main__": unittest.main()
TestOrderedStack
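A minimal OrderedStack that satisfies the assertions above (a toy, not the module under test): push keeps the backing list sorted ascending, so pop always returns the current maximum:

```python
import bisect

class OrderedStack:  # toy implementation matching the test's expectations
    def __init__(self):
        self.items = []

    def is_empty(self):
        return not self.items

    def push(self, item):
        bisect.insort(self.items, item)  # keep ascending order

    def pop(self):
        return self.items.pop()          # largest element

    def peek(self):
        return self.items[-1]

    def size(self):
        return len(self.items)

stack = OrderedStack()
for value in (1, 4, 3, 6):
    stack.push(value)
assert stack.pop() == 6 and stack.peek() == 4 and stack.size() == 3
```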
python
getsentry__sentry
src/sentry/uptime/consumers/results_consumer.py
{ "start": 6746, "end": 17523 }
class ____(ResultProcessor[CheckResult, UptimeSubscription]): subscription_model = UptimeSubscription def get_subscription_id(self, result: CheckResult) -> str: return result["subscription_id"] def handle_result(self, subscription: UptimeSubscription | None, result: CheckResult): if random.random() < 0.01: logger.info("process_result", extra=result) # If there's no subscription in the database, this subscription has # been orphaned. Remove from the checker if subscription is None: send_uptime_config_deletion(result["region"], result["subscription_id"]) metrics.incr( "uptime.result_processor.subscription_not_found", sample_rate=1.0, tags={"uptime_region": result.get("region", "default")}, ) return metric_tags = { "host_provider": get_host_provider_if_valid(subscription), "status": result["status"], "uptime_region": result["region"], } subscription_regions = load_regions_for_uptime_subscription(subscription.id) if result["status"] == CHECKSTATUS_DISALLOWED_BY_ROBOTS: try: detector = get_detector(subscription) logger.info("disallowed_by_robots", extra=result) metrics.incr( "uptime.result_processor.disallowed_by_robots", sample_rate=1.0, tags={"uptime_region": result.get("region", "default")}, ) disable_uptime_detector(detector) except Exception as e: logger.exception("disallowed_by_robots.error", extra={"error": e, "result": result}) return # Discard shadow mode region results if is_shadow_region_result(result, subscription_regions): metrics.incr( "uptime.result_processor.dropped_shadow_result", sample_rate=1.0, tags=metric_tags, ) return if should_run_region_checks(subscription, result): try_check_and_update_regions(subscription, subscription_regions) try: detector = get_detector(subscription, prefetch_workflow_data=True) except Detector.DoesNotExist: # Nothing to do if there's an orphaned uptime subscription delete_uptime_subscription(subscription) return organization = detector.project.organization # Nothing to do if this subscription is disabled. if not detector.enabled: return # Nothing to do if the feature isn't enabled if not features.has("organizations:uptime", organization): metrics.incr("uptime.result_processor.dropped_no_feature") return mode_name = UptimeMonitorMode(detector.config["mode"]).name.lower() status_reason = "none" if result["status_reason"]: status_reason = result["status_reason"]["type"] metrics.incr( "uptime.result_processor.handle_result_for_project", tags={"mode": mode_name, "status_reason": status_reason, **metric_tags}, sample_rate=1.0, ) cluster = get_cluster() last_update_key = build_last_update_key(detector) last_update_raw: str | None = cluster.get(last_update_key) last_update_ms = 0 if last_update_raw is None else int(last_update_raw) # Nothing to do if we've already processed this result at an earlier time if result["scheduled_check_time_ms"] <= last_update_ms: # If the scheduled check time is older than the most recent update then we've already processed it. # We can end up with duplicates due to Kafka replaying tuples, or due to the uptime checker processing # the same check multiple times and sending duplicate results. # We only ever want to process the first value related to each check, so we just skip and log here metrics.incr( "uptime.result_processor.skipping_already_processed_update", tags={"mode": mode_name, **metric_tags}, sample_rate=1.0, ) # Don't log too much; this codepath can get used when the consumer is doing increased # work, which can further increase its work, and so make a bad situation even worse. 
if random.random() < 0.01: logger.info( "uptime.result_processor.skipping_already_processed_update", extra={ "guid": result["guid"], "region": result["region"], "subscription_id": result["subscription_id"], }, ) return subscription_interval_ms = 1000 * subscription.interval_seconds num_intervals = ( result["scheduled_check_time_ms"] - last_update_ms ) / subscription_interval_ms # If the scheduled check is two or more intervals since the last seen check, we can declare the # intervening checks missed... if last_update_raw is not None and num_intervals > 1: # ... but it might be the case that the user changed the frequency of the detector. So, first # verify that the interval in postgres is the same as the last-seen interval (in redis). # We only store in redis when we encounter a difference like this, which means we won't be able # to tell if a missed check is real with the very first missed check. This is probably okay, # and preferable to just writing the interval to redis on every check consumed. last_interval_key = build_last_seen_interval_key(detector) # If we've never set an interval before, just set this to zero, which will never compare # true with any valid interval. last_interval_seen: str = cluster.get(last_interval_key) or "0" if int(last_interval_seen) == subscription_interval_ms: # Bound the number of missed checks we generate--just in case. num_missed_checks = min(MAX_SYNTHETIC_MISSED_CHECKS, int(num_intervals - 1)) metrics.distribution( "uptime.result_processer.num_missing_check", num_missed_checks, tags=metric_tags, ) logger.info( "uptime.result_processor.num_missing_check", extra={"num_missed_checks": num_missed_checks, **result}, ) if num_intervals != int(num_intervals): logger.info( "uptime.result_processor.invalid_check_interval", 0, extra={ "last_update_ms": last_update_ms, "current_update_ms": result["scheduled_check_time_ms"], "interval_ms": subscription_interval_ms, **result, }, ) synthetic_metric_tags = metric_tags.copy() synthetic_metric_tags["status"] = CHECKSTATUS_MISSED_WINDOW for i in range(0, num_missed_checks): missed_result: CheckResult = { "guid": str(uuid.uuid4()), "subscription_id": result["subscription_id"], "status": CHECKSTATUS_MISSED_WINDOW, "status_reason": None, "trace_id": str(uuid.uuid4()), "span_id": str(uuid.uuid4()), "region": result["region"], "scheduled_check_time_ms": last_update_ms + ((i + 1) * subscription_interval_ms), "actual_check_time_ms": result["actual_check_time_ms"], "duration_ms": 0, "request_info": None, } produce_eap_uptime_result( detector, missed_result, synthetic_metric_tags.copy(), ) else: logger.info( "uptime.result_processor.false_num_missing_check", extra={**result}, ) cluster.set(last_interval_key, subscription_interval_ms, ex=LAST_UPDATE_REDIS_TTL) # We log the result stats here after the duplicate check so that we # know the "true" duration and delay of each check. Since during # deploys we might have checks run from both the old/new checker # deployments, there will be overlap of when things run. The new # deployment will have artificially inflated delay stats, since it may # duplicate checks that already ran on time on the old deployment, but # will have run them later. # # Since we process all results for a given uptime monitor in order, we # can guarantee that we get the earliest delay stat for each scheduled # check for the monitor here, and so this stat will be a more accurate # measurement of delay/duration. 
record_check_metrics(result, detector, {"mode": mode_name, **metric_tags}) Mode = UptimeMonitorMode try: match detector.config["mode"]: case Mode.AUTO_DETECTED_ONBOARDING: handle_onboarding_result(detector, subscription, result, metric_tags.copy()) case Mode.AUTO_DETECTED_ACTIVE | Mode.MANUAL: handle_active_result(detector, subscription, result, metric_tags.copy()) case _: logger.error( "Unknown subscription mode", extra={"mode": detector.config["mode"]}, ) except Exception: logger.exception("Failed to process result for uptime project subscription") # EAP production _must_ happen after handling the result, since we # may mutate the UptimeSubscription when we determine we're in an incident produce_eap_uptime_result(detector, result, metric_tags.copy()) # Track the last update date to allow deduplication cluster.set( last_update_key, int(result["scheduled_check_time_ms"]), ex=LAST_UPDATE_REDIS_TTL, ) record_check_completion_metrics(result, metric_tags)
UptimeResultProcessor
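The missed-check bookkeeping above boils down to simple interval arithmetic; here is the core of it in isolation (the value of `MAX_SYNTHETIC_MISSED_CHECKS` is an assumption, standing in for the cap referenced above):

```python
MAX_SYNTHETIC_MISSED_CHECKS = 100  # assumed value for the cap used above

def missed_checks(last_update_ms: int, scheduled_ms: int, interval_ms: int) -> int:
    # Two or more whole intervals between checks means the intervening
    # scheduled checks never arrived; cap the synthetic results we emit.
    num_intervals = (scheduled_ms - last_update_ms) / interval_ms
    if num_intervals <= 1:
        return 0
    return min(MAX_SYNTHETIC_MISSED_CHECKS, int(num_intervals - 1))

# A 60s monitor last seen at t=0 that next reports at t=180s skipped the
# checks scheduled for t=60s and t=120s.
assert missed_checks(0, 180_000, 60_000) == 2
```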
python
kamyu104__LeetCode-Solutions
Python/find-champion-i.py
{ "start": 39, "end": 258 }
class ____(object): def findChampion(self, grid): """ :type grid: List[List[int]] :rtype: int """ return next(u for u in xrange(len(grid)) if sum(grid[u]) == len(grid)-1)
Solution
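A worked example of the one-liner: `grid[u][v] == 1` means team `u` beat team `v`, so the champion is the unique row whose sum is `n - 1` (written with `range` for Python 3):

```python
grid = [
    [0, 1, 1],  # team 0 beats teams 1 and 2 -> row sum 2 == n - 1
    [0, 0, 0],
    [0, 1, 0],
]
n = len(grid)
champion = next(u for u in range(n) if sum(grid[u]) == n - 1)
assert champion == 0
```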
python
Textualize__rich
rich/markdown.py
{ "start": 9602, "end": 10766 }
class ____(MarkdownElement): """A list element.""" @classmethod def create(cls, markdown: Markdown, token: Token) -> ListElement: return cls(token.type, int(token.attrs.get("start", 1))) def __init__(self, list_type: str, list_start: int | None) -> None: self.items: list[ListItem] = [] self.list_type = list_type self.list_start = list_start def on_child_close(self, context: MarkdownContext, child: MarkdownElement) -> bool: assert isinstance(child, ListItem) self.items.append(child) return False def __rich_console__( self, console: Console, options: ConsoleOptions ) -> RenderResult: if self.list_type == "bullet_list_open": for item in self.items: yield from item.render_bullet(console, options) else: number = 1 if self.list_start is None else self.list_start last_number = number + len(self.items) for index, item in enumerate(self.items): yield from item.render_number( console, options, number + index, last_number )
ListElement
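Usage sketch: feeding Markdown through Rich exercises this element; the token type (`bullet_list_open` vs ordered) decides which render path runs:

```python
from rich.console import Console
from rich.markdown import Markdown

console = Console()
console.print(Markdown("1. first\n2. second"))  # ordered -> render_number
console.print(Markdown("- bullet"))             # bullet  -> render_bullet
```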
python
readthedocs__readthedocs.org
readthedocs/proxito/tests/test_hosting.py
{ "start": 1251, "end": 45028 }
class ____(TestCase): def setUp(self): self.user = fixture.get(User, username="testuser") self.user.set_password("testuser") self.user.save() self.project = fixture.get( Project, slug="project", name="project", language="en", privacy_level=PUBLIC, external_builds_privacy_level=PUBLIC, repo="https://github.com/readthedocs/project", programming_language="words", users=[self.user], main_language_project=None, project_url="http://project.com", versioning_scheme=MULTIPLE_VERSIONS_WITH_TRANSLATIONS, ) for tag in ("tag", "project", "test"): self.project.tags.add(tag) self.project.versions.update( privacy_level=PUBLIC, built=True, active=True, type="tag", identifier="a1b2c3", ) self.version = self.project.versions.get(slug=LATEST) self.version.identifier = "master" self.version.save() self.build = fixture.get( Build, project=self.project, version=self.version, commit="a1b2c3", length=60, state="finished", success=True, ) def _get_response_dict(self, view_name, filepath=None): filepath = filepath or __file__ filename = Path(filepath).absolute().parent / "responses" / f"{view_name}.json" return json.load(open(filename)) def _normalize_datetime_fields(self, obj): try: obj["projects"]["current"]["created"] = "2019-04-29T10:00:00Z" obj["projects"]["current"]["modified"] = "2019-04-29T12:00:00Z" except: pass try: obj["builds"]["current"]["created"] = "2019-04-29T10:00:00Z" obj["builds"]["current"]["finished"] = "2019-04-29T10:01:00Z" except: pass return obj def test_get_config_v1(self): r = self.client.get( reverse("proxito_readthedocs_docs_addons"), { "url": "https://project.dev.readthedocs.io/en/latest/", "api-version": "1.0.0", }, secure=True, headers={ "host": "project.dev.readthedocs.io", }, ) assert r.status_code == 200 assert self._normalize_datetime_fields(r.json()) == self._get_response_dict( "v1" ) def test_get_config_v2(self): r = self.client.get( reverse("proxito_readthedocs_docs_addons"), { "url": "https://project.dev.readthedocs.io/en/latest/", "api-version": "2.0.0", }, secure=True, headers={ "host": "project.dev.readthedocs.io", }, ) assert r.status_code == 200 assert r.json() == self._get_response_dict("v2") def test_get_config_unsupported_version(self): r = self.client.get( reverse("proxito_readthedocs_docs_addons"), { "url": "https://project.dev.readthedocs.io/en/latest/", "api-version": "3.0.0", }, secure=True, headers={ "host": "project.dev.readthedocs.io", }, ) assert r.status_code == 400 assert r.json() == self._get_response_dict("v3") def test_disabled_addons_via_addons_config(self): self.project.addons.analytics_enabled = False self.project.addons.doc_diff_enabled = False self.project.addons.external_version_warning_enabled = False self.project.addons.ethicalads_enabled = False self.project.addons.flyout_enabled = False self.project.addons.hotkeys_enabled = False self.project.addons.search_enabled = False self.project.addons.notifications_enabled = False self.project.addons.notifications_show_on_latest = False self.project.addons.notifications_show_on_non_stable = False self.project.addons.notifications_show_on_external = False self.project.addons.save() r = self.client.get( reverse("proxito_readthedocs_docs_addons"), { "url": "https://project.dev.readthedocs.io/en/latest/", "client-version": "0.6.0", "api-version": "1.0.0", }, secure=True, headers={ "host": "project.dev.readthedocs.io", }, ) assert r.status_code == 200 assert r.json()["addons"]["analytics"]["enabled"] is False assert r.json()["addons"]["notifications"]["enabled"] is False assert 
r.json()["addons"]["notifications"]["show_on_latest"] is False assert r.json()["addons"]["notifications"]["show_on_non_stable"] is False assert r.json()["addons"]["notifications"]["show_on_external"] is False assert r.json()["addons"]["doc_diff"]["enabled"] is False assert r.json()["addons"]["flyout"]["enabled"] is False assert r.json()["addons"]["search"]["enabled"] is False assert r.json()["addons"]["hotkeys"]["enabled"] is False def test_non_latest_version_warning_versions(self): fixture.get( Version, project=self.project, privacy_level=PRIVATE, slug="private", verbose_name="private", built=True, active=True, ) fixture.get( Version, project=self.project, privacy_level=PUBLIC, slug="public-built", verbose_name="public-built", built=True, active=True, ) fixture.get( Version, project=self.project, privacy_level=PUBLIC, slug="public-not-built", verbose_name="public-not-built", built=False, active=True, ) r = self.client.get( reverse("proxito_readthedocs_docs_addons"), { "url": "https://project.dev.readthedocs.io/en/latest/", "client-version": "0.6.0", "api-version": "1.0.0", }, secure=True, headers={ "host": "project.dev.readthedocs.io", }, ) assert r.status_code == 200 expected = ["latest", "public-built"] assert [v["slug"] for v in r.json()["versions"]["active"]] == expected def test_flyout_versions(self): fixture.get( Version, project=self.project, privacy_level=PRIVATE, slug="private", verbose_name="private", built=True, active=True, ) fixture.get( Version, project=self.project, privacy_level=PUBLIC, slug="public-built", verbose_name="public-built", built=True, active=True, ) fixture.get( Version, project=self.project, privacy_level=PUBLIC, slug="public-not-built", verbose_name="public-not-built", built=False, active=True, ) fixture.get( Version, project=self.project, privacy_level=PUBLIC, slug="hidden", verbose_name="hidden", built=False, hidden=True, active=True, ) r = self.client.get( reverse("proxito_readthedocs_docs_addons"), { "url": "https://project.dev.readthedocs.io/en/latest/", "client-version": "0.6.0", "api-version": "1.0.0", }, secure=True, headers={ "host": "project.dev.readthedocs.io", }, ) assert r.status_code == 200 assert len(r.json()["versions"]["active"]) == 2 assert r.json()["versions"]["active"][0]["slug"] == "latest" assert ( r.json()["versions"]["active"][0]["urls"]["documentation"] == "https://project.dev.readthedocs.io/en/latest/" ) assert r.json()["versions"]["active"][1]["slug"] == "public-built" assert ( r.json()["versions"]["active"][1]["urls"]["documentation"] == "https://project.dev.readthedocs.io/en/public-built/" ) def test_flyout_translations(self): translation_ja = fixture.get( Project, slug="translation", main_language_project=self.project, language="ja", privacy_level=PUBLIC, ) translation_ja.versions.update( built=True, active=True, privacy_level=PUBLIC, ) r = self.client.get( reverse("proxito_readthedocs_docs_addons"), { "url": "https://project.dev.readthedocs.io/en/latest/", "client-version": "0.6.0", "api-version": "1.0.0", }, secure=True, headers={ "host": "project.dev.readthedocs.io", }, ) assert r.status_code == 200 # Hitting the English version of the docs, will return Japanese as translation assert len(r.json()["projects"]["translations"]) == 1 assert r.json()["projects"]["translations"][0]["slug"] == "translation" assert r.json()["projects"]["translations"][0]["language"]["code"] == "ja" assert ( r.json()["projects"]["translations"][0]["urls"]["documentation"] == "https://project.dev.readthedocs.io/ja/latest/" ) # Hitting the Japanese 
version of the docs, will return English as translation r = self.client.get( reverse("proxito_readthedocs_docs_addons"), { "url": "https://project.dev.readthedocs.io/ja/latest/", "client-version": "0.6.0", "api-version": "1.0.0", }, secure=True, headers={ "host": "project.dev.readthedocs.io", }, ) assert r.status_code == 200 assert len(r.json()["projects"]["translations"]) == 1 assert r.json()["projects"]["translations"][0]["slug"] == "project" assert r.json()["projects"]["translations"][0]["language"]["code"] == "en" assert ( r.json()["projects"]["translations"][0]["urls"]["documentation"] == "https://project.dev.readthedocs.io/en/latest/" ) def test_flyout_downloads(self): fixture.get( Version, project=self.project, privacy_level=PUBLIC, slug="offline", verbose_name="offline", built=True, has_pdf=True, has_epub=True, has_htmlzip=True, active=True, ) r = self.client.get( reverse("proxito_readthedocs_docs_addons"), { "url": "https://project.dev.readthedocs.io/en/offline/", "client-version": "0.6.0", "api-version": "1.0.0", }, secure=True, headers={ "host": "project.dev.readthedocs.io", }, ) assert r.status_code == 200 expected = { "pdf": "https://project.dev.readthedocs.io/_/downloads/en/offline/pdf/", "htmlzip": "https://project.dev.readthedocs.io/_/downloads/en/offline/htmlzip/", "epub": "https://project.dev.readthedocs.io/_/downloads/en/offline/epub/", } assert r.json()["versions"]["current"]["downloads"] == expected def test_number_of_queries_versions_with_downloads(self): for i in range(10): fixture.get( Version, project=self.project, privacy_level=PUBLIC, slug=f"offline-{i}", verbose_name=f"offline-{i}", built=True, has_pdf=True, has_epub=True, has_htmlzip=True, active=True, ) with self.assertNumQueries(12): r = self.client.get( reverse("proxito_readthedocs_docs_addons"), { "url": "https://project.dev.readthedocs.io/en/offline/", "client-version": "0.6.0", "api-version": "1.0.0", }, secure=True, headers={ "host": "project.dev.readthedocs.io", }, ) assert r.status_code == 200 def test_flyout_single_version_project(self): self.version.has_pdf = True self.version.has_epub = True self.version.has_htmlzip = True self.version.save() # Add extra built and active versions to emulate a project that went # from multiple versions to single version. # These versions shouldn't be included in the `versions.active` field. 
for i in range(5): fixture.get( Version, privacy_level=PUBLIC, active=True, built=True, project=self.project, ) self.project.versioning_scheme = SINGLE_VERSION_WITHOUT_TRANSLATIONS self.project.save() r = self.client.get( reverse("proxito_readthedocs_docs_addons"), { "url": "https://project.dev.readthedocs.io/", "client-version": "0.6.0", "api-version": "1.0.0", }, secure=True, headers={ "host": "project.dev.readthedocs.io", }, ) assert r.status_code == 200 expected = ["latest"] assert [v["slug"] for v in r.json()["versions"]["active"]] == expected expected = { "pdf": "https://project.dev.readthedocs.io/_/downloads/en/latest/pdf/", "htmlzip": "https://project.dev.readthedocs.io/_/downloads/en/latest/htmlzip/", "epub": "https://project.dev.readthedocs.io/_/downloads/en/latest/epub/", } assert r.json()["versions"]["current"]["downloads"] == expected def test_builds_current_is_latest_one(self): # Create 10 successful build objects # The latest one (ordered by date) will be ``a1b2c3-9`` for i in range(10): fixture.get( Build, date=timezone.now(), project=self.project, version=self.version, commit=f"a1b2c3-{i}", length=60, state="finished", success=True, ) # Latest failed build fixture.get( Build, date=timezone.now(), project=self.project, version=self.version, commit=f"a1b2c3-failed", length=60, state="finished", success=False, ) r = self.client.get( reverse("proxito_readthedocs_docs_addons"), { "url": "https://project.dev.readthedocs.io/en/latest/", "client-version": "0.6.0", "api-version": "1.0.0", }, secure=True, headers={ "host": "project.dev.readthedocs.io", }, ) assert r.status_code == 200 # ``a1b2c3-9``is the latest successful build object created assert r.json()["builds"]["current"]["commit"] == "a1b2c3-9" def test_builds_current_is_latest_one_without_url_parameter(self): # Create 10 successful build objects # The latest one (ordered by date) will be ``a1b2c3-9`` for i in range(10): fixture.get( Build, date=timezone.now(), project=self.project, version=self.version, commit=f"a1b2c3-{i}", length=60, state="finished", success=True, ) # Latest failed build fixture.get( Build, date=timezone.now(), project=self.project, version=self.version, commit=f"a1b2c3-failed", length=60, state="finished", success=False, ) r = self.client.get( reverse("proxito_readthedocs_docs_addons"), { "project-slug": "project", "version-slug": "latest", "client-version": "0.6.0", "api-version": "1.0.0", }, secure=True, headers={ "host": "project.dev.readthedocs.io", }, ) assert r.status_code == 200 # ``a1b2c3-9``is the latest successful build object created assert r.json()["builds"]["current"]["commit"] == "a1b2c3-9" def test_project_subproject(self): subproject = fixture.get( Project, slug="subproject", repo="https://github.com/readthedocs/subproject", privacy_level=PUBLIC, ) subproject.versions.update(privacy_level=PUBLIC, built=True, active=True) self.project.add_subproject(subproject) r = self.client.get( reverse("proxito_readthedocs_docs_addons"), { "url": "https://project.dev.readthedocs.io/projects/subproject/en/latest/", "client-version": "0.6.0", "api-version": "1.0.0", }, secure=True, headers={ "host": "project.dev.readthedocs.io", }, ) assert r.status_code == 200 assert r.json()["projects"]["current"]["id"] == subproject.pk assert r.json()["projects"]["current"]["slug"] == subproject.slug assert ( r.json()["projects"]["current"]["repository"]["url"] == "https://github.com/readthedocs/subproject" ) def test_flyout_subproject_urls(self): translation = fixture.get( Project, slug="translation", 
language="es", repo="https://github.com/readthedocs/subproject", privacy_level=PUBLIC, ) translation.versions.update( built=True, active=True, ) subproject = fixture.get( Project, slug="subproject", repo="https://github.com/readthedocs/subproject", privacy_level=PUBLIC, ) self.project.add_subproject(subproject) subproject.translations.add(translation) subproject.save() fixture.get(Version, slug="v1", project=subproject, privacy_level=PUBLIC) fixture.get(Version, slug="v2.3", project=subproject, privacy_level=PUBLIC) subproject.versions.update( privacy_level=PUBLIC, built=True, active=True, hidden=False, ) r = self.client.get( reverse("proxito_readthedocs_docs_addons"), { "url": "https://project.dev.readthedocs.io/projects/subproject/en/latest/", "client-version": "0.6.0", "api-version": "1.0.0", }, secure=True, headers={ "host": "project.dev.readthedocs.io", }, ) assert r.status_code == 200 assert len(r.json()["versions"]["active"]) == 3 assert r.json()["versions"]["active"][0]["slug"] == "latest" assert ( r.json()["versions"]["active"][0]["urls"]["documentation"] == "https://project.dev.readthedocs.io/projects/subproject/en/latest/" ) assert r.json()["versions"]["active"][1]["slug"] == "v2.3" assert ( r.json()["versions"]["active"][1]["urls"]["documentation"] == "https://project.dev.readthedocs.io/projects/subproject/en/v2.3/" ) assert r.json()["versions"]["active"][2]["slug"] == "v1" assert ( r.json()["versions"]["active"][2]["urls"]["documentation"] == "https://project.dev.readthedocs.io/projects/subproject/en/v1/" ) assert len(r.json()["projects"]["translations"]) == 1 assert r.json()["projects"]["translations"][0]["slug"] == "translation" assert r.json()["projects"]["translations"][0]["language"]["code"] == "es" assert ( r.json()["projects"]["translations"][0]["urls"]["documentation"] == "https://project.dev.readthedocs.io/projects/subproject/es/latest/" ) def test_send_project_not_version_slugs(self): r = self.client.get( reverse("proxito_readthedocs_docs_addons"), { "api-version": "1.0.0", "client-version": "0.6.0", "project-slug": self.project.slug, }, secure=True, headers={ "host": "project.dev.readthedocs.io", }, ) assert r.status_code == 400 assert r.json() == { "error": "'project-slug' and 'version-slug' GET attributes are required when not sending 'url'" } def test_send_version_not_project_slugs(self): r = self.client.get( reverse("proxito_readthedocs_docs_addons"), { "api-version": "1.0.0", "client-version": "0.6.0", "version-slug": self.version.slug, }, secure=True, headers={ "host": "project.dev.readthedocs.io", }, ) assert r.status_code == 400 assert r.json() == { "error": "'project-slug' and 'version-slug' GET attributes are required when not sending 'url'" } def test_send_project_version_slugs(self): r = self.client.get( reverse("proxito_readthedocs_docs_addons"), { "api-version": "1.0.0", "client-version": "0.6.0", "project-slug": self.project.slug, "version-slug": self.version.slug, }, secure=True, headers={ "host": "project.dev.readthedocs.io", }, ) assert r.status_code == 200 expected_response = self._get_response_dict("v1") # Remove `addons.doc_diff` from the response because it's not present when `url=` is not sent expected_response["addons"].pop("doc_diff") expected_response["readthedocs"]["resolver"]["filename"] = None assert self._normalize_datetime_fields(r.json()) == expected_response def test_send_project_version_slugs_and_url(self): r = self.client.get( reverse("proxito_readthedocs_docs_addons"), { "api-version": "1.0.0", "client-version": "0.6.0", "url": 
"https://project.dev.readthedocs.io/en/latest/", # When sending `url=`, slugs are ignored "project-slug": "different-project-slug-than-url", "version-slug": "different-version-slug-than-url", }, secure=True, headers={ "host": "project.dev.readthedocs.io", }, ) assert r.status_code == 200 assert self._normalize_datetime_fields(r.json()) == self._get_response_dict( "v1" ) def test_send_project_slug_and_notfound_version_slug(self): r = self.client.get( reverse("proxito_readthedocs_docs_addons"), { "api-version": "1.0.0", "client-version": "0.6.0", "project-slug": self.project.slug, "version-slug": "not-found", }, secure=True, headers={ "host": "project.dev.readthedocs.io", }, ) assert r.status_code == 200 expected_response = self._get_response_dict("v1") # Since there is no version, there are some fields that we need to change from the default response del expected_response["addons"]["doc_diff"] expected_response["builds"]["current"] = None expected_response["versions"]["current"] = None expected_response["readthedocs"]["resolver"]["filename"] = None expected_response["addons"]["search"]["default_filter"] = f"project:{self.project.slug}" assert self._normalize_datetime_fields(r.json()) == expected_response def test_custom_domain_url(self): fixture.get( Domain, domain="docs.example.com", canonical=True, project=self.project, ) r = self.client.get( reverse("proxito_readthedocs_docs_addons"), { "api-version": "1.0.0", "client-version": "0.6.0", "url": "https://docs.example.com/en/latest/", }, secure=True, headers={ "host": "docs.example.com", }, ) assert r.status_code == 200 assert len(r.json()["versions"]["active"]) == 1 assert r.json()["versions"]["active"][0]["slug"] == "latest" assert ( r.json()["versions"]["active"][0]["urls"]["documentation"] == "https://docs.example.com/en/latest/" ) def test_linkpreviews(self): self.project.addons.linkpreviews_enabled = True self.project.addons.save() r = self.client.get( reverse("proxito_readthedocs_docs_addons"), { "api-version": "1.0.0", "client-version": "0.6.0", "url": "https://project.dev.readthedocs.io/en/latest/", }, secure=True, headers={ "host": "project.dev.readthedocs.io", }, ) expected = { "enabled": True, "selector": None, } assert r.status_code == 200 assert r.json()["addons"]["linkpreviews"] == expected def test_non_existent_project(self): r = self.client.get( reverse("proxito_readthedocs_docs_addons"), { "api-version": "1.0.0", "client-version": "0.6.0", "project-slug": "non-existent-project", "version-slug": "latest", }, secure=True, headers={ "host": "project.dev.readthedocs.io", }, ) assert r.status_code == 404 assert r.json() == {"detail": "No Project matches the given query."} def test_number_of_queries_project_version_slug(self): # The number of queries should not increase too much, even if we change # some of the responses from the API. This test will help us to # understand how much this number varies depending on the changes we do. # Create many versions for this project. # These versions will call `resolver.resolve` to generate the URL returned for # `projects.translations` and `versions.active` fields. 
for i in range(35): name = f"public-built-{i}" fixture.get( Version, project=self.project, privacy_level=PUBLIC, slug=name, verbose_name=name, built=True, active=True, ) with self.assertNumQueries(14): r = self.client.get( reverse("proxito_readthedocs_docs_addons"), { "api-version": "1.0.0", "client-version": "0.6.0", "project-slug": "project", "version-slug": "latest", }, secure=True, headers={ "host": "project.dev.readthedocs.io", }, ) assert r.status_code == 200 def test_number_of_queries_url(self): for i in range(35): name = f"public-built-{i}" fixture.get( Version, project=self.project, privacy_level=PUBLIC, slug=name, verbose_name=name, built=True, active=True, ) with self.assertNumQueries(15): r = self.client.get( reverse("proxito_readthedocs_docs_addons"), { "url": "https://project.dev.readthedocs.io/en/latest/", "api-version": "1.0.0", }, secure=True, headers={ "host": "project.dev.readthedocs.io", }, ) assert r.status_code == 200 def test_number_of_queries_url_subproject(self): subproject = fixture.get( Project, slug="subproject", repo="https://github.com/readthedocs/subproject", privacy_level=PUBLIC, ) subproject.versions.update(privacy_level=PUBLIC, built=True, active=True) self.project.add_subproject(subproject) for i in range(35): name = f"public-built-{i}" fixture.get( Version, project=subproject, privacy_level=PUBLIC, slug=name, verbose_name=name, built=True, active=True, ) with self.assertNumQueries(18): r = self.client.get( reverse("proxito_readthedocs_docs_addons"), { "url": "https://project.dev.readthedocs.io/projects/subproject/en/latest/", "client-version": "0.6.0", "api-version": "1.0.0", }, secure=True, headers={ "host": "project.dev.readthedocs.io", }, ) assert r.status_code == 200 # Test parent project has fewer queries with self.assertNumQueries(14): r = self.client.get( reverse("proxito_readthedocs_docs_addons"), { "url": "https://project.dev.readthedocs.io/en/latest/", "client-version": "0.6.0", "api-version": "1.0.0", }, secure=True, headers={ "host": "project.dev.readthedocs.io", }, ) assert r.status_code == 200 def test_number_of_queries_url_translations(self): # Create multiple translations to be shown in the flyout for language in ["ja", "es", "ru", "pt-br"]: slug = f"translation-{language}" fixture.get( Project, slug=slug, main_language_project=self.project, language=language, ) with self.assertNumQueries(24): r = self.client.get( reverse("proxito_readthedocs_docs_addons"), { "url": "https://project.dev.readthedocs.io/en/latest/", "client-version": "0.6.0", "api-version": "1.0.0", }, secure=True, headers={ "host": "project.dev.readthedocs.io", }, ) assert r.status_code == 200 @override_settings( RTD_FILETREEDIFF_ALL=True, ) @mock.patch("readthedocs.proxito.views.hosting.get_diff") def test_file_tree_diff_ignored_files(self, get_diff): ignored_files = [ "ignored.html", "archives/*", ] self.project.addons.filetreediff_enabled = True self.project.addons.filetreediff_ignored_files = ignored_files self.project.addons.save() get_diff.return_value = FileTreeDiff( current_version=self.version, current_version_build=self.build, base_version=self.version, base_version_build=self.build, files=[ ("tags/newtag.html", FileTreeDiffFileStatus.added), ("ignored.html", FileTreeDiffFileStatus.modified), ("archives/2025.html", FileTreeDiffFileStatus.modified), ("changelog/2025.2.html", FileTreeDiffFileStatus.modified), ("deleted.html", FileTreeDiffFileStatus.deleted), ], outdated=False, ) r = self.client.get( reverse("proxito_readthedocs_docs_addons"), { "url": 
"https://project.dev.readthedocs.io/en/latest/", "client-version": "0.6.0", "api-version": "1.0.0", }, secure=True, headers={ "host": "project.dev.readthedocs.io", }, ) expected = { "enabled": True, "outdated": False, "diff": { "added": [ { "filename": "tags/newtag.html", "urls": { "current": "https://project.dev.readthedocs.io/en/latest/tags/newtag.html", "base": "https://project.dev.readthedocs.io/en/latest/tags/newtag.html", }, }, ], "deleted": [ { "filename": "deleted.html", "urls": { "current": "https://project.dev.readthedocs.io/en/latest/deleted.html", "base": "https://project.dev.readthedocs.io/en/latest/deleted.html", }, }, ], "modified": [ { "filename": "changelog/2025.2.html", "urls": { "current": "https://project.dev.readthedocs.io/en/latest/changelog/2025.2.html", "base": "https://project.dev.readthedocs.io/en/latest/changelog/2025.2.html", }, }, ], }, } assert r.status_code == 200 assert r.json()["addons"]["filetreediff"] == expected @mock.patch("readthedocs.filetreediff.get_manifest") def test_file_tree_diff(self, get_manifest): self.project.addons.filetreediff_enabled = True self.project.addons.save() pr_version = get( Version, project=self.project, slug="123", active=True, built=True, privacy_level=PUBLIC, type=EXTERNAL, ) pr_build = get( Build, project=self.project, version=pr_version, commit="a1b2c3", state=BUILD_STATE_FINISHED, success=True, ) get_manifest.side_effect = [ FileTreeDiffManifest( build_id=pr_build.id, files=[ FileTreeDiffManifestFile( path="index.html", main_content_hash="hash1", ), FileTreeDiffManifestFile( path="tutorial/index.html", main_content_hash="hash1", ), FileTreeDiffManifestFile( path="new-file.html", main_content_hash="hash1", ), ], ), FileTreeDiffManifest( build_id=self.build.id, files=[ FileTreeDiffManifestFile( path="index.html", main_content_hash="hash1", ), FileTreeDiffManifestFile( path="tutorial/index.html", main_content_hash="hash-changed", ), FileTreeDiffManifestFile( path="deleted.html", main_content_hash="hash-deleted", ), ], ), ] with self.assertNumQueries(19): r = self.client.get( reverse("proxito_readthedocs_docs_addons"), { "url": "https://project--123.dev.readthedocs.build/en/123/", "client-version": "0.6.0", "api-version": "1.0.0", }, secure=True, headers={ "host": "project--123.dev.readthedocs.build", }, ) assert r.status_code == 200 filetreediff_response = r.json()["addons"]["filetreediff"] assert filetreediff_response == { "enabled": True, "outdated": False, "diff": { "added": [ { "filename": "new-file.html", "urls": { "current": "https://project--123.dev.readthedocs.build/en/123/new-file.html", "base": "https://project.dev.readthedocs.io/en/latest/new-file.html", }, }, ], "deleted": [ { "filename": "deleted.html", "urls": { "current": "https://project--123.dev.readthedocs.build/en/123/deleted.html", "base": "https://project.dev.readthedocs.io/en/latest/deleted.html", }, }, ], "modified": [ { "filename": "tutorial/index.html", "urls": { "current": "https://project--123.dev.readthedocs.build/en/123/tutorial/index.html", "base": "https://project.dev.readthedocs.io/en/latest/tutorial/index.html", }, }, ], }, } def test_version_ordering(self): for slug in ["1.0", "1.2", "1.12", "2.0", "2020.01.05", "a-slug", "z-slug"]: fixture.get( Version, project=self.project, privacy_level=PUBLIC, slug=slug, verbose_name=slug, built=True, active=True, ) self.project.update_stable_version() self.project.versions.update( privacy_level=PUBLIC, built=True, active=True, ) kwargs = { "path": reverse("proxito_readthedocs_docs_addons"), "data": { 
"url": "https://project.dev.readthedocs.io/en/latest/", "client-version": "0.6.0", "api-version": "1.0.0", }, "secure": True, "headers": { "host": "project.dev.readthedocs.io", }, } # Default ordering (SemVer) expected = [ "latest", "stable", "2020.01.05", "2.0", "1.12", "1.2", "1.0", "z-slug", "a-slug", ] r = self.client.get(**kwargs) assert r.status_code == 200 assert [v["slug"] for v in r.json()["versions"]["active"]] == expected self.project.refresh_from_db() addons = self.project.addons # The order of latest and stable doesn't change when using semver. addons.flyout_sorting_latest_stable_at_beginning = False addons.save() r = self.client.get(**kwargs) assert r.status_code == 200 assert [v["slug"] for v in r.json()["versions"]["active"]] == expected addons.flyout_sorting = ADDONS_FLYOUT_SORTING_ALPHABETICALLY addons.flyout_sorting_latest_stable_at_beginning = True addons.save() expected = [ "z-slug", "stable", "latest", "a-slug", "2020.01.05", "2.0", "1.2", "1.12", "1.0", ] r = self.client.get(**kwargs) assert r.status_code == 200 assert [v["slug"] for v in r.json()["versions"]["active"]] == expected # The order of latest and stable doesn't change when using alphabetical sorting. addons.flyout_sorting_latest_stable_at_beginning = False addons.save() r = self.client.get(**kwargs) assert r.status_code == 200 assert [v["slug"] for v in r.json()["versions"]["active"]] == expected addons.flyout_sorting = ADDONS_FLYOUT_SORTING_PYTHON_PACKAGING addons.flyout_sorting_latest_stable_at_beginning = True addons.save() r = self.client.get(**kwargs) assert r.status_code == 200 expected = [ "latest", "stable", "2020.01.05", "2.0", "1.12", "1.2", "1.0", "z-slug", "a-slug", ] assert [v["slug"] for v in r.json()["versions"]["active"]] == expected addons.flyout_sorting_latest_stable_at_beginning = False addons.save() r = self.client.get(**kwargs) assert r.status_code == 200 expected = [ "2020.01.05", "2.0", "1.12", "1.2", "1.0", "z-slug", "stable", "latest", "a-slug", ] assert [v["slug"] for v in r.json()["versions"]["active"]] == expected addons.flyout_sorting = ADDONS_FLYOUT_SORTING_CALVER addons.flyout_sorting_latest_stable_at_beginning = True addons.save() r = self.client.get(**kwargs) assert r.status_code == 200 expected = [ "latest", "stable", "2020.01.05", "z-slug", "a-slug", "2.0", "1.2", "1.12", "1.0", ] assert [v["slug"] for v in r.json()["versions"]["active"]] == expected addons.flyout_sorting_latest_stable_at_beginning = False addons.save() r = self.client.get(**kwargs) assert r.status_code == 200 expected = [ "2020.01.05", "z-slug", "stable", "latest", "a-slug", "2.0", "1.2", "1.12", "1.0", ] assert [v["slug"] for v in r.json()["versions"]["active"]] == expected
TestReadTheDocsConfigJson
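The request contract those tests keep repeating, extracted for reference: either a full `url` or a `project-slug`/`version-slug` pair, always with `api-version` (the endpoint path below is an assumption; the suite resolves it via `reverse()` instead of hardcoding it):

```python
from urllib.parse import urlencode

params = {
    "url": "https://project.dev.readthedocs.io/en/latest/",
    "api-version": "1.0.0",  # mandatory; unsupported versions return 400
}
# "/_/addons/" is an assumed path; the tests use
# reverse("proxito_readthedocs_docs_addons") rather than a literal.
print(f"/_/addons/?{urlencode(params)}")
```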
python
Textualize__textual
src/textual/validation.py
{ "start": 3279, "end": 8081 }
class ____(ABC): '''Base class for the validation of string values. Commonly used in conjunction with the `Input` widget, which accepts a list of validators via its constructor. This validation framework can also be used to validate any 'stringly-typed' values (for example raw command line input from `sys.args`). To implement your own `Validator`, subclass this class. Example: ```python def is_palindrome(value: str) -> bool: """Check if a string has the same code points left to right as right to left.""" return value == value[::-1] class Palindrome(Validator): def validate(self, value: str) -> ValidationResult: if is_palindrome(value): return self.success() else: return self.failure("Not a palindrome!") ``` ''' def __init__(self, failure_description: str | None = None) -> None: self.failure_description = failure_description """A description of why the validation failed. The description (intended to be user-facing) to attach to the Failure if the validation fails. This failure description is ultimately accessible at the time of validation failure via the `Input.Changed` or `Input.Submitted` event, and you can access it on your message handler (a method called, for example, `on_input_changed` or a method decorated with `@on(Input.Changed)`). """ @abstractmethod def validate(self, value: str) -> ValidationResult: """Validate the value and return a ValidationResult describing the outcome of the validation. Implement this method when defining custom validators. Args: value: The value to validate. Returns: The result of the validation ([`self.success()`][textual.validation.Validator.success] or [`self.failure(...)`][textual.validation.Validator.failure]). """ def describe_failure(self, failure: Failure) -> str | None: """Return a string description of the Failure. Used to provide a more fine-grained description of the failure. A Validator could fail for multiple reasons, so this method could be used to provide a different reason for different types of failure. !!! warning This method is only called if no other description has been supplied. If you supply a description inside a call to `self.failure(description="...")`, or pass a description into the constructor of the validator, those will take priority, and this method won't be called. Args: failure: Information about why the validation failed. Returns: A string description of the failure. """ return self.failure_description def success(self) -> ValidationResult: """Shorthand for `ValidationResult(True)`. Return `self.success()` from [`validate()`][textual.validation.Validator.validate] to indicate that validation *succeeded*. Returns: A ValidationResult indicating validation succeeded. """ return ValidationResult() def failure( self, description: str | None = None, value: str | None = None, failures: Failure | Sequence[Failure] | None = None, ) -> ValidationResult: """Shorthand for signaling validation failure. Return `self.failure(...)` from [`validate()`][textual.validation.Validator.validate] to indicate that validation *failed*. Args: description: The failure description that will be used. When used in conjunction with the Input widget, this is the description that will ultimately be available inside the handler for `Input.Changed`. If not supplied, the `failure_description` from the `Validator` will be used. If that is not supplied either, then the `describe_failure` method on `Validator` will be called. value: The value that was considered invalid.
This is optional, and only needs to be supplied if required in your `Input.Changed` handler. failures: The reasons the validator failed. If not supplied, a generic `Failure` will be included in the ValidationResult returned from this function. Returns: A ValidationResult representing failed validation, and containing the metadata supplied to this function. """ if isinstance(failures, Failure): failures = [failures] result = ValidationResult( failures or [Failure(validator=self, value=value, description=description)], ) return result
Validator
python
run-llama__llama_index
llama-index-core/tests/program/test_function_program.py
{ "start": 1522, "end": 4637 }
class ____(MagicMock): def predict_and_call( self, tools: List["BaseTool"], user_msg: Optional[Union[str, ChatMessage]] = None, chat_history: Optional[List[ChatMessage]] = None, verbose: bool = False, allow_parallel_tool_calls: bool = False, **kwargs: Any, ) -> "AgentChatResponse": """Predict and call the tool.""" return _get_mock_album_response( allow_parallel_tool_calls=allow_parallel_tool_calls ) async def apredict_and_call( self, tools: List["BaseTool"], user_msg: Optional[Union[str, ChatMessage]] = None, chat_history: Optional[List[ChatMessage]] = None, verbose: bool = False, allow_parallel_tool_calls: bool = False, **kwargs: Any, ) -> "AgentChatResponse": """Predict and call the tool.""" return _get_mock_album_response( allow_parallel_tool_calls=allow_parallel_tool_calls ) @property def metadata(self) -> LLMMetadata: return LLMMetadata(is_function_calling_model=True) def test_function_program() -> None: """Test Function program.""" prompt_template_str = """This is a test album with {topic}""" llm_program = FunctionCallingProgram.from_defaults( output_cls=MockAlbum, prompt_template_str=prompt_template_str, llm=MockLLM(), ) obj_output = llm_program(topic="songs") assert isinstance(obj_output, MockAlbum) assert obj_output.title == "hello" assert obj_output.artist == "world" assert obj_output.songs[0].title == "song1" assert obj_output.songs[1].title == "song2" def test_function_program_multiple() -> None: """Test Function program multiple.""" prompt_template_str = """This is a test album with {topic}""" llm_program = FunctionCallingProgram.from_defaults( output_cls=MockAlbum, prompt_template_str=prompt_template_str, llm=MockLLM(), allow_parallel_tool_calls=True, ) obj_outputs = llm_program(topic="songs") assert isinstance(obj_outputs, list) assert len(obj_outputs) == 2 assert isinstance(obj_outputs[0], MockAlbum) assert isinstance(obj_outputs[1], MockAlbum) # test second output assert obj_outputs[1].title == "hello2" assert obj_outputs[1].artist == "world2" assert obj_outputs[1].songs[0].title == "song3" assert obj_outputs[1].songs[1].title == "song4" @pytest.mark.asyncio async def test_async_function_program() -> None: """Test async function program.""" # same as above but async prompt_template_str = """This is a test album with {topic}""" llm_program = FunctionCallingProgram.from_defaults( output_cls=MockAlbum, prompt_template_str=prompt_template_str, llm=MockLLM(), ) obj_output = await llm_program.acall(topic="songs") assert isinstance(obj_output, MockAlbum) assert obj_output.title == "hello" assert obj_output.artist == "world" assert obj_output.songs[0].title == "song1" assert obj_output.songs[1].title == "song2"
MockLLM
python
eth-brownie__brownie
brownie/network/contract.py
{ "start": 2520, "end": 5046 }
class ____: _dir_color: Final = "bright magenta" def __init__( self, project: Optional[Union["Project", "TempProject"]], build: ContractBuildJson, sources: Dict[str, Any], ) -> None: self._project = project self._build: Final = build.copy() self._sources: Final = sources abi = self.abi self.topics: Final = _get_topics(abi) self.selectors: Final[Dict[Selector, FunctionName]] = { build_function_selector(i): FunctionName(i["name"]) for i in abi if i["type"] == "function" } # this isn't fully accurate because of overloaded methods - will be removed in `v2.0.0` self.signatures: Final[Dict[FunctionName, Selector]] = { v: k for k, v in self.selectors.items() } parse_errors_from_abi(abi) @property def abi(self) -> List[ABIElement]: return self._build["abi"] @property def _name(self) -> ContractName: return self._build["contractName"] def info(self) -> None: """ Display NatSpec documentation for this contract. """ if natspec := self._build.get("natspec"): _print_natspec(natspec) def get_method(self, calldata: str) -> Optional[str]: sig = calldata[:10].lower() return self.selectors.get(sig) def decode_input(self, calldata: Union[str, bytes]) -> Tuple[str, Any]: """ Decode input calldata for this contract. Arguments --------- calldata : str | bytes Calldata for a call to this contract Returns ------- str Signature of the function that was called Any Decoded input arguments """ if not isinstance(calldata, HexBytes): calldata = HexBytes(calldata) fn_selector = hexbytes_to_hexstring(calldata[:4]) abi: Optional[ABIFunction] = None for i in self.abi: if i["type"] == "function" and build_function_selector(i) == fn_selector: abi = i break if abi is None: raise ValueError("Four byte selector does not match the ABI for this contract") function_sig = build_function_signature(abi) types_list = get_type_strings(abi["inputs"]) result = decode_abi(types_list, calldata[4:]) input_args = format_input(abi, result) return function_sig, input_args
_ContractBase
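A note on the record above: `decode_input` and `get_method` dispatch on the first four bytes of calldata, which are the keccak-256 hash of the canonical function signature truncated to four bytes. A quick illustration of that selector computation, using `eth_utils` (assumed available here, as it ships alongside Brownie's web3 stack):

```python
from eth_utils import function_signature_to_4byte_selector

# The well-known ERC-20 transfer selector; calldata starting with these four
# bytes would be looked up in the contract's `selectors` mapping.
selector = function_signature_to_4byte_selector("transfer(address,uint256)")
print(selector.hex())  # a9059cbb
```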
python
PyCQA__pylint
doc/exts/pyreverse_configuration.py
{ "start": 560, "end": 3294 }
class ____(NamedTuple): name: str optdict: OptionDict PYREVERSE_PATH = ( Path(__file__).resolve().parent.parent / "additional_tools" / "pyreverse" ) """Path to the pyreverse documentation folder.""" def _write_config_page(run: Run) -> None: """Create or overwrite the configuration page.""" sections: list[str] = [ f"""\ .. This file is auto-generated. Make any changes to the associated .. docs extension in 'doc/exts/pyreverse_configuration.py'. {get_rst_title("Usage", "#")} ``pyreverse`` is run from the command line using the following syntax:: pyreverse [options] <packages> where ``<packages>`` is one or more Python packages or modules to analyze. The available options are organized into the following categories: * :ref:`filtering-and-scope` - Control which classes and relationships appear in your diagrams * :ref:`display-options` - Customize the visual appearance including colors and labels * :ref:`output-control` - Select output formats and set the destination directory * :ref:`project-configuration` - Define project settings like source roots and ignored files """ ] options: list[OptionsData] = [OptionsData(name, info) for name, info in run.options] option_groups: dict[str, list[str]] = {g: [] for g in OPTIONS_GROUPS.values()} for option in sorted(options, key=lambda x: x.name): option_string = get_rst_title(f"--{option.name}", "-") option_string += f"*{option.optdict.get('help')}*\n\n" if option.optdict.get("default") == "": option_string += '**Default:** ``""``\n\n\n' else: option_string += f"**Default:** ``{option.optdict.get('default')}``\n\n\n" option_groups[str(option.optdict.get("group"))].append(option_string) for group_title in OPTIONS_GROUPS.values(): ref_title = group_title.lower().replace(" ", "-") sections.append( f"""\ .. _{ref_title}: {get_rst_title(group_title, "=")} {"".join(option_groups[group_title])}""" ) # Join all sections and remove the final two newlines final_page = "\n\n".join(sections)[:-2] with open(PYREVERSE_PATH / "configuration.rst", "w", encoding="utf-8") as stream: stream.write(final_page) # pylint: disable-next=unused-argument def build_options_page(app: Sphinx | None) -> None: # Write configuration page _write_config_page(Run([])) def setup(app: Sphinx) -> dict[str, bool]: """Connects the extension to the Sphinx process.""" # Register callback at the builder-inited Sphinx event # See https://www.sphinx-doc.org/en/master/extdev/appapi.html app.connect("builder-inited", build_options_page) return {"parallel_read_safe": True}
OptionsData
python
huggingface__transformers
src/transformers/models/pixtral/modeling_pixtral.py
{ "start": 18219, "end": 19268 }
class ____(PreTrainedModel): config: PixtralVisionConfig base_model_prefix = "model" main_input_name = "pixel_values" input_modalities = ("image",) supports_gradient_checkpointing = True _supports_attention_backend = True _supports_flash_attn = True _supports_sdpa = True _supports_flex_attn = True _no_split_modules = ["PixtralAttentionLayer"] def generate_block_attention_mask(patch_embeds_list, tensor): dtype = tensor.dtype device = tensor.device seq_len = tensor.shape[1] d_min = torch.finfo(dtype).min causal_mask = torch.full((seq_len, seq_len), fill_value=d_min, dtype=dtype, device=device) block_end_idx = torch.tensor(patch_embeds_list).cumsum(-1) block_start_idx = torch.tensor([0] + patch_embeds_list[:-1]).cumsum(-1) for start, end in zip(block_start_idx, block_end_idx): causal_mask[start:end, start:end] = 0 causal_mask = causal_mask[None, None, :, :].expand(tensor.shape[0], 1, -1, -1) return causal_mask @auto_docstring
PixtralPreTrainedModel
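The `generate_block_attention_mask` helper bundled into this span builds a block-diagonal additive mask so patches from different images never attend to one another. A standalone sketch of the same construction, with hypothetical sizes:

```python
import torch

patch_counts = [2, 3]                          # e.g. two images: 2 and 3 patches
hidden = torch.zeros(1, sum(patch_counts), 8)  # (batch, seq_len, hidden_dim)

d_min = torch.finfo(hidden.dtype).min
mask = torch.full((hidden.shape[1],) * 2, d_min, dtype=hidden.dtype)
ends = torch.tensor(patch_counts).cumsum(-1)
starts = torch.tensor([0] + patch_counts[:-1]).cumsum(-1)
for s, e in zip(starts, ends):
    mask[s:e, s:e] = 0                         # unmask attention within one image

print((mask == 0).int())
# tensor([[1, 1, 0, 0, 0],
#         [1, 1, 0, 0, 0],
#         [0, 0, 1, 1, 1],
#         [0, 0, 1, 1, 1],
#         [0, 0, 1, 1, 1]])
```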
python
astropy__astropy
astropy/table/tests/test_init_table.py
{ "start": 2054, "end": 3531 }
class ____: def _setup(self, table_type): pass def test_basic_init(self, table_type): self._setup(table_type) t = table_type(self.data, names=("a", "b", "c")) assert t.colnames == ["a", "b", "c"] assert np.all(t["a"] == np.array([1, 3])) assert np.all(t["b"] == np.array([2, 4])) assert np.all(t["c"] == np.array([3, 5])) assert all(t[name].name == name for name in t.colnames) def test_set_dtype(self, table_type): self._setup(table_type) t = table_type(self.data, names=("a", "b", "c"), dtype=("i4", "f4", "f8")) assert t.colnames == ["a", "b", "c"] assert np.all(t["a"] == np.array([1, 3], dtype="i4")) assert np.all(t["b"] == np.array([2, 4], dtype="f4")) assert np.all(t["c"] == np.array([3, 5], dtype="f8")) assert t["a"].dtype.type == np.int32 assert t["b"].dtype.type == np.float32 assert t["c"].dtype.type == np.float64 assert all(t[name].name == name for name in t.colnames) def test_names_dtype_mismatch(self, table_type): self._setup(table_type) with pytest.raises(ValueError): table_type(self.data, names=("a",), dtype=("i4", "f4", "i4")) def test_names_cols_mismatch(self, table_type): self._setup(table_type) with pytest.raises(ValueError): table_type(self.data, names=("a",), dtype="i4") @pytest.mark.usefixtures("table_type")
BaseInitFrom
python
Netflix__metaflow
metaflow/plugins/kubernetes/kubernetes_jobsets.py
{ "start": 32398, "end": 37802 }
class ____(object):
    def __init__(
        self,
        client,
        name=None,
        namespace=None,
        num_parallel=None,
        # explicitly declaring num_parallel because we need to ensure that
        # num_parallel is an INTEGER and this abstraction is called by the
        # local runtime abstraction of kubernetes.
        # Argo will call another abstraction that will allow setting a lot of these
        # values from the top level argo code.
        **kwargs
    ):
        self._client = client
        self._annotations = {}
        self._labels = {}
        self._group = KUBERNETES_JOBSET_GROUP
        self._version = KUBERNETES_JOBSET_VERSION
        self._namespace = namespace
        self.name = name

        self._jobset_control_addr = _make_domain_name(
            name,
            "control",
            0,
            0,
            namespace,
        )

        self._control_spec = JobSetSpec(
            client.get(), name="control", namespace=namespace, **kwargs
        )
        self._worker_spec = JobSetSpec(
            client.get(), name="worker", namespace=namespace, **kwargs
        )
        assert (
            type(num_parallel) == int
        ), "num_parallel must be an integer"  # todo: [final-refactor] : fix-me

    @property
    def jobset_control_addr(self):
        return self._jobset_control_addr

    @property
    def worker(self):
        return self._worker_spec

    @property
    def control(self):
        return self._control_spec

    def environment_variable_from_selector(self, name, label_value):
        self.worker.environment_variable_from_selector(name, label_value)
        self.control.environment_variable_from_selector(name, label_value)
        return self

    def environment_variables_from_selectors(self, env_dict):
        for name, label_value in env_dict.items():
            self.worker.environment_variable_from_selector(name, label_value)
            self.control.environment_variable_from_selector(name, label_value)
        return self

    def environment_variable(self, name, value):
        self.worker.environment_variable(name, value)
        self.control.environment_variable(name, value)
        return self

    def label(self, name, value):
        self.worker.label(name, value)
        self.control.label(name, value)
        self._labels = dict(self._labels, **{name: value})
        return self

    def annotation(self, name, value):
        self.worker.annotation(name, value)
        self.control.annotation(name, value)
        self._annotations = dict(self._annotations, **{name: value})
        return self

    def labels(self, labels):
        for k, v in labels.items():
            self.label(k, v)
        return self

    def annotations(self, annotations):
        for k, v in annotations.items():
            self.annotation(k, v)
        return self

    def secret(self, name):
        self.worker.secret(name)
        self.control.secret(name)
        return self

    def dump(self):
        client = self._client.get()
        return dict(
            apiVersion=self._group + "/" + self._version,
            kind="JobSet",
            metadata=client.api_client.ApiClient().sanitize_for_serialization(
                client.V1ObjectMeta(
                    name=self.name,
                    labels=self._labels,
                    annotations=self._annotations,
                )
            ),
            spec=dict(
                replicatedJobs=[self.control.dump(), self.worker.dump()],
                suspend=False,
                startupPolicy=dict(
                    # We explicitly set an InOrder Startup policy so that
                    # we can ensure that the control pod starts before the worker pods.
                    # This is required so that when worker pods try to access the control's IP
                    # we are able to resolve the control's IP address.
                    startupPolicyOrder="InOrder"
                ),
                successPolicy=None,
                # The Failure Policy helps set the number of retries for the jobset,
                # but we don't rely on it and instead rely on either the local scheduler
                # or the Argo Workflows to handle retries.
failurePolicy=None, network=None, ), status=None, ) def execute(self): client = self._client.get() api_instance = client.CoreV1Api() with client.ApiClient() as api_client: api_instance = client.CustomObjectsApi(api_client) try: jobset_obj = api_instance.create_namespaced_custom_object( group=self._group, version=self._version, namespace=self._namespace, plural="jobsets", body=self.dump(), ) except Exception as e: raise KubernetesJobsetException( "Exception when calling CustomObjectsApi->create_namespaced_custom_object: %s\n" % e ) return RunningJobSet( client=self._client, name=jobset_obj["metadata"]["name"], namespace=jobset_obj["metadata"]["namespace"], group=self._group, version=self._version, )
KubernetesJobSet
python
pola-rs__polars
py-polars/src/polars/datatypes/classes.py
{ "start": 42026, "end": 42490 }
class ____(BaseExtension): """ Generic extension data type. When `UNKNOWN_EXTENSION_TYPE_BEHAVIOR` is set to `"load_as_extension"`, any non-registered extension type will be loaded as this type. .. warning:: This functionality is considered **unstable**. It may be changed at any point without it being considered a breaking change. See Also -------- BaseExtension polars.register_extension_type """
Extension
python
walkccc__LeetCode
solutions/2979. Most Expensive Item That Can Not Be Bought/2979.py
{ "start": 0, "end": 186 }
class ____: def mostExpensiveItem(self, primeOne: int, primeTwo: int) -> int: # https://en.wikipedia.org/wiki/Coin_problem return primeOne * primeTwo - primeOne - primeTwo
Solution
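The one-liner above is the Chicken McNugget (Frobenius) theorem: for coprime denominations a and b — which two distinct primes always are — the largest amount not expressible as a non-negative combination is a*b - a - b. A brute-force cross-check:

```python
def brute_force_frobenius(a: int, b: int) -> int:
    # Every value above a*b - a - b is representable, so a*b is a safe bound.
    limit = a * b
    reachable = {i * a + j * b
                 for i in range(limit // a + 1)
                 for j in range(limit // b + 1)}
    return max(v for v in range(limit + 1) if v not in reachable)

for a, b in [(2, 5), (3, 7), (5, 11)]:
    assert brute_force_frobenius(a, b) == a * b - a - b
print(brute_force_frobenius(2, 5))  # 3: amounts 1 and 3 are unreachable
```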
python
Textualize__textual
src/textual/layouts/grid.py
{ "start": 412, "end": 13640 }
class ____(Layout):
    """Used to layout Widgets into a grid."""

    name = "grid"

    def __init__(self) -> None:
        self.min_column_width: int | None = None
        """Maintain a minimum column width, or `None` for no minimum."""
        self.max_column_width: int | None = None
        """Maintain a maximum column width, or `None` for no maximum."""
        self.stretch_height: bool = False
        """Stretch the height of cells to be equal in each row."""
        self.regular: bool = False
        """Grid should be regular (no remainder in last row)."""
        self.expand: bool = False
        """Expand the grid to fit the container if it is smaller."""
        self.shrink: bool = False
        """Shrink the grid to fit the container if it is larger."""
        self.auto_minimum: bool = False
        """If self.shrink is `True`, auto-detect and limit the width."""
        self._grid_size: tuple[int, int] | None = None
        """Grid size after last arrange call."""

    @property
    def grid_size(self) -> tuple[int, int] | None:
        """The grid size after the last arrange call.

        Returns:
            A tuple of (WIDTH, HEIGHT) or `None` prior to the first `arrange`.
        """
        return self._grid_size

    def arrange(
        self, parent: Widget, children: list[Widget], size: Size, greedy: bool = True
    ) -> ArrangeResult:
        parent.pre_layout(self)
        styles = parent.styles
        row_scalars = styles.grid_rows or (
            [Scalar.parse("1fr")]
            if (size.height and not parent.styles.is_auto_height)
            else [Scalar.parse("auto")]
        )
        column_scalars = styles.grid_columns or [Scalar.parse("1fr")]
        gutter_horizontal = styles.grid_gutter_horizontal
        gutter_vertical = styles.grid_gutter_vertical
        table_size_columns = max(1, styles.grid_size_columns)
        min_column_width = self.min_column_width
        max_column_width = self.max_column_width
        container_width = size.width

        if max_column_width is not None:
            container_width = (
                max(1, min(len(children), (container_width // max_column_width)))
                * max_column_width
            )
            size = Size(container_width, size.height)

        if min_column_width is not None:
            table_size_columns = max(
                1,
                (container_width + gutter_horizontal)
                // (min_column_width + gutter_horizontal),
            )
            table_size_columns = min(table_size_columns, len(children))
            if self.regular:
                while len(children) % table_size_columns and table_size_columns > 1:
                    table_size_columns -= 1

        table_size_rows = styles.grid_size_rows
        viewport = parent.app.viewport_size
        keyline_style, _keyline_color = styles.keyline
        offset = (0, 0)
        gutter_spacing: Spacing | None
        if keyline_style == "none":
            gutter_spacing = None
        else:
            size -= (2, 2)
            offset = (1, 1)
            gutter_spacing = Spacing(
                gutter_vertical,
                gutter_horizontal,
                gutter_vertical,
                gutter_horizontal,
            )

        def cell_coords(column_count: int) -> Iterable[tuple[int, int]]:
            """Iterate over table coordinates ad infinitum.

            Args:
                column_count: Number of columns
            """
            row = 0
            while True:
                for column in range(column_count):
                    yield (column, row)
                row += 1

        def widget_coords(
            column_start: int, row_start: int, columns: int, rows: int
        ) -> set[tuple[int, int]]:
            """Get coords occupied by a cell.

            Args:
                column_start: Start column.
                row_start: Start row.
                columns: Number of columns.
                rows: Number of rows.

            Returns:
                Set of coords.
            """
            return {
                (column, row)
                for column in range(column_start, column_start + columns)
                for row in range(row_start, row_start + rows)
            }

        def repeat_scalars(scalars: Iterable[Scalar], count: int) -> list[Scalar]:
            """Repeat an iterable of scalars as many times as required to return
            a list of `count` values.

            Args:
                scalars: Iterable of values.
                count: Number of values to return.

            Returns:
                A list of values.
""" limited_values = list(scalars)[:] while len(limited_values) < count: limited_values.extend(scalars) return limited_values[:count] cell_map: dict[tuple[int, int], tuple[Widget, bool]] = {} cell_size_map: dict[Widget, tuple[int, int, int, int]] = {} next_coord = iter(cell_coords(table_size_columns)).__next__ cell_coord = (0, 0) column = row = 0 for child in children: child_styles = child.styles column_span = child_styles.column_span or 1 row_span = child_styles.row_span or 1 # Find a slot where this cell fits # A cell on a previous row may have a row span while True: column, row = cell_coord coords = widget_coords(column, row, column_span, row_span) if cell_map.keys().isdisjoint(coords): for coord in coords: cell_map[coord] = (child, coord == cell_coord) cell_size_map[child] = ( column, row, column_span - 1, row_span - 1, ) break else: cell_coord = next_coord() continue cell_coord = next_coord() column_scalars = repeat_scalars(column_scalars, table_size_columns) table_size_rows = table_size_rows if table_size_rows else row + 1 row_scalars = repeat_scalars(row_scalars, table_size_rows) self._grid_size = (table_size_columns, table_size_rows) def apply_width_limits(widget: Widget, width: int) -> int: """Apply min and max widths to dimension. Args: widget: A Widget. width: A width. Returns: New width. """ styles = widget.styles if styles.min_width is not None: width = max( width, int(styles.min_width.resolve(size, viewport, Fraction(width))), ) if styles.max_width is not None: width = min( width, int(styles.max_width.resolve(size, viewport, Fraction(width))), ) return width def apply_height_limits(widget: Widget, height: int) -> int: """Apply min and max height to a dimension. Args: widget: A widget. height: A height. Returns: New height """ styles = widget.styles if styles.min_height is not None: height = max( height, int(styles.min_height.resolve(size, viewport, Fraction(height))), ) if styles.max_height is not None: height = min( height, int(styles.max_height.resolve(size, viewport, Fraction(height))), ) return height # Handle any auto columns for column, scalar in enumerate(column_scalars): if scalar.is_auto: width = 0.0 for row in range(len(row_scalars)): coord = (column, row) try: widget, _ = cell_map[coord] except KeyError: pass else: if widget.styles.column_span != 1: continue width = max( width, apply_width_limits( widget, widget.get_content_width(size, viewport) + widget.styles.gutter.width, ), ) column_scalars[column] = Scalar.from_number(width) column_minimums: list[int] | None = None if self.auto_minimum and self.shrink: column_minimums = [1] * table_size_columns for column_index in range(table_size_columns): for row_index in range(len(row_scalars)): if ( cell_info := cell_map.get((column_index, row_index)) ) is not None: widget = cell_info[0] column_minimums[column_index] = max( visualize(widget, widget.render()).get_minimal_width( widget.styles ) + widget.styles.gutter.width, column_minimums[column_index], ) columns = resolve( column_scalars, size.width, gutter_vertical, size, viewport, expand=self.expand, shrink=self.shrink, minimums=column_minimums, ) # Handle any auto rows for row, scalar in enumerate(row_scalars): if scalar.is_auto: height = 0.0 for column in range(len(column_scalars)): coord = (column, row) try: widget, _ = cell_map[coord] except KeyError: pass else: if widget.styles.row_span != 1: continue column_width = columns[column][1] gutter_width, gutter_height = widget.styles.gutter.totals widget_height = apply_height_limits( widget, widget.get_content_height( 
size, viewport, column_width - gutter_width, ) + gutter_height, ) height = max(height, widget_height) row_scalars[row] = Scalar.from_number(height) rows = resolve(row_scalars, size.height, gutter_horizontal, size, viewport) placements: list[WidgetPlacement] = [] _WidgetPlacement = WidgetPlacement add_placement = placements.append max_column = len(columns) - 1 max_row = len(rows) - 1 for widget, (column, row, column_span, row_span) in cell_size_map.items(): x = columns[column][0] if row > max_row: break y = rows[row][0] x2, cell_width = columns[min(max_column, column + column_span)] y2, cell_height = rows[min(max_row, row + row_span)] cell_size = Size(cell_width + x2 - x, cell_height + y2 - y) box_width, box_height, margin = widget._get_box_model( cell_size, viewport, Fraction(cell_size.width), Fraction(cell_size.height), constrain_width=True, greedy=greedy, ) if self.stretch_height and len(children) > 1: if box_height <= cell_size.height: box_height = Fraction(cell_size.height) region = ( Region( x, y, int(box_width + margin.width), int(box_height + margin.height) ) .crop_size(cell_size) .shrink(margin) ) + offset widget_styles = widget.styles placement_offset = ( widget_styles.offset.resolve(cell_size, viewport) if widget_styles.has_rule("offset") else NULL_OFFSET ) absolute = ( widget_styles.has_rule("position") and styles.position == "absolute" ) add_placement( _WidgetPlacement( region, placement_offset, ( margin if gutter_spacing is None else margin.grow_maximum(gutter_spacing) ), widget, absolute, ) ) return placements
GridLayout
python
sanic-org__sanic
sanic/views.py
{ "start": 349, "end": 8571 }
class ____:
    """Class-based implementation for creating and grouping handlers.

    Class-based views (CBVs) are an alternative to function-based views. They allow you to reuse common logic, and group related views, while keeping the flexibility of function-based views.

    To use a class-based view, subclass the method handler, and implement methods (`get`, `post`, `put`, `patch`, `delete`) for the class to correspond to each HTTP method you want to support.

    For example:

    ```python
    class DummyView(HTTPMethodView):
        def get(self, request: Request):
            return text('I am get method')

        def put(self, request: Request):
            return text('I am put method')
    ```

    If someone tries to use a non-implemented method, they will receive a 405 response.

    If you need any url params just include them in the method signature, like you would for function-based views.

    ```python
    class DummyView(HTTPMethodView):
        def get(self, request: Request, my_param_here: str):
            return text(f"I am get method with {my_param_here}")
    ```

    Next, you need to attach the view to the app or blueprint. You can do this in the exact same way as you would for a function-based view, except you should use `MyView.as_view()` instead of `my_view_handler`.

    ```python
    app.add_route(DummyView.as_view(), "/<my_param_here>")
    ```

    Alternatively, you can use the `attach` method:

    ```python
    DummyView.attach(app, "/<my_param_here>")
    ```

    Or, at the time of subclassing:

    ```python
    class DummyView(HTTPMethodView, attach=app, uri="/<my_param_here>"):
        ...
    ```

    To add a decorator, you can either:

    1. Add it to the `decorators` list on the class, which will apply it to all methods on the class; or
    2. Add it to the method directly, which will only apply it to that method.

    ```python
    class DummyView(HTTPMethodView):
        decorators = [my_decorator]
        ...

    # or

    class DummyView(HTTPMethodView):
        @my_decorator
        def get(self, request: Request):
            ...
    ```

    One catch is that you need to be mindful that the call inside the decorator may need to account for the `self` argument, which is passed to the method as the first argument. Alternatively, you may want to also mark your method as `staticmethod` to avoid this.

    Available attributes at the time of subclassing:

    - **attach** (Optional[Union[Sanic, Blueprint]]): The app or blueprint to attach the view to.
    - **uri** (str): The uri to attach the view to.
    - **methods** (Iterable[str]): The HTTP methods to attach the view to. Defaults to `{"GET"}`.
    - **host** (Optional[str]): The host to attach the view to.
    - **strict_slashes** (Optional[bool]): Whether to add a redirect rule for the uri with a trailing slash.
    - **version** (Optional[int]): The version to attach the view to.
    - **name** (Optional[str]): The name to attach the view to.
    - **stream** (bool): Whether the view is a stream handler.
    - **version_prefix** (str): The prefix to use for the version. Defaults to `"/v"`.
""" decorators: list[Callable[[Callable[..., Any]], Callable[..., Any]]] = [] def __init_subclass__( cls, attach: Optional[Union[Sanic, Blueprint]] = None, uri: str = "", methods: Iterable[str] = frozenset({"GET"}), host: Optional[str] = None, strict_slashes: Optional[bool] = None, version: Optional[int] = None, name: Optional[str] = None, stream: bool = False, version_prefix: str = "/v", **kwargs: Any, ) -> None: super().__init_subclass__(**kwargs) if attach: cls.attach( attach, uri=uri, methods=methods, host=host, strict_slashes=strict_slashes, version=version, name=name, stream=stream, version_prefix=version_prefix, ) def dispatch_request(self, request: Request, *args, **kwargs): """Dispatch request to appropriate handler method.""" method = request.method.lower() handler = getattr(self, method, None) if not handler and method == "head": handler = getattr(self, "get") if not handler: # The router will never allow us to get here, but this is # included as a fallback and for completeness. raise NotImplementedError( f"{request.method} is not supported for this endpoint." ) return handler(request, *args, **kwargs) @classmethod def as_view(cls, *class_args: Any, **class_kwargs: Any) -> RouteHandler: """Return view function for use with the routing system, that dispatches request to appropriate handler method. If you need to pass arguments to the class's constructor, you can pass the arguments to `as_view` and they will be passed to the class `__init__` method. Args: *class_args: Variable length argument list for the class instantiation. **class_kwargs: Arbitrary keyword arguments for the class instantiation. Returns: RouteHandler: The view function. Examples: ```python class DummyView(HTTPMethodView): def __init__(self, foo: MyFoo): self.foo = foo async def get(self, request: Request): return text(self.foo.bar) app.add_route(DummyView.as_view(foo=MyFoo()), "/") ``` """ # noqa: E501 def view(*args, **kwargs): self = view.view_class(*class_args, **class_kwargs) return self.dispatch_request(*args, **kwargs) if cls.decorators: view.__module__ = cls.__module__ for decorator in cls.decorators: view = decorator(view) view.view_class = cls # type: ignore view.__doc__ = cls.__doc__ view.__module__ = cls.__module__ view.__name__ = cls.__name__ return view @classmethod def attach( cls, to: Union[Sanic, Blueprint], uri: str, methods: Iterable[str] = frozenset({"GET"}), host: Optional[str] = None, strict_slashes: Optional[bool] = None, version: Optional[int] = None, name: Optional[str] = None, stream: bool = False, version_prefix: str = "/v", ) -> None: """Attaches the view to a Sanic app or Blueprint at the specified URI. Args: cls: The class that this method is part of. to (Union[Sanic, Blueprint]): The Sanic application or Blueprint to attach to. uri (str): The URI to bind the view to. methods (Iterable[str], optional): A collection of HTTP methods that the view should respond to. Defaults to `frozenset({"GET"})`. host (Optional[str], optional): A specific host or hosts to bind the view to. Defaults to `None`. strict_slashes (Optional[bool], optional): Enforce or not the trailing slash. Defaults to `None`. version (Optional[int], optional): Version of the API if versioning is used. Defaults to `None`. name (Optional[str], optional): Unique name for the route. Defaults to `None`. stream (bool, optional): Enable or disable streaming for the view. Defaults to `False`. version_prefix (str, optional): The prefix for the version, if versioning is used. Defaults to `"/v"`. 
""" # noqa: E501 to.add_route( cls.as_view(), uri=uri, methods=methods, host=host, strict_slashes=strict_slashes, version=version, name=name, stream=stream, version_prefix=version_prefix, ) def stream(func): """Decorator to mark a function as a stream handler.""" func.is_stream = True return func
HTTPMethodView
python
pyqtgraph__pyqtgraph
pyqtgraph/examples/optics/pyoptic.py
{ "start": 207, "end": 2810 }
class ____: """ Database of dispersion coefficients for Schott glasses + Corning 7980 """ def __init__(self, fileName='schott_glasses.csv'): path = os.path.dirname(__file__) fh = gzip.open(os.path.join(path, 'schott_glasses.csv.gz'), 'rb') r = csv.reader(map(str, fh.readlines())) lines = [x for x in r] self.data = {} header = lines[0] for l in lines[1:]: info = {} for i in range(1, len(l)): info[header[i]] = l[i] self.data[l[0]] = info self.data['Corning7980'] = { ## Thorlabs UV fused silica--not in schott catalog. 'B1': 0.68374049400, 'B2': 0.42032361300, 'B3': 0.58502748000, 'C1': 0.00460352869, 'C2': 0.01339688560, 'C3': 64.49327320000, 'TAUI25/250': 0.95, ## transmission data is fabricated, but close. 'TAUI25/1400': 0.98, } for k in self.data: self.data[k]['ior_cache'] = {} def ior(self, glass, wl): """ Return the index of refraction for *glass* at wavelength *wl*. The *glass* argument must be a key in self.data. """ info = self.data[glass] cache = info['ior_cache'] if wl not in cache: B = list(map(float, [info['B1'], info['B2'], info['B3']])) C = list(map(float, [info['C1'], info['C2'], info['C3']])) w2 = (wl/1000.)**2 n = sqrt(1.0 + (B[0]*w2 / (w2-C[0])) + (B[1]*w2 / (w2-C[1])) + (B[2]*w2 / (w2-C[2]))) cache[wl] = n return cache[wl] def transmissionCurve(self, glass): data = self.data[glass] keys = [int(x[7:]) for x in data.keys() if 'TAUI25' in x] keys.sort() curve = np.empty((2,len(keys))) for i in range(len(keys)): curve[0][i] = keys[i] key = 'TAUI25/%d' % keys[i] val = data[key] if val == '': val = 0 else: val = float(val) curve[1][i] = val return curve GLASSDB = GlassDB() def wlPen(wl): """Return a pen representing the given wavelength""" l1 = 400 l2 = 700 hue = np.clip(((l2-l1) - (wl-l1)) * 0.8 / (l2-l1), 0, 0.8) val = 1.0 if wl > 700: val = 1.0 * (((700-wl)/700.) + 1) elif wl < 400: val = wl * 1.0/400. #print hue, val color = pg.hsvColor(hue, 1.0, val) pen = pg.mkPen(color) return pen
GlassDB
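The `ior` method in the record above evaluates the three-term Sellmeier dispersion equation, n^2(lam) = 1 + sum_i B_i * lam^2 / (lam^2 - C_i), with the wavelength in micrometres. A self-contained check using the Corning 7980 coefficients copied from the snippet (about 1.458 at 589 nm is the standard figure for fused silica):

```python
from math import sqrt

def sellmeier_ior(B, C, wl_nm):
    w2 = (wl_nm / 1000.0) ** 2  # wavelength in nm -> micrometres, then squared
    return sqrt(1.0 + sum(b * w2 / (w2 - c) for b, c in zip(B, C)))

B = [0.68374049400, 0.42032361300, 0.58502748000]
C = [0.00460352869, 0.01339688560, 64.49327320000]
print(round(sellmeier_ior(B, C, 589), 4))  # 1.4584
```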
python
kamyu104__LeetCode-Solutions
Python/smallest-common-region.py
{ "start": 33, "end": 620 }
class ____(object): def findSmallestRegion(self, regions, region1, region2): """ :type regions: List[List[str]] :type region1: str :type region2: str :rtype: str """ parents = {region[i] : region[0] for region in regions for i in xrange(1, len(region))} lookup = {region1} while region1 in parents: region1 = parents[region1] lookup.add(region1) while region2 not in lookup: region2 = parents[region2] return region2
Solution
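The method above is a lowest-common-ancestor walk over a parent map: collect every ancestor of `region1`, then climb from `region2` until the chains meet. Since the snippet is Python 2 (`xrange`), here is an equivalent Python 3 sketch run against the LeetCode 1257 sample input:

```python
def smallest_common_region(regions, region1, region2):
    parent = {child: region[0] for region in regions for child in region[1:]}
    ancestors = {region1}
    while region1 in parent:          # record region1's full ancestor chain
        region1 = parent[region1]
        ancestors.add(region1)
    while region2 not in ancestors:   # climb until we hit a shared ancestor
        region2 = parent[region2]
    return region2

regions = [["Earth", "North America", "South America"],
           ["North America", "United States", "Canada"],
           ["United States", "New York", "Boston"],
           ["Canada", "Ontario", "Quebec"],
           ["South America", "Brazil"]]
print(smallest_common_region(regions, "Quebec", "New York"))  # North America
```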
python
doocs__leetcode
solution/0900-0999/0960.Delete Columns to Make Sorted III/Solution.py
{ "start": 0, "end": 299 }
class ____: def minDeletionSize(self, strs: List[str]) -> int: n = len(strs[0]) f = [1] * n for i in range(n): for j in range(i): if all(s[j] <= s[i] for s in strs): f[i] = max(f[i], f[j] + 1) return n - max(f)
Solution
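The DP above is a longest-increasing-subsequence over columns: `f[i]` is the longest chain of kept columns ending at column `i` for which every row stays non-decreasing, and everything outside the best chain is deleted. A standalone check against the problem's sample input:

```python
def min_deletion_size(strs):
    n = len(strs[0])
    f = [1] * n  # f[i]: longest valid chain of kept columns ending at column i
    for i in range(n):
        for j in range(i):
            # column j may precede column i only if every row stays sorted
            if all(s[j] <= s[i] for s in strs):
                f[i] = max(f[i], f[j] + 1)
    return n - max(f)

print(min_deletion_size(["babca", "bbazb"]))  # 3 (keep e.g. columns 0 and 3)
```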
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 541628, "end": 542048 }
class ____(sgqlc.types.Type): """Autogenerated return type of CreateRef""" __schema__ = github_schema __field_names__ = ("client_mutation_id", "ref") client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId") """A unique identifier for the client performing the mutation.""" ref = sgqlc.types.Field("Ref", graphql_name="ref") """The newly created ref."""
CreateRefPayload
python
getsentry__sentry
src/sentry/integrations/opsgenie/integration.py
{ "start": 9533, "end": 11570 }
class ____(IntegrationProvider): key = IntegrationProviderSlug.OPSGENIE.value name = "Opsgenie" metadata = metadata integration_cls = OpsgenieIntegration features = frozenset( [ IntegrationFeatures.ENTERPRISE_INCIDENT_MANAGEMENT, IntegrationFeatures.ENTERPRISE_ALERT_RULE, ] ) def get_pipeline_views(self) -> Sequence[PipelineView[IntegrationPipeline]]: return [InstallationConfigView()] def build_integration(self, state: Mapping[str, Any]) -> IntegrationData: api_key = state["installation_data"]["api_key"] base_url = state["installation_data"]["base_url"] name = state["installation_data"]["provider"] return { "name": name, "external_id": name, "metadata": { "api_key": api_key, "base_url": base_url, "domain_name": f"{name}.{OPSGENIE_BASE_URL_TO_DOMAIN_NAME[base_url]}", }, } def post_install( self, integration: Integration, organization: RpcOrganization, *, extra: dict[str, Any], ) -> None: with record_event(OnCallInteractionType.POST_INSTALL).capture(): try: org_integration = OrganizationIntegration.objects.get( integration=integration, organization_id=organization.id ) except OrganizationIntegration.DoesNotExist: logger.exception("The Opsgenie post_install step failed.") return key = integration.metadata["api_key"] team_table = [] if key: team_name = "my-first-key" team_id = f"{org_integration.id}-{team_name}" team_table.append({"team": team_name, "id": team_id, "integration_key": key}) org_integration.config.update({"team_table": team_table}) org_integration.update(config=org_integration.config)
OpsgenieIntegrationProvider