language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
arrow-py__arrow
arrow/locales.py
{ "start": 68929, "end": 72499 }
class ____(Locale): names = ["cs", "cs-cz"] timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, Mapping[str, str]]]] = { "now": "Teď", "second": {"past": "vteřina", "future": "vteřina"}, "seconds": { "zero": "vteřina", "past": "{0} sekundami", "future-singular": "{0} sekundy", "future-paucal": "{0} sekund", }, "minute": {"past": "minutou", "future": "minutu"}, "minutes": { "zero": "{0} minut", "past": "{0} minutami", "future-singular": "{0} minuty", "future-paucal": "{0} minut", }, "hour": {"past": "hodinou", "future": "hodinu"}, "hours": { "zero": "{0} hodin", "past": "{0} hodinami", "future-singular": "{0} hodiny", "future-paucal": "{0} hodin", }, "day": {"past": "dnem", "future": "den"}, "days": { "zero": "{0} dnů", "past": "{0} dny", "future-singular": "{0} dny", "future-paucal": "{0} dnů", }, "week": {"past": "týdnem", "future": "týden"}, "weeks": { "zero": "{0} týdnů", "past": "{0} týdny", "future-singular": "{0} týdny", "future-paucal": "{0} týdnů", }, "month": {"past": "měsícem", "future": "měsíc"}, "months": { "zero": "{0} měsíců", "past": "{0} měsíci", "future-singular": "{0} měsíce", "future-paucal": "{0} měsíců", }, "year": {"past": "rokem", "future": "rok"}, "years": { "zero": "{0} let", "past": "{0} lety", "future-singular": "{0} roky", "future-paucal": "{0} let", }, } past = "Před {0}" future = "Za {0}" month_names = [ "", "leden", "únor", "březen", "duben", "květen", "červen", "červenec", "srpen", "září", "říjen", "listopad", "prosinec", ] month_abbreviations = [ "", "led", "úno", "bře", "dub", "kvě", "čvn", "čvc", "srp", "zář", "říj", "lis", "pro", ] day_names = [ "", "pondělí", "úterý", "středa", "čtvrtek", "pátek", "sobota", "neděle", ] day_abbreviations = ["", "po", "út", "st", "čt", "pá", "so", "ne"] def _format_timeframe(self, timeframe: TimeFrameLiteral, delta: int) -> str: """Czech aware time frame format function, takes into account the differences between past and future forms.""" abs_delta = abs(delta) form = 
self.timeframes[timeframe] if isinstance(form, str): return form.format(abs_delta) if delta == 0: key = "zero" # And *never* use 0 in the singular! elif delta < 0: key = "past" else: # Needed since both regular future and future-singular and future-paucal cases if "future-singular" not in form: key = "future" elif 2 <= abs_delta % 10 <= 4 and ( abs_delta % 100 < 10 or abs_delta % 100 >= 20 ): key = "future-singular" else: key = "future-paucal" form: str = form[key] return form.format(abs_delta)
CzechLocale
python
altair-viz__altair
tools/vega_expr.py
{ "start": 7014, "end": 8242 }
class ____(_RSTRenderer): def __init__(self) -> None: super().__init__() def link(self, token: Token, state: BlockState) -> str: """Store link url, for appending at the end of doc.""" attrs = token["attrs"] url = expand_urls(attrs["url"]) text = self.render_children(token, state) text = text.replace("`", "") inline = f"`{text}`_" state.env["ref_links"][text] = {"url": url} return inline def _with_links(self, s: str, links: dict[str, Any] | Any, /) -> str: it = chain.from_iterable( (f".. _{ref_name}:", f" {attrs['url']}") for ref_name, attrs in links.items() ) return "\n".join(chain([s], it)) def __call__(self, tokens: Iterable[Token], state: BlockState) -> str: result = super().__call__(tokens, state) if links := state.env.get("ref_links", {}): return self._with_links(result, links) else: return result parser: RSTParse = RSTParse(RSTRenderer()) text_wrap = _TextWrapper( width=100, break_long_words=False, break_on_hyphens=False, initial_indent=METHOD_INDENT, subsequent_indent=METHOD_INDENT, )
RSTRenderer
python
kubernetes-client__python
kubernetes/base/config/kube_config_test.py
{ "start": 15803, "end": 63243 }
class ____(BaseTestCase): TEST_KUBE_CONFIG = { "current-context": "no_user", "contexts": [ { "name": "no_user", "context": { "cluster": "default" } }, { "name": "simple_token", "context": { "cluster": "default", "user": "simple_token" } }, { "name": "gcp", "context": { "cluster": "default", "user": "gcp" } }, { "name": "expired_gcp", "context": { "cluster": "default", "user": "expired_gcp" } }, { "name": "expired_gcp_refresh", "context": { "cluster": "default", "user": "expired_gcp_refresh" } }, { "name": "oidc", "context": { "cluster": "default", "user": "oidc" } }, { "name": "azure", "context": { "cluster": "default", "user": "azure" } }, { "name": "azure_num", "context": { "cluster": "default", "user": "azure_num" } }, { "name": "azure_str", "context": { "cluster": "default", "user": "azure_str" } }, { "name": "azure_num_error", "context": { "cluster": "default", "user": "azure_str_error" } }, { "name": "azure_str_error", "context": { "cluster": "default", "user": "azure_str_error" } }, { "name": "expired_oidc", "context": { "cluster": "default", "user": "expired_oidc" } }, { "name": "expired_oidc_with_idp_ca_file", "context": { "cluster": "default", "user": "expired_oidc_with_idp_ca_file" } }, { "name": "expired_oidc_nocert", "context": { "cluster": "default", "user": "expired_oidc_nocert" } }, { "name": "oidc_contains_reserved_character", "context": { "cluster": "default", "user": "oidc_contains_reserved_character" } }, { "name": "oidc_invalid_padding_length", "context": { "cluster": "default", "user": "oidc_invalid_padding_length" } }, { "name": "user_pass", "context": { "cluster": "default", "user": "user_pass" } }, { "name": "ssl", "context": { "cluster": "ssl", "user": "ssl" } }, { "name": "no_ssl_verification", "context": { "cluster": "no_ssl_verification", "user": "ssl" } }, { "name": "ssl-no_file", "context": { "cluster": "ssl-no_file", "user": "ssl-no_file" } }, { "name": "ssl-local-file", "context": { "cluster": "ssl-local-file", "user": 
"ssl-local-file" } }, { "name": "non_existing_user", "context": { "cluster": "default", "user": "non_existing_user" } }, { "name": "exec_cred_user", "context": { "cluster": "default", "user": "exec_cred_user" } }, { "name": "exec_cred_user_certificate", "context": { "cluster": "ssl", "user": "exec_cred_user_certificate" } }, { "name": "contexttestcmdpath", "context": { "cluster": "clustertestcmdpath", "user": "usertestcmdpath" } }, { "name": "contexttestcmdpathempty", "context": { "cluster": "clustertestcmdpath", "user": "usertestcmdpathempty" } }, { "name": "contexttestcmdpathscope", "context": { "cluster": "clustertestcmdpath", "user": "usertestcmdpathscope" } }, { "name": "tls-server-name", "context": { "cluster": "tls-server-name", "user": "ssl" } }, ], "clusters": [ { "name": "default", "cluster": { "server": TEST_HOST } }, { "name": "ssl-no_file", "cluster": { "server": TEST_SSL_HOST, "certificate-authority": TEST_CERTIFICATE_AUTH, } }, { "name": "ssl-local-file", "cluster": { "server": TEST_SSL_HOST, "certificate-authority": "cert_test", } }, { "name": "ssl", "cluster": { "server": TEST_SSL_HOST, "certificate-authority-data": TEST_CERTIFICATE_AUTH_BASE64, "insecure-skip-tls-verify": False, } }, { "name": "no_ssl_verification", "cluster": { "server": TEST_SSL_HOST, "insecure-skip-tls-verify": True, } }, { "name": "clustertestcmdpath", "cluster": {} }, { "name": "tls-server-name", "cluster": { "server": TEST_SSL_HOST, "certificate-authority-data": TEST_CERTIFICATE_AUTH_BASE64, "insecure-skip-tls-verify": False, "tls-server-name": TEST_TLS_SERVER_NAME, } }, ], "users": [ { "name": "simple_token", "user": { "token": TEST_DATA_BASE64, "username": TEST_USERNAME, # should be ignored "password": TEST_PASSWORD, # should be ignored } }, { "name": "gcp", "user": { "auth-provider": { "name": "gcp", "config": { "access-token": TEST_DATA_BASE64, } }, "token": TEST_DATA_BASE64, # should be ignored "username": TEST_USERNAME, # should be ignored "password": TEST_PASSWORD, # 
should be ignored } }, { "name": "expired_gcp", "user": { "auth-provider": { "name": "gcp", "config": { "access-token": TEST_DATA_BASE64, "expiry": TEST_TOKEN_EXPIRY_PAST, # always in past } }, "token": TEST_DATA_BASE64, # should be ignored "username": TEST_USERNAME, # should be ignored "password": TEST_PASSWORD, # should be ignored } }, # Duplicated from "expired_gcp" so test_load_gcp_token_with_refresh # is isolated from test_gcp_get_api_key_with_prefix. { "name": "expired_gcp_refresh", "user": { "auth-provider": { "name": "gcp", "config": { "access-token": TEST_DATA_BASE64, "expiry": TEST_TOKEN_EXPIRY_PAST, # always in past } }, "token": TEST_DATA_BASE64, # should be ignored "username": TEST_USERNAME, # should be ignored "password": TEST_PASSWORD, # should be ignored } }, { "name": "oidc", "user": { "auth-provider": { "name": "oidc", "config": { "id-token": TEST_OIDC_LOGIN } } } }, { "name": "azure", "user": { "auth-provider": { "config": { "access-token": TEST_AZURE_TOKEN, "apiserver-id": "00000002-0000-0000-c000-" "000000000000", "environment": "AzurePublicCloud", "refresh-token": "refreshToken", "tenant-id": "9d2ac018-e843-4e14-9e2b-4e0ddac75433" }, "name": "azure" } } }, { "name": "azure_num", "user": { "auth-provider": { "config": { "access-token": TEST_AZURE_TOKEN, "apiserver-id": "00000002-0000-0000-c000-" "000000000000", "environment": "AzurePublicCloud", "expires-in": "0", "expires-on": "156207275", "refresh-token": "refreshToken", "tenant-id": "9d2ac018-e843-4e14-9e2b-4e0ddac75433" }, "name": "azure" } } }, { "name": "azure_str", "user": { "auth-provider": { "config": { "access-token": TEST_AZURE_TOKEN, "apiserver-id": "00000002-0000-0000-c000-" "000000000000", "environment": "AzurePublicCloud", "expires-in": "0", "expires-on": "2018-10-18 00:52:29.044727", "refresh-token": "refreshToken", "tenant-id": "9d2ac018-e843-4e14-9e2b-4e0ddac75433" }, "name": "azure" } } }, { "name": "azure_str_error", "user": { "auth-provider": { "config": { "access-token": 
TEST_AZURE_TOKEN, "apiserver-id": "00000002-0000-0000-c000-" "000000000000", "environment": "AzurePublicCloud", "expires-in": "0", "expires-on": "2018-10-18 00:52", "refresh-token": "refreshToken", "tenant-id": "9d2ac018-e843-4e14-9e2b-4e0ddac75433" }, "name": "azure" } } }, { "name": "azure_num_error", "user": { "auth-provider": { "config": { "access-token": TEST_AZURE_TOKEN, "apiserver-id": "00000002-0000-0000-c000-" "000000000000", "environment": "AzurePublicCloud", "expires-in": "0", "expires-on": "-1", "refresh-token": "refreshToken", "tenant-id": "9d2ac018-e843-4e14-9e2b-4e0ddac75433" }, "name": "azure" } } }, { "name": "expired_oidc", "user": { "auth-provider": { "name": "oidc", "config": { "client-id": "tectonic-kubectl", "client-secret": "FAKE_SECRET", "id-token": TEST_OIDC_EXPIRED_LOGIN, "idp-certificate-authority-data": TEST_OIDC_CA, "idp-issuer-url": "https://example.org/identity", "refresh-token": "lucWJjEhlxZW01cXI3YmVlcYnpxNGhzk" } } } }, { "name": "expired_oidc_with_idp_ca_file", "user": { "auth-provider": { "name": "oidc", "config": { "client-id": "tectonic-kubectl", "client-secret": "FAKE_SECRET", "id-token": TEST_OIDC_EXPIRED_LOGIN, "idp-certificate-authority": TEST_CERTIFICATE_AUTH, "idp-issuer-url": "https://example.org/identity", "refresh-token": "lucWJjEhlxZW01cXI3YmVlcYnpxNGhzk" } } } }, { "name": "expired_oidc_nocert", "user": { "auth-provider": { "name": "oidc", "config": { "client-id": "tectonic-kubectl", "client-secret": "FAKE_SECRET", "id-token": TEST_OIDC_EXPIRED_LOGIN, "idp-issuer-url": "https://example.org/identity", "refresh-token": "lucWJjEhlxZW01cXI3YmVlcYnpxNGhzk" } } } }, { "name": "oidc_contains_reserved_character", "user": { "auth-provider": { "name": "oidc", "config": { "client-id": "tectonic-kubectl", "client-secret": "FAKE_SECRET", "id-token": TEST_OIDC_CONTAINS_RESERVED_CHARACTERS, "idp-issuer-url": "https://example.org/identity", "refresh-token": "lucWJjEhlxZW01cXI3YmVlcYnpxNGhzk" } } } }, { "name": 
"oidc_invalid_padding_length", "user": { "auth-provider": { "name": "oidc", "config": { "client-id": "tectonic-kubectl", "client-secret": "FAKE_SECRET", "id-token": TEST_OIDC_INVALID_PADDING_LENGTH, "idp-issuer-url": "https://example.org/identity", "refresh-token": "lucWJjEhlxZW01cXI3YmVlcYnpxNGhzk" } } } }, { "name": "user_pass", "user": { "username": TEST_USERNAME, # should be ignored "password": TEST_PASSWORD, # should be ignored } }, { "name": "ssl-no_file", "user": { "token": TEST_DATA_BASE64, "client-certificate": TEST_CLIENT_CERT, "client-key": TEST_CLIENT_KEY, } }, { "name": "ssl-local-file", "user": { "tokenFile": "token_file", "client-certificate": "client_cert", "client-key": "client_key", } }, { "name": "ssl", "user": { "token": TEST_DATA_BASE64, "client-certificate-data": TEST_CLIENT_CERT_BASE64, "client-key-data": TEST_CLIENT_KEY_BASE64, } }, { "name": "exec_cred_user", "user": { "exec": { "apiVersion": "client.authentication.k8s.io/v1beta1", "command": "aws-iam-authenticator", "args": ["token", "-i", "dummy-cluster"] } } }, { "name": "exec_cred_user_certificate", "user": { "exec": { "apiVersion": "client.authentication.k8s.io/v1beta1", "command": "custom-certificate-authenticator", "args": [] } } }, { "name": "usertestcmdpath", "user": { "auth-provider": { "name": "gcp", "config": { "cmd-path": "cmdtorun" } } } }, { "name": "usertestcmdpathempty", "user": { "auth-provider": { "name": "gcp", "config": { "cmd-path": "" } } } }, { "name": "usertestcmdpathscope", "user": { "auth-provider": { "name": "gcp", "config": { "cmd-path": "cmd", "scopes": "scope" } } } } ] } def test_no_user_context(self): expected = FakeConfig(host=TEST_HOST) actual = FakeConfig() KubeConfigLoader( config_dict=self.TEST_KUBE_CONFIG, active_context="no_user").load_and_set(actual) self.assertEqual(expected, actual) def test_simple_token(self): expected = FakeConfig(host=TEST_HOST, token=BEARER_TOKEN_FORMAT % TEST_DATA_BASE64) actual = FakeConfig() KubeConfigLoader( 
config_dict=self.TEST_KUBE_CONFIG, active_context="simple_token").load_and_set(actual) self.assertEqual(expected, actual) def test_load_user_token(self): loader = KubeConfigLoader( config_dict=self.TEST_KUBE_CONFIG, active_context="simple_token") self.assertTrue(loader._load_user_token()) self.assertEqual(BEARER_TOKEN_FORMAT % TEST_DATA_BASE64, loader.token) def test_gcp_no_refresh(self): fake_config = FakeConfig() self.assertIsNone(fake_config.refresh_api_key_hook) KubeConfigLoader( config_dict=self.TEST_KUBE_CONFIG, active_context="gcp", get_google_credentials=lambda: _raise_exception( "SHOULD NOT BE CALLED")).load_and_set(fake_config) # Should now be populated with a gcp token fetcher. self.assertIsNotNone(fake_config.refresh_api_key_hook) self.assertEqual(TEST_HOST, fake_config.host) self.assertEqual(BEARER_TOKEN_FORMAT % TEST_DATA_BASE64, fake_config.api_key['authorization']) def test_load_gcp_token_no_refresh(self): loader = KubeConfigLoader( config_dict=self.TEST_KUBE_CONFIG, active_context="gcp", get_google_credentials=lambda: _raise_exception( "SHOULD NOT BE CALLED")) self.assertTrue(loader._load_auth_provider_token()) self.assertEqual(BEARER_TOKEN_FORMAT % TEST_DATA_BASE64, loader.token) def test_load_gcp_token_with_refresh(self): def cred(): return None cred.token = TEST_ANOTHER_DATA_BASE64 cred.expiry = datetime.datetime.now(tz=UTC).replace(tzinfo=None) loader = KubeConfigLoader( config_dict=self.TEST_KUBE_CONFIG, active_context="expired_gcp", get_google_credentials=lambda: cred) original_expiry = _get_expiry(loader, "expired_gcp") self.assertTrue(loader._load_auth_provider_token()) new_expiry = _get_expiry(loader, "expired_gcp") # assert that the configs expiry actually updates self.assertTrue(new_expiry > original_expiry) self.assertEqual(BEARER_TOKEN_FORMAT % TEST_ANOTHER_DATA_BASE64, loader.token) def test_gcp_refresh_api_key_hook(self): class cred_old: token = TEST_DATA_BASE64 expiry = DATETIME_EXPIRY_PAST class cred_new: token = 
TEST_ANOTHER_DATA_BASE64 expiry = DATETIME_EXPIRY_FUTURE fake_config = FakeConfig() _get_google_credentials = mock.Mock() _get_google_credentials.side_effect = [cred_old, cred_new] loader = KubeConfigLoader( config_dict=self.TEST_KUBE_CONFIG, active_context="expired_gcp_refresh", get_google_credentials=_get_google_credentials) loader.load_and_set(fake_config) original_expiry = _get_expiry(loader, "expired_gcp_refresh") # Refresh the GCP token. fake_config.refresh_api_key_hook(fake_config) new_expiry = _get_expiry(loader, "expired_gcp_refresh") self.assertTrue(new_expiry > original_expiry) self.assertEqual(BEARER_TOKEN_FORMAT % TEST_ANOTHER_DATA_BASE64, loader.token) def test_oidc_no_refresh(self): loader = KubeConfigLoader( config_dict=self.TEST_KUBE_CONFIG, active_context="oidc", ) self.assertTrue(loader._load_auth_provider_token()) self.assertEqual(TEST_OIDC_TOKEN, loader.token) @mock.patch('kubernetes.config.kube_config.OAuth2Session.refresh_token') @mock.patch('kubernetes.config.kube_config.ApiClient.request') def test_oidc_with_refresh(self, mock_ApiClient, mock_OAuth2Session): mock_response = mock.MagicMock() type(mock_response).status = mock.PropertyMock( return_value=200 ) type(mock_response).data = mock.PropertyMock( return_value=json.dumps({ "token_endpoint": "https://example.org/identity/token" }) ) mock_ApiClient.return_value = mock_response mock_OAuth2Session.return_value = {"id_token": "abc123", "refresh_token": "newtoken123"} loader = KubeConfigLoader( config_dict=self.TEST_KUBE_CONFIG, active_context="expired_oidc", ) self.assertTrue(loader._load_auth_provider_token()) self.assertEqual("Bearer abc123", loader.token) @mock.patch('kubernetes.config.kube_config.OAuth2Session.refresh_token') @mock.patch('kubernetes.config.kube_config.ApiClient.request') def test_oidc_with_idp_ca_file_refresh(self, mock_ApiClient, mock_OAuth2Session): mock_response = mock.MagicMock() type(mock_response).status = mock.PropertyMock( return_value=200 ) 
type(mock_response).data = mock.PropertyMock( return_value=json.dumps({ "token_endpoint": "https://example.org/identity/token" }) ) mock_ApiClient.return_value = mock_response mock_OAuth2Session.return_value = {"id_token": "abc123", "refresh_token": "newtoken123"} loader = KubeConfigLoader( config_dict=self.TEST_KUBE_CONFIG, active_context="expired_oidc_with_idp_ca_file", ) self.assertTrue(loader._load_auth_provider_token()) self.assertEqual("Bearer abc123", loader.token) @mock.patch('kubernetes.config.kube_config.OAuth2Session.refresh_token') @mock.patch('kubernetes.config.kube_config.ApiClient.request') def test_oidc_with_refresh_nocert( self, mock_ApiClient, mock_OAuth2Session): mock_response = mock.MagicMock() type(mock_response).status = mock.PropertyMock( return_value=200 ) type(mock_response).data = mock.PropertyMock( return_value=json.dumps({ "token_endpoint": "https://example.org/identity/token" }) ) mock_ApiClient.return_value = mock_response mock_OAuth2Session.return_value = {"id_token": "abc123", "refresh_token": "newtoken123"} loader = KubeConfigLoader( config_dict=self.TEST_KUBE_CONFIG, active_context="expired_oidc_nocert", ) self.assertTrue(loader._load_auth_provider_token()) self.assertEqual("Bearer abc123", loader.token) def test_oidc_fails_if_contains_reserved_chars(self): loader = KubeConfigLoader( config_dict=self.TEST_KUBE_CONFIG, active_context="oidc_contains_reserved_character", ) self.assertEqual( loader._load_oid_token("oidc_contains_reserved_character"), None, ) def test_oidc_fails_if_invalid_padding_length(self): loader = KubeConfigLoader( config_dict=self.TEST_KUBE_CONFIG, active_context="oidc_invalid_padding_length", ) self.assertEqual( loader._load_oid_token("oidc_invalid_padding_length"), None, ) def test_azure_no_refresh(self): loader = KubeConfigLoader( config_dict=self.TEST_KUBE_CONFIG, active_context="azure", ) self.assertTrue(loader._load_auth_provider_token()) self.assertEqual(TEST_AZURE_TOKEN_FULL, loader.token) def 
test_azure_with_expired_num(self): loader = KubeConfigLoader( config_dict=self.TEST_KUBE_CONFIG, active_context="azure_num", ) provider = loader._user['auth-provider'] self.assertTrue(loader._azure_is_expired(provider)) def test_azure_with_expired_str(self): loader = KubeConfigLoader( config_dict=self.TEST_KUBE_CONFIG, active_context="azure_str", ) provider = loader._user['auth-provider'] self.assertTrue(loader._azure_is_expired(provider)) def test_azure_with_expired_str_error(self): loader = KubeConfigLoader( config_dict=self.TEST_KUBE_CONFIG, active_context="azure_str_error", ) provider = loader._user['auth-provider'] self.assertRaises(ValueError, loader._azure_is_expired, provider) def test_azure_with_expired_int_error(self): loader = KubeConfigLoader( config_dict=self.TEST_KUBE_CONFIG, active_context="azure_num_error", ) provider = loader._user['auth-provider'] self.assertRaises(ValueError, loader._azure_is_expired, provider) def test_user_pass(self): expected = FakeConfig(host=TEST_HOST, token=TEST_BASIC_TOKEN) actual = FakeConfig() KubeConfigLoader( config_dict=self.TEST_KUBE_CONFIG, active_context="user_pass").load_and_set(actual) self.assertEqual(expected, actual) def test_load_user_pass_token(self): loader = KubeConfigLoader( config_dict=self.TEST_KUBE_CONFIG, active_context="user_pass") self.assertTrue(loader._load_user_pass_token()) self.assertEqual(TEST_BASIC_TOKEN, loader.token) def test_ssl_no_cert_files(self): loader = KubeConfigLoader( config_dict=self.TEST_KUBE_CONFIG, active_context="ssl-no_file") self.expect_exception( loader.load_and_set, "does not exist", FakeConfig()) def test_ssl(self): expected = FakeConfig( host=TEST_SSL_HOST, token=BEARER_TOKEN_FORMAT % TEST_DATA_BASE64, cert_file=self._create_temp_file(TEST_CLIENT_CERT), key_file=self._create_temp_file(TEST_CLIENT_KEY), ssl_ca_cert=self._create_temp_file(TEST_CERTIFICATE_AUTH), verify_ssl=True ) actual = FakeConfig() KubeConfigLoader( config_dict=self.TEST_KUBE_CONFIG, 
active_context="ssl").load_and_set(actual) self.assertEqual(expected, actual) def test_ssl_no_verification(self): expected = FakeConfig( host=TEST_SSL_HOST, token=BEARER_TOKEN_FORMAT % TEST_DATA_BASE64, cert_file=self._create_temp_file(TEST_CLIENT_CERT), key_file=self._create_temp_file(TEST_CLIENT_KEY), verify_ssl=False, ssl_ca_cert=None, ) actual = FakeConfig() KubeConfigLoader( config_dict=self.TEST_KUBE_CONFIG, active_context="no_ssl_verification").load_and_set(actual) self.assertEqual(expected, actual) def test_tls_server_name(self): expected = FakeConfig( host=TEST_SSL_HOST, token=BEARER_TOKEN_FORMAT % TEST_DATA_BASE64, cert_file=self._create_temp_file(TEST_CLIENT_CERT), key_file=self._create_temp_file(TEST_CLIENT_KEY), ssl_ca_cert=self._create_temp_file(TEST_CERTIFICATE_AUTH), verify_ssl=True, tls_server_name=TEST_TLS_SERVER_NAME ) actual = FakeConfig() KubeConfigLoader( config_dict=self.TEST_KUBE_CONFIG, active_context="tls-server-name").load_and_set(actual) self.assertEqual(expected, actual) def test_list_contexts(self): loader = KubeConfigLoader( config_dict=self.TEST_KUBE_CONFIG, active_context="no_user") actual_contexts = loader.list_contexts() expected_contexts = ConfigNode("", self.TEST_KUBE_CONFIG)['contexts'] for actual in actual_contexts: expected = expected_contexts.get_with_name(actual['name']) self.assertEqual(expected.value, actual) def test_current_context(self): loader = KubeConfigLoader(config_dict=self.TEST_KUBE_CONFIG) expected_contexts = ConfigNode("", self.TEST_KUBE_CONFIG)['contexts'] self.assertEqual(expected_contexts.get_with_name("no_user").value, loader.current_context) def test_set_active_context(self): loader = KubeConfigLoader(config_dict=self.TEST_KUBE_CONFIG) loader.set_active_context("ssl") expected_contexts = ConfigNode("", self.TEST_KUBE_CONFIG)['contexts'] self.assertEqual(expected_contexts.get_with_name("ssl").value, loader.current_context) def test_ssl_with_relative_ssl_files(self): expected = FakeConfig( 
host=TEST_SSL_HOST, token=BEARER_TOKEN_FORMAT % TEST_DATA_BASE64, cert_file=self._create_temp_file(TEST_CLIENT_CERT), key_file=self._create_temp_file(TEST_CLIENT_KEY), ssl_ca_cert=self._create_temp_file(TEST_CERTIFICATE_AUTH) ) try: temp_dir = tempfile.mkdtemp() actual = FakeConfig() with open(os.path.join(temp_dir, "cert_test"), "wb") as fd: fd.write(TEST_CERTIFICATE_AUTH.encode()) with open(os.path.join(temp_dir, "client_cert"), "wb") as fd: fd.write(TEST_CLIENT_CERT.encode()) with open(os.path.join(temp_dir, "client_key"), "wb") as fd: fd.write(TEST_CLIENT_KEY.encode()) with open(os.path.join(temp_dir, "token_file"), "wb") as fd: fd.write(TEST_DATA_BASE64.encode()) KubeConfigLoader( config_dict=self.TEST_KUBE_CONFIG, active_context="ssl-local-file", config_base_path=temp_dir).load_and_set(actual) self.assertEqual(expected, actual) finally: shutil.rmtree(temp_dir) def test_load_kube_config_from_file_path(self): expected = FakeConfig(host=TEST_HOST, token=BEARER_TOKEN_FORMAT % TEST_DATA_BASE64) config_file = self._create_temp_file( yaml.safe_dump(self.TEST_KUBE_CONFIG)) actual = FakeConfig() load_kube_config(config_file=config_file, context="simple_token", client_configuration=actual) self.assertEqual(expected, actual) def test_load_kube_config_from_file_like_object(self): expected = FakeConfig(host=TEST_HOST, token=BEARER_TOKEN_FORMAT % TEST_DATA_BASE64) config_file_like_object = io.StringIO() # py3 (won't have unicode) vs py2 (requires it) try: unicode('') config_file_like_object.write( unicode( yaml.safe_dump( self.TEST_KUBE_CONFIG), errors='replace')) except NameError: config_file_like_object.write( yaml.safe_dump( self.TEST_KUBE_CONFIG)) actual = FakeConfig() load_kube_config( config_file=config_file_like_object, context="simple_token", client_configuration=actual) self.assertEqual(expected, actual) def test_load_kube_config_from_dict(self): expected = FakeConfig(host=TEST_HOST, token=BEARER_TOKEN_FORMAT % TEST_DATA_BASE64) actual = FakeConfig() 
load_kube_config_from_dict(config_dict=self.TEST_KUBE_CONFIG, context="simple_token", client_configuration=actual) self.assertEqual(expected, actual) def test_load_kube_config_from_dict_with_temp_file_path(self): expected = FakeConfig( host=TEST_SSL_HOST, token=BEARER_TOKEN_FORMAT % TEST_DATA_BASE64, cert_file=self._create_temp_file(TEST_CLIENT_CERT), key_file=self._create_temp_file(TEST_CLIENT_KEY), ssl_ca_cert=self._create_temp_file(TEST_CERTIFICATE_AUTH), verify_ssl=True ) actual = FakeConfig() tmp_path = os.path.join( os.path.dirname( os.path.dirname( os.path.abspath(__file__))), 'tmp_file_path_test') load_kube_config_from_dict(config_dict=self.TEST_KUBE_CONFIG, context="ssl", client_configuration=actual, temp_file_path=tmp_path) self.assertFalse(True if not os.listdir(tmp_path) else False) self.assertEqual(expected, actual) _cleanup_temp_files() def test_load_kube_config_from_empty_file_like_object(self): config_file_like_object = io.StringIO() self.assertRaises( ConfigException, load_kube_config, config_file_like_object) def test_load_kube_config_from_empty_file(self): config_file = self._create_temp_file( yaml.safe_dump(None)) self.assertRaises( ConfigException, load_kube_config, config_file) def test_list_kube_config_contexts(self): config_file = self._create_temp_file( yaml.safe_dump(self.TEST_KUBE_CONFIG)) contexts, active_context = list_kube_config_contexts( config_file=config_file) self.assertDictEqual(self.TEST_KUBE_CONFIG['contexts'][0], active_context) if PY3: self.assertCountEqual(self.TEST_KUBE_CONFIG['contexts'], contexts) else: self.assertItemsEqual(self.TEST_KUBE_CONFIG['contexts'], contexts) def test_new_client_from_config(self): config_file = self._create_temp_file( yaml.safe_dump(self.TEST_KUBE_CONFIG)) client = new_client_from_config( config_file=config_file, context="simple_token") self.assertEqual(TEST_HOST, client.configuration.host) self.assertEqual(BEARER_TOKEN_FORMAT % TEST_DATA_BASE64, client.configuration.api_key['authorization']) 
def test_new_client_from_config_dict(self): client = new_client_from_config_dict( config_dict=self.TEST_KUBE_CONFIG, context="simple_token") self.assertEqual(TEST_HOST, client.configuration.host) self.assertEqual(BEARER_TOKEN_FORMAT % TEST_DATA_BASE64, client.configuration.api_key['authorization']) def test_no_users_section(self): expected = FakeConfig(host=TEST_HOST) actual = FakeConfig() test_kube_config = self.TEST_KUBE_CONFIG.copy() del test_kube_config['users'] KubeConfigLoader( config_dict=test_kube_config, active_context="gcp").load_and_set(actual) self.assertEqual(expected, actual) def test_non_existing_user(self): expected = FakeConfig(host=TEST_HOST) actual = FakeConfig() KubeConfigLoader( config_dict=self.TEST_KUBE_CONFIG, active_context="non_existing_user").load_and_set(actual) self.assertEqual(expected, actual) @mock.patch('kubernetes.config.kube_config.ExecProvider.run') def test_user_exec_auth(self, mock): token = "dummy" mock.return_value = { "token": token } expected = FakeConfig(host=TEST_HOST, api_key={ "authorization": BEARER_TOKEN_FORMAT % token}) actual = FakeConfig() KubeConfigLoader( config_dict=self.TEST_KUBE_CONFIG, active_context="exec_cred_user").load_and_set(actual) self.assertEqual(expected, actual) @mock.patch('kubernetes.config.kube_config.ExecProvider.run') def test_user_exec_auth_with_expiry(self, mock): expired_token = "expired" current_token = "current" mock.side_effect = [ { "token": expired_token, "expirationTimestamp": format_rfc3339(DATETIME_EXPIRY_PAST) }, { "token": current_token, "expirationTimestamp": format_rfc3339(DATETIME_EXPIRY_FUTURE) } ] fake_config = FakeConfig() self.assertIsNone(fake_config.refresh_api_key_hook) KubeConfigLoader( config_dict=self.TEST_KUBE_CONFIG, active_context="exec_cred_user").load_and_set(fake_config) # The kube config should use the first token returned from the # exec provider. 
self.assertEqual(fake_config.api_key["authorization"], BEARER_TOKEN_FORMAT % expired_token) # Should now be populated with a method to refresh expired tokens. self.assertIsNotNone(fake_config.refresh_api_key_hook) # Refresh the token; the kube config should be updated. fake_config.refresh_api_key_hook(fake_config) self.assertEqual(fake_config.api_key["authorization"], BEARER_TOKEN_FORMAT % current_token) @mock.patch('kubernetes.config.kube_config.ExecProvider.run') def test_user_exec_auth_certificates(self, mock): mock.return_value = { "clientCertificateData": TEST_CLIENT_CERT, "clientKeyData": TEST_CLIENT_KEY, } expected = FakeConfig( host=TEST_SSL_HOST, cert_file=self._create_temp_file(TEST_CLIENT_CERT), key_file=self._create_temp_file(TEST_CLIENT_KEY), ssl_ca_cert=self._create_temp_file(TEST_CERTIFICATE_AUTH), verify_ssl=True) actual = FakeConfig() KubeConfigLoader( config_dict=self.TEST_KUBE_CONFIG, active_context="exec_cred_user_certificate").load_and_set(actual) self.assertEqual(expected, actual) @mock.patch('kubernetes.config.kube_config.ExecProvider.run', autospec=True) def test_user_exec_cwd(self, mock): capture = {} def capture_cwd(exec_provider): capture['cwd'] = exec_provider.cwd mock.side_effect = capture_cwd expected = "/some/random/path" KubeConfigLoader( config_dict=self.TEST_KUBE_CONFIG, active_context="exec_cred_user", config_base_path=expected).load_and_set(FakeConfig()) self.assertEqual(expected, capture['cwd']) def test_user_cmd_path(self): A = namedtuple('A', ['token', 'expiry']) token = "dummy" return_value = A(token, parse_rfc3339(datetime.datetime.now())) CommandTokenSource.token = mock.Mock(return_value=return_value) expected = FakeConfig(api_key={ "authorization": BEARER_TOKEN_FORMAT % token}) actual = FakeConfig() KubeConfigLoader( config_dict=self.TEST_KUBE_CONFIG, active_context="contexttestcmdpath").load_and_set(actual) self.assertEqual(expected, actual) def test_user_cmd_path_empty(self): A = namedtuple('A', ['token', 'expiry']) 
token = "dummy" return_value = A(token, parse_rfc3339(datetime.datetime.now())) CommandTokenSource.token = mock.Mock(return_value=return_value) expected = FakeConfig(api_key={ "authorization": BEARER_TOKEN_FORMAT % token}) actual = FakeConfig() self.expect_exception(lambda: KubeConfigLoader( config_dict=self.TEST_KUBE_CONFIG, active_context="contexttestcmdpathempty").load_and_set(actual), "missing access token cmd " "(cmd-path is an empty string in your kubeconfig file)") def test_user_cmd_path_with_scope(self): A = namedtuple('A', ['token', 'expiry']) token = "dummy" return_value = A(token, parse_rfc3339(datetime.datetime.now())) CommandTokenSource.token = mock.Mock(return_value=return_value) expected = FakeConfig(api_key={ "authorization": BEARER_TOKEN_FORMAT % token}) actual = FakeConfig() self.expect_exception(lambda: KubeConfigLoader( config_dict=self.TEST_KUBE_CONFIG, active_context="contexttestcmdpathscope").load_and_set(actual), "scopes can only be used when kubectl is using " "a gcp service account key") def test__get_kube_config_loader_for_yaml_file_no_persist(self): expected = FakeConfig(host=TEST_HOST, token=BEARER_TOKEN_FORMAT % TEST_DATA_BASE64) config_file = self._create_temp_file( yaml.safe_dump(self.TEST_KUBE_CONFIG)) actual = _get_kube_config_loader_for_yaml_file(config_file) self.assertIsNone(actual._config_persister) def test__get_kube_config_loader_for_yaml_file_persist(self): expected = FakeConfig(host=TEST_HOST, token=BEARER_TOKEN_FORMAT % TEST_DATA_BASE64) config_file = self._create_temp_file( yaml.safe_dump(self.TEST_KUBE_CONFIG)) actual = _get_kube_config_loader_for_yaml_file(config_file, persist_config=True) self.assertTrue(callable(actual._config_persister)) self.assertEqual(actual._config_persister.__name__, "save_changes") def test__get_kube_config_loader_file_no_persist(self): expected = FakeConfig(host=TEST_HOST, token=BEARER_TOKEN_FORMAT % TEST_DATA_BASE64) config_file = self._create_temp_file( yaml.safe_dump(self.TEST_KUBE_CONFIG)) 
actual = _get_kube_config_loader(filename=config_file) self.assertIsNone(actual._config_persister) def test__get_kube_config_loader_file_persist(self): expected = FakeConfig(host=TEST_HOST, token=BEARER_TOKEN_FORMAT % TEST_DATA_BASE64) config_file = self._create_temp_file( yaml.safe_dump(self.TEST_KUBE_CONFIG)) actual = _get_kube_config_loader(filename=config_file, persist_config=True) self.assertTrue(callable(actual._config_persister)) self.assertEqual(actual._config_persister.__name__, "save_changes") def test__get_kube_config_loader_dict_no_persist(self): expected = FakeConfig(host=TEST_HOST, token=BEARER_TOKEN_FORMAT % TEST_DATA_BASE64) actual = _get_kube_config_loader( config_dict=self.TEST_KUBE_CONFIG) self.assertIsNone(actual._config_persister)
TestKubeConfigLoader
python
getsentry__sentry
src/sentry/taskworker/scheduler/schedules.py
{ "start": 3100, "end": 7190 }
class ____(Schedule): """ Task schedules defined as crontab expressions. crontab expressions naturally align to clock intervals. For example an interval of `crontab(minute="*/2")` will spawn on the even numbered minutes. If a crontab schedule loses its last_run state, it will assume that one or more intervals have been missed, and it will align to the next interval window. Missed intervals will not be recovered. For tasks with very long intervals, you should consider the impact of a deploy or scheduler restart causing a missed window. Consider a more frequent interval to help spread load out and reduce the impacts of missed intervals. """ def __init__(self, name: str, crontab: crontab) -> None: self._crontab = crontab self._name = name try: self._cronsim = CronSim(str(crontab), timezone.now()) except CronSimError as e: raise ValueError(f"crontab expression {self._crontab} is invalid") from e def monitor_value(self) -> str: """Get the crontab expression as a string""" return str(self._crontab) def is_due(self, last_run: datetime | None = None) -> bool: """Check if the schedule is due to run again based on last_run.""" if last_run is None: last_run = timezone.now() - timedelta(minutes=1) remaining = self.remaining_seconds(last_run) return remaining <= 0 def remaining_seconds(self, last_run: datetime | None = None) -> int: """ Get the number of seconds until this schedule is due again Use the current time to find the next schedule time """ if last_run is None: last_run = timezone.now() - timedelta(minutes=1) # This could result in missed beats, or increased load on redis. last_run = last_run.replace(second=0, microsecond=0) now = timezone.now().replace(second=0, microsecond=0) # A future last_run means we should wait until the # next scheduled time, and then we can try again. # we could be competing with another scheduler, or # missing beats. 
if last_run > now: logger.warning( "taskworker.scheduler.future_value", extra={ "task": self._name, "last_run": last_run, "now": now, }, ) next_run = self._advance(last_run + timedelta(minutes=1)) return int(next_run.timestamp() - now.timestamp()) # If last run is in the past, see if the next runtime # is in the future. if last_run < now: next_run = self._advance(last_run + timedelta(minutes=1)) # Our next runtime is in the future, or now if next_run >= now: return int(next_run.timestamp() - now.timestamp()) # still in the past, we missed an interval :( missed = next_run next_run = self._advance(now) logger.warning( "taskworker.scheduler.missed_interval", extra={ "task": self._name, "last_run": last_run.isoformat(), "missed": missed.isoformat(), "now": now.isoformat(), "next_run": next_run.isoformat(), }, ) return int(next_run.timestamp() - now.timestamp()) # last_run == now, we are on the beat, find the next interval next_run = self._advance(now + timedelta(minutes=1)) return int(next_run.timestamp() - now.timestamp()) def _advance(self, dt: datetime) -> datetime: self._cronsim.dt = dt self._cronsim.advance() return self._cronsim.dt def runtime_after(self, start: datetime) -> datetime: """Get the next time a task should be spawned after `start`""" start = start.replace(second=0, microsecond=0) + timedelta(minutes=1) return self._advance(start)
CrontabSchedule
python
google__jax
tests/fft_test.py
{ "start": 2895, "end": 17147 }
class ____(jtu.JaxTestCase): def testLaxFftAcceptsStringTypes(self): rng = jtu.rand_default(self.rng()) x = rng((10,), np.complex64) self.assertAllClose(np.fft.fft(x).astype(np.complex64), lax.fft(x, "FFT", fft_lengths=(10,))) self.assertAllClose(np.fft.fft(x).astype(np.complex64), lax.fft(x, "fft", fft_lengths=(10,))) def testLaxFftErrors(self): with self.assertRaisesRegex( ValueError, r"FFT input shape \(14, 15\) must have at least as many input " r"dimensions as fft_lengths \(4, 5, 6\)"): lax.fft(np.ones((14, 15)), fft_type="fft", fft_lengths=(4, 5, 6)) with self.assertRaisesRegex( ValueError, r"FFT input shape \(14, 15\) minor dimensions must be equal to " r"fft_lengths \(17,\)"): lax.fft(np.ones((14, 15)), fft_type="fft", fft_lengths=(17,)) with self.assertRaisesRegex( ValueError, r"RFFT input shape \(2, 14, 15\) minor dimensions must be equal to " r"fft_lengths \(14, 12\)"): lax.fft(np.ones((2, 14, 15)), fft_type="rfft", fft_lengths=(14, 12)) with self.assertRaisesRegex( ValueError, r"IRFFT input shape \(2, 14, 15\) minor dimensions must be equal to " r"all except the last fft_length, got fft_lengths=\(13, 15\)"): lax.fft(np.ones((2, 14, 15)), fft_type="irfft", fft_lengths=(13, 15)) with self.assertRaisesRegex( ValueError, "RFFT input must be float32 or float64, got bfloat16"): lax.fft(np.ones((14, 15), jnp.bfloat16), fft_type="rfft", fft_lengths=(5, 6)) @parameterized.parameters((np.float32,), (np.float64,)) def testLaxIrfftDoesNotMutateInputs(self, dtype): if dtype == np.float64 and not config.enable_x64.value: raise self.skipTest("float64 requires jax_enable_x64=true") x = (1 + 1j) * jnp.array([[1.0, 2.0], [3.0, 4.0]], dtype=dtypes.to_complex_dtype(dtype)) y = np.asarray(jnp.fft.irfft2(x)) z = np.asarray(jnp.fft.irfft2(x)) self.assertAllClose(y, z) @jtu.sample_product( [dict(inverse=inverse, real=real, dtype=dtype) for inverse in [False, True] for real in [False, True] for dtype in (real_dtypes if real and not inverse else all_dtypes) ], [dict(shape=shape, 
axes=axes, s=s) for shape in [(10,), (10, 10), (9,), (2, 3, 4), (2, 3, 4, 5), (2, 3, 4, 5, 6)] for axes in _get_fftn_test_axes(shape) for s in _get_fftn_test_s(shape, axes) ], norm=FFT_NORMS, ) def testFftn(self, inverse, real, shape, dtype, axes, s, norm): rng = jtu.rand_default(self.rng()) args_maker = lambda: (rng(shape, dtype),) jnp_op = _get_fftn_func(jnp.fft, inverse, real) np_op = _get_fftn_func(np.fft, inverse, real) jnp_fn = lambda a: jnp_op(a, axes=axes, norm=norm) np_fn = lambda a: np_op(a, axes=axes, norm=norm) if axes is None or axes else a # Numpy promotes to complex128 aggressively. self._CheckAgainstNumpy(np_fn, jnp_fn, args_maker, check_dtypes=False, tol=1e-4) self._CompileAndCheck(jnp_fn, args_maker, atol={np.complex64: 2e-6}, rtol={np.float32: 2e-6}) # Test gradient for differentiable types. if (config.enable_x64.value and dtype in (float_dtypes if real and not inverse else inexact_dtypes)): # TODO(skye): can we be more precise? tol = 0.16 jtu.check_grads(jnp_fn, args_maker(), order=2, atol=tol, rtol=tol) # check dtypes dtype = jnp_fn(rng(shape, dtype)).dtype expected_dtype = jnp.promote_types(float if inverse and real else complex, dtype) self.assertEqual(dtype, expected_dtype) def testIrfftTranspose(self): # regression test for https://github.com/jax-ml/jax/issues/6223 def build_matrix(linear_func, size): return jax.vmap(linear_func)(jnp.eye(size, size)) def func(x): x, = promote_dtypes_complex(x) return jnp.fft.irfft(jnp.concatenate([jnp.zeros_like(x, shape=1), x[:2] + 1j*x[2:]])) def func_transpose(x): return jax.linear_transpose(func, x)(x)[0] matrix = build_matrix(func, 4) matrix2 = build_matrix(func_transpose, 4).T self.assertAllClose(matrix, matrix2) @jtu.sample_product( inverse=[False, True], real=[False, True], ) def testFftnErrors(self, inverse, real): rng = jtu.rand_default(self.rng()) name = 'fftn' if real: name = 'r' + name if inverse: name = 'i' + name func = _get_fftn_func(jnp.fft, inverse, real) self.assertRaisesRegex( 
ValueError, f"jax.numpy.fft.{name} does not support repeated axes. Got axes \\[1, 1\\].", lambda: func(rng([2, 3], dtype=np.float64), axes=[1, 1])) self.assertRaises( ValueError, lambda: func(rng([2, 3], dtype=np.float64), axes=[2])) self.assertRaises( ValueError, lambda: func(rng([2, 3], dtype=np.float64), axes=[-3])) def testFftEmpty(self): out = jnp.fft.fft(jnp.zeros((0,), jnp.complex64)).block_until_ready() self.assertArraysEqual(jnp.zeros((0,), jnp.complex64), out) @jtu.sample_product( [dict(inverse=inverse, real=real, hermitian=hermitian, dtype=dtype) for inverse in [False, True] for real in [False, True] for hermitian in [False, True] for dtype in (real_dtypes if (real and not inverse) or (hermitian and inverse) else all_dtypes) ], shape=[(10,)], n=[None, 1, 7, 13, 20], axis=[-1, 0], ) def testFft(self, inverse, real, hermitian, shape, dtype, n, axis): rng = jtu.rand_default(self.rng()) args_maker = lambda: (rng(shape, dtype),) name = 'fft' if real: name = 'r' + name elif hermitian: name = 'h' + name if inverse: name = 'i' + name jnp_op = getattr(jnp.fft, name) np_op = getattr(np.fft, name) jnp_fn = lambda a: jnp_op(a, n=n, axis=axis) np_fn = lambda a: np_op(a, n=n, axis=axis) # Numpy promotes to complex128 aggressively. self._CheckAgainstNumpy(np_fn, jnp_fn, args_maker, check_dtypes=False, tol=1e-4) self._CompileAndCheck(jnp_op, args_maker) @jtu.sample_product( inverse=[False, True], real=[False, True], hermitian=[False, True], ) def testFftErrors(self, inverse, real, hermitian): rng = jtu.rand_default(self.rng()) name = 'fft' if real: name = 'r' + name elif hermitian: name = 'h' + name if inverse: name = 'i' + name func = getattr(jnp.fft, name) self.assertRaisesRegex( ValueError, f"jax.numpy.fft.{name} does not support multiple axes. " f"Please use jax.numpy.fft.{name}n. Got axis = \\[1, 1\\].", lambda: func(rng([2, 3], dtype=np.float64), axis=[1, 1]) ) self.assertRaisesRegex( ValueError, f"jax.numpy.fft.{name} does not support multiple axes. 
" f"Please use jax.numpy.fft.{name}n. Got axis = \\(1, 1\\).", lambda: func(rng([2, 3], dtype=np.float64), axis=(1, 1)) ) self.assertRaises( ValueError, lambda: func(rng([2, 3], dtype=np.float64), axis=[2])) self.assertRaises( ValueError, lambda: func(rng([2, 3], dtype=np.float64), axis=[-3])) @jtu.sample_product( [dict(inverse=inverse, real=real, dtype=dtype) for inverse in [False, True] for real in [False, True] for dtype in (real_dtypes if real and not inverse else all_dtypes) ], shape=[(16, 8, 4, 8), (16, 8, 4, 8, 4)], axes=[(-2, -1), (0, 1), (1, 3), (-1, 2)], norm=FFT_NORMS, ) def testFft2_(self, inverse, real, shape, dtype, axes, norm): rng = jtu.rand_default(self.rng()) args_maker = lambda: (rng(shape, dtype),) name = 'fft2' if real: name = 'r' + name if inverse: name = 'i' + name jnp_op = getattr(jnp.fft, name) np_op = getattr(np.fft, name) jnp_fn = lambda a: jnp_op(a, axes=axes, norm=norm) np_fn = lambda a: np_op(a, axes=axes, norm=norm) if axes is None or axes else a # Numpy promotes to complex128 aggressively. self._CheckAgainstNumpy(np_fn, jnp_fn, args_maker, check_dtypes=False, tol=1e-4) self._CompileAndCheck(jnp_op, args_maker) @jtu.sample_product( inverse=[False, True], real=[False, True], ) def testFft2Errors(self, inverse, real): rng = jtu.rand_default(self.rng()) name = 'fft2' if real: name = 'r' + name if inverse: name = 'i' + name func = getattr(jnp.fft, name) self.assertRaisesRegex( ValueError, "jax.numpy.fft.{} only supports 2 axes. " "Got axes = \\[0\\].".format(name), lambda: func(rng([2, 3], dtype=np.float64), axes=[0]) ) self.assertRaisesRegex( ValueError, "jax.numpy.fft.{} only supports 2 axes. 
" "Got axes = \\(0, 1, 2\\).".format(name), lambda: func(rng([2, 3, 3], dtype=np.float64), axes=(0, 1, 2)) ) self.assertRaises( ValueError, lambda: func(rng([2, 3], dtype=np.float64), axes=[2, 3])) self.assertRaises( ValueError, lambda: func(rng([2, 3], dtype=np.float64), axes=[-3, -4])) @jtu.sample_product( dtype=jtu.dtypes.floating + jtu.dtypes.complex, size=[9, 10, 101, 102], d=[0.1, 2.], device=[None, -1], ) def testFftfreq(self, size, d, dtype, device): rng = jtu.rand_default(self.rng()) args_maker = lambda: (rng([size], dtype),) if device is not None: device = jax.devices()[device] jnp_fn = lambda a: jnp.fft.fftfreq(size, d=d, device=device, dtype=dtype) np_fn = lambda a: np.fft.fftfreq(size, d=d).astype(dtype) # Numpy promotes to complex128 aggressively. self._CheckAgainstNumpy(np_fn, jnp_fn, args_maker, tol=1e-4) self._CompileAndCheck(jnp_fn, args_maker) # Test gradient for differentiable types. if dtype in inexact_dtypes: tol = 0.15 # TODO(skye): can we be more precise? jtu.check_grads(jnp_fn, args_maker(), order=2, atol=tol, rtol=tol) # Test device if device is not None: out = jnp_fn(args_maker()) self.assertEqual(out.devices(), {device}) @jtu.sample_product(n=[[0, 1, 2]]) def testFftfreqErrors(self, n): name = 'fftfreq' func = jnp.fft.fftfreq self.assertRaisesRegex( ValueError, "The n argument of jax.numpy.fft.{} only takes an int. " "Got n = \\[0, 1, 2\\].".format(name), lambda: func(n=n) ) self.assertRaisesRegex( ValueError, "The d argument of jax.numpy.fft.{} only takes a single value. 
" "Got d = \\[0, 1, 2\\].".format(name), lambda: func(n=10, d=n) ) @jtu.sample_product( dtype=all_dtypes, size=[9, 10, 101, 102], d=[0.1, 2.], device=[None, -1], ) def testRfftfreq(self, size, d, dtype, device): rng = jtu.rand_default(self.rng()) args_maker = lambda: (rng([size], dtype),) jnp_op = jnp.fft.rfftfreq np_op = np.fft.rfftfreq if device is not None: device = jax.devices()[device] jnp_fn = lambda a: jnp_op(size, d=d, device=device) np_fn = lambda a: np_op(size, d=d) # Numpy promotes to complex128 aggressively. self._CheckAgainstNumpy(np_fn, jnp_fn, args_maker, check_dtypes=False, tol=1e-4) self._CompileAndCheck(jnp_fn, args_maker) # Test gradient for differentiable types. if dtype in inexact_dtypes: tol = 0.15 # TODO(skye): can we be more precise? jtu.check_grads(jnp_fn, args_maker(), order=2, atol=tol, rtol=tol) # Test device if device is not None: out = jnp_fn(args_maker()) self.assertEqual(out.devices(), {device}) @jtu.sample_product(n=[[0, 1, 2]]) def testRfftfreqErrors(self, n): name = 'rfftfreq' func = jnp.fft.rfftfreq self.assertRaisesRegex( ValueError, "The n argument of jax.numpy.fft.{} only takes an int. " "Got n = \\[0, 1, 2\\].".format(name), lambda: func(n=n) ) self.assertRaisesRegex( ValueError, "The d argument of jax.numpy.fft.{} only takes a single value. 
" "Got d = \\[0, 1, 2\\].".format(name), lambda: func(n=10, d=n) ) @jtu.sample_product( [dict(shape=shape, axes=axes) for shape in [[9], [10], [101], [102], [3, 5], [3, 17], [5, 7, 11]] for axes in _get_fftn_test_axes(shape) ], dtype=all_dtypes, ) def testFftshift(self, shape, dtype, axes): rng = jtu.rand_default(self.rng()) args_maker = lambda: (rng(shape, dtype),) jnp_fn = lambda arg: jnp.fft.fftshift(arg, axes=axes) np_fn = lambda arg: np.fft.fftshift(arg, axes=axes) self._CheckAgainstNumpy(np_fn, jnp_fn, args_maker) @jtu.sample_product( [dict(shape=shape, axes=axes) for shape in [[9], [10], [101], [102], [3, 5], [3, 17], [5, 7, 11]] for axes in _get_fftn_test_axes(shape) ], dtype=all_dtypes, ) def testIfftshift(self, shape, dtype, axes): rng = jtu.rand_default(self.rng()) args_maker = lambda: (rng(shape, dtype),) jnp_fn = lambda arg: jnp.fft.ifftshift(arg, axes=axes) np_fn = lambda arg: np.fft.ifftshift(arg, axes=axes) self._CheckAgainstNumpy(np_fn, jnp_fn, args_maker) @jtu.sample_product( norm=["ortho", "forward"], func_name = ["fft", "ifft"], dtype=jtu.dtypes.integer ) def testFftnormOverflow(self, norm, func_name, dtype): # non-regression test for gh-18453 shape = jnp.array([3] + [900] * 3, dtype=dtype) jax_norm = _fft_norm(shape, func_name, norm) np_norm = np.array(shape).prod(dtype=np.float64) if norm == "ortho": np_norm = np.sqrt(np_norm) if func_name[0] != "i": np_norm = np.reciprocal(np_norm) self.assertArraysAllClose(jax_norm, np_norm, rtol=3e-8, check_dtypes=False) def testFftNormalizationPrecision(self): # reported in https://github.com/jax-ml/jax/issues/23827 if not config.enable_x64.value: raise self.skipTest("requires jax_enable_x64=true") n = 31 a = np.ones((n, 15), dtype="complex128") self.assertArraysAllClose( jnp.fft.ifft(a, n=n, axis=1), np.fft.ifft(a, n=n, axis=1), atol=1e-14) if __name__ == "__main__": absltest.main(testLoader=jtu.JaxTestLoader())
FftTest
python
astropy__astropy
astropy/io/votable/validator/result.py
{ "start": 411, "end": 11779 }
class ____: def __init__(self, url, root="results", timeout=10): self.url = url m = hashlib.md5(usedforsecurity=False) m.update(url) self._hash = m.hexdigest() self._root = root self._path = os.path.join(self._hash[0:2], self._hash[2:4], self._hash[4:]) if not os.path.exists(self.get_dirpath()): os.makedirs(self.get_dirpath()) self.timeout = timeout self.load_attributes() def __enter__(self): return self def __exit__(self, *args): self.save_attributes() def get_dirpath(self): return os.path.join(self._root, self._path) def get_htmlpath(self): return self._path def get_attribute_path(self): return os.path.join(self.get_dirpath(), "values.dat") def get_vo_xml_path(self): return os.path.join(self.get_dirpath(), "vo.xml") # ATTRIBUTES def load_attributes(self): path = self.get_attribute_path() if os.path.exists(path): try: with open(path, "rb") as fd: self._attributes = pickle.load(fd) except Exception: shutil.rmtree(self.get_dirpath()) os.makedirs(self.get_dirpath()) self._attributes = {} else: self._attributes = {} def save_attributes(self): path = self.get_attribute_path() with open(path, "wb") as fd: pickle.dump(self._attributes, fd) def __getitem__(self, key): return self._attributes[key] def __setitem__(self, key, val): self._attributes[key] = val def __contains__(self, key): return key in self._attributes # VO XML def download_xml_content(self): path = self.get_vo_xml_path() if "network_error" not in self._attributes: self["network_error"] = None if os.path.exists(path): return def fail(reason): reason = str(reason) with open(path, "wb") as fd: fd.write(f"FAILED: {reason}\n".encode()) self["network_error"] = reason r = None try: r = urllib.request.urlopen(self.url.decode("ascii"), timeout=self.timeout) except urllib.error.URLError as e: if hasattr(e, "reason"): reason = e.reason else: reason = e.code fail(reason) return except http.client.HTTPException as e: fail(f"HTTPException: {e}") return except (TimeoutError, OSError) as e: fail("Timeout") return if r is 
None: fail("Invalid URL") return try: content = r.read() except TimeoutError as e: fail("Timeout") return else: r.close() with open(path, "wb") as fd: fd.write(content) def get_xml_content(self): path = self.get_vo_xml_path() if not os.path.exists(path): self.download_xml_content() with open(path, "rb") as fd: return fd.read() def validate_vo(self): path = self.get_vo_xml_path() if not os.path.exists(path): self.download_xml_content() self["version"] = "" if "network_error" in self and self["network_error"] is not None: self["nwarnings"] = 0 self["nexceptions"] = 0 self["warnings"] = [] self["xmllint"] = None self["warning_types"] = set() return nexceptions = 0 nwarnings = 0 t = None lines = [] with open(path, "rb") as input: with warnings.catch_warnings(record=True) as warning_lines: try: t = table.parse(input, verify="warn", filename=path) except (ValueError, TypeError, ExpatError) as e: lines.append(str(e)) nexceptions += 1 lines = [str(x.message) for x in warning_lines] + lines if t is not None: self["version"] = version = t.version else: self["version"] = version = "1.0" if "xmllint" not in self: # Now check the VO schema based on the version in # the file. try: success, stdout, stderr = xmlutil.validate_schema(path, version) # OSError is raised when XML file eats all memory and # system sends kill signal. 
except OSError as e: self["xmllint"] = None self["xmllint_content"] = str(e) else: self["xmllint"] = success == 0 self["xmllint_content"] = stderr warning_types = set() for line in lines: w = exceptions.parse_vowarning(line) if w["is_warning"]: nwarnings += 1 if w["is_exception"]: nexceptions += 1 warning_types.add(w["warning"]) self["nwarnings"] = nwarnings self["nexceptions"] = nexceptions self["warnings"] = lines self["warning_types"] = warning_types def has_warning(self, warning_code): return warning_code in self["warning_types"] def match_expectations(self): if "network_error" not in self: self["network_error"] = None if self["expected"] == "good": return ( not self["network_error"] and self["nwarnings"] == 0 and self["nexceptions"] == 0 ) elif self["expected"] == "incorrect": return not self["network_error"] and ( self["nwarnings"] > 0 or self["nexceptions"] > 0 ) elif self["expected"] == "broken": return self["network_error"] is not None def validate_with_votlint(self, path_to_stilts_jar): filename = self.get_vo_xml_path() p = subprocess.Popen( ["java", "-jar", path_to_stilts_jar, "votlint", "validate=false", filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) stdout, stderr = p.communicate() if len(stdout) or p.returncode: self["votlint"] = False else: self["votlint"] = True self["votlint_content"] = stdout def get_result_subsets(results, root, s=None): all_results = [] correct = [] not_expected = [] fail_schema = [] schema_mismatch = [] fail_votlint = [] votlint_mismatch = [] network_failures = [] version_10 = [] version_11 = [] version_12 = [] version_13 = [] version_14 = [] version_15 = [] version_unknown = [] has_warnings = [] warning_set = {} has_exceptions = [] exception_set = {} for url in results: if s: next(s) if isinstance(url, Result): x = url else: x = Result(url, root=root) all_results.append(x) if x["nwarnings"] == 0 and x["nexceptions"] == 0 and x["xmllint"] is True: correct.append(x) if not x.match_expectations(): 
not_expected.append(x) if x["xmllint"] is False: fail_schema.append(x) if x["xmllint"] is False and x["nwarnings"] == 0 and x["nexceptions"] == 0: schema_mismatch.append(x) if "votlint" in x and x["votlint"] is False: fail_votlint.append(x) if "network_error" not in x: x["network_error"] = None if ( x["nwarnings"] == 0 and x["nexceptions"] == 0 and x["network_error"] is None ): votlint_mismatch.append(x) if "network_error" in x and x["network_error"] is not None: network_failures.append(x) version = x["version"] if version == "1.0": version_10.append(x) elif version == "1.1": version_11.append(x) elif version == "1.2": version_12.append(x) elif version == "1.3": version_13.append(x) elif version == "1.4": version_14.append(x) elif version == "1.5": version_15.append(x) else: version_unknown.append(x) if x["nwarnings"] > 0: has_warnings.append(x) for warning in x["warning_types"]: if ( warning is not None and len(warning) == 3 and warning.startswith("W") ): warning_set.setdefault(warning, []) warning_set[warning].append(x) if x["nexceptions"] > 0: has_exceptions.append(x) for exc in x["warning_types"]: if exc is not None and len(exc) == 3 and exc.startswith("E"): exception_set.setdefault(exc, []) exception_set[exc].append(x) warning_set = list(warning_set.items()) warning_set.sort() exception_set = list(exception_set.items()) exception_set.sort() tables = [ ("all", "All tests", all_results), ("correct", "Correct", correct), ("unexpected", "Unexpected", not_expected), ("schema", "Invalid against schema", fail_schema), ( "schema_mismatch", "Invalid against schema/Passed vo.table", schema_mismatch, ["ul"], ), ("fail_votlint", "Failed votlint", fail_votlint), ( "votlint_mismatch", "Failed votlint/Passed vo.table", votlint_mismatch, ["ul"], ), ("network_failures", "Network failures", network_failures), ("version1.0", "Version 1.0", version_10), ("version1.1", "Version 1.1", version_11), ("version1.2", "Version 1.2", version_12), ("version1.3", "Version 1.3", version_13), 
("version1.4", "Version 1.4", version_14), ("version1.5", "Version 1.5", version_15), ("version_unknown", "Version unknown", version_unknown), ("warnings", "Warnings", has_warnings), ] for warning_code, warning in warning_set: if s: next(s) warning_class = getattr(exceptions, warning_code, None) if warning_class: warning_descr = warning_class.get_short_name() tables.append( ( warning_code, f"{warning_code}: {warning_descr}", warning, ["ul", "li"], ) ) tables.append(("exceptions", "Exceptions", has_exceptions)) for exception_code, exc in exception_set: if s: next(s) exception_class = getattr(exceptions, exception_code, None) if exception_class: exception_descr = exception_class.get_short_name() tables.append( ( exception_code, f"{exception_code}: {exception_descr}", exc, ["ul", "li"], ) ) return tables
Result
python
plotly__plotly.py
_plotly_utils/png.py
{ "start": 10392, "end": 10571 }
class ____(Error): """ Problem with input file format. In other words, PNG file does not conform to the specification in some way and is invalid. """
FormatError
python
prabhupant__python-ds
data_structures/binary_trees/array_to_binary_tree.py
{ "start": 198, "end": 679 }
class ____: def __init__(self, val): self.val = val self.left = None self.right = None def create_tree(arr): curr_ptr = 0 child_ptr = 0 root = Node(arr[0]) curr_node = root while i < (len(arr) - 1)/2: curr_ptr = arr[i] child_ptr = i + 1 left_child = arr[child_ptr] right_child = arr[child_ptr + 1] curr_node.left = Node(left_child) curr_node.right = Node(right_child)
Node
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/dialects/mysql/types.py
{ "start": 15305, "end": 16039 }
class ____(sqltypes.TIMESTAMP): """MySQL TIMESTAMP type.""" __visit_name__ = "TIMESTAMP" def __init__(self, timezone: bool = False, fsp: Optional[int] = None): """Construct a MySQL TIMESTAMP type. :param timezone: not used by the MySQL dialect. :param fsp: fractional seconds precision value. MySQL 5.6.4 supports storage of fractional seconds; this parameter will be used when emitting DDL for the TIMESTAMP type. .. note:: DBAPI driver support for fractional seconds may be limited; current support includes MySQL Connector/Python. """ super().__init__(timezone=timezone) self.fsp = fsp
TIMESTAMP
python
walkccc__LeetCode
solutions/2611. Mice and Cheese/2611.py
{ "start": 0, "end": 233 }
class ____: def miceAndCheese( self, reward1: list[int], reward2: list[int], k: int, ) -> int: return (sum(reward2) + sum(heapq.nlargest(k, (a - b for a, b in zip(reward1, reward2)))))
Solution
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/paramSpec1.py
{ "start": 634, "end": 1115 }
class ____(Protocol[P]): def __call__(self, *args: P.args, **kwargs: P.kwargs): ... # This should generate an error because P cannot be used with other # type arguments. def func3(x: SomeWrapper[P, int]): pass # This should generate an error because P cannot be used with other # type arguments. def func4(x: SomeWrapper[[P, int]]): pass def func5(x: SomeWrapper[P]): pass # This form is considered an error. def func6(x: SomeWrapper[[P]]): pass
SomeWrapper
python
readthedocs__readthedocs.org
readthedocs/rtd_tests/tests/test_project_forms.py
{ "start": 21910, "end": 23392 }
class ____(TestCase): def setUp(self): # User with connection # User without connection self.user_github = get(User) self.social_github = get( SocialAccount, user=self.user_github, provider=GitHubProvider.id ) self.user_email = get(User) def test_form_prevalidation_email_user(self): form_auto = ProjectAutomaticForm(user=self.user_email) form_manual = ProjectManualForm(user=self.user_email) # Test validation errors directly self.assertRaises(RichValidationError, form_auto.clean_prevalidation) form_manual.clean_prevalidation() # Test downstream self.assertFalse(form_auto.is_valid()) self.assertEqual(form_auto.errors, {NON_FIELD_ERRORS: mock.ANY}) self.assertTrue(form_manual.is_valid()) self.assertEqual(form_manual.errors, {}) def test_form_prevalidation_github_user(self): form_auto = ProjectAutomaticForm(user=self.user_github) form_manual = ProjectManualForm(user=self.user_github) # Test validation errors directly form_auto.clean_prevalidation() form_manual.clean_prevalidation() # Test downstream self.assertTrue(form_auto.is_valid()) self.assertEqual(form_auto.errors, {}) self.assertTrue(form_manual.is_valid()) self.assertEqual(form_manual.errors, {}) @override_settings(RTD_ALLOW_ORGANIZATIONS=True)
TestProjectPrevalidationForms
python
kamyu104__LeetCode-Solutions
Python/target-sum.py
{ "start": 54, "end": 634 }
class ____(object): def findTargetSumWays(self, nums, S): """ :type nums: List[int] :type S: int :rtype: int """ def subsetSum(nums, S): dp = collections.defaultdict(int) dp[0] = 1 for n in nums: for i in reversed(xrange(n, S+1)): if i-n in dp: dp[i] += dp[i-n] return dp[S] total = sum(nums) if total < S or (S + total) % 2: return 0 P = (S + total) // 2 return subsetSum(nums, P)
Solution
python
sphinx-doc__sphinx
sphinx/domains/python/_object.py
{ "start": 4963, "end": 5019 }
class ____(PyXrefMixin, TypedField): pass
PyTypedField
python
huggingface__transformers
src/transformers/models/timesfm/modular_timesfm.py
{ "start": 3194, "end": 3925 }
class ____(nn.Module): """TimesFM residual block.""" def __init__(self, input_dims, hidden_dims, output_dims): super().__init__() self.input_dims = input_dims self.hidden_dims = hidden_dims self.output_dims = output_dims self.input_layer = nn.Linear(input_dims, hidden_dims) self.activation = nn.SiLU() self.output_layer = nn.Linear(hidden_dims, output_dims) self.residual_layer = nn.Linear(input_dims, output_dims) def forward(self, x): hidden = self.input_layer(x) hidden = self.activation(hidden) output = self.output_layer(hidden) residual = self.residual_layer(x) return output + residual
TimesFmResidualBlock
python
pandas-dev__pandas
pandas/tests/indexes/interval/test_indexing.py
{ "start": 24413, "end": 25293 }
class ____: @pytest.mark.parametrize("tz", ["US/Pacific", None]) def test_putmask_dt64(self, tz): # GH#37968 dti = date_range("2016-01-01", periods=9, tz=tz) idx = IntervalIndex.from_breaks(dti) mask = np.zeros(idx.shape, dtype=bool) mask[0:3] = True result = idx.putmask(mask, idx[-1]) expected = IntervalIndex([idx[-1]] * 3 + list(idx[3:])) tm.assert_index_equal(result, expected) def test_putmask_td64(self): # GH#37968 dti = date_range("2016-01-01", periods=9, unit="ns") tdi = dti - dti[0] idx = IntervalIndex.from_breaks(tdi) mask = np.zeros(idx.shape, dtype=bool) mask[0:3] = True result = idx.putmask(mask, idx[-1]) expected = IntervalIndex([idx[-1]] * 3 + list(idx[3:])) tm.assert_index_equal(result, expected)
TestPutmask
python
pypa__warehouse
warehouse/legacy/api/xmlrpc/cache/interfaces.py
{ "start": 118, "end": 1031 }
class ____(Interface): def create_service(context, request): """ Create the service, given the context and request for which it is being created for. """ def fetch(func, args, kwargs, key, tag, expire): """ Gets cached function return value from the cache or calls func with the supplied args and kwargs, stashing it in the cache. Cache is drawn from by the specified func.__name__ and key. A tag is set on the cached response which can be used for purging. Set expire to override default expiration. """ def purge(tag): """ Issues a purge, clearing all cached objects associated with the tag from the cache. """ def purge_tags(tags): """ Issues a purge, clearing all cached objects associated with each tag in the iterable tags. """
IXMLRPCCache
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/newType1.py
{ "start": 2066, "end": 2240 }
class ____(AbstractBase): def method1(self, /) -> int: return 0 NewDerived = NewType("NewDerived", AbstractBase) new_derived = NewDerived(DerivedBase())
DerivedBase
python
great-expectations__great_expectations
great_expectations/exceptions/exceptions.py
{ "start": 3313, "end": 4022 }
class ____(GreatExpectationsValidationError): def __init__(self, message, validation_error=None, field_name=None) -> None: if validation_error is not None: if ( validation_error and validation_error.messages and isinstance(validation_error.messages, dict) and all(key is None for key in validation_error.messages) ): validation_error.messages = list( itertools.chain.from_iterable(validation_error.messages.values()) ) super().__init__(message=message, validation_error=validation_error) self.field_name = field_name
InvalidBaseYamlConfigError
python
dask__dask
dask/optimization.py
{ "start": 13961, "end": 32408 }
class ____(Enum): token = 0 def __repr__(self) -> str: return "<default>" _default = Default.token def fuse( dsk, keys=None, dependencies=None, ave_width=_default, max_width=_default, max_height=_default, max_depth_new_edges=_default, rename_keys=_default, ): """Fuse tasks that form reductions; more advanced than ``fuse_linear`` This trades parallelism opportunities for faster scheduling by making tasks less granular. It can replace ``fuse_linear`` in optimization passes. This optimization applies to all reductions--tasks that have at most one dependent--so it may be viewed as fusing "multiple input, single output" groups of tasks into a single task. There are many parameters to fine tune the behavior, which are described below. ``ave_width`` is the natural parameter with which to compare parallelism to granularity, so it should always be specified. Reasonable values for other parameters will be determined using ``ave_width`` if necessary. Parameters ---------- dsk: dict dask graph keys: list or set, optional Keys that must remain in the returned dask graph dependencies: dict, optional {key: [list-of-keys]}. Must be a list to provide count of each key This optional input often comes from ``cull`` ave_width: float (default 1) Upper limit for ``width = num_nodes / height``, a good measure of parallelizability. dask.config key: ``optimization.fuse.ave-width`` max_width: int (default infinite) Don't fuse if total width is greater than this. dask.config key: ``optimization.fuse.max-width`` max_height: int or None (default None) Don't fuse more than this many levels. Set to None to dynamically adjust to ``1.5 + ave_width * log(ave_width + 1)``. dask.config key: ``optimization.fuse.max-height`` max_depth_new_edges: int or None (default None) Don't fuse if new dependencies are added after this many levels. Set to None to dynamically adjust to ave_width * 1.5. 
dask.config key: ``optimization.fuse.max-depth-new-edges`` rename_keys: bool or func, optional (default True) Whether to rename the fused keys with ``default_fused_keys_renamer`` or not. Renaming fused keys can keep the graph more understandable and comprehensive, but it comes at the cost of additional processing. If False, then the top-most key will be used. For advanced usage, a function to create the new name is also accepted. dask.config key: ``optimization.fuse.rename-keys`` Returns ------- dsk output graph with keys fused dependencies dict mapping dependencies after fusion. Useful side effect to accelerate other downstream optimizations. """ # Perform low-level fusion unless the user has # specified False explicitly. if config.get("optimization.fuse.active") is False: return dsk, dependencies if keys is not None and not isinstance(keys, set): if not isinstance(keys, list): keys = [keys] keys = set(flatten(keys)) # Read defaults from dask.yaml and/or user-defined config file if ave_width is _default: ave_width = config.get("optimization.fuse.ave-width") assert ave_width is not _default if max_height is _default: max_height = config.get("optimization.fuse.max-height") assert max_height is not _default if max_depth_new_edges is _default: max_depth_new_edges = config.get("optimization.fuse.max-depth-new-edges") assert max_depth_new_edges is not _default if max_depth_new_edges is None: max_depth_new_edges = ave_width * 1.5 if max_width is _default: max_width = config.get("optimization.fuse.max-width") assert max_width is not _default if max_width is None: max_width = 1.5 + ave_width * math.log(ave_width + 1) if not ave_width or not max_height: return dsk, dependencies if rename_keys is _default: rename_keys = config.get("optimization.fuse.rename-keys") assert rename_keys is not _default if rename_keys is True: key_renamer = default_fused_keys_renamer elif rename_keys is False: key_renamer = None elif not callable(rename_keys): raise TypeError("rename_keys must be 
a boolean or callable") else: key_renamer = rename_keys rename_keys = key_renamer is not None if dependencies is None: deps = {k: get_dependencies(dsk, k, as_list=True) for k in dsk} else: deps = { k: v if isinstance(v, list) else get_dependencies(dsk, k, as_list=True) for k, v in dependencies.items() } rdeps = {} for k, vals in deps.items(): for v in vals: if v not in rdeps: rdeps[v] = [k] else: rdeps[v].append(k) deps[k] = set(vals) reducible = set() for k, vals in rdeps.items(): if ( len(vals) == 1 and k not in (keys or ()) and k in dsk and not isinstance(dsk[k], GraphNode) and (type(dsk[k]) is tuple or isinstance(dsk[k], (numbers.Number, str))) and not any(isinstance(dsk[v], GraphNode) for v in vals) ): reducible.add(k) if not reducible and (all(len(set(v)) != 1 for v in rdeps.values())): # Quick return if there's nothing to do. Only progress if there's tasks # fusible by the main `fuse` return dsk, deps rv = dsk.copy() fused_trees = {} # These are the stacks we use to store data as we traverse the graph info_stack = [] children_stack = [] # For speed deps_pop = deps.pop reducible_add = reducible.add reducible_pop = reducible.pop reducible_remove = reducible.remove fused_trees_pop = fused_trees.pop info_stack_append = info_stack.append info_stack_pop = info_stack.pop children_stack_append = children_stack.append children_stack_extend = children_stack.extend children_stack_pop = children_stack.pop while reducible: parent = reducible_pop() reducible_add(parent) while parent in reducible: # Go to the top parent = rdeps[parent][0] children_stack_append(parent) children_stack_extend(reducible & deps[parent]) while True: child = children_stack[-1] if child != parent: children = reducible & deps[child] while children: # Depth-first search children_stack_extend(children) parent = child child = children_stack[-1] children = reducible & deps[child] children_stack_pop() # This is a leaf node in the reduction region # key, task, fused_keys, height, width, number of nodes, 
fudge, set of edges info_stack_append( ( child, rv[child], [child] if rename_keys else None, 1, 1, 1, 0, deps[child] - reducible, ) ) else: children_stack_pop() # Calculate metrics and fuse as appropriate deps_parent = deps[parent] edges = deps_parent - reducible children = deps_parent - edges num_children = len(children) if num_children == 1: ( child_key, child_task, child_keys, height, width, num_nodes, fudge, children_edges, ) = info_stack_pop() num_children_edges = len(children_edges) if fudge > num_children_edges - 1 >= 0: fudge = num_children_edges - 1 edges |= children_edges no_new_edges = len(edges) == num_children_edges if not no_new_edges: fudge += 1 if ( (num_nodes + fudge) / height <= ave_width and # Sanity check; don't go too deep if new levels introduce new edge dependencies (no_new_edges or height < max_depth_new_edges) and ( not isinstance(dsk[parent], GraphNode) # TODO: substitute can be implemented with GraphNode.inline # or isinstance(dsk[child_key], GraphNode) ) ): # Perform substitutions as we go val = subs(dsk[parent], child_key, child_task) deps_parent.remove(child_key) deps_parent |= deps_pop(child_key) del rv[child_key] reducible_remove(child_key) if rename_keys: child_keys.append(parent) fused_trees[parent] = child_keys fused_trees_pop(child_key, None) if children_stack: if no_new_edges: # Linear fuse info_stack_append( ( parent, val, child_keys, height, width, num_nodes, fudge, edges, ) ) else: info_stack_append( ( parent, val, child_keys, height + 1, width, num_nodes + 1, fudge, edges, ) ) else: rv[parent] = val break else: rv[child_key] = child_task reducible_remove(child_key) if children_stack: # Allow the parent to be fused, but only under strict circumstances. # Ensure that linear chains may still be fused. 
if fudge > int(ave_width - 1): fudge = int(ave_width - 1) # This task *implicitly* depends on `edges` info_stack_append( ( parent, rv[parent], [parent] if rename_keys else None, 1, width, 1, fudge, edges, ) ) else: break else: child_keys = [] height = 1 width = 0 num_single_nodes = 0 num_nodes = 0 fudge = 0 children_edges = set() max_num_edges = 0 children_info = info_stack[-num_children:] del info_stack[-num_children:] for ( _, _, _, cur_height, cur_width, cur_num_nodes, cur_fudge, cur_edges, ) in children_info: if cur_height == 1: num_single_nodes += 1 elif cur_height > height: height = cur_height width += cur_width num_nodes += cur_num_nodes fudge += cur_fudge if len(cur_edges) > max_num_edges: max_num_edges = len(cur_edges) children_edges |= cur_edges # Fudge factor to account for possible parallelism with the boundaries num_children_edges = len(children_edges) fudge += min( num_children - 1, max(0, num_children_edges - max_num_edges) ) if fudge > num_children_edges - 1 >= 0: fudge = num_children_edges - 1 edges |= children_edges no_new_edges = len(edges) == num_children_edges if not no_new_edges: fudge += 1 if ( (num_nodes + fudge) / height <= ave_width and num_single_nodes <= ave_width and width <= max_width and height <= max_height and # Sanity check; don't go too deep if new levels introduce new edge dependencies (no_new_edges or height < max_depth_new_edges) and ( not isinstance(dsk[parent], GraphNode) and not any( isinstance(dsk[child_key], GraphNode) for child_key in children ) # TODO: substitute can be implemented with GraphNode.inline # or all( # isintance(dsk[child], GraphNode) for child in children ) ): # Perform substitutions as we go val = dsk[parent] children_deps = set() for child_info in children_info: cur_child = child_info[0] val = subs(val, cur_child, child_info[1]) del rv[cur_child] children_deps |= deps_pop(cur_child) reducible_remove(cur_child) if rename_keys: fused_trees_pop(cur_child, None) child_keys.extend(child_info[2]) deps_parent -= 
children deps_parent |= children_deps if rename_keys: child_keys.append(parent) fused_trees[parent] = child_keys if children_stack: info_stack_append( ( parent, val, child_keys, height + 1, width, num_nodes + 1, fudge, edges, ) ) else: rv[parent] = val break else: for child_info in children_info: rv[child_info[0]] = child_info[1] reducible_remove(child_info[0]) if children_stack: # Allow the parent to be fused, but only under strict circumstances. # Ensure that linear chains may still be fused. if width > max_width: width = max_width if fudge > int(ave_width - 1): fudge = int(ave_width - 1) # key, task, height, width, number of nodes, fudge, set of edges # This task *implicitly* depends on `edges` info_stack_append( ( parent, rv[parent], [parent] if rename_keys else None, 1, width, 1, fudge, edges, ) ) else: break # Traverse upwards parent = rdeps[parent][0] if key_renamer: for root_key, fused_keys in fused_trees.items(): alias = key_renamer(fused_keys) if alias is not None and alias not in rv: rv[alias] = rv[root_key] rv[root_key] = alias deps[alias] = deps[root_key] deps[root_key] = {alias} return rv, deps
Default
python
doocs__leetcode
solution/1200-1299/1278.Palindrome Partitioning III/Solution.py
{ "start": 0, "end": 730 }
class ____: def palindromePartition(self, s: str, k: int) -> int: n = len(s) g = [[0] * n for _ in range(n)] for i in range(n - 1, -1, -1): for j in range(i + 1, n): g[i][j] = int(s[i] != s[j]) if i + 1 < j: g[i][j] += g[i + 1][j - 1] f = [[0] * (k + 1) for _ in range(n + 1)] for i in range(1, n + 1): for j in range(1, min(i, k) + 1): if j == 1: f[i][j] = g[0][i - 1] else: f[i][j] = inf for h in range(j - 1, i): f[i][j] = min(f[i][j], f[h][j - 1] + g[h][i - 1]) return f[n][k]
Solution
python
google__jax
jax/_src/util.py
{ "start": 23455, "end": 24014 }
class ____(Generic[T]): elts_set: set[T] elts_list: list[T] def __init__(self): self.elts_set = set() self.elts_list = [] def add(self, elt: T) -> None: if elt not in self.elts_set: self.elts_set.add(elt) self.elts_list.append(elt) def update(self, elts: Seq[T]) -> None: for e in elts: self.add(e) def __iter__(self) -> Iterator[T]: return iter(self.elts_list) def __len__(self) -> int: return len(self.elts_list) def __contains__(self, elt: T) -> bool: return elt in self.elts_set
OrderedSet
python
davidhalter__jedi
jedi/api/classes.py
{ "start": 20916, "end": 25147 }
class ____(BaseName): """ ``Completion`` objects are returned from :meth:`.Script.complete`. They provide additional information about a completion. """ def __init__(self, inference_state, name, stack, like_name_length, is_fuzzy, cached_name=None): super().__init__(inference_state, name) self._like_name_length = like_name_length self._stack = stack self._is_fuzzy = is_fuzzy self._cached_name = cached_name # Completion objects with the same Completion name (which means # duplicate items in the completion) self._same_name_completions = [] def _complete(self, like_name): append = '' if settings.add_bracket_after_function \ and self.type == 'function': append = '(' name = self._name.get_public_name() if like_name: name = name[self._like_name_length:] return name + append @property def complete(self): """ Only works with non-fuzzy completions. Returns None if fuzzy completions are used. Return the rest of the word, e.g. completing ``isinstance``:: isinstan# <-- Cursor is here would return the string 'ce'. It also adds additional stuff, depending on your ``settings.py``. Assuming the following function definition:: def foo(param=0): pass completing ``foo(par`` would give a ``Completion`` which ``complete`` would be ``am=``. """ if self._is_fuzzy: return None return self._complete(True) @property def name_with_symbols(self): """ Similar to :attr:`.name`, but like :attr:`.name` returns also the symbols, for example assuming the following function definition:: def foo(param=0): pass completing ``foo(`` would give a ``Completion`` which ``name_with_symbols`` would be "param=". """ return self._complete(False) def docstring(self, raw=False, fast=True): """ Documented under :meth:`BaseName.docstring`. """ if self._like_name_length >= 3: # In this case we can just resolve the like name, because we # wouldn't load like > 100 Python modules anymore. 
fast = False return super().docstring(raw=raw, fast=fast) def _get_docstring(self): if self._cached_name is not None: return completion_cache.get_docstring( self._cached_name, self._name.get_public_name(), lambda: self._get_cache() ) return super()._get_docstring() def _get_docstring_signature(self): if self._cached_name is not None: return completion_cache.get_docstring_signature( self._cached_name, self._name.get_public_name(), lambda: self._get_cache() ) return super()._get_docstring_signature() def _get_cache(self): return ( super().type, super()._get_docstring_signature(), super()._get_docstring(), ) @property def type(self): """ Documented under :meth:`BaseName.type`. """ # Purely a speed optimization. if self._cached_name is not None: return completion_cache.get_type( self._cached_name, self._name.get_public_name(), lambda: self._get_cache() ) return super().type def get_completion_prefix_length(self): """ Returns the length of the prefix being completed. For example, completing ``isinstance``:: isinstan# <-- Cursor is here would return 8, because len('isinstan') == 8. Assuming the following function definition:: def foo(param=0): pass completing ``foo(par`` would return 3. """ return self._like_name_length def __repr__(self): return '<%s: %s>' % (type(self).__name__, self._name.get_public_name())
Completion
python
cython__cython
Cython/Compiler/Nodes.py
{ "start": 14644, "end": 15801 }
class ____(Node): # stats a list of StatNode child_attrs = ["stats"] @staticmethod def create_analysed(pos, env, **kw): node = StatListNode(pos, **kw) return node # No node-specific analysis needed def analyse_declarations(self, env): #print "StatListNode.analyse_declarations" ### for stat in self.stats: stat.analyse_declarations(env) def analyse_expressions(self, env): #print "StatListNode.analyse_expressions" ### self.stats = [stat.analyse_expressions(env) for stat in self.stats] return self def generate_function_definitions(self, env, code): #print "StatListNode.generate_function_definitions" ### for stat in self.stats: stat.generate_function_definitions(env, code) def generate_execution_code(self, code): #print "StatListNode.generate_execution_code" ### for stat in self.stats: code.mark_pos(stat.pos) stat.generate_execution_code(code) def annotate(self, code): for stat in self.stats: stat.annotate(code)
StatListNode
python
dagster-io__dagster
python_modules/libraries/dagster-msteams/dagster_msteams_tests/test_hooks.py
{ "start": 460, "end": 5470 }
class ____(Exception): pass def my_message_fn(_): return "Some custom text" @op def pass_op(_): pass @op def fail_op(_): raise SomeUserException() @pytest.mark.parametrize( "webhook_url", [ LEGACY_WEBHOOK_URL, WEBHOOK_URL, ], ) def test_failure_hook_with_pythonic_resource(webhook_url: str, snapshot: Any, mock_post_method): @job(resource_defs={"msteams": MSTeamsResource(hook_url=webhook_url)}) def job_def(): pass_op.with_hooks(hook_defs={teams_on_failure()})() pass_op.alias("fail_op_with_hook").with_hooks(hook_defs={teams_on_failure()})() fail_op.alias("fail_op_without_hook")() fail_op.with_hooks( hook_defs={ teams_on_failure(message_fn=my_message_fn, webserver_base_url="localhost:3000") } )() result = job_def.execute_in_process( raise_on_error=False, run_id=TEST_RUN_ID, # Ensure that run id is consistent for snapshot testing ) assert not result.success assert mock_post_method.call_count == 1 snapshot.assert_match(mock_post_method.call_args_list) @pytest.mark.parametrize( "webhook_url", [ LEGACY_WEBHOOK_URL, WEBHOOK_URL, ], ) def test_success_hook_with_pythonic_resource(webhook_url: str, snapshot: Any, mock_post_method): @job(resource_defs={"msteams": MSTeamsResource(hook_url=webhook_url)}) def job_def(): pass_op.with_hooks(hook_defs={teams_on_success()})() pass_op.alias("success_solid_with_hook").with_hooks(hook_defs={teams_on_success()})() fail_op.alias("success_solid_without_hook")() fail_op.with_hooks( hook_defs={ teams_on_success(message_fn=my_message_fn, webserver_base_url="localhost:3000") } )() result = job_def.execute_in_process( raise_on_error=False, run_id=TEST_RUN_ID, # Ensure that run id is consistent for snapshot testing ) assert not result.success assert mock_post_method.call_count == 2 snapshot.assert_match(mock_post_method.call_args_list) @patch("dagster_msteams.client.TeamsClient.post_message") def test_failure_hook_on_op_instance(mock_teams_post_message): @job(resource_defs={"msteams": msteams_resource}) def job_def(): 
pass_op.with_hooks(hook_defs={teams_on_failure()})() pass_op.alias("fail_op_with_hook").with_hooks(hook_defs={teams_on_failure()})() fail_op.alias("fail_op_without_hook")() fail_op.with_hooks( hook_defs={ teams_on_failure(message_fn=my_message_fn, webserver_base_url="localhost:3000") } )() result = job_def.execute_in_process( run_config={"resources": {"msteams": {"config": {"hook_url": "https://some_url_here/"}}}}, raise_on_error=False, ) assert not result.success assert mock_teams_post_message.call_count == 1 @patch("dagster_msteams.client.TeamsClient.post_message") def test_success_hook_on_op_instance(mock_teams_post_message): @job(resource_defs={"msteams": msteams_resource}) def job_def(): pass_op.with_hooks(hook_defs={teams_on_success()})() pass_op.alias("success_solid_with_hook").with_hooks(hook_defs={teams_on_success()})() fail_op.alias("success_solid_without_hook")() fail_op.with_hooks( hook_defs={ teams_on_success(message_fn=my_message_fn, webserver_base_url="localhost:3000") } )() result = job_def.execute_in_process( run_config={"resources": {"msteams": {"config": {"hook_url": "https://some_url_here/"}}}}, raise_on_error=False, ) assert not result.success assert mock_teams_post_message.call_count == 2 @patch("dagster_msteams.client.TeamsClient.post_message") def test_failure_hook_decorator(mock_teams_post_message): @teams_on_failure(webserver_base_url="http://localhost:3000/") @job(resource_defs={"msteams": msteams_resource}) def job_def(): pass_op() fail_op() fail_op.alias("another_fail_op")() result = job_def.execute_in_process( run_config={"resources": {"msteams": {"config": {"hook_url": "https://some_url_here/"}}}}, raise_on_error=False, ) assert not result.success assert mock_teams_post_message.call_count == 2 @patch("dagster_msteams.client.TeamsClient.post_message") def test_success_hook_decorator(mock_teams_post_message): @teams_on_success(message_fn=my_message_fn, webserver_base_url="http://localhost:3000/") @job(resource_defs={"msteams": 
msteams_resource}) def job_def(): pass_op() pass_op.alias("another_pass_op")() fail_op() result = job_def.execute_in_process( run_config={"resources": {"msteams": {"config": {"hook_url": "https://some_url_here/"}}}}, raise_on_error=False, ) assert not result.success assert mock_teams_post_message.call_count == 2
SomeUserException
python
tiangolo__fastapi
tests/test_additional_responses_router.py
{ "start": 114, "end": 5229 }
class ____(BaseModel): message: str app = FastAPI() router = APIRouter() @router.get("/a", responses={501: {"description": "Error 1"}}) async def a(): return "a" @router.get( "/b", responses={ 502: {"description": "Error 2"}, "4XX": {"description": "Error with range, upper"}, }, ) async def b(): return "b" @router.get( "/c", responses={ "400": {"description": "Error with str"}, "5xx": {"description": "Error with range, lower"}, "default": {"description": "A default response"}, }, ) async def c(): return "c" @router.get( "/d", responses={ "400": {"description": "Error with str"}, "5XX": {"model": ResponseModel}, "default": {"model": ResponseModel}, }, ) async def d(): return "d" app.include_router(router) client = TestClient(app) def test_a(): response = client.get("/a") assert response.status_code == 200, response.text assert response.json() == "a" def test_b(): response = client.get("/b") assert response.status_code == 200, response.text assert response.json() == "b" def test_c(): response = client.get("/c") assert response.status_code == 200, response.text assert response.json() == "c" def test_d(): response = client.get("/d") assert response.status_code == 200, response.text assert response.json() == "d" def test_openapi_schema(): response = client.get("/openapi.json") assert response.status_code == 200, response.text assert response.json() == { "openapi": "3.1.0", "info": {"title": "FastAPI", "version": "0.1.0"}, "paths": { "/a": { "get": { "responses": { "501": {"description": "Error 1"}, "200": { "description": "Successful Response", "content": {"application/json": {"schema": {}}}, }, }, "summary": "A", "operationId": "a_a_get", } }, "/b": { "get": { "responses": { "502": {"description": "Error 2"}, "4XX": {"description": "Error with range, upper"}, "200": { "description": "Successful Response", "content": {"application/json": {"schema": {}}}, }, }, "summary": "B", "operationId": "b_b_get", } }, "/c": { "get": { "responses": { "400": {"description": "Error 
with str"}, "5XX": {"description": "Error with range, lower"}, "200": { "description": "Successful Response", "content": {"application/json": {"schema": {}}}, }, "default": {"description": "A default response"}, }, "summary": "C", "operationId": "c_c_get", } }, "/d": { "get": { "responses": { "400": {"description": "Error with str"}, "5XX": { "description": "Server Error", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ResponseModel" } } }, }, "200": { "description": "Successful Response", "content": {"application/json": {"schema": {}}}, }, "default": { "description": "Default Response", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ResponseModel" } } }, }, }, "summary": "D", "operationId": "d_d_get", } }, }, "components": { "schemas": { "ResponseModel": { "title": "ResponseModel", "required": ["message"], "type": "object", "properties": {"message": {"title": "Message", "type": "string"}}, } } }, }
ResponseModel
python
doocs__leetcode
solution/1200-1299/1250.Check If It Is a Good Array/Solution.py
{ "start": 0, "end": 106 }
class ____: def isGoodArray(self, nums: List[int]) -> bool: return reduce(gcd, nums) == 1
Solution
python
huggingface__transformers
src/transformers/models/hunyuan_v1_dense/modeling_hunyuan_v1_dense.py
{ "start": 22832, "end": 23113 }
class ____(GenericForSequenceClassification, HunYuanDenseV1PreTrainedModel): pass __all__ = [ "HunYuanDenseV1ForCausalLM", "HunYuanDenseV1Model", "HunYuanDenseV1PreTrainedModel", "HunYuanDenseV1ForSequenceClassification", ]
HunYuanDenseV1ForSequenceClassification
python
pytest-dev__pytest
testing/test_config.py
{ "start": 102462, "end": 103860 }
class ____: """Tests for the deprecation of config.inicfg.""" def test_inicfg_deprecated(self, pytester: Pytester) -> None: """Test that accessing config.inicfg issues a deprecation warning.""" pytester.makeini( """ [pytest] minversion = 3.0 """ ) config = pytester.parseconfig() with pytest.warns( PytestDeprecationWarning, match=r"config\.inicfg is deprecated" ): inicfg = config.inicfg # type: ignore[deprecated] assert config.getini("minversion") == "3.0" assert inicfg["minversion"] == "3.0" assert inicfg.get("minversion") == "3.0" del inicfg["minversion"] inicfg["minversion"] = "4.0" assert list(inicfg.keys()) == ["minversion"] assert list(inicfg.items()) == [("minversion", "4.0")] assert len(inicfg) == 1 def test_issue_13946_setting_bool_no_longer_crashes( self, pytester: Pytester ) -> None: """Regression test for #13946 - setting inicfg doesn't cause a crash.""" pytester.makepyfile( """ def pytest_configure(config): config.inicfg["xfail_strict"] = True def test(): pass """ ) result = pytester.runpytest() assert result.ret == 0
TestInicfgDeprecation
python
pytorch__pytorch
test/distributed/test_functional_api.py
{ "start": 15215, "end": 15655 }
class ____(MultiThreadedTestCase): @property def world_size(self): return 2 def setUp(self): super().setUp() self._spawn_threads() def test_all_reduce(self): x = torch.rand([4], requires_grad=True) y = torch.rand([4], requires_grad=True) out = ft_c.all_reduce(x, "sum", dist.group.WORLD) (out + y).sum().backward() self.assertIsNone(x.grad)
TestGradCollectives
python
tensorflow__tensorflow
tensorflow/python/training/coordinator_test.py
{ "start": 1872, "end": 10062 }
class ____(test.TestCase): def testStopAPI(self): coord = coordinator.Coordinator() self.assertFalse(coord.should_stop()) self.assertFalse(coord.wait_for_stop(0.01)) coord.request_stop() self.assertTrue(coord.should_stop()) self.assertTrue(coord.wait_for_stop(0.01)) def testStopAsync(self): coord = coordinator.Coordinator() self.assertFalse(coord.should_stop()) self.assertFalse(coord.wait_for_stop(0.1)) wait_for_stop_ev = threading.Event() has_stopped_ev = threading.Event() t = threading.Thread( target=StopOnEvent, args=(coord, wait_for_stop_ev, has_stopped_ev)) t.start() self.assertFalse(coord.should_stop()) self.assertFalse(coord.wait_for_stop(0.01)) wait_for_stop_ev.set() has_stopped_ev.wait() self.assertTrue(coord.wait_for_stop(0.05)) self.assertTrue(coord.should_stop()) def testJoin(self): coord = coordinator.Coordinator() threads = [ threading.Thread(target=SleepABit, args=(0.01,)), threading.Thread(target=SleepABit, args=(0.02,)), threading.Thread(target=SleepABit, args=(0.01,)) ] for t in threads: t.start() coord.join(threads) for t in threads: self.assertFalse(t.is_alive()) def testJoinAllRegistered(self): coord = coordinator.Coordinator() threads = [ threading.Thread(target=SleepABit, args=(0.01, coord)), threading.Thread(target=SleepABit, args=(0.02, coord)), threading.Thread(target=SleepABit, args=(0.01, coord)) ] for t in threads: t.start() WaitForThreadsToRegister(coord, 3) coord.join() for t in threads: self.assertFalse(t.is_alive()) def testJoinSomeRegistered(self): coord = coordinator.Coordinator() threads = [ threading.Thread(target=SleepABit, args=(0.01, coord)), threading.Thread(target=SleepABit, args=(0.02,)), threading.Thread(target=SleepABit, args=(0.01, coord)) ] for t in threads: t.start() WaitForThreadsToRegister(coord, 2) # threads[1] is not registered we must pass it in. 
coord.join([threads[1]]) for t in threads: self.assertFalse(t.is_alive()) def testJoinGraceExpires(self): def TestWithGracePeriod(stop_grace_period): coord = coordinator.Coordinator() wait_for_stop_ev = threading.Event() has_stopped_ev = threading.Event() threads = [ threading.Thread( target=StopOnEvent, args=(coord, wait_for_stop_ev, has_stopped_ev)), threading.Thread(target=SleepABit, args=(10.0,)) ] for t in threads: t.daemon = True t.start() wait_for_stop_ev.set() has_stopped_ev.wait() with self.assertRaisesRegex(RuntimeError, "threads still running"): coord.join(threads, stop_grace_period_secs=stop_grace_period) TestWithGracePeriod(1e-10) TestWithGracePeriod(0.002) TestWithGracePeriod(1.0) def testJoinWithoutGraceExpires(self): coord = coordinator.Coordinator() wait_for_stop_ev = threading.Event() has_stopped_ev = threading.Event() threads = [ threading.Thread( target=StopOnEvent, args=(coord, wait_for_stop_ev, has_stopped_ev)), threading.Thread(target=SleepABit, args=(10.0,)) ] for t in threads: t.daemon = True t.start() wait_for_stop_ev.set() has_stopped_ev.wait() coord.join(threads, stop_grace_period_secs=1., ignore_live_threads=True) def testJoinRaiseReportExcInfo(self): coord = coordinator.Coordinator() ev_1 = threading.Event() ev_2 = threading.Event() threads = [ threading.Thread( target=RaiseOnEvent, args=(coord, ev_1, ev_2, RuntimeError("First"), False)), threading.Thread( target=RaiseOnEvent, args=(coord, ev_2, None, RuntimeError("Too late"), False)) ] for t in threads: t.start() ev_1.set() with self.assertRaisesRegex(RuntimeError, "First"): coord.join(threads) def testJoinRaiseReportException(self): coord = coordinator.Coordinator() ev_1 = threading.Event() ev_2 = threading.Event() threads = [ threading.Thread( target=RaiseOnEvent, args=(coord, ev_1, ev_2, RuntimeError("First"), True)), threading.Thread( target=RaiseOnEvent, args=(coord, ev_2, None, RuntimeError("Too late"), True)) ] for t in threads: t.start() ev_1.set() with 
self.assertRaisesRegex(RuntimeError, "First"): coord.join(threads) def testJoinIgnoresOutOfRange(self): coord = coordinator.Coordinator() ev_1 = threading.Event() threads = [ threading.Thread( target=RaiseOnEvent, args=(coord, ev_1, None, errors_impl.OutOfRangeError(None, None, "First"), True)) ] for t in threads: t.start() ev_1.set() coord.join(threads) def testJoinIgnoresMyExceptionType(self): coord = coordinator.Coordinator(clean_stop_exception_types=(ValueError,)) ev_1 = threading.Event() threads = [ threading.Thread( target=RaiseOnEvent, args=(coord, ev_1, None, ValueError("Clean stop"), True)) ] for t in threads: t.start() ev_1.set() coord.join(threads) def testJoinRaiseReportExceptionUsingHandler(self): coord = coordinator.Coordinator() ev_1 = threading.Event() ev_2 = threading.Event() threads = [ threading.Thread( target=RaiseOnEventUsingContextHandler, args=(coord, ev_1, ev_2, RuntimeError("First"))), threading.Thread( target=RaiseOnEventUsingContextHandler, args=(coord, ev_2, None, RuntimeError("Too late"))) ] for t in threads: t.start() ev_1.set() with self.assertRaisesRegex(RuntimeError, "First"): coord.join(threads) def testClearStopClearsExceptionToo(self): coord = coordinator.Coordinator() ev_1 = threading.Event() threads = [ threading.Thread( target=RaiseOnEvent, args=(coord, ev_1, None, RuntimeError("First"), True)), ] for t in threads: t.start() with self.assertRaisesRegex(RuntimeError, "First"): ev_1.set() coord.join(threads) coord.clear_stop() threads = [ threading.Thread( target=RaiseOnEvent, args=(coord, ev_1, None, RuntimeError("Second"), True)), ] for t in threads: t.start() with self.assertRaisesRegex(RuntimeError, "Second"): ev_1.set() coord.join(threads) def testRequestStopRaisesIfJoined(self): coord = coordinator.Coordinator() # Join the coordinator right away. 
coord.join([]) reported = False with self.assertRaisesRegex(RuntimeError, "Too late"): try: raise RuntimeError("Too late") except RuntimeError as e: reported = True coord.request_stop(e) self.assertTrue(reported) # If we clear_stop the exceptions are handled normally. coord.clear_stop() try: raise RuntimeError("After clear") except RuntimeError as e: coord.request_stop(e) with self.assertRaisesRegex(RuntimeError, "After clear"): coord.join([]) def testRequestStopRaisesIfJoined_ExcInfo(self): # Same as testRequestStopRaisesIfJoined but using syc.exc_info(). coord = coordinator.Coordinator() # Join the coordinator right away. coord.join([]) reported = False with self.assertRaisesRegex(RuntimeError, "Too late"): try: raise RuntimeError("Too late") except RuntimeError: reported = True coord.request_stop(sys.exc_info()) self.assertTrue(reported) # If we clear_stop the exceptions are handled normally. coord.clear_stop() try: raise RuntimeError("After clear") except RuntimeError: coord.request_stop(sys.exc_info()) with self.assertRaisesRegex(RuntimeError, "After clear"): coord.join([]) def _StopAt0(coord, n): if n[0] == 0: coord.request_stop() else: n[0] -= 1
CoordinatorTest
python
ray-project__ray
rllib/connectors/agent/obs_preproc.py
{ "start": 429, "end": 2520 }
class ____(AgentConnector): """A connector that wraps around existing RLlib observation preprocessors. This includes: - OneHotPreprocessor for Discrete and Multi-Discrete spaces. - GenericPixelPreprocessor and AtariRamPreprocessor for Atari spaces. - TupleFlatteningPreprocessor and DictFlatteningPreprocessor for flattening arbitrary nested input observations. - RepeatedValuesPreprocessor for padding observations from RLlib Repeated observation space. """ def __init__(self, ctx: ConnectorContext): super().__init__(ctx) if hasattr(ctx.observation_space, "original_space"): # ctx.observation_space is the space this Policy deals with. # We need to preprocess data from the original observation space here. obs_space = ctx.observation_space.original_space else: obs_space = ctx.observation_space self._preprocessor = get_preprocessor(obs_space)( obs_space, ctx.config.get("model", {}) ) def is_identity(self): """Returns whether this preprocessor connector is a no-op preprocessor.""" return isinstance(self._preprocessor, NoPreprocessor) def transform(self, ac_data: AgentConnectorDataType) -> AgentConnectorDataType: d = ac_data.data assert type(d) is dict, ( "Single agent data must be of type Dict[str, TensorStructType] but is of " "type {}".format(type(d)) ) if SampleBatch.OBS in d: d[SampleBatch.OBS] = self._preprocessor.transform(d[SampleBatch.OBS]) if SampleBatch.NEXT_OBS in d: d[SampleBatch.NEXT_OBS] = self._preprocessor.transform( d[SampleBatch.NEXT_OBS] ) return ac_data def to_state(self): return ObsPreprocessorConnector.__name__, None @staticmethod def from_state(ctx: ConnectorContext, params: Any): return ObsPreprocessorConnector(ctx) register_connector(ObsPreprocessorConnector.__name__, ObsPreprocessorConnector)
ObsPreprocessorConnector
python
keras-team__keras
keras/src/metrics/confusion_metrics.py
{ "start": 9603, "end": 15874 }
class ____(Metric): """Computes the precision of the predictions with respect to the labels. The metric creates two local variables, `true_positives` and `false_positives` that are used to compute the precision. This value is ultimately returned as `precision`, an idempotent operation that simply divides `true_positives` by the sum of `true_positives` and `false_positives`. If `sample_weight` is `None`, weights default to 1. Use `sample_weight` of 0 to mask values. If `top_k` is set, we'll calculate precision as how often on average a class among the top-k classes with the highest predicted values of a batch entry is correct and can be found in the label for that entry. If `class_id` is specified, we calculate precision by considering only the entries in the batch for which `class_id` is above the threshold and/or in the top-k highest predictions, and computing the fraction of them for which `class_id` is indeed a correct label. Args: thresholds: (Optional) A float value, or a Python list/tuple of float threshold values in `[0, 1]`. A threshold is compared with prediction values to determine the truth value of predictions (i.e., above the threshold is `True`, below is `False`). If used with a loss function that sets `from_logits=True` (i.e. no sigmoid applied to predictions), `thresholds` should be set to 0. One metric value is generated for each threshold value. If neither `thresholds` nor `top_k` are set, the default is to calculate precision with `thresholds=0.5`. top_k: (Optional) Unset by default. An int value specifying the top-k predictions to consider when calculating precision. class_id: (Optional) Integer class ID for which we want binary metrics. This must be in the half-open interval `[0, num_classes)`, where `num_classes` is the last dimension of predictions. name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. 
Example: >>> m = keras.metrics.Precision() >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1]) >>> m.result() 0.6666667 >>> m.reset_state() >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1], sample_weight=[0, 0, 1, 0]) >>> m.result() 1.0 >>> # With top_k=2, it will calculate precision over y_true[:2] >>> # and y_pred[:2] >>> m = keras.metrics.Precision(top_k=2) >>> m.update_state([0, 0, 1, 1], [1, 1, 1, 1]) >>> m.result() 0.0 >>> # With top_k=4, it will calculate precision over y_true[:4] >>> # and y_pred[:4] >>> m = keras.metrics.Precision(top_k=4) >>> m.update_state([0, 0, 1, 1], [1, 1, 1, 1]) >>> m.result() 0.5 Usage with `compile()` API: ```python model.compile(optimizer='sgd', loss='binary_crossentropy', metrics=[keras.metrics.Precision()]) ``` Usage with a loss with `from_logits=True`: ```python model.compile(optimizer='adam', loss=keras.losses.BinaryCrossentropy(from_logits=True), metrics=[keras.metrics.Precision(thresholds=0)]) ``` """ def __init__( self, thresholds=None, top_k=None, class_id=None, name=None, dtype=None ): super().__init__(name=name, dtype=dtype) # Metric should be maximized during optimization. self._direction = "up" self.init_thresholds = thresholds self.top_k = top_k self.class_id = class_id default_threshold = 0.5 if top_k is None else metrics_utils.NEG_INF self.thresholds = metrics_utils.parse_init_thresholds( thresholds, default_threshold=default_threshold ) self._thresholds_distributed_evenly = ( metrics_utils.is_evenly_distributed_thresholds(self.thresholds) ) self.true_positives = self.add_variable( shape=(len(self.thresholds),), initializer=initializers.Zeros(), name="true_positives", ) self.false_positives = self.add_variable( shape=(len(self.thresholds),), initializer=initializers.Zeros(), name="false_positives", ) def update_state(self, y_true, y_pred, sample_weight=None): """Accumulates true positive and false positive statistics. Args: y_true: The ground truth values, with the same dimensions as `y_pred`. Will be cast to `bool`. 
y_pred: The predicted values. Each element must be in the range `[0, 1]`. sample_weight: Optional weighting of each example. Defaults to `1`. Can be a tensor whose rank is either 0, or the same rank as `y_true`, and must be broadcastable to `y_true`. """ metrics_utils.update_confusion_matrix_variables( { metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives, # noqa: E501 metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives, # noqa: E501 }, y_true, y_pred, thresholds=self.thresholds, thresholds_distributed_evenly=self._thresholds_distributed_evenly, top_k=self.top_k, class_id=self.class_id, sample_weight=sample_weight, ) def result(self): result = ops.divide_no_nan( self.true_positives, ops.add(self.true_positives, self.false_positives), ) return result[0] if len(self.thresholds) == 1 else result def reset_state(self): num_thresholds = len(to_list(self.thresholds)) self.true_positives.assign(ops.zeros((num_thresholds,))) self.false_positives.assign(ops.zeros((num_thresholds,))) def get_config(self): config = { "thresholds": self.init_thresholds, "top_k": self.top_k, "class_id": self.class_id, } base_config = super().get_config() return {**base_config, **config} @keras_export("keras.metrics.Recall")
Precision
python
EpistasisLab__tpot
tpot/search_spaces/tuple_index.py
{ "start": 1504, "end": 2414 }
class ____(): """ TPOT uses tuples to create a unique id for some pipeline search spaces. However, tuples sometimes don't interact correctly with pandas indexes. This class is a wrapper around a tuple that allows it to be used as a key in a dictionary, without it being an itereable. An alternative could be to make unique id return a string, but this would not work with graphpipelines, which require a special object. This class allows linear pipelines to contain graph pipelines while still being able to be used as a key in a dictionary. """ def __init__(self, tup): self.tup = tup def __eq__(self,other) -> bool: return self.tup == other def __hash__(self) -> int: return self.tup.__hash__() def __str__(self) -> str: return self.tup.__str__() def __repr__(self) -> str: return self.tup.__repr__()
TupleIndex
python
cython__cython
Cython/Compiler/Tests/TestGrammar.py
{ "start": 2233, "end": 5086 }
class ____(CythonTest): def test_invalid_number_literals(self): for literal in INVALID_UNDERSCORE_LITERALS: for expression in ['%s', '1 + %s', '%s + 1', '2 * %s', '%s * 2']: code = 'x = ' + expression % literal try: self.fragment('''\ # cython: language_level=3 ''' + code) except CompileError as exc: assert code in [s.strip() for s in str(exc).splitlines()], str(exc) else: assert False, "Invalid Cython code '%s' failed to raise an exception" % code def test_valid_number_literals(self): for literal in VALID_UNDERSCORE_LITERALS: for i, expression in enumerate(['%s', '1 + %s', '%s + 1', '2 * %s', '%s * 2']): code = 'x = ' + expression % literal node = self.fragment('''\ # cython: language_level=3 ''' + code).root assert node is not None literal_node = node.stats[0].rhs # StatListNode([SingleAssignmentNode('x', expr)]) if i > 0: # Add/MulNode() -> literal is first or second operand literal_node = literal_node.operand2 if i % 2 else literal_node.operand1 if 'j' in literal or 'J' in literal: if '+' in literal: # FIXME: tighten this test assert isinstance(literal_node, ExprNodes.AddNode), (literal, literal_node) else: assert isinstance(literal_node, ExprNodes.ImagNode), (literal, literal_node) elif '.' 
in literal or 'e' in literal or 'E' in literal and not ('0x' in literal or '0X' in literal): assert isinstance(literal_node, ExprNodes.FloatNode), (literal, literal_node) else: assert isinstance(literal_node, ExprNodes.IntNode), (literal, literal_node) def test_invalid_ellipsis(self): ERR = ":{0}:{1}: Expected an identifier or literal" for code, line, col in INVALID_ELLIPSIS: try: ast.parse(textwrap.dedent(code)) except SyntaxError as exc: assert True else: assert False, "Invalid Python code '%s' failed to raise an exception" % code try: self.fragment('''\ # cython: language_level=3 ''' + code) except CompileError as exc: assert ERR.format(line, col) in str(exc), str(exc) else: assert False, "Invalid Cython code '%s' failed to raise an exception" % code if __name__ == "__main__": import unittest unittest.main()
TestGrammar
python
python-openxml__python-docx
src/docx/text/parfmt.py
{ "start": 198, "end": 10339 }
class ____(ElementProxy): """Provides access to paragraph formatting such as justification, indentation, line spacing, space before and after, and widow/orphan control.""" @property def alignment(self): """A member of the :ref:`WdParagraphAlignment` enumeration specifying the justification setting for this paragraph. A value of |None| indicates paragraph alignment is inherited from the style hierarchy. """ pPr = self._element.pPr if pPr is None: return None return pPr.jc_val @alignment.setter def alignment(self, value): pPr = self._element.get_or_add_pPr() pPr.jc_val = value @property def first_line_indent(self): """|Length| value specifying the relative difference in indentation for the first line of the paragraph. A positive value causes the first line to be indented. A negative value produces a hanging indent. |None| indicates first line indentation is inherited from the style hierarchy. """ pPr = self._element.pPr if pPr is None: return None return pPr.first_line_indent @first_line_indent.setter def first_line_indent(self, value): pPr = self._element.get_or_add_pPr() pPr.first_line_indent = value @property def keep_together(self): """|True| if the paragraph should be kept "in one piece" and not broken across a page boundary when the document is rendered. |None| indicates its effective value is inherited from the style hierarchy. """ pPr = self._element.pPr if pPr is None: return None return pPr.keepLines_val @keep_together.setter def keep_together(self, value): self._element.get_or_add_pPr().keepLines_val = value @property def keep_with_next(self): """|True| if the paragraph should be kept on the same page as the subsequent paragraph when the document is rendered. For example, this property could be used to keep a section heading on the same page as its first paragraph. |None| indicates its effective value is inherited from the style hierarchy. 
""" pPr = self._element.pPr if pPr is None: return None return pPr.keepNext_val @keep_with_next.setter def keep_with_next(self, value): self._element.get_or_add_pPr().keepNext_val = value @property def left_indent(self): """|Length| value specifying the space between the left margin and the left side of the paragraph. |None| indicates the left indent value is inherited from the style hierarchy. Use an |Inches| value object as a convenient way to apply indentation in units of inches. """ pPr = self._element.pPr if pPr is None: return None return pPr.ind_left @left_indent.setter def left_indent(self, value): pPr = self._element.get_or_add_pPr() pPr.ind_left = value @property def line_spacing(self): """|float| or |Length| value specifying the space between baselines in successive lines of the paragraph. A value of |None| indicates line spacing is inherited from the style hierarchy. A float value, e.g. ``2.0`` or ``1.75``, indicates spacing is applied in multiples of line heights. A |Length| value such as ``Pt(12)`` indicates spacing is a fixed height. The |Pt| value class is a convenient way to apply line spacing in units of points. Assigning |None| resets line spacing to inherit from the style hierarchy. """ pPr = self._element.pPr if pPr is None: return None return self._line_spacing(pPr.spacing_line, pPr.spacing_lineRule) @line_spacing.setter def line_spacing(self, value): pPr = self._element.get_or_add_pPr() if value is None: pPr.spacing_line = None pPr.spacing_lineRule = None elif isinstance(value, Length): pPr.spacing_line = value if pPr.spacing_lineRule != WD_LINE_SPACING.AT_LEAST: pPr.spacing_lineRule = WD_LINE_SPACING.EXACTLY else: pPr.spacing_line = Emu(value * Twips(240)) pPr.spacing_lineRule = WD_LINE_SPACING.MULTIPLE @property def line_spacing_rule(self): """A member of the :ref:`WdLineSpacing` enumeration indicating how the value of :attr:`line_spacing` should be interpreted. 
Assigning any of the :ref:`WdLineSpacing` members :attr:`SINGLE`, :attr:`DOUBLE`, or :attr:`ONE_POINT_FIVE` will cause the value of :attr:`line_spacing` to be updated to produce the corresponding line spacing. """ pPr = self._element.pPr if pPr is None: return None return self._line_spacing_rule(pPr.spacing_line, pPr.spacing_lineRule) @line_spacing_rule.setter def line_spacing_rule(self, value): pPr = self._element.get_or_add_pPr() if value == WD_LINE_SPACING.SINGLE: pPr.spacing_line = Twips(240) pPr.spacing_lineRule = WD_LINE_SPACING.MULTIPLE elif value == WD_LINE_SPACING.ONE_POINT_FIVE: pPr.spacing_line = Twips(360) pPr.spacing_lineRule = WD_LINE_SPACING.MULTIPLE elif value == WD_LINE_SPACING.DOUBLE: pPr.spacing_line = Twips(480) pPr.spacing_lineRule = WD_LINE_SPACING.MULTIPLE else: pPr.spacing_lineRule = value @property def page_break_before(self): """|True| if the paragraph should appear at the top of the page following the prior paragraph. |None| indicates its effective value is inherited from the style hierarchy. """ pPr = self._element.pPr if pPr is None: return None return pPr.pageBreakBefore_val @page_break_before.setter def page_break_before(self, value): self._element.get_or_add_pPr().pageBreakBefore_val = value @property def right_indent(self): """|Length| value specifying the space between the right margin and the right side of the paragraph. |None| indicates the right indent value is inherited from the style hierarchy. Use a |Cm| value object as a convenient way to apply indentation in units of centimeters. """ pPr = self._element.pPr if pPr is None: return None return pPr.ind_right @right_indent.setter def right_indent(self, value): pPr = self._element.get_or_add_pPr() pPr.ind_right = value @property def space_after(self): """|Length| value specifying the spacing to appear between this paragraph and the subsequent paragraph. |None| indicates this value is inherited from the style hierarchy. 
|Length| objects provide convenience properties, such as :attr:`~.Length.pt` and :attr:`~.Length.inches`, that allow easy conversion to various length units. """ pPr = self._element.pPr if pPr is None: return None return pPr.spacing_after @space_after.setter def space_after(self, value): self._element.get_or_add_pPr().spacing_after = value @property def space_before(self): """|Length| value specifying the spacing to appear between this paragraph and the prior paragraph. |None| indicates this value is inherited from the style hierarchy. |Length| objects provide convenience properties, such as :attr:`~.Length.pt` and :attr:`~.Length.cm`, that allow easy conversion to various length units. """ pPr = self._element.pPr if pPr is None: return None return pPr.spacing_before @space_before.setter def space_before(self, value): self._element.get_or_add_pPr().spacing_before = value @lazyproperty def tab_stops(self): """|TabStops| object providing access to the tab stops defined for this paragraph format.""" pPr = self._element.get_or_add_pPr() return TabStops(pPr) @property def widow_control(self): """|True| if the first and last lines in the paragraph remain on the same page as the rest of the paragraph when Word repaginates the document. |None| indicates its effective value is inherited from the style hierarchy. """ pPr = self._element.pPr if pPr is None: return None return pPr.widowControl_val @widow_control.setter def widow_control(self, value): self._element.get_or_add_pPr().widowControl_val = value @staticmethod def _line_spacing(spacing_line, spacing_lineRule): """Return the line spacing value calculated from the combination of `spacing_line` and `spacing_lineRule`. Returns a |float| number of lines when `spacing_lineRule` is ``WD_LINE_SPACING.MULTIPLE``, otherwise a |Length| object of absolute line height is returned. Returns |None| when `spacing_line` is |None|. 
""" if spacing_line is None: return None if spacing_lineRule == WD_LINE_SPACING.MULTIPLE: return spacing_line / Pt(12) return spacing_line @staticmethod def _line_spacing_rule(line, lineRule): """Return the line spacing rule value calculated from the combination of `line` and `lineRule`. Returns special members of the :ref:`WdLineSpacing` enumeration when line spacing is single, double, or 1.5 lines. """ if lineRule == WD_LINE_SPACING.MULTIPLE: if line == Twips(240): return WD_LINE_SPACING.SINGLE if line == Twips(360): return WD_LINE_SPACING.ONE_POINT_FIVE if line == Twips(480): return WD_LINE_SPACING.DOUBLE return lineRule
ParagraphFormat
python
streamlit__streamlit
lib/streamlit/testing/v1/element_tree.py
{ "start": 15603, "end": 17023 }
class ____(Widget): """A representation of ``st.date_input``.""" _value: DateValue | None | InitialValue proto: DateInputProto = field(repr=False) label: str min: date max: date is_range: bool help: str form_id: str def __init__(self, proto: DateInputProto, root: ElementTree) -> None: super().__init__(proto, root) self._value = InitialValue() self.type = "date_input" self.min = datetime.strptime(proto.min, "%Y/%m/%d").date() self.max = datetime.strptime(proto.max, "%Y/%m/%d").date() def set_value(self, v: DateValue) -> DateInput: """Set the value of the widget.""" self._value = v return self @property def _widget_state(self) -> WidgetState: ws = WidgetState() ws.id = self.id serde = DateInputSerde(None) # type: ignore ws.string_array_value.data[:] = serde.serialize(self.value) return ws @property def value(self) -> DateWidgetReturn: """The value of the widget. (date or Tuple of date)""" # noqa: D400 if not isinstance(self._value, InitialValue): parsed, _ = _parse_date_value(self._value) return tuple(parsed) if parsed is not None else None # type: ignore state = self.root.session_state assert state return state[self.id] # type: ignore @dataclass(repr=False)
DateInput
python
huggingface__transformers
tests/models/llava_next/test_image_processing_llava_next.py
{ "start": 3704, "end": 13212 }
class ____(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = LlavaNextImageProcessor if is_vision_available() else None fast_image_processing_class = LlavaNextImageProcessorFast if is_torchvision_available() else None # Copied from tests.models.clip.test_image_processing_clip.CLIPImageProcessingTest.setUp with CLIP->LlavaNext def setUp(self): super().setUp() self.image_processor_tester = LlavaNextImageProcessingTester(self) @property # Copied from tests.models.clip.test_image_processing_clip.CLIPImageProcessingTest.image_processor_dict def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "do_center_crop")) self.assertTrue(hasattr(image_processing, "center_crop")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_convert_rgb")) self.assertTrue(hasattr(image_processing, "image_grid_pinpoints")) # Copied from tests.models.clip.test_image_processing_clip.CLIPImageProcessingTest.test_image_processor_from_dict_with_kwargs def test_image_processor_from_dict_with_kwargs(self): for image_processing_class in self.image_processor_list: image_processor = image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"shortest_edge": 20}) self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18}) image_processor = image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84) self.assertEqual(image_processor.size, {"shortest_edge": 42}) 
self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84}) def test_select_best_resolution(self): possible_resolutions = [[672, 336], [336, 672], [672, 672], [336, 1008], [1008, 336]] # Test with a square aspect ratio best_resolution = select_best_resolution((336, 336), possible_resolutions) self.assertEqual(best_resolution, (672, 336)) def test_call_pil(self): for image_processing_class in self.image_processor_list: # Initialize image_processing image_processing = image_processing_class(**self.image_processor_dict) # create random PIL images image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = (1, 1445, 3, 18, 18) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_output_image_shape = (7, 1445, 3, 18, 18) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) def test_call_numpy(self): for image_processing_class in self.image_processor_list: # Initialize image_processing image_processing = image_processing_class(**self.image_processor_dict) # create random numpy tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = (1, 1445, 3, 18, 18) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_output_image_shape = (7, 1445, 3, 18, 18) self.assertEqual(tuple(encoded_images.shape), 
expected_output_image_shape) def test_call_pytorch(self): for image_processing_class in self.image_processor_list: # Initialize image_processing image_processing = image_processing_class(**self.image_processor_dict) # create random PyTorch tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = (1, 1445, 3, 18, 18) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_output_image_shape = (7, 1445, 3, 18, 18) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) @unittest.skip( reason="LlavaNextImageProcessor doesn't treat 4 channel PIL and numpy consistently yet" ) # FIXME Amy def test_call_numpy_4_channels(self): pass def test_nested_input(self): for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True) # Test batched as a list of images encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_output_image_shape = (7, 1445, 3, 18, 18) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) # Test batched as a nested list of images, where each sublist is one batch image_inputs_nested = [image_inputs[:3], image_inputs[3:]] encoded_images_nested = image_processing(image_inputs_nested, return_tensors="pt").pixel_values expected_output_image_shape = (7, 1445, 3, 18, 18) self.assertEqual(tuple(encoded_images_nested.shape), expected_output_image_shape) # Image processor should return same pixel values, independently of ipnut format 
self.assertTrue((encoded_images_nested == encoded_images).all()) def test_pad_for_patching(self): for image_processing_class in self.image_processor_list: if image_processing_class == self.fast_image_processing_class: numpify = False torchify = True input_data_format = image_processing_class.data_format else: numpify = True torchify = False input_data_format = ChannelDimension.LAST image_processing = image_processing_class(**self.image_processor_dict) # Create odd-sized images image_input = self.image_processor_tester.prepare_image_inputs( equal_resolution=True, numpify=numpify, torchify=torchify, )[0] self.assertIn(image_input.shape, [(3, 400, 400), (400, 400, 3)]) # Test odd-width image_shape = (400, 601) encoded_images = image_processing._pad_for_patching(image_input, image_shape, input_data_format) encoded_image_shape = ( encoded_images.shape[:-1] if input_data_format == ChannelDimension.LAST else encoded_images.shape[1:] ) self.assertEqual(encoded_image_shape, image_shape) # Test odd-height image_shape = (503, 400) encoded_images = image_processing._pad_for_patching(image_input, image_shape, input_data_format) encoded_image_shape = ( encoded_images.shape[:-1] if input_data_format == ChannelDimension.LAST else encoded_images.shape[1:] ) self.assertEqual(encoded_image_shape, image_shape) def test_call_without_padding(self): for image_processing_class in self.image_processor_list: # Initialize image_processing image_processing = image_processing_class(**self.image_processor_dict) # create random PyTorch tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True) # Test not batched input encoded_images = image_processing(image_inputs[0], do_pad=False).pixel_values self.assertEqual(len(encoded_images), 1) # Test batched encoded_images = image_processing(image_inputs, do_pad=False).pixel_values self.assertEqual(len(encoded_images), len(image_inputs))
LlavaNextImageProcessingTest
python
geekcomputers__Python
Python Programs/Program to reverse Linked List( Recursive solution).py
{ "start": 135, "end": 1207 }
class ____: def __init__(self, data): self.data = data self.next = None def reverseLinkedListRec(head): if head is None: return None if head.next is None: return head smallhead = reverseLinkedListRec(head.next) head.next.next = head head.next = None return smallhead # Taking Input Using Fast I/O def takeInput(): head = None tail = None datas = list(map(int, stdin.readline().rstrip().split(" "))) i = 0 while (i < len(datas)) and (datas[i] != -1): data = datas[i] newNode = Node(data) if head is None: head = newNode tail = newNode else: tail.next = newNode tail = newNode i += 1 return head def printLinkedList(head): while head is not None: print(head.data, end=" ") head = head.next print() # main t = int(stdin.readline().rstrip()) while t > 0: head = takeInput() newHead = reverseLinkedListRec(head) printLinkedList(newHead) t -= 1
Node
python
pypa__pip
src/pip/_vendor/packaging/utils.py
{ "start": 614, "end": 744 }
class ____(ValueError): """ An invalid wheel filename was found, users should refer to PEP 427. """
InvalidWheelFilename
python
google__pytype
pytype/pyc/opcodes.py
{ "start": 16866, "end": 16992 }
class ____(OpcodeWithArg): # Arg: Jump offset to finally block _FLAGS = HAS_JREL | HAS_ARGUMENT __slots__ = ()
CALL_FINALLY
python
pandas-dev__pandas
asv_bench/benchmarks/sparse.py
{ "start": 5803, "end": 6083 }
class ____: def setup(self): N = 1_000_000 d = 1e-5 arr = make_array(N, d, np.nan, np.float64) self.sp_arr = SparseArray(arr) def time_integer_indexing(self): self.sp_arr[78] def time_slice(self): self.sp_arr[1:]
GetItem
python
tensorflow__tensorflow
tensorflow/python/framework/subscribe_test.py
{ "start": 1413, "end": 13247 }
class ____(test_util.TensorFlowTestCase): def _ExpectSubscribedIdentities(self, container): """Convenience function to test a container of subscribed identities.""" self.assertTrue( all(subscribe._is_subscribed_identity(x) for x in container)) @test_util.run_deprecated_v1 def testSideEffect(self): a = constant_op.constant(1) b = constant_op.constant(1) c = math_ops.add(a, b) with ops.control_dependencies([c]): d = constant_op.constant(42) n = math_ops.negative(c) shared = [] def sub(t): shared.append(t) return t c0 = c self.assertTrue(c0.op in d.op.control_inputs) c = subscribe.subscribe(c, lambda t: script_ops.py_func(sub, [t], [t.dtype])) # Verify that control dependencies are correctly moved to the subscription. self.assertFalse(c0.op in d.op.control_inputs) self.assertTrue(c.op in d.op.control_inputs) with self.cached_session() as sess: c_out = self.evaluate([c]) n_out = self.evaluate([n]) d_out = self.evaluate([d]) self.assertEqual(n_out, [-2]) self.assertEqual(c_out, [2]) self.assertEqual(d_out, [42]) self.assertEqual(shared, [2, 2, 2]) @test_util.run_deprecated_v1 def testSupportedTypes(self): """Confirm that supported types are correctly detected and handled.""" a = constant_op.constant(1) b = constant_op.constant(1) c = math_ops.add(a, b) def sub(t): return t # Tuples. subscribed = subscribe.subscribe( (a, b), lambda t: script_ops.py_func(sub, [t], [t.dtype])) self.assertIsInstance(subscribed, tuple) self._ExpectSubscribedIdentities(subscribed) # Lists. subscribed = subscribe.subscribe( [a, b], lambda t: script_ops.py_func(sub, [t], [t.dtype])) self.assertIsInstance(subscribed, list) self._ExpectSubscribedIdentities(subscribed) # Dictionaries. subscribed = subscribe.subscribe({ 'first': a, 'second': b }, lambda t: script_ops.py_func(sub, [t], [t.dtype])) self.assertIsInstance(subscribed, dict) self._ExpectSubscribedIdentities(subscribed.values()) # Namedtuples. 
# pylint: disable=invalid-name TensorPair = collections.namedtuple('TensorPair', ['first', 'second']) # pylint: enable=invalid-name pair = TensorPair(a, b) subscribed = subscribe.subscribe( pair, lambda t: script_ops.py_func(sub, [t], [t.dtype])) self.assertIsInstance(subscribed, TensorPair) self._ExpectSubscribedIdentities(subscribed) # Expect an exception to be raised for unsupported types. with self.assertRaisesRegex(TypeError, 'has invalid type'): subscribe.subscribe(c.name, lambda t: script_ops.py_func(sub, [t], [t.dtype])) @test_util.run_deprecated_v1 def testCaching(self): """Confirm caching of control output is recalculated between calls.""" a = constant_op.constant(1) b = constant_op.constant(2) with ops.control_dependencies([a]): c = constant_op.constant(42) shared = {} def sub(t): shared[t] = shared.get(t, 0) + 1 return t a = subscribe.subscribe(a, lambda t: script_ops.py_func(sub, [t], [t.dtype])) with ops.control_dependencies([b]): d = constant_op.constant(11) # If it was using outdated cached control_outputs then # evaling would not trigger the new subscription. 
b = subscribe.subscribe(b, lambda t: script_ops.py_func(sub, [t], [t.dtype])) with self.cached_session() as sess: c_out = self.evaluate([c]) d_out = self.evaluate([d]) self.assertEqual(c_out, [42]) self.assertEqual(d_out, [11]) self.assertEqual(shared, {2: 1, 1: 1}) @test_util.run_deprecated_v1 def testIsSubscribedIdentity(self): """Confirm subscribed identity ops are correctly detected.""" a = constant_op.constant(1) b = constant_op.constant(2) c = math_ops.add(a, b) idop = array_ops.identity(c) c_sub = subscribe.subscribe(c, []) self.assertFalse(subscribe._is_subscribed_identity(a)) self.assertFalse(subscribe._is_subscribed_identity(c)) self.assertFalse(subscribe._is_subscribed_identity(idop)) self.assertTrue(subscribe._is_subscribed_identity(c_sub)) @test_util.run_deprecated_v1 def testSubscribeExtend(self): """Confirm side effect are correctly added for different input types.""" a = constant_op.constant(1) b = constant_op.constant(2) c = math_ops.add(a, b) shared = {} def sub(t, name): shared[name] = shared.get(name, 0) + 1 return t # Subscribe with a first side effect graph, passing an unsubscribed tensor. sub_graph1 = lambda t: sub(t, 'graph1') c_sub = subscribe.subscribe( c, lambda t: script_ops.py_func(sub_graph1, [t], [t.dtype])) # Add a second side effect graph, passing the tensor returned by the # previous call to subscribe(). sub_graph2 = lambda t: sub(t, 'graph2') c_sub2 = subscribe.subscribe( c_sub, lambda t: script_ops.py_func(sub_graph2, [t], [t.dtype])) # Add a third side effect graph, passing the original tensor. sub_graph3 = lambda t: sub(t, 'graph3') c_sub3 = subscribe.subscribe( c, lambda t: script_ops.py_func(sub_graph3, [t], [t.dtype])) # Make sure there's only one identity op matching the source tensor's name. 
graph_ops = ops.get_default_graph().get_operations() name_prefix = c.op.name + '/subscription/Identity' identity_ops = [op for op in graph_ops if op.name.startswith(name_prefix)] self.assertEqual(1, len(identity_ops)) # Expect the objects returned by subscribe() to reference the same tensor. self.assertIs(c_sub, c_sub2) self.assertIs(c_sub, c_sub3) # Expect the three side effect graphs to have been evaluated. with self.cached_session() as sess: self.evaluate([c_sub]) self.assertIn('graph1', shared) self.assertIn('graph2', shared) self.assertIn('graph3', shared) @test_util.run_v1_only('b/120545219') def testSubscribeVariable(self): """Confirm that variables can be subscribed.""" v1 = variable_v1.VariableV1(0.0) v2 = variable_v1.VariableV1(4.0) add = math_ops.add(v1, v2) assign_v1 = v1.assign(3.0) shared = [] def sub(t): shared.append(t) return t v1_sub = subscribe.subscribe( v1, lambda t: script_ops.py_func(sub, [t], [t.dtype])) self.assertTrue(subscribe._is_subscribed_identity(v1_sub)) with self.cached_session() as sess: # Initialize the variables first. self.evaluate([v1.initializer]) self.evaluate([v2.initializer]) # Expect the side effects to be triggered when evaluating the add op as # it will read the value of the variable. self.evaluate([add]) self.assertEqual(1, len(shared)) # Expect the side effect not to be triggered when evaluating the assign # op as it will not access the 'read' output of the variable. self.evaluate([assign_v1]) self.assertEqual(1, len(shared)) self.evaluate([add]) self.assertEqual(2, len(shared)) # Make sure the values read from the variable match the expected ones. 
self.assertEqual([0.0, 3.0], shared) @test_util.run_v1_only('b/120545219') def testResourceType(self): """Confirm that subscribe correctly handles tensors with 'resource' type.""" tensor_array = tensor_array_ops.TensorArray( dtype=dtypes.float32, tensor_array_name='test', size=3, infer_shape=False) writer = tensor_array.write(0, [[4.0, 5.0]]) reader = writer.read(0) shared = [] def sub(t): shared.append(t) return t # TensorArray's handle output tensor has a 'resource' type and cannot be # subscribed as it's not 'numpy compatible' (see dtypes.py). # Expect that the original tensor is returned when subscribing to it. tensor_array_sub = subscribe.subscribe( tensor_array.handle, lambda t: script_ops.py_func(sub, [t], [t.dtype])) self.assertIs(tensor_array_sub, tensor_array.handle) self.assertFalse(subscribe._is_subscribed_identity(tensor_array.handle)) with self.cached_session() as sess: self.evaluate([reader]) self.assertEqual(0, len(shared)) @test_util.run_deprecated_v1 def testMultipleOutputs(self): """Handle subscriptions to multiple outputs from the same op.""" sparse_tensor_1 = sparse_tensor.SparseTensor( indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4]) sparse_tensor_2 = sparse_tensor.SparseTensor( indices=[[0, 0], [1, 2]], values=[2, 3], dense_shape=[3, 4]) # This op has three outputs. sparse_add = sparse_ops.sparse_add(sparse_tensor_1, sparse_tensor_2) self.assertEqual(3, len(sparse_add.op.outputs)) c1 = constant_op.constant(1) with ops.control_dependencies(sparse_add.op.outputs): # This op depends on all the three outputs. neg = -c1 shared = [] def sub(t): shared.append(t) return t # Subscribe the three outputs at once. subscribe.subscribe(sparse_add.op.outputs, lambda t: script_ops.py_func(sub, [t], [t.dtype])) with self.cached_session() as sess: self.evaluate([neg]) # All three ops have been processed. 
self.assertEqual(3, len(shared)) @test_util.run_deprecated_v1 def test_subscribe_tensors_on_different_devices(self): """Side effect ops are added with the same device of the subscribed op.""" c1 = constant_op.constant(10) c2 = constant_op.constant(20) with ops.device('cpu:0'): add = math_ops.add(c1, c2) with ops.device('cpu:1'): mul = math_ops.multiply(c1, c2) def sub(t): return t add_sub = subscribe.subscribe( add, lambda t: script_ops.py_func(sub, [t], [t.dtype])) mul_sub = subscribe.subscribe( mul, lambda t: script_ops.py_func(sub, [t], [t.dtype])) # Expect the identity tensors injected by subscribe to have been created # on the same device as their original tensors. self.assertNotEqual(add_sub.device, mul_sub.device) self.assertEqual(add.device, add_sub.device) self.assertEqual(mul.device, mul_sub.device) @test_util.run_v1_only('b/120545219') def test_subscribe_tensors_within_control_flow_context(self): """Side effect ops are added with the same control flow context.""" c1 = constant_op.constant(10) c2 = constant_op.constant(20) x1 = math_ops.add(c1, c2) x2 = math_ops.multiply(c1, c2) cond = tf_cond.cond( x1 < x2, lambda: math_ops.add(c1, c2, name='then'), lambda: math_ops.subtract(c1, c2, name='else'), name='cond') branch = ops.get_default_graph().get_tensor_by_name('cond/then:0') def context(tensor): return tensor.op._get_control_flow_context() self.assertIs(context(x1), context(x2)) self.assertIsNot(context(x1), context(branch)) results = [] def sub(tensor): results.append(tensor) return tensor tensors = [x1, branch, x2] subscriptions = subscribe.subscribe( tensors, lambda t: script_ops.py_func(sub, [t], [t.dtype])) for tensor, subscription in zip(tensors, subscriptions): self.assertIs(context(tensor), context(subscription)) # Verify that sub(x1) and sub(x2) are in the same context. self.assertIs(context(subscriptions[0]), context(subscriptions[2])) # Verify that sub(x1) and sub(branch) are not. 
self.assertIsNot(context(subscriptions[0]), context(subscriptions[1])) with self.cached_session() as sess: self.evaluate(cond) self.assertEqual(3, len(results)) if __name__ == '__main__': googletest.main()
SubscribeTest
python
pytorch__pytorch
test/distributed/test_c10d_gloo.py
{ "start": 6830, "end": 7542 }
class ____(TestCase): @requires_gloo() @retry_on_connect_failures def test_logging_init(self): os.environ["WORLD_SIZE"] = "1" os.environ["MASTER_ADDR"] = "127.0.0.1" os.environ["MASTER_PORT"] = str(common.find_free_port()) os.environ["RANK"] = "0" previous_handlers = logging.root.handlers c10d.init_process_group(backend="gloo", init_method="env://") current_handlers = logging.root.handlers self.assertEqual(len(previous_handlers), len(current_handlers)) for current, previous in zip(current_handlers, previous_handlers): self.assertEqual(current, previous) c10d.destroy_process_group()
RendezvousEnvTest
python
mlflow__mlflow
mlflow/entities/webhook.py
{ "start": 589, "end": 1148 }
class ____(str, Enum): ACTIVE = "ACTIVE" DISABLED = "DISABLED" def __str__(self) -> str: return self.value @classmethod def from_proto(cls, proto: int) -> Self: proto_name = ProtoWebhookStatus.Name(proto) try: return cls(proto_name) except ValueError: raise ValueError(f"Unknown proto status: {proto_name}") def to_proto(self) -> int: return ProtoWebhookStatus.Value(self.value) def is_active(self) -> bool: return self == WebhookStatus.ACTIVE
WebhookStatus
python
sympy__sympy
sympy/printing/rust.py
{ "start": 7398, "end": 8055 }
class ____(Expr): """ The type casting operator of the Rust language. """ def __init__(self, expr, type_) -> None: super().__init__() self.explicit = expr.is_integer and type_ is not integer self._assumptions = expr._assumptions if self.explicit: setattr(self, 'precedence', PRECEDENCE["Func"] + 10) else: setattr(self, 'precedence', precedence(self.expr)) @property def expr(self): return self.args[0] @property def type_(self): return self.args[1] def sort_key(self, order=None): return self.args[0].sort_key(order=order)
TypeCast
python
airbytehq__airbyte
airbyte-integrations/connectors/source-linkedin-ads/components.py
{ "start": 1466, "end": 3020 }
class ____(HttpClient): """ A custom HTTP client that safely validates query parameters, ensuring that the symbols ():,% are preserved during UTF-8 encoding. """ def _create_prepared_request( self, http_method: str, url: str, dedupe_query_params: bool = False, headers: Optional[Mapping[str, str]] = None, params: Optional[Mapping[str, str]] = None, json: Optional[Mapping[str, Any]] = None, data: Optional[Union[str, Mapping[str, Any]]] = None, ) -> requests.PreparedRequest: """ Prepares an HTTP request with optional deduplication of query parameters and safe encoding. """ if dedupe_query_params: query_params = self._dedupe_query_params(url, params) else: query_params = params or {} query_params = urlencode(query_params, safe="():,%") args = {"method": http_method, "url": url, "headers": headers, "params": query_params} if http_method.upper() in BODY_REQUEST_METHODS: if json and data: raise RequestBodyException( "At the same time only one of the 'request_body_data' and 'request_body_json' functions can return data" ) elif json: args["json"] = json elif data: args["data"] = data prepared_request: requests.PreparedRequest = self._session.prepare_request(requests.Request(**args)) return prepared_request @dataclass
SafeHttpClient
python
kamyu104__LeetCode-Solutions
Python/find-largest-value-in-each-tree-row.py
{ "start": 664, "end": 1042 }
class ____(object): def largestValues(self, root): """ :type root: TreeNode :rtype: List[int] """ result = [] curr = [root] while any(curr): result.append(max(node.val for node in curr)) curr = [child for node in curr for child in (node.left, node.right) if child] return result
Solution2
python
pytorch__pytorch
torch/_dynamo/exc.py
{ "start": 6078, "end": 6367 }
class ____(Enum): DYNAMIC_CONTROL_FLOW = auto() ANTI_PATTERN = auto() STANDARD_LIBRARY = auto() CONSTRAINT_VIOLATION = auto() DYNAMIC_DIM = auto() INVALID_INPUT = auto() INVALID_OUTPUT = auto() UNSUPPORTED_ALIASED_MUTATED_DYNAMIC_INPUTS = auto()
UserErrorType
python
PrefectHQ__prefect
src/prefect/events/schemas/labelling.py
{ "start": 2131, "end": 3103 }
class ____(RootModel[Dict[str, str]]): def keys(self) -> Iterable[str]: return self.root.keys() def items(self) -> Iterable[Tuple[str, str]]: return self.root.items() def __getitem__(self, label: str) -> str: return self.root[label] def __setitem__(self, label: str, value: str) -> str: self.root[label] = value return value def __contains__(self, key: str) -> bool: return key in self.root def get(self, label: str, default: Optional[str] = None) -> Optional[str]: return self.root.get(label, default) def as_label_value_array(self) -> List[Dict[str, str]]: return [{"label": label, "value": value} for label, value in self.items()] @property def labels(self) -> LabelDiver: return LabelDiver(self.root) def has_all_labels(self, labels: Dict[str, str]) -> bool: return all(self.root.get(label) == value for label, value in labels.items())
Labelled
python
py-pdf__pypdf
pypdf/constants.py
{ "start": 19217, "end": 19357 }
class ____(IntFlag): """A class used as an enumerable flag for formatting an outline font.""" italic = 1 bold = 2
OutlineFontFlag
python
pandas-dev__pandas
pandas/tests/dtypes/test_missing.py
{ "start": 1496, "end": 23386 }
class ____: def test_0d_array(self): assert isna(np.array(np.nan)) assert not isna(np.array(0.0)) assert not isna(np.array(0)) # test object dtype assert isna(np.array(np.nan, dtype=object)) assert not isna(np.array(0.0, dtype=object)) assert not isna(np.array(0, dtype=object)) @pytest.mark.parametrize("shape", [(4, 0), (4,)]) def test_empty_object(self, shape): arr = np.empty(shape=shape, dtype=object) result = isna(arr) expected = np.ones(shape=shape, dtype=bool) tm.assert_numpy_array_equal(result, expected) @pytest.mark.parametrize("isna_f", [isna, isnull]) def test_isna_isnull(self, isna_f): assert not isna_f(1.0) assert isna_f(None) assert isna_f(np.nan) assert float("nan") assert not isna_f(np.inf) assert not isna_f(-np.inf) # type assert not isna_f(type(Series(dtype=object))) assert not isna_f(type(Series(dtype=np.float64))) assert not isna_f(type(pd.DataFrame())) @pytest.mark.parametrize("isna_f", [isna, isnull]) @pytest.mark.parametrize( "data", [ np.arange(4, dtype=float), [0.0, 1.0, 0.0, 1.0], Series(list("abcd"), dtype=object), date_range("2020-01-01", periods=4), ], ) @pytest.mark.parametrize( "index", [ date_range("2020-01-01", periods=4), range(4), period_range("2020-01-01", periods=4), ], ) def test_isna_isnull_frame(self, isna_f, data, index): # frame df = pd.DataFrame(data, index=index) result = isna_f(df) expected = df.apply(isna_f) tm.assert_frame_equal(result, expected) def test_isna_lists(self): result = isna([[False]]) exp = np.array([[False]]) tm.assert_numpy_array_equal(result, exp) result = isna([[1], [2]]) exp = np.array([[False], [False]]) tm.assert_numpy_array_equal(result, exp) # list of strings / unicode result = isna(["foo", "bar"]) exp = np.array([False, False]) tm.assert_numpy_array_equal(result, exp) result = isna(["foo", "bar"]) exp = np.array([False, False]) tm.assert_numpy_array_equal(result, exp) # GH20675 result = isna([np.nan, "world"]) exp = np.array([True, False]) tm.assert_numpy_array_equal(result, exp) def 
test_isna_nat(self): result = isna([NaT]) exp = np.array([True]) tm.assert_numpy_array_equal(result, exp) result = isna(np.array([NaT], dtype=object)) exp = np.array([True]) tm.assert_numpy_array_equal(result, exp) def test_isna_numpy_nat(self): arr = np.array( [ NaT, np.datetime64("NaT"), np.timedelta64("NaT"), np.datetime64("NaT", "s"), ] ) result = isna(arr) expected = np.array([True] * 4) tm.assert_numpy_array_equal(result, expected) def test_isna_datetime(self): assert not isna(datetime.now()) assert notna(datetime.now()) idx = date_range("1/1/1990", periods=20) exp = np.ones(len(idx), dtype=bool) tm.assert_numpy_array_equal(notna(idx), exp) idx = np.asarray(idx) idx[0] = iNaT idx = DatetimeIndex(idx) mask = isna(idx) assert mask[0] exp = np.array([True] + [False] * (len(idx) - 1), dtype=bool) tm.assert_numpy_array_equal(mask, exp) # GH 9129 pidx = idx.to_period(freq="M") mask = isna(pidx) assert mask[0] exp = np.array([True] + [False] * (len(idx) - 1), dtype=bool) tm.assert_numpy_array_equal(mask, exp) mask = isna(pidx[1:]) exp = np.zeros(len(mask), dtype=bool) tm.assert_numpy_array_equal(mask, exp) def test_isna_old_datetimelike(self): # isna_old should work for dt64tz, td64, and period, not just tznaive dti = date_range("2016-01-01", periods=3) dta = dti._data dta[-1] = NaT expected = np.array([False, False, True], dtype=bool) objs = [dta, dta.tz_localize("US/Eastern"), dta - dta, dta.to_period("D")] for obj in objs: result = isna(obj) tm.assert_numpy_array_equal(result, expected) @pytest.mark.parametrize( "value, expected", [ (np.complex128(np.nan), True), (np.float64(1), False), (np.array([1, 1 + 0j, np.nan, 3]), np.array([False, False, True, False])), ( np.array([1, 1 + 0j, np.nan, 3], dtype=object), np.array([False, False, True, False]), ), ( np.array([1, 1 + 0j, np.nan, 3]).astype(object), np.array([False, False, True, False]), ), ], ) def test_complex(self, value, expected): result = isna(value) if is_scalar(result): assert result is expected else: 
tm.assert_numpy_array_equal(result, expected) def test_datetime_other_units(self): idx = DatetimeIndex(["2011-01-01", "NaT", "2011-01-02"]) exp = np.array([False, True, False]) tm.assert_numpy_array_equal(isna(idx), exp) tm.assert_numpy_array_equal(notna(idx), ~exp) tm.assert_numpy_array_equal(isna(idx.values), exp) tm.assert_numpy_array_equal(notna(idx.values), ~exp) @pytest.mark.parametrize( "dtype", [ "datetime64[D]", "datetime64[h]", "datetime64[m]", "datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]", ], ) def test_datetime_other_units_astype(self, dtype): idx = DatetimeIndex(["2011-01-01", "NaT", "2011-01-02"]) values = idx.values.astype(dtype) exp = np.array([False, True, False]) tm.assert_numpy_array_equal(isna(values), exp) tm.assert_numpy_array_equal(notna(values), ~exp) exp = Series([False, True, False]) s = Series(values) tm.assert_series_equal(isna(s), exp) tm.assert_series_equal(notna(s), ~exp) s = Series(values, dtype=object) tm.assert_series_equal(isna(s), exp) tm.assert_series_equal(notna(s), ~exp) def test_timedelta_other_units(self): idx = TimedeltaIndex(["1 days", "NaT", "2 days"]) exp = np.array([False, True, False]) tm.assert_numpy_array_equal(isna(idx), exp) tm.assert_numpy_array_equal(notna(idx), ~exp) tm.assert_numpy_array_equal(isna(idx.values), exp) tm.assert_numpy_array_equal(notna(idx.values), ~exp) @pytest.mark.parametrize( "dtype", [ "timedelta64[D]", "timedelta64[h]", "timedelta64[m]", "timedelta64[s]", "timedelta64[ms]", "timedelta64[us]", "timedelta64[ns]", ], ) def test_timedelta_other_units_dtype(self, dtype): idx = TimedeltaIndex(["1 days", "NaT", "2 days"]) values = idx.values.astype(dtype) exp = np.array([False, True, False]) tm.assert_numpy_array_equal(isna(values), exp) tm.assert_numpy_array_equal(notna(values), ~exp) exp = Series([False, True, False]) s = Series(values) tm.assert_series_equal(isna(s), exp) tm.assert_series_equal(notna(s), ~exp) s = Series(values, dtype=object) 
tm.assert_series_equal(isna(s), exp) tm.assert_series_equal(notna(s), ~exp) def test_period(self): idx = pd.PeriodIndex(["2011-01", "NaT", "2012-01"], freq="M") exp = np.array([False, True, False]) tm.assert_numpy_array_equal(isna(idx), exp) tm.assert_numpy_array_equal(notna(idx), ~exp) exp = Series([False, True, False]) s = Series(idx) tm.assert_series_equal(isna(s), exp) tm.assert_series_equal(notna(s), ~exp) s = Series(idx, dtype=object) tm.assert_series_equal(isna(s), exp) tm.assert_series_equal(notna(s), ~exp) def test_decimal(self): # scalars GH#23530 a = Decimal("1.0") assert isna(a) is False assert notna(a) is True b = Decimal("NaN") assert isna(b) is True assert notna(b) is False # array arr = np.array([a, b]) expected = np.array([False, True]) result = isna(arr) tm.assert_numpy_array_equal(result, expected) result = notna(arr) tm.assert_numpy_array_equal(result, ~expected) # series ser = Series(arr) expected = Series(expected) result = isna(ser) tm.assert_series_equal(result, expected) result = notna(ser) tm.assert_series_equal(result, ~expected) # index idx = Index(arr) expected = np.array([False, True]) result = isna(idx) tm.assert_numpy_array_equal(result, expected) result = notna(idx) tm.assert_numpy_array_equal(result, ~expected) @pytest.mark.parametrize("dtype_equal", [True, False]) def test_array_equivalent(dtype_equal): assert array_equivalent( np.array([np.nan, np.nan]), np.array([np.nan, np.nan]), dtype_equal=dtype_equal ) assert array_equivalent( np.array([np.nan, 1, np.nan]), np.array([np.nan, 1, np.nan]), dtype_equal=dtype_equal, ) assert array_equivalent( np.array([np.nan, None], dtype="object"), np.array([np.nan, None], dtype="object"), dtype_equal=dtype_equal, ) # Check the handling of nested arrays in array_equivalent_object assert array_equivalent( np.array([np.array([np.nan, None], dtype="object"), None], dtype="object"), np.array([np.array([np.nan, None], dtype="object"), None], dtype="object"), dtype_equal=dtype_equal, ) assert 
array_equivalent( np.array([np.nan, 1 + 1j], dtype="complex"), np.array([np.nan, 1 + 1j], dtype="complex"), dtype_equal=dtype_equal, ) assert not array_equivalent( np.array([np.nan, 1 + 1j], dtype="complex"), np.array([np.nan, 1 + 2j], dtype="complex"), dtype_equal=dtype_equal, ) assert not array_equivalent( np.array([np.nan, 1, np.nan]), np.array([np.nan, 2, np.nan]), dtype_equal=dtype_equal, ) assert not array_equivalent( np.array(["a", "b", "c", "d"]), np.array(["e", "e"]), dtype_equal=dtype_equal ) assert array_equivalent( Index([0, np.nan]), Index([0, np.nan]), dtype_equal=dtype_equal ) assert not array_equivalent( Index([0, np.nan]), Index([1, np.nan]), dtype_equal=dtype_equal ) @pytest.mark.parametrize("dtype_equal", [True, False]) def test_array_equivalent_tdi(dtype_equal): assert array_equivalent( TimedeltaIndex([0, np.nan]), TimedeltaIndex([0, np.nan]), dtype_equal=dtype_equal, ) assert not array_equivalent( TimedeltaIndex([0, np.nan]), TimedeltaIndex([1, np.nan]), dtype_equal=dtype_equal, ) @pytest.mark.parametrize("dtype_equal", [True, False]) def test_array_equivalent_dti(dtype_equal): assert array_equivalent( DatetimeIndex([0, np.nan]), DatetimeIndex([0, np.nan]), dtype_equal=dtype_equal ) assert not array_equivalent( DatetimeIndex([0, np.nan]), DatetimeIndex([1, np.nan]), dtype_equal=dtype_equal ) dti1 = DatetimeIndex([0, np.nan], tz="US/Eastern") dti2 = DatetimeIndex([0, np.nan], tz="CET") dti3 = DatetimeIndex([1, np.nan], tz="US/Eastern") assert array_equivalent( dti1, dti1, dtype_equal=dtype_equal, ) assert not array_equivalent( dti1, dti3, dtype_equal=dtype_equal, ) # The rest are not dtype_equal assert not array_equivalent(DatetimeIndex([0, np.nan]), dti1) assert array_equivalent( dti2, dti1, ) assert not array_equivalent(DatetimeIndex([0, np.nan]), TimedeltaIndex([0, np.nan])) @pytest.mark.parametrize( "val", [1, 1.1, 1 + 1j, True, "abc", [1, 2], (1, 2), {1, 2}, {"a": 1}, None] ) def test_array_equivalent_series(val): arr = np.array([1, 2]) 
assert not array_equivalent(Series([arr, arr]), Series([arr, val])) def test_array_equivalent_array_mismatched_shape(): # to trigger the motivating bug, the first N elements of the arrays need # to match first = np.array([1, 2, 3]) second = np.array([1, 2]) left = Series([first, "a"], dtype=object) right = Series([second, "a"], dtype=object) assert not array_equivalent(left, right) def test_array_equivalent_array_mismatched_dtype(): # same shape, different dtype can still be equivalent first = np.array([1, 2], dtype=np.float64) second = np.array([1, 2]) left = Series([first, "a"], dtype=object) right = Series([second, "a"], dtype=object) assert array_equivalent(left, right) def test_array_equivalent_different_dtype_but_equal(): # Unclear if this is exposed anywhere in the public-facing API assert array_equivalent(np.array([1, 2]), np.array([1.0, 2.0])) @pytest.mark.parametrize( "lvalue, rvalue", [ # There are 3 variants for each of lvalue and rvalue. We include all # three for the tz-naive `now` and exclude the datetim64 variant # for utcnow because it drops tzinfo. 
(fix_now, fix_utcnow), (fix_now.to_datetime64(), fix_utcnow), (fix_now.to_pydatetime(), fix_utcnow), (fix_now.to_datetime64(), fix_utcnow.to_pydatetime()), (fix_now.to_pydatetime(), fix_utcnow.to_pydatetime()), ], ) def test_array_equivalent_tzawareness(lvalue, rvalue): # we shouldn't raise if comparing tzaware and tznaive datetimes left = np.array([lvalue], dtype=object) right = np.array([rvalue], dtype=object) assert not array_equivalent(left, right, strict_nan=True) assert not array_equivalent(left, right, strict_nan=False) def test_array_equivalent_compat(): # see gh-13388 m = np.array([(1, 2), (3, 4)], dtype=[("a", int), ("b", float)]) n = np.array([(1, 2), (3, 4)], dtype=[("a", int), ("b", float)]) assert array_equivalent(m, n, strict_nan=True) assert array_equivalent(m, n, strict_nan=False) m = np.array([(1, 2), (3, 4)], dtype=[("a", int), ("b", float)]) n = np.array([(1, 2), (4, 3)], dtype=[("a", int), ("b", float)]) assert not array_equivalent(m, n, strict_nan=True) assert not array_equivalent(m, n, strict_nan=False) m = np.array([(1, 2), (3, 4)], dtype=[("a", int), ("b", float)]) n = np.array([(1, 2), (3, 4)], dtype=[("b", int), ("a", float)]) assert not array_equivalent(m, n, strict_nan=True) assert not array_equivalent(m, n, strict_nan=False) @pytest.mark.parametrize("dtype", ["O", "S", "U"]) def test_array_equivalent_str(dtype): assert array_equivalent( np.array(["A", "B"], dtype=dtype), np.array(["A", "B"], dtype=dtype) ) assert not array_equivalent( np.array(["A", "B"], dtype=dtype), np.array(["A", "X"], dtype=dtype) ) @pytest.mark.parametrize("strict_nan", [True, False]) def test_array_equivalent_nested(strict_nan): # reached in groupby aggregations, make sure we use np.any when checking # if the comparison is truthy left = np.array([np.array([50, 70, 90]), np.array([20, 30])], dtype=object) right = np.array([np.array([50, 70, 90]), np.array([20, 30])], dtype=object) assert array_equivalent(left, right, strict_nan=strict_nan) assert not 
array_equivalent(left, right[::-1], strict_nan=strict_nan) left = np.empty(2, dtype=object) left[:] = [np.array([50, 70, 90]), np.array([20, 30, 40])] right = np.empty(2, dtype=object) right[:] = [np.array([50, 70, 90]), np.array([20, 30, 40])] assert array_equivalent(left, right, strict_nan=strict_nan) assert not array_equivalent(left, right[::-1], strict_nan=strict_nan) left = np.array([np.array([50, 50, 50]), np.array([40, 40])], dtype=object) right = np.array([50, 40]) assert not array_equivalent(left, right, strict_nan=strict_nan) @pytest.mark.filterwarnings("ignore:elementwise comparison failed:DeprecationWarning") @pytest.mark.parametrize("strict_nan", [True, False]) def test_array_equivalent_nested2(strict_nan): # more than one level of nesting left = np.array( [ np.array([np.array([50, 70]), np.array([90])], dtype=object), np.array([np.array([20, 30])], dtype=object), ], dtype=object, ) right = np.array( [ np.array([np.array([50, 70]), np.array([90])], dtype=object), np.array([np.array([20, 30])], dtype=object), ], dtype=object, ) assert array_equivalent(left, right, strict_nan=strict_nan) assert not array_equivalent(left, right[::-1], strict_nan=strict_nan) left = np.array([np.array([np.array([50, 50, 50])], dtype=object)], dtype=object) right = np.array([50]) assert not array_equivalent(left, right, strict_nan=strict_nan) @pytest.mark.parametrize("strict_nan", [True, False]) def test_array_equivalent_nested_list(strict_nan): left = np.array([[50, 70, 90], [20, 30]], dtype=object) right = np.array([[50, 70, 90], [20, 30]], dtype=object) assert array_equivalent(left, right, strict_nan=strict_nan) assert not array_equivalent(left, right[::-1], strict_nan=strict_nan) left = np.array([[50, 50, 50], [40, 40]], dtype=object) right = np.array([50, 40]) assert not array_equivalent(left, right, strict_nan=strict_nan) @pytest.mark.filterwarnings("ignore:elementwise comparison failed:DeprecationWarning") @pytest.mark.xfail(reason="failing") 
@pytest.mark.parametrize("strict_nan", [True, False]) def test_array_equivalent_nested_mixed_list(strict_nan): # mixed arrays / lists in left and right # https://github.com/pandas-dev/pandas/issues/50360 left = np.array([np.array([1, 2, 3]), np.array([4, 5])], dtype=object) right = np.array([[1, 2, 3], [4, 5]], dtype=object) assert array_equivalent(left, right, strict_nan=strict_nan) assert not array_equivalent(left, right[::-1], strict_nan=strict_nan) # multiple levels of nesting left = np.array( [ np.array([np.array([1, 2, 3]), np.array([4, 5])], dtype=object), np.array([np.array([6]), np.array([7, 8]), np.array([9])], dtype=object), ], dtype=object, ) right = np.array([[[1, 2, 3], [4, 5]], [[6], [7, 8], [9]]], dtype=object) assert array_equivalent(left, right, strict_nan=strict_nan) assert not array_equivalent(left, right[::-1], strict_nan=strict_nan) # same-length lists subarr = np.empty(2, dtype=object) subarr[:] = [ np.array([None, "b"], dtype=object), np.array(["c", "d"], dtype=object), ] left = np.array([subarr, None], dtype=object) right = np.array([[[None, "b"], ["c", "d"]], None], dtype=object) assert array_equivalent(left, right, strict_nan=strict_nan) assert not array_equivalent(left, right[::-1], strict_nan=strict_nan) @pytest.mark.xfail(reason="failing") @pytest.mark.parametrize("strict_nan", [True, False]) def test_array_equivalent_nested_dicts(strict_nan): left = np.array([{"f1": 1, "f2": np.array(["a", "b"], dtype=object)}], dtype=object) right = np.array( [{"f1": 1, "f2": np.array(["a", "b"], dtype=object)}], dtype=object ) assert array_equivalent(left, right, strict_nan=strict_nan) assert not array_equivalent(left, right[::-1], strict_nan=strict_nan) right2 = np.array([{"f1": 1, "f2": ["a", "b"]}], dtype=object) assert array_equivalent(left, right2, strict_nan=strict_nan) assert not array_equivalent(left, right2[::-1], strict_nan=strict_nan) def test_array_equivalent_index_with_tuples(): # GH#48446 idx1 = Index(np.array([(pd.NA, 4), (1, 1)], 
dtype="object")) idx2 = Index(np.array([(1, 1), (pd.NA, 4)], dtype="object")) assert not array_equivalent(idx1, idx2) assert not idx1.equals(idx2) assert not array_equivalent(idx2, idx1) assert not idx2.equals(idx1) idx1 = Index(np.array([(4, pd.NA), (1, 1)], dtype="object")) idx2 = Index(np.array([(1, 1), (4, pd.NA)], dtype="object")) assert not array_equivalent(idx1, idx2) assert not idx1.equals(idx2) assert not array_equivalent(idx2, idx1) assert not idx2.equals(idx1) @pytest.mark.parametrize( "dtype, na_value", [ # Datetime-like (np.dtype("M8[ns]"), np.datetime64("NaT", "ns")), (np.dtype("m8[ns]"), np.timedelta64("NaT", "ns")), (DatetimeTZDtype.construct_from_string("datetime64[ns, US/Eastern]"), NaT), (PeriodDtype("M"), NaT), # Integer ("u1", 0), ("u2", 0), ("u4", 0), ("u8", 0), ("i1", 0), ("i2", 0), ("i4", 0), ("i8", 0), # Bool ("bool", False), # Float ("f2", np.nan), ("f4", np.nan), ("f8", np.nan), # Complex ("c8", np.nan), ("c16", np.nan), # Object ("O", np.nan), # Interval (IntervalDtype(), np.nan), ], ) def test_na_value_for_dtype(dtype, na_value): result = na_value_for_dtype(pandas_dtype(dtype)) # identify check doesn't work for datetime64/timedelta64("NaT") bc they # are not singletons assert result is na_value or ( isna(result) and isna(na_value) and type(result) is type(na_value) )
TestIsNA
python
allegroai__clearml
clearml/backend_api/services/v2_23/datasets.py
{ "start": 94934, "end": 97213 }
class ____(Request): """ Delete frames in a draft version. :param version: Draft version ID :type version: str :param frames: Frame IDs to delete :type frames: Sequence[str] :param force: Ignore ongoing annotation tasks with this version as input :type force: bool """ _service = "datasets" _action = "delete_frames" _version = "2.23" _schema = { "definitions": {}, "properties": { "force": { "default": False, "description": "Ignore ongoing annotation tasks with this version as input", "type": "boolean", }, "frames": { "description": "Frame IDs to delete", "items": {"type": "string"}, "type": "array", }, "version": {"description": "Draft version ID", "type": "string"}, }, "required": ["version", "frames"], "type": "object", } def __init__(self, version, frames, force=False, **kwargs): super(DeleteFramesRequest, self).__init__(**kwargs) self.version = version self.frames = frames self.force = force @schema_property("version") def version(self): return self._property_version @version.setter def version(self, value): if value is None: self._property_version = None return self.assert_isinstance(value, "version", six.string_types) self._property_version = value @schema_property("frames") def frames(self): return self._property_frames @frames.setter def frames(self, value): if value is None: self._property_frames = None return self.assert_isinstance(value, "frames", (list, tuple)) self.assert_isinstance(value, "frames", six.string_types, is_array=True) self._property_frames = value @schema_property("force") def force(self): return self._property_force @force.setter def force(self, value): if value is None: self._property_force = None return self.assert_isinstance(value, "force", (bool,)) self._property_force = value
DeleteFramesRequest
python
psf__black
src/blib2to3/pytree.py
{ "start": 7046, "end": 11332 }
class ____(Base): """Concrete implementation for interior nodes.""" fixers_applied: list[Any] | None used_names: set[str] | None def __init__( self, type: int, children: list[NL], context: Any | None = None, prefix: str | None = None, fixers_applied: list[Any] | None = None, ) -> None: """ Initializer. Takes a type constant (a symbol number >= 256), a sequence of child nodes, and an optional context keyword argument. As a side effect, the parent pointers of the children are updated. """ assert type >= 256, type self.type = type self.children = list(children) for ch in self.children: assert ch.parent is None, repr(ch) ch.parent = self self.invalidate_sibling_maps() if prefix is not None: self.prefix = prefix if fixers_applied: self.fixers_applied = fixers_applied[:] else: self.fixers_applied = None def __repr__(self) -> str: """Return a canonical string representation.""" assert self.type is not None return f"{self.__class__.__name__}({type_repr(self.type)}, {self.children!r})" def __str__(self) -> str: """ Return a pretty string representation. This reproduces the input source exactly. """ return "".join(map(str, self.children)) def _eq(self, other: Base) -> bool: """Compare two nodes for equality.""" return (self.type, self.children) == (other.type, other.children) def clone(self) -> "Node": assert self.type is not None """Return a cloned (deep) copy of self.""" return Node( self.type, [ch.clone() for ch in self.children], fixers_applied=self.fixers_applied, ) def post_order(self) -> Iterator[NL]: """Return a post-order iterator for the tree.""" for child in self.children: yield from child.post_order() yield self def pre_order(self) -> Iterator[NL]: """Return a pre-order iterator for the tree.""" yield self for child in self.children: yield from child.pre_order() @property def prefix(self) -> str: """ The whitespace and comments preceding this node in the input. 
""" if not self.children: return "" return self.children[0].prefix @prefix.setter def prefix(self, prefix: str) -> None: if self.children: self.children[0].prefix = prefix def set_child(self, i: int, child: NL) -> None: """ Equivalent to 'node.children[i] = child'. This method also sets the child's parent attribute appropriately. """ child.parent = self self.children[i].parent = None self.children[i] = child self.changed() self.invalidate_sibling_maps() def insert_child(self, i: int, child: NL) -> None: """ Equivalent to 'node.children.insert(i, child)'. This method also sets the child's parent attribute appropriately. """ child.parent = self self.children.insert(i, child) self.changed() self.invalidate_sibling_maps() def append_child(self, child: NL) -> None: """ Equivalent to 'node.children.append(child)'. This method also sets the child's parent attribute appropriately. """ child.parent = self self.children.append(child) self.changed() self.invalidate_sibling_maps() def invalidate_sibling_maps(self) -> None: self.prev_sibling_map: dict[int, NL | None] | None = None self.next_sibling_map: dict[int, NL | None] | None = None def update_sibling_maps(self) -> None: _prev: dict[int, NL | None] = {} _next: dict[int, NL | None] = {} self.prev_sibling_map = _prev self.next_sibling_map = _next previous: NL | None = None for current in self.children: _prev[id(current)] = previous _next[id(previous)] = current previous = current _next[id(current)] = None
Node
python
django-haystack__django-haystack
haystack/management/commands/rebuild_index.py
{ "start": 149, "end": 2177 }
class ____(BaseCommand): help = "Completely rebuilds the search index by removing the old data and then updating." # noqa A003 def add_arguments(self, parser): parser.add_argument( "--noinput", action="store_false", dest="interactive", default=True, help="If provided, no prompts will be issued to the user and the data will be wiped out.", ) parser.add_argument( "-u", "--using", action="append", default=[], help="Update only the named backend (can be used multiple times). " "By default all backends will be updated.", ) parser.add_argument( "-k", "--workers", default=0, type=int, help="Allows for the use multiple workers to parallelize indexing. Requires multiprocessing.", ) parser.add_argument( "--nocommit", action="store_false", dest="commit", default=True, help="Will pass commit=False to the backend.", ) parser.add_argument( "-b", "--batch-size", dest="batchsize", type=int, help="Number of items to index at once.", ) parser.add_argument( "-t", "--max-retries", action="store", dest="max_retries", type=int, default=DEFAULT_MAX_RETRIES, help="Maximum number of attempts to write to the backend when an error occurs.", ) def handle(self, **options): clear_options = options.copy() update_options = options.copy() for key in ("batchsize", "workers", "max_retries"): del clear_options[key] for key in ("interactive",): del update_options[key] call_command("clear_index", **clear_options) call_command("update_index", **update_options)
Command
python
streamlit__streamlit
lib/streamlit/elements/plotly_chart.py
{ "start": 11882, "end": 28104 }
class ____: @overload def plotly_chart( self, figure_or_data: FigureOrData, use_container_width: bool | None = None, *, width: Width = "stretch", height: Height = "content", theme: Literal["streamlit"] | None = "streamlit", key: Key | None = None, on_select: Literal["ignore"], # No default value here to make it work with mypy selection_mode: SelectionMode | Iterable[SelectionMode] = ( "points", "box", "lasso", ), **kwargs: Any, ) -> DeltaGenerator: ... @overload def plotly_chart( self, figure_or_data: FigureOrData, use_container_width: bool | None = None, *, width: Width = "stretch", height: Height = "content", theme: Literal["streamlit"] | None = "streamlit", key: Key | None = None, on_select: Literal["rerun"] | WidgetCallback = "rerun", selection_mode: SelectionMode | Iterable[SelectionMode] = ( "points", "box", "lasso", ), **kwargs: Any, ) -> PlotlyState: ... @gather_metrics("plotly_chart") def plotly_chart( self, figure_or_data: FigureOrData, use_container_width: bool | None = None, *, width: Width = "stretch", height: Height = "content", theme: Literal["streamlit"] | None = "streamlit", key: Key | None = None, on_select: Literal["rerun", "ignore"] | WidgetCallback = "ignore", selection_mode: SelectionMode | Iterable[SelectionMode] = ( "points", "box", "lasso", ), config: dict[str, Any] | None = None, **kwargs: Any, ) -> DeltaGenerator | PlotlyState: """Display an interactive Plotly chart. `Plotly <https://plot.ly/python>`_ is a charting library for Python. The arguments to this function closely follow the ones for Plotly's ``plot()`` function. To show Plotly charts in Streamlit, call ``st.plotly_chart`` wherever you would call Plotly's ``py.plot`` or ``py.iplot``. .. Important:: You must install ``plotly>=4.0.0`` to use this command. Your app's performance may be enhanced by installing ``orjson`` as well. You can install all charting dependencies (except Bokeh) as an extra with Streamlit: .. 
code-block:: shell pip install streamlit[charts] Parameters ---------- figure_or_data : plotly.graph_objs.Figure, plotly.graph_objs.Data,\ or dict/list of plotly.graph_objs.Figure/Data The Plotly ``Figure`` or ``Data`` object to render. See https://plot.ly/python/ for examples of graph descriptions. .. note:: If your chart contains more than 1000 data points, Plotly will use a WebGL renderer to display the chart. Different browsers have different limits on the number of WebGL contexts per page. If you have multiple WebGL contexts on a page, you may need to switch to SVG rendering mode. You can do this by setting ``render_mode="svg"`` within the figure. For example, the following code defines a Plotly Express line chart that will render in SVG mode when passed to ``st.plotly_chart``: ``px.line(df, x="x", y="y", render_mode="svg")``. width : "stretch", "content", or int The width of the chart element. This can be one of the following: - ``"stretch"`` (default): The width of the element matches the width of the parent container. - ``"content"``: The width of the element matches the width of its content, but doesn't exceed the width of the parent container. - An integer specifying the width in pixels: The element has a fixed width. If the specified width is greater than the width of the parent container, the width of the element matches the width of the parent container. height : "stretch", "content", or int How to size the chart's height. Can be one of: - ``"content"`` (default): Size the chart to fit its contents. - ``"stretch"``: Expand to the height of the parent container. - An integer: Set the chart height to this many pixels. use_container_width : bool or None Whether to override the figure's native width with the width of the parent container. This can be one of the following: - ``None`` (default): Streamlit will use the value of ``width``. - ``True``: Streamlit sets the width of the figure to match the width of the parent container. 
- ``False``: Streamlit sets the width of the figure to fit its contents according to the plotting library, up to the width of the parent container. .. deprecated:: ``use_container_width`` is deprecated and will be removed in a future release. For ``use_container_width=True``, use ``width="stretch"``. theme : "streamlit" or None The theme of the chart. If ``theme`` is ``"streamlit"`` (default), Streamlit uses its own design default. If ``theme`` is ``None``, Streamlit falls back to the default behavior of the library. The ``"streamlit"`` theme can be partially customized through the configuration options ``theme.chartCategoricalColors`` and ``theme.chartSequentialColors``. Font configuration options are also applied. key : str An optional string to use for giving this element a stable identity. If ``key`` is ``None`` (default), this element's identity will be determined based on the values of the other parameters. Additionally, if selections are activated and ``key`` is provided, Streamlit will register the key in Session State to store the selection state. The selection state is read-only. on_select : "ignore" or "rerun" or callable How the figure should respond to user selection events. This controls whether or not the figure behaves like an input widget. ``on_select`` can be one of the following: - ``"ignore"`` (default): Streamlit will not react to any selection events in the chart. The figure will not behave like an input widget. - ``"rerun"``: Streamlit will rerun the app when the user selects data in the chart. In this case, ``st.plotly_chart`` will return the selection data as a dictionary. - A ``callable``: Streamlit will rerun the app and execute the ``callable`` as a callback function before the rest of the app. In this case, ``st.plotly_chart`` will return the selection data as a dictionary. selection_mode : "points", "box", "lasso" or an Iterable of these The selection mode of the chart. 
This can be one of the following: - ``"points"``: The chart will allow selections based on individual data points. - ``"box"``: The chart will allow selections based on rectangular areas. - ``"lasso"``: The chart will allow selections based on freeform areas. - An ``Iterable`` of the above options: The chart will allow selections based on the modes specified. All selections modes are activated by default. config : dict or None A dictionary of Plotly configuration options. This is passed to Plotly's ``show()`` function. For more information about Plotly configuration options, see Plotly's documentation on `Configuration in Python <https://plotly.com/python/configuration-options/>`_. **kwargs Additional arguments accepted by Plotly's ``plot()`` function. This supports ``config``, a dictionary of Plotly configuration options. For more information about Plotly configuration options, see Plotly's documentation on `Configuration in Python <https://plotly.com/python/configuration-options/>`_. .. deprecated:: ``**kwargs`` are deprecated and will be removed in a future release. Use ``config`` instead. Returns ------- element or dict If ``on_select`` is ``"ignore"`` (default), this command returns an internal placeholder for the chart element. Otherwise, this command returns a dictionary-like object that supports both key and attribute notation. The attributes are described by the ``PlotlyState`` dictionary schema. Examples -------- **Example 1: Basic Plotly chart** The example below comes from the examples at https://plot.ly/python. Note that ``plotly.figure_factory`` requires ``scipy`` to run. >>> import plotly.figure_factory as ff >>> import streamlit as st >>> from numpy.random import default_rng as rng >>> >>> hist_data = [ ... rng(0).standard_normal(200) - 2, ... rng(1).standard_normal(200), ... rng(2).standard_normal(200) + 2, ... ] >>> group_labels = ["Group 1", "Group 2", "Group 3"] >>> >>> fig = ff.create_distplot( ... 
hist_data, group_labels, bin_size=[0.1, 0.25, 0.5] ... ) >>> >>> st.plotly_chart(fig) .. output:: https://doc-plotly-chart.streamlit.app/ height: 550px **Example 2: Plotly Chart with configuration** By default, Plotly charts have scroll zoom enabled. If you have a longer page and want to avoid conflicts between page scrolling and zooming, you can use Plotly's configuration options to disable scroll zoom. In the following example, scroll zoom is disabled, but the zoom buttons are still enabled in the modebar. >>> import plotly.graph_objects as go >>> import streamlit as st >>> >>> fig = go.Figure() >>> fig.add_trace( ... go.Scatter( ... x=[1, 2, 3, 4, 5], ... y=[1, 3, 2, 5, 4] ... ) ... ) >>> >>> st.plotly_chart(fig, config = {'scrollZoom': False}) .. output:: https://doc-plotly-chart-config.streamlit.app/ height: 550px """ if use_container_width is not None: show_deprecation_warning( make_deprecated_name_warning( "use_container_width", "width", "2025-12-31", "For `use_container_width=True`, use `width='stretch'`. " "For `use_container_width=False`, use `width='content'`.", include_st_prefix=False, ), show_in_browser=False, ) if use_container_width: width = "stretch" elif not isinstance(width, int): width = "content" validate_width(width, allow_content=True) validate_height(height, allow_content=True) import plotly.io import plotly.tools # NOTE: "figure_or_data" is the name used in Plotly's .plot() method # for their main parameter. I don't like the name, but it's best to # keep it in sync with what Plotly calls it. if kwargs: show_deprecation_warning( "Variable keyword arguments for `st.plotly_chart` have been " "deprecated and will be removed in a future release. Use the " "`config` argument instead to specify Plotly configuration " "options." ) if theme not in ["streamlit", None]: raise StreamlitAPIException( f'You set theme="{theme}" while Streamlit charts only support ' "theme=”streamlit” or theme=None to fallback to the default " "library theme." 
) if on_select not in ["ignore", "rerun"] and not callable(on_select): raise StreamlitAPIException( f"You have passed {on_select} to `on_select`. But only 'ignore', " "'rerun', or a callable is supported." ) key = to_key(key) is_selection_activated = on_select != "ignore" if is_selection_activated: # Run some checks that are only relevant when selections are activated is_callback = callable(on_select) check_widget_policies( self.dg, key, on_change=cast("WidgetCallback", on_select) if is_callback else None, default_value=None, writes_allowed=False, enable_check_callback_rules=is_callback, ) if type_util.is_type(figure_or_data, "matplotlib.figure.Figure"): # Convert matplotlib figure to plotly figure: figure = plotly.tools.mpl_to_plotly(figure_or_data) else: figure = plotly.tools.return_figure_from_figure_or_data( figure_or_data, validate_figure=True ) plotly_chart_proto = PlotlyChartProto() plotly_chart_proto.theme = theme or "" plotly_chart_proto.form_id = current_form_id(self.dg) config = config or {} plotly_chart_proto.spec = plotly.io.to_json(figure, validate=False) plotly_chart_proto.config = json.dumps(config) ctx = get_script_run_ctx() # We are computing the widget id for all plotly uses # to also allow non-widget Plotly charts to keep their state # when the frontend component gets unmounted and remounted. 
plotly_chart_proto.id = compute_and_register_element_id( "plotly_chart", user_key=key, key_as_main_identity=False, dg=self.dg, plotly_spec=plotly_chart_proto.spec, plotly_config=plotly_chart_proto.config, selection_mode=selection_mode, is_selection_activated=is_selection_activated, theme=theme, width=width, height=height, ) # Handle "content" width and height by inspecting the figure's natural dimensions final_width = _resolve_content_width(width, figure) final_height = _resolve_content_height(height, figure) if is_selection_activated: # Selections are activated, treat plotly chart as a widget: plotly_chart_proto.selection_mode.extend( parse_selection_mode(selection_mode) ) serde = PlotlyChartSelectionSerde() widget_state = register_widget( plotly_chart_proto.id, on_change_handler=on_select if callable(on_select) else None, deserializer=serde.deserialize, serializer=serde.serialize, ctx=ctx, value_type="string_value", ) layout_config = LayoutConfig(width=final_width, height=final_height) self.dg._enqueue( "plotly_chart", plotly_chart_proto, layout_config=layout_config ) return widget_state.value layout_config = LayoutConfig(width=final_width, height=final_height) return self.dg._enqueue( "plotly_chart", plotly_chart_proto, layout_config=layout_config ) @property def dg(self) -> DeltaGenerator: """Get our DeltaGenerator.""" return cast("DeltaGenerator", self)
PlotlyMixin
python
getsentry__sentry
src/sentry/plugins/base/manager.py
{ "start": 389, "end": 2893 }
class ____(InstanceManager): def __iter__(self) -> Iterator[Plugin | Plugin2]: return iter(self.all()) def __len__(self) -> int: return sum(1 for i in self.all()) @overload def all(self) -> Generator[Plugin]: ... @overload def all(self, *, version: Literal[2]) -> Generator[Plugin2]: ... @overload def all(self, *, version: None) -> Generator[Plugin | Plugin2]: ... def all(self, version: int | None = 1) -> Generator[Plugin | Plugin2]: for plugin in sorted(super().all(), key=lambda x: x.get_title()): if not plugin.is_enabled(): continue if version is not None and plugin.__version__ != version: continue yield plugin def plugin_that_can_be_configured(self) -> Generator[Plugin | Plugin2]: for plugin in self.all(version=None): if plugin.has_project_conf(): yield plugin def configurable_for_project(self, project, version=1): for plugin in self.all(version=version): if not safe_execute(plugin.can_configure_for_project, project): continue yield plugin def exists(self, slug: str) -> bool: for plugin in self.all(version=None): if plugin.slug == slug: return True return False def for_project(self, project, version=1) -> Generator[Plugin | Plugin2]: for plugin in self.all(version=version): if not safe_execute(plugin.is_enabled, project): continue yield plugin def get(self, slug): for plugin in self.all(version=None): if plugin.slug == slug: return plugin raise KeyError(slug) def first(self, func_name, *args, **kwargs): version = kwargs.pop("version", 1) for plugin in self.all(version=version): try: result = getattr(plugin, func_name)(*args, **kwargs) except Exception as e: logger = logging.getLogger(f"sentry.plugins.{type(plugin).slug}") logger.exception("%s.process_error", func_name, extra={"exception": e}) continue if result is not None: return result def register(self, cls): self.add(f"{cls.__module__}.{cls.__name__}") return cls def unregister(self, cls): self.remove(f"{cls.__module__}.{cls.__name__}") return cls
PluginManager
python
pydata__xarray
xarray/tests/test_plot.py
{ "start": 48002, "end": 65735 }
class ____: """ Common tests for 2d plotting go here. These tests assume that a staticmethod for `self.plotfunc` exists. Should have the same name as the method. """ darray: DataArray plotfunc: staticmethod pass_in_axis: Callable # Needs to be overridden in TestSurface for facet grid plots subplot_kws: dict[Any, Any] | None = None @pytest.fixture(autouse=True) def setUp(self) -> None: da = DataArray( easy_array((10, 15), start=-1), dims=["y", "x"], coords={"y": np.arange(10), "x": np.arange(15)}, ) # add 2d coords ds = da.to_dataset(name="testvar") x, y = np.meshgrid(da.x.values, da.y.values) ds["x2d"] = DataArray(x, dims=["y", "x"]) ds["y2d"] = DataArray(y, dims=["y", "x"]) ds = ds.set_coords(["x2d", "y2d"]) # set darray and plot method self.darray: DataArray = ds.testvar # Add CF-compliant metadata self.darray.attrs["long_name"] = "a_long_name" self.darray.attrs["units"] = "a_units" self.darray.x.attrs["long_name"] = "x_long_name" self.darray.x.attrs["units"] = "x_units" self.darray.y.attrs["long_name"] = "y_long_name" self.darray.y.attrs["units"] = "y_units" self.plotmethod = getattr(self.darray.plot, self.plotfunc.__name__) def test_label_names(self) -> None: self.plotmethod() assert "x_long_name [x_units]" == plt.gca().get_xlabel() assert "y_long_name [y_units]" == plt.gca().get_ylabel() def test_1d_raises_valueerror(self) -> None: with pytest.raises(ValueError, match=r"DataArray must be 2d"): self.plotfunc(self.darray[0, :]) def test_bool(self) -> None: xr.ones_like(self.darray, dtype=bool).plot() # type: ignore[call-arg] def test_complex_raises_typeerror(self) -> None: with pytest.raises(TypeError, match=r"complex128"): (self.darray + 1j).plot() # type: ignore[call-arg] def test_3d_raises_valueerror(self) -> None: a = DataArray(easy_array((2, 3, 4))) if self.plotfunc.__name__ == "imshow": pytest.skip() with pytest.raises(ValueError, match=r"DataArray must be 2d"): self.plotfunc(a) def test_nonnumeric_index(self) -> None: a = DataArray(easy_array((3, 2)), 
coords=[["a", "b", "c"], ["d", "e"]]) if self.plotfunc.__name__ == "surface": # ax.plot_surface errors with nonnumerics: with pytest.raises(TypeError, match="not supported for the input types"): self.plotfunc(a) else: self.plotfunc(a) def test_multiindex_raises_typeerror(self) -> None: a = DataArray( easy_array((3, 2)), dims=("x", "y"), coords=dict(x=("x", [0, 1, 2]), a=("y", [0, 1]), b=("y", [2, 3])), ) a = a.set_index(y=("a", "b")) with pytest.raises(TypeError, match=r"[Pp]lot"): self.plotfunc(a) def test_can_pass_in_axis(self) -> None: self.pass_in_axis(self.plotmethod) def test_xyincrease_defaults(self) -> None: # With default settings the axis must be ordered regardless # of the coords order. self.plotfunc(DataArray(easy_array((3, 2)), coords=[[1, 2, 3], [1, 2]])) bounds = plt.gca().get_ylim() assert bounds[0] < bounds[1] bounds = plt.gca().get_xlim() assert bounds[0] < bounds[1] # Inverted coords self.plotfunc(DataArray(easy_array((3, 2)), coords=[[3, 2, 1], [2, 1]])) bounds = plt.gca().get_ylim() assert bounds[0] < bounds[1] bounds = plt.gca().get_xlim() assert bounds[0] < bounds[1] def test_xyincrease_false_changes_axes(self) -> None: self.plotmethod(xincrease=False, yincrease=False) xlim = plt.gca().get_xlim() ylim = plt.gca().get_ylim() diffs = xlim[0] - 14, xlim[1] - 0, ylim[0] - 9, ylim[1] - 0 assert all(abs(x) < 1 for x in diffs) def test_xyincrease_true_changes_axes(self) -> None: self.plotmethod(xincrease=True, yincrease=True) xlim = plt.gca().get_xlim() ylim = plt.gca().get_ylim() diffs = xlim[0] - 0, xlim[1] - 14, ylim[0] - 0, ylim[1] - 9 assert all(abs(x) < 1 for x in diffs) def test_dates_are_concise(self) -> None: import matplotlib.dates as mdates time = pd.date_range("2000-01-01", "2000-01-10") a = DataArray(np.random.randn(2, len(time)), [("xx", [1, 2]), ("t", time)]) self.plotfunc(a, x="t") ax = plt.gca() assert isinstance(ax.xaxis.get_major_locator(), mdates.AutoDateLocator) assert isinstance(ax.xaxis.get_major_formatter(), 
mdates.ConciseDateFormatter) def test_plot_nans(self) -> None: x1 = self.darray[:5] x2 = self.darray.copy() x2[5:] = np.nan clim1 = self.plotfunc(x1).get_clim() clim2 = self.plotfunc(x2).get_clim() assert clim1 == clim2 @pytest.mark.filterwarnings("ignore::UserWarning") @pytest.mark.filterwarnings("ignore:invalid value encountered") def test_can_plot_all_nans(self) -> None: # regression test for issue #1780 self.plotfunc(DataArray(np.full((2, 2), np.nan))) @pytest.mark.filterwarnings("ignore: Attempting to set") def test_can_plot_axis_size_one(self) -> None: if self.plotfunc.__name__ not in ("contour", "contourf"): self.plotfunc(DataArray(np.ones((1, 1)))) def test_disallows_rgb_arg(self) -> None: with pytest.raises(ValueError): # Always invalid for most plots. Invalid for imshow with 2D data. self.plotfunc(DataArray(np.ones((2, 2))), rgb="not None") def test_viridis_cmap(self) -> None: cmap_name = self.plotmethod(cmap="viridis").get_cmap().name assert "viridis" == cmap_name def test_default_cmap(self) -> None: cmap_name = self.plotmethod().get_cmap().name assert "RdBu_r" == cmap_name cmap_name = self.plotfunc(abs(self.darray)).get_cmap().name assert "viridis" == cmap_name @requires_seaborn def test_seaborn_palette_as_cmap(self) -> None: cmap_name = self.plotmethod(levels=2, cmap="husl").get_cmap().name assert "husl" == cmap_name def test_can_change_default_cmap(self) -> None: cmap_name = self.plotmethod(cmap="Blues").get_cmap().name assert "Blues" == cmap_name def test_diverging_color_limits(self) -> None: artist = self.plotmethod() vmin, vmax = artist.get_clim() assert round(abs(-vmin - vmax), 7) == 0 def test_xy_strings(self) -> None: self.plotmethod(x="y", y="x") ax = plt.gca() assert "y_long_name [y_units]" == ax.get_xlabel() assert "x_long_name [x_units]" == ax.get_ylabel() def test_positional_coord_string(self) -> None: self.plotmethod(y="x") ax = plt.gca() assert "x_long_name [x_units]" == ax.get_ylabel() assert "y_long_name [y_units]" == ax.get_xlabel() 
self.plotmethod(x="x") ax = plt.gca() assert "x_long_name [x_units]" == ax.get_xlabel() assert "y_long_name [y_units]" == ax.get_ylabel() def test_bad_x_string_exception(self) -> None: with pytest.raises(ValueError, match=r"x and y cannot be equal."): self.plotmethod(x="y", y="y") error_msg = "must be one of None, 'x', 'x2d', 'y', 'y2d'" with pytest.raises(ValueError, match=rf"x {error_msg}"): self.plotmethod(x="not_a_real_dim", y="y") with pytest.raises(ValueError, match=rf"x {error_msg}"): self.plotmethod(x="not_a_real_dim") with pytest.raises(ValueError, match=rf"y {error_msg}"): self.plotmethod(y="not_a_real_dim") self.darray.coords["z"] = 100 def test_coord_strings(self) -> None: # 1d coords (same as dims) assert {"x", "y"} == set(self.darray.dims) self.plotmethod(y="y", x="x") def test_non_linked_coords(self) -> None: # plot with coordinate names that are not dimensions newy = self.darray.y + 150 newy.attrs = {} # Clear attrs since binary ops keep them by default self.darray.coords["newy"] = newy # Normal case, without transpose self.plotfunc(self.darray, x="x", y="newy") ax = plt.gca() assert "x_long_name [x_units]" == ax.get_xlabel() assert "newy" == ax.get_ylabel() # ax limits might change between plotfuncs # simply ensure that these high coords were passed over assert np.min(ax.get_ylim()) > 100.0 def test_non_linked_coords_transpose(self) -> None: # plot with coordinate names that are not dimensions, # and with transposed y and x axes # This used to raise an error with pcolormesh and contour # https://github.com/pydata/xarray/issues/788 newy = self.darray.y + 150 newy.attrs = {} # Clear attrs since binary ops keep them by default self.darray.coords["newy"] = newy self.plotfunc(self.darray, x="newy", y="x") ax = plt.gca() assert "newy" == ax.get_xlabel() assert "x_long_name [x_units]" == ax.get_ylabel() # ax limits might change between plotfuncs # simply ensure that these high coords were passed over assert np.min(ax.get_xlim()) > 100.0 def 
test_multiindex_level_as_coord(self) -> None: da = DataArray( easy_array((3, 2)), dims=("x", "y"), coords=dict(x=("x", [0, 1, 2]), a=("y", [0, 1]), b=("y", [2, 3])), ) da = da.set_index(y=["a", "b"]) for x, y in (("a", "x"), ("b", "x"), ("x", "a"), ("x", "b")): self.plotfunc(da, x=x, y=y) ax = plt.gca() assert x == ax.get_xlabel() assert y == ax.get_ylabel() with pytest.raises(ValueError, match=r"levels of the same MultiIndex"): self.plotfunc(da, x="a", y="b") with pytest.raises(ValueError, match=r"y must be one of None, 'a', 'b', 'x'"): self.plotfunc(da, x="a", y="y") def test_default_title(self) -> None: a = DataArray(easy_array((4, 3, 2)), dims=["a", "b", "c"]) a.coords["c"] = [0, 1] a.coords["d"] = "foo" self.plotfunc(a.isel(c=1)) title = plt.gca().get_title() assert title in {"c = 1, d = foo", "d = foo, c = 1"} def test_colorbar_default_label(self) -> None: self.plotmethod(add_colorbar=True) assert "a_long_name [a_units]" in text_in_fig() def test_no_labels(self) -> None: self.darray.name = "testvar" self.darray.attrs["units"] = "test_units" self.plotmethod(add_labels=False) alltxt = text_in_fig() for string in [ "x_long_name [x_units]", "y_long_name [y_units]", "testvar [test_units]", ]: assert string not in alltxt def test_colorbar_kwargs(self) -> None: # replace label self.darray.attrs.pop("long_name") self.darray.attrs["units"] = "test_units" # check default colorbar label self.plotmethod(add_colorbar=True) alltxt = text_in_fig() assert "testvar [test_units]" in alltxt self.darray.attrs.pop("units") self.darray.name = "testvar" self.plotmethod(add_colorbar=True, cbar_kwargs={"label": "MyLabel"}) alltxt = text_in_fig() assert "MyLabel" in alltxt assert "testvar" not in alltxt # you can use anything accepted by the dict constructor as well self.plotmethod(add_colorbar=True, cbar_kwargs=(("label", "MyLabel"),)) alltxt = text_in_fig() assert "MyLabel" in alltxt assert "testvar" not in alltxt # change cbar ax _fig, axs = plt.subplots(1, 2, squeeze=False) ax = 
axs[0, 0] cax = axs[0, 1] self.plotmethod( ax=ax, cbar_ax=cax, add_colorbar=True, cbar_kwargs={"label": "MyBar"} ) assert ax.has_data() assert cax.has_data() alltxt = text_in_fig() assert "MyBar" in alltxt assert "testvar" not in alltxt # note that there are two ways to achieve this _fig, axs = plt.subplots(1, 2, squeeze=False) ax = axs[0, 0] cax = axs[0, 1] self.plotmethod( ax=ax, add_colorbar=True, cbar_kwargs={"label": "MyBar", "cax": cax} ) assert ax.has_data() assert cax.has_data() alltxt = text_in_fig() assert "MyBar" in alltxt assert "testvar" not in alltxt # see that no colorbar is respected self.plotmethod(add_colorbar=False) assert "testvar" not in text_in_fig() # check that error is raised pytest.raises( ValueError, self.plotmethod, add_colorbar=False, cbar_kwargs={"label": "label"}, ) def test_verbose_facetgrid(self) -> None: a = easy_array((10, 15, 3)) d = DataArray(a, dims=["y", "x", "z"]) g = xplt.FacetGrid(d, col="z", subplot_kws=self.subplot_kws) g.map_dataarray(self.plotfunc, "x", "y") for ax in g.axs.flat: assert ax.has_data() def test_2d_function_and_method_signature_same(self) -> None: func_sig = inspect.signature(self.plotfunc) method_sig = inspect.signature(self.plotmethod) for argname, param in method_sig.parameters.items(): assert func_sig.parameters[argname] == param @pytest.mark.filterwarnings("ignore:tight_layout cannot") def test_convenient_facetgrid(self) -> None: a = easy_array((10, 15, 4)) d = DataArray(a, dims=["y", "x", "z"]) g = self.plotfunc(d, x="x", y="y", col="z", col_wrap=2) assert_array_equal(g.axs.shape, [2, 2]) for (y, x), ax in np.ndenumerate(g.axs): assert ax.has_data() if x == 0: assert "y" == ax.get_ylabel() else: assert "" == ax.get_ylabel() if y == 1: assert "x" == ax.get_xlabel() else: assert "" == ax.get_xlabel() # Inferring labels g = self.plotfunc(d, col="z", col_wrap=2) assert_array_equal(g.axs.shape, [2, 2]) for (y, x), ax in np.ndenumerate(g.axs): assert ax.has_data() if x == 0: assert "y" == ax.get_ylabel() 
else: assert "" == ax.get_ylabel() if y == 1: assert "x" == ax.get_xlabel() else: assert "" == ax.get_xlabel() @pytest.mark.filterwarnings("ignore:tight_layout cannot") def test_convenient_facetgrid_4d(self) -> None: a = easy_array((10, 15, 2, 3)) d = DataArray(a, dims=["y", "x", "columns", "rows"]) g = self.plotfunc(d, x="x", y="y", col="columns", row="rows") assert_array_equal(g.axs.shape, [3, 2]) for ax in g.axs.flat: assert ax.has_data() @pytest.mark.filterwarnings("ignore:This figure includes") def test_facetgrid_map_only_appends_mappables(self) -> None: a = easy_array((10, 15, 2, 3)) d = DataArray(a, dims=["y", "x", "columns", "rows"]) g = self.plotfunc(d, x="x", y="y", col="columns", row="rows") expected = g._mappables g.map(lambda: plt.plot(1, 1)) actual = g._mappables assert expected == actual def test_facetgrid_cmap(self) -> None: # Regression test for GH592 data = np.random.random(size=(20, 25, 12)) + np.linspace(-3, 3, 12) d = DataArray(data, dims=["x", "y", "time"]) fg = d.plot.pcolormesh(col="time") # check that all color limits are the same assert len({m.get_clim() for m in fg._mappables}) == 1 # check that all colormaps are the same assert len({m.get_cmap().name for m in fg._mappables}) == 1 def test_facetgrid_cbar_kwargs(self) -> None: a = easy_array((10, 15, 2, 3)) d = DataArray(a, dims=["y", "x", "columns", "rows"]) g = self.plotfunc( d, x="x", y="y", col="columns", row="rows", cbar_kwargs={"label": "test_label"}, ) # catch contour case if g.cbar is not None: assert get_colorbar_label(g.cbar) == "test_label" def test_facetgrid_no_cbar_ax(self) -> None: a = easy_array((10, 15, 2, 3)) d = DataArray(a, dims=["y", "x", "columns", "rows"]) with pytest.raises(ValueError): self.plotfunc(d, x="x", y="y", col="columns", row="rows", cbar_ax=1) def test_cmap_and_color_both(self) -> None: with pytest.raises(ValueError): self.plotmethod(colors="k", cmap="RdBu") def test_2d_coord_with_interval(self) -> None: for dim in self.darray.dims: gp = 
self.darray.groupby_bins(dim, range(15), restore_coord_dims=True).mean( [dim] ) for kind in ["imshow", "pcolormesh", "contourf", "contour"]: getattr(gp.plot, kind)() def test_colormap_error_norm_and_vmin_vmax(self) -> None: norm = mpl.colors.LogNorm(0.1, 1e1) with pytest.raises(ValueError): self.darray.plot(norm=norm, vmin=2) # type: ignore[call-arg] with pytest.raises(ValueError): self.darray.plot(norm=norm, vmax=2) # type: ignore[call-arg] @pytest.mark.slow
Common2dMixin
python
keras-team__keras
keras/src/layers/reshaping/reshape.py
{ "start": 257, "end": 2650 }
class ____(Layer): """Layer that reshapes inputs into the given shape. Args: target_shape: Target shape. Tuple of integers, does not include the samples dimension (batch size). One element of the `target_shape` can be -1 in which case the missing value is inferred from the size of the array and remaining dimensions. Input shape: Arbitrary, but required to be compatible with `target_shape`. Output shape: `(batch_size, *target_shape)` Example: >>> x = keras.Input(shape=(12,)) >>> y = keras.layers.Reshape((3, 4))(x) >>> y.shape (None, 3, 4) >>> # another example with shape inference using `-1` as dimension >>> y = keras.layers.Reshape((-1, 2, 2))(x) >>> y.shape (None, 3, 2, 2) """ def __init__(self, target_shape, **kwargs): super().__init__(**kwargs) target_shape = tuple(target_shape) # test validity of target_shape if target_shape.count(-1) > 1: raise ValueError( "The `target_shape` argument must not contain more than one " f"`-1` value. Received: target_shape={target_shape}" ) self.target_shape = target_shape self.built = True def compute_output_shape(self, input_shape): return ( input_shape[0], *operation_utils.compute_reshape_output_shape( input_shape[1:], self.target_shape, "target_shape" ), ) def compute_output_spec(self, inputs): output_shape = self.compute_output_shape(inputs.shape) return KerasTensor( shape=output_shape, dtype=inputs.dtype, sparse=inputs.sparse ) def call(self, inputs): potentially_resolved_target_shape = ( operation_utils.compute_reshape_output_shape( tuple(inputs.shape)[1:], self.target_shape, "target_shape" ) ) potentially_resolved_target_shape = tuple( -1 if d is None else d for d in potentially_resolved_target_shape ) return ops.reshape( inputs, (ops.shape(inputs)[0],) + potentially_resolved_target_shape ) def get_config(self): config = {"target_shape": self.target_shape} base_config = super().get_config() return {**base_config, **config}
Reshape
python
PyCQA__pylint
tests/functional/a/arguments_differ.py
{ "start": 5384, "end": 5969 }
class ____: def kwonly_1(self, first, *, second, third): "Normal positional with two positional only params." def kwonly_2(self, *, first, second): "Two positional only parameter." def kwonly_3(self, *, first, second): "Two positional only params." def kwonly_4(self, *, first, second=None): "One positional only and another with a default." def kwonly_5(self, *, first, **kwargs): "Keyword only and keyword variadics." def kwonly_6(self, first, second, *, third): "Two positional and one keyword"
AbstractFoo
python
pandas-dev__pandas
pandas/io/formats/info.py
{ "start": 10287, "end": 12535 }
class ____(ABC): """ Base class for DataFrameInfo and SeriesInfo. Parameters ---------- data : DataFrame or Series Either dataframe or series. memory_usage : bool or str, optional If "deep", introspect the data deeply by interrogating object dtypes for system-level memory consumption, and include it in the returned values. """ data: DataFrame | Series memory_usage: bool | str @property @abstractmethod def dtypes(self) -> Iterable[Dtype]: """ Dtypes. Returns ------- dtypes : sequence Dtype of each of the DataFrame's columns (or one series column). """ @property @abstractmethod def dtype_counts(self) -> Mapping[str, int]: """Mapping dtype - number of counts.""" @property @abstractmethod def non_null_counts(self) -> list[int] | Series: """Sequence of non-null counts for all columns or column (if series).""" @property @abstractmethod def memory_usage_bytes(self) -> int: """ Memory usage in bytes. Returns ------- memory_usage_bytes : int Object's total memory usage in bytes. """ @property def memory_usage_string(self) -> str: """Memory usage in a form of human readable string.""" return f"{_sizeof_fmt(self.memory_usage_bytes, self.size_qualifier)}\n" @property def size_qualifier(self) -> str: size_qualifier = "" if self.memory_usage: if self.memory_usage != "deep": # size_qualifier is just a best effort; not guaranteed to catch # all cases (e.g., it misses categorical data even with object # categories) if ( "object" in self.dtype_counts or self.data.index._is_memory_usage_qualified ): size_qualifier = "+" return size_qualifier @abstractmethod def render( self, *, buf: WriteBuffer[str] | None, max_cols: int | None, verbose: bool | None, show_counts: bool | None, ) -> None: pass
_BaseInfo
python
getsentry__sentry
src/sentry/dashboards/endpoints/organization_dashboards.py
{ "start": 2249, "end": 4813 }
class ____(TypedDict): prebuilt_id: PrebuiltDashboardId title: str # Prebuilt dashboards store minimal fields in the database. The actual dashboard and widget settings are # coded in the frontend and we rely on matching prebuilt_id to populate the dashboard and widget display. # Prebuilt dashboard database records are purely for tracking things like starred status, last viewed, etc. # # Note A: This is stored differently from the `default-overview` prebuilt dashboard, which we should # deprecate once this feature is released. # Note B: Consider storing all dashboard and widget data in the database instead of relying on matching # prebuilt_id on the frontend, if there are issues. PREBUILT_DASHBOARDS: list[PrebuiltDashboard] = [ { "prebuilt_id": PrebuiltDashboardId.FRONTEND_SESSION_HEALTH, "title": "Frontend Session Health", }, { "prebuilt_id": PrebuiltDashboardId.BACKEND_QUERIES, "title": "Queries Overview", }, { "prebuilt_id": PrebuiltDashboardId.BACKEND_QUERIES_SUMMARY, "title": "Query Summary", }, ] def sync_prebuilt_dashboards(organization: Organization) -> None: """ Queries the database to check if prebuilt dashboards have a Dashboard record and creates them if they don't, or deletes them if they should no longer exist. 
""" with transaction.atomic(router.db_for_write(Dashboard)): saved_prebuilt_dashboards = Dashboard.objects.filter( organization=organization, prebuilt_id__isnull=False, ) saved_prebuilt_dashboard_ids = set( saved_prebuilt_dashboards.values_list("prebuilt_id", flat=True) ) # Create prebuilt dashboards if they don't exist for prebuilt_dashboard in PREBUILT_DASHBOARDS: prebuilt_id: PrebuiltDashboardId = prebuilt_dashboard["prebuilt_id"] if prebuilt_id not in saved_prebuilt_dashboard_ids: # Create new dashboard Dashboard.objects.create( organization=organization, title=prebuilt_dashboard["title"], created_by_id=None, prebuilt_id=prebuilt_id, ) # Delete old prebuilt dashboards if they should no longer exist prebuilt_ids = [d["prebuilt_id"] for d in PREBUILT_DASHBOARDS] Dashboard.objects.filter( organization=organization, prebuilt_id__isnull=False, ).exclude(prebuilt_id__in=prebuilt_ids).delete()
PrebuiltDashboard
python
HypothesisWorks__hypothesis
hypothesis-python/tests/nocover/test_build_signature.py
{ "start": 2077, "end": 2787 }
class ____: __annotations__ = get_type_hints(use_annotations) __signature__ = signature(use_signature) def __init__(self, **kwargs): # Check that we're being called with the expected arguments assert set(kwargs) == {"testA", "testX", "testY"} assert isinstance(kwargs["testA"], int) assert isinstance(kwargs["testX"], float) assert isinstance(kwargs["testY"], list) assert all(isinstance(elem, str) for elem in kwargs["testY"]) @given(st.builds(ModelWithAlias)) def test_build_using_different_signature_and_annotations(val): assert isinstance(val, ModelWithAlias) def use_bad_signature(self, testA: 1, *, testX: float): pass
ModelWithAlias
python
lxml__lxml
src/lxml/tests/test_xmlschema.py
{ "start": 187, "end": 14086 }
class ____(HelperTestCase): def test_xmlschema(self): tree_valid = self.parse('<a><b></b></a>') tree_invalid = self.parse('<a><c></c></a>') schema = self.parse(''' <xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema"> <xsd:element name="a" type="AType"/> <xsd:complexType name="AType"> <xsd:sequence> <xsd:element name="b" type="xsd:string" /> </xsd:sequence> </xsd:complexType> </xsd:schema> ''') schema = etree.XMLSchema(schema) self.assertTrue(schema.validate(tree_valid)) self.assertFalse(schema.validate(tree_invalid)) self.assertTrue(schema.validate(tree_valid)) # retry valid self.assertFalse(schema.validate(tree_invalid)) # retry invalid def test_xmlschema_error_log(self): tree_valid = self.parse('<a><b></b></a>') tree_invalid = self.parse('<a><c></c></a>') schema = self.parse(''' <xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema"> <xsd:element name="a" type="AType"/> <xsd:complexType name="AType"> <xsd:sequence> <xsd:element name="b" type="xsd:string" /> </xsd:sequence> </xsd:complexType> </xsd:schema> ''') schema = etree.XMLSchema(schema) self.assertTrue(schema.validate(tree_valid)) self.assertFalse(schema.error_log.filter_from_errors()) self.assertFalse(schema.validate(tree_invalid)) self.assertTrue(schema.error_log.filter_from_errors()) self.assertTrue(schema.error_log.filter_types( etree.ErrorTypes.SCHEMAV_ELEMENT_CONTENT)) self.assertTrue(schema.validate(tree_valid)) self.assertFalse(schema.error_log.filter_from_errors()) self.assertFalse(schema.validate(tree_invalid)) self.assertTrue(schema.error_log.filter_from_errors()) self.assertTrue(schema.error_log.filter_types( etree.ErrorTypes.SCHEMAV_ELEMENT_CONTENT)) def test_xmlschema_error_log_path(self): """We don't have a guarantee that there will always be a path for a _LogEntry object (or even a node for which to determine a path), but at least when this test was created schema validation errors always got a node and an XPath value. 
If that ever changes, we can modify this test to something like:: self.assertTrue(error_path is None or tree_path == error_path) That way, we can at least verify that if we did get a path value it wasn't bogus. """ tree = self.parse('<a><b>42</b><b>dada</b></a>') schema = self.parse(''' <xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema"> <xsd:element name="a" type="AType"/> <xsd:complexType name="AType"> <xsd:sequence> <xsd:element name="b" type="xsd:integer" maxOccurs="2"/> </xsd:sequence> </xsd:complexType> </xsd:schema> ''') schema = etree.XMLSchema(schema) schema.validate(tree) tree_path = tree.getpath(tree.findall('b')[1]) error_path = schema.error_log[0].path self.assertTrue(tree_path == error_path) def test_xmlschema_default_attributes(self): schema = self.parse(''' <xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema"> <xsd:element name="a" type="AType"/> <xsd:complexType name="AType"> <xsd:sequence minOccurs="4" maxOccurs="4"> <xsd:element name="b" type="BType" /> </xsd:sequence> </xsd:complexType> <xsd:complexType name="BType"> <xsd:attribute name="hardy" type="xsd:string" default="hey" /> </xsd:complexType> </xsd:schema> ''') schema = etree.XMLSchema(schema, attribute_defaults=True) tree = self.parse('<a><b hardy="ho"/><b/><b hardy="ho"/><b/></a>') root = tree.getroot() self.assertEqual('ho', root[0].get('hardy')) self.assertEqual(None, root[1].get('hardy')) self.assertEqual('ho', root[2].get('hardy')) self.assertEqual(None, root[3].get('hardy')) self.assertTrue(schema(tree)) root = tree.getroot() self.assertEqual('ho', root[0].get('hardy')) self.assertEqual('hey', root[1].get('hardy')) self.assertEqual('ho', root[2].get('hardy')) self.assertEqual('hey', root[3].get('hardy')) def test_xmlschema_parse(self): schema = self.parse(''' <xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema"> <xsd:element name="a" type="AType"/> <xsd:complexType name="AType"> <xsd:sequence> <xsd:element name="b" type="xsd:string" /> </xsd:sequence> </xsd:complexType> 
</xsd:schema> ''') schema = etree.XMLSchema(schema) parser = etree.XMLParser(schema=schema) tree_valid = self.parse('<a><b></b></a>', parser=parser) self.assertEqual('a', tree_valid.getroot().tag) self.assertRaises(etree.XMLSyntaxError, self.parse, '<a><c></c></a>', parser=parser) def test_xmlschema_parse_default_attributes(self): # does not work as of libxml2 2.7.3 schema = self.parse(''' <xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema"> <xsd:element name="a" type="AType"/> <xsd:complexType name="AType"> <xsd:sequence minOccurs="4" maxOccurs="4"> <xsd:element name="b" type="BType" /> </xsd:sequence> </xsd:complexType> <xsd:complexType name="BType"> <xsd:attribute name="hardy" type="xsd:string" default="hey" /> </xsd:complexType> </xsd:schema> ''') schema = etree.XMLSchema(schema) parser = etree.XMLParser(schema=schema, attribute_defaults=True) tree_valid = self.parse('<a><b hardy="ho"/><b/><b hardy="ho"/><b/></a>', parser=parser) root = tree_valid.getroot() self.assertEqual('ho', root[0].get('hardy')) self.assertEqual('hey', root[1].get('hardy')) self.assertEqual('ho', root[2].get('hardy')) self.assertEqual('hey', root[3].get('hardy')) def test_xmlschema_parse_default_attributes_schema_config(self): # does not work as of libxml2 2.7.3 schema = self.parse(''' <xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema"> <xsd:element name="a" type="AType"/> <xsd:complexType name="AType"> <xsd:sequence minOccurs="4" maxOccurs="4"> <xsd:element name="b" type="BType" /> </xsd:sequence> </xsd:complexType> <xsd:complexType name="BType"> <xsd:attribute name="hardy" type="xsd:string" default="hey" /> </xsd:complexType> </xsd:schema> ''') schema = etree.XMLSchema(schema, attribute_defaults=True) parser = etree.XMLParser(schema=schema) tree_valid = self.parse('<a><b hardy="ho"/><b/><b hardy="ho"/><b/></a>', parser=parser) root = tree_valid.getroot() self.assertEqual('ho', root[0].get('hardy')) self.assertEqual('hey', root[1].get('hardy')) self.assertEqual('ho', 
root[2].get('hardy')) self.assertEqual('hey', root[3].get('hardy')) def test_xmlschema_parse_fixed_attributes(self): # does not work as of libxml2 2.7.3 schema = self.parse(''' <xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema"> <xsd:element name="a" type="AType"/> <xsd:complexType name="AType"> <xsd:sequence minOccurs="3" maxOccurs="3"> <xsd:element name="b" type="BType" /> </xsd:sequence> </xsd:complexType> <xsd:complexType name="BType"> <xsd:attribute name="hardy" type="xsd:string" fixed="hey" /> </xsd:complexType> </xsd:schema> ''') schema = etree.XMLSchema(schema) parser = etree.XMLParser(schema=schema, attribute_defaults=True) tree_valid = self.parse('<a><b/><b hardy="hey"/><b/></a>', parser=parser) root = tree_valid.getroot() self.assertEqual('hey', root[0].get('hardy')) self.assertEqual('hey', root[1].get('hardy')) self.assertEqual('hey', root[2].get('hardy')) def test_xmlschema_stringio(self): schema_file = BytesIO(b''' <xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema"> <xsd:element name="a" type="AType"/> <xsd:complexType name="AType"> <xsd:sequence> <xsd:element name="b" type="xsd:string" /> </xsd:sequence> </xsd:complexType> </xsd:schema> ''') schema = etree.XMLSchema(file=schema_file) parser = etree.XMLParser(schema=schema) tree_valid = self.parse('<a><b></b></a>', parser=parser) self.assertEqual('a', tree_valid.getroot().tag) self.assertRaises(etree.XMLSyntaxError, self.parse, '<a><c></c></a>', parser=parser) def test_xmlschema_iterparse(self): schema = self.parse(''' <xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema"> <xsd:element name="a" type="AType"/> <xsd:complexType name="AType"> <xsd:sequence> <xsd:element name="b" type="xsd:string" /> </xsd:sequence> </xsd:complexType> </xsd:schema> ''') schema = etree.XMLSchema(schema) xml = BytesIO(b'<a><b></b></a>') events = [ (event, el.tag) for (event, el) in etree.iterparse(xml, schema=schema) ] self.assertEqual([('end', 'b'), ('end', 'a')], events) def 
test_xmlschema_iterparse_incomplete(self): schema = self.parse(''' <xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema"> <xsd:element name="a" type="AType"/> <xsd:complexType name="AType"> <xsd:sequence> <xsd:element name="b" type="xsd:string" /> </xsd:sequence> </xsd:complexType> </xsd:schema> ''') schema = etree.XMLSchema(schema) xml = BytesIO(b'<a><b></b></a>') event, element = next(iter(etree.iterparse(xml, schema=schema))) self.assertEqual('end', event) self.assertEqual('b', element.tag) def test_xmlschema_iterparse_fail(self): schema = self.parse(''' <xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema"> <xsd:element name="a" type="AType"/> <xsd:complexType name="AType"> <xsd:sequence> <xsd:element name="b" type="xsd:string" /> </xsd:sequence> </xsd:complexType> </xsd:schema> ''') schema = etree.XMLSchema(schema) self.assertRaises( etree.XMLSyntaxError, list, etree.iterparse(BytesIO(b'<a><c></c></a>'), schema=schema)) def test_xmlschema_elementtree_error(self): self.assertRaises(ValueError, etree.XMLSchema, etree.ElementTree()) def test_xmlschema_comment_error(self): self.assertRaises(ValueError, etree.XMLSchema, etree.Comment('TEST')) def test_xmlschema_illegal_validation_error(self): schema = self.parse(''' <xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema"> <xsd:element name="a" type="xsd:string"/> </xsd:schema> ''') schema = etree.XMLSchema(schema) root = etree.Element('a') root.text = 'TEST' self.assertTrue(schema(root)) self.assertRaises(ValueError, schema, etree.Comment('TEST')) self.assertRaises(ValueError, schema, etree.PI('a', 'text')) self.assertRaises(ValueError, schema, etree.Entity('text')) def test_xmlschema_invalid_schema1(self): schema = self.parse('''\ <xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema"> <element name="a" type="AType"/> <xsd:complexType name="AType"> <xsd:sequence> <xsd:element name="b" type="xsd:string" /> </xsd:sequence> </xsd:complexType> </xsd:schema> ''') self.assertRaises(etree.XMLSchemaParseError, 
etree.XMLSchema, schema) def test_xmlschema_invalid_schema2(self): schema = self.parse('<test/>') self.assertRaises(etree.XMLSchemaParseError, etree.XMLSchema, schema) def test_xmlschema_file(self): # this will only work if we access the file through path or # file object.. f = open(fileInTestDir('test.xsd'), 'rb') try: schema = etree.XMLSchema(file=f) finally: f.close() tree_valid = self.parse('<a><b></b></a>') self.assertTrue(schema.validate(tree_valid)) def test_xmlschema_import_file(self): # this will only work if we access the file through path or # file object.. schema = etree.XMLSchema(file=fileInTestDir('test_import.xsd')) tree_valid = self.parse( '<a:x xmlns:a="http://codespeak.net/lxml/schema/ns1"><b></b></a:x>') self.assertTrue(schema.validate(tree_valid)) def test_xmlschema_shortcut(self): tree_valid = self.parse('<a><b></b></a>') tree_invalid = self.parse('<a><c></c></a>') schema = self.parse('''\ <xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema"> <xsd:element name="a" type="AType"/> <xsd:complexType name="AType"> <xsd:sequence> <xsd:element name="b" type="xsd:string" /> </xsd:sequence> </xsd:complexType> </xsd:schema> ''') self.assertTrue(tree_valid.xmlschema(schema)) self.assertFalse(tree_invalid.xmlschema(schema)) def test_create_from_partial_doc(self): # this used to crash because the schema part was not properly copied out wsdl = self.parse('''\ <wsdl:definitions xmlns:wsdl="http://schemas.xmlsoap.org/wsdl/" xmlns:xs="http://www.w3.org/2001/XMLSchema"> <wsdl:types> <xs:schema> </xs:schema> </wsdl:types> </wsdl:definitions> ''') schema_element = wsdl.find( "{http://schemas.xmlsoap.org/wsdl/}types/" "{http://www.w3.org/2001/XMLSchema}schema" ) etree.XMLSchema(schema_element) etree.XMLSchema(schema_element) etree.XMLSchema(schema_element) def test_xmlschema_pathlike(self): schema = etree.XMLSchema(file=SimpleFSPath(fileInTestDir('test.xsd'))) tree_valid = self.parse('<a><b></b></a>') self.assertTrue(schema.validate(tree_valid))
ETreeXMLSchemaTestCase
python
wandb__wandb
wandb/sdk/artifacts/_generated/fragments.py
{ "start": 4868, "end": 5074 }
class ____(GQLResult): typename__: Typename[Literal["PageInfo"]] = "PageInfo" end_cursor: Optional[str] = Field(alias="endCursor") has_next_page: bool = Field(alias="hasNextPage")
PageInfoFragment
python
ApeWorX__ape
tests/functional/test_provider.py
{ "start": 28015, "end": 30759 }
class ____: FAKE_PID = 12345678901234567890 @pytest.fixture(autouse=True) def mock_process(self, mocker): mock_process = mocker.MagicMock() mock_process.pid = self.FAKE_PID return mock_process @pytest.fixture(autouse=True) def popen_patch(self, mocker, mock_process): # Prevent actually creating new processes. patch = mocker.patch("ape.api.providers.popen") patch.return_value = mock_process return patch @pytest.fixture(autouse=True) def spawn_patch(self, mocker): # Prevent spawning process monitoring threads. return mocker.patch("ape.api.providers.spawn") @pytest.fixture def subprocess_provider(self, popen_patch, eth_tester_provider): class MockSubprocessProvider(SubprocessProvider): @property def is_connected(self): # Once Popen is called once, we are "connected" return popen_patch.call_count > 0 def build_command(self) -> list[str]: return ["apemockprocess"] # Hack to allow abstract methods anyway. MockSubprocessProvider.__abstractmethods__ = set() # type: ignore return MockSubprocessProvider(name="apemockprocess", network=eth_tester_provider.network) # type: ignore def test_start(self, subprocess_provider): assert not subprocess_provider.is_connected subprocess_provider.start() assert subprocess_provider.is_connected # Show it gets tracked in network manager's managed nodes. assert self.FAKE_PID in subprocess_provider.network_manager.running_nodes def test_start_allow_start_false(self, subprocess_provider): subprocess_provider.allow_start = False expected = r"Process not started and cannot connect to existing process\." with pytest.raises(ProviderError, match=expected): subprocess_provider.start() def test_get_trace_from_revert_kwargs(ethereum, owner, chain): """ Trace already given, ignore transaction. 
""" trace = TransactionTrace(transaction_hash="0x") txn = ethereum.create_transaction( sender=owner, max_fee=chain.provider.base_fee, max_priority_fee=0, nonce=0 ) txn = owner.sign_transaction(txn) actual = _get_trace_from_revert_kwargs(trace=trace, txn=txn) assert actual == trace # Only given txn. It uses the provider to get it. actual = _get_trace_from_revert_kwargs(txn=txn) assert actual == txn.trace # Only given a receipt. It is cached on the receipt after using the provider. receipt = owner.call(txn) actual = _get_trace_from_revert_kwargs(txn=receipt) assert actual == receipt.trace
TestSubprocessProvider
python
django__django
tests/one_to_one/models.py
{ "start": 194, "end": 378 }
class ____(models.Model): name = models.CharField(max_length=50) address = models.CharField(max_length=80) def __str__(self): return "%s the place" % self.name
Place
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/overloadImpl1.py
{ "start": 1254, "end": 1810 }
class ____: @overload def method4(self, a: None) -> None: ... @overload def method4(self, a: list[T]) -> T: ... def method4(self, a: list[T] | None) -> T | None: ... @overload def func5(a: list[T]) -> T: ... @overload def func5(a: None) -> None: ... # This should generate an error because list is not compatible with dict. def func5(a: dict[Any, Any] | None) -> Any | None: ... @overload def func6(foo: int, /) -> int: ... @overload def func6(bar: str, /) -> int: ... def func6(p0: int | str, /) -> int: return 3
ClassA
python
apache__airflow
providers/google/tests/unit/google/cloud/operators/test_dataform.py
{ "start": 11364, "end": 12213 }
class ____: @mock.patch(HOOK_STR) @mock.patch(WRITE_FILE_RESPONSE_STR) def test_execute(self, _, hook_mock): op = DataformWriteFileOperator( task_id="write-file", project_id=PROJECT_ID, region=REGION, repository_id=REPOSITORY_ID, workspace_id=WORKSPACE_ID, filepath=FILEPATH, contents=FILE_CONTENT, ) op.execute(context=mock.MagicMock()) hook_mock.return_value.write_file.assert_called_once_with( project_id=PROJECT_ID, region=REGION, repository_id=REPOSITORY_ID, workspace_id=WORKSPACE_ID, filepath=FILEPATH, contents=FILE_CONTENT, retry=DEFAULT, timeout=None, metadata=(), )
TestDataformWriteFileOperator
python
joke2k__faker
tests/providers/test_emoji.py
{ "start": 43, "end": 343 }
class ____(unittest.TestCase): """Test emoji provider methods""" def setUp(self): self.fake = Faker() # No locale specified, gets global for this provider Faker.seed(0) def test_emoji(self): emoji = self.fake.emoji() assert isinstance(emoji, str)
TestGlobal
python
jazzband__django-model-utils
tests/test_fields/test_field_tracker.py
{ "start": 15309, "end": 16957 }
class ____(FieldTrackerMixin, TestCase): tracked_class = TrackedNonFieldAttr instance: TrackedNonFieldAttr def setUp(self) -> None: self.instance = self.tracked_class() self.tracker = self.instance.tracker def test_previous(self) -> None: self.assertPrevious(rounded=None) self.instance.number = 7.5 self.assertPrevious(rounded=None) self.instance.save() self.assertPrevious(rounded=8) self.instance.number = 7.2 self.assertPrevious(rounded=8) self.instance.save() self.assertPrevious(rounded=7) def test_has_changed(self) -> None: self.assertHasChanged(rounded=False) self.instance.number = 7.5 self.assertHasChanged(rounded=True) self.instance.save() self.assertHasChanged(rounded=False) self.instance.number = 7.2 self.assertHasChanged(rounded=True) self.instance.number = 7.8 self.assertHasChanged(rounded=False) def test_changed(self) -> None: self.assertChanged() self.instance.number = 7.5 self.assertPrevious(rounded=None) self.instance.save() self.assertPrevious() self.instance.number = 7.8 self.assertPrevious() self.instance.number = 7.2 self.assertPrevious(rounded=8) self.instance.save() self.assertPrevious() def test_current(self) -> None: self.assertCurrent(rounded=None) self.instance.number = 7.5 self.assertCurrent(rounded=8) self.instance.save() self.assertCurrent(rounded=8)
FieldTrackedModelAttributeTests
python
python__mypy
mypy/semanal.py
{ "start": 352012, "end": 352554 }
class ____(TrivialSyntheticTypeTranslator): def visit_any(self, t: AnyType) -> Type: if t.type_of_any == TypeOfAny.explicit: return t.copy_modified(TypeOfAny.special_form) return t def visit_type_alias_type(self, t: TypeAliasType) -> Type: return t.copy_modified(args=[a.accept(self) for a in t.args]) def make_any_non_unimported(t: Type) -> Type: """Replace all Any types that come from unimported types with special form Any.""" return t.accept(MakeAnyNonUnimported())
MakeAnyNonExplicit
python
realpython__materials
python-getter-setter/person.py
{ "start": 0, "end": 359 }
class ____: def __init__(self, name, birth_date): self.name = name self._birth_date = birth_date def get_birth_date(self): return self._birth_date def set_birth_date(self, value, force=False): if force: self._birth_date = value else: raise AttributeError("can't set birth_date")
Person
python
tensorflow__tensorflow
tensorflow/core/function/polymorphism/type_dispatch_test.py
{ "start": 2131, "end": 12267 }
class ____(test.TestCase): def testVertical(self): table = type_dispatch.TypeDispatchTable() table.add_target(make_shape_function_type(None, None, None)) table.add_target(make_shape_function_type(None, None, 1)) table.add_target(make_shape_function_type(None, 1, 1)) table.add_target(make_shape_function_type(1, 1, 1)) self.assertEqual( list(table.targets), [ make_shape_function_type(None, None, None), make_shape_function_type(None, None, 1), make_shape_function_type(None, 1, 1), make_shape_function_type(1, 1, 1) ]) def testHorizontal(self): table = type_dispatch.TypeDispatchTable() table.add_target(make_shape_function_type(1,)) table.add_target(make_shape_function_type(1, 2)) table.add_target(make_shape_function_type(1, 2, 3)) self.assertEqual( list(table.targets), [ make_shape_function_type(1,), make_shape_function_type(1, 2), make_shape_function_type(1, 2, 3) ]) def testDuplicateNodes(self): table = type_dispatch.TypeDispatchTable() table.add_target(make_shape_function_type(None, None)) table.add_target(make_shape_function_type(1, None)) table.add_target(make_shape_function_type(None, 2)) table.add_target(make_shape_function_type(None, None)) self.assertEqual( list(table.targets), [ make_shape_function_type(None, None), make_shape_function_type(1, None), make_shape_function_type(None, 2) ]) def testDeletion(self): table = type_dispatch.TypeDispatchTable() table.add_target(make_shape_function_type(None, None)) table.add_target(make_shape_function_type(None, 1)) table.add_target(make_shape_function_type(None, 2)) self.assertEqual( list(table.targets), [ make_shape_function_type(None, None), make_shape_function_type(None, 1), make_shape_function_type(None, 2) ]) table.delete(make_shape_function_type(None, 2)) # Should remove the target self.assertEqual( list(table.targets), [ make_shape_function_type(None, None), make_shape_function_type(None, 1), ]) table.delete(make_shape_function_type(None, 2)) # Should have no effect self.assertEqual( list(table.targets), [ 
make_shape_function_type(None, None), make_shape_function_type(None, 1), ]) def testContains(self): table = type_dispatch.TypeDispatchTable() table.add_target(make_shape_function_type(None, None, None)) table.add_target(make_shape_function_type(None, 1)) table.add_target(make_shape_function_type(1, 1)) table.add_target(make_shape_function_type(None, 2, 1)) self.assertIn(make_shape_function_type(None, None, None), table.targets) self.assertIn(make_shape_function_type(None, 1), table.targets) self.assertIn(make_shape_function_type(1, 1), table.targets) self.assertIn(make_shape_function_type(None, 2, 1), table.targets) self.assertNotIn(make_shape_function_type(None, None, 1), table.targets) self.assertNotIn(make_shape_function_type(1, None), table.targets) self.assertNotIn(make_shape_function_type(1, 2), table.targets) self.assertNotIn(make_shape_function_type(None, 2, None), table.targets) def testDispatchExactMatches(self): table = type_dispatch.TypeDispatchTable() table.add_target(make_shape_function_type(None, None, None)) table.add_target(make_shape_function_type(None, 1, None)) table.add_target(make_shape_function_type(None, 1, 2)) table.add_target(make_shape_function_type(None, 2, 2)) self.assertEqual( table.dispatch(make_shape_function_type(None, 1, 2)), make_shape_function_type(None, 1, 2)) self.assertEqual( table.dispatch(make_shape_function_type(None, 1, None)), make_shape_function_type(None, 1, None)) self.assertEqual( table.dispatch(make_shape_function_type(None, None, None)), make_shape_function_type(None, None, None)) self.assertEqual( table.dispatch(make_shape_function_type(None, 2, 2)), make_shape_function_type(None, 2, 2)) def testDispatchMoreSpecific(self): table = type_dispatch.TypeDispatchTable() table.add_target(make_shape_function_type(None, None, None)) table.add_target(make_shape_function_type(None, 1, None)) table.add_target(make_shape_function_type(None, 1, 2)) table.add_target(make_shape_function_type(None, 2, 2)) self.assertEqual( 
table.dispatch(make_shape_function_type(1, 1, 2)), make_shape_function_type(None, 1, 2)) self.assertEqual( table.dispatch(make_shape_function_type(1, 1, 3)), make_shape_function_type(None, 1, None)) self.assertEqual( table.dispatch(make_shape_function_type(1, 3, 3)), make_shape_function_type(None, None, None)) self.assertEqual( table.dispatch(make_shape_function_type(1, 2, 2)), make_shape_function_type(None, 2, 2)) def testDispatchNoMatches(self): table = type_dispatch.TypeDispatchTable() table.add_target(make_shape_function_type(None, 1, None)) table.add_target(make_shape_function_type(None, 1, 2)) table.add_target(make_shape_function_type(None, 2, 2)) self.assertIsNone(table.dispatch(make_shape_function_type(1, 2))) self.assertIsNone(table.dispatch(make_shape_function_type(1, 2, 3))) self.assertIsNone(table.dispatch(make_shape_function_type(1, 2, 3, 4))) def testDispatchCachedAddUpdates(self): table = type_dispatch.TypeDispatchTable() table.add_target(make_shape_function_type(None, None, None)) self.assertEqual( table.dispatch(make_shape_function_type(1, 1, 2)), make_shape_function_type(None, None, None)) table.add_target(make_shape_function_type(None, 1, None)) self.assertEqual( table.dispatch(make_shape_function_type(1, 1, 2)), make_shape_function_type(None, 1, None)) table.add_target(make_shape_function_type(None, 1, 2)) self.assertEqual( table.dispatch(make_shape_function_type(1, 1, 2)), make_shape_function_type(None, 1, 2)) table.add_target(make_shape_function_type(1, 1, 2)) self.assertEqual( table.dispatch(make_shape_function_type(1, 1, 2)), make_shape_function_type(1, 1, 2)) def testDispatchCachedDeleteUpdates(self): table = type_dispatch.TypeDispatchTable() table.add_target(make_shape_function_type(None, None, None)) table.add_target(make_shape_function_type(None, 1, None)) table.add_target(make_shape_function_type(None, 1, 2)) table.add_target(make_shape_function_type(1, 1, 2)) self.assertEqual( table.dispatch(make_shape_function_type(1, 1, 2)), 
make_shape_function_type(1, 1, 2)) table.delete(make_shape_function_type(1, 1, 2)) self.assertEqual( table.dispatch(make_shape_function_type(1, 1, 2)), make_shape_function_type(None, 1, 2)) table.delete(make_shape_function_type(None, 1, 2)) self.assertEqual( table.dispatch(make_shape_function_type(1, 1, 2)), make_shape_function_type(None, 1, None)) table.delete(make_shape_function_type(None, 1, None)) self.assertEqual( table.dispatch(make_shape_function_type(1, 1, 2)), make_shape_function_type(None, None, None)) def testDispatchCacheOrderingDeterminism(self): table_1 = type_dispatch.TypeDispatchTable() table_1.add_target(make_shape_function_type(1, None, None)) table_1.add_target(make_shape_function_type(None, 2, None)) table_1.add_target(make_shape_function_type(None, None, 3)) table_2 = type_dispatch.TypeDispatchTable() table_2.add_target(make_shape_function_type(None, 2, None)) table_2.add_target(make_shape_function_type(1, None, None)) table_2.add_target(make_shape_function_type(None, None, 3)) table_3 = type_dispatch.TypeDispatchTable() table_3.add_target(make_shape_function_type(None, None, 3)) table_3.add_target(make_shape_function_type(1, None, None)) table_3.add_target(make_shape_function_type(None, 2, None)) # table_1, table_2, table_3 have the same targets self.assertEqual(set(table_1.targets), set(table_2.targets)) self.assertEqual(set(table_2.targets), set(table_3.targets)) # But they dispatch to the first target they find which does not have any # more specific viable target. 
shape = make_shape_function_type(1, 2, 3) self.assertEqual( table_1.dispatch(shape), make_shape_function_type(1, None, None)) self.assertEqual( table_2.dispatch(shape), make_shape_function_type(None, 2, None)) self.assertEqual( table_3.dispatch(shape), make_shape_function_type(None, None, 3)) def testGeneralizedExisting(self): table = type_dispatch.TypeDispatchTable() table.add_target(make_shape_function_type(None, None, None)) table.add_target(make_shape_function_type(None, 1, None)) table.add_target(make_shape_function_type(None, 1, 2)) self.assertEqual( table.try_generalizing_function_type( make_shape_function_type(None, 1, 3)), make_shape_function_type(None, None, None)) def testGeneralizedNovel(self): table = type_dispatch.TypeDispatchTable() table.add_target(make_shape_function_type(None, 1, None)) table.add_target(make_shape_function_type(None, 1, 2)) self.assertEqual( table.try_generalizing_function_type( make_shape_function_type(None, 2, 3)), make_shape_function_type(None, None, None)) def testGeneralizedUnknown(self): table = type_dispatch.TypeDispatchTable() table.add_target(make_shape_function_type(None, 1)) table.add_target(make_shape_function_type(None, 2)) table.add_target(make_shape_function_type(None, 3)) self.assertEqual( table.try_generalizing_function_type( make_shape_function_type(None, 4, 3)), make_shape_function_type(None, 4, 3)) if __name__ == "__main__": test.main()
TypeDispatchTableTest
python
getsentry__sentry
src/sentry/incidents/models/incident.py
{ "start": 9745, "end": 10863 }
class ____(BaseManager["IncidentTrigger"]): CACHE_KEY = "incident:triggers:%s" @classmethod def _build_cache_key(cls, incident_id): return cls.CACHE_KEY % incident_id def get_for_incident(self, incident): """ Fetches the IncidentTriggers associated with an Incident. Attempts to fetch from cache then hits the database. """ cache_key = self._build_cache_key(incident.id) triggers = cache.get(cache_key) if triggers is None: triggers = list(IncidentTrigger.objects.filter(incident=incident)) cache.set(cache_key, triggers, 3600) return triggers @classmethod def clear_incident_cache(cls, instance, **kwargs): cache.delete(cls._build_cache_key(instance.id)) assert cache.get(cls._build_cache_key(instance.id)) is None @classmethod def clear_incident_trigger_cache(cls, instance, **kwargs): cache.delete(cls._build_cache_key(instance.incident_id)) assert cache.get(cls._build_cache_key(instance.incident_id)) is None @region_silo_model
IncidentTriggerManager
python
doocs__leetcode
solution/0300-0399/0378.Kth Smallest Element in a Sorted Matrix/Solution.py
{ "start": 0, "end": 674 }
class ____: def kthSmallest(self, matrix: List[List[int]], k: int) -> int: def check(matrix, mid, k, n): count = 0 i, j = n - 1, 0 while i >= 0 and j < n: if matrix[i][j] <= mid: count += i + 1 j += 1 else: i -= 1 return count >= k n = len(matrix) left, right = matrix[0][0], matrix[n - 1][n - 1] while left < right: mid = (left + right) >> 1 if check(matrix, mid, k, n): right = mid else: left = mid + 1 return left
Solution
python
bokeh__bokeh
src/bokeh/models/mappers.py
{ "start": 7103, "end": 8419 }
class ____(ColorMapper): ''' Base class for continuous color mapper types. ''' # explicit __init__ to support Init signatures def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) domain = List(Tuple(Instance("bokeh.models.renderers.GlyphRenderer"), Either(String, List(String))), default=[], help=""" A collection of glyph renderers to pool data from for establishing data metrics. If empty, mapped data will be used instead. """) low = Nullable(Float, help=""" The minimum value of the range to map into the palette. Values below this are clamped to ``low``. If ``None``, the value is inferred from data. """) high = Nullable(Float, help=""" The maximum value of the range to map into the palette. Values above this are clamped to ``high``. If ``None``, the value is inferred from data. """) low_color = Nullable(Color, help=""" Color to be used if data is lower than ``low`` value. If None, values lower than ``low`` are mapped to the first color in the palette. """) high_color = Nullable(Color, help=""" Color to be used if data is higher than ``high`` value. If None, values higher than ``high`` are mapped to the last color in the palette. """)
ContinuousColorMapper
python
pytorch__pytorch
torch/utils/_ordered_set.py
{ "start": 322, "end": 5658 }
class ____(MutableSet[T], Reversible[T]): """ Insertion ordered set, similar to OrderedDict. """ __slots__ = ("_dict",) def __init__(self, iterable: Iterable[T] | None = None) -> None: self._dict = dict.fromkeys(iterable, None) if iterable is not None else {} @staticmethod def _from_dict(dict_inp: dict[T, None]) -> OrderedSet[T]: s: OrderedSet[T] = OrderedSet() s._dict = dict_inp return s # # Required overridden abstract methods # def __contains__(self, elem: object) -> bool: return elem in self._dict def __iter__(self) -> Iterator[T]: return iter(self._dict) def __len__(self) -> int: return len(self._dict) def __reversed__(self) -> Iterator[T]: return reversed(self._dict) def add(self, elem: T) -> None: self._dict[elem] = None def discard(self, elem: T) -> None: self._dict.pop(elem, None) def clear(self) -> None: # overridden because MutableSet impl is slow self._dict.clear() # Unimplemented set() methods in _collections_abc.MutableSet @classmethod def _wrap_iter_in_set(cls, other: Any) -> Any: """ Wrap non-Set Iterables in OrderedSets Some of the magic methods are more strict on input types than the public apis, so we need to wrap inputs in sets. 
""" if not isinstance(other, AbstractSet) and isinstance(other, Iterable): return cls(other) else: return other def pop(self) -> T: if not self: raise KeyError("pop from an empty set") # pyrefly: ignore [bad-return] return self._dict.popitem()[0] def copy(self) -> OrderedSet[T]: return OrderedSet._from_dict(self._dict.copy()) def difference(self, *others: Iterable[T]) -> OrderedSet[T]: res = self.copy() res.difference_update(*others) return res def difference_update(self, *others: Iterable[T]) -> None: for other in others: self -= other # type: ignore[arg-type] def update(self, *others: Iterable[T]) -> None: for other in others: self |= other def intersection(self, *others: Iterable[T]) -> OrderedSet[T]: res = self.copy() for other in others: if other is not self: res &= other # type: ignore[arg-type] return res def intersection_update(self, *others: Iterable[T]) -> None: for other in others: self &= other # type: ignore[arg-type] def issubset(self, other: Iterable[T]) -> bool: return self <= self._wrap_iter_in_set(other) def issuperset(self, other: Iterable[T]) -> bool: return self >= self._wrap_iter_in_set(other) def symmetric_difference(self, other: Iterable[T]) -> OrderedSet[T]: return self ^ other # type: ignore[operator] def symmetric_difference_update(self, other: Iterable[T]) -> None: self ^= other # type: ignore[arg-type] def union(self, *others: Iterable[T]) -> OrderedSet[T]: res = self.copy() for other in others: if other is self: continue res |= other return res # Specify here for correct type inference, otherwise would # return AbstractSet[T] def __sub__(self, other: AbstractSet[T_co]) -> OrderedSet[T]: # following cpython set impl optimization if isinstance(other, OrderedSet) and (len(self) * 4) > len(other): out = self.copy() out -= other return out return cast(OrderedSet[T], super().__sub__(other)) def __ior__(self, other: Iterable[T]) -> OrderedSet[T]: # type: ignore[misc, override] # noqa: PYI034 if isinstance(other, OrderedSet): 
self._dict.update(other._dict) return self return super().__ior__(other) # type: ignore[arg-type] def __eq__(self, other: object) -> bool: if isinstance(other, OrderedSet): return self._dict == other._dict return super().__eq__(other) def __ne__(self, other: object) -> bool: if isinstance(other, OrderedSet): return self._dict != other._dict return super().__ne__(other) def __or__(self, other: AbstractSet[T_co]) -> OrderedSet[T]: return cast(OrderedSet[T], super().__or__(other)) def __and__(self, other: AbstractSet[T_co]) -> OrderedSet[T]: # MutableSet impl will iterate over other, iter over smaller of two sets if isinstance(other, OrderedSet) and len(self) < len(other): # pyrefly: ignore [unsupported-operation, bad-return] return other & self return cast(OrderedSet[T], super().__and__(other)) def __xor__(self, other: AbstractSet[T_co]) -> OrderedSet[T]: return cast(OrderedSet[T], super().__xor__(other)) def __repr__(self) -> str: return f"{self.__class__.__name__}({list(self)})" def __getstate__(self) -> list[T]: return list(self._dict.keys()) def __setstate__(self, state: list[T]) -> None: self._dict = dict.fromkeys(state, None) def __reduce__(self) -> tuple[type[OrderedSet[T]], tuple[list[T]]]: return (OrderedSet, (list(self),))
OrderedSet
python
django__django
django/contrib/postgres/fields/ranges.py
{ "start": 5867, "end": 6093 }
class ____(ContinuousRangeField): base_field = models.DateTimeField range_type = DateTimeTZRange form_field = forms.DateTimeRangeField def db_type(self, connection): return "tstzrange"
DateTimeRangeField
python
pytorch__pytorch
benchmarks/operator_benchmark/pt/boolean_test.py
{ "start": 546, "end": 1247 }
class ____(op_bench.TorchBenchmarkBase): def init(self, M, N, K, device): self.inputs = { "input_one": torch.randint(0, 2, (M, N, K), device=device, dtype=torch.bool) } self.set_module_name("all") def forward(self, input_one): return torch.all(input_one) # The generated test names based on all_short_configs will be in the following pattern: # all_M8_N16_K32_devicecpu # all_M8_N16_K32_devicecpu_bwdall # all_M8_N16_K32_devicecpu_bwd1 # all_M8_N16_K32_devicecpu_bwd2 # ... # Those names can be used to filter tests. op_bench.generate_pt_test(all_long_configs + all_short_configs, AllBenchmark) """Mircobenchmark for any operator."""
AllBenchmark
python
apache__airflow
helm-tests/tests/helm_tests/airflow_aux/test_create_user_job.py
{ "start": 17339, "end": 18780 }
class ____: """Tests create user job service account.""" def test_should_add_component_specific_labels(self): docs = render_chart( values={ "createUserJob": { "labels": {"test_label": "test_label_value"}, }, }, show_only=["templates/jobs/create-user-job-serviceaccount.yaml"], ) assert "test_label" in jmespath.search("metadata.labels", docs[0]) assert jmespath.search("metadata.labels", docs[0])["test_label"] == "test_label_value" def test_default_automount_service_account_token(self): docs = render_chart( values={ "createUserJob": { "serviceAccount": {"create": True}, }, }, show_only=["templates/jobs/create-user-job-serviceaccount.yaml"], ) assert jmespath.search("automountServiceAccountToken", docs[0]) is True def test_overridden_automount_service_account_token(self): docs = render_chart( values={ "createUserJob": { "serviceAccount": {"create": True, "automountServiceAccountToken": False}, }, }, show_only=["templates/jobs/create-user-job-serviceaccount.yaml"], ) assert jmespath.search("automountServiceAccountToken", docs[0]) is False
TestCreateUserJobServiceAccount
python
great-expectations__great_expectations
great_expectations/datasource/fluent/data_connector/google_cloud_storage_data_connector.py
{ "start": 962, "end": 12395 }
class ____(FilePathDataConnector): """Extension of FilePathDataConnector used to connect to Google Cloud Storage (GCS). Args: datasource_name: The name of the Datasource associated with this DataConnector instance data_asset_name: The name of the DataAsset using this DataConnector instance gcs_client: Reference to instantiated Google Cloud Storage client handle bucket_or_name (str): bucket name for Google Cloud Storage prefix (str): GCS prefix delimiter (str): GCS delimiter max_results (int): max blob filepaths to return recursive_file_discovery (bool): Flag to indicate if files should be searched recursively from subfolders file_path_template_map_fn: Format function mapping path to fully-qualified resource on GCS whole_directory_path_override: If present, treat entire directory as single Asset """ # noqa: E501 # FIXME CoP asset_level_option_keys: ClassVar[tuple[str, ...]] = ( "gcs_prefix", "gcs_delimiter", "gcs_max_results", "gcs_recursive_file_discovery", ) asset_options_type: ClassVar[Type[_GCSOptions]] = _GCSOptions def __init__( # noqa: PLR0913 # FIXME CoP self, datasource_name: str, data_asset_name: str, gcs_client: google.Client, bucket_or_name: str, prefix: str = "", delimiter: str = "/", max_results: Optional[int] = None, recursive_file_discovery: bool = False, file_path_template_map_fn: Optional[Callable] = None, whole_directory_path_override: PathStr | None = None, ) -> None: self._gcs_client: google.Client = gcs_client self._bucket_or_name = bucket_or_name self._prefix: str = prefix self._sanitized_prefix: str = sanitize_prefix_for_gcs_and_s3(text=prefix) self._delimiter = delimiter self._max_results = max_results self._recursive_file_discovery = recursive_file_discovery super().__init__( datasource_name=datasource_name, data_asset_name=data_asset_name, file_path_template_map_fn=file_path_template_map_fn, whole_directory_path_override=whole_directory_path_override, ) @classmethod def build_data_connector( # noqa: PLR0913 # FIXME CoP cls, 
datasource_name: str, data_asset_name: str, gcs_client: google.Client, bucket_or_name: str, prefix: str = "", delimiter: str = "/", max_results: Optional[int] = None, recursive_file_discovery: bool = False, file_path_template_map_fn: Optional[Callable] = None, whole_directory_path_override: PathStr | None = None, ) -> GoogleCloudStorageDataConnector: """Builds "GoogleCloudStorageDataConnector", which links named DataAsset to Google Cloud Storage. Args: datasource_name: The name of the Datasource associated with this "GoogleCloudStorageDataConnector" instance data_asset_name: The name of the DataAsset using this "GoogleCloudStorageDataConnector" instance gcs_client: Reference to instantiated Google Cloud Storage client handle bucket_or_name: bucket name for Google Cloud Storage prefix: GCS prefix delimiter: GCS delimiter recursive_file_discovery: Flag to indicate if files should be searched recursively from subfolders max_results: max blob filepaths to return file_path_template_map_fn: Format function mapping path to fully-qualified resource on GCS whole_directory_path_override: If present, treat entire directory as single Asset Returns: Instantiated "GoogleCloudStorageDataConnector" object """ # noqa: E501 # FIXME CoP return GoogleCloudStorageDataConnector( datasource_name=datasource_name, data_asset_name=data_asset_name, gcs_client=gcs_client, bucket_or_name=bucket_or_name, prefix=prefix, delimiter=delimiter, max_results=max_results, recursive_file_discovery=recursive_file_discovery, file_path_template_map_fn=file_path_template_map_fn, whole_directory_path_override=whole_directory_path_override, ) @classmethod def build_test_connection_error_message( cls, data_asset_name: str, bucket_or_name: str, prefix: str = "", delimiter: str = "/", recursive_file_discovery: bool = False, ) -> str: """Builds helpful error message for reporting issues when linking named DataAsset to Google Cloud Storage. 
Args: data_asset_name: The name of the DataAsset using this "GoogleCloudStorageDataConnector" instance bucket_or_name: bucket name for Google Cloud Storage prefix: GCS prefix delimiter: GCS delimiter recursive_file_discovery: Flag to indicate if files should be searched recursively from subfolders Returns: Customized error message """ # noqa: E501 # FIXME CoP test_connection_error_message_template: str = 'No file in bucket "{bucket_or_name}" with prefix "{prefix}" and recursive file discovery set to "{recursive_file_discovery}" found using delimiter "{delimiter}" for DataAsset "{data_asset_name}".' # noqa: E501 # FIXME CoP return test_connection_error_message_template.format( **{ "data_asset_name": data_asset_name, "bucket_or_name": bucket_or_name, "prefix": prefix, "delimiter": delimiter, "recursive_file_discovery": recursive_file_discovery, } ) @override def build_batch_spec(self, batch_definition: LegacyBatchDefinition) -> GCSBatchSpec: """ Build BatchSpec from batch_definition by calling DataConnector's build_batch_spec function. 
Args: batch_definition (LegacyBatchDefinition): to be used to build batch_spec Returns: BatchSpec built from batch_definition """ batch_spec: PathBatchSpec = super().build_batch_spec(batch_definition=batch_definition) return GCSBatchSpec(batch_spec) # Interface Method @override def get_data_references(self) -> List[str]: query_options: dict = { "bucket_or_name": self._bucket_or_name, "prefix": self._sanitized_prefix, "delimiter": self._delimiter, "max_results": self._max_results, } path_list: List[str] = list_gcs_keys( gcs_client=self._gcs_client, query_options=query_options, recursive=self._recursive_file_discovery, ) return path_list # Interface Method @override def _get_full_file_path(self, path: str) -> str: # If the path is already a fully qualified GCS URL (starts with gs://), return it as-is # This handles the case of whole_directory_path_override which is already fully qualified if path.startswith("gs://"): return path if self._file_path_template_map_fn is None: raise MissingFilePathTemplateMapFnError() template_arguments = { "bucket_or_name": self._bucket_or_name, "path": path, } return self._file_path_template_map_fn(**template_arguments) @override def _preprocess_batching_regex(self, regex: re.Pattern) -> re.Pattern: regex = re.compile(f"{re.escape(self._sanitized_prefix)}{regex.pattern}") return super()._preprocess_batching_regex(regex=regex) def list_gcs_keys( gcs_client, query_options: dict, recursive: bool = False, ) -> List[str]: """ Utilizes the GCS connection object to retrieve blob names based on user-provided criteria. For InferredAssetGCSDataConnector, we take `bucket_or_name` and `prefix` and search for files using RegEx at and below the level specified by those parameters. However, for ConfiguredAssetGCSDataConnector, we take `bucket_or_name` and `prefix` and search for files using RegEx only at the level specified by that bucket and prefix. 
This restriction for the ConfiguredAssetGCSDataConnector is needed because paths on GCS are comprised not only the leaf file name but the full path that includes both the prefix and the file name. Otherwise, in the situations where multiple data assets share levels of a directory tree, matching files to data assets will not be possible due to the path ambiguity. Please note that the SDK's `list_blobs` method takes in a `delimiter` key that drastically alters the traversal of a given bucket: - If a delimiter is not set (default), the traversal is recursive and the output will contain all blobs in the current directory as well as those in any nested directories. - If a delimiter is set, the traversal will continue until that value is seen; as the default is "/", traversal will be scoped within the current directory and end before visiting nested directories. In order to provide users with finer control of their config while also ensuring output that is in line with the `recursive` arg, we deem it appropriate to manually override the value of the delimiter only in cases where it is absolutely necessary. 
Args: gcs_client (storage.Client): GCS connnection object responsible for accessing bucket query_options (dict): GCS query attributes ("bucket_or_name", "prefix", "delimiter", "max_results") recursive (bool): True for InferredAssetGCSDataConnector and False for ConfiguredAssetGCSDataConnector (see above) Returns: List of keys representing GCS file paths (as filtered by the `query_options` dict) """ # noqa: E501 # FIXME CoP # Delimiter determines whether or not traversal of bucket is recursive # Manually set to appropriate default if not already set by user delimiter = query_options["delimiter"] if delimiter is None and not recursive: warnings.warn( 'In order to access blobs with a ConfiguredAssetGCSDataConnector, \ or with a Fluent datasource without enabling recursive file discovery, \ the delimiter that has been passed to gcs_options in your config cannot be empty; \ please note that the value is being set to the default "/" in order to work with the Google SDK.' # noqa: E501 # FIXME CoP ) query_options["delimiter"] = "/" elif delimiter is not None and recursive: warnings.warn( "In order to access blobs with an InferredAssetGCSDataConnector, \ or enabling recursive file discovery with a Fluent datasource, \ the delimiter that has been passed to gcs_options in your config must be empty; \ please note that the value is being set to None in order to work with the Google SDK." ) query_options["delimiter"] = None keys: List[str] = [] for blob in gcs_client.list_blobs(**query_options): name: str = blob.name if name.endswith("/"): # GCS includes directories in blob output continue keys.append(name) return keys
GoogleCloudStorageDataConnector
python
scrapy__scrapy
tests/test_exporters.py
{ "start": 6395, "end": 7531 }
class ____(TestBaseItemExporter): def _get_exporter(self, **kwargs): return PickleItemExporter(self.output, **kwargs) def _check_output(self): self._assert_expected_item(pickle.loads(self.output.getvalue())) def test_export_multiple_items(self): i1 = self.item_class(name="hello", age="world") i2 = self.item_class(name="bye", age="world") f = BytesIO() ie = PickleItemExporter(f) ie.start_exporting() ie.export_item(i1) ie.export_item(i2) ie.finish_exporting() del ie # See the first “del self.ie” in this file for context. f.seek(0) assert self.item_class(**pickle.load(f)) == i1 assert self.item_class(**pickle.load(f)) == i2 def test_nonstring_types_item(self): item = self._get_nonstring_types_item() fp = BytesIO() ie = PickleItemExporter(fp) ie.start_exporting() ie.export_item(item) ie.finish_exporting() del ie # See the first “del self.ie” in this file for context. assert pickle.loads(fp.getvalue()) == item
TestPickleItemExporter
python
doocs__leetcode
solution/1200-1299/1231.Divide Chocolate/Solution.py
{ "start": 0, "end": 513 }
class ____: def maximizeSweetness(self, sweetness: List[int], k: int) -> int: def check(x: int) -> bool: s = cnt = 0 for v in sweetness: s += v if s >= x: s = 0 cnt += 1 return cnt > k l, r = 0, sum(sweetness) while l < r: mid = (l + r + 1) >> 1 if check(mid): l = mid else: r = mid - 1 return l
Solution
python
pytorch__pytorch
torch/_dynamo/variables/dicts.py
{ "start": 40010, "end": 40341 }
class ____(ConstDictVariable): # Special class to avoid adding any guards on the nn module hook ids. def install_dict_keys_match_guard(self) -> None: pass def install_dict_contains_guard( self, tx: "InstructionTranslator", args: list[VariableTracker] ) -> None: pass
NNModuleHooksDictVariable
python
tensorflow__tensorflow
tensorflow/python/ops/special_math_ops_test.py
{ "start": 44933, "end": 46988 }
class ____(test.Benchmark): cases = [ # Unary cases. ['ijk->i', 100], ['ijk->kji', 100], # Regular matmul or batch matmul. ['ij,jk->ik', 500], ['ji,kj->ik', 500], ['bij,bjk->bik', 100], ['bji,bjk->bki', 100], ['ikl,kji->kl', 100], ['klj,lki->ij', 100], ['ijk,ilj->kli', 100], ['ijk,jklm->il', 50], # Larger binary contractions. ['efabc,eabcd->efd', 20], ['fabec,abcde->fde', 20], ['efabc,edabc->efd', 20], ['eadbf,dfebc->ecfad', 20], ['abcdef,bcdfg->abcdeg', 20], # Chain matmul. ['ij,jk,kl->il', 1000], # Long cases. Path optimization should kick in. ['ea,fb,abcd,gc,hd->efgh', 10], ['bca,cdb,dbf,afc->', 10], ['efc,dbc,acf,fd->abe', 10], ['abhe,hidj,jgba,hiab,gab->ed', 10], ] def benchmark_einsum(self): for equation, dim in self.cases: with ops.Graph().as_default(), \ session.Session(config=benchmark.benchmark_config()) as sess, \ ops.device('/cpu:0'): r = np.random.RandomState(0) input_subscripts = equation.split('->')[0].split(',') input_vars = [] for subscript in input_subscripts: input_shape = (dim,) * len(subscript) input_vars.append( variables.Variable(np.array(r.randn(*input_shape), np.float32))) self.evaluate(variables.global_variables_initializer()) if len(input_vars) <= 2: self.run_op_benchmark( sess, special_math_ops.einsum(equation, *input_vars), min_iters=50, name='einsum_cpu_({})_{}'.format(equation, dim)) else: for optimize in ['greedy', 'auto']: self.run_op_benchmark( sess, special_math_ops.einsum( equation, *input_vars, optimize=optimize), min_iters=50, name='einsum_cpu_({})_{}_{}'.format(equation, optimize, dim)) if __name__ == '__main__': test.main()
EinsumBenchmark
python
sympy__sympy
sympy/plotting/pygletplot/plot_camera.py
{ "start": 253, "end": 3928 }
class ____: min_dist = 0.05 max_dist = 500.0 min_ortho_dist = 100.0 max_ortho_dist = 10000.0 _default_dist = 6.0 _default_ortho_dist = 600.0 rot_presets = { 'xy': (0, 0, 0), 'xz': (-90, 0, 0), 'yz': (0, 90, 0), 'perspective': (-45, 0, -45) } def __init__(self, window, ortho=False): self.window = window self.axes = self.window.plot.axes self.ortho = ortho self.reset() def init_rot_matrix(self): pgl.glPushMatrix() pgl.glLoadIdentity() self._rot = get_model_matrix() pgl.glPopMatrix() def set_rot_preset(self, preset_name): self.init_rot_matrix() if preset_name not in self.rot_presets: raise ValueError( "%s is not a valid rotation preset." % preset_name) r = self.rot_presets[preset_name] self.euler_rotate(r[0], 1, 0, 0) self.euler_rotate(r[1], 0, 1, 0) self.euler_rotate(r[2], 0, 0, 1) def reset(self): self._dist = 0.0 self._x, self._y = 0.0, 0.0 self._rot = None if self.ortho: self._dist = self._default_ortho_dist else: self._dist = self._default_dist self.init_rot_matrix() def mult_rot_matrix(self, rot): pgl.glPushMatrix() pgl.glLoadMatrixf(rot) pgl.glMultMatrixf(self._rot) self._rot = get_model_matrix() pgl.glPopMatrix() def setup_projection(self): pgl.glMatrixMode(pgl.GL_PROJECTION) pgl.glLoadIdentity() if self.ortho: # yep, this is pseudo ortho (don't tell anyone) pgl.gluPerspective( 0.3, float(self.window.width)/float(self.window.height), self.min_ortho_dist - 0.01, self.max_ortho_dist + 0.01) else: pgl.gluPerspective( 30.0, float(self.window.width)/float(self.window.height), self.min_dist - 0.01, self.max_dist + 0.01) pgl.glMatrixMode(pgl.GL_MODELVIEW) def _get_scale(self): return 1.0, 1.0, 1.0 def apply_transformation(self): pgl.glLoadIdentity() pgl.glTranslatef(self._x, self._y, -self._dist) if self._rot is not None: pgl.glMultMatrixf(self._rot) pgl.glScalef(*self._get_scale()) def spherical_rotate(self, p1, p2, sensitivity=1.0): mat = get_spherical_rotatation(p1, p2, self.window.width, self.window.height, sensitivity) if mat is not None: 
self.mult_rot_matrix(mat) def euler_rotate(self, angle, x, y, z): pgl.glPushMatrix() pgl.glLoadMatrixf(self._rot) pgl.glRotatef(angle, x, y, z) self._rot = get_model_matrix() pgl.glPopMatrix() def zoom_relative(self, clicks, sensitivity): if self.ortho: dist_d = clicks * sensitivity * 50.0 min_dist = self.min_ortho_dist max_dist = self.max_ortho_dist else: dist_d = clicks * sensitivity min_dist = self.min_dist max_dist = self.max_dist new_dist = (self._dist - dist_d) if (clicks < 0 and new_dist < max_dist) or new_dist > min_dist: self._dist = new_dist def mouse_translate(self, x, y, dx, dy): pgl.glPushMatrix() pgl.glLoadIdentity() pgl.glTranslatef(0, 0, -self._dist) z = model_to_screen(0, 0, 0)[2] d = vec_subs(screen_to_model(x, y, z), screen_to_model(x - dx, y - dy, z)) pgl.glPopMatrix() self._x += d[0] self._y += d[1]
PlotCamera
python
sphinx-doc__sphinx
tests/roots/test-ext-autodoc/target/enums.py
{ "start": 4403, "end": 4534 }
class ____(enum.Enum): @property def name(self): """inherited""" return super().name
_NamePropertyInEnumMixin
python
PyCQA__pylint
tests/functional/o/overloaded_operator.py
{ "start": 73, "end": 435 }
class ____: def __init__(self, array): self.array = array def __mul__(self, val): return Myarray(val) def astype(self): return "ASTYPE", self def randint(maximum): if maximum is not None: return Myarray([1, 2, 3]) * 2 return int(5) print(randint(1).astype()) # we don't want an error for astype access
Myarray
python
scipy__scipy
scipy/stats/tests/test_multivariate.py
{ "start": 58025, "end": 78094 }
class ____: def test_bad_input(self): # Check that bad inputs raise errors num_rows = 4 num_cols = 3 df = 5 M = np.full((num_rows, num_cols), 0.3) U = 0.5 * np.identity(num_rows) + np.full((num_rows, num_rows), 0.5) V = 0.7 * np.identity(num_cols) + np.full((num_cols, num_cols), 0.3) # Nonpositive degrees of freedom with pytest.raises(ValueError, match="Degrees of freedom must be positive."): matrix_t(df=0) # Incorrect dimensions with pytest.raises(ValueError, match="Array `mean` must be 2D."): matrix_t(mean=np.zeros((5, 4, 3))) with pytest.raises(ValueError, match="Array `mean` has invalid shape."): matrix_t(mean=np.zeros((4, 3, 0))) with pytest.raises(ValueError, match="Array `row_spread` has invalid shape."): matrix_t(row_spread=np.ones((1, 0))) with pytest.raises( ValueError, match="Array `row_spread` must be a scalar or a 2D array." ): matrix_t(row_spread=np.ones((1, 2, 3))) with pytest.raises(ValueError, match="Array `row_spread` must be square."): matrix_t(row_spread=np.ones((1, 2))) with pytest.raises(ValueError, match="Array `col_spread` has invalid shape."): matrix_t(col_spread=np.ones((1, 0))) with pytest.raises( ValueError, match="Array `col_spread` must be a scalar or a 2D array." 
): matrix_t(col_spread=np.ones((1, 2, 3))) with pytest.raises(ValueError, match="Array `col_spread` must be square."): matrix_t(col_spread=np.ones((1, 2))) with pytest.raises( ValueError, match="Arrays `mean` and `row_spread` must have the same number " "of rows.", ): matrix_t(mean=M, row_spread=V) with pytest.raises( ValueError, match="Arrays `mean` and `col_spread` must have the same number " "of columns.", ): matrix_t(mean=M, col_spread=U) # Incorrect dimension of input matrix with pytest.raises( ValueError, match="The shape of array `X` is not conformal with " "the distribution parameters.", ): matrix_t.pdf(X=np.zeros((num_rows, num_rows)), mean=M) # Singular covariance for a non-frozen instance with pytest.raises( np.linalg.LinAlgError, match="2-th leading minor of the array is not positive definite", ): matrix_t.rvs(M, U, np.ones((num_cols, num_cols)), df) with pytest.raises( np.linalg.LinAlgError, match="2-th leading minor of the array is not positive definite", ): matrix_t.rvs(M, np.ones((num_rows, num_rows)), V, df) # Singular covariance for a frozen instance with pytest.raises( np.linalg.LinAlgError, match="When `allow_singular is False`, the input matrix must be " "symmetric positive definite.", ): matrix_t(M, U, np.ones((num_cols, num_cols)), df) with pytest.raises( np.linalg.LinAlgError, match="When `allow_singular is False`, the input matrix must be " "symmetric positive definite.", ): matrix_t(M, np.ones((num_rows, num_rows)), V, df) def test_default_inputs(self): # Check that default argument handling works num_rows = 4 num_cols = 3 df = 5 M = np.full((num_rows, num_cols), 0.3) U = 0.5 * np.identity(num_rows) + np.full((num_rows, num_rows), 0.5) V = 0.7 * np.identity(num_cols) + np.full((num_cols, num_cols), 0.3) Z = np.zeros((num_rows, num_cols)) Zr = np.zeros((num_rows, 1)) Zc = np.zeros((1, num_cols)) Ir = np.identity(num_rows) Ic = np.identity(num_cols) I1 = np.identity(1) dfdefault = 1 assert_equal( matrix_t.rvs(mean=M, row_spread=U, 
col_spread=V, df=df).shape, (num_rows, num_cols), ) assert_equal(matrix_t.rvs(mean=M).shape, (num_rows, num_cols)) assert_equal(matrix_t.rvs(row_spread=U).shape, (num_rows, 1)) assert_equal(matrix_t.rvs(col_spread=V).shape, (1, num_cols)) assert_equal(matrix_t.rvs(mean=M, col_spread=V).shape, (num_rows, num_cols)) assert_equal(matrix_t.rvs(mean=M, row_spread=U).shape, (num_rows, num_cols)) assert_equal( matrix_t.rvs(row_spread=U, col_spread=V).shape, (num_rows, num_cols) ) assert_equal(matrix_t().df, dfdefault) assert_equal(matrix_t(mean=M).row_spread, Ir) assert_equal(matrix_t(mean=M).col_spread, Ic) assert_equal(matrix_t(row_spread=U).mean, Zr) assert_equal(matrix_t(row_spread=U).col_spread, I1) assert_equal(matrix_t(col_spread=V).mean, Zc) assert_equal(matrix_t(col_spread=V).row_spread, I1) assert_equal(matrix_t(mean=M, row_spread=U).col_spread, Ic) assert_equal(matrix_t(mean=M, col_spread=V).row_spread, Ir) assert_equal(matrix_t(row_spread=U, col_spread=V, df=df).mean, Z) def test_covariance_expansion(self): # Check that covariance can be specified with scalar or vector num_rows = 4 num_cols = 3 df = 1 M = np.full((num_rows, num_cols), 0.3) Uv = np.full(num_rows, 0.2) Us = 0.2 Vv = np.full(num_cols, 0.1) Vs = 0.1 Ir = np.identity(num_rows) Ic = np.identity(num_cols) assert_equal( matrix_t(mean=M, row_spread=Uv, col_spread=Vv, df=df).row_spread, 0.2 * Ir ) assert_equal( matrix_t(mean=M, row_spread=Uv, col_spread=Vv, df=df).col_spread, 0.1 * Ic ) assert_equal( matrix_t(mean=M, row_spread=Us, col_spread=Vs, df=df).row_spread, 0.2 * Ir ) assert_equal( matrix_t(mean=M, row_spread=Us, col_spread=Vs, df=df).col_spread, 0.1 * Ic ) @pytest.mark.parametrize("i", range(1, 4)) @pytest.mark.parametrize("j", range(1, 4)) def test_frozen_matrix_t(self, i, j): M = np.full((i, j), 0.3) U = 0.5 * np.identity(i) + np.full((i, i), 0.5) V = 0.7 * np.identity(j) + np.full((j, j), 0.3) df = i + j frozen = matrix_t(mean=M, row_spread=U, col_spread=V, df=df) rvs1 = 
frozen.rvs(random_state=1234) rvs2 = matrix_t.rvs( mean=M, row_spread=U, col_spread=V, df=df, random_state=1234 ) assert_equal(rvs1, rvs2) X = frozen.rvs(random_state=1234) pdf1 = frozen.pdf(X) pdf2 = matrix_t.pdf(X, mean=M, row_spread=U, col_spread=V, df=df) assert_equal(pdf1, pdf2) logpdf1 = frozen.logpdf(X) logpdf2 = matrix_t.logpdf(X, mean=M, row_spread=U, col_spread=V, df=df) assert_equal(logpdf1, logpdf2) def test_array_input(self): # Check array of inputs has the same output as the separate entries. num_rows = 4 num_cols = 3 M = np.full((num_rows, num_cols), 0.3) U = 0.5 * np.identity(num_rows) + np.full((num_rows, num_rows), 0.5) V = 0.7 * np.identity(num_cols) + np.full((num_cols, num_cols), 0.3) df = 1 N = 10 frozen = matrix_t(mean=M, row_spread=U, col_spread=V, df=df) X1 = frozen.rvs(size=N, random_state=1234) X2 = frozen.rvs(size=N, random_state=4321) X = np.concatenate((X1[np.newaxis, :, :, :], X2[np.newaxis, :, :, :]), axis=0) assert_equal(X.shape, (2, N, num_rows, num_cols)) array_logpdf = frozen.logpdf(X) logpdf_shape = array_logpdf.shape assert_equal(logpdf_shape, (2, N)) for i in range(2): for j in range(N): separate_logpdf = matrix_t.logpdf( X[i, j], mean=M, row_spread=U, col_spread=V, df=df ) assert_allclose(separate_logpdf, array_logpdf[i, j], 1e-10) @staticmethod def relative_error(vec1: np.ndarray, vec2: np.ndarray): numerator = np.linalg.norm(vec1 - vec2) ** 2 denominator = np.linalg.norm(vec1) ** 2 + np.linalg.norm(vec2) ** 2 return numerator / denominator @staticmethod def matrix_divergence(mat_true: np.ndarray, mat_est: np.ndarray) -> float: mat_true_psd = _PSD(mat_true, allow_singular=False) mat_est_psd = _PSD(mat_est, allow_singular=False) if (np.exp(mat_est_psd.log_pdet) <= 0) or (np.exp(mat_true_psd.log_pdet) <= 0): return np.inf trace_term = np.trace(mat_est_psd.pinv @ mat_true) log_detratio = mat_est_psd.log_pdet - mat_true_psd.log_pdet return (trace_term + log_detratio - len(mat_true)) / 2 @staticmethod def vec(a_mat: np.ndarray) 
-> np.ndarray: """ For an (m,n) array `a_mat` the output `vec(a_mat)` is an (m*n, 1) array formed by stacking the columns of `a_mat` in the order in which they occur in `a_mat`. """ assert a_mat.ndim == 2 return a_mat.T.reshape((a_mat.size,)) def test_moments(self): r""" Gupta and Nagar (2000) Theorem 4.3.1 (p.135) -------------------------------------------- The covariance of the vectorized matrix variate t-distribution equals $ (V \otimes U) / (\text{df} - 2)$, where $\otimes$ denotes the usual Kronecker product. """ df = 5 num_rows = 4 num_cols = 3 M = np.full((num_rows, num_cols), 0.3) U = 0.5 * np.identity(num_rows) + np.full((num_rows, num_rows), 0.5) V = 0.7 * np.identity(num_cols) + np.full((num_cols, num_cols), 0.3) N = 10**4 atol = 1e-1 frozen = matrix_t(mean=M, row_spread=U, col_spread=V, df=df) X = frozen.rvs(size=N, random_state=42) relerr = self.relative_error(M, X.mean(axis=0)) assert_close(relerr, 0, atol=atol) cov_vec_true = np.kron(V, U) / (df - 2) cov_vec_rvs = np.cov(np.array([self.vec(x) for x in X]), rowvar=False) kl = self.matrix_divergence(cov_vec_true, cov_vec_rvs) assert_close(kl, 0, atol=atol) def test_pdf_against_julia(self): """ Test values generated from Julia. 
Dockerfile ---------- FROM julia:1.11.5 RUN julia -e 'using Pkg; Pkg.add("Distributions"); Pkg.add("PDMats")' WORKDIR /usr/src Commands -------- using DelimitedFiles using Distributions using PDMats using Random Random.seed!(42) ν = 5 M = [1 2 3; 4 5 6] Σ = PDMats.PDMat([1 0.5; 0.5 1]) Ω = PDMats.PDMat([1 0.3 0.2; 0.3 1 0.4; 0.2 0.4 1]) dist = MatrixTDist(ν, M, Σ, Ω) samples = rand(dist, 10) pdfs = [pdf(dist, s) for s in samples] """ df = 5 M = np.array([[1, 2, 3], [4, 5, 6]]) U = np.array([[1, 0.5], [0.5, 1]]) V = np.array([[1, 0.3, 0.2], [0.3, 1, 0.4], [0.2, 0.4, 1]]) rtol = 1e-10 samples_j = np.array( [ [ [0.958884881003464, 2.328976673167312, 2.936195396714506], [3.656388568544394, 5.677549814962506, 6.292509556719057] ], [ [0.830992685140180, 2.588946865508210, 3.310327469315906], [3.850637198786261, 5.106074165416971, 6.403143979925566] ], [ [1.572053537500711, 1.760828063560249, 2.812123062636012], [4.156334686390513, 5.075942019982631, 5.827004350136873] ], [ [1.683810860278459, 2.801203900480317, 4.054517744825265], [4.778239956376877, 5.070613721477604, 6.640349743267192] ], [ [0.443183825511296, 2.072092271247398, 3.045385527559403], [4.374387994815022, 5.083432151729137, 5.958013783940404] ], [ [0.311591337218329, 1.162836182564980, 2.562167762547456], [3.079154928756626, 4.202325496476140, 5.485839479663457] ], [ [0.943713128785340, 1.923800464789872, 2.511941262351750], [4.124882619205123, 4.889406461458511, 5.689675454116582] ], [ [1.487852512870631, 1.933859334657448, 2.681311906634522], [4.124418827930267, 5.335204598518954, 5.988120342017037] ], [ [1.002470749319751, 1.386785511789551, 2.890832331097640], [4.372884362128993, 4.729718562700068, 6.732322315921552] ], [ [1.421351511333299, 2.106946903600814, 2.654619331838720], [4.188693248790616, 5.336439611284261, 5.279121290355546] ] ] ) pdfs_j = np.array( [ 0.082798951655369, 0.119993852401118, 0.151969434727803, 0.003620324481841, 0.072538716346179, 0.027002666410192, 0.485180162388507, 
0.135740468069511, 0.013619162593841, 0.034813885519299 ] ) pdfs_py = matrix_t.pdf(samples_j, mean=M, row_spread=U, col_spread=V, df=df) assert_allclose(pdfs_j, pdfs_py, rtol=rtol) def test_pdf_against_mathematica(self): """ Test values generated from Mathematica 13.0.0 for Linux x86 (64-bit) Release ID 13.0.0.0 (7522564, 2021120311723), Patch Level 0 mu={{1,2,3},{4,5,6}}; sigma={{1,0.5},{0.5,1}}; omega={{1,0.3,0.2},{0.3,1,0.4},{0.2,0.4,1}}; df=5; sampleSize=10; SeedRandom[42]; dist=MatrixTDistribution[mu,sigma,omega,df]; samples=SetPrecision[RandomVariate[dist,sampleSize],15]; pdfs=SetPrecision[PDF[dist,#]&/@samples,15]; """ df = 5 M = np.array([[1, 2, 3], [4, 5, 6]]) U = np.array([[1, 0.5], [0.5, 1]]) V = np.array([[1, 0.3, 0.2], [0.3, 1, 0.4], [0.2, 0.4, 1]]) rtol = 1e-10 samples_m = np.array( [ [ [0.639971699425374, 2.171718671534955, 2.575826093352771], [4.031082477912233, 5.021680958526638, 6.268126154787008], ], [ [1.164842884206232, 2.526297099993045, 3.781375229865069], [3.912979114956833, 4.202714884504189, 5.661830748993523], ], [ [1.00461853907369, 2.080028751298565, 3.406489485602410], [3.993327716320432, 5.655909265966448, 6.578059791357837], ], [ [0.80625209501374, 2.529009560674907, 2.807513313302189], [3.722896768794995, 5.26987322525995, 5.801155613199776], ], [ [0.445816208657817, 3.224059910964103, 2.954990980541423], [3.451520519442941, 7.064424621385415, 5.438834195890955], ], [ [0.919232769636664, 2.374572300756703, 3.495118928313048], [3.924447237903237, 5.627654256287447, 5.806104608153957], ], [ [2.014242004090113, 1.377018127709871, 3.114064311468686], [3.88881648137925, 4.603482820518904, 5.714205489738063], ], [ [1.322000147426889, 2.602135838377777, 2.558921028724319], [4.50534702030683, 5.861137323151889, 5.181872548334852], ], [ [1.448743656862261, 2.053847557652242, 3.637321543241769], [4.097711403906707, 4.506916241403669, 5.68010653497977], ], [ [1.045187318995198, 1.645467189679729, 3.284396214544507], [3.648493466445393, 
5.004212508553601, 6.301624351328048], ], ] ) pdfs_m = np.array( [ 0.085671937131824, 0.004821273644067, 0.105978034029754, 0.174250448808208, 3.945711836053583e-05, 0.027158790350349, 0.00299095120309, 0.005594546018078, 0.025788366971310, 0.120210733598845, ] ) pdfs_py = matrix_t.pdf(samples_m, mean=M, row_spread=U, col_spread=V, df=df) assert_allclose(pdfs_m, pdfs_py, rtol=rtol) def test_samples(self): df = 5 num_rows = 4 num_cols = 3 M = np.full((num_rows, num_cols), 0.3) U = 0.5 * np.identity(num_rows) + np.full((num_rows, num_rows), 0.5) V = 0.7 * np.identity(num_cols) + np.full((num_cols, num_cols), 0.3) N = 10**4 rtol = 0.05 # `rvs` performs Cholesky-inverse-Wishart sampling on the smaller # dimension of `mean` frozen = matrix_t(mean=M, row_spread=U, col_spread=V, df=df) X = frozen.rvs(size=N, random_state=42) # column-wise rvs m = X.mean(0) frozenT = matrix_t(mean=M.T, row_spread=V, col_spread=U, df=df) XT = frozenT.rvs(size=N, random_state=42) # row-wise rvs mT = XT.mean(0) # Gupta and Nagar (2000) Theorem 4.3.3 (p.137) # -------------------------------------------- # If T follows a matrix variate t-distribution with mean M and row_spread U # and col_spread V and df degrees of freedom, then its transpose T.T follows # a matrix variate t-distribution with mean M.T and row_spread V and # col_spread U and df degrees of freedom. assert_allclose(M, m, rtol=rtol) assert_allclose(M.T, mT, rtol=rtol) assert_allclose(m, mT.T, rtol=rtol) assert_allclose(m.T, mT, rtol=rtol) @pytest.mark.parametrize("shape_case", ["row", "col"]) def test_against_multivariate_t(self, shape_case): r""" Gupta and Nagar (2000) p.133f When the number of rows or the number of columns equals 1 the matrix t reduces to the multivariate t. But, the matrix t is parameterized by raw 2nd moments whereas the multivariate t is parameterized by a covariance (raw 2nd central moment normalized by df). 
We can see the difference by comparing the author's notation $t_p(n, \omega, \mathbf{\mu}, \Sigma)$ for a matrix t with a single column to the formula (4.1.2) for the PDF of the multivariate t. """ rtol = 1e-6 df = 5 if shape_case == "row": num_rows = 1 num_cols = 3 row_spread = 1 col_spread = np.array([[1, 0.3, 0.2], [0.3, 1, 0.4], [0.2, 0.4, 1]]) shape = col_spread / df else: # shape_case == "col" num_rows = 3 num_cols = 1 row_spread = np.array([[1, 0.3, 0.2], [0.3, 1, 0.4], [0.2, 0.4, 1]]) col_spread=1 shape = row_spread / df M = np.full((num_rows, num_cols), 0.3) t_mat = matrix_t( mean=M, row_spread=row_spread, col_spread=col_spread, df=df ) t_mvt = multivariate_t(loc=M.squeeze(), shape=shape, df=df) X = t_mat.rvs(size=3, random_state=42) t_mat_logpdf = t_mat.logpdf(X) t_mvt_logpdf = t_mvt.logpdf(X.squeeze()) assert_allclose(t_mvt_logpdf, t_mat_logpdf, rtol=rtol)
TestMatrixT
python
huggingface__transformers
tests/models/timm_wrapper/test_modeling_timm_wrapper.py
{ "start": 10124, "end": 17280 }
class ____(unittest.TestCase): # some popular ones model_names_to_test = [ "vit_small_patch16_384.augreg_in21k_ft_in1k", "resnet50.a1_in1k", "tf_mobilenetv3_large_minimal_100.in1k", "swin_tiny_patch4_window7_224.ms_in1k", "ese_vovnet19b_dw.ra_in1k", "hrnet_w18.ms_aug_in1k", ] @slow def test_inference_image_classification_head(self): checkpoint = "timm/resnet18.a1_in1k" model = TimmWrapperForImageClassification.from_pretrained(checkpoint, device_map=torch_device).eval() image_processor = TimmWrapperImageProcessor.from_pretrained(checkpoint) image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the shape and logits expected_shape = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_label = 281 # tabby cat self.assertEqual(torch.argmax(outputs.logits).item(), expected_label) expectations = Expectations( { (None, None): [-11.2618, -9.6192, -10.3205], ("cuda", 8): [-11.2634, -9.6208, -10.3199], } ) expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device) resulted_slice = outputs.logits[0, :3] torch.testing.assert_close(resulted_slice, expected_slice, atol=1e-3, rtol=1e-3) @slow def test_inference_with_pipeline(self): image = prepare_img() classifier = pipeline(model="timm/resnet18.a1_in1k", device=torch_device) result = classifier(image) # verify result expected_label = "tabby, tabby cat" expected_score = 0.4329 self.assertEqual(result[0]["label"], expected_label) self.assertAlmostEqual(result[0]["score"], expected_score, places=3) @slow @require_bitsandbytes def test_inference_image_classification_quantized(self): from transformers import BitsAndBytesConfig checkpoint = "timm/vit_small_patch16_384.augreg_in21k_ft_in1k" quantization_config = BitsAndBytesConfig(load_in_8bit=True) model = TimmWrapperForImageClassification.from_pretrained( checkpoint, quantization_config=quantization_config, 
device_map=torch_device ).eval() image_processor = TimmWrapperImageProcessor.from_pretrained(checkpoint) image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the shape and logits expected_shape = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_label = 281 # tabby cat self.assertEqual(torch.argmax(outputs.logits).item(), expected_label) expectations = Expectations( { (None, None): [-2.4043, 1.4492, -0.5127], ("cuda", 8): [-2.2676, 1.5303, -0.4409], } ) expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device) resulted_slice = outputs.logits[0, :3].to(dtype=torch.float32) torch.testing.assert_close(resulted_slice, expected_slice, atol=0.1, rtol=0.1) @slow def test_transformers_model_for_classification_is_equivalent_to_timm(self): # check that wrapper logits are the same as timm model logits image = prepare_img() for model_name in self.model_names_to_test: checkpoint = f"timm/{model_name}" with self.subTest(msg=model_name): # prepare inputs image_processor = TimmWrapperImageProcessor.from_pretrained(checkpoint) pixel_values = image_processor(images=image).pixel_values.to(torch_device) # load models model = TimmWrapperForImageClassification.from_pretrained(checkpoint, device_map=torch_device).eval() timm_model = timm.create_model(model_name, pretrained=True).to(torch_device).eval() with torch.inference_mode(): outputs = model(pixel_values) timm_outputs = timm_model(pixel_values) # check shape is the same self.assertEqual(outputs.logits.shape, timm_outputs.shape) # check logits are the same diff = (outputs.logits - timm_outputs).max().item() self.assertLess(diff, 1e-4) @slow def test_transformers_model_is_equivalent_to_timm(self): # check that wrapper logits are the same as timm model logits image = prepare_img() models_to_test = ["vit_small_patch16_224.dino"] + self.model_names_to_test for 
model_name in models_to_test: checkpoint = f"timm/{model_name}" with self.subTest(msg=model_name): # prepare inputs image_processor = TimmWrapperImageProcessor.from_pretrained(checkpoint) pixel_values = image_processor(images=image).pixel_values.to(torch_device) # load models model = TimmWrapperModel.from_pretrained(checkpoint, device_map=torch_device).eval() timm_model = timm.create_model(model_name, pretrained=True, num_classes=0).to(torch_device).eval() with torch.inference_mode(): outputs = model(pixel_values) timm_outputs = timm_model(pixel_values) # check shape is the same self.assertEqual(outputs.pooler_output.shape, timm_outputs.shape) # check logits are the same diff = (outputs.pooler_output - timm_outputs).max().item() self.assertLess(diff, 1e-4) @slow def test_save_load_to_timm(self): # test that timm model can be loaded to transformers, saved and then loaded back into timm model = TimmWrapperForImageClassification.from_pretrained( "timm/resnet18.a1_in1k", num_labels=10, ignore_mismatched_sizes=True ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) # there is no direct way to load timm model from folder, use the same config + path to weights timm_model = timm.create_model( "resnet18", num_classes=10, checkpoint_path=f"{tmpdirname}/model.safetensors" ) # check that all weights are the same after reload different_weights = [] for (name1, param1), (name2, param2) in zip( model.timm_model.named_parameters(), timm_model.named_parameters() ): if param1.shape != param2.shape or not torch.equal(param1, param2): different_weights.append((name1, name2)) if different_weights: self.fail(f"Found different weights after reloading: {different_weights}")
TimmWrapperModelIntegrationTest
python
qdrant__qdrant-client
qdrant_client/http/models/models.py
{ "start": 127509, "end": 127672 }
class ____(BaseModel, extra="forbid"): type: "Snowball" = Field(..., description="") language: "SnowballLanguage" = Field(..., description="")
SnowballParams
python
pyca__cryptography
tests/hazmat/primitives/decrepit/test_algorithms.py
{ "start": 11081, "end": 11542 }
class ____: test_ofb = generate_encrypt_test( load_nist_vectors, os.path.join("ciphers", "SEED"), ["seed-ofb.txt"], lambda key, **kwargs: SEED(binascii.unhexlify(key)), lambda iv, **kwargs: OFB(binascii.unhexlify(iv)), ) @pytest.mark.supported( only_if=lambda backend: backend.cipher_supported( SEED(b"\x00" * 16), CFB(b"\x00" * 16) ), skip_message="Does not support SEED CFB", )
TestSEEDModeOFB
python
jazzband__django-polymorphic
src/polymorphic/tests/models.py
{ "start": 1121, "end": 1200 }
class ____(ModelExtraA): field2 = models.CharField(max_length=30)
ModelExtraB
python
Textualize__textual
tests/option_list/test_option_list_option_subclass.py
{ "start": 451, "end": 1314 }
class ____(App[None]): """Test option list application.""" def compose(self) -> ComposeResult: yield OptionList(*[OptionWithExtras(n) for n in range(100)]) async def test_option_list_with_subclassed_options() -> None: """It should be possible to build an option list with subclassed options.""" async with OptionListApp().run_test() as pilot: option_list = pilot.app.query_one(OptionList) assert option_list.option_count == 100 for n in range(option_list.option_count): for option in ( option_list.get_option(str(n)), option_list.get_option_at_index(n), ): assert isinstance(option, OptionWithExtras) assert option.prompt == str(n) assert option.id == str(n) assert option.test == n
OptionListApp
python
python-visualization__folium
folium/plugins/groupedlayercontrol.py
{ "start": 161, "end": 3169 }
class ____(JSCSSMixin, MacroElement): """ Create a Layer Control with groups of overlays. Parameters ---------- groups : dict A dictionary where the keys are group names and the values are lists of layer objects. e.g. { "Group 1": [layer1, layer2], "Group 2": [layer3, layer4] } exclusive_groups: bool, default True Whether to use radio buttons (default) or checkboxes. If you want to use both, use two separate instances of this class. **kwargs Additional (possibly inherited) options. See https://leafletjs.com/reference.html#control-layers """ default_js = [ ( "leaflet.groupedlayercontrol.min.js", "https://cdnjs.cloudflare.com/ajax/libs/leaflet-groupedlayercontrol/0.6.1/leaflet.groupedlayercontrol.min.js", # noqa ), ] default_css = [ ( "leaflet.groupedlayercontrol.min.css", "https://cdnjs.cloudflare.com/ajax/libs/leaflet-groupedlayercontrol/0.6.1/leaflet.groupedlayercontrol.min.css", # noqa ) ] _template = Template( """ {% macro script(this,kwargs) %} L.control.groupedLayers( null, { {%- for group_name, overlays in this.grouped_overlays.items() %} {{ group_name|tojson }} : { {%- for overlaykey, val in overlays.items() %} {{ overlaykey|tojson }} : {{val}}, {%- endfor %} }, {%- endfor %} }, {{ this.options|tojavascript }}, ).addTo({{this._parent.get_name()}}); {%- for val in this.layers_untoggle %} {{ val }}.remove(); {%- endfor %} {% endmacro %} """ ) def __init__(self, groups, exclusive_groups=True, **kwargs): super().__init__() self._name = "GroupedLayerControl" self.options = remove_empty(**kwargs) if exclusive_groups: self.options["exclusiveGroups"] = list(groups.keys()) self.layers_untoggle = set() self.grouped_overlays = {} for group_name, sublist in groups.items(): self.grouped_overlays[group_name] = {} for element in sublist: self.grouped_overlays[group_name][ element.layer_name ] = element.get_name() if not element.show: self.layers_untoggle.add(element.get_name()) # make sure the elements used in GroupedLayerControl # don't show up in the regular 
LayerControl. element.control = False if exclusive_groups: # only enable the first radio button for element in sublist[1:]: self.layers_untoggle.add(element.get_name())
GroupedLayerControl
python
keras-team__keras
keras/src/backend/torch/optimizers/torch_adamax.py
{ "start": 147, "end": 1483 }
class ____( torch_parallel_optimizer.TorchParallelOptimizer, optimizers.Adamax ): def _parallel_update_step( self, grads, variables, learning_rate, ): keras_variables = variables variables = [v.value for v in variables] dtype = variables[0].dtype lr = ops.cast(learning_rate, dtype) local_step = ops.cast(self.iterations + 1, dtype) beta_1_power = ops.power(ops.cast(self.beta_1, dtype), local_step) m_list = [ self._m[self._get_variable_index(variable)].value for variable in keras_variables ] u_list = [ self._u[self._get_variable_index(variable)].value for variable in keras_variables ] torch._foreach_mul_(m_list, self.beta_1) torch._foreach_add_(m_list, grads, alpha=1 - self.beta_1) torch._foreach_mul_(u_list, self.beta_2) torch._foreach_maximum_(u_list, torch._foreach_abs(grads)) torch._foreach_add_( variables, torch._foreach_div( torch._foreach_mul(m_list, lr), torch._foreach_mul( torch._foreach_add(u_list, self.epsilon), 1 - beta_1_power, ), ), alpha=-1, )
Adamax
python
scipy__scipy
scipy/linalg/tests/test_decomp.py
{ "start": 84082, "end": 87209 }
class ____:
    """Tests for scipy.linalg.hessenberg.

    Checks the defining similarity relation H = Q^H A Q (Q unitary, H upper
    Hessenberg) for real, complex, random, and degenerate inputs.
    """

    def test_simple(self):
        a = [[-149, -50, -154], [537, 180, 546], [-27, -9, -25]]
        # Known Hessenberg form of `a`, to 4 decimals.
        h1 = [[-149.0000, 42.2037, -156.3165],
              [-537.6783, 152.5511, -554.9272],
              [0, 0.0728, 2.4489]]
        h, q = hessenberg(a, calc_q=1)
        assert_array_almost_equal(q.T @ a @ q, h)
        assert_array_almost_equal(h, h1, decimal=4)

    def test_simple_complex(self):
        a = [[-149, -50, -154], [537, 180j, 546], [-27j, -9, -25]]
        h, q = hessenberg(a, calc_q=1)
        # Complex input: use the conjugate transpose of Q.
        assert_array_almost_equal(q.conj().T @ a @ q, h)

    def test_simple2(self):
        a = [[1, 2, 3, 4, 5, 6, 7],
             [0, 2, 3, 4, 6, 7, 2],
             [0, 2, 2, 3, 0, 3, 2],
             [0, 0, 2, 8, 0, 0, 2],
             [0, 3, 1, 2, 0, 1, 2],
             [0, 1, 2, 3, 0, 1, 0],
             [0, 0, 0, 0, 0, 1, 2]]
        h, q = hessenberg(a, calc_q=1)
        assert_array_almost_equal(q.T @ a @ q, h)

    def test_simple3(self):
        # Identity perturbed by one subdiagonal-breaking entry.
        a = np.eye(3)
        a[-1, 0] = 2
        h, q = hessenberg(a, calc_q=1)
        assert_array_almost_equal(q.T @ a @ q, h)

    def test_random(self):
        rng = np.random.RandomState(1234)
        n = 20
        for k in range(2):
            a = rng.random([n, n])
            h, q = hessenberg(a, calc_q=1)
            assert_array_almost_equal(q.T @ a @ q, h)

    def test_random_complex(self):
        rng = np.random.RandomState(1234)
        n = 20
        for k in range(2):
            a = rng.random([n, n]) + 1j*rng.random([n, n])
            h, q = hessenberg(a, calc_q=1)
            assert_array_almost_equal(q.conj().T @ a @ q, h)

    def test_check_finite(self):
        # check_finite=False skips input validation but must give the same
        # result on finite input.
        a = [[-149, -50, -154], [537, 180, 546], [-27, -9, -25]]
        h1 = [[-149.0000, 42.2037, -156.3165],
              [-537.6783, 152.5511, -554.9272],
              [0, 0.0728, 2.4489]]
        h, q = hessenberg(a, calc_q=1, check_finite=False)
        assert_array_almost_equal(q.T @ a @ q, h)
        assert_array_almost_equal(h, h1, decimal=4)

    def test_2x2(self):
        # A 2x2 matrix is already upper Hessenberg, so Q must be identity
        # and H must equal the input.
        a = [[2, 1], [7, 12]]
        h, q = hessenberg(a, calc_q=1)
        assert_array_almost_equal(q, np.eye(2))
        assert_array_almost_equal(h, a)

        b = [[2-7j, 1+2j], [7+3j, 12-2j]]
        h2, q2 = hessenberg(b, calc_q=1)
        assert_array_almost_equal(q2, np.eye(2))
        assert_array_almost_equal(h2, b)

    @pytest.mark.parametrize('dt', [int, float, float32, complex, complex64])
    def test_empty(self, dt):
        # Empty input must produce empty output with the same result dtype
        # the routine would use for a non-empty array of dtype `dt`.
        a = np.empty((0, 0), dtype=dt)
        h = hessenberg(a)
        assert h.shape == (0, 0)
        assert h.dtype == hessenberg(np.eye(3, dtype=dt)).dtype

        h, q = hessenberg(a, calc_q=True)
        # NOTE(review): h3/q3 are computed from the same empty array, so the
        # dtype comparisons below are trivially true — possibly intended to be
        # hessenberg(np.eye(3, dtype=dt), calc_q=True); confirm.
        h3, q3 = hessenberg(a, calc_q=True)
        assert h.shape == (0, 0)
        assert h.dtype == h3.dtype
        assert q.shape == (0, 0)
        assert q.dtype == q3.dtype


# Build-time BLAS metadata used by version-dependent tests.
# NOTE(review): CONFIG is defined elsewhere in this module; upstream guards
# these lookups with `if CONFIG is not None` — confirm.
blas_provider = blas_version = None
blas_provider = CONFIG['Build Dependencies']['blas']['name']
blas_version = CONFIG['Build Dependencies']['blas']['version']
TestHessenberg
python
doocs__leetcode
solution/0600-0699/0682.Baseball Game/Solution.py
{ "start": 0, "end": 391 }
class ____: def calPoints(self, operations: List[str]) -> int: stk = [] for op in operations: if op == "+": stk.append(stk[-1] + stk[-2]) elif op == "D": stk.append(stk[-1] << 1) elif op == "C": stk.pop() else: stk.append(int(op)) return sum(stk)
Solution
python
pandas-dev__pandas
pandas/tests/frame/indexing/test_indexing.py
{ "start": 54702, "end": 61419 }
class ____:
    """Tests for writing into a DataFrame's Categorical column via
    loc/iloc/at/iat: values already among the categories succeed; values
    outside the categories raise TypeError.
    """

    @pytest.fixture
    def orig(self):
        # Baseline: all-"a" categorical column plus an int column.
        cats = Categorical(["a", "a", "a", "a", "a", "a", "a"], categories=["a", "b"])
        idx = Index(["h", "i", "j", "k", "l", "m", "n"])
        values = [1, 1, 1, 1, 1, 1, 1]
        orig = DataFrame({"cats": cats, "values": values}, index=idx)
        return orig

    @pytest.fixture
    def exp_parts_cats_col(self):
        # changed part of the cats column
        cats3 = Categorical(["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"])
        idx3 = Index(["h", "i", "j", "k", "l", "m", "n"])
        values3 = [1, 1, 1, 1, 1, 1, 1]
        exp_parts_cats_col = DataFrame({"cats": cats3, "values": values3}, index=idx3)
        return exp_parts_cats_col

    @pytest.fixture
    def exp_single_cats_value(self):
        # changed single value in cats col
        cats4 = Categorical(["a", "a", "b", "a", "a", "a", "a"], categories=["a", "b"])
        idx4 = Index(["h", "i", "j", "k", "l", "m", "n"])
        values4 = [1, 1, 1, 1, 1, 1, 1]
        exp_single_cats_value = DataFrame(
            {"cats": cats4, "values": values4}, index=idx4
        )
        return exp_single_cats_value

    def test_loc_iloc_setitem_list_of_lists(self, orig, indexer_li):
        # - assign multiple rows (mixed values) -> exp_multi_row
        df = orig.copy()

        # Positional slice for iloc; label slice for loc.
        key = slice(2, 4)
        if indexer_li is tm.loc:
            key = slice("j", "k")

        indexer_li(df)[key, :] = [["b", 2], ["b", 2]]

        cats2 = Categorical(["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"])
        idx2 = Index(["h", "i", "j", "k", "l", "m", "n"])
        values2 = [1, 1, 2, 2, 1, 1, 1]
        exp_multi_row = DataFrame({"cats": cats2, "values": values2}, index=idx2)
        tm.assert_frame_equal(df, exp_multi_row)

        df = orig.copy()
        # "c" is not a valid category -> must raise.
        with pytest.raises(TypeError, match=msg1):
            indexer_li(df)[key, :] = [["c", 2], ["c", 2]]

    @pytest.mark.parametrize("indexer", [tm.loc, tm.iloc, tm.at, tm.iat])
    def test_loc_iloc_at_iat_setitem_single_value_in_categories(
        self, orig, exp_single_cats_value, indexer
    ):
        # - assign a single value -> exp_single_cats_value
        df = orig.copy()

        # Positional key for iloc/iat; label key for loc/at.
        key = (2, 0)
        if indexer in [tm.loc, tm.at]:
            key = (df.index[2], df.columns[0])

        # "b" is among the categories for df["cat"]
        indexer(df)[key] = "b"
        tm.assert_frame_equal(df, exp_single_cats_value)

        # "c" is not among the categories for df["cat"]
        with pytest.raises(TypeError, match=msg1):
            indexer(df)[key] = "c"

    def test_loc_iloc_setitem_mask_single_value_in_categories(
        self, orig, exp_single_cats_value, indexer_li
    ):
        # mask with single True
        df = orig.copy()

        mask = df.index == "j"
        key = 0
        if indexer_li is tm.loc:
            key = df.columns[key]

        indexer_li(df)[mask, key] = "b"
        tm.assert_frame_equal(df, exp_single_cats_value)

    def test_loc_iloc_setitem_full_row_non_categorical_rhs(self, orig, indexer_li):
        # - assign a complete row (mixed values) -> exp_single_row
        df = orig.copy()

        key = 2
        if indexer_li is tm.loc:
            key = df.index[2]

        # not categorical dtype, but "b" _is_ among the categories for df["cat"]
        indexer_li(df)[key, :] = ["b", 2]

        cats1 = Categorical(["a", "a", "b", "a", "a", "a", "a"], categories=["a", "b"])
        idx1 = Index(["h", "i", "j", "k", "l", "m", "n"])
        values1 = [1, 1, 2, 1, 1, 1, 1]
        exp_single_row = DataFrame({"cats": cats1, "values": values1}, index=idx1)
        tm.assert_frame_equal(df, exp_single_row)

        # "c" is not among the categories for df["cat"]
        with pytest.raises(TypeError, match=msg1):
            indexer_li(df)[key, :] = ["c", 2]

    def test_loc_iloc_setitem_partial_col_categorical_rhs(
        self, orig, exp_parts_cats_col, indexer_li
    ):
        # assign a part of a column with dtype == categorical ->
        # exp_parts_cats_col
        df = orig.copy()

        key = (slice(2, 4), 0)
        if indexer_li is tm.loc:
            key = (slice("j", "k"), df.columns[0])

        # same categories as we currently have in df["cats"]
        compat = Categorical(["b", "b"], categories=["a", "b"])
        indexer_li(df)[key] = compat
        tm.assert_frame_equal(df, exp_parts_cats_col)

        # categories do not match df["cat"]'s, but "b" is among them
        semi_compat = Categorical(list("bb"), categories=list("abc"))
        with pytest.raises(TypeError, match=msg2):
            # different categories but holdable values
            #  -> not sure if this should fail or pass
            indexer_li(df)[key] = semi_compat

        # categories do not match df["cat"]'s, and "c" is not among them
        incompat = Categorical(list("cc"), categories=list("abc"))
        with pytest.raises(TypeError, match=msg2):
            # different values
            indexer_li(df)[key] = incompat

    def test_loc_iloc_setitem_non_categorical_rhs(
        self, orig, exp_parts_cats_col, indexer_li
    ):
        # assign a part of a column with dtype != categorical -> exp_parts_cats_col
        df = orig.copy()

        key = (slice(2, 4), 0)
        if indexer_li is tm.loc:
            key = (slice("j", "k"), df.columns[0])

        # "b" is among the categories for df["cat"]
        indexer_li(df)[key] = ["b", "b"]
        tm.assert_frame_equal(df, exp_parts_cats_col)

        # "c" not part of the categories
        with pytest.raises(TypeError, match=msg1):
            indexer_li(df)[key] = ["c", "c"]

    @pytest.mark.parametrize("indexer", [tm.getitem, tm.loc, tm.iloc])
    def test_getitem_preserve_object_index_with_dates(self, indexer):
        # https://github.com/pandas-dev/pandas/pull/42950 - when selecting a column
        # from dataframe, don't try to infer object dtype index on Series construction
        idx = date_range("2012", periods=3).astype(object)
        df = DataFrame({0: [1, 2, 3]}, index=idx)
        assert df.index.dtype == object

        if indexer is tm.getitem:
            ser = indexer(df)[0]
        else:
            ser = indexer(df)[:, 0]

        assert ser.index.dtype == object

    def test_loc_on_multiindex_one_level(self):
        # GH#45779
        df = DataFrame(
            data=[[0], [1]],
            index=MultiIndex.from_tuples([("a",), ("b",)], names=["first"]),
        )
        expected = DataFrame(
            data=[[0]], index=MultiIndex.from_tuples([("a",)], names=["first"])
        )
        result = df.loc["a"]
        tm.assert_frame_equal(result, expected)
TestLocILocDataFrameCategorical