language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
ansible__ansible
lib/ansible/plugins/doc_fragments/url_windows.py
{ "start": 193, "end": 5491 }
class ____: # Common options for Ansible.ModuleUtils.WebRequest DOCUMENTATION = r""" options: method: description: - The HTTP Method of the request. type: str follow_redirects: description: - Whether or the module should follow redirects. - V(all) will follow all redirect. - V(none) will not follow any redirect. - V(safe) will follow only "safe" redirects, where "safe" means that the client is only doing a C(GET) or C(HEAD) on the URI to which it is being redirected. - When following a redirected URL, the C(Authorization) header and any credentials set will be dropped and not redirected. choices: - all - none - safe default: safe type: str headers: description: - Extra headers to set on the request. - This should be a dictionary where the key is the header name and the value is the value for that header. type: dict http_agent: description: - Header to identify as, generally appears in web server logs. - This is set to the C(User-Agent) header on a HTTP request. default: ansible-httpget type: str maximum_redirection: description: - Specify how many times the module will redirect a connection to an alternative URI before the connection fails. - If set to V(0) or O(follow_redirects) is set to V(null), or V(safe) when not doing a C(GET) or C(HEAD) it prevents all redirection. default: 50 type: int timeout: description: - Specifies how long the request can be pending before it times out (in seconds). - Set to V(0) to specify an infinite timeout. default: 30 type: int validate_certs: description: - If V(no), SSL certificates will not be validated. - This should only be used on personally controlled sites using self-signed certificates. default: yes type: bool client_cert: description: - The path to the client certificate C(.pfx) that is used for X509 authentication. This path can either be the path to the C(.pfx) on the filesystem or the PowerShell certificate path C(Cert:\CurrentUser\My\<thumbprint>). 
- The WinRM connection must be authenticated with C(CredSSP) or C(become) is used on the task if the certificate file is not password protected. - Other authentication types can set O(client_cert_password) when the cert is password protected. type: str client_cert_password: description: - The password for O(client_cert) if the cert is password protected. type: str force_basic_auth: description: - By default the authentication header is only sent when a webservice responses to an initial request with a 401 status. Since some basic auth services do not properly send a 401, logins will fail. - This option forces the sending of the Basic authentication header upon the original request. default: no type: bool url_username: description: - The username to use for authentication. type: str url_password: description: - The password for O(url_username). type: str use_default_credential: description: - Uses the current user's credentials when authenticating with a server protected with C(NTLM), C(Kerberos), or C(Negotiate) authentication. - Sites that use C(Basic) auth will still require explicit credentials through the O(url_username) and O(url_password) options. - The module will only have access to the user's credentials if using C(become) with a password, you are connecting with SSH using a password, or connecting with WinRM using C(CredSSP) or C(Kerberos with delegation). - If not using C(become) or a different auth method to the ones stated above, there will be no default credentials available and no authentication will occur. default: no type: bool use_proxy: description: - If V(no), it will not use the proxy defined in IE for the current user. default: yes type: bool proxy_url: description: - An explicit proxy to use for the request. - By default, the request will use the IE defined proxy unless O(use_proxy=no). type: str proxy_username: description: - The username to use for proxy authentication. 
type: str proxy_password: description: - The password for O(proxy_username). type: str proxy_use_default_credential: description: - Uses the current user's credentials when authenticating with a proxy host protected with C(NTLM), C(Kerberos), or C(Negotiate) authentication. - Proxies that use C(Basic) auth will still require explicit credentials through the O(proxy_username) and O(proxy_password) options. - The module will only have access to the user's credentials if using C(become) with a password, you are connecting with SSH using a password, or connecting with WinRM using C(CredSSP) or C(Kerberos with delegation). - If not using C(become) or a different auth method to the ones stated above, there will be no default credentials available and no proxy authentication will occur. default: no type: bool seealso: - module: community.windows.win_inet_proxy """
ModuleDocFragment
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/util/typing.py
{ "start": 18438, "end": 18704 }
class ____(Protocol): def __get__(self, instance: object, owner: Any) -> Any: ... def __set__(self, instance: Any, value: Any) -> None: ... def __delete__(self, instance: Any) -> None: ... _DESC = TypeVar("_DESC", bound=DescriptorProto)
DescriptorProto
python
coleifer__peewee
tests/db_tests.py
{ "start": 28208, "end": 31747 }
class ____(ModelTestCase): database = db_loader('sqlite3') requires = [Data] def test_attach(self): database = self.database Data.create(key='k1', value='v1') Data.create(key='k2', value='v2') # Attach an in-memory cache database. database.attach(':memory:', 'cache') # Clone data into the in-memory cache. class CacheData(Data): class Meta: schema = 'cache' self.assertFalse(CacheData.table_exists()) CacheData.create_table(safe=False) self.assertTrue(CacheData.table_exists()) (CacheData .insert_from(Data.select(), fields=[Data.id, Data.key, Data.value]) .execute()) # Update the source data. query = Data.update({Data.value: Data.value + '-x'}) self.assertEqual(query.execute(), 2) # Verify the source data was updated. query = Data.select(Data.key, Data.value).order_by(Data.key) self.assertSQL(query, ( 'SELECT "t1"."key", "t1"."value" ' 'FROM "main"."data" AS "t1" ' 'ORDER BY "t1"."key"'), []) self.assertEqual([v for k, v in query.tuples()], ['v1-x', 'v2-x']) # Verify the cached data reflects the original data, pre-update. query = (CacheData .select(CacheData.key, CacheData.value) .order_by(CacheData.key)) self.assertSQL(query, ( 'SELECT "t1"."key", "t1"."value" ' 'FROM "cache"."cache_data" AS "t1" ' 'ORDER BY "t1"."key"'), []) self.assertEqual([v for k, v in query.tuples()], ['v1', 'v2']) database.close() # On re-connecting, the in-memory database will re-attached. database.connect() # Cache-Data table does not exist. self.assertFalse(CacheData.table_exists()) # Double-check the sqlite master table. curs = database.execute_sql('select * from cache.sqlite_master;') self.assertEqual(curs.fetchall(), []) # Because it's in-memory, the table needs to be re-created. CacheData.create_table(safe=False) self.assertEqual(CacheData.select().count(), 0) # Original data is still there. 
self.assertEqual(Data.select().count(), 2) def test_attach_detach(self): database = self.database Data.create(key='k1', value='v1') Data.create(key='k2', value='v2') # Attach an in-memory cache database. database.attach(':memory:', 'cache') curs = database.execute_sql('select * from cache.sqlite_master') self.assertEqual(curs.fetchall(), []) self.assertFalse(database.attach(':memory:', 'cache')) self.assertRaises(OperationalError, database.attach, 'foo.db', 'cache') self.assertTrue(database.detach('cache')) self.assertFalse(database.detach('cache')) self.assertRaises(OperationalError, database.execute_sql, 'select * from cache.sqlite_master') def test_sqlite_schema_support(self): class CacheData(Data): class Meta: schema = 'cache' # Attach an in-memory cache database and create the cache table. self.database.attach(':memory:', 'cache') CacheData.create_table() tables = self.database.get_tables() self.assertEqual(tables, ['data']) tables = self.database.get_tables(schema='cache') self.assertEqual(tables, ['cache_data'])
TestAttachDatabase
python
gevent__gevent
src/greentest/3.14/test_urllib2.py
{ "start": 11898, "end": 12208 }
class ____(io.StringIO): def __init__(self, code, msg, headers, data, url=None): io.StringIO.__init__(self, data) self.code, self.msg, self.headers, self.url = code, msg, headers, url def info(self): return self.headers def geturl(self): return self.url
MockResponse
python
h5py__h5py
h5py/tests/test_file.py
{ "start": 26076, "end": 27388 }
class ____(TestCase): """ Ensure that closing a file invalidates object IDs, as appropriate """ def test_close(self): """ Closing a file invalidates any of the file's open objects """ with File(self.mktemp(), 'w') as f1: g1 = f1.create_group('foo') self.assertTrue(bool(f1.id)) self.assertTrue(bool(g1.id)) f1.close() self.assertFalse(bool(f1.id)) self.assertFalse(bool(g1.id)) with File(self.mktemp(), 'w') as f2: g2 = f2.create_group('foo') self.assertTrue(bool(f2.id)) self.assertTrue(bool(g2.id)) self.assertFalse(bool(f1.id)) self.assertFalse(bool(g1.id)) def test_close_one_handle(self): fname = self.mktemp() with File(fname, 'w') as f: f.create_group('foo') f1 = File(fname) f2 = File(fname) g1 = f1['foo'] g2 = f2['foo'] assert g1.id.valid assert g2.id.valid f1.close() assert not g1.id.valid # Closing f1 shouldn't close f2 or objects belonging to it assert f2.id.valid assert g2.id.valid f2.close() assert not f2.id.valid assert not g2.id.valid
TestCloseInvalidatesOpenObjectIDs
python
ray-project__ray
rllib/algorithms/tests/test_env_runner_failures.py
{ "start": 6074, "end": 6946 }
class ____(MultiAgentEnvRunner): """Configure EnvRunner to error in specific condition is hard. So we take a short-cut, and simply forward ping() to env.sample(). """ def ping(self) -> str: # See if Env wants to throw error. self.sample(num_timesteps=1, random_actions=True) # If there is no error raised from sample(), we simply reply pong. return super().ping() def on_algorithm_init(algorithm, **kwargs): # Add a custom module to algorithm. spec = algorithm.config.get_default_rl_module_spec() spec.observation_space = gym.spaces.Box(low=0, high=1, shape=(8,)) spec.action_space = gym.spaces.Discrete(2) spec.inference_only = True algorithm.add_module( module_id="test_module", module_spec=spec, add_to_eval_env_runners=True, )
ForwardHealthCheckToEnvWorkerMultiAgent
python
getsentry__sentry
tests/sentry/integrations/vsts/test_client.py
{ "start": 17932, "end": 23453 }
class ____(VstsIntegrationTestCase): def setUp(self) -> None: super().setUp() self.integration, _, _, _ = self.create_identity_integration( user=self.user, organization=self.organization, integration_params={ "provider": "vsts", "external_id": "vsts:1", "name": "fabrikam-fiber-inc", "metadata": { "domain_name": "https://fabrikam-fiber-inc.visualstudio.com/", "default_project": "0987654321", }, }, identity_params={ "external_id": "vsts", "data": {"access_token": self.access_token, "expires": time() + 1234567}, }, ) @responses.activate def test_integration_proxy_is_active(self) -> None: responses.add( responses.GET, "https://myvstsaccount.visualstudio.com/_apis/git/repositories/albertos-apples/commits", body=b"{}", match=[ matchers.query_param_matcher( {"commit": "b", "$top": "10"}, ), matchers.header_matcher( { "Accept": "application/json; api-version=4.1", "Content-Type": "application/json", "X-HTTP-Method-Override": "GET", "X-TFS-FedAuthRedirect": "Suppress", "Authorization": f"Bearer {self.access_token}", } ), ], ) responses.add( responses.GET, "http://controlserver/api/0/internal/integration-proxy/", body=b"{}", match=[ matchers.header_matcher( { "Accept": "application/json; api-version=4.1", "Content-Type": "application/json", "X-HTTP-Method-Override": "GET", "X-TFS-FedAuthRedirect": "Suppress", PROXY_PATH: "_apis/git/repositories/albertos-apples/commits?commit=b&%24top=10", } ), ], ) self.assert_installation() installation = get_installation_of_type( VstsIntegration, self.integration, self.organization.id ) repo = Repository.objects.create( provider="visualstudio", name="example", organization_id=self.organization.id, config={"instance": self.vsts_base_url, "project": "project-name", "name": "example"}, integration_id=self.integration.id, external_id="albertos-apples", ) assert repo.external_id is not None class ClientKwargs(TypedDict): base_url: str oauth_redirect_url: str org_integration_id: int identity_id: int | None class 
VstsProxyApiTestClient(VstsApiClient): _use_proxy_url_for_tests = True assert installation.org_integration is not None client_kwargs: ClientKwargs = { "base_url": self.vsts_base_url, "oauth_redirect_url": VstsIntegrationProvider.oauth_redirect_url, "org_integration_id": installation.org_integration.id, "identity_id": installation.org_integration.default_auth_id, } responses.calls.reset() with override_settings(SILO_MODE=SiloMode.MONOLITH): client = VstsProxyApiTestClient(**client_kwargs) client.get_commits(repo_id=repo.external_id, commit="b", limit=10) assert len(responses.calls) == 1 request = responses.calls[0].request assert ( "https://myvstsaccount.visualstudio.com/_apis/git/repositories/albertos-apples/commits?commit=b&%24top=10" == request.url ) assert client.base_url and (client.base_url.lower() in request.url) assert_proxy_request(request, is_proxy=False) responses.calls.reset() with override_settings(SILO_MODE=SiloMode.CONTROL): client = VstsProxyApiTestClient(**client_kwargs) client.get_commits(repo_id=repo.external_id, commit="b", limit=10) assert len(responses.calls) == 1 request = responses.calls[0].request assert ( "https://myvstsaccount.visualstudio.com/_apis/git/repositories/albertos-apples/commits?commit=b&%24top=10" == request.url ) assert client.base_url and (client.base_url.lower() in request.url) assert_proxy_request(request, is_proxy=False) responses.calls.reset() with override_settings(SILO_MODE=SiloMode.REGION): client = VstsProxyApiTestClient(**client_kwargs) client.get_commits(repo_id=repo.external_id, commit="b", limit=10) assert len(responses.calls) == 1 request = responses.calls[0].request assert request.url == "http://controlserver/api/0/internal/integration-proxy/" assert ( request.headers[PROXY_PATH] == "_apis/git/repositories/albertos-apples/commits?commit=b&%24top=10" ) assert client.base_url and (client.base_url.lower() not in request.url) assert_proxy_request(request, is_proxy=True)
VstsProxyApiClientTest
python
pytorch__pytorch
test/inductor/test_profiler.py
{ "start": 653, "end": 11748 }
class ____(torch._inductor.test_case.TestCase): @skipIfXpu( msg="AssertionError: False is not true, " "https://github.com/intel/torch-xpu-ops/issues/2335" ) @unittest.skipIf(not HAS_TRITON, "requires cuda & triton") def test_inductor_profiling_triton_launch(self): # Verify that we get some sort of CPU-side indication of triton kernel launches # in the profile traces. Currently, those appear as `cuLaunchKernel`. If this # detail changes, the test can be updated or removed. @torch.compile def fn(x, y): return (x + y).sin().cos() x, y = (torch.rand((4, 4), device=GPU_TYPE) for _ in range(2)) with torch.profiler.profile() as prof: fn(x, y) with TemporaryFileName(mode="w+") as fname: prof.export_chrome_trace(fname) with open(fname) as f: trace_json = json.load(f) self.assertTrue("traceEvents" in trace_json) events = trace_json["traceEvents"] valid_names = { "hipModuleLaunchKernel", "cuLaunchKernel", "triton_poi_fused_add_cos_sin_0", } self.assertTrue(any((event.get("name") in valid_names) for event in events)) def _test_profiling_kernel_names( self, fn, args, kernel_name_str: str, check_fn: Optional[Callable] = None ): """ We expect a record_function event to be added on the CPU side, surrounding the launch of each triton kernel. """ fn_opt = torch.compile(fn) for _ in range(2): fn_opt(*args) if check_fn is not None: check_fn() with torch.profiler.profile( activities=[ProfilerActivity.CPU], record_shapes=True ) as prof: fn_opt(*args) # The name of the kernel is expected to match the name of the kernel in debug # files etc. The name could change in the future, but it seems reasonable that # the name should always contain "triton" and "kernel_name_str" - e.g. if the # kernel contains a sin op, it should probably contain "str" in the name. # If this changes in the future, feel free to change the assertion here. # Debugging tips: you can add prof.export_chrome_trace("test.json") inline in # this test, and then view test.json in chrome://tracing to see the trace. 
self.assertTrue( any( ( hasattr(event, "name") and kernel_name_str in event.name and "triton" in event.name ) for event in prof.events() ) ) return prof.events() @unittest.skipIf(not HAS_TRITON, "requires cuda & triton") def test_inductor_profiling_kernel_names_pointwise(self): def fn(x, y): return (x + y).sin().cos() args = [torch.rand((4, 4), device=GPU_TYPE) for _ in range(2)] events = self._test_profiling_kernel_names(fn, args, "sin") event_found = False for event in events: if event.name == "triton_poi_fused_add_cos_sin_0": event_found = True # Note: depending on the triton version, we might get 4 or 5 args # (including / not including the constexpr args). The last two are # both empty args, so we just truncate the event.input_shapes to the # first 4. self.assertEqual(event.input_shapes[:4], [[4, 4], [4, 4], [4, 4], []]) self.assertTrue(event_found) @unittest.skipIf( not IS_BIG_GPU, "Skipping triton backend only since not big GPU (not enough SM)" ) def test_inductor_profiling_kernel_names_template(self): with config.patch( {"max_autotune": True, "max_autotune_gemm_backends": "TRITON"} ): def fn(x, y): return x @ y args = [torch.rand((4, 4), device=GPU_TYPE) for _ in range(2)] def check_fn(): # test_profiling_kernel_names will check this before asserting mm is in the trace. # reason: sometimes testing runs on machines with not enough SMs, and autotuning is skipped. if ( torch._dynamo.utils.counters["inductor"][ "select_algorithm_autotune" ] == 0 ): raise unittest.SkipTest( "select_algorithm didn't run, we probably won't get profiling data. GPU might not have enough SMs." 
) events = self._test_profiling_kernel_names(fn, args, "mm", check_fn) event_found = False for event in events: if event.name == "triton_tem_fused_mm_0": event_found = True self.assertEqual(event.input_shapes[:3], [[4, 4], [4, 4], [4, 4]]) self.assertTrue(event_found) @unittest.skipIf(not HAS_TRITON, "requires cuda & triton") def test_inductor_profiling_kernel_names_foreach(self): with config.patch( {"max_autotune": True, "max_autotune_gemm_backends": "TRITON"} ): def fn(x, y): return torch._foreach_add(x, y) x = [torch.rand((4, 4), device=GPU_TYPE) for _ in range(3)] y = [torch.rand((4, 4), device=GPU_TYPE) for _ in range(3)] args = (x, y) events = self._test_profiling_kernel_names(fn, args, "_for_") event_found = False for event in events: if event.name == "triton_for_fused_0": event_found = True self.assertTrue( event.input_shapes == [ [4, 4], [4, 4], [4, 4], [4, 4], [4, 4], [4, 4], [4, 4], [4, 4], [4, 4], ] ) self.assertTrue(event_found) @unittest.skipIf(not HAS_TRITON, "requires cuda & triton") @config.patch( "compile_threads", 1 ) # This test monkey patches global variables, which workers don't see def test_inductor_profiling_triton_hooks(self): from triton.compiler import CompiledKernel # @manual from torch._inductor.runtime.triton_compat import knobs hooks_called = {"enter": False, "exit": False} def launch_enter_hook(lazy_dict): hooks_called["enter"] = True def launch_exit_hook(lazy_dict): hooks_called["exit"] = True if knobs: knobs.runtime.launch_enter_hook = launch_enter_hook knobs.runtime.launch_exit_hook = launch_exit_hook else: CompiledKernel.launch_enter_hook = launch_enter_hook CompiledKernel.launch_exit_hook = launch_exit_hook def fn(x, y): return torch._foreach_add(x, y) x = [torch.rand((4, 4), device=GPU_TYPE) for _ in range(3)] y = [torch.rand((4, 4), device=GPU_TYPE) for _ in range(3)] args = (x, y) fn_opt = torch.compile(fn) fn_opt(*args) self.assertTrue(hooks_called["enter"]) self.assertTrue(hooks_called["exit"]) @skipIfXpu( msg="TypeError: 
list indices must be integers or slices, not str, https://github.com/intel/torch-xpu-ops/issues/2335" ) @unittest.skipIf(not HAS_TRITON, "requires cuda & triton") def test_pt2_triton_attributes(self): from torch._inductor.codecache import code_hash device = GPU_TYPE debug = False # set to True to get output file @torchdynamo.optimize("inductor") def fn(a, b, c): x = torch.nn.functional.linear(a, b) x = x + c return x.cos() a, b, c = (torch.randn(4, 4, requires_grad=True).to(device) for _ in range(3)) inputs = [a, b, c] with config.patch(compile_threads=1): fn(*inputs) fp = tempfile.NamedTemporaryFile("w+t", suffix=".json", delete=not debug) fp.close() with torch.profiler.profile( activities=torch.profiler.supported_activities(), record_shapes=True, schedule=torch.profiler.schedule( skip_first=3, wait=1, warmup=1, active=2, repeat=1 ), ) as prof: for _ in range(10): fn(*inputs) prof.step() prof.export_chrome_trace(fp.name) print(f"Trace written to {fp.name}, set debug=True to retain file.") triton_events = [] with open(fp.name) as f: trace_json = json.load(f) triton_events = [ event for event in trace_json["traceEvents"] if "kernel_backend" in event.get("args", {}) ] print(triton_events) self.assertEqual(len(triton_events), 2) def get_hash(kernel_file: str) -> str: with open(kernel_file) as f: kernel_src = f.read() return code_hash(kernel_src.strip()) def check_triton_event(e) -> None: args = e.get("args", {}) self.assertNotEqual(args, {}, msg=f"event = {e}") self.assertEqual(args["kernel_backend"], "triton", msg=f"event = {e}") self.assertTrue("stream" in args, msg=f"event = {e}") self.assertTrue("kernel_file" in args, msg=f"event = {e}") kernel_file = args["kernel_file"] self.assertTrue(os.path.isfile(kernel_file), msg=f"event = {e}") self.assertTrue("kernel_hash" in args, msg=f"event = {e}") self.assertEqual( args["kernel_hash"], get_hash(kernel_file), msg=f"event = {e}" ) self.assertTrue("kernel_kwargs" in args, msg=f"event = {e}") self.assertTrue( 
args["kernel_kwargs"].startswith("XBLOCK="), msg=f"event = {e}" ) for e in triton_events: check_triton_event(e) @unittest.skipIf(not HAS_TRITON, "requires cuda & triton") def test_cupti_lazy_reinit(self): x, y = (torch.randn(4, 4, device=GPU_TYPE) for _ in range(2)) def fn(x, y): return (x + y).sin() fn_c = torch.compile(fn, mode="reduce-overhead") with torch.profiler.profile(): fn_c(x, y) if TorchVersion(torch.version.cuda) >= "12.6": self.assertEqual("0", os.environ.get("DISABLE_CUPTI_LAZY_REINIT", "0")) else: self.assertEqual("1", os.environ.get("DISABLE_CUPTI_LAZY_REINIT", "0")) if __name__ == "__main__": from torch._inductor.test_case import run_tests if HAS_GPU_AND_TRITON: run_tests()
DynamoProfilerTests
python
tensorflow__tensorflow
tensorflow/python/saved_model/model_utils/mode_keys_test.py
{ "start": 821, "end": 2287 }
class ____(test.TestCase): def test_map(self): mode_map = mode_keys.ModeKeyMap(**{ mode_keys.KerasModeKeys.PREDICT: 3, mode_keys.KerasModeKeys.TEST: 1 }) # Test dictionary __getitem__ self.assertEqual(3, mode_map[mode_keys.KerasModeKeys.PREDICT]) self.assertEqual(3, mode_map[mode_keys.EstimatorModeKeys.PREDICT]) self.assertEqual(1, mode_map[mode_keys.KerasModeKeys.TEST]) self.assertEqual(1, mode_map[mode_keys.EstimatorModeKeys.EVAL]) with self.assertRaises(KeyError): _ = mode_map[mode_keys.KerasModeKeys.TRAIN] with self.assertRaises(KeyError): _ = mode_map[mode_keys.EstimatorModeKeys.TRAIN] with self.assertRaisesRegex(ValueError, 'Invalid mode'): _ = mode_map['serve'] # Test common dictionary methods self.assertLen(mode_map, 2) self.assertEqual({1, 3}, set(mode_map.values())) self.assertEqual( {mode_keys.KerasModeKeys.TEST, mode_keys.KerasModeKeys.PREDICT}, set(mode_map.keys())) # Map is immutable with self.assertRaises(TypeError): mode_map[mode_keys.KerasModeKeys.TEST] = 1 # pylint: disable=unsupported-assignment-operation def test_invalid_init(self): with self.assertRaisesRegex(ValueError, 'Multiple keys/values found'): _ = mode_keys.ModeKeyMap(**{ mode_keys.KerasModeKeys.PREDICT: 3, mode_keys.EstimatorModeKeys.PREDICT: 1 }) if __name__ == '__main__': test.main()
ModeKeyMapTest
python
numba__numba
numba/tests/test_ir_utils.py
{ "start": 721, "end": 796 }
class ____(object): def __init__(self, val): self.val = val
Dummy
python
arrow-py__arrow
tests/test_locales.py
{ "start": 145507, "end": 149767 }
class ____: def test_format_timeframe(self): # Now assert self.locale._format_timeframe("now", 0) == "දැන්" # Second(s) assert self.locale._format_timeframe("second", -1) == "තත්පරයක" assert self.locale._format_timeframe("second", 1) == "තත්පරයකින්" assert self.locale._format_timeframe("seconds", -30) == "තත්පර 30 ක" assert self.locale._format_timeframe("seconds", 30) == "තත්පර 30 කින්" # Minute(s) assert self.locale._format_timeframe("minute", -1) == "විනාඩියක" assert self.locale._format_timeframe("minute", 1) == "විනාඩියකින්" assert self.locale._format_timeframe("minutes", -4) == "විනාඩි 4 ක" assert self.locale._format_timeframe("minutes", 4) == "මිනිත්තු 4 කින්" # Hour(s) assert self.locale._format_timeframe("hour", -1) == "පැයක" assert self.locale._format_timeframe("hour", 1) == "පැයකින්" assert self.locale._format_timeframe("hours", -23) == "පැය 23 ක" assert self.locale._format_timeframe("hours", 23) == "පැය 23 කින්" # Day(s) assert self.locale._format_timeframe("day", -1) == "දිනක" assert self.locale._format_timeframe("day", 1) == "දිනකට" assert self.locale._format_timeframe("days", -12) == "දින 12 ක" assert self.locale._format_timeframe("days", 12) == "දින 12 කින්" # Week(s) assert self.locale._format_timeframe("week", -1) == "සතියක" assert self.locale._format_timeframe("week", 1) == "සතියකින්" assert self.locale._format_timeframe("weeks", -10) == "සති 10 ක" assert self.locale._format_timeframe("weeks", 10) == "සති 10 කින්" # Month(s) assert self.locale._format_timeframe("month", -1) == "මාසයක" assert self.locale._format_timeframe("month", 1) == "එය මාසය තුළ" assert self.locale._format_timeframe("months", -2) == "මාස 2 ක" assert self.locale._format_timeframe("months", 2) == "මාස 2 කින්" # Year(s) assert self.locale._format_timeframe("year", -1) == "වසරක" assert self.locale._format_timeframe("year", 1) == "වසරක් තුළ" assert self.locale._format_timeframe("years", -21) == "අවුරුදු 21 ක" assert self.locale._format_timeframe("years", 21) == "අවුරුදු 21 තුළ" def 
test_describe_si(self): assert self.locale.describe("second", only_distance=True) == "තත්පරයක්" assert ( self.locale.describe("second", only_distance=False) == "තත්පරයකින්" ) # (in) a second assert self.locale.describe("minute", only_distance=True) == "මිනිත්තුවක්" assert ( self.locale.describe("minute", only_distance=False) == "විනාඩියකින්" ) # (in) a minute assert self.locale.describe("hour", only_distance=True) == "පැයක්" assert self.locale.describe("hour", only_distance=False) == "පැයකින්" assert self.locale.describe("day", only_distance=True) == "දවසක්" assert self.locale.describe("day", only_distance=False) == "දිනකට" assert self.locale.describe("week", only_distance=True) == "සතියක්" assert self.locale.describe("week", only_distance=False) == "සතියකින්" assert self.locale.describe("month", only_distance=True) == "මාසයක්" assert self.locale.describe("month", only_distance=False) == "එය මාසය තුළ" assert self.locale.describe("year", only_distance=True) == "අවුරුද්දක්" assert self.locale.describe("year", only_distance=False) == "වසරක් තුළ" def test_format_relative_now(self): result = self.locale._format_relative("දැන්", "now", 0) assert result == "දැන්" def test_format_relative_future(self): result = self.locale._format_relative("පැයකින්", "පැය", 1) assert result == "පැයකින්" # (in) one hour def test_format_relative_past(self): result = self.locale._format_relative("පැයක", "පැය", -1) assert result == "පැයකට පෙර" # an hour ago def test_weekday(self): dt = arrow.Arrow(2015, 4, 11, 17, 30, 00) assert self.locale.day_name(dt.isoweekday()) == "සෙනසුරාදා" assert self.locale.day_abbreviation(dt.isoweekday()) == "අ" @pytest.mark.usefixtures("lang_locale")
TestSinhalaLocale
python
getsentry__sentry
tests/sentry/models/test_grouphistory.py
{ "start": 306, "end": 1150 }
class ____(TestCase): def test_owner(self) -> None: team = self.create_team() GroupAssignee.objects.assign(self.group, self.user) history = GroupHistory.objects.filter(group_id=self.group.id).first() assert history actor = Actor.from_id(user_id=self.user.id) assert actor history.owner = actor owner = history.owner assert owner assert owner.identifier == actor.identifier assert history.user_id == self.user.id assert history.team_id is None actor = Actor.from_id(team_id=team.id) assert actor history.owner = actor owner = history.owner assert owner assert owner.identifier == actor.identifier assert history.team_id == team.id assert history.user_id is None
GroupHistoryTest
python
Netflix__metaflow
metaflow/plugins/airflow/airflow_utils.py
{ "start": 1099, "end": 1437 }
class ____(Exception): headline = ( "Kubernetes Provider version is incompatible with Metaflow `foreach`s. " "Install the provider via " "`%s -m pip install apache-airflow-providers-cncf-kubernetes==%s`" ) % (sys.executable, KUBERNETES_PROVIDER_FOREACH_VERSION)
IncompatibleKubernetesProviderVersionException
python
conda__conda
conda/auxlib/entity.py
{ "start": 11963, "end": 17398 }
class ____: """ Fields are doing something very similar to boxing and unboxing of c#/java primitives. __set__ should take a "primitive" or "raw" value and create a "boxed" or "programmatically usable" value of it. While __get__ should return the boxed value, dump in turn should unbox the value into a primitive or raw value. Arguments: types_ (primitive literal or type or sequence of types): default (any, callable, optional): If default is callable, it's guaranteed to return a valid value at the time of Entity creation. required (boolean, optional): validation (callable, optional): dump (boolean, optional): """ # Used to track order of field declarations. Supporting python 2.7, so can't rely # on __prepare__. Strategy lifted from http://stackoverflow.com/a/4460034/2127762 _order_helper = 0 def __init__(self, default=NULL, required=True, validation=None, in_dump=True, default_in_dump=True, nullable=False, immutable=False, aliases=()): self._required = required self._validation = validation self._in_dump = in_dump self._default_in_dump = default_in_dump self._nullable = nullable self._immutable = immutable self._aliases = aliases if default is NULL: self._default = NULL else: self._default = default if callable(default) else self.box(None, None, default) self.validate(None, self.box(None, None, maybecall(default))) self._order_helper = Field._order_helper Field._order_helper += 1 @property def name(self): try: return self._name except AttributeError: log.error("The name attribute has not been set for this field. 
" "Call set_name at class creation time.") raise def set_name(self, name): self._name = name return self def __get__(self, instance, instance_type): try: if instance is None: # if calling from the class object val = getattr(instance_type, KEY_OVERRIDES_MAP)[self.name] else: val = instance.__dict__[self.name] except AttributeError: log.error("The name attribute has not been set for this field.") raise AttributeError("The name attribute has not been set for this field.") except KeyError: if self.default is NULL: raise AttributeError(f"A value for {self.name} has not been set") else: val = maybecall(self.default) # default *can* be a callable if val is None and not self.nullable: # means the "tricky edge case" was activated in __delete__ raise AttributeError(f"The {self.name} field has been deleted.") return self.unbox(instance, instance_type, val) def __set__(self, instance, val): if self.immutable and instance._initd: raise AttributeError(f"The {self.name} field is immutable.") # validate will raise an exception if invalid # validate will return False if the value should be removed instance.__dict__[self.name] = self.validate( instance, self.box(instance, instance.__class__, val), ) def __delete__(self, instance): if self.immutable and instance._initd: raise AttributeError(f"The {self.name} field is immutable.") elif self.required: raise AttributeError(f"The {self.name} field is required and cannot be deleted.") elif not self.nullable: # tricky edge case # given a field Field(default='some value', required=False, nullable=False) # works together with Entity.dump() logic for selecting fields to include in dump # `if value is not None or field.nullable` instance.__dict__[self.name] = None else: instance.__dict__.pop(self.name, None) def box(self, instance, instance_type, val): return val def unbox(self, instance, instance_type, val): return val def dump(self, instance, instance_type, val): return val def validate(self, instance, val): """ Returns: True: if val is 
valid Raises: ValidationError """ # note here calling, but not assigning; could lead to unexpected behavior if isinstance(val, self._type) and (self._validation is None or self._validation(val)): return val elif val is NULL and not self.required: return val elif val is None and self.nullable: return val else: raise ValidationError(getattr(self, 'name', 'undefined name'), val) @property def required(self): return self._required @property def type(self): return self._type @property def default(self): return self._default @property def in_dump(self): return self._in_dump @property def default_in_dump(self): return self._default_in_dump @property def nullable(self): return self.is_nullable @property def is_nullable(self): return self._nullable @property def immutable(self): return self._immutable
Field
python
kamyu104__LeetCode-Solutions
Python/buildings-with-an-ocean-view.py
{ "start": 29, "end": 399 }
class ____(object): def findBuildings(self, heights): """ :type heights: List[int] :rtype: List[int] """ result = [] for i, h in enumerate(heights): while result and heights[result[-1]] <= h: result.pop() result.append(i) return result # Time: O(n) # Space: O(1)
Solution
python
getsentry__sentry
src/sentry/workflow_engine/endpoints/serializers/workflow_group_history_serializer.py
{ "start": 777, "end": 966 }
class ____(TypedDict): group: BaseGroupSerializerResponse count: int lastTriggered: datetime eventId: str detector: NotRequired[dict[str, Any]]
WorkflowFireHistoryResponse
python
pytorch__pytorch
torch/backends/miopen/__init__.py
{ "start": 804, "end": 1208 }
class ____(PropModule): immediate = ContextProp( torch._C._get_miopen_immediate, torch._C._set_miopen_immediate ) # This is the sys.modules replacement trick, see # https://stackoverflow.com/questions/2447353/getattr-on-a-module/7668273#7668273 sys.modules[__name__] = MiopenModule(sys.modules[__name__], __name__) # Add type annotation for the replaced module immediate: bool
MiopenModule
python
pytorch__pytorch
test/test_tensorboard.py
{ "start": 4123, "end": 7469 }
class ____(BaseTestCase): def test_pytorch_np(self): tensors = [torch.rand(3, 10, 10), torch.rand(1), torch.rand(1, 2, 3, 4, 5)] for tensor in tensors: # regular tensor self.assertIsInstance(make_np(tensor), np.ndarray) # CUDA tensor if torch.cuda.is_available(): self.assertIsInstance(make_np(tensor.cuda()), np.ndarray) # regular variable self.assertIsInstance(make_np(torch.autograd.Variable(tensor)), np.ndarray) # CUDA variable if torch.cuda.is_available(): self.assertIsInstance( make_np(torch.autograd.Variable(tensor).cuda()), np.ndarray ) # python primitive type self.assertIsInstance(make_np(0), np.ndarray) self.assertIsInstance(make_np(0.1), np.ndarray) def test_pytorch_autograd_np(self): x = torch.autograd.Variable(torch.empty(1)) self.assertIsInstance(make_np(x), np.ndarray) def test_pytorch_write(self): with self.createSummaryWriter() as w: w.add_scalar("scalar", torch.autograd.Variable(torch.rand(1)), 0) def test_pytorch_histogram(self): with self.createSummaryWriter() as w: w.add_histogram("float histogram", torch.rand((50,))) w.add_histogram("int histogram", torch.randint(0, 100, (50,))) w.add_histogram("bfloat16 histogram", torch.rand(50, dtype=torch.bfloat16)) def test_pytorch_histogram_raw(self): with self.createSummaryWriter() as w: num = 50 floats = make_np(torch.rand((num,))) bins = [0.0, 0.25, 0.5, 0.75, 1.0] counts, limits = np.histogram(floats, bins) sum_sq = floats.dot(floats).item() w.add_histogram_raw( "float histogram raw", min=floats.min().item(), max=floats.max().item(), num=num, sum=floats.sum().item(), sum_squares=sum_sq, bucket_limits=limits[1:].tolist(), bucket_counts=counts.tolist(), ) ints = make_np(torch.randint(0, 100, (num,))) bins = [0, 25, 50, 75, 100] counts, limits = np.histogram(ints, bins) sum_sq = ints.dot(ints).item() w.add_histogram_raw( "int histogram raw", min=ints.min().item(), max=ints.max().item(), num=num, sum=ints.sum().item(), sum_squares=sum_sq, bucket_limits=limits[1:].tolist(), bucket_counts=counts.tolist(), ) 
ints = torch.tensor(range(100)).float() nbins = 100 counts = torch.histc(ints, bins=nbins, min=0, max=99) limits = torch.tensor(range(nbins)) sum_sq = ints.dot(ints).item() w.add_histogram_raw( "int histogram raw", min=ints.min().item(), max=ints.max().item(), num=num, sum=ints.sum().item(), sum_squares=sum_sq, bucket_limits=limits.tolist(), bucket_counts=counts.tolist(), )
TestTensorBoardPyTorchNumpy
python
pytorch__pytorch
test/test_autograd.py
{ "start": 464098, "end": 471155 }
class ____(TestCase): def assertClonedLenEqual(self, ctx, n): self.assertEqual(len(list(ctx.cloned.items())), n) def assertTIDMapLenEqual(self, ctx, n): self.assertEqual(len(list(ctx.tid_to_weakhandle.items())), n) def test_basic(self): a = torch.rand(2, 3, requires_grad=True) def fn(a): b = a.clone() out = (b**2).sum() b.sin_() out.sum().backward() return a.grad msg = ( "variables needed for gradient computation has been modified by an inplace" ) with self.assertRaisesRegex(RuntimeError, msg): fn(a) with torch.autograd.graph.allow_mutation_on_saved_tensors() as ctx: da = fn(a) self.assertTrue(torch.allclose(a * 2, da)) self.assertClonedLenEqual(ctx, 0) def test_views(self): a = torch.rand(2, 3, requires_grad=True) def fn(a): b = a.clone() c = b.view_as(b) out = (b**2).sum() # How does this work? c.sin_() out.sum().backward() return a.grad msg = ( "variables needed for gradient computation has been modified by an inplace" ) with self.assertRaisesRegex(RuntimeError, msg): fn(a) with torch.autograd.graph.allow_mutation_on_saved_tensors() as ctx: da = fn(a) self.assertClonedLenEqual(ctx, 0) self.assertTrue(torch.allclose(a * 2, da)) def test_save_base_and_modify_view(self): with torch.autograd.graph.allow_mutation_on_saved_tensors() as ctx: a = torch.rand(2, 3, requires_grad=True) b = a.clone() c = b[:1] out = b**2 # modify the view c *= 10 # self.assertClonedLenEqual(ctx, 1) out.sum().backward() self.assertClonedLenEqual(ctx, 0) self.assertClonedLenEqual(ctx, 0) self.assertTrue(torch.allclose(a * 2, a.grad)) def test_save_view_modify_base(self): with torch.autograd.graph.allow_mutation_on_saved_tensors() as ctx: a = torch.rand(2, 3, requires_grad=True) b = a.clone() c = b[:] out = (c**2).sum() b *= 2 out.backward() self.assertTrue(torch.allclose(a * 2, a.grad)) def test_double_backward(self): with torch.autograd.graph.allow_mutation_on_saved_tensors() as ctx: a = torch.rand(2, 3, requires_grad=True) b = a.clone() out = (b**2).sum() b.sin_() torch.autograd.grad(out, 
a, create_graph=True) (da,) = torch.autograd.grad(out, a, create_graph=True) (d2a,) = torch.autograd.grad(da.sum(), a) self.assertTrue(torch.allclose(torch.ones_like(a) * 2, d2a)) self.assertClonedLenEqual(ctx, 0) def test_saved_but_not_anymore(self): # Make sure we don't clone if the tensor was once saved, but # by the time we do in-place, it is no longer saved with torch.autograd.graph.allow_mutation_on_saved_tensors() as ctx: a = torch.randn(2, 3, requires_grad=True).clone() out = (a**2).sum() self.assertTIDMapLenEqual(ctx, 1) self.assertClonedLenEqual(ctx, 0) out.backward() a.sin_() self.assertClonedLenEqual(ctx, 0) out = (a**2).sum() a.sin_() self.assertClonedLenEqual(ctx, 1) del out self.assertClonedLenEqual(ctx, 0) def test_saved_same_tensor_many_times(self): # We should only clone once with torch.autograd.graph.allow_mutation_on_saved_tensors() as ctx: a = torch.randn(2, 3, requires_grad=True).clone() b = a**2 c = a**2 a.sin_() self.assertClonedLenEqual(ctx, 1) del b, c self.assertClonedLenEqual(ctx, 0) def test_saved_same_tensor_different_versions(self): with torch.autograd.graph.allow_mutation_on_saved_tensors() as ctx: a = torch.randn(2, 3, requires_grad=True).clone() b = a**2 a.sin_() c = a**2 a.sin_() self.assertClonedLenEqual(ctx, 2) del b self.assertClonedLenEqual(ctx, 1) del c self.assertClonedLenEqual(ctx, 0) def test_with_math_views(self): with torch.autograd.graph.allow_mutation_on_saved_tensors() as ctx: a = torch.tensor([1 + 1j], requires_grad=True).clone() b = a.conj() out = (b**2).sum() a.sin_() out.abs().backward() a = torch.tensor([1 + 1j], requires_grad=True).clone() b = a.conj() out = (b**2).sum() # in this case, it is no longer a view it seems b.sin_() out.abs().backward() def test_with_out_variant(self): with torch.autograd.graph.allow_mutation_on_saved_tensors() as ctx: a = torch.tensor([1.0], requires_grad=True) b = torch.tensor([1.0]) c = torch.tensor([2.0]) out = a * b self.assertTIDMapLenEqual(ctx, 1) torch.sin(c, out=b) 
self.assertClonedLenEqual(ctx, 1) out.backward() self.assertClonedLenEqual(ctx, 0) def test_backward_out_of_context(self): # Out of context with torch.autograd.graph.allow_mutation_on_saved_tensors() as ctx: a = torch.rand(2, 3, requires_grad=True) out = (a**2).sum() msg = "Trying to backward outside of the 'allow_mutation_on_saved_tensors' context" with self.assertRaisesRegex(AssertionError, msg): out.backward() # Different context with torch.autograd.graph.allow_mutation_on_saved_tensors() as ctx: a = torch.rand(2, 3, requires_grad=True) out = (a**2).sum() with torch.autograd.graph.allow_mutation_on_saved_tensors() as ctx: with self.assertRaisesRegex(AssertionError, msg): out.backward() def test_disallow_nesting(self): with torch.autograd.graph.allow_mutation_on_saved_tensors() as ctx: msg = "allow_mutation_on_saved_tensors contexts cannot be nested" with self.assertRaisesRegex(RuntimeError, msg): with torch.autograd.graph.allow_mutation_on_saved_tensors() as ctx: pass def test_inplace_foreach(self): with torch.autograd.graph.allow_mutation_on_saved_tensors(): a = [ torch.tensor(1.0, requires_grad=True), torch.tensor(1.0, requires_grad=True), ] b = torch._foreach_exp(a) torch._foreach_add_(b, 1) (b[0] + b[1]).backward() self.assertEqual([a[0].grad, a[1].grad], torch._foreach_exp(a))
TestAllowMutationOnSaved
python
doocs__leetcode
solution/2600-2699/2610.Convert an Array Into a 2D Array With Conditions/Solution.py
{ "start": 0, "end": 308 }
class ____: def findMatrix(self, nums: List[int]) -> List[List[int]]: cnt = Counter(nums) ans = [] for x, v in cnt.items(): for i in range(v): if len(ans) <= i: ans.append([]) ans[i].append(x) return ans
Solution
python
getsentry__sentry
tests/sentry/workflow_engine/test_task.py
{ "start": 832, "end": 1624 }
class ____(TestCase): def test_fetch_event_retries_on_retry_error(self) -> None: """Test that fetch_event retries when encountering RetryError.""" event_id = "test_event_id" project_id = self.project.id # Mock nodestore to fail with RetryError twice, then succeed with mock.patch("sentry.workflow_engine.tasks.utils.nodestore.backend.get") as mock_get: mock_get.side_effect = [ RetryError("retry", None), RetryError("retry", None), {"data": "test"}, ] result = fetch_event(event_id, project_id) # Should have been called 3 times (2 failures + 1 success) assert mock_get.call_count == 3 assert result is not None
FetchEventTests
python
pandas-dev__pandas
asv_bench/benchmarks/strftime.py
{ "start": 69, "end": 1669 }
class ____: timeout = 1500 params = [1000, 10000] param_names = ["nobs"] def setup(self, nobs): d = "2018-11-29" dt = "2018-11-26 11:18:27.0" self.data = pd.DataFrame( { "dt": [np.datetime64(dt)] * nobs, "d": [np.datetime64(d)] * nobs, "r": [np.random.uniform()] * nobs, } ) def time_frame_date_to_str(self, nobs): self.data["d"].astype(str) def time_frame_date_formatting_default(self, nobs): self.data["d"].dt.strftime(date_format=None) def time_frame_date_formatting_default_explicit(self, nobs): self.data["d"].dt.strftime(date_format="%Y-%m-%d") def time_frame_date_formatting_custom(self, nobs): self.data["d"].dt.strftime(date_format="%Y---%m---%d") def time_frame_datetime_to_str(self, nobs): self.data["dt"].astype(str) def time_frame_datetime_formatting_default(self, nobs): self.data["dt"].dt.strftime(date_format=None) def time_frame_datetime_formatting_default_explicit_date_only(self, nobs): self.data["dt"].dt.strftime(date_format="%Y-%m-%d") def time_frame_datetime_formatting_default_explicit(self, nobs): self.data["dt"].dt.strftime(date_format="%Y-%m-%d %H:%M:%S") def time_frame_datetime_formatting_default_with_float(self, nobs): self.data["dt"].dt.strftime(date_format="%Y-%m-%d %H:%M:%S.%f") def time_frame_datetime_formatting_custom(self, nobs): self.data["dt"].dt.strftime(date_format="%Y-%m-%d --- %H:%M:%S")
DatetimeStrftime
python
tensorflow__tensorflow
tensorflow/python/kernel_tests/array_ops/matrix_band_part_op_test.py
{ "start": 1497, "end": 2501 }
class ____(test_lib.TestCase): pass # Filled in below def _GetMatrixBandPartTest(dtype_, batch_shape_, shape_): @test_util.run_v1_only("b/120545219") def Test(self): mat = np.ones(shape_).astype(dtype_) batch_mat = np.tile(mat, batch_shape_ + (1, 1)) for lower in -1, 0, 1, shape_[-2] - 1: for upper in -1, 0, 1, shape_[-1] - 1: band_np = mat if lower >= 0: band_np = np.triu(band_np, -lower) if upper >= 0: band_np = np.tril(band_np, upper) if batch_shape_ != (): band_np = np.tile(band_np, batch_shape_ + (1, 1)) for index_dtype in [dtypes_lib.int32, dtypes_lib.int64]: with self.cached_session(use_gpu=False): band = array_ops.matrix_band_part( batch_mat, constant_op.constant(lower, index_dtype), constant_op.constant(upper, index_dtype)) self.assertAllEqual(band_np, self.evaluate(band)) return Test
MatrixBandPartTest
python
sympy__sympy
sympy/codegen/cfunctions.py
{ "start": 6450, "end": 7439 }
class ____(Function): """ Represents "fused multiply add". Explanation =========== The benefit of using ``fma(x, y, z)`` over ``x*y + z`` is that, under finite precision arithmetic, the former is supported by special instructions on some CPUs. Examples ======== >>> from sympy.abc import x, y, z >>> from sympy.codegen.cfunctions import fma >>> fma(x, y, z).diff(x) y """ nargs = 3 def fdiff(self, argindex=1): """ Returns the first derivative of this function. """ if argindex in (1, 2): return self.args[2 - argindex] elif argindex == 3: return S.One else: raise ArgumentIndexError(self, argindex) def _eval_expand_func(self, **hints): return _fma(*self.args) def _eval_rewrite_as_tractable(self, arg, limitvar=None, **kwargs): return _fma(arg) _Ten = S(10) def _log10(x): return log(x)/log(_Ten)
fma
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 289679, "end": 290162 }
class ____(sgqlc.types.Input): """Autogenerated input type of RetireSponsorsTier""" __schema__ = github_schema __field_names__ = ("tier_id", "client_mutation_id") tier_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="tierId") """The ID of the published tier to retire.""" client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId") """A unique identifier for the client performing the mutation."""
RetireSponsorsTierInput
python
mlflow__mlflow
mlflow/projects/submitted_run.py
{ "start": 222, "end": 1727 }
class ____: """ Wrapper around an MLflow project run (e.g. a subprocess running an entry point command or a Databricks job run) and exposing methods for waiting on and cancelling the run. This class defines the interface that the MLflow project runner uses to manage the lifecycle of runs launched in different environments (e.g. runs launched locally or on Databricks). ``SubmittedRun`` is not thread-safe. That is, concurrent calls to wait() / cancel() from multiple threads may inadvertently kill resources (e.g. local processes) unrelated to the run. NOTE: Subclasses of ``SubmittedRun`` must expose a ``run_id`` member containing the run's MLflow run ID. """ @abstractmethod def wait(self): """ Wait for the run to finish, returning True if the run succeeded and false otherwise. Note that in some cases (e.g. remote execution on Databricks), we may wait until the remote job completes rather than until the MLflow run completes. """ @abstractmethod def get_status(self): """ Get status of the run. """ @abstractmethod def cancel(self): """ Cancel the run (interrupts the command subprocess, cancels the Databricks run, etc) and waits for it to terminate. The MLflow run status may not be set correctly upon run cancellation. """ @property @abstractmethod def run_id(self): pass
SubmittedRun
python
altair-viz__altair
altair/vegalite/v6/schema/core.py
{ "start": 745753, "end": 746033 }
class ____(VegaLiteSchema): """MarkPropDefstringnullTypeForShape schema wrapper.""" _schema = {"$ref": "#/definitions/MarkPropDef<(string|null),TypeForShape>"} def __init__(self, *args, **kwds): super().__init__(*args, **kwds)
MarkPropDefstringnullTypeForShape
python
zarr-developers__zarr-python
src/zarr/core/dtype/common.py
{ "start": 5818, "end": 5907 }
class ____(ValueError): ... @dataclass(frozen=True, kw_only=True)
ScalarTypeValidationError
python
astropy__astropy
astropy/units/errors.py
{ "start": 1028, "end": 1129 }
class ____(AstropyWarning): """ The base class for unit-specific warnings. """
UnitsWarning
python
pypa__warehouse
tests/unit/manage/test_forms.py
{ "start": 12996, "end": 13818 }
class ____: """ Covers ConfirmPasswordForm """ def test_validate_confirm_password(self): request = pretend.stub( remote_addr=REMOTE_ADDR, banned=pretend.stub(by_ip=lambda ip_address: False) ) user_service = pretend.stub( find_userid=pretend.call_recorder(lambda userid: 1), check_password=pretend.call_recorder( lambda userid, password, tags=None: True ), ) form = forms.DeleteTOTPForm( formdata=MultiDict({"username": "username", "password": "password"}), request=request, user_service=user_service, ) assert form.request is request assert form.user_service is user_service assert form.validate(), str(form.errors)
TestDeleteTOTPForm
python
ray-project__ray
python/ray/serve/_private/benchmarks/streaming/common.py
{ "start": 507, "end": 572 }
class ____(enum.Enum): SYNC = "SYNC" ASYNC = "ASYNC"
IOMode
python
pennersr__django-allauth
allauth/socialaccount/providers/soundcloud/provider.py
{ "start": 450, "end": 930 }
class ____(OAuth2Provider): id = "soundcloud" name = "SoundCloud" account_class = SoundCloudAccount oauth2_adapter_class = SoundCloudOAuth2Adapter def extract_uid(self, data): return str(data["urn"]) def extract_common_fields(self, data): return dict( name=data.get("full_name"), username=data.get("username"), email=data.get("email"), ) provider_classes = [SoundCloudProvider]
SoundCloudProvider
python
great-expectations__great_expectations
great_expectations/expectations/window.py
{ "start": 319, "end": 611 }
class ____(pydantic.BaseModel): """ A definition for a temporal window across <`range`> number of previous invocations """ constraint_fn: str parameter_name: str range: int offset: Offset strict: bool = False class Config: extra = Extra.forbid
Window
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/classVar1.py
{ "start": 123, "end": 264 }
class ____: def __get__(self, *args: Any) -> str: return "" def __set__(self, obj: Any, value: str): pass
MyDescriptor
python
django__django
tests/i18n/test_extraction.py
{ "start": 48647, "end": 49885 }
class ____(ExtractorTests): work_subdir = "unchanged" def setUp(self): super().setUp() po_file = Path(self.PO_FILE) po_file_tmp = Path(self.PO_FILE + ".tmp") if os.name == "nt": # msgmerge outputs Windows style paths on Windows. po_contents = po_file_tmp.read_text().replace( "#: __init__.py", "#: .\\__init__.py", ) po_file.write_text(po_contents) else: po_file_tmp.rename(po_file) self.original_po_contents = po_file.read_text() def test_po_remains_unchanged(self): """PO files are unchanged unless there are new changes.""" _, po_contents = self._run_makemessages() self.assertEqual(po_contents, self.original_po_contents) def test_po_changed_with_new_strings(self): """PO files are updated when new changes are detected.""" Path("models.py.tmp").rename("models.py") _, po_contents = self._run_makemessages() self.assertNotEqual(po_contents, self.original_po_contents) self.assertMsgId( "This is a hitherto undiscovered translatable string.", po_contents, )
UnchangedPoExtractionTests
python
mlflow__mlflow
mlflow/models/evaluation/base.py
{ "start": 2485, "end": 22765 }
class ____: ''' An evaluation metric. Args: eval_fn: A function that computes the metric with the following signature: .. code-block:: python def eval_fn( predictions: pandas.Series, targets: pandas.Series, metrics: Dict[str, MetricValue], **kwargs, ) -> Union[float, MetricValue]: """ Args: predictions: A pandas Series containing the predictions made by the model. targets: (Optional) A pandas Series containing the corresponding labels for the predictions made on that input. metrics: (Optional) A dictionary containing the metrics calculated by the default evaluator. The keys are the names of the metrics and the values are the metric values. To access the MetricValue for the metrics calculated by the system, make sure to specify the type hint for this parameter as Dict[str, MetricValue]. Refer to the DefaultEvaluator behavior section for what metrics will be returned based on the type of model (i.e. classifier or regressor). kwargs: Includes a list of args that are used to compute the metric. These args could be information coming from input data, model outputs, other metrics, or parameters specified in the `evaluator_config` argument of the `mlflow.evaluate` API. Returns: MetricValue with per-row scores, per-row justifications, and aggregate results. """ ... name: The name of the metric. greater_is_better: Whether a greater value of the metric is better. long_name: (Optional) The long name of the metric. For example, ``"root_mean_squared_error"`` for ``"mse"``. version: (Optional) The metric version. For example ``v1``. metric_details: (Optional) A description of the metric and how it is calculated. metric_metadata: (Optional) A dictionary containing metadata for the metric. genai_metric_args: (Optional) A dictionary containing arguments specified by users when calling make_genai_metric or make_genai_metric_from_prompt. Those args are persisted so that we can deserialize the same metric object later. 
''' def __init__( self, eval_fn, name, greater_is_better, long_name=None, version=None, metric_details=None, metric_metadata=None, genai_metric_args=None, ): self.eval_fn = eval_fn self.name = name self.greater_is_better = greater_is_better self.long_name = long_name or name self.version = version self.metric_details = metric_details self.metric_metadata = metric_metadata self.genai_metric_args = genai_metric_args def __str__(self): parts = [f"name={self.name}, greater_is_better={self.greater_is_better}"] if self.long_name: parts.append(f"long_name={self.long_name}") if self.version: parts.append(f"version={self.version}") if self.metric_details: parts.append(f"metric_details={self.metric_details}") if self.metric_metadata: parts.append(f"metric_metadata={self.metric_metadata}") return "EvaluationMetric(" + ", ".join(parts) + ")" # NB: we need this function because we cannot modify the signature of # a class's __call__ method after the class has been defined. # This is also useful to distinguish between the metric signatures with different eval_fn signatures def _generate_eval_metric_class(eval_fn, require_strict_signature=False): """ Dynamically generate a GenAIEvaluationMetric class that can be used to evaluate the metric on the given input data. The generated class is callable with a __call__ method that takes the arguments specified in the signature of the eval_fn function. Args: eval_fn: the evaluation function of the EvaluationMetric. require_strict_signature: (Optional) Whether the eval_fn needs to follow a strict signature. If True, then the eval_fn must follow below signature: .. code-block:: python def eval_fn( predictions: "pd.Series", metrics: Dict[str, MetricValue], inputs: "pd.Series", *args, ) -> MetricValue: pass When generating a metric from `make_genai_metric`, this should be set to True. Default to False. Returns: A dynamically generated callable CallableEvaluationMetric class. 
""" from mlflow.metrics.base import MetricValue if require_strict_signature: allowed_kwargs_names = [ param_name for param_name in inspect.signature(eval_fn).parameters.keys() if param_name not in ["predictions", "metrics", "inputs"] ] def genai_call_method( self, *, predictions: pd.Series | str | list[str], inputs: pd.Series | str | list[str], metrics: dict[str, MetricValue] | None = None, **kwargs, ) -> MetricValue: if missed_kwargs := set(allowed_kwargs_names) - set(kwargs.keys()): raise MlflowException.invalid_parameter_value( f"Missing required arguments: {missed_kwargs}", ) if extra_kwargs := set(kwargs.keys()) - set(allowed_kwargs_names): raise MlflowException.invalid_parameter_value( f"Unexpected arguments: {extra_kwargs}", ) return self.eval_fn( _convert_val_to_pd_Series(predictions, "predictions"), metrics or {}, _convert_val_to_pd_Series(inputs, "inputs"), # Note: based on https://github.com/mlflow/mlflow/blob/4fef77afdbe4d76302cb0b1aad2bd72b5cde64e9/mlflow/metrics/genai/genai_metric.py#L49-L53 # the extra params passed https://github.com/mlflow/mlflow/blob/4fef77afdbe4d76302cb0b1aad2bd72b5cde64e9/mlflow/metrics/genai/genai_metric.py#L513 # should always be pandas Series *[ _convert_val_to_pd_Series(kwargs[arg_name], arg_name) for arg_name in allowed_kwargs_names ], ) genai_call_method.__signature__ = Signature( parameters=[ Parameter("self", Parameter.POSITIONAL_OR_KEYWORD), Parameter( "predictions", Parameter.KEYWORD_ONLY, annotation=pd.Series | str | list[str], ), Parameter( "inputs", Parameter.KEYWORD_ONLY, annotation=pd.Series | str | list[str], ), Parameter( "metrics", Parameter.KEYWORD_ONLY, annotation=dict[str, MetricValue] | None, default=None, ), *[ Parameter(name, Parameter.KEYWORD_ONLY, annotation=pd.Series | str | list[str]) for name in allowed_kwargs_names ], ] ) genai_call_method.__doc__ = f""" Evaluate the metric on the given inputs and predictions. Note: only keyword arguments are supported. 
Args: predictions: predictions made by the model. inputs: inputs used to make the predictions. metrics: metrics calculated by the default evaluator. kwargs: additional arguments used to compute the metric. Required arguments: {allowed_kwargs_names} Returns: evaluation result as MetricValue object. """ call_method = genai_call_method else: def _call_method( self, **kwargs, ) -> MetricValue: return self.eval_fn(**kwargs) allowed_kwargs_params = inspect.signature(eval_fn).parameters _call_method.__signature__ = Signature( parameters=[ Parameter("self", Parameter.POSITIONAL_OR_KEYWORD), *[ Parameter( name, Parameter.KEYWORD_ONLY, annotation=allowed_kwargs_params[name].annotation, ) for name in allowed_kwargs_params.keys() ], ] ) _call_method.__doc__ = f""" Evaluate the metric on the given inputs and predictions. Note: only keyword arguments are supported. Args: kwargs: additional arguments used to compute the metric. Required arguments: {list(allowed_kwargs_params.keys())} Returns: evaluation result as MetricValue object. """ call_method = _call_method return type( "CallableEvaluationMetric", (EvaluationMetric,), {"__call__": call_method}, ) def _convert_val_to_pd_Series(val, name): if val is not None and not isinstance(val, pd.Series): if isinstance(val, str): return pd.Series([val]) elif isinstance(val, list): return pd.Series(val) else: raise TypeError( f"Expected {name} to be a string, list, or Pandas Series, got {type(val)}" ) return val def make_metric( *, eval_fn, greater_is_better, name=None, long_name=None, version=None, metric_details=None, metric_metadata=None, genai_metric_args=None, ): ''' A factory function to create an :py:class:`EvaluationMetric` object. Args: eval_fn: A function that computes the metric with the following signature: .. 
code-block:: python def eval_fn( predictions: pandas.Series, targets: pandas.Series, metrics: Dict[str, MetricValue], **kwargs, ) -> Union[float, MetricValue]: """ Args: predictions: A pandas Series containing the predictions made by the model. targets: (Optional) A pandas Series containing the corresponding labels for the predictions made on that input. metrics: (Optional) A dictionary containing the metrics calculated by the default evaluator. The keys are the names of the metrics and the values are the metric values. To access the MetricValue for the metrics calculated by the system, make sure to specify the type hint for this parameter as Dict[str, MetricValue]. Refer to the DefaultEvaluator behavior section for what metrics will be returned based on the type of model (i.e. classifier or regressor). kwargs: Includes a list of args that are used to compute the metric. These args could information coming from input data, model outputs or parameters specified in the `evaluator_config` argument of the `mlflow.evaluate` API. kwargs: Includes a list of args that are used to compute the metric. These args could be information coming from input data, model outputs, other metrics, or parameters specified in the `evaluator_config` argument of the `mlflow.evaluate` API. Returns: MetricValue with per-row scores, per-row justifications, and aggregate results. """ ... greater_is_better: Whether a greater value of the metric is better. name: The name of the metric. This argument must be specified if ``eval_fn`` is a lambda function or the ``eval_fn.__name__`` attribute is not available. long_name: (Optional) The long name of the metric. For example, ``"mean_squared_error"`` for ``"mse"``. version: (Optional) The metric version. For example ``v1``. metric_details: (Optional) A description of the metric and how it is calculated. metric_metadata: (Optional) A dictionary containing metadata for the metric. 
genai_metric_args: (Optional) A dictionary containing arguments specified by users when calling make_genai_metric or make_genai_metric_from_prompt. Those args are persisted so that we can deserialize the same metric object later. .. seealso:: - :py:class:`mlflow.models.EvaluationMetric` - :py:func:`mlflow.evaluate` ''' return _make_metric( eval_fn=eval_fn, greater_is_better=greater_is_better, name=name, long_name=long_name, version=version, metric_details=metric_details, metric_metadata=metric_metadata, genai_metric_args=genai_metric_args, require_strict_signature=False, ) def _make_metric( *, eval_fn, greater_is_better, name=None, long_name=None, version=None, metric_details=None, metric_metadata=None, genai_metric_args=None, require_strict_signature=False, ): ''' A factory function to create an :py:class:`EvaluationMetric` object. Args: eval_fn: A function that computes the metric with the following signature: .. code-block:: python def eval_fn( predictions: pandas.Series, targets: pandas.Series, metrics: Dict[str, MetricValue], **kwargs, ) -> Union[float, MetricValue]: """ Args: predictions: A pandas Series containing the predictions made by the model. targets: (Optional) A pandas Series containing the corresponding labels for the predictions made on that input. metrics: (Optional) A dictionary containing the metrics calculated by the default evaluator. The keys are the names of the metrics and the values are the metric values. To access the MetricValue for the metrics calculated by the system, make sure to specify the type hint for this parameter as Dict[str, MetricValue]. Refer to the DefaultEvaluator behavior section for what metrics will be returned based on the type of model (i.e. classifier or regressor). kwargs: Includes a list of args that are used to compute the metric. These args could information coming from input data, model outputs or parameters specified in the `evaluator_config` argument of the `mlflow.evaluate` API. 
kwargs: Includes a list of args that are used to compute the metric. These args could be information coming from input data, model outputs, other metrics, or parameters specified in the `evaluator_config` argument of the `mlflow.evaluate` API. Returns: MetricValue with per-row scores, per-row justifications, and aggregate results. """ ... greater_is_better: Whether a greater value of the metric is better. name: The name of the metric. This argument must be specified if ``eval_fn`` is a lambda function or the ``eval_fn.__name__`` attribute is not available. long_name: (Optional) The long name of the metric. For example, ``"mean_squared_error"`` for ``"mse"``. version: (Optional) The metric version. For example ``v1``. metric_details: (Optional) A description of the metric and how it is calculated. metric_metadata: (Optional) A dictionary containing metadata for the metric. genai_metric_args: (Optional) A dictionary containing arguments specified by users when calling make_genai_metric or make_genai_metric_from_prompt. Those args are persisted so that we can deserialize the same metric object later. require_strict_signature: (Optional) Whether the eval_fn needs to follow a strict signature. If True, then the eval_fn must follow below signature: .. code-block:: python def eval_fn( predictions: "pd.Series", metrics: Dict[str, MetricValue], inputs: "pd.Series", *args, ) -> MetricValue: pass When generating a metric from `make_genai_metric`, this should be set to True. Default to False. .. 
seealso:: - :py:class:`mlflow.models.EvaluationMetric` - :py:func:`mlflow.evaluate` ''' if name is None: if isinstance(eval_fn, FunctionType) and eval_fn.__name__ == "<lambda>": raise MlflowException( "`name` must be specified if `eval_fn` is a lambda function.", INVALID_PARAMETER_VALUE, ) if not hasattr(eval_fn, "__name__"): raise MlflowException( "`name` must be specified if `eval_fn` does not have a `__name__` attribute.", INVALID_PARAMETER_VALUE, ) name = eval_fn.__name__ if "/" in name: raise MlflowException( f"Invalid metric name '{name}'. Metric names cannot include forward slashes ('/').", INVALID_PARAMETER_VALUE, ) if not name.isidentifier(): _logger.warning( f"The metric name '{name}' provided is not a valid Python identifier, which will " "prevent its use as a base metric for derived metrics. Please use a valid identifier " "to enable creation of derived metrics that use the given metric." ) if keyword.iskeyword(name): _logger.warning( f"The metric name '{name}' is a reserved Python keyword, which will " "prevent its use as a base metric for derived metrics. Please use a valid identifier " "to enable creation of derived metrics that use the given metric." ) if name in ["predictions", "targets", "metrics"]: _logger.warning( f"The metric name '{name}' is used as a special parameter in MLflow metrics, which " "will prevent its use as a base metric for derived metrics. Please use a different " "name to enable creation of derived metrics that use the given metric." ) return _generate_eval_metric_class(eval_fn, require_strict_signature=require_strict_signature)( eval_fn=eval_fn, name=name, greater_is_better=greater_is_better, long_name=long_name, version=version, metric_details=metric_details, metric_metadata=metric_metadata, genai_metric_args=genai_metric_args, ) @developer_stable
EvaluationMetric
python
tensorflow__tensorflow
tensorflow/python/kernel_tests/linalg/linear_operator_householder_test.py
{ "start": 1385, "end": 4633 }
class ____( linear_operator_test_util.SquareLinearOperatorDerivedClassTest): """Most tests done in the base class LinearOperatorDerivedClassTest.""" def tearDown(self): config.enable_tensor_float_32_execution(self.tf32_keep_) def setUp(self): self.tf32_keep_ = config.tensor_float_32_execution_enabled() config.enable_tensor_float_32_execution(False) @staticmethod def operator_shapes_infos(): shape_info = linear_operator_test_util.OperatorShapesInfo return [ shape_info((1, 1)), shape_info((1, 3, 3)), shape_info((3, 4, 4)), shape_info((2, 1, 4, 4))] @staticmethod def skip_these_tests(): # This linear operator is never positive definite. return ["cholesky"] def operator_and_matrix( self, build_info, dtype, use_placeholder, ensure_self_adjoint_and_pd=False): shape = list(build_info.shape) reflection_axis = linear_operator_test_util.random_sign_uniform( shape[:-1], minval=1., maxval=2., dtype=dtype) # Make sure unit norm. reflection_axis = reflection_axis / linalg_ops.norm( reflection_axis, axis=-1, keepdims=True) lin_op_reflection_axis = reflection_axis if use_placeholder: lin_op_reflection_axis = array_ops.placeholder_with_default( reflection_axis, shape=None) operator = householder.LinearOperatorHouseholder(lin_op_reflection_axis) mat = reflection_axis[..., array_ops.newaxis] matrix = -2 * math_ops.matmul(mat, mat, adjoint_b=True) matrix = array_ops.matrix_set_diag( matrix, 1. + array_ops.matrix_diag_part(matrix)) return operator, matrix def test_scalar_reflection_axis_raises(self): with self.assertRaisesRegex(ValueError, "must have at least 1 dimension"): householder.LinearOperatorHouseholder(1.) def test_householder_adjoint_type(self): reflection_axis = [1., 3., 5., 8.] operator = householder.LinearOperatorHouseholder(reflection_axis) self.assertIsInstance( operator.adjoint(), householder.LinearOperatorHouseholder) def test_householder_inverse_type(self): reflection_axis = [1., 3., 5., 8.] 
operator = householder.LinearOperatorHouseholder(reflection_axis) self.assertIsInstance( operator.inverse(), householder.LinearOperatorHouseholder) def test_tape_safe(self): reflection_axis = variables_module.Variable([1., 3., 5., 8.]) operator = householder.LinearOperatorHouseholder(reflection_axis) self.check_tape_safe( operator, skip_options=[ # Determinant hard-coded as 1. CheckTapeSafeSkipOptions.DETERMINANT, CheckTapeSafeSkipOptions.LOG_ABS_DETERMINANT, # Trace hard-coded. CheckTapeSafeSkipOptions.TRACE, ]) def test_convert_variables_to_tensors(self): reflection_axis = variables_module.Variable([1., 3., 5., 8.]) operator = householder.LinearOperatorHouseholder(reflection_axis) with self.cached_session() as sess: sess.run([reflection_axis.initializer]) self.check_convert_variables_to_tensors(operator) if __name__ == "__main__": linear_operator_test_util.add_tests(LinearOperatorHouseholderTest) test.main()
LinearOperatorHouseholderTest
python
django__django
tests/queries/tests.py
{ "start": 164490, "end": 165108 }
class ____(TestCase): def test_double_subquery_in(self): lfa1 = LeafA.objects.create(data="foo") lfa2 = LeafA.objects.create(data="bar") lfb1 = LeafB.objects.create(data="lfb1") lfb2 = LeafB.objects.create(data="lfb2") Join.objects.create(a=lfa1, b=lfb1) Join.objects.create(a=lfa2, b=lfb2) leaf_as = LeafA.objects.filter(data="foo").values_list("pk", flat=True) joins = Join.objects.filter(a__in=leaf_as).values_list("b__id", flat=True) qs = LeafB.objects.filter(pk__in=joins) self.assertSequenceEqual(qs, [lfb1])
DoubleInSubqueryTests
python
langchain-ai__langchain
libs/langchain/langchain_classic/evaluation/criteria/eval_chain.py
{ "start": 2707, "end": 5737 }
class ____(BaseOutputParser[dict]): """A parser for the output of the CriteriaEvalChain.""" @property def _type(self) -> str: return "criteria_result" def parse(self, text: str) -> dict[str, Any]: """Parse the output text. Args: text: The output text to parse. Returns: The parsed output. """ verdict = None score = None match_last = re.search(r"\s*(Y|N)\s*$", text, re.IGNORECASE) match_first = re.search(r"^\s*(Y|N)\s*", text, re.IGNORECASE) match_end = re.search(r"\b(Y|N)\b\s*$", text, re.IGNORECASE) if match_last: verdict = match_last.group(1).strip() text = text[: match_last.start()].strip() elif match_first: verdict = match_first.group(1).strip() text = text[match_first.end() :].strip() elif match_end: verdict = match_end.group(1).strip() text = text[: match_end.start()].strip() else: splits = text.strip().rsplit("\n", maxsplit=1) verdict = splits[-1] if verdict: score = ( 1 if verdict.upper() == "Y" else (0 if verdict.upper() == "N" else None) ) return { "reasoning": text.strip(), "value": verdict, "score": score, } CRITERIA_TYPE = Mapping[str, str] | Criteria | ConstitutionalPrinciple def resolve_criteria( criteria: CRITERIA_TYPE | str | None, ) -> dict[str, str]: """Resolve the criteria to evaluate. Parameters ---------- criteria : CRITERIA_TYPE The criteria to evaluate the runs against. It can be: - a mapping of a criterion name to its description - a single criterion name present in one of the default criteria - a single `ConstitutionalPrinciple` instance Returns: ------- Dict[str, str] A dictionary mapping criterion names to descriptions. 
Examples: -------- >>> criterion = "relevance" >>> CriteriaEvalChain.resolve_criteria(criteria) {'relevance': 'Is the submission referring to a real quote from the text?'} """ if criteria is None: return { "helpfulness": _SUPPORTED_CRITERIA[Criteria.HELPFULNESS], } if isinstance(criteria, Criteria): criteria_ = {criteria.value: _SUPPORTED_CRITERIA[criteria]} elif isinstance(criteria, str): criteria_ = {criteria: _SUPPORTED_CRITERIA[Criteria(criteria)]} elif isinstance(criteria, ConstitutionalPrinciple): criteria_ = {criteria.name: criteria.critique_request} else: if not criteria: msg = ( "Criteria cannot be empty. " "Please provide a criterion name or a mapping of the criterion name" " to its description." ) raise ValueError(msg) criteria_ = dict(criteria) return criteria_
CriteriaResultOutputParser
python
run-llama__llama_index
llama-index-integrations/vector_stores/llama-index-vector-stores-milvus/llama_index/vector_stores/milvus/utils.py
{ "start": 6370, "end": 7123 }
class ____(ABC): @abstractmethod def encode_queries(self, queries: List[str]) -> List[Dict[int, float]]: pass async def async_encode_queries(self, queries: List[str]) -> List[Dict[int, float]]: """ Encode queries asynchronously. Use sync method if not implemented. """ return self.encode_queries(queries) @abstractmethod def encode_documents(self, documents: List[str]) -> List[Dict[int, float]]: pass async def async_encode_documents( self, documents: List[str] ) -> List[Dict[int, float]]: """ Encode documents asynchronously. Use sync method if not implemented. """ return self.encode_documents(documents)
BaseSparseEmbeddingFunction
python
matplotlib__matplotlib
lib/mpl_toolkits/axes_grid1/axes_divider.py
{ "start": 9416, "end": 11575 }
class ____(Divider): """ The Divider class whose rectangle area is specified as a subplot geometry. """ def __init__(self, fig, *args, horizontal=None, vertical=None, aspect=None, anchor='C'): """ Parameters ---------- fig : `~matplotlib.figure.Figure` *args : tuple (*nrows*, *ncols*, *index*) or int The array of subplots in the figure has dimensions ``(nrows, ncols)``, and *index* is the index of the subplot being created. *index* starts at 1 in the upper left corner and increases to the right. If *nrows*, *ncols*, and *index* are all single digit numbers, then *args* can be passed as a single 3-digit number (e.g. 234 for (2, 3, 4)). horizontal : list of :mod:`~mpl_toolkits.axes_grid1.axes_size`, optional Sizes for horizontal division. vertical : list of :mod:`~mpl_toolkits.axes_grid1.axes_size`, optional Sizes for vertical division. aspect : bool, optional Whether overall rectangular area is reduced so that the relative part of the horizontal and vertical scales have the same scale. anchor : (float, float) or {'C', 'SW', 'S', 'SE', 'E', 'NE', 'N', \ 'NW', 'W'}, default: 'C' Placement of the reduced rectangle, when *aspect* is True. """ self.figure = fig super().__init__(fig, [0, 0, 1, 1], horizontal=horizontal or [], vertical=vertical or [], aspect=aspect, anchor=anchor) self.set_subplotspec(SubplotSpec._from_subplot_args(fig, args)) def get_position(self): """Return the bounds of the subplot box.""" return self.get_subplotspec().get_position(self.figure).bounds def get_subplotspec(self): """Get the SubplotSpec instance.""" return self._subplotspec def set_subplotspec(self, subplotspec): """Set the SubplotSpec instance.""" self._subplotspec = subplotspec self.set_position(subplotspec.get_position(self.figure))
SubplotDivider
python
pennersr__django-allauth
allauth/mfa/internal/constants.py
{ "start": 24, "end": 174 }
class ____(str, Enum): MFA_SIGNUP_WEBAUTHN = "mfa_signup_webauthn" MFA_AUTHENTICATE = "mfa_authenticate" MFA_TRUST = "mfa_trust"
LoginStageKey
python
davidhalter__jedi
test/refactor/extract_function.py
{ "start": 3617, "end": 6909 }
class ____(): a = 3 #? 11 text {'new_name': 'f'} c = f(a) # -------------------------------------------------- in-closure def x(z): def y(x): #? 15 text {'new_name': 'f'} return -x * z # ++++++++++++++++++++++++++++++++++++++++++++++++++ def f(x, z): return -x * z def x(z): def y(x): #? 15 text {'new_name': 'f'} return f(x, z) # -------------------------------------------------- with-range-1 #? 0 text {'new_name': 'a', 'until_line': 4} v1 = 3 v2 = 2 x = test(v1 + v2 * v3) # ++++++++++++++++++++++++++++++++++++++++++++++++++ #? 0 text {'new_name': 'a', 'until_line': 4} def a(test, v3): v1 = 3 v2 = 2 x = test(v1 + v2 * v3) return x x = a(test, v3) # -------------------------------------------------- with-range-2 #? 2 text {'new_name': 'a', 'until_line': 6, 'until_column': 4} #foo v1 = 3 v2 = 2 x, y = test(v1 + v2 * v3) #raaaa y # ++++++++++++++++++++++++++++++++++++++++++++++++++ #? 2 text {'new_name': 'a', 'until_line': 6, 'until_column': 4} def a(test, v3): #foo v1 = 3 v2 = 2 x, y = test(v1 + v2 * v3) #raaaa return y y = a(test, v3) y # -------------------------------------------------- with-range-3 #foo #? 2 text {'new_name': 'a', 'until_line': 5, 'until_column': 4} v1 = 3 v2 = 2 x, y = test(v1 + v2 * v3) #raaaa y # ++++++++++++++++++++++++++++++++++++++++++++++++++ #foo #? 2 text {'new_name': 'a', 'until_line': 5, 'until_column': 4} def a(test, v3): v1 = 3 v2 = 2 x, y = test(v1 + v2 * v3) return y y = a(test, v3) #raaaa y # -------------------------------------------------- with-range-func-1 import os # comment1 @dec # comment2 def x(v1): #foo #? 2 text {'new_name': 'a', 'until_line': 9, 'until_column': 5} v2 = 2 if 1: x, y = os.listdir(v1 + v2 * v3) #bar return x, y # ++++++++++++++++++++++++++++++++++++++++++++++++++ import os # comment1 def a(v1, v3): v2 = 2 if 1: x, y = os.listdir(v1 + v2 * v3) return x, y @dec # comment2 def x(v1): #foo #? 
2 text {'new_name': 'a', 'until_line': 9, 'until_column': 5} x, y = a(v1, v3) #bar return x, y # -------------------------------------------------- with-range-func-2 import os # comment1 # comment2 def x(v1): #? 2 text {'new_name': 'a', 'until_line': 10, 'until_column': 0} #foo v2 = 2 if 1: x, y = os.listdir(v1 + v2 * v3) #bar return y x # ++++++++++++++++++++++++++++++++++++++++++++++++++ import os # comment1 # comment2 def a(v1, v3): #foo v2 = 2 if 1: x, y = os.listdir(v1 + v2 * v3) #bar return y def x(v1): #? 2 text {'new_name': 'a', 'until_line': 10, 'until_column': 0} y = a(v1, v3) return y x # -------------------------------------------------- with-range-func-3 def x(v1): #? 2 text {'new_name': 'func', 'until_line': 6, 'until_column': 4} #foo v2 = 2 x = v1 * 2 y = 3 #bar return x x # ++++++++++++++++++++++++++++++++++++++++++++++++++ def func(v1): #foo v2 = 2 x = v1 * 2 return x def x(v1): #? 2 text {'new_name': 'func', 'until_line': 6, 'until_column': 4} x = func(v1) y = 3 #bar return x x # -------------------------------------------------- in-class-range-1
Ya
python
pennersr__django-allauth
allauth/account/apps.py
{ "start": 214, "end": 715 }
class ____(AppConfig): name = "allauth.account" verbose_name = _("Accounts") default_auto_field = app_settings.DEFAULT_AUTO_FIELD or "django.db.models.AutoField" def ready(self): from allauth.account import checks # noqa required_mw = "allauth.account.middleware.AccountMiddleware" if required_mw not in settings.MIDDLEWARE: raise ImproperlyConfigured( f"{required_mw} must be added to settings.MIDDLEWARE" )
AccountConfig
python
coleifer__peewee
tests/regressions.py
{ "start": 20262, "end": 20712 }
class ____(ModelTestCase): requires = [TS] def test_zero_timestamp(self): t0 = TS.create(key='t0', timestamp=0) t1 = TS.create(key='t1', timestamp=1) t0_db = TS.get(TS.key == 't0') self.assertEqual(t0_db.timestamp, datetime.datetime(1970, 1, 1)) t1_db = TS.get(TS.key == 't1') self.assertEqual(t1_db.timestamp, datetime.datetime(1970, 1, 1, 0, 0, 1))
TestZeroTimestamp
python
getsentry__sentry
src/sentry/integrations/messaging/linkage.py
{ "start": 14972, "end": 21796 }
class ____(TeamLinkageView, ABC): @property def metrics_operation_key(self) -> str: return "link_team_view" def execute( self, request: HttpRequest, integration: RpcIntegration, params: Mapping[str, Any] ) -> HttpResponseBase: from sentry.integrations.slack.analytics import SlackIntegrationIdentityLinked from sentry.integrations.slack.views.link_team import ( SUCCESS_LINKED_MESSAGE, SUCCESS_LINKED_TITLE, SelectTeamForm, ) user = serialize_generic_user(request.user) if user is None: raise TypeError("Cannot link team without a logged-in user") channel_id: str = params["channel_id"] channel_name: str = params["channel_name"] slack_id: str = params["slack_id"] logger_params = { "user_id": user.id, "integration_id": integration.id, "channel_id": channel_id, "channel_name": channel_name, "slack_id": slack_id, "response_url": params["response_url"], } teams_by_id = {team.id: team for team in self._get_teams(integration, user)} if not teams_by_id: logger.info("team.no_teams_found", extra=logger_params) self.capture_metric("failure.get_teams") return self.render_error_page( request, status=404, body_text="HTTP 404: No teams found in your organizations to link. 
You must be a Sentry organization admin/manager/owner or a team admin to link a team in your respective organization.", ) form = SelectTeamForm(list(teams_by_id.values()), request.POST or None) if request.method == "GET": return self.respond( "sentry/integrations/slack/link-team.html", { "form": form, "teams": teams_by_id.values(), "channel_name": channel_name, "provider": integration.get_provider(), }, ) if not form.is_valid(): logger.info("form.invalid", extra={**logger_params, "form_errors": form.errors}) self.capture_metric("failure.form_invalid") return self.render_error_page(request, status=400, body_text="HTTP 400: Bad request") team_id = int(form.cleaned_data["team"]) team = teams_by_id.get(team_id) if not team: logger.info("team.not_found", extra={"team_id": team_id}) self.capture_metric("failure.team_not_found") return self.render_error_page( request, status=404, body_text="HTTP 404: Team does not exist or you do not have sufficient permission to link a team", ) logger_params["team_id"] = team.id idp = identity_service.get_provider( provider_type=IntegrationProviderSlug.SLACK.value, provider_ext_id=integration.external_id, ) logger_params["provider_ext_id"] = integration.external_id if idp is None: logger.info("identity_provider.not_found", extra=logger_params) self.capture_metric("failure.identity_provider_not_found") return self.render_error_page( request, status=403, body_text="HTTP 403: Invalid team ID" ) ident = identity_service.get_identity( filter={"provider_id": idp.id, "identity_ext_id": slack_id} ) if not ident: logger.info("identity.not_found", extra=logger_params) self.capture_metric("failure.identity_not_found") return self.render_error_page( request, status=403, body_text="HTTP 403: User identity does not exist" ) _, created = ExternalActor.objects.get_or_create( team_id=team.id, organization=team.organization, integration_id=integration.id, provider=self.provider.value, defaults=dict( external_name=channel_name, external_id=channel_id, ), ) 
try: analytics.record( SlackIntegrationIdentityLinked( provider=self.provider_slug, actor_id=team.id, actor_type="team", ) ) except Exception as e: sentry_sdk.capture_exception(e) if not created: self.capture_metric("failure.team_already_linked") return self.notify_team_already_linked(request, channel_id, integration, team) notifications_service.enable_all_settings_for_provider( external_provider=self.external_provider_enum, team_id=team.id, types=[NotificationSettingEnum.ISSUE_ALERTS], ) message = SUCCESS_LINKED_MESSAGE.format( slug=team.slug, workflow_addon="", channel_name=channel_name, ) self.notify_on_success(channel_id, integration, message) self.capture_metric("success") return render_to_response( "sentry/integrations/slack/post-linked-team.html", request=request, context={ "heading_text": SUCCESS_LINKED_TITLE, "body_text": message, "channel_id": channel_id, "team_id": integration.external_id, }, ) def _get_teams(self, integration: RpcIntegration, user: RpcUser) -> Iterable[Team]: organization_memberships = OrganizationMember.objects.get_for_integration(integration, user) # Filter to teams where we have write access to, either through having a sufficient # organization role (owner/manager/admin) or by being a team admin on at least one team. for org_membership in organization_memberships: # Setting is_team_admin to True only returns teams that member is team admin on. # We only want to filter for this when the user does not have a sufficient # role in the org, which is checked using is_valid_role. 
is_team_admin = not self.is_valid_role(org_membership) yield from Team.objects.get_for_user( org_membership.organization, user, is_team_admin=is_team_admin ) @abstractmethod def notify_on_success(self, channel_id: str, integration: RpcIntegration, message: str) -> None: raise NotImplementedError @abstractmethod def notify_team_already_linked( self, request: HttpRequest, channel_id: str, integration: RpcIntegration, team: Team ) -> HttpResponse: raise NotImplementedError
LinkTeamView
python
jazzband__django-model-utils
tests/models.py
{ "start": 12451, "end": 12725 }
class ____(models.IntegerField): def contribute_to_class(self, cls: type[models.Model], name: str, *args: Any, **kwargs: Any) -> None: super().contribute_to_class(cls, name, *args, **kwargs) setattr(cls, name, StringyDescriptor(name))
CustomDescriptorField
python
django__django
tests/test_client_regress/tests.py
{ "start": 14618, "end": 28539 }
class ____(ExtraAssertMixin, SimpleTestCase): def test_redirect_page(self): """ An assertion is raised if the original page couldn't be retrieved as expected """ # This page will redirect with code 301, not 302 response = self.client.get("/permanent_redirect_view/") try: self.assertRedirects(response, "/get_view/") except AssertionError as e: self.assertIn( "Response didn't redirect as expected: Response code was 301 " "(expected 302)", str(e), ) try: self.assertRedirects(response, "/get_view/", msg_prefix="abc") except AssertionError as e: self.assertIn( "abc: Response didn't redirect as expected: Response code was 301 " "(expected 302)", str(e), ) def test_followed_redirect_unexpected_initial_status_code(self): response = self.client.get("/permanent_redirect_view/", follow=True) msg = ( "Initial response didn't redirect as expected: Response code was 301 " "(expected 302)" ) self.assertRaisesPrefixedMessage( self.assertRedirects, response, "/get_view/", expected_msg=msg, ) def test_followed_redirect_unexpected_final_status_code(self): response = self.client.get("/redirect_view/", follow=True) msg = ( "Response didn't redirect as expected: Final Response code was 200 " "(expected 403)" ) self.assertRaisesPrefixedMessage( self.assertRedirects, response, "/get_view/", status_code=302, target_status_code=403, expected_msg=msg, ) def test_lost_query(self): """ An assertion is raised if the redirect location doesn't preserve GET parameters. 
""" response = self.client.get("/redirect_view/", {"var": "value"}) try: self.assertRedirects(response, "/get_view/") except AssertionError as e: self.assertIn( "Response redirected to '/get_view/?var=value', expected '/get_view/'", str(e), ) try: self.assertRedirects(response, "/get_view/", msg_prefix="abc") except AssertionError as e: self.assertIn( "abc: Response redirected to '/get_view/?var=value', expected " "'/get_view/'", str(e), ) def test_incorrect_target(self): "An assertion is raised if the response redirects to another target" response = self.client.get("/permanent_redirect_view/") try: # Should redirect to get_view self.assertRedirects(response, "/some_view/") except AssertionError as e: self.assertIn( "Response didn't redirect as expected: Response code was 301 " "(expected 302)", str(e), ) def test_target_page(self): """ An assertion is raised if the response redirect target cannot be retrieved as expected. """ response = self.client.get("/double_redirect_view/") try: # The redirect target responds with a 301 code, not 200 self.assertRedirects(response, "http://testserver/permanent_redirect_view/") except AssertionError as e: self.assertIn( "Couldn't retrieve redirection page '/permanent_redirect_view/': " "response code was 301 (expected 200)", str(e), ) try: # The redirect target responds with a 301 code, not 200 self.assertRedirects( response, "http://testserver/permanent_redirect_view/", msg_prefix="abc" ) except AssertionError as e: self.assertIn( "abc: Couldn't retrieve redirection page '/permanent_redirect_view/': " "response code was 301 (expected 200)", str(e), ) def test_redirect_chain(self): "You can follow a redirect chain of multiple redirects" response = self.client.get("/redirects/further/more/", {}, follow=True) self.assertRedirects( response, "/no_template_view/", status_code=302, target_status_code=200 ) self.assertEqual(len(response.redirect_chain), 1) self.assertEqual(response.redirect_chain[0], ("/no_template_view/", 302)) def 
test_multiple_redirect_chain(self): "You can follow a redirect chain of multiple redirects" response = self.client.get("/redirects/", {}, follow=True) self.assertRedirects( response, "/no_template_view/", status_code=302, target_status_code=200 ) self.assertEqual(len(response.redirect_chain), 3) self.assertEqual(response.redirect_chain[0], ("/redirects/further/", 302)) self.assertEqual(response.redirect_chain[1], ("/redirects/further/more/", 302)) self.assertEqual(response.redirect_chain[2], ("/no_template_view/", 302)) def test_redirect_chain_to_non_existent(self): "You can follow a chain to a nonexistent view." response = self.client.get("/redirect_to_non_existent_view2/", {}, follow=True) self.assertRedirects( response, "/non_existent_view/", status_code=302, target_status_code=404 ) def test_redirect_chain_to_self(self): "Redirections to self are caught and escaped" with self.assertRaises(RedirectCycleError) as context: self.client.get("/redirect_to_self/", {}, follow=True) response = context.exception.last_response # The chain of redirects stops once the cycle is detected. self.assertRedirects( response, "/redirect_to_self/", status_code=302, target_status_code=302 ) self.assertEqual(len(response.redirect_chain), 2) def test_redirect_to_self_with_changing_query(self): "Redirections don't loop forever even if query is changing" with self.assertRaises(RedirectCycleError): self.client.get( "/redirect_to_self_with_changing_query_view/", {"counter": "0"}, follow=True, ) def test_circular_redirect(self): "Circular redirect chains are caught and escaped" with self.assertRaises(RedirectCycleError) as context: self.client.get("/circular_redirect_1/", {}, follow=True) response = context.exception.last_response # The chain of redirects will get back to the starting point, but stop # there. 
self.assertRedirects( response, "/circular_redirect_2/", status_code=302, target_status_code=302 ) self.assertEqual(len(response.redirect_chain), 4) def test_redirect_chain_post(self): "A redirect chain will be followed from an initial POST post" response = self.client.post("/redirects/", {"nothing": "to_send"}, follow=True) self.assertRedirects(response, "/no_template_view/", 302, 200) self.assertEqual(len(response.redirect_chain), 3) def test_redirect_chain_head(self): "A redirect chain will be followed from an initial HEAD request" response = self.client.head("/redirects/", {"nothing": "to_send"}, follow=True) self.assertRedirects(response, "/no_template_view/", 302, 200) self.assertEqual(len(response.redirect_chain), 3) def test_redirect_chain_options(self): "A redirect chain will be followed from an initial OPTIONS request" response = self.client.options("/redirects/", follow=True) self.assertRedirects(response, "/no_template_view/", 302, 200) self.assertEqual(len(response.redirect_chain), 3) def test_redirect_chain_put(self): "A redirect chain will be followed from an initial PUT request" response = self.client.put("/redirects/", follow=True) self.assertRedirects(response, "/no_template_view/", 302, 200) self.assertEqual(len(response.redirect_chain), 3) def test_redirect_chain_delete(self): "A redirect chain will be followed from an initial DELETE request" response = self.client.delete("/redirects/", follow=True) self.assertRedirects(response, "/no_template_view/", 302, 200) self.assertEqual(len(response.redirect_chain), 3) @modify_settings(ALLOWED_HOSTS={"append": "otherserver"}) def test_redirect_to_different_host(self): "The test client will preserve scheme, host and port changes" response = self.client.get("/redirect_other_host/", follow=True) self.assertRedirects( response, "https://otherserver:8443/no_template_view/", status_code=302, target_status_code=200, ) # We can't use is_secure() or get_host() # because response.request is a dictionary, not an 
HttpRequest self.assertEqual(response.request.get("wsgi.url_scheme"), "https") self.assertEqual(response.request.get("SERVER_NAME"), "otherserver") self.assertEqual(response.request.get("SERVER_PORT"), "8443") # assertRedirects() can follow redirect to 'otherserver' too. response = self.client.get("/redirect_other_host/", follow=False) self.assertRedirects( response, "https://otherserver:8443/no_template_view/", status_code=302, target_status_code=200, ) def test_redirect_chain_on_non_redirect_page(self): """ An assertion is raised if the original page couldn't be retrieved as expected. """ # This page will redirect with code 301, not 302 response = self.client.get("/get_view/", follow=True) try: self.assertRedirects(response, "/get_view/") except AssertionError as e: self.assertIn( "Response didn't redirect as expected: Response code was 200 " "(expected 302)", str(e), ) try: self.assertRedirects(response, "/get_view/", msg_prefix="abc") except AssertionError as e: self.assertIn( "abc: Response didn't redirect as expected: Response code was 200 " "(expected 302)", str(e), ) def test_redirect_on_non_redirect_page(self): """ An assertion is raised if the original page couldn't be retrieved as expected """ # This page will redirect with code 301, not 302 response = self.client.get("/get_view/") try: self.assertRedirects(response, "/get_view/") except AssertionError as e: self.assertIn( "Response didn't redirect as expected: Response code was 200 " "(expected 302)", str(e), ) try: self.assertRedirects(response, "/get_view/", msg_prefix="abc") except AssertionError as e: self.assertIn( "abc: Response didn't redirect as expected: Response code was 200 " "(expected 302)", str(e), ) def test_redirect_scheme(self): """ An assertion is raised if the response doesn't have the scheme specified in expected_url. 
""" # For all possible True/False combinations of follow and secure for follow, secure in itertools.product([True, False], repeat=2): # always redirects to https response = self.client.get( "/https_redirect_view/", follow=follow, secure=secure ) # the goal scheme is https self.assertRedirects( response, "https://testserver/secure_view/", status_code=302 ) with self.assertRaises(AssertionError): self.assertRedirects( response, "http://testserver/secure_view/", status_code=302 ) def test_redirect_fetch_redirect_response(self): """Preserve extra headers of requests made with django.test.Client.""" methods = ( "get", "post", "head", "options", "put", "patch", "delete", "trace", ) for method in methods: with self.subTest(method=method): req_method = getattr(self.client, method) # HTTP_REDIRECT in "extra". response = req_method( "/redirect_based_on_extra_headers_1/", follow=False, HTTP_REDIRECT="val", ) self.assertRedirects( response, "/redirect_based_on_extra_headers_2/", fetch_redirect_response=True, status_code=302, target_status_code=302, ) # HTTP_REDIRECT in "headers". response = req_method( "/redirect_based_on_extra_headers_1/", follow=False, headers={"redirect": "val"}, ) self.assertRedirects( response, "/redirect_based_on_extra_headers_2/", fetch_redirect_response=True, status_code=302, target_status_code=302, ) @override_settings(ROOT_URLCONF="test_client_regress.urls")
AssertRedirectsTests
python
tqdm__tqdm
tqdm/contrib/discord.py
{ "start": 3077, "end": 5243 }
class ____(tqdm_auto): """ Standard `tqdm.auto.tqdm` but also sends updates to a Discord Bot. May take a few seconds to create (`__init__`). - create a discord bot (not public, no requirement of OAuth2 code grant, only send message permissions) & invite it to a channel: <https://discordpy.readthedocs.io/en/latest/discord.html> - copy the bot `{token}` & `{channel_id}` and paste below >>> from tqdm.contrib.discord import tqdm, trange >>> for i in tqdm(iterable, token='{token}', channel_id='{channel_id}'): ... ... """ def __init__(self, *args, **kwargs): """ Parameters ---------- token : str, required. Discord bot token [default: ${TQDM_DISCORD_TOKEN}]. channel_id : int, required. Discord channel ID [default: ${TQDM_DISCORD_CHANNEL_ID}]. See `tqdm.auto.tqdm.__init__` for other parameters. """ if not kwargs.get('disable'): kwargs = kwargs.copy() self.dio = DiscordIO( kwargs.pop('token', getenv('TQDM_DISCORD_TOKEN')), kwargs.pop('channel_id', getenv('TQDM_DISCORD_CHANNEL_ID'))) super().__init__(*args, **kwargs) def display(self, **kwargs): super().display(**kwargs) fmt = self.format_dict if fmt.get('bar_format', None): fmt['bar_format'] = fmt['bar_format'].replace( '<bar/>', '{bar:10u}').replace('{bar}', '{bar:10u}') else: fmt['bar_format'] = '{l_bar}{bar:10u}{r_bar}' self.dio.write(self.format_meter(**fmt)) def clear(self, *args, **kwargs): super().clear(*args, **kwargs) if not self.disable: self.dio.write("") def close(self): if self.disable: return super().close() if not (self.leave or (self.leave is None and self.pos == 0)): self.dio.delete() def tdrange(*args, **kwargs): """Shortcut for `tqdm.contrib.discord.tqdm(range(*args), **kwargs)`.""" return tqdm_discord(range(*args), **kwargs) # Aliases tqdm = tqdm_discord trange = tdrange
tqdm_discord
python
django__django
tests/invalid_models_tests/test_relative_fields.py
{ "start": 48639, "end": 51623 }
class ____(SimpleTestCase): def test_fk_to_integer(self): self._test_explicit_related_name_clash( target=models.IntegerField(), relative=models.ForeignKey("Target", models.CASCADE, related_name="clash"), ) def test_fk_to_fk(self): self._test_explicit_related_name_clash( target=models.ForeignKey("Another", models.CASCADE), relative=models.ForeignKey("Target", models.CASCADE, related_name="clash"), ) def test_fk_to_m2m(self): self._test_explicit_related_name_clash( target=models.ManyToManyField("Another"), relative=models.ForeignKey("Target", models.CASCADE, related_name="clash"), ) def test_m2m_to_integer(self): self._test_explicit_related_name_clash( target=models.IntegerField(), relative=models.ManyToManyField("Target", related_name="clash"), ) def test_m2m_to_fk(self): self._test_explicit_related_name_clash( target=models.ForeignKey("Another", models.CASCADE), relative=models.ManyToManyField("Target", related_name="clash"), ) def test_m2m_to_m2m(self): self._test_explicit_related_name_clash( target=models.ManyToManyField("Another"), relative=models.ManyToManyField("Target", related_name="clash"), ) def _test_explicit_related_name_clash(self, target, relative): class Another(models.Model): pass class Target(models.Model): clash = target class Model(models.Model): rel = relative self.assertEqual( Model.check(), [ Error( "Reverse accessor 'Target.clash' for " "'invalid_models_tests.Model.rel' clashes with field name " "'invalid_models_tests.Target.clash'.", hint=( "Rename field 'invalid_models_tests.Target.clash', or " "add/change a related_name argument to the definition for " "field 'invalid_models_tests.Model.rel'." ), obj=Model._meta.get_field("rel"), id="fields.E302", ), Error( "Reverse query name for 'invalid_models_tests.Model.rel' " "clashes with field name 'invalid_models_tests.Target.clash'.", hint=( "Rename field 'invalid_models_tests.Target.clash', or " "add/change a related_name argument to the definition for " "field 'invalid_models_tests.Model.rel'." 
), obj=Model._meta.get_field("rel"), id="fields.E303", ), ], ) @isolate_apps("invalid_models_tests")
ExplicitRelatedNameClashTests
python
allegroai__clearml
clearml/backend_api/services/v2_20/queues.py
{ "start": 85808, "end": 86991 }
class ____(Response): """ Response of queues.remove_task endpoint. :param removed: Number of tasks removed (0 or 1) :type removed: int """ _service = "queues" _action = "remove_task" _version = "2.20" _schema = { "definitions": {}, "properties": { "removed": { "description": "Number of tasks removed (0 or 1)", "enum": [0, 1], "type": ["integer", "null"], } }, "type": "object", } def __init__(self, removed: Optional[int] = None, **kwargs: Any) -> None: super(RemoveTaskResponse, self).__init__(**kwargs) self.removed = removed @schema_property("removed") def removed(self) -> Optional[int]: return self._property_removed @removed.setter def removed(self, value: Optional[int]) -> None: if value is None: self._property_removed = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "removed", six.integer_types) self._property_removed = value
RemoveTaskResponse
python
PyCQA__pylint
doc/data/messages/n/non-iterator-returned/bad.py
{ "start": 16, "end": 546 }
class ____: def __init__(self, signs, predictions): self.signs = signs self.predictions = predictions def __iter__(self): # [non-iterator-returned] self.index = 0 self.number_of_prediction = len(self.predictions) return self SIGNS = ["Aries", "Taurus", "Gemini", "Cancer", "Leo", "Virgo", "Libra"] PREDICTIONS = ["good things", "bad thing", "existential dread"] for sign, prediction in GenericAstrology(SIGNS, PREDICTIONS): print(f"{sign} : {prediction} today")
GenericAstrology
python
kamyu104__LeetCode-Solutions
Python/construct-the-longest-new-string.py
{ "start": 61, "end": 277 }
class ____(object): def longestString(self, x, y, z): """ :type x: int :type y: int :type z: int :rtype: int """ return ((min(x, y)*2+int(x != y))+z)*2
Solution
python
doocs__leetcode
solution/1800-1899/1886.Determine Whether Matrix Can Be Obtained By Rotation/Solution.py
{ "start": 0, "end": 650 }
class ____: def findRotation(self, mat: List[List[int]], target: List[List[int]]) -> bool: def rotate(matrix): n = len(matrix) for i in range(n // 2): for j in range(i, n - 1 - i): t = matrix[i][j] matrix[i][j] = matrix[n - j - 1][i] matrix[n - j - 1][i] = matrix[n - i - 1][n - j - 1] matrix[n - i - 1][n - j - 1] = matrix[j][n - i - 1] matrix[j][n - i - 1] = t for _ in range(4): if mat == target: return True rotate(mat) return False
Solution
python
python__mypy
mypy/nodes.py
{ "start": 66930, "end": 67112 }
class ____(Expression): """Ellipsis (...)""" __slots__ = () def accept(self, visitor: ExpressionVisitor[T]) -> T: return visitor.visit_ellipsis(self)
EllipsisExpr
python
astropy__astropy
astropy/cosmology/_src/tests/flrw/test_lambdacdm.py
{ "start": 1364, "end": 4511 }
class ____(FLRWTest): """Test :class:`astropy.cosmology.LambdaCDM`.""" def setup_class(self): """Setup for testing.""" super().setup_class(self) self.cls = LambdaCDM # =============================================================== # Method & Attribute Tests _FLRW_redshift_methods = get_redshift_methods( LambdaCDM, include_private=True, include_z2=False ) - {"_dS_age"} # `_dS_age` is removed because it doesn't strictly rely on the value of `z`, # so any input that doesn't trip up ``np.shape`` is "valid" @pytest.mark.skipif(not HAS_SCIPY, reason="scipy is not installed") @pytest.mark.parametrize("z, exc", invalid_zs) @pytest.mark.parametrize("method", sorted(_FLRW_redshift_methods)) def test_redshift_method_bad_input(self, cosmo, method, z, exc): """Test all the redshift methods for bad input.""" super().test_redshift_method_bad_input(cosmo, method, z, exc) @pytest.mark.parametrize("z", valid_zs) def test_w(self, cosmo, z): """Test :meth:`astropy.cosmology.LambdaCDM.w`.""" super().test_w(cosmo, z) w = cosmo.w(z) assert u.allclose(w, -1.0) def test_repr(self, cosmo): """Test method ``.__repr__()``.""" assert repr(cosmo) == ( "LambdaCDM(name='ABCMeta', H0=<Quantity 70. km / (Mpc s)>, Om0=0.27," " Ode0=0.73, Tcmb0=<Quantity 3. K>, Neff=3.04," " m_nu=<Quantity [0., 0., 0.] eV>, Ob0=0.03)" ) def test_str(self, cosmo): """Test method ``.__str__()``.""" assert str(cosmo) == ( 'LambdaCDM(name="ABCMeta", H0=70.0 km / (Mpc s), Om0=0.27, Ode0=0.73,' " Tcmb0=3.0 K, Neff=3.04, m_nu=[0. 0. 0.] 
eV, Ob0=0.03)" ) @pytest.mark.skipif(not HAS_SCIPY, reason="scipy is not installed") @pytest.mark.parametrize( ("args", "kwargs", "expected"), [ ( # no relativistic species (75.0, 0.25, 0.5), {"Tcmb0": 0.0}, [2953.93001902, 4616.7134253, 5685.07765971, 6440.80611897] * u.Mpc, ), ( # massless neutrinos (75.0, 0.25, 0.6), {"Tcmb0": 3.0, "Neff": 3, "m_nu": u.Quantity(0.0, u.eV)}, [3037.12620424, 4776.86236327, 5889.55164479, 6671.85418235] * u.Mpc, ), ( # massive neutrinos (75.0, 0.3, 0.4), {"Tcmb0": 3.0, "Neff": 3, "m_nu": u.Quantity(10.0, u.eV)}, [2471.80626824, 3567.1902565, 4207.15995626, 4638.20476018] * u.Mpc, ), ], ) def test_comoving_distance_example(self, cosmo_cls, args, kwargs, expected): """Test :meth:`astropy.cosmology.LambdaCDM.comoving_distance`. These do not come from external codes -- they are just internal checks to make sure nothing changes if we muck with the distance calculators. """ super().test_comoving_distance_example(cosmo_cls, args, kwargs, expected) # -----------------------------------------------------------------------------
TestLambdaCDM
python
allegroai__clearml
clearml/backend_api/services/v2_13/events.py
{ "start": 34576, "end": 34824 }
class ____(BatchRequest): """ Adds a batch of events in a single call (json-lines format, stream-friendly) """ _service = "events" _action = "add_batch" _version = "2.13" _batched_request_cls = AddRequest
AddBatchRequest
python
airbytehq__airbyte
airbyte-integrations/connectors/destination-pgvector/destination_pgvector/config.py
{ "start": 522, "end": 1932 }
class ____(BaseModel): host: str = Field( ..., title="Host", order=1, description="Enter the account name you want to use to access the database.", examples=["AIRBYTE_ACCOUNT"], ) port: int = Field( default=5432, title="Port", order=2, description="Enter the port you want to use to access the database", examples=["5432"], ) database: str = Field( ..., title="Database", order=4, description="Enter the name of the database that you want to sync data into", examples=["AIRBYTE_DATABASE"], ) default_schema: str = Field( default="public", title="Default Schema", order=5, description="Enter the name of the default schema", examples=["AIRBYTE_SCHEMA"], ) username: str = Field( ..., title="Username", order=6, description="Enter the name of the user you want to use to access the database", examples=["AIRBYTE_USER"], ) # E.g. "credentials": {"password": "AIRBYTE_PASSWORD"} credentials: PasswordBasedAuthorizationModel class Config: title = "Postgres Connection" schema_extra = { "description": "Postgres can be used to store vector data and retrieve embeddings.", "group": "indexing", }
PGVectorIndexingModel
python
tensorflow__tensorflow
tensorflow/python/training/saving/saveable_object_util_test.py
{ "start": 5458, "end": 5818 }
class ____(saveable_object.SaveableObject): def __init__(self, obj, name): spec = saveable_object.SaveSpec(obj.read(), "", name) self.obj = obj super(_StateSaveable, self).__init__(obj, [spec], name) def restore(self, restored_tensors, restored_shapes): del restored_shapes # Unused. self.obj.assign(restored_tensors[0])
_StateSaveable
python
allegroai__clearml
clearml/backend_api/services/v2_23/tasks.py
{ "start": 514623, "end": 518003 }
class ____(Response): """ Response of tasks.stop_many endpoint. :param succeeded: :type succeeded: Sequence[dict] :param failed: :type failed: Sequence[dict] """ _service = "tasks" _action = "stop_many" _version = "2.23" _schema = { "definitions": {}, "properties": { "failed": { "items": { "properties": { "error": { "description": "Error info", "properties": { "codes": { "items": {"type": "integer"}, "type": "array", }, "data": { "additionalProperties": True, "type": "object", }, "msg": {"type": "string"}, }, "type": "object", }, "id": { "description": "ID of the failed entity", "type": "string", }, }, "type": "object", }, "type": ["array", "null"], }, "succeeded": { "items": { "properties": { "fields": { "additionalProperties": True, "description": "Updated fields names and values", "type": "object", }, "id": { "description": "ID of the succeeded entity", "type": "string", }, "updated": { "description": "Number of tasks updated (0 or 1)", "enum": [0, 1], "type": "integer", }, }, "type": "object", }, "type": ["array", "null"], }, }, "type": "object", } def __init__(self, succeeded=None, failed=None, **kwargs): super(StopManyResponse, self).__init__(**kwargs) self.succeeded = succeeded self.failed = failed @schema_property("succeeded") def succeeded(self): return self._property_succeeded @succeeded.setter def succeeded(self, value): if value is None: self._property_succeeded = None return self.assert_isinstance(value, "succeeded", (list, tuple)) self.assert_isinstance(value, "succeeded", (dict,), is_array=True) self._property_succeeded = value @schema_property("failed") def failed(self): return self._property_failed @failed.setter def failed(self, value): if value is None: self._property_failed = None return self.assert_isinstance(value, "failed", (list, tuple)) self.assert_isinstance(value, "failed", (dict,), is_array=True) self._property_failed = value
StopManyResponse
python
google__jax
tests/multiprocess_gpu_test.py
{ "start": 1141, "end": 7404 }
class ____(jtu.JaxTestCase): def test_gpu_distributed_initialize(self): if not jtu.test_device_matches(['gpu']): raise unittest.SkipTest('Tests only for GPU.') port = portpicker.pick_unused_port() num_gpus = 4 num_gpus_per_task = 1 num_tasks = num_gpus // num_gpus_per_task if jax.device_count() < num_gpus: raise unittest.SkipTest( f"Test requires >={num_gpus} GPUs; got {jax.device_count()}." ) subprocesses = [] for task in range(num_tasks): env = os.environ.copy() env["JAX_PORT"] = str(port) env["NUM_TASKS"] = str(num_tasks) env["TASK"] = str(task) if jtu.is_device_rocm(): env["HIP_VISIBLE_DEVICES"] = ",".join( str((task * num_gpus_per_task) + i) for i in range(num_gpus_per_task)) else: env["CUDA_VISIBLE_DEVICES"] = ",".join( str((task * num_gpus_per_task) + i) for i in range(num_gpus_per_task)) args = [ sys.executable, "-c", ('import jax, os; ' 'jax.distributed.initialize(' 'f\'localhost:{os.environ["JAX_PORT"]}\', ' 'int(os.environ["NUM_TASKS"]), int(os.environ["TASK"])); ' 'print(f\'{jax.local_device_count()},{jax.device_count()}\', end="")' ) ] proc = subprocess.Popen(args, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) subprocesses.append(self.enter_context(proc)) try: for proc in subprocesses: out, err = proc.communicate() self.assertEqual(proc.returncode, 0, msg=f"Process failed:\n\n{out}\n\n{err}") self.assertEqual( out, f"{num_gpus_per_task},{num_gpus}", msg=f"Process failed:\n\n{out}\n\n{err}", ) finally: for proc in subprocesses: proc.kill() def test_distributed_jax_visible_devices(self): """Test jax_visible_devices works in distributed settings.""" if not jtu.test_device_matches(['gpu']): raise unittest.SkipTest('Tests only for GPU.') port = portpicker.pick_unused_port() num_gpus = 4 num_gpus_per_task = 1 num_tasks = num_gpus // num_gpus_per_task subprocesses = [] for task in range(num_tasks): env = os.environ.copy() env["JAX_PORT"] = str(port) env["NUM_TASKS"] = str(num_tasks) env["TASK"] = str(task) visible_devices = 
[ (task * num_gpus_per_task) + i for i in range(num_gpus_per_task) ] program = ( 'import jax, os; ' 'jax.distributed.initialize(' 'f\'localhost:{os.environ["JAX_PORT"]}\', ' f'int(os.environ["NUM_TASKS"]), int(os.environ["TASK"]), {visible_devices}); ' 's = jax.pmap(lambda x: jax.lax.psum(x, "i"), axis_name="i")(jax.numpy.ones(jax.local_device_count())); ' 'print(f\'{jax.local_device_count()},{jax.device_count()},{s}\', end=""); ' ) args = [sys.executable, "-c", program] proc = subprocess.Popen(args, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) subprocesses.append(self.enter_context(proc)) try: for proc in subprocesses: out, err = proc.communicate() self.assertEqual(proc.returncode, 0, msg=f"Process failed:\n\n{err}") self.assertRegex(out, f'{num_gpus_per_task},{num_gpus},\\[{num_gpus}.\\]$') finally: for proc in subprocesses: proc.kill() def test_gpu_ompi_distributed_initialize(self): if not jtu.test_device_matches(['gpu']): raise unittest.SkipTest('Tests only for GPU.') if shutil.which('mpirun') is None: raise unittest.SkipTest('Tests only for MPI (mpirun not found).') num_gpus = 4 num_gpus_per_task = 1 args = [ 'mpirun', '--oversubscribe', '--allow-run-as-root', '-n', str(num_gpus), sys.executable, '-c', ('import jax, os; ' 'jax.distributed.initialize(); ' 'print(f\'{jax.local_device_count()},{jax.device_count()}\' if jax.process_index() == 0 else \'\', end="")' ) ] env = os.environ.copy() # In case the job was launched via Slurm, # prevent OpenMPI from detecting Slurm environment env.pop('SLURM_JOBID', None) proc = subprocess.Popen(args, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) proc = self.enter_context(proc) try: out, _ = proc.communicate() self.assertEqual(proc.returncode, 0) self.assertEqual(out, f'{num_gpus_per_task},{num_gpus}') finally: proc.kill() def test_gpu_mpi4py_distributed_initialize(self): if not jtu.test_device_matches(['gpu']): raise unittest.SkipTest('Tests only for 
GPU.') if shutil.which('mpirun') is None: raise unittest.SkipTest('Tests only for MPI (mpirun not found).') if importlib.util.find_spec("mpi4py") is None: raise unittest.SkipTest('Test of mpi4py initialize only possible with mpi4py installed.') num_gpus = 4 num_gpus_per_task = 1 args = [ 'mpirun', '--oversubscribe', '--allow-run-as-root', '-n', str(num_gpus), sys.executable, '-c', ('import jax, os; ' 'jax.distributed.initialize(spec_detection_method="mpi4py"); ' 'print(f\'{jax.local_device_count()},{jax.device_count()}\' if jax.process_index() == 0 else \'\', end="")' ) ] env = os.environ.copy() # In case the job was launched via Slurm, # prevent OpenMPI from detecting Slurm environment env.pop('SLURM_JOBID', None) proc = subprocess.Popen(args, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) proc = self.enter_context(proc) try: out, _ = proc.communicate() self.assertEqual(proc.returncode, 0) self.assertEqual(out, f'{num_gpus_per_task},{num_gpus}') finally: proc.kill() @unittest.skipIf( os.environ.get("SLURM_JOB_NUM_NODES", None) != "2", "Slurm environment with at least two nodes needed!") @jtu.pytest_mark_if_available('SlurmMultiNodeGpuTest')
MultiProcessGpuTest
python
pandas-dev__pandas
pandas/core/dtypes/dtypes.py
{ "start": 23998, "end": 32383 }
class ____(PandasExtensionDtype): """ An ExtensionDtype for timezone-aware datetime data. **This is not an actual numpy dtype**, but a duck type. Parameters ---------- unit : str, default "ns" The precision of the datetime data. Valid options are ``"s"``, ``"ms"``, ``"us"``, ``"ns"``. tz : str, int, or datetime.tzinfo The timezone. Attributes ---------- unit tz Methods ------- None Raises ------ ZoneInfoNotFoundError When the requested timezone cannot be found. See Also -------- numpy.datetime64 : Numpy data type for datetime. datetime.datetime : Python datetime object. Examples -------- >>> from zoneinfo import ZoneInfo >>> pd.DatetimeTZDtype(tz=ZoneInfo("UTC")) datetime64[ns, UTC] >>> pd.DatetimeTZDtype(tz=ZoneInfo("Europe/Paris")) datetime64[ns, Europe/Paris] """ type: type[Timestamp] = Timestamp kind: str_type = "M" num = 101 _metadata = ("unit", "tz") _match = re.compile(r"(datetime64|M8)\[(?P<unit>.+), (?P<tz>.+)\]") _cache_dtypes: dict[str_type, PandasExtensionDtype] = {} _supports_2d = True _can_fast_transpose = True @property def na_value(self) -> NaTType: return NaT @cache_readonly def base(self) -> DtypeObj: # type: ignore[override] return np.dtype(f"M8[{self.unit}]") # error: Signature of "str" incompatible with supertype "PandasExtensionDtype" @cache_readonly def str(self) -> str: # type: ignore[override] return f"|M8[{self.unit}]" def __init__(self, unit: TimeUnit | DatetimeTZDtype = "ns", tz=None) -> None: if isinstance(unit, DatetimeTZDtype): # error: "str" has no attribute "tz" unit, tz = unit.unit, unit.tz # type: ignore[union-attr] if unit != "ns": if isinstance(unit, str) and tz is None: # maybe a string like datetime64[ns, tz], which we support for # now. result = type(self).construct_from_string(unit) unit = result.unit tz = result.tz msg = ( f"Passing a dtype alias like 'datetime64[ns, {tz}]' " "to DatetimeTZDtype is no longer supported. Use " "'DatetimeTZDtype.construct_from_string()' instead." 
) raise ValueError(msg) if unit not in ["s", "ms", "us", "ns"]: raise ValueError("DatetimeTZDtype only supports s, ms, us, ns units") if tz: tz = timezones.maybe_get_tz(tz) tz = timezones.tz_standardize(tz) elif tz is not None: raise zoneinfo.ZoneInfoNotFoundError(tz) if tz is None: raise TypeError("A 'tz' is required.") self._unit = unit self._tz = tz @cache_readonly def _creso(self) -> int: """ The NPY_DATETIMEUNIT corresponding to this dtype's resolution. """ return abbrev_to_npy_unit(self.unit) @property def unit(self) -> TimeUnit: """ The precision of the datetime data. See Also -------- DatetimeTZDtype.tz : Retrieves the timezone. Examples -------- >>> from zoneinfo import ZoneInfo >>> dtype = pd.DatetimeTZDtype(tz=ZoneInfo("America/Los_Angeles")) >>> dtype.unit 'ns' """ return self._unit @property def tz(self) -> tzinfo: """ The timezone. See Also -------- DatetimeTZDtype.unit : Retrieves precision of the datetime data. Examples -------- >>> from zoneinfo import ZoneInfo >>> dtype = pd.DatetimeTZDtype(tz=ZoneInfo("America/Los_Angeles")) >>> dtype.tz zoneinfo.ZoneInfo(key='America/Los_Angeles') """ return self._tz def construct_array_type(self) -> type_t[DatetimeArray]: """ Return the array type associated with this dtype. Returns ------- type """ from pandas.core.arrays import DatetimeArray return DatetimeArray @classmethod def construct_from_string(cls, string: str_type) -> DatetimeTZDtype: """ Construct a DatetimeTZDtype from a string. Parameters ---------- string : str The string alias for this DatetimeTZDtype. Should be formatted like ``datetime64[ns, <tz>]``, where ``<tz>`` is the timezone name. 
Examples -------- >>> DatetimeTZDtype.construct_from_string("datetime64[ns, UTC]") datetime64[ns, UTC] """ if not isinstance(string, str): raise TypeError( f"'construct_from_string' expects a string, got {type(string)}" ) msg = f"Cannot construct a 'DatetimeTZDtype' from '{string}'" match = cls._match.match(string) if match: d = match.groupdict() try: unit = cast("TimeUnit", d["unit"]) return cls(unit=unit, tz=d["tz"]) except (KeyError, TypeError, ValueError) as err: # KeyError if maybe_get_tz tries and fails to get a # zoneinfo timezone (actually zoneinfo.ZoneInfoNotFoundError). # TypeError if we pass a nonsense tz; # ValueError if we pass a unit other than "ns" raise TypeError(msg) from err raise TypeError(msg) def __str__(self) -> str_type: return f"datetime64[{self.unit}, {self.tz}]" @property def name(self) -> str_type: """A string representation of the dtype.""" return str(self) def __hash__(self) -> int: # make myself hashable # TODO: update this. return hash(str(self)) def __eq__(self, other: object) -> bool: if isinstance(other, str): if other.startswith("M8["): other = f"datetime64[{other[3:]}" return other == self.name return ( isinstance(other, DatetimeTZDtype) and self.unit == other.unit and tz_compare(self.tz, other.tz) ) def __from_arrow__(self, array: pa.Array | pa.ChunkedArray) -> DatetimeArray: """ Construct DatetimeArray from pyarrow Array/ChunkedArray. Note: If the units in the pyarrow Array are the same as this DatetimeDtype, then values corresponding to the integer representation of ``NaT`` (e.g. one nanosecond before :attr:`pandas.Timestamp.min`) are converted to ``NaT``, regardless of the null indicator in the pyarrow array. Parameters ---------- array : pyarrow.Array or pyarrow.ChunkedArray The Arrow array to convert to DatetimeArray. 
Returns ------- extension array : DatetimeArray """ import pyarrow from pandas.core.arrays import DatetimeArray array = array.cast(pyarrow.timestamp(unit=self._unit), safe=True) if isinstance(array, pyarrow.Array): np_arr = array.to_numpy(zero_copy_only=False) else: np_arr = array.to_numpy() return DatetimeArray._simple_new(np_arr, dtype=self) def __setstate__(self, state) -> None: # for pickle compat. __get_state__ is defined in the # PandasExtensionDtype superclass and uses the public properties to # pickle -> need to set the settable private ones here (see GH26067) self._tz = state["tz"] self._unit = state["unit"] def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None: if all(isinstance(t, DatetimeTZDtype) and t.tz == self.tz for t in dtypes): np_dtype = np.max([cast(DatetimeTZDtype, t).base for t in [self, *dtypes]]) unit = np.datetime_data(np_dtype)[0] unit = cast("TimeUnit", unit) return type(self)(unit=unit, tz=self.tz) return super()._get_common_dtype(dtypes) @cache_readonly def index_class(self) -> type_t[DatetimeIndex]: from pandas import DatetimeIndex return DatetimeIndex @register_extension_dtype @set_module("pandas")
DatetimeTZDtype
python
huggingface__transformers
src/transformers/models/qwen3_omni_moe/modeling_qwen3_omni_moe.py
{ "start": 154673, "end": 155473 }
class ____(nn.Module): def __init__(self, hidden_size, eps: float = 1e-6) -> None: """ Qwen3OmniMoeCode2WavRMSNorm is equivalent to T5LayerNorm """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: input_dtype = hidden_states.dtype hidden_states = hidden_states.to(torch.float32) variance = hidden_states.pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) return self.weight * hidden_states.to(input_dtype) def extra_repr(self): return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
Qwen3OmniMoeCode2WavRMSNorm
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/util/langhelpers.py
{ "start": 47072, "end": 47543 }
class ____(Generic[_T]): def __init__(self, func: Callable[..., _T]): self.func = func self.clslevel = func def __get__(self, instance: Any, owner: Any) -> _T: if instance is None: clsval = self.clslevel(owner) return clsval else: return self.func(instance) def classlevel(self, func: Callable[..., Any]) -> hybridproperty[_T]: self.clslevel = func return self
hybridproperty
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/pylint/single_string_slots.py
{ "start": 10, "end": 98 }
class ____: __slots__ = "bar" def __init__(self, bar): self.bar = bar
Foo
python
django-mptt__django-mptt
tests/myapp/models.py
{ "start": 2007, "end": 2161 }
class ____(MPTTModel): parent = models.ForeignKey( "self", null=True, blank=True, related_name="children", on_delete=models.CASCADE )
Insert
python
huggingface__transformers
src/transformers/models/clip/modeling_clip.py
{ "start": 39389, "end": 41744 }
class ____(CLIPPreTrainedModel): config: CLIPVisionConfig main_input_name = "pixel_values" input_modalities = ("image",) def __init__(self, config: CLIPVisionConfig): super().__init__(config) vision_model = CLIPVisionModel._from_config(config) self.vision_model = vision_model.vision_model self.visual_projection = nn.Linear(config.hidden_size, config.projection_dim, bias=False) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> nn.Module: return self.vision_model.embeddings.patch_embedding @check_model_inputs(tie_last_hidden_states=False) @auto_docstring def forward( self, pixel_values: Optional[torch.FloatTensor] = None, interpolate_pos_encoding: bool = False, **kwargs: Unpack[TransformersKwargs], ) -> CLIPVisionModelOutput: r""" Examples: ```python >>> import torch >>> from transformers import AutoProcessor, CLIPVisionModelWithProjection >>> from transformers.image_utils import load_image >>> model = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-base-patch32") >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = load_image(url) >>> inputs = processor(images=image, return_tensors="pt") >>> with torch.inference_mode(): ... outputs = model(**inputs) >>> image_embeds = outputs.image_embeds ```""" vision_outputs: BaseModelOutputWithPooling = self.vision_model( pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, **kwargs, ) pooled_output = vision_outputs.pooler_output image_embeds = self.visual_projection(pooled_output) return CLIPVisionModelOutput( image_embeds=image_embeds, last_hidden_state=vision_outputs.last_hidden_state, ) @auto_docstring( custom_intro=""" CLIP vision encoder with an image classification head on top (a linear layer on top of the pooled final hidden states of the patch tokens) e.g. for ImageNet. """ )
CLIPVisionModelWithProjection
python
kamyu104__LeetCode-Solutions
Python/find-all-people-with-secret.py
{ "start": 1087, "end": 2031 }
class ____(object): def findAllPeople(self, n, meetings, firstPerson): """ :type n: int :type meetings: List[List[int]] :type firstPerson: int :rtype: List[int] """ meetings.sort(key=lambda x: x[2]) result = {0, firstPerson} adj = collections.defaultdict(list) for i, (x, y, _) in enumerate(meetings): adj[x].append(y) adj[y].append(x) if i+1 != len(meetings) and meetings[i+1][2] == meetings[i][2]: continue stk = [i for i in adj.iterkeys() if i in result] while stk: u = stk.pop() for v in adj[u]: if v in result: continue result.add(v) stk.append(v) adj = collections.defaultdict(list) return list(result) # Time: O(nlogn) # Space: O(n)
Solution2
python
facelessuser__pymdown-extensions
tests/test_extensions/test_blocks/test_tab.py
{ "start": 93, "end": 1349 }
class ____(util.MdCase): """Test tab slug cases.""" extension = ['pymdownx.blocks.tab', 'toc'] extension_configs = { 'pymdownx.blocks.tab': {'slugify': slugify(case='lower'), 'alternate_style': True} } MD = r""" ### Here is some text /// tab | Here is some text content /// /// tab | Here is some text content /// """ def test_tab_slugs(self): """Test tab slugs.""" self.check_markdown( self.MD, ''' <h3 id="here-is-some-text">Here is some text</h3> <div class="tabbed-set tabbed-alternate" data-tabs="1:2"><input checked="checked" id="here-is-some-text_1" name="__tabbed_1" type="radio" /><input id="here-is-some-text_2" name="__tabbed_1" type="radio" /><div class="tabbed-labels"><label for="here-is-some-text_1">Here is some text</label><label for="here-is-some-text_2">Here is some text</label></div> <div class="tabbed-content"> <div class="tabbed-block"> <p>content</p> </div> <div class="tabbed-block"> <p>content</p> </div> </div> </div> ''', # noqa: E501 True )
TestTabSlugs
python
pytorch__pytorch
torch/nn/modules/transformer.py
{ "start": 1434, "end": 13176 }
class ____(Module): r"""A basic transformer layer. This Transformer layer implements the original Transformer architecture described in the `Attention Is All You Need <https://arxiv.org/abs/1706.03762>`_ paper. The intent of this layer is as a reference implementation for foundational understanding and thus it contains only limited features relative to newer Transformer architectures. Given the fast pace of innovation in transformer-like architectures, we recommend exploring this `tutorial <https://pytorch.org/tutorials/intermediate/transformer_building_blocks.html>`_ to build an efficient transformer layer from building blocks in core or using higher level libraries from the `PyTorch Ecosystem <https://landscape.pytorch.org/>`_. Args: d_model: the number of expected features in the encoder/decoder inputs (default=512). nhead: the number of heads in the multiheadattention models (default=8). num_encoder_layers: the number of sub-encoder-layers in the encoder (default=6). num_decoder_layers: the number of sub-decoder-layers in the decoder (default=6). dim_feedforward: the dimension of the feedforward network model (default=2048). dropout: the dropout value (default=0.1). activation: the activation function of encoder/decoder intermediate layer, can be a string ("relu" or "gelu") or a unary callable. Default: relu custom_encoder: custom encoder (default=None). custom_decoder: custom decoder (default=None). layer_norm_eps: the eps value in layer normalization components (default=1e-5). batch_first: If ``True``, then the input and output tensors are provided as (batch, seq, feature). Default: ``False`` (seq, batch, feature). norm_first: if ``True``, encoder and decoder layers will perform LayerNorms before other attention and feedforward operations, otherwise after. Default: ``False`` (after). bias: If set to ``False``, ``Linear`` and ``LayerNorm`` layers will not learn an additive bias. Default: ``True``. 
Examples: >>> transformer_model = nn.Transformer(nhead=16, num_encoder_layers=12) >>> src = torch.rand((10, 32, 512)) >>> tgt = torch.rand((20, 32, 512)) >>> out = transformer_model(src, tgt) Note: A full example to apply nn.Transformer module for the word language model is available in https://github.com/pytorch/examples/tree/master/word_language_model """ def __init__( self, d_model: int = 512, nhead: int = 8, num_encoder_layers: int = 6, num_decoder_layers: int = 6, dim_feedforward: int = 2048, dropout: float = 0.1, activation: str | Callable[[Tensor], Tensor] = F.relu, custom_encoder: Optional[Any] = None, custom_decoder: Optional[Any] = None, layer_norm_eps: float = 1e-5, batch_first: bool = False, norm_first: bool = False, bias: bool = True, device=None, dtype=None, ) -> None: factory_kwargs = {"device": device, "dtype": dtype} super().__init__() torch._C._log_api_usage_once(f"torch.nn.modules.{self.__class__.__name__}") if custom_encoder is not None: self.encoder = custom_encoder else: encoder_layer = TransformerEncoderLayer( d_model, nhead, dim_feedforward, dropout, activation, layer_norm_eps, batch_first, norm_first, bias, **factory_kwargs, ) encoder_norm = LayerNorm( d_model, eps=layer_norm_eps, bias=bias, # pyrefly: ignore [bad-argument-type] **factory_kwargs, ) self.encoder = TransformerEncoder( encoder_layer, num_encoder_layers, encoder_norm ) if custom_decoder is not None: self.decoder = custom_decoder else: decoder_layer = TransformerDecoderLayer( d_model, nhead, dim_feedforward, dropout, activation, layer_norm_eps, batch_first, norm_first, bias, **factory_kwargs, ) decoder_norm = LayerNorm( d_model, eps=layer_norm_eps, bias=bias, # pyrefly: ignore [bad-argument-type] **factory_kwargs, ) self.decoder = TransformerDecoder( decoder_layer, num_decoder_layers, decoder_norm ) self._reset_parameters() self.d_model = d_model self.nhead = nhead self.batch_first = batch_first def forward( self, src: Tensor, tgt: Tensor, src_mask: Optional[Tensor] = None, 
tgt_mask: Optional[Tensor] = None, memory_mask: Optional[Tensor] = None, src_key_padding_mask: Optional[Tensor] = None, tgt_key_padding_mask: Optional[Tensor] = None, memory_key_padding_mask: Optional[Tensor] = None, src_is_causal: Optional[bool] = None, tgt_is_causal: Optional[bool] = None, memory_is_causal: bool = False, ) -> Tensor: r"""Take in and process masked source/target sequences. .. note:: If a boolean tensor is provided for any of the [src/tgt/memory]_mask arguments, positions with a ``True`` value are not allowed to participate in the attention, which is the opposite of the definition for :attr:`attn_mask` in :func:`torch.nn.functional.scaled_dot_product_attention`. Args: src: the sequence to the encoder (required). tgt: the sequence to the decoder (required). src_mask: the additive mask for the src sequence (optional). tgt_mask: the additive mask for the tgt sequence (optional). memory_mask: the additive mask for the encoder output (optional). src_key_padding_mask: the Tensor mask for src keys per batch (optional). tgt_key_padding_mask: the Tensor mask for tgt keys per batch (optional). memory_key_padding_mask: the Tensor mask for memory keys per batch (optional). src_is_causal: If specified, applies a causal mask as ``src_mask``. Default: ``None``; try to detect a causal mask. Warning: ``src_is_causal`` provides a hint that ``src_mask`` is the causal mask. Providing incorrect hints can result in incorrect execution, including forward and backward compatibility. tgt_is_causal: If specified, applies a causal mask as ``tgt_mask``. Default: ``None``; try to detect a causal mask. Warning: ``tgt_is_causal`` provides a hint that ``tgt_mask`` is the causal mask. Providing incorrect hints can result in incorrect execution, including forward and backward compatibility. memory_is_causal: If specified, applies a causal mask as ``memory_mask``. Default: ``False``. Warning: ``memory_is_causal`` provides a hint that ``memory_mask`` is the causal mask. 
Providing incorrect hints can result in incorrect execution, including forward and backward compatibility. Shape: - src: :math:`(S, E)` for unbatched input, :math:`(S, N, E)` if `batch_first=False` or `(N, S, E)` if `batch_first=True`. - tgt: :math:`(T, E)` for unbatched input, :math:`(T, N, E)` if `batch_first=False` or `(N, T, E)` if `batch_first=True`. - src_mask: :math:`(S, S)` or :math:`(N\cdot\text{num\_heads}, S, S)`. - tgt_mask: :math:`(T, T)` or :math:`(N\cdot\text{num\_heads}, T, T)`. - memory_mask: :math:`(T, S)`. - src_key_padding_mask: :math:`(S)` for unbatched input otherwise :math:`(N, S)`. - tgt_key_padding_mask: :math:`(T)` for unbatched input otherwise :math:`(N, T)`. - memory_key_padding_mask: :math:`(S)` for unbatched input otherwise :math:`(N, S)`. Note: [src/tgt/memory]_mask ensures that position :math:`i` is allowed to attend the unmasked positions. If a BoolTensor is provided, positions with ``True`` are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor is provided, it will be added to the attention weight. [src/tgt/memory]_key_padding_mask provides specified elements in the key to be ignored by the attention. If a BoolTensor is provided, the positions with the value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged. - output: :math:`(T, E)` for unbatched input, :math:`(T, N, E)` if `batch_first=False` or `(N, T, E)` if `batch_first=True`. Note: Due to the multi-head attention architecture in the transformer model, the output sequence length of a transformer is same as the input sequence (i.e. target) length of the decoder. where :math:`S` is the source sequence length, :math:`T` is the target sequence length, :math:`N` is the batch size, :math:`E` is the feature number Examples: >>> # xdoctest: +SKIP >>> output = transformer_model( ... src, tgt, src_mask=src_mask, tgt_mask=tgt_mask ... 
) """ is_batched = src.dim() == 3 if not self.batch_first and src.size(1) != tgt.size(1) and is_batched: raise RuntimeError("the batch number of src and tgt must be equal") elif self.batch_first and src.size(0) != tgt.size(0) and is_batched: raise RuntimeError("the batch number of src and tgt must be equal") if src.size(-1) != self.d_model or tgt.size(-1) != self.d_model: raise RuntimeError( "the feature number of src and tgt must be equal to d_model" ) memory = self.encoder( src, mask=src_mask, src_key_padding_mask=src_key_padding_mask, is_causal=src_is_causal, ) output = self.decoder( tgt, memory, tgt_mask=tgt_mask, memory_mask=memory_mask, tgt_key_padding_mask=tgt_key_padding_mask, memory_key_padding_mask=memory_key_padding_mask, tgt_is_causal=tgt_is_causal, memory_is_causal=memory_is_causal, ) return output @staticmethod def generate_square_subsequent_mask( sz: int, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, ) -> Tensor: r"""Generate a square causal mask for the sequence. The masked positions are filled with float('-inf'). Unmasked positions are filled with float(0.0). """ return _generate_square_subsequent_mask(sz, dtype=dtype, device=device) def _reset_parameters(self) -> None: r"""Initiate parameters in the transformer model.""" for p in self.parameters(): if p.dim() > 1: xavier_uniform_(p)
Transformer
python
openai__openai-python
src/openai/types/beta/realtime/session_update_event_param.py
{ "start": 10428, "end": 10749 }
class ____(TypedDict, total=False): session: Required[Session] """Realtime session object configuration.""" type: Required[Literal["session.update"]] """The event type, must be `session.update`.""" event_id: str """Optional client-generated ID used to identify this event."""
SessionUpdateEventParam
python
apache__airflow
airflow-core/src/airflow/models/dagrun.py
{ "start": 4658, "end": 5297 }
class ____(NamedTuple): """Type of return for DagRun.task_instance_scheduling_decisions.""" tis: list[TI] schedulable_tis: list[TI] changed_tis: bool unfinished_tis: list[TI] finished_tis: list[TI] def _default_run_after(ctx): params = ctx.get_current_parameters() return params["data_interval_end"] or params["logical_date"] or timezone.utcnow() def _creator_note(val): """Creator for the ``note`` association proxy.""" if isinstance(val, str): return DagRunNote(content=val) if isinstance(val, dict): return DagRunNote(**val) return DagRunNote(*val)
TISchedulingDecision
python
walkccc__LeetCode
solutions/605. Can Place Flowers/605.py
{ "start": 0, "end": 366 }
class ____: def canPlaceFlowers(self, flowerbed: list[int], n: int) -> bool: for i, flower in enumerate(flowerbed): if flower == 0 and ( i == 0 or flowerbed[i - 1] == 0) and ( i == len(flowerbed) - 1 or flowerbed[i + 1] == 0): flowerbed[i] = 1 n -= 1 if n <= 0: return True return False
Solution
python
gevent__gevent
src/greentest/3.13/test_socket.py
{ "start": 160361, "end": 180144 }
class ____(SendrecvmsgServerTimeoutBase): # Test sendmsg() and recvmsg[_into]() using the ancillary data # features of the RFC 3542 Advanced Sockets API for IPv6. # Currently we can only handle certain data items (e.g. traffic # class, hop limit, MTU discovery and fragmentation settings) # without resorting to unportable means such as the struct module, # but the tests here are aimed at testing the ancillary data # handling in sendmsg() and recvmsg() rather than the IPv6 API # itself. # Test value to use when setting hop limit of packet hop_limit = 2 # Test value to use when setting traffic class of packet. # -1 means "use kernel default". traffic_class = -1 def ancillaryMapping(self, ancdata): # Given ancillary data list ancdata, return a mapping from # pairs (cmsg_level, cmsg_type) to corresponding cmsg_data. # Check that no (level, type) pair appears more than once. d = {} for cmsg_level, cmsg_type, cmsg_data in ancdata: self.assertNotIn((cmsg_level, cmsg_type), d) d[(cmsg_level, cmsg_type)] = cmsg_data return d def checkHopLimit(self, ancbufsize, maxhop=255, ignoreflags=0): # Receive hop limit into ancbufsize bytes of ancillary data # space. Check that data is MSG, ancillary data is not # truncated (but ignore any flags in ignoreflags), and hop # limit is between 0 and maxhop inclusive. 
self.serv_sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_RECVHOPLIMIT, 1) self.misc_event.set() msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG), ancbufsize) self.assertEqual(msg, MSG) self.checkRecvmsgAddress(addr, self.cli_addr) self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC, ignore=ignoreflags) self.assertEqual(len(ancdata), 1) self.assertIsInstance(ancdata[0], tuple) cmsg_level, cmsg_type, cmsg_data = ancdata[0] self.assertEqual(cmsg_level, socket.IPPROTO_IPV6) self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT) self.assertIsInstance(cmsg_data, bytes) self.assertEqual(len(cmsg_data), SIZEOF_INT) a = array.array("i") a.frombytes(cmsg_data) self.assertGreaterEqual(a[0], 0) self.assertLessEqual(a[0], maxhop) @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT") def testRecvHopLimit(self): # Test receiving the packet hop limit as ancillary data. self.checkHopLimit(ancbufsize=10240) @testRecvHopLimit.client_skip def _testRecvHopLimit(self): # Need to wait until server has asked to receive ancillary # data, as implementations are not required to buffer it # otherwise. self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout)) self.sendToServer(MSG) @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT") def testRecvHopLimitCMSG_SPACE(self): # Test receiving hop limit, using CMSG_SPACE to calculate buffer size. self.checkHopLimit(ancbufsize=socket.CMSG_SPACE(SIZEOF_INT)) @testRecvHopLimitCMSG_SPACE.client_skip def _testRecvHopLimitCMSG_SPACE(self): self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout)) self.sendToServer(MSG) # Could test receiving into buffer sized using CMSG_LEN, but RFC # 3542 says portable applications must provide space for trailing # padding. Implementations may set MSG_CTRUNC if there isn't # enough space for the padding. 
@requireAttrs(socket.socket, "sendmsg") @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT") def testSetHopLimit(self): # Test setting hop limit on outgoing packet and receiving it # at the other end. self.checkHopLimit(ancbufsize=10240, maxhop=self.hop_limit) @testSetHopLimit.client_skip def _testSetHopLimit(self): self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout)) self.assertEqual( self.sendmsgToServer([MSG], [(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT, array.array("i", [self.hop_limit]))]), len(MSG)) def checkTrafficClassAndHopLimit(self, ancbufsize, maxhop=255, ignoreflags=0): # Receive traffic class and hop limit into ancbufsize bytes of # ancillary data space. Check that data is MSG, ancillary # data is not truncated (but ignore any flags in ignoreflags), # and traffic class and hop limit are in range (hop limit no # more than maxhop). self.serv_sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_RECVHOPLIMIT, 1) self.serv_sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_RECVTCLASS, 1) self.misc_event.set() msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG), ancbufsize) self.assertEqual(msg, MSG) self.checkRecvmsgAddress(addr, self.cli_addr) self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC, ignore=ignoreflags) self.assertEqual(len(ancdata), 2) ancmap = self.ancillaryMapping(ancdata) tcdata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS)] self.assertEqual(len(tcdata), SIZEOF_INT) a = array.array("i") a.frombytes(tcdata) self.assertGreaterEqual(a[0], 0) self.assertLessEqual(a[0], 255) hldata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT)] self.assertEqual(len(hldata), SIZEOF_INT) a = array.array("i") a.frombytes(hldata) self.assertGreaterEqual(a[0], 0) self.assertLessEqual(a[0], maxhop) @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT", "IPV6_RECVTCLASS", "IPV6_TCLASS") def testRecvTrafficClassAndHopLimit(self): # Test receiving traffic class and hop limit as ancillary data. 
self.checkTrafficClassAndHopLimit(ancbufsize=10240) @testRecvTrafficClassAndHopLimit.client_skip def _testRecvTrafficClassAndHopLimit(self): self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout)) self.sendToServer(MSG) @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT", "IPV6_RECVTCLASS", "IPV6_TCLASS") def testRecvTrafficClassAndHopLimitCMSG_SPACE(self): # Test receiving traffic class and hop limit, using # CMSG_SPACE() to calculate buffer size. self.checkTrafficClassAndHopLimit( ancbufsize=socket.CMSG_SPACE(SIZEOF_INT) * 2) @testRecvTrafficClassAndHopLimitCMSG_SPACE.client_skip def _testRecvTrafficClassAndHopLimitCMSG_SPACE(self): self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout)) self.sendToServer(MSG) @requireAttrs(socket.socket, "sendmsg") @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT", "IPV6_RECVTCLASS", "IPV6_TCLASS") def testSetTrafficClassAndHopLimit(self): # Test setting traffic class and hop limit on outgoing packet, # and receiving them at the other end. self.checkTrafficClassAndHopLimit(ancbufsize=10240, maxhop=self.hop_limit) @testSetTrafficClassAndHopLimit.client_skip def _testSetTrafficClassAndHopLimit(self): self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout)) self.assertEqual( self.sendmsgToServer([MSG], [(socket.IPPROTO_IPV6, socket.IPV6_TCLASS, array.array("i", [self.traffic_class])), (socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT, array.array("i", [self.hop_limit]))]), len(MSG)) @requireAttrs(socket.socket, "sendmsg") @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT", "IPV6_RECVTCLASS", "IPV6_TCLASS") def testOddCmsgSize(self): # Try to send ancillary data with first item one byte too # long. Fall back to sending with correct size if this fails, # and check that second item was handled correctly. 
self.checkTrafficClassAndHopLimit(ancbufsize=10240, maxhop=self.hop_limit) @testOddCmsgSize.client_skip def _testOddCmsgSize(self): self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout)) try: nbytes = self.sendmsgToServer( [MSG], [(socket.IPPROTO_IPV6, socket.IPV6_TCLASS, array.array("i", [self.traffic_class]).tobytes() + b"\x00"), (socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT, array.array("i", [self.hop_limit]))]) except OSError as e: self.assertIsInstance(e.errno, int) nbytes = self.sendmsgToServer( [MSG], [(socket.IPPROTO_IPV6, socket.IPV6_TCLASS, array.array("i", [self.traffic_class])), (socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT, array.array("i", [self.hop_limit]))]) self.assertEqual(nbytes, len(MSG)) # Tests for proper handling of truncated ancillary data def checkHopLimitTruncatedHeader(self, ancbufsize, ignoreflags=0): # Receive hop limit into ancbufsize bytes of ancillary data # space, which should be too small to contain the ancillary # data header (if ancbufsize is None, pass no second argument # to recvmsg()). Check that data is MSG, MSG_CTRUNC is set # (unless included in ignoreflags), and no ancillary data is # returned. self.serv_sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_RECVHOPLIMIT, 1) self.misc_event.set() args = () if ancbufsize is None else (ancbufsize,) msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG), *args) self.assertEqual(msg, MSG) self.checkRecvmsgAddress(addr, self.cli_addr) self.assertEqual(ancdata, []) self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC, ignore=ignoreflags) @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT") def testCmsgTruncNoBufSize(self): # Check that no ancillary data is received when no ancillary # buffer size is provided. self.checkHopLimitTruncatedHeader(ancbufsize=None, # BSD seems to set # MSG_CTRUNC only if an item # has been partially # received. 
ignoreflags=socket.MSG_CTRUNC) @testCmsgTruncNoBufSize.client_skip def _testCmsgTruncNoBufSize(self): self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout)) self.sendToServer(MSG) @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT") def testSingleCmsgTrunc0(self): # Check that no ancillary data is received when ancillary # buffer size is zero. self.checkHopLimitTruncatedHeader(ancbufsize=0, ignoreflags=socket.MSG_CTRUNC) @testSingleCmsgTrunc0.client_skip def _testSingleCmsgTrunc0(self): self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout)) self.sendToServer(MSG) # Check that no ancillary data is returned for various non-zero # (but still too small) buffer sizes. @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT") def testSingleCmsgTrunc1(self): self.checkHopLimitTruncatedHeader(ancbufsize=1) @testSingleCmsgTrunc1.client_skip def _testSingleCmsgTrunc1(self): self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout)) self.sendToServer(MSG) @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT") def testSingleCmsgTrunc2Int(self): self.checkHopLimitTruncatedHeader(ancbufsize=2 * SIZEOF_INT) @testSingleCmsgTrunc2Int.client_skip def _testSingleCmsgTrunc2Int(self): self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout)) self.sendToServer(MSG) @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT") def testSingleCmsgTruncLen0Minus1(self): self.checkHopLimitTruncatedHeader(ancbufsize=socket.CMSG_LEN(0) - 1) @testSingleCmsgTruncLen0Minus1.client_skip def _testSingleCmsgTruncLen0Minus1(self): self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout)) self.sendToServer(MSG) @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT") def testSingleCmsgTruncInData(self): # Test truncation of a control message inside its associated # data. The message may be returned with its data truncated, # or not returned at all. 
self.serv_sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_RECVHOPLIMIT, 1) self.misc_event.set() with downgrade_malformed_data_warning(): # TODO: gh-110012 msg, ancdata, flags, addr = self.doRecvmsg( self.serv_sock, len(MSG), socket.CMSG_LEN(SIZEOF_INT) - 1) self.assertEqual(msg, MSG) self.checkRecvmsgAddress(addr, self.cli_addr) self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC) self.assertLessEqual(len(ancdata), 1) if ancdata: cmsg_level, cmsg_type, cmsg_data = ancdata[0] self.assertEqual(cmsg_level, socket.IPPROTO_IPV6) self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT) self.assertLess(len(cmsg_data), SIZEOF_INT) @testSingleCmsgTruncInData.client_skip def _testSingleCmsgTruncInData(self): self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout)) self.sendToServer(MSG) def checkTruncatedSecondHeader(self, ancbufsize, ignoreflags=0): # Receive traffic class and hop limit into ancbufsize bytes of # ancillary data space, which should be large enough to # contain the first item, but too small to contain the header # of the second. Check that data is MSG, MSG_CTRUNC is set # (unless included in ignoreflags), and only one ancillary # data item is returned. self.serv_sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_RECVHOPLIMIT, 1) self.serv_sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_RECVTCLASS, 1) self.misc_event.set() msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG), ancbufsize) self.assertEqual(msg, MSG) self.checkRecvmsgAddress(addr, self.cli_addr) self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC, ignore=ignoreflags) self.assertEqual(len(ancdata), 1) cmsg_level, cmsg_type, cmsg_data = ancdata[0] self.assertEqual(cmsg_level, socket.IPPROTO_IPV6) self.assertIn(cmsg_type, {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT}) self.assertEqual(len(cmsg_data), SIZEOF_INT) a = array.array("i") a.frombytes(cmsg_data) self.assertGreaterEqual(a[0], 0) self.assertLessEqual(a[0], 255) # Try the above test with various buffer sizes. 
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT", "IPV6_RECVTCLASS", "IPV6_TCLASS") def testSecondCmsgTrunc0(self): self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT), ignoreflags=socket.MSG_CTRUNC) @testSecondCmsgTrunc0.client_skip def _testSecondCmsgTrunc0(self): self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout)) self.sendToServer(MSG) @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT", "IPV6_RECVTCLASS", "IPV6_TCLASS") def testSecondCmsgTrunc1(self): self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) + 1) @testSecondCmsgTrunc1.client_skip def _testSecondCmsgTrunc1(self): self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout)) self.sendToServer(MSG) @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT", "IPV6_RECVTCLASS", "IPV6_TCLASS") def testSecondCmsgTrunc2Int(self): self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) + 2 * SIZEOF_INT) @testSecondCmsgTrunc2Int.client_skip def _testSecondCmsgTrunc2Int(self): self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout)) self.sendToServer(MSG) @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT", "IPV6_RECVTCLASS", "IPV6_TCLASS") def testSecondCmsgTruncLen0Minus1(self): self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) + socket.CMSG_LEN(0) - 1) @testSecondCmsgTruncLen0Minus1.client_skip def _testSecondCmsgTruncLen0Minus1(self): self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout)) self.sendToServer(MSG) @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT", "IPV6_RECVTCLASS", "IPV6_TCLASS") def testSecondCmsgTruncInData(self): # Test truncation of the second of two control messages inside # its associated data. 
self.serv_sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_RECVHOPLIMIT, 1) self.serv_sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_RECVTCLASS, 1) self.misc_event.set() with downgrade_malformed_data_warning(): # TODO: gh-110012 msg, ancdata, flags, addr = self.doRecvmsg( self.serv_sock, len(MSG), socket.CMSG_SPACE(SIZEOF_INT) + socket.CMSG_LEN(SIZEOF_INT) - 1) self.assertEqual(msg, MSG) self.checkRecvmsgAddress(addr, self.cli_addr) self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC) cmsg_types = {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT} cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0) self.assertEqual(cmsg_level, socket.IPPROTO_IPV6) cmsg_types.remove(cmsg_type) self.assertEqual(len(cmsg_data), SIZEOF_INT) a = array.array("i") a.frombytes(cmsg_data) self.assertGreaterEqual(a[0], 0) self.assertLessEqual(a[0], 255) if ancdata: cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0) self.assertEqual(cmsg_level, socket.IPPROTO_IPV6) cmsg_types.remove(cmsg_type) self.assertLess(len(cmsg_data), SIZEOF_INT) self.assertEqual(ancdata, []) @testSecondCmsgTruncInData.client_skip def _testSecondCmsgTruncInData(self): self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout)) self.sendToServer(MSG) # Derive concrete test classes for different socket types.
RFC3542AncillaryTest
python
walkccc__LeetCode
solutions/3237. Alt and Tab Simulation/3237.py
{ "start": 0, "end": 392 }
class ____: def simulationResult( self, windows: list[int], queries: list[int], ) -> list[int]: ans = [] seen = set() for query in reversed(queries): if query not in seen: ans.append(query) seen.add(query) for window in windows: if window not in seen: ans.append(window) seen.add(window) return ans
Solution
python
pytorch__pytorch
test/torch_np/numpy_tests/lib/test_function_base.py
{ "start": 32418, "end": 41380 }
class ____(TestCase): def test_basic(self): v = [[1, 1], [3, 4]] x = np.array(v) dx = [np.array([[2.0, 3.0], [2.0, 3.0]]), np.array([[0.0, 0.0], [1.0, 1.0]])] assert_array_equal(gradient(x), dx) assert_array_equal(gradient(v), dx) def test_args(self): dx = np.cumsum(np.ones(5)) dx_uneven = [1.0, 2.0, 5.0, 9.0, 11.0] f_2d = np.arange(25).reshape(5, 5) # distances must be scalars or have size equal to gradient[axis] gradient(np.arange(5), 3.0) gradient(np.arange(5), np.array(3.0)) gradient(np.arange(5), dx) # dy is set equal to dx because scalar gradient(f_2d, 1.5) gradient(f_2d, np.array(1.5)) gradient(f_2d, dx_uneven, dx_uneven) # mix between even and uneven spaces and # mix between scalar and vector gradient(f_2d, dx, 2) # 2D but axis specified gradient(f_2d, dx, axis=1) # 2d coordinate arguments are not yet allowed assert_raises_regex( ValueError, ".*scalars or 1d", gradient, f_2d, np.stack([dx] * 2, axis=-1), 1, ) def test_badargs(self): f_2d = np.arange(25).reshape(5, 5) x = np.cumsum(np.ones(5)) # wrong sizes assert_raises(ValueError, gradient, f_2d, x, np.ones(2)) assert_raises(ValueError, gradient, f_2d, 1, np.ones(2)) assert_raises(ValueError, gradient, f_2d, np.ones(2), np.ones(2)) # wrong number of arguments assert_raises(TypeError, gradient, f_2d, x) assert_raises(TypeError, gradient, f_2d, x, axis=(0, 1)) assert_raises(TypeError, gradient, f_2d, x, x, x) assert_raises(TypeError, gradient, f_2d, 1, 1, 1) assert_raises(TypeError, gradient, f_2d, x, x, axis=1) assert_raises(TypeError, gradient, f_2d, 1, 1, axis=1) @torch._dynamo.config.patch(use_numpy_random_stream=True) def test_second_order_accurate(self): # Testing that the relative numerical error is less that 3% for # this example problem. This corresponds to second order # accurate finite differences for all interior and boundary # points. 
x = np.linspace(0, 1, 10) dx = x[1] - x[0] y = 2 * x**3 + 4 * x**2 + 2 * x analytical = 6 * x**2 + 8 * x + 2 num_error = np.abs((np.gradient(y, dx, edge_order=2) / analytical) - 1) assert_(np.all(num_error < 0.03).item() is True) # test with unevenly spaced np.random.seed(0) x = np.sort(np.random.random(10)) y = 2 * x**3 + 4 * x**2 + 2 * x analytical = 6 * x**2 + 8 * x + 2 num_error = np.abs((np.gradient(y, x, edge_order=2) / analytical) - 1) assert_(np.all(num_error < 0.03).item() is True) def test_spacing(self): f = np.array([0, 2.0, 3.0, 4.0, 5.0, 5.0]) f = np.tile(f, (6, 1)) + f.reshape(-1, 1) x_uneven = np.array([0.0, 0.5, 1.0, 3.0, 5.0, 7.0]) x_even = np.arange(6.0) fdx_even_ord1 = np.tile([2.0, 1.5, 1.0, 1.0, 0.5, 0.0], (6, 1)) fdx_even_ord2 = np.tile([2.5, 1.5, 1.0, 1.0, 0.5, -0.5], (6, 1)) fdx_uneven_ord1 = np.tile([4.0, 3.0, 1.7, 0.5, 0.25, 0.0], (6, 1)) fdx_uneven_ord2 = np.tile([5.0, 3.0, 1.7, 0.5, 0.25, -0.25], (6, 1)) # evenly spaced for edge_order, exp_res in [(1, fdx_even_ord1), (2, fdx_even_ord2)]: res1 = gradient(f, 1.0, axis=(0, 1), edge_order=edge_order) res2 = gradient(f, x_even, x_even, axis=(0, 1), edge_order=edge_order) res3 = gradient(f, x_even, x_even, axis=None, edge_order=edge_order) assert_array_equal(res1, res2) assert_array_equal(res2, res3) assert_almost_equal(res1[0], exp_res.T) assert_almost_equal(res1[1], exp_res) res1 = gradient(f, 1.0, axis=0, edge_order=edge_order) res2 = gradient(f, x_even, axis=0, edge_order=edge_order) assert_(res1.shape == res2.shape) assert_almost_equal(res2, exp_res.T) res1 = gradient(f, 1.0, axis=1, edge_order=edge_order) res2 = gradient(f, x_even, axis=1, edge_order=edge_order) assert_(res1.shape == res2.shape) assert_array_equal(res2, exp_res) # unevenly spaced for edge_order, exp_res in [(1, fdx_uneven_ord1), (2, fdx_uneven_ord2)]: res1 = gradient(f, x_uneven, x_uneven, axis=(0, 1), edge_order=edge_order) res2 = gradient(f, x_uneven, x_uneven, axis=None, edge_order=edge_order) assert_array_equal(res1, 
res2) assert_almost_equal(res1[0], exp_res.T) assert_almost_equal(res1[1], exp_res) res1 = gradient(f, x_uneven, axis=0, edge_order=edge_order) assert_almost_equal(res1, exp_res.T) res1 = gradient(f, x_uneven, axis=1, edge_order=edge_order) assert_almost_equal(res1, exp_res) # mixed res1 = gradient(f, x_even, x_uneven, axis=(0, 1), edge_order=1) res2 = gradient(f, x_uneven, x_even, axis=(1, 0), edge_order=1) assert_array_equal(res1[0], res2[1]) assert_array_equal(res1[1], res2[0]) assert_almost_equal(res1[0], fdx_even_ord1.T) assert_almost_equal(res1[1], fdx_uneven_ord1) res1 = gradient(f, x_even, x_uneven, axis=(0, 1), edge_order=2) res2 = gradient(f, x_uneven, x_even, axis=(1, 0), edge_order=2) assert_array_equal(res1[0], res2[1]) assert_array_equal(res1[1], res2[0]) assert_almost_equal(res1[0], fdx_even_ord2.T) assert_almost_equal(res1[1], fdx_uneven_ord2) def test_specific_axes(self): # Testing that gradient can work on a given axis only v = [[1, 1], [3, 4]] x = np.array(v) dx = [np.array([[2.0, 3.0], [2.0, 3.0]]), np.array([[0.0, 0.0], [1.0, 1.0]])] assert_array_equal(gradient(x, axis=0), dx[0]) assert_array_equal(gradient(x, axis=1), dx[1]) assert_array_equal(gradient(x, axis=-1), dx[1]) assert_array_equal(gradient(x, axis=(1, 0)), [dx[1], dx[0]]) # test axis=None which means all axes assert_almost_equal(gradient(x, axis=None), [dx[0], dx[1]]) # and is the same as no axis keyword given assert_almost_equal(gradient(x, axis=None), gradient(x)) # test vararg order assert_array_equal(gradient(x, 2, 3, axis=(1, 0)), [dx[1] / 2.0, dx[0] / 3.0]) # test maximal number of varargs assert_raises(TypeError, gradient, x, 1, 2, axis=1) assert_raises(np.AxisError, gradient, x, axis=3) assert_raises(np.AxisError, gradient, x, axis=-3) # assert_raises(TypeError, gradient, x, axis=[1,]) def test_inexact_dtypes(self): for dt in [np.float16, np.float32, np.float64]: # dtypes should not be promoted in a different way to what diff does x = np.array([1, 2, 3], dtype=dt) 
assert_equal(gradient(x).dtype, np.diff(x).dtype) def test_values(self): # needs at least 2 points for edge_order ==1 gradient(np.arange(2), edge_order=1) # needs at least 3 points for edge_order ==1 gradient(np.arange(3), edge_order=2) assert_raises(ValueError, gradient, np.arange(0), edge_order=1) assert_raises(ValueError, gradient, np.arange(0), edge_order=2) assert_raises(ValueError, gradient, np.arange(1), edge_order=1) assert_raises(ValueError, gradient, np.arange(1), edge_order=2) assert_raises(ValueError, gradient, np.arange(2), edge_order=2) @parametrize( "f_dtype", [ np.uint8, ], ) def test_f_decreasing_unsigned_int(self, f_dtype): f = np.array([5, 4, 3, 2, 1], dtype=f_dtype) g = gradient(f) assert_array_equal(g, [-1] * len(f)) @parametrize("f_dtype", [np.int8, np.int16, np.int32, np.int64]) def test_f_signed_int_big_jump(self, f_dtype): maxint = np.iinfo(f_dtype).max x = np.array([1, 3]) f = np.array([-1, maxint], dtype=f_dtype) dfdx = gradient(f, x) assert_array_equal(dfdx, [(maxint + 1) // 2] * 2) @parametrize( "x_dtype", [ np.uint8, ], ) def test_x_decreasing_unsigned(self, x_dtype): x = np.array([3, 2, 1], dtype=x_dtype) f = np.array([0, 2, 4]) dfdx = gradient(f, x) assert_array_equal(dfdx, [-2] * len(x)) @parametrize("x_dtype", [np.int8, np.int16, np.int32, np.int64]) def test_x_signed_int_big_jump(self, x_dtype): minint = np.iinfo(x_dtype).min maxint = np.iinfo(x_dtype).max x = np.array([-1, maxint], dtype=x_dtype) f = np.array([minint // 2, 0]) dfdx = gradient(f, x) assert_array_equal(dfdx, [0.5, 0.5])
TestGradient
python
pytorch__pytorch
benchmarks/operator_benchmark/pt/qembedding_pack_test.py
{ "start": 1629, "end": 2050 }
class ____(op_bench.TorchBenchmarkBase): def init(self, num_embeddings, embedding_dim, batch_size, op_func): self.inputs = { "weight": torch.rand( batch_size, num_embeddings, embedding_dim, dtype=torch.float ) + 1 } self.op_func = op_func def forward(self, weight): return self.op_func(weight)
EmbeddingBagThreeDimFloatToFusedBase
python
astropy__astropy
astropy/modeling/parameters.py
{ "start": 695, "end": 821 }
class ____(ValueError, ParameterError): """Used for incorrect input parameter values and definitions."""
InputParameterError
python
pyca__cryptography
src/cryptography/hazmat/primitives/asymmetric/ec.py
{ "start": 7720, "end": 7902 }
class ____(EllipticCurve): name = "sect283k1" key_size = 281 group_order = 0x1FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFE9AE2ED07577265DFF7F94451E061E163C61 # noqa: E501
SECT283K1
python
huggingface__transformers
tests/models/afmoe/test_modeling_afmoe.py
{ "start": 3298, "end": 4328 }
class ____(CausalLMModelTest, unittest.TestCase): model_tester_class = AfmoeModelTester all_model_classes = (AfmoeModel, AfmoeForCausalLM) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": AfmoeModel, "text-generation": AfmoeForCausalLM} if is_torch_available() else {} ) @unittest.skip("Afmoe applies key/query norm which doesn't work with packing") def test_eager_padding_matches_padding_free_with_position_ids(self): pass @unittest.skip("Afmoe applies key/query norm which doesn't work with packing") def test_sdpa_padding_matches_padding_free_with_position_ids(self): pass @unittest.skip("Afmoe applies key/query norm which doesn't work with packing") def test_model_rope_scaling_frequencies(self): pass @unittest.skip("Afmoe has moe, output can be different") def test_model_outputs_equivalence(self, **kwargs): pass # TODO: Add integration tests once we have a checkpoint on the Hub
AfmoeModelTest
python
pennersr__django-allauth
allauth/account/admin.py
{ "start": 266, "end": 1648 }
class ____(admin.ModelAdmin): list_display = ("email", "user", "primary", "verified") list_filter = ("primary", "verified") search_fields = [] raw_id_fields = ("user",) actions = ["make_verified"] def get_search_fields(self, request): base_fields = get_adapter().get_user_search_fields() return ["email"] + list(map(lambda a: "user__" + a, base_fields)) def make_verified(self, request, queryset): for email_address in queryset.filter(verified=False).iterator(): if email_address.set_verified(): signals.email_confirmed.send( sender=EmailAddress, request=request, email_address=email_address, ) self.message_user( request, _("Marked {email} as verified.").format(email=email_address.email), level=messages.SUCCESS, ) else: self.message_user( request, _("Failed to mark {email} as verified.").format( email=email_address.email ), level=messages.ERROR, ) make_verified.short_description = _("Mark selected email addresses as verified") # type: ignore[attr-defined]
EmailAddressAdmin
python
huggingface__transformers
src/transformers/models/bros/modeling_bros.py
{ "start": 3187, "end": 3808 }
class ____(nn.Module): def __init__(self, config): super().__init__() self.dim_bbox = config.dim_bbox self.x_pos_emb = BrosPositionalEmbedding1D(config) self.y_pos_emb = BrosPositionalEmbedding1D(config) def forward(self, bbox: torch.Tensor) -> torch.Tensor: stack = [] for i in range(self.dim_bbox): if i % 2 == 0: stack.append(self.x_pos_emb(bbox[..., i])) else: stack.append(self.y_pos_emb(bbox[..., i])) bbox_pos_emb = torch.cat(stack, dim=-1) return bbox_pos_emb
BrosPositionalEmbedding2D
python
optuna__optuna
tests/artifacts_tests/test_gcs.py
{ "start": 928, "end": 3577 }
class ____: def __init__(self, blob_name: str) -> None: self.blob_name = blob_name def download_as_bytes(self) -> bytes: return _MOCK_BUCKET_CONTENT[self.blob_name] def upload_from_string(self, data: bytes) -> None: _MOCK_BUCKET_CONTENT[self.blob_name] = data @contextlib.contextmanager def init_mock_client() -> Iterator[None]: # In case we fail to patch `google.cloud.storage.Client`, we deliberately set an invalid # credential path so that we do not accidentally access GCS. # Note that this is not a perfect measure; it can become ineffective in future when the # mechanism for finding the default credential is changed in the Cloud Storage API. os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "/dev/null" with patch("google.cloud.storage.Client") as MockClient: instance = MockClient.return_value def bucket(name: str) -> MockBucket: assert name == "mock-bucket" return MockBucket() instance.bucket.side_effect = bucket yield @pytest.mark.parametrize("explicit_client", [False, True]) def test_upload_download(explicit_client: bool) -> None: with init_mock_client(): bucket_name = "mock-bucket" if explicit_client: backend = GCSArtifactStore(bucket_name, google.cloud.storage.Client()) else: backend = GCSArtifactStore(bucket_name) artifact_id = "dummy-uuid" dummy_content = b"Hello World" buf = io.BytesIO(dummy_content) backend.write(artifact_id, buf) client = google.cloud.storage.Client() assert len(list(client.bucket(bucket_name).list_blobs())) == 1 blob = client.bucket(bucket_name).blob(artifact_id) assert blob.download_as_bytes() == dummy_content with backend.open_reader(artifact_id) as f: actual = f.read() assert actual == dummy_content def test_remove() -> None: with init_mock_client(): bucket_name = "mock-bucket" backend = GCSArtifactStore(bucket_name) client = google.cloud.storage.Client() artifact_id = "dummy-uuid" backend.write(artifact_id, io.BytesIO(b"Hello")) assert len(list(client.bucket(bucket_name).list_blobs())) == 1 backend.remove(artifact_id) assert 
len(list(client.bucket(bucket_name).list_blobs())) == 0 def test_file_not_found_exception() -> None: with init_mock_client(): bucket_name = "mock-bucket" backend = GCSArtifactStore(bucket_name) with pytest.raises(ArtifactNotFound): backend.open_reader("not-found-id")
MockBlob
python
python-poetry__poetry
src/poetry/installation/installer.py
{ "start": 1106, "end": 12957 }
class ____: def __init__( self, io: IO, env: Env, package: ProjectPackage, locker: Locker, pool: RepositoryPool, config: Config, installed: InstalledRepository | None = None, executor: Executor | None = None, disable_cache: bool = False, ) -> None: self._io = io self._env = env self._package = package self._locker = locker self._pool = pool self._config = config self._dry_run = False self._requires_synchronization = False self._update = False self._verbose = False self._groups: Iterable[NormalizedName] | None = None self._skip_directory = False self._lock = False self._whitelist: list[NormalizedName] = [] self._extras: list[NormalizedName] = [] if executor is None: executor = Executor( self._env, self._pool, config, self._io, disable_cache=disable_cache ) self._executor = executor if installed is None: installed = self._get_installed() self._installed_repository = installed @property def executor(self) -> Executor: return self._executor def set_package(self, package: ProjectPackage) -> Installer: self._package = package return self def set_locker(self, locker: Locker) -> Installer: self._locker = locker return self def run(self) -> int: # Check if refresh if not self._update and self._lock and self._locker.is_locked(): return self._do_refresh() # Force update if there is no lock file present if not self._update and not self._locker.is_locked(): self._update = True if self.is_dry_run(): self.verbose(True) return self._do_install() def dry_run(self, dry_run: bool = True) -> Installer: self._dry_run = dry_run self._executor.dry_run(dry_run) return self def is_dry_run(self) -> bool: return self._dry_run def requires_synchronization( self, requires_synchronization: bool = True ) -> Installer: self._requires_synchronization = requires_synchronization return self def verbose(self, verbose: bool = True) -> Installer: self._verbose = verbose self._executor.verbose(verbose) return self def is_verbose(self) -> bool: return self._verbose def only_groups(self, groups: 
Iterable[NormalizedName]) -> Installer: self._groups = groups return self def update(self, update: bool = True) -> Installer: self._update = update return self def skip_directory(self, skip_directory: bool = False) -> Installer: self._skip_directory = skip_directory return self def lock(self, update: bool = True) -> Installer: """ Prepare the installer for locking only. """ self.update(update=update) self.execute_operations(False) self._lock = True return self def is_updating(self) -> bool: return self._update def execute_operations(self, execute: bool = True) -> Installer: if not execute: self._executor.disable() return self def whitelist(self, packages: Iterable[str]) -> Installer: self._whitelist = [canonicalize_name(p) for p in packages] return self def extras(self, extras: list[str]) -> Installer: self._extras = [canonicalize_name(extra) for extra in extras] return self def _do_refresh(self) -> int: from poetry.puzzle.solver import Solver # Checking extras for extra in self._extras: if extra not in self._package.extras: raise ValueError(f"Extra [{extra}] is not specified.") locked_repository = self._locker.locked_repository() solver = Solver( self._package, self._pool, locked_repository.packages, locked_repository.packages, self._io, ) # Always re-solve directory dependencies, otherwise we can't determine # if anything has changed (and the lock file contains an invalid version). 
use_latest = [ p.name for p in locked_repository.packages if p.source_type == "directory" ] with solver.provider.use_source_root( source_root=self._env.path.joinpath("src") ): solved_packages = solver.solve(use_latest=use_latest).get_solved_packages() self._write_lock_file(solved_packages, force=True) return 0 def _do_install(self) -> int: from poetry.puzzle.solver import Solver locked_repository = Repository("poetry-locked") reresolve = self._config.get("installer.re-resolve", True) solved_packages: dict[Package, TransitivePackageInfo] = {} lockfile_repo = LockfileRepository() if self._update: if not self._lock and self._locker.is_locked(): locked_repository = self._locker.locked_repository() # If no packages have been whitelisted (The ones we want to update), # we whitelist every package in the lock file. if not self._whitelist: for pkg in locked_repository.packages: self._whitelist.append(pkg.name) # Checking extras for extra in self._extras: if extra not in self._package.extras: raise ValueError(f"Extra [{extra}] is not specified.") self._io.write_line("<info>Updating dependencies</>") solver = Solver( self._package, self._pool, self._installed_repository.packages, locked_repository.packages, self._io, ) with solver.provider.use_source_root( source_root=self._env.path.joinpath("src") ): solved_packages = solver.solve( use_latest=self._whitelist ).get_solved_packages() if not self.executor.enabled: # If we are only in lock mode, no need to go any further self._write_lock_file(solved_packages) return 0 for package in solved_packages: if not lockfile_repo.has_package(package): lockfile_repo.add_package(package) else: self._io.write_line("<info>Installing dependencies from lock file</>") if not self._locker.is_fresh(): raise ValueError( "pyproject.toml changed significantly since poetry.lock was last" " generated. Run `poetry lock` to fix the lock file." 
) if not (reresolve or self._locker.is_locked_groups_and_markers()): if self._io.is_verbose(): self._io.write_line( "<info>Cannot install without re-resolving" " because the lock file is not at least version 2.1</>" ) reresolve = True locker_extras = { canonicalize_name(extra) for extra in self._locker.lock_data.get("extras", {}) } for extra in self._extras: if extra not in locker_extras: raise ValueError(f"Extra [{extra}] is not specified.") locked_repository = self._locker.locked_repository() if reresolve: lockfile_repo = locked_repository else: solved_packages = self._locker.locked_packages() if self._io.is_verbose(): self._io.write_line("") self._io.write_line( "<info>Finding the necessary packages for the current system</>" ) if reresolve: if self._groups is not None: root = self._package.with_dependency_groups( list(self._groups), only=True ) else: root = self._package.without_optional_dependency_groups() # We resolve again by only using the lock file packages = lockfile_repo.packages + locked_repository.packages pool = RepositoryPool.from_packages(packages, self._config) solver = Solver( root, pool, self._installed_repository.packages, locked_repository.packages, NullIO(), active_root_extras=self._extras, ) # Everything is resolved at this point, so we no longer need # to load deferred dependencies (i.e. 
VCS, URL and path dependencies) solver.provider.load_deferred(False) with solver.use_environment(self._env): transaction = solver.solve(use_latest=self._whitelist) else: if self._groups is None: groups = self._package.dependency_group_names() else: groups = set(self._groups) transaction = Transaction( locked_repository.packages, solved_packages, self._installed_repository.packages, self._package, self._env.marker_env, groups, ) ops = transaction.calculate_operations( with_uninstalls=( self._requires_synchronization or (self._update and not reresolve) ), synchronize=self._requires_synchronization, skip_directory=self._skip_directory, extras=set(self._extras), system_site_packages={ p.name for p in self._installed_repository.system_site_packages }, ) if reresolve and not self._requires_synchronization: # If no packages synchronisation has been requested we need # to calculate the uninstall operations transaction = Transaction( locked_repository.packages, lockfile_repo.packages, installed_packages=self._installed_repository.packages, root_package=root, ) ops = [ op for op in transaction.calculate_operations(with_uninstalls=True) if op.job_type == "uninstall" ] + ops # Validate the dependencies for op in ops: dep = op.package.to_dependency() if dep.is_file() or dep.is_directory(): dep = cast("PathDependency", dep) dep.validate(raise_error=not op.skipped) # Execute operations status = self._execute(ops) if status == 0 and self._update: # Only write lock file when installation is success self._write_lock_file(solved_packages) return status def _write_lock_file( self, packages: dict[Package, TransitivePackageInfo], force: bool = False, ) -> None: if not self.is_dry_run() and (force or self._update): updated_lock = self._locker.set_lock_data(self._package, packages) if updated_lock: self._io.write_line("") self._io.write_line("<info>Writing lock file</>") def _execute(self, operations: list[Operation]) -> int: return self._executor.execute(operations) def 
_get_installed(self) -> InstalledRepository: return InstalledRepository.load(self._env)
Installer
python
numba__llvmlite
llvmlite/binding/value.py
{ "start": 13987, "end": 19477 }
class ____(_ValueIterator): kind = 'block' def _dispose(self): self._capi.LLVMPY_DisposeIncomingBlocksIter(self) def _next(self): return ffi.lib.LLVMPY_IncomingBlocksIterNext(self) # FFI ffi.lib.LLVMPY_PrintValueToString.argtypes = [ ffi.LLVMValueRef, POINTER(c_char_p) ] ffi.lib.LLVMPY_GetGlobalParent.argtypes = [ffi.LLVMValueRef] ffi.lib.LLVMPY_GetGlobalParent.restype = ffi.LLVMModuleRef ffi.lib.LLVMPY_GetValueName.argtypes = [ffi.LLVMValueRef] ffi.lib.LLVMPY_GetValueName.restype = c_char_p ffi.lib.LLVMPY_SetValueName.argtypes = [ffi.LLVMValueRef, c_char_p] ffi.lib.LLVMPY_TypeOf.argtypes = [ffi.LLVMValueRef] ffi.lib.LLVMPY_TypeOf.restype = ffi.LLVMTypeRef ffi.lib.LLVMPY_GlobalGetValueType.argtypes = [ffi.LLVMValueRef] ffi.lib.LLVMPY_GlobalGetValueType.restype = ffi.LLVMTypeRef ffi.lib.LLVMPY_GetTypeName.argtypes = [ffi.LLVMTypeRef] ffi.lib.LLVMPY_GetTypeName.restype = c_void_p ffi.lib.LLVMPY_GetLinkage.argtypes = [ffi.LLVMValueRef] ffi.lib.LLVMPY_GetLinkage.restype = c_int ffi.lib.LLVMPY_SetLinkage.argtypes = [ffi.LLVMValueRef, c_int] ffi.lib.LLVMPY_GetVisibility.argtypes = [ffi.LLVMValueRef] ffi.lib.LLVMPY_GetVisibility.restype = c_int ffi.lib.LLVMPY_SetVisibility.argtypes = [ffi.LLVMValueRef, c_int] ffi.lib.LLVMPY_GetDLLStorageClass.argtypes = [ffi.LLVMValueRef] ffi.lib.LLVMPY_GetDLLStorageClass.restype = c_int ffi.lib.LLVMPY_SetDLLStorageClass.argtypes = [ffi.LLVMValueRef, c_int] ffi.lib.LLVMPY_GetEnumAttributeKindForName.argtypes = [c_char_p, c_size_t] ffi.lib.LLVMPY_GetEnumAttributeKindForName.restype = c_uint ffi.lib.LLVMPY_AddFunctionAttr.argtypes = [ffi.LLVMValueRef, c_uint] ffi.lib.LLVMPY_IsDeclaration.argtypes = [ffi.LLVMValueRef] ffi.lib.LLVMPY_IsDeclaration.restype = c_int ffi.lib.LLVMPY_FunctionAttributesIter.argtypes = [ffi.LLVMValueRef] ffi.lib.LLVMPY_FunctionAttributesIter.restype = ffi.LLVMAttributeListIterator ffi.lib.LLVMPY_CallInstAttributesIter.argtypes = [ffi.LLVMValueRef] ffi.lib.LLVMPY_CallInstAttributesIter.restype = 
ffi.LLVMAttributeListIterator ffi.lib.LLVMPY_InvokeInstAttributesIter.argtypes = [ffi.LLVMValueRef] ffi.lib.LLVMPY_InvokeInstAttributesIter.restype = ffi.LLVMAttributeListIterator ffi.lib.LLVMPY_GlobalAttributesIter.argtypes = [ffi.LLVMValueRef] ffi.lib.LLVMPY_GlobalAttributesIter.restype = ffi.LLVMAttributeSetIterator ffi.lib.LLVMPY_ArgumentAttributesIter.argtypes = [ffi.LLVMValueRef] ffi.lib.LLVMPY_ArgumentAttributesIter.restype = ffi.LLVMAttributeSetIterator ffi.lib.LLVMPY_FunctionBlocksIter.argtypes = [ffi.LLVMValueRef] ffi.lib.LLVMPY_FunctionBlocksIter.restype = ffi.LLVMBlocksIterator ffi.lib.LLVMPY_FunctionArgumentsIter.argtypes = [ffi.LLVMValueRef] ffi.lib.LLVMPY_FunctionArgumentsIter.restype = ffi.LLVMArgumentsIterator ffi.lib.LLVMPY_BlockInstructionsIter.argtypes = [ffi.LLVMValueRef] ffi.lib.LLVMPY_BlockInstructionsIter.restype = ffi.LLVMInstructionsIterator ffi.lib.LLVMPY_InstructionOperandsIter.argtypes = [ffi.LLVMValueRef] ffi.lib.LLVMPY_InstructionOperandsIter.restype = ffi.LLVMOperandsIterator ffi.lib.LLVMPY_PhiIncomingBlocksIter.argtypes = [ffi.LLVMValueRef] ffi.lib.LLVMPY_PhiIncomingBlocksIter.restype = ffi.LLVMIncomingBlocksIterator ffi.lib.LLVMPY_DisposeAttributeListIter.argtypes = [ ffi.LLVMAttributeListIterator] ffi.lib.LLVMPY_DisposeAttributeSetIter.argtypes = [ffi.LLVMAttributeSetIterator] ffi.lib.LLVMPY_DisposeBlocksIter.argtypes = [ffi.LLVMBlocksIterator] ffi.lib.LLVMPY_DisposeInstructionsIter.argtypes = [ffi.LLVMInstructionsIterator] ffi.lib.LLVMPY_DisposeOperandsIter.argtypes = [ffi.LLVMOperandsIterator] ffi.lib.LLVMPY_DisposeIncomingBlocksIter.argtypes = [ ffi.LLVMIncomingBlocksIterator] ffi.lib.LLVMPY_AttributeListIterNext.argtypes = [ffi.LLVMAttributeListIterator] ffi.lib.LLVMPY_AttributeListIterNext.restype = c_void_p ffi.lib.LLVMPY_AttributeSetIterNext.argtypes = [ffi.LLVMAttributeSetIterator] ffi.lib.LLVMPY_AttributeSetIterNext.restype = c_void_p ffi.lib.LLVMPY_BlocksIterNext.argtypes = [ffi.LLVMBlocksIterator] 
ffi.lib.LLVMPY_BlocksIterNext.restype = ffi.LLVMValueRef ffi.lib.LLVMPY_ArgumentsIterNext.argtypes = [ffi.LLVMArgumentsIterator] ffi.lib.LLVMPY_ArgumentsIterNext.restype = ffi.LLVMValueRef ffi.lib.LLVMPY_InstructionsIterNext.argtypes = [ffi.LLVMInstructionsIterator] ffi.lib.LLVMPY_InstructionsIterNext.restype = ffi.LLVMValueRef ffi.lib.LLVMPY_OperandsIterNext.argtypes = [ffi.LLVMOperandsIterator] ffi.lib.LLVMPY_OperandsIterNext.restype = ffi.LLVMValueRef ffi.lib.LLVMPY_IncomingBlocksIterNext.argtypes = [ ffi.LLVMIncomingBlocksIterator] ffi.lib.LLVMPY_IncomingBlocksIterNext.restype = ffi.LLVMValueRef ffi.lib.LLVMPY_GetOpcodeName.argtypes = [ffi.LLVMValueRef] ffi.lib.LLVMPY_GetOpcodeName.restype = c_void_p ffi.lib.LLVMPY_IsConstant.argtypes = [ffi.LLVMValueRef] ffi.lib.LLVMPY_IsConstant.restype = c_bool ffi.lib.LLVMPY_GetValueKind.argtypes = [ffi.LLVMValueRef] ffi.lib.LLVMPY_GetValueKind.restype = c_int ffi.lib.LLVMPY_GetConstantIntRawValue.argtypes = [ffi.LLVMValueRef, POINTER(c_bool)] ffi.lib.LLVMPY_GetConstantIntRawValue.restype = POINTER(c_uint64) ffi.lib.LLVMPY_GetConstantIntNumWords.argtypes = [ffi.LLVMValueRef] ffi.lib.LLVMPY_GetConstantIntNumWords.restype = c_uint ffi.lib.LLVMPY_GetConstantFPValue.argtypes = [ffi.LLVMValueRef, POINTER(c_bool)] ffi.lib.LLVMPY_GetConstantFPValue.restype = c_double
_IncomingBlocksIterator
python
joblib__joblib
joblib/numpy_pickle.py
{ "start": 14729, "end": 28791 }
class ____(Unpickler): """A subclass of the Unpickler to unpickle our numpy pickles. Attributes ---------- mmap_mode: str The memorymap mode to use for reading numpy arrays. file_handle: file_like File object to unpickle from. ensure_native_byte_order: bool If True, coerce the array to use the native endianness of the host system. filename: str Name of the file to unpickle from. It should correspond to file_handle. This parameter is required when using mmap_mode. np: module Reference to numpy module if numpy is installed else None. """ dispatch = Unpickler.dispatch.copy() def __init__(self, filename, file_handle, ensure_native_byte_order, mmap_mode=None): # The next line is for backward compatibility with pickle generated # with joblib versions less than 0.10. self._dirname = os.path.dirname(filename) self.mmap_mode = mmap_mode self.file_handle = file_handle # filename is required for numpy mmap mode. self.filename = filename self.compat_mode = False self.ensure_native_byte_order = ensure_native_byte_order Unpickler.__init__(self, self.file_handle) try: import numpy as np except ImportError: np = None self.np = np def load_build(self): """Called to set the state of a newly created object. We capture it to replace our place-holder objects, NDArrayWrapper or NumpyArrayWrapper, by the array we are interested in. We replace them directly in the stack of pickler. NDArrayWrapper is used for backward compatibility with joblib <= 0.9. """ Unpickler.load_build(self) # For backward compatibility, we support NDArrayWrapper objects. if isinstance(self.stack[-1], (NDArrayWrapper, NumpyArrayWrapper)): if self.np is None: raise ImportError( "Trying to unpickle an ndarray, but numpy didn't import correctly" ) array_wrapper = self.stack.pop() # If any NDArrayWrapper is found, we switch to compatibility mode, # this will be used to raise a DeprecationWarning to the user at # the end of the unpickling. 
if isinstance(array_wrapper, NDArrayWrapper): self.compat_mode = True _array_payload = array_wrapper.read(self) else: _array_payload = array_wrapper.read(self, self.ensure_native_byte_order) self.stack.append(_array_payload) # Be careful to register our new method. dispatch[pickle.BUILD[0]] = load_build ############################################################################### # Utility functions def dump(value, filename, compress=0, protocol=None): """Persist an arbitrary Python object into one file. Read more in the :ref:`User Guide <persistence>`. Parameters ---------- value: any Python object The object to store to disk. filename: str, pathlib.Path, or file object. The file object or path of the file in which it is to be stored. The compression method corresponding to one of the supported filename extensions ('.z', '.gz', '.bz2', '.xz' or '.lzma') will be used automatically. compress: int from 0 to 9 or bool or 2-tuple, optional Optional compression level for the data. 0 or False is no compression. Higher value means more compression, but also slower read and write times. Using a value of 3 is often a good compromise. See the notes for more details. If compress is True, the compression level used is 3. If compress is a 2-tuple, the first element must correspond to a string between supported compressors (e.g 'zlib', 'gzip', 'bz2', 'lzma' 'xz'), the second element must be an integer from 0 to 9, corresponding to the compression level. protocol: int, optional Pickle protocol, see pickle.dump documentation for more details. Returns ------- filenames: list of strings The list of file names in which the data is stored. If compress is false, each array is stored in a different file. See Also -------- joblib.load : corresponding loader Notes ----- Memmapping on load cannot be used for compressed files. Thus using compression can significantly slow down loading. In addition, compressed files take up extra memory during dump and load. 
""" if Path is not None and isinstance(filename, Path): filename = str(filename) is_filename = isinstance(filename, str) is_fileobj = hasattr(filename, "write") compress_method = "zlib" # zlib is the default compression method. if compress is True: # By default, if compress is enabled, we want the default compress # level of the compressor. compress_level = None elif isinstance(compress, tuple): # a 2-tuple was set in compress if len(compress) != 2: raise ValueError( "Compress argument tuple should contain exactly 2 elements: " "(compress method, compress level), you passed {}".format(compress) ) compress_method, compress_level = compress elif isinstance(compress, str): compress_method = compress compress_level = None # Use default compress level compress = (compress_method, compress_level) else: compress_level = compress if compress_method == "lz4" and lz4 is None: raise ValueError(LZ4_NOT_INSTALLED_ERROR) if ( compress_level is not None and compress_level is not False and compress_level not in range(10) ): # Raising an error if a non valid compress level is given. raise ValueError( 'Non valid compress level given: "{}". Possible values are {}.'.format( compress_level, list(range(10)) ) ) if compress_method not in _COMPRESSORS: # Raising an error if an unsupported compression method is given. raise ValueError( 'Non valid compression method given: "{}". Possible values are {}.'.format( compress_method, _COMPRESSORS ) ) if not is_filename and not is_fileobj: # People keep inverting arguments, and the resulting error is # incomprehensible raise ValueError( "Second argument should be a filename or a file-like object, " "%s (type %s) was given." % (filename, type(filename)) ) if is_filename and not isinstance(compress, tuple): # In case no explicit compression was requested using both compression # method and level in a tuple and the filename has an explicit # extension, we select the corresponding compressor. 
# unset the variable to be sure no compression level is set afterwards. compress_method = None for name, compressor in _COMPRESSORS.items(): if filename.endswith(compressor.extension): compress_method = name if compress_method in _COMPRESSORS and compress_level == 0: # we choose the default compress_level in case it was not given # as an argument (using compress). compress_level = None if compress_level != 0: with _write_fileobject( filename, compress=(compress_method, compress_level) ) as f: NumpyPickler(f, protocol=protocol).dump(value) elif is_filename: with open(filename, "wb") as f: NumpyPickler(f, protocol=protocol).dump(value) else: NumpyPickler(filename, protocol=protocol).dump(value) # If the target container is a file object, nothing is returned. if is_fileobj: return # For compatibility, the list of created filenames (e.g with one element # after 0.10.0) is returned by default. return [filename] def _unpickle(fobj, ensure_native_byte_order, filename="", mmap_mode=None): """Internal unpickling function.""" # We are careful to open the file handle early and keep it open to # avoid race-conditions on renames. # That said, if data is stored in companion files, which can be # the case with the old persistence format, moving the directory # will create a race when joblib tries to access the companion # files. unpickler = NumpyUnpickler( filename, fobj, ensure_native_byte_order, mmap_mode=mmap_mode ) obj = None try: obj = unpickler.load() if unpickler.compat_mode: warnings.warn( "The file '%s' has been generated with a " "joblib version less than 0.10. " "Please regenerate this pickle file." % filename, DeprecationWarning, stacklevel=3, ) except UnicodeDecodeError as exc: # More user-friendly error message new_exc = ValueError( "You may be trying to read with " "python 3 a joblib pickle generated with python 2. " "This feature is not supported by joblib." 
) new_exc.__cause__ = exc raise new_exc return obj def load_temporary_memmap(filename, mmap_mode, unlink_on_gc_collect): from ._memmapping_reducer import JOBLIB_MMAPS, add_maybe_unlink_finalizer with open(filename, "rb") as f: with _validate_fileobject_and_memmap(f, filename, mmap_mode) as ( fobj, validated_mmap_mode, ): # Memmap are used for interprocess communication, which should # keep the objects untouched. We pass `ensure_native_byte_order=False` # to remain consistent with the loading behavior of non-memmaped arrays # in workers, where the byte order is preserved. # Note that we do not implement endianness change for memmaps, as this # would result in inconsistent behavior. obj = _unpickle( fobj, ensure_native_byte_order=False, filename=filename, mmap_mode=validated_mmap_mode, ) JOBLIB_MMAPS.add(obj.filename) if unlink_on_gc_collect: add_maybe_unlink_finalizer(obj) return obj def load(filename, mmap_mode=None, ensure_native_byte_order="auto"): """Reconstruct a Python object from a file persisted with joblib.dump. Read more in the :ref:`User Guide <persistence>`. WARNING: joblib.load relies on the pickle module and can therefore execute arbitrary Python code. It should therefore never be used to load files from untrusted sources. Parameters ---------- filename: str, pathlib.Path, or file object. The file object or path of the file from which to load the object mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional If not None, the arrays are memory-mapped from the disk. This mode has no effect for compressed files. Note that in this case the reconstructed object might no longer match exactly the originally pickled object. ensure_native_byte_order: bool, or 'auto', default=='auto' If True, ensures that the byte order of the loaded arrays matches the native byte ordering (or _endianness_) of the host system. This is not compatible with memory-mapped arrays and using non-null `mmap_mode` parameter at the same time will raise an error. 
The default 'auto' parameter is equivalent to True if `mmap_mode` is None, else False. Returns ------- result: any Python object The object stored in the file. See Also -------- joblib.dump : function to save an object Notes ----- This function can load numpy array files saved separately during the dump. If the mmap_mode argument is given, it is passed to np.load and arrays are loaded as memmaps. As a consequence, the reconstructed object might not match the original pickled object. Note that if the file was saved with compression, the arrays cannot be memmapped. """ if ensure_native_byte_order == "auto": ensure_native_byte_order = mmap_mode is None if ensure_native_byte_order and mmap_mode is not None: raise ValueError( "Native byte ordering can only be enforced if 'mmap_mode' parameter " f"is set to None, but got 'mmap_mode={mmap_mode}' instead." ) if Path is not None and isinstance(filename, Path): filename = str(filename) if hasattr(filename, "read"): fobj = filename filename = getattr(fobj, "name", "") with _validate_fileobject_and_memmap(fobj, filename, mmap_mode) as (fobj, _): obj = _unpickle(fobj, ensure_native_byte_order=ensure_native_byte_order) else: with open(filename, "rb") as f: with _validate_fileobject_and_memmap(f, filename, mmap_mode) as ( fobj, validated_mmap_mode, ): if isinstance(fobj, str): # if the returned file object is a string, this means we # try to load a pickle file generated with an version of # Joblib so we load it with joblib compatibility function. return load_compatibility(fobj) # A memory-mapped array has to be mapped with the endianness # it has been written with. Other arrays are coerced to the # native endianness of the host system. obj = _unpickle( fobj, ensure_native_byte_order=ensure_native_byte_order, filename=filename, mmap_mode=validated_mmap_mode, ) return obj
NumpyUnpickler
python
ansible__ansible
test/units/module_utils/facts/test_facts.py
{ "start": 17602, "end": 23266 }
class ____(unittest.TestCase): # FIXME: mock.patch instead def setUp(self): # The @timeout tracebacks if there isn't a GATHER_TIMEOUT is None (the default until get_all_facts sets it via global) facts.GATHER_TIMEOUT = 10 def tearDown(self): facts.GATHER_TIMEOUT = None # The Hardware subclasses freakout if instaniated directly, so # mock platform.system and inst Hardware() so we get a LinuxHardware() # we can test. @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._mtab_entries', return_value=MTAB_ENTRIES) @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._find_bind_mounts', return_value=BIND_MOUNTS) @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._lsblk_uuid', return_value=LSBLK_UUIDS) @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._udevadm_uuid', return_value=UDEVADM_UUID) def test_get_mount_facts(self, mock_lsblk_uuid, mock_find_bind_mounts, mock_mtab_entries, mock_udevadm_uuid): module = Mock() # Returns a LinuxHardware-ish lh = hardware.linux.LinuxHardware(module=module, load_on_init=False) # Nothing returned, just self.facts modified as a side effect mount_facts = lh.get_mount_facts() self.assertIsInstance(mount_facts, dict) self.assertIn('mounts', mount_facts) self.assertIsInstance(mount_facts['mounts'], list) self.assertIsInstance(mount_facts['mounts'][0], dict) # Find mounts with space in the mountpoint path mounts_with_space = [x for x in mount_facts['mounts'] if ' ' in x['mount']] self.assertEqual(len(mounts_with_space), 1) self.assertEqual(mounts_with_space[0]['mount'], '/mnt/foo bar') @patch('ansible.module_utils.facts.hardware.linux.get_file_content', return_value=MTAB) def test_get_mtab_entries(self, mock_get_file_content): module = Mock() lh = hardware.linux.LinuxHardware(module=module, load_on_init=False) mtab_entries = lh._mtab_entries() self.assertIsInstance(mtab_entries, list) self.assertIsInstance(mtab_entries[0], list) self.assertEqual(len(mtab_entries), 39) 
@patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._run_findmnt', return_value=(0, FINDMNT_OUTPUT, '')) def test_find_bind_mounts(self, mock_run_findmnt): module = Mock() lh = hardware.linux.LinuxHardware(module=module, load_on_init=False) bind_mounts = lh._find_bind_mounts() # If bind_mounts becomes another seq type, feel free to change self.assertIsInstance(bind_mounts, set) self.assertEqual(len(bind_mounts), 1) self.assertIn('/not/a/real/bind_mount', bind_mounts) @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._run_findmnt', return_value=(37, '', '')) def test_find_bind_mounts_non_zero(self, mock_run_findmnt): module = Mock() lh = hardware.linux.LinuxHardware(module=module, load_on_init=False) bind_mounts = lh._find_bind_mounts() self.assertIsInstance(bind_mounts, set) self.assertEqual(len(bind_mounts), 0) def test_find_bind_mounts_no_findmnts(self): module = Mock() module.get_bin_path = Mock(return_value=None) lh = hardware.linux.LinuxHardware(module=module, load_on_init=False) bind_mounts = lh._find_bind_mounts() self.assertIsInstance(bind_mounts, set) self.assertEqual(len(bind_mounts), 0) @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._run_lsblk', return_value=(0, LSBLK_OUTPUT, '')) def test_lsblk_uuid(self, mock_run_lsblk): module = Mock() lh = hardware.linux.LinuxHardware(module=module, load_on_init=False) lsblk_uuids = lh._lsblk_uuid() self.assertIsInstance(lsblk_uuids, dict) self.assertIn(b'/dev/loop9', lsblk_uuids) self.assertIn(b'/dev/sda1', lsblk_uuids) self.assertEqual(lsblk_uuids[b'/dev/sda1'], b'32caaec3-ef40-4691-a3b6-438c3f9bc1c0') @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._run_lsblk', return_value=(37, LSBLK_OUTPUT, '')) def test_lsblk_uuid_non_zero(self, mock_run_lsblk): module = Mock() lh = hardware.linux.LinuxHardware(module=module, load_on_init=False) lsblk_uuids = lh._lsblk_uuid() self.assertIsInstance(lsblk_uuids, dict) self.assertEqual(len(lsblk_uuids), 0) def 
test_lsblk_uuid_no_lsblk(self): module = Mock() module.get_bin_path = Mock(return_value=None) lh = hardware.linux.LinuxHardware(module=module, load_on_init=False) lsblk_uuids = lh._lsblk_uuid() self.assertIsInstance(lsblk_uuids, dict) self.assertEqual(len(lsblk_uuids), 0) @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._run_lsblk', return_value=(0, LSBLK_OUTPUT_2, '')) def test_lsblk_uuid_dev_with_space_in_name(self, mock_run_lsblk): module = Mock() lh = hardware.linux.LinuxHardware(module=module, load_on_init=False) lsblk_uuids = lh._lsblk_uuid() self.assertIsInstance(lsblk_uuids, dict) self.assertIn(b'/dev/loop0', lsblk_uuids) self.assertIn(b'/dev/sda1', lsblk_uuids) self.assertEqual(lsblk_uuids[b'/dev/mapper/an-example-mapper with a space in the name'], b'84639acb-013f-4d2f-9392-526a572b4373') self.assertEqual(lsblk_uuids[b'/dev/sda1'], b'32caaec3-ef40-4691-a3b6-438c3f9bc1c0')
TestFactsLinuxHardwareGetMountFacts
python
sympy__sympy
sympy/polys/series/ringflint.py
{ "start": 2270, "end": 17652 }
class ____: """ Flint implementation of power series ring over integers :ref:`ZZ`. This class provides high-performance power series operations over the integer ring, leveraging the FLINT library for optimized arithmetic and series manipulations precision handling and truncation. Parameters ========== prec : int, optional The default precision for power series operations. Default is 6. Examples ======== >>> from sympy.polys.series.ringflint import FlintPowerSeriesRingZZ >>> R = FlintPowerSeriesRingZZ() >>> s = R([1, 2, 3]) # 1 + 2*x + 3*x^2 >>> R.print(s) 1 + 2*x + 3*x**2 >>> s_pow = R.pow_int(s, 2) # Square the series >>> R.print(s_pow) 1 + 4*x + 10*x**2 + 12*x**3 + 9*x**4 >>> s_inv = R.inverse(R([1, 1])) # Inverse of 1 + x >>> R.print(s_inv) 1 - x + x**2 - x**3 + x**4 - x**5 + O(x**6) Note ==== The recommended way to create a power series ring is using the factory function which returns a new instance of the higher level PowerSeriesRing class with the ring generator: >>> from sympy.polys.series import power_series_ring >>> from sympy.polys.domains import ZZ >>> R, x = power_series_ring("x", ZZ, 6) >>> R Power Series Ring in x over ZZ of size 6 >>> type(x) <class 'sympy.polys.series.ring.PowerSeriesElement'> This function automatically uses the Flint implementation if available for better performance, falling back to the Python implementation otherwise. 
See Also ======== sympy.polys.series.ringflint.FlintPowerSeriesRingQQ sympy.polys.series.ringpython.PythonPowerSeriesRingZZ sympy.polys.series.ring.power_series_ring sympy.polys.series.ring.PowerSeriesRingRing sympy.polys.series.ring.PowerSeriesRingField sympy.polys.series.ring.PowerSeriesElement """ _domain = ZZ def __init__(self, prec: int = 6) -> None: if prec < 0: raise ValueError("Power series precision must be non-negative") self._prec = prec def __repr__(self) -> str: return ( f"Flint Power Series Ring over {self._domain} with precision {self._prec}" ) def __eq__(self, other: object) -> bool: if not isinstance(other, FlintPowerSeriesRingZZ): return NotImplemented return self._prec == other.prec def __hash__(self) -> int: return hash((self._domain, self._prec)) def __call__( self, coeffs: Sequence[MPZ | int], prec: int | None = None ) -> ZZSeries: """ Create a power series from a list of coefficients. If `prec` is not specified, it defaults to the ring's precision. """ s: list[MPZ] = [] for c in coeffs: if isinstance(c, MPZ): s.append(c) elif isinstance(c, int): s.append(self._domain(c)) else: raise TypeError(f"Unsupported coefficient type: {type(c)}") return self.from_list(s, prec) @property def domain(self) -> Domain[MPZ]: """Return the ground domain of the power series ring.""" return self._domain @property def prec(self) -> int: """Return the ring's precision.""" return self._prec @property def one(self) -> ZZSeries: if self._prec == 0: return fmpz_series([1], prec=0) return fmpz_poly([1]) @property def zero(self) -> ZZSeries: if self._prec == 0: return fmpz_series([0], prec=0) return fmpz_poly([0]) @property def gen(self) -> ZZSeries: if self._prec < 2: return fmpz_series([0, 1], prec=self._prec) return fmpz_poly([0, 1]) def pretty(self, s: ZZSeries, *, symbol: str = "x", ascending: bool = True) -> str: """Return a pretty-printed string representation of a power series.""" coeffs = dup_reverse(s.coeffs(), ZZ) if isinstance(s, fmpz_poly): return 
series_pprint(coeffs, None) prec = self.series_prec(s) return series_pprint(coeffs, prec, sym=symbol, ascending=ascending) def print(self, s: ZZSeries, *, symbol: str = "x", ascending: bool = True) -> None: """Print a pretty-printed representation of a power series.""" print(self.pretty(s, symbol=symbol, ascending=ascending)) def from_list(self, coeffs: list[MPZ], prec: int | None = None) -> ZZSeries: """ Create a power series from a list of ground coefficients. If `prec` is not specified, it defaults to the ring's precision. """ if prec is None: if len(coeffs) <= self._prec: return fmpz_poly(coeffs) prec = self._prec return fmpz_series(coeffs, prec=prec) def from_element(self, s: ZZSeries) -> ZZSeries: """Convert a power series element into the corresponding element of this ring.""" ring_prec = self._prec if isinstance(s, fmpz_poly): if s.degree() >= ring_prec: return fmpz_series(s, prec=ring_prec) return s prec = min(_get_series_precision(s), ring_prec) if _require_flint_version: return fmpz_series(s, prec=prec) else: return fmpz_series(s.coeffs(), prec=prec) def to_list(self, s: ZZSeries) -> list[MPZ]: """Returns the list of series coefficients.""" return s.coeffs() def to_dense(self, s: ZZSeries) -> dup[MPZ]: """Return the coefficients of a power series as a dense list.""" return s.coeffs()[::-1] def series_prec(self, s: ZZSeries) -> int | None: """Return the precision of the series.""" if isinstance(s, fmpz_poly): return None return _get_series_precision(s) def equal(self, s1: ZZSeries, s2: ZZSeries) -> bool | None: """Check if two power series are equal up to their minimum precision.""" if isinstance(s1, fmpz_poly) and isinstance(s2, fmpz_poly): return s1 == s2 elif isinstance(s1, fmpz_poly): min_prec = self.series_prec(s2) elif isinstance(s2, fmpz_poly): min_prec = self.series_prec(s1) else: min_prec = min(_get_series_precision(s1), _get_series_precision(s2)) coeffs1 = s1.coeffs()[:min_prec] coeffs2 = s2.coeffs()[:min_prec] if coeffs1 != coeffs2: return 
False return None def equal_repr(self, s1: ZZSeries, s2: ZZSeries) -> bool: """Check if two power series have the same representation.""" if isinstance(s1, fmpz_poly) and isinstance(s2, fmpz_poly): return s1 == s2 elif isinstance(s1, fmpz_series) and isinstance(s2, fmpz_series): return s1._equal_repr(s2) else: return False def is_ground(self, arg: ZZSeries) -> bool | None: """Check if a arg is a ground element of the power series ring.""" if self.prec == 0: return None return len(arg) <= 1 def constant_coefficient(self, s: ZZSeries) -> MPZ: """Return the constant coefficient of a power series.""" return s[0] def positive(self, s: ZZSeries) -> ZZSeries: """Return the unary positive of a power series, adjusted to the ring's precision.""" ring_prec = self._prec if isinstance(s, fmpz_poly): if s.degree() >= ring_prec: return fmpz_series(s, prec=ring_prec) return s # XXX: This shold simply be: fmpz_series(s, prec=ring_prec) # https://github.com/flintlib/python-flint/issues/304 prec = min(_get_series_precision(s), ring_prec) return fmpz_series(s.coeffs(), prec=prec) def negative(self, s: ZZSeries) -> ZZSeries: """Return the unary negative of a power series.""" with _global_cap(self._prec): return self.positive(-s) def add(self, s1: ZZSeries, s2: ZZSeries) -> ZZSeries: """Add two power series.""" ring_prec = self._prec if isinstance(s1, fmpz_poly) and isinstance(s2, fmpz_poly): poly = s1 + s2 if poly.degree() < ring_prec: return poly return fmpz_series(poly, prec=ring_prec) with _global_cap(ring_prec): return s1 + s2 def add_ground(self, s: ZZSeries, n: MPZ) -> ZZSeries: """Add a ground element to a power series.""" with _global_cap(self._prec): return s + n def subtract(self, s1: ZZSeries, s2: ZZSeries) -> ZZSeries: """Subtract two power series.""" ring_prec = self._prec if isinstance(s1, fmpz_poly) and isinstance(s2, fmpz_poly): poly = s1 - s2 if poly.degree() < ring_prec: return poly return fmpz_series(poly, prec=ring_prec) with _global_cap(ring_prec): return s1 - s2 
def subtract_ground(self, s: ZZSeries, n: MPZ) -> ZZSeries: """Subtract a ground element from a power series.""" with _global_cap(self._prec): return s - n def rsubtract_ground(self, s: ZZSeries, n: MPZ) -> ZZSeries: """Subtract a power series from a ground element.""" with _global_cap(self._prec): return n - s def multiply(self, s1: ZZSeries, s2: ZZSeries) -> ZZSeries: """Multiply two power series.""" ring_prec = self._prec if isinstance(s1, fmpz_poly) and isinstance(s2, fmpz_poly): deg1 = s1.degree() deg2 = s2.degree() if deg1 + deg2 < ring_prec: return s1 * s2 else: if deg1 >= ring_prec: s1 = self.truncate(s1, ring_prec) if deg2 >= ring_prec: s2 = self.truncate(s2, ring_prec) return fmpz_series(s1 * s2, prec=ring_prec) with _global_cap(ring_prec): return s1 * s2 def multiply_ground(self, s: ZZSeries, n: MPZ) -> ZZSeries: """Multiply a power series by a ground element.""" ring_prec = self._prec if isinstance(s, fmpz_poly): poly = s * n if poly.degree() < ring_prec: return poly return fmpz_series(poly, prec=ring_prec) with _global_cap(ring_prec): return s * n def divide(self, s1: ZZSeries, s2: ZZSeries) -> ZZSeries: """Divide two power series.""" ring_prec = self._prec if isinstance(s1, fmpz_poly) and isinstance(s2, fmpz_poly): try: return s1 / s2 except DomainError: ring_prec = ring_prec + _useries_valuation( (s2.coeffs()[::-1], None), self._domain ) s1 = fmpz_series(s1, prec=ring_prec) s2 = fmpz_series(s2, prec=ring_prec) with _global_cap(ring_prec): return s1 / s2 def pow_int(self, s: ZZSeries, n: int) -> ZZSeries: """Raise a power series to a integer power.""" ring_prec = self._prec if n < 0: n = -n s = self.pow_int(s, n) try: inv = self.inverse(s) return inv except NotReversible: raise ValueError("Result would not be a power series") if isinstance(s, fmpz_poly): if s.degree() * n < ring_prec: return s**n if s.degree() > ring_prec: s = self.truncate(s, ring_prec) poly = s**n return fmpz_series(poly, prec=ring_prec) with _global_cap(ring_prec): return s**n def 
square(self, s: ZZSeries) -> ZZSeries: """Compute the square of a power series.""" return self.pow_int(s, 2) def compose(self, s1: ZZSeries, s2: ZZSeries) -> ZZSeries: """Compose two power series, `s1(s2)`.""" dom: Domain[MPZ] = self._domain ring_prec: int = self._prec if s2 and not dom.is_zero(s2[0]): raise ValueError( "Series composition requires the second series to have a zero constant term." ) if isinstance(s1, fmpz_poly) and isinstance(s2, fmpz_poly): deg1: int = s1.degree() deg2: int = s2.degree() if deg1 * deg2 < ring_prec: return s1(s2) if deg1 <= ring_prec and deg2 <= ring_prec: return self.truncate(s1(s2), ring_prec) else: s1 = fmpz_series(s1, prec=ring_prec) s2 = fmpz_series(s2, prec=ring_prec) with _global_cap(ring_prec): return s1(s2) if isinstance(s1, fmpz_poly): prec2 = self.series_prec(s2) s1 = fmpz_series(s1, prec=prec2) if isinstance(s2, fmpz_poly): prec1 = self.series_prec(s1) s2 = fmpz_series(s2, prec=prec1) with _global_cap(ring_prec): return s1(s2) def inverse(self, s: ZZSeries) -> ZZSeries: """Compute the multiplicative inverse of a power series.""" dom: Domain[MPZ] = self._domain ring_prec: int = self._prec if not s or not dom.is_unit(s[0]): raise NotReversible( "Series inverse requires the constant term to be a unit" ) if isinstance(s, fmpz_poly): if len(s) == 1: return 1 / s s = fmpz_series(s, prec=ring_prec) with _global_cap(ring_prec): return 1 / s def reversion(self, s: ZZSeries) -> ZZSeries: """Compute the compositional inverse of a power series.""" dom = self._domain if not s or not dom.is_zero(s[0]): raise NotReversible( "Series compositional inverse requires the constant term to be zero." ) if len(s) >= 2 and not dom.is_unit(s[1]): raise NotReversible( "Series compositional inverse requires the linear term to be unit." 
) if isinstance(s, fmpz_poly): s = fmpz_series(s, prec=self._prec) with _global_cap(self._prec): return s.reversion() def truncate(self, s: ZZSeries, n: int) -> ZZSeries: """Truncate a power series to `n` terms.""" if n < 0: raise ValueError("Truncation precision must be non-negative") if len(s) <= n: return s # XXX: This should simply be: return fmpz_series(s, prec=n) # https://github.com/flintlib/python-flint/issues/304 coeffs = s.coeffs()[:n] return fmpz_series(coeffs, prec=n) def differentiate(self, s: ZZSeries) -> ZZSeries: """Compute the derivative of a power series.""" if isinstance(s, fmpz_poly): poly = s.derivative() if poly.degree() < self._prec: return poly return fmpz_series(poly, prec=self._prec) poly = fmpz_poly(s.coeffs()) derivative = poly.derivative() prec = min(_get_series_precision(s) - 1, self._prec) return fmpz_series(derivative, prec=prec) @doctest_depends_on(ground_types=["flint"])
FlintPowerSeriesRingZZ
python
walkccc__LeetCode
solutions/431. Encode N-ary Tree to Binary Tree/431-2.py
{ "start": 0, "end": 894 }
class ____: # Encodes an n-ary tree to a binary tree. def encode(self, root: 'Node') -> TreeNode | None: if not root: return None rootTreeNode = TreeNode(root.val) if root.children: rootTreeNode.left = self.encode(root.children[0]) # The parent for the rest of the children currTreeNode = rootTreeNode.left # Encode the rest of the children for i in range(1, len(root.children)): currTreeNode.right = self.encode(root.children[i]) currTreeNode = currTreeNode.right return rootTreeNode # Decodes your binary tree to an n-ary tree. def decode(self, root: TreeNode | None) -> 'Node': if not root: return None rootNode = Node(root.val, []) currTreeNode = root.left while currTreeNode: rootNode.children.append(self.decode(currTreeNode)) currTreeNode = currTreeNode.right return rootNode
Codec
python
lazyprogrammer__machine_learning_examples
rl/comparing_epsilons.py
{ "start": 386, "end": 1820 }
class ____: def __init__(self, m): self.m = m self.mean = 0 self.N = 0 def pull(self): return np.random.randn() + self.m def update(self, x): self.N += 1 self.mean = (1 - 1.0/self.N)*self.mean + 1.0/self.N*x def run_experiment(m1, m2, m3, eps, N): bandits = [Bandit(m1), Bandit(m2), Bandit(m3)] data = np.empty(N) for i in range(N): # epsilon greedy p = np.random.random() if p < eps: j = np.random.choice(3) else: j = np.argmax([b.mean for b in bandits]) x = bandits[j].pull() bandits[j].update(x) # for the plot data[i] = x cumulative_average = np.cumsum(data) / (np.arange(N) + 1) # plot moving average ctr plt.plot(cumulative_average) plt.plot(np.ones(N)*m1) plt.plot(np.ones(N)*m2) plt.plot(np.ones(N)*m3) plt.xscale('log') plt.show() for b in bandits: print(b.mean) return cumulative_average if __name__ == '__main__': c_1 = run_experiment(1.0, 2.0, 3.0, 0.1, 100000) c_05 = run_experiment(1.0, 2.0, 3.0, 0.05, 100000) c_01 = run_experiment(1.0, 2.0, 3.0, 0.01, 100000) # log scale plot plt.plot(c_1, label='eps = 0.1') plt.plot(c_05, label='eps = 0.05') plt.plot(c_01, label='eps = 0.01') plt.legend() plt.xscale('log') plt.show() # linear plot plt.plot(c_1, label='eps = 0.1') plt.plot(c_05, label='eps = 0.05') plt.plot(c_01, label='eps = 0.01') plt.legend() plt.show()
Bandit
python
getsentry__sentry
src/sentry/testutils/helpers/apigateway.py
{ "start": 763, "end": 1009 }
class ____(ControlSiloOrganizationEndpoint): permission_classes: tuple[type[BasePermission], ...] = (AllowAny,) def get(self, request, organization, **kwargs): return Response({"proxy": False}) @region_silo_endpoint
ControlEndpoint
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/typeNarrowingIsinstance19.py
{ "start": 1890, "end": 2140 }
class ____(metaclass=Meta2): pass def func9(v: type[Class2] | Iterable[type[Class2]]): if isinstance(v, Meta2): reveal_type(v, expected_text="type[Class2]") else: reveal_type(v, expected_text="Iterable[type[Class2]]")
Class2
python
getsentry__sentry
src/sentry/api/bases/organizationmember.py
{ "start": 2582, "end": 2820 }
class ____(TypedDict): organization: Organization user_id: NotRequired[int] user_is_active: NotRequired[bool] id: NotRequired[int | str] organization_id: NotRequired[int] invite_status: NotRequired[int]
_FilterKwargs
python
sqlalchemy__sqlalchemy
test/orm/test_backref_mutations.py
{ "start": 15337, "end": 16745 }
class ____(_fixtures.FixtureTest): run_inserts = None @classmethod def setup_mappers(cls): Address, addresses, users, User = ( cls.classes.Address, cls.tables.addresses, cls.tables.users, cls.classes.User, ) cls.mapper_registry.map_imperatively(Address, addresses) cls.mapper_registry.map_imperatively( User, users, properties={ "address": relationship( Address, uselist=False, _legacy_inactive_history_style=( cls._legacy_inactive_history_style ), ) }, ) def test_collection_move_commitfirst(self): User, Address = self.classes.User, self.classes.Address sess = fixture_session() a1 = Address(email_address="address1") u1 = User(name="jack", address=a1) u2 = User(name="ed") sess.add_all([u1, u2]) sess.commit() # everything is expired # load u1.address u1.address # reassign u2.address = a1 assert u2.address is a1 # the commit cancels out u1.addresses # being loaded, on next access its fine. sess.commit() assert u1.address is None assert u2.address is a1
O2OScalarMoveTest
python
Netflix__metaflow
metaflow/exception.py
{ "start": 1841, "end": 2232 }
class ____(MetaflowException): headline = "Parameter field failed" def __init__(self, name, field): exc = traceback.format_exc() msg = ( "When evaluating the field *%s* for the Parameter *%s*, " "the following exception occurred:\n\n%s" % (field, name, exc) ) super(ParameterFieldFailed, self).__init__(msg)
ParameterFieldFailed
python
altair-viz__altair
altair/vegalite/v6/schema/core.py
{ "start": 1533498, "end": 1545890 }
class ____(VegaLiteSchema): r""" TypedFieldDef schema wrapper. Definition object for a data field, its type and transformation of an encoding channel. Parameters ---------- aggregate : dict, :class:`Aggregate`, :class:`ArgmaxDef`, :class:`ArgminDef`, :class:`NonArgAggregateOp`, Literal['average', 'count', 'distinct', 'max', 'mean', 'median', 'min', 'missing', 'product', 'q1', 'q3', 'ci0', 'ci1', 'stderr', 'stdev', 'stdevp', 'sum', 'valid', 'values', 'variance', 'variancep', 'exponential', 'exponentialb'] Aggregation function for the field (e.g., ``"mean"``, ``"sum"``, ``"median"``, ``"min"``, ``"max"``, ``"count"``). **Default value:** ``undefined`` (None) **See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__ documentation. bandPosition : float Relative position on a band of a stacked, binned, time unit, or band scale. For example, the marks will be positioned at the beginning of the band if set to ``0``, and at the middle of the band if set to ``0.5``. bin : bool, dict, Literal['binned'], :class:`BinParams`, None A flag for binning a ``quantitative`` field, `an object defining binning parameters <https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__, or indicating that the data for ``x`` or ``y`` channel are binned before they are imported into Vega-Lite (``"binned"``). * If ``true``, default `binning parameters <https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__ will be applied. * If ``"binned"``, this indicates that the data for the ``x`` (or ``y``) channel are already binned. You can map the bin-start field to ``x`` (or ``y``) and the bin-end field to ``x2`` (or ``y2``). The scale and axis will be formatted similar to binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can also set the axis's `tickMinStep <https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property. 
**Default value:** ``false`` **See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__ documentation. field : str, dict, :class:`Field`, :class:`FieldName`, :class:`RepeatRef` **Required.** A string defining the name of the field from which to pull a data value or an object defining iterated values from the `repeat <https://vega.github.io/vega-lite/docs/repeat.html>`__ operator. **See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__ documentation. **Notes:** 1) Dots (``.``) and brackets (``[`` and ``]``) can be used to access nested objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"``). If field names contain dots or brackets but are not nested, you can use ``\\`` to escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"``). See more details about escaping in the `field documentation <https://vega.github.io/vega-lite/docs/field.html>`__. 2) ``field`` is not required if ``aggregate`` is ``count``. timeUnit : dict, :class:`TimeUnit`, :class:`MultiTimeUnit`, :class:`BinnedTimeUnit`, :class:`SingleTimeUnit`, :class:`TimeUnitParams`, :class:`UtcMultiTimeUnit`, :class:`UtcSingleTimeUnit`, :class:`LocalMultiTimeUnit`, :class:`LocalSingleTimeUnit`, Literal['binnedyear', 'binnedyearquarter', 'binnedyearquartermonth', 'binnedyearmonth', 'binnedyearmonthdate', 'binnedyearmonthdatehours', 'binnedyearmonthdatehoursminutes', 'binnedyearmonthdatehoursminutesseconds', 'binnedyearweek', 'binnedyearweekday', 'binnedyearweekdayhours', 'binnedyearweekdayhoursminutes', 'binnedyearweekdayhoursminutesseconds', 'binnedyeardayofyear', 'binnedutcyear', 'binnedutcyearquarter', 'binnedutcyearquartermonth', 'binnedutcyearmonth', 'binnedutcyearmonthdate', 'binnedutcyearmonthdatehours', 'binnedutcyearmonthdatehoursminutes', 'binnedutcyearmonthdatehoursminutesseconds', 'binnedutcyearweek', 'binnedutcyearweekday', 'binnedutcyearweekdayhours', 'binnedutcyearweekdayhoursminutes', 'binnedutcyearweekdayhoursminutesseconds', 
'binnedutcyeardayofyear', 'utcyear', 'utcquarter', 'utcmonth', 'utcweek', 'utcday', 'utcdayofyear', 'utcdate', 'utchours', 'utcminutes', 'utcseconds', 'utcmilliseconds', 'year', 'quarter', 'month', 'week', 'day', 'dayofyear', 'date', 'hours', 'minutes', 'seconds', 'milliseconds', 'utcyearquarter', 'utcyearquartermonth', 'utcyearmonth', 'utcyearmonthdate', 'utcyearmonthdatehours', 'utcyearmonthdatehoursminutes', 'utcyearmonthdatehoursminutesseconds', 'utcyearweek', 'utcyearweekday', 'utcyearweekdayhours', 'utcyearweekdayhoursminutes', 'utcyearweekdayhoursminutesseconds', 'utcyeardayofyear', 'utcquartermonth', 'utcmonthdate', 'utcmonthdatehours', 'utcmonthdatehoursminutes', 'utcmonthdatehoursminutesseconds', 'utcweekday', 'utcweekdayhours', 'utcweekdayhoursminutes', 'utcweekdayhoursminutesseconds', 'utcdayhours', 'utcdayhoursminutes', 'utcdayhoursminutesseconds', 'utchoursminutes', 'utchoursminutesseconds', 'utcminutesseconds', 'utcsecondsmilliseconds', 'yearquarter', 'yearquartermonth', 'yearmonth', 'yearmonthdate', 'yearmonthdatehours', 'yearmonthdatehoursminutes', 'yearmonthdatehoursminutesseconds', 'yearweek', 'yearweekday', 'yearweekdayhours', 'yearweekdayhoursminutes', 'yearweekdayhoursminutesseconds', 'yeardayofyear', 'quartermonth', 'monthdate', 'monthdatehours', 'monthdatehoursminutes', 'monthdatehoursminutesseconds', 'weekday', 'weekdayhours', 'weekdayhoursminutes', 'weekdayhoursminutesseconds', 'dayhours', 'dayhoursminutes', 'dayhoursminutesseconds', 'hoursminutes', 'hoursminutesseconds', 'minutesseconds', 'secondsmilliseconds'] Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours``) for a temporal field. or `a temporal field that gets casted as ordinal <https://vega.github.io/vega-lite/docs/type.html#cast>`__. **Default value:** ``undefined`` (None) **See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__ documentation. title : str, :class:`Text`, Sequence[str], None A title for the field. 
If ``null``, the title will be removed. **Default value:** derived from the field's name and transformation function (``aggregate``, ``bin`` and ``timeUnit``). If the field has an aggregate function, the function is displayed as part of the title (e.g., ``"Sum of Profit"``). If the field is binned or has a time unit applied, the applied function is shown in parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"``). Otherwise, the title is simply the field name. **Notes**: 1) You can customize the default field title format by providing the `fieldTitle <https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle function via the compile function's options <https://vega.github.io/vega-lite/usage/compile.html#field-title>`__. 2) If both field definition's ``title`` and axis, header, or legend ``title`` are defined, axis/header/legend title will be used. type : :class:`StandardType`, Literal['quantitative', 'ordinal', 'temporal', 'nominal'] The type of measurement (``"quantitative"``, ``"temporal"``, ``"ordinal"``, or ``"nominal"``) for the encoded field or constant value (``datum``). It can also be a ``"geojson"`` type for encoding `'geoshape' <https://vega.github.io/vega-lite/docs/geoshape.html>`__. Vega-Lite automatically infers data types in many cases as discussed below. However, type is required for a field if: (1) the field is not nominal and the field encoding has no specified ``aggregate`` (except ``argmin`` and ``argmax``), ``bin``, scale type, custom ``sort`` order, nor ``timeUnit`` or (2) if you wish to use an ordinal scale for a field with ``bin`` or ``timeUnit``. 
**Default value:** 1) For a data ``field``, ``"nominal"`` is the default data type unless the field encoding has ``aggregate``, ``channel``, ``bin``, scale type, ``sort``, or ``timeUnit`` that satisfies the following criteria: * ``"quantitative"`` is the default type if (1) the encoded field contains ``bin`` or ``aggregate`` except ``"argmin"`` and ``"argmax"``, (2) the encoding channel is ``latitude`` or ``longitude`` channel or (3) if the specified scale type is `a quantitative scale <https://vega.github.io/vega-lite/docs/scale.html#type>`__. * ``"temporal"`` is the default type if (1) the encoded field contains ``timeUnit`` or (2) the specified scale type is a time or utc scale * ``"ordinal"`` is the default type if (1) the encoded field contains a `custom sort order <https://vega.github.io/vega-lite/docs/sort.html#specifying-custom-sort-order>`__, (2) the specified scale type is an ordinal/point/band scale, or (3) the encoding channel is ``order``. 2) For a constant value in data domain (``datum``): * ``"quantitative"`` if the datum is a number * ``"nominal"`` if the datum is a string * ``"temporal"`` if the datum is `a date time object <https://vega.github.io/vega-lite/docs/datetime.html>`__ **Note:** * Data ``type`` describes the semantics of the data rather than the primitive data types (number, string, etc.). The same primitive data type can have different types of measurement. For example, numeric data can represent quantitative, ordinal, or nominal data. * Data values for a temporal field can be either a date-time string (e.g., ``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``. ``"2015"``) or a timestamp number (e.g., ``1552199579097``). * When using with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the ``type`` property can be either ``"quantitative"`` (for using a linear bin scale) or `"ordinal" (for using an ordinal bin scale) <https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__. 
* When using with `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property can be either ``"temporal"`` (default, for using a temporal scale) or `"ordinal" (for using an ordinal scale) <https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__. * When using with `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property refers to the post-aggregation data type. For example, we can calculate count ``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct", "field": "cat"}``. The ``"type"`` of the aggregate output is ``"quantitative"``. * Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError``) do not have ``type`` as they must have exactly the same type as their primary channels (e.g., ``x``, ``y``). **See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__ documentation. """ _schema = {"$ref": "#/definitions/TypedFieldDef"} def __init__( self, aggregate: Optional[SchemaBase | Map | NonArgAggregateOp_T] = Undefined, bandPosition: Optional[float] = Undefined, bin: Optional[bool | SchemaBase | Literal["binned"] | Map | None] = Undefined, field: Optional[str | SchemaBase | Map] = Undefined, timeUnit: Optional[ SchemaBase | Map | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T ] = Undefined, title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined, type: Optional[SchemaBase | StandardType_T] = Undefined, **kwds, ): super().__init__( aggregate=aggregate, bandPosition=bandPosition, bin=bin, field=field, timeUnit=timeUnit, title=title, type=type, **kwds, )
TypedFieldDef
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 220158, "end": 220713 }
class ____(sgqlc.types.Input): """Autogenerated input type of DeleteRepositoryRuleset""" __schema__ = github_schema __field_names__ = ("repository_ruleset_id", "client_mutation_id") repository_ruleset_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="repositoryRulesetId") """The global relay id of the repository ruleset to be deleted.""" client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId") """A unique identifier for the client performing the mutation."""
DeleteRepositoryRulesetInput
python
MongoEngine__mongoengine
tests/fields/test_embedded_document_field.py
{ "start": 402, "end": 7831 }
class ____(MongoDBTestCase):
    """Tests for EmbeddedDocumentField: constructor validation, lazy string
    resolution of the document type, weakref back-references on embedded
    instances, custom validate() dispatch, and querying embedded attributes
    (including through inheritance)."""

    def test___init___(self):
        # The field accepts either the EmbeddedDocument class itself or its
        # name as a string (resolved lazily via document_type).
        class MyDoc(EmbeddedDocument):
            name = StringField()

        field = EmbeddedDocumentField(MyDoc)
        assert field.document_type_obj == MyDoc

        field2 = EmbeddedDocumentField("MyDoc")
        assert field2.document_type_obj == "MyDoc"

    def test___init___throw_error_if_document_type_is_not_EmbeddedDocument(self):
        # A plain type (dict) is rejected eagerly at construction time.
        with pytest.raises(ValidationError):
            EmbeddedDocumentField(dict)

    def test_document_type_throw_error_if_not_EmbeddedDocument_subclass(self):
        class MyDoc(Document):
            name = StringField()

        emb = EmbeddedDocumentField("MyDoc")
        # When given as a string, the subclass check only fires on first
        # access of .document_type, not at construction.
        with pytest.raises(ValidationError) as exc_info:
            emb.document_type
        assert (
            "Invalid embedded document class provided to an EmbeddedDocumentField"
            in str(exc_info.value)
        )

    def test_embedded_document_field_only_allow_subclasses_of_embedded_document(self):
        # Relates to #1661
        class MyDoc(Document):
            name = StringField()

        # Declaring the field with a top-level Document (class or string
        # reference) must fail at class-definition time.
        with pytest.raises(ValidationError):

            class MyFailingDoc(Document):
                emb = EmbeddedDocumentField(MyDoc)

        with pytest.raises(ValidationError):

            class MyFailingdoc2(Document):
                emb = EmbeddedDocumentField("MyDoc")

    def test_embedded_document_list_field__has__instance_weakref(self):
        class Comment(EmbeddedDocument):
            content = StringField()

        class Post(Document):
            title = StringField()
            comment = EmbeddedDocumentField(Comment)
            comments = EmbeddedDocumentListField(Comment)
            comments2 = ListField(EmbeddedDocumentField(Comment))

        Post.drop_collection()

        for i in range(5):
            Post(
                title=f"{i}",
                comment=Comment(content=f"{i}"),
                comments=[Comment(content=f"{i}")],
                comments2=[Comment(content=f"{i}")],
            ).save()

        posts = list(Post.objects)

        # Each embedded document (and embedded list) must hold its owner
        # through a weak proxy so it cannot create a reference cycle.
        for post in posts:
            assert isinstance(post.comments._instance, weakref.ProxyTypes)
            assert isinstance(post.comments2._instance, weakref.ProxyTypes)
            assert isinstance(post.comment._instance, weakref.ProxyTypes)
            for comment in post.comments:
                assert isinstance(comment._instance, weakref.ProxyTypes)
            for comment2 in post.comments2:
                assert isinstance(comment2._instance, weakref.ProxyTypes)

    def test_embedded_document_field_validate_subclass(self):
        # validate() must dispatch to the most-derived override even when
        # the field is declared with the base embedded type.
        class BaseItem(EmbeddedDocument):
            f = IntField()
            meta = {"allow_inheritance": True}

            def validate(self, clean=True):
                if self.f == 0:
                    raise Exception("can not be 0")
                return super().validate(clean)

        class RealItem(BaseItem):
            a = IntField()

            def validate(self, clean=True):
                if self.f == 1:
                    raise Exception("can not be 1")
                return super().validate(clean)

        class TopLevel(Document):
            item = EmbeddedDocumentField(document_type=BaseItem)
            items = EmbeddedDocumentListField(document_type=BaseItem)

        passing_item = RealItem(f=2, a=0)
        item = TopLevel(item=passing_item, items=[passing_item])
        item.validate()

        failing_item = RealItem(f=1, a=0)
        item = TopLevel(item=failing_item)
        with pytest.raises(Exception, match="can not be 1"):
            item.validate()

        item = TopLevel(items=[failing_item])
        with pytest.raises(Exception, match="can not be 1"):
            item.validate()

        # verify that super calls the parent
        failing_item_in_base = RealItem(f=0, a=0)
        item = TopLevel(item=failing_item_in_base)
        with pytest.raises(Exception, match="can not be 0"):
            item.validate()

    def test_query_embedded_document_attribute(self):
        class AdminSettings(EmbeddedDocument):
            foo1 = StringField()
            foo2 = StringField()

        class Person(Document):
            settings = EmbeddedDocumentField(AdminSettings)
            name = StringField()

        Person.drop_collection()

        p = Person(settings=AdminSettings(foo1="bar1", foo2="bar2"), name="John").save()

        # Test non exiting attribute
        with pytest.raises(InvalidQueryError) as exc_info:
            Person.objects(settings__notexist="bar").first()
        assert str(exc_info.value) == 'Cannot resolve field "notexist"'

        with pytest.raises(LookUpError):
            Person.objects.only("settings.notexist")

        # Test existing attribute
        assert Person.objects(settings__foo1="bar1").first().id == p.id
        # only() on a nested field loads that field and leaves siblings unset.
        only_p = Person.objects.only("settings.foo1").first()
        assert only_p.settings.foo1 == p.settings.foo1
        assert only_p.settings.foo2 is None
        assert only_p.name is None

        # exclude() on a nested field omits only that field.
        exclude_p = Person.objects.exclude("settings.foo1").first()
        assert exclude_p.settings.foo1 is None
        assert exclude_p.settings.foo2 == p.settings.foo2
        assert exclude_p.name == p.name

    def test_query_embedded_document_attribute_with_inheritance(self):
        class BaseSettings(EmbeddedDocument):
            meta = {"allow_inheritance": True}
            base_foo = StringField()

        class AdminSettings(BaseSettings):
            sub_foo = StringField()

        class Person(Document):
            settings = EmbeddedDocumentField(BaseSettings)

        Person.drop_collection()

        p = Person(settings=AdminSettings(base_foo="basefoo", sub_foo="subfoo"))
        p.save()

        # Test non exiting attribute
        with pytest.raises(InvalidQueryError) as exc_info:
            assert Person.objects(settings__notexist="bar").first().id == p.id
        assert str(exc_info.value) == 'Cannot resolve field "notexist"'

        # Test existing attribute
        # Both base-class and subclass fields resolve through the base-typed
        # field declaration.
        assert Person.objects(settings__base_foo="basefoo").first().id == p.id
        assert Person.objects(settings__sub_foo="subfoo").first().id == p.id

        # "_cls" must be projected explicitly so the subclass is rebuilt.
        only_p = Person.objects.only("settings.base_foo", "settings._cls").first()
        assert only_p.settings.base_foo == "basefoo"
        assert only_p.settings.sub_foo is None

    def test_query_list_embedded_document_with_inheritance(self):
        class Post(EmbeddedDocument):
            title = StringField(max_length=120, required=True)
            meta = {"allow_inheritance": True}

        class TextPost(Post):
            content = StringField()

        class MoviePost(Post):
            author = StringField()

        class Record(Document):
            posts = ListField(EmbeddedDocumentField(Post))

        record_movie = Record(posts=[MoviePost(author="John", title="foo")]).save()
        record_text = Record(posts=[TextPost(content="a", title="foo")]).save()

        # Querying on a subclass-only field matches only records embedding
        # that subclass.
        records = list(Record.objects(posts__author=record_movie.posts[0].author))
        assert len(records) == 1
        assert records[0].id == record_movie.id

        records = list(Record.objects(posts__content=record_text.posts[0].content))
        assert len(records) == 1
        assert records[0].id == record_text.id

        # A base-class field matches across all subclasses.
        assert Record.objects(posts__title="foo").count() == 2
TestEmbeddedDocumentField
python
getsentry__sentry
src/sentry/integrations/slack/unfurl/types.py
{ "start": 505, "end": 616 }
class ____(enum.Enum):
    """Kinds of links recognized for unfurling.

    NOTE(review): judging by the module path (slack/unfurl/types.py), each
    member identifies a category of Sentry URL that can be unfurled in Slack
    — confirm against the unfurl handler registry.
    """

    ISSUES = "issues"
    METRIC_ALERT = "metric_alert"
    DISCOVER = "discover"
LinkType
python
pypa__pip
src/pip/_vendor/urllib3/exceptions.py
{ "start": 3105, "end": 3233 }
class ____(TimeoutError): """Raised when a socket timeout occurs while connecting to a server""" pass
ConnectTimeoutError