column       type            stats
language     stringclasses   1 value
repo         stringclasses   346 values
path         stringlengths   length 6 to 201
class_span   dict            {start, end} character offsets
source       stringlengths   length 21 to 2.38M
target       stringlengths   length 1 to 96
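Each record pairs a Python source snippet whose class name has been masked with the original name. A minimal sketch of iterating such records with the Hugging Face `datasets` library is shown below, assuming the data is published as a dataset; the identifier "user/masked-class-names" is a placeholder, not the real dataset id.

# Minimal sketch, assuming a Hugging Face dataset with the columns listed above.
# The dataset identifier is a placeholder; substitute the real one.
from datasets import load_dataset

ds = load_dataset("user/masked-class-names", split="train")

for row in ds.select(range(3)):
    # Each row carries a snippet with the class name replaced by "____"
    # and the original class name in `target`.
    print(row["repo"], row["path"], row["class_span"], row["target"])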
python
Delgan__loguru
loguru/_colorizer.py
{ "start": 980, "end": 1145 }
class ____:
    RESET_ALL = 0
    BOLD = 1
    DIM = 2
    ITALIC = 3
    UNDERLINE = 4
    BLINK = 5
    REVERSE = 7
    HIDE = 8
    STRIKE = 9
    NORMAL = 22
Style
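As the first row shows, the `source` field keeps the placeholder `____` where the class name was removed and `target` holds the removed name. A hypothetical helper that restores the name (the function and its name are illustrative, not part of the dataset) could look like:

# Hypothetical helper (not part of the dataset): restore the masked class name.
def fill_mask(row: dict) -> str:
    # `source` contains the placeholder "____"; `target` is the original name.
    return row["source"].replace("____", row["target"], 1)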
python
arrow-py__arrow
arrow/parser.py
{ "start": 1332, "end": 2044 }
class ____(ParserError):
    """
    This class is a subclass of the ParserError class and is used to raise
    errors that occur during the matching process.

    Notes:
        This class is part of the Arrow parser and is used to provide error
        handling when a parsing match fails.
    """

    pass


_WEEKDATE_ELEMENT = Union[str, bytes, SupportsInt, bytearray]

_FORMAT_TYPE = Literal[
    "YYYY", "YY", "MM", "M", "DDDD", "DDD", "DD", "D", "HH", "H", "hh", "h",
    "mm", "m", "ss", "s", "X", "x", "ZZZ", "ZZ", "Z", "S", "W", "MMMM", "MMM",
    "Do", "dddd", "ddd", "d", "a", "A",
]
ParserMatchError
python
ray-project__ray
rllib/algorithms/tests/test_env_runner_failures.py
{ "start": 6946, "end": 32440 }
class ____(unittest.TestCase): @classmethod def setUpClass(cls) -> None: ray.init() obs_space = gym.spaces.Box(0, 1, (2,), np.float32) def _sa(ctx): ctx.update({"observation_space": obs_space}) return FaultInjectEnv(ctx) register_env("fault_env", _sa) def _ma(ctx): ctx.update({"observation_space": obs_space}) return make_multi_agent(FaultInjectEnv)(ctx) register_env("multi_agent_fault_env", _ma) @classmethod def tearDownClass(cls) -> None: ray.shutdown() def _do_test_failing_fatal(self, config, fail_eval=False): """Test raises real error when out of EnvRunners.""" config.num_env_runners = 2 config.env = "multi_agent_fault_env" if config.is_multi_agent else "fault_env" # Make both EnvRunners idx=1 and 2 fail. config.env_config = {"bad_indices": [1, 2]} config.restart_failed_env_runners = False if fail_eval: config.evaluation_num_env_runners = 2 config.evaluation_interval = 1 config.evaluation_config = { # Make eval EnvRunners (index 1) fail. "env_config": { "bad_indices": [1], "evaluation": True, }, "restart_failed_env_runners": False, } algo = config.build() self.assertRaises(ray.exceptions.RayError, lambda: algo.train()) algo.stop() def _do_test_failing_ignore(self, config: AlgorithmConfig, fail_eval: bool = False): # Test fault handling config.num_env_runners = 2 config.ignore_env_runner_failures = True config.validate_env_runners_after_construction = False config.restart_failed_env_runners = False config.env = "fault_env" # Make EnvRunner idx=1 fail. Other EnvRunners will be ok. config.environment( env_config={ "bad_indices": [1], } ) if fail_eval: config.evaluation_num_env_runners = 2 config.evaluation_interval = 1 config.evaluation_config = { "ignore_env_runner_failures": True, "restart_failed_env_runners": False, "env_config": { # Make EnvRunner idx=1 fail. Other EnvRunners will be ok. "bad_indices": [1], "evaluation": True, }, } algo = config.build() algo.train() # One of the EnvRunners failed. self.assertEqual(algo.env_runner_group.num_healthy_remote_workers(), 1) if fail_eval: # One of the eval EnvRunners failed. self.assertEqual(algo.eval_env_runner_group.num_healthy_remote_workers(), 1) algo.stop() def _do_test_failing_recover(self, config, multi_agent=False): # Counter that will survive restarts. COUNTER_NAME = f"_do_test_failing_recover{'_ma' if multi_agent else ''}" counter = Counter.options(name=COUNTER_NAME).remote() # Test raises real error when out of EnvRunners. config.num_env_runners = 1 config.evaluation_num_env_runners = 1 config.evaluation_interval = 1 config.env = "fault_env" if not multi_agent else "multi_agent_fault_env" config.evaluation_config = AlgorithmConfig.overrides( restart_failed_env_runners=True, # 0 delay for testing purposes. delay_between_env_runner_restarts_s=0, # Make eval EnvRunner (index 1) fail. env_config={ "bad_indices": [1], "failure_start_count": 3, "failure_stop_count": 4, "counter": COUNTER_NAME, }, **( dict( policy_mapping_fn=( lambda aid, episode, **kwargs: ( # Allows this test to query this # different-from-training-workers policy mapping fn. "This is the eval mapping fn" if episode is None else "main" if hash(episode.id_) % 2 == aid else "p{}".format(np.random.choice([0, 1])) ) ) ) if multi_agent else {} ), ) # Reset interaction counter. ray.wait([counter.reset.remote()]) algo = config.build() # This should also work several times. 
for _ in range(2): algo.train() time.sleep(15.0) algo.restore_env_runners(algo.env_runner_group) algo.restore_env_runners(algo.eval_env_runner_group) self.assertEqual(algo.env_runner_group.num_healthy_remote_workers(), 1) self.assertEqual(algo.eval_env_runner_group.num_healthy_remote_workers(), 1) if multi_agent: # Make a dummy call to the eval EnvRunner's policy_mapping_fn and # make sure the restored eval EnvRunner received the correct one from # the eval config (not the main EnvRunners' one). test = algo.eval_env_runner_group.foreach_env_runner( lambda w: w.config.policy_mapping_fn(0, None) ) self.assertEqual(test[0], "This is the eval mapping fn") algo.stop() def test_fatal_single_agent(self): # Test the case where all EnvRunners fail (w/o recovery). self._do_test_failing_fatal( PPOConfig().env_runners( env_to_module_connector=( lambda env, spaces, device: FlattenObservations() ), ) ) def test_fatal_multi_agent(self): # Test the case where all EnvRunners fail (w/o recovery). self._do_test_failing_fatal( PPOConfig().multi_agent( policies={"p0"}, policy_mapping_fn=lambda *a, **k: "p0" ), ) def test_async_samples(self): self._do_test_failing_ignore( IMPALAConfig().env_runners(env_runner_cls=ForwardHealthCheckToEnvWorker) ) def test_sync_replay(self): self._do_test_failing_ignore( SACConfig() .environment( env_config={"action_space": gym.spaces.Box(0, 1, (2,), np.float32)} ) .env_runners(env_runner_cls=ForwardHealthCheckToEnvWorker) .reporting(min_sample_timesteps_per_iteration=1) ) def test_multi_gpu(self): self._do_test_failing_ignore( PPOConfig() .env_runners(env_runner_cls=ForwardHealthCheckToEnvWorker) .training( train_batch_size=10, minibatch_size=1, num_epochs=1, ) ) def test_sync_samples(self): self._do_test_failing_ignore( PPOConfig() .env_runners(env_runner_cls=ForwardHealthCheckToEnvWorker) .training(optimizer={}) ) def test_env_crash_during_sampling_but_restart_crashed_sub_envs(self): """Expect sub-envs to fail (and not recover), but re-start them individually.""" register_env( "ma_cartpole_crashing", lambda cfg: ( cfg.update({"num_agents": 2}), make_multi_agent(CartPoleCrashing)(cfg), )[1], ) config = ( PPOConfig() .env_runners(num_env_runners=4) .fault_tolerance( # Re-start failed individual sub-envs (then continue). # This means no EnvRunners will ever fail due to individual env errors # (only maybe for reasons other than the env). restart_failed_sub_environments=True, # If the EnvRunner was affected by an error (other than the env error), # allow it to be removed, but training will continue. ignore_env_runner_failures=True, ) .environment( env_config={ # Crash prob=0.1%. Keep this as low as necessary to be able to # get at least a train batch sampled w/o too many interruptions. "p_crash": 0.001, } ) ) for multi_agent in [False, True]: if multi_agent: config.environment("ma_cartpole_crashing") config.env_runners(num_envs_per_env_runner=1) config.multi_agent( policies={"p0", "p1"}, policy_mapping_fn=lambda aid, eps, **kw: f"p{aid}", ) else: config.environment(CartPoleCrashing) config.env_runners(num_envs_per_env_runner=2) # Pre-checking disables, so building the Algorithm is save. algo = config.build() # Try to re-create the sub-env for infinite amount of times. for _ in range(5): # Expect some errors being logged here, but in general, should continue # as we recover from all sub-env failures. algo.train() # No EnvRunner has been removed. Still 2 left. 
self.assertEqual(algo.env_runner_group.num_healthy_remote_workers(), 4) algo.stop() def test_eval_env_runners_failing_ignore(self): # Test the case where one eval EnvRunner fails, but we chose to ignore. self._do_test_failing_ignore( PPOConfig() .env_runners(env_runner_cls=ForwardHealthCheckToEnvWorker) .training(model={"fcnet_hiddens": [4]}), fail_eval=True, ) def test_eval_env_runners_parallel_to_training_failing_recover(self): # Test the case where all eval EnvRunners fail, but we chose to recover. config = ( PPOConfig() .env_runners(env_runner_cls=ForwardHealthCheckToEnvWorker) .evaluation( evaluation_num_env_runners=1, evaluation_parallel_to_training=True, evaluation_duration="auto", ) .training(model={"fcnet_hiddens": [4]}) ) self._do_test_failing_recover(config) def test_eval_env_runners_parallel_to_training_multi_agent_failing_recover( self, ): # Test the case where all eval EnvRunners fail on a multi-agent env with # different `policy_mapping_fn` in eval- vs train EnvRunners, but we chose # to recover. config = ( PPOConfig() .env_runners(env_runner_cls=ForwardHealthCheckToEnvWorkerMultiAgent) .multi_agent( policies={"main", "p0", "p1"}, policy_mapping_fn=( lambda aid, episode, **kwargs: ( "main" if hash(episode.id_) % 2 == aid else "p{}".format(np.random.choice([0, 1])) ) ), ) .evaluation( evaluation_num_env_runners=1, # evaluation_parallel_to_training=True, # evaluation_duration="auto", ) .training(model={"fcnet_hiddens": [4]}) ) self._do_test_failing_recover(config, multi_agent=True) def test_eval_env_runners_failing_fatal(self): # Test the case where all eval EnvRunners fail (w/o recovery). self._do_test_failing_fatal( ( PPOConfig() .api_stack( enable_rl_module_and_learner=True, enable_env_runner_and_connector_v2=True, ) .training(model={"fcnet_hiddens": [4]}) ), fail_eval=True, ) def test_env_runners_failing_recover(self): # Counter that will survive restarts. COUNTER_NAME = "test_env_runners_fatal_but_recover" counter = Counter.options(name=COUNTER_NAME).remote() config = ( PPOConfig() .env_runners( env_runner_cls=ForwardHealthCheckToEnvWorker, num_env_runners=2, rollout_fragment_length=16, ) .rl_module( model_config=DefaultModelConfig(fcnet_hiddens=[4]), ) .training( train_batch_size_per_learner=32, minibatch_size=32, ) .environment( env="fault_env", env_config={ # Make both EnvRunners idx=1 and 2 fail. "bad_indices": [1, 2], "failure_start_count": 3, "failure_stop_count": 4, "counter": COUNTER_NAME, }, ) .fault_tolerance( restart_failed_env_runners=True, # But recover. # 0 delay for testing purposes. delay_between_env_runner_restarts_s=0, ) ) # Try with both local EnvRunner and without. for local_env_runner in [True, False]: config.env_runners(create_local_env_runner=local_env_runner) # Reset interaciton counter. ray.wait([counter.reset.remote()]) algo = config.build() # Before training, 2 healthy EnvRunners. self.assertEqual(algo.env_runner_group.num_healthy_remote_workers(), 2) # Nothing is restarted. self.assertEqual(algo.env_runner_group.num_remote_worker_restarts(), 0) algo.train() time.sleep(15.0) algo.restore_env_runners(algo.env_runner_group) # After training, still 2 healthy EnvRunners. self.assertEqual(algo.env_runner_group.num_healthy_remote_workers(), 2) # Both EnvRunners are restarted. self.assertEqual(algo.env_runner_group.num_remote_worker_restarts(), 2) algo.stop() def test_modules_are_restored_on_recovered_env_runner(self): # Counter that will survive restarts. 
COUNTER_NAME = "test_modules_are_restored_on_recovered_env_runner" counter = Counter.options(name=COUNTER_NAME).remote() config = ( PPOConfig() .env_runners( env_runner_cls=ForwardHealthCheckToEnvWorkerMultiAgent, num_env_runners=2, rollout_fragment_length=16, ) .rl_module( model_config=DefaultModelConfig(fcnet_hiddens=[4]), ) .training( train_batch_size_per_learner=32, minibatch_size=32, ) .environment( env="multi_agent_fault_env", env_config={ # Make both EnvRunners idx=1 and 2 fail. "bad_indices": [1, 2], "failure_start_count": 3, "failure_stop_count": 4, "counter": COUNTER_NAME, }, ) .evaluation( evaluation_num_env_runners=1, evaluation_interval=1, evaluation_config=PPOConfig.overrides( restart_failed_env_runners=True, # Restart the entire eval EnvRunner. restart_failed_sub_environments=False, env_config={ "evaluation": True, # Make eval EnvRunner (index 1) fail. "bad_indices": [1], "failure_start_count": 3, "failure_stop_count": 4, "counter": COUNTER_NAME, }, ), ) .callbacks(on_algorithm_init=on_algorithm_init) .fault_tolerance( restart_failed_env_runners=True, # But recover. # Throwing error in constructor is a bad idea. # 0 delay for testing purposes. delay_between_env_runner_restarts_s=0, ) .multi_agent( policies={"p0"}, policy_mapping_fn=lambda *a, **kw: "p0", ) ) # Reset interaction counter. ray.wait([counter.reset.remote()]) algo = config.build() # Should have the custom module. self.assertIsNotNone(algo.get_module("test_module")) # Before train loop, EnvRunners are fresh and not recreated. self.assertEqual(algo.env_runner_group.num_healthy_remote_workers(), 2) self.assertEqual(algo.env_runner_group.num_remote_worker_restarts(), 0) self.assertEqual(algo.eval_env_runner_group.num_healthy_remote_workers(), 1) self.assertEqual(algo.eval_env_runner_group.num_remote_worker_restarts(), 0) algo.train() time.sleep(15.0) algo.restore_env_runners(algo.env_runner_group) algo.restore_env_runners(algo.eval_env_runner_group) # Everything healthy again. And all EnvRunners have been restarted. self.assertEqual(algo.env_runner_group.num_healthy_remote_workers(), 2) self.assertEqual(algo.env_runner_group.num_remote_worker_restarts(), 2) self.assertEqual(algo.eval_env_runner_group.num_healthy_remote_workers(), 1) self.assertEqual(algo.eval_env_runner_group.num_remote_worker_restarts(), 1) # Let's verify that our custom module exists on all recovered EnvRunners. def has_test_module(w): return "test_module" in w.module # EnvRunner has test module. self.assertTrue( all( algo.env_runner_group.foreach_env_runner( has_test_module, local_env_runner=False ) ) ) # Eval EnvRunner has test module. self.assertTrue( all( algo.eval_env_runner_group.foreach_env_runner( has_test_module, local_env_runner=False ) ) ) def test_eval_env_runners_failing_recover(self): # Counter that will survive restarts. COUNTER_NAME = "test_eval_env_runners_fault_but_recover" counter = Counter.options(name=COUNTER_NAME).remote() config = ( PPOConfig() .env_runners( env_runner_cls=ForwardHealthCheckToEnvWorker, num_env_runners=2, rollout_fragment_length=16, ) .rl_module( model_config=DefaultModelConfig(fcnet_hiddens=[4]), ) .training( train_batch_size_per_learner=32, minibatch_size=32, ) .environment(env="fault_env") .evaluation( evaluation_num_env_runners=2, evaluation_interval=1, evaluation_config=PPOConfig.overrides( env_config={ "evaluation": True, "p_terminated": 0.0, "max_episode_len": 20, # Make both eval EnvRunners fail. "bad_indices": [1, 2], # Env throws error between steps 10 and 12. 
"failure_start_count": 3, "failure_stop_count": 4, "counter": COUNTER_NAME, }, ), ) .fault_tolerance( restart_failed_env_runners=True, # And recover # 0 delay for testing purposes. delay_between_env_runner_restarts_s=0, ) ) # Reset interaciton counter. ray.wait([counter.reset.remote()]) algo = config.build() # Before train loop, EnvRunners are fresh and not recreated. self.assertEqual(algo.eval_env_runner_group.num_healthy_remote_workers(), 2) self.assertEqual(algo.eval_env_runner_group.num_remote_worker_restarts(), 0) algo.train() time.sleep(15.0) algo.restore_env_runners(algo.eval_env_runner_group) # Everything still healthy. And all EnvRunners are restarted. self.assertEqual(algo.eval_env_runner_group.num_healthy_remote_workers(), 2) self.assertEqual(algo.eval_env_runner_group.num_remote_worker_restarts(), 2) def test_env_runner_failing_recover_with_hanging_env_runners(self): # Counter that will survive restarts. COUNTER_NAME = "test_eval_env_runners_fault_but_recover" counter = Counter.options(name=COUNTER_NAME).remote() config = ( # First thought: We are using an off-policy algorithm here, b/c we have # hanging EnvRunners (samples may be delayed, thus off-policy?). # However, this actually does NOT matter. All synchronously sampling algos # (whether off- or on-policy) now have a sampling timeout to NOT block # the execution of the algorithm b/c of a single heavily stalling EnvRunner. # Timeout data (batches or episodes) are discarded. SACConfig() .env_runners( env_runner_cls=ForwardHealthCheckToEnvWorker, num_env_runners=3, rollout_fragment_length=16, sample_timeout_s=5.0, ) .reporting( # Make sure each iteration doesn't take too long. min_time_s_per_iteration=0.5, # Make sure metrics reporting doesn't hang for too long # since we will have a hanging EnvRunner. metrics_episode_collection_timeout_s=1, ) .environment( env="fault_env", env_config={ "action_space": gym.spaces.Box(0, 1, (2,), np.float32), "evaluation": True, "p_terminated": 0.0, "max_episode_len": 20, # EnvRunners 1 and 2 will fail in step(). "bad_indices": [1, 2], # Env throws error between steps 3 and 4. "failure_start_count": 3, "failure_stop_count": 4, "counter": COUNTER_NAME, # EnvRunner 2 will hang for long time during init after restart. "init_delay": 3600, "init_delay_indices": [2], # EnvRunner 3 will hang in env.step(). "step_delay": 3600, "step_delay_indices": [3], }, ) .fault_tolerance( restart_failed_env_runners=True, # And recover env_runner_health_probe_timeout_s=0.01, env_runner_restore_timeout_s=5, delay_between_env_runner_restarts_s=0, # For testing, no delay. ) ) # Reset interaciton counter. ray.wait([counter.reset.remote()]) algo = config.build() # Before train loop, EnvRunners are fresh and not recreated. self.assertEqual(algo.env_runner_group.num_healthy_remote_workers(), 3) self.assertEqual(algo.env_runner_group.num_remote_worker_restarts(), 0) algo.train() time.sleep(15.0) # Most importantly, training progresses fine b/c the stalling EnvRunner is # ignored via a timeout. algo.train() # 2 healthy remote EnvRunners left, although EnvRunner 3 is stuck in rollout. self.assertEqual(algo.env_runner_group.num_healthy_remote_workers(), 2) # Only 1 successful restore, since EnvRunner 2 is stuck in indefinite init # and can not be properly restored. 
self.assertEqual(algo.env_runner_group.num_remote_worker_restarts(), 1) def test_eval_env_runners_on_infinite_episodes(self): """Tests whether eval EnvRunners warn appropriately after episode timeout.""" # Create infinitely running episodes, but with horizon setting (RLlib will # auto-terminate the episode). However, in the eval EnvRunners, don't set a # horizon -> Expect warning and no proper evaluation results. config = ( PPOConfig() .api_stack( enable_rl_module_and_learner=False, enable_env_runner_and_connector_v2=False, ) .environment(RandomEnv, env_config={"p_terminated": 0.0}) .training(train_batch_size_per_learner=200) .evaluation( evaluation_num_env_runners=1, evaluation_interval=1, evaluation_sample_timeout_s=2.0, ) ) algo = config.build() results = algo.train() self.assertTrue( np.isnan( results[EVALUATION_RESULTS][ENV_RUNNER_RESULTS][EPISODE_RETURN_MEAN] ) ) if __name__ == "__main__": import sys import pytest sys.exit(pytest.main(["-v", __file__]))
TestEnvRunnerFailures
python
giampaolo__psutil
tests/test_linux.py
{ "start": 22664, "end": 23375 }
class ____(PsutilTestCase):
    def test_fields(self):
        fields = psutil.cpu_times()._fields
        kernel_ver = re.findall(r'\d+\.\d+\.\d+', os.uname()[2])[0]
        kernel_ver_info = tuple(map(int, kernel_ver.split('.')))
        if kernel_ver_info >= (2, 6, 11):
            assert 'steal' in fields
        else:
            assert 'steal' not in fields
        if kernel_ver_info >= (2, 6, 24):
            assert 'guest' in fields
        else:
            assert 'guest' not in fields
        if kernel_ver_info >= (3, 2, 0):
            assert 'guest_nice' in fields
        else:
            assert 'guest_nice' not in fields


@pytest.mark.skipif(not LINUX, reason="LINUX only")
TestSystemCPUTimes
python
GoogleCloudPlatform__python-docs-samples
appengine/standard/ndb/properties/snippets.py
{ "start": 2350, "end": 2583 }
class ____(ndb.Model):
    name = ndb.StringProperty()
    name_lower = ndb.ComputedProperty(lambda self: self.name.lower())


def create_some_entity():
    entity = SomeEntity(name="Nick")
    entity.put()
    return entity
SomeEntity
python
huggingface__transformers
src/transformers/convert_slow_tokenizer.py
{ "start": 32542, "end": 33268 }
class ____(SpmConverter):
    def vocab(self, proto):
        vocab = [
            ("<s>", 0.0),
            ("<pad>", 0.0),
            ("</s>", 0.0),
            ("<unk>", 0.0),
        ]
        vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]]
        return vocab

    def unk_id(self, proto):
        return 3

    def post_processor(self):
        return processors.TemplateProcessing(
            single="eng_Latn $A </s>",
            pair="eng_Latn $A $B </s>",
            special_tokens=[
                ("eng_Latn", self.original_tokenizer.convert_tokens_to_ids("eng_Latn")),
                ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")),
            ],
        )
NllbConverter
python
huggingface__transformers
src/transformers/models/flaubert/modeling_flaubert.py
{ "start": 54051, "end": 58339 }
class ____(FlaubertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.transformer = FlaubertModel(config) self.dropout = nn.Dropout(config.dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, langs: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, lengths: Optional[torch.Tensor] = None, cache: Optional[dict[str, torch.Tensor]] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, TokenClassifierOutput]: r""" langs (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are languages ids which can be obtained from the language names by using two conversion mappings provided in the configuration of the model (only provided for multilingual models). More precisely, the *language name to language id* mapping is in `model.config.lang2id` (which is a dictionary string to int) and the *language id to language name* mapping is in `model.config.id2lang` (dictionary int to string). See usage examples detailed in the [multilingual documentation](../multilingual). lengths (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Length of each sentence that can be used to avoid performing attention on padding token indices. You can also use *attention_mask* for the same result (see above), kept here for compatibility. Indices selected in `[0, ..., input_ids.size(-1)]`. cache (`dict[str, torch.FloatTensor]`, *optional*): Instance of `EncoderDecoderCache` that contains precomputed KV states. Can be used to speed up sequential decoding. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.transformer( input_ids, attention_mask=attention_mask, langs=langs, token_type_ids=token_type_ids, position_ids=position_ids, lengths=lengths, cache=cache, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring( custom_intro=""" Flaubert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """ ) # Copied from transformers.models.xlm.modeling_xlm.XLMForQuestionAnsweringSimple with XLM_INPUTS->FLAUBERT_INPUTS,XLM->Flaubert
FlaubertForTokenClassification
python
dagster-io__dagster
python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py
{ "start": 242624, "end": 243976 }
class ____(GeneratedAirbyteSource):
    @public
    def __init__(
        self,
        name: str,
        instance_api_url: str,
        username: Optional[str] = None,
        password: Optional[str] = None,
        session_token: Optional[str] = None,
    ):
        r"""Airbyte Source for Metabase.

        Documentation can be found at https://docs.airbyte.com/integrations/sources/metabase

        Args:
            name (str): The name of the destination.
            instance_api_url (str): URL to your metabase instance API
            session_token (Optional[str]): To generate your session token, you need to run the
                following command: ``` curl -X POST \\ -H "Content-Type: application/json" \\
                -d '{"username": "person@metabase.com", "password": "fakepassword"}' \\
                http://localhost:3000/api/session ``` Then copy the value of the `id` field
                returned by a successful call to that API. Note that by default, sessions are
                good for 14 days and needs to be regenerated.
        """
        self.instance_api_url = check.str_param(instance_api_url, "instance_api_url")
        self.username = check.opt_str_param(username, "username")
        self.password = check.opt_str_param(password, "password")
        self.session_token = check.opt_str_param(session_token, "session_token")
        super().__init__("Metabase", name)
MetabaseSource
python
langchain-ai__langchain
libs/langchain_v1/tests/unit_tests/agents/test_responses_spec.py
{ "start": 712, "end": 879 }
class ____(BaseSchema):
    name: str
    response_format: Union[Dict[str, Any], List[Dict[str, Any]]]
    assertions_by_invocation: List[AssertionByInvocation]
TestCase
python
apache__airflow
providers/amazon/src/airflow/providers/amazon/aws/links/batch.py
{ "start": 914, "end": 1233 }
class ____(BaseAwsLink):
    """Helper class for constructing AWS Batch Job Definition Link."""

    name = "Batch Job Definition"
    key = "batch_job_definition"
    format_str = (
        BASE_AWS_CONSOLE_LINK
        + "/batch/home?region={region_name}#job-definition/detail/{job_definition_arn}"
    )
BatchJobDefinitionLink
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/pycodestyle/E30.py
{ "start": 633, "end": 680 }
class ____(object):
    pass
# end


# No error
Bar
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/pydoclint/DOC202_google.py
{ "start": 286, "end": 702 }
class ____:
    # DOC202
    def foo(self) -> str:
        """
        Do something

        Args:
            num (int): A number

        Returns:
            str: A string
        """
        print('test')

    # OK
    def bar(self) -> str:
        """
        Do something

        Args:
            num (int): A number
        """
        print('test')


# See: https://github.com/astral-sh/ruff/issues/12650
Bar
python
sqlalchemy__sqlalchemy
test/sql/test_returning.py
{ "start": 17798, "end": 20726 }
class ____(fixtures.TablesTest, AssertsExecutionResults): __requires__ = ("update_returning",) __sparse_driver_backend__ = True run_create_tables = "each" define_tables = InsertReturningTest.define_tables def test_update_returning(self, connection): table = self.tables.returning_tbl connection.execute( table.insert(), [{"persons": 5, "full": False}, {"persons": 3, "full": False}], ) result = connection.execute( table.update() .values(dict(full=True)) .where(table.c.persons > 4) .returning(table.c.id) ) eq_(result.fetchall(), [(1,)]) result2 = connection.execute( select(table.c.id, table.c.full).order_by(table.c.id) ) eq_(result2.fetchall(), [(1, True), (2, False)]) def test_update_returning_w_expression_one(self, connection): table = self.tables.returning_tbl connection.execute( table.insert(), [ {"persons": 5, "full": False, "strval": "str1"}, {"persons": 3, "full": False, "strval": "str2"}, ], ) result = connection.execute( table.update() .where(table.c.persons > 4) .values(full=True) .returning(table.c.strval + "hi") ) eq_(result.fetchall(), [("str1hi",)]) result2 = connection.execute( select(table.c.id, table.c.strval).order_by(table.c.id) ) eq_(result2.fetchall(), [(1, "str1"), (2, "str2")]) def test_update_returning_w_type_coerce_expression(self, connection): table = self.tables.returning_tbl connection.execute( table.insert(), [ {"persons": 5, "goofy": "somegoofy1"}, {"persons": 3, "goofy": "somegoofy2"}, ], ) result = connection.execute( table.update() .where(table.c.persons > 4) .values(goofy="newgoofy") .returning(type_coerce(table.c.goofy, String)) ) eq_(result.fetchall(), [("FOOnewgoofy",)]) result2 = connection.execute( select(table.c.id, table.c.goofy).order_by(table.c.id) ) eq_( result2.fetchall(), [(1, "FOOnewgoofyBAR"), (2, "FOOsomegoofy2BAR")], ) def test_update_full_returning(self, connection): table = self.tables.returning_tbl connection.execute( table.insert(), [{"persons": 5, "full": False}, {"persons": 3, "full": False}], ) result = connection.execute( table.update() .where(table.c.persons > 2) .values(full=True) .returning(table.c.id, table.c.full) ) eq_(result.fetchall(), [(1, True), (2, True)])
UpdateReturningTest
python
yaml__pyyaml
tests/legacy_tests/canonical.py
{ "start": 62, "end": 110 }
class ____(yaml.YAMLError):
    pass
CanonicalError
python
pytorch__pytorch
torch/testing/_internal/common_quantization.py
{ "start": 87687, "end": 88556 }
class ____(torch.nn.Module):
    r"""A module with manually inserted `QuantStub` and `DeQuantStub`
    and contains both linear and conv modules
    """

    def __init__(self, qconfig=None):
        super().__init__()
        self.qconfig = (
            qconfig
            if qconfig
            else torch.ao.quantization.get_default_qat_qconfig("qnnpack")
        )
        self.quant = QuantStub()
        self.dequant = DeQuantStub()
        self.conv = torch.nn.Conv2d(3, 1, kernel_size=3).to(dtype=torch.float)
        self.fc1 = torch.nn.Linear(64, 10).to(dtype=torch.float)
        self.fc2 = torch.nn.Linear(10, 10).to(dtype=torch.float)

    def forward(self, x):
        x = self.quant(x)
        x = self.conv(x)
        x = x.view(-1, 64).contiguous()
        x = self.fc1(x)
        x = self.fc2(x)
        return self.dequant(x)
ManualConvLinearQATModel
python
keras-team__keras
keras/src/initializers/random_initializers.py
{ "start": 4855, "end": 6735 }
class ____(RandomInitializer): """Random uniform initializer. Draws samples from a uniform distribution for given parameters. Examples: >>> # Standalone usage: >>> initializer = RandomUniform(minval=0.0, maxval=1.0) >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = RandomUniform(minval=0.0, maxval=1.0) >>> layer = Dense(3, kernel_initializer=initializer) Args: minval: A python scalar or a scalar keras tensor. Lower bound of the range of random values to generate (inclusive). maxval: A python scalar or a scalar keras tensor. Upper bound of the range of random values to generate (exclusive). seed: A Python integer or instance of `keras.backend.SeedGenerator`. Used to make the behavior of the initializer deterministic. Note that an initializer seeded with an integer or `None` (unseeded) will produce the same random values across multiple calls. To get different random values across multiple calls, use as seed an instance of `keras.backend.SeedGenerator`. """ def __init__(self, minval=-0.05, maxval=0.05, seed=None): self.minval = minval self.maxval = maxval super().__init__(seed=seed) def __call__(self, shape, dtype=None): return random.uniform( shape=shape, minval=self.minval, maxval=self.maxval, seed=self.seed, dtype=dtype, ) def get_config(self): base_config = super().get_config() config = {"minval": self.minval, "maxval": self.maxval} return {**base_config, **config} @keras_export( [ "keras.initializers.VarianceScaling", "keras.initializers.variance_scaling", ] )
RandomUniform
python
walkccc__LeetCode
solutions/2818. Apply Operations to Maximize Score/2818.py
{ "start": 0, "end": 2337 }
class ____: def maximumScore(self, nums: list[int], k: int) -> int: MOD = 1_000_000_007 n = len(nums) ans = 1 minPrimeFactors = self._sieveEratosthenes(max(nums) + 1) primeScores = [self._getPrimeScore(num, minPrimeFactors) for num in nums] # left[i] := the next index on the left (if any) # s.t. primeScores[left[i]] >= primeScores[i] left = [-1] * n # right[i] := the next index on the right (if any) # s.t. primeScores[right[i]] > primeScores[i] right = [n] * n stack = [] # Find the next indices on the left where `primeScores` are greater or equal. for i in reversed(range(n)): while stack and primeScores[stack[-1]] <= primeScores[i]: left[stack.pop()] = i stack.append(i) stack = [] # Find the next indices on the right where `primeScores` are greater. for i in range(n): while stack and primeScores[stack[-1]] < primeScores[i]: right[stack.pop()] = i stack.append(i) numAndIndexes = [(num, i) for i, num in enumerate(nums)] def modPow(x: int, n: int) -> int: if n == 0: return 1 if n % 2 == 1: return x * modPow(x, n - 1) % MOD return modPow(x * x % MOD, n // 2) for num, i in sorted(numAndIndexes, key=lambda x: (-x[0], x[1])): # nums[i] is the maximum value in the range [left[i] + 1, right[i] - 1] # So, there are (i - left[i]) * (right[i] - 1) ranges where nums[i] will # be chosen. rangeCount = (i - left[i]) * (right[i] - i) actualCount = min(rangeCount, k) k -= actualCount ans *= modPow(num, actualCount) ans %= MOD return ans def _sieveEratosthenes(self, n: int) -> list[int]: """Gets the minimum prime factor of i, where 2 <= i <= n.""" minPrimeFactors = [i for i in range(n + 1)] for i in range(2, int(n**0.5) + 1): if minPrimeFactors[i] == i: # `i` is prime. for j in range(i * i, n, i): minPrimeFactors[j] = min(minPrimeFactors[j], i) return minPrimeFactors def _getPrimeScore(self, num: int, minPrimeFactors: list[int]) -> int: primeFactors = set() while num > 1: divisor = minPrimeFactors[num] primeFactors.add(divisor) while num % divisor == 0: num //= divisor return len(primeFactors)
Solution
python
ray-project__ray
doc/source/serve/doc_code/tutorial_tensorflow.py
{ "start": 1335, "end": 2226 }
class ____:
    def __init__(self, model_path: str):
        import tensorflow as tf

        self.model_path = model_path
        self.model = tf.keras.models.load_model(model_path)

    async def __call__(self, starlette_request: Request) -> Dict:
        # Step 1: transform HTTP request -> tensorflow input
        # Here we define the request schema to be a json array.
        input_array = np.array((await starlette_request.json())["array"])
        reshaped_array = input_array.reshape((1, 28, 28))

        # Step 2: tensorflow input -> tensorflow output
        prediction = self.model(reshaped_array)

        # Step 3: tensorflow output -> web output
        return {"prediction": prediction.numpy().tolist(), "file": self.model_path}
# __doc_define_servable_end__


# __doc_deploy_begin__
mnist_model = TFMnistModel.bind(TRAINED_MODEL_PATH)
# __doc_deploy_end__
TFMnistModel
python
pyca__cryptography
tests/x509/test_name.py
{ "start": 306, "end": 8186 }
class ____: def test_invalid(self, subtests): for value in [ "C=US,CN=Joe , Smith,DC=example", ",C=US,CN=Joe , Smith,DC=example", "C=US,UNKNOWN=Joe , Smith,DC=example", "C=US,CN,DC=example", "C=US,FOOBAR=example", "CN=Lu\\C4\\8Di\\C4partial character", ]: with subtests.test(): with pytest.raises(ValueError): Name.from_rfc4514_string(value) def test_valid(self, subtests): for value, expected in [ ( r"CN=James \"Jim\" Smith\, III", Name( [ NameAttribute( NameOID.COMMON_NAME, 'James "Jim" Smith, III' ) ] ), ), ( r"UID=\# escape\+\,\;\00this\ ", Name([NameAttribute(NameOID.USER_ID, "# escape+,;\0this ")]), ), ( r"2.5.4.3=James \"Jim\" Smith\, III", Name( [ NameAttribute( NameOID.COMMON_NAME, 'James "Jim" Smith, III' ) ] ), ), ("ST=", Name([NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, "")])), ( "OU=Sales+CN=J. Smith,DC=example,DC=net", Name( [ RelativeDistinguishedName( [NameAttribute(NameOID.DOMAIN_COMPONENT, "net")] ), RelativeDistinguishedName( [ NameAttribute( NameOID.DOMAIN_COMPONENT, "example" ) ] ), RelativeDistinguishedName( [ NameAttribute( NameOID.ORGANIZATIONAL_UNIT_NAME, "Sales" ), NameAttribute( NameOID.COMMON_NAME, "J. Smith" ), ] ), ] ), ), ( "CN=cryptography.io,O=PyCA,L=,ST=,C=US", Name( [ NameAttribute(NameOID.COUNTRY_NAME, "US"), NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, ""), NameAttribute(NameOID.LOCALITY_NAME, ""), NameAttribute(NameOID.ORGANIZATION_NAME, "PyCA"), NameAttribute(NameOID.COMMON_NAME, "cryptography.io"), ] ), ), ( r"C=US,CN=Joe \, Smith,DC=example", Name( [ NameAttribute(NameOID.DOMAIN_COMPONENT, "example"), NameAttribute(NameOID.COMMON_NAME, "Joe , Smith"), NameAttribute(NameOID.COUNTRY_NAME, "US"), ] ), ), ( r"C=US,CN=Jane \"J\,S\" Smith,DC=example", Name( [ NameAttribute(NameOID.DOMAIN_COMPONENT, "example"), NameAttribute(NameOID.COMMON_NAME, 'Jane "J,S" Smith'), NameAttribute(NameOID.COUNTRY_NAME, "US"), ] ), ), ( 'C=US,CN=\\"Jane J\\,S Smith\\",DC=example', Name( [ NameAttribute(NameOID.DOMAIN_COMPONENT, "example"), NameAttribute(NameOID.COMMON_NAME, '"Jane J,S Smith"'), NameAttribute(NameOID.COUNTRY_NAME, "US"), ] ), ), ( 'C=US,CN=\\"Jane \\"J\\,S\\" Smith\\",DC=example', Name( [ NameAttribute(NameOID.DOMAIN_COMPONENT, "example"), NameAttribute( NameOID.COMMON_NAME, '"Jane "J,S" Smith"' ), NameAttribute(NameOID.COUNTRY_NAME, "US"), ] ), ), ( r"C=US,CN=Jane=Smith,DC=example", Name( [ NameAttribute(NameOID.DOMAIN_COMPONENT, "example"), NameAttribute(NameOID.COMMON_NAME, "Jane=Smith"), NameAttribute(NameOID.COUNTRY_NAME, "US"), ] ), ), (r"CN=#616263", Name([NameAttribute(NameOID.COMMON_NAME, "abc")])), (r"CN=👍", Name([NameAttribute(NameOID.COMMON_NAME, "👍")])), ( "CN=\\\\123", Name([NameAttribute(NameOID.COMMON_NAME, "\\123")]), ), ("CN=\\\\\\;", Name([NameAttribute(NameOID.COMMON_NAME, "\\;")])), ( "CN=\\\\#123", Name([NameAttribute(NameOID.COMMON_NAME, "\\#123")]), ), ( "2.5.4.10=abc", Name([NameAttribute(NameOID.ORGANIZATION_NAME, "abc")]), ), ("", Name([])), ( r"CN=Lu\C4\8Di\C4\87", Name([NameAttribute(NameOID.COMMON_NAME, "Lučić")]), ), ( r"CN=Lu\C4\8D\=i\C4\87\#,C=\55\53", Name( [ NameAttribute(NameOID.COUNTRY_NAME, "US"), NameAttribute(NameOID.COMMON_NAME, "Luč=ić#"), ] ), ), ]: with subtests.test(): result = Name.from_rfc4514_string(value) assert result == expected def test_attr_name_override(self): assert Name.from_rfc4514_string( "CN=Santa Claus,E=santa@north.pole", {"E": NameOID.EMAIL_ADDRESS} ) == Name( [ NameAttribute(NameOID.EMAIL_ADDRESS, "santa@north.pole"), NameAttribute(NameOID.COMMON_NAME, "Santa Claus"), ] ) assert 
Name.from_rfc4514_string( "CN=Santa Claus", {"CN": NameOID.EMAIL_ADDRESS} ) == Name( [ NameAttribute(NameOID.EMAIL_ADDRESS, "Santa Claus"), ] ) def test_generate_parse(self): name_value = Name( [ NameAttribute(NameOID.COMMON_NAME, "Common Name 1"), NameAttribute(NameOID.LOCALITY_NAME, "City for Name 1"), NameAttribute( NameOID.ORGANIZATION_NAME, "Name 1 Organization" ), ] ) assert ( Name.from_rfc4514_string(name_value.rfc4514_string()) == name_value ) name_string = "O=Organization,L=City,CN=Common Name" assert ( Name.from_rfc4514_string(name_string).rfc4514_string() == name_string ) def test_single_space_escaping(self): # Test that a single space is escaped correctly (not double-escaped) name = Name([NameAttribute(NameOID.ORGANIZATION_NAME, " ")]) s = name.rfc4514_string() # Should be "O=\ " not "O=\\ " assert s == r"O=\ " # Verify round-trip parsing works assert Name.from_rfc4514_string(s) == name
TestRFC4514
python
conda__conda
conda/activate.py
{ "start": 37591, "end": 38865 }
class ____(_Activator):
    pathsep_join = ";".join
    sep = "\\"
    path_conversion = staticmethod(_path_identity)
    script_extension = ".bat"
    tempfile_extension = ".env"
    command_join = "\n"
    needs_line_ending_fix = False

    # we are not generating a script to run but rather an INI style file
    # with key=value pairs to set environment variables, key= to unset them,
    # and _CONDA_SCRIPT=script pairs to run scripts
    unset_var_tmpl = "%s="
    export_var_tmpl = "%s=%s"
    path_var_tmpl = export_var_tmpl
    set_var_tmpl = export_var_tmpl
    run_script_tmpl = "_CONDA_SCRIPT=%s"

    hook_source_path = None
    inline_hook_source = None

    def _update_prompt(self, set_vars, conda_prompt_modifier):
        prompt = os.getenv("PROMPT", "")
        current_prompt_modifier = os.getenv("CONDA_PROMPT_MODIFIER")
        if current_prompt_modifier:
            prompt = re.sub(re.escape(current_prompt_modifier), r"", prompt)
        set_vars["PROMPT"] = conda_prompt_modifier + prompt

    def _hook_preamble(self) -> None:
        # TODO: cmd.exe doesn't get a hook function? Or do we need to do something different?
        # Like, for cmd.exe only, put a special directory containing only conda.bat on PATH?
        pass
CmdExeActivator
python
getsentry__sentry
src/sentry/notifications/api/endpoints/notification_actions_index.py
{ "start": 1302, "end": 1667 }
class ____(OrganizationPermission):
    scope_map = {
        "GET": ["org:read", "org:write", "org:admin"],
        "POST": ["org:read", "org:write", "org:admin"],
        "PUT": ["org:read", "org:write", "org:admin"],
        "DELETE": ["org:read", "org:write", "org:admin"],
    }


@region_silo_endpoint
@extend_schema(tags=["Alerts"])
NotificationActionsPermission
python
langchain-ai__langchain
libs/partners/qdrant/langchain_qdrant/sparse_embeddings.py
{ "start": 405, "end": 1108 }
class ____(ABC):
    """An interface for sparse embedding models to use with Qdrant."""

    @abstractmethod
    def embed_documents(self, texts: list[str]) -> list[SparseVector]:
        """Embed search docs."""

    @abstractmethod
    def embed_query(self, text: str) -> SparseVector:
        """Embed query text."""

    async def aembed_documents(self, texts: list[str]) -> list[SparseVector]:
        """Asynchronous Embed search docs."""
        return await run_in_executor(None, self.embed_documents, texts)

    async def aembed_query(self, text: str) -> SparseVector:
        """Asynchronous Embed query text."""
        return await run_in_executor(None, self.embed_query, text)
SparseEmbeddings
python
tensorflow__tensorflow
tensorflow/lite/python/op_hint.py
{ "start": 22568, "end": 27454 }
class ____(_LiteOperand): """An operand for a tflite hint function that is aggregated from many. For example, an LSTM is a grid of operators that are all related. Inputs going into them may need to be fused, so they should all be tracked as related arguments. """ def __init__(self, aggregation): _LiteOperand.__init__(self) self.aggregation = aggregation self.names = {} self.nodes = {} self.flattened = None def add(self, sort, node): self.names[sort] = _tensor_name_base(node.name) self.nodes[sort] = node def flatten_nodes(self): """Return a list of all the node protos in aggregation sorted order.""" if not self.flattened: self.flattened = [None] * len(self.nodes) for idx, node in self.nodes.items(): self.flattened[idx] = node for n in self.nodes: if n is None: raise RuntimeError("Aggregate was missing argument.") if self.aggregation == OpHint.AGGREGATE_FIRST: self.flattened = self.flattened[:1] elif self.aggregation == OpHint.AGGREGATE_LAST: self.flattened = self.flattened[-1:] elif self.aggregation == OpHint.AGGREGATE_STACK: pass else: raise ValueError("Invalid aggregation type %r specified" % self.aggregation) return self.flattened def flatten(self): """Return a list of all node names in aggregation sorted sorter.""" return [_tensor_name_base(x.name) for x in self.flatten_nodes()] def aggregate_and_return_name_for_input(self, out_graphdef): """This adds the nodes to out_graphdef and returns an aggregated output. In particular, if you have 4 inputs to a hint stub, this will be the node that you can use as an output. I.e. you have 4 timesteps from a static rnn, then a fused UnidirectionalLSTM will expect 1 input with all 4 time steps. So here we make a pack and return the output name of that pack. Args: out_graphdef: A graphdef that is ready to have this input added. Returns: The name of a pack that aggregates this node. """ flattened = self.flatten_nodes() if (self.aggregation == OpHint.AGGREGATE_FIRST) or ( self.aggregation == OpHint.AGGREGATE_LAST): assert len(flattened) == 1 if len(flattened) == 1 and self.aggregation != OpHint.AGGREGATE_STACK: return _tensor_name_base(flattened[0].name) else: new_node = _node_def_pb2.NodeDef() new_node.op = "Pack" new_node.name = "OpHintStack-%s" % flattened[0].name new_node.attr["N"].i = len(flattened) new_node.attr["T"].type = flattened[0].attr["T"].type for discrete in flattened: new_node.input.append(_tensor_name_base(discrete.name)) out_graphdef.node.extend([new_node]) return new_node.name def aggregate_and_return_name_for_output(self, fused_op_name, output_index, out_graphdef): """This adds to `out_graphdef` all the unaggregated outputs. I.e. we are outputting from a fused stub, but we need to make it compatible with the unfused original graph so we insert an unpack. Ideally in a later stage the unpack -> pack sequences will be removed. Args: fused_op_name: The name of the stub we are in the process of fusing. output_index: The output output_index this object represents. out_graphdef: The graphdef we are in the process of buildings Returns: The type of the aggregated output (so we can finish building the stub op). 
""" flattened = self.flatten_nodes() if (self.aggregation == OpHint.AGGREGATE_FIRST) or ( self.aggregation == OpHint.AGGREGATE_LAST): assert len(flattened) == 1 if len(flattened) == 1 and self.aggregation != OpHint.AGGREGATE_STACK: temp_op = _LiteSingleOperand(flattened[0]) return temp_op.aggregate_and_return_name_for_output( fused_op_name, output_index, out_graphdef) else: stack_node = _node_def_pb2.NodeDef() stack_node.op = "Unpack" stack_node.name = "OpHintUnstack-%s" % flattened[0].name stack_node.attr["num"].i = len(flattened) output_type = flattened[0].attr["T"].type stack_node.attr["T"].type = output_type stack_node.input.append( _tensorflow_output_name(fused_op_name, output_index)) out_graphdef.node.extend([stack_node]) for idx, discrete in enumerate(flattened): output_node = _copy.deepcopy(discrete) del output_node.input[:] output_node.input.append(_tensorflow_output_name(stack_node.name, idx)) out_graphdef.node.extend([output_node]) return output_type def __str__(self): s = "\t\t\tAGGREGATE %s\n" % self.aggregation for sort, val in self.names.iteritems(): s += "\t\t\t%d: %s\n" % (sort, val) return s
_LiteAggregateOperand
python
falconry__falcon
falcon/errors.py
{ "start": 3528, "end": 3749 }
class ____(RuntimeError):
    """The method or operation is not supported."""


# NOTE(kgriffs): This inherits from ValueError to be consistent with the type
# raised by Python's built-in file-like objects.
UnsupportedError
python
getsentry__sentry
src/sentry/sentry_apps/external_requests/alert_rule_action_requester.py
{ "start": 1386, "end": 5387 }
class ____: install: SentryAppInstallation | RpcSentryAppInstallation uri: str fields: Sequence[Mapping[str, str]] = field(default_factory=list) http_method: str | None = "POST" def run(self) -> SentryAppAlertRuleActionResult: event = SentryAppEventType.ALERT_RULE_ACTION_REQUESTED with SentryAppInteractionEvent( operation_type=SentryAppInteractionType.EXTERNAL_REQUEST, event_type=event, ).capture() as lifecycle: extras: dict[str, Any] = { "uri": self.uri, "installation_uuid": self.install.uuid, "sentry_app_slug": self.sentry_app.slug, } try: response = send_and_save_sentry_app_request( url=self._build_url(), sentry_app=self.sentry_app, org_id=self.install.organization_id, event=event, headers=self._build_headers(), method=self.http_method, data=self.body, ) except RequestException as e: halt_reason = FAILURE_REASON_BASE.format( SentryAppExternalRequestHaltReason.BAD_RESPONSE ) lifecycle.record_halt(halt_reason=e, extra={"halt_reason": halt_reason, **extras}) return SentryAppAlertRuleActionResult( success=False, message=self._get_response_message(e.response, DEFAULT_ERROR_MESSAGE), error_type=SentryAppErrorType.INTEGRATOR, webhook_context={"error_type": halt_reason, **extras}, status_code=500, ) except Exception as e: failure_reason = FAILURE_REASON_BASE.format( SentryAppExternalRequestFailureReason.UNEXPECTED_ERROR ) lifecycle.record_failure( failure_reason=e, extra={"failure_reason": failure_reason, **extras} ) return SentryAppAlertRuleActionResult( success=False, message=DEFAULT_ERROR_MESSAGE, error_type=SentryAppErrorType.SENTRY, webhook_context={"error_type": failure_reason, **extras}, status_code=500, ) return SentryAppAlertRuleActionResult( success=True, message=self._get_response_message(response, DEFAULT_SUCCESS_MESSAGE) ) def _build_url(self) -> str: urlparts = list(urlparse(self.sentry_app.webhook_url)) urlparts[2] = self.uri return urlunparse(urlparts) def _build_headers(self) -> dict[str, str]: request_uuid = uuid4().hex return { "Content-Type": "application/json", "Request-ID": request_uuid, "Sentry-App-Signature": self.sentry_app.build_signature(self.body), } def _get_response_message(self, response: Response | None, default_message: str) -> str: """ Returns the message from the response body, if in the expected location. Used to bubble up info from the Sentry App to the UI. The location should be coordinated with the docs on Alert Rule Action UI Components. """ if response is None: message = default_message else: try: message = response.json().get("message", default_message) except Exception: message = default_message return f"{self.sentry_app.name}: {message}" @cached_property def body(self): return json.dumps( { "fields": self.fields, "installationId": self.install.uuid, } ) @cached_property def sentry_app(self): return self.install.sentry_app
SentryAppAlertRuleActionRequester
python
google__pytype
pytype/tools/traces/traces_test.py
{ "start": 677, "end": 1044 }
class ____(traces.MatchAstVisitor):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.traces_by_node_type = collections.defaultdict(list)

    def generic_visit(self, node):
        try:
            matches = self.match(node)
        except NotImplementedError:
            return
        self.traces_by_node_type[node.__class__].extend(matches)
_TestVisitor
python
pydata__xarray
xarray/core/datatree.py
{ "start": 15690, "end": 15851 }
class ____:
    data_vars: dict[str, CoercibleValue] = field(default_factory=dict)
    coords: dict[str, CoercibleValue] = field(default_factory=dict)
_DatasetArgs
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 1020575, "end": 1021034 }
class ____(sgqlc.types.Type, Node):
    """
    See source code for more info.
    """

    __schema__ = graphql_schema
    __field_names__ = ("actor", "created_at", "lockable")
    actor = sgqlc.types.Field(Actor, graphql_name="actor")
    created_at = sgqlc.types.Field(
        sgqlc.types.non_null(DateTime), graphql_name="createdAt"
    )
    lockable = sgqlc.types.Field(
        sgqlc.types.non_null(Lockable), graphql_name="lockable"
    )
UnlockedEvent
python
bokeh__bokeh
tests/unit/bokeh/core/test_validation.py
{ "start": 2970, "end": 9221 }
class ____(Model): foo = Int(default=0) @v.error("E") def _check_error(self): if self.foo > 5: return "err" @v.warning("W") def _check_warning(self): if self.foo < -5: return "wrn" def test_check_integrity_pass() -> None: m = Mod() issues = ValidationIssues(error=[], warning=[]) assert v.check_integrity([m]) == issues def test_check_integrity_error() -> None: m = Mod(foo = 10) issues = ValidationIssues( error=[ValidationIssue(9999, "EXT:E", "Custom extension reports error", "err")], warning=[], ) assert v.check_integrity([m]) == issues def test_check_integrity_warning() -> None: m = Mod(foo = -10) issues = ValidationIssues( error=[], warning=[ValidationIssue(9999, "EXT:W", "Custom extension reports warning", "wrn")], ) assert v.check_integrity([m]) == issues @patch('bokeh.core.validation.check.log.error') @patch('bokeh.core.validation.check.log.warning') def test_check_pass(mock_warn: MagicMock, mock_error: MagicMock) -> None: m = Mod() issues = v.check_integrity([m]) v.process_validation_issues(issues) assert not mock_error.called assert not mock_warn.called @patch('bokeh.core.validation.check.log.error') @patch('bokeh.core.validation.check.log.warning') def test_check_error(mock_warn: MagicMock, mock_error: MagicMock) -> None: m = Mod(foo=10) issues = v.check_integrity([m]) v.process_validation_issues(issues) assert mock_error.called assert not mock_warn.called @patch('bokeh.core.validation.check.log.error') @patch('bokeh.core.validation.check.log.warning') def test_check_warn(mock_warn: MagicMock, mock_error: MagicMock) -> None: m = Mod(foo=-10) issues = v.check_integrity([m]) v.process_validation_issues(issues) assert not mock_error.called assert mock_warn.called @patch('bokeh.core.validation.check.log.error') @patch('bokeh.core.validation.check.log.warning') def test_silence_and_check_warn(mock_warn: MagicMock, mock_error: MagicMock) -> None: from bokeh.core.validation.warnings import EXT m = Mod(foo=-10) try: v.silence(EXT) # turn the warning off issues = v.check_integrity([m]) v.process_validation_issues(issues) assert not mock_error.called assert not mock_warn.called finally: v.silence(EXT, False) # turn the warning back on issues = v.check_integrity([m]) v.process_validation_issues(issues) assert not mock_error.called assert mock_warn.called @patch('bokeh.core.validation.check.log.error') @patch('bokeh.core.validation.check.log.warning') def test_silence_with_bad_input_and_check_warn(mock_warn: MagicMock, mock_error: MagicMock) -> None: m = Mod(foo=-10) with pytest.raises(ValueError, match="Input to silence should be a warning object"): v.silence(cast(Any, "EXT:W")) issues = v.check_integrity([m]) v.process_validation_issues(issues) assert not mock_error.called assert mock_warn.called @patch('bokeh.core.validation.check.log.error') @patch('bokeh.core.validation.check.log.warning') def test_silence_warning_already_in_silencers_is_ok(mock_warn: MagicMock, mock_error: MagicMock) -> None: from bokeh.core.validation.warnings import EXT m = Mod(foo=-10) try: silencers0 = v.silence(EXT) # turn the warning off silencers1 = v.silence(EXT) # do it a second time - no-op assert len(silencers0) == 1 assert silencers0 == silencers1 # silencers is same as before issues = v.check_integrity([m]) v.process_validation_issues(issues) assert not mock_error.called assert not mock_warn.called finally: v.silence(EXT, False) # turn the warning back on issues = v.check_integrity([m]) v.process_validation_issues(issues) assert not mock_error.called assert mock_warn.called 
@patch('bokeh.core.validation.check.log.error') @patch('bokeh.core.validation.check.log.warning') def test_silence_remove_warning_that_is_not_in_silencers_is_ok(mock_warn: MagicMock, mock_error: MagicMock) -> None: from bokeh.core.validation.warnings import EXT m = Mod(foo=-10) silencers0 = v.silence(EXT) # turn the warning off assert len(silencers0) == 1 silencers1 = v.silence(EXT, False) # turn the warning back on silencers2 = v.silence(EXT, False) # do it a second time - no-op assert len(silencers1) == 0 assert silencers1 == silencers2 issues = v.check_integrity([m]) v.process_validation_issues(issues) assert not mock_error.called assert mock_warn.called @patch('bokeh.core.validation.check.log.error') @patch('bokeh.core.validation.check.log.warning') def test_process_validation_issues_pass(mock_warn: MagicMock, mock_error: MagicMock) -> None: issues = ValidationIssues(error=[], warning=[]) v.process_validation_issues(issues) assert not mock_error.called assert not mock_warn.called @patch('bokeh.core.validation.check.log.error') @patch('bokeh.core.validation.check.log.warning') def test_process_validation_issues_warn(mock_warn: MagicMock, mock_error: MagicMock) -> None: issues = ValidationIssues( error=[ValidationIssue(9999, "EXT:E", "Custom extension reports error", "err")], warning=[], ) v.process_validation_issues(issues) assert mock_error.called assert not mock_warn.called @patch('bokeh.core.validation.check.log.error') @patch('bokeh.core.validation.check.log.warning') def test_process_validation_issues_error(mock_warn: MagicMock, mock_error: MagicMock) -> None: issues = ValidationIssues( error=[], warning=[ValidationIssue(9999, "EXT:W", "Custom extension reports warning", "wrn")], ) v.process_validation_issues(issues) assert not mock_error.called assert mock_warn.called #----------------------------------------------------------------------------- # Private API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Code #-----------------------------------------------------------------------------
Mod
python
instagram__MonkeyType
tests/test_stubs.py
{ "start": 33008, "end": 34097 }
class ____:
    def test_ignore_non_matching_functions(self):
        b = StubIndexBuilder('foo.bar', max_typed_dict_size=0)
        b.log(CallTrace(untyped_helper, {'x': int, 'y': str}))
        assert len(b.index) == 0

    def test_build_index(self):
        idxb = StubIndexBuilder('tests', max_typed_dict_size=0)
        idxb.log(CallTrace(untyped_helper, {'x': int, 'y': str}, str))
        sig = Signature.from_callable(untyped_helper)
        sig = sig.replace(
            parameters=[
                Parameter('x', Parameter.POSITIONAL_OR_KEYWORD, annotation=int),
                Parameter('y', Parameter.POSITIONAL_OR_KEYWORD, annotation=str),
            ],
            return_annotation=str
        )
        mod_stub = ModuleStub(function_stubs=[FunctionStub('untyped_helper', sig, FunctionKind.MODULE)])
        expected = {'tests.test_stubs': mod_stub}
        assert idxb.get_stubs() == expected


# These functions are intentionally partially typed to ensure we do not modify pre-existing
# annotations as well as to ensure we update empty annotations.
TestStubIndexBuilder
python
getsentry__sentry
src/sentry/analytics/events/release_get_previous_commits.py
{ "start": 85, "end": 310 }
class ____(analytics.Event):
    user_id: int | None = None
    organization_id: int
    project_ids: list[int]
    user_agent: str | None = None


analytics.register(ReleaseGetPreviousCommitsEvent)
ReleaseGetPreviousCommitsEvent
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/operators/bigquery.py
{ "start": 71709, "end": 76186 }
class ____(GoogleCloudBaseOperator): """ Update a table for your Project in BigQuery. Use ``fields`` to specify which fields of table to update. If a field is listed in ``fields`` and is ``None`` in table, it will be deleted. .. seealso:: For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:BigQueryUpdateTableOperator` :param dataset_id: The id of dataset. Don't need to provide, if datasetId in table_reference. :param table_id: The id of table. Don't need to provide, if tableId in table_reference. :param table_resource: Dataset resource that will be provided with request body. https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#resource :param fields: The fields of ``table`` to change, spelled as the Table properties (e.g. "friendly_name"). :param project_id: The name of the project where we want to create the table. Don't need to provide, if projectId in table_reference. :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud. :param impersonation_chain: Optional service account to impersonate using short-term credentials, or chained list of accounts required to get the access_token of the last account in the list, which will be impersonated in the request. If set as a string, the account must grant the originating account the Service Account Token Creator IAM role. If set as a sequence, the identities from the list must grant Service Account Token Creator IAM role to the directly preceding identity, with first account from the list granting this role to the originating account (templated). """ template_fields: Sequence[str] = ( "dataset_id", "table_id", "project_id", "gcp_conn_id", "impersonation_chain", ) template_fields_renderers = {"table_resource": "json"} ui_color = BigQueryUIColors.TABLE.value operator_extra_links = (BigQueryTableLink(),) def __init__( self, *, table_resource: dict[str, Any], fields: list[str] | None = None, dataset_id: str | None = None, table_id: str | None = None, project_id: str = PROVIDE_PROJECT_ID, gcp_conn_id: str = "google_cloud_default", impersonation_chain: str | Sequence[str] | None = None, **kwargs, ) -> None: self.dataset_id = dataset_id self.table_id = table_id self.project_id = project_id self.fields = fields self.gcp_conn_id = gcp_conn_id self.table_resource = table_resource self.impersonation_chain = impersonation_chain self._table: dict | None = None super().__init__(**kwargs) def execute(self, context: Context): bq_hook = BigQueryHook( gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain, ) # Save table as attribute for further use by OpenLineage self._table = bq_hook.update_table( table_resource=self.table_resource, fields=self.fields, dataset_id=self.dataset_id, table_id=self.table_id, project_id=self.project_id, ) if self._table: BigQueryTableLink.persist( context=context, dataset_id=self._table["tableReference"]["datasetId"], project_id=self._table["tableReference"]["projectId"], table_id=self._table["tableReference"]["tableId"], ) return self._table def get_openlineage_facets_on_complete(self, _): """Implement _on_complete as we will use table resource returned by update method.""" from airflow.providers.common.compat.openlineage.facet import Dataset from airflow.providers.google.cloud.openlineage.utils import ( BIGQUERY_NAMESPACE, get_facets_from_bq_table, ) from airflow.providers.openlineage.extractors import OperatorLineage table = Table.from_api_repr(self._table) output_dataset = Dataset( namespace=BIGQUERY_NAMESPACE, 
name=f"{table.project}.{table.dataset_id}.{table.table_id}", facets=get_facets_from_bq_table(table), ) return OperatorLineage(outputs=[output_dataset])
BigQueryUpdateTableOperator
python
dagster-io__dagster
python_modules/dagster/dagster/_core/events/__init__.py
{ "start": 63552, "end": 64902 }
class ____(
    NamedTuple(
        "AssetFailedToMaterializeData",
        [
            ("asset_materialization_failure", AssetMaterializationFailure),
            ("error", Optional[SerializableErrorInfo]),
        ],
    )
):
    def __new__(
        cls,
        asset_materialization_failure: AssetMaterializationFailure,
        error: Optional[SerializableErrorInfo] = None,
    ):
        return super().__new__(
            cls,
            asset_materialization_failure=check.inst_param(
                asset_materialization_failure,
                "asset_materialization_failure",
                AssetMaterializationFailure,
            ),
            error=truncate_event_error_info(
                check.opt_inst_param(error, "error", SerializableErrorInfo)
            ),
        )

    @property
    def asset_key(self) -> AssetKey:
        return self.asset_materialization_failure.asset_key

    @property
    def partition(self) -> Optional[str]:
        return self.asset_materialization_failure.partition

    @property
    def failure_type(self) -> AssetMaterializationFailureType:
        return self.asset_materialization_failure.failure_type

    @property
    def reason(self) -> AssetMaterializationFailureReason:
        return self.asset_materialization_failure.reason


@whitelist_for_serdes
AssetFailedToMaterializeData
python
django-import-export__django-import-export
tests/core/tests/test_widgets.py
{ "start": 9752, "end": 10126 }
class ____(TestCase):
    def setUp(self):
        self.datetime = datetime(1868, 8, 13)
        self.widget = widgets.DateTimeWidget("%d.%m.%Y")

    def test_render(self):
        self.assertEqual("13.08.1868", self.widget.render(self.datetime))

    def test_clean(self):
        self.assertEqual(self.datetime, self.widget.clean("13.08.1868"))
DateTimeWidgetBefore1900Test
python
redis__redis-py
redis/commands/bf/commands.py
{ "start": 1417, "end": 5921 }
class ____: """Bloom Filter commands.""" def create(self, key, errorRate, capacity, expansion=None, noScale=None): """ Create a new Bloom Filter `key` with desired probability of false positives `errorRate` expected entries to be inserted as `capacity`. Default expansion value is 2. By default, filter is auto-scaling. For more information see `BF.RESERVE <https://redis.io/commands/bf.reserve>`_. """ # noqa params = [key, errorRate, capacity] self.append_expansion(params, expansion) self.append_no_scale(params, noScale) return self.execute_command(BF_RESERVE, *params) reserve = create def add(self, key, item): """ Add to a Bloom Filter `key` an `item`. For more information see `BF.ADD <https://redis.io/commands/bf.add>`_. """ # noqa return self.execute_command(BF_ADD, key, item) def madd(self, key, *items): """ Add to a Bloom Filter `key` multiple `items`. For more information see `BF.MADD <https://redis.io/commands/bf.madd>`_. """ # noqa return self.execute_command(BF_MADD, key, *items) def insert( self, key, items, capacity=None, error=None, noCreate=None, expansion=None, noScale=None, ): """ Add to a Bloom Filter `key` multiple `items`. If `nocreate` remain `None` and `key` does not exist, a new Bloom Filter `key` will be created with desired probability of false positives `errorRate` and expected entries to be inserted as `size`. For more information see `BF.INSERT <https://redis.io/commands/bf.insert>`_. """ # noqa params = [key] self.append_capacity(params, capacity) self.append_error(params, error) self.append_expansion(params, expansion) self.append_no_create(params, noCreate) self.append_no_scale(params, noScale) self.append_items(params, items) return self.execute_command(BF_INSERT, *params) def exists(self, key, item): """ Check whether an `item` exists in Bloom Filter `key`. For more information see `BF.EXISTS <https://redis.io/commands/bf.exists>`_. """ # noqa return self.execute_command(BF_EXISTS, key, item) def mexists(self, key, *items): """ Check whether `items` exist in Bloom Filter `key`. For more information see `BF.MEXISTS <https://redis.io/commands/bf.mexists>`_. """ # noqa return self.execute_command(BF_MEXISTS, key, *items) def scandump(self, key, iter): """ Begin an incremental save of the bloom filter `key`. This is useful for large bloom filters which cannot fit into the normal SAVE and RESTORE model. The first time this command is called, the value of `iter` should be 0. This command will return successive (iter, data) pairs until (0, NULL) to indicate completion. For more information see `BF.SCANDUMP <https://redis.io/commands/bf.scandump>`_. """ # noqa params = [key, iter] options = {} options[NEVER_DECODE] = [] return self.execute_command(BF_SCANDUMP, *params, **options) def loadchunk(self, key, iter, data): """ Restore a filter previously saved using SCANDUMP. See the SCANDUMP command for example usage. This command will overwrite any bloom filter stored under key. Ensure that the bloom filter will not be modified between invocations. For more information see `BF.LOADCHUNK <https://redis.io/commands/bf.loadchunk>`_. """ # noqa return self.execute_command(BF_LOADCHUNK, key, iter, data) def info(self, key): """ Return capacity, size, number of filters, number of items inserted, and expansion rate. For more information see `BF.INFO <https://redis.io/commands/bf.info>`_. 
""" # noqa return self.execute_command(BF_INFO, key) def card(self, key): """ Returns the cardinality of a Bloom filter - number of items that were added to a Bloom filter and detected as unique (items that caused at least one bit to be set in at least one sub-filter). For more information see `BF.CARD <https://redis.io/commands/bf.card>`_. """ # noqa return self.execute_command(BF_CARD, key)
BFCommands
python
pypa__pip
src/pip/_vendor/urllib3/response.py
{ "start": 4249, "end": 30641 }
class ____(io.IOBase): """ HTTP Response container. Backwards-compatible with :class:`http.client.HTTPResponse` but the response ``body`` is loaded and decoded on-demand when the ``data`` property is accessed. This class is also compatible with the Python standard library's :mod:`io` module, and can hence be treated as a readable object in the context of that framework. Extra parameters for behaviour not present in :class:`http.client.HTTPResponse`: :param preload_content: If True, the response's body will be preloaded during construction. :param decode_content: If True, will attempt to decode the body based on the 'content-encoding' header. :param original_response: When this HTTPResponse wrapper is generated from an :class:`http.client.HTTPResponse` object, it's convenient to include the original for debug purposes. It's otherwise unused. :param retries: The retries contains the last :class:`~urllib3.util.retry.Retry` that was used during the request. :param enforce_content_length: Enforce content length checking. Body returned by server must match value of Content-Length header, if present. Otherwise, raise error. """ CONTENT_DECODERS = ["gzip", "deflate"] if brotli is not None: CONTENT_DECODERS += ["br"] REDIRECT_STATUSES = [301, 302, 303, 307, 308] def __init__( self, body="", headers=None, status=0, version=0, reason=None, strict=0, preload_content=True, decode_content=True, original_response=None, pool=None, connection=None, msg=None, retries=None, enforce_content_length=False, request_method=None, request_url=None, auto_close=True, ): if isinstance(headers, HTTPHeaderDict): self.headers = headers else: self.headers = HTTPHeaderDict(headers) self.status = status self.version = version self.reason = reason self.strict = strict self.decode_content = decode_content self.retries = retries self.enforce_content_length = enforce_content_length self.auto_close = auto_close self._decoder = None self._body = None self._fp = None self._original_response = original_response self._fp_bytes_read = 0 self.msg = msg self._request_url = request_url if body and isinstance(body, (six.string_types, bytes)): self._body = body self._pool = pool self._connection = connection if hasattr(body, "read"): self._fp = body # Are we using the chunked-style of transfer encoding? self.chunked = False self.chunk_left = None tr_enc = self.headers.get("transfer-encoding", "").lower() # Don't incur the penalty of creating a list and then discarding it encodings = (enc.strip() for enc in tr_enc.split(",")) if "chunked" in encodings: self.chunked = True # Determine length of response self.length_remaining = self._init_length(request_method) # If requested, preload the body. if preload_content and not self._body: self._body = self.read(decode_content=decode_content) def get_redirect_location(self): """ Should we redirect and where to? :returns: Truthy redirect location string if we got a redirect status code and valid location. ``None`` if redirect status and no location. ``False`` if not a redirect status code. """ if self.status in self.REDIRECT_STATUSES: return self.headers.get("location") return False def release_conn(self): if not self._pool or not self._connection: return self._pool._put_conn(self._connection) self._connection = None def drain_conn(self): """ Read and discard any remaining HTTP response data in the response connection. Unread data in the HTTPResponse connection blocks the connection from being released back to the pool. 
""" try: self.read() except (HTTPError, SocketError, BaseSSLError, HTTPException): pass @property def data(self): # For backwards-compat with earlier urllib3 0.4 and earlier. if self._body: return self._body if self._fp: return self.read(cache_content=True) @property def connection(self): return self._connection def isclosed(self): return is_fp_closed(self._fp) def tell(self): """ Obtain the number of bytes pulled over the wire so far. May differ from the amount of content returned by :meth:``urllib3.response.HTTPResponse.read`` if bytes are encoded on the wire (e.g, compressed). """ return self._fp_bytes_read def _init_length(self, request_method): """ Set initial length value for Response content if available. """ length = self.headers.get("content-length") if length is not None: if self.chunked: # This Response will fail with an IncompleteRead if it can't be # received as chunked. This method falls back to attempt reading # the response before raising an exception. log.warning( "Received response with both Content-Length and " "Transfer-Encoding set. This is expressly forbidden " "by RFC 7230 sec 3.3.2. Ignoring Content-Length and " "attempting to process response as Transfer-Encoding: " "chunked." ) return None try: # RFC 7230 section 3.3.2 specifies multiple content lengths can # be sent in a single Content-Length header # (e.g. Content-Length: 42, 42). This line ensures the values # are all valid ints and that as long as the `set` length is 1, # all values are the same. Otherwise, the header is invalid. lengths = set([int(val) for val in length.split(",")]) if len(lengths) > 1: raise InvalidHeader( "Content-Length contained multiple " "unmatching values (%s)" % length ) length = lengths.pop() except ValueError: length = None else: if length < 0: length = None # Convert status to int for comparison # In some cases, httplib returns a status of "_UNKNOWN" try: status = int(self.status) except ValueError: status = 0 # Check for responses that shouldn't include a body if status in (204, 304) or 100 <= status < 200 or request_method == "HEAD": length = 0 return length def _init_decoder(self): """ Set-up the _decoder attribute if necessary. """ # Note: content-encoding value should be case-insensitive, per RFC 7230 # Section 3.2 content_encoding = self.headers.get("content-encoding", "").lower() if self._decoder is None: if content_encoding in self.CONTENT_DECODERS: self._decoder = _get_decoder(content_encoding) elif "," in content_encoding: encodings = [ e.strip() for e in content_encoding.split(",") if e.strip() in self.CONTENT_DECODERS ] if len(encodings): self._decoder = _get_decoder(content_encoding) DECODER_ERROR_CLASSES = (IOError, zlib.error) if brotli is not None: DECODER_ERROR_CLASSES += (brotli.error,) def _decode(self, data, decode_content, flush_decoder): """ Decode the data passed in and potentially flush the decoder. """ if not decode_content: return data try: if self._decoder: data = self._decoder.decompress(data) except self.DECODER_ERROR_CLASSES as e: content_encoding = self.headers.get("content-encoding", "").lower() raise DecodeError( "Received response with content-encoding: %s, but " "failed to decode it." % content_encoding, e, ) if flush_decoder: data += self._flush_decoder() return data def _flush_decoder(self): """ Flushes the decoder. Should only be called if the decoder is actually being used. 
""" if self._decoder: buf = self._decoder.decompress(b"") return buf + self._decoder.flush() return b"" @contextmanager def _error_catcher(self): """ Catch low-level python exceptions, instead re-raising urllib3 variants, so that low-level exceptions are not leaked in the high-level api. On exit, release the connection back to the pool. """ clean_exit = False try: try: yield except SocketTimeout: # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but # there is yet no clean way to get at it from this context. raise ReadTimeoutError(self._pool, None, "Read timed out.") except BaseSSLError as e: # FIXME: Is there a better way to differentiate between SSLErrors? if "read operation timed out" not in str(e): # SSL errors related to framing/MAC get wrapped and reraised here raise SSLError(e) raise ReadTimeoutError(self._pool, None, "Read timed out.") except (HTTPException, SocketError) as e: # This includes IncompleteRead. raise ProtocolError("Connection broken: %r" % e, e) # If no exception is thrown, we should avoid cleaning up # unnecessarily. clean_exit = True finally: # If we didn't terminate cleanly, we need to throw away our # connection. if not clean_exit: # The response may not be closed but we're not going to use it # anymore so close it now to ensure that the connection is # released back to the pool. if self._original_response: self._original_response.close() # Closing the response may not actually be sufficient to close # everything, so if we have a hold of the connection close that # too. if self._connection: self._connection.close() # If we hold the original response but it's closed now, we should # return the connection back to the pool. if self._original_response and self._original_response.isclosed(): self.release_conn() def _fp_read(self, amt): """ Read a response with the thought that reading the number of bytes larger than can fit in a 32-bit int at a time via SSL in some known cases leads to an overflow error that has to be prevented if `amt` or `self.length_remaining` indicate that a problem may happen. The known cases: * 3.8 <= CPython < 3.9.7 because of a bug https://github.com/urllib3/urllib3/issues/2513#issuecomment-1152559900. * urllib3 injected with pyOpenSSL-backed SSL-support. * CPython < 3.10 only when `amt` does not fit 32-bit int. """ assert self._fp c_int_max = 2 ** 31 - 1 if ( ( (amt and amt > c_int_max) or (self.length_remaining and self.length_remaining > c_int_max) ) and not util.IS_SECURETRANSPORT and (util.IS_PYOPENSSL or sys.version_info < (3, 10)) ): buffer = io.BytesIO() # Besides `max_chunk_amt` being a maximum chunk size, it # affects memory overhead of reading a response by this # method in CPython. # `c_int_max` equal to 2 GiB - 1 byte is the actual maximum # chunk size that does not lead to an overflow error, but # 256 MiB is a compromise. max_chunk_amt = 2 ** 28 while amt is None or amt != 0: if amt is not None: chunk_amt = min(amt, max_chunk_amt) amt -= chunk_amt else: chunk_amt = max_chunk_amt data = self._fp.read(chunk_amt) if not data: break buffer.write(data) del data # to reduce peak memory usage by `max_chunk_amt`. return buffer.getvalue() else: # StringIO doesn't like amt=None return self._fp.read(amt) if amt is not None else self._fp.read() def read(self, amt=None, decode_content=None, cache_content=False): """ Similar to :meth:`http.client.HTTPResponse.read`, but with two additional parameters: ``decode_content`` and ``cache_content``. :param amt: How much of the content to read. 
If specified, caching is skipped because it doesn't make sense to cache partial content as the full response. :param decode_content: If True, will attempt to decode the body based on the 'content-encoding' header. :param cache_content: If True, will save the returned data such that the same result is returned despite of the state of the underlying file object. This is useful if you want the ``.data`` property to continue working after having ``.read()`` the file object. (Overridden if ``amt`` is set.) """ self._init_decoder() if decode_content is None: decode_content = self.decode_content if self._fp is None: return flush_decoder = False fp_closed = getattr(self._fp, "closed", False) with self._error_catcher(): data = self._fp_read(amt) if not fp_closed else b"" if amt is None: flush_decoder = True else: cache_content = False if ( amt != 0 and not data ): # Platform-specific: Buggy versions of Python. # Close the connection when no data is returned # # This is redundant to what httplib/http.client _should_ # already do. However, versions of python released before # December 15, 2012 (http://bugs.python.org/issue16298) do # not properly close the connection in all cases. There is # no harm in redundantly calling close. self._fp.close() flush_decoder = True if self.enforce_content_length and self.length_remaining not in ( 0, None, ): # This is an edge case that httplib failed to cover due # to concerns of backward compatibility. We're # addressing it here to make sure IncompleteRead is # raised during streaming, so all calls with incorrect # Content-Length are caught. raise IncompleteRead(self._fp_bytes_read, self.length_remaining) if data: self._fp_bytes_read += len(data) if self.length_remaining is not None: self.length_remaining -= len(data) data = self._decode(data, decode_content, flush_decoder) if cache_content: self._body = data return data def stream(self, amt=2 ** 16, decode_content=None): """ A generator wrapper for the read() method. A call will block until ``amt`` bytes have been read from the connection or until the connection is closed. :param amt: How much of the content to read. The generator will return up to much data per iteration, but may return less. This is particularly likely when using compressed data. However, the empty string will never be returned. :param decode_content: If True, will attempt to decode the body based on the 'content-encoding' header. """ if self.chunked and self.supports_chunked_reads(): for line in self.read_chunked(amt, decode_content=decode_content): yield line else: while not is_fp_closed(self._fp): data = self.read(amt=amt, decode_content=decode_content) if data: yield data @classmethod def from_httplib(ResponseCls, r, **response_kw): """ Given an :class:`http.client.HTTPResponse` instance ``r``, return a corresponding :class:`urllib3.response.HTTPResponse` object. Remaining parameters are passed to the HTTPResponse constructor, along with ``original_response=r``. 
""" headers = r.msg if not isinstance(headers, HTTPHeaderDict): if six.PY2: # Python 2.7 headers = HTTPHeaderDict.from_httplib(headers) else: headers = HTTPHeaderDict(headers.items()) # HTTPResponse objects in Python 3 don't have a .strict attribute strict = getattr(r, "strict", 0) resp = ResponseCls( body=r, headers=headers, status=r.status, version=r.version, reason=r.reason, strict=strict, original_response=r, **response_kw ) return resp # Backwards-compatibility methods for http.client.HTTPResponse def getheaders(self): warnings.warn( "HTTPResponse.getheaders() is deprecated and will be removed " "in urllib3 v2.1.0. Instead access HTTPResponse.headers directly.", category=DeprecationWarning, stacklevel=2, ) return self.headers def getheader(self, name, default=None): warnings.warn( "HTTPResponse.getheader() is deprecated and will be removed " "in urllib3 v2.1.0. Instead use HTTPResponse.headers.get(name, default).", category=DeprecationWarning, stacklevel=2, ) return self.headers.get(name, default) # Backwards compatibility for http.cookiejar def info(self): return self.headers # Overrides from io.IOBase def close(self): if not self.closed: self._fp.close() if self._connection: self._connection.close() if not self.auto_close: io.IOBase.close(self) @property def closed(self): if not self.auto_close: return io.IOBase.closed.__get__(self) elif self._fp is None: return True elif hasattr(self._fp, "isclosed"): return self._fp.isclosed() elif hasattr(self._fp, "closed"): return self._fp.closed else: return True def fileno(self): if self._fp is None: raise IOError("HTTPResponse has no file to get a fileno from") elif hasattr(self._fp, "fileno"): return self._fp.fileno() else: raise IOError( "The file-like object this HTTPResponse is wrapped " "around has no file descriptor" ) def flush(self): if ( self._fp is not None and hasattr(self._fp, "flush") and not getattr(self._fp, "closed", False) ): return self._fp.flush() def readable(self): # This method is required for `io` module compatibility. return True def readinto(self, b): # This method is required for `io` module compatibility. temp = self.read(len(b)) if len(temp) == 0: return 0 else: b[: len(temp)] = temp return len(temp) def supports_chunked_reads(self): """ Checks if the underlying file-like object looks like a :class:`http.client.HTTPResponse` object. We do this by testing for the fp attribute. If it is present we assume it returns raw chunks as processed by read_chunked(). """ return hasattr(self._fp, "fp") def _update_chunk_length(self): # First, we'll figure out length of a chunk and then # we'll try to read it from socket. if self.chunk_left is not None: return line = self._fp.fp.readline() line = line.split(b";", 1)[0] try: self.chunk_left = int(line, 16) except ValueError: # Invalid chunked protocol response, abort. self.close() raise InvalidChunkLength(self, line) def _handle_chunk(self, amt): returned_chunk = None if amt is None: chunk = self._fp._safe_read(self.chunk_left) returned_chunk = chunk self._fp._safe_read(2) # Toss the CRLF at the end of the chunk. self.chunk_left = None elif amt < self.chunk_left: value = self._fp._safe_read(amt) self.chunk_left = self.chunk_left - amt returned_chunk = value elif amt == self.chunk_left: value = self._fp._safe_read(amt) self._fp._safe_read(2) # Toss the CRLF at the end of the chunk. self.chunk_left = None returned_chunk = value else: # amt > self.chunk_left returned_chunk = self._fp._safe_read(self.chunk_left) self._fp._safe_read(2) # Toss the CRLF at the end of the chunk. 
self.chunk_left = None return returned_chunk def read_chunked(self, amt=None, decode_content=None): """ Similar to :meth:`HTTPResponse.read`, but with an additional parameter: ``decode_content``. :param amt: How much of the content to read. If specified, caching is skipped because it doesn't make sense to cache partial content as the full response. :param decode_content: If True, will attempt to decode the body based on the 'content-encoding' header. """ self._init_decoder() # FIXME: Rewrite this method and make it a class with a better structured logic. if not self.chunked: raise ResponseNotChunked( "Response is not chunked. " "Header 'transfer-encoding: chunked' is missing." ) if not self.supports_chunked_reads(): raise BodyNotHttplibCompatible( "Body should be http.client.HTTPResponse like. " "It should have have an fp attribute which returns raw chunks." ) with self._error_catcher(): # Don't bother reading the body of a HEAD request. if self._original_response and is_response_to_head(self._original_response): self._original_response.close() return # If a response is already read and closed # then return immediately. if self._fp.fp is None: return while True: self._update_chunk_length() if self.chunk_left == 0: break chunk = self._handle_chunk(amt) decoded = self._decode( chunk, decode_content=decode_content, flush_decoder=False ) if decoded: yield decoded if decode_content: # On CPython and PyPy, we should never need to flush the # decoder. However, on Jython we *might* need to, so # lets defensively do it anyway. decoded = self._flush_decoder() if decoded: # Platform-specific: Jython. yield decoded # Chunk content ends with \r\n: discard it. while True: line = self._fp.fp.readline() if not line: # Some sites may not end with '\r\n'. break if line == b"\r\n": break # We read everything; close the "file". if self._original_response: self._original_response.close() def geturl(self): """ Returns the URL that was the source of this response. If the request that generated this response redirected, this method will return the final redirect location. """ if self.retries is not None and len(self.retries.history): return self.retries.history[-1].redirect_location else: return self._request_url def __iter__(self): buffer = [] for chunk in self.stream(decode_content=True): if b"\n" in chunk: chunk = chunk.split(b"\n") yield b"".join(buffer) + chunk[0] + b"\n" for x in chunk[1:-1]: yield x + b"\n" if chunk[-1]: buffer = [chunk[-1]] else: buffer = [] else: buffer.append(chunk) if buffer: yield b"".join(buffer)
HTTPResponse
python
Pylons__pyramid
src/pyramid/httpexceptions.py
{ "start": 35277, "end": 35689 }
class ____(HTTPServerError):
    """
    subclass of :class:`~HTTPServerError`

    This indicates that the server does not support the functionality
    required to fulfill the request.

    code: 501, title: Not Implemented
    """

    # differences from webob.exc.HTTPNotAcceptable:
    #
    # - "template" attr left off (useless, bug in webob?)
    code = 501
    title = 'Not Implemented'
HTTPNotImplemented
python
Textualize__textual
docs/examples/guide/styles/colors01.py
{ "start": 112, "end": 706 }
class ____(App):
    def compose(self) -> ComposeResult:
        self.widget1 = Static("Textual One")
        yield self.widget1
        self.widget2 = Static("Textual Two")
        yield self.widget2
        self.widget3 = Static("Textual Three")
        yield self.widget3

    def on_mount(self) -> None:
        self.widget1.styles.background = "#9932CC"
        self.widget2.styles.background = "hsl(150,42.9%,49.4%)"
        self.widget2.styles.color = "blue"
        self.widget3.styles.background = Color(191, 78, 96)


if __name__ == "__main__":
    app = ColorApp()
    app.run()
ColorApp
python
getsentry__sentry
tests/sentry/notifications/api/endpoints/test_notification_actions_details.py
{ "start": 1574, "end": 20918 }
class ____(APITestCase): endpoint = "sentry-api-0-organization-notification-actions-details" def setUp(self) -> None: self.user = self.create_user("summoner@rift.io") self.organization = self.create_organization(name="league", owner=self.user) self.other_organization = self.create_organization(name="wild-rift", owner=self.user) self.team = self.create_team(name="games", organization=self.organization) self.projects = [ self.create_project(name="bilgewater", organization=self.organization), self.create_project(name="demacia", organization=self.organization), ] self.notif_action = self.create_notification_action( organization=self.organization, projects=self.projects ) self.base_data: MutableMapping[str, Any] = { "serviceType": "email", "triggerType": "audit-log", "targetType": "specific", "targetDisplay": "@pyke", "targetIdentifier": "555", } self.login_as(user=self.user) def mock_msg_schedule_response(self, channel_id, result_name="channel"): body = { "ok": True, result_name: channel_id, "scheduled_message_id": "Q1298393284", } return mock_slack_response("chat_scheduleMessage", body=body) def mock_msg_delete_scheduled_response(self, channel_id, result_name="channel"): if channel_id == "channel_not_found": body = {"ok": False, "error": "channel_not_found"} else: body = { "ok": True, } return mock_slack_response("chat_deleteScheduledMessage", body=body) def test_requires_organization_access(self) -> None: for method in ["GET", "PUT", "DELETE"]: self.get_error_response( self.other_organization.slug, self.notif_action.id, status_code=status.HTTP_404_NOT_FOUND, method=method, ) def test_requires_project_access(self) -> None: """ This only tests 'GET' since members aren't granted project:write scopes so they 403 before reaching any endpoint logic (for PUT/DELETE) """ self.organization.flags = 0 self.organization.save() action = self.create_notification_action( organization=self.organization, projects=[self.create_project(organization=self.organization)], ) user = self.create_user("ruinedking@rift.com") self.create_member(user=user, organization=self.organization, role="admin") self.login_as(user=user) self.get_error_response( self.organization.slug, action.id, status_code=status.HTTP_403_FORBIDDEN, ) def test_get_simple(self) -> None: response = self.get_success_response( self.organization.slug, self.notif_action.id, status_code=status.HTTP_200_OK ) assert response.data == serialize(self.notif_action) def test_put_missing_action(self) -> None: self.get_error_response( self.organization.slug, -1, status_code=status.HTTP_404_NOT_FOUND, method="PUT", ) def test_put_missing_fields(self) -> None: required_fields = ["serviceType", "triggerType"] response = self.get_error_response( self.organization.slug, self.notif_action.id, status_code=status.HTTP_400_BAD_REQUEST, method="PUT", ) for field in required_fields: assert field in response.data def test_put_invalid_types(self) -> None: invalid_types: MutableMapping[str, Any] = { "serviceType": "hexgate", "triggerType": "ruination", "targetType": "igl", } for type_key, invalid_value in invalid_types.items(): data = {**self.base_data} data[type_key] = invalid_value response = self.get_error_response( self.organization.slug, self.notif_action.id, status_code=status.HTTP_400_BAD_REQUEST, method="PUT", **data, ) assert type_key in response.data def test_put_invalid_integration(self) -> None: data = {**self.base_data} # Unknown integration data["integrationId"] = -1 response = self.get_error_response( self.organization.slug, self.notif_action.id, 
status_code=status.HTTP_400_BAD_REQUEST, method="PUT", **data, ) assert "integrationId" in response.data # Integration from another organization integration = self.create_integration( organization=self.other_organization, external_id="m0b1l3" ) data["integrationId"] = integration.id response = self.get_error_response( self.organization.slug, self.notif_action.id, status_code=status.HTTP_400_BAD_REQUEST, method="PUT", **data, ) assert "integrationId" in response.data def test_put_invalid_projects(self) -> None: data = {**self.base_data} # Unknown project data["projects"] = ["piltover"] response = self.get_error_response( self.organization.slug, self.notif_action.id, status_code=status.HTTP_400_BAD_REQUEST, method="PUT", **data, ) assert "projects" in response.data # Project from another organization project = self.create_project(name="zaun", organization=self.other_organization) data["projects"] = [project.slug] response = self.get_error_response( self.organization.slug, self.notif_action.id, status_code=status.HTTP_400_BAD_REQUEST, method="PUT", **data, ) assert "projects" in response.data def test_put_no_project_access(self) -> None: user = self.create_user("tft@rift.com") self.create_member(user=user, organization=self.organization) self.login_as(user) data = { **self.base_data, "projects": [p.slug for p in self.projects], } self.get_error_response( self.organization.slug, self.notif_action.id, status_code=status.HTTP_403_FORBIDDEN, method="PUT", **data, ) @patch.dict(NotificationAction._registry, {}) def test_put_raises_validation_from_registry(self) -> None: error_message = "oops-missed-cannon" class MockActionRegistration(ActionRegistration): validate_action = MagicMock(side_effect=serializers.ValidationError(error_message)) def fire(self, data: Any) -> None: raise NotImplementedError registration = MockActionRegistration _mock_register(self.base_data)(registration) response = self.get_error_response( self.organization.slug, self.notif_action.id, status_code=status.HTTP_400_BAD_REQUEST, method="PUT", **self.base_data, ) assert error_message in str(response.data) @patch.dict(NotificationAction._registry, {}) def test_put_with_slack_validation(self) -> None: class MockActionRegistration(ActionRegistration): def fire(self, data: Any) -> None: raise NotImplementedError channel_name = "journal" channel_id = "CABC123" integration = install_slack(organization=self.organization) data = { "triggerType": "audit-log", "targetType": "specific", "serviceType": "slack", "integrationId": integration.id, "targetDisplay": f"#{channel_name}", } _mock_register(data)(MockActionRegistration) with self.mock_msg_schedule_response(channel_id): with self.mock_msg_delete_scheduled_response(channel_id): response = self.get_success_response( self.organization.slug, self.notif_action.id, status_code=status.HTTP_202_ACCEPTED, method="PUT", **data, ) assert response.data["targetIdentifier"] == channel_id @patch.dict(NotificationAction._registry, {}) def test_put_with_pagerduty_validation(self) -> None: class MockActionRegistration(ActionRegistration): def fire(self, data: Any) -> None: raise NotImplementedError service_name = "palace" integration = self.create_integration( organization=self.organization, external_id="pd-id", provider="pagerduty", name="dream" ) second_integration = self.create_integration( organization=self.organization, external_id="pd-id-2", provider="pagerduty", name="nail" ) data = { "triggerType": "audit-log", "targetType": "specific", "serviceType": "pagerduty", "integrationId": 
integration.id, "targetDisplay": "incorrect_service_name", } _mock_register(data)(MockActionRegistration) # Didn't provide a targetIdentifier key response = self.get_error_response( self.organization.slug, self.notif_action.id, status_code=status.HTTP_400_BAD_REQUEST, method="PUT", **data, ) assert "Did not recieve PagerDuty service id" in str(response.data["targetIdentifier"]) with assume_test_silo_mode(SiloMode.CONTROL): org_integration = second_integration.organizationintegration_set.first() assert org_integration is not None, "org integration needs to exist!" service = add_service( org_integration, service_name=service_name, integration_key="abc", ) data["targetIdentifier"] = service["id"] response = self.get_error_response( self.organization.slug, self.notif_action.id, status_code=status.HTTP_400_BAD_REQUEST, method="PUT", **data, ) assert "ensure Sentry has access" in str(response.data["targetIdentifier"]) with assume_test_silo_mode(SiloMode.CONTROL): org_integration = integration.organizationintegration_set.first() assert org_integration is not None, "org integration needs to exist!" service = add_service( org_integration, service_name=service_name, integration_key="def", ) data["targetIdentifier"] = service["id"] response = self.get_success_response( self.organization.slug, self.notif_action.id, status_code=status.HTTP_202_ACCEPTED, method="PUT", **data, ) assert response.data["targetIdentifier"] == service["id"] assert response.data["targetDisplay"] == service["service_name"] @patch.dict(NotificationAction._registry, {}) def test_put_simple(self) -> None: class MockActionRegistration(ActionRegistration): validate_action = MagicMock() def fire(self, data: Any) -> None: raise NotImplementedError _mock_register(self.base_data)(MockActionRegistration) data = {**self.base_data} MockActionRegistration.validate_action.assert_not_called() response = self.get_success_response( self.organization.slug, self.notif_action.id, status_code=status.HTTP_202_ACCEPTED, method="PUT", **data, ) # Response contains input data assert data.items() <= response.data.items() # Database reflects changes MockActionRegistration.validate_action.assert_called() self.notif_action.refresh_from_db() assert response.data == serialize(self.notif_action) # Relation table has been updated assert not NotificationActionProject.objects.filter(action_id=self.notif_action.id).exists() @patch.dict(NotificationAction._registry, {}) def test_put_org_member(self) -> None: user = self.create_user() self.create_member(organization=self.organization, user=user, teams=[self.team]) self.login_as(user) data = {**self.base_data} self.get_error_response( self.organization.slug, self.notif_action.id, status_code=status.HTTP_403_FORBIDDEN, method="PUT", **data, ) @patch.dict(NotificationAction._registry, {}) def test_put_org_admin(self) -> None: user = self.create_user() self.create_member(organization=self.organization, user=user, role="admin") self.login_as(user) self.test_put_simple() @patch.dict(NotificationAction._registry, {}) def test_put_team_admin(self) -> None: user = self.create_user() member = self.create_member(organization=self.organization, user=user, role="member") OrganizationMemberTeam.objects.create( team=self.team, organizationmember=member, role="admin" ) self.login_as(user) self.test_put_simple() def test_delete_invalid_action(self) -> None: self.get_error_response( self.organization.slug, -1, status_code=status.HTTP_404_NOT_FOUND, method="DELETE", ) action = 
self.create_notification_action(organization=self.other_organization) self.get_error_response( self.organization.slug, action.id, status_code=status.HTTP_404_NOT_FOUND, method="DELETE", ) assert NotificationAction.objects.filter(id=action.id).exists() def test_delete_simple(self) -> None: assert NotificationAction.objects.filter(id=self.notif_action.id).exists() self.get_success_response( self.organization.slug, self.notif_action.id, status_code=status.HTTP_204_NO_CONTENT, method="DELETE", ) assert not NotificationAction.objects.filter(id=self.notif_action.id).exists() def test_delete_manager(self) -> None: user = self.create_user() self.create_member(user=user, organization=self.organization, role="manager") self.login_as(user) self.test_delete_simple() def test_delete_org_member(self) -> None: user = self.create_user() self.create_member(user=user, organization=self.organization) self.login_as(user) self.get_error_response( self.organization.slug, self.notif_action.id, status_code=status.HTTP_403_FORBIDDEN, method="DELETE", ) def test_delete_org_admin(self) -> None: user = self.create_user() self.create_member(user=user, organization=self.organization, role="admin") self.login_as(user) self.test_delete_simple() def test_delete_team_admin(self) -> None: user = self.create_user() member = self.create_member(organization=self.organization, user=user, role="member") OrganizationMemberTeam.objects.create( team=self.team, organizationmember=member, role="admin" ) self.login_as(user) self.test_delete_simple() def test_get_respects_multiple_project_access(self) -> None: # Disable open membership self.organization.flags.allow_joinleave = False self.organization.save() # Create a user, team, project user = self.create_user() self.create_member(user=user, organization=self.organization) team = self.create_team(name="mobile", organization=self.organization, members=[user]) access_project = self.create_project( organization=self.organization, teams=[team], name="ionia" ) # Attempt to access a different project's action, should fail since open membership is off self.login_as(user) self.get_error_response( self.organization.slug, self.notif_action.id, status_code=status.HTTP_403_FORBIDDEN, method="GET", ) # Add the project to the action to allow it to succeed now self.notif_action.projects.add(access_project) self.notif_action.save() self.get_success_response( self.organization.slug, self.notif_action.id, status_code=status.HTTP_200_OK, method="GET", ) def test_delete_respects_multiple_project_access(self) -> None: # Disable open membership self.organization.flags.allow_joinleave = False self.organization.save() # Create a user, team, project user = self.create_user() member = self.create_member(user=user, organization=self.organization) team = self.create_team(name="mobile", organization=self.organization) # Unrelated team, shouldn't affect anything self.create_team(name="desktop", organization=self.organization, members=[user]) team_membership = OrganizationMemberTeam.objects.create( team=team, organizationmember=member, role="contributor" ) access_project = self.create_project( organization=self.organization, teams=[team], name="ionia" ) # Attempt to access a different project's action, should fail since open membership is off self.login_as(user) self.get_error_response( self.organization.slug, self.notif_action.id, status_code=status.HTTP_403_FORBIDDEN, method="DELETE", ) # Add the project to the action should still fail, without write access to other projects 
self.notif_action.projects.add(access_project) self.notif_action.save() self.get_error_response( self.organization.slug, self.notif_action.id, status_code=status.HTTP_403_FORBIDDEN, method="DELETE", ) # Giving the team access to the other projects will still fail if the user is a contributor for project in self.projects: project.add_team(team) self.get_error_response( self.organization.slug, self.notif_action.id, status_code=status.HTTP_403_FORBIDDEN, method="DELETE", ) # Giving the user admin access to the team will finally allow it to succeed team_membership.role = "admin" team_membership.save() self.get_success_response( self.organization.slug, self.notif_action.id, status_code=status.HTTP_204_NO_CONTENT, method="DELETE", )
NotificationActionsDetailsEndpointTest
python
google__python-fire
fire/decorators_test.py
{ "start": 1450, "end": 1660 }
class ____:
    @decorators.SetParseFns(arg1=str)
    def example4(self, arg1, arg2):
        return arg1, arg2

    @decorators.SetParseFns(arg2=str)
    def example5(self, arg1, arg2):
        return arg1, arg2
PartialParseFn
python
google__jax
jax/experimental/jax2tf/tests/jax2tf_test.py
{ "start": 60228, "end": 61297 }
class ____(JaxToTfTestCase):
    def test_key_argument(self):
        func = lambda key: jax.random.uniform(key, ())
        key = jax.random.PRNGKey(0)
        key_raw = jax.random.key_data(key)
        with self.assertWarnsRegex(FutureWarning, "Raw arrays as random keys.*"):
            tf_result = jax2tf.convert(func)(key_raw)
        jax_result = func(key)
        self.assertEqual(tf_result, jax_result)

    def test_key_from_seed(self):
        func = lambda seed: jax.random.uniform(jax.random.PRNGKey(seed), ())
        seed = 1701
        tf_result = jax2tf.convert(func)(seed)
        jax_result = func(seed)
        self.assertEqual(tf_result, jax_result)

    def test_key_closure(self):
        def func():
            # Include nontrivial shape operations to catch tracing bugs.
            key = global_key.reshape(1).squeeze()
            return jax.random.uniform(key)

        global_key = jax.random.PRNGKey(0)
        tf_result = jax2tf.convert(func)()
        jax_result = func()
        self.assertEqual(tf_result, jax_result)


@unittest.skipIf(tf is None, "Test requires tensorflow")
@jtu.thread_unsafe_test_class()
Jax2tfWithCustomPRNGTest
python
networkx__networkx
networkx/generators/tests/test_geometric.py
{ "start": 2527, "end": 5868 }
class ____:
    """Unit tests for :func:`~networkx.soft_random_geometric_graph`"""

    def test_number_of_nodes(self):
        G = nx.soft_random_geometric_graph(50, 0.25, seed=42)
        assert len(G) == 50
        G = nx.soft_random_geometric_graph(range(50), 0.25, seed=42)
        assert len(G) == 50

    def test_distances(self):
        """Tests that pairs of vertices adjacent if and only if they are within
        the prescribed radius.
        """
        # Use the Euclidean metric, the default according to the
        # documentation.
        G = nx.soft_random_geometric_graph(50, 0.25)
        for u, v in combinations(G, 2):
            # Adjacent vertices must be within the given distance.
            if v in G[u]:
                assert math.dist(G.nodes[u]["pos"], G.nodes[v]["pos"]) <= 0.25

    def test_p(self):
        """Tests for providing an alternate distance metric to the generator."""
        # Use the L1 metric.
        def dist(x, y):
            return sum(abs(a - b) for a, b in zip(x, y))

        G = nx.soft_random_geometric_graph(50, 0.25, p=1)
        for u, v in combinations(G, 2):
            # Adjacent vertices must be within the given distance.
            if v in G[u]:
                assert dist(G.nodes[u]["pos"], G.nodes[v]["pos"]) <= 0.25

    def test_node_names(self):
        """Tests using values other than sequential numbers as node IDs."""
        import string

        nodes = list(string.ascii_lowercase)
        G = nx.soft_random_geometric_graph(nodes, 0.25)
        assert len(G) == len(nodes)
        for u, v in combinations(G, 2):
            # Adjacent vertices must be within the given distance.
            if v in G[u]:
                assert math.dist(G.nodes[u]["pos"], G.nodes[v]["pos"]) <= 0.25

    def test_p_dist_default(self):
        """Tests default p_dict = 0.5 returns graph with edge count <= RGG
        with same n, radius, dim and positions
        """
        nodes = 50
        dim = 2
        pos = {v: [random.random() for i in range(dim)] for v in range(nodes)}
        RGG = nx.random_geometric_graph(50, 0.25, pos=pos)
        SRGG = nx.soft_random_geometric_graph(50, 0.25, pos=pos)
        assert len(SRGG.edges()) <= len(RGG.edges())

    def test_p_dist_zero(self):
        """Tests if p_dict = 0 returns disconnected graph with 0 edges"""

        def p_dist(dist):
            return 0

        G = nx.soft_random_geometric_graph(50, 0.25, p_dist=p_dist)
        assert len(G.edges) == 0

    def test_pos_name(self):
        G = nx.soft_random_geometric_graph(50, 0.25, seed=42, pos_name="coords")
        assert all(len(d["coords"]) == 2 for n, d in G.nodes.items())


def join(G, u, v, theta, alpha, metric):
    """Returns ``True`` if and only if the nodes whose attributes are
    ``du`` and ``dv`` should be joined, according to the threshold
    condition for geographical threshold graphs.

    ``G`` is an undirected NetworkX graph, and ``u`` and ``v`` are nodes in
    that graph. The nodes must have node attributes ``'pos'`` and
    ``'weight'``.

    ``metric`` is a distance metric.
    """
    du, dv = G.nodes[u], G.nodes[v]
    u_pos, v_pos = du["pos"], dv["pos"]
    u_weight, v_weight = du["weight"], dv["weight"]
    return (u_weight + v_weight) * metric(u_pos, v_pos) ** alpha >= theta
TestSoftRandomGeometricGraph
python
huggingface__transformers
tests/models/emu3/test_modeling_emu3.py
{ "start": 9813, "end": 11303 }
class ____(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            Emu3Model,
            Emu3ForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"any-to-any": Emu3ForConditionalGeneration, "image-text-to-text": Emu3ForConditionalGeneration}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = Emu3Vision2TextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Emu3Config, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(
        "Emu3 has a VQ module that uses `weight.data` directly in forward which prevent offloding on that module"
    )
    def test_disk_offload_safetensors(self):
        pass

    @unittest.skip(
        "Emu3 has a VQ module that uses `weight.data` directly in forward which prevent offloding on that module"
    )
    def test_disk_offload_bin(self):
        pass

    @unittest.skip(
        "Emu3 has a VQ module that uses `weight.data` directly in forward which prevent offloding on that module"
    )
    def test_cpu_offload(self):
        pass

    @pytest.mark.generate
    @unittest.skip("Emu3 has dynamic control flow in vision backbone")
    def test_generate_with_static_cache(self):
        pass


@require_torch
Emu3Vision2TextModelTest
python
getsentry__sentry
src/sentry/dynamic_sampling/rules/utils.py
{ "start": 3102, "end": 3222 }
class ____(TypedDict):
    type: SamplingValueType
    value: NotRequired[float]
    limit: NotRequired[int]
SamplingValue
python
kamyu104__LeetCode-Solutions
Python/maximum-strictly-increasing-cells-in-a-matrix.py
{ "start": 82, "end": 784 }
class ____(object):
    def maxIncreasingCells(self, mat):
        """
        :type mat: List[List[int]]
        :rtype: int
        """
        lookup = collections.defaultdict(list)
        for i in xrange(len(mat)):
            for j in xrange(len(mat[0])):
                lookup[mat[i][j]].append((i, j))
        dp = [[0]*len(mat[0]) for _ in xrange(len(mat))]
        row, col = [0]*len(mat), [0]*len(mat[0])
        for x in sorted(lookup.iterkeys()):
            for i, j in lookup[x]:
                dp[i][j] = max(row[i], col[j])+1
            for i, j in lookup[x]:
                row[i] = max(row[i], dp[i][j])
                col[j] = max(col[j], dp[i][j])
        return max(row)
Solution
python
huggingface__transformers
src/transformers/models/modernbert_decoder/modeling_modernbert_decoder.py
{ "start": 29060, "end": 33936 }
class ____(ModernBertDecoderPreTrainedModel): def __init__(self, config: ModernBertDecoderConfig): super().__init__(config) self.num_labels = config.num_labels self.model = ModernBertDecoderModel(config) self.head = ModernBertDecoderPredictionHead(config) self.classifier = nn.Linear(config.hidden_size, config.num_labels, bias=config.classifier_bias) self.drop = torch.nn.Dropout(config.classifier_dropout) # Initialize weights and apply final processing self.post_init() @can_return_tuple @auto_docstring(checkpoint="blab-jhu/test-32m-dec") def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, **kwargs, ) -> Union[tuple, SequenceClassifierOutputWithPast]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ transformer_outputs = self.model( input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, **kwargs, ) hidden_states = transformer_outputs[0] hidden_states = self.drop(self.head(hidden_states)) logits = self.classifier(hidden_states) if input_ids is not None: batch_size, sequence_length = input_ids.shape[:2] else: batch_size, sequence_length = inputs_embeds.shape[:2] if self.config.pad_token_id is None and batch_size != 1: raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") if self.config.pad_token_id is None: last_non_pad_token = -1 elif input_ids is not None: # To handle both left- and right- padding, we take the rightmost token that is not equal to pad_token_id non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32) token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32) last_non_pad_token = (token_indices * non_pad_mask).argmax(-1) else: last_non_pad_token = -1 logger.warning_once( f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. 
Results may be " "unexpected if using padding tokens in conjunction with `inputs_embeds.`" ) pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token] loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) return SequenceClassifierOutputWithPast( loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) __all__ = [ "ModernBertDecoderModel", "ModernBertDecoderPreTrainedModel", "ModernBertDecoderForCausalLM", "ModernBertDecoderForSequenceClassification", ]
ModernBertDecoderForSequenceClassification
python
jazzband__django-redis
django_redis/serializers/json.py
{ "start": 155, "end": 425 }
class ____(BaseSerializer):
    encoder_class = DjangoJSONEncoder

    def dumps(self, value: Any) -> bytes:
        return json.dumps(value, cls=self.encoder_class).encode()

    def loads(self, value: bytes) -> Any:
        return json.loads(value.decode())
JSONSerializer
python
vyperlang__vyper
vyper/semantics/types/user.py
{ "start": 955, "end": 1805 }
class ____(VyperType):
    def __init__(self, members=None):
        super().__init__(members=members)
        if members is not None:
            for mt in members.values():
                if not mt.is_valid_member_type:
                    raise StructureException(f"not a valid {self.typeclass} member: {mt}")

    def __eq__(self, other):
        return self is other

    def compare_type(self, other):
        # object exact comparison is a bit tricky here since we have
        # to be careful to construct any given user type exactly
        # only one time. however, the alternative requires reasoning
        # about both the name and source (module or json abi) of
        # the type.
        return self is other

    def __hash__(self):
        return hash(id(self))


# note: flag behaves a lot like uint256, or uints in general.
_UserType
python
allegroai__clearml
clearml/backend_api/services/v2_23/tasks.py
{ "start": 335444, "end": 337173 }
class ____(Response): """ Response of tasks.failed endpoint. :param updated: Number of tasks updated (0 or 1) :type updated: int :param fields: Updated fields names and values :type fields: dict """ _service = "tasks" _action = "failed" _version = "2.23" _schema = { "definitions": {}, "properties": { "fields": { "additionalProperties": True, "description": "Updated fields names and values", "type": ["object", "null"], }, "updated": { "description": "Number of tasks updated (0 or 1)", "enum": [0, 1], "type": ["integer", "null"], }, }, "type": "object", } def __init__(self, updated=None, fields=None, **kwargs): super(FailedResponse, self).__init__(**kwargs) self.updated = updated self.fields = fields @schema_property("updated") def updated(self): return self._property_updated @updated.setter def updated(self, value): if value is None: self._property_updated = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "updated", six.integer_types) self._property_updated = value @schema_property("fields") def fields(self): return self._property_fields @fields.setter def fields(self, value): if value is None: self._property_fields = None return self.assert_isinstance(value, "fields", (dict,)) self._property_fields = value
FailedResponse
python
marshmallow-code__apispec
tests/test_ext_marshmallow.py
{ "start": 9261, "end": 11242 }
class ____: @pytest.mark.parametrize("schema", [PetSchema, PetSchema()]) def test_can_use_schema_in_response(self, spec, schema): if spec.openapi_version.major < 3: resp = {"schema": schema} else: resp = {"content": {"application/json": {"schema": schema}}} spec.components.response("GetPetOk", resp) response = get_responses(spec)["GetPetOk"] if spec.openapi_version.major < 3: reference = response["schema"] else: reference = response["content"]["application/json"]["schema"] assert reference == build_ref(spec, "schema", "Pet") resolved_schema = spec.components.schemas["Pet"] assert resolved_schema["properties"]["id"]["type"] == "integer" assert resolved_schema["properties"]["name"]["type"] == "string" assert resolved_schema["properties"]["password"]["type"] == "string" @pytest.mark.parametrize("schema", [PetSchema, PetSchema()]) def test_can_use_schema_in_response_header(self, spec, schema): resp = {"headers": {"PetHeader": {"schema": schema}}} spec.components.response("GetPetOk", resp) response = get_responses(spec)["GetPetOk"] reference = response["headers"]["PetHeader"]["schema"] assert reference == build_ref(spec, "schema", "Pet") resolved_schema = spec.components.schemas["Pet"] assert resolved_schema["properties"]["id"]["type"] == "integer" assert resolved_schema["properties"]["name"]["type"] == "string" assert resolved_schema["properties"]["password"]["type"] == "string" @pytest.mark.parametrize("spec", ("3.0.0",), indirect=True) def test_content_without_schema(self, spec): resp = {"content": {"application/json": {"example": {"name": "Example"}}}} spec.components.response("GetPetOk", resp) response = get_responses(spec)["GetPetOk"] assert response == resp
TestComponentResponseHelper
python
pallets__werkzeug
src/werkzeug/exceptions.py
{ "start": 22845, "end": 23112 }
class ____(HTTPException): """*501* `Not Implemented` Raise if the application does not support the action requested by the browser. """ code = 501 description = "The server does not support the action requested by the browser."
NotImplemented
python
charliermarsh__ruff
crates/ruff_python_formatter/resources/test/fixtures/ruff/statement/class_definition.py
{ "start": 3765, "end": 4244 }
class ____(AltersData): """Represent a lazy database lookup for a set of objects.""" def as_manager(cls): # Address the circular dependency between `Queryset` and `Manager`. from django.db.models.manager import Manager manager = Manager.from_queryset(cls)() manager._built_with_as_manager = True return manager as_manager.queryset_only = True as_manager = classmethod(as_manager) # Decorators @decorator # comment
QuerySet
python
spack__spack
lib/spack/spack/cmd/buildcache.py
{ "start": 12343, "end": 31812 }
class ____(spack.error.SpackError): """Raised when a spec is not installed but picked to be packaged.""" def _specs_to_be_packaged( requested: List[Spec], things_to_install: str, build_deps: bool ) -> List[Spec]: """Collect all non-external with or without roots and dependencies""" if "dependencies" not in things_to_install: deptype = dt.NONE elif build_deps: deptype = dt.ALL else: deptype = dt.RUN | dt.LINK | dt.TEST specs = [ s for s in traverse.traverse_nodes( requested, root="package" in things_to_install, deptype=deptype, order="breadth", key=traverse.by_dag_hash, ) if not s.external ] specs.reverse() return specs def push_fn(args): """create a binary package and push it to a mirror""" if args.specs: roots = _matching_specs(spack.cmd.parse_specs(args.specs)) else: roots = spack.cmd.require_active_env(cmd_name="buildcache push").concrete_roots() mirror = args.mirror assert isinstance(mirror, spack.mirrors.mirror.Mirror) push_url = mirror.push_url # When neither --signed, --unsigned nor --key are specified, use the mirror's default. if args.signed is None and not args.key: unsigned = not mirror.signed else: unsigned = not (args.key or args.signed) # For OCI images, we require dependencies to be pushed for now. if spack.oci.image.is_oci_url(mirror.push_url) and not unsigned: tty.warn( "Code signing is currently not supported for OCI images. " "Use --unsigned to silence this warning." ) unsigned = True # Select a signing key, or None if unsigned. signing_key = ( None if unsigned else (args.key or spack.binary_distribution.select_signing_key()) ) specs = _specs_to_be_packaged( roots, things_to_install=args.things_to_install, build_deps=args.with_build_dependencies or not args.without_build_dependencies, ) if not args.private: specs = _skip_no_redistribute_for_public(specs) if len(specs) > 1: tty.info(f"Selected {len(specs)} specs to push to {push_url}") # Pushing not installed specs is an error. Either fail fast or populate the error list and # push installed package in best effort mode. failed: List[Tuple[Spec, BaseException]] = [] with spack.store.STORE.db.read_transaction(): if any(not s.installed for s in specs): specs, not_installed = stable_partition(specs, lambda s: s.installed) if args.fail_fast: raise PackagesAreNotInstalledError(not_installed) else: failed.extend( (s, PackageNotInstalledError("package not installed")) for s in not_installed ) # Warn about possible old binary mirror layout if not spack.oci.image.is_oci_url(mirror.push_url): check_mirror_for_layout(mirror) with spack.binary_distribution.make_uploader( mirror=mirror, force=args.force, update_index=args.update_index, signing_key=signing_key, base_image=args.base_image, ) as uploader: skipped, upload_errors = uploader.push(specs=specs) failed.extend(upload_errors) if skipped: if len(specs) == 1: tty.info("The spec is already in the buildcache. Use --force to overwrite it.") elif len(skipped) == len(specs): tty.info("All specs are already in the buildcache. 
Use --force to overwrite them.") else: tty.info( "The following {} specs were skipped as they already exist in the " "buildcache:\n" " {}\n" " Use --force to overwrite them.".format( len(skipped), ", ".join(elide_list([_format_spec(s) for s in skipped], 5)) ) ) if failed: if len(failed) == 1: raise failed[0][1] raise spack.error.SpackError( f"The following {len(failed)} errors occurred while pushing specs to the " "buildcache", "\n".join( elide_list( [ f" {_format_spec(spec)}: {e.__class__.__name__}: {e}" for spec, e in failed ], 5, ) ), ) # Finally tag all roots as a single image if requested. if args.tag: uploader.tag(args.tag, roots) def install_fn(args): """install from a binary package""" if not args.specs: tty.die("a spec argument is required to install from a buildcache") query = spack.binary_distribution.BinaryCacheQuery(all_architectures=args.otherarch) matches = spack.store.find(args.specs, multiple=args.multiple, query_fn=query) for match in matches: spack.binary_distribution.install_single_spec( match, unsigned=args.unsigned, force=args.force ) def list_fn(args): """list binary packages available from mirrors""" try: specs = spack.binary_distribution.update_cache_and_get_specs() except spack.binary_distribution.FetchCacheError as e: tty.die(e) if not args.allarch: arch = spack.spec.Spec.default_arch() specs = [s for s in specs if s.intersects(arch)] if args.specs: constraints = set(args.specs) specs = [s for s in specs if any(s.intersects(c) for c in constraints)] if sys.stdout.isatty(): builds = len(specs) tty.msg("%s." % plural(builds, "cached build")) if not builds and not args.allarch: tty.msg( "You can query all available architectures with:", "spack buildcache list --allarch", ) display_specs(specs, args, all_headers=True) def keys_fn(args): """get public keys available on mirrors""" spack.binary_distribution.get_keys(args.install, args.trust, args.force) def check_fn(args: argparse.Namespace): """check specs against remote binary mirror(s) to see if any need to be rebuilt this command uses the process exit code to indicate its result, specifically, if the exit code is non-zero, then at least one of the indicated specs needs to be rebuilt """ specs_arg = args.specs if specs_arg: specs = _matching_specs(spack.cmd.parse_specs(specs_arg)) else: specs = spack.cmd.require_active_env("buildcache check").all_specs() if not specs: tty.msg("No specs provided, exiting.") return specs = [spack.concretize.concretize_one(s) for s in specs] # Next see if there are any configured binary mirrors configured_mirrors = spack.config.get("mirrors", scope=args.scope) if args.mirror_url: configured_mirrors = {"additionalMirrorUrl": args.mirror_url} if not configured_mirrors: tty.msg("No mirrors provided, exiting.") return if ( spack.binary_distribution.check_specs_against_mirrors( configured_mirrors, specs, args.output_file ) == 1 ): sys.exit(1) def download_fn(args): """download buildcache entry from a remote mirror to local folder this command uses the process exit code to indicate its result, specifically, a non-zero exit code indicates that the command failed to download at least one of the required buildcache components """ specs = _matching_specs(spack.cmd.parse_specs(args.spec)) if len(specs) != 1: tty.die("a single spec argument is required to download from a buildcache") spack.binary_distribution.download_single_spec(specs[0], args.path) def save_specfile_fn(args): """get full spec for dependencies and write them to files in the specified output directory uses exit code to signal 
success or failure. an exit code of zero means the command was likely successful. if any errors or exceptions are encountered, or if expected command-line arguments are not provided, then the exit code will be non-zero """ specs = spack.cmd.parse_specs(args.root_spec) if len(specs) != 1: tty.die("a single spec argument is required to save specfile") root = specs[0] if not root.concrete: root = spack.concretize.concretize_one(root) save_dependency_specfiles( root, args.specfile_dir, dependencies=spack.cmd.parse_specs(args.specs) ) def copy_buildcache_entry(cache_entry: URLBuildcacheEntry, destination_url: str): """Download buildcache entry and copy it to the destination_url""" try: spec_dict = cache_entry.fetch_metadata() cache_entry.fetch_archive() except spack.binary_distribution.BuildcacheEntryError as e: tty.warn(f"Failed to retrieve buildcache for copying due to {e}") cache_entry.destroy() return spec_blob_record = cache_entry.get_blob_record(BuildcacheComponent.SPEC) local_spec_path = cache_entry.get_local_spec_path() tarball_blob_record = cache_entry.get_blob_record(BuildcacheComponent.TARBALL) local_tarball_path = cache_entry.get_local_archive_path() target_spec = spack.spec.Spec.from_dict(spec_dict) spec_label = f"{target_spec.name}/{target_spec.dag_hash()[:7]}" if not tarball_blob_record: cache_entry.destroy() raise BuildcacheEntryError(f"No source tarball blob record, failed to sync {spec_label}") # Try to push the tarball tarball_dest_url = cache_entry.get_blob_url(destination_url, tarball_blob_record) try: web_util.push_to_url(local_tarball_path, tarball_dest_url, keep_original=True) except Exception as e: tty.warn(f"Failed to push {local_tarball_path} to {tarball_dest_url} due to {e}") cache_entry.destroy() return if not spec_blob_record: cache_entry.destroy() raise BuildcacheEntryError(f"No source spec blob record, failed to sync {spec_label}") # Try to push the spec file spec_dest_url = cache_entry.get_blob_url(destination_url, spec_blob_record) try: web_util.push_to_url(local_spec_path, spec_dest_url, keep_original=True) except Exception as e: tty.warn(f"Failed to push {local_spec_path} to {spec_dest_url} due to {e}") cache_entry.destroy() return # Stage the manifest locally, since if it's signed, we don't want to try to # to reproduce that here. Instead just push the locally staged manifest to # the expected path at the destination url. manifest_src_url = cache_entry.remote_manifest_url manifest_dest_url = cache_entry.get_manifest_url(target_spec, destination_url) manifest_stage = spack.stage.Stage(manifest_src_url) try: manifest_stage.create() manifest_stage.fetch() except Exception as e: tty.warn(f"Failed to fetch manifest from {manifest_src_url} due to {e}") manifest_stage.destroy() cache_entry.destroy() return local_manifest_path = manifest_stage.save_filename try: web_util.push_to_url(local_manifest_path, manifest_dest_url, keep_original=True) except Exception as e: tty.warn(f"Failed to push manifest to {manifest_dest_url} due to {e}") manifest_stage.destroy() cache_entry.destroy() def sync_fn(args): """sync binaries (and associated metadata) from one mirror to another requires an active environment in order to know which specs to sync """ if args.manifest_glob: # Passing the args.src_mirror here because it is not possible to # have the destination be required when specifying a named source # mirror and optional for the --manifest-glob argument. In the case # of manifest glob sync, the source mirror positional argument is the # destination mirror if it is specified. 
If there are two mirrors # specified, the second is ignored and the first is the override # destination. if args.dest_mirror: tty.warn(f"Ignoring unused arguemnt: {args.dest_mirror.name}") manifest_copy(glob.glob(args.manifest_glob), args.src_mirror) return 0 if args.src_mirror is None or args.dest_mirror is None: tty.die("Provide mirrors to sync from and to.") src_mirror = args.src_mirror dest_mirror = args.dest_mirror src_mirror_url = src_mirror.fetch_url dest_mirror_url = dest_mirror.push_url # Get the active environment env = spack.cmd.require_active_env(cmd_name="buildcache sync") tty.msg( "Syncing environment buildcache files from {0} to {1}".format( src_mirror_url, dest_mirror_url ) ) tty.debug("Syncing the following specs:") specs_to_sync = [s for s in env.all_specs() if not s.external] for s in specs_to_sync: tty.debug(" {0}{1}: {2}".format("* " if s in env.roots() else " ", s.name, s.dag_hash())) cache_class = get_url_buildcache_class( layout_version=spack.binary_distribution.CURRENT_BUILD_CACHE_LAYOUT_VERSION ) src_cache_entry = cache_class(src_mirror_url, s, allow_unsigned=True) src_cache_entry.read_manifest() copy_buildcache_entry(src_cache_entry, dest_mirror_url) def manifest_copy( manifest_file_list: List[str], dest_mirror: Optional[spack.mirrors.mirror.Mirror] = None ): """Read manifest files containing information about specific specs to copy from source to destination, remove duplicates since any binary packge for a given hash should be the same as any other, and copy all files specified in the manifest files.""" deduped_manifest = {} for manifest_path in manifest_file_list: with open(manifest_path, encoding="utf-8") as fd: manifest = json.loads(fd.read()) for spec_hash, copy_obj in manifest.items(): # Last duplicate hash wins deduped_manifest[spec_hash] = copy_obj for spec_hash, copy_obj in deduped_manifest.items(): cache_class = get_url_buildcache_class( layout_version=spack.binary_distribution.CURRENT_BUILD_CACHE_LAYOUT_VERSION ) src_cache_entry = cache_class( cache_class.get_base_url(copy_obj["src"]), allow_unsigned=True ) src_cache_entry.read_manifest(manifest_url=copy_obj["src"]) if dest_mirror: destination_url = dest_mirror.push_url else: destination_url = cache_class.get_base_url(copy_obj["dest"]) tty.debug("copying {0} to {1}".format(copy_obj["src"], destination_url)) copy_buildcache_entry(src_cache_entry, destination_url) def update_index(mirror: spack.mirrors.mirror.Mirror, update_keys=False): # Special case OCI images for now. try: image_ref = spack.oci.oci.image_from_mirror(mirror) except ValueError: image_ref = None if image_ref: with tempfile.TemporaryDirectory( dir=spack.stage.get_stage_root() ) as tmpdir, spack.util.parallel.make_concurrent_executor() as executor: spack.binary_distribution._oci_update_index(image_ref, tmpdir, executor) return # Otherwise, assume a normal mirror. url = mirror.push_url with tempfile.TemporaryDirectory(dir=spack.stage.get_stage_root()) as tmpdir: spack.binary_distribution._url_generate_package_index(url, tmpdir) if update_keys: try: with tempfile.TemporaryDirectory(dir=spack.stage.get_stage_root()) as tmpdir: spack.binary_distribution.generate_key_index(url, tmpdir) except spack.binary_distribution.CannotListKeys as e: # Do not error out if listing keys went wrong. This usually means that the _gpg path # does not exist. TODO: distinguish between this and other errors. 
tty.warn(f"did not update the key index: {e}") def update_index_fn(args): """update a buildcache index""" return update_index(args.mirror, update_keys=args.keys) def migrate_fn(args): """perform in-place binary mirror migration (2 to 3) A mirror can contain both layout version 2 and version 3 simultaneously without interference. This command performs in-place migration of a binary mirror laid out according to version 2, to a binary mirror laid out according to layout version 3. Only indexed specs will be migrated, so consider updating the mirror index before running this command. Re-run the command to migrate any missing items. The default mode of operation is to perform a signed migration, that is, spack will attempt to verify the signatures on specs, and then re-sign them before migration, using whatever keys are already installed in your key ring. You can migrate a mirror of unsigned binaries (or convert a mirror of signed binaries to unsigned) by providing the ``--unsigned`` argument. By default spack will leave the original mirror contents (in the old layout) in place after migration. You can have spack remove the old contents by providing the ``--delete-existing`` argument. Because migrating a mostly-already-migrated mirror should be fast, consider a workflow where you perform a default migration, (i.e. preserve the existing layout rather than deleting it) then evaluate the state of the migrated mirror by attempting to install from it, and finally running the migration again with ``--delete-existing``.""" target_mirror = args.mirror unsigned = args.unsigned assert isinstance(target_mirror, spack.mirrors.mirror.Mirror) delete_existing = args.delete_existing proceed = True if delete_existing and not args.yes_to_all: msg = ( "Using --delete-existing will delete the entire contents \n" " of the old layout within the mirror. Because migrating a mirror \n" " that has already been migrated should be fast, consider a workflow \n" " where you perform a default migration (i.e. preserve the existing \n" " layout rather than deleting it), then evaluate the state of the \n" " migrated mirror by attempting to install from it, and finally, \n" " run the migration again with --delete-existing." ) tty.warn(msg) proceed = tty.get_yes_or_no("Do you want to proceed?", default=False) if not proceed: tty.die("Migration aborted.") migrate(target_mirror, unsigned=unsigned, delete_existing=delete_existing) def prune_fn(args): """prune buildcache entries from the mirror If a keeplist file is provided, performs direct pruning (deletes packages not in keeplist) followed by orphan pruning. If no keeplist is provided, only performs orphan pruning. """ mirror: spack.mirrors.mirror.Mirror = args.mirror keeplist: Optional[str] = args.keeplist dry_run: bool = args.dry_run assert isinstance(mirror, spack.mirrors.mirror.Mirror) prune_buildcache(mirror=mirror, keeplist=keeplist, dry_run=dry_run) def buildcache(parser, args): return args.func(args)
PackageNotInstalledError
python
PyCQA__pylint
tests/functional/n/none_dunder_protocols_py38.py
{ "start": 136, "end": 317 }
class ____(metaclass=MetaContainer): if (__iter__ := lambda x: x): # [unnecessary-lambda-assignment] pass def test(): 1 in NamedExpressionClass()
NamedExpressionClass
python
falconry__falcon
tests/test_httpstatus.py
{ "start": 771, "end": 1718 }
class ____: @falcon.before(before_hook) def on_get(self, req, resp): resp.status = falcon.HTTP_500 resp.set_header('X-Failed', 'True') resp.text = 'Fail' def on_post(self, req, resp): resp.status = falcon.HTTP_500 resp.set_header('X-Failed', 'True') resp.text = 'Fail' raise HTTPStatus(falcon.HTTP_200, headers={'X-Failed': 'False'}, text='Pass') @falcon.after(after_hook) def on_put(self, req, resp): # NOTE(kgriffs): Test that passing a unicode status string # works just fine. resp.status = '500 Internal Server Error' resp.set_header('X-Failed', 'True') resp.text = 'Fail' def on_patch(self, req, resp): raise HTTPStatus(falcon.HTTP_200, text=None) @falcon.after(noop_after_hook) def on_delete(self, req, resp): raise HTTPStatus(201, headers={'X-Failed': 'False'}, text='Pass')
TestStatusResource
python
google__pytype
pytype/rewrite/flow/conditions.py
{ "start": 274, "end": 360 }
class ____(Condition): def __repr__(self): return 'TRUE' @_frozen_dataclass
_True
python
huggingface__transformers
src/transformers/models/smolvlm/image_processing_smolvlm_fast.py
{ "start": 5698, "end": 22647 }
class ____(BaseImageProcessorFast): resample = PILImageResampling.LANCZOS image_mean = IMAGENET_STANDARD_MEAN image_std = IMAGENET_STANDARD_STD size = {"longest_edge": 4 * 364} max_image_size = {"longest_edge": 364} do_resize = True do_rescale = True do_normalize = True do_convert_rgb = True do_image_splitting = True do_pad = True return_row_col_info = False valid_kwargs = SmolVLMImageProcessorKwargs def _prepare_images_structure(self, images: ImageInput, expected_ndims: int = 3) -> ImageInput: """ Prepare a nested images structure for processing. """ return make_nested_list_of_images(images, expected_ndims=expected_ndims) def resize( self, image: "torch.Tensor", size: SizeDict, interpolation: Optional["F.InterpolationMode"] = None, antialias: bool = True, **kwargs, ) -> "torch.Tensor": """ Resize an image. The longest edge of the image is resized to size.longest_edge, with the shortest edge resized to keep the input aspect ratio. Can also be used with size.height and size.width. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Size of the output image. interpolation (`InterpolationMode`, *optional*, defaults to `InterpolationMode.BILINEAR`): `InterpolationMode` filter to use when resizing the image e.g. `InterpolationMode.BICUBIC`. antialias (`bool`, *optional*, defaults to `True`): Whether to use antialiasing when resizing the image. """ interpolation = interpolation if interpolation is not None else F.InterpolationMode.BILINEAR if interpolation == F.InterpolationMode.LANCZOS: logger.warning_once( "You have used fast image processor with LANCZOS resample which not yet supported for torch.Tensor. " "BICUBIC resample will be used as an alternative. Please fall back to slow image processor if you " "want full consistency with the original model." ) interpolation = F.InterpolationMode.BICUBIC if size.longest_edge: size = get_resize_output_image_size(image, resolution_max_side=size.longest_edge) elif size.height and size.width: size = (size.height, size.width) else: raise ValueError("size must be a dictionary with key 'longest_edge' or 'height' and 'width'.") return F.resize(image, size, interpolation=interpolation, antialias=antialias) def split_images( self, images: torch.Tensor, max_image_size: dict[str, int], interpolation: Optional["F.InterpolationMode"] = None, ): """ Split an image into squares of side max_image_size and the original image resized to max_image_size. That means that a single image becomes a sequence of images. This is a "trick" to spend more compute on each image with no changes in the vision encoder. 1) If one side of the original image is larger than `max_image_size`, resize it to `max_image_size` while preserving the aspect ratio. 2) Divide the resulting image into `ceil(height / max_image_size)` x `ceil(width / max_image_size)` sub-images of the same size each (image_size, image_size). Typically, 364x364. 3) Returns the list of the crops and the original image, in addition to the number of splits for the height and the width. Args: images (`torch.Tensor`): Images to split. max_image_size (`Dict[str, int]`): Maximum size of the output image. If the image is larger than this size, it will be split into patches of this size, and the original image will be concatenated with the patches, resized to max_size. interpolation (`InterpolationMode`, *optional*, defaults to `InterpolationMode.BILINEAR`): `InterpolationMode` filter to use when resizing the image e.g. `InterpolationMode.BICUBIC`. 
""" batch_size, num_channels, height, width = images.size() height_dim, width_dim = 2, 3 max_height = max_width = max_image_size["longest_edge"] frames = [] if height > max_height or width > max_width: # Calculate the number of splits num_splits_h = math.ceil(height / max_height) num_splits_w = math.ceil(width / max_width) # Split the images by height, then by width frames = ( images.unfold(height_dim, size=max_height, step=max_height) .unfold(width_dim, size=max_width, step=max_width) .contiguous() .view(batch_size, num_channels, -1, max_height, max_width) .permute(0, 2, 1, 3, 4) ) # batch_size x n_frames x num_channels x height x width # For the global image at the end, we resize it to match the max_image_size, for cpu memory efficiency global_image_height, global_image_width = max_height, max_width images = self.resize( images, SizeDict(height=global_image_height, width=global_image_width), interpolation=interpolation ) frames = torch.cat((frames, images.unsqueeze(1)), dim=1) else: num_splits_h, num_splits_w = 0, 0 frames = images.unsqueeze(1) num_splits_h = [num_splits_h] * batch_size num_splits_w = [num_splits_w] * batch_size return frames, num_splits_h, num_splits_w def resize_for_vision_encoder( self, image: torch.Tensor, vision_encoder_max_size: int, interpolation: Optional["F.InterpolationMode"] = None, ): """ Resize images to be multiples of `vision_encoder_max_size` while preserving the aspect ratio. Args: image (`torch.Tensor`): Images to resize. vision_encoder_max_size (`int`): Maximum size of the output image. If the image is larger than this size, it will be split into patches of this size, and the original image will be concatenated with the patches, resized to max_size. interpolation (`InterpolationMode`, *optional*, defaults to `InterpolationMode.BILINEAR`): `InterpolationMode` filter to use when resizing the image e.g. `InterpolationMode.BICUBIC`. """ height, width = image.size()[-2:] aspect_ratio = width / height if width >= height: width = math.ceil(width / vision_encoder_max_size) * vision_encoder_max_size height = int(width / aspect_ratio) height = math.ceil(height / vision_encoder_max_size) * vision_encoder_max_size elif height > width: height = math.ceil(height / vision_encoder_max_size) * vision_encoder_max_size width = int(height * aspect_ratio) width = math.ceil(width / vision_encoder_max_size) * vision_encoder_max_size new_size = SizeDict(height=height, width=width) return self.resize(image, size=new_size, interpolation=interpolation) def pad( self, image: torch.Tensor, padded_size: tuple[int, int], fill: int = 0, return_pixel_mask: bool = True, ): original_size = image.shape[-2:] padding_bottom = padded_size[0] - original_size[0] padding_right = padded_size[1] - original_size[1] if padding_bottom < 0 or padding_right < 0: raise ValueError( f"Padding dimensions are negative. Please make sure that the padded size is larger than the " f"original size. Got padded size: {padded_size}, original size: {original_size}." ) # Only pad if necessary if original_size != padded_size: padding = (0, 0, padding_right, padding_bottom) image = F.pad(image, padding, fill=fill, padding_mode="constant") # Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding. 
pixel_mask = None if return_pixel_mask: pixel_mask = torch.zeros_like(image[..., 0, :, :], dtype=torch.int64) pixel_mask[: original_size[0], : original_size[1]] = 1 return image, pixel_mask @auto_docstring def preprocess(self, images: ImageInput, **kwargs: Unpack[SmolVLMImageProcessorKwargs]) -> BatchFeature: return super().preprocess(images, **kwargs) def _preprocess( self, images: list[list["torch.Tensor"]], do_resize: bool, size: SizeDict, interpolation: Optional["F.InterpolationMode"], do_rescale: bool, rescale_factor: float, do_normalize: bool, image_mean: Optional[Union[float, list[float]]], image_std: Optional[Union[float, list[float]]], do_pad: Optional[bool], do_image_splitting: Optional[bool], max_image_size: Optional[dict[str, int]], return_row_col_info: Optional[bool], disable_grouping: Optional[bool], return_tensors: Optional[Union[str, TensorType]], **kwargs, ) -> BatchFeature: """ Process a batch of images for the model. """ grouped_images, grouped_images_index = group_images_by_shape( images, is_nested=True, disable_grouping=disable_grouping ) resized_images_grouped = {} for shape, stacked_images in grouped_images.items(): if do_resize: stacked_images = self.resize(stacked_images, size, interpolation=interpolation) resized_images_grouped[shape] = stacked_images resized_images = reorder_images(resized_images_grouped, grouped_images_index, is_nested=True) grouped_images, grouped_images_index = group_images_by_shape( resized_images, is_nested=True, disable_grouping=disable_grouping ) split_images_grouped = {} if do_image_splitting: rows_grouped = {} cols_grouped = {} for shape, stacked_images in grouped_images.items(): stacked_images = self.resize_for_vision_encoder( stacked_images, max_image_size["longest_edge"], interpolation=interpolation ) stacked_images, rows, cols = self.split_images( stacked_images, max_image_size=max_image_size, interpolation=interpolation ) split_images_grouped[shape] = stacked_images rows_grouped[shape] = rows cols_grouped[shape] = cols processed_images = reorder_images(split_images_grouped, grouped_images_index, is_nested=True) rows = reorder_images(rows_grouped, grouped_images_index, is_nested=True) cols = reorder_images(cols_grouped, grouped_images_index, is_nested=True) # flattenened the doubly nested list to a nested list for i, group_images in enumerate(processed_images): processed_images[i] = [image for sublist in group_images for image in sublist] else: for shape, stacked_images in grouped_images.items(): # We square the images to max_image_size stacked_images = self.resize( image=stacked_images, size=SizeDict(height=max_image_size["longest_edge"], width=max_image_size["longest_edge"]), interpolation=interpolation, ) split_images_grouped[shape] = stacked_images processed_images = reorder_images(split_images_grouped, grouped_images_index, is_nested=True) rows = [[0] * len(images) for images in processed_images] cols = [[0] * len(images) for images in processed_images] # Group images by size for further processing # Needed in case do_resize is False, or resize returns images with different sizes grouped_images, grouped_images_index = group_images_by_shape( processed_images, is_nested=True, disable_grouping=disable_grouping ) processed_images_grouped = {} for shape, stacked_images in grouped_images.items(): # Fused rescale and normalize stacked_images = self.rescale_and_normalize( stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std ) processed_images_grouped[shape] = stacked_images processed_images = 
reorder_images(processed_images_grouped, grouped_images_index, is_nested=True) if do_pad: # Get max images per batch max_num_images = max(len(images_) for images_ in processed_images) max_height, max_width = get_max_height_width(processed_images) processed_images_padded = torch.zeros( len(processed_images), max_num_images, *(processed_images[0][0].shape[0], max_height, max_width), device=processed_images[0][0].device, ) pixel_attention_masks = torch.zeros( len(processed_images), max_num_images, *(max_height, max_width), device=processed_images[0][0].device, ) for i, images in enumerate(processed_images): for j, image in enumerate(images): processed_images_padded[i, j], pixel_attention_masks[i, j] = self.pad( image, (max_height, max_width) ) processed_images = processed_images_padded if do_pad: data = {"pixel_values": processed_images, "pixel_attention_mask": pixel_attention_masks} elif return_tensors == "pt": data = {"pixel_values": torch.stack([torch.stack(images) for images in processed_images])} else: data = {"pixel_values": processed_images} # This is needed for generating correct text inputs in the processor - we don't pad to the max number of images encoding = BatchFeature(data=data, tensor_type=return_tensors) if return_row_col_info: encoding["rows"] = rows encoding["cols"] = cols return encoding def to_dict(self): encoder_dict = super().to_dict() encoder_dict.pop("_valid_processor_keys", None) encoder_dict.pop("return_row_col_info", None) return encoder_dict def get_number_of_image_patches(self, height: int, width: int, images_kwargs=None): """ A utility that returns number of image patches for a given image size. Args: height (`int`): Height of the input image. width (`int`): Width of the input image. images_kwargs (`dict`, *optional*) Any kwargs to override defaults of the image processor. Returns: `int`: Number of patches per image. """ do_image_splitting = images_kwargs.get("do_image_splitting", self.do_image_splitting) max_image_size = images_kwargs.get("max_image_size", self.max_image_size) size = images_kwargs.get("size", self.size) num_patches = num_rows = num_cols = 1 if do_image_splitting: height, width = _resize_output_size_rescale_to_max_len(height, width, max_len=size["longest_edge"]) height, width = _resize_output_size_scale_below_upper_bound(height, width, max_len=MAX_IMAGE_SIZE) aspect_ratio = width / height if width >= height: resized_width = math.ceil(width / max_image_size["longest_edge"]) * max_image_size["longest_edge"] resized_height = int(width / aspect_ratio) resized_height = math.ceil(height / max_image_size["longest_edge"]) * max_image_size["longest_edge"] elif height > width: resized_height = math.ceil(height / max_image_size["longest_edge"]) * max_image_size["longest_edge"] resized_width = int(height * aspect_ratio) resized_width = math.ceil(width / max_image_size["longest_edge"]) * max_image_size["longest_edge"] max_height = max_width = max_image_size["longest_edge"] if resized_height > max_height or resized_width > max_width: # Calculate the number of splits num_rows = math.ceil(resized_height / max_height) num_cols = math.ceil(resized_width / max_width) num_patches = num_rows * num_cols + 1 return num_patches, num_rows, num_cols __all__ = ["SmolVLMImageProcessorFast"]
SmolVLMImageProcessorFast
python
doocs__leetcode
solution/1700-1799/1786.Number of Restricted Paths From First to Last Node/Solution.py
{ "start": 0, "end": 782 }
class ____: def countRestrictedPaths(self, n: int, edges: List[List[int]]) -> int: @cache def dfs(i): if i == n: return 1 ans = 0 for j, _ in g[i]: if dist[i] > dist[j]: ans = (ans + dfs(j)) % mod return ans g = defaultdict(list) for u, v, w in edges: g[u].append((v, w)) g[v].append((u, w)) q = [(0, n)] dist = [inf] * (n + 1) dist[n] = 0 mod = 10**9 + 7 while q: _, u = heappop(q) for v, w in g[u]: if dist[v] > dist[u] + w: dist[v] = dist[u] + w heappush(q, (dist[v], v)) return dfs(1)
Solution
python
pydantic__pydantic
pydantic-core/tests/validators/test_pickling.py
{ "start": 1960, "end": 2592 }
class ____: __pydantic_validator__: SchemaValidator __pydantic_complete__ = True def test_schema_validator_not_reused_when_unpickling() -> None: s = SchemaValidator( core_schema.model_schema( cls=Model, schema=core_schema.model_fields_schema(fields={}, model_name='Model'), config={'title': 'Model'}, ref='Model:123', ) ) Model.__pydantic_validator__ = s assert 'Prebuilt' not in str(Model.__pydantic_validator__) reconstructed = pickle.loads(pickle.dumps(Model.__pydantic_validator__)) assert 'Prebuilt' not in str(reconstructed)
Model
python
tensorflow__tensorflow
tensorflow/python/ops/gradient_checker_v2_test.py
{ "start": 1760, "end": 9532 }
class ____(test.TestCase): def testSparseTensorReshape(self): x = constant_op.constant(2.0, shape=(2,)) def sparse_tensor_reshape(values): sparse = sparse_tensor.SparseTensor( indices=[[0, 0], [1, 2]], values=values, dense_shape=[3, 4]) sparse = sparse_ops.sparse_reshape(sparse, shape=(12,)) return sparse.values error = gradient_checker.max_error( *gradient_checker.compute_gradient(sparse_tensor_reshape, [x])) self.assertLess(error, 1e-4) def testWithStaticShape(self): size = (2, 3) constant = constant_op.constant(2.0, shape=size, name="const") def add_constant_with_static_shape_check(x): self.assertAllEqual(x.shape.as_list(), constant.shape.as_list()) return x + constant x = constant_op.constant(3.0, shape=size, name="x") error = gradient_checker.max_error(*gradient_checker.compute_gradient( add_constant_with_static_shape_check, [x])) self.assertLess(error, 1e-4) def testWithArgumentsAsTuple(self): size = (2, 3) x1 = constant_op.constant(2.0, shape=size, name="x1") x2 = constant_op.constant(3.0, shape=size, name="x2") error = gradient_checker.max_error(*gradient_checker.compute_gradient( lambda x1: math_ops.add(x1, x2), (x1,))) tf_logging.info("x1 error = %f", error) self.assertLess(error, 1e-4) def testAddSimple(self): size = (2, 3) x1 = constant_op.constant(2.0, shape=size, name="x1") x2 = constant_op.constant(3.0, shape=size, name="x2") error = gradient_checker.max_error(*gradient_checker.compute_gradient( lambda x1: math_ops.add(x1, x2), [x1])) tf_logging.info("x1 error = %f", error) self.assertLess(error, 1e-4) def testBfloat16(self): x1 = constant_op.constant(2.0, dtype="bfloat16") x2 = constant_op.constant(3.0, dtype="bfloat16") # bfloat16 is very imprecise, so we use very large delta and error bar here. error = gradient_checker.max_error(*gradient_checker.compute_gradient( lambda x1: math_ops.add(x1, x2), [x1], delta=0.1)) tf_logging.info("x1 error = %f", error) self.assertLess(error, 0.07) def testAddCustomized(self): size = (2, 3) x1 = constant_op.constant(2.0, shape=size, dtype=dtypes.float64, name="x1") x2 = np.asarray(np.arange(6, dtype=np.float64).reshape(2, 3)) # checkint gradients for x2 using a special delta error = gradient_checker.max_error(*gradient_checker.compute_gradient( lambda x2: math_ops.add(x1, x2), [x2], delta=1e-2)) tf_logging.info("x2 error = %f", error) self.assertLess(error, 1e-10) def testGather(self): def f(params): index_values = [1, 3] indices = constant_op.constant(index_values, name="i") return array_ops.gather(params, indices, name="y") p_shape = (4, 2) p_size = 8 params = constant_op.constant( np.arange(p_size).astype(np.float64), shape=p_shape, name="p") error = gradient_checker.max_error( *gradient_checker.compute_gradient(f, [params])) tf_logging.info("gather error = %f", error) self.assertLess(error, 1e-4) def testNestedGather(self): def f(params): index_values = [1, 3, 5, 6] indices = constant_op.constant(index_values, name="i") y = array_ops.gather(params, indices, name="y") index_values2 = [0, 2] indices2 = constant_op.constant(index_values2, name="i2") return array_ops.gather(y, indices2, name="y2") p_shape = (8, 2) p_size = 16 params = constant_op.constant( np.arange(p_size).astype(np.float64), shape=p_shape, name="p") error = gradient_checker.max_error( *gradient_checker.compute_gradient(f, [params])) tf_logging.info("nested gather error = %f", error) self.assertLess(error, 1e-4) def testComplexMul(self): c = constant_op.constant(5 + 7j, dtype=dtypes.complex64) def f(x): return c * x x_shape = c.shape x_dtype = c.dtype x = 
constant_op.constant(_random_complex(x_shape, x_dtype)) analytical, numerical = gradient_checker.compute_gradient(f, [x]) correct = np.array([[5, -7], [7, 5]]) self.assertAllEqual(correct, analytical[0]) self.assertAllClose(correct, numerical[0], rtol=1e-4) x = constant_op.constant(_random_complex(x_shape, x_dtype)) self.assertLess( gradient_checker.max_error(*gradient_checker.compute_gradient(f, [x])), 3e-4) def testComplexConj(self): def f(x): return math_ops.conj(x) x_shape = () x_dtype = dtypes.complex64 x = constant_op.constant(_random_complex(x_shape, x_dtype)) analytical, numerical = gradient_checker.compute_gradient(f, [x]) correct = np.array([[1, 0], [0, -1]]) self.assertAllEqual(correct, analytical[0]) self.assertAllClose(correct, numerical[0], rtol=2e-5) x = constant_op.constant(_random_complex(x_shape, x_dtype)) self.assertLess( gradient_checker.max_error(*gradient_checker.compute_gradient(f, [x])), 2e-5) def testEmptySucceeds(self): def f(x): return array_ops.identity(x) x = constant_op.constant( np.random.random_sample((0, 3)), dtype=dtypes.float32) for grad in gradient_checker.compute_gradient(f, [x]): self.assertEqual(grad[0].shape, (0, 0)) error = gradient_checker.max_error( *gradient_checker.compute_gradient(f, [x])) self.assertEqual(error, 0) def testEmptyMatMul(self): def f(x, y): return math_ops.matmul(x, y) x = constant_op.constant( np.random.random_sample((0, 3)), dtype=dtypes.float32) y = constant_op.constant( np.random.random_sample((3, 4)), dtype=dtypes.float32) for grad in gradient_checker.compute_gradient(f, [x, y]): self.assertEqual(grad[0].shape, (0, 0)) self.assertEqual(grad[1].shape, (0, 12)) error = gradient_checker.max_error( *gradient_checker.compute_gradient(f, [x, y])) self.assertEqual(error, 0) def testEmptyFails(self): @custom_gradient.custom_gradient def id_bad_grad(x): y = array_ops.identity(x) def grad_fn(dy): # dx = constant_op.constant(np.zeros((1, 4)), dtype=dtypes.float32) dx = array_ops.transpose(dy) return dx return y, grad_fn def f(x): return id_bad_grad(x) x = constant_op.constant( np.random.random_sample((0, 3)), dtype=dtypes.float32) bad = r"Empty gradient has wrong shape: expected \(0, 3\), got \(3, 0\)" with self.assertRaisesRegex(ValueError, bad): gradient_checker.compute_gradient(f, [x]) def testNaNGradFails(self): @custom_gradient.custom_gradient def id_nan_grad(x): y = array_ops.identity(x) def grad_fn(dy): dx = np.nan * dy # dx = dy return dx return y, grad_fn def f(x): return id_nan_grad(x) x = constant_op.constant( np.random.random_sample((1, 1)), dtype=dtypes.float32) error = gradient_checker.max_error( *gradient_checker.compute_gradient(f, [x])) # Typical test would assert error < max_err, so assert this test would # raise AssertionError, since NaN is not < 1.0. error_msg = r"(nan|np.float32\(nan\)) not less than 1.0" with self.assertRaisesRegex(AssertionError, error_msg): self.assertLess(error, 1.0) def testGradGrad(self): def f(x): with backprop.GradientTape() as tape: tape.watch(x) y = math_ops.square(x) z = math_ops.square(y) return tape.gradient(z, x) analytical, numerical = gradient_checker.compute_gradient(f, [2.0]) self.assertAllEqual([[[48.]]], analytical) self.assertAllClose([[[48.]]], numerical, rtol=1e-4) @test_util.run_all_in_graph_and_eager_modes
GradientCheckerTest
python
huggingface__transformers
tests/models/data2vec/test_modeling_data2vec_audio.py
{ "start": 13071, "end": 20669 }
class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( Data2VecAudioForCTC, Data2VecAudioModel, Data2VecAudioForSequenceClassification, Data2VecAudioForAudioFrameClassification, Data2VecAudioForXVector, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "audio-classification": Data2VecAudioForSequenceClassification, "automatic-speech-recognition": Data2VecAudioForCTC, "feature-extraction": Data2VecAudioModel, } if is_torch_available() else {} ) def setUp(self): self.model_tester = Data2VecAudioModelTester(self) self.config_tester = ConfigTester(self, config_class=Data2VecAudioConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_with_adapter(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_adapter(*config_and_inputs) def test_model_with_adapter_proj_dim(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_adapter_proj_dim(*config_and_inputs) def test_ctc_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_loss(*config_and_inputs) def test_seq_classifier_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_loss(*config_and_inputs) def test_ctc_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_training(*config_and_inputs) def test_seq_classifier_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_training(*config_and_inputs) def test_xvector_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_xvector_training(*config_and_inputs) def test_labels_out_of_vocab(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_labels_out_of_vocab(*config_and_inputs) @unittest.skip(reason="Data2VecAudio has no inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="`input_ids` is renamed to `input_values`") def test_forward_signature(self): pass @unittest.skip(reason="Data2VecAudio has no tokens embeddings") def test_resize_tokens_embeddings(self): pass @unittest.skip(reason="Data2VecAudio has no inputs_embeds") def test_model_get_set_embeddings(self): pass def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True # force eager attention to support output attentions config._attn_implementation = "eager" # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) # set layer drop to 0 model.config.layerdrop = 0.0 input_values = inputs_dict["input_values"] input_lengths = torch.tensor( [input_values.shape[1] for _ in range(input_values.shape[0])], dtype=torch.long, device=torch_device ) output_lengths = model._get_feat_extract_output_lengths(input_lengths) labels = ids_tensor((input_values.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size) inputs_dict["attention_mask"] = 
torch.ones_like(inputs_dict["attention_mask"]) inputs_dict["labels"] = labels outputs = model(**inputs_dict) output = outputs[0] # Encoder-/Decoder-only models hidden_states = outputs.hidden_states[0] attentions = outputs.attentions[0] hidden_states.retain_grad() attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) self.assertIsNotNone(attentions.grad) # overwrite from test_modeling_common def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: module.weight.fill_(3) if hasattr(module, "weight_g") and module.weight_g is not None: module.weight_g.data.fill_(3) if hasattr(module, "weight_v") and module.weight_v is not None: module.weight_v.data.fill_(3) if hasattr(module, "bias") and module.bias is not None: module.bias.fill_(3) if hasattr(module, "codevectors") and module.codevectors is not None: module.codevectors.data.fill_(3) if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None: module.masked_spec_embed.data.fill_(3) def test_mask_feature_prob_ctc(self): model = Data2VecAudioForCTC.from_pretrained( "hf-internal-testing/tiny-random-data2vec-seq-class", mask_feature_prob=0.2, mask_feature_length=2 ) model.to(torch_device).train() processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2", return_attention_mask=True ) batch_duration_in_seconds = [1, 3, 2, 6] input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds] batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt" ) logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits self.assertEqual(logits.shape, (4, 1498, 32)) def test_mask_time_prob_ctc(self): model = Data2VecAudioForCTC.from_pretrained( "facebook/data2vec-audio-base-960h", mask_time_prob=0.2, mask_time_length=2 ) model.to(torch_device).train() processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2", return_attention_mask=True ) batch_duration_in_seconds = [1, 3, 2, 6] input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds] batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt" ) logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits self.assertEqual(logits.shape, (4, 299, 32)) @unittest.skip(reason="Feed forward chunking is not implemented") def test_feed_forward_chunking(self): pass @slow def test_model_from_pretrained(self): model = Data2VecAudioModel.from_pretrained("facebook/data2vec-audio-base") self.assertIsNotNone(model) @require_torch
Data2VecAudioModelTest
python
ray-project__ray
python/ray/dashboard/modules/job/tests/test_cli_integration.py
{ "start": 7755, "end": 8270 }
class ____: def test_bad_runtime_env(self, ray_start_stop): """Should fail with helpful error if runtime env setup fails.""" stdout, _ = _run_cmd( 'ray job submit --runtime-env-json=\'{"pip": ' '["does-not-exist"]}\' -- echo hi', should_fail=True, ) assert "Tailing logs until the job exits" in stdout assert "runtime_env setup failed" in stdout assert "No matching distribution found for does-not-exist" in stdout
TestRuntimeEnv
python
tensorflow__tensorflow
tensorflow/python/autograph/pyct/errors.py
{ "start": 937, "end": 1051 }
class ____(PyCTError, ValueError): """Raised when inspect can not access source code."""
InaccessibleSourceCodeError
python
run-llama__llama_index
llama-index-integrations/readers/llama-index-readers-gcs/llama_index/readers/gcs/base.py
{ "start": 950, "end": 10211 }
class ____(BasePydanticReader, ResourcesReaderMixin, FileSystemReaderMixin): """ A reader for Google Cloud Storage (GCS) files and directories. This class allows reading files from GCS, listing resources, and retrieving resource information. It supports authentication via service account keys and implements various reader mixins. Attributes: bucket (str): The name of the GCS bucket. key (Optional[str]): The specific file key to read. If None, the entire bucket is parsed. prefix (Optional[str]): The prefix to filter by when iterating through the bucket. recursive (bool): Whether to recursively search in subdirectories. file_extractor (Optional[Dict[str, Union[str, BaseReader]]]): Custom file extractors. required_exts (Optional[List[str]]): List of required file extensions. filename_as_id (bool): Whether to use the filename as the document ID. num_files_limit (Optional[int]): Maximum number of files to read. file_metadata (Optional[Callable[[str], Dict]]): Function to extract metadata from filenames. service_account_key (Optional[Dict[str, str]]): Service account key as a dictionary. service_account_key_json (Optional[str]): Service account key as a JSON string. service_account_key_path (Optional[str]): Path to the service account key file. """ is_remote: bool = True bucket: str key: Optional[str] = None prefix: Optional[str] = "" recursive: bool = True file_extractor: Optional[Dict[str, Union[str, BaseReader]]] = Field( default=None, exclude=True ) required_exts: Optional[List[str]] = None filename_as_id: bool = True num_files_limit: Optional[int] = None file_metadata: Optional[FileMetadataCallable] = Field(default=None, exclude=True) service_account_key: Optional[Dict[str, str]] = None service_account_key_json: Optional[str] = None service_account_key_path: Optional[str] = None @classmethod def class_name(cls) -> str: """Return the name of the class.""" return "GCSReader" def _get_gcsfs(self): """ Create and return a GCSFileSystem object. This method handles authentication using the provided service account credentials. Returns: GCSFileSystem: An authenticated GCSFileSystem object. Raises: ValueError: If no valid authentication method is provided. DefaultCredentialsError: If there's an issue with the provided credentials. """ from gcsfs import GCSFileSystem try: if self.service_account_key is not None: creds = service_account.Credentials.from_service_account_info( self.service_account_key, scopes=SCOPES ) elif self.service_account_key_json is not None: creds = service_account.Credentials.from_service_account_info( json.loads(self.service_account_key_json), scopes=SCOPES ) elif self.service_account_key_path is not None: creds = service_account.Credentials.from_service_account_file( self.service_account_key_path, scopes=SCOPES ) else: logger.warning( "No explicit credentials provided. Falling back to default credentials." ) creds = None # This will use default credentials return GCSFileSystem(token=creds) except DefaultCredentialsError as e: logger.error(f"Failed to authenticate with GCS: {e!s}") raise def _get_simple_directory_reader(self) -> SimpleDirectoryReader: """ Create and return a SimpleDirectoryReader for GCS. This method sets up a SimpleDirectoryReader with the appropriate GCS filesystem and other configured parameters. Returns: SimpleDirectoryReader: A configured SimpleDirectoryReader for GCS. 
""" gcsfs = self._get_gcsfs() input_dir = self.bucket input_files = None if self.key: input_files = [f"{self.bucket}/{self.key}"] elif self.prefix: input_dir = f"{input_dir}/{self.prefix}" return SimpleDirectoryReader( input_dir=input_dir, input_files=input_files, recursive=self.recursive, file_extractor=self.file_extractor, required_exts=self.required_exts, filename_as_id=self.filename_as_id, num_files_limit=self.num_files_limit, file_metadata=self.file_metadata, fs=gcsfs, ) def load_data(self) -> List[Document]: """ Load data from the specified GCS bucket or file. Returns: List[Document]: A list of loaded documents. Raises: Exception: If there's an error loading the data. """ try: logger.info(f"Loading data from GCS bucket: {self.bucket}") return self._get_simple_directory_reader().load_data() except Exception as e: logger.error(f"Error loading data from GCS: {e!s}") raise def list_resources(self, **kwargs) -> List[str]: """ List resources in the specified GCS bucket or directory. Args: **kwargs: Additional arguments to pass to the underlying list_resources method. Returns: List[str]: A list of resource identifiers. Raises: Exception: If there's an error listing the resources. """ try: logger.info(f"Listing resources in GCS bucket: {self.bucket}") return self._get_simple_directory_reader().list_resources(**kwargs) except Exception as e: logger.error(f"Error listing resources in GCS: {e!s}") raise def get_resource_info(self, resource_id: str, **kwargs) -> Dict: """ Get information about a specific GCS resource. Args: resource_id (str): The identifier of the resource. **kwargs: Additional arguments to pass to the underlying info method. Returns: Dict: A dictionary containing resource information. Raises: Exception: If there's an error retrieving the resource information. """ try: logger.info(f"Getting info for resource: {resource_id}") gcsfs = self._get_gcsfs() info_result = gcsfs.info(resource_id) info_dict = { "file_path": info_result.get("name"), "file_size": info_result.get("size"), "last_modified_date": info_result.get("updated"), "content_hash": info_result.get("md5Hash"), "content_type": info_result.get("contentType"), "storage_class": info_result.get("storageClass"), "etag": info_result.get("etag"), "generation": info_result.get("generation"), "created_date": info_result.get("timeCreated"), } # Convert datetime objects to ISO format strings for key in ["last_modified_date", "created_date"]: if isinstance(info_dict.get(key), datetime): info_dict[key] = info_dict[key].isoformat() return {k: v for k, v in info_dict.items() if v is not None} except Exception as e: logger.error(f"Error getting resource info from GCS: {e!s}") raise def load_resource(self, resource_id: str, **kwargs) -> List[Document]: """ Load a specific resource from GCS. Args: resource_id (str): The identifier of the resource to load. **kwargs: Additional arguments to pass to the underlying load_resource method. Returns: List[Document]: A list containing the loaded document. Raises: Exception: If there's an error loading the resource. """ try: logger.info(f"Loading resource: {resource_id}") return self._get_simple_directory_reader().load_resource( resource_id, **kwargs ) except Exception as e: logger.error(f"Error loading resource from GCS: {e!s}") raise def read_file_content(self, input_file: Path, **kwargs) -> bytes: """ Read the content of a specific file from GCS. Args: input_file (Path): The path to the file to read. **kwargs: Additional arguments to pass to the underlying read_file_content method. 
Returns: bytes: The content of the file. Raises: Exception: If there's an error reading the file content. """ try: logger.info(f"Reading file content: {input_file}") return self._get_simple_directory_reader().read_file_content( input_file, **kwargs ) except Exception as e: logger.error(f"Error reading file content from GCS: {e!s}") raise
GCSReader
python
great-expectations__great_expectations
tests/datasource/fluent/test_pandas_azure_blob_storage_datasource.py
{ "start": 1340, "end": 1575 }
class ____: def walk_blobs( self, name_starts_with: str | None = None, include: Any | None = None, delimiter: str = "/", **kwargs, ) -> Iterator: return iter([])
MockContainerClient
python
huggingface__transformers
src/transformers/models/data2vec/modular_data2vec_audio.py
{ "start": 4487, "end": 6352 }
class ____(PreTrainedModel, Wav2Vec2PreTrainedModel): config: Data2VecAudioConfig base_model_prefix = "data2vec_audio" main_input_name = "input_values" input_modalities = "audio" supports_gradient_checkpointing = True _supports_flash_attn = True _supports_sdpa = True _supports_flex_attn = True @torch.no_grad() def _init_weights(self, module): """Initialize the weights""" if isinstance(module, Data2VecAudioFeatureProjection): k = math.sqrt(1 / module.projection.in_features) init.uniform_(module.projection.weight, a=-k, b=k) init.uniform_(module.projection.bias, a=-k, b=k) elif isinstance(module, Data2VecAudioPositionalConvLayer): init.constant_(module.conv.bias, 0) elif isinstance(module, nn.Linear): init.normal_(module.weight, mean=0.0, std=self.config.initializer_range) if module.bias is not None: init.zeros_(module.bias) elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)): if module.bias is not None: init.zeros_(module.bias) if module.weight is not None: init.ones_(module.weight) elif isinstance(module, nn.Conv1d): init.kaiming_normal_(module.weight) if module.bias is not None: k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0])) init.uniform_(module.bias, a=-k, b=k) def _get_adapters(self): raise AttributeError("Not needed for Data2VecAudio") def init_adapter_layers(self): raise AttributeError("Not needed for Data2VecAudio") def load_adapter(self): raise AttributeError("Not needed for Data2VecAudio") Data2VecAudioBaseModelOutput = Wav2Vec2BaseModelOutput
Data2VecAudioPreTrainedModel
python
viewflow__viewflow
tests/workflow/test_nodes__handle.py
{ "start": 1191, "end": 1321 }
class ____(Process): raise_exception = jsonstore.BooleanField() class Meta: proxy = True
TestWorkflowHandlerProcess
python
langchain-ai__langchain
libs/core/langchain_core/utils/iter.py
{ "start": 345, "end": 2955 }
class ____: """Dummy lock that provides the proper interface but no protection.""" def __enter__(self) -> None: """Do nothing.""" def __exit__( self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None, ) -> Literal[False]: """Return False (exception not suppressed).""" return False def tee_peer( iterator: Iterator[T], # the buffer specific to this peer buffer: deque[T], # the buffers of all peers, including our own peers: list[deque[T]], lock: AbstractContextManager[Any], ) -> Generator[T, None, None]: """An individual iterator of a `.tee`. This function is a generator that yields items from the shared iterator `iterator`. It buffers items until the least advanced iterator has yielded them as well. The buffer is shared with all other peers. Args: iterator: The shared iterator. buffer: The buffer for this peer. peers: The buffers of all peers. lock: The lock to synchronise access to the shared buffers. Yields: The next item from the shared iterator. """ try: while True: if not buffer: with lock: # Another peer produced an item while we were waiting for the lock. # Proceed with the next loop iteration to yield the item. if buffer: continue try: item = next(iterator) except StopIteration: break else: # Append to all buffers, including our own. We'll fetch our # item from the buffer again, instead of yielding it directly. # This ensures the proper item ordering if any of our peers # are fetching items concurrently. They may have buffered their # item already. for peer_buffer in peers: peer_buffer.append(item) yield buffer.popleft() finally: with lock: # this peer is done - remove its buffer for idx, peer_buffer in enumerate(peers): # pragma: no branch if peer_buffer is buffer: peers.pop(idx) break # if we are the last peer, try and close the iterator if not peers and hasattr(iterator, "close"): iterator.close()
NoLock
python
bokeh__bokeh
src/bokeh/core/property/required.py
{ "start": 1476, "end": 2481 }
class ____(SingleParameterizedProperty[T]): """ A property accepting a value of some other type while having undefined default. """ def __init__(self, type_param: TypeOrInst[Property[T]], *, default: Init[T] = Undefined, help: str | None = None) -> None: super().__init__(type_param, default=default, help=help) #----------------------------------------------------------------------------- # Dev API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Private API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Code #----------------------------------------------------------------------------- @register_type_link(Required) def _sphinx_type_link(obj: SingleParameterizedProperty[Any]) -> str: return f"{property_link(obj)}({type_link(obj.type_param)})"
Required
python
getsentry__sentry
tests/sentry/monitors/clock_tasks/test_check_timeout.py
{ "start": 653, "end": 18785 }
class ____(TestCase): @mock.patch("sentry.monitors.clock_tasks.check_timeout.mark_failed", wraps=mark_failed) @mock.patch("sentry.monitors.clock_tasks.check_timeout.produce_task") def test_timeout( self, mock_produce_task: mock.MagicMock, mock_mark_failed: mock.MagicMock ) -> None: org = self.create_organization() project = self.create_project(organization=org) ts = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0) # Schedule is once a day monitor = Monitor.objects.create( organization_id=org.id, project_id=project.id, config={ "schedule_type": ScheduleType.CRONTAB, "schedule": "0 0 * * *", "checkin_margin": None, "max_runtime": 30, }, ) monitor_environment = MonitorEnvironment.objects.create( # XXX(epurkhiser): Arbitrarily large id to make sure we can # correctly use the monitor_environment.id as the partition key id=62702371781194950, monitor=monitor, environment_id=self.environment.id, last_checkin=ts, next_checkin=ts + timedelta(hours=24), next_checkin_latest=ts + timedelta(hours=24, minutes=1), status=MonitorStatus.OK, ) # Checkin will timeout in 30 minutes checkin = MonitorCheckIn.objects.create( monitor=monitor, monitor_environment=monitor_environment, project_id=project.id, status=CheckInStatus.IN_PROGRESS, date_added=ts, date_updated=ts, timeout_at=ts + timedelta(minutes=30), ) # Does not time out at 12:00 dispatch_check_timeout(ts) assert mock_produce_task.call_count == 0 # Does not time out at 12:29 dispatch_check_timeout(ts + timedelta(minutes=29)) assert mock_produce_task.call_count == 0 # Timout at 12:30 dispatch_check_timeout(ts + timedelta(minutes=30)) message: MarkTimeout = { "type": "mark_timeout", "ts": (ts + timedelta(minutes=30)).timestamp(), "monitor_environment_id": checkin.monitor_environment_id, "checkin_id": checkin.id, } payload = KafkaPayload( str(monitor_environment.id).encode(), MONITORS_CLOCK_TASKS_CODEC.encode(message), [], ) assert mock_produce_task.call_count == 1 assert mock_produce_task.mock_calls[0] == mock.call(payload) mark_checkin_timeout( checkin.id, ts + timedelta(minutes=30), ) # Check in is marked as timed out assert MonitorCheckIn.objects.filter(id=checkin.id, status=CheckInStatus.TIMEOUT).exists() # mark_failed called with the check-in with the status correctly updated assert mock_mark_failed.call_count == 1 assert mock_mark_failed.mock_calls[0].args[0].status == CheckInStatus.TIMEOUT # Monitor is in an error state monitor_env = MonitorEnvironment.objects.filter( id=monitor_environment.id, status=MonitorStatus.ERROR, ) assert monitor_env.exists() # Next check-in time has NOT changed assert monitor_env[0].next_checkin == ts + timedelta(hours=24) @mock.patch("sentry.monitors.clock_tasks.check_timeout.produce_task") def test_timeout_with_overlapping_concurrent_checkins( self, mock_produce_task: mock.MagicMock ) -> None: """ Tests the scenario where the max_runtime is larger than the gap between the schedule. 
""" org = self.create_organization() project = self.create_project(organization=org) ts = timezone.now().replace(hour=0, second=0, microsecond=0) monitor = Monitor.objects.create( organization_id=org.id, project_id=project.id, config={ # Every hour, 90 minute run time allowed "schedule_type": ScheduleType.CRONTAB, "schedule": "0 * * * *", "checkin_margin": None, "max_runtime": 90, }, ) monitor_environment = MonitorEnvironment.objects.create( monitor=monitor, environment_id=self.environment.id, last_checkin=ts, next_checkin=ts + timedelta(hours=1), next_checkin_latest=ts + timedelta(hours=1, minutes=1), status=MonitorStatus.OK, ) # In progress started an hour ago checkin1_start = ts - timedelta(hours=1) # Timesout 90 minutes from when it started checkin1 = MonitorCheckIn.objects.create( monitor=monitor, monitor_environment=monitor_environment, project_id=project.id, status=CheckInStatus.IN_PROGRESS, date_added=checkin1_start, date_updated=checkin1_start, timeout_at=checkin1_start + timedelta(minutes=90), ) # Second check in was started now, giving us the overlapping # "concurrent" checkin scenario. checkin2 = MonitorCheckIn.objects.create( monitor=monitor, monitor_environment=monitor_environment, project_id=project.id, status=CheckInStatus.IN_PROGRESS, date_added=ts, date_updated=ts, timeout_at=ts + timedelta(minutes=90), ) # Nothing happens running the task now. Both check-ins are running # concurrently. dispatch_check_timeout(ts) assert mock_produce_task.call_count == 0 # First checkin has not timed out yet dispatch_check_timeout(ts + timedelta(minutes=29)) assert mock_produce_task.call_count == 0 # First checkin timed out dispatch_check_timeout(ts + timedelta(minutes=30)) message: MarkTimeout = { "type": "mark_timeout", "ts": (ts + timedelta(minutes=30)).timestamp(), "monitor_environment_id": checkin1.monitor_environment_id, "checkin_id": checkin1.id, } payload = KafkaPayload( str(monitor_environment.id).encode(), MONITORS_CLOCK_TASKS_CODEC.encode(message), [], ) assert mock_produce_task.call_count == 1 assert mock_produce_task.mock_calls[0] == mock.call(payload) mark_checkin_timeout( checkin1.id, ts + timedelta(minutes=30), ) # First checkin is marked as timed out assert MonitorCheckIn.objects.filter(id=checkin1.id, status=CheckInStatus.TIMEOUT).exists() # Second checkin is not marked as timed out assert MonitorCheckIn.objects.filter( id=checkin2.id, status=CheckInStatus.IN_PROGRESS ).exists() monitor_env = MonitorEnvironment.objects.filter( id=monitor_environment.id, status=MonitorStatus.ERROR, ) assert monitor_env.exists() # Next check-in time has NOT changed assert monitor_env[0].next_checkin == ts + timedelta(hours=1) @mock.patch("sentry.monitors.clock_tasks.check_timeout.produce_task") def test_timeout_at_next_checkin_time(self, mock_produce_task: mock.MagicMock) -> None: """ Test that timeouts that happen the same time we expect another check-in """ org = self.create_organization() project = self.create_project(organization=org) ts = timezone.now().replace(hour=1, minute=0, second=0, microsecond=0) monitor = Monitor.objects.create( organization_id=org.id, project_id=project.id, config={ # Every hour, 90 minute run time allowed "schedule_type": ScheduleType.CRONTAB, "schedule": "0 * * * *", "checkin_margin": None, "max_runtime": 60, }, ) monitor_environment = MonitorEnvironment.objects.create( monitor=monitor, environment_id=self.environment.id, last_checkin=ts - timedelta(hours=1), next_checkin=ts, next_checkin_latest=ts + timedelta(minutes=1), status=MonitorStatus.OK, ) # In 
progress started an hour ago checkin_start_time = ts - timedelta(hours=1) checkin = MonitorCheckIn.objects.create( monitor=monitor, monitor_environment=monitor_environment, project_id=project.id, status=CheckInStatus.IN_PROGRESS, date_added=checkin_start_time, date_updated=checkin_start_time, timeout_at=checkin_start_time + timedelta(hours=1), ) # Check in was marked as timed out dispatch_check_timeout(ts) message: MarkTimeout = { "type": "mark_timeout", "ts": ts.timestamp(), "monitor_environment_id": checkin.monitor_environment_id, "checkin_id": checkin.id, } payload = KafkaPayload( str(monitor_environment.id).encode(), MONITORS_CLOCK_TASKS_CODEC.encode(message), [], ) assert mock_produce_task.call_count == 1 assert mock_produce_task.mock_calls[0] == mock.call(payload) mark_checkin_timeout(checkin.id, ts) # First checkin is marked as timed out assert MonitorCheckIn.objects.filter(id=checkin.id, status=CheckInStatus.TIMEOUT).exists() # Monitor is in an error state monitor_env = MonitorEnvironment.objects.filter( id=monitor_environment.id, status=MonitorStatus.ERROR ) assert monitor_env.exists() # Next check-in time has NOT changed, it will be happening now assert monitor_env[0].next_checkin == ts @mock.patch("sentry.monitors.clock_tasks.check_timeout.produce_task") def test_timeout_using_interval(self, mock_produce_task: mock.MagicMock) -> None: org = self.create_organization() project = self.create_project(organization=org) ts = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0) # Schedule is once a day monitor = Monitor.objects.create( organization_id=org.id, project_id=project.id, config={ "schedule_type": ScheduleType.INTERVAL, "schedule": [10, "minute"], "checkin_margin": None, "max_runtime": 5, }, ) monitor_environment = MonitorEnvironment.objects.create( monitor=monitor, environment_id=self.environment.id, last_checkin=ts, next_checkin=ts + timedelta(minutes=10), next_checkin_latest=ts + timedelta(minutes=11), status=MonitorStatus.OK, ) # Checkin will timeout in 5 minutes checkin = MonitorCheckIn.objects.create( monitor=monitor, monitor_environment=monitor_environment, project_id=project.id, status=CheckInStatus.IN_PROGRESS, date_added=ts, date_updated=ts, timeout_at=ts + timedelta(minutes=5), ) # Timout at 12:05 dispatch_check_timeout(ts + timedelta(minutes=5)) message: MarkTimeout = { "type": "mark_timeout", "ts": (ts + timedelta(minutes=5)).timestamp(), "monitor_environment_id": checkin.monitor_environment_id, "checkin_id": checkin.id, } payload = KafkaPayload( str(monitor_environment.id).encode(), MONITORS_CLOCK_TASKS_CODEC.encode(message), [], ) assert mock_produce_task.call_count == 1 assert mock_produce_task.mock_calls[0] == mock.call(payload) mark_checkin_timeout( checkin.id, ts + timedelta(minutes=5), ) # Check in is marked as timed out assert MonitorCheckIn.objects.filter(id=checkin.id, status=CheckInStatus.TIMEOUT).exists() # Monitor is in an error state monitor_env = MonitorEnvironment.objects.filter( id=monitor_environment.id, status=MonitorStatus.ERROR, ) assert monitor_env.exists() # XXX(epurkhiser): Next check-in timeout is STILL 10 minutes from when # we started our check-in. This is likely WRONG for the user, since we # do't know when their system computed the next check-in. 
assert monitor_env[0].next_checkin == ts + timedelta(minutes=10) @mock.patch("sentry.monitors.clock_tasks.check_timeout.produce_task") def test_timeout_with_future_complete_checkin(self, mock_produce_task: mock.MagicMock) -> None: org = self.create_organization() project = self.create_project(organization=org) ts = timezone.now().replace(second=0, microsecond=0) check_in_24hr_ago = ts - timedelta(hours=24) # Schedule is once a day monitor = Monitor.objects.create( organization_id=org.id, project_id=project.id, config={ "schedule_type": ScheduleType.CRONTAB, "schedule": "0 0 * * *", "checkin_margin": None, "max_runtime": None, }, ) monitor_environment = MonitorEnvironment.objects.create( monitor=monitor, environment_id=self.environment.id, # Next checkin is in the future, we just completed our last checkin last_checkin=ts, next_checkin=ts + timedelta(hours=24), next_checkin_latest=ts + timedelta(hours=24, minutes=1), status=MonitorStatus.OK, ) # Checkin 24hr ago checkin1 = MonitorCheckIn.objects.create( monitor=monitor, monitor_environment=monitor_environment, project_id=project.id, status=CheckInStatus.IN_PROGRESS, date_added=check_in_24hr_ago, date_updated=check_in_24hr_ago, timeout_at=check_in_24hr_ago + timedelta(minutes=30), ) checkin2 = MonitorCheckIn.objects.create( monitor=monitor, monitor_environment=monitor_environment, project_id=project.id, status=CheckInStatus.OK, date_added=ts, date_updated=ts, timeout_at=ts + timedelta(minutes=30), ) assert checkin1.date_added == checkin1.date_updated == check_in_24hr_ago # Running check monitor will mark the first checkin as timed out. The # second checkin was already marked as OK. dispatch_check_timeout(ts) # assert that task is called for the specific checkin message: MarkTimeout = { "type": "mark_timeout", "ts": ts.timestamp(), "monitor_environment_id": checkin1.monitor_environment_id, "checkin_id": checkin1.id, } payload = KafkaPayload( str(monitor_environment.id).encode(), MONITORS_CLOCK_TASKS_CODEC.encode(message), [], ) assert mock_produce_task.call_count == 1 assert mock_produce_task.mock_calls[0] == mock.call(payload) mark_checkin_timeout(checkin1.id, ts) # The first checkin is marked as timed out assert MonitorCheckIn.objects.filter(id=checkin1.id, status=CheckInStatus.TIMEOUT).exists() # The second checkin has not changed status assert MonitorCheckIn.objects.filter(id=checkin2.id, status=CheckInStatus.OK).exists() # Monitor does not change from OK to TIMED OUT since it was already OK. assert MonitorEnvironment.objects.filter( id=monitor_environment.id, status=MonitorStatus.OK, ).exists() @mock.patch("sentry.monitors.clock_tasks.check_timeout.mark_failed") def test_timeout_checkin_backlog_handled(self, mock_mark_failed: mock.MagicMock) -> None: """ In the scenario where we have a clock-tick backlog we may produce multiple tasks to update a timed-out check-in. We want to make sure we do not call mark_failed when we marked it as timed_out in an earlier task. 
""" org = self.create_organization() project = self.create_project(organization=org) ts = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0) # Schedule is once a day monitor = Monitor.objects.create( organization_id=org.id, project_id=project.id, config={ "schedule_type": ScheduleType.CRONTAB, "schedule": "0 0 * * *", "checkin_margin": None, "max_runtime": 30, }, ) monitor_environment = MonitorEnvironment.objects.create( monitor=monitor, environment_id=self.environment.id, last_checkin=ts, next_checkin=ts + timedelta(hours=24), next_checkin_latest=ts + timedelta(hours=24, minutes=1), status=MonitorStatus.OK, ) # Checkin will timeout in 30 minutes checkin = MonitorCheckIn.objects.create( monitor=monitor, monitor_environment=monitor_environment, project_id=project.id, status=CheckInStatus.IN_PROGRESS, date_added=ts, date_updated=ts, timeout_at=ts + timedelta(minutes=30), ) # Mark as timed out mark_checkin_timeout(checkin.id, ts + timedelta(minutes=30)) assert mock_mark_failed.call_count == 1 # Second call does NOT trigger a mark_failed mark_checkin_timeout(checkin.id, ts + timedelta(minutes=31)) assert mock_mark_failed.call_count == 1
MonitorClockTasksCheckTimeoutTest
python
Textualize__textual
src/textual/css/styles.py
{ "start": 2022, "end": 4973 }
class ____(TypedDict, total=False): """A typed dict for CSS rules. Any key may be absent, indicating that rule has not been set. Does not define composite rules, that is a rule that is made of a combination of other rules. """ display: Display visibility: Visibility layout: "Layout" auto_color: bool color: Color background: Color text_style: Style background_tint: Color opacity: float text_opacity: float padding: Spacing margin: Spacing offset: ScalarOffset position: str border_top: tuple[str, Color] border_right: tuple[str, Color] border_bottom: tuple[str, Color] border_left: tuple[str, Color] border_title_align: AlignHorizontal border_subtitle_align: AlignHorizontal outline_top: tuple[str, Color] outline_right: tuple[str, Color] outline_bottom: tuple[str, Color] outline_left: tuple[str, Color] keyline: tuple[str, Color] box_sizing: BoxSizing width: Scalar height: Scalar min_width: Scalar min_height: Scalar max_width: Scalar max_height: Scalar dock: str split: str overflow_x: Overflow overflow_y: Overflow layers: tuple[str, ...] layer: str transitions: dict[str, Transition] tint: Color scrollbar_color: Color scrollbar_color_hover: Color scrollbar_color_active: Color scrollbar_corner_color: Color scrollbar_background: Color scrollbar_background_hover: Color scrollbar_background_active: Color scrollbar_gutter: ScrollbarGutter scrollbar_size_vertical: int scrollbar_size_horizontal: int scrollbar_visibility: ScrollbarVisibility align_horizontal: AlignHorizontal align_vertical: AlignVertical content_align_horizontal: AlignHorizontal content_align_vertical: AlignVertical grid_size_rows: int grid_size_columns: int grid_gutter_horizontal: int grid_gutter_vertical: int grid_rows: tuple[Scalar, ...] grid_columns: tuple[Scalar, ...] row_span: int column_span: int text_align: TextAlign link_color: Color auto_link_color: bool link_background: Color link_style: Style link_color_hover: Color auto_link_color_hover: bool link_background_hover: Color link_style_hover: Style auto_border_title_color: bool border_title_color: Color border_title_background: Color border_title_style: Style auto_border_subtitle_color: bool border_subtitle_color: Color border_subtitle_background: Color border_subtitle_style: Style hatch: tuple[str, Color] | Literal["none"] overlay: Overlay constrain_x: Constrain constrain_y: Constrain text_wrap: TextWrap text_overflow: TextOverflow expand: Expand line_pad: int RULE_NAMES = list(RulesMap.__annotations__.keys()) RULE_NAMES_SET = frozenset(RULE_NAMES) _rule_getter = attrgetter(*RULE_NAMES)
RulesMap
python
tox-dev__tox
src/tox/session/cmd/run/common.py
{ "start": 1473, "end": 8441 }
class ____(Action): def __call__( self, parser: ArgumentParser, # noqa: ARG002 namespace: Namespace, values: str | Sequence[Any] | None, option_string: str | None = None, # noqa: ARG002 ) -> None: if not values: raise ArgumentError(self, "cannot be empty") path = Path(cast("str", values)).absolute() if not path.exists(): raise ArgumentError(self, f"{path} does not exist") if not path.is_file(): raise ArgumentError(self, f"{path} is not a file") setattr(namespace, self.dest, path) def env_run_create_flags(parser: ArgumentParser, mode: str) -> None: # mode can be one of: run, run-parallel, legacy, devenv, config if mode not in {"devenv", "depends"}: parser.add_argument( "-s", "--skip-missing-interpreters", default="config", metavar="v", nargs="?", action=SkipMissingInterpreterAction, help="don't fail tests for missing interpreters: {config,true,false} choice", ) if mode not in {"devenv", "config", "depends"}: parser.add_argument( "-n", "--notest", dest="no_test", help="do not run the test commands", action="store_true", ) parser.add_argument( "-b", "--pkg-only", "--sdistonly", action="store_true", help="only perform the packaging activity", dest="package_only", ) parser.add_argument( "--installpkg", help="use specified package for installation into venv, instead of packaging the project", default=None, of_type=Path | None, action=InstallPackageAction, dest="install_pkg", ) if mode not in {"devenv", "depends"}: parser.add_argument( "--develop", action="store_true", help="install package in development mode", dest="develop", ) if mode != "depends": parser.add_argument( "--no-recreate-pkg", dest="no_recreate_pkg", help="if recreate is set do not recreate packaging tox environment(s)", action="store_true", ) if mode not in {"devenv", "config", "depends"}: parser.add_argument( "--skip-pkg-install", dest="skip_pkg_install", help="skip package installation for this run", action="store_true", ) def report(start: float, runs: list[ToxEnvRunResult], is_colored: bool, verbosity: int) -> int: # noqa: FBT001 def _print(color_: int, message: str) -> None: if verbosity: print(f"{color_ if is_colored else ''}{message}{Fore.RESET if is_colored else ''}") # noqa: T201 successful, skipped = [], [] for run in runs: successful.append(run.code == Outcome.OK or run.ignore_outcome) skipped.append(run.skipped) duration_individual = [o.elapsed for o in run.outcomes] if verbosity >= 2 else [] # noqa: PLR2004 extra = f"+cmd[{','.join(f'{i:.2f}' for i in duration_individual)}]" if duration_individual else "" setup = run.duration - sum(duration_individual) msg, color = _get_outcome_message(run) out = f" {run.name}: {msg} ({run.duration:.2f}{f'=setup[{setup:.2f}]{extra}' if extra else ''} seconds)" _print(color, out) duration = time.monotonic() - start all_good = all(successful) and not all(skipped) if all_good: _print(Fore.GREEN, f" congratulations :) ({duration:.2f} seconds)") return Outcome.OK _print(Fore.RED, f" evaluation failed :( ({duration:.2f} seconds)") if len(runs) == 1: return runs[0].code if not runs[0].skipped else -1 return -1 def _get_outcome_message(run: ToxEnvRunResult) -> tuple[str, int]: if run.skipped: msg, color = "SKIP", Fore.YELLOW elif run.code == Outcome.OK: msg, color = "OK", Fore.GREEN elif run.ignore_outcome: msg, color = f"IGNORED FAIL code {run.code}", Fore.YELLOW else: msg, color = f"FAIL code {run.code}", Fore.RED return msg, color logger = logging.getLogger(__name__) def execute(state: State, max_workers: int | None, has_spinner: bool, live: bool) -> int: # noqa: FBT001 interrupt, done = 
Event(), Event() results: list[ToxEnvRunResult] = [] future_to_env: dict[Future[ToxEnvRunResult], ToxEnv] = {} state.envs.ensure_only_run_env_is_active() to_run_list: list[str] = list(state.envs.iter()) for name in to_run_list: cast("RunToxEnv", state.envs[name]).mark_active() previous, has_previous = None, False try: spinner = ToxSpinner(has_spinner, state, len(to_run_list)) thread = Thread( target=_queue_and_wait, name="tox-interrupt", args=(state, to_run_list, results, future_to_env, interrupt, done, max_workers, spinner, live), ) thread.start() try: thread.join() except KeyboardInterrupt: previous, has_previous = signal(SIGINT, Handlers.SIG_IGN), True spinner.print_report = False # no need to print reports at this point, final report coming up logger.error("[%s] KeyboardInterrupt - teardown started", os.getpid()) # noqa: TRY400 interrupt.set() # cancel in reverse order to not allow submitting new jobs as we cancel running ones for future, tox_env in reversed(list(future_to_env.items())): canceled = future.cancel() # if cannot be canceled and not done -> still runs if canceled is False and not future.done(): # pragma: no branch tox_env.interrupt() done.wait() # workaround for https://bugs.python.org/issue45274 lock = getattr(thread, "_tstate_lock", None) if lock is not None and lock.locked(): # pragma: no branch lock.release() # pragma: no cover # calling private method to fix thread state thread._stop() # type: ignore[attr-defined] # pragma: no cover # noqa: SLF001 thread.join() finally: name_to_run = {r.name: r for r in results} ordered_results: list[ToxEnvRunResult] = [name_to_run[env] for env in to_run_list] # write the journal write_journal(getattr(state.conf.options, "result_json", None), state._journal) # noqa: SLF001 # report the outcome exit_code = report( state.conf.options.start, ordered_results, state.conf.options.is_colored, state.conf.options.verbosity, ) if has_previous: signal(SIGINT, previous) return exit_code
InstallPackageAction
python
django__django
django/db/migrations/exceptions.py
{ "start": 603, "end": 715 }
class ____(RuntimeError): """An irreversible migration is about to be reversed.""" pass
IrreversibleError
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/ruff/RUF033.py
{ "start": 1298, "end": 1557 }
class ____: def __post_init__( self, bar: int = (x := 1) # comment , baz: int = (y := 2), # comment foo = (a := 1) # comment , faz = (b := 2), # comment ) -> None: pass @dataclass
Foo
python
scipy__scipy
scipy/spatial/tests/test_qhull.py
{ "start": 15246, "end": 22170 }
class ____: """ Check that triangulation works. """ def test_masked_array_fails(self): masked_array = np.ma.masked_all(1) assert_raises(ValueError, qhull.Delaunay, masked_array) # Shouldn't be inherently unsafe; retry with cpython 3.14 once traceback # thread safety issues are fixed (also goes for other test with same name # further down) def test_array_with_nans_fails(self): points_with_nan = np.array([(0,0), (0,1), (1,1), (1,np.nan)], dtype=np.float64) assert_raises(ValueError, qhull.Delaunay, points_with_nan) def test_nd_simplex(self): # simple smoke test: triangulate a n-dimensional simplex for nd in range(2, 8): points = np.zeros((nd+1, nd)) for j in range(nd): points[j,j] = 1.0 points[-1,:] = 1.0 tri = qhull.Delaunay(points) tri.simplices.sort() assert_equal(tri.simplices, np.arange(nd+1, dtype=int)[None, :]) assert_equal(tri.neighbors, -1 + np.zeros((nd+1), dtype=int)[None,:]) def test_2d_square(self): # simple smoke test: 2d square points = np.array([(0,0), (0,1), (1,1), (1,0)], dtype=np.float64) tri = qhull.Delaunay(points) assert_equal(tri.simplices, [[1, 3, 2], [3, 1, 0]]) assert_equal(tri.neighbors, [[-1, -1, 1], [-1, -1, 0]]) def test_duplicate_points(self): x = np.array([0, 1, 0, 1], dtype=np.float64) y = np.array([0, 0, 1, 1], dtype=np.float64) xp = np.r_[x, x] yp = np.r_[y, y] # shouldn't fail on duplicate points qhull.Delaunay(np.c_[x, y]) qhull.Delaunay(np.c_[xp, yp]) def test_pathological(self): # both should succeed points = DATASETS['pathological-1'] tri = qhull.Delaunay(points) assert_equal(tri.points[tri.simplices].max(), points.max()) assert_equal(tri.points[tri.simplices].min(), points.min()) points = DATASETS['pathological-2'] tri = qhull.Delaunay(points) assert_equal(tri.points[tri.simplices].max(), points.max()) assert_equal(tri.points[tri.simplices].min(), points.min()) def test_joggle(self): # Check that the option QJ indeed guarantees that all input points # occur as vertices of the triangulation points = np.random.rand(10, 2) points = np.r_[points, points] # duplicate input data tri = qhull.Delaunay(points, qhull_options="QJ Qbb Pp") assert_array_equal(np.unique(tri.simplices.ravel()), np.arange(len(points))) def test_coplanar(self): # Check that the coplanar point output option indeed works points = np.random.rand(10, 2) points = np.r_[points, points] # duplicate input data tri = qhull.Delaunay(points) assert_(len(np.unique(tri.simplices.ravel())) == len(points)//2) assert_(len(tri.coplanar) == len(points)//2) assert_(len(np.unique(tri.coplanar[:,2])) == len(points)//2) assert_(np.all(tri.vertex_to_simplex >= 0)) def test_furthest_site(self): points = [(0, 0), (0, 1), (1, 0), (0.5, 0.5), (1.1, 1.1)] tri = qhull.Delaunay(points, furthest_site=True) expected = np.array([(1, 4, 0), (4, 2, 0)]) # from Qhull assert_array_equal(tri.simplices, expected) @pytest.mark.parametrize("name", sorted(INCREMENTAL_DATASETS)) def test_incremental(self, name): # Test incremental construction of the triangulation chunks, opts = INCREMENTAL_DATASETS[name] points = np.concatenate(chunks, axis=0) obj = qhull.Delaunay(chunks[0], incremental=True, qhull_options=opts) for chunk in chunks[1:]: obj.add_points(chunk) obj2 = qhull.Delaunay(points) obj3 = qhull.Delaunay(chunks[0], incremental=True, qhull_options=opts) if len(chunks) > 1: obj3.add_points(np.concatenate(chunks[1:], axis=0), restart=True) # Check that the incremental mode agrees with upfront mode if name.startswith('pathological'): # XXX: These produce valid but different triangulations. 
# They look OK when plotted, but how to check them? assert_array_equal(np.unique(obj.simplices.ravel()), np.arange(points.shape[0])) assert_array_equal(np.unique(obj2.simplices.ravel()), np.arange(points.shape[0])) else: assert_unordered_tuple_list_equal(obj.simplices, obj2.simplices, tpl=sorted_tuple) assert_unordered_tuple_list_equal(obj2.simplices, obj3.simplices, tpl=sorted_tuple) def assert_hulls_equal(points, facets_1, facets_2): # Check that two convex hulls constructed from the same point set # are equal facets_1 = set(map(sorted_tuple, facets_1)) facets_2 = set(map(sorted_tuple, facets_2)) if facets_1 != facets_2 and points.shape[1] == 2: # The direct check fails for the pathological cases # --- then the convex hull from Delaunay differs (due # to rounding error etc.) from the hull computed # otherwise, by the question whether (tricoplanar) # points that lie almost exactly on the hull are # included as vertices of the hull or not. # # So we check the result, and accept it if the Delaunay # hull line segments are a subset of the usual hull. eps = 1000 * np.finfo(float).eps for a, b in facets_1: for ap, bp in facets_2: t = points[bp] - points[ap] t /= np.linalg.norm(t) # tangent n = np.array([-t[1], t[0]]) # normal # check that the two line segments are parallel # to the same line c1 = np.dot(n, points[b] - points[ap]) c2 = np.dot(n, points[a] - points[ap]) if not np.allclose(np.dot(c1, n), 0): continue if not np.allclose(np.dot(c2, n), 0): continue # Check that the segment (a, b) is contained in (ap, bp) c1 = np.dot(t, points[a] - points[ap]) c2 = np.dot(t, points[b] - points[ap]) c3 = np.dot(t, points[bp] - points[ap]) if c1 < -eps or c1 > c3 + eps: continue if c2 < -eps or c2 > c3 + eps: continue # OK: break else: raise AssertionError("comparison fails") # it was OK return assert_equal(facets_1, facets_2)
TestDelaunay
python
tensorflow__tensorflow
tensorflow/python/training/basic_session_run_hooks.py
{ "start": 38394, "end": 42659 }
class ____(session_run_hook.SessionRunHook): """Captures CPU/GPU profiling information every N steps or seconds. This produces files called "timeline-<step>.json", which are in Chrome Trace format. For more information see: https://github.com/catapult-project/catapult/blob/master/tracing/README.md """ def __init__(self, save_steps=None, save_secs=None, output_dir="", show_dataflow=True, show_memory=False): """Initializes a hook that takes periodic profiling snapshots. `options.run_metadata` argument of `tf.Session.Run` is used to collect metadata about execution. This hook sets the metadata and dumps it in Chrome Trace format. Args: save_steps: `int`, save profile traces every N steps. Exactly one of `save_secs` and `save_steps` should be set. save_secs: `int` or `float`, save profile traces every N seconds. output_dir: `string`, the directory to save the profile traces to. Defaults to the current directory. show_dataflow: `bool`, if True, add flow events to the trace connecting producers and consumers of tensors. show_memory: `bool`, if True, add object snapshot events to the trace showing the sizes and lifetimes of tensors. """ self._output_file = os.path.join(output_dir, "timeline-{}.json") self._file_writer = SummaryWriterCache.get(output_dir) self._show_dataflow = show_dataflow self._show_memory = show_memory self._timer = SecondOrStepTimer( every_secs=save_secs, every_steps=save_steps) def begin(self): self._next_step = None self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access if self._global_step_tensor is None: raise RuntimeError("Global step should be created to use ProfilerHook.") def before_run(self, run_context): self._request_summary = ( self._next_step is not None and self._timer.should_trigger_for_step(self._next_step)) requests = {"global_step": self._global_step_tensor} opts = ( config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE) if self._request_summary else None) return SessionRunArgs(requests, options=opts) def after_run(self, run_context, run_values): stale_global_step = run_values.results["global_step"] if self._next_step is None: # Update the timer so that it does not activate until N steps or seconds # have passed. self._timer.update_last_triggered_step(stale_global_step) global_step = stale_global_step + 1 if self._request_summary: global_step = run_context.session.run(self._global_step_tensor) self._timer.update_last_triggered_step(global_step) self._save(global_step, self._output_file.format(global_step), run_values.run_metadata.step_stats) self._file_writer.add_run_metadata(run_values.run_metadata, "step_%d" % global_step) self._next_step = global_step + 1 def _save(self, step, save_path, step_stats): logging.info("Saving timeline for %d into '%s'.", step, save_path) with gfile.Open(save_path, "w") as f: trace = timeline.Timeline(step_stats) f.write( trace.generate_chrome_trace_format( show_dataflow=self._show_dataflow, show_memory=self._show_memory)) def _as_graph_element(obj): """Retrieves Graph element.""" graph = ops.get_default_graph() if not isinstance(obj, str): if not hasattr(obj, "graph") or obj.graph != graph: raise ValueError("Passed %s should have graph attribute that is equal " "to current graph %s." % (obj, graph)) return obj if ":" in obj: element = graph.as_graph_element(obj) else: element = graph.as_graph_element(obj + ":0") # Check that there is no :1 (e.g. it's single output). 
try: graph.as_graph_element(obj + ":1") except (KeyError, ValueError): pass else: raise ValueError("Name %s is ambiguous, " "as this `Operation` has multiple outputs " "(at least 2)." % obj) return element
ProfilerHook
python
doocs__leetcode
solution/1900-1999/1985.Find the Kth Largest Integer in the Array/Solution.py
{ "start": 0, "end": 142 }
class ____: def kthLargestNumber(self, nums: List[str], k: int) -> str: return nlargest(k, nums, key=lambda x: int(x))[k - 1]
Solution
python
pandas-dev__pandas
pandas/io/formats/excel.py
{ "start": 15272, "end": 35115 }
class ____: """ Class for formatting a DataFrame to a list of ExcelCells, Parameters ---------- df : DataFrame or Styler na_rep: na representation float_format : str, default None Format string for floating point numbers cols : sequence, optional Columns to write header : bool or sequence of str, default True Write out column names. If a list of string is given it is assumed to be aliases for the column names index : bool, default True output row names (index) index_label : str or sequence, default None Column label for index column(s) if desired. If None is given, and `header` and `index` are True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. merge_cells : bool or 'columns', default False Format MultiIndex column headers and Hierarchical Rows as merged cells if True. Merge MultiIndex column headers only if 'columns'. .. versionchanged:: 3.0.0 Added the 'columns' option. inf_rep : str, default `'inf'` representation for np.inf values (which aren't representable in Excel) A `'-'` sign will be added in front of -inf. style_converter : callable, optional This translates Styler styles (CSS) into ExcelWriter styles. Defaults to ``CSSToExcelConverter()``. It should have signature css_declarations string -> excel style. This is only called for body cells. autofilter : bool, default False If True, add automatic filters to all columns. """ max_rows = 2**20 max_cols = 2**14 def __init__( self, df, na_rep: str = "", float_format: str | None = None, cols: Sequence[Hashable] | None = None, header: Sequence[Hashable] | bool = True, index: bool = True, index_label: IndexLabel | None = None, merge_cells: ExcelWriterMergeCells = False, inf_rep: str = "inf", style_converter: Callable | None = None, autofilter: bool = False, ) -> None: self.rowcounter = 0 self.na_rep = na_rep if not isinstance(df, DataFrame): self.styler = df self.styler._compute() # calculate applied styles df = df.data if style_converter is None: style_converter = CSSToExcelConverter() self.style_converter: Callable | None = style_converter else: self.styler = None self.style_converter = None self.df = df if cols is not None: # all missing, raise if not len(Index(cols).intersection(df.columns)): raise KeyError("passes columns are not ALL present dataframe") if len(Index(cols).intersection(df.columns)) != len(set(cols)): # Deprecated in GH#17295, enforced in 1.0.0 raise KeyError("Not all names specified in 'columns' are found") self.df = df.reindex(columns=cols) self.columns = self.df.columns self.float_format = float_format self.index = index self.index_label = index_label self.header = header if not isinstance(merge_cells, bool) and merge_cells != "columns": raise ValueError(f"Unexpected value for {merge_cells=}.") self.merge_cells = merge_cells self.inf_rep = inf_rep self.autofilter = autofilter def _format_value(self, val): if is_scalar(val) and missing.isna(val): val = self.na_rep elif is_float(val): if missing.isposinf_scalar(val): val = self.inf_rep elif missing.isneginf_scalar(val): val = f"-{self.inf_rep}" elif self.float_format is not None: val = float(self.float_format % val) if getattr(val, "tzinfo", None) is not None: raise ValueError( "Excel does not support datetimes with " "timezones. Please ensure that datetimes " "are timezone unaware before writing to Excel." 
) return val def _format_header_mi(self) -> Iterable[ExcelCell]: if self.columns.nlevels > 1: if not self.index: raise NotImplementedError( "Writing to Excel with MultiIndex columns and no " "index ('index'=False) is not yet implemented." ) if not (self._has_aliases or self.header): return columns = self.columns merge_columns = self.merge_cells in {True, "columns"} level_strs = columns._format_multi(sparsify=merge_columns, include_names=False) level_lengths = get_level_lengths(level_strs) coloffset = 0 lnum = 0 if self.index and isinstance(self.df.index, MultiIndex): coloffset = self.df.index.nlevels - 1 for lnum, name in enumerate(columns.names): yield ExcelCell( row=lnum, col=coloffset, val=name, style=None, ) for lnum, (spans, levels, level_codes) in enumerate( zip(level_lengths, columns.levels, columns.codes, strict=True) ): values = levels.take(level_codes) for i, span_val in spans.items(): mergestart, mergeend = None, None if merge_columns and span_val > 1: mergestart, mergeend = lnum, coloffset + i + span_val yield CssExcelCell( row=lnum, col=coloffset + i + 1, val=values[i], style=None, css_styles=getattr(self.styler, "ctx_columns", None), css_row=lnum, css_col=i, css_converter=self.style_converter, mergestart=mergestart, mergeend=mergeend, ) self.rowcounter = lnum def _format_header_regular(self) -> Iterable[ExcelCell]: if self._has_aliases or self.header: coloffset = 0 if self.index: coloffset = 1 if isinstance(self.df.index, MultiIndex): coloffset = len(self.df.index.names) colnames = self.columns if self._has_aliases: self.header = cast(Sequence, self.header) if len(self.header) != len(self.columns): raise ValueError( f"Writing {len(self.columns)} cols " f"but got {len(self.header)} aliases" ) colnames = self.header for colindex, colname in enumerate(colnames): yield CssExcelCell( row=self.rowcounter, col=colindex + coloffset, val=colname, style=None, css_styles=getattr(self.styler, "ctx_columns", None), css_row=0, css_col=colindex, css_converter=self.style_converter, ) def _format_header(self) -> Iterable[ExcelCell]: gen: Iterable[ExcelCell] if isinstance(self.columns, MultiIndex): gen = self._format_header_mi() else: gen = self._format_header_regular() gen2: Iterable[ExcelCell] = () if self.df.index.names: row = [x if x is not None else "" for x in self.df.index.names] + [ "" ] * len(self.columns) if functools.reduce(lambda x, y: x and y, (x != "" for x in row)): gen2 = ( ExcelCell(self.rowcounter, colindex, val, None) for colindex, val in enumerate(row) ) self.rowcounter += 1 return itertools.chain(gen, gen2) def _format_body(self) -> Iterable[ExcelCell]: if isinstance(self.df.index, MultiIndex): return self._format_hierarchical_rows() else: return self._format_regular_rows() def _format_regular_rows(self) -> Iterable[ExcelCell]: if self._has_aliases or self.header: self.rowcounter += 1 # output index and index_label? 
if self.index: # check aliases # if list only take first as this is not a MultiIndex if self.index_label and isinstance( self.index_label, (list, tuple, np.ndarray, Index) ): index_label = self.index_label[0] # if string good to go elif self.index_label and isinstance(self.index_label, str): index_label = self.index_label else: index_label = self.df.index.names[0] if isinstance(self.columns, MultiIndex): self.rowcounter += 1 if index_label and self.header is not False: yield ExcelCell(self.rowcounter - 1, 0, index_label, None) # write index_values index_values = self.df.index if isinstance(self.df.index, PeriodIndex): index_values = self.df.index.to_timestamp() for idx, idxval in enumerate(index_values): yield CssExcelCell( row=self.rowcounter + idx, col=0, val=idxval, style=None, css_styles=getattr(self.styler, "ctx_index", None), css_row=idx, css_col=0, css_converter=self.style_converter, ) coloffset = 1 else: coloffset = 0 yield from self._generate_body(coloffset) def _format_hierarchical_rows(self) -> Iterable[ExcelCell]: if self._has_aliases or self.header: self.rowcounter += 1 gcolidx = 0 if self.index: index_labels = self.df.index.names # check for aliases if self.index_label and isinstance( self.index_label, (list, tuple, np.ndarray, Index) ): index_labels = self.index_label # MultiIndex columns require an extra row # with index names (blank if None) for # unambiguous round-trip, Issue #11328 if isinstance(self.columns, MultiIndex): self.rowcounter += 1 # if index labels are not empty go ahead and dump if com.any_not_none(*index_labels) and self.header is not False: for cidx, name in enumerate(index_labels): yield ExcelCell(self.rowcounter - 1, cidx, name, None) if self.merge_cells and self.merge_cells != "columns": # Format hierarchical rows as merged cells. level_strs = self.df.index._format_multi( sparsify=True, include_names=False ) level_lengths = get_level_lengths(level_strs) for spans, levels, level_codes in zip( level_lengths, self.df.index.levels, self.df.index.codes, strict=False, ): values = levels.take( level_codes, allow_fill=levels._can_hold_na, fill_value=levels._na_value, ) # GH#60099 if isinstance(values[0], Period): values = values.to_timestamp() for i, span_val in spans.items(): mergestart, mergeend = None, None if span_val > 1: mergestart = self.rowcounter + i + span_val - 1 mergeend = gcolidx yield CssExcelCell( row=self.rowcounter + i, col=gcolidx, val=values[i], style=None, css_styles=getattr(self.styler, "ctx_index", None), css_row=i, css_col=gcolidx, css_converter=self.style_converter, mergestart=mergestart, mergeend=mergeend, ) gcolidx += 1 else: # Format hierarchical rows with non-merged values. for indexcolvals in zip(*self.df.index, strict=True): for idx, indexcolval in enumerate(indexcolvals): # GH#60099 if isinstance(indexcolval, Period): indexcolval = indexcolval.to_timestamp() yield CssExcelCell( row=self.rowcounter + idx, col=gcolidx, val=indexcolval, style=None, css_styles=getattr(self.styler, "ctx_index", None), css_row=idx, css_col=gcolidx, css_converter=self.style_converter, ) gcolidx += 1 yield from self._generate_body(gcolidx) @property def _has_aliases(self) -> bool: """Whether the aliases for column names are present.""" return is_list_like(self.header) def _generate_body(self, coloffset: int) -> Iterable[ExcelCell]: # Write the body of the frame data series by series. 
for colidx in range(len(self.columns)): series = self.df.iloc[:, colidx] for i, val in enumerate(series): yield CssExcelCell( row=self.rowcounter + i, col=colidx + coloffset, val=val, style=None, css_styles=getattr(self.styler, "ctx", None), css_row=i, css_col=colidx, css_converter=self.style_converter, ) def get_formatted_cells(self) -> Iterable[ExcelCell]: for cell in itertools.chain(self._format_header(), self._format_body()): cell.val = self._format_value(cell.val) yield cell def _num2excel(self, index: int) -> str: """ Convert 0-based column index to Excel column name. Parameters ---------- index : int The numeric column index to convert to a Excel column name. Returns ------- column_name : str The column name corresponding to the index. Raises ------ ValueError Index is negative """ if index < 0: raise ValueError(f"Index cannot be negative: {index}") column_name = "" # while loop in case column name needs to be longer than 1 character while index > 0 or not column_name: index, remainder = divmod(index, 26) column_name = chr(65 + remainder) + column_name return column_name @doc(storage_options=_shared_docs["storage_options"]) def write( self, writer: FilePath | WriteExcelBuffer | ExcelWriter, sheet_name: str = "Sheet1", startrow: int = 0, startcol: int = 0, freeze_panes: tuple[int, int] | None = None, engine: str | None = None, storage_options: StorageOptions | None = None, engine_kwargs: dict | None = None, ) -> None: """ writer : path-like, file-like, or ExcelWriter object File path or existing ExcelWriter sheet_name : str, default 'Sheet1' Name of sheet which will contain DataFrame startrow : upper left cell row to dump data frame startcol : upper left cell column to dump data frame freeze_panes : tuple of integer (length 2), default None Specifies the one-based bottommost row and rightmost column that is to be frozen engine : string, default None write engine to use if writer is a path - you can also set this via the options ``io.excel.xlsx.writer``, or ``io.excel.xlsm.writer``. {storage_options} engine_kwargs: dict, optional Arbitrary keyword arguments passed to excel engine. """ from pandas.io.excel import ExcelWriter num_rows, num_cols = self.df.shape if num_rows > self.max_rows or num_cols > self.max_cols: raise ValueError( f"This sheet is too large! Your sheet size is: {num_rows}, {num_cols} " f"Max sheet size is: {self.max_rows}, {self.max_cols}" ) if self.autofilter: # default offset for header row startrowsoffset = 1 endrowsoffset = 1 if num_cols == 0: indexoffset = 0 elif self.index: indexoffset = 0 if isinstance(self.df.index, MultiIndex): if self.merge_cells: raise ValueError( "Excel filters merged cells by showing only the first row. " "'autofilter' and 'merge_cells' cannot " "be used simultaneously." ) else: indexoffset = self.df.index.nlevels - 1 if isinstance(self.columns, MultiIndex): if self.merge_cells: raise ValueError( "Excel filters merged cells by showing only the first row. " "'autofilter' and 'merge_cells' cannot " "be used simultaneously." 
) else: startrowsoffset = self.columns.nlevels # multindex columns add a blank row between header and data endrowsoffset = self.columns.nlevels + 1 else: # no index column indexoffset = -1 start = f"{self._num2excel(startcol)}{startrow + startrowsoffset}" autofilter_end_column = self._num2excel(startcol + num_cols + indexoffset) end = f"{autofilter_end_column}{startrow + num_rows + endrowsoffset}" autofilter_range = f"{start}:{end}" else: autofilter_range = None if engine_kwargs is None: engine_kwargs = {} formatted_cells = self.get_formatted_cells() if isinstance(writer, ExcelWriter): need_save = False else: writer = ExcelWriter( writer, engine=engine, storage_options=storage_options, engine_kwargs=engine_kwargs, ) need_save = True try: writer._write_cells( formatted_cells, sheet_name, startrow=startrow, startcol=startcol, freeze_panes=freeze_panes, autofilter_range=autofilter_range, ) finally: # make sure to close opened file handles if need_save: writer.close()
ExcelFormatter
python
walkccc__LeetCode
solutions/2518. Number of Great Partitions/2518.py
{ "start": 0, "end": 508 }
class ____: def countPartitions(self, nums: list[int], k: int) -> int: MOD = 1_000_000_007 summ = sum(nums) ans = pow(2, len(nums), MOD) # 2^n % MOD dp = [1] + [0] * k for num in nums: for i in range(k, num - 1, -1): dp[i] += dp[i - num] dp[i] %= MOD # Subtract the cases that aren't satisfied. for i in range(k): if summ - i < k: # Both group1 and group2 < k. ans -= dp[i] else: ans -= dp[i] * 2 return ans % MOD
Solution
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_chart_format08.py
{ "start": 315, "end": 1561 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("chart_format08.xlsx") def test_create_file(self): """Test the creation of an XlsxWriter file with chart formatting.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() chart = workbook.add_chart({"type": "line"}) chart.axis_ids = [46164608, 46176128] data = [ [1, 2, 3, 4, 5], [2, 4, 6, 8, 10], [3, 6, 9, 12, 15], ] worksheet.write_column("A1", data[0]) worksheet.write_column("B1", data[1]) worksheet.write_column("C1", data[2]) chart.add_series( { "categories": "=Sheet1!$A$1:$A$5", "values": "=Sheet1!$B$1:$B$5", "trendline": {"type": "linear"}, } ) chart.add_series( { "categories": "=Sheet1!$A$1:$A$5", "values": "=Sheet1!$C$1:$C$5", } ) worksheet.insert_chart("E9", chart) workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
pypa__warehouse
warehouse/manage/views/__init__.py
{ "start": 28137, "end": 31396 }
class ____: def __init__(self, request): self.request = request self.user_service = request.find_service(IUserService, context=None) @view_config( request_method="GET", route_name="manage.account.recovery-codes.generate", renderer="warehouse:templates/manage/account/recovery_codes-provision.html", require_reauth=10, # 10 seconds ) def recovery_codes_generate(self): if self.user_service.has_recovery_codes(self.request.user.id): return { "recovery_codes": None, "_error": self.request._("Recovery codes already generated"), "_message": self.request._( "Generating new recovery codes will invalidate your existing codes." ), } recovery_codes = self.user_service.generate_recovery_codes(self.request.user.id) send_recovery_codes_generated_email(self.request, self.request.user) self.request.user.record_event( tag=EventTag.Account.RecoveryCodesGenerated, request=self.request, ) return {"recovery_codes": recovery_codes} @view_config( request_method="GET", route_name="manage.account.recovery-codes.regenerate", renderer="warehouse:templates/manage/account/recovery_codes-provision.html", require_reauth=10, # 10 seconds ) def recovery_codes_regenerate(self): recovery_codes = self.user_service.generate_recovery_codes(self.request.user.id) send_recovery_codes_generated_email(self.request, self.request.user) self.request.user.record_event( tag=EventTag.Account.RecoveryCodesRegenerated, request=self.request, ) return {"recovery_codes": recovery_codes} @view_config( route_name="manage.account.recovery-codes.burn", renderer="warehouse:templates/manage/account/recovery_codes-burn.html", ) def recovery_codes_burn(self, _form_class=RecoveryCodeAuthenticationForm): user = self.user_service.get_user(self.request.user.id) if not user.has_recovery_codes: return HTTPSeeOther(self.request.route_path("manage.account")) if user.has_burned_recovery_codes: return HTTPSeeOther(self.request.route_path("manage.account.two-factor")) form = _form_class( self.request.POST, request=self.request, user_id=user.id, user_service=self.user_service, ) if self.request.method == "POST" and form.validate(): self.request.session.flash( self.request._( "Recovery code accepted. The supplied code cannot be used again." ), queue="success", ) return HTTPSeeOther(self.request.route_path("manage.account.two-factor")) return {"form": form} @view_defaults( uses_session=True, require_csrf=True, require_methods=False, permission=Permissions.AccountAPITokens, renderer="warehouse:templates/manage/account/token.html", route_name="manage.account.token", has_translations=True, require_reauth=True, )
ProvisionRecoveryCodesViews
python
ray-project__ray
python/ray/tune/tests/test_trainable_util.py
{ "start": 4615, "end": 5499 }
class ____(unittest.TestCase): def setUp(self): sys.modules["GPUtil"] = GPUUtilMock([0, 1], ["GPU-aaa", "GPU-bbb"]) def testGPUWait1(self): wait_for_gpu(0, delay_s=0) def testGPUWait2(self): wait_for_gpu("1", delay_s=0) def testGPUWait3(self): wait_for_gpu("GPU-aaa", delay_s=0) def testGPUWaitFail(self): with self.assertRaises(ValueError): wait_for_gpu(2, delay_s=0) with self.assertRaises(ValueError): wait_for_gpu("4", delay_s=0) with self.assertRaises(ValueError): wait_for_gpu(1.23, delay_s=0) @patch("ray.get_gpu_ids", lambda: ["0"]) def testDefaultGPU(self): import sys sys.modules["GPUtil"] = GPUUtilMock([0], ["GPU-aaa"]) wait_for_gpu(delay_s=0) if __name__ == "__main__": sys.exit(pytest.main(["-v", __file__]))
GPUTest
python
bokeh__bokeh
src/bokeh/models/scales.py
{ "start": 3207, "end": 3510 }
class ____(Scale): ''' Represent a scale transformation between a categorical source range and continuous target range. ''' # explicit __init__ to support Init signatures def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs)
CategoricalScale
python
apache__airflow
providers/fab/src/airflow/providers/fab/auth_manager/views/user_edit.py
{ "start": 1788, "end": 2158 }
class ____(ResetPasswordView):
    """Customize permission names for FAB's builtin ResetPasswordView."""

    class_permission_name = permissions.RESOURCE_PASSWORD
    method_permission_name = {
        "this_form_get": "read",
        "this_form_post": "edit",
    }
    base_permissions = [permissions.ACTION_CAN_EDIT, permissions.ACTION_CAN_READ]
CustomResetPasswordView
python
getsentry__sentry
src/sentry/web/frontend/csv.py
{ "start": 411, "end": 1216 }
class ____(Generic[T]):
    def get_header(self) -> tuple[str, ...]:
        raise NotImplementedError

    def get_row(self, item: T) -> tuple[str, ...]:
        raise NotImplementedError

    def respond(self, iterable: Iterable[T], filename: str) -> StreamingHttpResponse:
        def row_iter() -> Generator[tuple[str, ...]]:
            header = self.get_header()
            if header:
                yield header

            for item in iterable:
                yield self.get_row(item)

        pseudo_buffer = Echo()
        writer = csv.writer(pseudo_buffer)
        return StreamingHttpResponse(
            (writer.writerow(r) for r in row_iter()),
            content_type="text/csv",
            headers={"Content-Disposition": f'attachment; filename="{filename}.csv"'},
        )
CsvResponder
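A brief sketch of how a subclass of the class above would be used, assuming the masked name is restored to its target `CsvResponder` and that this runs inside a configured Django project (StreamingHttpResponse needs Django settings). `UserCsvResponder` and its fields are hypothetical names for illustration.

# Hedged sketch: subclasses provide the header and per-item rows; respond()
# streams them out as a CSV attachment.
class UserCsvResponder(CsvResponder[dict]):
    def get_header(self) -> tuple[str, ...]:
        return ("id", "email")

    def get_row(self, item: dict) -> tuple[str, ...]:
        return (str(item["id"]), item["email"])

# inside a Django view:
# return UserCsvResponder().respond(({"id": 1, "email": "a@example.com"},), "users")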
python
sympy__sympy
sympy/plotting/pygletplot/color_scheme.py
{ "start": 1521, "end": 12522 }
class ____: def __init__(self, *args, **kwargs): self.args = args self.f, self.gradient = None, ColorGradient() if len(args) == 1 and not isinstance(args[0], Basic) and callable(args[0]): self.f = args[0] elif len(args) == 1 and isinstance(args[0], str): if args[0] in default_color_schemes: cs = default_color_schemes[args[0]] self.f, self.gradient = cs.f, cs.gradient.copy() else: self.f = lambdify('x,y,z,u,v', args[0]) else: self.f, self.gradient = self._interpret_args(args) self._test_color_function() if not isinstance(self.gradient, ColorGradient): raise ValueError("Color gradient not properly initialized. " "(Not a ColorGradient instance.)") def _interpret_args(self, args): f, gradient = None, self.gradient atoms, lists = self._sort_args(args) s = self._pop_symbol_list(lists) s = self._fill_in_vars(s) # prepare the error message for lambdification failure f_str = ', '.join(str(fa) for fa in atoms) s_str = (str(sa) for sa in s) s_str = ', '.join(sa for sa in s_str if sa.find('unbound') < 0) f_error = ValueError("Could not interpret arguments " "%s as functions of %s." % (f_str, s_str)) # try to lambdify args if len(atoms) == 1: fv = atoms[0] try: f = lambdify(s, [fv, fv, fv]) except TypeError: raise f_error elif len(atoms) == 3: fr, fg, fb = atoms try: f = lambdify(s, [fr, fg, fb]) except TypeError: raise f_error else: raise ValueError("A ColorScheme must provide 1 or 3 " "functions in x, y, z, u, and/or v.") # try to intrepret any given color information if len(lists) == 0: gargs = [] elif len(lists) == 1: gargs = lists[0] elif len(lists) == 2: try: (r1, g1, b1), (r2, g2, b2) = lists except TypeError: raise ValueError("If two color arguments are given, " "they must be given in the format " "(r1, g1, b1), (r2, g2, b2).") gargs = lists elif len(lists) == 3: try: (r1, r2), (g1, g2), (b1, b2) = lists except Exception: raise ValueError("If three color arguments are given, " "they must be given in the format " "(r1, r2), (g1, g2), (b1, b2). To create " "a multi-step gradient, use the syntax " "[0, colorStart, step1, color1, ..., 1, " "colorEnd].") gargs = [[r1, g1, b1], [r2, g2, b2]] else: raise ValueError("Don't know what to do with collection " "arguments %s." % (', '.join(str(l) for l in lists))) if gargs: try: gradient = ColorGradient(*gargs) except Exception as ex: raise ValueError(("Could not initialize a gradient " "with arguments %s. 
Inner " "exception: %s") % (gargs, str(ex))) return f, gradient def _pop_symbol_list(self, lists): symbol_lists = [] for l in lists: mark = True for s in l: if s is not None and not isinstance(s, Symbol): mark = False break if mark: lists.remove(l) symbol_lists.append(l) if len(symbol_lists) == 1: return symbol_lists[0] elif len(symbol_lists) == 0: return [] else: raise ValueError("Only one list of Symbols " "can be given for a color scheme.") def _fill_in_vars(self, args): defaults = symbols('x,y,z,u,v') v_error = ValueError("Could not find what to plot.") if len(args) == 0: return defaults if not isinstance(args, (tuple, list)): raise v_error if len(args) == 0: return defaults for s in args: if s is not None and not isinstance(s, Symbol): raise v_error # when vars are given explicitly, any vars # not given are marked 'unbound' as to not # be accidentally used in an expression vars = [Symbol('unbound%i' % (i)) for i in range(1, 6)] # interpret as t if len(args) == 1: vars[3] = args[0] # interpret as u,v elif len(args) == 2: if args[0] is not None: vars[3] = args[0] if args[1] is not None: vars[4] = args[1] # interpret as x,y,z elif len(args) >= 3: # allow some of x,y,z to be # left unbound if not given if args[0] is not None: vars[0] = args[0] if args[1] is not None: vars[1] = args[1] if args[2] is not None: vars[2] = args[2] # interpret the rest as t if len(args) >= 4: vars[3] = args[3] # ...or u,v if len(args) >= 5: vars[4] = args[4] return vars def _sort_args(self, args): lists, atoms = sift(args, lambda a: isinstance(a, (tuple, list)), binary=True) return atoms, lists def _test_color_function(self): if not callable(self.f): raise ValueError("Color function is not callable.") try: result = self.f(0, 0, 0, 0, 0) if len(result) != 3: raise ValueError("length should be equal to 3") except TypeError: raise ValueError("Color function needs to accept x,y,z,u,v, " "as arguments even if it doesn't use all of them.") except AssertionError: raise ValueError("Color function needs to return 3-tuple r,g,b.") except Exception: pass # color function probably not valid at 0,0,0,0,0 def __call__(self, x, y, z, u, v): try: return self.f(x, y, z, u, v) except Exception: return None def apply_to_curve(self, verts, u_set, set_len=None, inc_pos=None): """ Apply this color scheme to a set of vertices over a single independent variable u. """ bounds = create_bounds() cverts = [] if callable(set_len): set_len(len(u_set)*2) # calculate f() = r,g,b for each vert # and find the min and max for r,g,b for _u in range(len(u_set)): if verts[_u] is None: cverts.append(None) else: x, y, z = verts[_u] u, v = u_set[_u], None c = self(x, y, z, u, v) if c is not None: c = list(c) update_bounds(bounds, c) cverts.append(c) if callable(inc_pos): inc_pos() # scale and apply gradient for _u in range(len(u_set)): if cverts[_u] is not None: for _c in range(3): # scale from [f_min, f_max] to [0,1] cverts[_u][_c] = rinterpolate(bounds[_c][0], bounds[_c][1], cverts[_u][_c]) # apply gradient cverts[_u] = self.gradient(*cverts[_u]) if callable(inc_pos): inc_pos() return cverts def apply_to_surface(self, verts, u_set, v_set, set_len=None, inc_pos=None): """ Apply this color scheme to a set of vertices over two independent variables u and v. 
""" bounds = create_bounds() cverts = [] if callable(set_len): set_len(len(u_set)*len(v_set)*2) # calculate f() = r,g,b for each vert # and find the min and max for r,g,b for _u in range(len(u_set)): column = [] for _v in range(len(v_set)): if verts[_u][_v] is None: column.append(None) else: x, y, z = verts[_u][_v] u, v = u_set[_u], v_set[_v] c = self(x, y, z, u, v) if c is not None: c = list(c) update_bounds(bounds, c) column.append(c) if callable(inc_pos): inc_pos() cverts.append(column) # scale and apply gradient for _u in range(len(u_set)): for _v in range(len(v_set)): if cverts[_u][_v] is not None: # scale from [f_min, f_max] to [0,1] for _c in range(3): cverts[_u][_v][_c] = rinterpolate(bounds[_c][0], bounds[_c][1], cverts[_u][_v][_c]) # apply gradient cverts[_u][_v] = self.gradient(*cverts[_u][_v]) if callable(inc_pos): inc_pos() return cverts def str_base(self): return ", ".join(str(a) for a in self.args) def __repr__(self): return "%s" % (self.str_base()) x, y, z, t, u, v = symbols('x,y,z,t,u,v') default_color_schemes['rainbow'] = ColorScheme(z, y, x) default_color_schemes['zfade'] = ColorScheme(z, (0.4, 0.4, 0.97), (0.97, 0.4, 0.4), (None, None, z)) default_color_schemes['zfade3'] = ColorScheme(z, (None, None, z), [0.00, (0.2, 0.2, 1.0), 0.35, (0.2, 0.8, 0.4), 0.50, (0.3, 0.9, 0.3), 0.65, (0.4, 0.8, 0.2), 1.00, (1.0, 0.2, 0.2)]) default_color_schemes['zfade4'] = ColorScheme(z, (None, None, z), [0.0, (0.3, 0.3, 1.0), 0.30, (0.3, 1.0, 0.3), 0.55, (0.95, 1.0, 0.2), 0.65, (1.0, 0.95, 0.2), 0.85, (1.0, 0.7, 0.2), 1.0, (1.0, 0.3, 0.2)])
ColorScheme
python
tensorflow__tensorflow
tensorflow/python/types/core.py
{ "start": 1535, "end": 2165 }
class ____(object):
  """The base class of all dense Tensor objects.

  A dense tensor has a static data type (dtype), and may have a static rank and
  shape. Tensor objects are immutable. Mutable objects may be backed by a Tensor
  which holds the unique handle that identifies the mutable object.
  """

  @property
  def dtype(self):
    pass

  @property
  def shape(self):
    pass


# `ops.EagerTensor` subclasses `Symbol` by way of subclassing `tensor.Tensor`;
# care should be taken when performing `isinstance` checks on `Value`, e.g.:
#
# ```
# if isinstance(core.Symbol) and not isinstance(core.Value):
#   ...
# ```
Tensor
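A small sketch of what the base class above means in practice: concrete TensorFlow tensors expose the `dtype` and `shape` properties it declares. This is an illustrative check, not part of the dataset record.

# Hedged sketch: eager tensors are instances of the public tf.Tensor type.
import tensorflow as tf

t = tf.constant([1.0, 2.0])
print(isinstance(t, tf.Tensor))  # True
print(t.dtype, t.shape)          # float32 dtype, shape (2,)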
python
getsentry__sentry
src/sentry/sentry_apps/services/hook/service.py
{ "start": 580, "end": 3062 }
class ____(RpcService): key = "hook" local_mode = SiloMode.REGION @classmethod def get_local_implementation(cls) -> RpcService: from sentry.sentry_apps.services.hook.impl import DatabaseBackedHookService return DatabaseBackedHookService() @regional_rpc_method(ByOrganizationId()) @abc.abstractmethod def create_service_hook( self, *, application_id: int | None = None, actor_id: int = -1, installation_id: int | None = None, organization_id: int = -1, project_ids: list[int] | None = None, events: list[str] | None = None, url: str = "", ) -> RpcServiceHook: pass @regional_rpc_method(ByOrganizationId()) @abc.abstractmethod def update_webhook_and_events( self, *, organization_id: int, application_id: int | None, webhook_url: str | None, events: list[str], ) -> list[RpcServiceHook]: """ Update ALL webhooks for a given sentry app (region determined by organization_id). """ pass @regional_rpc_method(ByRegionName()) @abc.abstractmethod def update_webhook_and_events_for_app_by_region( self, *, application_id: int | None, webhook_url: str | None, events: list[str], region_name: str, ) -> list[RpcServiceHook]: """ Update ALL webhooks in a given region for a sentry app. """ pass @regional_rpc_method(ByOrganizationId()) @abc.abstractmethod def create_or_update_webhook_and_events_for_installation( self, *, installation_id: int, organization_id: int, webhook_url: str | None, events: list[str], application_id: int, ) -> list[RpcServiceHook]: """ Update the webhook and events for a given sentry app installation. """ pass @regional_rpc_method(ByRegionName()) @abc.abstractmethod def bulk_create_service_hooks_for_app( self, *, region_name: str, application_id: int, events: list[str], installation_organization_ids: list[RpcInstallationOrganizationPair], url: str, ) -> list[RpcServiceHook]: """Meant for bulk creating ServiceHooks for all installations of a given Sentry App in a given region""" pass hook_service = HookService.create_delegation()
HookService
python
pandas-dev__pandas
pandas/core/arrays/floating.py
{ "start": 4473, "end": 4765 }
class ____(FloatingDtype):
    type = np.float64
    name: ClassVar[str] = "Float64"
    __doc__ = _dtype_docstring.format(dtype="float64")


NUMPY_FLOAT_TO_DTYPE: dict[np.dtype, FloatingDtype] = {
    np.dtype(np.float32): Float32Dtype(),
    np.dtype(np.float64): Float64Dtype(),
}
Float64Dtype
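A short usage sketch for the dtype defined above: the registered "Float64" string alias can be passed to pandas constructors to get a nullable floating array. Values shown in the comments are what recent pandas versions produce; treat them as illustrative.

# Hedged sketch: nullable float array via the "Float64" extension dtype.
import pandas as pd

arr = pd.array([1.5, None, 3.0], dtype="Float64")
print(arr.dtype)              # Float64
print(arr.isna().tolist())    # [False, True, False]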
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_cond_format14.py
{ "start": 315, "end": 1262 }
class ____(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.

    """

    def setUp(self):
        self.set_filename("cond_format14.xlsx")

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file with conditional formatting."""
        workbook = Workbook(self.got_filename)

        worksheet = workbook.add_worksheet()

        cell_format = workbook.add_format({"bg_color": "red"})

        worksheet.write("A1", 10)
        worksheet.write("A2", 20)
        worksheet.write("A3", 30)
        worksheet.write("A4", 40)

        worksheet.conditional_format(
            "A1",
            {
                "type": "cell",
                "format": cell_format,
                "criteria": ">",
                "value": 5,
                "stop_if_true": True,
            },
        )

        workbook.close()

        self.assertExcelEqual()
TestCompareXLSXFiles
python
sympy__sympy
sympy/stats/drv_types.py
{ "start": 5031, "end": 6579 }
class ____(SingleDiscreteDistribution):
    _argnames = ('p',)
    set = S.Naturals

    @staticmethod
    def check(p):
        _value_check((0 < p, p <= 1), "p must be between 0 and 1")

    def pdf(self, k):
        return (1 - self.p)**(k - 1) * self.p

    def _characteristic_function(self, t):
        p = self.p
        return p * exp(I*t) / (1 - (1 - p)*exp(I*t))

    def _moment_generating_function(self, t):
        p = self.p
        return p * exp(t) / (1 - (1 - p) * exp(t))


def Geometric(name, p):
    r"""
    Create a discrete random variable with a Geometric distribution.

    Explanation
    ===========

    The density of the Geometric distribution is given by

    .. math::
        f(k) := p (1 - p)^{k - 1}

    Parameters
    ==========

    p : A probability between 0 and 1

    Returns
    =======

    RandomSymbol

    Examples
    ========

    >>> from sympy.stats import Geometric, density, E, variance
    >>> from sympy import Symbol, S

    >>> p = S.One / 5
    >>> z = Symbol("z")

    >>> X = Geometric("x", p)

    >>> density(X)(z)
    (4/5)**(z - 1)/5

    >>> E(X)
    5

    >>> variance(X)
    20

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Geometric_distribution
    .. [2] https://mathworld.wolfram.com/GeometricDistribution.html

    """
    return rv(name, GeometricDistribution, p)

#-------------------------------------------------------------------------------
# Hermite distribution ---------------------------------------------------------
GeometricDistribution
python
pypa__pip
src/pip/_vendor/pygments/filters/__init__.py
{ "start": 2862, "end": 31761 }
class ____(Filter): """Convert mathematical symbols such as \\<longrightarrow> in Isabelle or \\longrightarrow in LaTeX into Unicode characters. This is mostly useful for HTML or console output when you want to approximate the source rendering you'd see in an IDE. Options accepted: `lang` : string The symbol language. Must be one of ``'isabelle'`` or ``'latex'``. The default is ``'isabelle'``. """ latex_symbols = { '\\alpha' : '\U000003b1', '\\beta' : '\U000003b2', '\\gamma' : '\U000003b3', '\\delta' : '\U000003b4', '\\varepsilon' : '\U000003b5', '\\zeta' : '\U000003b6', '\\eta' : '\U000003b7', '\\vartheta' : '\U000003b8', '\\iota' : '\U000003b9', '\\kappa' : '\U000003ba', '\\lambda' : '\U000003bb', '\\mu' : '\U000003bc', '\\nu' : '\U000003bd', '\\xi' : '\U000003be', '\\pi' : '\U000003c0', '\\varrho' : '\U000003c1', '\\sigma' : '\U000003c3', '\\tau' : '\U000003c4', '\\upsilon' : '\U000003c5', '\\varphi' : '\U000003c6', '\\chi' : '\U000003c7', '\\psi' : '\U000003c8', '\\omega' : '\U000003c9', '\\Gamma' : '\U00000393', '\\Delta' : '\U00000394', '\\Theta' : '\U00000398', '\\Lambda' : '\U0000039b', '\\Xi' : '\U0000039e', '\\Pi' : '\U000003a0', '\\Sigma' : '\U000003a3', '\\Upsilon' : '\U000003a5', '\\Phi' : '\U000003a6', '\\Psi' : '\U000003a8', '\\Omega' : '\U000003a9', '\\leftarrow' : '\U00002190', '\\longleftarrow' : '\U000027f5', '\\rightarrow' : '\U00002192', '\\longrightarrow' : '\U000027f6', '\\Leftarrow' : '\U000021d0', '\\Longleftarrow' : '\U000027f8', '\\Rightarrow' : '\U000021d2', '\\Longrightarrow' : '\U000027f9', '\\leftrightarrow' : '\U00002194', '\\longleftrightarrow' : '\U000027f7', '\\Leftrightarrow' : '\U000021d4', '\\Longleftrightarrow' : '\U000027fa', '\\mapsto' : '\U000021a6', '\\longmapsto' : '\U000027fc', '\\relbar' : '\U00002500', '\\Relbar' : '\U00002550', '\\hookleftarrow' : '\U000021a9', '\\hookrightarrow' : '\U000021aa', '\\leftharpoondown' : '\U000021bd', '\\rightharpoondown' : '\U000021c1', '\\leftharpoonup' : '\U000021bc', '\\rightharpoonup' : '\U000021c0', '\\rightleftharpoons' : '\U000021cc', '\\leadsto' : '\U0000219d', '\\downharpoonleft' : '\U000021c3', '\\downharpoonright' : '\U000021c2', '\\upharpoonleft' : '\U000021bf', '\\upharpoonright' : '\U000021be', '\\restriction' : '\U000021be', '\\uparrow' : '\U00002191', '\\Uparrow' : '\U000021d1', '\\downarrow' : '\U00002193', '\\Downarrow' : '\U000021d3', '\\updownarrow' : '\U00002195', '\\Updownarrow' : '\U000021d5', '\\langle' : '\U000027e8', '\\rangle' : '\U000027e9', '\\lceil' : '\U00002308', '\\rceil' : '\U00002309', '\\lfloor' : '\U0000230a', '\\rfloor' : '\U0000230b', '\\flqq' : '\U000000ab', '\\frqq' : '\U000000bb', '\\bot' : '\U000022a5', '\\top' : '\U000022a4', '\\wedge' : '\U00002227', '\\bigwedge' : '\U000022c0', '\\vee' : '\U00002228', '\\bigvee' : '\U000022c1', '\\forall' : '\U00002200', '\\exists' : '\U00002203', '\\nexists' : '\U00002204', '\\neg' : '\U000000ac', '\\Box' : '\U000025a1', '\\Diamond' : '\U000025c7', '\\vdash' : '\U000022a2', '\\models' : '\U000022a8', '\\dashv' : '\U000022a3', '\\surd' : '\U0000221a', '\\le' : '\U00002264', '\\ge' : '\U00002265', '\\ll' : '\U0000226a', '\\gg' : '\U0000226b', '\\lesssim' : '\U00002272', '\\gtrsim' : '\U00002273', '\\lessapprox' : '\U00002a85', '\\gtrapprox' : '\U00002a86', '\\in' : '\U00002208', '\\notin' : '\U00002209', '\\subset' : '\U00002282', '\\supset' : '\U00002283', '\\subseteq' : '\U00002286', '\\supseteq' : '\U00002287', '\\sqsubset' : '\U0000228f', '\\sqsupset' : '\U00002290', '\\sqsubseteq' : '\U00002291', '\\sqsupseteq' : '\U00002292', 
'\\cap' : '\U00002229', '\\bigcap' : '\U000022c2', '\\cup' : '\U0000222a', '\\bigcup' : '\U000022c3', '\\sqcup' : '\U00002294', '\\bigsqcup' : '\U00002a06', '\\sqcap' : '\U00002293', '\\Bigsqcap' : '\U00002a05', '\\setminus' : '\U00002216', '\\propto' : '\U0000221d', '\\uplus' : '\U0000228e', '\\bigplus' : '\U00002a04', '\\sim' : '\U0000223c', '\\doteq' : '\U00002250', '\\simeq' : '\U00002243', '\\approx' : '\U00002248', '\\asymp' : '\U0000224d', '\\cong' : '\U00002245', '\\equiv' : '\U00002261', '\\Join' : '\U000022c8', '\\bowtie' : '\U00002a1d', '\\prec' : '\U0000227a', '\\succ' : '\U0000227b', '\\preceq' : '\U0000227c', '\\succeq' : '\U0000227d', '\\parallel' : '\U00002225', '\\mid' : '\U000000a6', '\\pm' : '\U000000b1', '\\mp' : '\U00002213', '\\times' : '\U000000d7', '\\div' : '\U000000f7', '\\cdot' : '\U000022c5', '\\star' : '\U000022c6', '\\circ' : '\U00002218', '\\dagger' : '\U00002020', '\\ddagger' : '\U00002021', '\\lhd' : '\U000022b2', '\\rhd' : '\U000022b3', '\\unlhd' : '\U000022b4', '\\unrhd' : '\U000022b5', '\\triangleleft' : '\U000025c3', '\\triangleright' : '\U000025b9', '\\triangle' : '\U000025b3', '\\triangleq' : '\U0000225c', '\\oplus' : '\U00002295', '\\bigoplus' : '\U00002a01', '\\otimes' : '\U00002297', '\\bigotimes' : '\U00002a02', '\\odot' : '\U00002299', '\\bigodot' : '\U00002a00', '\\ominus' : '\U00002296', '\\oslash' : '\U00002298', '\\dots' : '\U00002026', '\\cdots' : '\U000022ef', '\\sum' : '\U00002211', '\\prod' : '\U0000220f', '\\coprod' : '\U00002210', '\\infty' : '\U0000221e', '\\int' : '\U0000222b', '\\oint' : '\U0000222e', '\\clubsuit' : '\U00002663', '\\diamondsuit' : '\U00002662', '\\heartsuit' : '\U00002661', '\\spadesuit' : '\U00002660', '\\aleph' : '\U00002135', '\\emptyset' : '\U00002205', '\\nabla' : '\U00002207', '\\partial' : '\U00002202', '\\flat' : '\U0000266d', '\\natural' : '\U0000266e', '\\sharp' : '\U0000266f', '\\angle' : '\U00002220', '\\copyright' : '\U000000a9', '\\textregistered' : '\U000000ae', '\\textonequarter' : '\U000000bc', '\\textonehalf' : '\U000000bd', '\\textthreequarters' : '\U000000be', '\\textordfeminine' : '\U000000aa', '\\textordmasculine' : '\U000000ba', '\\euro' : '\U000020ac', '\\pounds' : '\U000000a3', '\\yen' : '\U000000a5', '\\textcent' : '\U000000a2', '\\textcurrency' : '\U000000a4', '\\textdegree' : '\U000000b0', } isabelle_symbols = { '\\<zero>' : '\U0001d7ec', '\\<one>' : '\U0001d7ed', '\\<two>' : '\U0001d7ee', '\\<three>' : '\U0001d7ef', '\\<four>' : '\U0001d7f0', '\\<five>' : '\U0001d7f1', '\\<six>' : '\U0001d7f2', '\\<seven>' : '\U0001d7f3', '\\<eight>' : '\U0001d7f4', '\\<nine>' : '\U0001d7f5', '\\<A>' : '\U0001d49c', '\\<B>' : '\U0000212c', '\\<C>' : '\U0001d49e', '\\<D>' : '\U0001d49f', '\\<E>' : '\U00002130', '\\<F>' : '\U00002131', '\\<G>' : '\U0001d4a2', '\\<H>' : '\U0000210b', '\\<I>' : '\U00002110', '\\<J>' : '\U0001d4a5', '\\<K>' : '\U0001d4a6', '\\<L>' : '\U00002112', '\\<M>' : '\U00002133', '\\<N>' : '\U0001d4a9', '\\<O>' : '\U0001d4aa', '\\<P>' : '\U0001d4ab', '\\<Q>' : '\U0001d4ac', '\\<R>' : '\U0000211b', '\\<S>' : '\U0001d4ae', '\\<T>' : '\U0001d4af', '\\<U>' : '\U0001d4b0', '\\<V>' : '\U0001d4b1', '\\<W>' : '\U0001d4b2', '\\<X>' : '\U0001d4b3', '\\<Y>' : '\U0001d4b4', '\\<Z>' : '\U0001d4b5', '\\<a>' : '\U0001d5ba', '\\<b>' : '\U0001d5bb', '\\<c>' : '\U0001d5bc', '\\<d>' : '\U0001d5bd', '\\<e>' : '\U0001d5be', '\\<f>' : '\U0001d5bf', '\\<g>' : '\U0001d5c0', '\\<h>' : '\U0001d5c1', '\\<i>' : '\U0001d5c2', '\\<j>' : '\U0001d5c3', '\\<k>' : '\U0001d5c4', '\\<l>' : '\U0001d5c5', '\\<m>' : 
'\U0001d5c6', '\\<n>' : '\U0001d5c7', '\\<o>' : '\U0001d5c8', '\\<p>' : '\U0001d5c9', '\\<q>' : '\U0001d5ca', '\\<r>' : '\U0001d5cb', '\\<s>' : '\U0001d5cc', '\\<t>' : '\U0001d5cd', '\\<u>' : '\U0001d5ce', '\\<v>' : '\U0001d5cf', '\\<w>' : '\U0001d5d0', '\\<x>' : '\U0001d5d1', '\\<y>' : '\U0001d5d2', '\\<z>' : '\U0001d5d3', '\\<AA>' : '\U0001d504', '\\<BB>' : '\U0001d505', '\\<CC>' : '\U0000212d', '\\<DD>' : '\U0001d507', '\\<EE>' : '\U0001d508', '\\<FF>' : '\U0001d509', '\\<GG>' : '\U0001d50a', '\\<HH>' : '\U0000210c', '\\<II>' : '\U00002111', '\\<JJ>' : '\U0001d50d', '\\<KK>' : '\U0001d50e', '\\<LL>' : '\U0001d50f', '\\<MM>' : '\U0001d510', '\\<NN>' : '\U0001d511', '\\<OO>' : '\U0001d512', '\\<PP>' : '\U0001d513', '\\<QQ>' : '\U0001d514', '\\<RR>' : '\U0000211c', '\\<SS>' : '\U0001d516', '\\<TT>' : '\U0001d517', '\\<UU>' : '\U0001d518', '\\<VV>' : '\U0001d519', '\\<WW>' : '\U0001d51a', '\\<XX>' : '\U0001d51b', '\\<YY>' : '\U0001d51c', '\\<ZZ>' : '\U00002128', '\\<aa>' : '\U0001d51e', '\\<bb>' : '\U0001d51f', '\\<cc>' : '\U0001d520', '\\<dd>' : '\U0001d521', '\\<ee>' : '\U0001d522', '\\<ff>' : '\U0001d523', '\\<gg>' : '\U0001d524', '\\<hh>' : '\U0001d525', '\\<ii>' : '\U0001d526', '\\<jj>' : '\U0001d527', '\\<kk>' : '\U0001d528', '\\<ll>' : '\U0001d529', '\\<mm>' : '\U0001d52a', '\\<nn>' : '\U0001d52b', '\\<oo>' : '\U0001d52c', '\\<pp>' : '\U0001d52d', '\\<qq>' : '\U0001d52e', '\\<rr>' : '\U0001d52f', '\\<ss>' : '\U0001d530', '\\<tt>' : '\U0001d531', '\\<uu>' : '\U0001d532', '\\<vv>' : '\U0001d533', '\\<ww>' : '\U0001d534', '\\<xx>' : '\U0001d535', '\\<yy>' : '\U0001d536', '\\<zz>' : '\U0001d537', '\\<alpha>' : '\U000003b1', '\\<beta>' : '\U000003b2', '\\<gamma>' : '\U000003b3', '\\<delta>' : '\U000003b4', '\\<epsilon>' : '\U000003b5', '\\<zeta>' : '\U000003b6', '\\<eta>' : '\U000003b7', '\\<theta>' : '\U000003b8', '\\<iota>' : '\U000003b9', '\\<kappa>' : '\U000003ba', '\\<lambda>' : '\U000003bb', '\\<mu>' : '\U000003bc', '\\<nu>' : '\U000003bd', '\\<xi>' : '\U000003be', '\\<pi>' : '\U000003c0', '\\<rho>' : '\U000003c1', '\\<sigma>' : '\U000003c3', '\\<tau>' : '\U000003c4', '\\<upsilon>' : '\U000003c5', '\\<phi>' : '\U000003c6', '\\<chi>' : '\U000003c7', '\\<psi>' : '\U000003c8', '\\<omega>' : '\U000003c9', '\\<Gamma>' : '\U00000393', '\\<Delta>' : '\U00000394', '\\<Theta>' : '\U00000398', '\\<Lambda>' : '\U0000039b', '\\<Xi>' : '\U0000039e', '\\<Pi>' : '\U000003a0', '\\<Sigma>' : '\U000003a3', '\\<Upsilon>' : '\U000003a5', '\\<Phi>' : '\U000003a6', '\\<Psi>' : '\U000003a8', '\\<Omega>' : '\U000003a9', '\\<bool>' : '\U0001d539', '\\<complex>' : '\U00002102', '\\<nat>' : '\U00002115', '\\<rat>' : '\U0000211a', '\\<real>' : '\U0000211d', '\\<int>' : '\U00002124', '\\<leftarrow>' : '\U00002190', '\\<longleftarrow>' : '\U000027f5', '\\<rightarrow>' : '\U00002192', '\\<longrightarrow>' : '\U000027f6', '\\<Leftarrow>' : '\U000021d0', '\\<Longleftarrow>' : '\U000027f8', '\\<Rightarrow>' : '\U000021d2', '\\<Longrightarrow>' : '\U000027f9', '\\<leftrightarrow>' : '\U00002194', '\\<longleftrightarrow>' : '\U000027f7', '\\<Leftrightarrow>' : '\U000021d4', '\\<Longleftrightarrow>' : '\U000027fa', '\\<mapsto>' : '\U000021a6', '\\<longmapsto>' : '\U000027fc', '\\<midarrow>' : '\U00002500', '\\<Midarrow>' : '\U00002550', '\\<hookleftarrow>' : '\U000021a9', '\\<hookrightarrow>' : '\U000021aa', '\\<leftharpoondown>' : '\U000021bd', '\\<rightharpoondown>' : '\U000021c1', '\\<leftharpoonup>' : '\U000021bc', '\\<rightharpoonup>' : '\U000021c0', '\\<rightleftharpoons>' : '\U000021cc', '\\<leadsto>' : 
'\U0000219d', '\\<downharpoonleft>' : '\U000021c3', '\\<downharpoonright>' : '\U000021c2', '\\<upharpoonleft>' : '\U000021bf', '\\<upharpoonright>' : '\U000021be', '\\<restriction>' : '\U000021be', '\\<Colon>' : '\U00002237', '\\<up>' : '\U00002191', '\\<Up>' : '\U000021d1', '\\<down>' : '\U00002193', '\\<Down>' : '\U000021d3', '\\<updown>' : '\U00002195', '\\<Updown>' : '\U000021d5', '\\<langle>' : '\U000027e8', '\\<rangle>' : '\U000027e9', '\\<lceil>' : '\U00002308', '\\<rceil>' : '\U00002309', '\\<lfloor>' : '\U0000230a', '\\<rfloor>' : '\U0000230b', '\\<lparr>' : '\U00002987', '\\<rparr>' : '\U00002988', '\\<lbrakk>' : '\U000027e6', '\\<rbrakk>' : '\U000027e7', '\\<lbrace>' : '\U00002983', '\\<rbrace>' : '\U00002984', '\\<guillemotleft>' : '\U000000ab', '\\<guillemotright>' : '\U000000bb', '\\<bottom>' : '\U000022a5', '\\<top>' : '\U000022a4', '\\<and>' : '\U00002227', '\\<And>' : '\U000022c0', '\\<or>' : '\U00002228', '\\<Or>' : '\U000022c1', '\\<forall>' : '\U00002200', '\\<exists>' : '\U00002203', '\\<nexists>' : '\U00002204', '\\<not>' : '\U000000ac', '\\<box>' : '\U000025a1', '\\<diamond>' : '\U000025c7', '\\<turnstile>' : '\U000022a2', '\\<Turnstile>' : '\U000022a8', '\\<tturnstile>' : '\U000022a9', '\\<TTurnstile>' : '\U000022ab', '\\<stileturn>' : '\U000022a3', '\\<surd>' : '\U0000221a', '\\<le>' : '\U00002264', '\\<ge>' : '\U00002265', '\\<lless>' : '\U0000226a', '\\<ggreater>' : '\U0000226b', '\\<lesssim>' : '\U00002272', '\\<greatersim>' : '\U00002273', '\\<lessapprox>' : '\U00002a85', '\\<greaterapprox>' : '\U00002a86', '\\<in>' : '\U00002208', '\\<notin>' : '\U00002209', '\\<subset>' : '\U00002282', '\\<supset>' : '\U00002283', '\\<subseteq>' : '\U00002286', '\\<supseteq>' : '\U00002287', '\\<sqsubset>' : '\U0000228f', '\\<sqsupset>' : '\U00002290', '\\<sqsubseteq>' : '\U00002291', '\\<sqsupseteq>' : '\U00002292', '\\<inter>' : '\U00002229', '\\<Inter>' : '\U000022c2', '\\<union>' : '\U0000222a', '\\<Union>' : '\U000022c3', '\\<squnion>' : '\U00002294', '\\<Squnion>' : '\U00002a06', '\\<sqinter>' : '\U00002293', '\\<Sqinter>' : '\U00002a05', '\\<setminus>' : '\U00002216', '\\<propto>' : '\U0000221d', '\\<uplus>' : '\U0000228e', '\\<Uplus>' : '\U00002a04', '\\<noteq>' : '\U00002260', '\\<sim>' : '\U0000223c', '\\<doteq>' : '\U00002250', '\\<simeq>' : '\U00002243', '\\<approx>' : '\U00002248', '\\<asymp>' : '\U0000224d', '\\<cong>' : '\U00002245', '\\<smile>' : '\U00002323', '\\<equiv>' : '\U00002261', '\\<frown>' : '\U00002322', '\\<Join>' : '\U000022c8', '\\<bowtie>' : '\U00002a1d', '\\<prec>' : '\U0000227a', '\\<succ>' : '\U0000227b', '\\<preceq>' : '\U0000227c', '\\<succeq>' : '\U0000227d', '\\<parallel>' : '\U00002225', '\\<bar>' : '\U000000a6', '\\<plusminus>' : '\U000000b1', '\\<minusplus>' : '\U00002213', '\\<times>' : '\U000000d7', '\\<div>' : '\U000000f7', '\\<cdot>' : '\U000022c5', '\\<star>' : '\U000022c6', '\\<bullet>' : '\U00002219', '\\<circ>' : '\U00002218', '\\<dagger>' : '\U00002020', '\\<ddagger>' : '\U00002021', '\\<lhd>' : '\U000022b2', '\\<rhd>' : '\U000022b3', '\\<unlhd>' : '\U000022b4', '\\<unrhd>' : '\U000022b5', '\\<triangleleft>' : '\U000025c3', '\\<triangleright>' : '\U000025b9', '\\<triangle>' : '\U000025b3', '\\<triangleq>' : '\U0000225c', '\\<oplus>' : '\U00002295', '\\<Oplus>' : '\U00002a01', '\\<otimes>' : '\U00002297', '\\<Otimes>' : '\U00002a02', '\\<odot>' : '\U00002299', '\\<Odot>' : '\U00002a00', '\\<ominus>' : '\U00002296', '\\<oslash>' : '\U00002298', '\\<dots>' : '\U00002026', '\\<cdots>' : '\U000022ef', '\\<Sum>' : '\U00002211', 
'\\<Prod>' : '\U0000220f', '\\<Coprod>' : '\U00002210', '\\<infinity>' : '\U0000221e', '\\<integral>' : '\U0000222b', '\\<ointegral>' : '\U0000222e', '\\<clubsuit>' : '\U00002663', '\\<diamondsuit>' : '\U00002662', '\\<heartsuit>' : '\U00002661', '\\<spadesuit>' : '\U00002660', '\\<aleph>' : '\U00002135', '\\<emptyset>' : '\U00002205', '\\<nabla>' : '\U00002207', '\\<partial>' : '\U00002202', '\\<flat>' : '\U0000266d', '\\<natural>' : '\U0000266e', '\\<sharp>' : '\U0000266f', '\\<angle>' : '\U00002220', '\\<copyright>' : '\U000000a9', '\\<registered>' : '\U000000ae', '\\<hyphen>' : '\U000000ad', '\\<inverse>' : '\U000000af', '\\<onequarter>' : '\U000000bc', '\\<onehalf>' : '\U000000bd', '\\<threequarters>' : '\U000000be', '\\<ordfeminine>' : '\U000000aa', '\\<ordmasculine>' : '\U000000ba', '\\<section>' : '\U000000a7', '\\<paragraph>' : '\U000000b6', '\\<exclamdown>' : '\U000000a1', '\\<questiondown>' : '\U000000bf', '\\<euro>' : '\U000020ac', '\\<pounds>' : '\U000000a3', '\\<yen>' : '\U000000a5', '\\<cent>' : '\U000000a2', '\\<currency>' : '\U000000a4', '\\<degree>' : '\U000000b0', '\\<amalg>' : '\U00002a3f', '\\<mho>' : '\U00002127', '\\<lozenge>' : '\U000025ca', '\\<wp>' : '\U00002118', '\\<wrong>' : '\U00002240', '\\<struct>' : '\U000022c4', '\\<acute>' : '\U000000b4', '\\<index>' : '\U00000131', '\\<dieresis>' : '\U000000a8', '\\<cedilla>' : '\U000000b8', '\\<hungarumlaut>' : '\U000002dd', '\\<some>' : '\U000003f5', '\\<newline>' : '\U000023ce', '\\<open>' : '\U00002039', '\\<close>' : '\U0000203a', '\\<here>' : '\U00002302', '\\<^sub>' : '\U000021e9', '\\<^sup>' : '\U000021e7', '\\<^bold>' : '\U00002759', '\\<^bsub>' : '\U000021d8', '\\<^esub>' : '\U000021d9', '\\<^bsup>' : '\U000021d7', '\\<^esup>' : '\U000021d6', } lang_map = {'isabelle' : isabelle_symbols, 'latex' : latex_symbols} def __init__(self, **options): Filter.__init__(self, **options) lang = get_choice_opt(options, 'lang', ['isabelle', 'latex'], 'isabelle') self.symbols = self.lang_map[lang] def filter(self, lexer, stream): for ttype, value in stream: if value in self.symbols: yield ttype, self.symbols[value] else: yield ttype, value
SymbolFilter
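A small sketch of how the filter above is typically attached, assuming the stand-alone `pygments` package (this record shows pip's vendored copy, which behaves the same way). The input string is an arbitrary example.

# Hedged sketch: attach the symbol filter to a lexer before tokenizing, so
# Isabelle escapes are replaced with their Unicode equivalents in the output.
from pygments.lexers import get_lexer_by_name
from pygments.filters import SymbolFilter

lexer = get_lexer_by_name("isabelle")
lexer.add_filter(SymbolFilter(lang="isabelle"))
tokens = list(lexer.get_tokens(r"A \<Longrightarrow> B"))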
python
dagster-io__dagster
python_modules/dagster/dagster/_core/definitions/auto_materialize_rule_evaluation.py
{ "start": 1620, "end": 1888 }
class ____(
    AutoMaterializeRuleEvaluationData,
    NamedTuple("_TextRuleEvaluationData", [("text", str)]),
):
    @property
    def metadata(self) -> MetadataMapping:
        return {"text": MetadataValue.text(self.text)}


@whitelist_for_serdes
TextRuleEvaluationData
python
geekcomputers__Python
venv/Lib/site-packages/pip/_vendor/pygments/formatters/terminal256.py
{ "start": 3193, "end": 10210 }
class ____(Formatter): """ Format tokens with ANSI color sequences, for output in a 256-color terminal or console. Like in `TerminalFormatter` color sequences are terminated at newlines, so that paging the output works correctly. The formatter takes colors from a style defined by the `style` option and converts them to nearest ANSI 256-color escape sequences. Bold and underline attributes from the style are preserved (and displayed). .. versionadded:: 0.9 .. versionchanged:: 2.2 If the used style defines foreground colors in the form ``#ansi*``, then `Terminal256Formatter` will map these to non extended foreground color. See :ref:`AnsiTerminalStyle` for more information. .. versionchanged:: 2.4 The ANSI color names have been updated with names that are easier to understand and align with colornames of other projects and terminals. See :ref:`this table <new-ansi-color-names>` for more information. Options accepted: `style` The style to use, can be a string or a Style subclass (default: ``'default'``). `linenos` Set to ``True`` to have line numbers on the terminal output as well (default: ``False`` = no line numbers). """ name = 'Terminal256' aliases = ['terminal256', 'console256', '256'] filenames = [] def __init__(self, **options): Formatter.__init__(self, **options) self.xterm_colors = [] self.best_match = {} self.style_string = {} self.usebold = 'nobold' not in options self.useunderline = 'nounderline' not in options self.useitalic = 'noitalic' not in options self._build_color_table() # build an RGB-to-256 color conversion table self._setup_styles() # convert selected style's colors to term. colors self.linenos = options.get('linenos', False) self._lineno = 0 def _build_color_table(self): # colors 0..15: 16 basic colors self.xterm_colors.append((0x00, 0x00, 0x00)) # 0 self.xterm_colors.append((0xcd, 0x00, 0x00)) # 1 self.xterm_colors.append((0x00, 0xcd, 0x00)) # 2 self.xterm_colors.append((0xcd, 0xcd, 0x00)) # 3 self.xterm_colors.append((0x00, 0x00, 0xee)) # 4 self.xterm_colors.append((0xcd, 0x00, 0xcd)) # 5 self.xterm_colors.append((0x00, 0xcd, 0xcd)) # 6 self.xterm_colors.append((0xe5, 0xe5, 0xe5)) # 7 self.xterm_colors.append((0x7f, 0x7f, 0x7f)) # 8 self.xterm_colors.append((0xff, 0x00, 0x00)) # 9 self.xterm_colors.append((0x00, 0xff, 0x00)) # 10 self.xterm_colors.append((0xff, 0xff, 0x00)) # 11 self.xterm_colors.append((0x5c, 0x5c, 0xff)) # 12 self.xterm_colors.append((0xff, 0x00, 0xff)) # 13 self.xterm_colors.append((0x00, 0xff, 0xff)) # 14 self.xterm_colors.append((0xff, 0xff, 0xff)) # 15 # colors 16..232: the 6x6x6 color cube valuerange = (0x00, 0x5f, 0x87, 0xaf, 0xd7, 0xff) for i in range(217): r = valuerange[(i // 36) % 6] g = valuerange[(i // 6) % 6] b = valuerange[i % 6] self.xterm_colors.append((r, g, b)) # colors 233..253: grayscale for i in range(1, 22): v = 8 + i * 10 self.xterm_colors.append((v, v, v)) def _closest_color(self, r, g, b): distance = 257*257*3 # "infinity" (>distance from #000000 to #ffffff) match = 0 for i in range(0, 254): values = self.xterm_colors[i] rd = r - values[0] gd = g - values[1] bd = b - values[2] d = rd*rd + gd*gd + bd*bd if d < distance: match = i distance = d return match def _color_index(self, color): index = self.best_match.get(color, None) if color in ansicolors: # strip the `ansi/#ansi` part and look up code index = color self.best_match[color] = index if index is None: try: rgb = int(str(color), 16) except ValueError: rgb = 0 r = (rgb >> 16) & 0xff g = (rgb >> 8) & 0xff b = rgb & 0xff index = self._closest_color(r, g, b) 
self.best_match[color] = index return index def _setup_styles(self): for ttype, ndef in self.style: escape = EscapeSequence() # get foreground from ansicolor if set if ndef['ansicolor']: escape.fg = self._color_index(ndef['ansicolor']) elif ndef['color']: escape.fg = self._color_index(ndef['color']) if ndef['bgansicolor']: escape.bg = self._color_index(ndef['bgansicolor']) elif ndef['bgcolor']: escape.bg = self._color_index(ndef['bgcolor']) if self.usebold and ndef['bold']: escape.bold = True if self.useunderline and ndef['underline']: escape.underline = True if self.useitalic and ndef['italic']: escape.italic = True self.style_string[str(ttype)] = (escape.color_string(), escape.reset_string()) def _write_lineno(self, outfile): self._lineno += 1 outfile.write("%s%04d: " % (self._lineno != 1 and '\n' or '', self._lineno)) def format(self, tokensource, outfile): return Formatter.format(self, tokensource, outfile) def format_unencoded(self, tokensource, outfile): if self.linenos: self._write_lineno(outfile) for ttype, value in tokensource: not_found = True while ttype and not_found: try: # outfile.write( "<" + str(ttype) + ">" ) on, off = self.style_string[str(ttype)] # Like TerminalFormatter, add "reset colors" escape sequence # on newline. spl = value.split('\n') for line in spl[:-1]: if line: outfile.write(on + line + off) if self.linenos: self._write_lineno(outfile) else: outfile.write('\n') if spl[-1]: outfile.write(on + spl[-1] + off) not_found = False # outfile.write( '#' + str(ttype) + '#' ) except KeyError: # ottype = ttype ttype = ttype.parent # outfile.write( '!' + str(ottype) + '->' + str(ttype) + '!' ) if not_found: outfile.write(value) if self.linenos: outfile.write("\n")
Terminal256Formatter
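A brief usage sketch for the formatter above, using the standard Pygments entry point. The style name is an arbitrary choice; any installed style works.

# Hedged sketch: render highlighted source with 256-color ANSI escapes.
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import Terminal256Formatter

print(highlight("x = 1\n", PythonLexer(), Terminal256Formatter(style="monokai")))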
python
walkccc__LeetCode
solutions/730. Count Different Palindromic Subsequences/730.py
{ "start": 0, "end": 928 }
class ____:
  def countPalindromicSubsequences(self, s: str) -> int:
    MOD = 1_000_000_007
    n = len(s)
    # dp[i][j] := the number of different non-empty palindromic subsequences in
    # s[i..j]
    dp = [[0] * n for _ in range(n)]

    for i in range(n):
      dp[i][i] = 1

    for d in range(1, n):
      for i in range(n - d):
        j = i + d
        if s[i] == s[j]:
          lo = i + 1
          hi = j - 1
          while lo <= hi and s[lo] != s[i]:
            lo += 1
          while lo <= hi and s[hi] != s[i]:
            hi -= 1
          if lo > hi:
            dp[i][j] = dp[i + 1][j - 1] * 2 + 2
          elif lo == hi:
            dp[i][j] = dp[i + 1][j - 1] * 2 + 1
          else:
            dp[i][j] = dp[i + 1][j - 1] * 2 - dp[lo + 1][hi - 1]
        else:
          dp[i][j] = dp[i][j - 1] + dp[i + 1][j] - dp[i + 1][j - 1]
        dp[i][j] = (dp[i][j] + MOD) % MOD

    return dp[0][n - 1]
Solution
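A quick worked check of the interval DP above, assuming the masked class name is restored to its target `Solution`: for s = "bccb" the distinct non-empty palindromic subsequences are 'b', 'c', 'bb', 'cc', 'bcb', and 'bccb'.

# Hedged sketch: the expected count for "bccb" is 6.
print(Solution().countPalindromicSubsequences("bccb"))  # 6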
python
pytorch__pytorch
torch/nn/modules/pooling.py
{ "start": 54944, "end": 56928 }
class ____(_AdaptiveMaxPoolNd):
    r"""Applies a 3D adaptive max pooling over an input signal composed of several input planes.

    The output is of size :math:`D_{out} \times H_{out} \times W_{out}`, for any input size.
    The number of output features is equal to the number of input planes.

    Args:
        output_size: the target output size of the image of the form
            :math:`D_{out} \times H_{out} \times W_{out}`.
            Can be a tuple :math:`(D_{out}, H_{out}, W_{out})` or a single
            :math:`D_{out}` for a cube :math:`D_{out} \times D_{out} \times D_{out}`.
            :math:`D_{out}`, :math:`H_{out}` and :math:`W_{out}` can be either a
            ``int``, or ``None`` which means the size will be the same as that of the input.

        return_indices: if ``True``, will return the indices along with the outputs.
            Useful to pass to nn.MaxUnpool3d. Default: ``False``

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`,
          where :math:`(D_{out}, H_{out}, W_{out})=\text{output\_size}`.

    Examples:
        >>> # target output size of 5x7x9
        >>> m = nn.AdaptiveMaxPool3d((5, 7, 9))
        >>> input = torch.randn(1, 64, 8, 9, 10)
        >>> output = m(input)
        >>> # target output size of 7x7x7 (cube)
        >>> m = nn.AdaptiveMaxPool3d(7)
        >>> input = torch.randn(1, 64, 10, 9, 8)
        >>> output = m(input)
        >>> # target output size of 7x9x8
        >>> m = nn.AdaptiveMaxPool3d((7, None, None))
        >>> input = torch.randn(1, 64, 10, 9, 8)
        >>> output = m(input)

    """

    output_size: _size_3_opt_t

    def forward(self, input: Tensor):
        """Runs the forward pass."""
        return F.adaptive_max_pool3d(input, self.output_size, self.return_indices)
AdaptiveMaxPool3d
python
google__jax
jax/_src/lax/parallel.py
{ "start": 49262, "end": 118799 }
class ____(core.Effect): __str__ = lambda _: "one-sided communication" def __hash__(self): return hash(SingleSideCollectiveEffect) def __eq__(self, other): return isinstance(other, SingleSideCollectiveEffect) single_side_collective_effect = SingleSideCollectiveEffect() core.effects.control_flow_allowed_effects.add_type(SingleSideCollectiveEffect) def _psend_lowering_gpu(ctx, x, *, axis_name, perm): if all(p not in ctx.module_context.platforms for p in ("cuda", "rocm")): raise NotImplementedError("psend is currently only implemented on GPUs") full_perm, other_args = _pcollectives_lowering_common( ctx, axis_name=axis_name, perm=perm, op_name="psend" ) token = hlo.create_token() send_op = hlo.SendOp( [x], token, source_target_pairs=mlir.dense_int_elements(full_perm), **other_args, ) axis_ctx = ctx.module_context.axis_context if not isinstance(axis_ctx, SPMDAxisContext): raise NotImplementedError("psend currently only supports manual sharding") sharding = xc.OpSharding() sharding.type = xc.OpSharding.Type.MANUAL mlir.set_sharding(send_op, sharding) return send_op.results effects_lib.lowerable_effects.add_type(SingleSideCollectiveEffect) def _psend_abstract_eval(x, *, axis_name, **params): _check_axis_names(axis_name, 'psend') return abstract_token, { *map(core.NamedAxisEffect, axis_name), single_side_collective_effect, } psend_p = core.Primitive("psend") psend_p.def_impl(partial(dispatch.apply_primitive, psend_p)) psend_p.def_effectful_abstract_eval(_psend_abstract_eval) mlir.register_lowering(psend_p, _psend_lowering_gpu, platform="gpu") def _psend_lowering(ctx, x, *, axis_name, perm): raise NotImplementedError("psend is currently only implemented on GPU") mlir.register_lowering(psend_p, _psend_lowering) batching.fancy_primitive_batchers[psend_p] = _ppermute_batcher batching.skippable_batchers[psend_p] = partial(_names_in_param, "axis_name") def _precv_lowering_gpu(ctx, token, *, out_shape, axis_name, perm): full_perm, other_args = _pcollectives_lowering_common( ctx, axis_name=axis_name, perm=perm, op_name="precv" ) recv_op = hlo.RecvOp( [mlir.aval_to_ir_type(out_shape), token.type], token, source_target_pairs=mlir.dense_int_elements(full_perm), **other_args, ) axis_ctx = ctx.module_context.axis_context if not isinstance(axis_ctx, SPMDAxisContext): raise NotImplementedError("precv currently only supports manual sharding") sharding = xc.OpSharding() sharding.type = xc.OpSharding.Type.MANUAL mlir.set_sharding(recv_op, sharding) # recv_op should return an array of [RankedTensorType, StableHlo.token]; we # only need the tensor. 
results = recv_op.results return [results[0]] def _precv_abstract_eval( token, *, out_shape, axis_name, **params ): return out_shape, {*map(core.NamedAxisEffect, axis_name), single_side_collective_effect} precv_p = core.Primitive("precv") precv_p.multiple_results = False precv_p.def_effectful_abstract_eval(_precv_abstract_eval) mlir.register_lowering(precv_p, _precv_lowering_gpu, platform='gpu') def _precv_lowering(ctx, token, *, out_shape, axis_name, perm): raise NotImplementedError("precv is currently only implemented on GPU") mlir.register_lowering(precv_p, _precv_lowering) batching.fancy_primitive_batchers[precv_p] = _ppermute_batcher batching.skippable_batchers[precv_p] = partial(_names_in_param, "axis_name") def _pbroadcast_transpose_rule(t, x, source, axis_name): is_source = axis_index(axis_name) == source tsum = psum(t, axis_name) return [lax.select(is_source, lax.full_like(t, tsum), lax.full_like(t, 0))] def _pbroadcast_batcher(axis_data, vals_in, dims_in, axis_name, source): axis_size = axis_data.size (v,), (d,) = vals_in, dims_in if not isinstance(axis_name, (tuple, list)): axis_name = (axis_name,) if axis_data.name not in axis_name: return pbroadcast_p.bind(v, axis_name=axis_name, source=source), d remaining_axes = tuple(axis for axis in axis_name if axis != axis_data.name) if remaining_axes: raise NotImplementedError("pbroadcast batcher only supports a single axis") assert axis_name[0] == axis_data.name, "pbroadcast batcher called with a wrong axis!" assert source >= 0 and source < axis_size, "collective broadcast doesn't fit in the axis size!" if axis_size == 1 and remaining_axes: return pbroadcast_p.bind(v, source=source, axis_name=remaining_axes), d if d is batching.not_mapped: return v, d return v.take([source] * axis_size, d), d def _pbroadcast_lowering(ctx, x, *, axis_name, source): replica_groups = _replica_groups(ctx.module_context.axis_env, axis_name, None) def source_to_front(group): return [group[source]] + list(group[:source]) + list(group[source + 1:]) replica_groups = [source_to_front(group) for group in replica_groups] is_spmd = isinstance( ctx.module_context.axis_context, (SPMDAxisContext, ShardingContext), ) if is_spmd: # We want to emit the collective-broadcast with global device IDs and a # channel ID, as otherwise it interprets the devices as replicas instead # of partitions - and XLA is configured with only a single replica. 
channel_handle = hlo.ChannelHandle.get(_get_channel(ctx), mlir.DEVICE_TO_DEVICE_TYPE) other_args = dict(channel_handle=channel_handle) else: other_args = {} return hlo.CollectiveBroadcastOp( x, replica_groups=_replica_groups_hlo(replica_groups), **other_args ).results pbroadcast_p = core.Primitive('pbroadcast') pbroadcast_p.def_abstract_eval(_raise_to_shaped_abstract_eval) ad.deflinear2(pbroadcast_p, _pbroadcast_transpose_rule) mlir.register_lowering(pbroadcast_p, _pbroadcast_lowering, platform='gpu') batching.fancy_primitive_batchers[pbroadcast_p] = _pbroadcast_batcher batching.skippable_batchers[pbroadcast_p] = partial(_names_in_param, 'axis_name') def _moveaxis(src, dst, x): perm = [i for i in range(x.ndim) if i != src] perm.insert(dst, src) return lax.transpose(x, perm) def _splitaxis(axis, factor, x): new_shape = list(x.shape) assert new_shape[axis] % factor == 0, (new_shape[axis], factor) new_shape[axis:axis+1] = [factor, new_shape[axis] // factor] return x.reshape(new_shape) def _foldaxis(axis, x): new_shape = list(x.shape) new_shape[axis:axis+2] = [x.shape[axis] * x.shape[axis + 1]] return x.reshape(new_shape) def _all_to_all_lowering( ctx, x, *, split_axis, concat_axis, axis_name, axis_index_groups, tiled ): del tiled # expand_dims and squeeze is done in `all_to_all` if `True` # Workaround for AllToAll not being implemented on CPU. replica_groups = _replica_groups(ctx.module_context.axis_env, axis_name, axis_index_groups) if len(replica_groups[0]) == 1: return [x] split_count = len(replica_groups[0]) if not all(split_count == len(g) for g in replica_groups): raise ValueError('Replica groups must be equally sized') is_spmd = isinstance( ctx.module_context.axis_context, (SPMDAxisContext, ShardingContext), ) if is_spmd: # We want to emit the all-gather with global device IDs and a # channel ID, as otherwise it interprets the devices as replicas instead # of partitions - and XLA is configured with only a single replica. 
channel_handle = hlo.ChannelHandle.get(_get_channel(ctx), mlir.DEVICE_TO_DEVICE_TYPE) other_args = dict(channel_handle=channel_handle) else: other_args = {} return hlo.AllToAllOp( [x], split_dimension=mlir.i64_attr(split_axis), concat_dimension=mlir.i64_attr(concat_axis), split_count=mlir.i64_attr(split_count), replica_groups=_replica_groups_hlo(replica_groups), **other_args).results def _all_to_all_transpose_rule( cts, x, axis_name, split_axis, concat_axis, axis_index_groups, tiled ): return (all_to_all( cts, axis_name=axis_name, split_axis=concat_axis, concat_axis=split_axis, axis_index_groups=axis_index_groups, tiled=tiled),) def _all_to_all_batcher(vals_in, dims_in, *, axis_name, split_axis, concat_axis, axis_index_groups, tiled): x, = vals_in d, = dims_in result = all_to_all_p.bind( x, axis_name=axis_name, split_axis=split_axis + (d <= split_axis), concat_axis=concat_axis + (d <= concat_axis), axis_index_groups=axis_index_groups, tiled=tiled, ) return result, d def _all_to_all_batched_collective(axis_data, vals_in, dims_in, axis_name, split_axis, concat_axis, axis_index_groups, tiled): if axis_index_groups is not None: raise NotImplementedError("Please open a feature request!") axis_size, frame_name = axis_data.size, axis_data.name if isinstance(axis_name, (list, tuple)): axes_names = axis_name else: axes_names = [axis_name] if frame_name not in axes_names: return _all_to_all_batcher( vals_in, dims_in, axis_name=axis_name, split_axis=split_axis, concat_axis=concat_axis, axis_index_groups=axis_index_groups, tiled=tiled) x, = vals_in d, = dims_in if d is batching.not_mapped: # TODO(sharadmv,apaszke): Remove this broadcast that comes from # all_gather_transpose and instead avoid using all_to_all in # all_gather_transpose. x = lax.broadcast(x, (axis_size, *x.shape)) d = 0 if isinstance(axis_name, (list, tuple)): pos = axis_name.index(frame_name) major_axes, minor_axes = axis_name[:pos], axis_name[pos + 1:] else: major_axes, minor_axes = (), () # Optimized case when no splitting is necessary if not major_axes and not minor_axes: if split_axis == concat_axis: axis = split_axis + (d <= split_axis) d_pre_split = d x = _splitaxis(axis, axis_size, x) d += (axis <= d) return _foldaxis(axis, moveaxis(x, (d, axis), (axis, d))), d_pre_split else: x_concat = _foldaxis(concat_axis, _moveaxis(d, concat_axis, x)) return _splitaxis(split_axis, axis_size, x_concat), split_axis # Here we have to handle either the major or the minor dimensions # We will be accumulating chunks into the three leading dims: [Major, Current, Minor, ...] 
x, d = lax.expand_dims(_moveaxis(d, 0, x), (0, 2)), 1 split_axis += 3; concat_axis += 3 # Offset by extra three leading dims if major_axes: x = all_to_all_p.bind(x, axis_name=major_axes, split_axis=split_axis, concat_axis=0, axis_index_groups=axis_index_groups, tiled=tiled) # Split out the local part into axis new_d (NOTE: d is already in axis 1) assert d == 1 x = _splitaxis(split_axis, axis_size, x) new_d = split_axis concat_axis += (split_axis <= concat_axis) # Offset the existing axes by the new batch axis split_axis += 1 if minor_axes: x = all_to_all_p.bind(x, axis_name=minor_axes, split_axis=split_axis, concat_axis=2, axis_index_groups=axis_index_groups, tiled=tiled) # Fold the chunk axes into a single one x = _foldaxis(0, _foldaxis(0, x)) split_axis -= 2; concat_axis -= 2; new_d -= 2 # Fold gathered axes into concat_axis x = _foldaxis(concat_axis - 1, _moveaxis(0, concat_axis - 1, x)) new_d -= 1 # We've removed 0th dimension, so new_d needs to be adjusted return x, new_d def _all_to_all_effectful_abstract_eval( input_aval, axis_name, split_axis, concat_axis, axis_index_groups, tiled ): del tiled # expand_dims and squeeze is done in `all_to_all` if `True` if not isinstance(axis_name, (list, tuple)): axis_name = (axis_name,) _check_axis_names(axis_name, 'all_to_all') check_unreduced_args([input_aval], 'all_to_all') shape = list(input_aval.shape) axis_size = ( _axis_size(axis_name) if axis_index_groups is None else len(axis_index_groups[0]) ) assert shape[split_axis] % axis_size == 0, (shape[split_axis], axis_size) shape[split_axis] //= axis_size shape[concat_axis] *= axis_size vma = collective_vma_rule('all_to_all', axis_name, input_aval) out_aval = input_aval.update(shape=tuple(shape), weak_type=False, vma=vma) effects = {*map(core.NamedAxisEffect, axis_name)} return out_aval, effects def _all_to_all_impl(*args, **kwargs): raise RuntimeError("all_to_all must be used within a mapped context" " like vmap or shard_map.") all_to_all_p = core.Primitive('all_to_all') all_to_all_p.def_impl(_all_to_all_impl) all_to_all_p.def_effectful_abstract_eval(_all_to_all_effectful_abstract_eval) mlir.register_lowering(all_to_all_p, _all_to_all_lowering) ad.deflinear2(all_to_all_p, _all_to_all_transpose_rule) batching.fancy_primitive_batchers[all_to_all_p] = _all_to_all_batched_collective batching.skippable_batchers[all_to_all_p] = partial(_names_in_param, 'axis_name') def _ragged_all_to_all_lowering( ctx, operand, output, input_offsets, send_sizes, output_offsets, recv_sizes, *, axis_name, axis_index_groups ): replica_groups = _replica_groups(ctx.module_context.axis_env, axis_name, axis_index_groups) # Assumes all groups are the same size split_count = len(replica_groups[0]) if not all(split_count == len(g) for g in replica_groups): raise ValueError('Replica groups must be equally sized') ragged_all_to_all_attrs = { "replica_groups": _replica_groups_hlo(replica_groups) } is_spmd = isinstance( ctx.module_context.axis_context, (SPMDAxisContext, ShardingContext)) if is_spmd: ragged_all_to_all_attrs['channel_id'] = ir.IntegerAttr.get( ir.IntegerType.get_signless(64), _get_channel(ctx) ) return hlo.CustomCallOp( result=[output.type], inputs=[operand, output, input_offsets, send_sizes, output_offsets, recv_sizes], call_target_name=ir.StringAttr.get('ragged_all_to_all'), backend_config=ir.DictAttr.get(ragged_all_to_all_attrs), api_version=ir.IntegerAttr.get(ir.IntegerType.get_signless(32), 4), ).results def _ragged_all_to_all_effectful_abstract_eval( operand, output, input_offsets, send_sizes, output_offsets, 
recv_sizes, axis_name, axis_index_groups ): del operand, axis_index_groups if not dtypes.issubdtype(input_offsets.dtype, np.integer): raise ValueError("ragged_all_to_all input_offsets must be integer type.") if not dtypes.issubdtype(send_sizes.dtype, np.integer): raise ValueError("ragged_all_to_all send_sizes must be integer type.") if not dtypes.issubdtype(output_offsets.dtype, np.integer): raise ValueError("ragged_all_to_all output_offsets must be integer type.") if not dtypes.issubdtype(recv_sizes.dtype, np.integer): raise ValueError("ragged_all_to_all recv_sizes must be integer type.") if len(input_offsets.shape) != 1 or input_offsets.shape[0] < 1: raise ValueError( "ragged_all_to_all input_offsets must be rank 1 with positive dimension" " size, but got shape {}".format(input_offsets.shape) ) if len(send_sizes.shape) != 1 or send_sizes.shape[0] < 1: raise ValueError( "ragged_all_to_all send_sizes must be rank 1 with positive dimension" " size, but got shape {}".format(send_sizes.shape) ) if len(output_offsets.shape) != 1 or output_offsets.shape[0] < 1: raise ValueError( "ragged_all_to_all output_offsets must be rank 1 with positive" " dimension size, but got shape {}".format(output_offsets.shape) ) if len(recv_sizes.shape) != 1 or recv_sizes.shape[0] < 1: raise ValueError( "ragged_all_to_all recv_sizes must be rank 1 with positive dimension" " size, but got shape {}".format(recv_sizes.shape) ) _check_axis_names(axis_name, 'ragged_all_to_all') out_aval = output.update(shape=output.shape, weak_type=False) effects = {*map(core.NamedAxisEffect, axis_name)} return out_aval, effects def _ragged_all_to_all_jvp(primals, tangents, **params): operand, output, *sizes_and_offsets = primals operand_dot, output_dot, *_ = tangents result = ragged_all_to_all_p.bind( operand, output, *sizes_and_offsets, **params) if type(operand_dot) is type(output_dot) is ad.Zero: result_dot = ad.Zero.from_primal_value(result) else: operand_dot = ad.instantiate_zeros(operand_dot) output_dot = ad.instantiate_zeros(output_dot) result_dot = ragged_all_to_all_p.bind( operand_dot, output_dot, *sizes_and_offsets, **params) return result, result_dot def _ragged_all_to_all_transpose( t, operand, output, input_offsets, send_sizes, output_offsets, recv_sizes, *, axis_name, axis_index_groups): if type(t) is ad.Zero: operand_t = ad.Zero(operand.aval) if ad.is_undefined_primal(operand) else None output_t = ad.Zero(output.aval) if ad.is_undefined_primal(output) else None else: zero = ad.zeros_like_aval(operand.aval) output_offsets_ = all_to_all(output_offsets, axis_name, 0, 0, tiled=True) input_offsets_ = all_to_all(input_offsets, axis_name, 0, 0, tiled=True) operand_t = ragged_all_to_all_p.bind( t, zero, output_offsets_, recv_sizes, input_offsets_, send_sizes, axis_name=axis_name, axis_index_groups=axis_index_groups) mask = control_flow.cumsum( lax.full(t.shape[0], 0, dtype='int32').at[output_offsets_].set(1) .at[output_offsets_ + recv_sizes].add(-1)) mask = lax.expand_dims(mask, (*range(1, t.ndim),)) mask = lax.broadcast_in_dim(mask, shape=t.shape, broadcast_dimensions=tuple(range(t.ndim))) output_t = lax.select(mask, lax._zeros(t), t) return [operand_t, output_t] + [None] * 4 def _ragged_all_to_all_batched_collective(axis_data, vals_in, dims_in, axis_name, axis_index_groups): if axis_data.name in axis_name: raise NotImplementedError("Please open a feature request!") if axis_index_groups: raise NotImplementedError("Please open a feature request!") size = axis_data.size def bdim_at_second(x, d): assert x.ndim == 2 return 
(batching.broadcast(x, size, 1, None) if d is None else x if d == 1 else x.T) def merge(x): return x.reshape(-1, *x.shape[2:]) def split(x): return x.reshape(size, -1, *x.shape[1:]) operand, output = map(partial(batching.bdim_at_front, size=size), vals_in[:2], dims_in[:2]) N, M = operand.shape[1], output.shape[1] input_offsets, send_sizes, output_offsets, recv_sizes = \ map(bdim_at_second, vals_in[2:], dims_in[2:]) input_offsets += lax.iota(input_offsets.dtype, size)[None, :] * N output_offsets += lax.iota(output_offsets.dtype, size)[None, :] * M vals_in = operand, output, input_offsets, send_sizes, output_offsets, recv_sizes result = split(ragged_all_to_all(*map(merge, vals_in), axis_name=axis_name)) return result, 0 def _ragged_all_to_all_impl(*args, **kwargs): raise RuntimeError("ragged_all_to_all must be used within a mapped context" " like vmap or shard_map.") ragged_all_to_all_p = core.Primitive('ragged_all_to_all') ragged_all_to_all_p.def_impl(_ragged_all_to_all_impl) ragged_all_to_all_p.def_effectful_abstract_eval(_ragged_all_to_all_effectful_abstract_eval) ad.primitive_jvps[ragged_all_to_all_p] = _ragged_all_to_all_jvp ad.primitive_transposes[ragged_all_to_all_p] = _ragged_all_to_all_transpose mlir.register_lowering(ragged_all_to_all_p, _ragged_all_to_all_lowering) batching.fancy_primitive_batchers[ragged_all_to_all_p] = _ragged_all_to_all_batched_collective batching.skippable_batchers[ragged_all_to_all_p] = partial(_names_in_param, 'axis_name') def insert_collective_pvary(axis_name, x): if not config._check_vma.value: return x axis_name = (axis_name,) if not isinstance(axis_name, tuple) else axis_name aval = core.get_aval(x) names_union = set(axis_name) | aval.vma x = pvary(x, tuple(n for n in names_union if n not in aval.vma)) return x def all_gather(x, axis_name, *, axis_index_groups=None, axis=0, tiled=False): """Gather values of x across all replicas. If ``x`` is a pytree then the result is equivalent to mapping this function to each leaf in the tree. This is equivalent to, but faster than, all_to_all(broadcast(x)). Args: x: array(s) with a mapped axis named ``axis_name``. axis_name: hashable Python object used to name a pmapped axis (see the :func:`jax.pmap` documentation for more details). axis_index_groups: optional list of lists containing axis indices (e.g. for an axis of size 4, [[0, 1], [2, 3]] would run all gather over the first two and last two replicas). Groups must cover all axis indices exactly once, and all groups must be the same size. axis: a positional axis into which the chunks along ``axis_name`` will be concatenated. tiled: when ``False``, the chunks will be stacked into a fresh positional axis at index ``axis`` in the output. When ``True``, ``axis`` has to refer to an existing positional dimension and the chunks will be concatenated into that dimension. Returns: Array(s) representing the result of an all-gather along the axis ``axis_name``. Shapes are the same as ``x.shape``, but: - when ``tiled`` is ``False``, there is a new dimension equal to the size of axis ``axis_name`` in position ``axis``, - when ``tiled`` is ``True``, the size of dimension in position ``axis`` is multiplied by the size of axis ``axis_name``. 
For example, with 4 XLA devices available: >>> x = np.arange(4) >>> y = jax.pmap(lambda x: jax.lax.all_gather(x, 'i'), axis_name='i')(x) >>> print(y) [[0 1 2 3] [0 1 2 3] [0 1 2 3] [0 1 2 3]] An example of using axis_index_groups, groups split by even & odd device ids: >>> x = np.arange(16).reshape(4, 4) >>> print(x) [[ 0 1 2 3] [ 4 5 6 7] [ 8 9 10 11] [12 13 14 15]] >>> def f(x): ... return jax.lax.all_gather( ... x, 'i', axis_index_groups=[[0, 2], [3, 1]]) >>> y = jax.pmap(f, axis_name='i')(x) >>> print(y) [[[ 0 1 2 3] [ 8 9 10 11]] [[12 13 14 15] [ 4 5 6 7]] [[ 0 1 2 3] [ 8 9 10 11]] [[12 13 14 15] [ 4 5 6 7]]] """ if not isinstance(axis_name, tuple): axis_name = (axis_name,) if not axis_name: return x axis_index_groups = _canonicalize_axis_index_groups(axis_index_groups) axis_size = _axis_size(axis_name, axis_index_groups) def bind(leaf): leaf = insert_collective_pvary(axis_name, leaf) return all_gather_p.bind( leaf, all_gather_dimension=canonicalize_axis( axis, np.ndim(leaf) if tiled else np.ndim(leaf) + 1), axis_name=axis_name, axis_index_groups=axis_index_groups, axis_size=axis_size, tiled=tiled) return tree_util.tree_map(bind, x) def _all_gather_impl(x, *, all_gather_dimension, axis_name, axis_index_groups, axis_size, tiled): raise AssertionError("Unexpected call to _all_gather_impl") def _all_gather_lowering(ctx, x, *, all_gather_dimension, axis_name, axis_index_groups, axis_size, tiled, platform=None): x_aval, = ctx.avals_in out_aval, = ctx.avals_out axis_context = ctx.module_context.axis_context is_spmd = isinstance(axis_context, (SPMDAxisContext, ShardingContext)) if not tiled: new_shape = list(x_aval.shape) new_shape.insert(all_gather_dimension, 1) broadcast_dimensions = [i for i in range(len(new_shape)) if i != all_gather_dimension] x = hlo.broadcast_in_dim( mlir.aval_to_ir_type(x_aval.update(shape=new_shape)), x, mlir.dense_int_array(broadcast_dimensions)) replica_groups = _replica_groups(ctx.module_context.axis_env, axis_name, axis_index_groups) if is_spmd: # We want to emit the all-gather with global device IDs and a # channel ID, as otherwise it interprets the devices as replicas instead # of partitions - and XLA is configured with only a single replica. other_args = dict( channel_handle=hlo.ChannelHandle.get( _get_channel(ctx), mlir.DEVICE_TO_DEVICE_TYPE), use_global_device_ids=ir.BoolAttr.get(True)) else: other_args = {} return hlo.AllGatherOp( [mlir.aval_to_ir_type(out_aval)], [x], all_gather_dim=mlir.i64_attr(all_gather_dimension), replica_groups=_replica_groups_hlo(replica_groups), **other_args).results def collective_vma_rule(prim_name, axis_name, x_aval): if not config._check_vma.value: return frozenset() axis_name = (axis_name,) if not isinstance(axis_name, tuple) else axis_name if any(a not in x_aval.vma for a in axis_name): raise ValueError( f"Collective {prim_name} must be applied to a device-varying " f" type, but got {x_aval.vma} for collective acting " f"over axis name {axis_name}. 
Please open an issue at " "https://github.com/jax-ml/jax/issues and as a temporary " "workaround pass the check_vma=False argument to `jax.shard_map`") return x_aval.vma def _all_gather_effectful_abstract_eval( x_aval, *, all_gather_dimension, axis_name, axis_index_groups, axis_size, tiled ): if not isinstance(axis_name, (list, tuple)): axis_name = (axis_name,) _check_axis_names(axis_name, 'all_gather') check_unreduced_args([x_aval], 'all_gather') new_shape = list(x_aval.shape) if tiled: new_shape[all_gather_dimension] *= axis_size else: new_shape.insert(all_gather_dimension, axis_size) out_vma = collective_vma_rule('all_gather', axis_name, x_aval) return (x_aval.update(shape=new_shape, vma=out_vma), {*map(core.NamedAxisEffect, axis_name)}) def _all_gather_transpose_rule(cts, x, *, all_gather_dimension, axis_name, axis_index_groups, axis_size, tiled): return (psum_scatter(cts, axis_name=axis_name, scatter_dimension=all_gather_dimension, axis_index_groups=axis_index_groups, tiled=tiled),) def _all_gather_batcher(prim, vals_in, dims_in, *, all_gather_dimension, axis_name, axis_index_groups, axis_size, tiled): (x,), (d,) = vals_in, dims_in if d is not batching.not_mapped: if d <= all_gather_dimension: all_gather_dimension += 1 elif not tiled: # Tiled all-gather doesn't modify the set of dimensions d += 1 if prim is all_gather_p: result = all_gather_p.bind( x, all_gather_dimension=all_gather_dimension, axis_name=axis_name, axis_index_groups=axis_index_groups, axis_size=axis_size, tiled=tiled) return result, d else: assert prim is all_gather_invariant_p result = all_gather_invariant_p.bind( x, all_gather_dimension=all_gather_dimension, axis_name=axis_name, axis_size=axis_size, tiled=tiled) return result, d def _all_gather_batched_collective(prim, axis_data, vals_in, dims_in, all_gather_dimension, axis_name, axis_index_groups, axis_size, tiled): frame_size, frame_name = axis_data.size, axis_data.name if frame_name not in axis_name: return _all_gather_batcher( prim, vals_in, dims_in, all_gather_dimension=all_gather_dimension, axis_name=axis_name, axis_index_groups=axis_index_groups, axis_size=axis_size, tiled=tiled) if axis_index_groups is not None: raise NotImplementedError("axis_index_groups not supported in vmap") assert axis_size == frame_size, "axis size doesn't match" if not isinstance(axis_name, tuple): axis_name = (axis_name,) if len(axis_name) > 1: raise NotImplementedError("Please open a feature request!") assert axis_name == (frame_name,), "batcher called with wrong axis name" (x,), (d,) = vals_in, dims_in if d is batching.not_mapped: out_shape = list(np.shape(x)) out_shape.insert(all_gather_dimension, axis_size) broadcast_dims = [i for i in range(len(out_shape)) if i != all_gather_dimension] y = lax.broadcast_in_dim(x, out_shape, broadcast_dims) else: y = _moveaxis(d, all_gather_dimension, x) if tiled: y = _foldaxis(all_gather_dimension, y) return y, batching.not_mapped all_gather_p = core.Primitive('all_gather') all_gather_p.def_effectful_abstract_eval(_all_gather_effectful_abstract_eval) all_gather_p.def_impl(_all_gather_impl) mlir.register_lowering(all_gather_p, _all_gather_lowering) for p in ("cuda", "rocm", "tpu"): mlir.register_lowering(all_gather_p, partial(_all_gather_lowering, platform=p), platform=p) ad.deflinear2(all_gather_p, _all_gather_transpose_rule) batching.fancy_primitive_batchers[all_gather_p] = partial( _all_gather_batched_collective, all_gather_p) batching.skippable_batchers[all_gather_p] = partial(_names_in_param, 'axis_name') def all_gather_invariant(x, 
axis_name, *, axis: int = 0, tiled: bool = False): """Gather values of x across all replicas. If ``x`` is a pytree then the result is equivalent to mapping this function to each leaf in the tree. all_gather_invariant differs from all_gather in the following ways: * all_gather_invariant is Varying -> Invariant. For example: `out: f32[8] = all_gather_invariant(inp: f32[4]{V: x}, 'x')` where the size of mesh axis `x` is 2. While all_gather is Varying -> Varying. * all_gather_invariant transposes to dynamic_slice which is Invariant -> Varying. While all_gather transposes to reduce_scatter which is Varying -> Varying. """ if not isinstance(axis_name, tuple): axis_name = (axis_name,) if not axis_name: return x axis_size = _axis_size(axis_name, None) axes_ = frozenset(axis_name) def bind(leaf): in_vma = core.typeof(leaf).vma if vary_names := axes_ - in_vma: leaf = pvary(leaf, tuple(vary_names)) return all_gather_invariant_p.bind( leaf, all_gather_dimension=canonicalize_axis(axis, np.ndim(leaf) if tiled else np.ndim(leaf) + 1), axis_name=axis_name, axis_size=axis_size, tiled=tiled) return tree_util.tree_map(bind, x) all_gather_invariant_p = core.Primitive('all_gather_invariant') def _all_gather_invariant_effectful_abstract_eval( x_aval, *, all_gather_dimension, axis_name, axis_size, tiled ): _check_axis_names(axis_name, 'all_gather_invariant') check_unreduced_args([x_aval], 'all_gather_invariant') new_shape = list(x_aval.shape) if tiled: new_shape[all_gather_dimension] *= axis_size else: new_shape.insert(all_gather_dimension, axis_size) out_vma = frozenset(v for v in x_aval.vma if v not in axis_name) return (x_aval.update(shape=new_shape, vma=out_vma), {*map(core.NamedAxisEffect, axis_name)}) all_gather_invariant_p.def_effectful_abstract_eval( _all_gather_invariant_effectful_abstract_eval) def _all_gather_invariant_impl(x, *, all_gather_dimension, axis_name, axis_size, tiled): raise NotImplementedError all_gather_invariant_p.def_impl(_all_gather_invariant_impl) def _all_gather_invariant_lowering( ctx, x, *, all_gather_dimension, axis_name, axis_size, tiled, platform=None): return _all_gather_lowering( ctx, x, all_gather_dimension=all_gather_dimension, axis_name=axis_name, axis_index_groups=None, axis_size=axis_size, tiled=tiled, platform=platform) mlir.register_lowering(all_gather_invariant_p, _all_gather_invariant_lowering) for p in ("cuda", "rocm", "tpu"): mlir.register_lowering(all_gather_invariant_p, partial(_all_gather_invariant_lowering, platform=p), platform=p) def _all_gather_invariant_transpose_rule( cts, x, *, all_gather_dimension, axis_name, axis_size, tiled): slice_size, rem = divmod(cts.shape[all_gather_dimension], axis_size) assert not rem idx = axis_index(axis_name) * slice_size out = slicing.dynamic_slice_in_dim( cts, idx, slice_size=slice_size, axis=all_gather_dimension) return (out,) if tiled else (lax.squeeze(out, [all_gather_dimension]),) ad.deflinear2(all_gather_invariant_p, _all_gather_invariant_transpose_rule) def _all_gather_invariant_batched_collective( axis_data, vals_in, dims_in, all_gather_dimension, axis_name, axis_size, tiled): return _all_gather_batched_collective( all_gather_invariant_p, axis_data, vals_in, dims_in, all_gather_dimension, axis_name, None, axis_size, tiled) batching.fancy_primitive_batchers[all_gather_invariant_p] = _all_gather_invariant_batched_collective batching.skippable_batchers[all_gather_invariant_p] = partial(_names_in_param, 'axis_name') def _reduce_scatter_lowering( prim, ctx, x, *, scatter_dimension, axis_name, axis_index_groups, axis_size, 
tiled): x_aval, = ctx.avals_in aval_out, = ctx.avals_out scalar_aval = x_aval.update(shape=()) replica_groups = _replica_groups(ctx.module_context.axis_env, axis_name, axis_index_groups) scatter_out_shape = list(x_aval.shape) scatter_out_shape[scatter_dimension] //= axis_size axis_context = ctx.module_context.axis_context is_spmd = isinstance( axis_context, (SPMDAxisContext, ShardingContext), ) if is_spmd: # We want to emit the all-gather with global device IDs and a # channel ID, as otherwise it interprets the devices as replicas instead # of partitions - and XLA is configured with only a single replica. other_args = dict( channel_handle=hlo.ChannelHandle.get( _get_channel(ctx), mlir.DEVICE_TO_DEVICE_TYPE), use_global_device_ids=ir.BoolAttr.get(True)) else: other_args = {} op = hlo.ReduceScatterOp( mlir.aval_to_ir_type(x_aval.update(shape=scatter_out_shape)), x, scatter_dimension=mlir.i64_attr(scatter_dimension), replica_groups=_replica_groups_hlo(replica_groups), **other_args) scalar_type = mlir.aval_to_ir_type(scalar_aval) reducer_block = op.regions[0].blocks.append(scalar_type, scalar_type) with ir.InsertionPoint(reducer_block): lower_reducer = mlir.lower_fun(prim.bind, multiple_results=False) reducer_ctx = ctx.replace(primitive=None, avals_in=[scalar_aval] * 2, avals_out=[scalar_aval]) out_nodes = lower_reducer(reducer_ctx, *reducer_block.arguments) hlo.return_(mlir.flatten_ir_values(out_nodes)) if tiled: return op.results else: return [hlo.reshape(mlir.aval_to_ir_type(aval_out), op.result)] def _reduce_scatter_effectful_abstract_eval( x_aval, *, axis_name, scatter_dimension, axis_index_groups, axis_size, tiled ): if not isinstance(axis_name, (list, tuple)): axis_name = (axis_name,) _check_axis_names(axis_name, 'reduce_scatter') check_unreduced_args([x_aval], 'reduce_scatter') new_shape = list(x_aval.shape) scatter_dim_input_size = x_aval.shape[scatter_dimension] if tiled: if scatter_dim_input_size % axis_size != 0: raise ValueError(f"tiled reduce_scatter operand scatter dimension size " f"{scatter_dim_input_size} must be divisible by " f"shard_count {axis_size}") new_shape[scatter_dimension] = scatter_dim_input_size // axis_size else: if scatter_dim_input_size != axis_size: raise ValueError(f"reduce_scatter operand scatter dimension size " f"{scatter_dim_input_size} must match shard count " f"{axis_size}") del new_shape[scatter_dimension] vma = collective_vma_rule('reduce_scatter', axis_name, x_aval) return (x_aval.update(shape=new_shape, vma=vma), {*map(core.NamedAxisEffect, axis_name)}) def _reduce_scatter_transpose_rule(cts, x, *, axis_name, scatter_dimension, axis_index_groups, axis_size, tiled): return (all_gather(cts, axis_name=axis_name, axis_index_groups=axis_index_groups, axis=scatter_dimension, tiled=tiled),) def _reduce_scatter_batcher(vals_in, dims_in, *, scatter_dimension, axis_name, axis_index_groups, axis_size, tiled): (x,), (d,) = vals_in, dims_in if d <= scatter_dimension: scatter_dimension += 1 elif not tiled: # Tiled all-scatter doesn't change the rank d += 1 result = reduce_scatter_p.bind( x, scatter_dimension=scatter_dimension, axis_name=axis_name, axis_index_groups=axis_index_groups, axis_size=axis_size, tiled=tiled) return result, d def _reduce_scatter_collective(axis_data, vals_in, dims_in, scatter_dimension, axis_name, axis_index_groups, axis_size, tiled): frame_size, frame_name = axis_data.size, axis_data.name if frame_name not in axis_name: return _reduce_scatter_batcher( vals_in, dims_in, scatter_dimension=scatter_dimension, axis_name=axis_name, 
axis_index_groups=axis_index_groups, axis_size=axis_size, tiled=tiled) if axis_index_groups is not None: raise NotImplementedError("axis_index_groups not supported in vmap") assert axis_size == frame_size, "axis size doesn't match" if not isinstance(axis_name, tuple): axis_name = (axis_name,) if len(axis_name) > 1: raise NotImplementedError("Please open a feature request!") assert axis_name == (frame_name,), "batcher called with wrong axis name" (x,), (d,) = vals_in, dims_in if d is batching.not_mapped: y, dy = x * axis_size, scatter_dimension else: y, dy = lax.reduce(x, 0., lax.add, (d,)), scatter_dimension if tiled: y = _splitaxis(dy, axis_size, y) return y, dy reduce_scatter_p = core.Primitive("reduce_scatter") reduce_scatter_p.def_effectful_abstract_eval( _reduce_scatter_effectful_abstract_eval ) ad.deflinear2(reduce_scatter_p, _reduce_scatter_transpose_rule) batching.fancy_primitive_batchers[reduce_scatter_p] = _reduce_scatter_collective batching.skippable_batchers[reduce_scatter_p] = partial(_names_in_param, 'axis_name') mlir.register_lowering(reduce_scatter_p, partial(_reduce_scatter_lowering, lax.add_p)) def psum_scatter(x, axis_name, *, scatter_dimension=0, axis_index_groups=None, tiled=False): """ Like ``psum(x, axis_name)`` but each device retains only part of the result. For example, ``psum_scatter(x, axis_name, scatter_dimension=0, tiled=False)`` computes the same value as ``psum(x, axis_name)[axis_index(axis_name)]``, but it is more efficient. Thus the ``psum`` result is left scattered along the mapped axis. One efficient algorithm for computing ``psum(x, axis_name)`` is to perform a ``psum_scatter`` followed by an ``all_gather``, essentially evaluating ``all_gather(psum_scatter(x, axis_name))``. So we can think of ``psum_scatter`` as "the first half" of a ``psum``. Args: x: array(s) with a mapped axis named ``axis_name``. axis_name: hashable Python object used to name a mapped axis (see the :func:`jax.pmap` documentation for more details). scatter_dimension: a positional axis into which the all-reduce result along ``axis_name`` will be scattered. axis_index_groups: optional list of lists of integers containing axis indices. For example, for an axis of size 4, ``axis_index_groups=[[0, 1], [2, 3]]`` would run reduce-scatter over the first two and the last two axis indices. Groups must cover all axis indices exactly once, and all groups must be the same size. tiled: boolean representing whether to use rank-preserving 'tiled' behavior. When ``False`` (the default value), the size of dimension in ``scatter_dimension`` must match the size of axis ``axis_name`` (or the group size if ``axis_index_groups`` is given). After scattering the all-reduce result along ``scatter_dimension``, the output is squeezed by removing ``scatter_dimension``, so the result has lower rank than the input. When ``True``, the size of dimension in ``scatter_dimension`` must be divisible by the size of axis ``axis_name`` (or the group size if ``axis_index_groups`` is given), and the ``scatter_dimension`` axis is preserved (so the result has the same rank as the input). Returns: Array(s) with the similar shape as ``x``, except the size of dimension in position ``scatter_dimension`` is divided by the size of axis ``axis_name`` (when ``tiled=True``), or the dimension in position ``scatter_dimension`` is eliminated (when ``tiled=False``). 
For example, with 4 XLA devices available: >>> x = np.arange(16).reshape(4, 4) >>> print(x) [[ 0 1 2 3] [ 4 5 6 7] [ 8 9 10 11] [12 13 14 15]] >>> y = jax.pmap(lambda x: jax.lax.psum_scatter(x, 'i'), axis_name='i')(x) >>> print(y) [24 28 32 36] if using tiled: >>> y = jax.pmap(lambda x: jax.lax.psum_scatter(x, 'i', tiled=True), axis_name='i')(x) >>> print(y) [[24] [28] [32] [36]] An example of using axis_index_groups: >>> def f(x): ... return jax.lax.psum_scatter( ... x, 'i', axis_index_groups=[[0, 2], [3, 1]], tiled=True) >>> y = jax.pmap(f, axis_name='i')(x) >>> print(y) [[ 8 10] [20 22] [12 14] [16 18]] """ if not isinstance(axis_name, tuple): axis_name = (axis_name,) if not axis_name: return x axis_size = _axis_size(axis_name, axis_index_groups) axis_index_groups = _canonicalize_axis_index_groups(axis_index_groups) def bind(leaf): leaf = insert_collective_pvary(axis_name, leaf) return reduce_scatter_p.bind( leaf, axis_name=axis_name, scatter_dimension=scatter_dimension, axis_index_groups=axis_index_groups, axis_size=axis_size, tiled=tiled) return tree_util.tree_map(bind, x) def _build_axis_index_lowering_hlo(ctx, axis_name, axis_env): from jax._src.shard_map import shard_map # pytype: disable=import-error if isinstance(axis_name, tuple): assert axis_name, 'empty axis name' if len(axis_name) > 1: raise NotImplementedError( '`axis_index` translation rule does not support multiple axis names.') axis_name, = axis_name if axis_name not in axis_env.names: raise NameError(f"unbound axis name: {axis_name}") axis_context = ctx.module_context.axis_context axis_pos = list(axis_env.names).index(axis_name) # For partial auto, enter into a fully manual shard_map. if (isinstance(axis_context, SPMDAxisContext) and axis_context.manual_axes and axis_context.manual_axes != frozenset(axis_context.mesh.axis_names)): if axis_env.sizes[axis_pos] == 1: return hlo.constant(ir.DenseElementsAttr.get(np.asarray(0, dtype=np.int32))) def f(): return axis_index_p.bind(axis_name=axis_name) return mlir.lower_fun( lambda: [shard_map(f, check_vma=False, in_specs=(), out_specs=P())()])(ctx)[0] nreplicas = axis_env.nreps // math.prod(axis_env.sizes) div = mlir.ir_constant( np.array( nreplicas * math.prod(axis_env.sizes[axis_pos + 1 :]), dtype=np.uint32 ) ) mod = mlir.ir_constant(np.array(axis_env.sizes[axis_pos], dtype=np.uint32)) if isinstance(axis_context, (ShardingContext, SPMDAxisContext)): device_id = hlo.partition_id() else: device_id = hlo.replica_id() unsigned_index = hlo.remainder(hlo.divide(device_id, div), mod) return hlo.convert( ir.RankedTensorType.get([], ir.IntegerType.get_signless(32)), unsigned_index) def _axis_index_lowering(ctx, *, axis_name): return [_build_axis_index_lowering_hlo(ctx, axis_name, ctx.module_context.axis_env)] def _axis_index_effectful_abstract_eval(*, axis_name): effect = {core.NamedAxisEffect(axis_name)} axis_name = (axis_name,) if not isinstance(axis_name, tuple) else axis_name _check_axis_names(axis_name, 'axis_index') mesh = get_abstract_mesh() sharding = NamedSharding(mesh, P()) vma = ((frozenset(axis_name) if mesh._any_axis_manual else frozenset()) if config._check_vma.value else frozenset()) return ShapedArray((), np.int32, sharding=sharding, vma=vma), effect def _axis_index_batcher(axis_data, vals_in, dims_in, *, axis_name): return lax.iota(np.int32, axis_data.size), 0 axis_index_p = core.Primitive('axis_index') axis_index_p.def_impl(partial(dispatch.apply_primitive, axis_index_p)) mlir.register_lowering(axis_index_p, _axis_index_lowering) 
axis_index_p.def_effectful_abstract_eval(_axis_index_effectful_abstract_eval) batching.fancy_primitive_batchers[axis_index_p] = _axis_index_batcher batching.skippable_batchers[axis_index_p] = partial(_names_in_param, 'axis_name') def _pgather_impl(src, idx, *, axes): assert all(isinstance(axis, int) for axis in axes) src_axes_front = moveaxis(src, axes, range(len(axes))) non_axes_shape = src_axes_front.shape[len(axes):] src_one_axis_front = src_axes_front.reshape((-1,) + non_axes_shape) slice_sizes = (1,) + non_axes_shape idx = lax.expand_dims(idx, (-1,)) offset_dims = tuple(range(idx.ndim - 1, idx.ndim + src_one_axis_front.ndim - 2)) dnums = slicing.GatherDimensionNumbers( offset_dims=offset_dims, collapsed_slice_dims=(0,), start_index_map=(0,), ) return slicing.gather(src_one_axis_front, idx, dimension_numbers=dnums, slice_sizes=tuple(slice_sizes)) def _pgather_abstract_eval(src, idx, *, axes): # TODO: Avals with names rule: remove all axes from src, insert those from idx # The order is important, because it is ok to re-insert one of the deleted axes! _check_axis_names(axes, 'pgather') shape = list(src.shape) for axis in sorted((a for a in axes if isinstance(a, int)), reverse=True): del shape[axis] shape = idx.shape + tuple(shape) return ShapedArray(shape, src.dtype) def _pgather_parallel_lowering(ctx, src, idx, *, axes): if any(not isinstance(axis, int) for axis in axes): raise NotImplementedError("pgather only supported in the SPMD lowering." "Please open a feature request!") return mlir.lower_fun(_pgather_impl, multiple_results=False)( ctx, src, idx, axes=axes) def _pgather_collective_batcher(axis_size, frame_name, _, vals_in, dims_in, *, axes): src, idx = vals_in dsrc, didx = dims_in if dsrc is batching.not_mapped: raise ValueError("pgather axis {frame.name} is missing from the indexed value") if didx is not batching.not_mapped: # NOTE: This is allowed and the output would be mapped along this axis! raise NotImplementedError("Please open a feature request!") # Now source is mapped, idx is not new_axes = tuple(dsrc if axis == frame_name else axis + (dsrc <= axis) if isinstance(axis, int) else axis for axis in axes) # The result is not mapped, because we eliminate all axes, and those include # the batched axis. if all(isinstance(axis, int) for axis in axes): # We rewrite a purely positional pgather as a gather, because that one # is more fully featured (e.g. supports AD). return _pgather_impl(src, idx, axes=new_axes), batching.not_mapped else: return pgather_p.bind(src, idx, axes=new_axes), batching.not_mapped pgather_p = core.Primitive('pgather') pgather_p.def_impl(_pgather_impl) pgather_p.def_abstract_eval(_pgather_abstract_eval) mlir.register_lowering(pgather_p, _pgather_parallel_lowering) # TODO: Transpose? That requires adding pscatter... 
batching.fancy_primitive_batchers[pgather_p] = _pgather_collective_batcher batching.skippable_batchers[pgather_p] = partial(_names_in_param, 'axes') psum_invariant_p = core.Primitive('psum_invariant') psum_invariant_p.multiple_results = True psum_invariant_p.def_impl(psum_p.impl) psum_invariant_p.def_effectful_abstract_eval( partial(_psum_invariant_abstract_eval, psum_invariant_p.name)) def _psum_invariant_lowering_rule(prim, pos_fn, ctx, *args, axes): return _allreduce_lowering(prim, pos_fn, ctx, *args, axes=axes, axis_index_groups=None) mlir.register_lowering( psum_invariant_p, partial(_psum_invariant_lowering_rule, lax.add_p, lax.reduce_sum)) def _psum_invariant_batching_rule( prim, if_unmapped, axis_data, vals_in, dims_in, axes): return _batched_reduction_collective( prim, if_unmapped, axis_data, vals_in, dims_in, axes, None) batching.fancy_primitive_batchers[psum_invariant_p] = partial( _psum_invariant_batching_rule, psum_invariant_p, lambda v, axis_size: axis_size * v) batching.skippable_batchers[psum_invariant_p] = partial(_names_in_param, 'axes') def _psum_invariant_transpose_rule(cts, *args, axes): def f(ct, arg): assert ad.is_undefined_primal(arg) return ad.Zero(arg.aval) if type(ct) is ad.Zero else ct cts = map(f, cts, args) nonzero_out_cts, treedef = tree_util.tree_flatten(cts) nonzero_in_cts = core.pvary_p.bind(*nonzero_out_cts, axes=axes) return tree_util.tree_unflatten(treedef, nonzero_in_cts) ad.deflinear2(psum_invariant_p, _psum_invariant_transpose_rule) ########################### pvary ################################## core.pvary_p.def_impl(lambda *args, axes: args) mlir.register_lowering(core.pvary_p, lambda ctx, *x, axes: x) def _pvary_abstract_eval(*args, axes): if not config._check_vma.value: return args _check_axis_names(axes, 'pvary') check_unreduced_args(args, 'pvary') assert isinstance(axes, tuple) arg_vma = [a.vma for a in args] for a in arg_vma: # If there is intersection between arg_vma and axes, error if set(axes) & a: raise ValueError( "pvary is a invariant->variant collective. This means that the axis" " names mentioned in `axes` passed to `pvary` must not be present in" f" `jax.typeof(inp).vma`. 
Got axes={axes} and" f" jax.typeof(inp).vma={a}") return [a.update(sharding=a.sharding.update(mesh=get_abstract_mesh()), vma=a.vma.union(frozenset(axes))) for a in args] core.pvary_p.def_abstract_eval(_pvary_abstract_eval) def _pvary_transpose_rule(cts, *args, axes): def f(ct, arg): assert ad.is_undefined_primal(arg) return ad.Zero(arg.aval) if type(ct) is ad.Zero else ct cts = map(f, cts, args) nonzero_out_cts, treedef = tree_util.tree_flatten(cts) nonzero_in_cts = psum_invariant_p.bind(*nonzero_out_cts, axes=axes) return tree_util.tree_unflatten(treedef, nonzero_in_cts) ad.deflinear2(core.pvary_p, _pvary_transpose_rule) def _pvary_batcher(vals_in, dims_in, *, axes): if any(type(axis) is int for axis in axes): raise NotImplementedError vals_out = core.pvary_p.bind(*vals_in, axes=axes) return vals_out, dims_in batching.primitive_batchers[core.pvary_p] = _pvary_batcher ####################### all_gather_reduced ########################### # Varying -> Reduced collective def all_gather_reduced(x, axis_name, *, axis: int = 0, tiled: bool = False): if not isinstance(axis_name, tuple): axis_name = (axis_name,) if not axis_name: return x axis_size = _axis_size(axis_name, None) def bind(leaf): return all_gather_reduced_p.bind( leaf, all_gather_dimension=canonicalize_axis( axis, np.ndim(leaf) if tiled else np.ndim(leaf) + 1), axis_name=axis_name, axis_size=axis_size, tiled=tiled) return tree_util.tree_map(bind, x) all_gather_reduced_p = core.Primitive('all_gather_reduced') def _all_gather_reduced_effectful_abstract_eval( x_aval, *, all_gather_dimension, axis_name, axis_size, tiled ): _check_axis_names(axis_name, 'all_gather_reduced') if not x_aval.vma: raise ValueError('all_gather_reduced only accepts inputs that are' f' varying. Got {x_aval.str_short(True)}') # If the intersection between x.vma and axis_name is empty, error if not (x_aval.vma & set(axis_name)): raise ValueError( 'all_gather_reduced is a Varying -> Reduced collective. This means ' f'that the {axis_name=} passed to `all_gather_reduced` must be present ' f'in jax.typeof(x).vma={x_aval.vma}') if x_aval.sharding.spec.reduced & set(axis_name): raise ValueError( "all_gather_reduced's input cannot be reduced across the axis_name" f" provided. 
Got x={x_aval.str_short(True)} and {axis_name=}") new_shape = list(x_aval.shape) if tiled: new_shape[all_gather_dimension] *= axis_size else: new_shape.insert(all_gather_dimension, axis_size) x_aval_s = x_aval.sharding new_reduced = x_aval_s.spec.reduced | frozenset(axis_name) out_sharding = x_aval_s.update(spec=x_aval_s.spec.update(reduced=new_reduced)) out_vma = frozenset(v for v in x_aval.vma if v not in axis_name) return (x_aval.update(shape=new_shape, vma=out_vma, sharding=out_sharding), {*map(core.NamedAxisEffect, axis_name)}) all_gather_reduced_p.def_effectful_abstract_eval( _all_gather_reduced_effectful_abstract_eval) def _all_gather_reduced_impl(x, *, all_gather_dimension, axis_name, axis_size, tiled): raise NotImplementedError all_gather_reduced_p.def_impl(_all_gather_reduced_impl) def _all_gather_reduced_lowering( ctx, x, *, all_gather_dimension, axis_name, axis_size, tiled, platform=None): return _all_gather_lowering( ctx, x, all_gather_dimension=all_gather_dimension, axis_name=axis_name, axis_index_groups=None, axis_size=axis_size, tiled=tiled, platform=platform) mlir.register_lowering(all_gather_reduced_p, _all_gather_reduced_lowering) for p in ("cuda", "rocm", "tpu"): mlir.register_lowering(all_gather_reduced_p, partial(_all_gather_reduced_lowering, platform=p), platform=p) def _all_gather_reduced_transpose_rule( cts, x, *, all_gather_dimension, axis_name, axis_size, tiled): return (unreduced_psum_scatter(cts, axis_name=axis_name, scatter_dimension=all_gather_dimension, tiled=tiled),) ad.deflinear2(all_gather_reduced_p, _all_gather_reduced_transpose_rule) def _all_gather_reduced_batched_collective( axis_data, vals_in, dims_in, all_gather_dimension, axis_name, axis_size, tiled): raise NotImplementedError( "Please file an issue at https://github.com/jax-ml/jax/issues") batching.fancy_primitive_batchers[all_gather_reduced_p] = _all_gather_reduced_batched_collective batching.skippable_batchers[all_gather_reduced_p] = partial(_names_in_param, 'axis_name') ####################### unreduced_psum_scatter ########################### # Unreduced -> Varying collective def unreduced_psum_scatter(x, axis_name, *, scatter_dimension=0, tiled=False): if not isinstance(axis_name, tuple): axis_name = (axis_name,) if not axis_name: return x axis_size = _axis_size(axis_name, None) def bind(leaf): return unreduced_reduce_scatter_p.bind( leaf, axis_name=axis_name, scatter_dimension=scatter_dimension, axis_size=axis_size, tiled=tiled) return tree_util.tree_map(bind, x) unreduced_reduce_scatter_p = core.Primitive('unreduced_reduce_scatter') def _unreduced_reduce_scatter_effectful_abstract_eval( x_aval, *, axis_name, scatter_dimension, axis_size, tiled ): _check_axis_names(axis_name, 'reduce_scatter') if not x_aval.sharding.spec.unreduced: raise ValueError('unreduced_psum_scatter only accepts inputs that are' f' unreduced. Got {x_aval.str_short(True)}') # If intersection between x.unreduced & axis_name is empty, error if not (x_aval.sharding.spec.unreduced & frozenset(axis_name)): raise ValueError( "unreduced_psum_scatter is a Unreduced -> Varying collective. This" f" means that the {axis_name=} passed to `unreduced_psum_scatter` must" " be present in" f" jax.typeof(x).sharding.spec.unreduced={x_aval.sharding.spec.unreduced}" ) if x_aval.vma & set(axis_name): raise ValueError( "unreduced_psum_scatter's input cannot be varying across the axis_name" f" provided. 
Got x={x_aval.str_short(True)} and {axis_name=}") new_shape = list(x_aval.shape) scatter_dim_input_size = x_aval.shape[scatter_dimension] if tiled: if scatter_dim_input_size % axis_size != 0: raise ValueError(f"tiled reduce_scatter operand scatter dimension size " f"{scatter_dim_input_size} must be divisible by " f"shard_count {axis_size}") new_shape[scatter_dimension] = scatter_dim_input_size // axis_size else: if scatter_dim_input_size != axis_size: raise ValueError(f"reduce_scatter operand scatter dimension size " f"{scatter_dim_input_size} must match shard count " f"{axis_size}") del new_shape[scatter_dimension] x_aval_s = x_aval.sharding out_sharding = x_aval_s.update(spec=x_aval_s.spec.update( unreduced=frozenset(i for i in x_aval_s.spec.unreduced if i not in axis_name))) out_vma = x_aval.vma | set(axis_name) return (x_aval.update(shape=new_shape, vma=out_vma, sharding=out_sharding), {*map(core.NamedAxisEffect, axis_name)}) unreduced_reduce_scatter_p.def_effectful_abstract_eval( _unreduced_reduce_scatter_effectful_abstract_eval) def _unreduced_reduce_scatter_impl( x, *, axis_name, scatter_dimension, axis_size, tiled): raise NotImplementedError unreduced_reduce_scatter_p.def_impl(_unreduced_reduce_scatter_impl) def _unreduced_reduce_scatter_transpose_rule( cts, x, *, axis_name, scatter_dimension, axis_size, tiled): return (all_gather_reduced(cts, axis_name=axis_name, axis=scatter_dimension, tiled=tiled),) ad.deflinear2(unreduced_reduce_scatter_p, _unreduced_reduce_scatter_transpose_rule) def _unreduced_reduce_scatter_batcher( axis_data, vals_in, dims_in, axis_name, scatter_dimension, axis_size, tiled): raise NotImplementedError( "Please file an issue at https://github.com/jax-ml/jax/issues") batching.fancy_primitive_batchers[unreduced_reduce_scatter_p] = _unreduced_reduce_scatter_batcher batching.skippable_batchers[unreduced_reduce_scatter_p] = partial(_names_in_param, 'axis_name') def _unreduced_reduce_scatter_lowering( prim, ctx, x, *, axis_name, scatter_dimension, axis_size, tiled): return _reduce_scatter_lowering( prim, ctx, x, axis_name=axis_name, scatter_dimension=scatter_dimension, axis_size=axis_size, tiled=tiled, axis_index_groups=None) mlir.register_lowering(unreduced_reduce_scatter_p, partial(_unreduced_reduce_scatter_lowering, lax.add_p)) ############################## unreduced_psum ########################### # Unreduced -> Invariant collective def unreduced_psum(x, axis_name): if not isinstance(axis_name, (tuple, list)): axis_name = (axis_name,) if not axis_name: return x leaves, treedef = tree_util.tree_flatten(x) out_flat = unreduced_psum_p.bind(*leaves, axes=tuple(axis_name)) return tree_util.tree_unflatten(treedef, out_flat) unreduced_psum_p = core.Primitive('unreduced_psum') unreduced_psum_p.multiple_results = True def _unreduced_psum_abstract_eval(*avals, axes): _check_axis_names(axes, 'psum') for a in avals: if not a.sharding.spec.unreduced: raise ValueError('unreduced_psum only accepts inputs that are' f' unreduced. Got {a.str_short(True)}') # If intersection between x.unreduced & axis_name is empty, error if not (a.sharding.spec.unreduced & frozenset(axes)): raise ValueError( "unreduced_psum is a Unreduced -> Invariant collective. This" f" means that the {axes=} passed to `unreduced_psum` must" " be present in" f" jax.typeof(x).sharding.spec.unreduced={a.sharding.spec.unreduced}") if a.vma & set(axes): raise ValueError( "unreduced_psum's input cannot be varying across the " f" axis_name provided. 
Got x={a.str_short(True)} and {axes=}") if any(isinstance(a, int) for a in axes): raise ValueError('unreduced_psum does not accept integer axis_name.' f' Got axis_name={axes}') core.check_avals_context_mesh(avals, 'unreduced_psum') out_avals = [] for a in avals: a_s = a.sharding out_sharding = a_s.update(spec=a_s.spec.update( unreduced=frozenset(u for u in a_s.spec.unreduced if u not in axes))) out_avals.append(a.update(sharding=out_sharding)) return out_avals, {core.NamedAxisEffect(axis) for axis in axes} unreduced_psum_p.def_effectful_abstract_eval(_unreduced_psum_abstract_eval) def _unreduced_psum_lowering(ctx, *args, axes): return _allreduce_lowering(lax.add_p, lax.reduce_sum, ctx, *args, axes=axes, axis_index_groups=None) mlir.register_lowering(unreduced_psum_p, _unreduced_psum_lowering) def _unreduced_psum_batcher(axis_data, vals_in, dims_in, axes): raise NotImplementedError batching.fancy_primitive_batchers[unreduced_psum_p] = _unreduced_psum_batcher batching.skippable_batchers[unreduced_psum_p] = partial(_names_in_param, 'axes') def _unreduced_psum_transpose_rule(cts, *args, axes): def f(ct, arg): assert ad.is_undefined_primal(arg) return ad.Zero(arg.aval) if type(ct) is ad.Zero else ct cts = map(f, cts, args) nonzero_out_cts, treedef = tree_util.tree_flatten(cts) nonzero_in_cts = preduced_p.bind(*nonzero_out_cts, axes=axes) return tree_util.tree_unflatten(treedef, nonzero_in_cts) ad.deflinear2(unreduced_psum_p, _unreduced_psum_transpose_rule) ############################## preduced ################################# # Invariant -> Reduced no-op cast. It's the transpose of unreduced_psum. def preduced(x, axis_name): axes = (axis_name,) if not isinstance(axis_name, tuple) else axis_name if not axes: return x x_flat, treedef = tree_util.tree_flatten(x) cur_mesh = get_abstract_mesh() new_axes = axes if cur_mesh.empty else core.order_wrt_mesh(cur_mesh, axes) assert set(new_axes) == set(axes) del axes out_flat = preduced_p.bind(*x_flat, axes=new_axes) return tree_util.tree_unflatten(treedef, out_flat) preduced_p = core.Primitive('preduced') preduced_p.multiple_results = True preduced_p.def_impl(lambda *args, axes: args) mlir.register_lowering(preduced_p, lambda ctx, *x, axes: x) def _preduced_abstract_eval(*avals, axes): assert isinstance(axes, tuple) _check_axis_names(axes, 'preduced') for a in avals: # If there is intersection between arg_vma and axes, error if a.vma & set(axes): raise ValueError( "preduced is a Invariant->Reduced collective. This means that the" " axis names mentioned in `axes` passed to `preduced` must not be" f" present in `jax.typeof(inp).vma`. Got axes={axes} and" f" jax.typeof(inp).vma={a.vma}") if a.sharding.spec.reduced & set(axes): raise ValueError( "preduced input cannot be reduced across the axis_name" f" provided. 
Got x={a.str_short(True)} and axis_name={axes}") out_avals = [] for a in avals: a_s = a.sharding new_reduced = a_s.spec.reduced | frozenset(axes) out_sharding = a_s.update(mesh=get_abstract_mesh(), spec=a_s.spec.update(reduced=new_reduced)) out_avals.append(a.update(sharding=out_sharding)) return out_avals preduced_p.def_abstract_eval(_preduced_abstract_eval) def _preduced_transpose_rule(cts, *args, axes): def f(ct, arg): assert ad.is_undefined_primal(arg) return ad.Zero(arg.aval) if type(ct) is ad.Zero else ct cts = map(f, cts, args) nonzero_out_cts, treedef = tree_util.tree_flatten(cts) nonzero_in_cts = unreduced_psum_p.bind(*nonzero_out_cts, axes=axes) return tree_util.tree_unflatten(treedef, nonzero_in_cts) ad.deflinear2(preduced_p, _preduced_transpose_rule) def _preduced_batcher(vals_in, dims_in, *, axes): raise NotImplementedError batching.primitive_batchers[preduced_p] = _preduced_batcher ######################## vary_unreduced_cast ####################### # Varying -> Unreduced no-op cast def vary_unreduced_cast(x, axis_name): axes = (axis_name,) if not isinstance(axis_name, tuple) else axis_name if not axis_name: return x x_flat, treedef = tree_util.tree_flatten(x) out_flat = vary_unreduced_cast_p.bind(*x_flat, axes=axes) return tree_util.tree_unflatten(treedef, out_flat) vary_unreduced_cast_p = core.Primitive('vary_unreduced_cast_p') vary_unreduced_cast_p.multiple_results = True vary_unreduced_cast_p.def_impl(lambda *args, axes: args) mlir.register_lowering(vary_unreduced_cast_p, lambda ctx, *x, axes: x) def _vary_unreduced_cast_abstract_eval(*avals, axes): assert isinstance(axes, tuple) _check_axis_names(axes, 'vary_unreduced_cast') check_unreduced_args(avals, 'vary_unreduced_cast') for aval in avals: if not aval.vma: raise ValueError('vary_unreduced_cast only accepts inputs that are' f' varying. Got {aval.str_short(True)}') # If the intersection between aval.vma and axes is empty, error if not (aval.vma & set(axes)): raise ValueError( "vary_unreduced_cast is a Varying->Unreduced collective. This" " means that the axis names mentioned in `axes` passed to" " `vary_unreduced_cast` must be present in" f" `jax.typeof(x).vma`. Got axes={axes} and" f" jax.typeof(x).vma={aval.vma}") if aval.sharding.spec.unreduced & set(axes): raise ValueError( "vary_unreduced_cast input cannot be unreduced across the axis_name" f" provided. 
Got x={aval.str_short(True)} and axis_name={axes}") out_avals = [] for aval in avals: aval_s = aval.sharding new_unreduced = aval_s.spec.unreduced | frozenset(axes) out_sharding = aval_s.update(mesh=get_abstract_mesh(), spec=aval_s.spec.update(unreduced=new_unreduced)) out_vma = frozenset(i for i in aval.vma if i not in axes) out_avals.append(aval.update(sharding=out_sharding, vma=out_vma)) return out_avals vary_unreduced_cast_p.def_abstract_eval(_vary_unreduced_cast_abstract_eval) def _vary_unreduced_cast_transpose_rule(cts, *args, axes): def f(ct, arg): assert ad.is_undefined_primal(arg) return ad.Zero(arg.aval) if type(ct) is ad.Zero else ct cts = map(f, cts, args) nonzero_out_cts, treedef = tree_util.tree_flatten(cts) nonzero_in_cts = core.reduced_vary_cast_p.bind(*nonzero_out_cts, axes=axes) return tree_util.tree_unflatten(treedef, nonzero_in_cts) ad.deflinear2(vary_unreduced_cast_p, _vary_unreduced_cast_transpose_rule) def _vary_unreduced_cast_batcher(vals_in, dims_in, *, axes): raise NotImplementedError batching.primitive_batchers[vary_unreduced_cast_p] = _vary_unreduced_cast_batcher ####################### reduced_vary_cast ############################# # Reduced -> Varying no-op cast # Traceable defined in core.py to avoid circular imports core.reduced_vary_cast_p.def_impl(lambda *args, axes: args) mlir.register_lowering(core.reduced_vary_cast_p, lambda ctx, *x, axes: x) def _reduced_vary_cast_abstract_eval(*avals, axes): assert isinstance(axes, tuple) _check_axis_names(axes, 'reduced_vary_cast') for aval in avals: if not aval.sharding.spec.reduced: raise ValueError('reduced_vary_cast only accepts inputs that are' f' reduced. Got {aval.str_short(True)}') # If the intersection between aval.spec.reduced and axes is empty, error if not (aval.sharding.spec.reduced & set(axes)): raise ValueError( "reduced_vary_cast is a Reduced->Varying collective. This" " means that the axis names mentioned in `axes` passed to" " `reduced_vary_cast` must be present in" f" `jax.typeof(x).sharding.spec.reduced`. Got axes={axes} and" f" jax.typeof(x).sharding.spec.reduced={aval.sharding.spec.reduced}") if aval.vma & set(axes): raise ValueError( "reduced_vary_cast input cannot be varying across the axis_name" f" provided. Got x={aval.str_short(True)} and axis_name={axes}") out_avals = [] for aval in avals: aval_s = aval.sharding new_reduced = frozenset(i for i in aval_s.spec.reduced if i not in axes) out_sharding = aval_s.update(mesh=get_abstract_mesh(), spec=aval_s.spec.update(reduced=new_reduced)) out_vma = aval.vma | frozenset(axes) out_avals.append(aval.update(sharding=out_sharding, vma=out_vma)) return out_avals core.reduced_vary_cast_p.def_abstract_eval(_reduced_vary_cast_abstract_eval) def _reduced_vary_cast_transpose_rule(cts, *args, axes): def f(ct, arg): assert ad.is_undefined_primal(arg) return ad.Zero(arg.aval) if type(ct) is ad.Zero else ct cts = map(f, cts, args) nonzero_out_cts, treedef = tree_util.tree_flatten(cts) nonzero_in_cts = vary_unreduced_cast_p.bind(*nonzero_out_cts, axes=axes) return tree_util.tree_unflatten(treedef, nonzero_in_cts) ad.deflinear2(core.reduced_vary_cast_p, _reduced_vary_cast_transpose_rule) def _reduced_vary_cast_batcher(vals_in, dims_in, *, axes): raise NotImplementedError batching.primitive_batchers[core.reduced_vary_cast_p] = _reduced_vary_cast_batcher
SingleSideCollectiveEffect
python
sqlalchemy__sqlalchemy
test/aaa_profiling/test_resultset.py
{ "start": 479, "end": 5238 }
class ____(fixtures.TablesTest, AssertsExecutionResults):
    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "table1",
            metadata,
            *[
                Column("field%d" % fnum, String(50))
                for fnum in range(NUM_FIELDS)
            ],
        )
        Table(
            "table2",
            metadata,
            *[
                Column("field%d" % fnum, Unicode(50))
                for fnum in range(NUM_FIELDS)
            ],
        )

    @classmethod
    def insert_data(cls, connection):
        conn = connection
        t, t2 = cls.tables("table1", "table2")
        conn.execute(
            t.insert(),
            [
                {
                    "field%d" % fnum: "value%d" % fnum
                    for fnum in range(NUM_FIELDS)
                }
                for r_num in range(NUM_RECORDS)
            ],
        )
        conn.execute(
            t2.insert(),
            [
                {
                    "field%d" % fnum: "value%d" % fnum
                    for fnum in range(NUM_FIELDS)
                }
                for r_num in range(NUM_RECORDS)
            ],
        )

        # warm up type caches
        conn.execute(t.select()).fetchall()
        conn.execute(t2.select()).fetchall()
        conn.exec_driver_sql(
            "SELECT %s FROM table1"
            % (", ".join("field%d" % fnum for fnum in range(NUM_FIELDS)))
        ).fetchall()
        conn.exec_driver_sql(
            "SELECT %s FROM table2"
            % (", ".join("field%d" % fnum for fnum in range(NUM_FIELDS)))
        ).fetchall()

    @profiling.function_call_count(variance=0.15)
    def test_string(self):
        t, t2 = self.tables("table1", "table2")
        with testing.db.connect().execution_options(
            compiled_cache=None
        ) as conn:
            [tuple(row) for row in conn.execute(t.select()).fetchall()]

    @profiling.function_call_count(variance=0.15)
    def test_unicode(self):
        t, t2 = self.tables("table1", "table2")
        with testing.db.connect().execution_options(
            compiled_cache=None
        ) as conn:
            [tuple(row) for row in conn.execute(t2.select()).fetchall()]

    @profiling.function_call_count(variance=0.15)
    def test_raw_string(self):
        stmt = "SELECT %s FROM table1" % (
            ", ".join("field%d" % fnum for fnum in range(NUM_FIELDS))
        )
        with testing.db.connect() as conn:
            [tuple(row) for row in conn.exec_driver_sql(stmt).fetchall()]

    @profiling.function_call_count(variance=0.15)
    def test_raw_unicode(self):
        stmt = "SELECT %s FROM table2" % (
            ", ".join("field%d" % fnum for fnum in range(NUM_FIELDS))
        )
        with testing.db.connect() as conn:
            [tuple(row) for row in conn.exec_driver_sql(stmt).fetchall()]

    @profiling.function_call_count()
    def test_fetch_by_key_mappings(self):
        t, t2 = self.tables("table1", "table2")
        with testing.db.connect().execution_options(
            compiled_cache=None
        ) as conn:
            for row in conn.execute(t.select()).mappings().fetchall():
                [row["field%d" % fnum] for fnum in range(NUM_FIELDS)]

    @testing.combinations(
        (False, 0),
        (True, 1),
        (False, 1),
        (False, 2),
    )
    def test_one_or_none(self, one_or_first, rows_present):
        # TODO: this is not testing the ORM level "scalar_mapping"
        # mode which has a different performance profile
        t, t2 = self.tables("table1", "table2")
        with testing.db.connect().execution_options(
            compiled_cache=None
        ) as conn:
            stmt = t.select()
            if rows_present == 0:
                stmt = stmt.where(1 == 0)
            elif rows_present == 1:
                stmt = stmt.limit(1)

            result = conn.execute(stmt)

            @profiling.function_call_count()
            def go():
                if one_or_first:
                    result.one()
                else:
                    result.first()

            try:
                go()
            finally:
                # hmmmm, connection close context manager does not
                # seem to be handling this for a profile that skips
                result.close()

    def test_contains_doesnt_compile(self, connection):
        t, t2 = self.tables("table1", "table2")
        row = connection.execute(t.select()).first()
        c1 = Column("some column", Integer) + Column(
            "some other column", Integer
        )

        @profiling.function_call_count(variance=0.10)
        def go():
            c1 in row._mapping

        go()
ResultSetTest
python
apache__airflow
providers/amazon/src/airflow/providers/amazon/aws/hooks/dms.py
{ "start": 1237, "end": 14398 }
class ____(AwsBaseHook): """ Interact with AWS Database Migration Service (DMS). Provide thin wrapper around :external+boto3:py:class:`boto3.client("dms") <DatabaseMigrationService.Client>`. Additional arguments (such as ``aws_conn_id``) may be specified and are passed down to the underlying AwsBaseHook. .. seealso:: - :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook` """ def __init__(self, *args, **kwargs): kwargs["client_type"] = "dms" super().__init__(*args, **kwargs) def describe_replication_tasks(self, **kwargs) -> tuple[str | None, list]: """ Describe replication tasks. .. seealso:: - :external+boto3:py:meth:`DatabaseMigrationService.Client.describe_replication_tasks` :return: Marker and list of replication tasks """ dms_client = self.get_conn() response = dms_client.describe_replication_tasks(**kwargs) return response.get("Marker"), response.get("ReplicationTasks", []) def find_replication_tasks_by_arn(self, replication_task_arn: str, without_settings: bool | None = False): """ Find and describe replication tasks by task ARN. .. seealso:: - :external+boto3:py:meth:`DatabaseMigrationService.Client.describe_replication_tasks` :param replication_task_arn: Replication task arn :param without_settings: Indicates whether to return task information with settings. :return: list of replication tasks that match the ARN """ _, tasks = self.describe_replication_tasks( Filters=[ { "Name": "replication-task-arn", "Values": [replication_task_arn], } ], WithoutSettings=without_settings, ) return tasks def get_task_status(self, replication_task_arn: str) -> str | None: """ Retrieve task status. :param replication_task_arn: Replication task ARN :return: Current task status """ replication_tasks = self.find_replication_tasks_by_arn( replication_task_arn=replication_task_arn, without_settings=True, ) if len(replication_tasks) == 1: status = replication_tasks[0]["Status"] self.log.info('Replication task with ARN(%s) has status "%s".', replication_task_arn, status) return status self.log.info("Replication task with ARN(%s) is not found.", replication_task_arn) return None def create_replication_task( self, replication_task_id: str, source_endpoint_arn: str, target_endpoint_arn: str, replication_instance_arn: str, migration_type: str, table_mappings: dict, **kwargs, ) -> str: """ Create DMS replication task. .. seealso:: - :external+boto3:py:meth:`DatabaseMigrationService.Client.create_replication_task` :param replication_task_id: Replication task id :param source_endpoint_arn: Source endpoint ARN :param target_endpoint_arn: Target endpoint ARN :param replication_instance_arn: Replication instance ARN :param table_mappings: Table mappings :param migration_type: Migration type ('full-load'|'cdc'|'full-load-and-cdc'), full-load by default. :return: Replication task ARN """ dms_client = self.get_conn() create_task_response = dms_client.create_replication_task( ReplicationTaskIdentifier=replication_task_id, SourceEndpointArn=source_endpoint_arn, TargetEndpointArn=target_endpoint_arn, ReplicationInstanceArn=replication_instance_arn, MigrationType=migration_type, TableMappings=json.dumps(table_mappings), **kwargs, ) replication_task_arn = create_task_response["ReplicationTask"]["ReplicationTaskArn"] self.wait_for_task_status(replication_task_arn, DmsTaskWaiterStatus.READY) return replication_task_arn def start_replication_task( self, replication_task_arn: str, start_replication_task_type: str, **kwargs, ): """ Start replication task. .. 
seealso:: - :external+boto3:py:meth:`DatabaseMigrationService.Client.start_replication_task` :param replication_task_arn: Replication task ARN :param start_replication_task_type: Replication task start type (default='start-replication') ('start-replication'|'resume-processing'|'reload-target') """ dms_client = self.get_conn() dms_client.start_replication_task( ReplicationTaskArn=replication_task_arn, StartReplicationTaskType=start_replication_task_type, **kwargs, ) def stop_replication_task(self, replication_task_arn): """ Stop replication task. .. seealso:: - :external+boto3:py:meth:`DatabaseMigrationService.Client.stop_replication_task` :param replication_task_arn: Replication task ARN """ dms_client = self.get_conn() dms_client.stop_replication_task(ReplicationTaskArn=replication_task_arn) def delete_replication_task(self, replication_task_arn): """ Start replication task deletion and waits for it to be deleted. .. seealso:: - :external+boto3:py:meth:`DatabaseMigrationService.Client.delete_replication_task` :param replication_task_arn: Replication task ARN """ dms_client = self.get_conn() dms_client.delete_replication_task(ReplicationTaskArn=replication_task_arn) self.wait_for_task_status(replication_task_arn, DmsTaskWaiterStatus.DELETED) def wait_for_task_status(self, replication_task_arn: str, status: DmsTaskWaiterStatus): """ Wait for replication task to reach status; supported statuses: deleted, ready, running, stopped. :param status: Status to wait for :param replication_task_arn: Replication task ARN """ if not isinstance(status, DmsTaskWaiterStatus): raise TypeError("Status must be an instance of DmsTaskWaiterStatus") dms_client = self.get_conn() waiter = dms_client.get_waiter(f"replication_task_{status.value}") waiter.wait( Filters=[ { "Name": "replication-task-arn", "Values": [ replication_task_arn, ], }, ], WithoutSettings=True, ) def describe_replication_configs(self, filters: list[dict] | None = None, **kwargs) -> list[dict]: """ Return list of serverless replication configs. .. seealso:: - :external+boto3:py:meth:`DatabaseMigrationService.Client.describe_replication_configs` :param filters: List of filter objects :return: List of replication tasks """ filters = filters if filters is not None else [] try: resp = self.conn.describe_replication_configs(Filters=filters, **kwargs) return resp.get("ReplicationConfigs", []) except Exception as ex: self.log.error("Error while describing replication configs: %s", str(ex)) raise ex def create_replication_config( self, replication_config_id: str, source_endpoint_arn: str, target_endpoint_arn: str, compute_config: dict[str, Any], replication_type: str, table_mappings: str, additional_config_kwargs: dict[str, Any] | None = None, **kwargs, ): """ Create an AWS DMS Serverless configuration that can be used to start an DMS Serverless replication. .. seealso:: - :external+boto3:py:meth:`DatabaseMigrationService.Client.create_replication_config` :param replicationConfigId: Unique identifier used to create a ReplicationConfigArn. :param sourceEndpointArn: ARN of the source endpoint :param targetEndpointArn: ARN of the target endpoint :param computeConfig: Parameters for provisioning an DMS Serverless replication. :param replicationType: type of DMS Serverless replication :param tableMappings: JSON table mappings :param tags: Key-value tag pairs :param resourceId: Unique value or name that you set for a given resource that can be used to construct an Amazon Resource Name (ARN) for that resource. 
:param supplementalSettings: JSON settings for specifying supplemental data :param replicationSettings: JSON settings for DMS Serverless replications :return: ReplicationConfigArn """ if additional_config_kwargs is None: additional_config_kwargs = {} try: resp = self.conn.create_replication_config( ReplicationConfigIdentifier=replication_config_id, SourceEndpointArn=source_endpoint_arn, TargetEndpointArn=target_endpoint_arn, ComputeConfig=compute_config, ReplicationType=replication_type, TableMappings=table_mappings, **additional_config_kwargs, ) arn = resp.get("ReplicationConfig", {}).get("ReplicationConfigArn") self.log.info("Successfully created replication config: %s", arn) return arn except ClientError as err: err_str = ( f"Error: {err.get('Error', '').get('Code', '')}: {err.get('Error', '').get('Message', '')}" ) self.log.error("Error while creating replication config: %s", err_str) raise err def describe_replications(self, filters: list[dict[str, Any]] | None = None, **kwargs) -> list[dict]: """ Return list of serverless replications. .. seealso:: - :external+boto3:py:meth:`DatabaseMigrationService.Client.describe_replications` :param filters: List of filter objects :return: List of replications """ filters = filters if filters is not None else [] try: resp = self.conn.describe_replications(Filters=filters, **kwargs) return resp.get("Replications", []) except Exception as ex: self.log.error("Error while describing replications: %s", str(ex)) raise ex def delete_replication_config( self, replication_config_arn: str, delay: int = 60, max_attempts: int = 120 ): """ Delete an AWS DMS Serverless configuration. .. seealso:: - :external+boto3:py:meth:`DatabaseMigrationService.Client.delete_replication_config` :param replication_config_arn: ReplicationConfigArn """ try: self.log.info("Deleting replication config: %s", replication_config_arn) self.conn.delete_replication_config(ReplicationConfigArn=replication_config_arn) except ClientError as err: err_str = ( f"Error: {err.get('Error', '').get('Code', '')}: {err.get('Error', '').get('Message', '')}" ) self.log.error("Error while deleting replication config: %s", err_str) raise err def start_replication( self, replication_config_arn: str, start_replication_type: str, cdc_start_time: datetime | str | None = None, cdc_start_pos: str | None = None, cdc_stop_pos: str | None = None, ): additional_args: dict[str, Any] = {} if cdc_start_time: additional_args["CdcStartTime"] = ( cdc_start_time if isinstance(cdc_start_time, datetime) else parser.parse(cdc_start_time) ) if cdc_start_pos: additional_args["CdcStartPosition"] = cdc_start_pos if cdc_stop_pos: additional_args["CdcStopPosition"] = cdc_stop_pos try: resp = self.conn.start_replication( ReplicationConfigArn=replication_config_arn, StartReplicationType=start_replication_type, **additional_args, ) return resp except Exception as ex: self.log.error("Error while starting replication: %s", str(ex)) raise ex def stop_replication(self, replication_config_arn: str): resp = self.conn.stop_replication(ReplicationConfigArn=replication_config_arn) return resp def get_provision_status(self, replication_config_arn: str) -> str: """Get the provisioning status for a serverless replication.""" result = self.describe_replications( filters=[{"Name": "replication-config-arn", "Values": [replication_config_arn]}] ) provision_status = result[0].get("ProvisionData", {}).get("ProvisionState", "") return provision_status
DmsHook
python
doocs__leetcode
solution/2400-2499/2436.Minimum Split Into Subarrays With GCD Greater Than One/Solution.py
{ "start": 0, "end": 229 }
class ____:
    def minimumSplits(self, nums: List[int]) -> int:
        ans, g = 1, 0
        for x in nums:
            g = gcd(g, x)
            if g == 1:
                ans += 1
                g = x
        return ans
Solution