language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
huggingface__transformers
src/transformers/models/sam3_tracker/configuration_sam3_tracker.py
{ "start": 3378, "end": 6703 }
class ____(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`Sam3TrackerMaskDecoder`]. It is used to instantiate a SAM3_TRACKER memory encoder according to the specified arguments, defining the model architecture. Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 256): Dimensionality of the hidden states. hidden_act (`str`, *optional*, defaults to `"gelu"`): The non-linear activation function in the SAM3_TRACKER mask decoder. mlp_dim (`int`, *optional*, defaults to 2048): The dimension of the MLP in the two-way transformer. num_hidden_layers (`int`, *optional*, defaults to 2): The number of hidden layers in the two-way transformer. num_attention_heads (`int`, *optional*, defaults to 8): The number of attention heads in the two-way transformer. attention_downsample_rate (`int`, *optional*, defaults to 2): The downsample rate for the attention layers. num_multimask_outputs (`int`, *optional*, defaults to 3): The number of multimask outputs. iou_head_depth (`int`, *optional*, defaults to 3): The depth of the IoU head. iou_head_hidden_dim (`int`, *optional*, defaults to 256): The hidden dimension of the IoU head. dynamic_multimask_via_stability (`bool`, *optional*, defaults to `True`): Whether to use dynamic multimask via stability. dynamic_multimask_stability_delta (`float`, *optional*, defaults to 0.05): The stability delta for the dynamic multimask. dynamic_multimask_stability_thresh (`float`, *optional*, defaults to 0.98): The stability threshold for the dynamic multimask. 
""" base_config_key = "mask_decoder_config" def __init__( self, hidden_size=256, hidden_act="gelu", mlp_dim=2048, num_hidden_layers=2, num_attention_heads=8, attention_downsample_rate=2, num_multimask_outputs=3, iou_head_depth=3, iou_head_hidden_dim=256, dynamic_multimask_via_stability=True, dynamic_multimask_stability_delta=0.05, dynamic_multimask_stability_thresh=0.98, **kwargs, ): super().__init__(**kwargs) self.hidden_size = hidden_size self.num_multimask_outputs = num_multimask_outputs self.hidden_act = hidden_act self.iou_head_depth = iou_head_depth self.iou_head_hidden_dim = iou_head_hidden_dim self.dynamic_multimask_via_stability = dynamic_multimask_via_stability self.dynamic_multimask_stability_delta = dynamic_multimask_stability_delta self.dynamic_multimask_stability_thresh = dynamic_multimask_stability_thresh # TwoWayTransformer configuration self.num_hidden_layers = num_hidden_layers self.hidden_size = hidden_size self.num_attention_heads = num_attention_heads self.mlp_dim = mlp_dim self.attention_downsample_rate = attention_downsample_rate
Sam3TrackerMaskDecoderConfig
python
apache__airflow
providers/openlineage/tests/unit/openlineage/extractors/test_base.py
{ "start": 3018, "end": 3397 }
class ____(BaseExtractor): @classmethod def get_operator_classnames(cls): return ["AnotherOperator"] def _execute_extraction(self) -> OperatorLineage | None: return OperatorLineage( inputs=INPUTS, outputs=OUTPUTS, run_facets=RUN_FACETS, job_facets=JOB_FACETS, )
ExtractorWithExecuteExtractionOnly
python
facebook__pyre-check
source/interprocedural_analyses/taint/test/integration/via_type_of.py
{ "start": 1798, "end": 1836 }
class ____: ... @dataclass
Test3_Foo
python
airbytehq__airbyte
airbyte-integrations/connectors/source-klaviyo/components.py
{ "start": 4328, "end": 5762 }
class ____(ArchivedToPerPartitionStateMigration): """ Campaigns stream has 2 partition field: archived and campaign_type(email, sms). Previous API version didn't return sms in campaigns output so we need to migrate only email partition. Example input state: { "updated_at": "2020-10-10T00:00:00+00:00", "archived": { "updated_at": "2021-10-10T00:00:00+00:00" } } Example output state: { "partition":{ "archived":"true","campaign_type":"email" }, "cursor":{ "updated_at":"2021-10-10T00:00:00+00:00" } } { "partition":{ "archived":"false","campaign_type":"email" }, "cursor":{ "updated_at":"2020-10-10T00:00:00+00:00" } } """ def migrate(self, stream_state: Mapping[str, Any]) -> Mapping[str, Any]: if not self.should_migrate(stream_state): return stream_state is_archived_updated_at = self.get_archived_cursor_value(stream_state) is_not_archived_updated_at = self.get_not_archived_cursor_value(stream_state) migrated_stream_state = { "states": [ {"partition": ARCHIVED_EMAIL, "cursor": {self._cursor.cursor_field: is_archived_updated_at}}, {"partition": NOT_ARCHIVED_EMAIL, "cursor": {self._cursor.cursor_field: is_not_archived_updated_at}}, ] } return migrated_stream_state
CampaignsStateMigration
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/generator2.py
{ "start": 215, "end": 281 }
class ____: def shouldContinue(self): return True
ClassB
python
apache__airflow
task-sdk/tests/task_sdk/definitions/test_mixins.py
{ "start": 7595, "end": 11710 }
class ____: def test_set_upstream(self): with DAG("test_set_upstream"): op_a = BaseOperator(task_id="a") op_b = BaseOperator(task_id="b") op_c = BaseOperator(task_id="c") op_d = BaseOperator(task_id="d") op_d << op_c << op_b << op_a assert [op_a] == op_b.upstream_list assert [op_b] == op_c.upstream_list assert [op_c] == op_d.upstream_list def test_set_downstream(self): with DAG("test_set_downstream"): op_a = BaseOperator(task_id="a") op_b = BaseOperator(task_id="b") op_c = BaseOperator(task_id="c") op_d = BaseOperator(task_id="d") op_a >> op_b >> op_c >> op_d assert [op_a] == op_b.upstream_list assert [op_b] == op_c.upstream_list assert [op_c] == op_d.upstream_list def test_set_upstream_list(self): with DAG("test_set_upstream_list"): op_a = BaseOperator(task_id="a") op_b = BaseOperator(task_id="b") op_c = BaseOperator(task_id="c") op_d = BaseOperator(task_id="d") [op_d, op_c << op_b] << op_a assert [op_a] == op_b.upstream_list assert [op_a] == op_d.upstream_list assert [op_b] == op_c.upstream_list def test_set_downstream_list(self): with DAG("test_set_downstream_list"): op_a = BaseOperator(task_id="a") op_b = BaseOperator(task_id="b") op_c = BaseOperator(task_id="c") op_d = BaseOperator(task_id="d") op_a >> [op_b >> op_c, op_d] assert op_b.upstream_list == [] assert [op_a] == op_d.upstream_list assert {op_a, op_b} == set(op_c.upstream_list) def test_set_upstream_inner_list(self): with DAG("test_set_upstream_inner_list"): op_a = BaseOperator(task_id="a") op_b = BaseOperator(task_id="b") op_c = BaseOperator(task_id="c") op_d = BaseOperator(task_id="d") with pytest.raises(AttributeError) as e_info: [op_d << [op_c, op_b]] << op_a assert str(e_info.value) == "'list' object has no attribute 'update_relative'" assert op_b.upstream_list == [] assert op_c.upstream_list == [] assert {op_b, op_c} == set(op_d.upstream_list) def test_set_downstream_inner_list(self): with DAG("test_set_downstream_inner_list"): op_a = BaseOperator(task_id="a") op_b = BaseOperator(task_id="b") 
op_c = BaseOperator(task_id="c") op_d = BaseOperator(task_id="d") op_a >> [[op_b, op_c] >> op_d] assert op_b.upstream_list == [] assert op_c.upstream_list == [] assert {op_b, op_c, op_a} == set(op_d.upstream_list) def test_set_upstream_list_subarray(self): with DAG("test_set_upstream_list"): op_a = BaseOperator(task_id="a") op_b_1 = BaseOperator(task_id="b_1") op_b_2 = BaseOperator(task_id="b_2") op_c = BaseOperator(task_id="c") op_d = BaseOperator(task_id="d") with pytest.raises(AttributeError) as e_info: [op_d, op_c << [op_b_1, op_b_2]] << op_a assert str(e_info.value) == "'list' object has no attribute 'update_relative'" assert op_b_1.upstream_list == [] assert op_b_2.upstream_list == [] assert op_d.upstream_list == [] assert {op_b_1, op_b_2} == set(op_c.upstream_list) def test_set_downstream_list_subarray(self): with DAG("test_set_downstream_list"): op_a = BaseOperator(task_id="a") op_b_1 = BaseOperator(task_id="b_1") op_b_2 = BaseOperator(task_id="b2") op_c = BaseOperator(task_id="c") op_d = BaseOperator(task_id="d") op_a >> [[op_b_1, op_b_2] >> op_c, op_d] assert op_b_1.upstream_list == [] assert op_b_2.upstream_list == [] assert [op_a] == op_d.upstream_list assert {op_a, op_b_1, op_b_2} == set(op_c.upstream_list)
TestDependencyMixin
python
apache__airflow
task-sdk/src/airflow/sdk/execution_time/comms.py
{ "start": 18692, "end": 18895 }
class ____(BaseModel): """Response containing previous Dag run information.""" dag_run: DagRun | None = None type: Literal["PreviousDagRunResult"] = "PreviousDagRunResult"
PreviousDagRunResult
python
ray-project__ray
rllib/utils/replay_buffers/tests/test_prioritized_episode_buffer.py
{ "start": 272, "end": 14310 }
class ____(unittest.TestCase): @staticmethod def _get_episode(episode_len=None, id_=None, with_extra_model_outs=False): eps = SingleAgentEpisode(id_=id_, observations=[0.0], infos=[{}]) ts = np.random.randint(1, 200) if episode_len is None else episode_len for t in range(ts): eps.add_env_step( observation=float(t + 1), action=int(t), reward=0.1 * (t + 1), infos={}, extra_model_outputs=( {k: k for k in range(2)} if with_extra_model_outs else None ), ) eps.is_terminated = np.random.random() > 0.5 eps.is_truncated = False if eps.is_terminated else np.random.random() > 0.8 return eps def test_add_and_eviction_logic(self): # Fill the buffer till capacity (100 ts). buffer = PrioritizedEpisodeReplayBuffer(capacity=100) episode = self._get_episode(id_="A", episode_len=50) buffer.add(episode) self.assertEqual(buffer.get_num_episodes(), 1) self.assertEqual(buffer.get_num_timesteps(), 50) episode = self._get_episode(id_="B", episode_len=25) buffer.add(episode) self.assertEqual(buffer.get_num_episodes(), 2) self.assertEqual(buffer.get_num_timesteps(), 75) # No eviction yet (but we are full). episode = self._get_episode(id_="C", episode_len=25) buffer.add(episode) self.assertEqual(buffer.get_num_episodes(), 3) self.assertEqual(buffer.get_num_timesteps(), 100) # Trigger eviction of first episode by adding a single timestep episode. episode = self._get_episode(id_="D", episode_len=1) buffer.add(episode) self.assertEqual(buffer.get_num_episodes(), 3) self.assertEqual(buffer.get_num_timesteps(), 51) self.assertEqual({eps.id_ for eps in buffer.episodes}, {"B", "C", "D"}) # Add another big episode and trigger another eviction. episode = self._get_episode(id_="E", episode_len=200) buffer.add(episode) self.assertTrue(buffer.get_num_episodes() == 1) self.assertTrue(buffer.get_num_timesteps() == 200) self.assertTrue({eps.id_ for eps in buffer.episodes} == {"E"}) # Add another small episode and trigger another eviction. 
episode = self._get_episode(id_="F", episode_len=2) buffer.add(episode) self.assertTrue(buffer.get_num_episodes() == 1) self.assertTrue(buffer.get_num_timesteps() == 2) self.assertTrue({eps.id_ for eps in buffer.episodes} == {"F"}) # Add N small episodes. for i in range(10): episode = self._get_episode(id_=str(i), episode_len=10) buffer.add(episode) self.assertTrue(buffer.get_num_episodes() == 10) self.assertTrue(buffer.get_num_timesteps() == 100) # Add a 20-ts episode and expect to have evicted 3 episodes. episode = self._get_episode(id_="G", episode_len=21) buffer.add(episode) self.assertTrue(buffer.get_num_episodes() == 8) self.assertTrue(buffer.get_num_timesteps() == 91) self.assertTrue( {eps.id_ for eps in buffer.episodes} == {"3", "4", "5", "6", "7", "8", "9", "G"} ) def test_buffer_sample_logic(self): buffer = PrioritizedEpisodeReplayBuffer(capacity=10000) for _ in range(200): episode = self._get_episode() buffer.add(episode) for i in range(1000): sample = buffer.sample(batch_size_B=16, n_step=1) check(buffer.get_sampled_timesteps(), 16 * (i + 1)) for eps in sample: ( obs, action, reward, next_obs, is_terminated, is_truncated, weight, n_step, ) = ( eps.get_observations(0), eps.get_actions(-1), eps.get_rewards(-1), eps.get_observations(-1), eps.is_terminated, eps.is_truncated, eps.get_extra_model_outputs("weights", -1), eps.get_extra_model_outputs("n_step", -1), ) # Make sure terminated and truncated are never both True. assert not (is_truncated and is_terminated) # Note, floating point numbers cannot be compared directly. tolerance = 1e-8 # Assert that actions correspond to the observations. check(obs, action, atol=tolerance) # Assert that next observations are correctly one step after # observations. check(next_obs, obs + 1, atol=tolerance) # Assert that the reward comes from the next observation. check(reward * 10, next_obs, atol=tolerance) # Furthermore, assert that the importance sampling weights are # one for `beta=0.0`. 
check(weight, 1.0, atol=tolerance) # Assert that all n-steps are 1.0 as passed into `sample`. check(n_step, 1.0, atol=tolerance) def test_buffer_sample_logic_with_3_step(self): buffer = PrioritizedEpisodeReplayBuffer(capacity=10000) for _ in range(200): episode = self._get_episode() buffer.add(episode) for i in range(1000): sample = buffer.sample(batch_size_B=16, n_step=3) check(buffer.get_sampled_timesteps(), 16 * (i + 1)) for eps in sample: ( obs, action, reward, next_obs, is_terminated, is_truncated, weight, n_step, ) = ( eps.get_observations(0), eps.get_actions(-1), eps.get_rewards(-1), eps.get_observations(-1), eps.is_terminated, eps.is_truncated, eps.get_extra_model_outputs("weights", -1), eps.get_extra_model_outputs("n_step", -1), ) # Make sure terminated and truncated are never both True. assert not (is_truncated and is_terminated) # Note, floating point numbers cannot be compared directly. tolerance = 1e-8 # Assert that actions correspond to the observations. check(obs, action, atol=tolerance) # Assert that next observations are correctly one step after # observations. check(next_obs, obs + 3, atol=tolerance) # Assert that the reward comes from the next observation. # Assert that the reward is indeed the cumulated sum of rewards # collected between the observation and the next_observation. reward_sum = ( next_obs * 0.99**2 + (next_obs - 1) * 0.99 + next_obs - 2 ) * 0.1 check(reward, reward_sum, atol=tolerance) # Furthermore, assert that the importance sampling weights are # one for `beta=0.0`. check(weight, 1.0, atol=tolerance) # Assert that all n-steps are 1.0 as passed into `sample`. 
check(n_step, 3.0, atol=tolerance) def test_buffer_sample_logic_with_random_n_step(self): buffer = PrioritizedEpisodeReplayBuffer(capacity=10000) for _ in range(200): episode = self._get_episode() buffer.add(episode) for i in range(1000): sample = buffer.sample(batch_size_B=16, n_step=(1, 5)) check(buffer.get_sampled_timesteps(), 16 * (i + 1)) n_steps = [] for eps in sample: # Get the n-step that was used for sampling. n_step = eps.get_extra_model_outputs("n_step", -1) # Note, floating point numbers cannot be compared directly. tolerance = 1e-8 # Ensure that n-steps are in between 1 and 5. self.assertTrue(n_step - 5.0 < tolerance) self.assertTrue(n_step - 1.0 > -tolerance) n_steps.append(n_step) # Ensure that there is variation in the n-steps. self.assertTrue(np.var(n_steps) > 0.0) def test_buffer_sample_logic_with_infos_and_extra_model_output(self): buffer = PrioritizedEpisodeReplayBuffer(capacity=10000) for _ in range(200): episode = self._get_episode(with_extra_model_outs=True) buffer.add(episode) for i in range(1000): sample = buffer.sample( batch_size_B=16, n_step=1, include_infos=True, include_extra_model_outputs=True, ) check(buffer.get_sampled_timesteps(), 16 * (i + 1)) for eps in sample: (infos, extra_model_output_0, extra_model_output_1,) = ( eps.get_infos(), eps.get_extra_model_outputs(0), eps.get_extra_model_outputs(1), ) # Assert that we have infos from both steps. check(len(infos), 2) # Ensure both extra model outputs have both a length of 1.abs check(len(extra_model_output_0), 1) check(len(extra_model_output_1), 1) def test_update_priorities(self): # Define replay buffer (alpha=1.0). buffer = PrioritizedEpisodeReplayBuffer(capacity=100) # Generate 200 episode of random length. for _ in range(200): episode = self._get_episode() buffer.add(episode) # Now sample from the buffer and update priorities. 
sample = buffer.sample(batch_size_B=16, n_step=1) weights = np.array( [eps.get_extra_model_outputs("weights", -1) for eps in sample] ) # Make sure the initial weights are 1.0. tolerance = 1e-5 self.assertTrue(np.all(weights - 1 < tolerance)) # Define some deltas. deltas = np.array([0.01] * 16) # Get the last sampled indices (in the segment trees). last_sampled_indices = copy.deepcopy(buffer._last_sampled_indices) # Update th epriorities of the last sampled transitions. buffer.update_priorities(priorities=deltas) # Assert that the new priorities are indeed the ones we passed in. new_priorities = [buffer._sum_segment[idx] for idx in last_sampled_indices] self.assertTrue(np.all(new_priorities - deltas < tolerance)) # Sample several times. index_counts = [] for _ in range(1000): sample = buffer.sample(batch_size_B=16, n_step=1) index_counts.append( any( [ idx in last_sampled_indices for idx in buffer._last_sampled_indices ] ) ) self.assertGreater(0.15, sum(index_counts) / len(index_counts)) # Define replay buffer (alpha=1.0). buffer = PrioritizedEpisodeReplayBuffer(capacity=10) episode = self._get_episode(10) buffer.add(episode) # Manipulate the priorities such that 1's priority is # way higher than the others and sample. buffer._last_sampled_indices = [1] randn = np.random.random() + 0.2 buffer.update_priorities(np.array([randn])) buffer._last_sampled_indices = [1, 2, 3, 4, 5, 6, 7, 8, 9] buffer.update_priorities(np.array([0.01] * 9)) # Expect that around 90% of the samples are from index 1. for _ in range(10): sample = buffer.sample(1000) number_of_ones = np.sum(np.array(buffer._last_sampled_indices) == 0) print(f"1s: {number_of_ones / 1000}") self.assertTrue(number_of_ones / 1000 > 0.8) def test_get_state_and_set_state(self): """Test the get_state and set_state methods. This test checks that the state of the buffer can be saved and restored correctly. It does so by filling the buffer with episodes and then saving the state. 
Then it resets the buffer and restores the state. Finally, it checks that the two buffers are the same by comparing their properties. """ # Define replay buffer (alpha=1.0). buffer = PrioritizedEpisodeReplayBuffer(capacity=100) # Fill the buffer with episodes. for _ in range(200): episode = self._get_episode() buffer.add(episode) # Now get the state of the buffer. state = buffer.get_state() # Now reset the buffer and set the state. buffer2 = PrioritizedEpisodeReplayBuffer(capacity=100) buffer2.set_state(state) # Check that the two buffers are the same. check(buffer.get_num_episodes(), buffer2.get_num_episodes()) check(buffer.get_num_episodes_evicted(), buffer2.get_num_episodes_evicted()) check(buffer.get_num_timesteps(), buffer2.get_num_timesteps()) check(buffer.get_added_timesteps(), buffer2.get_added_timesteps()) check(buffer.get_sampled_timesteps(), buffer2.get_sampled_timesteps()) check(buffer._max_idx, buffer2._max_idx) check(buffer._max_priority, buffer2._max_priority) check(buffer._tree_idx_to_sample_idx, buffer2._tree_idx_to_sample_idx) if __name__ == "__main__": import sys import pytest sys.exit(pytest.main(["-v", __file__]))
TestPrioritizedEpisodeReplayBuffer
python
ray-project__ray
python/ray/train/_internal/state/schema.py
{ "start": 822, "end": 910 }
class ____(str, Enum): DEAD = "DEAD" ALIVE = "ALIVE" @DeveloperAPI
ActorStatusEnum
python
sqlalchemy__sqlalchemy
test/orm/test_versioning.py
{ "start": 56048, "end": 59654 }
class ____(fixtures.MappedTest): # test for #4193, see also #4194 for related notes __sparse_driver_backend__ = True @classmethod def define_tables(cls, metadata): Table( "version_table", metadata, Column( "id", Integer, primary_key=True, test_needs_autoincrement=True ), Column("version_id", Integer, nullable=False), Column("value", String(40), nullable=False), ) @classmethod def setup_classes(cls): class Foo(cls.Basic): pass def _implicit_version_fixture(self): Foo, version_table = self.classes.Foo, self.tables.version_table current = ( version_table.select() .where(version_table.c.id > 0) .alias("current_table") ) self.mapper_registry.map_imperatively( Foo, current, version_id_col=version_table.c.version_id ) s1 = fixture_session() return s1 def _explicit_version_fixture(self): Foo, version_table = self.classes.Foo, self.tables.version_table current = ( version_table.select() .where(version_table.c.id > 0) .alias("current_table") ) self.mapper_registry.map_imperatively( Foo, current, version_id_col=version_table.c.version_id, version_id_generator=False, ) s1 = fixture_session() return s1 def test_implicit(self): Foo = self.classes.Foo s1 = self._implicit_version_fixture() f1 = Foo(value="f1") f2 = Foo(value="f2") s1.add_all((f1, f2)) s1.commit() f1.value = "f1rev2" f2.value = "f2rev2" with conditional_sane_rowcount_warnings(update=True): s1.commit() eq_( s1.query(Foo.id, Foo.value, Foo.version_id).order_by(Foo.id).all(), [(f1.id, "f1rev2", 2), (f2.id, "f2rev2", 2)], ) def test_explicit(self): Foo = self.classes.Foo s1 = self._explicit_version_fixture() f1 = Foo(value="f1", version_id=1) f2 = Foo(value="f2", version_id=1) s1.add_all((f1, f2)) s1.flush() # note this requires that the Session was not expired until # we fix #4195 f1.value = "f1rev2" f1.version_id = 2 f2.value = "f2rev2" f2.version_id = 2 with conditional_sane_rowcount_warnings(update=True): s1.flush() eq_( s1.query(Foo.id, Foo.value, Foo.version_id).order_by(Foo.id).all(), [(f1.id, "f1rev2", 2), 
(f2.id, "f2rev2", 2)], ) def test_implicit_no_readonly(self): # test issue 4194 Foo = self.classes.Foo s1 = self._implicit_version_fixture() f1 = Foo(value="f1") s1.add(f1) s1.flush() is_false(bool(inspect(Foo)._readonly_props)) def go(): eq_(f1.version_id, 1) self.assert_sql_count(testing.db, go, 0) def test_explicit_assign_from_expired(self): # test issue 4195 Foo = self.classes.Foo s1 = self._explicit_version_fixture() configure_mappers() is_true(Foo.version_id.impl.active_history) f1 = Foo(value="f1", version_id=1) s1.add(f1) s1.flush() s1.expire_all() with conditional_sane_rowcount_warnings(update=True): f1.value = "f2" f1.version_id = 2 s1.flush()
VersioningMappedSelectTest
python
mlflow__mlflow
dev/clint/tests/rules/test_no_class_based_tests.py
{ "start": 628, "end": 1551 }
class ____: def helper_function(self): return 42 def setup_something(self): pass def test_something(self): pass # Good - function-based test def test_valid_function(): assert True # Good - regular function def helper_function(): return 42 """ config = Config(select={NoClassBasedTests.name}) violations = lint_file(Path("test_something.py"), code, config, index_path) assert len(violations) == 2 assert all(isinstance(v.rule, NoClassBasedTests) for v in violations) assert violations[0].range == Range(Position(3, 0)) # TestSomething class assert violations[1].range == Range(Position(14, 0)) # TestAnotherThing class def test_no_class_based_tests_non_test_file(index_path: Path) -> None: """Test that the rule doesn't apply to non-test files""" code = """import pytest # This should not be flagged because it's not in a test file
HelperClass
python
catalyst-team__catalyst
catalyst/contrib/losses/triplet.py
{ "start": 440, "end": 6671 }
class ____(nn.Module): """Triplet loss with hard positive/negative mining. Adapted from: https://github.com/NegatioN/OnlineMiningTripletLoss """ def __init__(self, margin: float = 0.3): """ Args: margin: margin for triplet """ super().__init__() self.margin = margin self.ranking_loss = nn.MarginRankingLoss(margin=margin) def _pairwise_distances(self, embeddings, squared=False): """Compute the 2D matrix of distances between all the embeddings. Args: embeddings: tensor of shape (batch_size, embed_dim) squared: if true, output is the pairwise squared euclidean distance matrix. If false, output is the pairwise euclidean distance matrix Returns: torch.Tensor: pairwise matrix of size (batch_size, batch_size) """ # Get squared L2 norm for each embedding. # We can just take the diagonal of `dot_product`. # This also provides more numerical stability # (the diagonal of the result will be exactly 0). # shape (batch_size) square = torch.mm(embeddings, embeddings.t()) diag = torch.diag(square) # Compute the pairwise distance matrix as we have: # ||a - b||^2 = ||a||^2 - 2 <a, b> + ||b||^2 # shape (batch_size, batch_size) distances = diag.view(-1, 1) - 2.0 * square + diag.view(1, -1) # Because of computation errors, some distances # might be negative so we put everything >= 0.0 distances[distances < 0] = 0 if not squared: # Because the gradient of sqrt is infinite # when distances == 0.0 (ex: on the diagonal) # we need to add a small epsilon where distances == 0.0 mask = distances.eq(0).float() distances = distances + mask * 1e-16 distances = (1.0 - mask) * torch.sqrt(distances) return distances def _get_anchor_positive_triplet_mask(self, labels): """ Return a 2D mask where mask[a, p] is True if a and p are distinct and have same label. 
Args: labels: tf.int32 `Tensor` with shape [batch_size] Returns: torch.Tensor: mask with shape [batch_size, batch_size] """ indices_equal = torch.eye(labels.size(0)).type(torch.bool) # labels and indices should be on # the same device, otherwise - exception indices_equal = indices_equal.to("cuda" if labels.is_cuda else "cpu") # Check that i and j are distinct indices_equal = indices_equal.type(TORCH_BOOL) indices_not_equal = ~indices_equal # Check if labels[i] == labels[j] # Uses broadcasting where the 1st argument # has shape (1, batch_size) and the 2nd (batch_size, 1) labels_equal = labels.unsqueeze(0) == labels.unsqueeze(1) return labels_equal & indices_not_equal def _get_anchor_negative_triplet_mask(self, labels): """Return 2D mask where mask[a, n] is True if a and n have same label. Args: labels: tf.int32 `Tensor` with shape [batch_size] Returns: torch.Tensor: mask with shape [batch_size, batch_size] """ # Check if labels[i] != labels[k] # Uses broadcasting where the 1st argument # has shape (1, batch_size) and the 2nd (batch_size, 1) return ~(labels.unsqueeze(0) == labels.unsqueeze(1)) def _batch_hard_triplet_loss(self, embeddings, labels, margin, squared=True): """ Build the triplet loss over a batch of embeddings. For each anchor, we get the hardest positive and hardest negative to form a triplet. Args: labels: labels of the batch, of size (batch_size) embeddings: tensor of shape (batch_size, embed_dim) margin: margin for triplet loss squared: Boolean. If true, output is the pairwise squared euclidean distance matrix. If false, output is the pairwise euclidean distance matrix. 
Returns: torch.Tensor: scalar tensor containing the triplet loss """ # Get the pairwise distance matrix pairwise_dist = self._pairwise_distances(embeddings, squared=squared) # For each anchor, get the hardest positive # First, we need to get a mask for every valid # positive (they should have same label) mask_anchor_positive = self._get_anchor_positive_triplet_mask(labels).float() # We put to 0 any element where (a, p) is not valid # (valid if a != p and label(a) == label(p)) anchor_positive_dist = mask_anchor_positive * pairwise_dist # shape (batch_size, 1) hardest_positive_dist, _ = anchor_positive_dist.max(1, keepdim=True) # For each anchor, get the hardest negative # First, we need to get a mask for every valid negative # (they should have different labels) mask_anchor_negative = self._get_anchor_negative_triplet_mask(labels).float() # We add the maximum value in each row # to the invalid negatives (label(a) == label(n)) max_anchor_negative_dist, _ = pairwise_dist.max(1, keepdim=True) anchor_negative_dist = pairwise_dist + max_anchor_negative_dist * ( 1.0 - mask_anchor_negative ) # shape (batch_size) hardest_negative_dist, _ = anchor_negative_dist.min(1, keepdim=True) # Combine biggest d(a, p) and smallest d(a, n) into final triplet loss tl = hardest_positive_dist - hardest_negative_dist + margin tl[tl < 0] = 0 loss = tl.mean() return loss def forward(self, embeddings, targets): """Forward propagation method for the triplet loss. Args: embeddings: tensor of shape (batch_size, embed_dim) targets: labels of the batch, of size (batch_size) Returns: torch.Tensor: scalar tensor containing the triplet loss """ return self._batch_hard_triplet_loss(embeddings, targets, self.margin)
TripletLoss
python
getsentry__sentry
tests/sentry/api/serializers/test_activity.py
{ "start": 452, "end": 8044 }
class ____(TestCase): def test_pr_activity(self) -> None: self.org = self.create_organization(name="Rowdy Tiger") user = self.create_user() group = self.create_group(status=GroupStatus.UNRESOLVED) repo = self.create_repo(self.project, name="organization-bar") pr = PullRequest.objects.create( organization_id=self.org.id, repository_id=repo.id, key=5, title="aaaa", message="kartoffel", ) activity = Activity.objects.create( project_id=group.project_id, group=group, type=ActivityType.SET_RESOLVED_IN_PULL_REQUEST.value, ident=pr.id, user_id=user.id, data={"pull_request": pr.id}, ) result = serialize([activity], user)[0]["data"] pull_request = result["pullRequest"] assert pull_request["repository"]["name"] == "organization-bar" assert pull_request["message"] == "kartoffel" def test_commit_activity(self) -> None: self.org = self.create_organization(name="Rowdy Tiger") user = self.create_user() group = self.create_group(status=GroupStatus.UNRESOLVED) repo = self.create_repo(self.project, name="organization-bar") commit = Commit.objects.create( organization_id=self.org.id, repository_id=repo.id, key="11111111", message="gemuse" ) activity = Activity.objects.create( project_id=group.project_id, group=group, type=ActivityType.SET_RESOLVED_IN_COMMIT.value, ident=commit.id, user_id=user.id, data={"commit": commit.id}, ) result = serialize([activity], user)[0]["data"] commit_data = result["commit"] assert commit_data["repository"]["name"] == "organization-bar" assert commit_data["message"] == "gemuse" def test_serialize_set_resolve_in_commit_activity_with_release(self) -> None: project = self.create_project(name="test_throwaway") group = self.create_group(project) user = self.create_user() release = self.create_release(project=project, user=user) release.save() commit = Commit.objects.filter(releasecommit__release_id=release.id).get() Activity.objects.create( project_id=project.id, group=group, type=ActivityType.SET_RESOLVED_IN_COMMIT.value, ident=commit.id, user_id=user.id, 
data={"commit": commit.id}, ) act = Activity.objects.get(type=ActivityType.SET_RESOLVED_IN_COMMIT.value) serialized = serialize(act) assert len(serialized["data"]["commit"]["releases"]) == 1 def test_serialize_set_resolve_in_commit_activity_with_no_releases(self) -> None: self.org = self.create_organization(name="komal-test") project = self.create_project(name="random-proj") user = self.create_user() repo = self.create_repo(self.project, name="idk-repo") group = self.create_group(project) commit = Commit.objects.create(organization_id=self.org.id, repository_id=repo.id) Activity.objects.create( project_id=project.id, group=group, type=ActivityType.SET_RESOLVED_IN_COMMIT.value, ident=commit.id, user_id=user.id, data={"commit": commit.id}, ) act = Activity.objects.get(type=ActivityType.SET_RESOLVED_IN_COMMIT.value) serialized = serialize(act) assert len(serialized["data"]["commit"]["releases"]) == 0 assert not Commit.objects.filter(releasecommit__id=commit.id).exists() def test_serialize_set_resolve_in_commit_activity_with_release_not_deployed(self) -> None: project = self.create_project(name="random-test") group = self.create_group(project) user = self.create_user() release = self.create_release(project=project, user=user) release.date_released = None release.save() commit = Commit.objects.filter(releasecommit__release_id=release.id).get() Activity.objects.create( project_id=project.id, group=group, type=ActivityType.SET_RESOLVED_IN_COMMIT.value, ident=commit.id, user_id=user.id, data={"commit": commit.id}, ) act = Activity.objects.get(type=ActivityType.SET_RESOLVED_IN_COMMIT.value) serialized = serialize(act) assert len(serialized["data"]["commit"]["releases"]) == 1 def test_collapse_group_stats_in_activity_with_option(self) -> None: project = self.create_project(name="random-test") group = self.create_group(project) group_2 = self.create_group(project) user = self.create_user() release = self.create_release(project=project, user=user) release.date_released = None 
release.save() Activity.objects.create( project_id=project.id, group=group, type=ActivityType.UNMERGE_DESTINATION.value, ident=group.id, user_id=user.id, data={"destination_id": group_2.id, "source_id": group.id, "fingerprints": ["aabbcc"]}, ) act = Activity.objects.get(type=ActivityType.UNMERGE_DESTINATION.value) serialized = serialize(act) assert "firstSeen" not in serialized["data"]["source"] def test_get_activities_for_group_proxy_user(self) -> None: project = self.create_project(name="test_activities_group") group = self.create_group(project) user = self.create_user() data = serialize( Activity.objects.create_group_activity( group=group, type=ActivityType.NOTE, data={"text": "A human sent this message"}, user=user, ) ) # Regular users, have a new empty key assert data["user"]["name"] == user.username assert data["sentry_app"] is None sentry_app = self.create_sentry_app(name="test_sentry_app") default_avatar = self.create_sentry_app_avatar(sentry_app=sentry_app) upload_avatar = self.create_sentry_app_avatar(sentry_app=sentry_app) with assume_test_silo_mode(SiloMode.CONTROL): proxy_user = User.objects.get(id=sentry_app.proxy_user_id) upload_avatar.avatar_type = 1 # an upload upload_avatar.color = True # a logo upload_avatar.save() data = serialize( Activity.objects.create_group_activity( group=group, type=ActivityType.NOTE, data={"text": "My app sent this message"}, user=proxy_user, ) ) assert data["user"]["name"] == proxy_user.email assert data["sentry_app"]["name"] == sentry_app.name assert { "avatarType": "default", "avatarUuid": default_avatar.ident, "avatarUrl": f"http://testserver/sentry-app-avatar/{default_avatar.ident}/", "color": False, "photoType": "icon", } in data["sentry_app"]["avatars"] assert { "avatarType": "upload", "avatarUuid": upload_avatar.ident, "avatarUrl": f"http://testserver/sentry-app-avatar/{upload_avatar.ident}/", "color": True, "photoType": "logo", } in data["sentry_app"]["avatars"]
GroupActivityTestCase
python
pypa__pip
src/pip/_internal/metadata/base.py
{ "start": 2615, "end": 21347 }
class ____(Protocol): @classmethod def from_directory(cls, directory: str) -> BaseDistribution: """Load the distribution from a metadata directory. :param directory: Path to a metadata directory, e.g. ``.dist-info``. """ raise NotImplementedError() @classmethod def from_metadata_file_contents( cls, metadata_contents: bytes, filename: str, project_name: str, ) -> BaseDistribution: """Load the distribution from the contents of a METADATA file. This is used to implement PEP 658 by generating a "shallow" dist object that can be used for resolution without downloading or building the actual dist yet. :param metadata_contents: The contents of a METADATA file. :param filename: File name for the dist with this metadata. :param project_name: Name of the project this dist represents. """ raise NotImplementedError() @classmethod def from_wheel(cls, wheel: Wheel, name: str) -> BaseDistribution: """Load the distribution from a given wheel. :param wheel: A concrete wheel definition. :param name: File name of the wheel. :raises InvalidWheel: Whenever loading of the wheel causes a :py:exc:`zipfile.BadZipFile` exception to be thrown. :raises UnsupportedWheel: If the wheel is a valid zip, but malformed internally. """ raise NotImplementedError() def __repr__(self) -> str: return f"{self.raw_name} {self.raw_version} ({self.location})" def __str__(self) -> str: return f"{self.raw_name} {self.raw_version}" @property def location(self) -> str | None: """Where the distribution is loaded from. A string value is not necessarily a filesystem path, since distributions can be loaded from other sources, e.g. arbitrary zip archives. ``None`` means the distribution is created in-memory. Do not canonicalize this value with e.g. ``pathlib.Path.resolve()``. If this is a symbolic link, we want to preserve the relative path between it and files in the distribution. 
""" raise NotImplementedError() @property def editable_project_location(self) -> str | None: """The project location for editable distributions. This is the directory where pyproject.toml or setup.py is located. None if the distribution is not installed in editable mode. """ # TODO: this property is relatively costly to compute, memoize it ? direct_url = self.direct_url if direct_url: if direct_url.is_local_editable(): return url_to_path(direct_url.url) else: # Search for an .egg-link file by walking sys.path, as it was # done before by dist_is_editable(). egg_link_path = egg_link_path_from_sys_path(self.raw_name) if egg_link_path: # TODO: get project location from second line of egg_link file # (https://github.com/pypa/pip/issues/10243) return self.location return None @property def installed_location(self) -> str | None: """The distribution's "installed" location. This should generally be a ``site-packages`` directory. This is usually ``dist.location``, except for legacy develop-installed packages, where ``dist.location`` is the source code location, and this is where the ``.egg-link`` file is. The returned location is normalized (in particular, with symlinks removed). """ raise NotImplementedError() @property def info_location(self) -> str | None: """Location of the .[egg|dist]-info directory or file. Similarly to ``location``, a string value is not necessarily a filesystem path. ``None`` means the distribution is created in-memory. For a modern .dist-info installation on disk, this should be something like ``{location}/{raw_name}-{version}.dist-info``. Do not canonicalize this value with e.g. ``pathlib.Path.resolve()``. If this is a symbolic link, we want to preserve the relative path between it and other files in the distribution. """ raise NotImplementedError() @property def installed_by_distutils(self) -> bool: """Whether this distribution is installed with legacy distutils format. 
A distribution installed with "raw" distutils not patched by setuptools uses one single file at ``info_location`` to store metadata. We need to treat this specially on uninstallation. """ info_location = self.info_location if not info_location: return False return pathlib.Path(info_location).is_file() @property def installed_as_egg(self) -> bool: """Whether this distribution is installed as an egg. This usually indicates the distribution was installed by (older versions of) easy_install. """ location = self.location if not location: return False # XXX if the distribution is a zipped egg, location has a trailing / # so we resort to pathlib.Path to check the suffix in a reliable way. return pathlib.Path(location).suffix == ".egg" @property def installed_with_setuptools_egg_info(self) -> bool: """Whether this distribution is installed with the ``.egg-info`` format. This usually indicates the distribution was installed with setuptools with an old pip version or with ``single-version-externally-managed``. Note that this ensure the metadata store is a directory. distutils can also installs an ``.egg-info``, but as a file, not a directory. This property is *False* for that case. Also see ``installed_by_distutils``. """ info_location = self.info_location if not info_location: return False if not info_location.endswith(".egg-info"): return False return pathlib.Path(info_location).is_dir() @property def installed_with_dist_info(self) -> bool: """Whether this distribution is installed with the "modern format". This indicates a "modern" installation, e.g. storing metadata in the ``.dist-info`` directory. This applies to installations made by setuptools (but through pip, not directly), or anything using the standardized build backend interface (PEP 517). 
""" info_location = self.info_location if not info_location: return False if not info_location.endswith(".dist-info"): return False return pathlib.Path(info_location).is_dir() @property def canonical_name(self) -> NormalizedName: raise NotImplementedError() @property def version(self) -> Version: raise NotImplementedError() @property def raw_version(self) -> str: raise NotImplementedError() @property def setuptools_filename(self) -> str: """Convert a project name to its setuptools-compatible filename. This is a copy of ``pkg_resources.to_filename()`` for compatibility. """ return self.raw_name.replace("-", "_") @property def direct_url(self) -> DirectUrl | None: """Obtain a DirectUrl from this distribution. Returns None if the distribution has no `direct_url.json` metadata, or if `direct_url.json` is invalid. """ try: content = self.read_text(DIRECT_URL_METADATA_NAME) except FileNotFoundError: return None try: return DirectUrl.from_json(content) except ( UnicodeDecodeError, json.JSONDecodeError, DirectUrlValidationError, ) as e: logger.warning( "Error parsing %s for %s: %s", DIRECT_URL_METADATA_NAME, self.canonical_name, e, ) return None @property def installer(self) -> str: try: installer_text = self.read_text("INSTALLER") except (OSError, ValueError, NoneMetadataError): return "" # Fail silently if the installer file cannot be read. for line in installer_text.splitlines(): cleaned_line = line.strip() if cleaned_line: return cleaned_line return "" @property def requested(self) -> bool: return self.is_file("REQUESTED") @property def editable(self) -> bool: return bool(self.editable_project_location) @property def local(self) -> bool: """If distribution is installed in the current virtual environment. Always True if we're not in a virtualenv. 
""" if self.installed_location is None: return False return is_local(self.installed_location) @property def in_usersite(self) -> bool: if self.installed_location is None or user_site is None: return False return self.installed_location.startswith(normalize_path(user_site)) @property def in_site_packages(self) -> bool: if self.installed_location is None or site_packages is None: return False return self.installed_location.startswith(normalize_path(site_packages)) def is_file(self, path: InfoPath) -> bool: """Check whether an entry in the info directory is a file.""" raise NotImplementedError() def iter_distutils_script_names(self) -> Iterator[str]: """Find distutils 'scripts' entries metadata. If 'scripts' is supplied in ``setup.py``, distutils records those in the installed distribution's ``scripts`` directory, a file for each script. """ raise NotImplementedError() def read_text(self, path: InfoPath) -> str: """Read a file in the info directory. :raise FileNotFoundError: If ``path`` does not exist in the directory. :raise NoneMetadataError: If ``path`` exists in the info directory, but cannot be read. """ raise NotImplementedError() def iter_entry_points(self) -> Iterable[BaseEntryPoint]: raise NotImplementedError() def _metadata_impl(self) -> email.message.Message: raise NotImplementedError() @functools.cached_property def metadata(self) -> email.message.Message: """Metadata of distribution parsed from e.g. METADATA or PKG-INFO. This should return an empty message if the metadata file is unavailable. :raises NoneMetadataError: If the metadata file is available, but does not contain valid metadata. """ metadata = self._metadata_impl() self._add_egg_info_requires(metadata) return metadata @property def metadata_dict(self) -> dict[str, Any]: """PEP 566 compliant JSON-serializable representation of METADATA or PKG-INFO. This should return an empty dict if the metadata file is unavailable. 
:raises NoneMetadataError: If the metadata file is available, but does not contain valid metadata. """ return msg_to_json(self.metadata) @property def metadata_version(self) -> str | None: """Value of "Metadata-Version:" in distribution metadata, if available.""" return self.metadata.get("Metadata-Version") @property def raw_name(self) -> str: """Value of "Name:" in distribution metadata.""" # The metadata should NEVER be missing the Name: key, but if it somehow # does, fall back to the known canonical name. return self.metadata.get("Name", self.canonical_name) @property def requires_python(self) -> SpecifierSet: """Value of "Requires-Python:" in distribution metadata. If the key does not exist or contains an invalid value, an empty SpecifierSet should be returned. """ value = self.metadata.get("Requires-Python") if value is None: return SpecifierSet() try: # Convert to str to satisfy the type checker; this can be a Header object. spec = SpecifierSet(str(value)) except InvalidSpecifier as e: message = "Package %r has an invalid Requires-Python: %s" logger.warning(message, self.raw_name, e) return SpecifierSet() return spec def iter_dependencies(self, extras: Collection[str] = ()) -> Iterable[Requirement]: """Dependencies of this distribution. For modern .dist-info distributions, this is the collection of "Requires-Dist:" entries in distribution metadata. """ raise NotImplementedError() def iter_raw_dependencies(self) -> Iterable[str]: """Raw Requires-Dist metadata.""" return self.metadata.get_all("Requires-Dist", []) def iter_provided_extras(self) -> Iterable[NormalizedName]: """Extras provided by this distribution. For modern .dist-info distributions, this is the collection of "Provides-Extra:" entries in distribution metadata. The return value of this function is expected to be normalised names, per PEP 685, with the returned value being handled appropriately by `iter_dependencies`. 
""" raise NotImplementedError() def _iter_declared_entries_from_record(self) -> Iterator[str] | None: try: text = self.read_text("RECORD") except FileNotFoundError: return None # This extra Path-str cast normalizes entries. return (str(pathlib.Path(row[0])) for row in csv.reader(text.splitlines())) def _iter_declared_entries_from_legacy(self) -> Iterator[str] | None: try: text = self.read_text("installed-files.txt") except FileNotFoundError: return None paths = (p for p in text.splitlines(keepends=False) if p) root = self.location info = self.info_location if root is None or info is None: return paths try: info_rel = pathlib.Path(info).relative_to(root) except ValueError: # info is not relative to root. return paths if not info_rel.parts: # info *is* root. return paths return ( _convert_installed_files_path(pathlib.Path(p).parts, info_rel.parts) for p in paths ) def iter_declared_entries(self) -> Iterator[str] | None: """Iterate through file entries declared in this distribution. For modern .dist-info distributions, this is the files listed in the ``RECORD`` metadata file. For legacy setuptools distributions, this comes from ``installed-files.txt``, with entries normalized to be compatible with the format used by ``RECORD``. :return: An iterator for listed entries, or None if the distribution contains neither ``RECORD`` nor ``installed-files.txt``. """ return ( self._iter_declared_entries_from_record() or self._iter_declared_entries_from_legacy() ) def _iter_requires_txt_entries(self) -> Iterator[RequiresEntry]: """Parse a ``requires.txt`` in an egg-info directory. This is an INI-ish format where an egg-info stores dependencies. A section name describes extra other environment markers, while each entry is an arbitrary string (not a key-value pair) representing a dependency as a requirement string (no markers). There is a construct in ``importlib.metadata`` called ``Sectioned`` that does mostly the same, but the format is currently considered private. 
""" try: content = self.read_text("requires.txt") except FileNotFoundError: return extra = marker = "" # Section-less entries don't have markers. for line in content.splitlines(): line = line.strip() if not line or line.startswith("#"): # Comment; ignored. continue if line.startswith("[") and line.endswith("]"): # A section header. extra, _, marker = line.strip("[]").partition(":") continue yield RequiresEntry(requirement=line, extra=extra, marker=marker) def _iter_egg_info_extras(self) -> Iterable[str]: """Get extras from the egg-info directory.""" known_extras = {""} for entry in self._iter_requires_txt_entries(): extra = canonicalize_name(entry.extra) if extra in known_extras: continue known_extras.add(extra) yield extra def _iter_egg_info_dependencies(self) -> Iterable[str]: """Get distribution dependencies from the egg-info directory. To ease parsing, this converts a legacy dependency entry into a PEP 508 requirement string. Like ``_iter_requires_txt_entries()``, there is code in ``importlib.metadata`` that does mostly the same, but not do exactly what we need. Namely, ``importlib.metadata`` does not normalize the extra name before putting it into the requirement string, which causes marker comparison to fail because the dist-info format do normalize. This is consistent in all currently available PEP 517 backends, although not standardized. 
""" for entry in self._iter_requires_txt_entries(): extra = canonicalize_name(entry.extra) if extra and entry.marker: marker = f'({entry.marker}) and extra == "{extra}"' elif extra: marker = f'extra == "{extra}"' elif entry.marker: marker = entry.marker else: marker = "" if marker: yield f"{entry.requirement} ; {marker}" else: yield entry.requirement def _add_egg_info_requires(self, metadata: email.message.Message) -> None: """Add egg-info requires.txt information to the metadata.""" if not metadata.get_all("Requires-Dist"): for dep in self._iter_egg_info_dependencies(): metadata["Requires-Dist"] = dep if not metadata.get_all("Provides-Extra"): for extra in self._iter_egg_info_extras(): metadata["Provides-Extra"] = extra
BaseDistribution
python
google__jax
tests/multiprocess/array_test.py
{ "start": 18432, "end": 35541 }
class ____(jt_multiprocess.MultiProcessTest): def test_create_nonaddressable_array(self): y, x = create_nonaddressable_array((8, 8)) # The array is non-addressable in at least one process. self.assertLess(len(y.sharding._internal_device_list.process_indices), jax.process_count()) for a in y.addressable_shards: np.testing.assert_array_equal(a.data, x[a.index]) fr, x = create_nonaddressable_array((8, 8), spec=P()) self.assertTrue(fr.sharding.is_fully_replicated) self.assertLess(len(fr.sharding._internal_device_list.process_indices), jax.process_count()) if fr.sharding.has_addressable_devices: np.testing.assert_array_equal(x, fr) def test_named_sharding_is_fully_addressable(self): pid = 0 ds = jax.local_devices(process_index=pid) mesh = jtu.create_mesh((len(ds),), ("x",)) s = jax.sharding.NamedSharding(mesh, P("x")) self.assertEqual(s.is_fully_addressable, jax.process_index() == pid) def test_single_device_sharding_is_fully_addressable(self): d = jax.devices()[0] s = jax.sharding.SingleDeviceSharding(d) self.assertEqual(s.is_fully_addressable, jax.process_index() == d.process_index) def test_array_with_no_local_shards_has_valid_layout(self): d = jax.devices()[0] s = jax.sharding.SingleDeviceSharding(d) shape = (8, 8) np_inp = np.arange(math.prod(shape), dtype=np.int32).reshape(shape) xs = [] if jax.process_index() == d.process_index: x = jax.device_put(np_inp, s) xs.append(x) arr = jax.make_array_from_single_device_arrays( shape, s, xs, dtype=jnp.int32) self.assertIsNotNone(arr.format.layout) def test_device_put_uncommitted_array_namedsharding(self): n_local = len(jax.local_devices()) pid = 0 mesh = jax.make_mesh( (n_local,), ("x",), devices=jax.local_devices(process_index=pid), axis_types=(jax.sharding.AxisType.Explicit,)) s = jax.sharding.NamedSharding(mesh, P("x",)) inp = jnp.arange(16).reshape(8, 2) out = jax.device_put(inp, s) # device_put of an uncommitted array to a sharding that is addressable only # in process `pid` should return an array with addressable 
shards only in # process `pid`. In other processes, the returned array has no addressable # shards. expected_num_shards = n_local if jax.process_index() == pid else 0 self.assertLen(out.addressable_shards, expected_num_shards) for shard in out.addressable_shards: np.testing.assert_array_equal(shard.data, inp[shard.index]) self.assertEqual(out.sharding, s) def test_device_put_numpy_array_namedsharding(self): n_local = len(jax.local_devices()) pid = 1 mesh = jax.make_mesh( (n_local,), ("x",), devices=jax.local_devices(process_index=pid), axis_types=(jax.sharding.AxisType.Explicit,)) s = jax.sharding.NamedSharding(mesh, P("x",)) inp = np.arange(16).reshape(8, 2) out = jax.device_put(inp, s) # device_put of a numpy array to a sharding that is addressable only in # process `pid` should return an array with addressable shards only in # process `pid`. In other processes, the returned array has no addressable # shards. expected_num_shards = n_local if jax.process_index() == pid else 0 self.assertLen(out.addressable_shards, expected_num_shards) for shard in out.addressable_shards: np.testing.assert_array_equal(shard.data, inp[shard.index]) self.assertEqual(out.sharding, s) def test_device_put_numpy_array_singledevice(self): inp = np.arange(16).reshape(8, 2) d = jax.devices()[0] out = jax.device_put(inp, d) # device_put of a numpy array to a sharding that is addressable only in # process `pid` should return an array with addressable shards only in # process `pid`. In other processes, the returned array has no addressable # shards. 
expected_num_shards = 1 if jax.process_index() == d.process_index else 0 self.assertLen(out.addressable_shards, expected_num_shards) for shard in out.addressable_shards: np.testing.assert_array_equal(shard.data, inp[shard.index]) self.assertEqual(out.sharding, jax.sharding.SingleDeviceSharding(d)) def test_device_put_committed_array_error(self): inp = jax.device_put(jnp.arange(16).reshape(8, 2), jax.local_devices()[0]) # device_put of a committed array to a nonaddressable sharding should raise # an error (until cross-host transfers are supported). with self.assertRaisesRegex(RuntimeError, "Cannot copy array to non-addressable device"): nonlocal_pid = (jax.process_index() + 1) % jax.process_count() jax.device_put(inp, jax.local_devices(process_index=nonlocal_pid)[0]) def test_make_array_from_callback(self): n_local = jax.local_device_count() pid = 1 mesh = jax.make_mesh( (n_local,), ("x",), devices=jax.local_devices(process_index=pid), axis_types=(jax.sharding.AxisType.Explicit,)) s = jax.sharding.NamedSharding(mesh, P("x",)) # Create an array that is non-addressable in processes besides `pid`. global_data = np.arange(16, dtype=np.int32).reshape(8, 2) arr = jax.make_array_from_callback( global_data.shape, s, lambda idx: global_data[idx], dtype=global_data.dtype) # The returned array should only contain addressable shards in process # `pid`. 
expected_num_shards = n_local if jax.process_index() == pid else 0 self.assertLen(arr.addressable_shards, expected_num_shards) np.testing.assert_array_equal(arr.shape, global_data.shape) for shard in arr.addressable_shards: np.testing.assert_array_equal(shard.data, global_data[shard.index]) def test_make_array_from_callback_prngkey(self): n_local = jax.local_device_count() pid = 1 mesh = jax.make_mesh( (n_local,), ("x",), devices=jax.local_devices(process_index=pid), axis_types=(jax.sharding.AxisType.Explicit,)) s = jax.sharding.NamedSharding(mesh, P("x",)) # Create a PRNG key array that is non-addressable in processes besides # `pid`. seeds = jnp.arange(8) global_data = jax.vmap(lambda x: jax.random.key(seed=x))(seeds) k = jax.random.key(0) arr = jax.make_array_from_callback( global_data.shape, s, lambda idx: global_data[idx], dtype=k.dtype) # The returned array should only contain addressable shards in process # `pid`. expected_num_shards = n_local if jax.process_index() == pid else 0 self.assertLen(arr.addressable_shards, expected_num_shards) np.testing.assert_array_equal(arr.shape, global_data.shape) for shard in arr.addressable_shards: np.testing.assert_array_equal(shard.data.shape, (8 // n_local,)) def test_sharding_process_indices_device_subset(self): n_devices = jax.device_count() mesh = jax.make_mesh( (n_devices // 2,), ("x",), devices=jax.devices()[:n_devices // 2], axis_types=(jax.sharding.AxisType.Explicit,)) s = jax.sharding.NamedSharding(mesh, P("x",)) expected_pids = {d.process_index for d in s.device_set} self.assertEqual(s._internal_device_list.process_indices, expected_pids) self.assertLen(s._internal_device_list.process_indices, jax.process_count() // 2) def test_jit_no_local_devices_named_sharding(self): x = np.arange(64).reshape(8, 8) n_local = jax.local_device_count() pid = 1 # Create a sharding that is non-addressable in processes besides `pid`. 
mesh = jax.make_mesh( (n_local,), ("x",), devices=jax.local_devices(process_index=pid), axis_types=(jax.sharding.AxisType.Explicit,)) s = jax.sharding.NamedSharding(mesh, P("x",)) y = jax.device_put(x, s) expected_num_shards = n_local if jax.process_index() == pid else 0 self.assertLen(y.addressable_shards, expected_num_shards) @jax.jit def f(x): return x + 1 # The returned array should only contain addressable shards in process # `pid`. No work is done in other processes. z = f(y) z.block_until_ready() self.assertLen(z.addressable_shards, expected_num_shards) if jax.process_index() == pid: for shard in z.addressable_shards: np.testing.assert_array_equal(shard.data, x[shard.index] + 1) def test_jit_no_local_devices_named_sharding_collective(self): x = np.arange(64).reshape(8, 8) n_local = jax.local_device_count() pid = 1 # Create a sharding that is non-addressable in processes besides `pid`. mesh = jax.make_mesh( (n_local,), ("x",), devices=jax.local_devices(process_index=pid), axis_types=(jax.sharding.AxisType.Explicit,)) s = jax.sharding.NamedSharding(mesh, P("x",)) y = jax.device_put(x, s) expected_num_shards = n_local if jax.process_index() == pid else 0 self.assertLen(y.addressable_shards, expected_num_shards) @jax.jit def f(x): return jnp.sum(x) # The returned array should only contain addressable shards in process # `pid`. No work is done in other processes. z = f(y) z.block_until_ready() self.assertLen(z.addressable_shards, expected_num_shards) if jax.process_index() == pid: expected = x.sum() for shard in z.addressable_shards: np.testing.assert_array_equal(shard.data, expected) def test_jit_no_local_devices_single_device_sharding(self): x = np.arange(64).reshape(8, 8) pid = 1 # Create a single device sharding for a device local to process `pid`. 
s = jax.sharding.SingleDeviceSharding( jax.local_devices(process_index=pid)[0]) y = jax.device_put(x, s) expected_num_shards = 1 if jax.process_index() == pid else 0 self.assertLen(y.addressable_shards, expected_num_shards) @jax.jit def f(x): return x + 1 # The returned array should only contain an addressable shard in process # `pid`. No work is done in other processes. z = f(y) z.block_until_ready() self.assertLen(z.addressable_shards, expected_num_shards) if jax.process_index() == pid: np.testing.assert_array_equal(z.addressable_shards[0].data, x + 1) def test_jit_fastpath_matmul(self): mesh = jax.sharding.Mesh( jax.devices()[: len(jax.devices()) // 2], axis_names=("devices")) sharding = jax.sharding.NamedSharding(mesh, P()) x = jax.device_put( jnp.arange(8 * 16, dtype=jnp.float32).reshape((8, 16)), sharding) w = jax.device_put( jnp.arange(16 * 4, dtype=jnp.float32).reshape((16, 4)), sharding) jax.experimental.multihost_utils.sync_global_devices("start") matmul = jax.jit(lambda x, w: x @ w, out_shardings=sharding) _ = matmul(x, w) y = matmul(x, w) # doesn't crash on second call expected = x @ w for shard in y.addressable_shards: np.testing.assert_array_equal(shard.data, expected[shard.index]) def test_numpy_asarray_no_local_devices(self): y, x = create_nonaddressable_array((8, 8), spec=P()) # In processes with local shards, we can fetch the value of the array using # np.asarray, since the sharding is fully replicated. In processes with no # local shards, attempting to fetch the NumPy array is an error. if y.sharding.has_addressable_devices: np.testing.assert_array_equal(np.asarray(y), x) else: with self.assertRaisesRegex( RuntimeError, r"Fetching value for `jax.Array` that spans non-addressable \(non" r" process local\) devices is not possible."): np.asarray(y) def test_shard_map_no_local_devices(self): x, x_np = create_nonaddressable_array((8, 8)) # shard_map works as expected when there are nonparticipating hosts. 
shard_map_f = jax.shard_map( lambda x: jax.lax.psum(x, "x"), mesh=x.sharding.mesh, in_specs=P("x"), out_specs=P()) y = shard_map_f(x) expected_y = sum(np.split(x_np, len(x.sharding.device_set))) sharding_process_indices = x.sharding._internal_device_list.process_indices expected_num_shards = (jax.local_device_count() if jax.process_index() in sharding_process_indices else 0) self.assertLen(y.addressable_shards, expected_num_shards) for shard in y.addressable_shards: np.testing.assert_array_equal(shard.data, expected_y[shard.index]) def test_array_delete(self): y, _ = create_nonaddressable_array((8, 8)) y.delete() with self.assertRaisesRegex(RuntimeError, "Array has been deleted."): y._check_if_deleted() self.assertIsNone(y._npy_value) self.assertIsNone(y._arrays) def test_single_device_array_usage_after_delete(self): y, _ = create_nonaddressable_array((8, 8)) y.delete() with self.assertRaisesRegex(RuntimeError, "Array has been deleted."): _ = y + 1 def test_repr(self): y, _ = create_nonaddressable_array((8, 8)) if y.is_fully_addressable: self.assertStartsWith(repr(y), "Array([[ 0., 1., 2., 3.,") else: self.assertEqual(repr(y), "Array(shape=(8, 8), dtype=float32)") def test_array_astype(self): y, _ = create_nonaddressable_array((8, 8)) y = y.astype(np.int32) self.assertEqual(y.dtype, np.int32) def test_sharded_add(self): y, y_np = create_nonaddressable_array((8, 8)) z, z_np = create_nonaddressable_array((8, 8), spec=P()) out = y + z expected = y_np + z_np self.assertLen(out.addressable_shards, len(y.sharding.addressable_devices)) for shard in out.addressable_shards: np.testing.assert_array_equal(shard.data, expected[shard.index]) def test_sharded_zeros_like(self): y, _ = create_nonaddressable_array((8, 8)) out = jnp.zeros_like(y) expected = jnp.zeros(y.shape, dtype=y.dtype) self.assertLen(out.addressable_shards, len(y.sharding.addressable_devices)) for i in out.addressable_shards: np.testing.assert_array_equal(i.data, expected[i.index]) def 
test_array_not_hashable(self): y, _ = create_nonaddressable_array((8, 8)) with self.assertRaisesRegex(TypeError, "unhashable type"): hash(y) def test_on_device_size_in_bytes(self): a, _ = create_nonaddressable_array((8, 8)) if not a.sharding.has_addressable_devices: with self.assertRaisesRegex( RuntimeError, r"GetOnDeviceSizeInBytes\(\) is not yet supported for arrays with no " r"addressable devices"): a.on_device_size_in_bytes() else: shard_size = a.addressable_shards[0].data.on_device_size_in_bytes() self.assertEqual(shard_size * len(a.global_shards), a.on_device_size_in_bytes()) def test_array_is_ready(self): y, _ = create_nonaddressable_array((8, 8)) y.is_ready() # doesn't crash def test_array_copy_to_host_async(self): y, x = create_nonaddressable_array((8, 8)) y.copy_to_host_async() # doesn't crash for shard in y.addressable_shards: np.testing.assert_array_equal(shard.data, x[shard.index]) def test_device_get_replicated(self): y, x = create_nonaddressable_array((8, 8), spec=P()) if y.sharding.has_addressable_devices: np.testing.assert_array_equal(jax.device_get(y), x) else: with self.assertRaisesRegex( RuntimeError, r"Fetching value for `jax.Array` that spans non-addressable \(non" r" process local\) devices is not possible."): jax.device_get(y) # Skipped on GPU since there are two processes with one device each, so we # can't construct a sharding that is nonaddressable in one of the processes # and also not fully replicated (since the sharding must contain one device). 
@jtu.skip_on_devices("gpu") def test_device_get_sharded(self): y, _ = create_nonaddressable_array((8, 8)) with self.assertRaisesRegex( RuntimeError, r"Fetching value for `jax.Array` that spans non-addressable \(non" r" process local\) devices is not possible."): jax.device_get(y) def test_array_fully_replicated_shard(self): y, x = create_nonaddressable_array((8, 8), spec=P()) if y.sharding.has_addressable_devices: fs = y.addressable_data(0) self.assertEqual(fs.shape, x.shape) self.assertLen(fs.sharding.device_set, 1) self.assertEqual(fs.devices(), {jax.local_devices()[0]}) np.testing.assert_array_equal(fs, x) np.testing.assert_array_equal(y.addressable_data(0), x) else: with self.assertRaisesRegex( RuntimeError, "FullyReplicatedShard: Array has no addressable shards." ): y.addressable_data(0) def test_array_iter_replicated(self): y, _ = create_nonaddressable_array((8, 8), spec=P()) y_iter = iter(y) self.assertLen(list(y_iter), 8) # Skipped on GPU since the sharding contains one device and is therefore fully # replicated. @jtu.skip_on_devices("gpu") def test_array_iter_sharded(self): y, _ = create_nonaddressable_array((8, 8)) with self.assertRaises(AssertionError): iter(y)
NonaddressableArrayTestMultiHost
python
ray-project__ray
python/ray/_private/thirdparty/dacite/exceptions.py
{ "start": 1560, "end": 2028 }
class ____(DaciteFieldError): def __init__(self, union_matches: Dict[Type, Any], field_path: Optional[str] = None) -> None: super().__init__(field_path=field_path) self.union_matches = union_matches def __str__(self) -> str: conflicting_types = ", ".join(_name(type_) for type_ in self.union_matches) return f'can not choose between possible Union matches for field "{self.field_path}": {conflicting_types}'
StrictUnionMatchError
python
pytorch__pytorch
torch/fx/graph.py
{ "start": 10230, "end": 35503 }
class ____: # This is an override hook so we can customize the SymNode printer. _sym_repr: Callable[["torch.types.PySymType"], str] = lambda x: repr(x) def __init__(self): self._body_transformer: Optional[TransformCodeFunc] = None self._func_name: str = "forward" def _format_multiline_args(self, args: list[str]) -> str: """Helper to format function arguments in expanded multiline format.""" return "".join(self._format_single_arg(arg) for arg in args) def _format_single_arg(self, arg: str) -> str: """Helper to format a single argument with optional comment.""" if "#" in arg: arg_part, comment_part = arg.split("#", 1) return f" {arg_part.rstrip()}, # {comment_part.lstrip()}\n" else: return f" {arg},\n" def _get_delimiters(self, container) -> tuple[str, str]: """Helper to get opening and closing delimiters for containers.""" return ("(", ")") if isinstance(container, tuple) else ("[", "]") def _format_multiline_container(self, items, descs=None, prefix="") -> str: """Helper to format containers (lists/tuples) in multiline format.""" ldelim, rdelim = self._get_delimiters(items) desc_trailers = self._get_desc_trailers(items, descs) return ( f"{prefix}{ldelim}\n" + "".join( f" {item},{trailer}\n" for item, trailer in zip(items, desc_trailers) ) + f"{rdelim}" ) def _get_desc_trailers(self, items, descs): """Helper to generate description trailers for items.""" if descs is None: return [""] * len(items) return [f" # {desc}" for desc in descs] def _call_method_with_signature_check(self, method, *args, **kwargs): """Helper to call a method with optional parameters based on signature.""" sig = inspect.signature(method) # Filter kwargs to only include parameters that exist in the method signature filtered_kwargs = {k: v for k, v in kwargs.items() if k in sig.parameters} return method(*args, **filtered_kwargs) def gen_fn_def( self, free_vars: list[str], maybe_return_annotation: str, *, expanded_def: bool = False, ) -> str: """ Given the free variables and a return annotation, 
generates the beginning of the FX function. By default, `gen_fn_def(['a', 'b'], '') == 'def {self._func_name}(a, b):'` """ # If the original function didn't have self as its first argument, we # would have added it. if len(free_vars) == 0 or free_vars[0] != "self": free_vars.insert(0, "self") if expanded_def: args_formatted = self._format_multiline_args(free_vars) return ( f"def {self._func_name}(\n{args_formatted}){maybe_return_annotation}:" ) else: return f"def {self._func_name}({', '.join(free_vars)}){maybe_return_annotation}:" def generate_output( self, output_args: Argument, *, descs: Optional[Any] = None ) -> str: """ Given the output arguments, generates the return statement of the FX function. Note: The returned statement should not be indented. """ if descs is not None and isinstance(output_args, (list, tuple)): return self._format_multiline_container(output_args, descs, "return ") else: return f"return {repr(output_args)}" def process_inputs(self, *args: Any) -> Any: """ Transforms the inputs so that the graph can take them as arguments, as non-default codegen may result in the inputs to the function being different from the inputs to the graph. If the graph was directly runnable, this invariant should hold true `f.graph.process_outputs(f.graph(*f.graph.process_inputs(*inputs))) == f(*inputs)` """ return args def process_outputs(self, outputs: Any) -> Any: """ Transforms the outputs of the graph to be identical to the codegen. See ``process_inputs`` for more details. """ return outputs def additional_globals(self) -> list[tuple[str, Any]]: """ If your codegen uses extra global values, add tuples of (identifier,reference to the value) here. For example, return ['List', typing.List] if you need ``List`` in the global context. 
""" return [] def _gen_python_code( self, nodes, root_module: str, namespace: _Namespace, *, verbose: bool = False, include_stride: bool = False, include_device: bool = False, colored: bool = False, # Render each argument on its own line expanded_def: bool = False, record_func: bool = False, ) -> PythonCode: free_vars: list[str] = [] body: list[str] = [] globals_: dict[str, Any] = {} wrapped_fns: dict[str, None] = {} # Wrap string in list to pass by reference maybe_return_annotation: list[str] = [""] include_stride = include_stride or ( os.environ.get("FX_GRAPH_SHOW_STRIDE", "0") == "1" ) include_device = include_device or ( os.environ.get("FX_GRAPH_SHOW_DEVICE", "0") == "1" ) include_meta = os.environ.get("FX_GRAPH_SHOW_META", "0") == "1" def add_global(name_hint: str, obj: Any): """Add an obj to be tracked as a global. We call this for names that reference objects external to the Graph, like functions or types. Returns: the global name that should be used to reference 'obj' in generated source. """ if ( _is_from_torch(obj) and obj != torch.device ): # to support registering torch.device # HACK: workaround for how torch custom ops are registered. We # can't import them like normal modules so they must retain their # fully qualified name. return _get_qualified_name(obj) # normalize the name hint to get a proper identifier global_name = namespace.create_name(name_hint, obj) if global_name in globals_: assert globals_[global_name] == obj return global_name globals_[global_name] = obj return global_name # Pre-fill the globals table with registered builtins. for name, (_, obj) in _custom_builtins.items(): add_global(name, obj) def type_repr(o: Any): if o == (): # Empty tuple is used for empty tuple type annotation Tuple[()] return "()" typename = _type_repr(o) if origin_type := getattr(o, "__origin__", None): # list[...], typing.List[...], TensorType[...] if isinstance(o, typing._GenericAlias): # type: ignore[attr-defined] # This is a generic pre-PEP585 type, e.g. 
typing.List[torch.Tensor] origin_type = _origin_type_map.get(origin_type, origin_type) origin_typename = add_global(_type_repr(origin_type), origin_type) if hasattr(o, "__args__") and o.__args__: args = [type_repr(arg) for arg in o.__args__] return f"{origin_typename}[{','.join(args)}]" else: return origin_typename # Common case: this is a regular module name like 'foo.bar.baz' return add_global(typename, o) if colored: red = _color_fns["red"] dim_green = _color_fns["dim_green"] dim = _color_fns["dim"] dim_blue = _color_fns["dim_blue"] blue = _color_fns["blue"] else: red = _identity dim_green = _identity dim = _identity dim_blue = _identity blue = _identity def _get_repr(arg: Any) -> str: if isinstance(arg, Node): # first because common return repr(arg) elif isinstance(arg, tuple) and hasattr(arg, "_fields"): # Handle NamedTuples (if it has `_fields`) via add_global. qualified_name = _get_qualified_name(type(arg)) global_name = add_global(qualified_name, type(arg)) return f"{global_name}{repr(tuple(arg))}" elif isinstance( arg, (torch._ops.OpOverload, torch._ops.HigherOrderOperator) ): qualified_name = _get_qualified_name(arg) global_name = add_global(qualified_name, arg) return f"{global_name}" elif isinstance(arg, enum.Enum): cls = arg.__class__ clsname = add_global(cls.__name__, cls) return f"{clsname}.{arg.name}" elif isinstance(arg, torch.Tensor): size = list(arg.size()) dtype = str(arg.dtype).split(".")[-1] return f"torch.Tensor(size={size}, dtype={dtype})" elif isinstance(arg, tuple): if len(arg) == 1: return f"({_get_repr(arg[0])},)" else: return "(" + ", ".join(_get_repr(a) for a in arg) + ")" elif isinstance(arg, list): return "[" + ", ".join(_get_repr(a) for a in arg) + "]" elif isinstance(arg, slice): return f"slice({_get_repr(arg.start)}, {_get_repr(arg.stop)}, {_get_repr(arg.step)})" else: return blue(repr(arg)) def _format_args( args: tuple[Argument, ...], kwargs: dict[str, Argument] ) -> str: res = [_get_repr(a) for a in args] res.extend([f"{k} = 
{_get_repr(v)}" for k, v in kwargs.items()]) return ", ".join(res) # Run through reverse nodes and record the first instance of a use # of a given node. This represents the *last* use of the node in the # execution order of the program, which we will use to free unused # values node_to_last_use: dict[Node, Node] = {} user_to_last_uses: dict[Node, list[Node]] = {} def register_last_uses(n: Node, user: Node): if n not in node_to_last_use: node_to_last_use[n] = user user_to_last_uses.setdefault(user, []).append(n) for node in reversed(nodes): for input_node in node._input_nodes: register_last_uses(input_node, node) def delete_unused_values(user: Node): """ Delete values after their last use. This ensures that values that are not used in the remainder of the code are freed and the memory usage of the code is optimal. """ if user.op == "placeholder": return if user.op == "output": body.append("\n") return nodes_to_delete = user_to_last_uses.get(user, []) if len(user.users.keys()) == 0: # This node is not used by any others. however it's also not # removed by DCE since side-effect. We want to free it's outputs # right after its execution done to save memory. nodes_to_delete.append(user) if len(nodes_to_delete): to_delete_str = " = ".join( [repr(n) for n in nodes_to_delete] + ["None"] ) body.append(f"; {dim(to_delete_str)}\n") else: body.append("\n") prev_summary_str = None def append_stacktrace_summary(node: Node): """ Append a summary of the stacktrace to the generated code. This is useful for debugging. 
""" nonlocal prev_summary_str if node.op not in {"placeholder", "output"}: annotation_str = "" annotation = node.meta.get("custom", {}) if annotation: annotation_str = f" Annotation: {annotation}" stack_trace_str = "No stacktrace found for following nodes" if stack_trace := node.stack_trace: if parsed_stack_trace := _parse_stack_trace(stack_trace): stack_trace_str = parsed_stack_trace.get_summary_str() summary_str = f"\n{dim(f'#{annotation_str} {stack_trace_str}')}\n" if summary_str != prev_summary_str: prev_summary_str = summary_str body.append(summary_str) def stringify_shape(shape: Iterable) -> str: return f"[{', '.join([str(x) for x in shape])}]" def emit_node(node: Node): maybe_type_annotation = ( "" if node.type is None else f" : {type_repr(node.type)}" ) maybe_comment = "" if verbose: # override annotation with more detailed information try: from torch.distributed.tensor._api import DTensor, DTensorSpec dtensorspec_format_shard_order_str = ( DTensorSpec.format_shard_order_str ) except ModuleNotFoundError: DTensor = None # type: ignore[assignment,misc] dtensorspec_format_shard_order_str = None from torch.fx.experimental.proxy_tensor import py_sym_types from torch.fx.passes.shape_prop import TensorMetadata meta_val = node.meta.get( "val", node.meta.get("tensor_meta", node.meta.get("example_value", None)), ) def _tensor_annotation(t: torch.Tensor) -> str: stride = stringify_shape(t.stride()) if include_stride else "" device = f"{t.device}" if include_device else "" return ( f"{red(dtype_abbrs[t.dtype])}" f"{blue(stringify_shape(t.shape))}" f"{dim_blue(stride)}" f"{dim_green(device)}" ) # use string as annotation, to make it valid python code if isinstance(meta_val, torch.Tensor) and meta_val.layout not in ( torch.sparse_csc, torch.sparse_csr, ): # Fake tensors cause tests to wobble, so do not custom print them. 
is_plain = type(meta_val) is torch.Tensor or isinstance( meta_val, torch._subclasses.FakeTensor ) core = _tensor_annotation(meta_val) if is_plain: maybe_type_annotation = f': "{core}"' elif type(meta_val) is DTensor: assert dtensorspec_format_shard_order_str is not None dtensor_meta = dtensorspec_format_shard_order_str( meta_val._spec.placements, # type: ignore[attr-defined] meta_val._spec.shard_order, # type: ignore[attr-defined] ) cls = meta_val.__class__.__name__ maybe_type_annotation = ( f': "{cls}({core}, {dim_green(dtensor_meta)})"' ) else: cls = meta_val.__class__.__name__ maybe_type_annotation = f': "{cls}({core})"' elif isinstance(meta_val, py_sym_types): val_str = CodeGen._sym_repr(meta_val) maybe_type_annotation = f': "Sym({val_str})"' elif isinstance(meta_val, TensorMetadata): maybe_type_annotation = f': "{dtype_abbrs[meta_val.dtype]}{stringify_shape(meta_val.shape)}"' desc = None if expanded_def: desc = node.meta.get("desc", None) if desc is not None and node.op == "placeholder": maybe_comment += f" # {desc}" # output is handled specially if include_meta and hasattr(node, "meta") and node.meta: body.append('"""\n') for k, v in node.meta.items(): # use str over repr since repr is susceptible to sympy # errors such as "cannot determine truth value of Relational" # Pretty print the high-level dict with str() for values body.append( f"{k}: {pprint.pformat(str(v), width=80, compact=True)}\n" ) body.append('"""\n') if node.op == "placeholder": assert isinstance(node.target, str) maybe_default_arg = ( "" if not node.args else f" = {_get_repr(node.args[0])}" ) free_vars.append( f"{node.target}{maybe_type_annotation}{maybe_default_arg}{maybe_comment}" ) raw_name = node.target.replace("*", "") if raw_name != repr(node): body.append(f"{repr(node)} = {raw_name}\n") return elif node.op == "call_method": assert isinstance(node.target, str) body.append( f"{repr(node)}{maybe_type_annotation} = {_format_target(_get_repr(node.args[0]), node.target)}" 
f"({_format_args(node.args[1:], node.kwargs)})" ) return elif node.op == "call_function": assert callable(node.target) # pretty print operators if ( getattr(node.target, "__module__", "") == "_operator" and node.target.__name__ in magic_methods ): assert isinstance(node.args, tuple) body.append( f"{repr(node)}{maybe_type_annotation} = " f"{magic_methods[node.target.__name__].format(*(_get_repr(a) for a in node.args))}" ) return # pretty print inplace operators; required for jit.script to work properly # not currently supported in normal FX graphs, but generated by torchdynamo if ( getattr(node.target, "__module__", "") == "_operator" and node.target.__name__ in inplace_methods ): body.append( f"{inplace_methods[node.target.__name__].format(*(_get_repr(a) for a in node.args))}; " f"{repr(node)}{maybe_type_annotation} = {_get_repr(node.args[0])}" ) return qualified_name = _get_qualified_name(node.target) global_name = add_global(qualified_name, node.target) # special case for getattr: node.args could be 2-argument or 3-argument # 2-argument: attribute access; 3-argument: fall through to attrib function call with default value if ( global_name == "getattr" and isinstance(node.args, tuple) and isinstance(node.args[1], str) and node.args[1].isidentifier() and len(node.args) == 2 ): body.append( f"{repr(node)}{maybe_type_annotation} = {_format_target(_get_repr(node.args[0]), node.args[1])}" ) return body.append( f"{repr(node)}{maybe_type_annotation} = {global_name}({_format_args(node.args, node.kwargs)})" ) if node.meta.get("is_wrapped", False): wrapped_fns.setdefault(global_name) return elif node.op == "call_module": assert isinstance(node.target, str) body.append( f"{repr(node)}{maybe_type_annotation} = " f"{_format_target(root_module, node.target)}({_format_args(node.args, node.kwargs)})" ) return elif node.op == "get_attr": assert isinstance(node.target, str) body.append( f"{repr(node)}{maybe_type_annotation} = {_format_target(root_module, node.target)}" ) return 
elif node.op == "output": if node.type is not None: maybe_return_annotation[0] = f" -> {type_repr(node.type)}" body.append( self._call_method_with_signature_check( self.generate_output, node.args[0], descs=desc if expanded_def else None, ) ) return raise NotImplementedError(f"node: {node.op} {node.target}") if record_func: body.append( "_rf = torch._C._profiler._RecordFunctionFast('## ENTER_GRAPH_PLACEHOLDER_KEY ##'); _rf.__enter__()\n" ) for i, node in enumerate(nodes): # NOTE: emit_node does not emit a string with newline. It depends # on delete_unused_values to append one if verbose: append_stacktrace_summary(node) # emit a counter comment to keep track of # node index, which will be deleted later # after going through _body_transformer body.append(f"# COUNTER: {i}\n") do_record = record_func and node.op in ( "call_function", "call_method", "call_module", ) if do_record: # The double hash ## convention is used by post-processing to find the fx markers body.append( f"_rf_{node.name} = torch._C._profiler._RecordFunctionFast('## {i} ##'); _rf_{node.name}.__enter__()\n" ) emit_node(node) delete_unused_values(node) if do_record: body.append(f"_rf_{node.name}.__exit__(None, None, None)\n") if record_func: body.append("_rf.__exit__(None, None, None)\n") if len(body) == 0: # If the Graph has no non-placeholder nodes, no lines for the body # have been emitted. 
To continue to have valid Python code, emit a # single pass statement body.append("pass\n") if len(wrapped_fns) > 0: wrap_name = add_global("wrap", torch.fx.wrap) wrap_stmts = "\n".join([f'{wrap_name}("{name}")' for name in wrapped_fns]) else: wrap_stmts = "" if self._body_transformer: body = self._body_transformer(body) for name, value in self.additional_globals(): add_global(name, value) prologue = self._call_method_with_signature_check( self.gen_fn_def, free_vars, maybe_return_annotation[0], expanded_def=expanded_def, ) # remove counter and generate lineno to node index mapping lineno_map: dict[int, Optional[int]] = {} prologue_len = prologue.count("\n") + 1 new_lines: list[str] = [] cur_idx = None for line in "".join(body).split("\n"): counter = _counter_regexp.search(line) if counter is not None: cur_idx = int(counter.group(1)) else: lineno_map[len(new_lines) + prologue_len] = cur_idx new_lines.append(line) code = "\n".join(new_lines).lstrip("\n") code = "\n".join(" " + line for line in code.split("\n")) fn_code = f""" {wrap_stmts} {prologue} {code}""" # The +4 accounts for the empty lines before prologue in fn_code prologue_start = wrap_stmts.count("\n") + 4 return PythonCode( fn_code, globals_, _lineno_map=lineno_map, _prologue_start=prologue_start, ) # Ideally, we'd like to refactor all of the pytree logic into this codegen # class. Unfortunately, there are 3 areas we currently need extra logic in FX. # 1. In the initial symbolic trace, the pytree logic is tied up with `concrete_args`. # 2. In the FX graph, we need to access 2 attributes - in_spec and out_spec. # Since we can't access .graph within the FX forward, we need to copy the attribute to the module. # 3. We currently can't register the pytree imports with `add_global` - not sure why.
CodeGen
python
anthropics__anthropic-sdk-python
src/anthropic/types/beta/beta_code_execution_result_block_param.py
{ "start": 348, "end": 620 }
class ____(TypedDict, total=False): content: Required[Iterable[BetaCodeExecutionOutputBlockParam]] return_code: Required[int] stderr: Required[str] stdout: Required[str] type: Required[Literal["code_execution_result"]]
BetaCodeExecutionResultBlockParam
python
keras-team__keras
keras/src/layers/pooling/global_max_pooling1d.py
{ "start": 261, "end": 2357 }
class ____(BaseGlobalPooling): """Global max pooling operation for temporal data. Args: data_format: string, either `"channels_last"` or `"channels_first"`. The ordering of the dimensions in the inputs. `"channels_last"` corresponds to inputs with shape `(batch, steps, features)` while `"channels_first"` corresponds to inputs with shape `(batch, features, steps)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be `"channels_last"`. keepdims: A boolean, whether to keep the temporal dimension or not. If `keepdims` is `False` (default), the rank of the tensor is reduced for spatial dimensions. If `keepdims` is `True`, the temporal dimension are retained with length 1. The behavior is the same as for `tf.reduce_mean` or `np.mean`. Input shape: - If `data_format='channels_last'`: 3D tensor with shape: `(batch_size, steps, features)` - If `data_format='channels_first'`: 3D tensor with shape: `(batch_size, features, steps)` Output shape: - If `keepdims=False`: 2D tensor with shape `(batch_size, features)`. - If `keepdims=True`: - If `data_format="channels_last"`: 3D tensor with shape `(batch_size, 1, features)` - If `data_format="channels_first"`: 3D tensor with shape `(batch_size, features, 1)` Example: >>> x = np.random.rand(2, 3, 4) >>> y = keras.layers.GlobalMaxPooling1D()(x) >>> y.shape (2, 4) """ def __init__(self, data_format=None, keepdims=False, **kwargs): super().__init__( pool_dimensions=1, data_format=data_format, keepdims=keepdims, **kwargs, ) def call(self, inputs): steps_axis = 1 if self.data_format == "channels_last" else 2 return ops.max(inputs, axis=steps_axis, keepdims=self.keepdims)
GlobalMaxPooling1D
python
spack__spack
lib/spack/spack/test/error_messages.py
{ "start": 3811, "end": 10745 }
class ____(Package): version("2.1") version("2.0") variant("v1", default=True) """, ) all_pkgs = [ _pkgx1, _pkgx2, _pkgx3, _pkgx4, _pkgy1, _pkgy2, _pkgy3, _pkgy4, _pkgz1, _pkgz2, _pkgz3, _pkgw1, _pkgw2, _pkgw3, _pkgw4, _pkgt1, _pkgt2, _pkgt3, _pkgt4, ] def _add_import(pkg_def): return ( """\ from spack.package import * from spack.package import Package """ + pkg_def ) all_pkgs = list((x, _add_import(y)) for (x, y) in all_pkgs) _repo_name_id = 0 def create_test_repo(tmp_path, pkg_name_content_tuples): global _repo_name_id repo_name = f"testrepo{str(_repo_name_id)}" repo_path = tmp_path / "spack_repo" / repo_name os.makedirs(repo_path) with open(repo_path / "__init__.py", "w", encoding="utf-8"): pass repo_yaml = os.path.join(repo_path, "repo.yaml") with open(str(repo_yaml), "w", encoding="utf-8") as f: f.write( f"""\ repo: namespace: {repo_name} api: v2.1 """ ) _repo_name_id += 1 packages_dir = repo_path / "packages" os.mkdir(packages_dir) with open(packages_dir / "__init__.py", "w", encoding="utf-8"): pass for pkg_name, pkg_str in pkg_name_content_tuples: pkg_dir = packages_dir / pkg_name os.mkdir(pkg_dir) pkg_file = pkg_dir / "package.py" with open(str(pkg_file), "w", encoding="utf-8") as f: f.write(pkg_str) repo_cache = spack.util.file_cache.FileCache(str(tmp_path / "cache")) return spack.repo.Repo(str(repo_path), cache=repo_cache) @pytest.fixture def _create_test_repo(tmp_path, mutable_config): yield create_test_repo(tmp_path, all_pkgs) @pytest.fixture def test_repo(_create_test_repo, monkeypatch, mock_stage): with spack.repo.use_repositories(_create_test_repo) as mock_repo_path: yield mock_repo_path @contextmanager def expect_failure_and_print(should_mention=None): got_an_error_as_expected = False err_msg = None try: yield except spack.error.UnsatisfiableSpecError as e: got_an_error_as_expected = True err_msg = str(e) if not got_an_error_as_expected: raise ValueError("A failure was supposed to occur in this context manager") elif not err_msg: raise 
ValueError("No error message for failed concretization") print(err_msg) check_error(err_msg, should_mention) def check_error(msg, should_mention: Optional[Iterable] = None): excludes = [ "failed to concretize .* for the following reasons:", "Cannot satisfy .*", "required because .* requested explicitly", "cannot satisfy a requirement for package .*", ] lines = msg.split("\n") should_mention = set(should_mention) if should_mention else set() should_mention_hits = set() remaining = [] for line in lines: for p in should_mention: if re.search(p, line): should_mention_hits.add(p) if any(re.search(p, line) for p in excludes): continue remaining.append(line) if not remaining: raise ValueError("The error message contains only generic statements") should_mention_misses = should_mention - should_mention_hits if should_mention_misses: raise ValueError(f"The error message did not contain: {sorted(should_mention_misses)}") def test_diamond_with_pkg_conflict1(concretize_scope, test_repo): concretize_one("x2") concretize_one("x3") concretize_one("x4") important_points = ["x2 depends on x4@4.1", "x3 depends on x4@4.0"] with expect_failure_and_print(should_mention=important_points): concretize_one("x1") def test_diamond_with_pkg_conflict2(concretize_scope, test_repo): important_points = [ r"y2 depends on y4@4.1 when \+v1", r"y1 depends on y2\+v1", r"y3 depends on y4@4.0", ] with expect_failure_and_print(should_mention=important_points): concretize_one("y1") @pytest.mark.xfail(reason="Not addressed yet") def test_version_range_null(concretize_scope, test_repo): with expect_failure_and_print(): concretize_one("x2@3:4") # This error message is hard to follow: neither z2 or z3 # are mentioned, so if this hierarchy had 10 other "OK" # packages, a user would be conducting a tedious manual # search @pytest.mark.xfail(reason="Not addressed yet") def test_null_variant_for_requested_version(concretize_scope, test_repo): r""" Z1_ (@:1.1 -> !v1) | \ Z2 | \ | \| Z3 (z1~v1 -> z3+v2) (z2 ^z3:2.0) 
(v2 only exists for @2.1:) """ concretize_one("z1") with expect_failure_and_print(should_mention=["z2"]): concretize_one("z1@1.1") def test_errmsg_requirements_1(concretize_scope, test_repo): # w4 has: depends_on("w3+v1", when="@2.0") # w3 has: requires("~v1", when="@2.1") important_points = [ r"w4 depends on w3\+v1 when @2.0", r"w4@:2.0 \^w3@2.1 requested explicitly", r"~v1 is a requirement for package w3 when @2.1", ] with expect_failure_and_print(should_mention=important_points): concretize_one("w4@:2.0 ^w3@2.1") def test_errmsg_requirements_cfg(concretize_scope, test_repo): conf_str = """\ packages: w2: require: - one_of: ["~v1"] when: "@2.0" """ update_packages_config(conf_str) important_points = [ r"~v1 is a requirement for package w2 when @2.0", r"w4 depends on w2@:2.0 when @:2.0", r"w4@2.0 \^w2\+v1 requested explicitly", ] # w4 has: depends_on("w2@:2.0", when="@:2.0") with expect_failure_and_print(should_mention=important_points): concretize_one("w4@2.0 ^w2+v1") # This reencodes prior test test_errmsg_requirements_cfg # in terms of package `requires`, def test_errmsg_requirements_directives(concretize_scope, test_repo): # t4 has: depends_on("t2@:2.0", when="@:2.0") # t2 has: requires("~v1", when="@:2.0") important_points = [ r"~v1 is a requirement for package t2 when @:2.0", r"t4 depends on t2@:2.0 when @:2.0", r"t4@:2.0 \^t2\+v1 requested explicitly", ] with expect_failure_and_print(should_mention=important_points): concretize_one("t4@:2.0 ^t2+v1") # Simulates a user error: package is specified as external with a version, # but a different version was required in config. 
def test_errmsg_requirements_external_mismatch(concretize_scope, test_repo): conf_str = """\ packages: t1: buildable: false externals: - spec: "t1@2.1" prefix: /a/path/that/doesnt/need/to/exist/ require: - spec: "t1@2.0" """ update_packages_config(conf_str) important_points = ["no externals satisfy the request"] with expect_failure_and_print(should_mention=important_points): concretize_one("t1")
T1
python
scipy__scipy
scipy/io/wavfile.py
{ "start": 642, "end": 2115 }
class ____: """ Tracks stream position, provides tell(), and emulates only those seeks that can be supported by reading forward. Other seeks raise io.UnsupportedOperation. Note that this class implements only the minimum necessary to keep wavfile.read() happy. """ def __init__(self, reader): self.reader = reader self.pos = 0 def read(self, size=-1, /): data = self.reader.read(size) self.pos += len(data) return data def seek(self, offset, whence=os.SEEK_SET, /): match whence: case os.SEEK_SET if offset >= self.pos: self.read(offset - self.pos) # convert to relative case os.SEEK_CUR if offset >= 0: self.read(offset) # advance by offset case os.SEEK_END if offset == 0: self.read() # advance to end of stream case _: raise io.UnsupportedOperation("SeekEmulatingReader was asked to emulate" " a seek operation it does not support.") return self.pos def tell(self): return self.pos def close(self): self.reader.close() # np.fromfile expects to be able to call flush(), and _read_data_chunk # expects to catch io.UnsupportedOperation if np.fromfile fails. def flush(self): raise io.UnsupportedOperation("SeekEmulatingReader can't flush.")
SeekEmulatingReader
python
walkccc__LeetCode
solutions/471. Encode String with Shortest Length/471-2.py
{ "start": 0, "end": 982 }
class ____: def encode(self, s: str) -> str: n = len(s) # dp[i][j] := the shortest encoded string of s[i..j] dp = [[''] * n for _ in range(n)] for d in range(n): for i in range(n - d): j = i + d curr = s[i:j + 1] dp[i][j] = curr if len(dp[i][j]) < 5: continue # Try all the possible partitions. for k in range(i, j): if len(dp[i][k]) + len(dp[k + 1][j]) < len(dp[i][j]): dp[i][j] = dp[i][k] + dp[k + 1][j] # Try to compress the string. # e.g. s = aabaabaab -> 3[aab] for k in range(i, j): pattern = s[i:k + 1] if len(curr) % len(pattern) == 0 and pattern * (len(curr) // len(pattern)) == curr: candidate = f"{len(curr) // len(pattern)}[{dp[i][k]}]" if len(candidate) < len(dp[i][j]): dp[i][j] = candidate return dp[0][n - 1]
Solution
python
PrefectHQ__prefect
tests/server/orchestration/api/test_block_types.py
{ "start": 11725, "end": 14972 }
class ____:
    """Integration tests for PATCH /block_types/{id} (update block type)."""

    async def test_update_block_type(self, client, block_type_x):
        # Patch every mutable field, then read the record back and verify the
        # name is untouched while the patched fields are persisted.
        response = await client.patch(
            f"/block_types/{block_type_x.id}",
            json=BlockTypeUpdate(
                logo_url="http://foo.com/bar.png",
                documentation_url="http://foo.com/bar.html",
                description="A block, verily",
                code_example=CODE_EXAMPLE,
            ).model_dump(mode="json"),
        )
        assert response.status_code == status.HTTP_204_NO_CONTENT

        response = await client.get(f"/block_types/{block_type_x.id}")
        assert response.status_code == status.HTTP_200_OK

        updated_block = BlockType.model_validate(response.json())
        assert updated_block.name == block_type_x.name
        assert updated_block.logo_url == "http://foo.com/bar.png"
        assert updated_block.documentation_url == "http://foo.com/bar.html"
        assert updated_block.description == "A block, verily"
        assert updated_block.code_example == CODE_EXAMPLE

    async def test_update_nonexistent_block_type(self, client):
        # Updating an unknown id returns 404 rather than creating a record.
        response = await client.patch(
            f"/block_types/{uuid4()}",
            json=BlockTypeUpdate(
                logo_url="http://foo.com/bar.png",
                documentation_url="http://foo.com/bar.html",
            ).model_dump(mode="json"),
        )
        assert response.status_code == status.HTTP_404_NOT_FOUND

    async def test_update_system_block_type_succeeds(self, system_block_type, client):
        response = await client.patch(
            f"/block_types/{system_block_type.id}",
            json=BlockTypeUpdate(
                description="Hi there!",
            ).model_dump(mode="json"),
        )
        assert response.status_code == status.HTTP_204_NO_CONTENT

    async def test_update_block_type_only_if_different(
        self, client, block_type_x, monkeypatch, session
    ):
        # Pre-apply the update directly via the model layer, then mock the
        # model-layer function so the test can count whether the API calls it.
        update = BlockTypeUpdate(
            logo_url="http://foo.com/bar.png",
            documentation_url="http://foo.com/bar.html",
            description="A block, verily",
            code_example=CODE_EXAMPLE,
        )
        await models.block_types.update_block_type(session, block_type_x.id, update)
        await session.commit()

        mock = AsyncMock()
        monkeypatch.setattr("prefect.server.models.block_types.update_block_type", mock)

        response = await client.patch(
            f"/block_types/{block_type_x.id}",
            json=update.model_dump(mode="json"),
        )

        # doesn't update with same parameters
        assert response.status_code == status.HTTP_204_NO_CONTENT
        assert mock.await_count == 0

        response = await client.patch(
            f"/block_types/{block_type_x.id}",
            json=BlockTypeUpdate(
                logo_url="http://foo2.com/bar.png",
                documentation_url="http://foo2.com/bar.html",
                description="A block2, verily",
                code_example=CODE_EXAMPLE.replace("python", "bison"),
            ).model_dump(mode="json"),
        )

        # does update with different parameters
        assert response.status_code == status.HTTP_204_NO_CONTENT
        assert mock.await_count == 1
TestUpdateBlockType
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/constructor24.py
{ "start": 1848, "end": 1988 }
# Type-checker regression sample: a generic class whose constructor takes a
# zero-argument callable producing T, constructed from the result of func2,
# which accepts a class object (defaulting to A) and returns such a callable.
# T, T_A, A, B, Generic and Callable are declared elsewhere in this sample file.
class ____(Generic[T]):
    def __init__(self, c: Callable[[], T]): ...


def func2(cls: type[T_A] = A) -> Callable[[], T_A]: ...


# Exercises constructor inference: the T of B must be solved from func2's return.
B(func2())
B
python
kamyu104__LeetCode-Solutions
Python/minimum-number-of-lines-to-cover-points.py
{ "start": 102, "end": 1819 }
class ____(object):
    def minimumLines(self, points):
        """
        :type points: List[List[int]]
        :rtype: int

        Enumerate every line through a pair of points (normalized as a
        direction (a, b) in lowest terms plus intercept c), keep only lines
        covering more than 2 points, then brute-force all subsets of those
        candidate lines; any points left uncovered are handled pairwise,
        hence ceil(remaining / 2) extra lines.  Python 2 code (xrange,
        dict.iteritems).
        """
        def gcd(a, b):  # Time: O(log(a + b))
            while b:
                a, b = b, a % b
            return abs(a)

        def popcount(x):
            # Kernighan bit trick: clears the lowest set bit each step.
            result = 0
            while x:
                x &= (x-1)
                result += 1
            return result

        def ceil_divide(a, b):
            return (a+b-1)//b

        # (a, b, c) canonical line -> set of points lying on it.
        lookup = collections.defaultdict(set)
        for i, (x1, y1) in enumerate(points):
            for j in xrange(i+1, len(points)):
                x2, y2 = points[j]
                # (x-x1)/(x2-x1) = (y-y1)/(y2-y1)
                # => (y2-y1)x - (x2-x1)y = x1(y2-y1) - y1(x2-x1)
                dx, dy = x2-x1, y2-y1
                g = gcd(dx, dy)
                a, b = dx//g, dy//g
                # Canonical sign so the same line always hashes identically.
                if a < 0 or (a == 0 and b < 0):
                    a, b = -a, -b
                c = b*x1-a*y1
                lookup[(a, b, c)].add((x1, y1))
                lookup[(a, b, c)].add((x2, y2))
        lines = [l for l, p in lookup.iteritems() if len(p) > 2]  # filter to improve complexity
        assert(len(lines) <= (len(points))//2)  # 1 extra colinear point per 2 points
        result = float("inf")
        for mask in xrange(1<<len(lines)):
            covered = set()
            bit, i = 1, 0
            while bit <= mask:
                if mask&bit:
                    covered.update(lookup[lines[i]])
                bit <<= 1
                i += 1
            result = min(result, popcount(mask) + ceil_divide(len(points)-len(covered), 2))
        return result
Solution
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/ruff/RUF028.py
{ "start": 743, "end": 1767 }
# NOTE(review): ruff RUF028 lint fixture -- the `# fmt: ...` and `# noqa`
# comments below are the test inputs themselves and must not be edited;
# adding/removing lines here may shift snapshot line numbers (confirm against
# the recorded snapshots before reformatting).
class ____:
    @classmethod
    # fmt: off
    def cls_method_a(
        # fmt: off
        cls,
    ) -> None:  # noqa: test # fmt: skip
        pass


def fmt_on_trailing():
    # fmt: off
    val = 5  # fmt: on
    pass


# fmt: on

# all of these should be fine
def match_case_and_elif():
    string = "hello"
    match string:
        case ("C" | "CX" | "R" | "RX" | "S" | "SP" | "WAP" | "XX" | "Y" | "YY" |
              "YZ" | "Z" | "ZZ"
              ):  # fmt: skip
            pass
        case _:  # fmt: skip
            if string != "Hello":
                pass
            elif string == "Hello":  # fmt: skip
                pass


# Regression test for decorators
import pytest


@pytest.mark.parametrize(
    "test_input,expected",
    [
        ("3+5", 8 ),
        ("17+2", 19),
    ],
)  # fmt: skip
def test_eval(test_input, expected):
    assert eval(test_input) == expected
Test
python
kamyu104__LeetCode-Solutions
Python/maximize-the-beauty-of-the-garden.py
{ "start": 29, "end": 566 }
class ____(object): def maximumBeauty(self, flowers): """ :type flowers: List[int] :rtype: int """ lookup = {} prefix = [0] result = float("-inf") for i, f in enumerate(flowers): prefix.append(prefix[-1]+f if f > 0 else prefix[-1]) if not f in lookup: lookup[f] = i continue result = max(result, 2*f+prefix[i+1]-prefix[lookup[f]] if f < 0 else prefix[i+1]-prefix[lookup[f]]) return result
Solution
python
kamyu104__LeetCode-Solutions
Python/basic-calculator-iv.py
{ "start": 2236, "end": 3859 }
class ____(object):
    def basicCalculatorIV(self, expression, evalvars, evalints):
        """
        :type expression: str
        :type evalvars: List[str]
        :type evalints: List[int]
        :rtype: List[str]

        Shunting-yard style infix evaluation where every operand is a `Poly`
        (multivariate polynomial type defined elsewhere in this file); free
        variables are substituted at the end via Poly.eval.  Python 2 code
        (xrange, itertools.izip).
        """
        ops = {'+':operator.add, '-':operator.sub, '*':operator.mul}

        def compute(operands, operators):
            # Pop one operator and its two operands; push the Poly result.
            right, left = operands.pop(), operands.pop()
            operands.append(ops[operators.pop()](left, right))

        def parse(s):
            precedence = {'+':0, '-':0, '*':1}
            operands, operators, operand = [], [], []
            for i in xrange(len(s)):
                if s[i].isalnum():
                    operand.append(s[i])
                    # Flush the token once the alnum run ends.
                    if i == len(s)-1 or not s[i+1].isalnum():
                        operands.append(Poly("".join(operand)))
                        operand = []
                elif s[i] == '(':
                    operators.append(s[i])
                elif s[i] == ')':
                    # Reduce everything inside the matching parenthesis.
                    while operators[-1] != '(':
                        compute(operands, operators)
                    operators.pop()
                elif s[i] in precedence:
                    # Reduce operators of greater-or-equal precedence first
                    # (left associativity).
                    while operators and operators[-1] in precedence and \
                          precedence[operators[-1]] >= precedence[s[i]]:
                        compute(operands, operators)
                    operators.append(s[i])
            while operators:
                compute(operands, operators)
            return operands[-1]

        lookup = dict(itertools.izip(evalvars, evalints))
        return parse(expression).eval(lookup).to_list()
Solution
python
kamyu104__LeetCode-Solutions
Python/add-and-search-word-data-structure-design.py
{ "start": 209, "end": 1293 }
class ____(object):

    def __init__(self):
        # Trie root; TrieNode (defined elsewhere in this file) exposes
        # `leaves` (dict of children keyed by char) and `is_string`
        # (end-of-word flag).
        self.root = TrieNode()

    # @param {string} word
    # @return {void}
    # Adds a word into the data structure.
    def addWord(self, word):
        node = self.root
        for ch in word:
            if ch not in node.leaves:
                node.leaves[ch] = TrieNode()
            node = node.leaves[ch]
        node.is_string = True

    # @param {string} word
    # @return {boolean}
    # Returns if the word is in the data structure. A word could
    # contain the dot character '.' to represent any one letter.
    def search(self, word):
        return self.searchHelper(word, 0, self.root)

    def searchHelper(self, word, start, curr):
        # Recursive trie walk; a literal child match takes priority, and a
        # '.' fans out over every child branch.
        if start == len(word):
            return curr.is_string
        ch = word[start]
        if ch in curr.leaves:
            return self.searchHelper(word, start + 1, curr.leaves[ch])
        if ch == '.':
            return any(
                self.searchHelper(word, start + 1, child)
                for child in curr.leaves.values()
            )
        return False
WordDictionary
python
django__django
tests/admin_utils/admin.py
{ "start": 543, "end": 658 }
class ____(admin.TabularInline):
    """Tabular inline admin for Article rows, exposing only the title field
    through the custom ArticleAdminForm."""

    model = Article
    fields = ["title"]
    form = ArticleAdminForm
ArticleInline
python
huggingface__transformers
src/transformers/models/pegasus_x/modeling_pegasus_x.py
{ "start": 33356, "end": 42236 }
class ____(PegasusXPreTrainedModel):
    """
    Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
    [`PegasusXEncoderLayer`].

    Args:
        config: PegasusXConfig
        embed_tokens (nn.Embedding): output embedding
    """

    def __init__(self, config: PegasusXConfig):
        super().__init__(config)

        self.dropout = config.dropout
        self.layerdrop = config.encoder_layerdrop

        embed_dim = config.d_model
        padding_idx = config.pad_token_id
        self.max_source_positions = config.max_position_embeddings
        embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0

        self.embed_tokens = PegasusXScaledWordEmbedding(
            config.vocab_size, embed_dim, padding_idx, embed_scale=embed_scale
        )

        # Learned embeddings for the global tokens prepended per sequence.
        self.embed_global = nn.Embedding(config.num_global_tokens, embed_dim)
        self.embed_positions = PegasusXSinusoidalPositionalEmbedding(embed_dim)
        # Every odd layer staggers its local blocks when config enables it.
        self.layers = nn.ModuleList(
            [
                PegasusXEncoderLayer(
                    stagger_blocks_this_layer=i % 2 == 1 and config.stagger_local_blocks, config=config
                )
                for i in range(config.encoder_layers)
            ]
        )
        self.layer_norm = nn.LayerNorm(config.d_model)

        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    def resize_position_embeddings(self, new_num_position_embeddings: int):
        """
        Resizes position embeddings matrix of the model if `new_num_position_embeddings !=
        config.max_position_embeddings`.

        Arguments:
            new_num_position_embeddings (`int`):
                The number of new position embeddings. If position embeddings are learned, increasing the size will add
                newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
                position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
                add correct vectors at the end following the position encoding algorithm, whereas reducing the size
                will remove vectors from the end.
        """
        logger.info(f"Setting `config.max_position_embeddings={new_num_position_embeddings}`...")
        self.config.max_position_embeddings = new_num_position_embeddings
        # Sinusoidal embeddings carry no learned state, so a fresh module is
        # enough after updating the config.
        self.embed_positions = PegasusXSinusoidalPositionalEmbedding(self.config.d_model)
        self.embed_positions.to(self.device)

    def get_position_embeddings(self) -> nn.Embedding:
        """
        Returns the position embeddings matrix
        """
        return self.embed_positions

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        inputs_embeds=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.

                Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.

                [What are input IDs?](../glossary#input-ids)
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated vectors
                than the model's internal embedding lookup matrix.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        embed_pos = self.embed_positions(inputs_embeds)

        hidden_states = inputs_embeds + embed_pos
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)

        batch_size, seq_len, _ = hidden_states.shape

        # Setup mask: convert the 0/1 mask into an additive mask that is 0
        # for attended positions and dtype-min for masked ones.
        if attention_mask is None:
            attention_mask = torch.ones(*input_shape, dtype=inputs_embeds.dtype, device=inputs_embeds.device)
        attention_mask = attention_mask.to(dtype=hidden_states.dtype)
        mask_min_value = torch.finfo(hidden_states.dtype).min
        inverted_mask = 1.0 - attention_mask
        attention_mask = inverted_mask.masked_fill(
            inverted_mask.to(torch.bool),
            mask_min_value,
        )

        # padding to block_size; padded positions are masked out below.
        if seq_len % self.config.block_size != 0:
            pad_len = self.config.block_size - seq_len % self.config.block_size
            hidden_states = nn.functional.pad(hidden_states, pad=(0, 0, 0, pad_len), value=0)
            attention_mask = nn.functional.pad(attention_mask, pad=(0, pad_len), value=mask_min_value)

        # Global tokens: one learned embedding per global position, expanded
        # across the batch.
        global_hidden_states = self.embed_global(
            torch.arange(self.config.num_global_tokens, device=hidden_states.device)[None].expand(batch_size, -1)
        )

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            # add LayerDrop (see https://huggingface.co/papers/1909.11556 for description)
            to_drop = False
            if self.training:
                dropout_probability = torch.rand([])
                if dropout_probability < self.layerdrop:  # skip the layer
                    to_drop = True

            if to_drop:
                layer_outputs = (None, None)
            else:
                layer_outputs = encoder_layer(
                    hidden_states,
                    global_hidden_states,
                    attention_mask,
                    output_attentions=output_attentions,
                )

                hidden_states = layer_outputs[0]
                global_hidden_states = layer_outputs[1]

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[2],)

        # Undo padding-to-block-size
        hidden_states = hidden_states[:, :seq_len]

        hidden_states = self.layer_norm(hidden_states)

        if output_hidden_states:
            encoder_states = encoder_states + ((hidden_states, global_hidden_states),)

        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )
PegasusXEncoder
python
PyCQA__pylint
tests/functional/n/nonlocal_without_binding.py
{ "start": 772, "end": 1396 }
# NOTE(review): pylint functional-test fixture for `nonlocal-without-binding`.
# The bracketed `# [nonlocal-without-binding]` markers are the expected-output
# annotations and the deliberately invalid `nonlocal` statements are the test
# inputs -- they must stay exactly as written.
class ____:
    nonlocal x  # [nonlocal-without-binding]

    def func(self):
        nonlocal some_attr  # [nonlocal-without-binding]


def func2():
    nonlocal_ = None
    local = None

    class Class:
        nonlocal nonlocal_
        nonlocal_ = 1
        local = 1

    return local + nonlocal_


def function():
    """Test for `unused-variable` when multiple-assignment contains a `nonlocal`"""
    myint, mylist = 0, []
    print(mylist)

    def inner():
        nonlocal myint
        mylist.append(myint)
        myint += 1

    return inner()


nonlocal APPLE  # [nonlocal-without-binding]
APPLE = 42
SomeClass
python
getsentry__sentry
src/sentry/api/serializers/models/organization_member/expand/roles.py
{ "start": 1209, "end": 3468 }
class ____(OrganizationMemberWithTeamsSerializer):
    """Extends the teams serializer with detailed user data and the org/team
    role lists the requesting user is allowed to assign."""

    def __init__(
        self,
        allowed_roles: Iterable[Role],
        expand: Sequence[str] | None = None,
    ) -> None:
        super().__init__(expand)
        self.allowed_roles = allowed_roles

    def get_attrs(
        self,
        item_list: Sequence[OrganizationMember],
        user: User | RpcUser | AnonymousUser,
        **kwargs: Any,
    ) -> MutableMapping[OrganizationMember, MutableMapping[str, Any]]:
        result = super().get_attrs(item_list, user, **kwargs)

        # Bulk-serialize users for all members in one RPC; keys are stringified
        # user ids as returned by user_service.
        users_by_id = {
            u["id"]: u
            for u in user_service.serialize_many(
                filter=dict(user_ids=[om.user_id for om in item_list if om.user_id is not None]),
                serializer=UserSerializeType.DETAILED,
            )
        }
        # Filter out emails from the serialized user data
        for user_data in users_by_id.values():
            user_data.pop("emails", None)
        for item in item_list:
            result.setdefault(item, {})["serializedUser"] = users_by_id.get(str(item.user_id), {})
        return result

    def serialize(
        self,
        obj: OrganizationMember,
        attrs: Mapping[str, Any],
        user: User | RpcUser | AnonymousUser,
        **kwargs: Any,
    ) -> OrganizationMemberWithRolesResponse:
        context = cast(
            OrganizationMemberWithRolesResponse,
            super().serialize(obj, attrs, user, **kwargs),
        )

        # Detailed user payload is exposed only to callers with role privileges.
        if self.allowed_roles:
            context["user"] = attrs.get("serializedUser", {})

        context["isOnlyOwner"] = obj.is_only_owner()

        organization_role_list = [
            role for role in organization_roles.get_all() if not _is_retired_role_hidden(role, obj)
        ]
        context["orgRoleList"] = serialize(
            organization_role_list,
            serializer=OrganizationRoleSerializer(organization=obj.organization),
            allowed_roles=self.allowed_roles,
        )
        context["roles"] = context["orgRoleList"]  # deprecated

        context["teamRoleList"] = serialize(
            team_roles.get_all(), serializer=TeamRoleSerializer(organization=obj.organization)
        )

        return context
OrganizationMemberWithRolesSerializer
python
pytorch__pytorch
torch/distributed/checkpoint/default_planner.py
{ "start": 1784, "end": 10846 }
class ____(SavePlanner):
    """Default checkpoint SavePlanner: optionally flattens the state_dict and
    sharded tensors, dedups replicated tensors across ranks, and can cache
    plans (keyed by planner class name) across save invocations so unchanged
    local plans skip the collective exchange with the coordinator."""

    # Mapping from flattened keys back to the original nested state_dict paths.
    mappings: FLATTEN_MAPPING

    def __init__(
        self,
        flatten_state_dict: bool = True,
        flatten_sharded_tensors: bool = True,
        dedup_replicated_tensors: Optional[bool] = None,
        dedup_save_to_lowest_rank: bool = False,
        enable_plan_caching: bool = False,
    ) -> None:
        self.flatten_state_dict = flatten_state_dict
        self.flatten_sharded_tensors = flatten_sharded_tensors
        self.mappings = {}
        self.dedup_save_to_lowest_rank = dedup_save_to_lowest_rank
        if dedup_replicated_tensors is not None:
            logger.warning(
                "DefaultSavePlanner's `dedup_replicated_tensors` argument is being "
                "deprecated, and no longer has any effect. Please remove this argument "
                "from your call."
            )
        # All class-level caches on SavePlanner are keyed by this name.
        self._cached_plans_key: str = self.__class__.__name__
        self._enable_plan_caching = enable_plan_caching

    def set_up_planner(
        self,
        state_dict: STATE_DICT_TYPE,
        storage_meta: Optional[StorageMeta] = None,
        is_coordinator: bool = False,
    ) -> None:
        if self.flatten_state_dict:
            state_dict, self.mappings = flatten_state_dict(state_dict)
        if self.flatten_sharded_tensors:
            state_dict = _flatten_sharded_tensors(state_dict)
        self.state_dict = state_dict
        self.is_coordinator = is_coordinator

    def create_local_plan(self) -> SavePlan:
        plan = create_default_local_save_plan(self.state_dict, self.is_coordinator)
        if self.flatten_state_dict:
            plan = dataclasses.replace(plan, planner_data=self.mappings)
        self.plan = plan

        if self._enable_plan_caching:
            # If plans are equal, we can skip sending the plan to the coordinator.
            if (
                self._cached_plans_key in SavePlanner._cached_save_plan
                and _compare_save_plans(
                    plan, SavePlanner._cached_save_plan[self._cached_plans_key]
                )
            ):
                logger.info(
                    "No change in the local plan. Skipping sending the plan to the coordinator"
                )
                # usable=False signals "unchanged" to the coordinator side.
                return SavePlan([], usable=False)
            else:
                SavePlanner._cached_save_plan[self._cached_plans_key] = plan

        return self.plan

    def _dedup_save_plans(self, all_plans: list[SavePlan]) -> list[SavePlan]:
        # Hook point: subclasses may override dedup policy.
        return dedup_save_plans(all_plans, self.dedup_save_to_lowest_rank)

    def _create_global_plan(
        self, all_plans: list[SavePlan]
    ) -> tuple[list[SavePlan], Metadata]:
        deduped_plans = self._dedup_save_plans(all_plans)
        global_plan, metadata = create_default_global_save_plan(deduped_plans)

        if self.flatten_state_dict:
            # | does not work for Python 3.8 or older version.
            # merged_mappings = reduce(
            #     lambda x, y: x | y, (p.planner_data for p in global_plan)
            # )
            planner_data_dict = [p.planner_data for p in global_plan]
            merged_mappings = dict(ChainMap(*planner_data_dict))
            metadata = dataclasses.replace(metadata, planner_data=merged_mappings)

        if not _validate_global_plan(global_plan, metadata):
            raise ValueError("Failed to validate global plan")

        return global_plan, metadata

    def _create_global_plan_with_caching(
        self, all_plans: list[SavePlan]
    ) -> tuple[list[SavePlan], list[SavePlan], Metadata]:
        """
        Create global plan with caching.
        Returns a tuple of global_plan_delta, global_plan, metadata.
        """
        global_plan_delta: list[SavePlan] = []
        if self._cached_plans_key not in SavePlanner._cached_all_plans:
            # Case 1: If the plans are not cached, the cache will be hydrated with the
            # all_plans, global_plans (Deduped), and metadata.

            # Cache the original all_plans
            SavePlanner._cached_all_plans[self._cached_plans_key] = all_plans
            global_plan, metadata = self._create_global_plan(all_plans)
            # Cache the deduped and validated global_plan
            SavePlanner._cached_global_plan[self._cached_plans_key] = global_plan
            # Cache the metadata
            SavePlanner._cached_metadata[self._cached_plans_key] = metadata
            # If plans are not cached, global_plan delta will be the same as global plan.
            return global_plan, global_plan, metadata

        # Case 2: Plans are cached
        if not _contains_usable_plan(all_plans):
            # Case 2.1: Plans are cached and the local plans have NOT changed (No usable plans).
            # Global plan delta will be empty plans to avoid the collective overhead.
            # We can reuse the deduped global plan and metadata from the cache directly.
            global_plan_delta = [SavePlan([], usable=False)] * len(all_plans)
            global_plan = SavePlanner._cached_global_plan[self._cached_plans_key]
            metadata = SavePlanner._cached_metadata[self._cached_plans_key]
        else:
            # Case 2.2: Plans are cached but the local plans have changed.
            # We will merge the changed local plans with the cached local plans.
            # Updated plans will overwrite the cached plans. New global plan and metadata will be created and cached.
            # Global plan delta will be created by comparing the new global plan with the cached global plan.
            # Only the global plan delta (updated ones) will be sent to the coordinator to avoid the collective overhead.
            merged_plans = _merge_delta_local_plans(
                SavePlanner._cached_all_plans[self._cached_plans_key], all_plans
            )
            # Cache the updated local plans
            SavePlanner._cached_all_plans[self._cached_plans_key] = merged_plans
            global_plan, metadata = self._create_global_plan(merged_plans)

            if self._cached_plans_key in self._cached_global_plan:
                for cached_plan, new_plan in zip(
                    SavePlanner._cached_global_plan[self._cached_plans_key], global_plan
                ):
                    if _compare_save_plans(cached_plan, new_plan):
                        global_plan_delta.append(SavePlan([], usable=False))
                    else:
                        global_plan_delta.append(new_plan)

            # Cache the new global plan and the metadata
            SavePlanner._cached_global_plan[self._cached_plans_key] = global_plan
            SavePlanner._cached_metadata[self._cached_plans_key] = metadata

        return global_plan_delta, global_plan, metadata

    def create_global_plan(
        self, all_plans: list[SavePlan]
    ) -> tuple[list[SavePlan], Metadata]:
        global_plan_delta: list[SavePlan] = []
        if self._enable_plan_caching:
            # If the plans are cached, we only need to send the global plan delta to be scattered
            # across ranks. Ranks will use the cached final plans instead.
            (
                global_plan_delta,
                global_plan,
                metadata,
            ) = self._create_global_plan_with_caching(all_plans)
        else:
            global_plan, metadata = self._create_global_plan(all_plans)
            # If the caching is not enabled, global delta plan will always be same as the new global plan.
            global_plan_delta = global_plan

        self.global_plan = global_plan
        self.metadata = metadata

        return global_plan_delta, self.metadata

    def _finish_plan_with_caching(self, new_plan: SavePlan) -> SavePlan:
        # A non-usable plan means "unchanged": fall back to the cached final plan.
        finished_plan: SavePlan = new_plan
        if not new_plan.usable:
            finished_plan = SavePlanner._cached_final_save_plan[self._cached_plans_key]
        else:
            finished_plan = new_plan
            SavePlanner._cached_final_save_plan[self._cached_plans_key] = new_plan
        return finished_plan

    def finish_plan(self, new_plan: SavePlan) -> SavePlan:
        finished_plan: SavePlan = new_plan
        if self._enable_plan_caching:
            finished_plan = self._finish_plan_with_caching(new_plan)
        self.plan = finished_plan
        return self.plan

    def resolve_data(self, write_item: WriteItem) -> Union[torch.Tensor, io.BytesIO]:
        object = self.lookup_object(write_item.index)
        return self.transform_object(write_item, object)

    def lookup_object(self, index: MetadataIndex) -> Any:
        """Extension from the planner interface to make it easy to extend the default planner."""
        return find_state_dict_object(self.state_dict, index)

    def transform_object(self, write_item: WriteItem, object: Any):
        """Extension from the planner interface to make it easy to extend the default planner."""
        if write_item.type == WriteItemType.BYTE_IO:
            bytes = io.BytesIO()
            torch.save(object, bytes)
            object = bytes
        return object
DefaultSavePlanner
python
encode__django-rest-framework
tests/test_filters.py
{ "start": 13592, "end": 13780 }
class ____(models.Model):
    """Test model exercising SearchFilter across a ManyToMany relation."""

    title = models.CharField(max_length=20)
    text = models.CharField(max_length=100)
    attributes = models.ManyToManyField(AttributeModel)
SearchFilterModelM2M
python
kubernetes-client__python
kubernetes/client/models/v1_security_context.py
{ "start": 383, "end": 17245 }
class ____(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'allow_privilege_escalation': 'bool', 'app_armor_profile': 'V1AppArmorProfile', 'capabilities': 'V1Capabilities', 'privileged': 'bool', 'proc_mount': 'str', 'read_only_root_filesystem': 'bool', 'run_as_group': 'int', 'run_as_non_root': 'bool', 'run_as_user': 'int', 'se_linux_options': 'V1SELinuxOptions', 'seccomp_profile': 'V1SeccompProfile', 'windows_options': 'V1WindowsSecurityContextOptions' } attribute_map = { 'allow_privilege_escalation': 'allowPrivilegeEscalation', 'app_armor_profile': 'appArmorProfile', 'capabilities': 'capabilities', 'privileged': 'privileged', 'proc_mount': 'procMount', 'read_only_root_filesystem': 'readOnlyRootFilesystem', 'run_as_group': 'runAsGroup', 'run_as_non_root': 'runAsNonRoot', 'run_as_user': 'runAsUser', 'se_linux_options': 'seLinuxOptions', 'seccomp_profile': 'seccompProfile', 'windows_options': 'windowsOptions' } def __init__(self, allow_privilege_escalation=None, app_armor_profile=None, capabilities=None, privileged=None, proc_mount=None, read_only_root_filesystem=None, run_as_group=None, run_as_non_root=None, run_as_user=None, se_linux_options=None, seccomp_profile=None, windows_options=None, local_vars_configuration=None): # noqa: E501 """V1SecurityContext - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._allow_privilege_escalation = None self._app_armor_profile = None self._capabilities = None self._privileged = None self._proc_mount = None self._read_only_root_filesystem = None self._run_as_group = None self._run_as_non_root = None 
self._run_as_user = None self._se_linux_options = None self._seccomp_profile = None self._windows_options = None self.discriminator = None if allow_privilege_escalation is not None: self.allow_privilege_escalation = allow_privilege_escalation if app_armor_profile is not None: self.app_armor_profile = app_armor_profile if capabilities is not None: self.capabilities = capabilities if privileged is not None: self.privileged = privileged if proc_mount is not None: self.proc_mount = proc_mount if read_only_root_filesystem is not None: self.read_only_root_filesystem = read_only_root_filesystem if run_as_group is not None: self.run_as_group = run_as_group if run_as_non_root is not None: self.run_as_non_root = run_as_non_root if run_as_user is not None: self.run_as_user = run_as_user if se_linux_options is not None: self.se_linux_options = se_linux_options if seccomp_profile is not None: self.seccomp_profile = seccomp_profile if windows_options is not None: self.windows_options = windows_options @property def allow_privilege_escalation(self): """Gets the allow_privilege_escalation of this V1SecurityContext. # noqa: E501 AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows. # noqa: E501 :return: The allow_privilege_escalation of this V1SecurityContext. # noqa: E501 :rtype: bool """ return self._allow_privilege_escalation @allow_privilege_escalation.setter def allow_privilege_escalation(self, allow_privilege_escalation): """Sets the allow_privilege_escalation of this V1SecurityContext. AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. 
This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows. # noqa: E501 :param allow_privilege_escalation: The allow_privilege_escalation of this V1SecurityContext. # noqa: E501 :type: bool """ self._allow_privilege_escalation = allow_privilege_escalation @property def app_armor_profile(self): """Gets the app_armor_profile of this V1SecurityContext. # noqa: E501 :return: The app_armor_profile of this V1SecurityContext. # noqa: E501 :rtype: V1AppArmorProfile """ return self._app_armor_profile @app_armor_profile.setter def app_armor_profile(self, app_armor_profile): """Sets the app_armor_profile of this V1SecurityContext. :param app_armor_profile: The app_armor_profile of this V1SecurityContext. # noqa: E501 :type: V1AppArmorProfile """ self._app_armor_profile = app_armor_profile @property def capabilities(self): """Gets the capabilities of this V1SecurityContext. # noqa: E501 :return: The capabilities of this V1SecurityContext. # noqa: E501 :rtype: V1Capabilities """ return self._capabilities @capabilities.setter def capabilities(self, capabilities): """Sets the capabilities of this V1SecurityContext. :param capabilities: The capabilities of this V1SecurityContext. # noqa: E501 :type: V1Capabilities """ self._capabilities = capabilities @property def privileged(self): """Gets the privileged of this V1SecurityContext. # noqa: E501 Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows. # noqa: E501 :return: The privileged of this V1SecurityContext. # noqa: E501 :rtype: bool """ return self._privileged @privileged.setter def privileged(self, privileged): """Sets the privileged of this V1SecurityContext. 
Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows. # noqa: E501 :param privileged: The privileged of this V1SecurityContext. # noqa: E501 :type: bool """ self._privileged = privileged @property def proc_mount(self): """Gets the proc_mount of this V1SecurityContext. # noqa: E501 procMount denotes the type of proc mount to use for the containers. The default value is Default which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows. # noqa: E501 :return: The proc_mount of this V1SecurityContext. # noqa: E501 :rtype: str """ return self._proc_mount @proc_mount.setter def proc_mount(self, proc_mount): """Sets the proc_mount of this V1SecurityContext. procMount denotes the type of proc mount to use for the containers. The default value is Default which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows. # noqa: E501 :param proc_mount: The proc_mount of this V1SecurityContext. # noqa: E501 :type: str """ self._proc_mount = proc_mount @property def read_only_root_filesystem(self): """Gets the read_only_root_filesystem of this V1SecurityContext. # noqa: E501 Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows. # noqa: E501 :return: The read_only_root_filesystem of this V1SecurityContext. # noqa: E501 :rtype: bool """ return self._read_only_root_filesystem @read_only_root_filesystem.setter def read_only_root_filesystem(self, read_only_root_filesystem): """Sets the read_only_root_filesystem of this V1SecurityContext. 
Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows. # noqa: E501 :param read_only_root_filesystem: The read_only_root_filesystem of this V1SecurityContext. # noqa: E501 :type: bool """ self._read_only_root_filesystem = read_only_root_filesystem @property def run_as_group(self): """Gets the run_as_group of this V1SecurityContext. # noqa: E501 The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. # noqa: E501 :return: The run_as_group of this V1SecurityContext. # noqa: E501 :rtype: int """ return self._run_as_group @run_as_group.setter def run_as_group(self, run_as_group): """Sets the run_as_group of this V1SecurityContext. The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. # noqa: E501 :param run_as_group: The run_as_group of this V1SecurityContext. # noqa: E501 :type: int """ self._run_as_group = run_as_group @property def run_as_non_root(self): """Gets the run_as_non_root of this V1SecurityContext. # noqa: E501 Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. # noqa: E501 :return: The run_as_non_root of this V1SecurityContext. 
# noqa: E501 :rtype: bool """ return self._run_as_non_root @run_as_non_root.setter def run_as_non_root(self, run_as_non_root): """Sets the run_as_non_root of this V1SecurityContext. Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. # noqa: E501 :param run_as_non_root: The run_as_non_root of this V1SecurityContext. # noqa: E501 :type: bool """ self._run_as_non_root = run_as_non_root @property def run_as_user(self): """Gets the run_as_user of this V1SecurityContext. # noqa: E501 The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. # noqa: E501 :return: The run_as_user of this V1SecurityContext. # noqa: E501 :rtype: int """ return self._run_as_user @run_as_user.setter def run_as_user(self, run_as_user): """Sets the run_as_user of this V1SecurityContext. The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. # noqa: E501 :param run_as_user: The run_as_user of this V1SecurityContext. # noqa: E501 :type: int """ self._run_as_user = run_as_user @property def se_linux_options(self): """Gets the se_linux_options of this V1SecurityContext. 
# noqa: E501 :return: The se_linux_options of this V1SecurityContext. # noqa: E501 :rtype: V1SELinuxOptions """ return self._se_linux_options @se_linux_options.setter def se_linux_options(self, se_linux_options): """Sets the se_linux_options of this V1SecurityContext. :param se_linux_options: The se_linux_options of this V1SecurityContext. # noqa: E501 :type: V1SELinuxOptions """ self._se_linux_options = se_linux_options @property def seccomp_profile(self): """Gets the seccomp_profile of this V1SecurityContext. # noqa: E501 :return: The seccomp_profile of this V1SecurityContext. # noqa: E501 :rtype: V1SeccompProfile """ return self._seccomp_profile @seccomp_profile.setter def seccomp_profile(self, seccomp_profile): """Sets the seccomp_profile of this V1SecurityContext. :param seccomp_profile: The seccomp_profile of this V1SecurityContext. # noqa: E501 :type: V1SeccompProfile """ self._seccomp_profile = seccomp_profile @property def windows_options(self): """Gets the windows_options of this V1SecurityContext. # noqa: E501 :return: The windows_options of this V1SecurityContext. # noqa: E501 :rtype: V1WindowsSecurityContextOptions """ return self._windows_options @windows_options.setter def windows_options(self, windows_options): """Sets the windows_options of this V1SecurityContext. :param windows_options: The windows_options of this V1SecurityContext. 
# noqa: E501 :type: V1WindowsSecurityContextOptions """ self._windows_options = windows_options def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1SecurityContext): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1SecurityContext): return True return self.to_dict() != other.to_dict()
V1SecurityContext
python
automl__auto-sklearn
test/test_pipeline/components/feature_preprocessing/test_select_percentile_regression.py
{ "start": 255, "end": 1944 }
class ____(unittest.TestCase): def test_default_configuration(self): transformation, original = _test_preprocessing( dataset="boston", Preprocessor=SelectPercentileRegression, ) self.assertEqual(transformation.shape[0], original.shape[0]) self.assertEqual(transformation.shape[1], int(original.shape[1] / 2)) self.assertFalse((transformation == 0).all()) def test_preprocessing_dtype(self): # Dense # np.float32 X_train, Y_train, X_test, Y_test = get_dataset("iris") self.assertEqual(X_train.dtype, np.float32) configuration_space = ( SelectPercentileRegression.get_hyperparameter_search_space() ) default = configuration_space.get_default_configuration() preprocessor = SelectPercentileRegression( random_state=1, **{hp_name: default[hp_name] for hp_name in default} ) preprocessor.fit(X_train, Y_train) Xt = preprocessor.transform(X_train) self.assertEqual(Xt.dtype, np.float32) # np.float64 X_train, Y_train, X_test, Y_test = get_dataset("iris") X_train = X_train.astype(np.float64) configuration_space = ( SelectPercentileRegression.get_hyperparameter_search_space() ) default = configuration_space.get_default_configuration() preprocessor = SelectPercentileRegression( random_state=1, **{hp_name: default[hp_name] for hp_name in default} ) preprocessor.fit(X_train, Y_train) Xt = preprocessor.transform(X_train) self.assertEqual(Xt.dtype, np.float64)
SelectPercentileRegressionTest
python
dagster-io__dagster
python_modules/dagster-graphql/dagster_graphql/schema/instigation.py
{ "start": 2503, "end": 2653 }
class ____(graphene.Enum): RUNNING = "RUNNING" STOPPED = "STOPPED" class Meta: name = "InstigationStatus"
GrapheneInstigationStatus
python
great-expectations__great_expectations
great_expectations/render/renderer_configuration.py
{ "start": 4144, "end": 4351 }
class ____(_RendererValueBase): """Represents each value within a row of a header_row or a table.""" renderer_schema: RendererSchema = Field(alias="schema") value: Optional[Any]
RendererTableValue
python
pyinstaller__pyinstaller
tests/functional/modules/pyi_testmod_relimp/B/D.py
{ "start": 542, "end": 589 }
class ____: name = 'pyi_testmod_relimp.B.D.X'
X
python
tornadoweb__tornado
tornado/test/web_test.py
{ "start": 62057, "end": 63652 }
class ____(WebTestCase): class Handler(RequestHandler): def initialize(self, reply): self.reply = reply def get(self): self.write(self.reply) def get_handlers(self): return [("/foo", HostMatchingTest.Handler, {"reply": "wildcard"})] def test_host_matching(self): self.app.add_handlers( "www.example.com", [("/foo", HostMatchingTest.Handler, {"reply": "[0]"})] ) self.app.add_handlers( r"www\.example\.com", [("/bar", HostMatchingTest.Handler, {"reply": "[1]"})] ) self.app.add_handlers( "www.example.com", [("/baz", HostMatchingTest.Handler, {"reply": "[2]"})] ) self.app.add_handlers( "www.e.*e.com", [("/baz", HostMatchingTest.Handler, {"reply": "[3]"})] ) response = self.fetch("/foo") self.assertEqual(response.body, b"wildcard") response = self.fetch("/bar") self.assertEqual(response.code, 404) response = self.fetch("/baz") self.assertEqual(response.code, 404) response = self.fetch("/foo", headers={"Host": "www.example.com"}) self.assertEqual(response.body, b"[0]") response = self.fetch("/bar", headers={"Host": "www.example.com"}) self.assertEqual(response.body, b"[1]") response = self.fetch("/baz", headers={"Host": "www.example.com"}) self.assertEqual(response.body, b"[2]") response = self.fetch("/baz", headers={"Host": "www.exe.com"}) self.assertEqual(response.body, b"[3]")
HostMatchingTest
python
redis__redis-py
tests/test_asyncio/test_pubsub.py
{ "start": 2607, "end": 12170 }
class ____: async def _test_subscribe_unsubscribe( self, p, sub_type, unsub_type, sub_func, unsub_func, keys ): for key in keys: assert await sub_func(key) is None # should be a message for each channel/pattern we just subscribed to for i, key in enumerate(keys): assert await wait_for_message(p) == make_message(sub_type, key, i + 1) for key in keys: assert await unsub_func(key) is None # should be a message for each channel/pattern we just unsubscribed # from for i, key in enumerate(keys): i = len(keys) - 1 - i assert await wait_for_message(p) == make_message(unsub_type, key, i) async def test_channel_subscribe_unsubscribe(self, pubsub): kwargs = make_subscribe_test_data(pubsub, "channel") await self._test_subscribe_unsubscribe(**kwargs) async def test_pattern_subscribe_unsubscribe(self, pubsub): kwargs = make_subscribe_test_data(pubsub, "pattern") await self._test_subscribe_unsubscribe(**kwargs) @pytest.mark.onlynoncluster async def _test_resubscribe_on_reconnection( self, p, sub_type, unsub_type, sub_func, unsub_func, keys ): for key in keys: assert await sub_func(key) is None # should be a message for each channel/pattern we just subscribed to for i, key in enumerate(keys): assert await wait_for_message(p) == make_message(sub_type, key, i + 1) # manually disconnect await p.connection.disconnect() # calling get_message again reconnects and resubscribes # note, we may not re-subscribe to channels in exactly the same order # so we have to do some extra checks to make sure we got them all messages = [] for i in range(len(keys)): messages.append(await wait_for_message(p)) unique_channels = set() assert len(messages) == len(keys) for i, message in enumerate(messages): assert message["type"] == sub_type assert message["data"] == i + 1 assert isinstance(message["channel"], bytes) channel = message["channel"].decode("utf-8") unique_channels.add(channel) assert len(unique_channels) == len(keys) for channel in unique_channels: assert channel in keys async def 
test_resubscribe_to_channels_on_reconnection(self, pubsub): kwargs = make_subscribe_test_data(pubsub, "channel") await self._test_resubscribe_on_reconnection(**kwargs) async def test_resubscribe_to_patterns_on_reconnection(self, pubsub): kwargs = make_subscribe_test_data(pubsub, "pattern") await self._test_resubscribe_on_reconnection(**kwargs) async def _test_subscribed_property( self, p, sub_type, unsub_type, sub_func, unsub_func, keys ): assert p.subscribed is False await sub_func(keys[0]) # we're now subscribed even though we haven't processed the # reply from the server just yet assert p.subscribed is True assert await wait_for_message(p) == make_message(sub_type, keys[0], 1) # we're still subscribed assert p.subscribed is True # unsubscribe from all channels await unsub_func() # we're still technically subscribed until we process the # response messages from the server assert p.subscribed is True assert await wait_for_message(p) == make_message(unsub_type, keys[0], 0) # now we're no longer subscribed as no more messages can be delivered # to any channels we were listening to assert p.subscribed is False # subscribing again flips the flag back await sub_func(keys[0]) assert p.subscribed is True assert await wait_for_message(p) == make_message(sub_type, keys[0], 1) # unsubscribe again await unsub_func() assert p.subscribed is True # subscribe to another channel before reading the unsubscribe response await sub_func(keys[1]) assert p.subscribed is True # read the unsubscribe for key1 assert await wait_for_message(p) == make_message(unsub_type, keys[0], 0) # we're still subscribed to key2, so subscribed should still be True assert p.subscribed is True # read the key2 subscribe message assert await wait_for_message(p) == make_message(sub_type, keys[1], 1) await unsub_func() # haven't read the message yet, so we're still subscribed assert p.subscribed is True assert await wait_for_message(p) == make_message(unsub_type, keys[1], 0) # now we're finally unsubscribed 
assert p.subscribed is False async def test_subscribe_property_with_channels(self, pubsub): kwargs = make_subscribe_test_data(pubsub, "channel") await self._test_subscribed_property(**kwargs) @pytest.mark.onlynoncluster async def test_subscribe_property_with_patterns(self, pubsub): kwargs = make_subscribe_test_data(pubsub, "pattern") await self._test_subscribed_property(**kwargs) async def test_aclosing(self, r: redis.Redis): p = r.pubsub() async with aclosing(p): assert p.subscribed is False await p.subscribe("foo") assert p.subscribed is True assert p.subscribed is False async def test_context_manager(self, r: redis.Redis): p = r.pubsub() async with p: assert p.subscribed is False await p.subscribe("foo") assert p.subscribed is True assert p.subscribed is False async def test_close_is_aclose(self, r: redis.Redis): """ Test backwards compatible close method """ p = r.pubsub() assert p.subscribed is False await p.subscribe("foo") assert p.subscribed is True with pytest.deprecated_call(): await p.close() assert p.subscribed is False async def test_reset_is_aclose(self, r: redis.Redis): """ Test backwards compatible reset method """ p = r.pubsub() assert p.subscribed is False await p.subscribe("foo") assert p.subscribed is True with pytest.deprecated_call(): await p.reset() assert p.subscribed is False async def test_ignore_all_subscribe_messages(self, r: redis.Redis): p = r.pubsub(ignore_subscribe_messages=True) checks = ( (p.subscribe, "foo"), (p.unsubscribe, "foo"), (p.psubscribe, "f*"), (p.punsubscribe, "f*"), ) assert p.subscribed is False for func, channel in checks: assert await func(channel) is None assert p.subscribed is True assert await wait_for_message(p) is None assert p.subscribed is False await p.aclose() async def test_ignore_individual_subscribe_messages(self, pubsub): p = pubsub checks = ( (p.subscribe, "foo"), (p.unsubscribe, "foo"), (p.psubscribe, "f*"), (p.punsubscribe, "f*"), ) assert p.subscribed is False for func, channel in checks: assert 
await func(channel) is None assert p.subscribed is True message = await wait_for_message(p, ignore_subscribe_messages=True) assert message is None assert p.subscribed is False async def test_sub_unsub_resub_channels(self, pubsub): kwargs = make_subscribe_test_data(pubsub, "channel") await self._test_sub_unsub_resub(**kwargs) @pytest.mark.onlynoncluster async def test_sub_unsub_resub_patterns(self, pubsub): kwargs = make_subscribe_test_data(pubsub, "pattern") await self._test_sub_unsub_resub(**kwargs) async def _test_sub_unsub_resub( self, p, sub_type, unsub_type, sub_func, unsub_func, keys ): # https://github.com/andymccurdy/redis-py/issues/764 key = keys[0] await sub_func(key) await unsub_func(key) await sub_func(key) assert p.subscribed is True assert await wait_for_message(p) == make_message(sub_type, key, 1) assert await wait_for_message(p) == make_message(unsub_type, key, 0) assert await wait_for_message(p) == make_message(sub_type, key, 1) assert p.subscribed is True async def test_sub_unsub_all_resub_channels(self, pubsub): kwargs = make_subscribe_test_data(pubsub, "channel") await self._test_sub_unsub_all_resub(**kwargs) async def test_sub_unsub_all_resub_patterns(self, pubsub): kwargs = make_subscribe_test_data(pubsub, "pattern") await self._test_sub_unsub_all_resub(**kwargs) async def _test_sub_unsub_all_resub( self, p, sub_type, unsub_type, sub_func, unsub_func, keys ): # https://github.com/andymccurdy/redis-py/issues/764 key = keys[0] await sub_func(key) await unsub_func() await sub_func(key) assert p.subscribed is True assert await wait_for_message(p) == make_message(sub_type, key, 1) assert await wait_for_message(p) == make_message(unsub_type, key, 0) assert await wait_for_message(p) == make_message(sub_type, key, 1) assert p.subscribed is True @pytest.mark.onlynoncluster
TestPubSubSubscribeUnsubscribe
python
Netflix__metaflow
test/core/tests/s3_failure.py
{ "start": 67, "end": 1500 }
class ____(MetaflowTest): """ Test that S3 failures are handled correctly. """ PRIORITY = 1 SKIP_GRAPHS = [ "simple_switch", "nested_switch", "branch_in_switch", "foreach_in_switch", "switch_in_branch", "switch_in_foreach", "recursive_switch", "recursive_switch_inside_foreach", ] HEADER = """ import os os.environ['TEST_S3_RETRY'] = '1' """ @steps(0, ["singleton-start"], required=True) def step_start(self): # we need a unique artifact for every run which we can reconstruct # independently in the start and end tasks from metaflow import current self.x = "%s/%s" % (current.flow_name, current.run_id) @steps(0, ["end"]) def step_end(self): from metaflow import current run_id = "%s/%s" % (current.flow_name, current.run_id) assert_equals(self.x, run_id) @steps(1, ["all"]) def step_all(self): pass def check_results(self, flow, checker): run = checker.get_run() if run: # we should see TEST_S3_RETRY error in the logs # when --datastore=s3 checker.assert_log("start", "stderr", "TEST_S3_RETRY", exact_match=False) run_id = "S3FailureTestFlow/%s" % checker.run_id checker.assert_artifact("start", "x", run_id) checker.assert_artifact("end", "x", run_id)
S3FailureTest
python
PrefectHQ__prefect
src/prefect/server/events/schemas/automations.py
{ "start": 21849, "end": 22055 }
class ____(AutomationCore, ActionBaseModel, extra="forbid"): owner_resource: Optional[str] = Field( default=None, description="The resource to which this automation belongs" )
AutomationCreate
python
pandas-dev__pandas
pandas/io/parsers/c_parser_wrapper.py
{ "start": 1209, "end": 12915 }
class ____(ParserBase): low_memory: bool _reader: parsers.TextReader def __init__(self, src: ReadCsvBuffer[str], **kwds) -> None: super().__init__(kwds) self.kwds = kwds kwds = kwds.copy() self.low_memory = kwds.pop("low_memory", False) # #2442 kwds["allow_leading_cols"] = self.index_col is not False # GH20529, validate usecol arg before TextReader kwds["usecols"] = self.usecols # Have to pass int, would break tests using TextReader directly otherwise :( kwds["on_bad_lines"] = self.on_bad_lines.value for key in ( "storage_options", "encoding", "memory_map", "compression", ): kwds.pop(key, None) kwds["dtype"] = ensure_dtype_objs(kwds.get("dtype", None)) if "dtype_backend" not in kwds or kwds["dtype_backend"] is lib.no_default: kwds["dtype_backend"] = "numpy" if kwds["dtype_backend"] == "pyarrow": # Fail here loudly instead of in cython after reading import_optional_dependency("pyarrow") self._reader = parsers.TextReader(src, **kwds) self.unnamed_cols = self._reader.unnamed_cols passed_names = self.names is None if self._reader.header is None: self.names = None else: ( self.names, self.index_names, self.col_names, passed_names, ) = self._extract_multi_indexer_columns( self._reader.header, self.index_names, passed_names, ) if self.names is None: self.names = list(range(self._reader.table_width)) # gh-9755 # # need to set orig_names here first # so that proper indexing can be done # with _set_noconvert_columns # # once names has been filtered, we will # then set orig_names again to names self.orig_names = self.names[:] if self.usecols: usecols = evaluate_callable_usecols(self.usecols, self.orig_names) # GH 14671 # assert for mypy, orig_names is List or None, None would error in issubset assert self.orig_names is not None if self.usecols_dtype == "string" and not set(usecols).issubset( self.orig_names ): self._validate_usecols_names(usecols, self.orig_names) if len(self.names) > len(usecols): self.names = [ n for i, n in enumerate(self.names) if (i in usecols or n in 
usecols) ] if len(self.names) < len(usecols): self._validate_usecols_names( usecols, self.names, ) validate_parse_dates_presence(self.parse_dates, self.names) self._set_noconvert_columns() self.orig_names = self.names if self._reader.leading_cols == 0 and is_index_col(self.index_col): ( index_names, self.names, self.index_col, ) = self._clean_index_names( self.names, self.index_col, ) if self.index_names is None: self.index_names = index_names if self._reader.header is None and not passed_names: assert self.index_names is not None self.index_names = [None] * len(self.index_names) self._implicit_index = self._reader.leading_cols > 0 def close(self) -> None: # close handles opened by C parser try: self._reader.close() except ValueError: pass def _set_noconvert_columns(self) -> None: """ Set the columns that should not undergo dtype conversions. Currently, any column that is involved with date parsing will not undergo such conversions. """ assert self.orig_names is not None # error: Cannot determine type of 'names' # much faster than using orig_names.index(x) xref GH#44106 names_dict = {x: i for i, x in enumerate(self.orig_names)} col_indices = [names_dict[x] for x in self.names] noconvert_columns = self._set_noconvert_dtype_columns( col_indices, self.names, ) for col in noconvert_columns: self._reader.set_noconvert(col) def read( self, nrows: int | None = None, ) -> tuple[ Index | MultiIndex | None, Sequence[Hashable] | MultiIndex, Mapping[Hashable, AnyArrayLike], ]: index: Index | MultiIndex | None column_names: Sequence[Hashable] | MultiIndex try: if self.low_memory: chunks = self._reader.read_low_memory(nrows) # destructive to chunks data = _concatenate_chunks(chunks, self.names) else: data = self._reader.read(nrows) except StopIteration: if self._first_chunk: self._first_chunk = False # assert for mypy, orig_names is List or None, None would error in # list(...) 
in dedup_names assert self.orig_names is not None names = dedup_names( self.orig_names, is_potential_multi_index(self.orig_names, self.index_col), ) index, columns, col_dict = self._get_empty_meta( names, dtype=self.dtype, ) # error: Incompatible types in assignment (expression has type # "list[Hashable] | MultiIndex", variable has type "list[Hashable]") columns = self._maybe_make_multi_index_columns( # type: ignore[assignment] columns, self.col_names ) columns = _filter_usecols(self.usecols, columns) columns_set = set(columns) col_dict = {k: v for k, v in col_dict.items() if k in columns_set} return index, columns, col_dict else: self.close() raise # Done with first read, next time raise StopIteration self._first_chunk = False names = self.names if self._reader.leading_cols: # implicit index, no index names arrays = [] if self.index_col and self._reader.leading_cols != len(self.index_col): raise ParserError( "Could not construct index. Requested to use " f"{len(self.index_col)} number of columns, but " f"{self._reader.leading_cols} left to parse." 
) for i in range(self._reader.leading_cols): if self.index_col is None: values = data.pop(i) else: values = data.pop(self.index_col[i]) if self._should_parse_dates(i): values = date_converter( values, col=( self.index_names[i] if self.index_names is not None else None ), dayfirst=self.dayfirst, cache_dates=self.cache_dates, date_format=self.date_format, ) arrays.append(values) index = ensure_index_from_sequences(arrays) names = _filter_usecols(self.usecols, names) names = dedup_names(names, is_potential_multi_index(names, self.index_col)) # rename dict keys data_tups = sorted(data.items()) data = {k: v for k, (i, v) in zip(names, data_tups, strict=True)} date_data = self._do_date_conversions(names, data) # maybe create a mi on the columns column_names = self._maybe_make_multi_index_columns(names, self.col_names) else: # rename dict keys data_tups = sorted(data.items()) # ugh, mutation # assert for mypy, orig_names is List or None, None would error in list(...) assert self.orig_names is not None names = list(self.orig_names) names = dedup_names(names, is_potential_multi_index(names, self.index_col)) names = _filter_usecols(self.usecols, names) # columns as list alldata = [x[1] for x in data_tups] if self.usecols is None: self._check_data_length(names, alldata) data = {k: v for k, (i, v) in zip(names, data_tups, strict=False)} date_data = self._do_date_conversions(names, data) index, column_names = self._make_index(alldata, names) return index, column_names, date_data def _filter_usecols(usecols, names: SequenceT) -> SequenceT | list[Hashable]: # hackish usecols = evaluate_callable_usecols(usecols, names) if usecols is not None and len(names) != len(usecols): return [name for i, name in enumerate(names) if i in usecols or name in usecols] return names def _concatenate_chunks( chunks: list[dict[int, ArrayLike]], column_names: list[str] ) -> dict: """ Concatenate chunks of data read with low_memory=True. 
The tricky part is handling Categoricals, where different chunks may have different inferred categories. """ names = list(chunks[0].keys()) warning_columns = [] result: dict = {} for name in names: arrs = [chunk.pop(name) for chunk in chunks] # Check each arr for consistent types. dtypes = {a.dtype for a in arrs} non_cat_dtypes = {x for x in dtypes if not isinstance(x, CategoricalDtype)} dtype = dtypes.pop() if isinstance(dtype, CategoricalDtype): result[name] = union_categoricals(arrs, sort_categories=False) else: result[name] = concat_compat(arrs) if len(non_cat_dtypes) > 1 and result[name].dtype == np.dtype(object): warning_columns.append(column_names[name]) if warning_columns: warning_names = ", ".join( [f"{index}: {name}" for index, name in enumerate(warning_columns)] ) warning_message = " ".join( [ f"Columns ({warning_names}) have mixed types. " f"Specify dtype option on import or set low_memory=False." ] ) warnings.warn(warning_message, DtypeWarning, stacklevel=find_stack_level()) return result def ensure_dtype_objs( dtype: DtypeArg | dict[Hashable, DtypeArg] | None, ) -> DtypeObj | dict[Hashable, DtypeObj] | None: """ Ensure we have either None, a dtype object, or a dictionary mapping to dtype objects. """ if isinstance(dtype, defaultdict): # "None" not callable [misc] default_dtype = pandas_dtype(dtype.default_factory()) # type: ignore[misc] dtype_converted: defaultdict = defaultdict(lambda: default_dtype) for key in dtype.keys(): dtype_converted[key] = pandas_dtype(dtype[key]) return dtype_converted elif isinstance(dtype, dict): return {k: pandas_dtype(dtype[k]) for k in dtype} elif dtype is not None: return pandas_dtype(dtype) return dtype
CParserWrapper
python
walkccc__LeetCode
solutions/418. Sentence Screen Fitting/418.py
{ "start": 0, "end": 356 }
class ____: def wordsTyping(self, sentence: list[str], rows: int, cols: int) -> int: combined = ' '.join(sentence) + ' ' n = len(combined) i = 0 for _ in range(rows): i += cols if combined[i % n] == ' ': i += 1 else: while i > 0 and combined[(i - 1) % n] != ' ': i -= 1 return i // n
Solution
python
ansible__ansible
lib/ansible/_internal/_json/_profiles/_legacy.py
{ "start": 1048, "end": 3081 }
class ____(_json.AnsibleVariableVisitor): """Variable visitor that supports optional trust inversion for legacy serialization.""" def __init__( self, *, trusted_as_template: bool = False, invert_trust: bool = False, origin: _tags.Origin | None = None, convert_mapping_to_dict: bool = False, convert_sequence_to_list: bool = False, convert_custom_scalars: bool = False, ): super().__init__( trusted_as_template=trusted_as_template, origin=origin, convert_mapping_to_dict=convert_mapping_to_dict, convert_sequence_to_list=convert_sequence_to_list, convert_custom_scalars=convert_custom_scalars, encrypted_string_behavior=_json.EncryptedStringBehavior.PRESERVE, ) self.invert_trust = invert_trust if trusted_as_template and invert_trust: raise ValueError('trusted_as_template is mutually exclusive with invert_trust') @property def _allow_trust(self) -> bool: """ This profile supports trust application in all contexts. Derived implementations can override this behavior for application-dependent/schema-aware trust. """ return True def _early_visit(self, value, value_type) -> _t.Any: """Similar to base implementation, but supports an intermediate wrapper for trust inversion.""" if value_type in (str, _datatag._AnsibleTaggedStr): # apply compatibility behavior if self.trusted_as_template and self._allow_trust: result = _tags.TrustedAsTemplate().tag(value) elif self.invert_trust and not _tags.TrustedAsTemplate.is_tagged_on(value) and self._allow_trust: result = _Untrusted(value) else: result = value elif value_type is _Untrusted: result = value.value else: result = _json._sentinel return result
_LegacyVariableVisitor
python
html5lib__html5lib-python
html5lib/html5parser.py
{ "start": 18101, "end": 24176 }
class ____(Phase): __slots__ = tuple() def processSpaceCharacters(self, token): pass def processComment(self, token): self.tree.insertComment(token, self.tree.document) def processDoctype(self, token): name = token["name"] publicId = token["publicId"] systemId = token["systemId"] correct = token["correct"] if (name != "html" or publicId is not None or systemId is not None and systemId != "about:legacy-compat"): self.parser.parseError("unknown-doctype") if publicId is None: publicId = "" self.tree.insertDoctype(token) if publicId != "": publicId = publicId.translate(asciiUpper2Lower) if (not correct or token["name"] != "html" or publicId.startswith( ("+//silmaril//dtd html pro v0r11 19970101//", "-//advasoft ltd//dtd html 3.0 aswedit + extensions//", "-//as//dtd html 3.0 aswedit + extensions//", "-//ietf//dtd html 2.0 level 1//", "-//ietf//dtd html 2.0 level 2//", "-//ietf//dtd html 2.0 strict level 1//", "-//ietf//dtd html 2.0 strict level 2//", "-//ietf//dtd html 2.0 strict//", "-//ietf//dtd html 2.0//", "-//ietf//dtd html 2.1e//", "-//ietf//dtd html 3.0//", "-//ietf//dtd html 3.2 final//", "-//ietf//dtd html 3.2//", "-//ietf//dtd html 3//", "-//ietf//dtd html level 0//", "-//ietf//dtd html level 1//", "-//ietf//dtd html level 2//", "-//ietf//dtd html level 3//", "-//ietf//dtd html strict level 0//", "-//ietf//dtd html strict level 1//", "-//ietf//dtd html strict level 2//", "-//ietf//dtd html strict level 3//", "-//ietf//dtd html strict//", "-//ietf//dtd html//", "-//metrius//dtd metrius presentational//", "-//microsoft//dtd internet explorer 2.0 html strict//", "-//microsoft//dtd internet explorer 2.0 html//", "-//microsoft//dtd internet explorer 2.0 tables//", "-//microsoft//dtd internet explorer 3.0 html strict//", "-//microsoft//dtd internet explorer 3.0 html//", "-//microsoft//dtd internet explorer 3.0 tables//", "-//netscape comm. corp.//dtd html//", "-//netscape comm. 
corp.//dtd strict html//", "-//o'reilly and associates//dtd html 2.0//", "-//o'reilly and associates//dtd html extended 1.0//", "-//o'reilly and associates//dtd html extended relaxed 1.0//", "-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//", "-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//", "-//spyglass//dtd html 2.0 extended//", "-//sq//dtd html 2.0 hotmetal + extensions//", "-//sun microsystems corp.//dtd hotjava html//", "-//sun microsystems corp.//dtd hotjava strict html//", "-//w3c//dtd html 3 1995-03-24//", "-//w3c//dtd html 3.2 draft//", "-//w3c//dtd html 3.2 final//", "-//w3c//dtd html 3.2//", "-//w3c//dtd html 3.2s draft//", "-//w3c//dtd html 4.0 frameset//", "-//w3c//dtd html 4.0 transitional//", "-//w3c//dtd html experimental 19960712//", "-//w3c//dtd html experimental 970421//", "-//w3c//dtd w3 html//", "-//w3o//dtd w3 html 3.0//", "-//webtechs//dtd mozilla html 2.0//", "-//webtechs//dtd mozilla html//")) or publicId in ("-//w3o//dtd w3 html strict 3.0//en//", "-/w3c/dtd html 4.0 transitional/en", "html") or publicId.startswith( ("-//w3c//dtd html 4.01 frameset//", "-//w3c//dtd html 4.01 transitional//")) and systemId is None or systemId and systemId.lower() == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd"): self.parser.compatMode = "quirks" elif (publicId.startswith( ("-//w3c//dtd xhtml 1.0 frameset//", "-//w3c//dtd xhtml 1.0 transitional//")) or publicId.startswith( ("-//w3c//dtd html 4.01 frameset//", "-//w3c//dtd html 4.01 transitional//")) and systemId is not None): self.parser.compatMode = "limited quirks" self.parser.phase = self.parser.phases["beforeHtml"] def anythingElse(self): self.parser.compatMode = "quirks" self.parser.phase = self.parser.phases["beforeHtml"] def processCharacters(self, token): self.parser.parseError("expected-doctype-but-got-chars") self.anythingElse() return token def processStartTag(self, token): 
self.parser.parseError("expected-doctype-but-got-start-tag", {"name": token["name"]}) self.anythingElse() return token def processEndTag(self, token): self.parser.parseError("expected-doctype-but-got-end-tag", {"name": token["name"]}) self.anythingElse() return token def processEOF(self): self.parser.parseError("expected-doctype-but-got-eof") self.anythingElse() return True
InitialPhase
python
keras-team__keras
keras/src/legacy/saving/json_utils_test.py
{ "start": 204, "end": 1343 }
class ____(testing.TestCase): def test_encode_decode_tuple(self): metadata = {"key1": (3, 5), "key2": [(1, (3, 4)), (1,)]} string = json_utils.Encoder().encode(metadata) loaded = json_utils.decode(string) self.assertEqual(set(loaded.keys()), {"key1", "key2"}) self.assertAllEqual(loaded["key1"], (3, 5)) self.assertAllEqual(loaded["key2"], [(1, (3, 4)), (1,)]) def test_encode_decode_enum(self): class Enum(enum.Enum): CLASS_A = "a" CLASS_B = "b" config = {"key": Enum.CLASS_A, "key2": Enum.CLASS_B} string = json_utils.Encoder().encode(config) loaded = json_utils.decode(string) self.assertAllEqual({"key": "a", "key2": "b"}, loaded) def test_encode_decode_bytes(self): b_string = b"abc" json_string = json_utils.Encoder().encode(b_string) loaded = json_utils.decode(json_string) self.assertAllEqual(b_string, loaded) @pytest.mark.skipif( backend.backend() != "tensorflow", reason="These JSON serialization tests are specific to TF components.", )
JsonUtilsTestAllBackends
python
PrefectHQ__prefect
src/prefect/client/schemas/filters.py
{ "start": 15697, "end": 16341 }
class ____(PrefectBaseModel, OperatorMixin): """Filter by `Deployment.tags`.""" all_: Optional[List[str]] = Field( default=None, examples=[["tag-1", "tag-2"]], description=( "A list of tags. Deployments will be returned only if their tags are a" " superset of the list" ), ) any_: Optional[list[str]] = Field( default=None, examples=[["tag-1", "tag-2"]], description="A list of tags to include", ) is_null_: Optional[bool] = Field( default=None, description="If true, only include deployments without tags" )
DeploymentFilterTags
python
tiangolo__fastapi
fastapi/security/http.py
{ "start": 1918, "end": 3247 }
class ____(SecurityBase): def __init__( self, *, scheme: str, scheme_name: Optional[str] = None, description: Optional[str] = None, auto_error: bool = True, ): self.model: HTTPBaseModel = HTTPBaseModel( scheme=scheme, description=description ) self.scheme_name = scheme_name or self.__class__.__name__ self.auto_error = auto_error def make_authenticate_headers(self) -> Dict[str, str]: return {"WWW-Authenticate": f"{self.model.scheme.title()}"} def make_not_authenticated_error(self) -> HTTPException: return HTTPException( status_code=HTTP_401_UNAUTHORIZED, detail="Not authenticated", headers=self.make_authenticate_headers(), ) async def __call__( self, request: Request ) -> Optional[HTTPAuthorizationCredentials]: authorization = request.headers.get("Authorization") scheme, credentials = get_authorization_scheme_param(authorization) if not (authorization and scheme and credentials): if self.auto_error: raise self.make_not_authenticated_error() else: return None return HTTPAuthorizationCredentials(scheme=scheme, credentials=credentials)
HTTPBase
python
allegroai__clearml
clearml/backend_api/services/v2_23/tasks.py
{ "start": 446925, "end": 449564 }
class ____(Response): """ Response of tasks.get_hyper_params endpoint. :param params: Hyper parameters (keyed by task ID) :type params: Sequence[dict] """ _service = "tasks" _action = "get_hyper_params" _version = "2.23" _schema = { "definitions": { "params_item": { "properties": { "description": { "description": "The parameter description. Optional", "type": ["string", "null"], }, "name": { "description": "Name of the parameter. The combination of section and name should be unique", "type": ["string", "null"], }, "section": { "description": "Section that the parameter belongs to", "type": ["string", "null"], }, "type": { "description": "Type of the parameter. Optional", "type": ["string", "null"], }, "value": { "description": "Value of the parameter", "type": ["string", "null"], }, }, "type": "object", } }, "properties": { "params": { "description": "Hyper parameters (keyed by task ID)", "items": { "properties": { "hyperparams": { "description": "Hyper parameters", "items": {"$ref": "#/definitions/params_item"}, "type": "array", }, "task": {"description": "Task ID", "type": "string"}, }, "type": "object", }, "type": ["array", "null"], } }, "type": "object", } def __init__(self, params=None, **kwargs): super(GetHyperParamsResponse, self).__init__(**kwargs) self.params = params @schema_property("params") def params(self): return self._property_params @params.setter def params(self, value): if value is None: self._property_params = None return self.assert_isinstance(value, "params", (list, tuple)) self.assert_isinstance(value, "params", (dict,), is_array=True) self._property_params = value
GetHyperParamsResponse
python
pytorch__pytorch
test/distributed/test_functional_api.py
{ "start": 2659, "end": 6309 }
class ____(MultiThreadedTestCase): @property def world_size(self): return 4 def setUp(self): super().setUp() self._spawn_threads() def test_expand_1d_rank_list(self): tag, rankset, group_size = ft_c._expand_group([0, 1, 2, 3]) self.assertEqual("", tag) self.assertEqual([0, 1, 2, 3], rankset) self.assertEqual(4, group_size) tag, rankset, group_size = ft_c._expand_group([0, 1, 2, 3], "bla") self.assertEqual("bla", tag) def test_expand_2d_rank_list(self): tag, rankset, group_size = ft_c._expand_group([[0, 1], [2, 3]]) self.assertEqual("", tag) self.assertEqual([0, 1, 2, 3], rankset) self.assertEqual(2, group_size) tag, rankset, group_size = ft_c._expand_group([[0, 1], [2, 3]], "blu") self.assertEqual("blu", tag) with self.assertRaisesRegex(ValueError, "group sizes must be identical"): ft_c._expand_group([[0], [1, 2, 3]]) def test_expand_process_group(self): tag, rankset, group_size = ft_c._expand_group(dist.group.WORLD) self.assertEqual(c10d._get_group_tag(dist.group.WORLD), tag) self.assertEqual([0, 1, 2, 3], rankset) self.assertEqual(4, group_size) tag, rankset, group_size = ft_c._expand_group(dist.group.WORLD, "bla") self.assertEqual("bla", tag) my_pg, _ = new_subgroups(group_size=2) tag, rankset, group_size = ft_c._expand_group(my_pg) self.assertEqual(c10d._get_group_tag(my_pg), tag) self.assertEqual(dist.get_process_group_ranks(my_pg), rankset) self.assertEqual(2, group_size) my_pg = None for i in range(dist.get_world_size()): group = c10d._new_group_with_tag([i], pg_tag="my_pg") if i == dist.get_rank(): my_pg = group tag, rankset, group_size = ft_c._expand_group(my_pg) self.assertEqual("my_pg", tag) self.assertEqual([dist.get_rank()], rankset) self.assertEqual(1, group_size) tag, rankset, group_size = ft_c._expand_group(my_pg, "bla") self.assertEqual("bla", tag) def test_expand_device_mesh(self): mesh = dt.DeviceMesh("cpu", torch.arange(4)) tag, rankset, group_size = ft_c._expand_group(mesh) self.assertEqual(c10d._get_group_tag(mesh.get_group(mesh_dim=0)), tag) 
self.assertEqual([0, 1, 2, 3], rankset) self.assertEqual(4, group_size) mesh = dt.DeviceMesh("cpu", torch.arange(4)) tag, rankset, group_size = ft_c._expand_group(mesh) self.assertEqual(c10d._get_group_tag(mesh.get_group(mesh_dim=0)), tag) self.assertEqual([0, 1, 2, 3], rankset) self.assertEqual(4, group_size) def test_expand_device_mesh_tuple(self): mesh = dt.DeviceMesh("cpu", torch.arange(4).view(2, 2)) with self.assertRaisesRegex(AssertionError, "Only 1D mesh"): tag, rankset, group_size = ft_c._expand_group(mesh) tag, rankset, group_size = ft_c._expand_group((mesh, 0)) self.assertEqual(c10d._get_group_tag(mesh.get_group(mesh_dim=0)), tag) expected_rankset = [0, 2] if dist.get_rank() in [0, 2] else [1, 3] self.assertEqual(expected_rankset, rankset) self.assertEqual(2, group_size) tag, rankset, group_size = ft_c._expand_group((mesh, 1)) expected_rankset = [0, 1] if dist.get_rank() in [0, 1] else [2, 3] self.assertEqual(c10d._get_group_tag(mesh.get_group(mesh_dim=1)), tag) self.assertEqual(expected_rankset, rankset) self.assertEqual(2, group_size) @skipIfHpu
TestExpand
python
getsentry__sentry
src/sentry/api/authentication.py
{ "start": 10904, "end": 13226 }
class ____(QuietBasicAuthentication): """ Authenticates a Sentry Application using its Client ID and Secret This will be the method by which we identify which Sentry Application is making the request, for any requests not scoped to an installation. For example, the request to exchange a Grant Code for an Api Token. """ def authenticate(self, request: Request): if not request.data: raise AuthenticationFailed("Invalid request") client_id = request.data.get("client_id") client_secret = request.data.get("client_secret") invalid_pair_error = AuthenticationFailed("Invalid Client ID / Secret pair") if not client_id or not client_secret: raise invalid_pair_error try: application = ApiApplication.objects.get(client_id=client_id) except ApiApplication.DoesNotExist: raise invalid_pair_error if not constant_time_compare(application.client_secret, client_secret): raise invalid_pair_error try: user_id = application.sentry_app.proxy_user_id except SentryApp.DoesNotExist: raise invalid_pair_error if user_id is None: raise invalid_pair_error return self.transform_auth(user_id, None) def get_payload_from_client_secret_jwt( headers: HttpHeaders, installation: SentryAppInstallation ) -> dict[str, Any]: auth_header = headers.get("Authorization") if auth_header is None: raise AuthenticationFailed("Header is in invalid form") tokens = auth_header.split(" ") # Should be Bearer <token> if tokens[0].lower() != "bearer": raise AuthenticationFailed("Bearer not present in token") application = installation.sentry_app.application if application is None: raise AuthenticationFailed("Application not found") client_secret = application.client_secret try: encoded_jwt = tokens[1] except IndexError: raise AuthenticationFailed("Invalid Authorization header, should be Bearer <token>") try: payload = jwt.decode(encoded_jwt, client_secret, algorithms=["HS256"]) except Exception as e: raise AuthenticationFailed("Could not validate JWT") from e return payload
ClientIdSecretAuthentication
python
coleifer__peewee
playhouse/dataset.py
{ "start": 10674, "end": 11705 }
class ____(Exporter): def __init__(self, query, iso8601_datetimes=False): super(JSONExporter, self).__init__(query) self.iso8601_datetimes = iso8601_datetimes def _make_default(self): datetime_types = (datetime.datetime, datetime.date, datetime.time) if self.iso8601_datetimes: def default(o): if isinstance(o, datetime_types): return o.isoformat() elif isinstance(o, (Decimal, uuid.UUID)): return str(o) raise TypeError('Unable to serialize %r as JSON' % o) else: def default(o): if isinstance(o, datetime_types + (Decimal, uuid.UUID)): return str(o) raise TypeError('Unable to serialize %r as JSON' % o) return default def export(self, file_obj, **kwargs): json.dump( list(self.query), file_obj, default=self._make_default(), **kwargs)
JSONExporter
python
HypothesisWorks__hypothesis
hypothesis-python/src/hypothesis/strategies/_internal/regex.py
{ "start": 6726, "end": 21668 }
class ____(CharactersBuilder): def __init__(self, *, negate=False, flags=0): self._whitelist_chars = set() self._blacklist_chars = set() self._negate = negate self._alphabet = None self._ignorecase = flags & re.IGNORECASE self.code_to_char = int_to_byte @property def strategy(self): """Returns resulting strategy that generates configured char set.""" allowed = self._whitelist_chars if self._negate: allowed = BYTES_ALL - allowed return st.sampled_from(sorted(allowed)) def add_category(self, category): """Update characters state to match sre_parse object ``category``.""" self._whitelist_chars |= BYTES_LOOKUP[category] @st.composite def maybe_pad(draw, regex, strategy, left_pad_strategy, right_pad_strategy): """Attempt to insert padding around the result of a regex draw while preserving the match.""" result = draw(strategy) left_pad = draw(left_pad_strategy) if left_pad and regex.search(left_pad + result): result = left_pad + result right_pad = draw(right_pad_strategy) if right_pad and regex.search(result + right_pad): result += right_pad return result def base_regex_strategy(regex, parsed=None, alphabet=None): if parsed is None: parsed = sre_parse.parse(regex.pattern, flags=regex.flags) try: s = _strategy( parsed, context=Context(flags=regex.flags), is_unicode=isinstance(regex.pattern, str), alphabet=alphabet, ) except Exception as err: add_note(err, f"{alphabet=} {regex=}") raise return clear_cache_after_draw(s) def regex_strategy( regex, fullmatch, *, alphabet, _temp_jsonschema_hack_no_end_newline=False ): if not hasattr(regex, "pattern"): regex = re.compile(regex) is_unicode = isinstance(regex.pattern, str) parsed = sre_parse.parse(regex.pattern, flags=regex.flags) if fullmatch: if not parsed: return st.just("" if is_unicode else b"") return base_regex_strategy(regex, parsed, alphabet).filter(regex.fullmatch) if not parsed: if is_unicode: return st.text(alphabet=alphabet) else: return st.binary() if is_unicode: base_padding_strategy = st.text(alphabet=alphabet) 
empty = st.just("") newline = st.just("\n") else: base_padding_strategy = st.binary() empty = st.just(b"") newline = st.just(b"\n") right_pad = base_padding_strategy left_pad = base_padding_strategy if parsed[-1][0] == sre.AT: if parsed[-1][1] == sre.AT_END_STRING: right_pad = empty elif parsed[-1][1] == sre.AT_END: if regex.flags & re.MULTILINE: right_pad = st.one_of( empty, st.builds(operator.add, newline, right_pad) ) else: right_pad = st.one_of(empty, newline) # This will be removed when a regex-syntax-translation library exists. # It's a pretty nasty hack, but means that we can match the semantics # of JSONschema's compatible subset of ECMA regex, which is important # for hypothesis-jsonschema and Schemathesis. See e.g. # https://github.com/schemathesis/schemathesis/issues/1241 if _temp_jsonschema_hack_no_end_newline: right_pad = empty if parsed[0][0] == sre.AT: if parsed[0][1] == sre.AT_BEGINNING_STRING: left_pad = empty elif parsed[0][1] == sre.AT_BEGINNING: if regex.flags & re.MULTILINE: left_pad = st.one_of(empty, st.builds(operator.add, left_pad, newline)) else: left_pad = empty base = base_regex_strategy(regex, parsed, alphabet).filter(regex.search) return maybe_pad(regex, base, left_pad, right_pad) def _strategy(codes, context, is_unicode, *, alphabet): """Convert SRE regex parse tree to strategy that generates strings matching that regex represented by that parse tree. `codes` is either a list of SRE regex elements representations or a particular element representation. Each element is a tuple of element code (as string) and parameters. E.g. regex 'ab[0-9]+' compiles to following elements: [ (LITERAL, 97), (LITERAL, 98), (MAX_REPEAT, (1, 4294967295, [ (IN, [ (RANGE, (48, 57)) ]) ])) ] The function recursively traverses regex element tree and converts each element to strategy that generates strings that match that element. Context stores 1. List of groups (for backreferences) 2. Active regex flags (e.g. 
IGNORECASE, DOTALL, UNICODE, they affect behavior of various inner strategies) """ def recurse(codes): return _strategy(codes, context, is_unicode, alphabet=alphabet) if is_unicode: empty = "" to_char = chr else: empty = b"" to_char = int_to_byte binary_char = st.binary(min_size=1, max_size=1) if not isinstance(codes, tuple): # List of codes strategies = [] i = 0 while i < len(codes): if codes[i][0] == sre.LITERAL and not context.flags & re.IGNORECASE: # Merge subsequent "literals" into one `just()` strategy # that generates corresponding text if no IGNORECASE j = i + 1 while j < len(codes) and codes[j][0] == sre.LITERAL: j += 1 if i + 1 < j: chars = empty.join(to_char(charcode) for _, charcode in codes[i:j]) if invalid := chars_not_in_alphabet(alphabet, chars): raise IncompatibleWithAlphabet( f"Literal {chars!r} contains characters {invalid!r} " f"which are not in the specified alphabet" ) strategies.append(st.just(chars)) i = j continue strategies.append(recurse(codes[i])) i += 1 # We handle this separately at the top level, but some regex can # contain empty lists internally, so we need to handle this here too. if not strategies: return st.just(empty) if len(strategies) == 1: return strategies[0] return st.tuples(*strategies).map(empty.join) else: # Single code code, value = codes if code == sre.LITERAL: # Regex 'a' (single char) c = to_char(value) if chars_not_in_alphabet(alphabet, c): raise IncompatibleWithAlphabet( f"Literal {c!r} is not in the specified alphabet" ) if ( context.flags & re.IGNORECASE and c != c.swapcase() and re.match(re.escape(c), c.swapcase(), re.IGNORECASE) is not None and not chars_not_in_alphabet(alphabet, c.swapcase()) ): # We do the explicit check for swapped-case matching because # eg 'ß'.upper() == 'SS' and ignorecase doesn't match it. 
return st.sampled_from([c, c.swapcase()]) return st.just(c) elif code == sre.NOT_LITERAL: # Regex '[^a]' (negation of a single char) c = to_char(value) blacklist = {c} if ( context.flags & re.IGNORECASE and re.match(re.escape(c), c.swapcase(), re.IGNORECASE) is not None ): # There are a few cases where .swapcase() returns two characters, # but is still a case-insensitive match. In such cases we add *both* # characters to our blacklist, to avoid doing the wrong thing for # patterns such as r"[^\u0130]+" where "i\u0307" matches. # # (that's respectively 'Latin letter capital I with dot above' and # 'latin latter i' + 'combining dot above'; see issue #2657) # # As a final additional wrinkle, "latin letter capital I" *also* # case-insensitive-matches, with or without combining dot character. # We therefore have to chain .swapcase() calls until a fixpoint. stack = [c.swapcase()] while stack: for char in stack.pop(): blacklist.add(char) stack.extend(set(char.swapcase()) - blacklist) if is_unicode: return OneCharStringStrategy( unwrap_strategies(alphabet).intervals & charmap.query(exclude_characters=blacklist) ) else: return binary_char.filter(lambda c: c not in blacklist) elif code == sre.IN: # Regex '[abc0-9]' (set of characters) negate = value[0][0] == sre.NEGATE if is_unicode: builder = CharactersBuilder( flags=context.flags, negate=negate, alphabet=alphabet ) else: builder = BytesBuilder(flags=context.flags, negate=negate) for charset_code, charset_value in value: if charset_code == sre.NEGATE: # Regex '[^...]' (negation) # handled by builder = CharactersBuilder(...) 
above pass elif charset_code == sre.LITERAL: # Regex '[a]' (single char) c = builder.code_to_char(charset_value) if chars_not_in_alphabet(builder._alphabet, c): raise IncompatibleWithAlphabet( f"Literal {c!r} is not in the specified alphabet" ) builder.add_char(c) elif charset_code == sre.RANGE: # Regex '[a-z]' (char range) low, high = charset_value chars = empty.join(map(builder.code_to_char, range(low, high + 1))) if len(chars) == len( invalid := set(chars_not_in_alphabet(alphabet, chars)) ): raise IncompatibleWithAlphabet( f"Charset '[{chr(low)}-{chr(high)}]' contains characters {invalid!r} " f"which are not in the specified alphabet" ) for c in chars: if isinstance(c, int): c = int_to_byte(c) if c not in invalid: builder.add_char(c) elif charset_code == sre.CATEGORY: # Regex '[\w]' (char category) builder.add_category(charset_value) else: # Currently there are no known code points other than # handled here. This code is just future proofing raise NotImplementedError(f"Unknown charset code: {charset_code}") return builder.strategy elif code == sre.ANY: # Regex '.' 
(any char) if is_unicode: assert alphabet is not None if context.flags & re.DOTALL: return alphabet return OneCharStringStrategy( unwrap_strategies(alphabet).intervals & charmap.query(exclude_characters="\n") ) else: if context.flags & re.DOTALL: return binary_char return binary_char.filter(lambda c: c != b"\n") elif code == sre.AT: # Regexes like '^...', '...$', '\bfoo', '\Bfoo' # An empty string (or newline) will match the token itself, but # we don't and can't check the position (eg '%' at the end) return st.just(empty) elif code == sre.SUBPATTERN: # Various groups: '(...)', '(:...)' or '(?P<name>...)' old_flags = context.flags context.flags = (context.flags | value[1]) & ~value[2] strat = _strategy(value[-1], context, is_unicode, alphabet=alphabet) context.flags = old_flags if value[0]: strat = update_group(value[0], strat) return strat elif code == sre.GROUPREF: # Regex '\\1' or '(?P=name)' (group reference) return reuse_group(value) elif code == sre.ASSERT: # Regex '(?=...)' or '(?<=...)' (positive lookahead/lookbehind) return recurse(value[1]) elif code == sre.ASSERT_NOT: # Regex '(?!...)' or '(?<!...)' (negative lookahead/lookbehind) return st.just(empty) elif code == sre.BRANCH: # Regex 'a|b|c' (branch) branches = [] errors = [] for branch in value[1]: try: branches.append(recurse(branch)) except IncompatibleWithAlphabet as e: errors.append(str(e)) if errors and not branches: raise IncompatibleWithAlphabet("\n".join(errors)) return st.one_of(branches) elif code in [sre.MIN_REPEAT, sre.MAX_REPEAT, POSSESSIVE_REPEAT]: # Regexes 'a?', 'a*', 'a+' and their non-greedy variants # (repeaters) at_least, at_most, subregex = value if at_most == sre.MAXREPEAT: at_most = None if at_least == 0 and at_most == 1: return st.just(empty) | recurse(subregex) return st.lists(recurse(subregex), min_size=at_least, max_size=at_most).map( empty.join ) elif code == sre.GROUPREF_EXISTS: # Regex '(?(id/name)yes-pattern|no-pattern)' # (if group exists choice) return 
group_conditional( value[0], recurse(value[1]), recurse(value[2]) if value[2] else st.just(empty), ) elif code == ATOMIC_GROUP: # pragma: no cover # new in Python 3.11 return _strategy(value, context, is_unicode, alphabet=alphabet) else: # Currently there are no known code points other than handled here. # This code is just future proofing raise NotImplementedError( f"Unknown code point: {code!r}. Please open an issue." )
BytesBuilder
python
pytorch__pytorch
test/dynamo/cpython/3_13/test_heapq.py
{ "start": 10870, "end": 10979 }
class ____(_TestHeap, __TestCase): module = py_heapq @skipUnless(c_heapq, 'requires _heapq')
TestHeapPython
python
python__mypy
mypy/stubgenc.py
{ "start": 956, "end": 3218 }
class ____(SignatureGenerator): def __init__( self, func_sigs: dict[str, str] | None = None, class_sigs: dict[str, str] | None = None ) -> None: """ Takes a mapping of function/method names to signatures and class name to class signatures (usually corresponds to __init__). """ self.func_sigs = func_sigs or {} self.class_sigs = class_sigs or {} @classmethod def from_doc_dir(cls, doc_dir: str) -> ExternalSignatureGenerator: """Instantiate from a directory of .rst files.""" all_sigs: list[Sig] = [] all_class_sigs: list[Sig] = [] for path in glob.glob(f"{doc_dir}/*.rst"): with open(path) as f: loc_sigs, loc_class_sigs = parse_all_signatures(f.readlines()) all_sigs += loc_sigs all_class_sigs += loc_class_sigs sigs = dict(find_unique_signatures(all_sigs)) class_sigs = dict(find_unique_signatures(all_class_sigs)) return ExternalSignatureGenerator(sigs, class_sigs) def get_function_sig( self, default_sig: FunctionSig, ctx: FunctionContext ) -> list[FunctionSig] | None: # method: if ( ctx.class_info and ctx.name in ("__new__", "__init__") and ctx.name not in self.func_sigs and ctx.class_info.name in self.class_sigs ): return [ FunctionSig( name=ctx.name, args=infer_arg_sig_from_anon_docstring(self.class_sigs[ctx.class_info.name]), ret_type=infer_method_ret_type(ctx.name), ) ] # function: if ctx.name not in self.func_sigs: return None inferred = [ FunctionSig( name=ctx.name, args=infer_arg_sig_from_anon_docstring(self.func_sigs[ctx.name]), ret_type=None, ) ] if ctx.class_info: return self.remove_self_type(inferred, ctx.class_info.self_var) else: return inferred def get_property_type(self, default_type: str | None, ctx: FunctionContext) -> str | None: return None
ExternalSignatureGenerator
python
google__pytype
pytype/rewrite/flow/conditions.py
{ "start": 168, "end": 274 }
class ____: """A condition that must be satisified for a binding to apply.""" @_frozen_dataclass
Condition
python
pytorch__pytorch
torch/_inductor/analysis/profile_analysis.py
{ "start": 12092, "end": 12308 }
class ____: flops: int bw: float latency: float # us achieved_flops: float achieved_bandwidth: float KernelNameMap = defaultdict[str, OrderedSet[KernelStats]] @dataclass(frozen=False)
KernelStats
python
wandb__wandb
wandb/sdk/interface/interface_shared.py
{ "start": 382, "end": 18888 }
class ____(InterfaceBase, abc.ABC): """Partially implemented InterfaceBase. There is little reason for this to exist separately from InterfaceBase, which itself is not a pure abstract class and has no other direct subclasses. Most methods are implemented in this class in terms of the protected _publish and _deliver methods defined by subclasses. """ def __init__(self) -> None: super().__init__() @abc.abstractmethod def _publish( self, record: pb.Record, *, nowait: bool = False, ) -> None: """Send a record to the internal service. Args: record: The record to send. This method assigns its stream ID. nowait: If true, this does not block on socket IO and is safe to call in W&B's asyncio thread, but it will also not slow down even if the socket is blocked and allow data to accumulate in the Python memory. """ @abc.abstractmethod def _deliver(self, record: pb.Record) -> "MailboxHandle[pb.Result]": """Send a record to the internal service and return a response handle. Args: record: The record to send. This method assigns its stream ID. Returns: A mailbox handle for waiting for a response. 
""" @override def _publish_output( self, outdata: pb.OutputRecord, *, nowait: bool = False, ) -> None: rec = pb.Record() rec.output.CopyFrom(outdata) self._publish(rec, nowait=nowait) @override def _publish_output_raw( self, outdata: pb.OutputRawRecord, *, nowait: bool = False, ) -> None: rec = pb.Record() rec.output_raw.CopyFrom(outdata) self._publish(rec, nowait=nowait) def _publish_cancel(self, cancel: pb.CancelRequest) -> None: rec = self._make_request(cancel=cancel) self._publish(rec) def _publish_tbdata(self, tbrecord: pb.TBRecord) -> None: rec = self._make_record(tbrecord=tbrecord) self._publish(rec) def _publish_partial_history( self, partial_history: pb.PartialHistoryRequest ) -> None: rec = self._make_request(partial_history=partial_history) self._publish(rec) def _publish_history(self, history: pb.HistoryRecord) -> None: rec = self._make_record(history=history) self._publish(rec) def _publish_preempting(self, preempt_rec: pb.RunPreemptingRecord) -> None: rec = self._make_record(preempting=preempt_rec) self._publish(rec) def _publish_telemetry(self, telem: tpb.TelemetryRecord) -> None: rec = self._make_record(telemetry=telem) self._publish(rec) def _publish_environment(self, environment: pb.EnvironmentRecord) -> None: rec = self._make_record(environment=environment) self._publish(rec) def _publish_job_input( self, job_input: pb.JobInputRequest ) -> MailboxHandle[pb.Result]: record = self._make_request(job_input=job_input) return self._deliver(record) def _make_stats(self, stats_dict: dict) -> pb.StatsRecord: stats = pb.StatsRecord() stats.stats_type = pb.StatsRecord.StatsType.SYSTEM stats.timestamp.GetCurrentTime() # todo: fix this, this is wrong :) for k, v in stats_dict.items(): item = stats.item.add() item.key = k item.value_json = json_dumps_safer(json_friendly(v)[0]) return stats def _make_request( # noqa: C901 self, get_summary: Optional[pb.GetSummaryRequest] = None, pause: Optional[pb.PauseRequest] = None, resume: Optional[pb.ResumeRequest] = None, 
status: Optional[pb.StatusRequest] = None, stop_status: Optional[pb.StopStatusRequest] = None, internal_messages: Optional[pb.InternalMessagesRequest] = None, network_status: Optional[pb.NetworkStatusRequest] = None, poll_exit: Optional[pb.PollExitRequest] = None, partial_history: Optional[pb.PartialHistoryRequest] = None, sampled_history: Optional[pb.SampledHistoryRequest] = None, run_start: Optional[pb.RunStartRequest] = None, check_version: Optional[pb.CheckVersionRequest] = None, log_artifact: Optional[pb.LogArtifactRequest] = None, download_artifact: Optional[pb.DownloadArtifactRequest] = None, link_artifact: Optional[pb.LinkArtifactRequest] = None, defer: Optional[pb.DeferRequest] = None, attach: Optional[pb.AttachRequest] = None, server_info: Optional[pb.ServerInfoRequest] = None, keepalive: Optional[pb.KeepaliveRequest] = None, run_status: Optional[pb.RunStatusRequest] = None, sender_mark: Optional[pb.SenderMarkRequest] = None, sender_read: Optional[pb.SenderReadRequest] = None, sync_finish: Optional[pb.SyncFinishRequest] = None, status_report: Optional[pb.StatusReportRequest] = None, cancel: Optional[pb.CancelRequest] = None, summary_record: Optional[pb.SummaryRecordRequest] = None, telemetry_record: Optional[pb.TelemetryRecordRequest] = None, get_system_metrics: Optional[pb.GetSystemMetricsRequest] = None, python_packages: Optional[pb.PythonPackagesRequest] = None, job_input: Optional[pb.JobInputRequest] = None, run_finish_without_exit: Optional[pb.RunFinishWithoutExitRequest] = None, probe_system_info: Optional[pb.ProbeSystemInfoRequest] = None, ) -> pb.Record: request = pb.Request() if get_summary: request.get_summary.CopyFrom(get_summary) elif pause: request.pause.CopyFrom(pause) elif resume: request.resume.CopyFrom(resume) elif status: request.status.CopyFrom(status) elif stop_status: request.stop_status.CopyFrom(stop_status) elif internal_messages: request.internal_messages.CopyFrom(internal_messages) elif network_status: 
request.network_status.CopyFrom(network_status) elif poll_exit: request.poll_exit.CopyFrom(poll_exit) elif partial_history: request.partial_history.CopyFrom(partial_history) elif sampled_history: request.sampled_history.CopyFrom(sampled_history) elif run_start: request.run_start.CopyFrom(run_start) elif check_version: request.check_version.CopyFrom(check_version) elif log_artifact: request.log_artifact.CopyFrom(log_artifact) elif download_artifact: request.download_artifact.CopyFrom(download_artifact) elif link_artifact: request.link_artifact.CopyFrom(link_artifact) elif defer: request.defer.CopyFrom(defer) elif attach: request.attach.CopyFrom(attach) elif server_info: request.server_info.CopyFrom(server_info) elif keepalive: request.keepalive.CopyFrom(keepalive) elif run_status: request.run_status.CopyFrom(run_status) elif sender_mark: request.sender_mark.CopyFrom(sender_mark) elif sender_read: request.sender_read.CopyFrom(sender_read) elif cancel: request.cancel.CopyFrom(cancel) elif status_report: request.status_report.CopyFrom(status_report) elif summary_record: request.summary_record.CopyFrom(summary_record) elif telemetry_record: request.telemetry_record.CopyFrom(telemetry_record) elif get_system_metrics: request.get_system_metrics.CopyFrom(get_system_metrics) elif sync_finish: request.sync_finish.CopyFrom(sync_finish) elif python_packages: request.python_packages.CopyFrom(python_packages) elif job_input: request.job_input.CopyFrom(job_input) elif run_finish_without_exit: request.run_finish_without_exit.CopyFrom(run_finish_without_exit) elif probe_system_info: request.probe_system_info.CopyFrom(probe_system_info) else: raise Exception("Invalid request") record = self._make_record(request=request) # All requests do not get persisted record.control.local = True if status_report: record.control.flow_control = True return record def _make_record( # noqa: C901 self, run: Optional[pb.RunRecord] = None, config: Optional[pb.ConfigRecord] = None, files: 
Optional[pb.FilesRecord] = None, summary: Optional[pb.SummaryRecord] = None, history: Optional[pb.HistoryRecord] = None, stats: Optional[pb.StatsRecord] = None, exit: Optional[pb.RunExitRecord] = None, artifact: Optional[pb.ArtifactRecord] = None, tbrecord: Optional[pb.TBRecord] = None, alert: Optional[pb.AlertRecord] = None, final: Optional[pb.FinalRecord] = None, metric: Optional[pb.MetricRecord] = None, header: Optional[pb.HeaderRecord] = None, footer: Optional[pb.FooterRecord] = None, request: Optional[pb.Request] = None, telemetry: Optional[tpb.TelemetryRecord] = None, preempting: Optional[pb.RunPreemptingRecord] = None, use_artifact: Optional[pb.UseArtifactRecord] = None, output: Optional[pb.OutputRecord] = None, output_raw: Optional[pb.OutputRawRecord] = None, environment: Optional[pb.EnvironmentRecord] = None, ) -> pb.Record: record = pb.Record() if run: record.run.CopyFrom(run) elif config: record.config.CopyFrom(config) elif summary: record.summary.CopyFrom(summary) elif history: record.history.CopyFrom(history) elif files: record.files.CopyFrom(files) elif stats: record.stats.CopyFrom(stats) elif exit: record.exit.CopyFrom(exit) elif artifact: record.artifact.CopyFrom(artifact) elif tbrecord: record.tbrecord.CopyFrom(tbrecord) elif alert: record.alert.CopyFrom(alert) elif final: record.final.CopyFrom(final) elif header: record.header.CopyFrom(header) elif footer: record.footer.CopyFrom(footer) elif request: record.request.CopyFrom(request) elif telemetry: record.telemetry.CopyFrom(telemetry) elif metric: record.metric.CopyFrom(metric) elif preempting: record.preempting.CopyFrom(preempting) elif use_artifact: record.use_artifact.CopyFrom(use_artifact) elif output: record.output.CopyFrom(output) elif output_raw: record.output_raw.CopyFrom(output_raw) elif environment: record.environment.CopyFrom(environment) else: raise Exception("Invalid record") return record def _publish_defer(self, state: "pb.DeferRequest.DeferState.V") -> None: defer = 
pb.DeferRequest(state=state) rec = self._make_request(defer=defer) rec.control.local = True self._publish(rec) def publish_defer(self, state: int = 0) -> None: self._publish_defer(cast("pb.DeferRequest.DeferState.V", state)) def _publish_header(self, header: pb.HeaderRecord) -> None: rec = self._make_record(header=header) self._publish(rec) def publish_footer(self) -> None: footer = pb.FooterRecord() rec = self._make_record(footer=footer) self._publish(rec) def publish_final(self) -> None: final = pb.FinalRecord() rec = self._make_record(final=final) self._publish(rec) def _publish_pause(self, pause: pb.PauseRequest) -> None: rec = self._make_request(pause=pause) self._publish(rec) def _publish_resume(self, resume: pb.ResumeRequest) -> None: rec = self._make_request(resume=resume) self._publish(rec) def _publish_run(self, run: pb.RunRecord) -> None: rec = self._make_record(run=run) self._publish(rec) def _publish_config(self, cfg: pb.ConfigRecord) -> None: rec = self._make_record(config=cfg) self._publish(rec) def _publish_summary(self, summary: pb.SummaryRecord) -> None: rec = self._make_record(summary=summary) self._publish(rec) def _publish_metric(self, metric: pb.MetricRecord) -> None: rec = self._make_record(metric=metric) self._publish(rec) def publish_stats(self, stats_dict: dict) -> None: stats = self._make_stats(stats_dict) rec = self._make_record(stats=stats) self._publish(rec) def _publish_python_packages( self, python_packages: pb.PythonPackagesRequest ) -> None: rec = self._make_request(python_packages=python_packages) self._publish(rec) def _publish_files(self, files: pb.FilesRecord) -> None: rec = self._make_record(files=files) self._publish(rec) def _publish_use_artifact(self, use_artifact: pb.UseArtifactRecord) -> Any: rec = self._make_record(use_artifact=use_artifact) self._publish(rec) def _publish_probe_system_info( self, probe_system_info: pb.ProbeSystemInfoRequest ) -> None: record = self._make_request(probe_system_info=probe_system_info) 
self._publish(record) def _deliver_artifact( self, log_artifact: pb.LogArtifactRequest, ) -> MailboxHandle[pb.Result]: rec = self._make_request(log_artifact=log_artifact) return self._deliver(rec) def _deliver_download_artifact( self, download_artifact: pb.DownloadArtifactRequest ) -> MailboxHandle[pb.Result]: rec = self._make_request(download_artifact=download_artifact) return self._deliver(rec) def _deliver_link_artifact( self, link_artifact: pb.LinkArtifactRequest ) -> MailboxHandle[pb.Result]: rec = self._make_request(link_artifact=link_artifact) return self._deliver(rec) def _publish_artifact(self, proto_artifact: pb.ArtifactRecord) -> None: rec = self._make_record(artifact=proto_artifact) self._publish(rec) def _publish_alert(self, proto_alert: pb.AlertRecord) -> None: rec = self._make_record(alert=proto_alert) self._publish(rec) def _deliver_status( self, status: pb.StatusRequest, ) -> MailboxHandle[pb.Result]: req = self._make_request(status=status) return self._deliver(req) def _publish_exit(self, exit_data: pb.RunExitRecord) -> None: rec = self._make_record(exit=exit_data) self._publish(rec) def _publish_keepalive(self, keepalive: pb.KeepaliveRequest) -> None: record = self._make_request(keepalive=keepalive) self._publish(record) def _deliver_shutdown(self) -> MailboxHandle[pb.Result]: request = pb.Request(shutdown=pb.ShutdownRequest()) record = self._make_record(request=request) return self._deliver(record) def _deliver_run(self, run: pb.RunRecord) -> MailboxHandle[pb.Result]: record = self._make_record(run=run) return self._deliver(record) def _deliver_finish_sync( self, sync_finish: pb.SyncFinishRequest, ) -> MailboxHandle[pb.Result]: record = self._make_request(sync_finish=sync_finish) return self._deliver(record) def _deliver_run_start( self, run_start: pb.RunStartRequest, ) -> MailboxHandle[pb.Result]: record = self._make_request(run_start=run_start) return self._deliver(record) def _deliver_get_summary( self, get_summary: pb.GetSummaryRequest, ) -> 
MailboxHandle[pb.Result]: record = self._make_request(get_summary=get_summary) return self._deliver(record) def _deliver_get_system_metrics( self, get_system_metrics: pb.GetSystemMetricsRequest ) -> MailboxHandle[pb.Result]: record = self._make_request(get_system_metrics=get_system_metrics) return self._deliver(record) def _deliver_exit( self, exit_data: pb.RunExitRecord, ) -> MailboxHandle[pb.Result]: record = self._make_record(exit=exit_data) return self._deliver(record) def _deliver_poll_exit( self, poll_exit: pb.PollExitRequest, ) -> MailboxHandle[pb.Result]: record = self._make_request(poll_exit=poll_exit) return self._deliver(record) def _deliver_finish_without_exit( self, run_finish_without_exit: pb.RunFinishWithoutExitRequest ) -> MailboxHandle[pb.Result]: record = self._make_request(run_finish_without_exit=run_finish_without_exit) return self._deliver(record) def _deliver_stop_status( self, stop_status: pb.StopStatusRequest, ) -> MailboxHandle[pb.Result]: record = self._make_request(stop_status=stop_status) return self._deliver(record) def _deliver_attach( self, attach: pb.AttachRequest, ) -> MailboxHandle[pb.Result]: record = self._make_request(attach=attach) return self._deliver(record) def _deliver_network_status( self, network_status: pb.NetworkStatusRequest ) -> MailboxHandle[pb.Result]: record = self._make_request(network_status=network_status) return self._deliver(record) def _deliver_internal_messages( self, internal_message: pb.InternalMessagesRequest ) -> MailboxHandle[pb.Result]: record = self._make_request(internal_messages=internal_message) return self._deliver(record) def _deliver_request_sampled_history( self, sampled_history: pb.SampledHistoryRequest ) -> MailboxHandle[pb.Result]: record = self._make_request(sampled_history=sampled_history) return self._deliver(record) def _deliver_request_run_status( self, run_status: pb.RunStatusRequest ) -> MailboxHandle[pb.Result]: record = self._make_request(run_status=run_status) return 
self._deliver(record)
InterfaceShared
python
django__django
django/contrib/postgres/operations.py
{ "start": 6505, "end": 7979 }
class ____(Operation): def __init__(self, name, locale, *, provider="libc", deterministic=True): self.name = name self.locale = locale self.provider = provider self.deterministic = deterministic def state_forwards(self, app_label, state): pass def deconstruct(self): kwargs = {"name": self.name, "locale": self.locale} if self.provider and self.provider != "libc": kwargs["provider"] = self.provider if self.deterministic is False: kwargs["deterministic"] = self.deterministic return ( self.__class__.__qualname__, [], kwargs, ) def create_collation(self, schema_editor): args = {"locale": schema_editor.quote_name(self.locale)} if self.provider != "libc": args["provider"] = schema_editor.quote_name(self.provider) if self.deterministic is False: args["deterministic"] = "false" schema_editor.execute( "CREATE COLLATION %(name)s (%(args)s)" % { "name": schema_editor.quote_name(self.name), "args": ", ".join( f"{option}={value}" for option, value in args.items() ), } ) def remove_collation(self, schema_editor): schema_editor.execute( "DROP COLLATION %s" % schema_editor.quote_name(self.name), )
CollationOperation
python
fabric__fabric
fabric/tunnels.py
{ "start": 3905, "end": 5415 }
class ____(ExceptionHandlingThread): """ Bidirectionally forward data between an SSH channel and local socket. .. versionadded:: 2.0 """ def __init__(self, channel, sock, finished): self.channel = channel self.sock = sock self.finished = finished self.socket_chunk_size = 1024 self.channel_chunk_size = 1024 super().__init__() def _run(self): try: empty_sock, empty_chan = None, None while not self.finished.is_set(): r, w, x = select.select([self.sock, self.channel], [], [], 1) if self.sock in r: empty_sock = self.read_and_write( self.sock, self.channel, self.socket_chunk_size ) if self.channel in r: empty_chan = self.read_and_write( self.channel, self.sock, self.channel_chunk_size ) if empty_sock or empty_chan: break finally: self.channel.close() self.sock.close() def read_and_write(self, reader, writer, chunk_size): """ Read ``chunk_size`` from ``reader``, writing result to ``writer``. Returns ``None`` if successful, or ``True`` if the read was empty. .. versionadded:: 2.0 """ data = reader.recv(chunk_size) if len(data) == 0: return True writer.sendall(data)
Tunnel
python
conda__conda
conda/auxlib/entity.py
{ "start": 17398, "end": 17571 }
class ____(Field): _type = bool def box(self, instance, instance_type, val): return None if val is None else bool(val) BoolField = BooleanField
BooleanField
python
django__django
tests/migrations/migrations_test_apps/conflicting_app_with_dependencies/migrations/0002_second.py
{ "start": 43, "end": 573 }
class ____(migrations.Migration): dependencies = [ ("conflicting_app_with_dependencies", "0001_initial"), ("migrated_app", "0001_initial"), ] operations = [ migrations.DeleteModel("Tribble"), migrations.RemoveField("Author", "silly_field"), migrations.AddField("Author", "rating", models.IntegerField(default=0)), migrations.CreateModel( "Book", [ ("id", models.AutoField(primary_key=True)), ], ), ]
Migration
python
numpy__numpy
numpy/linalg/_linalg.py
{ "start": 1975, "end": 2066 }
class ____(NamedTuple): eigenvalues: NDArray[Any] eigenvectors: NDArray[Any]
EigResult
python
doocs__leetcode
solution/3500-3599/3598.Longest Common Prefix Between Adjacent Strings After Removals/Solution.py
{ "start": 0, "end": 927 }
class ____: def longestCommonPrefix(self, words: List[str]) -> List[int]: @cache def calc(s: str, t: str) -> int: k = 0 for a, b in zip(s, t): if a != b: break k += 1 return k def add(i: int, j: int): if 0 <= i < n and 0 <= j < n: sl.add(calc(words[i], words[j])) def remove(i: int, j: int): if 0 <= i < n and 0 <= j < n: sl.remove(calc(words[i], words[j])) n = len(words) sl = SortedList(calc(a, b) for a, b in pairwise(words)) ans = [] for i in range(n): remove(i, i + 1) remove(i - 1, i) add(i - 1, i + 1) ans.append(sl[-1] if sl and sl[-1] > 0 else 0) remove(i - 1, i + 1) add(i - 1, i) add(i, i + 1) return ans
Solution
python
sympy__sympy
sympy/stats/stochastic_process_types.py
{ "start": 61470, "end": 65766 }
class ____(DiscreteTimeStochasticProcess): """ The Bernoulli process consists of repeated independent Bernoulli process trials with the same parameter `p`. It's assumed that the probability `p` applies to every trial and that the outcomes of each trial are independent of all the rest. Therefore Bernoulli Process is Discrete State and Discrete Time Stochastic Process. Parameters ========== sym : Symbol/str success : Integer/str The event which is considered to be success. Default: 1. failure: Integer/str The event which is considered to be failure. Default: 0. p : Real Number between 0 and 1 Represents the probability of getting success. Examples ======== >>> from sympy.stats import BernoulliProcess, P, E >>> from sympy import Eq, Gt >>> B = BernoulliProcess("B", p=0.7, success=1, failure=0) >>> B.state_space {0, 1} >>> B.p.round(2) 0.70 >>> B.success 1 >>> B.failure 0 >>> X = B[1] + B[2] + B[3] >>> P(Eq(X, 0)).round(2) 0.03 >>> P(Eq(X, 2)).round(2) 0.44 >>> P(Eq(X, 4)).round(2) 0 >>> P(Gt(X, 1)).round(2) 0.78 >>> P(Eq(B[1], 0) & Eq(B[2], 1) & Eq(B[3], 0) & Eq(B[4], 1)).round(2) 0.04 >>> B.joint_distribution(B[1], B[2]) JointDistributionHandmade(Lambda((B[1], B[2]), Piecewise((0.7, Eq(B[1], 1)), (0.3, Eq(B[1], 0)), (0, True))*Piecewise((0.7, Eq(B[2], 1)), (0.3, Eq(B[2], 0)), (0, True)))) >>> E(2*B[1] + B[2]).round(2) 2.10 >>> P(B[1] < 1).round(2) 0.30 References ========== .. [1] https://en.wikipedia.org/wiki/Bernoulli_process .. 
[2] https://mathcs.clarku.edu/~djoyce/ma217/bernoulli.pdf """ index_set = S.Naturals0 def __new__(cls, sym, p, success=1, failure=0): _value_check(p >= 0 and p <= 1, 'Value of p must be between 0 and 1.') sym = _symbol_converter(sym) p = _sympify(p) success = _sym_sympify(success) failure = _sym_sympify(failure) return Basic.__new__(cls, sym, p, success, failure) @property def symbol(self): return self.args[0] @property def p(self): return self.args[1] @property def success(self): return self.args[2] @property def failure(self): return self.args[3] @property def state_space(self): return _set_converter([self.success, self.failure]) def distribution(self, key=None): if key is None: self._deprecation_warn_distribution() return BernoulliDistribution(self.p) return BernoulliDistribution(self.p, self.success, self.failure) def simple_rv(self, rv): return Bernoulli(rv.name, p=self.p, succ=self.success, fail=self.failure) def expectation(self, expr, condition=None, evaluate=True, **kwargs): """ Computes expectation. Parameters ========== expr : RandomIndexedSymbol, Relational, Logic Condition for which expectation has to be computed. Must contain a RandomIndexedSymbol of the process. condition : Relational, Logic The given conditions under which computations should be done. Returns ======= Expectation of the RandomIndexedSymbol. """ return _SubstituteRV._expectation(expr, condition, evaluate, **kwargs) def probability(self, condition, given_condition=None, evaluate=True, **kwargs): """ Computes probability. Parameters ========== condition : Relational Condition for which probability has to be computed. Must contain a RandomIndexedSymbol of the process. given_condition : Relational, Logic The given conditions under which computations should be done. Returns ======= Probability of the condition. 
""" return _SubstituteRV._probability(condition, given_condition, evaluate, **kwargs) def density(self, x): return Piecewise((self.p, Eq(x, self.success)), (1 - self.p, Eq(x, self.failure)), (S.Zero, True))
BernoulliProcess
python
google__jax
jax/_src/custom_batching.py
{ "start": 1618, "end": 6630 }
class ____: """Customize the vmap behavior of a JAX-transformable function. This decorator is used to customize the behavior of a JAX function under the :func:`jax.vmap` transformation. A ``custom_vmap``-decorated function will mostly (see below for caveats) have the same behavior as the underlying function, except when batched using :py:func:`jax.vmap`. When batched, the rule defined using :py:func:`~jax.custom_batching.custom_vmap.def_vmap` will be used. For example: >>> @jax.custom_batching.custom_vmap ... def f(x, y): ... return x + y ... >>> @f.def_vmap ... def f_vmap_rule(axis_size, in_batched, xs, ys): ... assert all(in_batched) ... assert xs.shape[0] == axis_size ... assert ys.shape[0] == axis_size ... out_batched = True ... return xs * ys, out_batched ... >>> xs = jnp.arange(3) >>> ys = jnp.arange(1, 4) >>> jax.vmap(f)(xs, ys) # prints xs * ys instead of xs + ys Array([0, 2, 6], dtype=int32) Of note, ``custom_vmap`` functions do not support reverse-mode autodiff. To customize both vmap and reverse-mode autodiff, combine ``custom_vmap`` with :py:class:`jax.custom_vjp`. For example: >>> @jax.custom_vjp ... @jax.custom_batching.custom_vmap ... def f(x, y): ... return jnp.sin(x) * y ... >>> @f.def_vmap ... def f_vmap_rule(axis_size, in_batched, xs, ys): ... return jnp.cos(xs) * ys, True ... >>> def f_fwd(x, y): ... return f(x, y), (jnp.cos(x), jnp.sin(x), y) ... >>> def f_bwd(res, g): ... cos_x, sin_x, y = res ... return (cos_x * g * y, sin_x * g) ... >>> f.defvjp(f_fwd, f_bwd) >>> jax.vmap(f)(jnp.zeros(3), jnp.ones(3)) Array([1., 1., 1.], dtype=float32) >>> jax.grad(f)(jnp.zeros(()), jnp.ones(())) Array(1., dtype=float32) Note that the :py:class:`jax.custom_vjp` must be on the outside, wrapping the ``custom_vmap``-decorated function. 
""" fun: Callable[..., Any] vmap_rule: Callable[..., tuple[Any, Any]] | None def __init__(self, fun: Callable[..., Any]): functools.update_wrapper(self, fun) self.fun = fun self.vmap_rule = None __getattr__ = custom_api_util.forward_attr def def_vmap( self, vmap_rule: Callable[..., tuple[Any, Any]], ) -> Callable[..., tuple[Any, Any]]: """Define the vmap rule for this custom_vmap function. Args: vmap_rule: A function that implements the vmap rule. This function should accept the following arguments: (1) an integer ``axis_size`` as its first argument, (2) a pytree of booleans with the same structure as the inputs to the function, specifying whether each argument is batched, and (3) the batched arguments. It should return a tuple of the batched output and a pytree of booleans with the same structure as the output, specifying whether each output element is batched. See the documentation for :py:func:`jax.custom_batching.custom_vmap` for some examples. Returns: This method passes the rule through, returning ``vmap_rule`` unchanged. """ self.vmap_rule = vmap_rule return vmap_rule @traceback_util.api_boundary def __call__(self, *args, **kwargs): debug_fun = api_util.debug_info("custom_vmap fun", self.fun, args, kwargs) try: args = api_util.resolve_kwargs(self.fun, args, kwargs) except TypeError as e: raise TypeError( "The input arguments to the custom_vmap-decorated function " f"{debug_fun.func_name} could not be resolved to positional-only " f"arguments. 
Binding failed with the error:\n{e}" ) from e if not self.vmap_rule: raise AttributeError( f"No batching rule defined for custom_vmap function {debug_fun.func_name} " "using def_vmap.") args_flat, in_tree = tree_flatten(args) flat_fun, out_tree = api_util.flatten_fun_nokwargs( lu.wrap_init(self.fun, debug_info=debug_fun), in_tree) in_avals = [core.get_aval(x) for x in args_flat] jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(flat_fun, in_avals) closed_call = core.ClosedJaxpr(pe.convert_constvars_jaxpr(jaxpr), ()) in_tree = treedef_tuple((tree_structure(consts), in_tree)) assert self.vmap_rule is not None debug_rule = api_util.debug_info("custom_vmap rule", self.vmap_rule, (0, args, args), {}) out_flat = custom_vmap_p.bind(*consts, *args_flat, call=closed_call, rule=ClosedRule(self.vmap_rule, debug_rule), in_tree=in_tree, out_tree=out_tree()) return tree_unflatten(out_tree(), out_flat) ### utils # Define a class, instead of making a function closing over `rule`, so # that we can override __str__
custom_vmap
python
sympy__sympy
sympy/combinatorics/coset_table.py
{ "start": 419, "end": 43316 }
class ____(DefaultPrinting): # coset_table: Mathematically a coset table # represented using a list of lists # alpha: Mathematically a coset (precisely, a live coset) # represented by an integer between i with 1 <= i <= n # alpha in c # x: Mathematically an element of "A" (set of generators and # their inverses), represented using "FpGroupElement" # fp_grp: Finitely Presented Group with < X|R > as presentation. # H: subgroup of fp_grp. # NOTE: We start with H as being only a list of words in generators # of "fp_grp". Since `.subgroup` method has not been implemented. r""" Properties ========== [1] `0 \in \Omega` and `\tau(1) = \epsilon` [2] `\alpha^x = \beta \Leftrightarrow \beta^{x^{-1}} = \alpha` [3] If `\alpha^x = \beta`, then `H \tau(\alpha)x = H \tau(\beta)` [4] `\forall \alpha \in \Omega, 1^{\tau(\alpha)} = \alpha` References ========== .. [1] Holt, D., Eick, B., O'Brien, E. "Handbook of Computational Group Theory" .. [2] John J. Cannon; Lucien A. Dimino; George Havas; Jane M. Watson Mathematics of Computation, Vol. 27, No. 123. (Jul., 1973), pp. 463-490. "Implementation and Analysis of the Todd-Coxeter Algorithm" """ # default limit for the number of cosets allowed in a # coset enumeration. coset_table_max_limit = 4096000 # limit for the current instance coset_table_limit = None # maximum size of deduction stack above or equal to # which it is emptied max_stack_size = 100 def __init__(self, fp_grp, subgroup, max_cosets=None): if not max_cosets: max_cosets = CosetTable.coset_table_max_limit self.fp_group = fp_grp self.subgroup = subgroup self.coset_table_limit = max_cosets # "p" is setup independent of Omega and n self.p = [0] # a list of the form `[gen_1, gen_1^{-1}, ... , gen_k, gen_k^{-1}]` self.A = list(chain.from_iterable((gen, gen**-1) \ for gen in self.fp_group.generators)) #P[alpha, x] Only defined when alpha^x is defined. 
self.P = [[None]*len(self.A)] # the mathematical coset table which is a list of lists self.table = [[None]*len(self.A)] self.A_dict = {x: self.A.index(x) for x in self.A} self.A_dict_inv = {} for x, index in self.A_dict.items(): if index % 2 == 0: self.A_dict_inv[x] = self.A_dict[x] + 1 else: self.A_dict_inv[x] = self.A_dict[x] - 1 # used in the coset-table based method of coset enumeration. Each of # the element is called a "deduction" which is the form (alpha, x) whenever # a value is assigned to alpha^x during a definition or "deduction process" self.deduction_stack = [] # Attributes for modified methods. H = self.subgroup self._grp = free_group(', ' .join(["a_%d" % i for i in range(len(H))]))[0] self.P = [[None]*len(self.A)] self.p_p = {0:self._grp.identity} @property def omega(self): """Set of live cosets. """ return [coset for coset in range(len(self.p)) if self.p[coset] == coset] def copy(self): """ Return a shallow copy of Coset Table instance ``self``. """ self_copy = self.__class__(self.fp_group, self.subgroup) self_copy.table = [list(perm_rep) for perm_rep in self.table] self_copy.p = list(self.p) self_copy.deduction_stack = list(self.deduction_stack) return self_copy def __str__(self): return "Coset Table on %s with %s as subgroup generators" \ % (self.fp_group, self.subgroup) __repr__ = __str__ @property def n(self): """The number `n` represents the length of the sublist containing the live cosets. """ if not self.table: return 0 return max(self.omega) + 1 # Pg. 152 [1] def is_complete(self): r""" The coset table is called complete if it has no undefined entries on the live cosets; that is, `\alpha^x` is defined for all `\alpha \in \Omega` and `x \in A`. """ return not any(None in self.table[coset] for coset in self.omega) # Pg. 153 [1] def define(self, alpha, x, modified=False): r""" This routine is used in the relator-based strategy of Todd-Coxeter algorithm if some `\alpha^x` is undefined. 
We check whether there is space available for defining a new coset. If there is enough space then we remedy this by adjoining a new coset `\beta` to `\Omega` (i.e to set of live cosets) and put that equal to `\alpha^x`, then make an assignment satisfying Property[1]. If there is not enough space then we halt the Coset Table creation. The maximum amount of space that can be used by Coset Table can be manipulated using the class variable ``CosetTable.coset_table_max_limit``. See Also ======== define_c """ A = self.A table = self.table len_table = len(table) if len_table >= self.coset_table_limit: # abort the further generation of cosets raise ValueError("the coset enumeration has defined more than " "%s cosets. Try with a greater value max number of cosets " % self.coset_table_limit) table.append([None]*len(A)) self.P.append([None]*len(self.A)) # beta is the new coset generated beta = len_table self.p.append(beta) table[alpha][self.A_dict[x]] = beta table[beta][self.A_dict_inv[x]] = alpha # P[alpha][x] = epsilon, P[beta][x**-1] = epsilon if modified: self.P[alpha][self.A_dict[x]] = self._grp.identity self.P[beta][self.A_dict_inv[x]] = self._grp.identity self.p_p[beta] = self._grp.identity def define_c(self, alpha, x): r""" A variation of ``define`` routine, described on Pg. 165 [1], used in the coset table-based strategy of Todd-Coxeter algorithm. It differs from ``define`` routine in that for each definition it also adds the tuple `(\alpha, x)` to the deduction stack. See Also ======== define """ A = self.A table = self.table len_table = len(table) if len_table >= self.coset_table_limit: # abort the further generation of cosets raise ValueError("the coset enumeration has defined more than " "%s cosets. 
Try with a greater value max number of cosets " % self.coset_table_limit) table.append([None]*len(A)) # beta is the new coset generated beta = len_table self.p.append(beta) table[alpha][self.A_dict[x]] = beta table[beta][self.A_dict_inv[x]] = alpha # append to deduction stack self.deduction_stack.append((alpha, x)) def scan_c(self, alpha, word): """ A variation of ``scan`` routine, described on pg. 165 of [1], which puts at tuple, whenever a deduction occurs, to deduction stack. See Also ======== scan, scan_check, scan_and_fill, scan_and_fill_c """ # alpha is an integer representing a "coset" # since scanning can be in two cases # 1. for alpha=0 and w in Y (i.e generating set of H) # 2. alpha in Omega (set of live cosets), w in R (relators) A_dict = self.A_dict A_dict_inv = self.A_dict_inv table = self.table f = alpha i = 0 r = len(word) b = alpha j = r - 1 # list of union of generators and their inverses while i <= j and table[f][A_dict[word[i]]] is not None: f = table[f][A_dict[word[i]]] i += 1 if i > j: if f != b: self.coincidence_c(f, b) return while j >= i and table[b][A_dict_inv[word[j]]] is not None: b = table[b][A_dict_inv[word[j]]] j -= 1 if j < i: # we have an incorrect completed scan with coincidence f ~ b # run the "coincidence" routine self.coincidence_c(f, b) elif j == i: # deduction process table[f][A_dict[word[i]]] = b table[b][A_dict_inv[word[i]]] = f self.deduction_stack.append((f, word[i])) # otherwise scan is incomplete and yields no information # alpha, beta coincide, i.e. alpha, beta represent the pair of cosets where # coincidence occurs def coincidence_c(self, alpha, beta): """ A variation of ``coincidence`` routine used in the coset-table based method of coset enumeration. The only difference being on addition of a new coset in coset table(i.e new coset introduction), then it is appended to ``deduction_stack``. 
See Also ======== coincidence """ A_dict = self.A_dict A_dict_inv = self.A_dict_inv table = self.table # behaves as a queue q = [] self.merge(alpha, beta, q) while len(q) > 0: gamma = q.pop(0) for x in A_dict: delta = table[gamma][A_dict[x]] if delta is not None: table[delta][A_dict_inv[x]] = None # only line of difference from ``coincidence`` routine self.deduction_stack.append((delta, x**-1)) mu = self.rep(gamma) nu = self.rep(delta) if table[mu][A_dict[x]] is not None: self.merge(nu, table[mu][A_dict[x]], q) elif table[nu][A_dict_inv[x]] is not None: self.merge(mu, table[nu][A_dict_inv[x]], q) else: table[mu][A_dict[x]] = nu table[nu][A_dict_inv[x]] = mu def scan(self, alpha, word, y=None, fill=False, modified=False): r""" ``scan`` performs a scanning process on the input ``word``. It first locates the largest prefix ``s`` of ``word`` for which `\alpha^s` is defined (i.e is not ``None``), ``s`` may be empty. Let ``word=sv``, let ``t`` be the longest suffix of ``v`` for which `\alpha^{t^{-1}}` is defined, and let ``v=ut``. Then three possibilities are there: 1. If ``t=v``, then we say that the scan completes, and if, in addition `\alpha^s = \alpha^{t^{-1}}`, then we say that the scan completes correctly. 2. It can also happen that scan does not complete, but `|u|=1`; that is, the word ``u`` consists of a single generator `x \in A`. In that case, if `\alpha^s = \beta` and `\alpha^{t^{-1}} = \gamma`, then we can set `\beta^x = \gamma` and `\gamma^{x^{-1}} = \beta`. These assignments are known as deductions and enable the scan to complete correctly. 3. See ``coicidence`` routine for explanation of third condition. Notes ===== The code for the procedure of scanning `\alpha \in \Omega` under `w \in A*` is defined on pg. 155 [1] See Also ======== scan_c, scan_check, scan_and_fill, scan_and_fill_c Scan and Fill ============= Performed when the default argument fill=True. 
Modified Scan ============= Performed when the default argument modified=True """ # alpha is an integer representing a "coset" # since scanning can be in two cases # 1. for alpha=0 and w in Y (i.e generating set of H) # 2. alpha in Omega (set of live cosets), w in R (relators) A_dict = self.A_dict A_dict_inv = self.A_dict_inv table = self.table f = alpha i = 0 r = len(word) b = alpha j = r - 1 b_p = y if modified: f_p = self._grp.identity flag = 0 while fill or flag == 0: flag = 1 while i <= j and table[f][A_dict[word[i]]] is not None: if modified: f_p = f_p*self.P[f][A_dict[word[i]]] f = table[f][A_dict[word[i]]] i += 1 if i > j: if f != b: if modified: self.modified_coincidence(f, b, f_p**-1*y) else: self.coincidence(f, b) return while j >= i and table[b][A_dict_inv[word[j]]] is not None: if modified: b_p = b_p*self.P[b][self.A_dict_inv[word[j]]] b = table[b][A_dict_inv[word[j]]] j -= 1 if j < i: # we have an incorrect completed scan with coincidence f ~ b # run the "coincidence" routine if modified: self.modified_coincidence(f, b, f_p**-1*b_p) else: self.coincidence(f, b) elif j == i: # deduction process table[f][A_dict[word[i]]] = b table[b][A_dict_inv[word[i]]] = f if modified: self.P[f][self.A_dict[word[i]]] = f_p**-1*b_p self.P[b][self.A_dict_inv[word[i]]] = b_p**-1*f_p return elif fill: self.define(f, word[i], modified=modified) # otherwise scan is incomplete and yields no information # used in the low-index subgroups algorithm def scan_check(self, alpha, word): r""" Another version of ``scan`` routine, described on, it checks whether `\alpha` scans correctly under `word`, it is a straightforward modification of ``scan``. ``scan_check`` returns ``False`` (rather than calling ``coincidence``) if the scan completes incorrectly; otherwise it returns ``True``. See Also ======== scan, scan_c, scan_and_fill, scan_and_fill_c """ # alpha is an integer representing a "coset" # since scanning can be in two cases # 1. 
for alpha=0 and w in Y (i.e generating set of H) # 2. alpha in Omega (set of live cosets), w in R (relators) A_dict = self.A_dict A_dict_inv = self.A_dict_inv table = self.table f = alpha i = 0 r = len(word) b = alpha j = r - 1 while i <= j and table[f][A_dict[word[i]]] is not None: f = table[f][A_dict[word[i]]] i += 1 if i > j: return f == b while j >= i and table[b][A_dict_inv[word[j]]] is not None: b = table[b][A_dict_inv[word[j]]] j -= 1 if j < i: # we have an incorrect completed scan with coincidence f ~ b # return False, instead of calling coincidence routine return False elif j == i: # deduction process table[f][A_dict[word[i]]] = b table[b][A_dict_inv[word[i]]] = f return True def merge(self, k, lamda, q, w=None, modified=False): """ Merge two classes with representatives ``k`` and ``lamda``, described on Pg. 157 [1] (for pseudocode), start by putting ``p[k] = lamda``. It is more efficient to choose the new representative from the larger of the two classes being merged, i.e larger among ``k`` and ``lamda``. procedure ``merge`` performs the merging operation, adds the deleted class representative to the queue ``q``. Parameters ========== 'k', 'lamda' being the two class representatives to be merged. Notes ===== Pg. 86-87 [1] contains a description of this method. See Also ======== coincidence, rep """ p = self.p rep = self.rep phi = rep(k, modified=modified) psi = rep(lamda, modified=modified) if phi != psi: mu = min(phi, psi) v = max(phi, psi) p[v] = mu if modified: if v == phi: self.p_p[phi] = self.p_p[k]**-1*w*self.p_p[lamda] else: self.p_p[psi] = self.p_p[lamda]**-1*w**-1*self.p_p[k] q.append(v) def rep(self, k, modified=False): r""" Parameters ========== `k \in [0 \ldots n-1]`, as for ``self`` only array ``p`` is used Returns ======= Representative of the class containing ``k``. Returns the representative of `\sim` class containing ``k``, it also makes some modification to array ``p`` of ``self`` to ease further computations, described on Pg. 157 [1]. 
The information on classes under `\sim` is stored in array `p` of ``self`` argument, which will always satisfy the property: `p[\alpha] \sim \alpha` and `p[\alpha]=\alpha \iff \alpha=rep(\alpha)` `\forall \in [0 \ldots n-1]`. So, for `\alpha \in [0 \ldots n-1]`, we find `rep(self, \alpha)` by continually replacing `\alpha` by `p[\alpha]` until it becomes constant (i.e satisfies `p[\alpha] = \alpha`):w To increase the efficiency of later ``rep`` calculations, whenever we find `rep(self, \alpha)=\beta`, we set `p[\gamma] = \beta \forall \gamma \in p-chain` from `\alpha` to `\beta` Notes ===== ``rep`` routine is also described on Pg. 85-87 [1] in Atkinson's algorithm, this results from the fact that ``coincidence`` routine introduces functionality similar to that introduced by the ``minimal_block`` routine on Pg. 85-87 [1]. See Also ======== coincidence, merge """ p = self.p lamda = k rho = p[lamda] if modified: s = p[:] while rho != lamda: if modified: s[rho] = lamda lamda = rho rho = p[lamda] if modified: rho = s[lamda] while rho != k: mu = rho rho = s[mu] p[rho] = lamda self.p_p[rho] = self.p_p[rho]*self.p_p[mu] else: mu = k rho = p[mu] while rho != lamda: p[mu] = lamda mu = rho rho = p[mu] return lamda # alpha, beta coincide, i.e. alpha, beta represent the pair of cosets # where coincidence occurs def coincidence(self, alpha, beta, w=None, modified=False): r""" The third situation described in ``scan`` routine is handled by this routine, described on Pg. 156-161 [1]. The unfortunate situation when the scan completes but not correctly, then ``coincidence`` routine is run. i.e when for some `i` with `1 \le i \le r+1`, we have `w=st` with `s = x_1 x_2 \dots x_{i-1}`, `t = x_i x_{i+1} \dots x_r`, and `\beta = \alpha^s` and `\gamma = \alpha^{t-1}` are defined but unequal. This means that `\beta` and `\gamma` represent the same coset of `H` in `G`. Described on Pg. 156 [1]. 
``rep`` See Also ======== scan """ A_dict = self.A_dict A_dict_inv = self.A_dict_inv table = self.table # behaves as a queue q = [] if modified: self.modified_merge(alpha, beta, w, q) else: self.merge(alpha, beta, q) while len(q) > 0: gamma = q.pop(0) for x in A_dict: delta = table[gamma][A_dict[x]] if delta is not None: table[delta][A_dict_inv[x]] = None mu = self.rep(gamma, modified=modified) nu = self.rep(delta, modified=modified) if table[mu][A_dict[x]] is not None: if modified: v = self.p_p[delta]**-1*self.P[gamma][self.A_dict[x]]**-1 v = v*self.p_p[gamma]*self.P[mu][self.A_dict[x]] self.modified_merge(nu, table[mu][self.A_dict[x]], v, q) else: self.merge(nu, table[mu][A_dict[x]], q) elif table[nu][A_dict_inv[x]] is not None: if modified: v = self.p_p[gamma]**-1*self.P[gamma][self.A_dict[x]] v = v*self.p_p[delta]*self.P[mu][self.A_dict_inv[x]] self.modified_merge(mu, table[nu][self.A_dict_inv[x]], v, q) else: self.merge(mu, table[nu][A_dict_inv[x]], q) else: table[mu][A_dict[x]] = nu table[nu][A_dict_inv[x]] = mu if modified: v = self.p_p[gamma]**-1*self.P[gamma][self.A_dict[x]]*self.p_p[delta] self.P[mu][self.A_dict[x]] = v self.P[nu][self.A_dict_inv[x]] = v**-1 # method used in the HLT strategy def scan_and_fill(self, alpha, word): """ A modified version of ``scan`` routine used in the relator-based method of coset enumeration, described on pg. 162-163 [1], which follows the idea that whenever the procedure is called and the scan is incomplete then it makes new definitions to enable the scan to complete; i.e it fills in the gaps in the scan of the relator or subgroup generator. """ self.scan(alpha, word, fill=True) def scan_and_fill_c(self, alpha, word): """ A modified version of ``scan`` routine, described on Pg. 165 second para. [1], with modification similar to that of ``scan_anf_fill`` the only difference being it calls the coincidence procedure used in the coset-table based method i.e. the routine ``coincidence_c`` is used. 
See Also ======== scan, scan_and_fill """ A_dict = self.A_dict A_dict_inv = self.A_dict_inv table = self.table r = len(word) f = alpha i = 0 b = alpha j = r - 1 # loop until it has filled the alpha row in the table. while True: # do the forward scanning while i <= j and table[f][A_dict[word[i]]] is not None: f = table[f][A_dict[word[i]]] i += 1 if i > j: if f != b: self.coincidence_c(f, b) return # forward scan was incomplete, scan backwards while j >= i and table[b][A_dict_inv[word[j]]] is not None: b = table[b][A_dict_inv[word[j]]] j -= 1 if j < i: self.coincidence_c(f, b) elif j == i: table[f][A_dict[word[i]]] = b table[b][A_dict_inv[word[i]]] = f self.deduction_stack.append((f, word[i])) else: self.define_c(f, word[i]) # method used in the HLT strategy def look_ahead(self): """ When combined with the HLT method this is known as HLT+Lookahead method of coset enumeration, described on pg. 164 [1]. Whenever ``define`` aborts due to lack of space available this procedure is executed. This routine helps in recovering space resulting from "coincidence" of cosets. """ R = self.fp_group.relators p = self.p # complete scan all relators under all cosets(obviously live) # without making new definitions for beta in self.omega: for w in R: self.scan(beta, w) if p[beta] < beta: break # Pg. 166 def process_deductions(self, R_c_x, R_c_x_inv): """ Processes the deductions that have been pushed onto ``deduction_stack``, described on Pg. 166 [1] and is used in coset-table based enumeration. 
See Also ======== deduction_stack """ p = self.p table = self.table while len(self.deduction_stack) > 0: if len(self.deduction_stack) >= CosetTable.max_stack_size: self.look_ahead() del self.deduction_stack[:] continue else: alpha, x = self.deduction_stack.pop() if p[alpha] == alpha: for w in R_c_x: self.scan_c(alpha, w) if p[alpha] < alpha: break beta = table[alpha][self.A_dict[x]] if beta is not None and p[beta] == beta: for w in R_c_x_inv: self.scan_c(beta, w) if p[beta] < beta: break def process_deductions_check(self, R_c_x, R_c_x_inv): """ A variation of ``process_deductions``, this calls ``scan_check`` wherever ``process_deductions`` calls ``scan``, described on Pg. [1]. See Also ======== process_deductions """ table = self.table while len(self.deduction_stack) > 0: alpha, x = self.deduction_stack.pop() if not all(self.scan_check(alpha, w) for w in R_c_x): return False beta = table[alpha][self.A_dict[x]] if beta is not None: if not all(self.scan_check(beta, w) for w in R_c_x_inv): return False return True def switch(self, beta, gamma): r"""Switch the elements `\beta, \gamma \in \Omega` of ``self``, used by the ``standardize`` procedure, described on Pg. 167 [1]. See Also ======== standardize """ A = self.A A_dict = self.A_dict table = self.table for x in A: z = table[gamma][A_dict[x]] table[gamma][A_dict[x]] = table[beta][A_dict[x]] table[beta][A_dict[x]] = z for alpha in range(len(self.p)): if self.p[alpha] == alpha: if table[alpha][A_dict[x]] == beta: table[alpha][A_dict[x]] = gamma elif table[alpha][A_dict[x]] == gamma: table[alpha][A_dict[x]] = beta def standardize(self): r""" A coset table is standardized if when running through the cosets and within each coset through the generator images (ignoring generator inverses), the cosets appear in order of the integers `0, 1, \dots, n`. 
"Standardize" reorders the elements of `\Omega` such that, if we scan the coset table first by elements of `\Omega` and then by elements of A, then the cosets occur in ascending order. ``standardize()`` is used at the end of an enumeration to permute the cosets so that they occur in some sort of standard order. Notes ===== procedure is described on pg. 167-168 [1], it also makes use of the ``switch`` routine to replace by smaller integer value. Examples ======== >>> from sympy.combinatorics import free_group >>> from sympy.combinatorics.fp_groups import FpGroup, coset_enumeration_r >>> F, x, y = free_group("x, y") # Example 5.3 from [1] >>> f = FpGroup(F, [x**2*y**2, x**3*y**5]) >>> C = coset_enumeration_r(f, []) >>> C.compress() >>> C.table [[1, 3, 1, 3], [2, 0, 2, 0], [3, 1, 3, 1], [0, 2, 0, 2]] >>> C.standardize() >>> C.table [[1, 2, 1, 2], [3, 0, 3, 0], [0, 3, 0, 3], [2, 1, 2, 1]] """ A = self.A A_dict = self.A_dict gamma = 1 for alpha, x in product(range(self.n), A): beta = self.table[alpha][A_dict[x]] if beta >= gamma: if beta > gamma: self.switch(gamma, beta) gamma += 1 if gamma == self.n: return # Compression of a Coset Table def compress(self): """Removes the non-live cosets from the coset table, described on pg. 167 [1]. """ gamma = -1 A = self.A A_dict = self.A_dict A_dict_inv = self.A_dict_inv table = self.table chi = tuple([i for i in range(len(self.p)) if self.p[i] != i]) for alpha in self.omega: gamma += 1 if gamma != alpha: # replace alpha by gamma in coset table for x in A: beta = table[alpha][A_dict[x]] table[gamma][A_dict[x]] = beta # XXX: The line below uses == rather than = which means # that it has no effect. It is not clear though if it is # correct simply to delete the line or to change it to # use =. Changing it causes some tests to fail. 
# # https://github.com/sympy/sympy/issues/27633 table[beta][A_dict_inv[x]] == gamma # noqa: B015 # all the cosets in the table are live cosets self.p = list(range(gamma + 1)) # delete the useless columns del table[len(self.p):] # re-define values for row in table: for j in range(len(self.A)): row[j] -= bisect_left(chi, row[j]) def conjugates(self, R): R_c = list(chain.from_iterable((rel.cyclic_conjugates(), \ (rel**-1).cyclic_conjugates()) for rel in R)) R_set = set() for conjugate in R_c: R_set = R_set.union(conjugate) R_c_list = [] for x in self.A: r = {word for word in R_set if word[0] == x} R_c_list.append(r) R_set.difference_update(r) return R_c_list def coset_representative(self, coset): ''' Compute the coset representative of a given coset. Examples ======== >>> from sympy.combinatorics import free_group >>> from sympy.combinatorics.fp_groups import FpGroup, coset_enumeration_r >>> F, x, y = free_group("x, y") >>> f = FpGroup(F, [x**3, y**3, x**-1*y**-1*x*y]) >>> C = coset_enumeration_r(f, [x]) >>> C.compress() >>> C.table [[0, 0, 1, 2], [1, 1, 2, 0], [2, 2, 0, 1]] >>> C.coset_representative(0) <identity> >>> C.coset_representative(1) y >>> C.coset_representative(2) y**-1 ''' for x in self.A: gamma = self.table[coset][self.A_dict[x]] if coset == 0: return self.fp_group.identity if gamma < coset: return self.coset_representative(gamma)*x**-1 ############################## # Modified Methods # ############################## def modified_define(self, alpha, x): r""" Define a function p_p from from [1..n] to A* as an additional component of the modified coset table. Parameters ========== \alpha \in \Omega x \in A* See Also ======== define """ self.define(alpha, x, modified=True) def modified_scan(self, alpha, w, y, fill=False): r""" Parameters ========== \alpha \in \Omega w \in A* y \in (YUY^-1) fill -- `modified_scan_and_fill` when set to True. 
See Also ======== scan """ self.scan(alpha, w, y=y, fill=fill, modified=True) def modified_scan_and_fill(self, alpha, w, y): self.modified_scan(alpha, w, y, fill=True) def modified_merge(self, k, lamda, w, q): r""" Parameters ========== 'k', 'lamda' -- the two class representatives to be merged. q -- queue of length l of elements to be deleted from `\Omega` *. w -- Word in (YUY^-1) See Also ======== merge """ self.merge(k, lamda, q, w=w, modified=True) def modified_rep(self, k): r""" Parameters ========== `k \in [0 \ldots n-1]` See Also ======== rep """ self.rep(k, modified=True) def modified_coincidence(self, alpha, beta, w): r""" Parameters ========== A coincident pair `\alpha, \beta \in \Omega, w \in Y \cup Y^{-1}` See Also ======== coincidence """ self.coincidence(alpha, beta, w=w, modified=True) ############################################################################### # COSET ENUMERATION # ############################################################################### # relator-based method def coset_enumeration_r(fp_grp, Y, max_cosets=None, draft=None, incomplete=False, modified=False): """ This is easier of the two implemented methods of coset enumeration. and is often called the HLT method, after Hazelgrove, Leech, Trotter The idea is that we make use of ``scan_and_fill`` makes new definitions whenever the scan is incomplete to enable the scan to complete; this way we fill in the gaps in the scan of the relator or subgroup generator, that's why the name relator-based method. An instance of `CosetTable` for `fp_grp` can be passed as the keyword argument `draft` in which case the coset enumeration will start with that instance and attempt to complete it. When `incomplete` is `True` and the function is unable to complete for some reason, the partially complete table will be returned. 
# TODO: complete the docstring See Also ======== scan_and_fill, Examples ======== >>> from sympy.combinatorics.free_groups import free_group >>> from sympy.combinatorics.fp_groups import FpGroup, coset_enumeration_r >>> F, x, y = free_group("x, y") # Example 5.1 from [1] >>> f = FpGroup(F, [x**3, y**3, x**-1*y**-1*x*y]) >>> C = coset_enumeration_r(f, [x]) >>> for i in range(len(C.p)): ... if C.p[i] == i: ... print(C.table[i]) [0, 0, 1, 2] [1, 1, 2, 0] [2, 2, 0, 1] >>> C.p [0, 1, 2, 1, 1] # Example from exercises Q2 [1] >>> f = FpGroup(F, [x**2*y**2, y**-1*x*y*x**-3]) >>> C = coset_enumeration_r(f, []) >>> C.compress(); C.standardize() >>> C.table [[1, 2, 3, 4], [5, 0, 6, 7], [0, 5, 7, 6], [7, 6, 5, 0], [6, 7, 0, 5], [2, 1, 4, 3], [3, 4, 2, 1], [4, 3, 1, 2]] # Example 5.2 >>> f = FpGroup(F, [x**2, y**3, (x*y)**3]) >>> Y = [x*y] >>> C = coset_enumeration_r(f, Y) >>> for i in range(len(C.p)): ... if C.p[i] == i: ... print(C.table[i]) [1, 1, 2, 1] [0, 0, 0, 2] [3, 3, 1, 0] [2, 2, 3, 3] # Example 5.3 >>> f = FpGroup(F, [x**2*y**2, x**3*y**5]) >>> Y = [] >>> C = coset_enumeration_r(f, Y) >>> for i in range(len(C.p)): ... if C.p[i] == i: ... print(C.table[i]) [1, 3, 1, 3] [2, 0, 2, 0] [3, 1, 3, 1] [0, 2, 0, 2] # Example 5.4 >>> F, a, b, c, d, e = free_group("a, b, c, d, e") >>> f = FpGroup(F, [a*b*c**-1, b*c*d**-1, c*d*e**-1, d*e*a**-1, e*a*b**-1]) >>> Y = [a] >>> C = coset_enumeration_r(f, Y) >>> for i in range(len(C.p)): ... if C.p[i] == i: ... print(C.table[i]) [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] # example of "compress" method >>> C.compress() >>> C.table [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] # Exercises Pg. 161, Q2. >>> F, x, y = free_group("x, y") >>> f = FpGroup(F, [x**2*y**2, y**-1*x*y*x**-3]) >>> Y = [] >>> C = coset_enumeration_r(f, Y) >>> C.compress() >>> C.standardize() >>> C.table [[1, 2, 3, 4], [5, 0, 6, 7], [0, 5, 7, 6], [7, 6, 5, 0], [6, 7, 0, 5], [2, 1, 4, 3], [3, 4, 2, 1], [4, 3, 1, 2]] # John J. Cannon; Lucien A. Dimino; George Havas; Jane M. 
Watson # Mathematics of Computation, Vol. 27, No. 123. (Jul., 1973), pp. 463-490 # from 1973chwd.pdf # Table 1. Ex. 1 >>> F, r, s, t = free_group("r, s, t") >>> E1 = FpGroup(F, [t**-1*r*t*r**-2, r**-1*s*r*s**-2, s**-1*t*s*t**-2]) >>> C = coset_enumeration_r(E1, [r]) >>> for i in range(len(C.p)): ... if C.p[i] == i: ... print(C.table[i]) [0, 0, 0, 0, 0, 0] Ex. 2 >>> F, a, b = free_group("a, b") >>> Cox = FpGroup(F, [a**6, b**6, (a*b)**2, (a**2*b**2)**2, (a**3*b**3)**5]) >>> C = coset_enumeration_r(Cox, [a]) >>> index = 0 >>> for i in range(len(C.p)): ... if C.p[i] == i: ... index += 1 >>> index 500 # Ex. 3 >>> F, a, b = free_group("a, b") >>> B_2_4 = FpGroup(F, [a**4, b**4, (a*b)**4, (a**-1*b)**4, (a**2*b)**4, \ (a*b**2)**4, (a**2*b**2)**4, (a**-1*b*a*b)**4, (a*b**-1*a*b)**4]) >>> C = coset_enumeration_r(B_2_4, [a]) >>> index = 0 >>> for i in range(len(C.p)): ... if C.p[i] == i: ... index += 1 >>> index 1024 References ========== .. [1] Holt, D., Eick, B., O'Brien, E. "Handbook of computational group theory" """ # 1. Initialize a coset table C for < X|R > C = CosetTable(fp_grp, Y, max_cosets=max_cosets) # Define coset table methods. 
if modified: _scan_and_fill = C.modified_scan_and_fill _define = C.modified_define else: _scan_and_fill = C.scan_and_fill _define = C.define if draft: C.table = draft.table[:] C.p = draft.p[:] R = fp_grp.relators A_dict = C.A_dict p = C.p for i in range(len(Y)): if modified: _scan_and_fill(0, Y[i], C._grp.generators[i]) else: _scan_and_fill(0, Y[i]) alpha = 0 while alpha < C.n: if p[alpha] == alpha: try: for w in R: if modified: _scan_and_fill(alpha, w, C._grp.identity) else: _scan_and_fill(alpha, w) # if alpha was eliminated during the scan then break if p[alpha] < alpha: break if p[alpha] == alpha: for x in A_dict: if C.table[alpha][A_dict[x]] is None: _define(alpha, x) except ValueError as e: if incomplete: return C raise e alpha += 1 return C def modified_coset_enumeration_r(fp_grp, Y, max_cosets=None, draft=None, incomplete=False): r""" Introduce a new set of symbols y \in Y that correspond to the generators of the subgroup. Store the elements of Y as a word P[\alpha, x] and compute the coset table similar to that of the regular coset enumeration methods. Examples ======== >>> from sympy.combinatorics.free_groups import free_group >>> from sympy.combinatorics.fp_groups import FpGroup >>> from sympy.combinatorics.coset_table import modified_coset_enumeration_r >>> F, x, y = free_group("x, y") >>> f = FpGroup(F, [x**3, y**3, x**-1*y**-1*x*y]) >>> C = modified_coset_enumeration_r(f, [x]) >>> C.table [[0, 0, 1, 2], [1, 1, 2, 0], [2, 2, 0, 1], [None, 1, None, None], [1, 3, None, None]] See Also ======== coset_enumertation_r References ========== .. [1] Holt, D., Eick, B., O'Brien, E., "Handbook of Computational Group Theory", Section 5.3.2 """ return coset_enumeration_r(fp_grp, Y, max_cosets=max_cosets, draft=draft, incomplete=incomplete, modified=True) # Pg. 
166 # coset-table based method def coset_enumeration_c(fp_grp, Y, max_cosets=None, draft=None, incomplete=False): """ >>> from sympy.combinatorics.free_groups import free_group >>> from sympy.combinatorics.fp_groups import FpGroup, coset_enumeration_c >>> F, x, y = free_group("x, y") >>> f = FpGroup(F, [x**3, y**3, x**-1*y**-1*x*y]) >>> C = coset_enumeration_c(f, [x]) >>> C.table [[0, 0, 1, 2], [1, 1, 2, 0], [2, 2, 0, 1]] """ # Initialize a coset table C for < X|R > X = fp_grp.generators R = fp_grp.relators C = CosetTable(fp_grp, Y, max_cosets=max_cosets) if draft: C.table = draft.table[:] C.p = draft.p[:] C.deduction_stack = draft.deduction_stack for alpha, x in product(range(len(C.table)), X): if C.table[alpha][C.A_dict[x]] is not None: C.deduction_stack.append((alpha, x)) A = C.A # replace all the elements by cyclic reductions R_cyc_red = [rel.identity_cyclic_reduction() for rel in R] R_c = list(chain.from_iterable((rel.cyclic_conjugates(), (rel**-1).cyclic_conjugates()) \ for rel in R_cyc_red)) R_set = set() for conjugate in R_c: R_set = R_set.union(conjugate) # a list of subsets of R_c whose words start with "x". R_c_list = [] for x in C.A: r = {word for word in R_set if word[0] == x} R_c_list.append(r) R_set.difference_update(r) for w in Y: C.scan_and_fill_c(0, w) for x in A: C.process_deductions(R_c_list[C.A_dict[x]], R_c_list[C.A_dict_inv[x]]) alpha = 0 while alpha < len(C.table): if C.p[alpha] == alpha: try: for x in C.A: if C.p[alpha] != alpha: break if C.table[alpha][C.A_dict[x]] is None: C.define_c(alpha, x) C.process_deductions(R_c_list[C.A_dict[x]], R_c_list[C.A_dict_inv[x]]) except ValueError as e: if incomplete: return C raise e alpha += 1 return C
CosetTable
python
pallets__jinja
src/jinja2/lexer.py
{ "start": 9013, "end": 13030 }
class ____: """A token stream is an iterable that yields :class:`Token`\\s. The parser however does not iterate over it but calls :meth:`next` to go one token ahead. The current active token is stored as :attr:`current`. """ def __init__( self, generator: t.Iterable[Token], name: str | None, filename: str | None, ): self._iter = iter(generator) self._pushed: deque[Token] = deque() self.name = name self.filename = filename self.closed = False self.current = Token(1, TOKEN_INITIAL, "") next(self) def __iter__(self) -> TokenStreamIterator: return TokenStreamIterator(self) def __bool__(self) -> bool: return bool(self._pushed) or self.current.type is not TOKEN_EOF @property def eos(self) -> bool: """Are we at the end of the stream?""" return not self def push(self, token: Token) -> None: """Push a token back to the stream.""" self._pushed.append(token) def look(self) -> Token: """Look at the next token.""" old_token = next(self) result = self.current self.push(result) self.current = old_token return result def skip(self, n: int = 1) -> None: """Got n tokens ahead.""" for _ in range(n): next(self) def next_if(self, expr: str) -> Token | None: """Perform the token test and return the token if it matched. Otherwise the return value is `None`. """ if self.current.test(expr): return next(self) return None def skip_if(self, expr: str) -> bool: """Like :meth:`next_if` but only returns `True` or `False`.""" return self.next_if(expr) is not None def __next__(self) -> Token: """Go one token ahead and return the old one. Use the built-in :func:`next` instead of calling this directly. 
""" rv = self.current if self._pushed: self.current = self._pushed.popleft() elif self.current.type is not TOKEN_EOF: try: self.current = next(self._iter) except StopIteration: self.close() return rv def close(self) -> None: """Close the stream.""" self.current = Token(self.current.lineno, TOKEN_EOF, "") self._iter = iter(()) self.closed = True def expect(self, expr: str) -> Token: """Expect a given token type and return it. This accepts the same argument as :meth:`jinja2.lexer.Token.test`. """ if not self.current.test(expr): expr = describe_token_expr(expr) if self.current.type is TOKEN_EOF: raise TemplateSyntaxError( f"unexpected end of template, expected {expr!r}.", self.current.lineno, self.name, self.filename, ) raise TemplateSyntaxError( f"expected token {expr!r}, got {describe_token(self.current)!r}", self.current.lineno, self.name, self.filename, ) return next(self) def get_lexer(environment: "Environment") -> "Lexer": """Return a lexer which is probably cached.""" key = ( environment.block_start_string, environment.block_end_string, environment.variable_start_string, environment.variable_end_string, environment.comment_start_string, environment.comment_end_string, environment.line_statement_prefix, environment.line_comment_prefix, environment.trim_blocks, environment.lstrip_blocks, environment.newline_sequence, environment.keep_trailing_newline, ) lexer = _lexer_cache.get(key) if lexer is None: _lexer_cache[key] = lexer = Lexer(environment) return lexer
TokenStream
python
dask__dask
dask/dataframe/dask_expr/_groupby.py
{ "start": 14917, "end": 16516 }
class ____(GroupbyAggregationBase): """Groupby aggregation for decomposable aggregates The results may be calculated via tree or shuffle reduction. """ chunk = staticmethod(_groupby_apply_funcs) @classmethod def combine(cls, inputs, **kwargs): return _groupby_apply_funcs(_concat(inputs), **kwargs) @classmethod def aggregate(cls, inputs, **kwargs): return _agg_finalize(_concat(inputs), **kwargs) @property def chunk_kwargs(self) -> dict: # type: ignore[override] return { "funcs": self.agg_args["chunk_funcs"], "sort": self.sort, **_as_dict("observed", self.observed), **_as_dict("dropna", self.dropna), } @property def combine_kwargs(self) -> dict: # type: ignore[override] return { "funcs": self.agg_args["aggregate_funcs"], "level": self.levels, "sort": self.sort, **_as_dict("observed", self.observed), **_as_dict("dropna", self.dropna), } @property def aggregate_kwargs(self) -> dict: # type: ignore[override] return { "aggregate_funcs": self.agg_args["aggregate_funcs"], "arg": self.arg, "columns": self._slice, "finalize_funcs": self.agg_args["finalizers"], "is_series": self.frame._meta.ndim == 1, "level": self.levels, "sort": self.sort, **_as_dict("observed", self.observed), **_as_dict("dropna", self.dropna), }
DecomposableGroupbyAggregation
python
pytorch__pytorch
tools/experimental/torchfuzz/multi_process_fuzzer.py
{ "start": 1423, "end": 23771 }
class ____: seed: int success: bool output: str duration: float ignored_pattern_idx: int operation_stats: dict[str, int] # New field for operation statistics def is_ignored_output(output: str) -> int: """ Check if the output matches any ignore pattern. Args: output: The combined stdout/stderr string. Returns: Index of the matched ignore pattern, or -1 if none matched. """ for idx, pattern in enumerate(IGNORE_PATTERNS): if pattern.search(output): return idx return -1 def run_fuzzer_with_seed( seed: int, template: str = "default", supported_ops: str | None = None, ) -> FuzzerResult: """ Run fuzzer.py with a specific seed. Args: seed: The seed value to pass to fuzzer.py template: The template to use for code generation supported_ops: Comma-separated ops string with optional weights Returns: FuzzerResult dataclass instance """ start_time = time.time() try: # Run fuzzer.py with the specified seed and template cmd = [ sys.executable, "fuzzer.py", "--single", "--seed", str(seed), "--template", template, ] # Append supported ops if provided if supported_ops: cmd.extend(["--supported-ops", supported_ops]) result = subprocess.run( cmd, capture_output=True, text=True, timeout=300, # 5 minute timeout per seed ) duration = time.time() - start_time success = result.returncode == 0 # Combine stdout and stderr for output output = "" if result.stdout: output += f"STDOUT:\n{result.stdout}\n" if result.stderr: output += f"STDERR:\n{result.stderr}\n" output += f"Return code: {result.returncode}" # Parse operation statistics from the output operation_stats = {} if result.stdout: lines = result.stdout.split("\n") in_stats_section = False for line in lines: if line.strip() == "OPERATION_STATS:": in_stats_section = True continue elif in_stats_section: if line.startswith(" ") and ":" in line: # Parse line like " torch.add: 3" op_line = line.strip() if ": " in op_line: op_name, count_str = op_line.split(": ", 1) try: count = int(count_str) operation_stats[op_name] = count except ValueError: 
pass # Skip malformed lines else: # End of stats section in_stats_section = False # Check if output should be ignored and which pattern matched ignored_pattern_idx = is_ignored_output(output) if ignored_pattern_idx != -1: # Mark as ignored (could also return a special flag if needed) output = "[IGNORED] " + output return FuzzerResult( seed, success, output, duration, ignored_pattern_idx, operation_stats ) except subprocess.TimeoutExpired: duration = time.time() - start_time return FuzzerResult( seed, False, "Process timed out after 300 seconds", duration, -1, {} ) except Exception as e: duration = time.time() - start_time return FuzzerResult( seed, False, f"Exception occurred: {str(e)}", duration, -1, {} ) def print_output_lines(output: str, write_func): """Helper to print non-empty lines of output using the provided write_func.""" for line in output.split("\n"): if line.strip(): write_func(f" {line}") if hasattr(write_func, "__self__") and hasattr(write_func.__self__, "write"): # For tqdm.write, add an empty line for separation write_func("") def handle_result_output( *, label: str, seed: int, duration: float, output: str, ignored: bool, verbose: bool, write_func, ): """Unified handler for result output, reducing code repetition.""" ignored_text = " [IGNORED]" if ignored else "" write_func(f"{label} - Seed {seed} (duration: {duration:.2f}s){ignored_text}") if output.strip() or label.startswith("❌") or verbose: print_output_lines(output, write_func) def run_multi_process_fuzzer( num_processes: int | None = None, seed_start: int = 0, seed_count: int = 100, verbose: bool = False, template: str = "default", supported_ops: str | None = None, ) -> None: """ Run the multi-process fuzzer. 
Args: num_processes: Number of worker processes to use seed_start: Starting seed value (inclusive) seed_count: Number of seeds to run verbose: Whether to print detailed output template: The template to use for code generation supported_ops: Comma-separated ops string with optional weights """ seeds = list(range(seed_start, seed_start + seed_count)) persist_print(f"🚀 Starting multi-process fuzzer with {num_processes} processes") persist_print( f"📊 Processing seeds {seed_start} to {seed_start + seed_count - 1} ({len(seeds)} total)" ) persist_print( f"🔧 Command template: python fuzzer.py --seed {{seed}} --template {template}" ) persist_print("=" * 60) start_time = time.time() results: list[FuzzerResult] = [] successful_count = 0 failed_count = 0 ignored_count = 0 ignored_seeds = [] ignored_pattern_counts: dict[int, int] = dict.fromkeys( range(len(IGNORE_PATTERNS)), 0 ) try: # Use multiprocessing Pool to distribute work with mp.Pool(processes=num_processes) as pool: # Submit all seeds to the process pool future_results = [] for seed in seeds: future = pool.apply_async( run_fuzzer_with_seed, (seed, template, supported_ops) ) future_results.append(future) # Set up progress bar if HAS_TQDM: from tqdm import tqdm # Import the real tqdm here pbar = tqdm( total=len(seeds), desc="Processing seeds", file=sys.stdout, bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}] ✅/❌/❓={postfix}", dynamic_ncols=True, ) pbar.set_postfix_str( f"{successful_count}/{failed_count}/{ignored_count} | throughput: 0.00 seeds/hr" ) def write_func(msg): # pyrefly: ignore [missing-attribute] pbar.write(msg) else: persist_print("Progress: (install tqdm for better progress bar)") pbar = None write_func = persist_print # Collect results as they complete for i, future in enumerate(future_results): try: result: FuzzerResult = future.get() results.append(result) if result.ignored_pattern_idx != -1: ignored_seeds.append(result.seed) ignored_pattern_counts[result.ignored_pattern_idx] += 1 
ignored_count += 1 # Only increment failed_count if not ignored if result.success: successful_count += 1 elif result.ignored_pattern_idx == -1: failed_count += 1 elapsed = time.time() - start_time throughput = (i + 1) / (elapsed / 3600) # Update progress bar if HAS_TQDM and pbar: pbar.set_postfix_str( f"{successful_count}/{failed_count}/{ignored_count} | throughput: {throughput:.2f} seeds/hr" ) pbar.update(1) else: status_emoji = "✅" if result.success else "❌" ignored_text = ( " (IGNORED)" if result.ignored_pattern_idx != -1 else "" ) persist_print( f"Completed {i + 1}/{len(seeds)} - Seed {result.seed}: {status_emoji}{ignored_text}" ) # Unified output handling if not result.success and result.ignored_pattern_idx == -1: handle_result_output( label="❌ FAILURE", seed=result.seed, duration=result.duration, output=result.output, ignored=False, verbose=verbose, write_func=write_func, ) elif not result.success and result.ignored_pattern_idx != -1: if verbose: handle_result_output( label="🚫 IGNORED", seed=result.seed, duration=result.duration, output=result.output, ignored=True, verbose=verbose, write_func=write_func, ) elif verbose: handle_result_output( label="✅ SUCCESS", seed=result.seed, duration=result.duration, output=result.output, ignored=(result.ignored_pattern_idx != -1), verbose=verbose, write_func=write_func, ) except Exception as e: failed_count += 1 if HAS_TQDM and pbar: pbar.set_postfix_str(f"{successful_count}/{failed_count}") pbar.update(1) pbar.write(f"❌ POOL ERROR - Seed {seeds[i]}: {str(e)}") else: persist_print( f"Completed {i + 1}/{len(seeds)} - Seed {seeds[i]}: ❌ POOL ERROR" ) persist_print(f"❌ POOL ERROR - Seed {seeds[i]}: {str(e)}") results.append( FuzzerResult( seeds[i], False, f"Pool error: {str(e)}", 0.0, -1, {} ) ) # Close progress bar if HAS_TQDM and pbar: pbar.close() except KeyboardInterrupt: persist_print("\n🛑 Interrupted by user (Ctrl+C)") # Print summary up to this point total_time = time.time() - start_time persist_print("=" * 60) 
persist_print("📈 SUMMARY (partial, interrupted)") persist_print("=" * 60) successful = [res for res in results if res.success] # Only count as failed if not ignored failed = [ res for res in results if not res.success and res.ignored_pattern_idx == -1 ] ignored = [res for res in results if res.ignored_pattern_idx != -1] persist_print( f"✅ Successful: {len(successful)}/{len(results)} ({(len(successful) / len(results) * 100 if results else 0):.1f}%)" ) persist_print( f"❌ Failed: {len(failed)}/{len(results)} ({(len(failed) / len(results) * 100 if results else 0):.1f}%)" ) persist_print(f"⏱️ Total time: {total_time:.2f}s") if results: persist_print( f"⚡ Throughput: {(len(results) / (total_time / 3600)):.2f} seeds/hr" if total_time > 0 else "⚡ Throughput: N/A" ) if failed: persist_print(f"\n❌ Failed seeds: {[res.seed for res in failed]}") if successful: persist_print(f"✅ Successful seeds: {[res.seed for res in successful]}") avg_success_time = sum(res.duration for res in successful) / len(successful) persist_print(f"⚡ Avg time for successful runs: {avg_success_time:.2f}s") if ignored: persist_print(f"\n🚫 Ignored seeds: {[res.seed for res in ignored]}") # Print ignore pattern stats persist_print("\n🚫 Ignored pattern statistics:") total_ignored = len(ignored) for idx, pattern in enumerate(IGNORE_PATTERNS): count = ignored_pattern_counts[idx] percent = (count / total_ignored * 100) if total_ignored else 0 persist_print( f" Pattern {idx}: {pattern.pattern!r} - {count} ({percent:.1f}%)" ) # Aggregate and print operation distribution _print_operation_distribution(results) sys.exit(130) total_time = time.time() - start_time # Print summary persist_print("=" * 60) persist_print("📈 SUMMARY") persist_print("=" * 60) successful = [res for res in results if res.success] # Only count as failed if not ignored failed = [ res for res in results if not res.success and res.ignored_pattern_idx == -1 ] ignored = [res for res in results if res.ignored_pattern_idx != -1] persist_print( f"✅ 
Successful: {len(successful)}/{len(results)} ({len(successful) / len(results) * 100:.1f}%)" ) persist_print( f"❌ Failed: {len(failed)}/{len(results)} ({len(failed) / len(results) * 100:.1f}%)" ) persist_print(f"⏱️ Total time: {total_time:.2f}s") persist_print( f"⚡ Throughput: {(len(results) / (total_time / 3600)):.2f} seeds/hr" if total_time > 0 else "⚡ Throughput: N/A" ) if failed: persist_print(f"\n❌ Failed seeds: {[res.seed for res in failed]}") if successful: persist_print(f"✅ Successful seeds: {[res.seed for res in successful]}") avg_success_time = sum(res.duration for res in successful) / len(successful) persist_print(f"⚡ Avg time for successful runs: {avg_success_time:.2f}s") if ignored: persist_print(f"\n🚫 Ignored seeds: {[res.seed for res in ignored]}") # Print ignore pattern stats persist_print("\n🚫 Ignored pattern statistics:") total_ignored = len(ignored) for idx, pattern in enumerate(IGNORE_PATTERNS): count = ignored_pattern_counts[idx] percent = (count / total_ignored * 100) if total_ignored else 0 persist_print( f" Pattern {idx}: {pattern.pattern!r} - {count} ({percent:.1f}%)" ) # Aggregate and print operation distribution _print_operation_distribution(results) def _print_operation_distribution(results: list[FuzzerResult]) -> None: """Helper function to print operation distribution statistics.""" total_operation_stats = defaultdict(int) total_operations = 0 # Collect operation stats from all successful results for result in results: if result.success and result.operation_stats: for op_name, count in result.operation_stats.items(): total_operation_stats[op_name] += count total_operations += count if total_operation_stats: persist_print("\n📊 OPERATION DISTRIBUTION") persist_print("=" * 60) persist_print(f"Total operations executed: {total_operations}") persist_print("") # Sort operations by count (descending) for better readability sorted_ops = sorted( total_operation_stats.items(), key=lambda x: x[1], reverse=True ) for op_name, count in sorted_ops: 
percentage = (count / total_operations * 100) if total_operations > 0 else 0 persist_print(f" {op_name:<30} {count:>6} times ({percentage:>5.1f}%)") else: persist_print( "\n📊 No operation statistics collected (no successful runs with stats)" ) def run_until_failure( num_processes: int | None = None, verbose: bool = False, template: str = "default", supported_ops: str | None = None, ) -> None: """ Run the multi-process fuzzer with a random starting seed, iterating until a failure is found. Args: num_processes: Number of worker processes to use verbose: Whether to print detailed output template: The template to use for code generation supported_ops: Comma-separated ops string with optional weights Returns: Exits with non-zero code when a failure is found """ import random # Pick a random seed to start from initial_seed = random.randint(0, 2**31 - 1) persist_print( f"🎲 Starting continuous fuzzing with random initial seed: {initial_seed}" ) persist_print(f"🚀 Using {num_processes} processes") persist_print( f"🔧 Command template: python fuzzer.py --seed {{seed}} --template {template}" ) persist_print("🎯 Running until first failure is found...") persist_print("=" * 60) start_time = time.time() current_seed = initial_seed total_successful = 0 total_ignored = 0 batch_size = 100 # Process seeds in batches of 100 try: while True: # Process a batch of seeds seeds = list(range(current_seed, current_seed + batch_size)) with mp.Pool(processes=num_processes) as pool: future_results = [] for seed in seeds: future = pool.apply_async( run_fuzzer_with_seed, (seed, template, supported_ops) ) future_results.append((seed, future)) # Set up progress bar for this batch if HAS_TQDM: from tqdm import tqdm pbar = tqdm( total=len(seeds), desc=f"Batch starting at seed {current_seed}", file=sys.stdout, bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}] ✅/🚫={postfix}", dynamic_ncols=True, ) pbar.set_postfix_str(f"{total_successful}/{total_ignored}") def write_func(msg): # pyrefly: ignore 
[missing-attribute] pbar.write(msg) else: pbar = None # Collect results as they complete for seed, future in future_results: result: FuzzerResult = future.get() if result.ignored_pattern_idx != -1: total_ignored += 1 if result.success: total_successful += 1 elif result.ignored_pattern_idx == -1: # Found a failure that is not ignored! if HAS_TQDM and pbar: pbar.close() elapsed = time.time() - start_time persist_print("\n" + "=" * 60) persist_print("🎯 FAILURE FOUND!") persist_print("=" * 60) persist_print(f"❌ Failing seed: {result.seed}") persist_print( f"⏱️ Duration for this seed: {result.duration:.2f}s" ) persist_print(f"⏱️ Total time elapsed: {elapsed:.2f}s") persist_print(f"✅ Successful seeds tested: {total_successful}") persist_print(f"🚫 Ignored seeds: {total_ignored}") persist_print( f"📊 Total seeds tested: {total_successful + total_ignored + 1}" ) persist_print("\n💥 Failure output:") persist_print("-" * 60) print_output_lines(result.output, persist_print) persist_print("-" * 60) persist_print( f"\n🔄 Reproduce with: python fuzzer.py --seed {result.seed} --template {template}" ) # Exit with non-zero code sys.exit(1) # Update progress bar if HAS_TQDM and pbar: pbar.set_postfix_str(f"{total_successful}/{total_ignored}") pbar.update(1) elif verbose: status_emoji = "✅" if result.success else "🚫" persist_print(f"Seed {result.seed}: {status_emoji}") # Close progress bar for this batch if HAS_TQDM and pbar: pbar.close() # Move to next batch current_seed += batch_size except KeyboardInterrupt: persist_print("\n🛑 Interrupted by user (Ctrl+C)") elapsed = time.time() - start_time persist_print("=" * 60) persist_print("📈 SUMMARY (interrupted)") persist_print("=" * 60) persist_print(f"⏱️ Total time: {elapsed:.2f}s") persist_print(f"✅ Successful seeds: {total_successful}") persist_print(f"🚫 Ignored seeds: {total_ignored}") persist_print(f"📊 Total seeds tested: {total_successful + total_ignored}") persist_print( f"⚡ Throughput: {((total_successful + total_ignored) / (elapsed / 
3600)):.2f} seeds/hr" ) sys.exit(130)
FuzzerResult
python
streamlit__streamlit
lib/streamlit/errors.py
{ "start": 13071, "end": 13293 }
class ____(LocalizableStreamlitException): """Exception raised when a number exceeds the Javascript limits.""" def __init__(self, message: str) -> None: super().__init__(message)
StreamlitJSNumberBoundsError
python
pytorch__pytorch
torch/_export/serde/schema.py
{ "start": 4572, "end": 4673 }
class ____: name: Annotated[str, 10] class_fqn: Annotated[str, 20] @dataclass
CustomObjArgument
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/ruff/RUF012.py
{ "start": 1516, "end": 1760 }
class ____(F): mutable_default: list[int] = [] immutable_annotation: Sequence[int] = [] without_annotation = [] class_variable: ClassVar[list[int]] = [] final_variable: Final[list[int]] = [] from pydantic import BaseConfig
G
python
tornadoweb__tornado
tornado/http1connection.py
{ "start": 30323, "end": 33072 }
class ____(httputil.HTTPMessageDelegate): """Wraps an `HTTPMessageDelegate` to decode ``Content-Encoding: gzip``.""" def __init__(self, delegate: httputil.HTTPMessageDelegate, chunk_size: int) -> None: self._delegate = delegate self._chunk_size = chunk_size self._decompressor = None # type: Optional[GzipDecompressor] def headers_received( self, start_line: Union[httputil.RequestStartLine, httputil.ResponseStartLine], headers: httputil.HTTPHeaders, ) -> Optional[Awaitable[None]]: if headers.get("Content-Encoding", "").lower() == "gzip": self._decompressor = GzipDecompressor() # Downstream delegates will only see uncompressed data, # so rename the content-encoding header. # (but note that curl_httpclient doesn't do this). headers.add("X-Consumed-Content-Encoding", headers["Content-Encoding"]) del headers["Content-Encoding"] return self._delegate.headers_received(start_line, headers) async def data_received(self, chunk: bytes) -> None: if self._decompressor: compressed_data = chunk while compressed_data: decompressed = self._decompressor.decompress( compressed_data, self._chunk_size ) if decompressed: ret = self._delegate.data_received(decompressed) if ret is not None: await ret compressed_data = self._decompressor.unconsumed_tail if compressed_data and not decompressed: raise httputil.HTTPInputError( "encountered unconsumed gzip data without making progress" ) else: ret = self._delegate.data_received(chunk) if ret is not None: await ret def finish(self) -> None: if self._decompressor is not None: tail = self._decompressor.flush() if tail: # The tail should always be empty: decompress returned # all that it can in data_received and the only # purpose of the flush call is to detect errors such # as truncated input. If we did legitimately get a new # chunk at this point we'd need to change the # interface to make finish() a coroutine. 
raise ValueError( "decompressor.flush returned data; possible truncated input" ) return self._delegate.finish() def on_connection_close(self) -> None: return self._delegate.on_connection_close()
_GzipMessageDelegate
python
encode__django-rest-framework
tests/authentication/test_authentication.py
{ "start": 902, "end": 988 }
class ____(TokenAuthentication): keyword = 'Bearer'
CustomKeywordTokenAuthentication
python
ray-project__ray
python/ray/util/placement_group.py
{ "start": 1344, "end": 20584 }
class ____: """A handle to a placement group.""" @staticmethod def empty() -> "PlacementGroup": return PlacementGroup(PlacementGroupID.nil()) def __init__( self, id: "ray._raylet.PlacementGroupID", bundle_cache: Optional[List[Dict]] = None, ): self.id = id self.bundle_cache = bundle_cache @property def is_empty(self): return self.id.is_nil() def ready(self) -> "ray._raylet.ObjectRef": """Returns an ObjectRef to check ready status. This API runs a small dummy task to wait for placement group creation. It is compatible to ray.get and ray.wait. Example: .. testcode:: import ray pg = ray.util.placement_group([{"CPU": 1}]) ray.get(pg.ready()) pg = ray.util.placement_group([{"CPU": 1}]) ray.wait([pg.ready()]) """ self._fill_bundle_cache_if_needed() _export_bundle_reservation_check_method_if_needed() assert len(self.bundle_cache) != 0, ( "ready() cannot be called on placement group object with a " "bundle length == 0, current bundle length: " f"{len(self.bundle_cache)}" ) return bundle_reservation_check.options( scheduling_strategy=PlacementGroupSchedulingStrategy(placement_group=self), ).remote(self) def wait(self, timeout_seconds: Union[float, int] = 30) -> bool: """Wait for the placement group to be ready within the specified time. Args: timeout_seconds(float|int): Timeout in seconds. Return: True if the placement group is created. False otherwise. 
""" return _call_placement_group_ready(self.id, timeout_seconds) @property def bundle_specs(self) -> List[Dict]: """List[Dict]: Return bundles belonging to this placement group.""" self._fill_bundle_cache_if_needed() return self.bundle_cache @property def bundle_count(self) -> int: self._fill_bundle_cache_if_needed() return len(self.bundle_cache) def _fill_bundle_cache_if_needed(self) -> None: if not self.bundle_cache: self.bundle_cache = _get_bundle_cache(self.id) def __eq__(self, other): if not isinstance(other, PlacementGroup): return False return self.id == other.id def __hash__(self): return hash(self.id) @client_mode_wrap def _call_placement_group_ready(pg_id: PlacementGroupID, timeout_seconds: int) -> bool: worker = ray._private.worker.global_worker worker.check_connected() return worker.core_worker.wait_placement_group_ready(pg_id, timeout_seconds) @client_mode_wrap def _get_bundle_cache(pg_id: PlacementGroupID) -> List[Dict]: worker = ray._private.worker.global_worker worker.check_connected() return list( ray._private.state.state.placement_group_table(pg_id)["bundles"].values() ) @PublicAPI @client_mode_wrap def placement_group( bundles: List[Dict[str, float]], strategy: str = "PACK", name: str = "", lifetime: Optional[str] = None, _soft_target_node_id: Optional[str] = None, bundle_label_selector: List[Dict[str, str]] = None, ) -> PlacementGroup: """Asynchronously creates a PlacementGroup. Args: bundles: A list of bundles which represent the resources requirements. strategy: The strategy to create the placement group. - "PACK": Packs Bundles into as few nodes as possible. - "SPREAD": Places Bundles across distinct nodes as even as possible. - "STRICT_PACK": Packs Bundles into one node. The group is not allowed to span multiple nodes. - "STRICT_SPREAD": Packs Bundles across distinct nodes. name: The name of the placement group. 
lifetime: Either `None`, which defaults to the placement group will fate share with its creator and will be deleted once its creator is dead, or "detached", which means the placement group will live as a global object independent of the creator. _soft_target_node_id: (Private, Experimental) Soft hint where bundles of this placement group should be placed. The target node is specified by it's hex ID. If the target node has no available resources or died, bundles can be placed elsewhere. This currently only works with STRICT_PACK pg. bundle_label_selector: A list of label selectors to apply to a placement group on a per-bundle level. Raises: ValueError: if bundle type is not a list. ValueError: if empty bundle or empty resource bundles are given. ValueError: if the wrong lifetime arguments are given. Return: PlacementGroup: Placement group object. """ worker = ray._private.worker.global_worker worker.check_connected() validate_placement_group( bundles=bundles, strategy=strategy, lifetime=lifetime, _soft_target_node_id=_soft_target_node_id, bundle_label_selector=bundle_label_selector, ) if bundle_label_selector is None: bundle_label_selector = [] if lifetime == "detached": detached = True else: detached = False placement_group_id = worker.core_worker.create_placement_group( name, bundles, strategy, detached, _soft_target_node_id, bundle_label_selector, ) return PlacementGroup(placement_group_id) @PublicAPI @client_mode_wrap def remove_placement_group(placement_group: PlacementGroup) -> None: """Asynchronously remove placement group. Args: placement_group: The placement group to delete. """ assert placement_group is not None worker = ray._private.worker.global_worker worker.check_connected() worker.core_worker.remove_placement_group(placement_group.id) @PublicAPI @client_mode_wrap def get_placement_group(placement_group_name: str) -> PlacementGroup: """Get a placement group object with a global name. Returns: None if can't find a placement group with the given name. 
The placement group object otherwise. """ if not placement_group_name: raise ValueError("Please supply a non-empty value to get_placement_group") worker = ray._private.worker.global_worker worker.check_connected() placement_group_info = ray._private.state.state.get_placement_group_by_name( placement_group_name, worker.namespace ) if placement_group_info is None: raise ValueError( f"Failed to look up placement group with name: {placement_group_name}" ) else: return PlacementGroup( PlacementGroupID(hex_to_binary(placement_group_info["placement_group_id"])) ) @DeveloperAPI @client_mode_wrap def placement_group_table(placement_group: PlacementGroup = None) -> dict: """Get the state of the placement group from GCS. Args: placement_group: placement group to see states. """ worker = ray._private.worker.global_worker worker.check_connected() placement_group_id = placement_group.id if (placement_group is not None) else None return ray._private.state.state.placement_group_table(placement_group_id) @PublicAPI def get_current_placement_group() -> Optional[PlacementGroup]: """Get the current placement group which a task or actor is using. It returns None if there's no current placement group for the worker. For example, if you call this method in your driver, it returns None (because drivers never belong to any placement group). Examples: .. testcode:: import ray from ray.util.placement_group import get_current_placement_group from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy @ray.remote def f(): # This returns the placement group the task f belongs to. # It means this pg is identical to the pg created below. return get_current_placement_group() pg = ray.util.placement_group([{"CPU": 2}]) assert ray.get(f.options( scheduling_strategy=PlacementGroupSchedulingStrategy( placement_group=pg)).remote()) == pg # Driver doesn't belong to any placement group, # so it returns None. 
assert get_current_placement_group() is None Return: PlacementGroup: Placement group object. None if the current task or actor wasn't created with any placement group. """ auto_init_ray() if client_mode_should_convert(): # Client mode is only a driver. return None worker = ray._private.worker.global_worker worker.check_connected() pg_id = worker.placement_group_id if pg_id.is_nil(): return None return PlacementGroup(pg_id) def check_placement_group_index( placement_group: PlacementGroup, bundle_index: int ) -> None: assert placement_group is not None if placement_group.id.is_nil(): if bundle_index != -1: raise ValueError( "If placement group is not set, " "the value of bundle index must be -1." ) elif bundle_index >= placement_group.bundle_count or bundle_index < -1: raise ValueError( f"placement group bundle index {bundle_index} " f"is invalid. Valid placement group indexes: " f"0-{placement_group.bundle_count}" ) def validate_placement_group( bundles: List[Dict[str, float]], strategy: str = "PACK", lifetime: Optional[str] = None, _soft_target_node_id: Optional[str] = None, bundle_label_selector: List[Dict[str, str]] = None, ) -> bool: """Validates inputs for placement_group. Raises ValueError if inputs are invalid. """ if _soft_target_node_id and strategy != "STRICT_PACK": raise ValueError( "_soft_target_node_id currently only works " f"with STRICT_PACK but got {strategy}" ) if _soft_target_node_id and ray.NodeID.from_hex(_soft_target_node_id).is_nil(): raise ValueError( f"Invalid hex ID of _soft_target_node_id, got {_soft_target_node_id}" ) _validate_bundles(bundles) if bundle_label_selector is not None: if len(bundles) != len(bundle_label_selector): raise ValueError( f"Invalid bundle label selector {bundle_label_selector}. " f"The length of `bundle_label_selector` should equal the length of `bundles`." 
) _validate_bundle_label_selector(bundle_label_selector) if strategy not in VALID_PLACEMENT_GROUP_STRATEGIES: raise ValueError( f"Invalid placement group strategy {strategy}. " f"Supported strategies are: {VALID_PLACEMENT_GROUP_STRATEGIES}." ) if lifetime not in [None, "detached"]: raise ValueError( "Placement group `lifetime` argument must be either `None` or " f"'detached'. Got {lifetime}." ) def _validate_bundles(bundles: List[Dict[str, float]]): """Validates each bundle and raises a ValueError if any bundle is invalid.""" if not isinstance(bundles, list): raise ValueError( "Placement group bundles must be a list, " f"got {type(bundles)}." ) if len(bundles) == 0: raise ValueError( "Bundles must be a non-empty list of resource " 'dictionaries. For example: `[{"CPU": 1.0}, {"GPU": 1.0}]`. ' "Got empty list instead." ) for bundle in bundles: if ( not isinstance(bundle, dict) or not all(isinstance(k, str) for k in bundle.keys()) or not all(isinstance(v, (int, float)) for v in bundle.values()) ): raise ValueError( "Bundles must be a non-empty list of " "resource dictionaries. For example: " '`[{"CPU": 1.0}, {"GPU": 1.0}]`.' ) if len(bundle) == 0 or all( resource_value == 0 for resource_value in bundle.values() ): raise ValueError( "Bundles cannot be an empty dictionary or " f"resources with only 0 values. Bundles: {bundles}" ) if "object_store_memory" in bundle.keys(): warnings.warn( "Setting 'object_store_memory' for" " bundles is deprecated since it doesn't actually" " reserve the required object store memory." 
f" Use object spilling that's enabled by default (https://docs.ray.io/en/{get_ray_doc_version()}/ray-core/objects/object-spilling.html) " # noqa: E501 "instead to bypass the object store memory size limitation.", DeprecationWarning, stacklevel=1, ) def _validate_bundle_label_selector(bundle_label_selector: List[Dict[str, str]]): """Validates each label selector and raises a ValueError if any label selector is invalid.""" if not isinstance(bundle_label_selector, list): raise ValueError( "Placement group bundle_label_selector must be a list, " f"got {type(bundle_label_selector)}." ) if len(bundle_label_selector) == 0: # No label selectors provided, no-op. return for label_selector in bundle_label_selector: if ( not isinstance(label_selector, dict) or not all(isinstance(k, str) for k in label_selector.keys()) or not all(isinstance(v, str) for v in label_selector.values()) ): raise ValueError( "Bundle label selector must be a list of string dictionary" " label selectors. For example: " '`[{ray.io/market_type": "spot"}, {"ray.io/accelerator-type": "A100"}]`.' ) # Call helper function to validate label selector key-value syntax. error_message = validate_label_selector(label_selector) if error_message: raise ValueError( f"Invalid label selector provided in bundle_label_selector list." f" Detailed error: '{error_message}'" ) def _valid_resource_shape(resources, bundle_specs): """ If the resource shape cannot fit into every bundle spec, return False """ for bundle in bundle_specs: fit_in_bundle = True for resource, requested_val in resources.items(): # Skip "bundle" resource as it is automatically added # to all nodes with bundles by the placement group. if resource == PLACEMENT_GROUP_BUNDLE_RESOURCE_NAME: continue if bundle.get(resource, 0) < requested_val: fit_in_bundle = False break if fit_in_bundle: # If resource request fits in any bundle, it is valid. 
return True return False def _validate_resource_shape( placement_group, resources, placement_resources, task_or_actor_repr ): bundles = placement_group.bundle_specs resources_valid = _valid_resource_shape(resources, bundles) placement_resources_valid = _valid_resource_shape(placement_resources, bundles) if not resources_valid: raise ValueError( f"Cannot schedule {task_or_actor_repr} with " "the placement group because the resource request " f"{resources} cannot fit into any bundles for " f"the placement group, {bundles}." ) if not placement_resources_valid: # Happens for the default actor case. # placement_resources is not an exposed concept to users, # so we should write more specialized error messages. raise ValueError( f"Cannot schedule {task_or_actor_repr} with " "the placement group because the actor requires " f"{placement_resources.get('CPU', 0)} CPU for " "creation, but it cannot " f"fit into any bundles for the placement group, " f"{bundles}. Consider " "creating a placement group with CPU resources." ) def _configure_placement_group_based_on_context( placement_group_capture_child_tasks: bool, bundle_index: int, resources: Dict, placement_resources: Dict, task_or_actor_repr: str, placement_group: Union[PlacementGroup, str, None] = "default", ) -> PlacementGroup: """Configure the placement group based on the given context. Based on the given context, this API returns the placement group instance for task/actor scheduling. Params: placement_group_capture_child_tasks: Whether or not the placement group needs to be captured from the global context. bundle_index: The bundle index for tasks/actor scheduling. resources: The scheduling resources. placement_resources: The scheduling placement resources for actors. task_or_actor_repr: The repr of task or actor function/class descriptor. placement_group: The placement group instance. - "default": Default placement group argument. 
Currently, the default behavior is to capture the parent task' placement group if placement_group_capture_child_tasks is set. - None: means placement group is explicitly not configured. - Placement group instance: In this case, do nothing. Returns: Placement group instance based on the given context. Raises: ValueError: If the bundle index is invalid for the placement group or the requested resources shape doesn't fit to any bundles. """ # Validate inputs. assert placement_group_capture_child_tasks is not None assert resources is not None # Validate and get the PlacementGroup instance. # Placement group could be None, default, or placement group. # Default behavior is "do not capture child tasks". if placement_group != "default": if not placement_group: placement_group = PlacementGroup.empty() elif placement_group == "default": if placement_group_capture_child_tasks: placement_group = get_current_placement_group() else: placement_group = PlacementGroup.empty() if not placement_group: placement_group = PlacementGroup.empty() assert isinstance(placement_group, PlacementGroup) # Validate the index. check_placement_group_index(placement_group, bundle_index) # Validate the shape. if not placement_group.is_empty: _validate_resource_shape( placement_group, resources, placement_resources, task_or_actor_repr ) return placement_group
PlacementGroup
python
Textualize__textual
src/textual/widgets/_markdown.py
{ "start": 16348, "end": 16684 }
class ____(Static): """Widget for table cells. A shim over a Static which responds to links. """ async def action_link(self, href: str) -> None: """Pass a link action on to the MarkdownTable parent.""" self.post_message(Markdown.LinkClicked(self.query_ancestor(Markdown), href))
MarkdownTableCellContents
python
huggingface__transformers
src/transformers/models/t5gemma/modeling_t5gemma.py
{ "start": 3875, "end": 10595 }
class ____(nn.Module): inv_freq: torch.Tensor # fix linting for `register_buffer` def __init__(self, config: T5GemmaConfig, device=None): super().__init__() self.max_seq_len_cached = config.max_position_embeddings self.original_max_seq_len = config.max_position_embeddings self.config = config self.rope_type = self.config.rope_parameters["rope_type"] rope_init_fn: Callable = self.compute_default_rope_parameters if self.rope_type != "default": rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type] inv_freq, self.attention_scaling = rope_init_fn(self.config, device) self.register_buffer("inv_freq", inv_freq, persistent=False) self.original_inv_freq = inv_freq @staticmethod def compute_default_rope_parameters( config: Optional[T5GemmaConfig] = None, device: Optional["torch.device"] = None, seq_len: Optional[int] = None, ) -> tuple["torch.Tensor", float]: """ Computes the inverse frequencies according to the original RoPE implementation Args: config ([`~transformers.PreTrainedConfig`]): The model configuration. device (`torch.device`): The device to use for initialization of the inverse frequencies. seq_len (`int`, *optional*): The current sequence length. Unused for this type of RoPE. Returns: Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE). """ base = config.rope_parameters["rope_theta"] dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads attention_factor = 1.0 # Unused in this type of RoPE # Compute the inverse frequencies inv_freq = 1.0 / ( base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim) ) return inv_freq, attention_factor @torch.no_grad() @dynamic_rope_update # power user: used with advanced RoPE types (e.g. 
dynamic rope) def forward(self, x, position_ids): inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device) position_ids_expanded = position_ids[:, None, :].float() device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu" with torch.autocast(device_type=device_type, enabled=False): # Force float32 freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) emb = torch.cat((freqs, freqs), dim=-1) cos = emb.cos() * self.attention_scaling sin = emb.sin() * self.attention_scaling return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2 :] return torch.cat((-x2, x1), dim=-1) @use_kernel_func_from_hub("rotary_pos_emb") def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): """Applies Rotary Position Embedding to the query and key tensors. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. position_ids (`torch.Tensor`, *optional*): Deprecated and unused. unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. 
Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. """ cos = cos.unsqueeze(unsqueeze_dim) sin = sin.unsqueeze(unsqueeze_dim) q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return q_embed, k_embed def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: """ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) """ batch, num_key_value_heads, slen, head_dim = hidden_states.shape if n_rep == 1: return hidden_states hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], dropout: float = 0.0, scaling: Optional[float] = None, softcap: Optional[float] = None, **kwargs, ) -> tuple[torch.Tensor, torch.Tensor]: if scaling is None: scaling = module.head_dim**-0.5 key_states = repeat_kv(key, module.num_key_value_groups) value_states = repeat_kv(value, module.num_key_value_groups) attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling if softcap is not None: attn_weights = attn_weights / softcap attn_weights = torch.tanh(attn_weights) attn_weights = attn_weights * softcap if attention_mask is not None: # no matter the length, we just slice it causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] attn_weights = attn_weights + causal_mask # upcast attention to fp32 attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) attn_output = torch.matmul(attn_weights, value_states) attn_output = 
attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights
T5GemmaRotaryEmbedding
python
spack__spack
lib/spack/spack/test/oci/mock_registry.py
{ "start": 2028, "end": 2768 }
class ____: def __init__(self, domain: str) -> None: # The domain of the server, e.g. "registry.example.com" self.domain = domain # List of (method, url) tuples self.requests: List[Tuple[str, str]] = [] # Dispatches requests to handlers self.router = Router() # Always install a request logger self.router.add_middleware(self.log_request) def handle(self, req: Request) -> MockHTTPResponse: return self.router.handle(req) def log_request(self, req: Request): path = urllib.parse.urlparse(req.full_url).path self.requests.append((req.get_method(), path)) return req def clear_log(self): self.requests = []
DummyServer
python
huggingface__transformers
src/transformers/cache_utils.py
{ "start": 2642, "end": 6016 }
class ____(CacheLayerMixin): """ A cache layer that grows dynamically as more tokens are generated. This is the default for generative models. It stores the key and value states as tensors of shape `[batch_size, num_heads, seq_len, head_dim]`. """ is_sliding = False def lazy_initialization(self, key_states: torch.Tensor): self.dtype, self.device = key_states.dtype, key_states.device self.keys = torch.tensor([], dtype=self.dtype, device=self.device) self.values = torch.tensor([], dtype=self.dtype, device=self.device) self.is_initialized = True def update( self, key_states: torch.Tensor, value_states: torch.Tensor, cache_kwargs: Optional[dict[str, Any]] = None, ) -> tuple[torch.Tensor, torch.Tensor]: """ Update the key and value caches in-place, and return the necessary keys and value states. Args: key_states (`torch.Tensor`): The new key states to cache. value_states (`torch.Tensor`): The new value states to cache. cache_kwargs (`dict[str, Any]`, *optional*): Additional arguments for the cache. Returns: tuple[`torch.Tensor`, `torch.Tensor`]: The key and value states. """ # Lazy initialization if not self.is_initialized: self.lazy_initialization(key_states) self.keys = torch.cat([self.keys, key_states], dim=-2) self.values = torch.cat([self.values, value_states], dim=-2) return self.keys, self.values def get_mask_sizes(self, cache_position: torch.Tensor) -> tuple[int, int]: """Return the length and offset of the cache, used to generate the mask""" kv_offset = 0 query_length = cache_position.shape[0] kv_length = self.get_seq_length() + query_length return kv_length, kv_offset def get_seq_length(self) -> int: """Returns the sequence length of the cached states.""" if not self.is_initialized or self.keys.numel() == 0: return 0 return self.keys.shape[-2] def get_max_cache_shape(self) -> int: """Returns the maximum sequence length of the cache object. 
DynamicLayer does not have a maximum length.""" return -1 def crop(self, max_length: int) -> None: """ Crop the past key values up to a new `max_length` in terms of tokens. `max_length` can also be negative to remove `max_length` tokens. """ if max_length < 0: max_length = self.get_seq_length() - abs(max_length) if self.get_seq_length() <= max_length: return self.keys = self.keys[..., :max_length, :] self.values = self.values[..., :max_length, :] def batch_repeat_interleave(self, repeats: int) -> None: """Repeat the cache `repeats` times in the batch dimension.""" if self.get_seq_length() > 0: self.keys = self.keys.repeat_interleave(repeats, dim=0) self.values = self.values.repeat_interleave(repeats, dim=0) def batch_select_indices(self, indices: torch.Tensor) -> None: """Only keep the `indices` in the batch dimension of the cache.""" if self.get_seq_length() > 0: self.keys = self.keys[indices, ...] self.values = self.values[indices, ...]
DynamicLayer
python
numba__numba
numba/tests/test_lists.py
{ "start": 30277, "end": 37227 }
class ____(ManagedListTestCase): def compile_and_test(self, pyfunc, *args): from copy import deepcopy expect_args = deepcopy(args) expect = pyfunc(*expect_args) njit_args = deepcopy(args) cfunc = jit(nopython=True)(pyfunc) got = cfunc(*njit_args) self.assert_list_element_precise_equal( expect=expect, got=got ) # Check reflection self.assert_list_element_precise_equal( expect=expect_args, got=njit_args ) def test_returning_list_of_list(self): def pyfunc(): a = [[np.arange(i)] for i in range(4)] return a self.compile_and_test(pyfunc) @expect_reflection_failure def test_heterogeneous_list_error(self): def pyfunc(x): return x[1] cfunc = jit(nopython=True)(pyfunc) l2 = [[np.zeros(i) for i in range(5)], [np.ones(i)+1j for i in range(5)]] l3 = [[np.zeros(i) for i in range(5)], [(1,)]] l4 = [[1], [{1}]] l5 = [[1], [{'a': 1}]] # TODO: this triggers a reflection error. # Remove this line when nested reflection is supported cfunc(l2) # error_cases with self.assertRaises(TypeError) as raises: cfunc(l2) self.assertIn( ("reflected list(array(float64, 1d, C)) != " "reflected list(array(complex128, 1d, C))"), str(raises.exception) ) with self.assertRaises(TypeError) as raises: cfunc(l3) self.assertIn( ("reflected list(array(float64, 1d, C)) != " "reflected list((int64 x 1))"), str(raises.exception) ) with self.assertRaises(TypeError) as raises: cfunc(l4) self.assertIn( "reflected list(int64) != reflected list(reflected set(int64))", str(raises.exception) ) with self.assertRaises(ValueError) as raises: cfunc(l5) self.assertIn( "Cannot type list element of <class 'dict'>", str(raises.exception) ) @expect_reflection_failure def test_list_of_list_reflected(self): def pyfunc(l1, l2): l1.append(l2) l1[-1].append(123) cfunc = jit(nopython=True)(pyfunc) l1 = [[0, 1], [2, 3]] l2 = [4, 5] expect = list(l1), list(l2) got = list(l1), list(l2) pyfunc(*expect) cfunc(*got) self.assertEqual(expect, got) @expect_reflection_failure def test_heterogeneous_list(self): def pyfunc(x): return x[1] l1 = 
[[np.zeros(i) for i in range(5)], [np.ones(i) for i in range(5)]] cfunc = jit(nopython=True)(pyfunc) l1_got = cfunc(l1) self.assertPreciseEqual(pyfunc(l1), l1_got) @expect_reflection_failure def test_c01(self): def bar(x): return x.pop() r = [[np.zeros(0)], [np.zeros(10)*1j]] # TODO: this triggers a reflection error. # Remove this line when nested reflection is supported self.compile_and_test(bar, r) with self.assertRaises(TypeError) as raises: self.compile_and_test(bar, r) self.assertIn( ("reflected list(array(float64, 1d, C)) != " "reflected list(array(complex128, 1d, C))"), str(raises.exception), ) def test_c02(self): def bar(x): x.append(x) return x r = [[np.zeros(0)]] with self.assertRaises(errors.TypingError) as raises: self.compile_and_test(bar, r) self.assertIn( "Invalid use of BoundFunction(list.append", str(raises.exception), ) def test_c03(self): def bar(x): f = x f[0] = 1 return f r = [[np.arange(3)]] with self.assertRaises(errors.TypingError) as raises: self.compile_and_test(bar, r) self.assertIn( "invalid setitem with value of {} to element of {}".format( typeof(1), typeof(r[0]), ), str(raises.exception), ) def test_c04(self): def bar(x): f = x f[0][0] = 10 return f r = [[np.arange(3)]] with self.assertRaises(errors.TypingError) as raises: self.compile_and_test(bar, r) self.assertIn( "invalid setitem with value of {} to element of {}".format( typeof(10), typeof(r[0][0]), ), str(raises.exception), ) @expect_reflection_failure def test_c05(self): def bar(x): f = x f[0][0] = np.array([x for x in np.arange(10).astype(np.intp)]) return f r = [[np.arange(3).astype(np.intp)]] self.compile_and_test(bar, r) def test_c06(self): def bar(x): f = x f[0][0] = np.array([x + 1j for x in np.arange(10)]) return f r = [[np.arange(3)]] with self.assertRaises(errors.TypingError) as raises: self.compile_and_test(bar, r) self.assertIn("invalid setitem with value", str(raises.exception)) @expect_reflection_failure def test_c07(self): self.disable_leak_check() def bar(x): 
return x[-7] r = [[np.arange(3)]] cfunc = jit(nopython=True)(bar) with self.assertRaises(IndexError) as raises: cfunc(r) self.assertIn("getitem out of range", str(raises.exception)) def test_c08(self): self.disable_leak_check() def bar(x): x[5] = 7 return x r = [1, 2, 3] cfunc = jit(nopython=True)(bar) with self.assertRaises(IndexError) as raises: cfunc(r) self.assertIn("setitem out of range", str(raises.exception)) def test_c09(self): def bar(x): x[-2] = 7j return x r = [1, 2, 3] with self.assertRaises(errors.TypingError) as raises: self.compile_and_test(bar, r) self.assertIn("invalid setitem with value", str(raises.exception)) @expect_reflection_failure def test_c10(self): def bar(x): x[0], x[1] = x[1], x[0] return x r = [[1, 2, 3], [4, 5, 6]] self.compile_and_test(bar, r) @expect_reflection_failure def test_c11(self): def bar(x): x[:] = x[::-1] return x r = [[1, 2, 3], [4, 5, 6]] self.compile_and_test(bar, r) def test_c12(self): def bar(x): del x[-1] return x r = [x for x in range(10)] self.compile_and_test(bar, r)
TestListOfList
python
pytorch__pytorch
torch/_inductor/augmented_graph_helper.py
{ "start": 150, "end": 7057 }
class ____: """ Graph helper that augments the original graph with additional dependencies and uses, plus tracks node equivalences for coalescing. TODO: if this becomes too large of compile time, consider binding graphcycles.cc """ def __init__( self, graph: fx.Graph, node_ancestors: Optional[dict[fx.Node, OrderedSet[fx.Node]]] = None, ): # Each node starts in its own singleton set self.graph = graph self.merge_sets = {node: OrderedSet([node]) for node in graph.nodes} # Extra dependencies: node depends on dep (dep must come before node) self.extra_deps: dict[fx.Node, OrderedSet[fx.Node]] = defaultdict(OrderedSet) # Extra uses: reverse of extra_deps (node is used by user) self.extra_uses: dict[fx.Node, OrderedSet[fx.Node]] = defaultdict(OrderedSet) # Note: only reflect original ancestors, not maintained through additional deps # or merge sets self.node_ancestors = node_ancestors def add_extra_dep(self, *, n: fx.Node, dep: fx.Node) -> None: """Add extra dependency: node depends on dep.""" self.extra_deps[n].add(dep) self.extra_uses[dep].add(n) def remove_extra_dep(self, *, n: fx.Node, dep: fx.Node) -> None: if dep in self.extra_deps[n]: self.extra_deps[n].discard(dep) self.extra_uses[dep].discard(n) def merge_to_set(self, existing_node: fx.Node, new_node: fx.Node) -> None: """ Merge new_node into existing_node's set. The new node must be a singleton set. 
""" existing_set = self.merge_sets[existing_node] new_set = self.merge_sets[new_node] assert len(new_set) == 1 # Add all nodes from new_set to existing_set existing_set.update(new_set) # Update all nodes from new_set to point to existing_set for node in new_set: self.merge_sets[node] = existing_set def unmerge_node(self, node: fx.Node) -> None: """Remove a node from its merge set, making it singleton.""" old_set = self.merge_sets[node] # If already singleton, nothing to do if len(old_set) == 1: return # Remove from old set old_set.remove(node) # Make node singleton self.merge_sets[node] = OrderedSet([node]) def get_merged_deps(self, node: fx.Node) -> OrderedSet[fx.Node]: """ Get all dependencies of a node considering merges and extra deps. Combines: 1. Direct deps (all_input_nodes) of node and its merge equivalents 2. Extra deps of node and its merge equivalents """ deps: OrderedSet[fx.Node] = OrderedSet() # For each node in the merge set for merged_node in self.merge_sets[node]: # Add direct dependencies from all_input_nodes deps.update(merged_node.all_input_nodes) # Add extra dependencies deps.update(self.extra_deps[merged_node]) return deps def has_cycle(self) -> bool: merged_deps = {n: self.get_merged_deps(n) for n in self.graph.nodes} return torch._dynamo.graph_deduplication._has_cycle(self.graph, merged_deps) def has_path(self, source: fx.Node, target: fx.Node) -> bool: """Check if there's a path from source to target.""" # we should not be checking path from node to itself assert self.merge_sets[source] is not self.merge_sets[target] # search backwards from target to source visited: OrderedSet[fx.Node] = OrderedSet() queue = [target] visited.add(target) while queue: current = queue.pop() for dep in self.get_merged_deps(current): # Check if we reached source or its equivalent if dep in self.merge_sets[source]: return True if dep in visited: continue # We are searching from target, so this node is necessarily an ancestor # of target. 
# If dep is an ancestor of source, any path through dep to source would imply a cycle if self.node_ancestors: source_set = self.merge_sets[source] is_ancestor_of_source = any( dep in self.node_ancestors[s] for s in source_set ) # Add to visited to avoid recomputing this check if we see dep again if is_ancestor_of_source: visited.add(dep) continue visited.add(dep) queue.append(dep) return False def transfer_erased_node_deps(self, erased_to_new: dict[fx.Node, fx.Node]) -> None: """ Transfer all extra dependencies from erased nodes to their replacements, handling cross-dependencies between erased nodes correctly. """ erased_merge_sets: dict[fx.Node, fx.Node] = {} for replaced, new in erased_to_new.items(): for equiv in self.merge_sets[replaced]: erased_merge_sets[equiv] = new # Transfer dependencies for old_node, new_node in erased_merge_sets.items(): # Transfer dependencies FROM old_node (what old_node depended on) for extra_dep in self.extra_deps[old_node]: # Redirect if dep is also being erased updated_dep = erased_merge_sets.get(extra_dep, extra_dep) self.extra_deps[new_node].add(updated_dep) self.extra_uses[updated_dep].discard(old_node) self.extra_uses[updated_dep].add(new_node) # Transfer dependencies TO old_node (what depended on old_node) for extra_use in self.extra_uses[old_node]: # Redirect if this user is also being erased updated_use = erased_merge_sets.get(extra_use, extra_use) # Update the user's deps to point to new_node self.extra_deps[updated_use].discard(old_node) self.extra_deps[updated_use].add(new_node) self.extra_uses[new_node].add(updated_use) # Clean up erased nodes for old_node in erased_merge_sets: self.extra_deps[old_node].clear() self.extra_uses[old_node].clear() del self.merge_sets[old_node] def get_all_extra_deps(self) -> dict[fx.Node, OrderedSet[fx.Node]]: """ Get all extra dependencies in a format suitable for topological sort. Returns a copy to avoid external modifications. 
""" return { node: OrderedSet(deps) for node, deps in self.extra_deps.items() if deps # Only include nodes with non-empty deps }
AugmentedGraphHelper
python
google__jax
tests/compilation_cache_test.py
{ "start": 1918, "end": 2667 }
class ____(CacheInterface): """An in-memory cache for testing purposes.""" # not used, but required by `CacheInterface` _path = pathlib.Path() def __init__(self): self._cache: dict[str, bytes] = {} def get(self, key: str) -> bytes | None: return self._cache.get(key) def put(self, key: str, value: bytes) -> None: self._cache[key] = value def clear(self) -> None: self._cache.clear() def __len__(self) -> int: return len(self._cache) def count_cache_items() -> int: return 0 if cc._cache is None else len(cc._cache) def clear_cache() -> None: if cc._cache is not None: cc._cache.clear() @jtu.thread_unsafe_test_class() # mocking isn't thread-safe
InMemoryCache
python
mlflow__mlflow
mlflow/entities/trace_location.py
{ "start": 1455, "end": 2289 }
class ____(TraceLocationBase): """ Represents the location of a Databricks inference table. Args: full_table_name: The fully qualified name of the inference table where the trace is stored, in the format of `<catalog>.<schema>.<table>`. """ full_table_name: str def to_proto(self): return pb.TraceLocation.InferenceTableLocation(full_table_name=self.full_table_name) @classmethod def from_proto(cls, proto) -> "InferenceTableLocation": return cls(full_table_name=proto.full_table_name) def to_dict(self) -> dict[str, Any]: return {"full_table_name": self.full_table_name} @classmethod def from_dict(cls, d: dict[str, Any]) -> "InferenceTableLocation": return cls(full_table_name=d["full_table_name"]) @dataclass
InferenceTableLocation
python
Lightning-AI__lightning
src/lightning/pytorch/plugins/precision/precision.py
{ "start": 1174, "end": 7399 }
class ____(FabricPrecision, CheckpointHooks): """Base class for all plugins handling the precision-specific parts of the training. The class attribute precision must be overwritten in child classes. The default value reflects fp32 training. """ def connect( self, model: Module, optimizers: list[Optimizer], lr_schedulers: list[Any] ) -> tuple[Module, list[Optimizer], list[Any]]: """Connects this plugin to the accelerator and the training process.""" return model, optimizers, lr_schedulers @override def pre_backward(self, tensor: Tensor, module: "pl.LightningModule") -> Tensor: # type: ignore[override] trainer = module.trainer call._call_callback_hooks(trainer, "on_before_backward", tensor) call._call_lightning_module_hook(trainer, "on_before_backward", tensor) return tensor @override def backward( # type: ignore[override] self, tensor: Tensor, model: "pl.LightningModule", optimizer: Optional[Steppable], *args: Any, **kwargs: Any, ) -> None: r"""Performs the actual backpropagation. Args: tensor: the loss value obtained from the closure model: the model to be optimized optimizer: current optimizer being used. ``None`` if using manual optimization \*args: Positional arguments intended for the actual function that performs the backward, like :meth:`~torch.Tensor.backward`. \**kwargs: Keyword arguments for the same purpose as ``*args``. 
""" model.backward(tensor, *args, **kwargs) @override def post_backward(self, tensor: Tensor, module: "pl.LightningModule") -> Tensor: # type: ignore[override] # once backward has been applied, release graph closure_loss = tensor.detach() trainer = module.trainer call._call_callback_hooks(trainer, "on_after_backward") call._call_lightning_module_hook(trainer, "on_after_backward") return closure_loss def _after_closure(self, model: "pl.LightningModule", optimizer: Steppable) -> None: """Utility to share some code after the closure has been run.""" trainer = model.trainer call._call_callback_hooks(trainer, "on_before_optimizer_step", optimizer) call._call_lightning_module_hook(trainer, "on_before_optimizer_step", optimizer) self._clip_gradients( model, optimizer, trainer.gradient_clip_val, gradient_clip_algorithm=trainer.gradient_clip_algorithm, ) def _wrap_closure( self, model: "pl.LightningModule", optimizer: Steppable, closure: Callable[[], Any], ) -> Any: """This double-closure allows makes sure the ``closure`` is executed before the ``on_before_optimizer_step`` hook is called. The closure (generally) runs ``backward`` so this allows inspecting gradients in this hook. This structure is consistent with the ``Precision`` subclasses that cannot pass ``optimizer.step(closure)`` directly. 
""" closure_result = closure() self._after_closure(model, optimizer) return closure_result @override def optimizer_step( # type: ignore[override] self, optimizer: Steppable, model: "pl.LightningModule", closure: Callable[[], Any], **kwargs: Any, ) -> Any: """Hook to run the optimizer step.""" closure = partial(self._wrap_closure, model, optimizer, closure) return optimizer.step(closure=closure, **kwargs) def _clip_gradients( self, model: Union["pl.LightningModule", Module], optimizer: Steppable, clip_val: Optional[Union[int, float]] = None, gradient_clip_algorithm: Optional[GradClipAlgorithmType] = None, ) -> None: if not isinstance(model, pl.LightningModule) or not model.automatic_optimization: # the configuration validator disallows clipping on manual return call._call_lightning_module_hook( model.trainer, "configure_gradient_clipping", optimizer, gradient_clip_val=clip_val, gradient_clip_algorithm=gradient_clip_algorithm, ) def clip_gradients( self, optimizer: Optimizer, clip_val: Union[int, float] = 0.0, gradient_clip_algorithm: GradClipAlgorithmType = GradClipAlgorithmType.NORM, ) -> None: """Clips the gradients.""" if clip_val <= 0: return if gradient_clip_algorithm == GradClipAlgorithmType.VALUE: self.clip_grad_by_value(optimizer, clip_val) elif gradient_clip_algorithm == GradClipAlgorithmType.NORM: self.clip_grad_by_norm(optimizer, clip_val) def clip_grad_by_value(self, optimizer: Optimizer, clip_val: Union[int, float]) -> None: """Clip gradients by value.""" parameters = self.main_params(optimizer) torch.nn.utils.clip_grad_value_(parameters, clip_value=clip_val) def clip_grad_by_norm(self, optimizer: Optimizer, clip_val: Union[int, float]) -> None: """Clip gradients by norm.""" parameters = self.main_params(optimizer) torch.nn.utils.clip_grad_norm_(parameters, clip_val) @contextlib.contextmanager def train_step_context(self) -> Generator[None, None, None]: """A contextmanager for the training step.""" with self.forward_context(): yield 
@contextlib.contextmanager def val_step_context(self) -> Generator[None, None, None]: """A contextmanager for the validation step.""" with self.forward_context(): yield @contextlib.contextmanager def test_step_context(self) -> Generator[None, None, None]: """A contextmanager for the test step.""" with self.forward_context(): yield @contextlib.contextmanager def predict_step_context(self) -> Generator[None, None, None]: """A contextmanager for the predict step.""" with self.forward_context(): yield
Precision
python
pytorch__pytorch
test/test_fx_passes.py
{ "start": 20536, "end": 21124 }
class ____: @staticmethod def forward(x): x = x + 1 # target subgraph to match x = x.relu() y = x.sigmoid() y1 = x.sigmoid() return y, y1 @staticmethod def pattern(a): a = a.relu() b = a.sigmoid() b1 = a.sigmoid() return b, b1 test_cases = [ # match_output, match_placeholder, num_matches # (False, False, 2), # FIXME: currently still matches to 2, should fix to 1 TestCase(True, False, 1), TestCase(False, True, 0), ]
MultipleOutputsIdenticalAnchor
python
django-haystack__django-haystack
test_haystack/elasticsearch_tests/test_elasticsearch_query.py
{ "start": 8719, "end": 9810 }
class ____(TestCase): def setUp(self): super().setUp() self.backend = connections["elasticsearch"].get_backend() self._elasticsearch_version = elasticsearch.VERSION elasticsearch.VERSION = (0, 9, 9) def tearDown(self): elasticsearch.VERSION = self._elasticsearch_version def test_build_query_with_dwithin_range(self): """ Test build_search_kwargs with dwithin range for Elasticsearch versions < 1.0.0 """ from django.contrib.gis.geos import Point search_kwargs = self.backend.build_search_kwargs( "where", dwithin={ "field": "location_field", "point": Point(1.2345678, 2.3456789), "distance": D(m=500), }, ) self.assertEqual( search_kwargs["query"]["filtered"]["filter"]["bool"]["must"][1][ "geo_distance" ], {"distance": 0.5, "location_field": {"lat": 2.3456789, "lon": 1.2345678}}, )
ElasticsearchSearchQuerySpatialBeforeReleaseTestCase
python
astropy__astropy
astropy/convolution/kernels.py
{ "start": 27652, "end": 29680 }
class ____(Kernel1D): """ Create kernel from 1D model. The model has to be centered on x = 0. Parameters ---------- model : `~astropy.modeling.Fittable1DModel` Kernel response function model x_size : int, optional Size in x direction of the kernel array. Default = ⌊8*width +1⌋. Must be odd. mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional One of the following discretization modes: * 'center' (default) Discretize model by taking the value at the center of the bin. * 'linear_interp' Discretize model by linearly interpolating between the values at the corners of the bin. * 'oversample' Discretize model by taking the average on an oversampled grid. * 'integrate' Discretize model by integrating the model over the bin. factor : number, optional Factor of oversampling. Default factor = 10. Raises ------ TypeError If model is not an instance of `~astropy.modeling.Fittable1DModel` See Also -------- Model2DKernel : Create kernel from `~astropy.modeling.Fittable2DModel` CustomKernel : Create kernel from list or array Examples -------- Define a Gaussian1D model: >>> from astropy.modeling.models import Gaussian1D >>> from astropy.convolution.kernels import Model1DKernel >>> gauss = Gaussian1D(1, 0, 2) And create a custom one dimensional kernel from it: >>> gauss_kernel = Model1DKernel(gauss, x_size=9) This kernel can now be used like a usual Astropy kernel. """ _separable = False _is_bool = False def __init__(self, model, **kwargs): if isinstance(model, Fittable1DModel): self._model = model else: raise TypeError("Must be Fittable1DModel") super().__init__(**kwargs)
Model1DKernel
python
openai__openai-python
src/openai/resources/responses/input_items.py
{ "start": 4328, "end": 7816 }
class ____(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncInputItemsWithRawResponse: """ This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers """ return AsyncInputItemsWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncInputItemsWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. For more information, see https://www.github.com/openai/openai-python#with_streaming_response """ return AsyncInputItemsWithStreamingResponse(self) def list( self, response_id: str, *, after: str | Omit = omit, include: List[ResponseIncludable] | Omit = omit, limit: int | Omit = omit, order: Literal["asc", "desc"] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AsyncPaginator[ResponseItem, AsyncCursorPage[ResponseItem]]: """ Returns a list of input items for a given response. Args: after: An item ID to list items after, used in pagination. include: Additional fields to include in the response. See the `include` parameter for Response creation above for more information. limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. order: The order to return the input items in. Default is `desc`. - `asc`: Return the input items in ascending order. - `desc`: Return the input items in descending order. 
extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not response_id: raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}") return self._get_api_list( f"/responses/{response_id}/input_items", page=AsyncCursorPage[ResponseItem], options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout, query=maybe_transform( { "after": after, "include": include, "limit": limit, "order": order, }, input_item_list_params.InputItemListParams, ), ), model=cast(Any, ResponseItem), # Union types cannot be passed in as arguments in the type system )
AsyncInputItems
python
kamyu104__LeetCode-Solutions
Python/minimum-division-operations-to-make-array-non-decreasing.py
{ "start": 483, "end": 893 }
class ____(object): def minOperations(self, nums): """ :type nums: List[int] :rtype: int """ result = 0 for i in reversed(xrange(len(nums)-1)): if nums[i] <= nums[i+1]: continue if SPF[nums[i]] > nums[i+1]: return -1 nums[i] = SPF[nums[i]] result += 1 return result
Solution
python
dagster-io__dagster
python_modules/dagster/dagster/components/resolved/core_models.py
{ "start": 9042, "end": 9235 }
class ____(SharedAssetKwargs): """The attributes of an AssetSpec that can be updated after the AssetsDefinition is created, done via map_asset_specs. """ @record
AssetsDefUpdateKwargs
python
gevent__gevent
src/gevent/tests/test__greenlet.py
{ "start": 7007, "end": 7086 }
class ____(TestReturn_link): link_method = 'link_value'
TestReturn_link_value
python
spyder-ide__spyder
spyder/plugins/outlineexplorer/widgets.py
{ "start": 2209, "end": 5730 }
class ____: def __init__(self, name, kind, position, path, node=None): self.name = name self.position = position self.kind = kind self.node = node self.path = path self.id = str(uuid.uuid4()) self.index = None self.children = [] self.status = False self.selected = False self.parent = None def delete(self): for child in self.children: child.parent = None self.children = [] self.node.takeChildren() if self.parent is not None: self.parent.remove_node(self) self.parent = None if ( self.node.parent is not None and hasattr(self.node.parent, 'remove_children') ): self.node.parent.remove_children(self.node) def add_node(self, node): if node.position == self.position: # The nodes should be at the same level self.parent.add_node(node) else: node.parent = self node.path = self.path this_node = self.node children_ranges = [c.position[0] for c in self.children] node_range = node.position[0] new_index = bisect.bisect_left(children_ranges, node_range) node.index = new_index for child in self.children[new_index:]: child.index += 1 this_node.append_children(new_index, node.node) self.children.insert(new_index, node) for idx, next_idx in zip(self.children, self.children[1:]): assert idx.index < next_idx.index def remove_node(self, node): for child in self.children[node.index + 1:]: child.index -= 1 self.children.pop(node.index) for idx, next_idx in zip(self.children, self.children[1:]): assert idx.index < next_idx.index def clone_node(self, node): self.id = node.id self.index = node.index self.path = node.path self.children = node.children self.status = node.status self.selected = node.selected self.node = node.node self.parent = node.parent self.node.update_info(self.name, self.kind, self.position[0] + 1, self.status, self.selected) self.node.ref = self for child in self.children: child.parent = self if self.parent is not None: self.parent.replace_node(self.index, self) def refresh(self): self.node.update_info(self.name, self.kind, self.position[0] + 1, self.status, self.selected) 
def replace_node(self, index, node): self.children[index] = node def create_node(self): self.node = SymbolItem(None, self, self.name, self.kind, self.position[0] + 1, self.status, self.selected) def set_path(self, new_path): self.name = new_path self.path = new_path def __repr__(self): return str(self) def __str__(self): return '({0}, {1}, {2}, {3})'.format( self.position, self.name, self.id, self.status) def __eq__(self, other): return ( self.name == other.name and self.kind == other.kind and self.position == other.position ) # ---- Items # -----------------------------------------------------------------------------
SymbolStatus
python
kamyu104__LeetCode-Solutions
Python/minimum-window-substring.py
{ "start": 91, "end": 772 }
class ____(object): def minWindow(self, s, t): """ :type s: str :type t: str :rtype: str """ count, remain = collections.Counter(t), len(t) i, left, right = 0, -1, -1 for j, c in enumerate(s): remain -= count[c] > 0 count[c] -= 1 if remain: continue while count[s[i]] < 0: # greedily discard uneeds count[s[i]] += 1 i += 1 if right == -1 or j-i+1 < right-left+1: left, right = i, j return s[left:right+1] # Time: O(n) # Space: O(k), k is the number of different characters
Solution
python
paramiko__paramiko
paramiko/channel.py
{ "start": 48989, "end": 49222 }
class ____(ChannelFile): """ A file-like wrapper around `.Channel` stdin. See `Channel.makefile_stdin` for details. """ def close(self): super().close() self.channel.shutdown_write()
ChannelStdinFile
python
streamlit__streamlit
lib/tests/streamlit/watcher/folder_black_list_test.py
{ "start": 775, "end": 2192 }
class ____(unittest.TestCase): def test_do_blacklist(self): """ miniconda, anaconda, and .*/ folders should be blacklisted. """ folder_black_list = FolderBlackList([]) is_blacklisted = folder_black_list.is_blacklisted assert is_blacklisted("/foo/miniconda2/script.py") assert is_blacklisted("/foo/miniconda3/script.py") assert is_blacklisted("/foo/anaconda2/script.py") assert is_blacklisted("/foo/anaconda3/script.py") assert is_blacklisted("/foo/.virtualenv/script.py") assert is_blacklisted("/foo/.venv/script.py") assert is_blacklisted("/foo/.random_hidden_folder/script.py") def test_do_blacklist_user_configured_folders(self): """ Files inside user configured folders should be blacklisted. """ folder_black_list = FolderBlackList(["/bar/some_folder"]) is_blacklisted = folder_black_list.is_blacklisted assert is_blacklisted("/bar/some_folder/script.py") def test_do_not_blacklist(self): """ Ensure we're not accidentally blacklisting things we shouldn't be. """ folder_black_list = FolderBlackList([]) is_blacklisted = folder_black_list.is_blacklisted assert not is_blacklisted("/foo/not_blacklisted/script.py") assert not is_blacklisted("/foo/not_blacklisted/.hidden_script.py")
FileIsInFolderTest
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 92462, "end": 92897 }
class ____(sgqlc.types.Enum): """The repository's visibility level. Enumeration Choices: * `INTERNAL`: The repository is visible only to users in the same business. * `PRIVATE`: The repository is visible only to those with explicit access. * `PUBLIC`: The repository is visible to everyone. """ __schema__ = github_schema __choices__ = ("INTERNAL", "PRIVATE", "PUBLIC")
RepositoryVisibility