language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
fluentpython__example-code-2e
14-inheritance/strkeydict_dictsub.py
{ "start": 1243, "end": 2162 }
class ____(dict):
    """A ``dict`` subclass that normalizes every key to ``str``.

    Keys are converted with ``str`` when stored; lookups with a
    non-string key transparently retry with the string form, so
    ``d[1]`` and ``d['1']`` address the same entry.
    """

    def __init__(self, iterable=None, **kwds):
        super().__init__()
        self.update(iterable, **kwds)

    def __missing__(self, key):
        # A string key that is absent is a genuine miss; any other
        # key type gets one retry after conversion to str.
        if isinstance(key, str):
            raise KeyError(key)
        return self[str(key)]

    def __contains__(self, key):
        stored = self.keys()
        return key in stored or str(key) in stored

    def __setitem__(self, key, item):
        # Normalize on write so all stored keys are strings.
        super().__setitem__(str(key), item)

    def get(self, key, default=None):
        # Delegate to __getitem__/__missing__ and fall back on a miss.
        try:
            return self[key]
        except KeyError:
            return default

    def update(self, iterable=None, **kwds):
        if iterable is not None:
            # Duck typing: mappings expose .items(), anything else is
            # assumed to be an iterable of (key, value) pairs.
            pairs = iterable.items() if hasattr(iterable, "items") else iterable
            for key, value in pairs:
                self[key] = value
        if kwds:
            self.update(kwds)
StrKeyDict
python
huggingface__transformers
src/transformers/models/ernie4_5_moe/configuration_ernie4_5_moe.py
{ "start": 870, "end": 10535 }
class ____(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Ernie4_5_MoeModel`]. It is used to instantiate a
    Ernie 4.5 MoE model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of
    [baidu/ERNIE-4.5-21B-A3B-PT](https://huggingface.co/baidu/ERNIE-4.5-21B-A3B-PT).

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 103424):
            Vocabulary size of the Ernie 4.5 MoE model. Defines the number of different tokens that can be represented
            by the `inputs_ids` passed when calling [`Ernie4_5_MoeModel`]
        pad_token_id (`int`, *optional*, defaults to 0):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 1):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 2):
            End of stream token id.
        hidden_size (`int`, *optional*, defaults to 2560):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 12288):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 28):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 20):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_key_value_heads (`int`, *optional*, defaults to 4):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details, check out [this
            paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `4`.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 131072):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        tie_word_embeddings (`bool`, *optional*, defaults to `True`):
            Whether the model's input and output word embeddings should be tied.
        rope_parameters (`RopeParameters`, *optional*):
            Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
            a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE with
            longer `max_position_embeddings`.
        use_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in any of the projections including mlp and attention for example.
        moe_intermediate_size (`int`, *optional*, defaults to 1536):
            Intermediate size of the routed expert.
        moe_k (`int`, *optional*, defaults to 6):
            Number of selected experts.
        moe_num_experts (`int`, *optional*, defaults to 64):
            Number of routed experts.
        moe_num_shared_experts (`int`, *optional*, defaults to 2):
            The number of experts that are shared for all MoE forwards.
        moe_layer_start_index (`int`, *optional*, defaults to 1):
            The first index at which MoE layers start to appear.
        moe_layer_end_index (`int`, *optional*, defaults to -1):
            The last possible index for a MoE layer.
        moe_layer_interval (`int`, *optional*, defaults to 1):
            The intervals between MoE layers to appear.
        moe_norm_min (`float`, *optional*, defaults to 1e-12):
            Minimum division value during routing normalization.
        output_router_logits (`bool`, *optional*, defaults to `False`):
            Whether or not the router logits should be returned by the model. Enabling this will also allow the model
            to output the auxiliary loss, including load balancing loss and router z-loss.
        router_aux_loss_coef (`float`, *optional*, defaults to 0.001):
            The aux loss factor for the total loss.

    ```python
    >>> from transformers import Ernie4_5_MoeModel, Ernie4_5_MoeConfig

    >>> # Initializing a Ernie4_5_MoE style configuration
    >>> configuration = Ernie4_5_MoeConfig()

    >>> # Initializing a model from the ERNIE-4.5-21B-A3B style configuration
    >>> model = Ernie4_5_MoeModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "ernie4_5_moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_experts": "moe_num_experts", "num_experts_per_tok": "moe_k"}
    default_theta = 500000.0
    # Default tensor parallel plan for base model `Ernie4_5_MoE`
    base_model_tp_plan = {
        "layers.*.self_attn.q_proj": "colwise",
        "layers.*.self_attn.k_proj": "colwise",
        "layers.*.self_attn.v_proj": "colwise",
        "layers.*.self_attn.o_proj": "rowwise",
        "layers.*.mlp.experts.gate_up_proj": "local_rowwise",
        "layers.*.mlp.experts.down_proj": "local_rowwise",
        "layers.*.mlp.experts": "gather",
        "layers.*.mlp.shared_experts.gate_proj": "colwise",
        "layers.*.mlp.shared_experts.up_proj": "colwise",
        "layers.*.mlp.shared_experts.down_proj": "rowwise",
        "layers.*.mlp.gate_proj": "colwise",
        "layers.*.mlp.up_proj": "colwise",
        "layers.*.mlp.down_proj": "rowwise",
    }
    base_model_pp_plan = {
        "embed_tokens": (["input_ids"], ["inputs_embeds"]),
        "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
        "norm": (["hidden_states"], ["hidden_states"]),
    }

    def __init__(
        self,
        vocab_size: Optional[int] = 103424,
        pad_token_id: Optional[int] = 0,
        bos_token_id: Optional[int] = 1,
        eos_token_id: Optional[int] = 2,
        hidden_size: Optional[int] = 2560,
        intermediate_size: Optional[int] = 12288,
        num_hidden_layers: Optional[int] = 28,
        num_attention_heads: Optional[int] = 20,
        num_key_value_heads: Optional[int] = 4,
        hidden_act: Optional[str] = "silu",
        max_position_embeddings: Optional[int] = 131072,
        initializer_range: Optional[float] = 0.02,
        # Annotation fix: this epsilon is a float, not an int.
        rms_norm_eps: Optional[float] = 1e-5,
        use_cache: Optional[bool] = True,
        tie_word_embeddings: Optional[bool] = True,
        rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
        # Annotation fix: this flag is a bool, not an int.
        use_bias: Optional[bool] = False,
        moe_intermediate_size: Optional[int] = 1536,
        moe_k: Optional[int] = 6,
        moe_num_experts: Optional[int] = 64,
        moe_num_shared_experts: Optional[int] = 2,
        moe_layer_start_index: Optional[int] = 1,
        moe_layer_end_index: Optional[int] = -1,
        moe_layer_interval: Optional[int] = 1,
        # Annotation fix: minimum normalization divisor is a float.
        moe_norm_min: Optional[float] = 1e-12,
        output_router_logits: Optional[bool] = False,
        router_aux_loss_coef: Optional[float] = 0.001,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.use_bias = use_bias
        # MoE arguments
        self.moe_intermediate_size = moe_intermediate_size
        self.moe_k = moe_k
        self.moe_num_experts = moe_num_experts
        self.moe_num_shared_experts = moe_num_shared_experts
        self.moe_layer_start_index = moe_layer_start_index
        # -1 is a sentinel meaning "through the last layer".
        self.moe_layer_end_index = self.num_hidden_layers - 1 if moe_layer_end_index == -1 else moe_layer_end_index
        self.moe_layer_interval = moe_layer_interval
        self.moe_norm_min = moe_norm_min
        self.output_router_logits = output_router_logits
        self.router_aux_loss_coef = router_aux_loss_coef
        self.rope_parameters = rope_parameters

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )


__all__ = ["Ernie4_5_MoeConfig"]
Ernie4_5_MoeConfig
python
PrefectHQ__prefect
tests/test_cache_policies.py
{ "start": 1191, "end": 4866 }
class ____:
    """Behavioural tests for the ``Inputs`` cache policy."""

    def test_initializes(self):
        # An Inputs policy is a CachePolicy.
        assert isinstance(Inputs(), CachePolicy)

    def test_key_varies_on_inputs(self):
        policy = Inputs()

        def key_for(inputs):
            return policy.compute_key(
                task_ctx=None, inputs=inputs, flow_parameters=None
            )

        none_key = key_for(None)
        x_key = key_for({"x": 42})
        y_key = key_for({"y": 42})

        # Different inputs must yield different keys.
        assert x_key != y_key
        assert x_key != none_key
        assert y_key != none_key

        z_key = key_for({"z": "foo"})
        assert z_key not in [x_key, y_key]

    def test_key_doesnt_vary_on_other_kwargs(self):
        policy = Inputs()
        baseline = policy.compute_key(
            task_ctx=None, inputs={"x": 42}, flow_parameters=None
        )
        # Any combination of non-input kwargs must leave the key unchanged.
        for combo in itertools.permutations([None, 1, "foo", {}]):
            kwargs = dict(zip(["task_ctx", "flow_parameters", "other"], combo))
            assert policy.compute_key(inputs={"x": 42}, **kwargs) == baseline

    def test_key_excludes_excluded_inputs(self):
        policy = Inputs(exclude=["y"])
        base = policy.compute_key(task_ctx=None, inputs={"x": 42}, flow_parameters=None)
        # An excluded input may take any value without changing the key.
        for excluded_value in (42, "foo", None):
            assert (
                policy.compute_key(
                    task_ctx=None,
                    inputs={"x": 42, "y": excluded_value},
                    flow_parameters=None,
                )
                == base
            )

    def test_key_applies_stabilizing_transformations(self, monkeypatch):
        monkeypatch.setattr(
            "prefect.cache_policies.STABLE_TRANSFORMS", {dict: lambda val: "foobar"}
        )
        policy = Inputs()

        # Dict values collapse to one key because of the patched transform.
        first = policy.compute_key(
            task_ctx=None, inputs={"y": dict(x="string")}, flow_parameters=None
        )
        second = policy.compute_key(
            task_ctx=None, inputs={"y": dict(z="otherstring")}, flow_parameters=None
        )
        assert first == second

        # Non-dict inputs are unaffected by the transform.
        first = policy.compute_key(task_ctx=None, inputs={"x": 42}, flow_parameters=None)
        second = policy.compute_key(task_ctx=None, inputs={"x": 43}, flow_parameters=None)
        assert first != second

    def test_subtraction_results_in_new_policy_for_inputs(self):
        policy = Inputs()
        derived = policy - "foo"
        assert policy != derived
        assert policy.exclude != derived.exclude

    @pytest.mark.parametrize("policy", [RunId(), RunId() + TaskSource()])
    def test_subtraction_is_noop_for_non_inputs_policies(self, policy):
        # Subtraction returns the very same object for non-Inputs policies.
        assert (policy - "foo") is policy
        key_a = policy.compute_key(
            task_ctx=None,
            inputs={"foo": 42, "y": "changing-value"},
            flow_parameters=None,
        )
        key_b = policy.compute_key(
            task_ctx=None, inputs={"foo": 42, "y": "changed"}, flow_parameters=None
        )
        assert key_a == key_b

    def test_excluded_can_be_manipulated_via_subtraction(self):
        policy = Inputs() - "y"
        assert policy.exclude == ["y"]
        base = policy.compute_key(task_ctx=None, inputs={"x": 42}, flow_parameters=None)
        for val in (42, "foo", None):
            assert (
                policy.compute_key(
                    task_ctx=None, inputs={"x": 42, "y": val}, flow_parameters=None
                )
                == base
            )
TestInputsPolicy
python
pytorch__pytorch
torch/onnx/_internal/exporter/_capture_strategies.py
{ "start": 6765, "end": 9040 }
class ____(CaptureStrategy):
    """Capture strategy using ``torch.export.export(..., strict=False)``.

    If the first export attempt fails with a ``torch._dynamo.exc.UserError``,
    the dynamic shapes are refined once from the error's suggested fixes and
    the export is retried; if refinement itself fails, the original export
    error is re-raised.
    """

    def _capture(
        self, model, args, kwargs, dynamic_shapes
    ) -> torch.export.ExportedProgram:
        with (
            # Support the dynamism with 0/1 input dim
            torch.fx.experimental._config.patch(backed_size_oblivious=True),  # type: ignore[attr-defined]
        ):
            try:
                return torch.export.export(
                    model,
                    args,
                    kwargs=kwargs,
                    dynamic_shapes=dynamic_shapes,
                    strict=False,
                    prefer_deferred_runtime_asserts_over_guards=_flags.PREFER_DEFERRED_RUNTIME_ASSERTS_OVER_GUARDS,
                )
            except torch._dynamo.exc.UserError as exc:
                # Refine the dynamic shapes based on the suggested fixes.
                try:
                    new_shapes = torch.export.dynamic_shapes.refine_dynamic_shapes_from_suggested_fixes(
                        exc.msg, dynamic_shapes
                    )
                except Exception:
                    # If the dynamic shapes cannot be refined, re-raise the exception.
                    # `from None` suppresses the refinement failure so the
                    # original export error surfaces cleanly.
                    raise exc from None
                # Retry once with the refined shapes; a second failure
                # propagates to the caller.
                return torch.export.export(
                    model,
                    args,
                    kwargs=kwargs,
                    dynamic_shapes=new_shapes,
                    strict=False,
                    prefer_deferred_runtime_asserts_over_guards=_flags.PREFER_DEFERRED_RUNTIME_ASSERTS_OVER_GUARDS,
                )

    def _enter(self, model) -> None:
        # Announce the capture attempt (verbose mode only).
        model_repr = _take_first_line(repr(model))
        self._verbose_print(
            f"Obtain model graph for `{model_repr}` with `torch.export.export(..., strict=False)`..."
        )

    def _success(self, model) -> None:
        # Report a successful capture (verbose mode only).
        model_repr = _take_first_line(repr(model))
        self._verbose_print(
            f"Obtain model graph for `{model_repr}` with `torch.export.export(..., strict=False)`... ✅"
        )

    def _failure(self, model, e) -> None:
        # Report a failed capture (verbose mode only); the exception itself
        # is handled by the caller.
        del e  # Unused
        model_repr = _take_first_line(repr(model))
        self._verbose_print(
            f"Obtain model graph for `{model_repr}` with `torch.export.export(..., strict=False)`... ❌"
        )
TorchExportNonStrictStrategy
python
astropy__astropy
astropy/modeling/fitting.py
{ "start": 67414, "end": 70571 }
class ____(Fitter):
    """
    Sequential Least Squares Programming (SLSQP) optimization algorithm and
    least squares statistic.

    Raises
    ------
    ModelLinearityError
        A linear model is passed to a nonlinear fitter

    Notes
    -----
    See also the `~astropy.modeling.optimizers.SLSQP` optimizer.
    """

    supported_constraints = SLSQP.supported_constraints

    def __init__(self):
        super().__init__(optimizer=SLSQP, statistic=leastsquare)
        # Populated with optimizer diagnostics on each call.
        self.fit_info = {}

    @fitter_unit_support
    def __call__(
        self,
        model,
        x,
        y,
        z=None,
        weights=None,
        *,
        inplace=False,
        **kwargs,
    ):
        """
        Fit data to this model.

        Parameters
        ----------
        model : `~astropy.modeling.FittableModel`
            model to fit to x, y, z
        x : array
            input coordinates
        y : array
            input coordinates
        z : array, optional
            input coordinates
        weights : array, optional
            Weights for fitting. For data with Gaussian uncertainties, the
            weights should be 1/sigma.
        inplace : bool, optional
            If `False` (the default), a copy of the model with the fitted
            parameters set will be returned. If `True`, the returned model
            will be the same instance as the model passed in, and the
            parameter values will be changed inplace.
        kwargs : dict
            optional keyword arguments to be passed to the optimizer or the
            statistic
        verblevel : int
            0-silent 1-print summary upon completion, 2-print summary after
            each iteration
        maxiter : int
            maximum number of iterations
        epsilon : float
            the step size for finite-difference derivative estimates
        acc : float
            Requested accuracy
        equivalencies : list or None, optional, keyword-only
            List of *additional* equivalencies that are should be applied in
            case x, y and/or z have units. Default is None.

        Returns
        -------
        fitted_model : `~astropy.modeling.FittableModel`
            If ``inplace`` is `False` (the default), this is a copy of the
            input model with parameters set by the fitter. If ``inplace`` is
            `True`, this is the same model as the input model, with parameters
            updated to be those set by the fitter.
        """
        # Validate constraints against what SLSQP supports; copy unless
        # the caller asked to fit in place.
        model_copy = _validate_model(
            model,
            self._opt_method.supported_constraints,
            copy=not inplace,
        )
        # Constraint syncing is switched off for the duration of the fit
        # and restored before returning.
        model_copy.sync_constraints = False
        farg = _convert_input(x, y, z)
        # Prepend the model and weights so the objective function receives
        # (model, weights, coords...).
        farg = (
            model_copy,
            weights,
        ) + farg
        init_values, _, _ = model_to_fit_params(model_copy)
        fitparams, self.fit_info = self._opt_method(
            self.objective_function, init_values, farg, **kwargs
        )
        # Write the optimized parameter values back onto the model.
        fitter_to_model_params(model_copy, fitparams)
        model_copy.sync_constraints = True
        return model_copy
SLSQPLSQFitter
python
jschneier__django-storages
tests/test_s3.py
{ "start": 39308, "end": 43685 }
class ____(TestCase):
    """
    Using mock_aws as a class decorator automatically decorates methods,
    but NOT classmethods or staticmethods.
    """

    def setUp(self):
        # Fix: setUp is an instance method, so the first parameter is
        # conventionally named ``self``; the original misleadingly called
        # it ``cls`` while assigning instance attributes through it.
        super().setUp()
        self.storage = s3.S3Storage()
        self.bucket = self.storage.connection.Bucket(settings.AWS_STORAGE_BUCKET_NAME)
        self.bucket.create()

    def test_save_bytes_file(self):
        """A binary file-like object round-trips through save()."""
        self.storage.save("bytes_file.txt", File(io.BytesIO(b"foo1")))
        self.assertEqual(
            b"foo1",
            self.bucket.Object("bytes_file.txt").get()["Body"].read(),
        )

    def test_save_string_file(self):
        """A text file-like object is stored as encoded bytes."""
        self.storage.save("string_file.txt", File(io.StringIO("foo2")))
        self.assertEqual(
            b"foo2",
            self.bucket.Object("string_file.txt").get()["Body"].read(),
        )

    def test_save_bytes_content_file(self):
        """A bytes ContentFile round-trips through save()."""
        self.storage.save("bytes_content.txt", ContentFile(b"foo3"))
        self.assertEqual(
            b"foo3",
            self.bucket.Object("bytes_content.txt").get()["Body"].read(),
        )

    def test_save_string_content_file(self):
        """A str ContentFile is stored as encoded bytes."""
        self.storage.save("string_content.txt", ContentFile("foo4"))
        self.assertEqual(
            b"foo4",
            self.bucket.Object("string_content.txt").get()["Body"].read(),
        )

    def test_content_type_guess(self):
        """
        Test saving a file where the ContentType is guessed from the filename.
        """
        name = "test_image.jpg"
        content = ContentFile(b"data")
        content.content_type = None
        self.storage.save(name, content)
        s3_object_fetched = self.bucket.Object(name).get()
        self.assertEqual(b"data", s3_object_fetched["Body"].read())
        self.assertEqual(s3_object_fetched["ContentType"], "image/jpeg")

    def test_content_type_attribute(self):
        """
        Test saving a file with a custom content type attribute.
        """
        content = ContentFile(b"data")
        content.content_type = "test/foo"
        self.storage.save("test_file", content)
        s3_object_fetched = self.bucket.Object("test_file").get()
        self.assertEqual(b"data", s3_object_fetched["Body"].read())
        self.assertEqual(s3_object_fetched["ContentType"], "test/foo")

    def test_content_type_not_detectable(self):
        """
        Test saving a file with no detectable content type.
        """
        content = ContentFile(b"data")
        content.content_type = None
        self.storage.save("test_file", content)
        s3_object_fetched = self.bucket.Object("test_file").get()
        self.assertEqual(b"data", s3_object_fetched["Body"].read())
        self.assertEqual(
            s3_object_fetched["ContentType"],
            s3.S3Storage.default_content_type,
        )

    def test_storage_open_reading_with_newlines(self):
        """Test file reading with "r" and "rb" and various newline characters."""
        name = "test_storage_open_read_with_newlines.txt"

        # Text mode ("r") applies universal-newline translation.
        with io.BytesIO() as temp_file:
            temp_file.write(b"line1\nline2\r\nmore\rtext\n")
            self.storage.save(name, temp_file)
        file = self.storage.open(name, "r")
        content_str = file.read()
        file.close()
        self.assertEqual(content_str, "line1\nline2\nmore\ntext\n")

        # Binary mode ("rb") returns the raw bytes unmodified.
        with io.BytesIO() as temp_file:
            temp_file.write(b"line1\nline2\r\nmore\rtext\n")
            self.storage.save(name, temp_file)
        file = self.storage.open(name, "rb")
        content_str = file.read()
        file.close()
        self.assertEqual(content_str, b"line1\nline2\r\nmore\rtext\n")

        # readlines() in text mode, with no trailing newline on the last line.
        with io.BytesIO() as temp_file:
            temp_file.write(b"line1\nline2\r\nmore\rtext")
            self.storage.save(name, temp_file)
        file = self.storage.open(name, "r")
        content_lines = file.readlines()
        file.close()
        self.assertEqual(content_lines, ["line1\n", "line2\n", "more\n", "text"])

        # readlines() in binary mode preserves the original line terminators.
        with io.BytesIO() as temp_file:
            temp_file.write(b"line1\nline2\r\nmore\rtext")
            self.storage.save(name, temp_file)
        file = self.storage.open(name, "rb")
        content_lines = file.readlines()
        file.close()
        self.assertEqual(content_lines, [b"line1\n", b"line2\r\n", b"more\r", b"text"])
S3StorageTestsWithMoto
python
streamlit__streamlit
lib/tests/streamlit/components/v2/test_bidi_component.py
{ "start": 12327, "end": 39320 }
class ____(DeltaGeneratorTestCase): """Test the bidi_component functionality.""" def setUp(self): super().setUp() # Create a mock component manager for testing self.mock_component_manager = BidiComponentManager() # Patch the Runtime to return our mock component manager self.runtime_patcher = patch.object( Runtime, "instance", return_value=MagicMock() ) self.mock_runtime = self.runtime_patcher.start() self.mock_runtime.return_value.bidi_component_registry = ( self.mock_component_manager ) def tearDown(self): super().tearDown() self.runtime_patcher.stop() def test_component_with_js_content_only(self): """Test component with only JavaScript content.""" # Register a component with JS content only self.mock_component_manager.register( BidiComponentDefinition( name="js_only_component", js="console.log('hello world');", ) ) # Call the component st._bidi_component("js_only_component") # Verify the proto was enqueued delta = self.get_delta_from_queue() bidi_component_proto = delta.new_element.bidi_component assert bidi_component_proto.component_name == "js_only_component" assert bidi_component_proto.js_content == "console.log('hello world');" assert bidi_component_proto.html_content == "" def test_component_with_html_content_only(self): """Test component with only HTML content.""" # Register a component with HTML content only self.mock_component_manager.register( BidiComponentDefinition( name="html_only_component", html="<div>Hello World</div>", ) ) # Call the component st._bidi_component("html_only_component") # Verify the proto was enqueued delta = self.get_delta_from_queue() bidi_component_proto = delta.new_element.bidi_component assert bidi_component_proto.component_name == "html_only_component" assert bidi_component_proto.html_content == "<div>Hello World</div>" assert bidi_component_proto.js_content == "" def test_component_with_js_url_only(self): """Test component with only JavaScript URL.""" # Create a mock component definition with js_url mock_component_def = 
MagicMock(spec=BidiComponentDefinition) mock_component_def.js_content = None mock_component_def.js_url = "index.js" mock_component_def.html_content = None mock_component_def.css_content = None mock_component_def.css_url = None mock_component_def.isolate_styles = True # Mock the registry to return our component with patch.object( self.mock_component_manager, "get", return_value=mock_component_def ): # Call the component st._bidi_component("js_url_component") # Verify the proto was enqueued delta = self.get_delta_from_queue() bidi_component_proto = delta.new_element.bidi_component assert bidi_component_proto.component_name == "js_url_component" assert bidi_component_proto.js_source_path == "index.js" assert bidi_component_proto.html_content == "" def test_component_with_both_js_and_html(self): """Test component with both JavaScript and HTML content.""" # Register a component with both JS and HTML content self.mock_component_manager.register( BidiComponentDefinition( name="full_component", js="console.log('hello world');", html="<div>Hello World</div>", css="div { color: red; }", ) ) # Call the component st._bidi_component("full_component") # Verify the proto was enqueued delta = self.get_delta_from_queue() bidi_component_proto = delta.new_element.bidi_component assert bidi_component_proto.component_name == "full_component" assert bidi_component_proto.js_content == "console.log('hello world');" assert bidi_component_proto.html_content == "<div>Hello World</div>" assert bidi_component_proto.css_content == "div { color: red; }" def test_component_with_no_js_or_html_raises_exception(self): """Test that component with neither JS nor HTML content raises StreamlitAPIException.""" # Register a component with only CSS content (no JS or HTML) self.mock_component_manager.register( BidiComponentDefinition( name="css_only_component", css="div { color: red; }", ) ) # Call the component and expect an exception with pytest.raises(StreamlitAPIException) as exc_info: 
st._bidi_component("css_only_component") # Verify the error message error_message = str(exc_info.value) assert "css_only_component" in error_message assert "must have either JavaScript content" in error_message assert ( "(`js_content` or `js_url`) or HTML content (`html_content`)" in error_message ) def test_component_with_empty_js_and_html_raises_exception(self): """Test that component with empty JS and HTML content raises StreamlitAPIException.""" # Create a mock component definition with empty content mock_component_def = MagicMock(spec=BidiComponentDefinition) mock_component_def.js_content = "" # Empty string mock_component_def.js_url = None mock_component_def.html_content = "" # Empty string mock_component_def.css_content = "div { color: red; }" mock_component_def.css_url = None mock_component_def.isolate_styles = True # Mock the registry to return our component with patch.object( self.mock_component_manager, "get", return_value=mock_component_def ): # Call the component and expect an exception with pytest.raises(StreamlitAPIException) as exc_info: st._bidi_component("empty_component") # Verify the error message error_message = str(exc_info.value) assert "empty_component" in error_message assert "must have either JavaScript content" in error_message def test_unregistered_component_raises_value_error(self): """Test that calling an unregistered component raises ValueError.""" # Call a component that doesn't exist with pytest.raises( ValueError, match="Component 'nonexistent_component' is not registered" ): st._bidi_component("nonexistent_component") def test_component_with_key(self): """Test component with a user-specified key.""" # Register a component self.mock_component_manager.register( BidiComponentDefinition( name="keyed_component", js="console.log('hello world');", ) ) # Call the component with a key st._bidi_component("keyed_component", key="my_key") # Verify the proto was enqueued with the correct ID delta = self.get_delta_from_queue() 
bidi_component_proto = delta.new_element.bidi_component assert bidi_component_proto.component_name == "keyed_component" # The ID should be deterministic based on the key assert bidi_component_proto.id is not None def test_component_with_scalar_data(self): """Test component with scalar data parameter serialized as JSON.""" # Register a component self.mock_component_manager.register( BidiComponentDefinition( name="data_component", js="console.log('hello world');", ) ) # Use a simple scalar value which is treated as DataFormat.UNKNOWN and therefore JSON-encoded test_data = "hello streamlit" st._bidi_component("data_component", data=test_data) # Verify the proto was enqueued with the data delta = self.get_delta_from_queue() bidi_component_proto = delta.new_element.bidi_component assert bidi_component_proto.component_name == "data_component" # Data should be JSON serialized inside the `json` oneof field assert bidi_component_proto.WhichOneof("data") == "json" assert bidi_component_proto.json == '"hello streamlit"' def test_component_with_dict_data_json(self): """Test component with dict data serialized as JSON.""" # Register a component self.mock_component_manager.register( BidiComponentDefinition( name="dict_data_component", js="console.log('hello world');", ) ) test_dict = {"message": "hello", "count": 42} st._bidi_component("dict_data_component", data=test_dict) delta = self.get_delta_from_queue() proto = delta.new_element.bidi_component assert proto.component_name == "dict_data_component" # Should choose JSON path assert proto.WhichOneof("data") == "json" assert json.loads(proto.json) == test_dict def test_component_with_arrow_data(self): """Test component with dataframe-like data serialized to Arrow.""" # Register a component self.mock_component_manager.register( BidiComponentDefinition( name="arrow_data_component", js="console.log('hello world');", ) ) # Use a simple Pandas DataFrame which should be detected as dataframe-like df = pd.DataFrame({"a": [1, 2, 3], 
"b": ["x", "y", "z"]}) st._bidi_component("arrow_data_component", data=df) # Verify the proto was enqueued with Arrow data delta = self.get_delta_from_queue() bidi_component_proto = delta.new_element.bidi_component assert bidi_component_proto.component_name == "arrow_data_component" assert bidi_component_proto.WhichOneof("data") == "arrow_data" # The Arrow bytes should be non-empty assert len(bidi_component_proto.arrow_data.data) > 0 def test_component_with_bytes_data(self): """Test component with raw bytes data passed through unchanged.""" # Register a component self.mock_component_manager.register( BidiComponentDefinition( name="bytes_data_component", js="console.log('hello world');", ) ) # Raw bytes payload binary_payload = b"\x00\x01\x02streamlit" st._bidi_component("bytes_data_component", data=binary_payload) # Verify the proto was enqueued with bytes data delta = self.get_delta_from_queue() bidi_component_proto = delta.new_element.bidi_component assert bidi_component_proto.component_name == "bytes_data_component" assert bidi_component_proto.WhichOneof("data") == "bytes" assert bidi_component_proto.bytes == binary_payload def test_component_with_callbacks(self): """Test component with callback handlers.""" # Register a component self.mock_component_manager.register( BidiComponentDefinition( name="callback_component", js="console.log('hello world');", ) ) # Create mock callback on_click_callback = MagicMock() # Call the component with event-specific callback result = st._bidi_component( "callback_component", on_click_change=on_click_callback, ) # Verify the result assert hasattr(result, "click") # Verify the proto was enqueued with registered handler names delta = self.get_delta_from_queue() bidi_component_proto = delta.new_element.bidi_component assert bidi_component_proto.component_name == "callback_component" def test_component_with_dict_containing_dataframe(self): """Test component with dict containing dataframe - should use mixed data serialization.""" # 
Register a component self.mock_component_manager.register( BidiComponentDefinition( name="dict_with_df_component", js="console.log('hello world');", ) ) # Create mixed data with dataframe automatically detected df = pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]}) mixed_data = { "config": {"title": "My Chart", "theme": "dark"}, "dataframe": df, # This should be automatically detected and converted "metadata": {"rows": 3, "cols": 2}, } st._bidi_component("dict_with_df_component", data=mixed_data) # Verify the proto was enqueued with mixed data delta = self.get_delta_from_queue() bidi_component_proto = delta.new_element.bidi_component assert bidi_component_proto.component_name == "dict_with_df_component" assert bidi_component_proto.WhichOneof("data") == "mixed" # Verify the mixed data structure mixed_proto = bidi_component_proto.mixed assert mixed_proto.json is not None assert len(mixed_proto.arrow_blobs) == 1 # Parse the JSON to verify placeholder structure parsed_json = json.loads(mixed_proto.json) assert parsed_json["config"]["title"] == "My Chart" assert parsed_json["metadata"]["rows"] == 3 # The dataframe should be replaced with a placeholder assert "__streamlit_arrow_ref__" in parsed_json["dataframe"] def test_component_with_multiple_dataframes_in_dict(self): """Test component with dict containing multiple dataframes.""" # Register a component self.mock_component_manager.register( BidiComponentDefinition( name="multi_df_component", js="console.log('hello world');", ) ) # Create mixed data with multiple dataframes df1 = pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]}) df2 = pd.DataFrame({"c": [4, 5, 6], "d": ["p", "q", "r"]}) mixed_data = { "config": {"title": "My Chart", "theme": "dark"}, "sales_data": df1, # Should be automatically detected "inventory_data": df2, # Should be automatically detected "metadata": {"rows": 3, "cols": 2}, } st._bidi_component("multi_df_component", data=mixed_data) # Verify the proto was enqueued with mixed data delta = 
self.get_delta_from_queue() bidi_component_proto = delta.new_element.bidi_component assert bidi_component_proto.component_name == "multi_df_component" assert bidi_component_proto.WhichOneof("data") == "mixed" # Verify the mixed data structure mixed_proto = bidi_component_proto.mixed assert mixed_proto.json is not None assert len(mixed_proto.arrow_blobs) == 2 # Two dataframes # Parse the JSON to verify placeholder structure parsed_json = json.loads(mixed_proto.json) assert parsed_json["config"]["title"] == "My Chart" assert parsed_json["config"]["theme"] == "dark" assert parsed_json["metadata"]["rows"] == 3 # Both dataframes should be replaced with placeholders assert "__streamlit_arrow_ref__" in parsed_json["sales_data"] assert "__streamlit_arrow_ref__" in parsed_json["inventory_data"] def test_component_with_dict_without_dataframes(self): """Test component with dict containing no dataframes - should use JSON serialization.""" # Register a component self.mock_component_manager.register( BidiComponentDefinition( name="json_only_component", js="console.log('hello world');", ) ) # Create data with no dataframes data = { "config": {"theme": "dark", "height": 400}, "labels": ["A", "B", "C"], "settings": {"enabled": True}, "metadata": {"version": "1.0"}, } st._bidi_component("json_only_component", data=data) # Verify the proto was enqueued with JSON serialization delta = self.get_delta_from_queue() bidi_component_proto = delta.new_element.bidi_component assert bidi_component_proto.component_name == "json_only_component" assert bidi_component_proto.WhichOneof("data") == "json" # Parse the JSON to verify it matches original data parsed_json = json.loads(bidi_component_proto.json) assert parsed_json == data def test_component_with_list_data(self): """Test component with list data - should use JSON serialization.""" # Register a component self.mock_component_manager.register( BidiComponentDefinition( name="list_component", js="console.log('hello world');", ) ) # Create list 
data (no automatic dataframe detection for lists) df = pd.DataFrame({"col": [1, 2, 3]}) list_data = [ {"name": "dataset1", "values": [1, 2, 3]}, {"name": "dataset2", "values": [4, 5, 6]}, df, # This dataframe will be converted to JSON via fallback ] st._bidi_component("list_component", data=list_data) # Verify the proto was enqueued with JSON serialization # (since we only detect dataframes in first level of dicts) delta = self.get_delta_from_queue() bidi_component_proto = delta.new_element.bidi_component assert bidi_component_proto.component_name == "list_component" assert bidi_component_proto.WhichOneof("data") == "json" # The data should be JSON-serialized as a string (due to DataFrame fallback) parsed_json = json.loads(bidi_component_proto.json) assert isinstance(parsed_json, str) # It's a string representation assert "dataset1" in parsed_json assert "dataset2" in parsed_json def test_component_with_tuple_data(self): """Test component with tuple data - should use JSON serialization.""" # Register a component self.mock_component_manager.register( BidiComponentDefinition( name="tuple_component", js="console.log('hello world');", ) ) # Create tuple data (no automatic dataframe detection for tuples) df = pd.DataFrame({"value": [42]}) tuple_data = ("metadata", df, {"extra": "info"}) st._bidi_component("tuple_component", data=tuple_data) # Verify the proto was enqueued with JSON serialization # (since we only detect dataframes in first level of dicts) delta = self.get_delta_from_queue() bidi_component_proto = delta.new_element.bidi_component assert bidi_component_proto.component_name == "tuple_component" assert bidi_component_proto.WhichOneof("data") == "json" # The data should be JSON-serialized as a string (due to DataFrame fallback) parsed_json = json.loads(bidi_component_proto.json) # The tuple with DataFrame gets converted to string representation assert isinstance(parsed_json, str) assert "metadata" in parsed_json assert "extra" in parsed_json assert "info" in 
parsed_json def test_component_with_dict_no_arrow_refs_uses_json(self): """Test that dictionaries without ArrowReference objects use regular JSON serialization.""" # Register a component self.mock_component_manager.register( BidiComponentDefinition( name="json_only_component", js="console.log('hello world');", ) ) # Create dictionary without any ArrowReference objects regular_data = { "config": {"title": "Chart", "enabled": True}, "values": [1, 2, 3, 4], "metadata": {"count": 4}, } st._bidi_component("json_only_component", data=regular_data) # Verify the proto uses regular JSON serialization delta = self.get_delta_from_queue() bidi_component_proto = delta.new_element.bidi_component assert bidi_component_proto.WhichOneof("data") == "json" parsed_data = json.loads(bidi_component_proto.json) assert parsed_data == regular_data def test_default_with_valid_callbacks(self): """Test that default works correctly with valid callback names.""" # Register a component self.mock_component_manager.register( BidiComponentDefinition( name="default_component", js="console.log('hello world');", ) ) # Create mock callbacks on_selected_change = MagicMock() on_search_change = MagicMock() # Call the component with default result = st._bidi_component( "default_component", default={ "selected": ["item1", "item2"], "search": "default search", }, on_selected_change=on_selected_change, on_search_change=on_search_change, ) # Verify the result contains default values assert result["selected"] == ["item1", "item2"] assert result["search"] == "default search" def test_default_validation_error(self): """Test that invalid keys in default raise StreamlitAPIException.""" # Register a component self.mock_component_manager.register( BidiComponentDefinition( name="validation_component", js="console.log('hello world');", ) ) # Create mock callback for only one state on_valid_change = MagicMock() # Call the component with invalid default key with pytest.raises(StreamlitAPIException) as exc_info: 
st._bidi_component( "validation_component", default={ "valid": "this is ok", "invalid": "this should fail", # No on_invalid_change callback }, on_valid_change=on_valid_change, ) # Verify the error message error_message = str(exc_info.value) assert "invalid" in error_message assert "not a valid state name" in error_message assert "Available state names: `['valid']`" in error_message def test_default_no_callbacks_error(self): """Test that default with no callbacks raises StreamlitAPIException.""" # Register a component self.mock_component_manager.register( BidiComponentDefinition( name="no_callbacks_component", js="console.log('hello world');", ) ) # Call the component with default but no callbacks with pytest.raises(StreamlitAPIException) as exc_info: st._bidi_component( "no_callbacks_component", default={"some_state": "value"}, ) # Verify the error message mentions no available state names error_message = str(exc_info.value) assert "some_state" in error_message assert "not a valid state name" in error_message assert "Available state names: `none`" in error_message def test_default_none_is_valid(self): """Test that default=None works correctly.""" # Register a component self.mock_component_manager.register( BidiComponentDefinition( name="none_default_component", js="console.log('hello world');", ) ) # Create mock callback on_test_change = MagicMock() # Call the component with default=None result = st._bidi_component( "none_default_component", default=None, on_test_change=on_test_change, ) # Should work without error and have empty state assert result["test"] is None def test_default_empty_dict(self): """Test that empty default dict works correctly.""" # Register a component self.mock_component_manager.register( BidiComponentDefinition( name="empty_default_component", js="console.log('hello world');", ) ) # Create mock callback on_test_change = MagicMock() # Call the component with empty default result = st._bidi_component( "empty_default_component", default={}, 
on_test_change=on_test_change, ) # Should work without error assert result["test"] is None def test_default_with_none_values(self): """Test that None values in default are properly handled.""" # Register a component self.mock_component_manager.register( BidiComponentDefinition( name="none_values_component", js="console.log('hello world');", ) ) # Create mock callback on_nullable_change = MagicMock() # Call the component with None default value result = st._bidi_component( "none_values_component", default={"nullable": None}, on_nullable_change=on_nullable_change, ) # Verify None is properly set as default assert result["nullable"] is None def test_default_complex_values(self): """Test that complex values in default work correctly.""" # Register a component self.mock_component_manager.register( BidiComponentDefinition( name="complex_default_component", js="console.log('hello world');", ) ) # Create mock callbacks on_list_state_change = MagicMock() on_dict_state_change = MagicMock() # Call the component with complex default values complex_list = [1, 2, {"nested": "value"}] complex_dict = {"key": "value", "nested": {"data": [1, 2, 3]}} result = st._bidi_component( "complex_default_component", default={ "list_state": complex_list, "dict_state": complex_dict, }, on_list_state_change=on_list_state_change, on_dict_state_change=on_dict_state_change, ) # Verify complex values are properly set assert result["list_state"] == complex_list assert result["dict_state"] == complex_dict
BidiComponentTest
python
tiangolo__fastapi
docs_src/response_model/tutorial001.py
{ "start": 115, "end": 562 }
class ____(BaseModel): name: str description: Union[str, None] = None price: float tax: Union[float, None] = None tags: List[str] = [] @app.post("/items/", response_model=Item) async def create_item(item: Item) -> Any: return item @app.get("/items/", response_model=List[Item]) async def read_items() -> Any: return [ {"name": "Portal Gun", "price": 42.0}, {"name": "Plumbus", "price": 32.0}, ]
Item
python
streamlit__streamlit
lib/streamlit/elements/heading.py
{ "start": 1229, "end": 1417 }
class ____(Enum): TITLE_TAG = "h1" HEADER_TAG = "h2" SUBHEADER_TAG = "h3" Anchor: TypeAlias = str | Literal[False] | None Divider: TypeAlias = bool | str | None
HeadingProtoTag
python
huggingface__transformers
src/transformers/models/lfm2_moe/modeling_lfm2_moe.py
{ "start": 30091, "end": 33625 }
class ____(Lfm2MoePreTrainedModel): def __init__(self, config: Lfm2MoeConfig): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) self.layers = nn.ModuleList( [Lfm2MoeDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) self.gradient_checkpointing = False self.pos_emb = Lfm2MoeRotaryEmbedding(config) self.embedding_norm = Lfm2MoeRMSNorm(config.hidden_size, eps=config.norm_eps) # Initialize weights and apply final processing self.post_init() @check_model_inputs() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Lfm2MoeHybridConvCache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> MoeModelOutputWithPast: if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if use_cache and past_key_values is None: batch_size = inputs_embeds.shape[0] past_key_values = Lfm2MoeHybridConvCache( config=self.config, max_batch_size=batch_size, dtype=self.dtype, device=self.device ) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = create_causal_mask( config=self.config, input_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, position_ids=position_ids, ) # 
Skip masking for decoding stage. We check shape here to be compile-friendly linear_attention = attention_mask if inputs_embeds.shape[1] != 1 else None hidden_states = inputs_embeds position_embeddings = self.pos_emb(hidden_states, position_ids=position_ids) # decoder layers for decoder_layer in self.layers[: self.config.num_hidden_layers]: layer_mask = causal_mask if decoder_layer.is_attention_layer else linear_attention hidden_states = decoder_layer( hidden_states, attention_mask=layer_mask, position_ids=position_ids, past_key_values=past_key_values, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = self.embedding_norm(hidden_states) return MoeModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values, ) @auto_docstring
Lfm2MoeModel
python
psf__black
src/blib2to3/pgen2/pgen.py
{ "start": 359, "end": 406 }
class ____(grammar.Grammar): pass
PgenGrammar
python
getsentry__sentry
tests/sentry/models/test_release.py
{ "start": 56460, "end": 62460 }
class ____(TestCase): @receivers_raise_on_send() def test_simple(self) -> None: org = self.create_organization(owner=Factories.create_user()) project = self.create_project(organization=org, name="foo") group = self.create_group(project=project) repo = Repository.objects.create(organization_id=org.id, name="test/repo") author = CommitAuthor.objects.create( name="foo bar baz", email="foo@example.com", organization_id=org.id ) author2 = CommitAuthor.objects.create( name="foo bar boo", email="baroo@example.com", organization_id=org.id ) author.preload_users() author2.preload_users() commit = Commit.objects.create( organization_id=org.id, repository_id=repo.id, author=author, date_added="2019-03-01 12:00:00+00:00", message="fixes %s" % (group.qualified_short_id), key="alksdflskdfjsldkfajsflkslk", ) commit2 = Commit.objects.create( organization_id=org.id, repository_id=repo.id, author=author2, date_added="2019-03-01 12:02:00+00:00", message="i fixed something", key="lskfslknsdkcsnlkdflksfdkls", ) release = Release.objects.create(version="abcdabc", organization=org) release.add_project(project) release.set_commits( [ {"id": commit.key, "repository": repo.name}, {"id": commit2.key, "repository": repo.name}, ] ) # Confirm setup works assert ReleaseCommit.objects.filter(commit=commit, release=release).exists() assert ReleaseCommit.objects.filter(commit=commit2, release=release).exists() assert release.commit_count == 2 assert release.authors == [str(author.id), str(author2.id)] assert release.last_commit_id == commit.id assert ReleaseHeadCommit.objects.filter( release_id=release.id, commit_id=commit.id, repository_id=repo.id ).exists() # Now clear the release; release.clear_commits() assert not ReleaseCommit.objects.filter(commit=commit, release=release).exists() assert not ReleaseCommit.objects.filter(commit=commit2, release=release).exists() assert not ReleaseHeadCommit.objects.filter( release_id=release.id, commit_id=commit.id, repository_id=repo.id ).exists() assert 
release.commit_count == 0 assert release.authors == [] assert not release.last_commit_id # Commits should still exist assert Commit.objects.filter( id=commit.id, organization_id=org.id, repository_id=repo.id ).exists() assert Commit.objects.filter( id=commit2.id, organization_id=org.id, repository_id=repo.id ).exists() @receivers_raise_on_send() def test_clear_commits_with_multiple_repositories(self) -> None: """ Test that clear_commits works correctly when a release has commits from multiple repositories, which creates multiple ReleaseHeadCommit objects. This test would fail on master with .get() raising MultipleObjectsReturned, but passes with the fix using .filter(). """ org = self.create_organization(owner=Factories.create_user()) project = self.create_project(organization=org, name="foo") # Create multiple repositories repo1 = Repository.objects.create(organization_id=org.id, name="test/repo1") repo2 = Repository.objects.create(organization_id=org.id, name="test/repo2") # Create commits in each repository commit1 = Commit.objects.create( organization_id=org.id, repository_id=repo1.id, key="commit1key", message="First commit", ) commit2 = Commit.objects.create( organization_id=org.id, repository_id=repo2.id, key="commit2key", message="Second commit", ) release = Release.objects.create(version="multi-repo-release", organization=org) release.add_project(project) # Set commits from both repositories release.set_commits( [ {"id": commit1.key, "repository": repo1.name}, {"id": commit2.key, "repository": repo2.name}, ] ) # Verify we have multiple ReleaseHeadCommit objects for this release head_commits = ReleaseHeadCommit.objects.filter(organization_id=org.id, release=release) assert head_commits.count() == 2 assert ReleaseHeadCommit.objects.filter( release_id=release.id, commit_id=commit1.id, repository_id=repo1.id ).exists() assert ReleaseHeadCommit.objects.filter( release_id=release.id, commit_id=commit2.id, repository_id=repo2.id ).exists() # Verify ReleaseCommit 
objects exist assert ReleaseCommit.objects.filter(commit=commit1, release=release).exists() assert ReleaseCommit.objects.filter(commit=commit2, release=release).exists() # Now clear the commits - this would fail on master with .get() release.clear_commits() # Verify all ReleaseHeadCommit objects are deleted assert ( ReleaseHeadCommit.objects.filter(organization_id=org.id, release=release).count() == 0 ) # Verify all ReleaseCommit objects are deleted assert not ReleaseCommit.objects.filter(release=release).exists() # Verify release fields are cleared assert release.commit_count == 0 assert release.authors == [] assert not release.last_commit_id # Commits should still exist in their repositories assert Commit.objects.filter(id=commit1.id).exists() assert Commit.objects.filter(id=commit2.id).exists()
ClearCommitsTestCase
python
jmcnamara__XlsxWriter
xlsxwriter/test/table/test_table05.py
{ "start": 481, "end": 1975 }
class ____(unittest.TestCase): """ Test assembling a complete Table file. """ def test_assemble_xml_file(self): """Test writing a table""" self.maxDiff = None worksheet = Worksheet() worksheet.worksheet_meta = WorksheetMeta() worksheet.str_table = SharedStringTable() worksheet.add_table("C4:F13", {"header_row": False}) worksheet._prepare_tables(1, {}) fh = StringIO() table = Table() table._set_filehandle(fh) table._set_properties(worksheet.tables[0]) table._assemble_xml_file() exp = _xml_to_list( """ <?xml version="1.0" encoding="UTF-8" standalone="yes"?> <table xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" id="1" name="Table1" displayName="Table1" ref="C4:F13" headerRowCount="0" totalsRowShown="0"> <tableColumns count="4"> <tableColumn id="1" name="Column1"/> <tableColumn id="2" name="Column2"/> <tableColumn id="3" name="Column3"/> <tableColumn id="4" name="Column4"/> </tableColumns> <tableStyleInfo name="TableStyleMedium9" showFirstColumn="0" showLastColumn="0" showRowStripes="1" showColumnStripes="0"/> </table> """ ) got = _xml_to_list(fh.getvalue()) self.assertEqual(exp, got)
TestAssembleTable
python
django__django
tests/i18n/tests.py
{ "start": 77535, "end": 77880 }
class ____(ResolutionOrderI18NTests): def test_locale_paths_translation(self): self.assertGettext("Time", "LOCALE_PATHS") def test_locale_paths_override_app_translation(self): with self.settings(INSTALLED_APPS=["i18n.resolution"]): self.assertGettext("Time", "LOCALE_PATHS")
LocalePathsResolutionOrderI18NTests
python
PrefectHQ__prefect
tests/utilities/test_collections.py
{ "start": 3020, "end": 4401 }
class ____: """ Checks that the Pydantic test objects defined in this file behave as expected. These tests do not cover Prefect functionality and may break if Pydantic introduces breaking changes. """ def test_private_pydantic_behaves_as_expected(self): input = PrivatePydantic(x=1) # Public attr accessible immediately assert input.x == 1 # Extras not allowed with pytest.raises(ValueError): input.a = 1 # Private attrs accessible after setting input._y = 4 input._z = 5 assert input._y == 4 assert input._z == 5 def test_immutable_pydantic_behaves_as_expected(self): input = ImmutablePrivatePydantic(x=1) # Public attr accessible immediately assert input.x == 1 # Extras not allowed with pytest.raises(ValueError): input.a = 1 # Private attr not accessible until set with pytest.raises(AttributeError): input._y # Private attrs accessible after setting input._y = 4 input._z = 5 assert input._y == 4 assert input._z == 5 # Mutating not allowed because frozen=True with pytest.raises(pydantic.ValidationError): input.x = 2 # Can still mutate private attrs input._y = 6
TestPydanticObjects
python
TheAlgorithms__Python
dynamic_programming/edit_distance.py
{ "start": 346, "end": 3435 }
class ____: """ Use : solver = EditDistance() editDistanceResult = solver.solve(firstString, secondString) """ def __init__(self): self.word1 = "" self.word2 = "" self.dp = [] def __min_dist_top_down_dp(self, m: int, n: int) -> int: if m == -1: return n + 1 elif n == -1: return m + 1 elif self.dp[m][n] > -1: return self.dp[m][n] else: if self.word1[m] == self.word2[n]: self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1) else: insert = self.__min_dist_top_down_dp(m, n - 1) delete = self.__min_dist_top_down_dp(m - 1, n) replace = self.__min_dist_top_down_dp(m - 1, n - 1) self.dp[m][n] = 1 + min(insert, delete, replace) return self.dp[m][n] def min_dist_top_down(self, word1: str, word2: str) -> int: """ >>> EditDistance().min_dist_top_down("intention", "execution") 5 >>> EditDistance().min_dist_top_down("intention", "") 9 >>> EditDistance().min_dist_top_down("", "") 0 """ self.word1 = word1 self.word2 = word2 self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))] return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1) def min_dist_bottom_up(self, word1: str, word2: str) -> int: """ >>> EditDistance().min_dist_bottom_up("intention", "execution") 5 >>> EditDistance().min_dist_bottom_up("intention", "") 9 >>> EditDistance().min_dist_bottom_up("", "") 0 """ self.word1 = word1 self.word2 = word2 m = len(word1) n = len(word2) self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)] for i in range(m + 1): for j in range(n + 1): if i == 0: # first string is empty self.dp[i][j] = j elif j == 0: # second string is empty self.dp[i][j] = i elif word1[i - 1] == word2[j - 1]: # last characters are equal self.dp[i][j] = self.dp[i - 1][j - 1] else: insert = self.dp[i][j - 1] delete = self.dp[i - 1][j] replace = self.dp[i - 1][j - 1] self.dp[i][j] = 1 + min(insert, delete, replace) return self.dp[m][n] if __name__ == "__main__": solver = EditDistance() print("****************** Testing Edit Distance DP Algorithm ******************") print() S1 = 
input("Enter the first string: ").strip() S2 = input("Enter the second string: ").strip() print() print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}") print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}") print() print("*************** End of Testing Edit Distance DP Algorithm ***************")
EditDistance
python
bokeh__bokeh
src/bokeh/models/widgets/pickers.py
{ "start": 4674, "end": 7681 }
class ____(HasProps): """ Common properties for date-like picker widgets. """ # explicit __init__ to support Init signatures def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) disabled_dates = Nullable(List(Either(Date, Tuple(Date, Date))), default=None, help=""" A list of dates of ``(start, end)`` date ranges to make unavailable for selection. All other dates will be available. .. note:: Only one of ``disabled_dates`` and ``enabled_dates`` should be specified. """) enabled_dates = Nullable(List(Either(Date, Tuple(Date, Date))), default=None, help=""" A list of dates of ``(start, end)`` date ranges to make available for selection. All other dates will be unavailable. .. note:: Only one of ``disabled_dates`` and ``enabled_dates`` should be specified. """) date_format = String(default="Y-m-d", help=""" Formatting specification for the display of the picked date. +---+-----------------------------------------------------------+-----------------------------------------+ | d | Day of the month, 2 digits with leading zeros | 01 to 31 | | D | A textual representation of a day | Mon through Sun | | l | A full textual representation of the day of the week | Sunday through Saturday | | j | Day of the month without leading zeros | 1 to 31 | | J | Day of the month without leading zeros and ordinal suffix | 1st, 2nd, to 31st | | w | Numeric representation of the day of the week | 0 (for Sunday) through 6 (for Saturday) | | W | Numeric representation of the week | 0 through 52 | | F | A full textual representation of a month | January through December | | m | Numeric representation of a month, with leading zero | 01 through 12 | | n | Numeric representation of a month, without leading zeros | 1 through 12 | | M | A short textual representation of a month | Jan through Dec | | U | The number of seconds since the Unix Epoch | 1413704993 | | y | A two digit representation of a year | 99 or 03 | | Y | A full numeric representation of a year, 4 
digits | 1999 or 2003 | | Z | ISO Date format | 2017-03-04T01:23:43.000Z | +---+-----------------------------------------------------------+-----------------------------------------+ See also https://flatpickr.js.org/formatting/#date-formatting-tokens. """) @abstract
DateCommon
python
allegroai__clearml
clearml/backend_api/services/v2_13/models.py
{ "start": 61400, "end": 78108 }
class ____(Request): """ Get all models :param name: Get only models whose name matches this pattern (python regular expression syntax) :type name: str :param user: List of user IDs used to filter results by the model's creating user :type user: Sequence[str] :param ready: Indication whether to retrieve only models that are marked ready If not supplied returns both ready and not-ready projects. :type ready: bool :param tags: User-defined tags list used to filter results. Prepend '-' to tag name to indicate exclusion :type tags: Sequence[str] :param system_tags: System tags list used to filter results. Prepend '-' to system tag name to indicate exclusion :type system_tags: Sequence[str] :param only_fields: List of model field names (if applicable, nesting is supported using '.'). If provided, this list defines the query's projection (only these fields will be returned for each result entry) :type only_fields: Sequence[str] :param page: Page number, returns a specific page out of the resulting list of models :type page: int :param page_size: Page size, specifies the number of results returned in each page (last page may contain fewer results) :type page_size: int :param project: List of associated project IDs :type project: Sequence[str] :param order_by: List of field names to order by. When search_text is used, '@text_score' can be used as a field representing the text score of returned documents. Use '-' prefix to specify descending order. 
Optional, recommended when using page :type order_by: Sequence[str] :param task: List of associated task IDs :type task: Sequence[str] :param id: List of model IDs :type id: Sequence[str] :param search_text: Free text search query :type search_text: str :param framework: List of frameworks :type framework: Sequence[str] :param uri: List of model URIs :type uri: Sequence[str] :param _all_: Multi-field pattern condition (all fields match pattern) :type _all_: MultiFieldPatternData :param _any_: Multi-field pattern condition (any field matches pattern) :type _any_: MultiFieldPatternData """ _service = "models" _action = "get_all" _version = "2.13" _schema = { "definitions": { "multi_field_pattern_data": { "properties": { "fields": { "description": "List of field names", "items": {"type": "string"}, "type": ["array", "null"], }, "pattern": { "description": "Pattern string (regex)", "type": ["string", "null"], }, }, "type": "object", } }, "dependencies": {"page": ["page_size"]}, "properties": { "_all_": { "description": "Multi-field pattern condition (all fields match pattern)", "oneOf": [ {"$ref": "#/definitions/multi_field_pattern_data"}, {"type": "null"}, ], }, "_any_": { "description": "Multi-field pattern condition (any field matches pattern)", "oneOf": [ {"$ref": "#/definitions/multi_field_pattern_data"}, {"type": "null"}, ], }, "framework": { "description": "List of frameworks", "items": {"type": "string"}, "type": ["array", "null"], }, "id": { "description": "List of model IDs", "items": {"type": "string"}, "type": ["array", "null"], }, "last_update": { "description": "Model last update time", "format": "date-time", "type": ["string", "null"], }, "name": { "description": "Get only models whose name matches this pattern (python regular expression syntax)", "type": ["string", "null"], }, "only_fields": { "description": "List of model field names (if applicable, nesting is supported using '.'). 
If provided, this list defines the query's projection (only these fields will be returned for each result entry)", "items": {"type": "string"}, "type": ["array", "null"], }, "order_by": { "description": "List of field names to order by. When search_text is used, '@text_score' can be used as a field representing the text score of returned documents. Use '-' prefix to specify descending order. Optional, recommended when using page", "items": {"type": "string"}, "type": ["array", "null"], }, "page": { "description": "Page number, returns a specific page out of the resulting list of models", "minimum": 0, "type": ["integer", "null"], }, "page_size": { "description": "Page size, specifies the number of results returned in each page (last page may contain fewer results)", "minimum": 1, "type": ["integer", "null"], }, "project": { "description": "List of associated project IDs", "items": {"type": "string"}, "type": ["array", "null"], }, "ready": { "description": "Indication whether to retrieve only models that are marked ready If not supplied returns both ready and not-ready projects.", "type": ["boolean", "null"], }, "search_text": { "description": "Free text search query", "type": ["string", "null"], }, "system_tags": { "description": "System tags list used to filter results. Prepend '-' to system tag name to indicate exclusion", "items": {"type": "string"}, "type": ["array", "null"], }, "tags": { "description": "User-defined tags list used to filter results. 
Prepend '-' to tag name to indicate exclusion", "items": {"type": "string"}, "type": ["array", "null"], }, "task": { "description": "List of associated task IDs", "items": {"type": "string"}, "type": ["array", "null"], }, "uri": { "description": "List of model URIs", "items": {"type": "string"}, "type": ["array", "null"], }, "user": { "description": "List of user IDs used to filter results by the model's creating user", "items": {"type": "string"}, "type": ["array", "null"], }, }, "type": "object", } def __init__( self, name: Optional[str] = None, user: Optional[List[str]] = None, ready: Optional[bool] = None, tags: Optional[List[str]] = None, system_tags: Optional[List[str]] = None, only_fields: Optional[List[str]] = None, page: Optional[int] = None, page_size: Optional[int] = None, project: Optional[List[str]] = None, order_by: Optional[List[str]] = None, task: Optional[List[str]] = None, id: Optional[List[str]] = None, search_text: Optional[str] = None, framework: Optional[List[str]] = None, uri: Optional[List[str]] = None, _all_: Any = None, _any_: Any = None, last_update: Optional[str] = None, **kwargs: Any ) -> None: super(GetAllRequest, self).__init__(**kwargs) self.name = name self.user = user self.ready = ready self.tags = tags self.system_tags = system_tags self.only_fields = only_fields self.page = page self.page_size = page_size self.project = project self.order_by = order_by self.task = task self.id = id self.search_text = search_text self.framework = framework self.uri = uri self._all_ = _all_ self._any_ = _any_ self.last_update = last_update @schema_property("name") def name(self) -> Optional[str]: return self._property_name @name.setter def name(self, value: Optional[str]) -> None: if value is None: self._property_name = None return self.assert_isinstance(value, "name", six.string_types) self._property_name = value @schema_property("user") def user(self) -> Optional[List[str]]: return self._property_user @user.setter def user(self, value: 
Optional[List[str]]) -> None: if value is None: self._property_user = None return self.assert_isinstance(value, "user", (list, tuple)) self.assert_isinstance(value, "user", six.string_types, is_array=True) self._property_user = value @schema_property("ready") def ready(self) -> Optional[bool]: return self._property_ready @ready.setter def ready(self, value: Optional[bool]) -> None: if value is None: self._property_ready = None return self.assert_isinstance(value, "ready", (bool,)) self._property_ready = value @schema_property("tags") def tags(self) -> Optional[List[str]]: return self._property_tags @tags.setter def tags(self, value: Optional[List[str]]) -> None: if value is None: self._property_tags = None return self.assert_isinstance(value, "tags", (list, tuple)) self.assert_isinstance(value, "tags", six.string_types, is_array=True) self._property_tags = value @schema_property("system_tags") def system_tags(self) -> Optional[List[str]]: return self._property_system_tags @system_tags.setter def system_tags(self, value: Optional[List[str]]) -> None: if value is None: self._property_system_tags = None return self.assert_isinstance(value, "system_tags", (list, tuple)) self.assert_isinstance(value, "system_tags", six.string_types, is_array=True) self._property_system_tags = value @schema_property("only_fields") def only_fields(self) -> Optional[List[str]]: return self._property_only_fields @only_fields.setter def only_fields(self, value: Optional[List[str]]) -> None: if value is None: self._property_only_fields = None return self.assert_isinstance(value, "only_fields", (list, tuple)) self.assert_isinstance(value, "only_fields", six.string_types, is_array=True) self._property_only_fields = value @schema_property("page") def page(self) -> Optional[int]: return self._property_page @page.setter def page(self, value: Optional[int]) -> None: if value is None: self._property_page = None return if isinstance(value, float) and value.is_integer(): value = int(value) 
self.assert_isinstance(value, "page", six.integer_types) self._property_page = value @schema_property("page_size") def page_size(self) -> Optional[int]: return self._property_page_size @page_size.setter def page_size(self, value: Optional[int]) -> None: if value is None: self._property_page_size = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "page_size", six.integer_types) self._property_page_size = value @schema_property("project") def project(self) -> Optional[List[str]]: return self._property_project @project.setter def project(self, value: Optional[List[str]]) -> None: if value is None: self._property_project = None return self.assert_isinstance(value, "project", (list, tuple)) self.assert_isinstance(value, "project", six.string_types, is_array=True) self._property_project = value @schema_property("order_by") def order_by(self) -> Optional[List[str]]: return self._property_order_by @order_by.setter def order_by(self, value: Optional[List[str]]) -> None: if value is None: self._property_order_by = None return self.assert_isinstance(value, "order_by", (list, tuple)) self.assert_isinstance(value, "order_by", six.string_types, is_array=True) self._property_order_by = value @schema_property("task") def task(self) -> Optional[List[str]]: return self._property_task @task.setter def task(self, value: Optional[List[str]]) -> None: if value is None: self._property_task = None return self.assert_isinstance(value, "task", (list, tuple)) self.assert_isinstance(value, "task", six.string_types, is_array=True) self._property_task = value @schema_property("id") def id(self) -> Optional[List[str]]: return self._property_id @id.setter def id(self, value: Optional[List[str]]) -> None: if value is None: self._property_id = None return self.assert_isinstance(value, "id", (list, tuple)) self.assert_isinstance(value, "id", six.string_types, is_array=True) self._property_id = value @schema_property("search_text") def 
search_text(self) -> Optional[str]: return self._property_search_text @search_text.setter def search_text(self, value: Optional[str]) -> None: if value is None: self._property_search_text = None return self.assert_isinstance(value, "search_text", six.string_types) self._property_search_text = value @schema_property("framework") def framework(self) -> Optional[List[str]]: return self._property_framework @framework.setter def framework(self, value: Optional[List[str]]) -> None: if value is None: self._property_framework = None return self.assert_isinstance(value, "framework", (list, tuple)) self.assert_isinstance(value, "framework", six.string_types, is_array=True) self._property_framework = value @schema_property("uri") def uri(self) -> Optional[List[str]]: return self._property_uri @uri.setter def uri(self, value: Optional[List[str]]) -> None: if value is None: self._property_uri = None return self.assert_isinstance(value, "uri", (list, tuple)) self.assert_isinstance(value, "uri", six.string_types, is_array=True) self._property_uri = value @schema_property("_all_") def _all_(self) -> Any: return self._property__all_ @_all_.setter def _all_(self, value: Any) -> None: if value is None: self._property__all_ = None return if isinstance(value, dict): value = MultiFieldPatternData.from_dict(value) else: self.assert_isinstance(value, "_all_", MultiFieldPatternData) self._property__all_ = value @schema_property("_any_") def _any_(self) -> Any: return self._property__any_ @_any_.setter def _any_(self, value: Any) -> None: if value is None: self._property__any_ = None return if isinstance(value, dict): value = MultiFieldPatternData.from_dict(value) else: self.assert_isinstance(value, "_any_", MultiFieldPatternData) self._property__any_ = value @schema_property("last_update") def last_update(self) -> Optional[str]: return self._property_last_update @last_update.setter def last_update(self, value: Optional[str]) -> None: if value is None: self._property_last_update = None 
return self.assert_isinstance(value, "last_update", six.string_types) self._property_last_update = value
GetAllRequest
python
tornadoweb__tornado
tornado/test/locks_test.py
{ "start": 11116, "end": 13919 }
class ____(AsyncTestCase): @gen_test def test_context_manager(self): sem = locks.Semaphore() with (yield sem.acquire()) as yielded: self.assertIsNone(yielded) # Semaphore was released and can be acquired again. self.assertTrue(asyncio.ensure_future(sem.acquire()).done()) @gen_test def test_context_manager_async_await(self): # Repeat the above test using 'async with'. sem = locks.Semaphore() async def f(): async with sem as yielded: self.assertIsNone(yielded) yield f() # Semaphore was released and can be acquired again. self.assertTrue(asyncio.ensure_future(sem.acquire()).done()) @gen_test def test_context_manager_exception(self): sem = locks.Semaphore() with self.assertRaises(ZeroDivisionError): with (yield sem.acquire()): 1 / 0 # Semaphore was released and can be acquired again. self.assertTrue(asyncio.ensure_future(sem.acquire()).done()) @gen_test def test_context_manager_timeout(self): sem = locks.Semaphore() with (yield sem.acquire(timedelta(seconds=0.01))): pass # Semaphore was released and can be acquired again. self.assertTrue(asyncio.ensure_future(sem.acquire()).done()) @gen_test def test_context_manager_timeout_error(self): sem = locks.Semaphore(value=0) with self.assertRaises(gen.TimeoutError): with (yield sem.acquire(timedelta(seconds=0.01))): pass # Counter is still 0. self.assertFalse(asyncio.ensure_future(sem.acquire()).done()) @gen_test def test_context_manager_contended(self): sem = locks.Semaphore() history = [] @gen.coroutine def f(index): with (yield sem.acquire()): history.append("acquired %d" % index) yield gen.sleep(0.01) history.append("release %d" % index) yield [f(i) for i in range(2)] expected_history = [] for i in range(2): expected_history.extend(["acquired %d" % i, "release %d" % i]) self.assertEqual(expected_history, history) @gen_test def test_yield_sem(self): # Ensure we catch a "with (yield sem)", which should be # "with (yield sem.acquire())". 
with self.assertRaises(gen.BadYieldError): with (yield locks.Semaphore()): pass def test_context_manager_misuse(self): # Ensure we catch a "with sem", which should be # "with (yield sem.acquire())". with self.assertRaises(RuntimeError): with locks.Semaphore(): pass
SemaphoreContextManagerTest
python
doocs__leetcode
solution/3600-3699/3668.Restore Finishing Order/Solution.py
{ "start": 0, "end": 194 }
class ____: def recoverOrder(self, order: List[int], friends: List[int]) -> List[int]: d = {x: i for i, x in enumerate(order)} return sorted(friends, key=lambda x: d[x])
Solution
python
django__django
tests/forms_tests/field_tests/test_genericipaddressfield.py
{ "start": 193, "end": 9451 }
class ____(SimpleTestCase): def test_generic_ipaddress_invalid_arguments(self): with self.assertRaises(ValueError): GenericIPAddressField(protocol="hamster") with self.assertRaises(ValueError): GenericIPAddressField(protocol="ipv4", unpack_ipv4=True) def test_generic_ipaddress_as_generic(self): # The edge cases of the IPv6 validation code are not deeply tested # here, they are covered in the tests for django.utils.ipv6 f = GenericIPAddressField() with self.assertRaisesMessage(ValidationError, "'This field is required.'"): f.clean("") with self.assertRaisesMessage(ValidationError, "'This field is required.'"): f.clean(None) self.assertEqual(f.clean(" 127.0.0.1 "), "127.0.0.1") with self.assertRaisesMessage( ValidationError, "'Enter a valid IPv4 or IPv6 address.'" ): f.clean("foo") with self.assertRaisesMessage( ValidationError, "'Enter a valid IPv4 or IPv6 address.'" ): f.clean("127.0.0.") with self.assertRaisesMessage( ValidationError, "'Enter a valid IPv4 or IPv6 address.'" ): f.clean("1.2.3.4.5") with self.assertRaisesMessage( ValidationError, "'Enter a valid IPv4 or IPv6 address.'" ): f.clean("256.125.1.5") self.assertEqual( f.clean(" fe80::223:6cff:fe8a:2e8a "), "fe80::223:6cff:fe8a:2e8a" ) self.assertEqual( f.clean(" 2a02::223:6cff:fe8a:2e8a "), "2a02::223:6cff:fe8a:2e8a" ) with self.assertRaisesMessage( ValidationError, "'This is not a valid IPv6 address.'" ): f.clean("12345:2:3:4") with self.assertRaisesMessage( ValidationError, "'This is not a valid IPv6 address.'" ): f.clean("1::2:3::4") with self.assertRaisesMessage( ValidationError, "'This is not a valid IPv6 address.'" ): f.clean("foo::223:6cff:fe8a:2e8a") with self.assertRaisesMessage( ValidationError, "'This is not a valid IPv6 address.'" ): f.clean("1::2:3:4:5:6:7:8") with self.assertRaisesMessage( ValidationError, "'This is not a valid IPv6 address.'" ): f.clean("1:2") def test_generic_ipaddress_as_ipv4_only(self): f = GenericIPAddressField(protocol="IPv4") with 
self.assertRaisesMessage(ValidationError, "'This field is required.'"): f.clean("") with self.assertRaisesMessage(ValidationError, "'This field is required.'"): f.clean(None) self.assertEqual(f.clean(" 127.0.0.1 "), "127.0.0.1") with self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 address.'"): f.clean("foo") with self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 address.'"): f.clean("127.0.0.") with self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 address.'"): f.clean("1.2.3.4.5") with self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 address.'"): f.clean("256.125.1.5") with self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 address.'"): f.clean("fe80::223:6cff:fe8a:2e8a") with self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 address.'"): f.clean("2a02::223:6cff:fe8a:2e8a") def test_generic_ipaddress_as_ipv6_only(self): f = GenericIPAddressField(protocol="IPv6") with self.assertRaisesMessage(ValidationError, "'This field is required.'"): f.clean("") with self.assertRaisesMessage(ValidationError, "'This field is required.'"): f.clean(None) with self.assertRaisesMessage(ValidationError, "'Enter a valid IPv6 address.'"): f.clean("127.0.0.1") with self.assertRaisesMessage(ValidationError, "'Enter a valid IPv6 address.'"): f.clean("foo") with self.assertRaisesMessage(ValidationError, "'Enter a valid IPv6 address.'"): f.clean("127.0.0.") with self.assertRaisesMessage(ValidationError, "'Enter a valid IPv6 address.'"): f.clean("1.2.3.4.5") with self.assertRaisesMessage(ValidationError, "'Enter a valid IPv6 address.'"): f.clean("256.125.1.5") self.assertEqual( f.clean(" fe80::223:6cff:fe8a:2e8a "), "fe80::223:6cff:fe8a:2e8a" ) self.assertEqual( f.clean(" 2a02::223:6cff:fe8a:2e8a "), "2a02::223:6cff:fe8a:2e8a" ) with self.assertRaisesMessage( ValidationError, "'This is not a valid IPv6 address.'" ): f.clean("12345:2:3:4") with self.assertRaisesMessage( ValidationError, "'This is not a valid IPv6 
address.'" ): f.clean("1::2:3::4") with self.assertRaisesMessage( ValidationError, "'This is not a valid IPv6 address.'" ): f.clean("foo::223:6cff:fe8a:2e8a") with self.assertRaisesMessage( ValidationError, "'This is not a valid IPv6 address.'" ): f.clean("1::2:3:4:5:6:7:8") with self.assertRaisesMessage( ValidationError, "'This is not a valid IPv6 address.'" ): f.clean("1:2") def test_generic_ipaddress_max_length_custom(self): # Valid IPv4-mapped IPv6 address, len 45. addr = "0000:0000:0000:0000:0000:ffff:192.168.100.228" f = GenericIPAddressField(max_length=len(addr)) f.clean(addr) def test_generic_ipaddress_max_length_validation_error(self): # Valid IPv4-mapped IPv6 address, len 45. addr = "0000:0000:0000:0000:0000:ffff:192.168.100.228" cases = [ ({}, MAX_IPV6_ADDRESS_LENGTH), # Default value. ({"max_length": len(addr) - 1}, len(addr) - 1), ] for kwargs, max_length in cases: max_length_plus_one = max_length + 1 msg = ( f"Ensure this value has at most {max_length} characters (it has " f"{max_length_plus_one}).'" ) with self.subTest(max_length=max_length): f = GenericIPAddressField(**kwargs) with self.assertRaisesMessage(ValidationError, msg): f.clean("x" * max_length_plus_one) with self.assertRaisesMessage( ValidationError, "This is not a valid IPv6 address." 
): f.clean(addr) def test_generic_ipaddress_as_generic_not_required(self): f = GenericIPAddressField(required=False) self.assertEqual(f.clean(""), "") self.assertEqual(f.clean(None), "") self.assertEqual(f.clean("127.0.0.1"), "127.0.0.1") with self.assertRaisesMessage( ValidationError, "'Enter a valid IPv4 or IPv6 address.'" ): f.clean("foo") with self.assertRaisesMessage( ValidationError, "'Enter a valid IPv4 or IPv6 address.'" ): f.clean("127.0.0.") with self.assertRaisesMessage( ValidationError, "'Enter a valid IPv4 or IPv6 address.'" ): f.clean("1.2.3.4.5") with self.assertRaisesMessage( ValidationError, "'Enter a valid IPv4 or IPv6 address.'" ): f.clean("256.125.1.5") self.assertEqual( f.clean(" fe80::223:6cff:fe8a:2e8a "), "fe80::223:6cff:fe8a:2e8a" ) self.assertEqual( f.clean(" " * MAX_IPV6_ADDRESS_LENGTH + " 2a02::223:6cff:fe8a:2e8a "), "2a02::223:6cff:fe8a:2e8a", ) with self.assertRaisesMessage( ValidationError, "'This is not a valid IPv6 address.'" ): f.clean("12345:2:3:4") with self.assertRaisesMessage( ValidationError, "'This is not a valid IPv6 address.'" ): f.clean("1::2:3::4") with self.assertRaisesMessage( ValidationError, "'This is not a valid IPv6 address.'" ): f.clean("foo::223:6cff:fe8a:2e8a") with self.assertRaisesMessage( ValidationError, "'This is not a valid IPv6 address.'" ): f.clean("1::2:3:4:5:6:7:8") with self.assertRaisesMessage( ValidationError, "'This is not a valid IPv6 address.'" ): f.clean("1:2") def test_generic_ipaddress_normalization(self): # Test the normalizing code f = GenericIPAddressField() self.assertEqual(f.clean(" ::ffff:0a0a:0a0a "), "::ffff:10.10.10.10") self.assertEqual(f.clean(" ::ffff:10.10.10.10 "), "::ffff:10.10.10.10") self.assertEqual( f.clean(" 2001:000:a:0000:0:fe:fe:beef "), "2001:0:a::fe:fe:beef" ) self.assertEqual( f.clean(" 2001::a:0000:0:fe:fe:beef "), "2001:0:a::fe:fe:beef" ) f = GenericIPAddressField(unpack_ipv4=True) self.assertEqual(f.clean(" ::ffff:0a0a:0a0a"), "10.10.10.10")
GenericIPAddressFieldTest
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/operators/vertex_ai/model_service.py
{ "start": 19883, "end": 23041 }
class ____(GoogleCloudBaseOperator): """ Lists Model versions in a Location. :param project_id: Required. The ID of the Google Cloud project that the service belongs to. :param region: Required. The ID of the Google Cloud region that the service belongs to. :param model_id: Required. The ID of the model to list versions for. Could be in format `projects/{project}/locations/{location}/models/{model_id}@{version_id}` or `projects/{project}/locations/{location}/models/{model_id}@{version_alias}` if model has several versions. :param retry: Designation of what errors, if any, should be retried. :param timeout: The timeout for this request. :param metadata: Strings which should be sent along with the request as metadata. :param gcp_conn_id: The connection ID to use connecting to Google Cloud. :param impersonation_chain: Optional service account to impersonate using short-term credentials, or chained list of accounts required to get the access_token of the last account in the list, which will be impersonated in the request. If set as a string, the account must grant the originating account the Service Account Token Creator IAM role. If set as a sequence, the identities from the list must grant Service Account Token Creator IAM role to the directly preceding identity, with first account from the list granting this role to the originating account (templated). 
""" template_fields = ("model_id", "region", "project_id", "impersonation_chain") def __init__( self, *, region: str, project_id: str, model_id: str, retry: Retry | _MethodDefault = DEFAULT, timeout: float | None = None, metadata: Sequence[tuple[str, str]] = (), gcp_conn_id: str = "google_cloud_default", impersonation_chain: str | Sequence[str] | None = None, **kwargs, ) -> None: super().__init__(**kwargs) self.region = region self.project_id = project_id self.model_id = model_id self.retry = retry self.timeout = timeout self.metadata = metadata self.gcp_conn_id = gcp_conn_id self.impersonation_chain = impersonation_chain def execute(self, context: Context): hook = ModelServiceHook( gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain, ) self.log.info("Retrieving versions list from model: %s", self.model_id) results = hook.list_model_versions( project_id=self.project_id, region=self.region, model_id=self.model_id, retry=self.retry, timeout=self.timeout, metadata=self.metadata, ) for result in results: model = Model.to_dict(result) self.log.info("Model name: %s;", model["name"]) self.log.info("Model version: %s, model alias %s;", model["version_id"], model["version_aliases"]) return [Model.to_dict(result) for result in results]
ListModelVersionsOperator
python
vyperlang__vyper
vyper/semantics/analysis/base.py
{ "start": 4127, "end": 5009 }
class ____(AnalysisResult): alias: str # the name in the namespace qualified_module_name: str # for error messages compiler_input: CompilerInput # to recover file info for ast export parsed: Any # (json) abi | AST _typ: Any = None # type to be filled in during analysis @property def typ(self): if self._typ is None: # pragma: nocover raise CompilerPanic("unreachable!") return self._typ def to_dict(self): ret = {"alias": self.alias, "qualified_module_name": self.qualified_module_name} ret["source_id"] = self.compiler_input.source_id ret["path"] = str(self.compiler_input.path) ret["resolved_path"] = str(self.compiler_input.resolved_path) ret["file_sha256sum"] = self.compiler_input.sha256sum return ret # analysis result of InitializesDecl @dataclass
ImportInfo
python
apache__airflow
airflow-core/src/airflow/api_fastapi/core_api/datamodels/dag_versions.py
{ "start": 1395, "end": 1572 }
class ____(BaseModel): """DAG Version Collection serializer for responses.""" dag_versions: Iterable[DagVersionResponse] total_entries: int
DAGVersionCollectionResponse
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/flake8_pie/PIE796.py
{ "start": 332, "end": 420 }
class ____(Enum): A = 1.0 B = True C = False D = False # PIE796
FakeEnum5
python
catalyst-team__catalyst
catalyst/contrib/data/reader_cv.py
{ "start": 1412, "end": 2823 }
class ____(IReader): """Mask reader abstraction. Reads masks from a `csv` dataset.""" def __init__( self, input_key: str, output_key: Optional[str] = None, rootpath: Optional[str] = None, clip_range: Tuple[Union[int, float], Union[int, float]] = (0, 1), ): """ Args: input_key: key to use from annotation dict output_key: key to use to store the result, default: ``input_key`` rootpath: path to images dataset root directory (so your can use relative paths in annotations) clip_range (Tuple[int, int]): lower and upper interval edges, image values outside the interval are clipped to the interval edges """ super().__init__(input_key, output_key or input_key) self.rootpath = rootpath self.clip = clip_range def __call__(self, element): """Reads a row from your annotations dict with filename and transfer it to a mask Args: element: elem in your dataset. Returns: np.ndarray: Mask """ mask_name = str(element[self.input_key]) mask = mimread(mask_name, rootpath=self.rootpath, clip_range=self.clip) output = {self.output_key: mask} return output __all__ = ["ImageReader", "MaskReader"]
MaskReader
python
getsentry__sentry
src/sentry/workflow_engine/endpoints/serializers/action_handler_serializer.py
{ "start": 288, "end": 487 }
class ____(TypedDict): id: str name: str installationId: str installationUuid: str status: int settings: NotRequired[dict[str, Any]] title: NotRequired[str]
SentryAppContext
python
pytorch__pytorch
torchgen/api/types/types_base.py
{ "start": 4256, "end": 5000 }
class ____(CType): elem: CType def cpp_type(self, *, strip_ref: bool = False) -> str: if strip_ref: return self.elem.cpp_type(strip_ref=strip_ref) return f"{self.elem.cpp_type()} &" def remove_const_ref(self) -> CType: return self.elem.remove_const_ref() # A NamedCType is short for Named C++ semantic type. A NamedCType represents a C++ type, plus # semantic information about what it represents. For example, consider the # argument "bool pin_memory"; its normal C++ type is "bool", but its C++ # semantic type also keeps track that this represents a "pin_memory"; you can't # just use a random other boolean in a context where you need a "pin_memory"! # @dataclass(frozen=True)
MutRefCType
python
TheAlgorithms__Python
knapsack/tests/test_greedy_knapsack.py
{ "start": 77, "end": 2343 }
class ____(unittest.TestCase): """ Test cases for knapsack """ def test_sorted(self): """ kp.calc_profit takes the required argument (profit, weight, max_weight) and returns whether the answer matches to the expected ones """ profit = [10, 20, 30, 40, 50, 60] weight = [2, 4, 6, 8, 10, 12] max_weight = 100 assert kp.calc_profit(profit, weight, max_weight) == 210 def test_negative_max_weight(self): """ Returns ValueError for any negative max_weight value :return: ValueError """ # profit = [10, 20, 30, 40, 50, 60] # weight = [2, 4, 6, 8, 10, 12] # max_weight = -15 pytest.raises(ValueError, match=r"max_weight must greater than zero.") def test_negative_profit_value(self): """ Returns ValueError for any negative profit value in the list :return: ValueError """ # profit = [10, -20, 30, 40, 50, 60] # weight = [2, 4, 6, 8, 10, 12] # max_weight = 15 pytest.raises(ValueError, match=r"Weight can not be negative.") def test_negative_weight_value(self): """ Returns ValueError for any negative weight value in the list :return: ValueError """ # profit = [10, 20, 30, 40, 50, 60] # weight = [2, -4, 6, -8, 10, 12] # max_weight = 15 pytest.raises(ValueError, match=r"Profit can not be negative.") def test_null_max_weight(self): """ Returns ValueError for any zero max_weight value :return: ValueError """ # profit = [10, 20, 30, 40, 50, 60] # weight = [2, 4, 6, 8, 10, 12] # max_weight = null pytest.raises(ValueError, match=r"max_weight must greater than zero.") def test_unequal_list_length(self): """ Returns IndexError if length of lists (profit and weight) are unequal. :return: IndexError """ # profit = [10, 20, 30, 40, 50] # weight = [2, 4, 6, 8, 10, 12] # max_weight = 100 pytest.raises( IndexError, match=r"The length of profit and weight must be same." ) if __name__ == "__main__": unittest.main()
TestClass
python
apache__airflow
task-sdk/src/airflow/sdk/execution_time/comms.py
{ "start": 21572, "end": 21660 }
class ____(BaseModel): type: Literal["SentFDs"] = "SentFDs" fds: list[int]
SentFDs
python
google__jax
tests/lax_test.py
{ "start": 156619, "end": 163874 }
class ____(jtu.JaxTestCase): def _Check(self, make_const, expected): # check casting to ndarray works asarray_result = np.asarray(make_const()) # check passing as an argument works (should hit constant handler) zero = np.array(0, expected.dtype) argument_result = lax.add(zero, make_const()) # check looping into a compiled computation works jit_result = jax.jit(lambda x: lax.add(x, make_const()))(zero) # ensure they're all the same self.assertAllClose(asarray_result, expected) self.assertAllClose(argument_result, expected) self.assertAllClose(jit_result, expected) # ensure repr doesn't crash repr(make_const()) @jtu.sample_product( dtype=lax_test_util.default_dtypes + [None], shape=[(), (3,), (2, 3), (2, 3, 4), (1001, 1001)], fill_value=[0, 1, np.pi], ) def testFilledConstant(self, shape, fill_value, dtype): make_const = lambda: lax.full(shape, fill_value, dtype) expected = np.full(shape, fill_value, dtype or dtypes.dtype(fill_value)) self._Check(make_const, expected) @jtu.sample_product( [dict(shape=shape, dimension=dimension) for shape in [(), (3,), (2, 3), (2, 3, 4)] # TODO(mattjj): re-enable (1001, 1001), (101, 101, 101), for dimension in range(len(shape))], dtype=lax_test_util.default_dtypes, ) def testIotaConstant(self, dtype, shape, dimension): make_const = lambda: lax.broadcasted_iota(dtype, shape, dimension) arr = np.arange(shape[dimension], dtype=dtypes.canonicalize_dtype(dtype)) singleton_shape = [1] * len(shape) singleton_shape[dimension] = shape[dimension] expected = np.broadcast_to(arr.reshape(singleton_shape), shape) self._Check(make_const, expected) @jtu.sample_product( [dict(shape=shape, axes=axes) for shape, axes in [ [(2, 3), (0, 1)], [(2, 3, 4), (0, 1)], [(2, 3, 4), (0, 2)], [(2, 3, 4), (1, 2)], [(2, 3, 4), (0, 1, 2)], [(2, 3, 4, 2), (0, 1, 2)], [(2, 3, 4, 2), (0, 2, 3)], [(1001, 1001), (0, 1)], ]], dtype=lax_test_util.default_dtypes, ) def testDeltaConstant(self, dtype, shape, axes): make_const = lambda: lax_internal._delta(dtype, shape, axes) # 
don't check the asarray case, just assume it's right expected = np.asarray(make_const()) self._Check(make_const, expected) def testBroadcastInDim(self): arr = lax.full((2, 1), 1.) + 1. arr_np = np.full((2, 1), 1.) + 1. expected = lax_reference.broadcast_in_dim(arr_np, (2, 1, 3), (0, 2)) make_const = lambda: lax.broadcast_in_dim(arr, (2, 1, 3), (0, 2)) self._Check(make_const, expected) @jtu.sample_product( input_type=[int, float, np.int32, np.float32, np.array], dtype=[np.int32, np.float32], jit=[True, False], value=[0, 1], ) def testConvertElementReturnType(self, input_type, dtype, value, jit): op = lambda x: lax.convert_element_type(x, dtype) if jit: op = jax.jit(op) result = op(input_type(value)) assert isinstance(result, jax.Array) @jtu.sample_product( dtype_in=lax_test_util.all_dtypes, dtype_out=lax_test_util.all_dtypes) @jtu.ignore_warning(category=np.exceptions.ComplexWarning) def testConvertElementTypeAvoidsCopies(self, dtype_in, dtype_out): x = jax.device_put(np.zeros(5, dtype_in)) self.assertEqual(x.dtype, dtype_in) y = lax.convert_element_type(x, dtype_out) self.assertEqual(y.dtype, dtype_out) x_buf = x y_buf = y if np.dtype(dtype_in) == np.dtype(dtype_out): self.assertEqual(x_buf.unsafe_buffer_pointer(), y_buf.unsafe_buffer_pointer()) else: self.assertNotEqual(x_buf.unsafe_buffer_pointer(), y_buf.unsafe_buffer_pointer()) @jtu.sample_product( index_dtype=jtu.dtypes.all_inexact + jtu.dtypes.boolean, jax_fn=[lax.argmin, lax.argmax], ) def testArgMinMaxIndexDtypeError(self, jax_fn, index_dtype): with self.assertRaisesRegex(TypeError, "index_dtype must be an integer type"): jax_fn(np.ones((2, 2)), axis=0, index_dtype=index_dtype) @parameterized.parameters([lax.argmin, lax.argmax]) def testArgMinMaxEmptyError(self, jax_fn): with self.assertRaisesRegex(ValueError, "require non-empty reduced dimension"): jax_fn(np.ones((0, 2)), axis=0, index_dtype=np.int32) @parameterized.parameters([lax.argmin, lax.argmax]) def testArgMinMaxInvalidAxisError(self, jax_fn): with 
self.assertRaisesRegex(ValueError, "Invalid axis -1 for operand"): jax_fn(np.ones((2, 3)), axis=-1, index_dtype=np.int32) @jtu.sample_product( jax_fn=[lax.argmin, lax.argmax], weak_type=[False, True], ) def testArgMinMaxWeakType(self, jax_fn, weak_type): op = lambda x: jax_fn(x, axis=0, index_dtype=np.int32) x_in = lax_internal._convert_element_type(np.ones((2, 2)), weak_type=weak_type) self.assertEqual(dtypes.is_weakly_typed(x_in), weak_type) x_out = op(x_in) self.assertEqual(dtypes.is_weakly_typed(x_out), False) x_out_jit = jax.jit(op)(x_in) self.assertEqual(dtypes.is_weakly_typed(x_out_jit), False) def testArgMaxOfNanChoosesNaN(self): self.assertEqual(lax.argmax(np.array([0., np.nan]), axis=0, index_dtype=np.int32), 1) unary_op_types = {} for r in lax_test_util.lax_ops(): if r.nargs == 1: unary_op_types[r.op] = (unary_op_types.get(r.op, set()) | {np.dtype(t) for t in r.dtypes}) @parameterized.named_parameters( {"testcase_name": f"_{op}", "op_name": op, "rec_dtypes": dtypes} for op, dtypes in unary_op_types.items()) def testUnaryWeakTypes(self, op_name, rec_dtypes): """Test that all lax unary ops propagate weak_type information appropriately.""" if op_name == "bitwise_not": raise unittest.SkipTest("https://github.com/jax-ml/jax/issues/12066") # Find a valid dtype for the function. for dtype in [float, int, complex, bool]: dtype = dtypes.canonicalize_dtype(dtype) if dtype in rec_dtypes: py_val = dtype.type(1).item() lax_val = lax.full((), py_val, dtype) break else: raise ValueError(f"no available dtypes in {rec_dtypes}") op = getattr(lax, op_name) py_op = op(py_val) lax_op = op(lax_val) self.assertAllClose(py_op, lax_op, check_dtypes=True) self.assertFalse(lax_op.aval.weak_type) if type(py_val) == bool: # Booleans should have weak types stripped. 
self.assertFalse(py_op.aval.weak_type) else: self.assertTrue(py_op.aval.weak_type) def testCumsumLengthOne(self): # regression test for issue 4672 x = lax.full((1,), 1) out = lax.cumsum(x) self.assertArraysEqual(out, x) def testLog1pNearOne(self): expected = np.log1p(np.float32(1e-5)) np.testing.assert_array_almost_equal_nulp( expected.astype(np.float32), lax.log1p(np.float32(1e-5))) np.testing.assert_array_almost_equal_nulp( expected.astype(np.complex64), lax.log1p(np.complex64(1e-5)))
LazyConstantTest
python
kamyu104__LeetCode-Solutions
Python/find-unique-binary-string.py
{ "start": 361, "end": 865 }
class ____(object): def findDifferentBinaryString(self, nums): """ :type nums: List[str] :rtype: str """ lookup = set(map(lambda x: int(x, 2), nums)) # Time: O(k * n) = O(n^2) return next(bin(i)[2:].zfill(len(nums[0])) for i in xrange(2**len(nums[0])) if i not in lookup) # Time: O(k + n) = O(n) # Time: O(k * n + n * 2^n) = O(n * 2^n), k is len(nums) # , n is len(nums[0]) # Space: O(k) = O(1) ~ O(2^n)
Solution2
python
scrapy__scrapy
tests/test_settings/__init__.py
{ "start": 21777, "end": 21837 }
class ____: pass Component1Alias = Component1
Component1
python
doocs__leetcode
solution/0000-0099/0026.Remove Duplicates from Sorted Array/Solution.py
{ "start": 0, "end": 220 }
class ____: def removeDuplicates(self, nums: List[int]) -> int: k = 0 for x in nums: if k == 0 or x != nums[k - 1]: nums[k] = x k += 1 return k
Solution
python
coleifer__peewee
tests/sqliteq.py
{ "start": 698, "end": 5858 }
class ____(object): database_config = {} n_rows = 20 n_threads = 20 def setUp(self): super(BaseTestQueueDatabase, self).setUp() User._meta.database = db with db: db.create_tables([User], safe=True) User._meta.database = \ self.database = get_db(**self.database_config) # Sanity check at startup. self.assertEqual(self.database.queue_size(), 0) def tearDown(self): super(BaseTestQueueDatabase, self).tearDown() User._meta.database = db with db: User.drop_table() if not self.database.is_closed(): self.database.close() if not db.is_closed(): db.close() filename = db.database if os.path.exists(filename): os.unlink(filename) def test_query_error(self): self.database.start() curs = self.database.execute_sql('foo bar baz') self.assertRaises(OperationalError, curs.fetchone) self.database.stop() def test_integrity_error(self): self.database.start() u = User.create(name='u') self.assertRaises(IntegrityError, User.create, name='u') def test_query_execution(self): qr = User.select().execute() self.assertEqual(self.database.queue_size(), 0) self.database.start() try: users = list(qr) huey = User.create(name='huey') mickey = User.create(name='mickey') self.assertTrue(huey.id is not None) self.assertTrue(mickey.id is not None) self.assertEqual(self.database.queue_size(), 0) finally: self.database.stop() def create_thread(self, fn, *args): raise NotImplementedError def create_event(self): raise NotImplementedError def test_multiple_threads(self): def create_rows(idx, nrows): for i in range(idx, idx + nrows): User.create(name='u-%s' % i) total = self.n_threads * self.n_rows self.database.start() threads = [self.create_thread(create_rows, i, self.n_rows) for i in range(0, total, self.n_rows)] [t.start() for t in threads] [t.join() for t in threads] self.assertEqual(User.select().count(), total) self.database.stop() def test_pause(self): event_a = self.create_event() event_b = self.create_event() def create_user(name, event, expect_paused): event.wait() if expect_paused: 
self.assertRaises(WriterPaused, lambda: User.create(name=name)) else: User.create(name=name) self.database.start() t_a = self.create_thread(create_user, 'a', event_a, True) t_a.start() t_b = self.create_thread(create_user, 'b', event_b, False) t_b.start() User.create(name='c') self.assertEqual(User.select().count(), 1) # Pause operations but preserve the writer thread/connection. self.database.pause() event_a.set() self.assertEqual(User.select().count(), 1) t_a.join() self.database.unpause() self.assertEqual(User.select().count(), 1) event_b.set() t_b.join() self.assertEqual(User.select().count(), 2) self.database.stop() def test_restart(self): self.database.start() User.create(name='a') self.database.stop() self.database._results_timeout = 0.0001 self.assertRaises(ResultTimeout, User.create, name='b') self.assertEqual(User.select().count(), 1) self.database.start() # Will execute the pending "b" INSERT. self.database._results_timeout = None User.create(name='c') self.assertEqual(User.select().count(), 3) self.assertEqual(sorted(u.name for u in User.select()), ['a', 'b', 'c']) def test_waiting(self): D = {} def create_user(name): D[name] = User.create(name=name).id threads = [self.create_thread(create_user, name) for name in ('huey', 'charlie', 'zaizee')] [t.start() for t in threads] def get_users(): D['users'] = [(user.id, user.name) for user in User.select()] tg = self.create_thread(get_users) tg.start() threads.append(tg) self.database.start() [t.join() for t in threads] self.database.stop() self.assertEqual(sorted(D), ['charlie', 'huey', 'users', 'zaizee']) def test_next_method(self): self.database.start() User.create(name='mickey') User.create(name='huey') query = iter(User.select().order_by(User.name)) self.assertEqual(next(query).name, 'huey') self.assertEqual(next(query).name, 'mickey') self.assertRaises(StopIteration, lambda: next(query)) self.assertEqual( next(self.database.execute_sql('PRAGMA journal_mode'))[0], 'wal') self.database.stop()
BaseTestQueueDatabase
python
dask__distributed
distributed/http/scheduler/json.py
{ "start": 1646, "end": 1746 }
class ____(RequestHandler): def get(self): self.write(self.server.identity())
IdentityJSON
python
django__django
django/db/migrations/operations/models.py
{ "start": 14069, "end": 15306 }
class ____(ModelOperation): """Drop a model's table.""" category = OperationCategory.REMOVAL def deconstruct(self): kwargs = { "name": self.name, } return (self.__class__.__qualname__, [], kwargs) def state_forwards(self, app_label, state): state.remove_model(app_label, self.name_lower) def database_forwards(self, app_label, schema_editor, from_state, to_state): model = from_state.apps.get_model(app_label, self.name) if self.allow_migrate_model(schema_editor.connection.alias, model): schema_editor.delete_model(model) def database_backwards(self, app_label, schema_editor, from_state, to_state): model = to_state.apps.get_model(app_label, self.name) if self.allow_migrate_model(schema_editor.connection.alias, model): schema_editor.create_model(model) def references_model(self, name, app_label): # The deleted model could be referencing the specified model through # related fields. return True def describe(self): return "Delete model %s" % self.name @property def migration_name_fragment(self): return "delete_%s" % self.name_lower
DeleteModel
python
hyperopt__hyperopt
hyperopt/tests/unit/test_tpe.py
{ "start": 1315, "end": 5595 }
class ____(unittest.TestCase): def setUp(self): self.rng = np.random.default_rng(234) def test_mu_is_used_correctly(self): assert np.allclose(10, GMM1([1], [10.0], [0.0000001], rng=self.rng)) def test_sigma_is_used_correctly(self): samples = GMM1([1], [0.0], [10.0], size=[1000], rng=self.rng) assert 9 < np.std(samples) < 11 def test_mus_make_variance(self): samples = GMM1( [0.5, 0.5], [0.0, 1.0], [0.000001, 0.000001], rng=self.rng, size=[1000] ) print(samples.shape) # import matplotlib.pyplot as plt # plt.hist(samples) # plt.show() assert 0.45 < np.mean(samples) < 0.55, np.mean(samples) assert 0.2 < np.var(samples) < 0.3, np.var(samples) def test_weights(self): samples = GMM1( [0.9999, 0.0001], [0.0, 1.0], [0.000001, 0.000001], rng=self.rng, size=[1000], ) assert samples.shape == (1000,) # import matplotlib.pyplot as plt # plt.hist(samples) # plt.show() assert -0.001 < np.mean(samples) < 0.001, np.mean(samples) assert np.var(samples) < 0.0001, np.var(samples) def test_mat_output(self): samples = GMM1( [0.9999, 0.0001], [0.0, 1.0], [0.000001, 0.000001], rng=self.rng, size=[40, 20], ) assert samples.shape == (40, 20) assert -0.001 < np.mean(samples) < 0.001, np.mean(samples) assert np.var(samples) < 0.0001, np.var(samples) def test_lpdf_scalar_one_component(self): # x # weights # mu # sigma llval = GMM1_lpdf(1.0, [1.0], [1.0], [2.0]) assert llval.shape == () assert np.allclose(llval, np.log(1 / np.sqrt(2 * np.pi * 2.0**2))) def test_lpdf_scalar_N_components(self): llval = GMM1_lpdf( 1.0, # x [0.25, 0.25, 0.5], # weights [0.0, 1.0, 2.0], # mu [1.0, 2.0, 5.0], # sigma ) print(llval) a = 0.25 / np.sqrt(2 * np.pi * 1.0**2) * np.exp(-0.5 * (1.0) ** 2) a += 0.25 / np.sqrt(2 * np.pi * 2.0**2) a += 0.5 / np.sqrt(2 * np.pi * 5.0**2) * np.exp(-0.5 * (0.2) ** 2) def test_lpdf_vector_N_components(self): llval = GMM1_lpdf( [1.0, 0.0], # x [0.25, 0.25, 0.5], # weights [0.0, 1.0, 2.0], # mu [1.0, 2.0, 5.0], # sigma ) # case x = 1.0 a = 0.25 / np.sqrt(2 * np.pi * 1.0**2) * 
np.exp(-0.5 * (1.0) ** 2) a += 0.25 / np.sqrt(2 * np.pi * 2.0**2) a += 0.5 / np.sqrt(2 * np.pi * 5.0**2) * np.exp(-0.5 * (0.2) ** 2) assert llval.shape == (2,) assert np.allclose(llval[0], np.log(a)) # case x = 0.0 a = 0.25 / np.sqrt(2 * np.pi * 1.0**2) a += 0.25 / np.sqrt(2 * np.pi * 2.0**2) * np.exp(-0.5 * (0.5) ** 2) a += 0.5 / np.sqrt(2 * np.pi * 5.0**2) * np.exp(-0.5 * (0.4) ** 2) assert np.allclose(llval[1], np.log(a)) def test_lpdf_matrix_N_components(self): llval = GMM1_lpdf( [[1.0, 0.0, 0.0], [0, 0, 1], [0, 0, 1000]], [0.25, 0.25, 0.5], # weights [0.0, 1.0, 2.0], # mu [1.0, 2.0, 5.0], # sigma ) print(llval) assert llval.shape == (3, 3) a = 0.25 / np.sqrt(2 * np.pi * 1.0**2) * np.exp(-0.5 * (1.0) ** 2) a += 0.25 / np.sqrt(2 * np.pi * 2.0**2) a += 0.5 / np.sqrt(2 * np.pi * 5.0**2) * np.exp(-0.5 * (0.2) ** 2) assert np.allclose(llval[0, 0], np.log(a)) assert np.allclose(llval[1, 2], np.log(a)) # case x = 0.0 a = 0.25 / np.sqrt(2 * np.pi * 1.0**2) a += 0.25 / np.sqrt(2 * np.pi * 2.0**2) * np.exp(-0.5 * (0.5) ** 2) a += 0.5 / np.sqrt(2 * np.pi * 5.0**2) * np.exp(-0.5 * (0.4) ** 2) assert np.allclose(llval[0, 1], np.log(a)) assert np.allclose(llval[0, 2], np.log(a)) assert np.allclose(llval[1, 0], np.log(a)) assert np.allclose(llval[1, 1], np.log(a)) assert np.allclose(llval[2, 0], np.log(a)) assert np.allclose(llval[2, 1], np.log(a)) assert np.isfinite(llval[2, 2])
TestGMM1
python
pytorch__pytorch
test/distributed/checkpoint/test_pg_transport.py
{ "start": 12707, "end": 14859 }
class ____(TestCase): def test_prepare_state_dict_basic(self): """Test basic state dict preparation.""" state_dict = {"weight": torch.randn(3, 4), "bias": torch.randn(4)} device = torch.device("cpu") meta, tensors = _prepare_state_dict(state_dict, device) # Check metadata self.assertEqual(len(meta.paths), 2) self.assertEqual(len(meta.non_tensor_leaves), 2) self.assertEqual(len(tensors), 2) # Check that all non_tensor_leaves are _TensorMeta instances for leaf in meta.non_tensor_leaves: self.assertIsInstance(leaf, _TensorMeta) def test_prepare_state_dict_nested(self): """Test preparing nested state dict.""" state_dict = { "layer1": {"weight": torch.randn(3, 4), "bias": torch.randn(4)}, "layer2": {"weight": torch.randn(4, 5), "bias": torch.randn(5)}, } device = torch.device("cpu") meta, tensors = _prepare_state_dict(state_dict, device) # Check metadata self.assertEqual(len(meta.paths), 4) self.assertEqual(len(meta.non_tensor_leaves), 4) self.assertEqual(len(tensors), 4) def test_prepare_state_dict_with_non_tensor_values(self): """Test preparing state dict with non-tensor values.""" state_dict = { "weight": torch.randn(3, 4), "bias": torch.randn(4), "config": {"lr": 0.01, "momentum": 0.9}, "step": 42, } device = torch.device("cpu") meta, tensors = _prepare_state_dict(state_dict, device) # Check metadata - the actual number of paths depends on how the pytree flattens the dict # The nested config dict might be flattened differently self.assertEqual(len(meta.non_tensor_leaves), len(meta.paths)) self.assertEqual(len(tensors), 2) # Check that non-tensor values are preserved non_tensor_values = [ leaf for leaf in meta.non_tensor_leaves if not isinstance(leaf, _TensorMeta) ] self.assertEqual(len(non_tensor_values), 3) # config (2) and step
TestPrepareStateDict
python
Textualize__textual
src/textual/widgets/_selection_list.py
{ "start": 1108, "end": 1209 }
class ____(TypeError): """Type of an error raised if a selection is badly-formed."""
SelectionError
python
spyder-ide__spyder
spyder/plugins/ipythonconsole/utils/websocket_client.py
{ "start": 12852, "end": 13312 }
class ____: def __init__(self): AsyncDispatcher(loop="ipythonconsole", early_return=False)( self._create_queue )() async def _create_queue(self): self.shell = asyncio.Queue() self.iopub = asyncio.Queue() self.stdin = asyncio.Queue() self.control = asyncio.Queue() def __getitem__(self, channel: str) -> asyncio.Queue[dict[str, t.Any]]: return getattr(self, channel)
_ChannelQueues
python
getsentry__sentry
tests/sentry/issues/auto_source_code_config/test_process_event.py
{ "start": 17899, "end": 19695 }
class ____(LanguageSpecificDeriveCodeMappings): platform = "python" def test_backslash_filename_simple(self) -> None: # The lack of a \ after the drive letter in the third frame signals that # this is a relative path. This may be unlikely to occur in practice, # but worth testing nonetheless. self._process_and_assert_configuration_changes( repo_trees={REPO1: ["sentry/mouse.py"]}, frames=[self.frame("\\sentry\\mouse.py", True)], platform=self.platform, expected_new_code_mappings=[self.code_mapping("\\", "")], ) def test_backslash_drive_letter_filename_simple(self) -> None: self._process_and_assert_configuration_changes( repo_trees={REPO1: ["sentry/tasks.py"]}, frames=[self.frame("C:sentry\\tasks.py", True)], platform=self.platform, expected_new_code_mappings=[self.code_mapping("C:sentry\\", "sentry/")], ) def test_backslash_drive_letter_filename_monoRepoAndBranch(self) -> None: self._process_and_assert_configuration_changes( repo_trees={REPO1: ["sentry/tasks.py"]}, frames=[self.frame("C:sentry\\tasks.py", True)], platform=self.platform, expected_new_code_mappings=[self.code_mapping("C:sentry\\", "sentry/")], ) def test_backslash_drive_letter_filename_abs_path(self) -> None: self._process_and_assert_configuration_changes( repo_trees={REPO1: ["sentry/models/release.py"]}, frames=[self.frame("D:\\Users\\code\\sentry\\models\\release.py", True)], platform=self.platform, expected_new_code_mappings=[self.code_mapping("D:\\Users\\code\\", "")], )
TestBackSlashDeriveCodeMappings
python
wandb__wandb
wandb/vendor/pygments/lexers/perl.py
{ "start": 10459, "end": 32006 }
class ____(ExtendedRegexLexer): """ For `Perl 6 <http://www.perl6.org>`_ source code. .. versionadded:: 2.0 """ name = 'Perl6' aliases = ['perl6', 'pl6'] filenames = ['*.pl', '*.pm', '*.nqp', '*.p6', '*.6pl', '*.p6l', '*.pl6', '*.6pm', '*.p6m', '*.pm6', '*.t'] mimetypes = ['text/x-perl6', 'application/x-perl6'] flags = re.MULTILINE | re.DOTALL | re.UNICODE PERL6_IDENTIFIER_RANGE = "['\w:-]" PERL6_KEYWORDS = ( 'BEGIN', 'CATCH', 'CHECK', 'CONTROL', 'END', 'ENTER', 'FIRST', 'INIT', 'KEEP', 'LAST', 'LEAVE', 'NEXT', 'POST', 'PRE', 'START', 'TEMP', 'UNDO', 'as', 'assoc', 'async', 'augment', 'binary', 'break', 'but', 'cached', 'category', 'class', 'constant', 'contend', 'continue', 'copy', 'deep', 'default', 'defequiv', 'defer', 'die', 'do', 'else', 'elsif', 'enum', 'equiv', 'exit', 'export', 'fail', 'fatal', 'for', 'gather', 'given', 'goto', 'grammar', 'handles', 'has', 'if', 'inline', 'irs', 'is', 'last', 'leave', 'let', 'lift', 'loop', 'looser', 'macro', 'make', 'maybe', 'method', 'module', 'multi', 'my', 'next', 'of', 'ofs', 'only', 'oo', 'ors', 'our', 'package', 'parsed', 'prec', 'proto', 'readonly', 'redo', 'ref', 'regex', 'reparsed', 'repeat', 'require', 'required', 'return', 'returns', 'role', 'rule', 'rw', 'self', 'slang', 'state', 'sub', 'submethod', 'subset', 'supersede', 'take', 'temp', 'tighter', 'token', 'trusts', 'try', 'unary', 'unless', 'until', 'use', 'warn', 'when', 'where', 'while', 'will', ) PERL6_BUILTINS = ( 'ACCEPTS', 'HOW', 'REJECTS', 'VAR', 'WHAT', 'WHENCE', 'WHERE', 'WHICH', 'WHO', 'abs', 'acos', 'acosec', 'acosech', 'acosh', 'acotan', 'acotanh', 'all', 'any', 'approx', 'arity', 'asec', 'asech', 'asin', 'asinh', 'assuming', 'atan', 'atan2', 'atanh', 'attr', 'bless', 'body', 'by', 'bytes', 'caller', 'callsame', 'callwith', 'can', 'capitalize', 'cat', 'ceiling', 'chars', 'chmod', 'chomp', 'chop', 'chr', 'chroot', 'circumfix', 'cis', 'classify', 'clone', 'close', 'cmp_ok', 'codes', 'comb', 'connect', 'contains', 'context', 'cos', 'cosec', 'cosech', 
'cosh', 'cotan', 'cotanh', 'count', 'defined', 'delete', 'diag', 'dies_ok', 'does', 'e', 'each', 'eager', 'elems', 'end', 'eof', 'eval', 'eval_dies_ok', 'eval_elsewhere', 'eval_lives_ok', 'evalfile', 'exists', 'exp', 'first', 'flip', 'floor', 'flunk', 'flush', 'fmt', 'force_todo', 'fork', 'from', 'getc', 'gethost', 'getlogin', 'getpeername', 'getpw', 'gmtime', 'graphs', 'grep', 'hints', 'hyper', 'im', 'index', 'infix', 'invert', 'is_approx', 'is_deeply', 'isa', 'isa_ok', 'isnt', 'iterator', 'join', 'key', 'keys', 'kill', 'kv', 'lastcall', 'lazy', 'lc', 'lcfirst', 'like', 'lines', 'link', 'lives_ok', 'localtime', 'log', 'log10', 'map', 'max', 'min', 'minmax', 'name', 'new', 'nextsame', 'nextwith', 'nfc', 'nfd', 'nfkc', 'nfkd', 'nok_error', 'nonce', 'none', 'normalize', 'not', 'nothing', 'ok', 'once', 'one', 'open', 'opendir', 'operator', 'ord', 'p5chomp', 'p5chop', 'pack', 'pair', 'pairs', 'pass', 'perl', 'pi', 'pick', 'plan', 'plan_ok', 'polar', 'pop', 'pos', 'postcircumfix', 'postfix', 'pred', 'prefix', 'print', 'printf', 'push', 'quasi', 'quotemeta', 'rand', 're', 'read', 'readdir', 'readline', 'reduce', 'reverse', 'rewind', 'rewinddir', 'rindex', 'roots', 'round', 'roundrobin', 'run', 'runinstead', 'sameaccent', 'samecase', 'say', 'sec', 'sech', 'sech', 'seek', 'shape', 'shift', 'sign', 'signature', 'sin', 'sinh', 'skip', 'skip_rest', 'sleep', 'slurp', 'sort', 'splice', 'split', 'sprintf', 'sqrt', 'srand', 'strand', 'subst', 'substr', 'succ', 'sum', 'symlink', 'tan', 'tanh', 'throws_ok', 'time', 'times', 'to', 'todo', 'trim', 'trim_end', 'trim_start', 'true', 'truncate', 'uc', 'ucfirst', 'undef', 'undefine', 'uniq', 'unlike', 'unlink', 'unpack', 'unpolar', 'unshift', 'unwrap', 'use_ok', 'value', 'values', 'vec', 'version_lt', 'void', 'wait', 'want', 'wrap', 'write', 'zip', ) PERL6_BUILTIN_CLASSES = ( 'Abstraction', 'Any', 'AnyChar', 'Array', 'Associative', 'Bag', 'Bit', 'Blob', 'Block', 'Bool', 'Buf', 'Byte', 'Callable', 'Capture', 'Char', 'Class', 'Code', 
'Codepoint', 'Comparator', 'Complex', 'Decreasing', 'Exception', 'Failure', 'False', 'Grammar', 'Grapheme', 'Hash', 'IO', 'Increasing', 'Int', 'Junction', 'KeyBag', 'KeyExtractor', 'KeyHash', 'KeySet', 'KitchenSink', 'List', 'Macro', 'Mapping', 'Match', 'Matcher', 'Method', 'Module', 'Num', 'Object', 'Ordered', 'Ordering', 'OrderingPair', 'Package', 'Pair', 'Positional', 'Proxy', 'Range', 'Rat', 'Regex', 'Role', 'Routine', 'Scalar', 'Seq', 'Set', 'Signature', 'Str', 'StrLen', 'StrPos', 'Sub', 'Submethod', 'True', 'UInt', 'Undef', 'Version', 'Void', 'Whatever', 'bit', 'bool', 'buf', 'buf1', 'buf16', 'buf2', 'buf32', 'buf4', 'buf64', 'buf8', 'complex', 'int', 'int1', 'int16', 'int2', 'int32', 'int4', 'int64', 'int8', 'num', 'rat', 'rat1', 'rat16', 'rat2', 'rat32', 'rat4', 'rat64', 'rat8', 'uint', 'uint1', 'uint16', 'uint2', 'uint32', 'uint4', 'uint64', 'uint8', 'utf16', 'utf32', 'utf8', ) PERL6_OPERATORS = ( 'X', 'Z', 'after', 'also', 'and', 'andthen', 'before', 'cmp', 'div', 'eq', 'eqv', 'extra', 'ff', 'fff', 'ge', 'gt', 'le', 'leg', 'lt', 'm', 'mm', 'mod', 'ne', 'or', 'orelse', 'rx', 's', 'tr', 'x', 'xor', 'xx', '++', '--', '**', '!', '+', '-', '~', '?', '|', '||', '+^', '~^', '?^', '^', '*', '/', '%', '%%', '+&', '+<', '+>', '~&', '~<', '~>', '?&', 'gcd', 'lcm', '+', '-', '+|', '+^', '~|', '~^', '?|', '?^', '~', '&', '^', 'but', 'does', '<=>', '..', '..^', '^..', '^..^', '!=', '==', '<', '<=', '>', '>=', '~~', '===', '!eqv', '&&', '||', '^^', '//', 'min', 'max', '??', '!!', 'ff', 'fff', 'so', 'not', '<==', '==>', '<<==', '==>>', ) # Perl 6 has a *lot* of possible bracketing characters # this list was lifted from STD.pm6 (https://github.com/perl6/std) PERL6_BRACKETS = { u'\u0028': u'\u0029', u'\u003c': u'\u003e', u'\u005b': u'\u005d', u'\u007b': u'\u007d', u'\u00ab': u'\u00bb', u'\u0f3a': u'\u0f3b', u'\u0f3c': u'\u0f3d', u'\u169b': u'\u169c', u'\u2018': u'\u2019', u'\u201a': u'\u2019', u'\u201b': u'\u2019', u'\u201c': u'\u201d', u'\u201e': u'\u201d', u'\u201f': 
u'\u201d', u'\u2039': u'\u203a', u'\u2045': u'\u2046', u'\u207d': u'\u207e', u'\u208d': u'\u208e', u'\u2208': u'\u220b', u'\u2209': u'\u220c', u'\u220a': u'\u220d', u'\u2215': u'\u29f5', u'\u223c': u'\u223d', u'\u2243': u'\u22cd', u'\u2252': u'\u2253', u'\u2254': u'\u2255', u'\u2264': u'\u2265', u'\u2266': u'\u2267', u'\u2268': u'\u2269', u'\u226a': u'\u226b', u'\u226e': u'\u226f', u'\u2270': u'\u2271', u'\u2272': u'\u2273', u'\u2274': u'\u2275', u'\u2276': u'\u2277', u'\u2278': u'\u2279', u'\u227a': u'\u227b', u'\u227c': u'\u227d', u'\u227e': u'\u227f', u'\u2280': u'\u2281', u'\u2282': u'\u2283', u'\u2284': u'\u2285', u'\u2286': u'\u2287', u'\u2288': u'\u2289', u'\u228a': u'\u228b', u'\u228f': u'\u2290', u'\u2291': u'\u2292', u'\u2298': u'\u29b8', u'\u22a2': u'\u22a3', u'\u22a6': u'\u2ade', u'\u22a8': u'\u2ae4', u'\u22a9': u'\u2ae3', u'\u22ab': u'\u2ae5', u'\u22b0': u'\u22b1', u'\u22b2': u'\u22b3', u'\u22b4': u'\u22b5', u'\u22b6': u'\u22b7', u'\u22c9': u'\u22ca', u'\u22cb': u'\u22cc', u'\u22d0': u'\u22d1', u'\u22d6': u'\u22d7', u'\u22d8': u'\u22d9', u'\u22da': u'\u22db', u'\u22dc': u'\u22dd', u'\u22de': u'\u22df', u'\u22e0': u'\u22e1', u'\u22e2': u'\u22e3', u'\u22e4': u'\u22e5', u'\u22e6': u'\u22e7', u'\u22e8': u'\u22e9', u'\u22ea': u'\u22eb', u'\u22ec': u'\u22ed', u'\u22f0': u'\u22f1', u'\u22f2': u'\u22fa', u'\u22f3': u'\u22fb', u'\u22f4': u'\u22fc', u'\u22f6': u'\u22fd', u'\u22f7': u'\u22fe', u'\u2308': u'\u2309', u'\u230a': u'\u230b', u'\u2329': u'\u232a', u'\u23b4': u'\u23b5', u'\u2768': u'\u2769', u'\u276a': u'\u276b', u'\u276c': u'\u276d', u'\u276e': u'\u276f', u'\u2770': u'\u2771', u'\u2772': u'\u2773', u'\u2774': u'\u2775', u'\u27c3': u'\u27c4', u'\u27c5': u'\u27c6', u'\u27d5': u'\u27d6', u'\u27dd': u'\u27de', u'\u27e2': u'\u27e3', u'\u27e4': u'\u27e5', u'\u27e6': u'\u27e7', u'\u27e8': u'\u27e9', u'\u27ea': u'\u27eb', u'\u2983': u'\u2984', u'\u2985': u'\u2986', u'\u2987': u'\u2988', u'\u2989': u'\u298a', u'\u298b': u'\u298c', u'\u298d': u'\u298e', 
u'\u298f': u'\u2990', u'\u2991': u'\u2992', u'\u2993': u'\u2994', u'\u2995': u'\u2996', u'\u2997': u'\u2998', u'\u29c0': u'\u29c1', u'\u29c4': u'\u29c5', u'\u29cf': u'\u29d0', u'\u29d1': u'\u29d2', u'\u29d4': u'\u29d5', u'\u29d8': u'\u29d9', u'\u29da': u'\u29db', u'\u29f8': u'\u29f9', u'\u29fc': u'\u29fd', u'\u2a2b': u'\u2a2c', u'\u2a2d': u'\u2a2e', u'\u2a34': u'\u2a35', u'\u2a3c': u'\u2a3d', u'\u2a64': u'\u2a65', u'\u2a79': u'\u2a7a', u'\u2a7d': u'\u2a7e', u'\u2a7f': u'\u2a80', u'\u2a81': u'\u2a82', u'\u2a83': u'\u2a84', u'\u2a8b': u'\u2a8c', u'\u2a91': u'\u2a92', u'\u2a93': u'\u2a94', u'\u2a95': u'\u2a96', u'\u2a97': u'\u2a98', u'\u2a99': u'\u2a9a', u'\u2a9b': u'\u2a9c', u'\u2aa1': u'\u2aa2', u'\u2aa6': u'\u2aa7', u'\u2aa8': u'\u2aa9', u'\u2aaa': u'\u2aab', u'\u2aac': u'\u2aad', u'\u2aaf': u'\u2ab0', u'\u2ab3': u'\u2ab4', u'\u2abb': u'\u2abc', u'\u2abd': u'\u2abe', u'\u2abf': u'\u2ac0', u'\u2ac1': u'\u2ac2', u'\u2ac3': u'\u2ac4', u'\u2ac5': u'\u2ac6', u'\u2acd': u'\u2ace', u'\u2acf': u'\u2ad0', u'\u2ad1': u'\u2ad2', u'\u2ad3': u'\u2ad4', u'\u2ad5': u'\u2ad6', u'\u2aec': u'\u2aed', u'\u2af7': u'\u2af8', u'\u2af9': u'\u2afa', u'\u2e02': u'\u2e03', u'\u2e04': u'\u2e05', u'\u2e09': u'\u2e0a', u'\u2e0c': u'\u2e0d', u'\u2e1c': u'\u2e1d', u'\u2e20': u'\u2e21', u'\u3008': u'\u3009', u'\u300a': u'\u300b', u'\u300c': u'\u300d', u'\u300e': u'\u300f', u'\u3010': u'\u3011', u'\u3014': u'\u3015', u'\u3016': u'\u3017', u'\u3018': u'\u3019', u'\u301a': u'\u301b', u'\u301d': u'\u301e', u'\ufd3e': u'\ufd3f', u'\ufe17': u'\ufe18', u'\ufe35': u'\ufe36', u'\ufe37': u'\ufe38', u'\ufe39': u'\ufe3a', u'\ufe3b': u'\ufe3c', u'\ufe3d': u'\ufe3e', u'\ufe3f': u'\ufe40', u'\ufe41': u'\ufe42', u'\ufe43': u'\ufe44', u'\ufe47': u'\ufe48', u'\ufe59': u'\ufe5a', u'\ufe5b': u'\ufe5c', u'\ufe5d': u'\ufe5e', u'\uff08': u'\uff09', u'\uff1c': u'\uff1e', u'\uff3b': u'\uff3d', u'\uff5b': u'\uff5d', u'\uff5f': u'\uff60', u'\uff62': u'\uff63', } def _build_word_match(words, boundary_regex_fragment=None, 
prefix='', suffix=''): if boundary_regex_fragment is None: return r'\b(' + prefix + r'|'.join(re.escape(x) for x in words) + \ suffix + r')\b' else: return r'(?<!' + boundary_regex_fragment + r')' + prefix + r'(' + \ r'|'.join(re.escape(x) for x in words) + r')' + suffix + r'(?!' + \ boundary_regex_fragment + r')' def brackets_callback(token_class): def callback(lexer, match, context): groups = match.groupdict() opening_chars = groups['delimiter'] n_chars = len(opening_chars) adverbs = groups.get('adverbs') closer = Perl6Lexer.PERL6_BRACKETS.get(opening_chars[0]) text = context.text if closer is None: # it's not a mirrored character, which means we # just need to look for the next occurrence end_pos = text.find(opening_chars, match.start('delimiter') + n_chars) else: # we need to look for the corresponding closing character, # keep nesting in mind closing_chars = closer * n_chars nesting_level = 1 search_pos = match.start('delimiter') while nesting_level > 0: next_open_pos = text.find(opening_chars, search_pos + n_chars) next_close_pos = text.find(closing_chars, search_pos + n_chars) if next_close_pos == -1: next_close_pos = len(text) nesting_level = 0 elif next_open_pos != -1 and next_open_pos < next_close_pos: nesting_level += 1 search_pos = next_open_pos else: # next_close_pos < next_open_pos nesting_level -= 1 search_pos = next_close_pos end_pos = next_close_pos if end_pos < 0: # if we didn't find a closer, just highlight the # rest of the text in this class end_pos = len(text) if adverbs is not None and re.search(r':to\b', adverbs): heredoc_terminator = text[match.start('delimiter') + n_chars:end_pos] end_heredoc = re.search(r'^\s*' + re.escape(heredoc_terminator) + r'\s*$', text[end_pos:], re.MULTILINE) if end_heredoc: end_pos += end_heredoc.end() else: end_pos = len(text) yield match.start(), token_class, text[match.start():end_pos + n_chars] context.pos = end_pos + n_chars return callback def opening_brace_callback(lexer, match, context): stack = 
context.stack yield match.start(), Text, context.text[match.start():match.end()] context.pos = match.end() # if we encounter an opening brace and we're one level # below a token state, it means we need to increment # the nesting level for braces so we know later when # we should return to the token rules. if len(stack) > 2 and stack[-2] == 'token': context.perl6_token_nesting_level += 1 def closing_brace_callback(lexer, match, context): stack = context.stack yield match.start(), Text, context.text[match.start():match.end()] context.pos = match.end() # if we encounter a free closing brace and we're one level # below a token state, it means we need to check the nesting # level to see if we need to return to the token state. if len(stack) > 2 and stack[-2] == 'token': context.perl6_token_nesting_level -= 1 if context.perl6_token_nesting_level == 0: stack.pop() def embedded_perl6_callback(lexer, match, context): context.perl6_token_nesting_level = 1 yield match.start(), Text, context.text[match.start():match.end()] context.pos = match.end() context.stack.append('root') # If you're modifying these rules, be careful if you need to process '{' or '}' # characters. We have special logic for processing these characters (due to the fact # that you can nest Perl 6 code in regex blocks), so if you need to process one of # them, make sure you also process the corresponding one! tokens = { 'common': [ (r'#[`|=](?P<delimiter>(?P<first_char>[' + ''.join(PERL6_BRACKETS) + r'])(?P=first_char)*)', brackets_callback(Comment.Multiline)), (r'#[^\n]*$', Comment.Singleline), (r'^(\s*)=begin\s+(\w+)\b.*?^\1=end\s+\2', Comment.Multiline), (r'^(\s*)=for.*?\n\s*?\n', Comment.Multiline), (r'^=.*?\n\s*?\n', Comment.Multiline), (r'(regex|token|rule)(\s*' + PERL6_IDENTIFIER_RANGE + '+:sym)', bygroups(Keyword, Name), 'token-sym-brackets'), (r'(regex|token|rule)(?!' 
+ PERL6_IDENTIFIER_RANGE + ')(\s*' + PERL6_IDENTIFIER_RANGE + '+)?', bygroups(Keyword, Name), 'pre-token'), # deal with a special case in the Perl 6 grammar (role q { ... }) (r'(role)(\s+)(q)(\s*)', bygroups(Keyword, Text, Name, Text)), (_build_word_match(PERL6_KEYWORDS, PERL6_IDENTIFIER_RANGE), Keyword), (_build_word_match(PERL6_BUILTIN_CLASSES, PERL6_IDENTIFIER_RANGE, suffix='(?::[UD])?'), Name.Builtin), (_build_word_match(PERL6_BUILTINS, PERL6_IDENTIFIER_RANGE), Name.Builtin), # copied from PerlLexer (r'[$@%&][.^:?=!~]?' + PERL6_IDENTIFIER_RANGE + u'+(?:<<.*?>>|<.*?>|«.*?»)*', Name.Variable), (r'\$[!/](?:<<.*?>>|<.*?>|«.*?»)*', Name.Variable.Global), (r'::\?\w+', Name.Variable.Global), (r'[$@%&]\*' + PERL6_IDENTIFIER_RANGE + u'+(?:<<.*?>>|<.*?>|«.*?»)*', Name.Variable.Global), (r'\$(?:<.*?>)+', Name.Variable), (r'(?:q|qq|Q)[a-zA-Z]?\s*(?P<adverbs>:[\w\s:]+)?\s*(?P<delimiter>(?P<first_char>[^0-9a-zA-Z:\s])' r'(?P=first_char)*)', brackets_callback(String)), # copied from PerlLexer (r'0_?[0-7]+(_[0-7]+)*', Number.Oct), (r'0x[0-9A-Fa-f]+(_[0-9A-Fa-f]+)*', Number.Hex), (r'0b[01]+(_[01]+)*', Number.Bin), (r'(?i)(\d*(_\d*)*\.\d+(_\d*)*|\d+(_\d*)*\.\d+(_\d*)*)(e[+-]?\d+)?', Number.Float), (r'(?i)\d+(_\d*)*e[+-]?\d+(_\d*)*', Number.Float), (r'\d+(_\d+)*', Number.Integer), (r'(?<=~~)\s*/(?:\\\\|\\/|.)*?/', String.Regex), (r'(?<=[=(,])\s*/(?:\\\\|\\/|.)*?/', String.Regex), (r'm\w+(?=\()', Name), (r'(?:m|ms|rx)\s*(?P<adverbs>:[\w\s:]+)?\s*(?P<delimiter>(?P<first_char>[^\w:\s])' r'(?P=first_char)*)', brackets_callback(String.Regex)), (r'(?:s|ss|tr)\s*(?::[\w\s:]+)?\s*/(?:\\\\|\\/|.)*?/(?:\\\\|\\/|.)*?/', String.Regex), (r'<[^\s=].*?\S>', String), (_build_word_match(PERL6_OPERATORS), Operator), (r'\w' + PERL6_IDENTIFIER_RANGE + '*', Name), (r"'(\\\\|\\[^\\]|[^'\\])*'", String), (r'"(\\\\|\\[^\\]|[^"\\])*"', String), ], 'root': [ include('common'), (r'\{', opening_brace_callback), (r'\}', closing_brace_callback), (r'.+?', Text), ], 'pre-token': [ include('common'), (r'\{', 
Text, ('#pop', 'token')), (r'.+?', Text), ], 'token-sym-brackets': [ (r'(?P<delimiter>(?P<first_char>[' + ''.join(PERL6_BRACKETS) + '])(?P=first_char)*)', brackets_callback(Name), ('#pop', 'pre-token')), default(('#pop', 'pre-token')), ], 'token': [ (r'\}', Text, '#pop'), (r'(?<=:)(?:my|our|state|constant|temp|let).*?;', using(this)), # make sure that quotes in character classes aren't treated as strings (r'<(?:[-!?+.]\s*)?\[.*?\]>', String.Regex), # make sure that '#' characters in quotes aren't treated as comments (r"(?<!\\)'(\\\\|\\[^\\]|[^'\\])*'", String.Regex), (r'(?<!\\)"(\\\\|\\[^\\]|[^"\\])*"', String.Regex), (r'#.*?$', Comment.Singleline), (r'\{', embedded_perl6_callback), ('.+?', String.Regex), ], } def analyse_text(text): def strip_pod(lines): in_pod = False stripped_lines = [] for line in lines: if re.match(r'^=(?:end|cut)', line): in_pod = False elif re.match(r'^=\w+', line): in_pod = True elif not in_pod: stripped_lines.append(line) return stripped_lines # XXX handle block comments lines = text.splitlines() lines = strip_pod(lines) text = '\n'.join(lines) if shebang_matches(text, r'perl6|rakudo|niecza|pugs'): return True saw_perl_decl = False rating = False # check for my/our/has declarations if re.search("(?:my|our|has)\s+(?:" + Perl6Lexer.PERL6_IDENTIFIER_RANGE + "+\s+)?[$@%&(]", text): rating = 0.8 saw_perl_decl = True for line in lines: line = re.sub('#.*', '', line) if re.match('^\s*$', line): continue # match v6; use v6; use v6.0; use v6.0.0; if re.match('^\s*(?:use\s+)?v6(?:\.\d(?:\.\d)?)?;', line): return True # match class, module, role, enum, grammar declarations class_decl = re.match('^\s*(?:(?P<scope>my|our)\s+)?(?:module|class|role|enum|grammar)', line) if class_decl: if saw_perl_decl or class_decl.group('scope') is not None: return True rating = 0.05 continue break return rating def __init__(self, **options): super(Perl6Lexer, self).__init__(**options) self.encoding = options.get('encoding', 'utf-8')
Perl6Lexer
python
getsentry__sentry
src/sentry/utils/email/message_builder.py
{ "start": 3393, "end": 10264 }
class ____: def __init__( self, subject: str, context: Mapping[str, Any] | None = None, template: str | None = None, html_template: str | None = None, body: str = "", html_body: str | None = None, headers: Mapping[str, str] | None = None, reference: Model | None = None, from_email: str | None = None, type: str | None = None, ) -> None: assert not (body and template) assert not (html_body and html_template) assert context or not (template or html_template) self.subject = subject self.context = context or {} self.template = template self.html_template = html_template self._txt_body = body self._html_body = html_body self.headers: MutableMapping[str, Any] = {**(headers or {})} self.reference = reference # The object that generated this message self.from_email = from_email or options.get("mail.from") self._send_to: set[str] = set() self.type = type if type else "generic" if reference is not None and "List-Id" not in self.headers: try: self.headers["List-Id"] = make_listid_from_instance(reference) except ListResolver.UnregisteredTypeError as error: logger.debug(str(error)) except AssertionError as error: logger.warning(str(error)) # If a "type" is specified, add it to the headers to categorize the emails if not already set if type is not None and "X-SMTPAPI" not in self.headers: self.headers = { "X-SMTPAPI": json.dumps({"category": type}), **(self.headers), } def __render_html_body(self) -> str | None: if self.html_template: html_body: str | None = render_to_string(self.html_template, self.context) else: html_body = self._html_body if html_body is None: return None return inline_css(html_body) def __render_text_body(self) -> str: if self.template: body: str = render_to_string(self.template, self.context) return body return self._txt_body def add_users(self, user_ids: Iterable[int], project: Project | None = None) -> None: self._send_to.update(list(get_email_addresses(user_ids, project).values())) def build( self, to: str, reply_to: Iterable[str] | None = None, cc: 
Sequence[str] | None = None, bcc: Sequence[str] | None = None, ) -> EmailMultiAlternatives: headers = {**self.headers} if options.get("mail.enable-replies") and "X-Sentry-Reply-To" in headers: reply_to = headers["X-Sentry-Reply-To"] else: reply_to = set(reply_to or ()) reply_to = ", ".join(reply_to) if reply_to: headers.setdefault("Reply-To", reply_to) # Every message sent needs a unique message id message_id = make_msgid(get_from_email_domain()) headers.setdefault("Message-Id", message_id) subject = force_str(self.subject) reference = self.reference if isinstance(reference, Activity): reference = reference.group subject = f"Re: {subject}" if isinstance(reference, Group): thread, created = GroupEmailThread.objects.get_or_create( email=to, group=reference, defaults={"project": reference.project, "msgid": message_id}, ) if not created: headers.setdefault("In-Reply-To", thread.msgid) headers.setdefault("References", thread.msgid) msg = EmailMultiAlternatives( subject=subject.splitlines()[0], body=self.__render_text_body(), from_email=self.from_email, to=(to,), cc=cc or (), bcc=bcc or (), headers=headers, ) html_body = self.__render_html_body() if html_body: msg.attach_alternative(html_body, "text/html") return msg def get_built_messages( self, to: Iterable[str] | None = None, reply_to: Iterable[str] | None = None, cc: Sequence[str] | None = None, bcc: Sequence[str] | None = None, ) -> Sequence[EmailMultiAlternatives]: send_to = set(to or ()) send_to.update(self._send_to) results = [ self.build(to=email, reply_to=reply_to, cc=cc, bcc=bcc) for email in send_to if email ] if not results: logger.debug("Did not build any messages, no users to send to.") return results def format_to(self, to: list[str]) -> str: if not to: return "" if len(to) > MAX_RECIPIENTS: to = to[:MAX_RECIPIENTS] + [f"and {len(to[MAX_RECIPIENTS:])} more."] return ", ".join(to) def send( self, to: Iterable[str] | None = None, cc: Sequence[str] | None = None, bcc: Sequence[str] | None = None, 
fail_silently: bool = False, ) -> int: return send_messages( self.get_built_messages(to, cc=cc, bcc=bcc), fail_silently=fail_silently ) def send_async( self, to: Iterable[str] | None = None, reply_to: Iterable[str] | None = None, cc: Sequence[str] | None = None, bcc: Sequence[str] | None = None, ) -> None: from sentry.tasks.email import send_email, send_email_control fmt = options.get("system.logging-format") messages = self.get_built_messages(to, reply_to, cc=cc, bcc=bcc) extra: MutableMapping[str, str | tuple[str]] = {"message_type": self.type} loggable = [v for k, v in self.context.items() if hasattr(v, "id")] for context in loggable: extra[f"{type(context).__name__.lower()}_id"] = context.id log_mail_queued = partial(logger.info, "mail.queued", extra=extra) for message in messages: send_email_task = send_email.delay if SiloMode.get_current_mode() == SiloMode.CONTROL: send_email_task = send_email_control.delay safe_execute(send_email_task, message=message_to_dict(message)) extra["message_id"] = message.extra_headers["Message-Id"] metrics.incr("email.queued", instance=self.type, skip_internal=False) if fmt == LoggingFormat.HUMAN: extra["message_to"] = (self.format_to(message.to),) log_mail_queued() elif fmt == LoggingFormat.MACHINE: for recipient in message.to: extra["message_to"] = recipient log_mail_queued()
MessageBuilder
python
davidhalter__parso
parso/python/errors.py
{ "start": 18295, "end": 18695 }
class ____(Rule): code = 901 def _get_message(self, message, node): message = super()._get_message(message, node) if ( "f-string" not in message and _any_fstring_error(self._normalizer.version, node) ): message = "f-string: " + message return "SyntaxError: " + message @ErrorFinder.register_rule(type='error_node')
SyntaxRule
python
has2k1__plotnine
plotnine/themes/themeable.py
{ "start": 57108, "end": 57537 }
class ____(themeable): """ Justification of legends placed at the top Parameters ---------- theme_element : Literal["left", "center", "right"] | float How to justify the entire group with 1 or more guides. i.e. How to slide the legend along the top row. If a float, it should be in the range `[0, 1]`, where `0` is `"left"` and `1` is `"right"`. """
legend_justification_top
python
openai__openai-python
src/openai/types/responses/response_apply_patch_tool_call.py
{ "start": 618, "end": 781 }
class ____(BaseModel): path: str """Path of the file to delete.""" type: Literal["delete_file"] """Delete the specified file."""
OperationDeleteFile
python
django__django
tests/generic_views/models.py
{ "start": 1397, "end": 1470 }
class ____(models.Model): event_date = models.DateTimeField()
BookSigning
python
great-expectations__great_expectations
contrib/capitalone_dataprofiler_expectations/capitalone_dataprofiler_expectations/tests/conftest.py
{ "start": 738, "end": 11107 }
class ____: """ This class should ideally be named "MockBaseProfiler"; however, it has to be called "BaseProfiler", because its "load()" method returns "BaseProfiler" type, which is type of class itself (using "fluent" programming style). """ # noinspection PyMethodMayBeStatic,PyMethodParameters def load(cls, filepath: str) -> BaseProfiler: return cls # noinspection PyMethodMayBeStatic def report(self, report_options: dict = None) -> dict: return { "global_stats": { "profile_schema": {}, }, "data_stats": [ { "column_name": "vendor_id", "data_type": "int", }, { "column_name": "passenger_count", "data_type": "int", }, { "column_name": "total_amount", "data_type": "float", }, { "column_name": "congestion_surcharge", "data_type": "float", }, ], } def pytest_addoption(parser): # note: --no-spark will be deprecated in favor of --spark parser.addoption( "--no-spark", action="store_true", help="If set, suppress tests against the spark test suite", ) parser.addoption( "--spark", action="store_true", help="If set, execute tests against the spark test suite", ) parser.addoption( "--no-sqlalchemy", action="store_true", help="If set, suppress all tests using sqlalchemy", ) parser.addoption( "--postgresql", action="store_true", help="If set, execute tests against postgresql", ) # note: --no-postgresql will be deprecated in favor of --postgresql parser.addoption( "--no-postgresql", action="store_true", help="If set, supress tests against postgresql", ) parser.addoption( "--mysql", action="store_true", help="If set, execute tests against mysql", ) parser.addoption( "--mssql", action="store_true", help="If set, execute tests against mssql", ) parser.addoption( "--bigquery", action="store_true", help="If set, execute tests against bigquery", ) parser.addoption( "--aws", action="store_true", help="If set, execute tests against AWS resources like S3, RedShift and Athena", ) parser.addoption( "--trino", action="store_true", help="If set, execute tests against trino", ) 
parser.addoption( "--redshift", action="store_true", help="If set, execute tests against redshift", ) parser.addoption( "--athena", action="store_true", help="If set, execute tests against athena", ) parser.addoption( "--snowflake", action="store_true", help="If set, execute tests against snowflake", ) parser.addoption( "--docs-tests", action="store_true", help="If set, run integration tests for docs", ) parser.addoption("--azure", action="store_true", help="If set, execute tests against Azure") parser.addoption("--cloud", action="store_true", help="If set, execute tests against GX Cloud") parser.addoption( "--performance-tests", action="store_true", help="If set, run performance tests (which might also require additional arguments like --bigquery)", ) def pytest_generate_tests(metafunc): test_backends = build_test_backends_list(metafunc) if "test_backend" in metafunc.fixturenames: metafunc.parametrize("test_backend", test_backends, scope="module") if "test_backends" in metafunc.fixturenames: metafunc.parametrize("test_backends", [test_backends], scope="module") @pytest.fixture(scope="function") def mock_base_data_profiler() -> BaseProfiler: return BaseProfiler() @pytest.fixture(scope="function") def bobby_columnar_table_multi_batch_deterministic_data_context( set_consistent_seed_within_numeric_metric_range_multi_batch_parameter_builder, # noqa: F811 tmp_path_factory, monkeypatch, ) -> FileDataContext: project_path: str = str(tmp_path_factory.mktemp("taxi_data_context")) context_path: str = os.path.join(project_path, "great_expectations") # noqa: PTH118 os.makedirs( # noqa: PTH103 os.path.join(context_path, "expectations"), # noqa: PTH118 exist_ok=True, ) data_path: str = os.path.join(context_path, "..", "data") # noqa: PTH118 os.makedirs(os.path.join(data_path), exist_ok=True) # noqa: PTH118, PTH103 shutil.copy( file_relative_path( __file__, os.path.join( # noqa: PTH118 test_root_path, "capitalone_dataprofiler_expectations", "tests", "data_profiler_files", 
"fixtures", "yellow_tripdata_pandas_fixture", "great_expectations", "great_expectations.yml", ), ), str(os.path.join(context_path, "great_expectations.yml")), # noqa: PTH118 ) shutil.copy( file_relative_path( __file__, os.path.join( # noqa: PTH118 test_root_path, "capitalone_dataprofiler_expectations", "tests", "data_profiler_files", "taxi_yellow_tripdata_samples", "random_subsamples", "yellow_tripdata_7500_lines_sample_2019-01.csv", ), ), str( os.path.join( # noqa: PTH118 context_path, "..", "data", "yellow_tripdata_sample_2019-01.csv" ) ), ) shutil.copy( file_relative_path( __file__, os.path.join( # noqa: PTH118 test_root_path, "capitalone_dataprofiler_expectations", "tests", "data_profiler_files", "taxi_yellow_tripdata_samples", "random_subsamples", "yellow_tripdata_8500_lines_sample_2019-02.csv", ), ), str( os.path.join( # noqa: PTH118 context_path, "..", "data", "yellow_tripdata_sample_2019-02.csv" ) ), ) shutil.copy( file_relative_path( __file__, os.path.join( # noqa: PTH118 test_root_path, "capitalone_dataprofiler_expectations", "tests", "data_profiler_files", "taxi_yellow_tripdata_samples", "random_subsamples", "yellow_tripdata_9000_lines_sample_2019-03.csv", ), ), str( os.path.join( # noqa: PTH118 context_path, "..", "data", "yellow_tripdata_sample_2019-03.csv" ) ), ) context = get_context(context_root_dir=context_path) assert context.root_directory == context_path return context @pytest.fixture(scope="module") def bobby_columnar_table_multi_batch_probabilistic_data_context( tmp_path_factory, ) -> FileDataContext: project_path: str = str(tmp_path_factory.mktemp("taxi_data_context")) context_path: str = os.path.join(project_path, "great_expectations") # noqa: PTH118 os.makedirs( # noqa: PTH103 os.path.join(context_path, "expectations"), # noqa: PTH118 exist_ok=True, ) data_path: str = os.path.join(context_path, "..", "data") # noqa: PTH118 os.makedirs(os.path.join(data_path), exist_ok=True) # noqa: PTH118, PTH103 shutil.copy( file_relative_path( __file__, 
os.path.join( # noqa: PTH118 test_root_path, "capitalone_dataprofiler_expectations", "tests", "data_profiler_files", "fixtures", "yellow_tripdata_pandas_fixture", "great_expectations", "great_expectations.yml", ), ), str(os.path.join(context_path, "great_expectations.yml")), # noqa: PTH118 ) shutil.copy( file_relative_path( __file__, os.path.join( # noqa: PTH118 test_root_path, "capitalone_dataprofiler_expectations", "tests", "data_profiler_files", "taxi_yellow_tripdata_samples", "random_subsamples", "yellow_tripdata_7500_lines_sample_2019-01.csv", ), ), str( os.path.join( # noqa: PTH118 context_path, "..", "data", "yellow_tripdata_sample_2019-01.csv" ) ), ) shutil.copy( file_relative_path( __file__, os.path.join( # noqa: PTH118 test_root_path, "capitalone_dataprofiler_expectations", "tests", "data_profiler_files", "taxi_yellow_tripdata_samples", "random_subsamples", "yellow_tripdata_8500_lines_sample_2019-02.csv", ), ), str( os.path.join( # noqa: PTH118 context_path, "..", "data", "yellow_tripdata_sample_2019-02.csv" ) ), ) shutil.copy( file_relative_path( __file__, os.path.join( # noqa: PTH118 test_root_path, "capitalone_dataprofiler_expectations", "tests", "data_profiler_files", "taxi_yellow_tripdata_samples", "random_subsamples", "yellow_tripdata_9000_lines_sample_2019-03.csv", ), ), str( os.path.join( # noqa: PTH118 context_path, "..", "data", "yellow_tripdata_sample_2019-03.csv" ) ), ) context = get_context(context_root_dir=context_path) assert context.root_directory == context_path return context
BaseProfiler
python
jina-ai__jina
tests/unit/jaml/test_gateway_parse.py
{ "start": 122, "end": 1646 }
class ____(Gateway): async def setup_server(self): self.server = 'dummy server' async def run_server(self): self.logger.info(self.server) async def shutdown(self): pass def test_cls_from_tag(): assert JAML.cls_from_tag('MyDummyGateway') == MyDummyGateway assert JAML.cls_from_tag('!MyDummyGateway') == MyDummyGateway assert JAML.cls_from_tag('BaseGateway') == BaseGateway assert JAML.cls_from_tag('Nonexisting') is None def test_base_jtype(tmpdir): gateway_path = os.path.join(tmpdir, 'gateway.yml') g = BaseGateway.load_config('Gateway', runtime_args={'port': [12345]}) g.save_config(gateway_path) with open(gateway_path, 'r', encoding='utf-8') as file: conf = yaml.safe_load(file) assert 'jtype' in conf assert conf['jtype'] == 'Gateway' assert ( type(BaseGateway.load_config(gateway_path, runtime_args={'port': [12345]})) == Gateway ) def test_custom_jtype(tmpdir): gateway_path = os.path.join(tmpdir, 'gateway.yml') e = BaseGateway.load_config('MyDummyGateway', runtime_args={'port': [12345]}) print(f' e {type(e)} => {e.__dict__}') e.save_config(gateway_path) with open(gateway_path, 'r', encoding='utf-8') as file: conf = yaml.safe_load(file) assert 'jtype' in conf assert conf['jtype'] == 'MyDummyGateway' assert ( type(BaseGateway.load_config(gateway_path, runtime_args={'port': [12345]})) == MyDummyGateway )
MyDummyGateway
python
kamyu104__LeetCode-Solutions
Python/flip-game.py
{ "start": 52, "end": 732 }
class ____(object): def generatePossibleNextMoves(self, s): """ :type s: str :rtype: List[str] """ res = [] i, n = 0, len(s) - 1 while i < n: # O(n) time if s[i] == '+': while i < n and s[i+1] == '+': # O(c) time res.append(s[:i] + '--' + s[i+2:]) # O(n) time and space i += 1 i += 1 return res # Time: O(c * m * n + n) = O(c * n + n), where m = 2 in this question # Space: O(n) # This solution compares O(m) = O(2) times for two consecutive "+", where m is length of the pattern
Solution
python
aio-libs__aiohttp
aiohttp/http_parser.py
{ "start": 19460, "end": 22902 }
class ____(HttpParser[RawRequestMessage]): """Read request status line. Exception .http_exceptions.BadStatusLine could be raised in case of any errors in status line. Returns RawRequestMessage. """ def parse_message(self, lines: list[bytes]) -> RawRequestMessage: # request line line = lines[0].decode("utf-8", "surrogateescape") try: method, path, version = line.split(" ", maxsplit=2) except ValueError: raise BadHttpMethod(line) from None if len(path) > self.max_line_size: raise LineTooLong( "Status line is too long", str(self.max_line_size), str(len(path)) ) # method if not TOKENRE.fullmatch(method): raise BadHttpMethod(method) # version match = VERSRE.fullmatch(version) if match is None: raise BadStatusLine(line) version_o = HttpVersion(int(match.group(1)), int(match.group(2))) if method == "CONNECT": # authority-form, # https://datatracker.ietf.org/doc/html/rfc7230#section-5.3.3 url = URL.build(authority=path, encoded=True) elif path.startswith("/"): # origin-form, # https://datatracker.ietf.org/doc/html/rfc7230#section-5.3.1 path_part, _hash_separator, url_fragment = path.partition("#") path_part, _question_mark_separator, qs_part = path_part.partition("?") # NOTE: `yarl.URL.build()` is used to mimic what the Cython-based # NOTE: parser does, otherwise it results into the same # NOTE: HTTP Request-Line input producing different # NOTE: `yarl.URL()` objects url = URL.build( path=path_part, query_string=qs_part, fragment=url_fragment, encoded=True, ) elif path == "*" and method == "OPTIONS": # asterisk-form, url = URL(path, encoded=True) else: # absolute-form for proxy maybe, # https://datatracker.ietf.org/doc/html/rfc7230#section-5.3.2 url = URL(path, encoded=True) if url.scheme == "": # not absolute-form raise InvalidURLError( path.encode(errors="surrogateescape").decode("latin1") ) # read headers ( headers, raw_headers, close, compression, upgrade, chunked, ) = self.parse_headers(lines[1:]) if close is None: # then the headers weren't set in the request if 
version_o <= HttpVersion10: # HTTP 1.0 must asks to not close close = True else: # HTTP 1.1 must ask to close. close = False return RawRequestMessage( method, path, version_o, headers, raw_headers, close, compression, upgrade, chunked, url, ) def _is_chunked_te(self, te: str) -> bool: if te.rsplit(",", maxsplit=1)[-1].strip(" \t").lower() == "chunked": return True # https://www.rfc-editor.org/rfc/rfc9112#section-6.3-2.4.3 raise BadHttpMessage("Request has invalid `Transfer-Encoding`")
HttpRequestParser
python
pypa__warehouse
tests/unit/organizations/test_models.py
{ "start": 11397, "end": 20373 }
class ____: def test_acl(self, db_session): organization = DBOrganizationFactory.create() team = DBTeamFactory.create(organization=organization) owner1 = DBOrganizationRoleFactory.create(organization=organization) owner2 = DBOrganizationRoleFactory.create(organization=organization) billing_mgr1 = DBOrganizationRoleFactory.create( organization=organization, role_name=OrganizationRoleType.BillingManager ) billing_mgr2 = DBOrganizationRoleFactory.create( organization=organization, role_name=OrganizationRoleType.BillingManager ) account_mgr1 = DBOrganizationRoleFactory.create( organization=organization, role_name=OrganizationRoleType.Manager ) account_mgr2 = DBOrganizationRoleFactory.create( organization=organization, role_name=OrganizationRoleType.Manager ) member1 = DBOrganizationRoleFactory.create( organization=organization, role_name=OrganizationRoleType.Member ) member2 = DBOrganizationRoleFactory.create( organization=organization, role_name=OrganizationRoleType.Member ) acls = [item for location in lineage(team) for item in location.__acl__()] assert acls == [ ( Allow, "group:admins", ( Permissions.AdminOrganizationsRead, Permissions.AdminOrganizationsWrite, Permissions.AdminOrganizationsNameWrite, ), ), (Allow, "group:moderators", Permissions.AdminOrganizationsRead), ] + sorted( [ ( Allow, f"user:{owner1.user.id}", [ Permissions.OrganizationsRead, Permissions.OrganizationTeamsRead, Permissions.OrganizationsManage, Permissions.OrganizationTeamsManage, Permissions.OrganizationsBillingManage, Permissions.OrganizationProjectsAdd, Permissions.OrganizationProjectsRemove, ], ), ( Allow, f"user:{owner2.user.id}", [ Permissions.OrganizationsRead, Permissions.OrganizationTeamsRead, Permissions.OrganizationsManage, Permissions.OrganizationTeamsManage, Permissions.OrganizationsBillingManage, Permissions.OrganizationProjectsAdd, Permissions.OrganizationProjectsRemove, ], ), ], key=lambda x: x[1], ) + sorted( [ ( Allow, f"user:{billing_mgr1.user.id}", [ 
Permissions.OrganizationsRead, Permissions.OrganizationTeamsRead, Permissions.OrganizationsBillingManage, ], ), ( Allow, f"user:{billing_mgr2.user.id}", [ Permissions.OrganizationsRead, Permissions.OrganizationTeamsRead, Permissions.OrganizationsBillingManage, ], ), ], key=lambda x: x[1], ) + sorted( [ ( Allow, f"user:{account_mgr1.user.id}", [ Permissions.OrganizationsRead, Permissions.OrganizationTeamsRead, Permissions.OrganizationTeamsManage, Permissions.OrganizationProjectsAdd, ], ), ( Allow, f"user:{account_mgr2.user.id}", [ Permissions.OrganizationsRead, Permissions.OrganizationTeamsRead, Permissions.OrganizationTeamsManage, Permissions.OrganizationProjectsAdd, ], ), ], key=lambda x: x[1], ) + sorted( [ ( Allow, f"user:{member1.user.id}", [Permissions.OrganizationsRead, Permissions.OrganizationTeamsRead], ), ( Allow, f"user:{member2.user.id}", [Permissions.OrganizationsRead, Permissions.OrganizationTeamsRead], ), ], key=lambda x: x[1], ) def test_active_subscription(self, db_session): organization = DBOrganizationFactory.create() stripe_customer = DBStripeCustomerFactory.create() DBOrganizationStripeCustomerFactory.create( organization=organization, customer=stripe_customer ) subscription = DBStripeSubscriptionFactory.create(customer=stripe_customer) DBOrganizationStripeSubscriptionFactory.create( organization=organization, subscription=subscription ) assert organization.active_subscription is not None assert organization.manageable_subscription is not None def test_active_subscription_none(self, db_session): organization = DBOrganizationFactory.create() stripe_customer = DBStripeCustomerFactory.create() DBOrganizationStripeCustomerFactory.create( organization=organization, customer=stripe_customer ) subscription = DBStripeSubscriptionFactory.create( customer=stripe_customer, status="unpaid", ) DBOrganizationStripeSubscriptionFactory.create( organization=organization, subscription=subscription ) assert organization.active_subscription is None assert 
organization.manageable_subscription is not None def test_manageable_subscription(self, db_session): organization = DBOrganizationFactory.create() stripe_customer = DBStripeCustomerFactory.create() DBOrganizationStripeCustomerFactory.create( organization=organization, customer=stripe_customer ) subscription = DBStripeSubscriptionFactory.create(customer=stripe_customer) DBOrganizationStripeSubscriptionFactory.create( organization=organization, subscription=subscription ) assert organization.active_subscription is not None assert organization.manageable_subscription is not None def test_manageable_subscription_none(self, db_session): organization = DBOrganizationFactory.create() stripe_customer = DBStripeCustomerFactory.create() DBOrganizationStripeCustomerFactory.create( organization=organization, customer=stripe_customer ) subscription = DBStripeSubscriptionFactory.create( customer=stripe_customer, status="canceled", ) DBOrganizationStripeSubscriptionFactory.create( organization=organization, subscription=subscription ) assert organization.active_subscription is None assert organization.manageable_subscription is None def test_good_standing_with_manual_activation_active(self, db_session): with freeze_time("2024-01-15"): organization = DBOrganizationFactory.create(orgtype="Company") DBOrganizationManualActivationFactory.create( organization=organization, expires=datetime.date(2024, 12, 31), # Future date from frozen time ) assert organization.good_standing def test_good_standing_with_manual_activation_expired(self, db_session): with freeze_time("2024-01-15"): organization = DBOrganizationFactory.create(orgtype="Company") DBOrganizationManualActivationFactory.create( organization=organization, expires=datetime.date(2023, 12, 31), # Past date from frozen time ) assert not organization.good_standing def test_good_standing_community_without_manual_activation(self, db_session): organization = DBOrganizationFactory.create(orgtype="Community") assert 
organization.good_standing def test_good_standing_company_without_manual_activation_or_subscription( self, db_session ): organization = DBOrganizationFactory.create(orgtype="Company") assert not organization.good_standing
TestTeam
python
openai__openai-python
src/openai/types/responses/response_content_part_added_event.py
{ "start": 771, "end": 1337 }
class ____(BaseModel): content_index: int """The index of the content part that was added.""" item_id: str """The ID of the output item that the content part was added to.""" output_index: int """The index of the output item that the content part was added to.""" part: Part """The content part that was added.""" sequence_number: int """The sequence number of this event.""" type: Literal["response.content_part.added"] """The type of the event. Always `response.content_part.added`."""
ResponseContentPartAddedEvent
python
python-excel__xlwt
xlwt/Cell.py
{ "start": 1305, "end": 3700 }
class ____(object): __slots__ = ["rowx", "colx", "xf_idx", "number"] def __init__(self, rowx, colx, xf_idx, number): self.rowx = rowx self.colx = colx self.xf_idx = xf_idx self.number = float(number) def get_encoded_data(self): rk_encoded = 0 num = self.number # The four possible kinds of RK encoding are *not* mutually exclusive. # The 30-bit integer variety picks up the most. # In the code below, the four varieties are checked in descending order # of bangs per buck, or not at all. # SJM 2007-10-01 if -0x20000000 <= num < 0x20000000: # fits in 30-bit *signed* int inum = int(num) if inum == num: # survives round-trip # print "30-bit integer RK", inum, hex(inum) rk_encoded = 2 | (inum << 2) return 1, rk_encoded temp = num * 100 if -0x20000000 <= temp < 0x20000000: # That was step 1: the coded value will fit in # a 30-bit signed integer. itemp = int(round(temp, 0)) # That was step 2: "itemp" is the best candidate coded value. # Now for step 3: simulate the decoding, # to check for round-trip correctness. if itemp / 100.0 == num: # print "30-bit integer RK*100", itemp, hex(itemp) rk_encoded = 3 | (itemp << 2) return 1, rk_encoded if 0: # Cost of extra pack+unpack not justified by tiny yield. packed = pack('<d', num) w01, w23 = unpack('<2i', packed) if not w01 and not(w23 & 3): # 34 lsb are 0 # print "float RK", w23, hex(w23) return 1, w23 packed100 = pack('<d', temp) w01, w23 = unpack('<2i', packed100) if not w01 and not(w23 & 3): # 34 lsb are 0 # print "float RK*100", w23, hex(w23) return 1, w23 | 1 #print "Number" #print return 0, pack('<5Hd', 0x0203, 14, self.rowx, self.colx, self.xf_idx, num) def get_biff_data(self): isRK, value = self.get_encoded_data() if isRK: return pack('<5Hi', 0x27E, 10, self.rowx, self.colx, self.xf_idx, value) return value # NUMBER record already packed
NumberCell
python
doocs__leetcode
solution/1500-1599/1505.Minimum Possible Integer After at Most K Adjacent Swaps On Digits/Solution.py
{ "start": 0, "end": 445 }
class ____: def __init__(self, n): self.n = n self.c = [0] * (n + 1) @staticmethod def lowbit(x): return x & -x def update(self, x, delta): while x <= self.n: self.c[x] += delta x += BinaryIndexedTree.lowbit(x) def query(self, x): s = 0 while x: s += self.c[x] x -= BinaryIndexedTree.lowbit(x) return s
BinaryIndexedTree
python
facebookresearch__faiss
tests/test_extra_distances.py
{ "start": 9091, "end": 9608 }
class ____(unittest.TestCase): """ since it has a distance computer, HNSW should work """ def test_hnsw(self): d = 10 nb = 1000 nq = 100 nt = 0 xt, xb, xq = get_dataset_2(d, nt, nb, nq) mt = faiss.METRIC_L1 index = faiss.IndexHNSW(faiss.IndexFlat(d, mt)) index.add(xb) D, I = index.search(xq, 10) dis = faiss.pairwise_distances(xq, xb, mt) for q in range(nq): assert np.all(D[q] == dis[q, I[q]])
TestHNSW
python
google__jax
jax/experimental/pallas/ops/gpu/ragged_dot_mgpu.py
{ "start": 1138, "end": 12351 }
class ____: """Information regarding the group being processed in a block.""" group_id: jax.Array block: jax.Array block_start: jax.Array actual_start: jax.Array actual_end: jax.Array start_within_block: jax.Array actual_size: jax.Array @classmethod def create(cls, group_lengths, tile, tid): """Get the group info for the current block.""" tile = jnp.int32(tile) group_boundaries = [group_lengths[i] for i in range(len(group_lengths))] # We usually only have very few groups, so we unroll the loop processing # them. Normally we'd break out of the loop early, once we'd have found our # boundary, but we can't do that when unrolling, so we rely on many selects # to mask out the epilogue of the loop. group_end = group_start = block = group = end = jnp.array( 0, dtype=jnp.int32 ) for i, b in enumerate(group_boundaries): # Start/end are inclusive start = end end = start + b final = end - 1 start_block = lax.div(start, tile) final_block = lax.div(final, tile) block_end = final_block + 1 tid_begin = start_block + i tid_end = block_end + i # How many blocks after is our block? 
this_is_group = (tid_begin <= tid) & (tid < tid_end) block = lax.select(this_is_group, tid - tid_begin + start_block, block) group = lax.select(this_is_group, jnp.int32(i), group) group_start = lax.select(this_is_group, start, group_start) group_end = lax.select(this_is_group, end, group_end) block_start = block * tile actual_start = jnp.maximum(group_start, block_start) actual_end = jnp.minimum(group_end, block_start + tile) start_within_block = actual_start - block_start actual_size = actual_end - actual_start return cls( group_id=group, block=block, block_start=block_start, actual_start=actual_start, actual_end=actual_end, start_within_block=start_within_block, actual_size=actual_size, ) def ragged_dot( lhs, # (M, K) rhs, # (G, K, N) *, group_sizes, # (G,) block_m: int, block_n: int, block_k: int, max_concurrent_steps: int, grid_block_n: int, transpose_rhs: bool = False, load_group_sizes_to_register: bool = True, ) -> jax.Array: if lhs.dtype != rhs.dtype: raise NotImplementedError( f"lhs and rhs must have the same dtype, got {lhs.dtype} and {rhs.dtype}" ) m, k = lhs.shape g, k2, n = rhs.shape if transpose_rhs: k2, n = n, k2 if group_sizes.shape[0] != g: raise ValueError( f"Expected group_sizes to have shape {g} but got {group_sizes.shape}" ) if k != k2: raise ValueError(f"lhs.shape={k} must match rhs.shape={k2}") if k % block_k != 0: raise ValueError(f"k={k} must be a multiple of block_k={block_k}") def body(rows_per_expert_gmem, lhs_gmem, rhs_gmem, o_gmem): grid_m = pl.cdiv(m, block_m) + g - 1 grid_n = pl.cdiv(n, block_n) grid = (grid_m * grid_n,) if load_group_sizes_to_register: rows_per_expert = [rows_per_expert_gmem[i] for i in range(len(rows_per_expert_gmem))] else: rows_per_expert = rows_per_expert_gmem @plgpu.nd_loop(grid, collective_axes="sm") def mn_loop(loop_info: plgpu.NDLoopInfo): # pylint: disable=unused-variable mi, ni = plgpu.planar_snake( loop_info.index[0], (grid_m, grid_n), 1, grid_block_n, ) group_info = GroupInfo.create(rows_per_expert_gmem, 
block_m, mi) def acc_scope(acc_ref): plgpu.emit_pipeline( lambda _, lhs_smem, rhs_smem: plgpu.wgmma( acc_ref, lhs_smem, plgpu.transpose_ref(rhs_smem, (1, 0)) if transpose_rhs else rhs_smem, ), grid=(k // block_k,), in_specs=[ plgpu.BlockSpec( (block_m, block_k), lambda k: (group_info.block, k), delay_release=1, ), plgpu.BlockSpec( (block_n, block_k) if transpose_rhs else (block_k, block_n), lambda k: (ni, k) if transpose_rhs else (k, ni), delay_release=1, ), ], max_concurrent_steps=max_concurrent_steps, )(lhs_gmem, rhs_gmem.at[group_info.group_id]) return acc_ref[...] acc = pl.run_scoped(acc_scope, plgpu.ACC((block_m, block_n))) @functools.partial( pl.run_scoped, o_smem=plgpu.SMEM((block_m, block_n), dtype=o_gmem.dtype) ) def store_scope(o_smem): # pylint: disable=unused-variable o_smem[...] = acc.astype(o_smem.dtype) plgpu.commit_smem() smem_start = group_info.start_within_block remaining_rows = min(block_m, m) # TMA descriptors need to be generated with static tile sizes along each # axis, but we do not know at compile time how many rows we will need to # store. We only know that the number of rows to store is bounded by # min(block_m, m). # # In order to work around that, we construct a logarithmic ladder of # TMA descriptors, where each descriptor can store 2**i rows for some # i between 0 and log2(min(block_m, m)). This allows storing any # number of rows we will need to store, so long as this number of rows # is between `1` and `min(block_m, m)`. # # E.g., imagine we have block_m = 8, m = 16. The loop below will be # unrolled into 4 iterations, where the first one will generate a TMA # descriptor that can store 8 rows, the second one will generate a TMA # descriptor that can store 4 rows, etc. all the way to 1 row. # # At run time, we finally know the actual number of rows we need to # store as we go through the unrolled loop iterations. Let's imagine # that we need to store 5 rows. # # The first unrolled iteration will check whether we can store 8 rows. 
# Since we only need to store 5 rows, we won't store anything then. # # The second unrolled iteration will check whether we can store 4 rows. # We're able to store 4 rows, and are left with a single remaining row. # # The fourth unrolled iteration will store the single remaining row, and # we end up with a storing scheme as follows for our 5 rows: # # ----------------------------------------------------------- # 0 | | # 1 | | # 2 | Store 4 rows | # 3 | | # ----------------------------------------------------------- # 4 | Store 1 row | # ----------------------------------------------------------- while remaining_rows > 0: const_rows_len = 1 << int(math.log2(remaining_rows)) remaining_rows //= 2 @pl.when(group_info.actual_size & const_rows_len != 0) def _(): o_smem_slice = o_smem.at[pl.ds(smem_start, const_rows_len)] o_gref_slice = o_gmem.at[ pl.ds(group_info.block_start + smem_start, const_rows_len), pl.ds(ni * block_n, block_n), ] plgpu.copy_smem_to_gmem(o_smem_slice, o_gref_slice) smem_start += group_info.actual_size & const_rows_len plgpu.wait_smem_to_gmem(0, wait_read_only=True) # There are 132 SMs on a H100 SXM GPU. 
num_sms = 132 kernel = plgpu.kernel( body, out_shape=jax.ShapeDtypeStruct((m, n), lhs.dtype), grid=(num_sms,), grid_names=("sm",), compiler_params=plgpu.CompilerParams( lowering_semantics=plgpu.LoweringSemantics.Warpgroup, ), ) return kernel(group_sizes, lhs, rhs) def main(unused_argv): for transpose_rhs in [False, True]: m, k, n, num_groups = 16 * 1024, 2048, 16 * 1024, 16 kx, ky, kz = random.split(random.key(1234), num=3) lhs = jax.random.normal(kx, (m, k), jnp.float16) if transpose_rhs: rhs = jax.random.normal(ky, (num_groups, n, k), jnp.float16) else: rhs = jax.random.normal(ky, (num_groups, k, n), jnp.float16) group_boundaries = jax.lax.sort( jax.random.randint(kz, (num_groups - 1,), 0, m, jnp.int32) ) group_starts = lax.concatenate( [jnp.array([0], dtype=jnp.int32), group_boundaries], 0 ) group_ends = lax.concatenate( [group_boundaries, jnp.array([m], dtype=jnp.int32)], 0 ) group_sizes = group_ends - group_starts assert group_sizes.shape == (num_groups,) block_m = block_n = (64, 128, 192) block_k = (64,) max_concurrent_steps = (2, 4, 5, 6) grid_block_n = (1, 2, 4, 8, 16) configs = itertools.product( block_m, block_n, block_k, max_concurrent_steps, grid_block_n ) names = ( "block_m", "block_n", "block_k", "max_concurrent_steps", "grid_block_n" ) best_runtime = float("inf") best_kwargs = {} for config in configs: kwargs = dict(zip(names, config)) if n % (kwargs["grid_block_n"] * kwargs["block_n"]): continue try: f = functools.partial( ragged_dot, group_sizes=group_sizes, transpose_rhs=transpose_rhs, **kwargs ) _, runtime = profiler.measure(f)(lhs, rhs) except ValueError as e: if "Mosaic GPU kernel exceeds available shared memory" not in str(e): raise runtime = float("inf") # Enable this to get more detailed information. 
else: print(" ".join(f"{k}={v}" for k, v in kwargs.items()), int(runtime * 1000)) if runtime < best_runtime: # pytype: disable=unsupported-operands best_runtime = runtime best_kwargs = kwargs if not best_kwargs: raise ValueError("No valid configuration found") def ref_ragged_dot(lhs, rhs, group_sizes): if transpose_rhs: rhs = jnp.transpose(rhs, (0, 2, 1)) return jax.lax.ragged_dot(lhs, rhs, group_sizes=group_sizes) ref, ref_runtime = profiler.measure(ref_ragged_dot)( lhs, rhs, group_sizes=group_sizes ) result = ragged_dot( lhs, rhs, group_sizes=group_sizes, transpose_rhs=transpose_rhs, **best_kwargs ) np.testing.assert_allclose(result, ref, atol=1e-3, rtol=1e-3) tflops = float(2 * k * m * n) / (best_runtime / 1e3) / 1e12 ref_tflops = float(2 * k * m * n) / (ref_runtime / 1e3) / 1e12 print(f"Transpose RHS: {transpose_rhs}") print( "Best parameters: ", " ".join(f"{k}={v}" for k, v in best_kwargs.items()) ) print(f"Kernel: {best_runtime * 1000:.1f} us = {tflops:.1f} TFLOPS") print(f"Reference: {ref_runtime * 1000:.1f} us = {ref_tflops:.1f} TFLOPS") if __name__ == "__main__": from absl import app jax.config.config_with_absl() app.run(main)
GroupInfo
python
pypa__pip
src/pip/_vendor/packaging/licenses/__init__.py
{ "start": 1950, "end": 5727 }
class ____(ValueError): """Raised when a license-expression string is invalid >>> canonicalize_license_expression("invalid") Traceback (most recent call last): ... packaging.licenses.InvalidLicenseExpression: Invalid license expression: 'invalid' """ def canonicalize_license_expression( raw_license_expression: str, ) -> NormalizedLicenseExpression: if not raw_license_expression: message = f"Invalid license expression: {raw_license_expression!r}" raise InvalidLicenseExpression(message) # Pad any parentheses so tokenization can be achieved by merely splitting on # whitespace. license_expression = raw_license_expression.replace("(", " ( ").replace(")", " ) ") licenseref_prefix = "LicenseRef-" license_refs = { ref.lower(): "LicenseRef-" + ref[len(licenseref_prefix) :] for ref in license_expression.split() if ref.lower().startswith(licenseref_prefix.lower()) } # Normalize to lower case so we can look up licenses/exceptions # and so boolean operators are Python-compatible. license_expression = license_expression.lower() tokens = license_expression.split() # Rather than implementing boolean logic, we create an expression that Python can # parse. Everything that is not involved with the grammar itself is treated as # `False` and the expression should evaluate as such. 
python_tokens = [] for token in tokens: if token not in {"or", "and", "with", "(", ")"}: python_tokens.append("False") elif token == "with": python_tokens.append("or") elif token == "(" and python_tokens and python_tokens[-1] not in {"or", "and"}: message = f"Invalid license expression: {raw_license_expression!r}" raise InvalidLicenseExpression(message) else: python_tokens.append(token) python_expression = " ".join(python_tokens) try: invalid = eval(python_expression, globals(), locals()) except Exception: invalid = True if invalid is not False: message = f"Invalid license expression: {raw_license_expression!r}" raise InvalidLicenseExpression(message) from None # Take a final pass to check for unknown licenses/exceptions. normalized_tokens = [] for token in tokens: if token in {"or", "and", "with", "(", ")"}: normalized_tokens.append(token.upper()) continue if normalized_tokens and normalized_tokens[-1] == "WITH": if token not in EXCEPTIONS: message = f"Unknown license exception: {token!r}" raise InvalidLicenseExpression(message) normalized_tokens.append(EXCEPTIONS[token]["id"]) else: if token.endswith("+"): final_token = token[:-1] suffix = "+" else: final_token = token suffix = "" if final_token.startswith("licenseref-"): if not license_ref_allowed.match(final_token): message = f"Invalid licenseref: {final_token!r}" raise InvalidLicenseExpression(message) normalized_tokens.append(license_refs[final_token] + suffix) else: if final_token not in LICENSES: message = f"Unknown license: {final_token!r}" raise InvalidLicenseExpression(message) normalized_tokens.append(LICENSES[final_token]["id"] + suffix) normalized_expression = " ".join(normalized_tokens) return cast( NormalizedLicenseExpression, normalized_expression.replace("( ", "(").replace(" )", ")"), )
InvalidLicenseExpression
python
coleifer__peewee
tests/sql.py
{ "start": 84177, "end": 86634 }
class ____(BaseTestCase): database = MySQLDatabase(None) def setUp(self): super(TestOnConflictMySQL, self).setUp() self.database.server_version = None def test_replace(self): query = Person.insert(name='huey').on_conflict('replace') self.assertSQL(query, ( 'REPLACE INTO "person" ("name") VALUES (?)'), ['huey']) def test_ignore(self): query = Person.insert(name='huey').on_conflict('ignore') self.assertSQL(query, ( 'INSERT IGNORE INTO "person" ("name") VALUES (?)'), ['huey']) def test_update(self): dob = datetime.date(2010, 1, 1) query = (Person .insert(name='huey', dob=dob) .on_conflict( preserve=(Person.dob,), update={Person.name: Person.name.concat('-x')})) self.assertSQL(query, ( 'INSERT INTO "person" ("dob", "name") VALUES (?, ?) ' 'ON DUPLICATE KEY ' 'UPDATE "dob" = VALUES("dob"), "name" = ("name" || ?)'), [dob, 'huey', '-x']) query = (Person .insert(name='huey', dob=dob) .on_conflict(preserve='dob')) self.assertSQL(query, ( 'INSERT INTO "person" ("dob", "name") VALUES (?, ?) ' 'ON DUPLICATE KEY ' 'UPDATE "dob" = VALUES("dob")'), [dob, 'huey']) def test_update_use_value_mariadb(self): # Verify that we use "VALUE" (not "VALUES") for MariaDB 10.3.3. dob = datetime.date(2010, 1, 1) query = (Person .insert(name='huey', dob=dob) .on_conflict(preserve=(Person.dob,))) self.database.server_version = (10, 3, 3) self.assertSQL(query, ( 'INSERT INTO "person" ("dob", "name") VALUES (?, ?) ' 'ON DUPLICATE KEY ' 'UPDATE "dob" = VALUE("dob")'), [dob, 'huey']) self.database.server_version = (10, 3, 2) self.assertSQL(query, ( 'INSERT INTO "person" ("dob", "name") VALUES (?, ?) ' 'ON DUPLICATE KEY ' 'UPDATE "dob" = VALUES("dob")'), [dob, 'huey']) def test_where_not_supported(self): query = Person.insert(name='huey').on_conflict( preserve=(Person.dob,), where=(Person.name == 'huey')) with self.assertRaisesCtx(ValueError): self.database.get_sql_context().parse(query)
TestOnConflictMySQL
python
getsentry__sentry
src/sentry/integrations/github/integration.py
{ "start": 34863, "end": 40175 }
class ____(IntegrationProvider): key = IntegrationProviderSlug.GITHUB.value name = "GitHub" metadata = metadata integration_cls: type[IntegrationInstallation] = GitHubIntegration features = frozenset( [ IntegrationFeatures.COMMITS, IntegrationFeatures.ISSUE_BASIC, IntegrationFeatures.ISSUE_SYNC, IntegrationFeatures.STACKTRACE_LINK, IntegrationFeatures.CODEOWNERS, ] ) setup_dialog_config = {"width": 1030, "height": 1000} @property def client(self) -> GithubSetupApiClient: # The endpoints we need to hit at this step authenticate via JWT so no need for access token in client return GithubSetupApiClient() def post_install( self, integration: Integration, organization: RpcOrganization, *, extra: dict[str, Any], ) -> None: # Check if this is the Codecov GitHub app to trigger account linking github_app_id = extra.get("app_id") SENTRY_GITHUB_APP_ID = options.get("github-app.id") if not github_app_id or not SENTRY_GITHUB_APP_ID: logger.warning( "codecov.account_link.configuration_error", extra={ "integration_id": integration.id, "organization_id": organization.id, "has_github_app_id": bool(github_app_id), "has_sentry_github_app_id": bool(SENTRY_GITHUB_APP_ID), }, ) if ( github_app_id and SENTRY_GITHUB_APP_ID and str(github_app_id) == str(SENTRY_GITHUB_APP_ID) ): org_integration = OrganizationIntegration.objects.filter( integration=integration, organization_id=organization.id ).first() # Double check org integration exists before linking accounts if org_integration: codecov_account_link.apply_async( kwargs={ "integration_id": integration.id, "organization_id": organization.id, } ) else: logger.warning( "codecov.account_link.org_integration_missing", extra={"integration_id": integration.id, "organization_id": organization.id}, ) repos = repository_service.get_repositories( organization_id=organization.id, providers=[IntegrationProviderSlug.GITHUB.value, "integrations:github"], has_integration=False, ) for repo in repos: migrate_repo.apply_async( kwargs={ "repo_id": repo.id, 
"integration_id": integration.id, "organization_id": organization.id, } ) link_all_repos.apply_async( kwargs={ "integration_key": self.key, "integration_id": integration.id, "organization_id": organization.id, } ) def get_pipeline_views( self, ) -> Sequence[ PipelineView[IntegrationPipeline] | Callable[[], PipelineView[IntegrationPipeline]] ]: return [OAuthLoginView(), GithubOrganizationSelection(), GitHubInstallation()] def get_installation_info(self, installation_id: str) -> Mapping[str, Any]: resp: Mapping[str, Any] = self.client.get_installation_info(installation_id=installation_id) return resp def build_integration(self, state: Mapping[str, str]) -> IntegrationData: try: installation = self.get_installation_info( state["installation_id"], ) except ApiError as api_error: if api_error.code == 404: raise IntegrationError("The GitHub installation could not be found.") raise integration: IntegrationData = { "name": installation["account"]["login"], # TODO(adhiraj): This should be a constant representing the entire github cloud. "external_id": installation["id"], # GitHub identity is associated directly to the application, *not* # to the installation itself. "idp_external_id": installation["app_id"], "metadata": { # The access token will be populated upon API usage "access_token": None, "expires_at": None, "icon": installation["account"]["avatar_url"], "domain_name": installation["account"]["html_url"].replace("https://", ""), "account_type": installation["account"]["type"], "account_id": installation["account"]["id"], }, "post_install_data": {"app_id": installation["app_id"]}, } if state.get("sender"): integration["metadata"]["sender"] = state["sender"] return integration def setup(self) -> None: from sentry.plugins.base import bindings bindings.add( "integration-repository.provider", GitHubRepositoryProvider, id="integrations:github" )
GitHubIntegrationProvider
python
jazzband__django-simple-history
simple_history/tests/models.py
{ "start": 13928, "end": 14110 }
class ____(models.Model): name = models.CharField(max_length=100) history = HistoricalRecords() class Meta: verbose_name_plural = "\u570b"
UnicodeVerboseNamePlural
python
facelessuser__soupsieve
tests/test_level1/test_visited.py
{ "start": 52, "end": 568 }
class ____(util.TestCase): """Test visited selectors.""" def test_visited(self): """Test visited.""" markup = """ <div> <p>Some text <span id="1" class="foo:bar:foobar"> in a paragraph</span>. <a id="2" class="bar" href="http://google.com">Link</a> <a id="3">Placeholder text.</a> </p> </div> """ self.assert_selector( markup, "a:visited", [], flags=util.HTML )
TestVisited
python
fastapi__sqlmodel
tests/test_deprecations.py
{ "start": 84, "end": 690 }
class ____(Item): password: str def test_deprecated_from_orm_inheritance(): new_item = SubItem(name="Hello", password="secret") with pytest.warns(DeprecationWarning): item = Item.from_orm(new_item) assert item.name == "Hello" assert not hasattr(item, "password") def test_deprecated_parse_obj(): with pytest.warns(DeprecationWarning): item = Item.parse_obj({"name": "Hello"}) assert item.name == "Hello" def test_deprecated_dict(): with pytest.warns(DeprecationWarning): data = Item(name="Hello").dict() assert data == {"name": "Hello"}
SubItem
python
great-expectations__great_expectations
great_expectations/checkpoint/actions.py
{ "start": 27582, "end": 34952 }
class ____(ValidationAction): """Sends an email to a given list of email addresses. ```yaml - name: send_email_on_validation_result action: class_name: EmailAction notify_on: all # possible values: "all", "failure", "success" notify_with: renderer: # the class that implements the message to be sent # this is the default implementation, but you can # implement a custom one module_name: great_expectations.render.renderer.email_renderer class_name: EmailRenderer # put the actual following information in the uncommitted/config_variables.yml file # or pass in as environment variable smtp_address: ${smtp_address} smtp_port: ${smtp_port} sender_login: ${email_address} sender_password: ${sender_password} sender_alias: ${sender_alias} # useful to send an email as an alias receiver_emails: ${receiver_emails} use_tls: False use_ssl: True ``` Args: renderer: Specifies the renderer used to generate an email. smtp_address: Address of the SMTP server used to send the email. smtp_address: Port of the SMTP server used to send the email. sender_login: Login used send the email. sender_password: Password used to send the email. sender_alias: Optional. Alias used to send the email (default = sender_login). receiver_emails: Email addresses that will receive the email (separated by commas). use_tls: Optional. Use of TLS to send the email (using either TLS or SSL is highly recommended). use_ssl: Optional. Use of SSL to send the email (using either TLS or SSL is highly recommended). notify_on: "Specifies validation status that triggers notification. One of "all", "failure", "success". notify_with: Optional list of DataDocs site names to display in Slack messages. Defaults to all. 
Examples: **renderer:** ```python { "module_name": "great_expectations.render.renderer.email_renderer", "class_name": "EmailRenderer", } ``` """ # noqa: E501 # FIXME CoP type: Literal["email"] = "email" smtp_address: Union[ConfigStr, str] smtp_port: Union[ConfigStr, str] receiver_emails: Union[ConfigStr, str] sender_login: Optional[Union[ConfigStr, str]] = None sender_password: Optional[Union[ConfigStr, str]] = None sender_alias: Optional[Union[ConfigStr, str]] = None use_tls: Optional[bool] = None use_ssl: Optional[bool] = None notify_on: NotifyOn = "all" notify_with: Optional[List[str]] = None renderer: EmailRenderer = Field(default_factory=EmailRenderer) @validator("renderer", pre=True) def _validate_renderer(cls, renderer: dict | EmailRenderer) -> EmailRenderer: if isinstance(renderer, dict): _renderer = _build_renderer(config=renderer) if not isinstance(_renderer, EmailRenderer): raise ValueError( # noqa: TRY003, TRY004 # FIXME CoP "renderer must be a EmailRenderer or a valid configuration for one." ) renderer = _renderer return renderer @root_validator def _root_validate_email_params(cls, values: dict) -> dict: if not values["sender_alias"]: values["sender_alias"] = values["sender_login"] if not values["sender_login"]: logger.warning( "No login found for sending the email in action config. " "This will only work for email server that does not require authentication." ) if not values["sender_password"]: logger.warning( "No password found for sending the email in action config." "This will only work for email server that does not require authentication." 
) return values @override def run( self, checkpoint_result: CheckpointResult, action_context: ActionContext | None = None, ) -> dict: success = checkpoint_result.success or False max_severity = self._get_max_severity_failure_from_checkpoint_result(checkpoint_result) if not should_notify(success=success, notify_on=self.notify_on, max_severity=max_severity): return {"email_result": ""} title, html = self.renderer.render(checkpoint_result=checkpoint_result) substituted_receiver_emails = ( self._substitute_config_str_if_needed(self.receiver_emails) or "" ) receiver_emails_list = list( map(lambda x: x.strip(), substituted_receiver_emails.split(",")) ) # this will actually send the email email_result = self._send_email( title=title, html=html, receiver_emails_list=receiver_emails_list, ) # sending payload back as dictionary return {"email_result": email_result} def _send_email( # noqa: C901 # FIXME CoP self, title, html, receiver_emails_list, ): smtp_address = self._substitute_config_str_if_needed(self.smtp_address) smtp_port = self._substitute_config_str_if_needed(self.smtp_port) sender_login = self._substitute_config_str_if_needed(self.sender_login) sender_password = self._substitute_config_str_if_needed(self.sender_password) sender_alias = self._substitute_config_str_if_needed(self.sender_alias) msg = MIMEMultipart() msg["From"] = sender_alias msg["To"] = ", ".join(receiver_emails_list) msg["Subject"] = title msg.attach(MIMEText(html, "html")) try: if self.use_ssl: if self.use_tls: logger.warning("Please choose between SSL or TLS, will default to SSL") context = ssl.create_default_context() mailserver = smtplib.SMTP_SSL(smtp_address, smtp_port, context=context) elif self.use_tls: mailserver = smtplib.SMTP(smtp_address, smtp_port) context = ssl.create_default_context() mailserver.starttls(context=context) else: logger.warning("Not using TLS or SSL to send an email is not secure") mailserver = smtplib.SMTP(smtp_address, smtp_port) if sender_login is not None and 
sender_password is not None: mailserver.login(sender_login, sender_password) elif not (sender_login is None and sender_password is None): logger.error( "Please specify both sender_login and sender_password or specify both as None" ) mailserver.sendmail(sender_alias, receiver_emails_list, msg.as_string()) mailserver.quit() except smtplib.SMTPConnectError: logger.error(f"Failed to connect to the SMTP server at address: {smtp_address}") # noqa: TRY400 # FIXME CoP except smtplib.SMTPAuthenticationError: logger.error(f"Failed to authenticate to the SMTP server at address: {smtp_address}") # noqa: TRY400 # FIXME CoP except Exception as e: logger.error(str(e)) # noqa: TRY400 # FIXME CoP else: return "success" @public_api
EmailAction
python
walkccc__LeetCode
solutions/2431. Maximize Total Tastiness of Purchased Fruits/2431-2.py
{ "start": 0, "end": 684 }
class ____: def maxTastiness( self, price: list[int], tastiness: list[int], maxAmount: int, maxCoupons: int, ) -> int: # dp[j][k] := the maximum tastiness of price so far with j amount of money and k coupons dp = [[0] * (maxCoupons + 1) for _ in range(maxAmount + 1)] for p, t in zip(price, tastiness): for j in range(maxAmount, p // 2 - 1, -1): for k in range(maxCoupons, -1, -1): buyWithCoupon = 0 if k == 0 else dp[j - p // 2][k - 1] + t buyWithoutCoupon = 0 if j < p else dp[j - p][k] + t dp[j][k] = max(dp[j][k], buyWithCoupon, buyWithoutCoupon) return dp[maxAmount][maxCoupons]
Solution
python
ray-project__ray
python/ray/data/_internal/execution/streaming_executor.py
{ "start": 30586, "end": 32400 }
class ____(OutputIterator): """Iterator automatically shutting down executor upon exhausting the iterable sequence. NOTE: If this iterator isn't fully exhausted, executor still have to be closed manually by the caller! """ def __init__(self, executor: StreamingExecutor): self._executor = executor def get_next(self, output_split_idx: Optional[int] = None) -> RefBundle: try: op, state = self._executor._output_node bundle = state.get_output_blocking(output_split_idx) # Update progress-bars using_rich = self._executor._use_rich_progress() if not using_rich and self._executor._global_info: self._executor._global_info.update( bundle.num_rows(), op.num_output_rows_total() ) elif using_rich and self._executor._progress_manager: self._executor._progress_manager.update_total_progress( bundle.num_rows() or 0, op.num_output_rows_total() ) return bundle # Have to be BaseException to catch ``KeyboardInterrupt`` # # NOTE: This also handles ``StopIteration`` except BaseException as e: # Asynchronously shutdown the executor (ie avoid unnecessary # synchronization on tasks termination) self._executor.shutdown( force=False, exception=e if not isinstance(e, StopIteration) else None ) raise def __del__(self): # NOTE: Upon garbage-collection we're allowing running tasks # to be terminated asynchronously (ie avoid unnecessary # synchronization on their completion) self._executor.shutdown(force=False)
_ClosingIterator
python
run-llama__llama_index
llama-index-core/llama_index/core/prompts/rich.py
{ "start": 672, "end": 4820 }
class ____(BasePromptTemplate): # type: ignore[no-redef] template_str: str = Field(description="The template string for the prompt.") def __init__( self, template_str: str, metadata: Optional[Dict[str, Any]] = None, output_parser: Optional[BaseOutputParser] = None, template_vars: Optional[List[str]] = None, template_var_mappings: Optional[Dict[str, Any]] = None, function_mappings: Optional[Dict[str, Callable]] = None, **kwargs: Any, ): template_vars = template_vars or [] if not template_vars: template_vars = Prompt(template_str).variables super().__init__( template_str=template_str, kwargs=kwargs or {}, metadata=metadata or {}, output_parser=output_parser, template_vars=template_vars, template_var_mappings=template_var_mappings, function_mappings=function_mappings, ) @property def is_chat_template(self) -> bool: return "endchat" in self.template_str def partial_format(self, **kwargs: Any) -> "RichPromptTemplate": prompt = deepcopy(self) prompt.kwargs.update(kwargs) return prompt def format( self, llm: Optional[BaseLLM] = None, messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None, **kwargs: Any, ) -> str: del llm # unused if self.is_chat_template: messages = self.format_messages(**kwargs) if messages_to_prompt is not None: return messages_to_prompt(messages) return default_messages_to_prompt(messages) else: all_kwargs = { **self.kwargs, **kwargs, } mapped_all_kwargs = self._map_all_vars(all_kwargs) return Prompt(self.template_str).text(data=mapped_all_kwargs) def format_messages( self, llm: Optional[BaseLLM] = None, **kwargs: Any ) -> List[ChatMessage]: del llm # unused """Format the prompt into a list of chat messages.""" all_kwargs = { **self.kwargs, **kwargs, } mapped_all_kwargs = self._map_all_vars(all_kwargs) banks_prompt = Prompt(self.template_str) banks_messages = banks_prompt.chat_messages(data=mapped_all_kwargs) llama_messages: list[ChatMessage] = [] for bank_message in banks_messages: if isinstance(bank_message.content, str): 
llama_messages.append( ChatMessage(role=bank_message.role, content=bank_message.content) ) elif isinstance(bank_message.content, list): llama_blocks: list[ContentBlock] = [] for bank_block in bank_message.content: if bank_block.type == BanksContentBlockType.text: llama_blocks.append(TextBlock(text=bank_block.text)) elif bank_block.type == BanksContentBlockType.image_url: llama_blocks.append(ImageBlock(url=bank_block.image_url.url)) elif bank_block.type == BanksContentBlockType.audio: llama_blocks.append( AudioBlock(audio=bank_block.input_audio.data) ) else: raise ValueError( f"Unsupported content block type: {bank_block.type}" ) llama_messages.append( ChatMessage(role=bank_message.role, content=llama_blocks) ) else: raise ValueError( f"Unsupported message content type: {type(bank_message.content)}" ) if self.output_parser is not None: llama_messages = self.output_parser.format_messages(llama_messages) return llama_messages def get_template(self, llm: Optional[BaseLLM] = None) -> str: return self.template_str
RichPromptTemplate
python
spyder-ide__spyder
spyder/plugins/remoteclient/api/modules/file_services.py
{ "start": 10228, "end": 15743 }
class ____(SpyderBaseJupyterAPI): """ API for remote file services. This API allows for interacting with files on a remote server. Raises ------ RemoteFileServicesError If an error occurs when interacting with the file services. RemoteOSError If an OSError occured on the remote server. """ base_url = SPYDER_PLUGIN_NAME + "/fs" async def _raise_for_status(self, response: aiohttp.ClientResponse): if response.status not in ( HTTPStatus.INTERNAL_SERVER_ERROR, HTTPStatus.EXPECTATION_FAILED, ): return response.raise_for_status() try: data = await response.json() except json.JSONDecodeError: data = {} # If we're in a context we can rely on __aexit__() to release as the # exception propagates. if not response._in_context: response.release() if response.status == HTTPStatus.EXPECTATION_FAILED: raise RemoteOSError.from_json(data, response.url) raise RemoteFileServicesError( data.get("type", "UnknownError"), data.get("message", "Unknown error"), response.url, data.get("tracebacks", []), ) async def ls(self, path: Path, *, detail: bool = True): async with self.session.get( self.api_url / "ls", params={"path": f"file://{path}", "detail": str(detail).lower()}, ) as response: async for line in response.content: yield json.loads(line) async def info(self, path: Path): async with self.session.get( self.api_url / "info", params={"path": f"file://{path}"}, ) as response: return await response.json() async def exists(self, path: Path): async with self.session.get( self.api_url / "exists", params={"path": f"file://{path}"}, ) as response: return await response.json() async def is_file(self, path: Path): async with self.session.get( self.api_url / "isfile", params={"path": f"file://{path}"}, ) as response: return await response.json() async def is_dir(self, path: Path): async with self.session.get( self.api_url / "isdir", params={"path": f"file://{path}"}, ) as response: return await response.json() async def mkdir( self, path: Path, *, create_parents: bool = True, exist_ok: bool = 
False ): async with self.session.post( self.api_url / "mkdir", params={ "path": f"file://{path}", "create_parents": str(create_parents).lower(), "exist_ok": str(exist_ok).lower(), }, ) as response: return await response.json() async def rmdir(self, path: Path, non_empty: bool = False): async with self.session.delete( self.api_url / "rmdir", params={"path": f"file://{path}", "non_empty": str(non_empty).lower()}, ) as response: return await response.json() async def unlink(self, path: Path, missing_ok: bool = False): async with self.session.delete( self.api_url / "file", params={ "path": f"file://{path}", "missing_ok": str(missing_ok).lower(), }, ) as response: return await response.json() async def copy(self, path1: Path, path2: Path): async with self.session.post( self.api_url / "copy", params={"path": f"file://{path1}", "dest": f"file://{path2}"}, ) as response: return await response.json() async def copy2(self, path1: Path, path2: Path): async with self.session.post( self.api_url / "copy", params={ "path": f"file://{path1}", "dest": f"file://{path2}", "metadata": "true", }, ) as response: return await response.json() async def replace(self, path1: Path, path2: Path): async with self.session.post( self.api_url / "move", params={"path": f"file://{path1}", "dest": f"file://{path2}"}, ) as response: return await response.json() async def touch(self, path: Path, truncate: bool = True): async with self.session.post( self.api_url / "touch", params={ "path": f"file://{path}", "truncate": str(truncate).lower(), }, ) as response: return await response.json() async def open( self, path, mode="r", atomic=False, lock=False, encoding="utf-8" ): file = SpyderRemoteFileIOAPI( path, mode, atomic, lock, encoding, manager=self.manager ) await file.connect() return file async def zip_directory( self, path: Path, *, compression_level: int = 5 ): async with self.session.post( self.api_url / "zip", params={ "path": f"file://{path}", "compression": compression_level, }, ) as response: 
while data := await response.content.read(65536): yield data
SpyderRemoteFileServicesAPI
python
ray-project__ray
python/ray/autoscaler/v2/tests/util.py
{ "start": 4516, "end": 5725 }
class ____(Check): def __init__( self, resources: Dict[str, float], op: operator = operator.eq, enforce_all=False ): self.resources = resources self.op = op self.enforce_all = enforce_all def check(self, status: ClusterStatus): actual = status.total_resources() if self.enforce_all and len(actual) != len(self.resources): raise CheckFailure( f"Expected {len(self.resources)} resources, got {len(actual)}" ) for k, v in self.resources.items(): if k not in actual and v: raise CheckFailure(f"Expected resource {k} not found") if not self.op(v, actual.get(k, 0)): raise CheckFailure( f"Expected resource {k} {self.op} {v}, got {actual.get(k, 0)}" ) def __str__(self) -> str: return f"TotalResourceCheck({self.op}): {self.resources}" def check_cluster( targets: List[Check], ) -> bool: gcs_address = ray.get_runtime_context().gcs_address cluster_status = get_cluster_status(gcs_address) for target in targets: target.check(cluster_status) return True
TotalResourceCheck
python
kamyu104__LeetCode-Solutions
Python/adding-two-negabinary-numbers.py
{ "start": 29, "end": 587 }
class ____(object): def addNegabinary(self, arr1, arr2): """ :type arr1: List[int] :type arr2: List[int] :rtype: List[int] """ result = [] carry = 0 while arr1 or arr2 or carry: if arr1: carry += arr1.pop() if arr2: carry += arr2.pop() result.append(carry & 1) carry = -(carry >> 1) while len(result) > 1 and result[-1] == 0: result.pop() result.reverse() return result
Solution
python
huggingface__transformers
src/transformers/models/idefics2/modeling_idefics2.py
{ "start": 13499, "end": 14593 }
class ____(nn.Module): """Multihead Attention Pooling.""" def __init__(self, config: Idefics2VisionConfig): super().__init__() self.probe = nn.Parameter(torch.randn(1, 1, config.hidden_size)) self.attention = torch.nn.MultiheadAttention(config.hidden_size, config.num_attention_heads, batch_first=True) self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) # Ignore copy self.mlp = Idefics2MLP( hidden_size=config.hidden_size, intermediate_size=config.intermediate_size, hidden_act=config.hidden_act, output_size=config.hidden_size, ) def forward(self, hidden_state): batch_size = hidden_state.shape[0] probe = self.probe.repeat(batch_size, 1, 1) hidden_state = self.attention(probe, hidden_state, hidden_state)[0] residual = hidden_state hidden_state = self.layernorm(hidden_state) hidden_state = residual + self.mlp(hidden_state) return hidden_state[:, 0]
Idefics2MultiheadAttentionPoolingHead
python
wandb__wandb
wandb/vendor/pygments/lexers/data.py
{ "start": 852, "end": 15672 }
class ____(ExtendedRegexLexer): """ Lexer for `YAML <http://yaml.org/>`_, a human-friendly data serialization language. .. versionadded:: 0.11 """ name = 'YAML' aliases = ['yaml'] filenames = ['*.yaml', '*.yml'] mimetypes = ['text/x-yaml'] def something(token_class): """Do not produce empty tokens.""" def callback(lexer, match, context): text = match.group() if not text: return yield match.start(), token_class, text context.pos = match.end() return callback def reset_indent(token_class): """Reset the indentation levels.""" def callback(lexer, match, context): text = match.group() context.indent_stack = [] context.indent = -1 context.next_indent = 0 context.block_scalar_indent = None yield match.start(), token_class, text context.pos = match.end() return callback def save_indent(token_class, start=False): """Save a possible indentation level.""" def callback(lexer, match, context): text = match.group() extra = '' if start: context.next_indent = len(text) if context.next_indent < context.indent: while context.next_indent < context.indent: context.indent = context.indent_stack.pop() if context.next_indent > context.indent: extra = text[context.indent:] text = text[:context.indent] else: context.next_indent += len(text) if text: yield match.start(), token_class, text if extra: yield match.start()+len(text), token_class.Error, extra context.pos = match.end() return callback def set_indent(token_class, implicit=False): """Set the previously saved indentation level.""" def callback(lexer, match, context): text = match.group() if context.indent < context.next_indent: context.indent_stack.append(context.indent) context.indent = context.next_indent if not implicit: context.next_indent += len(text) yield match.start(), token_class, text context.pos = match.end() return callback def set_block_scalar_indent(token_class): """Set an explicit indentation level for a block scalar.""" def callback(lexer, match, context): text = match.group() context.block_scalar_indent = None if not 
text: return increment = match.group(1) if increment: current_indent = max(context.indent, 0) increment = int(increment) context.block_scalar_indent = current_indent + increment if text: yield match.start(), token_class, text context.pos = match.end() return callback def parse_block_scalar_empty_line(indent_token_class, content_token_class): """Process an empty line in a block scalar.""" def callback(lexer, match, context): text = match.group() if (context.block_scalar_indent is None or len(text) <= context.block_scalar_indent): if text: yield match.start(), indent_token_class, text else: indentation = text[:context.block_scalar_indent] content = text[context.block_scalar_indent:] yield match.start(), indent_token_class, indentation yield (match.start()+context.block_scalar_indent, content_token_class, content) context.pos = match.end() return callback def parse_block_scalar_indent(token_class): """Process indentation spaces in a block scalar.""" def callback(lexer, match, context): text = match.group() if context.block_scalar_indent is None: if len(text) <= max(context.indent, 0): context.stack.pop() context.stack.pop() return context.block_scalar_indent = len(text) else: if len(text) < context.block_scalar_indent: context.stack.pop() context.stack.pop() return if text: yield match.start(), token_class, text context.pos = match.end() return callback def parse_plain_scalar_indent(token_class): """Process indentation spaces in a plain scalar.""" def callback(lexer, match, context): text = match.group() if len(text) <= context.indent: context.stack.pop() context.stack.pop() return if text: yield match.start(), token_class, text context.pos = match.end() return callback tokens = { # the root rules 'root': [ # ignored whitespaces (r'[ ]+(?=#|$)', Text), # line breaks (r'\n+', Text), # a comment (r'#[^\n]*', Comment.Single), # the '%YAML' directive (r'^%YAML(?=[ ]|$)', reset_indent(Name.Tag), 'yaml-directive'), # the %TAG directive (r'^%TAG(?=[ ]|$)', 
reset_indent(Name.Tag), 'tag-directive'), # document start and document end indicators (r'^(?:---|\.\.\.)(?=[ ]|$)', reset_indent(Name.Namespace), 'block-line'), # indentation spaces (r'[ ]*(?!\s|$)', save_indent(Text, start=True), ('block-line', 'indentation')), ], # trailing whitespaces after directives or a block scalar indicator 'ignored-line': [ # ignored whitespaces (r'[ ]+(?=#|$)', Text), # a comment (r'#[^\n]*', Comment.Single), # line break (r'\n', Text, '#pop:2'), ], # the %YAML directive 'yaml-directive': [ # the version number (r'([ ]+)([0-9]+\.[0-9]+)', bygroups(Text, Number), 'ignored-line'), ], # the %YAG directive 'tag-directive': [ # a tag handle and the corresponding prefix (r'([ ]+)(!|![\w-]*!)' r'([ ]+)(!|!?[\w;/?:@&=+$,.!~*\'()\[\]%-]+)', bygroups(Text, Keyword.Type, Text, Keyword.Type), 'ignored-line'), ], # block scalar indicators and indentation spaces 'indentation': [ # trailing whitespaces are ignored (r'[ ]*$', something(Text), '#pop:2'), # whitespaces preceeding block collection indicators (r'[ ]+(?=[?:-](?:[ ]|$))', save_indent(Text)), # block collection indicators (r'[?:-](?=[ ]|$)', set_indent(Punctuation.Indicator)), # the beginning a block line (r'[ ]*', save_indent(Text), '#pop'), ], # an indented line in the block context 'block-line': [ # the line end (r'[ ]*(?=#|$)', something(Text), '#pop'), # whitespaces separating tokens (r'[ ]+', Text), # tags, anchors and aliases, include('descriptors'), # block collections and scalars include('block-nodes'), # flow collections and quoted scalars include('flow-nodes'), # a plain scalar (r'(?=[^\s?:,\[\]{}#&*!|>\'"%@`-]|[?:-]\S)', something(Name.Variable), 'plain-scalar-in-block-context'), ], # tags, anchors, aliases 'descriptors': [ # a full-form tag (r'!<[\w#;/?:@&=+$,.!~*\'()\[\]%-]+>', Keyword.Type), # a tag in the form '!', '!suffix' or '!handle!suffix' (r'!(?:[\w-]+!)?' 
r'[\w#;/?:@&=+$,.!~*\'()\[\]%-]+', Keyword.Type), # an anchor (r'&[\w-]+', Name.Label), # an alias (r'\*[\w-]+', Name.Variable), ], # block collections and scalars 'block-nodes': [ # implicit key (r':(?=[ ]|$)', set_indent(Punctuation.Indicator, implicit=True)), # literal and folded scalars (r'[|>]', Punctuation.Indicator, ('block-scalar-content', 'block-scalar-header')), ], # flow collections and quoted scalars 'flow-nodes': [ # a flow sequence (r'\[', Punctuation.Indicator, 'flow-sequence'), # a flow mapping (r'\{', Punctuation.Indicator, 'flow-mapping'), # a single-quoted scalar (r'\'', String, 'single-quoted-scalar'), # a double-quoted scalar (r'\"', String, 'double-quoted-scalar'), ], # the content of a flow collection 'flow-collection': [ # whitespaces (r'[ ]+', Text), # line breaks (r'\n+', Text), # a comment (r'#[^\n]*', Comment.Single), # simple indicators (r'[?:,]', Punctuation.Indicator), # tags, anchors and aliases include('descriptors'), # nested collections and quoted scalars include('flow-nodes'), # a plain scalar (r'(?=[^\s?:,\[\]{}#&*!|>\'"%@`])', something(Name.Variable), 'plain-scalar-in-flow-context'), ], # a flow sequence indicated by '[' and ']' 'flow-sequence': [ # include flow collection rules include('flow-collection'), # the closing indicator (r'\]', Punctuation.Indicator, '#pop'), ], # a flow mapping indicated by '{' and '}' 'flow-mapping': [ # include flow collection rules include('flow-collection'), # the closing indicator (r'\}', Punctuation.Indicator, '#pop'), ], # block scalar lines 'block-scalar-content': [ # line break (r'\n', Text), # empty line (r'^[ ]+$', parse_block_scalar_empty_line(Text, Name.Constant)), # indentation spaces (we may leave the state here) (r'^[ ]*', parse_block_scalar_indent(Text)), # line content (r'[\S\t ]+', Name.Constant), ], # the content of a literal or folded scalar 'block-scalar-header': [ # indentation indicator followed by chomping flag (r'([1-9])?[+-]?(?=[ ]|$)', 
set_block_scalar_indent(Punctuation.Indicator), 'ignored-line'), # chomping flag followed by indentation indicator (r'[+-]?([1-9])?(?=[ ]|$)', set_block_scalar_indent(Punctuation.Indicator), 'ignored-line'), ], # ignored and regular whitespaces in quoted scalars 'quoted-scalar-whitespaces': [ # leading and trailing whitespaces are ignored (r'^[ ]+', Text), (r'[ ]+$', Text), # line breaks are ignored (r'\n+', Text), # other whitespaces are a part of the value (r'[ ]+', Name.Variable), ], # single-quoted scalars 'single-quoted-scalar': [ # include whitespace and line break rules include('quoted-scalar-whitespaces'), # escaping of the quote character (r'\'\'', String.Escape), # regular non-whitespace characters (r'[^\s\']+', String), # the closing quote (r'\'', String, '#pop'), ], # double-quoted scalars 'double-quoted-scalar': [ # include whitespace and line break rules include('quoted-scalar-whitespaces'), # escaping of special characters (r'\\[0abt\tn\nvfre "\\N_LP]', String), # escape codes (r'\\(?:x[0-9A-Fa-f]{2}|u[0-9A-Fa-f]{4}|U[0-9A-Fa-f]{8})', String.Escape), # regular non-whitespace characters (r'[^\s"\\]+', String), # the closing quote (r'"', String, '#pop'), ], # the beginning of a new line while scanning a plain scalar 'plain-scalar-in-block-context-new-line': [ # empty lines (r'^[ ]+$', Text), # line breaks (r'\n+', Text), # document start and document end indicators (r'^(?=---|\.\.\.)', something(Name.Namespace), '#pop:3'), # indentation spaces (we may leave the block line state here) (r'^[ ]*', parse_plain_scalar_indent(Text), '#pop'), ], # a plain scalar in the block context 'plain-scalar-in-block-context': [ # the scalar ends with the ':' indicator (r'[ ]*(?=:[ ]|:$)', something(Text), '#pop'), # the scalar ends with whitespaces followed by a comment (r'[ ]+(?=#)', Text, '#pop'), # trailing whitespaces are ignored (r'[ ]+$', Text), # line breaks are ignored (r'\n+', Text, 'plain-scalar-in-block-context-new-line'), # other whitespaces are a part of 
the value (r'[ ]+', Literal.Scalar.Plain), # regular non-whitespace characters (r'(?::(?!\s)|[^\s:])+', Literal.Scalar.Plain), ], # a plain scalar is the flow context 'plain-scalar-in-flow-context': [ # the scalar ends with an indicator character (r'[ ]*(?=[,:?\[\]{}])', something(Text), '#pop'), # the scalar ends with a comment (r'[ ]+(?=#)', Text, '#pop'), # leading and trailing whitespaces are ignored (r'^[ ]+', Text), (r'[ ]+$', Text), # line breaks are ignored (r'\n+', Text), # other whitespaces are a part of the value (r'[ ]+', Name.Variable), # regular non-whitespace characters (r'[^\s,:?\[\]{}]+', Name.Variable), ], } def get_tokens_unprocessed(self, text=None, context=None): if context is None: context = YamlLexerContext(text, 0) return super(YamlLexer, self).get_tokens_unprocessed(text, context)
YamlLexer
python
skorch-dev__skorch
skorch/tests/test_classifier.py
{ "start": 316, "end": 7657 }
class ____: @pytest.fixture(scope='module') def data(self, classifier_data): return classifier_data @pytest.fixture(scope='module') def dummy_callback(self): from skorch.callbacks import Callback cb = Mock(spec=Callback) # make dummy behave like an estimator cb.get_params.return_value = {} cb.set_params = lambda **kwargs: cb return cb @pytest.fixture(scope='module') def net_cls(self): from skorch import NeuralNetClassifier return NeuralNetClassifier @pytest.fixture(scope='module') def module_cls(self, classifier_module): return classifier_module @pytest.fixture(scope='module') def net(self, net_cls, module_cls, dummy_callback): return net_cls( module_cls, callbacks=[('dummy', dummy_callback)], max_epochs=10, lr=0.1, ) @pytest.fixture(scope='module') def net_fit(self, net, data): # Careful, don't call additional fits on this, since that would have # side effects on other tests. X, y = data return net.fit(X, y) def test_clone(self, net_fit): clone(net_fit) def test_predict_and_predict_proba(self, net_fit, data): X = data[0] y_proba = net_fit.predict_proba(X) assert np.allclose(y_proba.sum(1), 1, rtol=1e-5) y_pred = net_fit.predict(X) assert np.allclose(np.argmax(y_proba, 1), y_pred, rtol=1e-5) def test_score(self, net_fit, data): X, y = data accuracy = net_fit.score(X, y) assert 0. <= accuracy <= 1. 
# classifier-specific test def test_takes_log_with_nllloss(self, net_cls, module_cls, data): net = net_cls(module_cls, criterion=nn.NLLLoss, max_epochs=1) net.initialize() mock_loss = Mock(side_effect=nn.NLLLoss()) net.criterion_.forward = mock_loss net.partial_fit(*data) # call partial_fit to avoid re-initialization # check that loss was called with log-probabilities for (y_log, _), _ in mock_loss.call_args_list: assert (y_log < 0).all() y_proba = torch.exp(y_log) assert torch.isclose(torch.ones(len(y_proba)), y_proba.sum(1)).all() # classifier-specific test def test_takes_no_log_without_nllloss(self, net_cls, module_cls, data): net = net_cls(module_cls, criterion=nn.CrossEntropyLoss, max_epochs=1) net.initialize() mock_loss = Mock(side_effect=nn.NLLLoss()) net.criterion_.forward = mock_loss net.partial_fit(*data) # call partial_fit to avoid re-initialization # check that loss was called with raw probabilities for (y_out, _), _ in mock_loss.call_args_list: assert not (y_out < 0).all() assert torch.isclose(torch.ones(len(y_out)), y_out.sum(1)).all() # classifier-specific test def test_high_learning_rate(self, net_cls, module_cls, data): # regression test for nan loss with high learning rates issue #481 net = net_cls(module_cls, max_epochs=2, lr=2, optimizer=torch.optim.Adam) net.fit(*data) assert np.any(~np.isnan(net.history[:, 'train_loss'])) def test_binary_classes_set_by_default(self, net_cls, module_cls, data): net = net_cls(module_cls).fit(*data) assert (net.classes_ == [0, 1]).all() def test_non_binary_classes_set_by_default(self, net_cls, module_cls, data): X = data[0] y = np.arange(len(X)) % 10 net = net_cls(module_cls, max_epochs=0).fit(X, y) assert (net.classes_ == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]).all() def test_classes_data_torch_tensor(self, net_cls, module_cls, data): X = torch.as_tensor(data[0]) y = torch.as_tensor(np.arange(len(X)) % 10) net = net_cls(module_cls, max_epochs=0).fit(X, y) assert (net.classes_ == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]).all() def 
test_classes_with_gaps(self, net_cls, module_cls, data): X = data[0] y = np.arange(len(X)) % 10 y[(y == 0) | (y == 5)] = 4 # remove classes 0 and 5 net = net_cls(module_cls, max_epochs=0).fit(X, y) assert (net.classes_ == [1, 2, 3, 4, 6, 7, 8, 9]).all() def test_pass_classes_explicitly_overrides(self, net_cls, module_cls, data): net = net_cls(module_cls, max_epochs=0, classes=['foo', 'bar']).fit(*data) assert (net.classes_ == np.array(['foo', 'bar'])).all() def test_classes_are_set_with_tensordataset_explicit_y( self, net_cls, module_cls, data ): # see 990 X = torch.from_numpy(data[0]) y = torch.arange(len(X)) % 10 dataset = torch.utils.data.TensorDataset(X, y) net = net_cls(module_cls, max_epochs=0).fit(dataset, y) assert (net.classes_ == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]).all() def test_classes_are_set_with_tensordataset_implicit_y( self, net_cls, module_cls, data ): # see 990 from skorch.dataset import ValidSplit X = torch.from_numpy(data[0]) y = torch.arange(len(X)) % 10 dataset = torch.utils.data.TensorDataset(X, y) net = net_cls( module_cls, max_epochs=0, train_split=ValidSplit(3, stratified=False) ).fit(dataset, None) assert (net.classes_ == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]).all() @pytest.mark.parametrize('classes', [[], np.array([])]) def test_pass_empty_classes_raises( self, net_cls, module_cls, data, classes): net = net_cls( module_cls, max_epochs=0, classes=classes).fit(*data).fit(*data) with pytest.raises(AttributeError) as exc: net.classes_ # pylint: disable=pointless-statement msg = exc.value.args[0] expected = "NeuralNetClassifier has no attribute 'classes_'" assert msg == expected def test_with_calibrated_classifier_cv(self, net_fit, data): from sklearn.calibration import CalibratedClassifierCV cccv = CalibratedClassifierCV(net_fit, cv=2) cccv.fit(*data) def test_error_when_classes_could_not_be_inferred(self, net_cls, module_cls, data): # Provide a better error message when net.classes_ does not exist, # though it is pretty difficult to know exactly the 
circumstanes that # led to this, so we have to make a guess. # See https://github.com/skorch-dev/skorch/discussions/1003 class MyDataset(torch.utils.data.Dataset): """Dataset class that makes it impossible to access y""" def __len__(self): return len(data[0]) def __getitem__(self, i): return data[0][i], data[1][i] net = net_cls(module_cls, max_epochs=0, train_split=False) ds = MyDataset() net.fit(ds, y=None) msg = ( "NeuralNetClassifier could not infer the classes from y; " "this error probably occurred because the net was trained without y " "and some function tried to access the '.classes_' attribute; " "a possible solution is to provide the 'classes' argument when " "initializing NeuralNetClassifier" ) with pytest.raises(AttributeError, match=msg): net.classes_
TestNeuralNet
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/orm/exc.py
{ "start": 3843, "end": 4209 }
class ____(UnmappedError): """An mapping operation was requested for an unknown class.""" def __init__(self, cls: Type[_T], msg: Optional[str] = None): if not msg: msg = _default_unmapped(cls) UnmappedError.__init__(self, msg) def __reduce__(self) -> Any: return self.__class__, (None, self.args[0])
UnmappedClassError
python
realpython__materials
python-textual/horizontal_layout.py
{ "start": 122, "end": 500 }
class ____(App): def compose(self): with Horizontal(): for i in range(NUM_BOXES): static = Static(f"Static {i + 1}") static.styles.border = ("solid", "green") static.styles.width = "10%" yield static if __name__ == "__main__": app = HorizontalLayoutApp() app.run()
HorizontalLayoutApp
python
google__pytype
pytype/tests/test_typeguard.py
{ "start": 4820, "end": 6475 }
class ____(test_base.BaseTest): """Tests for TypeGuard as a Callable return type.""" def test_callable(self): self.Check(""" from typing import Callable, TypeGuard def f(x: Callable[[object], TypeGuard[int]], y: object): if x(y): assert_type(y, int) """) def test_generic(self): self.Check(""" from typing import Callable, TypeGuard, TypeVar T = TypeVar('T') def f(x: Callable[[T | None], TypeGuard[T]], y: int | None): if x(y): assert_type(y, int) """) def test_invalid(self): self.CheckWithErrors(""" from typing import Any, Callable, List, TypeGuard x1: Callable[[], TypeGuard[int]] # invalid-annotation x2: Callable[[TypeGuard[int]], Any] # invalid-annotation x3: Callable[[object], List[TypeGuard[int]]] # invalid-annotation x4: Callable[[object], TypeGuard] # invalid-annotation """) def test_pyi(self): with self.DepTree([( "foo.pyi", """ from typing import Callable, TypeGuard f: Callable[[object], TypeGuard[int]] """, )]): self.Check(""" import foo def f(x: object): if foo.f(x): assert_type(x, int) """) def test_non_variable(self): errors = self.CheckWithErrors(""" from typing import Callable, TypeGuard f: Callable[[object], TypeGuard[int]] def g(x: dict[str, object]): print(f(x['k'])) # not-supported-yet[e] """) self.assertErrorSequences( errors, {"e": "TypeGuard with an arbitrary expression"} ) @test_utils.skipBeforePy((3, 10), "New in 3.10")
CallableTest
python
kubernetes-client__python
kubernetes/client/models/v1_overhead.py
{ "start": 383, "end": 3586 }
class ____(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'pod_fixed': 'dict(str, str)' } attribute_map = { 'pod_fixed': 'podFixed' } def __init__(self, pod_fixed=None, local_vars_configuration=None): # noqa: E501 """V1Overhead - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._pod_fixed = None self.discriminator = None if pod_fixed is not None: self.pod_fixed = pod_fixed @property def pod_fixed(self): """Gets the pod_fixed of this V1Overhead. # noqa: E501 podFixed represents the fixed resource overhead associated with running a pod. # noqa: E501 :return: The pod_fixed of this V1Overhead. # noqa: E501 :rtype: dict(str, str) """ return self._pod_fixed @pod_fixed.setter def pod_fixed(self, pod_fixed): """Sets the pod_fixed of this V1Overhead. podFixed represents the fixed resource overhead associated with running a pod. # noqa: E501 :param pod_fixed: The pod_fixed of this V1Overhead. 
# noqa: E501 :type: dict(str, str) """ self._pod_fixed = pod_fixed def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1Overhead): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1Overhead): return True return self.to_dict() != other.to_dict()
V1Overhead
python
huggingface__transformers
src/transformers/models/idefics/processing_idefics.py
{ "start": 1090, "end": 1223 }
class ____(TextKwargs, total=False): add_eos_token: Optional[bool] add_end_of_utterance_token: Optional[bool]
IdeficsTextKwargs
python
pikepdf__pikepdf
src/pikepdf/models/metadata.py
{ "start": 8153, "end": 8890 }
class ____(Converter): """Convert XMP document authors to DocumentInfo.""" @staticmethod def xmp_from_docinfo(docinfo_val: str | None) -> Any: # type: ignore """Derive XMP authors info from DocumentInfo.""" return [docinfo_val] @staticmethod def docinfo_from_xmp(xmp_val): """Derive DocumentInfo authors from XMP. XMP supports multiple author values, while DocumentInfo has a string, so we return the values separated by semi-colons. """ if isinstance(xmp_val, str): return xmp_val if xmp_val is None or xmp_val == [None]: return None return '; '.join(author for author in xmp_val if author is not None)
AuthorConverter
python
pypa__warehouse
tests/unit/helpdesk/test_services.py
{ "start": 576, "end": 1485 }
class ____: """Common tests for the service interface.""" def test_verify_service_class(self, service_class): assert verifyClass(IHelpDeskService, service_class) @responses.activate def test_create_service(self, service_class): responses.add( responses.POST, "https://api.helpscout.net/v2/oauth2/token", json={"access_token": "NOT_REAL_TOKEN"}, ) context = None request = pretend.stub( http=requests.Session(), registry=pretend.stub( settings={ "helpscout.app_id": "an insecure helpscout app id", "helpscout.app_secret": "an insecure helpscout app secret", }, ), ) service = service_class.create_service(context, request) assert isinstance(service, service_class)
TestHelpDeskService
python
pallets__jinja
tests/test_lexnparse.py
{ "start": 1139, "end": 5989 }
class ____: def test_raw1(self, env): tmpl = env.from_string( "{% raw %}foo{% endraw %}|{%raw%}{{ bar }}|{% baz %}{% endraw %}" ) assert tmpl.render() == "foo|{{ bar }}|{% baz %}" def test_raw2(self, env): tmpl = env.from_string("1 {%- raw -%} 2 {%- endraw -%} 3") assert tmpl.render() == "123" def test_raw3(self, env): # The second newline after baz exists because it is AFTER the # {% raw %} and is ignored. env = Environment(lstrip_blocks=True, trim_blocks=True) tmpl = env.from_string("bar\n{% raw %}\n {{baz}}2 spaces\n{% endraw %}\nfoo") assert tmpl.render(baz="test") == "bar\n\n {{baz}}2 spaces\nfoo" def test_raw4(self, env): # The trailing dash of the {% raw -%} cleans both the spaces and # newlines up to the first character of data. env = Environment(lstrip_blocks=True, trim_blocks=False) tmpl = env.from_string( "bar\n{%- raw -%}\n\n \n 2 spaces\n space{%- endraw -%}\nfoo" ) assert tmpl.render() == "bar2 spaces\n spacefoo" def test_balancing(self, env): env = Environment("{%", "%}", "${", "}") tmpl = env.from_string( """{% for item in seq %}${{'foo': item}|upper}{% endfor %}""" ) assert tmpl.render(seq=list(range(3))) == "{'FOO': 0}{'FOO': 1}{'FOO': 2}" def test_comments(self, env): env = Environment("<!--", "-->", "{", "}") tmpl = env.from_string( """\ <ul> <!--- for item in seq --> <li>{item}</li> <!--- endfor --> </ul>""" ) assert tmpl.render(seq=list(range(3))) == ( "<ul>\n <li>0</li>\n <li>1</li>\n <li>2</li>\n</ul>" ) def test_string_escapes(self, env): for char in "\0", "\u2668", "\xe4", "\t", "\r", "\n": tmpl = env.from_string(f"{{{{ {char!r} }}}}") assert tmpl.render() == char assert env.from_string('{{ "\N{HOT SPRINGS}" }}').render() == "\u2668" def test_bytefallback(self, env): from pprint import pformat tmpl = env.from_string("""{{ 'foo'|pprint }}|{{ 'bär'|pprint }}""") assert tmpl.render() == pformat("foo") + "|" + pformat("bär") def test_operators(self, env): from jinja2.lexer import operators for test, expect in operators.items(): if test in 
"([{}])": continue stream = env.lexer.tokenize(f"{{{{ {test} }}}}") next(stream) assert stream.current.type == expect def test_normalizing(self, env): for seq in "\r", "\r\n", "\n": env = Environment(newline_sequence=seq) tmpl = env.from_string("1\n2\r\n3\n4\n") result = tmpl.render() assert result.replace(seq, "X") == "1X2X3X4" def test_trailing_newline(self, env): for keep in [True, False]: env = Environment(keep_trailing_newline=keep) for template, expected in [ ("", {}), ("no\nnewline", {}), ("with\nnewline\n", {False: "with\nnewline"}), ("with\nseveral\n\n\n", {False: "with\nseveral\n\n"}), ]: tmpl = env.from_string(template) expect = expected.get(keep, template) result = tmpl.render() assert result == expect, (keep, template, result, expect) @pytest.mark.parametrize( ("name", "valid"), [ ("foo", True), ("föö", True), ("き", True), ("_", True), ("1a", False), # invalid ascii start ("a-", False), # invalid ascii continue ("\U0001f40da", False), # invalid unicode start ("a🐍\U0001f40d", False), # invalid unicode continue # start characters not matched by \w ("\u1885", True), ("\u1886", True), ("\u2118", True), ("\u212e", True), # continue character not matched by \w ("\xb7", False), ("a\xb7", True), ], ) def test_name(self, env, name, valid): t = "{{ " + name + " }}" if valid: # valid for version being tested, shouldn't raise env.from_string(t) else: pytest.raises(TemplateSyntaxError, env.from_string, t) def test_lineno_with_strip(self, env): tokens = env.lex( """\ <html> <body> {%- block content -%} <hr> {{ item }} {% endblock %} </body> </html>""" ) for tok in tokens: lineno, token_type, value = tok if token_type == "name" and value == "item": assert lineno == 5 break
TestLexer
python
django__django
tests/delete_regress/models.py
{ "start": 1360, "end": 1438 }
class ____(Contact): email_address = models.EmailField(max_length=100)
Email
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 198295, "end": 201503 }
class ____(sgqlc.types.Input): """Autogenerated input type of CreateSponsorsListing""" __schema__ = github_schema __field_names__ = ( "sponsorable_login", "fiscal_host_login", "fiscally_hosted_project_profile_url", "billing_country_or_region_code", "residence_country_or_region_code", "contact_email", "full_description", "client_mutation_id", ) sponsorable_login = sgqlc.types.Field(String, graphql_name="sponsorableLogin") """The username of the organization to create a GitHub Sponsors profile for, if desired. Defaults to creating a GitHub Sponsors profile for the authenticated user if omitted. """ fiscal_host_login = sgqlc.types.Field(String, graphql_name="fiscalHostLogin") """The username of the supported fiscal host's GitHub organization, if you want to receive sponsorship payouts through a fiscal host rather than directly to a bank account. For example, 'Open-Source- Collective' for Open Source Collective or 'numfocus' for numFOCUS. Case insensitive. See https://docs.github.com/sponsors/receiving- sponsorships-through-github-sponsors/using-a-fiscal-host-to- receive-github-sponsors-payouts for more information. """ fiscally_hosted_project_profile_url = sgqlc.types.Field(String, graphql_name="fiscallyHostedProjectProfileUrl") """The URL for your profile page on the fiscal host's website, e.g., https://opencollective.com/babel or https://numfocus.org/project/bokeh. Required if fiscalHostLogin is specified. """ billing_country_or_region_code = sgqlc.types.Field(SponsorsCountryOrRegionCode, graphql_name="billingCountryOrRegionCode") """The country or region where the sponsorable's bank account is located. Required if fiscalHostLogin is not specified, ignored when fiscalHostLogin is specified. """ residence_country_or_region_code = sgqlc.types.Field(SponsorsCountryOrRegionCode, graphql_name="residenceCountryOrRegionCode") """The country or region where the sponsorable resides. This is for tax purposes. 
Required if the sponsorable is yourself, ignored when sponsorableLogin specifies an organization. """ contact_email = sgqlc.types.Field(String, graphql_name="contactEmail") """The email address we should use to contact you about the GitHub Sponsors profile being created. This will not be shared publicly. Must be a verified email address already on your GitHub account. Only relevant when the sponsorable is yourself. Defaults to your primary email address on file if omitted. """ full_description = sgqlc.types.Field(String, graphql_name="fullDescription") """Provide an introduction to serve as the main focus that appears on your GitHub Sponsors profile. It's a great opportunity to help potential sponsors learn more about you, your work, and why their sponsorship is important to you. GitHub-flavored Markdown is supported. """ client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId") """A unique identifier for the client performing the mutation."""
CreateSponsorsListingInput
python
Pylons__pyramid
tests/test_config/pkgs/scannable/__init__.py
{ "start": 1662, "end": 1778 }
class ____: @view_config(name='basemethod', renderer=null_renderer) def basemethod(self): """ """
Base
python
tensorflow__tensorflow
tensorflow/python/autograph/tests/loop_control_flow_test.py
{ "start": 3700, "end": 7416 }
class ____(reference_test_base.TestCase, parameterized.TestCase): @parameterized.parameters(*itertools.product( ( [], [1], [1, 2], [1, 2, 3], [1, 2, 3, 4], ), ( list, _int_tensor, _int_dataset, ), ( continue_in_single_for, break_in_single_for, unconditional_return_in_single_for, effectively_unconditional_return_in_single_for, ), )) def test_single_for(self, l, type_, target): if ((type_ is _int_dataset) and (target in (unconditional_return_in_single_for, effectively_unconditional_return_in_single_for))): # TODO(mdan): Enable in a separate improvement. self.skipTest('Creating symbols in dataset loops.') if ((not l) and ((target in (unconditional_return_in_single_for, effectively_unconditional_return_in_single_for)))): self.skipTest('Undefined symbols require at least one iteration.') l = type_(l) self.assertFunctionMatchesEager(target, l) @parameterized.parameters(*itertools.product( ( 0, 1, 2, 3, 4, ), ( int, _int_tensor, ), ( continue_in_single_while, break_in_single_while, multiple_breaks_in_single_while, break_followed_by_cond_in_single_while, unconditional_return_in_single_while, effectively_unconditional_return_in_single_while, ), )) def test_single_while(self, n, type_, target): if ((not n) and ((target in (unconditional_return_in_single_while, effectively_unconditional_return_in_single_while)))): self.skipTest('Undefined symbols require at least one iteration.') n = type_(n) self.assertFunctionMatchesEager(target, n) @parameterized.parameters( (unconditional_return_in_single_for, _int_tensor, []), (effectively_unconditional_return_in_single_for, _int_tensor, []), (unconditional_return_in_single_while, _int_tensor, 0), (effectively_unconditional_return_in_single_while, _int_tensor, 0), ) def test_single_loop_illegal_return(self, target, type_, l): with self.assertRaisesRegex(tf.errors.InvalidArgumentError, 'must iterate at least once to initialize'): tf.function(target)(type_(l)) @parameterized.parameters(*itertools.product( ( [[], []], [[1], [2]], [[1, 2], 
[3, 4]], [[1, 2, 3], [4, 5, 6]], # TODO(mdan): Add ragged tensors / variable-shape datasets. ), ( list, _int_tensor, _list_of_int_tensor, _int_dataset, ), ( continue_in_inner_for, break_in_inner_for, break_continue_in_inner_for, ), )) def test_nested_for(self, a, type_, target): a = type_(a) self.assertFunctionMatchesEager(target, a) @parameterized.parameters(*itertools.product( ( 0, 1, 2, 3, 4, ), ( 0, 1, 2, 3, 4, ), ( int, _int_tensor, ), ( int, _int_tensor, ), ( continue_in_inner_while, break_in_inner_while, break_continue_in_inner_while, ), )) def test_nested_while(self, m, n, m_type, n_type, target): m = m_type(m) n = m_type(n) self.assertFunctionMatchesEager(target, m, n) if __name__ == '__main__': tf.test.main()
LoopControlFlowTest
python
ipython__ipython
tests/test_dir2.py
{ "start": 53, "end": 1446 }
class ____(object): x = 1 z = 23 def test_base(): res = dir2(Base()) assert "x" in res assert "z" in res assert "y" not in res assert "__class__" in res assert res.count("x") == 1 assert res.count("__class__") == 1 def test_SubClass(): class SubClass(Base): y = 2 res = dir2(SubClass()) assert "y" in res assert res.count("y") == 1 assert res.count("x") == 1 def test_SubClass_with_trait_names_attr(): # usecase: trait_names is used in a class describing psychological classification class SubClass(Base): y = 2 trait_names = 44 res = dir2(SubClass()) assert "trait_names" in res def test_misbehaving_object_without_trait_names(): # dir2 shouldn't raise even when objects are dumb and raise # something other than AttribteErrors on bad getattr. class MisbehavingGetattr: def __getattr__(self, attr): raise KeyError("I should be caught") def some_method(self): return True class SillierWithDir(MisbehavingGetattr): def __dir__(self): return ["some_method"] for bad_klass in (MisbehavingGetattr, SillierWithDir): obj = bad_klass() assert obj.some_method() with pytest.raises(KeyError): obj.other_method() res = dir2(obj) assert "some_method" in res
Base
python
dask__dask
dask/dataframe/dask_expr/_groupby.py
{ "start": 17833, "end": 17929 }
class ____(SingleAggregation): groupby_chunk = M.idxmin groupby_aggregate = M.first
IdxMin
python
boto__boto3
boto3/docs/resource.py
{ "start": 1261, "end": 14259 }
class ____(BaseDocumenter): def __init__(self, resource, botocore_session, root_docs_path): super().__init__(resource) self._botocore_session = botocore_session self._root_docs_path = root_docs_path self._resource_sub_path = self._resource_name.lower() if self._resource_name == self._service_name: self._resource_sub_path = 'service-resource' def document_resource(self, section): self._add_title(section) self._add_resource_note(section) self._add_intro(section) self._add_identifiers(section) self._add_attributes(section) self._add_references(section) self._add_actions(section) self._add_sub_resources(section) self._add_collections(section) self._add_waiters(section) def _add_title(self, section): title_section = section.add_new_section('title') title_section.style.h2(self._resource_name) def _add_intro(self, section): identifier_names = [] if self._resource_model.identifiers: for identifier in self._resource_model.identifiers: identifier_names.append(identifier.name) # Write out the class signature. class_args = get_identifier_args_for_signature(identifier_names) start_class = section.add_new_section('start_class') start_class.style.start_sphinx_py_class( class_name=f'{self.class_name}({class_args})' ) # Add as short description about the resource description_section = start_class.add_new_section('description') self._add_description(description_section) # Add an example of how to instantiate the resource example_section = start_class.add_new_section('example') self._add_example(example_section, identifier_names) # Add the description for the parameters to instantiate the # resource. 
param_section = start_class.add_new_section('params') self._add_params_description(param_section, identifier_names) end_class = section.add_new_section('end_class') end_class.style.end_sphinx_py_class() def _add_description(self, section): official_service_name = get_official_service_name(self._service_model) section.write( f'A resource representing an {official_service_name} {self._resource_name}' ) def _add_example(self, section, identifier_names): section.style.start_codeblock() section.style.new_line() section.write('import boto3') section.style.new_line() section.style.new_line() section.write( f'{self._service_name} = boto3.resource(\'{self._service_name}\')' ) section.style.new_line() example_values = get_identifier_values_for_example(identifier_names) section.write( f'{xform_name(self._resource_name)} = {self._service_name}.{self._resource_name}({example_values})' ) section.style.end_codeblock() def _add_params_description(self, section, identifier_names): for identifier_name in identifier_names: description = get_identifier_description( self._resource_name, identifier_name ) section.write(f':type {identifier_name}: string') section.style.new_line() section.write(f':param {identifier_name}: {description}') section.style.new_line() def _add_overview_of_member_type(self, section, resource_member_type): section.style.new_line() section.write( f'These are the resource\'s available {resource_member_type}:' ) section.style.new_line() section.style.toctree() for member in self.member_map[resource_member_type]: section.style.tocitem(f'{member}') def _add_identifiers(self, section): identifiers = self._resource.meta.resource_model.identifiers section = section.add_new_section('identifiers') member_list = [] if identifiers: self.member_map['identifiers'] = member_list add_resource_type_overview( section=section, resource_type='Identifiers', description=( 'Identifiers are properties of a resource that are ' 'set upon instantiation of the resource.' 
), intro_link='identifiers_attributes_intro', ) for identifier in identifiers: member_list.append(identifier.name) # Create a new DocumentStructure for each identifier and add contents. identifier_doc = DocumentStructure(identifier.name, target='html') breadcrumb_section = identifier_doc.add_new_section('breadcrumb') breadcrumb_section.style.ref(self._resource_class_name, 'index') breadcrumb_section.write(f' / Identifier / {identifier.name}') identifier_doc.add_title_section(identifier.name) identifier_section = identifier_doc.add_new_section( identifier.name, context={'qualifier': f'{self.class_name}.'}, ) document_identifier( section=identifier_section, resource_name=self._resource_name, identifier_model=identifier, ) # Write identifiers in individual/nested files. # Path: <root>/reference/services/<service>/<resource_name>/<identifier_name>.rst identifiers_dir_path = os.path.join( self._root_docs_path, f'{self._service_name}', f'{self._resource_sub_path}', ) identifier_doc.write_to_file(identifiers_dir_path, identifier.name) if identifiers: self._add_overview_of_member_type(section, 'identifiers') def _add_attributes(self, section): service_model = self._resource.meta.client.meta.service_model attributes = {} if self._resource.meta.resource_model.shape: shape = service_model.shape_for( self._resource.meta.resource_model.shape ) attributes = self._resource.meta.resource_model.get_attributes( shape ) section = section.add_new_section('attributes') attribute_list = [] if attributes: add_resource_type_overview( section=section, resource_type='Attributes', description=( 'Attributes provide access' ' to the properties of a resource. Attributes are lazy-' 'loaded the first time one is accessed via the' ' :py:meth:`load` method.' 
), intro_link='identifiers_attributes_intro', ) self.member_map['attributes'] = attribute_list for attr_name in sorted(attributes): _, attr_shape = attributes[attr_name] attribute_list.append(attr_name) # Create a new DocumentStructure for each attribute and add contents. attribute_doc = DocumentStructure(attr_name, target='html') breadcrumb_section = attribute_doc.add_new_section('breadcrumb') breadcrumb_section.style.ref(self._resource_class_name, 'index') breadcrumb_section.write(f' / Attribute / {attr_name}') attribute_doc.add_title_section(attr_name) attribute_section = attribute_doc.add_new_section( attr_name, context={'qualifier': f'{self.class_name}.'}, ) document_attribute( section=attribute_section, service_name=self._service_name, resource_name=self._resource_name, attr_name=attr_name, event_emitter=self._resource.meta.client.meta.events, attr_model=attr_shape, ) # Write attributes in individual/nested files. # Path: <root>/reference/services/<service>/<resource_name>/<attribute_name>.rst attributes_dir_path = os.path.join( self._root_docs_path, f'{self._service_name}', f'{self._resource_sub_path}', ) attribute_doc.write_to_file(attributes_dir_path, attr_name) if attributes: self._add_overview_of_member_type(section, 'attributes') def _add_references(self, section): section = section.add_new_section('references') references = self._resource.meta.resource_model.references reference_list = [] if references: add_resource_type_overview( section=section, resource_type='References', description=( 'References are related resource instances that have ' 'a belongs-to relationship.' ), intro_link='references_intro', ) self.member_map['references'] = reference_list for reference in references: reference_list.append(reference.name) # Create a new DocumentStructure for each reference and add contents. 
reference_doc = DocumentStructure(reference.name, target='html') breadcrumb_section = reference_doc.add_new_section('breadcrumb') breadcrumb_section.style.ref(self._resource_class_name, 'index') breadcrumb_section.write(f' / Reference / {reference.name}') reference_doc.add_title_section(reference.name) reference_section = reference_doc.add_new_section( reference.name, context={'qualifier': f'{self.class_name}.'}, ) document_reference( section=reference_section, reference_model=reference, ) # Write references in individual/nested files. # Path: <root>/reference/services/<service>/<resource_name>/<reference_name>.rst references_dir_path = os.path.join( self._root_docs_path, f'{self._service_name}', f'{self._resource_sub_path}', ) reference_doc.write_to_file(references_dir_path, reference.name) if references: self._add_overview_of_member_type(section, 'references') def _add_actions(self, section): section = section.add_new_section('actions') actions = self._resource.meta.resource_model.actions if actions: documenter = ActionDocumenter(self._resource, self._root_docs_path) documenter.member_map = self.member_map documenter.document_actions(section) self._add_overview_of_member_type(section, 'actions') def _add_sub_resources(self, section): section = section.add_new_section('sub-resources') sub_resources = self._resource.meta.resource_model.subresources if sub_resources: documenter = SubResourceDocumenter( self._resource, self._root_docs_path ) documenter.member_map = self.member_map documenter.document_sub_resources(section) self._add_overview_of_member_type(section, 'sub-resources') def _add_collections(self, section): section = section.add_new_section('collections') collections = self._resource.meta.resource_model.collections if collections: documenter = CollectionDocumenter( self._resource, self._root_docs_path ) documenter.member_map = self.member_map documenter.document_collections(section) self._add_overview_of_member_type(section, 'collections') def 
_add_waiters(self, section): section = section.add_new_section('waiters') waiters = self._resource.meta.resource_model.waiters if waiters: service_waiter_model = self._botocore_session.get_waiter_model( self._service_name ) documenter = WaiterResourceDocumenter( self._resource, service_waiter_model, self._root_docs_path ) documenter.member_map = self.member_map documenter.document_resource_waiters(section) self._add_overview_of_member_type(section, 'waiters') def _add_resource_note(self, section): section = section.add_new_section('feature-freeze') section.style.start_note() section.write( "Before using anything on this page, please refer to the resources " ":doc:`user guide <../../../../guide/resources>` for the most recent " "guidance on using resources." ) section.style.end_note()
ResourceDocumenter
python
pydata__xarray
xarray/coding/variables.py
{ "start": 2082, "end": 8417 }
class ____(indexing.ExplicitlyIndexedNDArrayMixin): """Decode arrays on the fly from integer to boolean datatype This is useful for decoding boolean arrays from integer typed netCDF variables. >>> x = np.array([1, 0, 1, 1, 0], dtype="i1") >>> x.dtype dtype('int8') >>> BoolTypeArray(x).dtype dtype('bool') >>> indexer = indexing.BasicIndexer((slice(None),)) >>> BoolTypeArray(x)[indexer].dtype dtype('bool') """ __slots__ = ("array",) def __init__(self, array) -> None: self.array = indexing.as_indexable(array) @property def dtype(self) -> np.dtype: return np.dtype("bool") def _oindex_get(self, key): return type(self)(self.array.oindex[key]) def _vindex_get(self, key): return type(self)(self.array.vindex[key]) def __getitem__(self, key) -> Self: return type(self)(self.array[key]) def get_duck_array(self): return duck_array_ops.astype(self.array.get_duck_array(), dtype=self.dtype) def transpose(self, order): return type(self)(self.array.transpose(order)) def _apply_mask( data: np.ndarray, encoded_fill_values: list, decoded_fill_value: Any, dtype: np.typing.DTypeLike | None, ) -> np.ndarray: """Mask all matching values in a NumPy arrays.""" data = np.asarray(data, dtype=dtype) condition = False for fv in encoded_fill_values: condition |= data == fv return np.where(condition, decoded_fill_value, data) def _is_time_like(units): # test for time-like # return "datetime" for datetime-like # return "timedelta" for timedelta-like if units is None: return False time_strings = [ "days", "hours", "minutes", "seconds", "milliseconds", "microseconds", "nanoseconds", ] units = str(units) # to prevent detecting units like `days accumulated` as time-like # special casing for datetime-units and timedelta-units (GH-8269) if "since" in units: from xarray.coding.times import _unpack_netcdf_time_units try: _unpack_netcdf_time_units(units) except ValueError: return False return "datetime" else: return "timedelta" if any(tstr == units for tstr in time_strings) else False def 
_check_fill_values(attrs, name, dtype): """Check _FillValue and missing_value if available. Return dictionary with raw fill values and set with encoded fill values. Issue SerializationWarning if appropriate. """ raw_fill_dict = {} for attr in ("missing_value", "_FillValue"): pop_to(attrs, raw_fill_dict, attr, name=name) encoded_fill_values = set() for k in list(raw_fill_dict): v = raw_fill_dict[k] kfill = {fv for fv in np.ravel(v) if not pd.isnull(fv)} if not kfill and np.issubdtype(dtype, np.integer): warnings.warn( f"variable {name!r} has non-conforming {k!r} " f"{v!r} defined, dropping {k!r} entirely.", SerializationWarning, stacklevel=3, ) del raw_fill_dict[k] else: encoded_fill_values |= kfill if len(encoded_fill_values) > 1: warnings.warn( f"variable {name!r} has multiple fill values " f"{encoded_fill_values} defined, decoding all values to NaN.", SerializationWarning, stacklevel=3, ) return raw_fill_dict, encoded_fill_values def _convert_unsigned_fill_value( name: T_Name, data: Any, unsigned: str, raw_fill_value: Any, encoded_fill_values: set, ) -> Any: if data.dtype.kind == "i": if unsigned == "true": unsigned_dtype = np.dtype(f"u{data.dtype.itemsize}") transform = partial(np.asarray, dtype=unsigned_dtype) if raw_fill_value is not None: new_fill = np.array(raw_fill_value, dtype=data.dtype) encoded_fill_values.remove(raw_fill_value) # use view here to prevent OverflowError encoded_fill_values.add(new_fill.view(unsigned_dtype).item()) data = lazy_elemwise_func(data, transform, unsigned_dtype) elif data.dtype.kind == "u": if unsigned == "false": signed_dtype = np.dtype(f"i{data.dtype.itemsize}") transform = partial(np.asarray, dtype=signed_dtype) data = lazy_elemwise_func(data, transform, signed_dtype) if raw_fill_value is not None: new_fill = signed_dtype.type(raw_fill_value) encoded_fill_values.remove(raw_fill_value) encoded_fill_values.add(new_fill) else: warnings.warn( f"variable {name!r} has _Unsigned attribute but is not " "of integer type. 
Ignoring attribute.", SerializationWarning, stacklevel=3, ) return data def _encode_unsigned_fill_value( name: T_Name, fill_value: Any, encoded_dtype: np.dtype, ) -> Any: try: if hasattr(fill_value, "item"): # if numpy type, convert to python native integer to determine overflow # otherwise numpy unsigned ints will silently cast to the signed counterpart fill_value = fill_value.item() # passes if provided fill value fits in encoded on-disk type new_fill = encoded_dtype.type(fill_value) except OverflowError: encoded_kind_str = "signed" if encoded_dtype.kind == "i" else "unsigned" warnings.warn( f"variable {name!r} will be stored as {encoded_kind_str} integers " f"but _FillValue attribute can't be represented as a " f"{encoded_kind_str} integer.", SerializationWarning, stacklevel=3, ) # user probably provided the fill as the in-memory dtype, # convert to on-disk type to match CF standard orig_kind = "u" if encoded_dtype.kind == "i" else "i" orig_dtype = np.dtype(f"{orig_kind}{encoded_dtype.itemsize}") # use view here to prevent OverflowError new_fill = np.array(fill_value, dtype=orig_dtype).view(encoded_dtype).item() return new_fill
BoolTypeArray
python
getsentry__sentry
tests/sentry/explore/translation/test_discover_translation.py
{ "start": 371, "end": 15512 }
class ____(TestCase): def create_discover_query(self, name: str, query: dict, explore_query=None): discover_saved_query = DiscoverSavedQuery.objects.create( organization=self.org, created_by_id=self.user.id, name=name, version=2, query=query, date_created=before_now(minutes=10), date_updated=before_now(minutes=10), visits=1, last_visited=before_now(minutes=5), dataset=DiscoverSavedQueryTypes.TRANSACTION_LIKE, explore_query=explore_query, ) discover_saved_query.set_projects([self.project1.id, self.project2.id]) return discover_saved_query def setUp(self): super().setUp() self.user = self.create_user() self.org = self.create_organization(owner=self.user) self.project1 = self.create_project(organization=self.org) self.project2 = self.create_project(organization=self.org) self.env = self.create_environment(project=self.project1) self.existing_explore_query = { "query": "is_transaction:1", "range": "14d", "aggregateField": [ {"groupBy": "id"}, {"groupBy": "title"}, {"groupBy": "timestamp"}, {"yAxes": ["count()"], "chartType": 2}, ], "fields": ["id", "title", "timestamp"], "orderby": "-timestamp", "display": "default", "environment": [], } self.existing_explore_query_saved_query = ExploreSavedQuery.objects.create( id=12345, organization=self.org, created_by_id=self.user.id, name="Existing explore query", query=self.existing_explore_query, ) def test_translate_simple_discover_to_explore_query(self): self.simple_query = { "query": "event.type:transaction", "range": "14d", "yAxis": ["count()"], "fields": ["id", "title", "timestamp"], "orderby": "-timestamp", "display": "default", "environment": [], } self.simple_saved_query = self.create_discover_query("Simple query", self.simple_query) new_explore_query = translate_discover_query_to_explore_query(self.simple_saved_query) assert new_explore_query.organization == self.org assert new_explore_query.created_by_id == self.user.id assert new_explore_query.name == "Simple query" assert new_explore_query.dataset == 
ExploreSavedQueryDataset.SEGMENT_SPANS assert new_explore_query.is_multi_query is False assert new_explore_query.organization == self.org assert new_explore_query.created_by_id == self.user.id base_query = new_explore_query.query assert base_query["environment"] == [] assert base_query["range"] == "14d" query = base_query["query"][0] assert query["fields"] == ["id", "transaction", "timestamp"] assert query["query"] == "(is_transaction:1) AND is_transaction:1" assert query["mode"] == "samples" assert query["aggregateField"] == [ {"yAxes": ["count(span.duration)"], "chartType": 2}, ] assert query["aggregateOrderby"] is None assert query["orderby"] == "-timestamp" def test_translate_multiple_axis_discover_to_explore_query(self): self.multiple_axis_query = { "query": "", "range": "14d", "yAxis": ["count()", "percentile(transaction.duration,0.45)"], "fields": ["id", "title", "timestamp", "percentile(transaction.duration,0.45)"], "orderby": "-timestamp", "display": "default", "environment": [], } self.multiple_axis_saved_query = self.create_discover_query( "Multiple axis query", self.multiple_axis_query ) new_explore_query = translate_discover_query_to_explore_query( self.multiple_axis_saved_query ) assert new_explore_query.name == "Multiple axis query" query = new_explore_query.query["query"][0] assert query["fields"] == ["id", "transaction", "timestamp"] assert query["query"] == "is_transaction:1" assert query["mode"] == "samples" assert query["aggregateField"] == [ {"yAxes": ["p50(span.duration)"], "chartType": 2}, {"yAxes": ["count(span.duration)"], "chartType": 2}, ] assert query["aggregateOrderby"] is None assert query["orderby"] == "-timestamp" def test_translate_function_orderby_discover_to_explore_query(self): self.function_orderby_query = { "query": "", "range": "14d", "yAxis": ["count()", "percentile(transaction.duration,0.45)"], "fields": ["id", "title", "timestamp", "percentile(transaction.duration,0.45)"], "orderby": "-percentile_transaction_duration_0_45", 
"display": "default", "environment": [], } self.function_orderby_saved_query = self.create_discover_query( "Function orderby query", self.function_orderby_query ) new_explore_query = translate_discover_query_to_explore_query( self.function_orderby_saved_query ) assert new_explore_query.name == "Function orderby query" query = new_explore_query.query["query"][0] assert query["fields"] == ["id", "transaction", "timestamp"] assert query["query"] == "is_transaction:1" assert query["mode"] == "samples" assert query["aggregateField"] == [ {"yAxes": ["p50(span.duration)"], "chartType": 2}, {"yAxes": ["count(span.duration)"], "chartType": 2}, ] assert query["aggregateOrderby"] == "-p50(span.duration)" assert query["orderby"] is None def test_translate_filter_swap_discover_to_explore_query(self): self.filter_swap_query = { "query": "geo.country_code:CA AND geo.city:Toronto", "range": "14d", "yAxis": ["count()"], "fields": ["id", "timestamp"], "orderby": "-timestamp", "display": "bar", "environment": [], } self.filter_swap_saved_query = self.create_discover_query( "Filter swap query", self.filter_swap_query ) new_explore_query = translate_discover_query_to_explore_query(self.filter_swap_saved_query) assert new_explore_query.name == "Filter swap query" query = new_explore_query.query["query"][0] assert query["fields"] == ["id", "timestamp"] assert ( query["query"] == "(user.geo.country_code:CA AND user.geo.city:Toronto) AND is_transaction:1" ) assert query["mode"] == "samples" assert query["aggregateField"] == [ {"yAxes": ["count(span.duration)"], "chartType": 0}, ] assert query["aggregateOrderby"] is None assert query["orderby"] == "-timestamp" def test_translate_drop_swap_function_field_orderby_filter_discover_to_explore_query(self): self.drop_swap_function_field_orderby_filter_query = { "query": "platform.name:python AND count_miserable(users):>100", "range": "14d", "yAxis": ["apdex()", "count_miserable(users)", "max(measurements.cls)"], "fields": [ "id", "title", 
"http.url", "total.count", "apdex()", "count_miserable(users)", "max(measurements.cls)", "any(transaction.duration)", ], "orderby": "-count_miserable_users", "display": "top5", "environment": [], } self.drop_swap_function_field_orderby_filter_saved_query = self.create_discover_query( "Query with lots of drops+swaps", self.drop_swap_function_field_orderby_filter_query ) new_explore_query = translate_discover_query_to_explore_query( self.drop_swap_function_field_orderby_filter_saved_query ) assert new_explore_query.name == "Query with lots of drops+swaps" assert new_explore_query.changed_reason is not None assert new_explore_query.changed_reason["columns"] == [ "total.count", "count_miserable(users)", "any(transaction.duration)", ] assert new_explore_query.changed_reason["equations"] == [] assert new_explore_query.changed_reason["orderby"] == [ { "orderby": "-count_miserable(users)", "reason": ["count_miserable(users)"], } ] query = new_explore_query.query["query"][0] assert query["fields"] == ["id", "transaction", "request.url"] assert ( query["query"] == "(platform:python AND count_miserable(users):>100) AND is_transaction:1" ) assert query["mode"] == "aggregate" assert query["aggregateField"] == [ {"groupBy": "transaction"}, {"groupBy": "request.url"}, {"yAxes": ["equation|apdex(span.duration,300)"], "chartType": 2}, {"yAxes": ["max(measurements.cls)"], "chartType": 2}, ] assert query["aggregateOrderby"] is None assert query["orderby"] is None def test_translate_non_default_display_discover_to_explore_query(self): self.non_default_display_query = { "query": "", "range": "14d", "yAxis": ["count()"], "fields": ["id", "timestamp"], "orderby": "-timestamp", "display": "daily", "environment": [], } self.non_default_display_saved_query = self.create_discover_query( "Non default display query", self.non_default_display_query ) new_explore_query = translate_discover_query_to_explore_query( self.non_default_display_saved_query ) assert new_explore_query.name == "Non 
default display query" assert new_explore_query.query["interval"] == "1d" query = new_explore_query.query["query"][0] assert query["fields"] == ["id", "timestamp"] assert query["query"] == "is_transaction:1" assert query["mode"] == "samples" assert query["aggregateField"] == [ {"yAxes": ["count(span.duration)"], "chartType": 0}, ] assert query["aggregateOrderby"] is None assert query["orderby"] == "-timestamp" def test_translate_start_end_time_discover_to_explore_query(self): self.start_end_time_query = { "query": "", "start": "2025-01-01", "end": "2025-01-20", "yAxis": ["count()"], "fields": ["id", "timestamp"], "orderby": "-timestamp", "display": "default", "environment": [self.env.name], } self.start_end_time_saved_query = self.create_discover_query( "Start end time query", self.start_end_time_query ) new_explore_query = translate_discover_query_to_explore_query( self.start_end_time_saved_query ) assert new_explore_query.name == "Start end time query" assert new_explore_query.query["start"] == "2025-01-01" assert new_explore_query.query["end"] == "2025-01-20" assert new_explore_query.query["environment"] == [self.env.name] def test_translate_equation_indexed_orderby_discover_to_explore_query(self): self.equation_indexed_orderby_query = { "query": "", "range": "14d", "yAxis": ["count()"], "fields": ["id", "timestamp", "equation|count() + 5"], "orderby": "-equation[0]", "display": "default", "environment": [], } self.equation_indexed_orderby_saved_query = self.create_discover_query( "Equation indexed orderby query", self.equation_indexed_orderby_query ) new_explore_query = translate_discover_query_to_explore_query( self.equation_indexed_orderby_saved_query ) assert new_explore_query.name == "Equation indexed orderby query" query = new_explore_query.query["query"][0] assert query["aggregateOrderby"] == "-equation|count(span.duration) + 5" assert query["orderby"] is None def test_translate_discover_query_to_explore_query_with_existing_explore_query(self): 
self.existing_explore_discover_query = { "query": "event.type:transaction", "range": "14d", "yAxis": ["count()"], "fields": ["id", "title", "timestamp"], "orderby": "-timestamp", "display": "default", "environment": [], } self.existing_explore_discover_query_saved_query = self.create_discover_query( "Existing explore query", self.existing_explore_discover_query, self.existing_explore_query_saved_query, ) new_explore_query = translate_discover_query_to_explore_query( self.existing_explore_discover_query_saved_query ) assert new_explore_query.name == "Existing explore query" assert new_explore_query.id == self.existing_explore_query_saved_query.id query = new_explore_query.query["query"][0] assert query["fields"] == ["id", "transaction", "timestamp"] assert query["query"] == "(is_transaction:1) AND is_transaction:1" assert query["aggregateField"] == [ {"yAxes": ["count(span.duration)"], "chartType": 2}, ] assert query["aggregateOrderby"] is None assert query["orderby"] == "-timestamp" def test_translate_dicover_query_with_count_web_vitals_orderby(self): self.count_web_vitals_query = { "query": "", "range": "14d", "yAxis": ["count_web_vitals(measurements.lcp,good)"], "fields": ["title", "project", "timestamp", "count_web_vitals(measurements.lcp,good)"], "orderby": "-count_web_vitals_measurements_lcp_good", "display": "default", } self.count_web_vitals_saved_query = self.create_discover_query( "Count web vitals query", self.count_web_vitals_query ) new_explore_query = translate_discover_query_to_explore_query( self.count_web_vitals_saved_query ) assert new_explore_query.name == "Count web vitals query" query = new_explore_query.query["query"][0] assert query["fields"] == ["id", "transaction", "project", "timestamp"] assert query["query"] == "is_transaction:1" assert query["mode"] == "samples" assert query["aggregateOrderby"] is None assert query["orderby"] is None
DiscoverToExploreTranslationTest
python
pydantic__pydantic
tests/mypy/modules/plugin_success.py
{ "start": 1630, "end": 1696 }
class ____(BaseModel, frozen=True): x: int
KwargsNoMutationModel
python
numpy__numpy
numpy/_core/tests/test_numeric.py
{ "start": 83869, "end": 87568 }
class ____: @pytest.mark.parametrize( "bx,by,equal_nan,expected", _test_array_equal_parametrizations() ) def test_array_equal_equal_nan(self, bx, by, equal_nan, expected): """ This test array_equal for a few combinations: - are the two inputs the same object or not (same object may not be equal if contains NaNs) - Whether we should consider or not, NaNs, being equal. """ if equal_nan is None: res = np.array_equal(bx, by) else: res = np.array_equal(bx, by, equal_nan=equal_nan) assert_(res is expected) assert_(type(res) is bool) def test_array_equal_different_scalar_types(self): # https://github.com/numpy/numpy/issues/27271 a = np.array("foo") b = np.array(1) assert not np.array_equal(a, b) assert not np.array_equiv(a, b) def test_none_compares_elementwise(self): a = np.array([None, 1, None], dtype=object) assert_equal(a == None, [True, False, True]) # noqa: E711 assert_equal(a != None, [False, True, False]) # noqa: E711 a = np.ones(3) assert_equal(a == None, [False, False, False]) # noqa: E711 assert_equal(a != None, [True, True, True]) # noqa: E711 def test_array_equiv(self): res = np.array_equiv(np.array([1, 2]), np.array([1, 2])) assert_(res) assert_(type(res) is bool) res = np.array_equiv(np.array([1, 2]), np.array([1, 2, 3])) assert_(not res) assert_(type(res) is bool) res = np.array_equiv(np.array([1, 2]), np.array([3, 4])) assert_(not res) assert_(type(res) is bool) res = np.array_equiv(np.array([1, 2]), np.array([1, 3])) assert_(not res) assert_(type(res) is bool) res = np.array_equiv(np.array([1, 1]), np.array([1])) assert_(res) assert_(type(res) is bool) res = np.array_equiv(np.array([1, 1]), np.array([[1], [1]])) assert_(res) assert_(type(res) is bool) res = np.array_equiv(np.array([1, 2]), np.array([2])) assert_(not res) assert_(type(res) is bool) res = np.array_equiv(np.array([1, 2]), np.array([[1], [2]])) assert_(not res) assert_(type(res) is bool) res = np.array_equiv( np.array([1, 2]), np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), ) assert_(not res) 
assert_(type(res) is bool) @pytest.mark.parametrize("dtype", ["V0", "V3", "V10"]) def test_compare_unstructured_voids(self, dtype): zeros = np.zeros(3, dtype=dtype) assert_array_equal(zeros, zeros) assert not (zeros != zeros).any() if dtype == "V0": # Can't test != of actually different data return nonzeros = np.array([b"1", b"2", b"3"], dtype=dtype) assert not (zeros == nonzeros).any() assert (zeros != nonzeros).all() def assert_array_strict_equal(x, y): assert_array_equal(x, y) # Check flags, 32 bit arches typically don't provide 16 byte alignment if ((x.dtype.alignment <= 8 or np.intp().dtype.itemsize != 4) and sys.platform != 'win32'): assert_(x.flags == y.flags) else: assert_(x.flags.owndata == y.flags.owndata) assert_(x.flags.writeable == y.flags.writeable) assert_(x.flags.c_contiguous == y.flags.c_contiguous) assert_(x.flags.f_contiguous == y.flags.f_contiguous) assert_(x.flags.writebackifcopy == y.flags.writebackifcopy) # check endianness assert_(x.dtype.isnative == y.dtype.isnative)
TestArrayComparisons
python
huggingface__transformers
src/transformers/models/dpr/modeling_dpr.py
{ "start": 1236, "end": 1992 }
class ____(ModelOutput): r""" pooler_output (`torch.FloatTensor` of shape `(batch_size, embeddings_size)`): The DPR encoder outputs the *pooler_output* that corresponds to the context representation. Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer. This output is to be used to embed contexts for nearest neighbors queries with questions embeddings. """ pooler_output: torch.FloatTensor hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None @dataclass @auto_docstring( custom_intro=""" Class for outputs of [`DPRQuestionEncoder`]. """ )
DPRContextEncoderOutput
python
gevent__gevent
src/gevent/tests/test__select.py
{ "start": 3067, "end": 3738 }
class ____(greentest.TestCase): def test_int(self): sock = socket.socket() try: select.select([int(sock.fileno())], [], [], 0.001) finally: sock.close() def test_iterable(self): sock = socket.socket() def fileno_iter(): yield int(sock.fileno()) try: select.select(fileno_iter(), [], [], 0.001) finally: sock.close() def test_string(self): self.switch_expected = False self.assertRaises(TypeError, select.select, ['hello'], [], [], 0.001) @greentest.skipOnWindows("Things like os.close don't work on Windows")
TestSelectTypes
python
viewflow__viewflow
viewflow/views/base.py
{ "start": 732, "end": 1265 }
class ____(object): """ Mixin for FormView to infer View.fields definition from form Layout. """ form_class: Any = None @viewprop def layout(self): if self.form_class is not None and hasattr(self.form_class, "layout"): return self.form_class.layout @viewprop def fields(self) -> Any: if self.form_class is None: if self.layout is not None: return _collect_elements(self.layout) else: return "__all__"
FormLayoutMixin
python
langchain-ai__langchain
libs/langchain/langchain_classic/agents/structured_chat/output_parser.py
{ "start": 589, "end": 2201 }
class ____(AgentOutputParser): """Output parser for the structured chat agent.""" format_instructions: str = FORMAT_INSTRUCTIONS """Default formatting instructions""" pattern: Pattern = re.compile(r"```(?:json\s+)?(\W.*?)```", re.DOTALL) """Regex pattern to parse the output.""" @override def get_format_instructions(self) -> str: """Returns formatting instructions for the given output parser.""" return self.format_instructions @override def parse(self, text: str) -> AgentAction | AgentFinish: try: action_match = self.pattern.search(text) if action_match is not None: response = json.loads(action_match.group(1).strip(), strict=False) if isinstance(response, list): # gpt turbo frequently ignores the directive to emit a single action logger.warning("Got multiple action responses: %s", response) response = response[0] if response["action"] == "Final Answer": return AgentFinish({"output": response["action_input"]}, text) return AgentAction( response["action"], response.get("action_input", {}), text, ) return AgentFinish({"output": text}, text) except Exception as e: msg = f"Could not parse LLM output: {text}" raise OutputParserException(msg) from e @property def _type(self) -> str: return "structured_chat"
StructuredChatOutputParser
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 41309, "end": 41482 }
class ____(sgqlc.types.Enum): """ See source code for more info. """ __schema__ = graphql_schema __choices__ = ("NUMBER",)
TeamDiscussionCommentOrderField
python
dagster-io__dagster
python_modules/automation/automation_tests/dagster_docs_tests/test_path_converters.py
{ "start": 213, "end": 2393 }
class ____: def test_core_dagster_module(self): root = Path("/dagster") file_path = root / "python_modules" / "dagster" / "dagster" / "core" / "executor.py" result = dagster_path_converter(file_path, root) assert result == "dagster.core.executor" def test_core_dagster_init_module(self): root = Path("/dagster") file_path = root / "python_modules" / "dagster" / "dagster" / "core" / "__init__.py" result = dagster_path_converter(file_path, root) assert result == "dagster.core" def test_library_module(self): root = Path("/dagster") file_path = root / "python_modules" / "libraries" / "dagster-aws" / "dagster_aws" / "s3.py" result = dagster_path_converter(file_path, root) assert result == "dagster_aws.dagster_aws.s3" def test_library_init_module(self): root = Path("/dagster") file_path = ( root / "python_modules" / "libraries" / "dagster-snowflake" / "dagster_snowflake" / "__init__.py" ) result = dagster_path_converter(file_path, root) assert result == "dagster_snowflake.dagster_snowflake" def test_non_python_modules_path(self): root = Path("/dagster") file_path = root / "docs" / "content" / "example.py" result = dagster_path_converter(file_path, root) assert result is None def test_invalid_library_structure(self): root = Path("/dagster") file_path = root / "python_modules" / "libraries" / "incomplete.py" result = dagster_path_converter(file_path, root) assert result == "incomplete" def test_unknown_python_modules_structure(self): root = Path("/dagster") file_path = root / "python_modules" / "unknown" / "module.py" result = dagster_path_converter(file_path, root) assert result is None def test_file_outside_root(self): root = Path("/dagster") file_path = Path("/other") / "module.py" result = dagster_path_converter(file_path, root) assert result is None
TestDagsterPathConverter
python
run-llama__llama_index
llama-index-integrations/tools/llama-index-tools-scrapegraph/tests/test_integration.py
{ "start": 364, "end": 638 }
class ____(BaseModel): """Test schema for integration testing.""" title: str = Field(description="Page title") content: str = Field(description="Main content") links: List[str] = Field(description="Important links", default_factory=list)
IntegrationTestSchema