language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
anthropics__anthropic-sdk-python
src/anthropic/types/beta/beta_server_tool_usage.py
{ "start": 157, "end": 352 }
class ____(BaseModel): web_fetch_requests: int """The number of web fetch tool requests.""" web_search_requests: int """The number of web search tool requests."""
BetaServerToolUsage
python
django__django
tests/aggregation/tests.py
{ "start": 1097, "end": 1455 }
class ____(Now): template = "CURRENT_TIMESTAMP" output_field = DateTimeField() def as_sql(self, compiler, connection, **extra_context): if connection.features.test_now_utc_template: extra_context["template"] = connection.features.test_now_utc_template return super().as_sql(compiler, connection, **extra_context)
NowUTC
python
run-llama__llama_index
llama-index-integrations/vector_stores/llama-index-vector-stores-azurepostgresql/tests/common/test_connection.py
{ "start": 413, "end": 976 }
class ____(BaseModel): """A minimal mock cursor base model used for testing DB interactions. Attributes: broken (bool): If True, simulates a broken cursor that fails queries. last_query (str | sql.SQL | None): Stores the last executed query for inspection. response (dict | None): Value to return from fetchone() when appropriate. """ broken: bool = False last_query: str | sql.SQL | None = None response: dict | None = None model_config = ConfigDict( arbitrary_types_allowed=True, )
MockCursorBase
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/kwargsUnpack1.py
{ "start": 2072, "end": 2151 }
class ____(Protocol): def __call__(self, *, v1: int) -> None: ...
TDProtocol4
python
marshmallow-code__apispec
tests/test_core.py
{ "start": 6798, "end": 24569 }
class ____(RefsSchemaTestMixin): properties = { "id": {"type": "integer", "format": "int64"}, "name": {"type": "string", "example": "doggie"}, } def test_schema(self, spec): spec.components.schema("Pet", {"properties": self.properties}) schemas = get_schemas(spec) assert "Pet" in schemas assert schemas["Pet"]["properties"] == self.properties def test_schema_is_chainable(self, spec): spec.components.schema("Pet", {"properties": {}}).schema( "Plant", {"properties": {}} ) schemas = get_schemas(spec) assert "Pet" in schemas assert "Plant" in schemas def test_schema_description(self, spec): model_description = "An animal which lives with humans." spec.components.schema( "Pet", {"properties": self.properties, "description": model_description} ) schemas = get_schemas(spec) assert schemas["Pet"]["description"] == model_description def test_schema_stores_enum(self, spec): enum = ["name", "photoUrls"] spec.components.schema("Pet", {"properties": self.properties, "enum": enum}) schemas = get_schemas(spec) assert schemas["Pet"]["enum"] == enum def test_schema_discriminator(self, spec): spec.components.schema( "Pet", {"properties": self.properties, "discriminator": "name"} ) schemas = get_schemas(spec) assert schemas["Pet"]["discriminator"] == "name" def test_schema_duplicate_name(self, spec): spec.components.schema("Pet", {"properties": self.properties}) with pytest.raises( DuplicateComponentNameError, match='Another schema with name "Pet" is already registered.', ): spec.components.schema("Pet", properties=self.properties) def test_response(self, spec): response = {"description": "Pet not found"} spec.components.response("NotFound", response) responses = get_responses(spec) assert responses["NotFound"] == response def test_response_is_chainable(self, spec): spec.components.response("resp1").response("resp2") responses = get_responses(spec) assert "resp1" in responses assert "resp2" in responses def test_response_duplicate_name(self, spec): 
spec.components.response("test_response") with pytest.raises( DuplicateComponentNameError, match='Another response with name "test_response" is already registered.', ): spec.components.response("test_response") def test_parameter(self, spec): # Note: this is an OpenAPI v2 parameter header # but is does the job for the test even for OpenAPI v3 parameter = {"format": "int64", "type": "integer"} spec.components.parameter("PetId", "path", parameter) params = get_parameters(spec) assert params["PetId"] == { "format": "int64", "type": "integer", "in": "path", "name": "PetId", "required": True, } def test_parameter_is_chainable(self, spec): spec.components.parameter("param1", "path").parameter("param2", "path") params = get_parameters(spec) assert "param1" in params assert "param2" in params def test_parameter_duplicate_name(self, spec): spec.components.parameter("test_parameter", "path") with pytest.raises( DuplicateComponentNameError, match='Another parameter with name "test_parameter" is already registered.', ): spec.components.parameter("test_parameter", "path") # Referenced headers are only supported in OAS 3.x @pytest.mark.parametrize("spec", ("3.0.0",), indirect=True) def test_header(self, spec): header = {"schema": {"type": "string"}} spec.components.header("test_header", header.copy()) headers = get_headers(spec) assert headers["test_header"] == header # Referenced headers are only supported in OAS 3.x @pytest.mark.parametrize("spec", ("3.0.0",), indirect=True) def test_header_is_chainable(self, spec): header = {"schema": {"type": "string"}} spec.components.header("header1", header).header("header2", header) headers = get_headers(spec) assert "header1" in headers assert "header2" in headers # Referenced headers are only supported in OAS 3.x @pytest.mark.parametrize("spec", ("3.0.0",), indirect=True) def test_header_duplicate_name(self, spec): spec.components.header("test_header", {"schema": {"type": "string"}}) with pytest.raises( DuplicateComponentNameError, 
match='Another header with name "test_header" is already registered.', ): spec.components.header("test_header", {"schema": {"type": "integer"}}) # Referenced examples are only supported in OAS 3.x @pytest.mark.parametrize("spec", ("3.0.0",), indirect=True) def test_example(self, spec): spec.components.example("test_example", {"value": {"a": "b"}}) examples = get_examples(spec) assert examples["test_example"]["value"] == {"a": "b"} @pytest.mark.parametrize("spec", ("3.0.0",), indirect=True) def test_example_is_chainable(self, spec): spec.components.example("test_example_1", {}).example("test_example_2", {}) examples = get_examples(spec) assert "test_example_1" in examples assert "test_example_2" in examples @pytest.mark.parametrize("spec", ("3.0.0",), indirect=True) def test_example_duplicate_name(self, spec): spec.components.example("test_example", {}) with pytest.raises( DuplicateComponentNameError, match='Another example with name "test_example" is already registered.', ): spec.components.example("test_example", {}) def test_security_scheme(self, spec): sec_scheme = {"type": "apiKey", "in": "header", "name": "X-API-Key"} spec.components.security_scheme("ApiKeyAuth", sec_scheme) assert get_security_schemes(spec)["ApiKeyAuth"] == sec_scheme def test_security_scheme_is_chainable(self, spec): spec.components.security_scheme("sec_1", {}).security_scheme("sec_2", {}) security_schemes = get_security_schemes(spec) assert "sec_1" in security_schemes assert "sec_2" in security_schemes def test_security_scheme_duplicate_name(self, spec): sec_scheme_1 = {"type": "apiKey", "in": "header", "name": "X-API-Key"} sec_scheme_2 = {"type": "apiKey", "in": "header", "name": "X-API-Key-2"} spec.components.security_scheme("ApiKeyAuth", sec_scheme_1) with pytest.raises( DuplicateComponentNameError, match='Another security scheme with name "ApiKeyAuth" is already registered.', ): spec.components.security_scheme("ApiKeyAuth", sec_scheme_2) def test_to_yaml(self, spec): enum = ["name", 
"photoUrls"] spec.components.schema("Pet", properties=self.properties, enum=enum) assert spec.to_dict() == yaml.safe_load(spec.to_yaml()) def test_components_can_be_accessed_by_plugin_in_init_spec(self): class TestPlugin(BasePlugin): def init_spec(self, spec): spec.components.schema( "TestSchema", {"properties": {"key": {"type": "string"}}, "type": "object"}, ) spec = APISpec( "Test API", version="0.0.1", openapi_version="2.0", plugins=[TestPlugin()] ) assert get_schemas(spec) == { "TestSchema": {"properties": {"key": {"type": "string"}}, "type": "object"} } def test_components_resolve_refs_in_schema(self, spec): spec.components.schema("refs_schema", copy.deepcopy(self.REFS_SCHEMA)) self.assert_schema_refs(spec, get_schemas(spec)["refs_schema"]) def test_components_resolve_response_schema(self, spec): schema = {"schema": "PetSchema"} if spec.openapi_version.major >= 3: schema = {"content": {"application/json": schema}} spec.components.response("Response", schema) resp = get_responses(spec)["Response"] if spec.openapi_version.major < 3: schema = resp["schema"] else: schema = resp["content"]["application/json"]["schema"] assert schema == build_ref(spec, "schema", "PetSchema") # "headers" components section only exists in OAS 3 @pytest.mark.parametrize("spec", ("3.0.0",), indirect=True) def test_components_resolve_response_header(self, spec): response = {"headers": {"header_1": "Header_1"}} spec.components.response("Response", response) resp = get_responses(spec)["Response"] header_1 = resp["headers"]["header_1"] assert header_1 == build_ref(spec, "header", "Header_1") # "headers" components section only exists in OAS 3 @pytest.mark.parametrize("spec", ("3.0.0",), indirect=True) def test_components_resolve_response_header_schema(self, spec): response = {"headers": {"header_1": {"name": "Pet", "schema": "PetSchema"}}} spec.components.response("Response", response) resp = get_responses(spec)["Response"] header_1 = resp["headers"]["header_1"] assert header_1["schema"] == 
build_ref(spec, "schema", "PetSchema") # "headers" components section only exists in OAS 3 @pytest.mark.parametrize("spec", ("3.0.0",), indirect=True) def test_components_resolve_response_header_examples(self, spec): response = { "headers": { "header_1": {"name": "Pet", "examples": {"example_1": "Example_1"}} } } spec.components.response("Response", response) resp = get_responses(spec)["Response"] header_1 = resp["headers"]["header_1"] assert header_1["examples"]["example_1"] == build_ref( spec, "example", "Example_1" ) # "examples" components section only exists in OAS 3 @pytest.mark.parametrize("spec", ("3.0.0",), indirect=True) def test_components_resolve_response_examples(self, spec): response = { "content": {"application/json": {"examples": {"example_1": "Example_1"}}} } spec.components.response("Response", response) resp = get_responses(spec)["Response"] example_1 = resp["content"]["application/json"]["examples"]["example_1"] assert example_1 == build_ref(spec, "example", "Example_1") def test_components_resolve_refs_in_response_schema(self, spec): schema = copy.deepcopy(self.REFS_SCHEMA) if spec.openapi_version.major >= 3: response = {"content": {"application/json": {"schema": schema}}} else: response = {"schema": schema} spec.components.response("Response", response) resp = get_responses(spec)["Response"] if spec.openapi_version.major < 3: schema = resp["schema"] else: schema = resp["content"]["application/json"]["schema"] self.assert_schema_refs(spec, schema) # "headers" components section only exists in OAS 3 @pytest.mark.parametrize("spec", ("3.0.0",), indirect=True) def test_components_resolve_refs_in_response_header_schema(self, spec): header = {"schema": copy.deepcopy(self.REFS_SCHEMA)} response = {"headers": {"header": header}} spec.components.response("Response", response) resp = get_responses(spec)["Response"] self.assert_schema_refs(spec, resp["headers"]["header"]["schema"]) # "examples" components section only exists in OAS 3 
@pytest.mark.parametrize("spec", ("3.0.0",), indirect=True) def test_components_resolve_parameter_examples(self, spec): parameter = { "examples": {"example_1": "Example_1"}, } spec.components.parameter("param", "path", parameter) param = get_parameters(spec)["param"] example_1 = param["examples"]["example_1"] assert example_1 == build_ref(spec, "example", "Example_1") def test_components_resolve_parameter_schemas(self, spec): parameter = {"schema": "PetSchema"} spec.components.parameter("param", "path", parameter) param = get_parameters(spec)["param"] assert param["schema"] == build_ref(spec, "schema", "PetSchema") @pytest.mark.parametrize("spec", ("3.0.0",), indirect=True) def test_components_resolve_parameter_schemas_v3(self, spec): parameter = {"content": {"application/json": {"schema": "PetSchema"}}} spec.components.parameter("param", "path", parameter) param = get_parameters(spec)["param"] schema = param["content"]["application/json"]["schema"] assert schema == build_ref(spec, "schema", "PetSchema") def test_components_resolve_refs_in_parameter_schema(self, spec): parameter = {"schema": copy.deepcopy(self.REFS_SCHEMA)} spec.components.parameter("param", "path", parameter) self.assert_schema_refs(spec, get_parameters(spec)["param"]["schema"]) # "headers" components section only exists in OAS 3 @pytest.mark.parametrize("spec", ("3.0.0",), indirect=True) def test_components_resolve_header_schema(self, spec): header = {"name": "Pet", "schema": "PetSchema"} spec.components.header("header", header) header = get_headers(spec)["header"] assert header["schema"] == build_ref(spec, "schema", "PetSchema") # "headers" components section only exists in OAS 3 @pytest.mark.parametrize("spec", ("3.0.0",), indirect=True) def test_components_resolve_header_examples(self, spec): header = {"name": "Pet", "examples": {"example_1": "Example_1"}} spec.components.header("header", header) header = get_headers(spec)["header"] assert header["examples"]["example_1"] == build_ref( spec, 
"example", "Example_1" ) # "headers" components section only exists in OAS 3 @pytest.mark.parametrize("spec", ("3.0.0",), indirect=True) def test_components_resolve_refs_in_header_schema(self, spec): header = {"schema": copy.deepcopy(self.REFS_SCHEMA)} spec.components.header("header", header) self.assert_schema_refs(spec, get_headers(spec)["header"]["schema"]) def test_schema_lazy(self, spec): spec.components.schema("Pet_1", {"properties": self.properties}, lazy=False) spec.components.schema("Pet_2", {"properties": self.properties}, lazy=True) schemas = get_schemas(spec) assert "Pet_1" in schemas assert "Pet_2" not in schemas spec.components.schema("PetFriend", {"oneOf": ["Pet_1", "Pet_2"]}) schemas = get_schemas(spec) assert "Pet_2" in schemas assert schemas["Pet_2"]["properties"] == self.properties def test_response_lazy(self, spec): response_1 = {"description": "Response 1"} response_2 = {"description": "Response 2"} spec.components.response("Response_1", response_1, lazy=False) spec.components.response("Response_2", response_2, lazy=True) responses = get_responses(spec) assert "Response_1" in responses assert "Response_2" not in responses spec.path("/path", operations={"get": {"responses": {"200": "Response_2"}}}) responses = get_responses(spec) assert "Response_2" in responses def test_parameter_lazy(self, spec): parameter = {"format": "int64", "type": "integer"} spec.components.parameter("Param_1", "path", parameter, lazy=False) spec.components.parameter("Param_2", "path", parameter, lazy=True) params = get_parameters(spec) assert "Param_1" in params assert "Param_2" not in params spec.path("/path", operations={"get": {"parameters": ["Param_1", "Param_2"]}}) assert "Param_2" in params # Referenced headers are only supported in OAS 3.x @pytest.mark.parametrize("spec", ("3.0.0",), indirect=True) def test_header_lazy(self, spec): header = {"schema": {"type": "string"}} spec.components.header("Header_1", header, lazy=False) spec.components.header("Header_2", 
header, lazy=True) headers = get_headers(spec) assert "Header_1" in headers assert "Header_2" not in headers spec.path( "/path", operations={ "get": {"responses": {"200": {"headers": {"header_2": "Header_2"}}}} }, ) assert "Header_2" in headers # Referenced examples are only supported in OAS 3.x @pytest.mark.parametrize("spec", ("3.0.0",), indirect=True) def test_example_lazy(self, spec): spec.components.example("Example_1", {"value": {"a": "b"}}, lazy=False) spec.components.example("Example_2", {"value": {"a": "b"}}, lazy=True) examples = get_examples(spec) assert "Example_1" in examples assert "Example_2" not in examples spec.path( "/path", operations={ "get": { "responses": { "200": { "content": { "application/json": { "examples": {"example_2": "Example_2"} } } } } } }, ) assert "Example_2" in examples
TestComponents
python
huggingface__transformers
src/transformers/models/perceiver/modeling_perceiver.py
{ "start": 37214, "end": 42407 }
class ____(PerceiverPreTrainedModel): def __init__(self, config): super().__init__(config) trainable_position_encoding_kwargs_decoder = {"num_channels": config.d_latents, "index_dims": 1} self.num_labels = config.num_labels self.perceiver = PerceiverModel( config, input_preprocessor=PerceiverTextPreprocessor(config), decoder=PerceiverClassificationDecoder( config, num_channels=config.d_latents, trainable_position_encoding_kwargs=trainable_position_encoding_kwargs_decoder, use_query_residual=True, ), ) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, inputs: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, labels: Optional[torch.Tensor] = None, return_dict: Optional[bool] = None, input_ids: Optional[torch.Tensor] = None, ) -> Union[tuple, PerceiverClassifierOutput]: r""" inputs (`torch.FloatTensor`): Inputs to the perceiver. Can be anything: images, text, audio, video, etc. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
Examples: ```python >>> from transformers import AutoTokenizer, PerceiverForSequenceClassification >>> tokenizer = AutoTokenizer.from_pretrained("deepmind/language-perceiver") >>> model = PerceiverForSequenceClassification.from_pretrained("deepmind/language-perceiver") >>> text = "hello world" >>> inputs = tokenizer(text, return_tensors="pt").input_ids >>> outputs = model(inputs=inputs) >>> logits = outputs.logits >>> list(logits.shape) [1, 2] ```""" if inputs is not None and input_ids is not None: raise ValueError("You cannot use both `inputs` and `input_ids`") elif inputs is None and input_ids is not None: inputs = input_ids return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.perceiver( inputs=inputs, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) logits = outputs.logits if return_dict else outputs[0] loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return PerceiverClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, 
attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) @auto_docstring( custom_intro=""" Example use of Perceiver for image classification, for tasks such as ImageNet. This model uses learned position embeddings. In other words, this model is not given any privileged information about the structure of images. As shown in the paper, this model can achieve a top-1 accuracy of 72.7 on ImageNet. [`PerceiverForImageClassificationLearned`] uses [`~models.perceiver.modeling_perceiver.PerceiverImagePreprocessor`] (with `prep_type="conv1x1"`) to preprocess the input images, and [`~models.perceiver.modeling_perceiver.PerceiverClassificationDecoder`] to decode the latent representation of [`PerceiverModel`] into classification logits. """ )
PerceiverForSequenceClassification
python
getsentry__sentry
src/sentry/workflow_engine/endpoints/validators/base/data_condition.py
{ "start": 748, "end": 1382 }
class ____( CamelSnakeSerializer, Generic[ComparisonType, ConditionResult], ): id = serializers.IntegerField(required=False) type = serializers.ChoiceField(choices=[(t.value, t.value) for t in Condition]) comparison = serializers.JSONField(required=True) condition_result = serializers.JSONField(required=True) condition_group_id = serializers.IntegerField(required=False) @abstractmethod def validate_comparison(self, value: Any) -> ComparisonType: pass @abstractmethod def validate_condition_result(self, value: Any) -> ConditionResult: pass
AbstractDataConditionValidator
python
tensorflow__tensorflow
tensorflow/python/keras/layers/convolutional.py
{ "start": 2060, "end": 15820 }
class ____(Layer): """Abstract N-D convolution layer (private, used as implementation base). This layer creates a convolution kernel that is convolved (actually cross-correlated) with the layer input to produce a tensor of outputs. If `use_bias` is True (and a `bias_initializer` is provided), a bias vector is created and added to the outputs. Finally, if `activation` is not `None`, it is applied to the outputs as well. Note: layer attributes cannot be modified after the layer has been called once (except the `trainable` attribute). Args: rank: An integer, the rank of the convolution, e.g. "2" for 2D convolution. filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). Could be "None", eg in the case of depth wise convolution. kernel_size: An integer or tuple/list of n integers, specifying the length of the convolution window. strides: An integer or tuple/list of n integers, specifying the stride length of the convolution. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: One of `"valid"`, `"same"`, or `"causal"` (case-insensitive). `"valid"` means no padding. `"same"` results in padding with zeros evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. `"causal"` results in causal (dilated) convolutions, e.g. `output[t]` does not depend on `input[t+1:]`. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch_size, ..., channels)` while `channels_first` corresponds to inputs with shape `(batch_size, channels, ...)`. Note: `channels_first` is only available on GPUs. dilation_rate: An integer or tuple/list of n integers, specifying the dilation rate to use for dilated convolution. 
Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any `strides` value != 1. groups: A positive integer specifying the number of groups in which the input is split along the channel axis. Each group is convolved separately with `filters / groups` filters. The output is the concatenation of all the `groups` results along the channel axis. Input channels and `filters` must both be divisible by `groups`. activation: Activation function to use. If you don't specify anything, no activation is applied. use_bias: Boolean, whether the layer uses a bias. kernel_initializer: An initializer for the convolution kernel. If None, the default initializer (glorot_uniform) will be used. bias_initializer: An initializer for the bias vector. If None, the default initializer (zeros) will be used. kernel_regularizer: Optional regularizer for the convolution kernel. bias_regularizer: Optional regularizer for the bias vector. activity_regularizer: Optional regularizer function for the output. kernel_constraint: Optional projection function to be applied to the kernel after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer`. 
""" def __init__(self, rank, filters, kernel_size, strides=1, padding='valid', data_format=None, dilation_rate=1, groups=1, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, trainable=True, name=None, conv_op=None, **kwargs): super(Conv, self).__init__( trainable=trainable, name=name, activity_regularizer=regularizers.get(activity_regularizer), **kwargs) self.rank = rank if isinstance(filters, float): filters = int(filters) if filters is not None and filters < 0: raise ValueError(f'Received a negative value for `filters`.' f'Was expecting a positive value, got {filters}.') self.filters = filters self.groups = groups or 1 self.kernel_size = conv_utils.normalize_tuple( kernel_size, rank, 'kernel_size') self.strides = conv_utils.normalize_tuple(strides, rank, 'strides') self.padding = conv_utils.normalize_padding(padding) self.data_format = conv_utils.normalize_data_format(data_format) self.dilation_rate = conv_utils.normalize_tuple( dilation_rate, rank, 'dilation_rate') self.activation = activations.get(activation) self.use_bias = use_bias self.kernel_initializer = initializers.get(kernel_initializer) self.bias_initializer = initializers.get(bias_initializer) self.kernel_regularizer = regularizers.get(kernel_regularizer) self.bias_regularizer = regularizers.get(bias_regularizer) self.kernel_constraint = constraints.get(kernel_constraint) self.bias_constraint = constraints.get(bias_constraint) self.input_spec = InputSpec(min_ndim=self.rank + 2) self._validate_init() self._is_causal = self.padding == 'causal' self._channels_first = self.data_format == 'channels_first' self._tf_data_format = conv_utils.convert_data_format( self.data_format, self.rank + 2) def _validate_init(self): if self.filters is not None and self.filters % self.groups != 0: raise ValueError( 'The number of filters must be evenly 
divisible by the number of ' 'groups. Received: groups={}, filters={}'.format( self.groups, self.filters)) if not all(self.kernel_size): raise ValueError('The argument `kernel_size` cannot contain 0(s). ' 'Received: %s' % (self.kernel_size,)) if not all(self.strides): raise ValueError('The argument `strides` cannot contains 0(s). ' 'Received: %s' % (self.strides,)) if (self.padding == 'causal' and not isinstance(self, (Conv1D, SeparableConv1D))): raise ValueError('Causal padding is only supported for `Conv1D`' 'and `SeparableConv1D`.') def build(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape) input_channel = self._get_input_channel(input_shape) if input_channel % self.groups != 0: raise ValueError( 'The number of input channels must be evenly divisible by the number ' 'of groups. Received groups={}, but the input has {} channels ' '(full input shape is {}).'.format(self.groups, input_channel, input_shape)) kernel_shape = self.kernel_size + (input_channel // self.groups, self.filters) self.kernel = self.add_weight( name='kernel', shape=kernel_shape, initializer=self.kernel_initializer, regularizer=self.kernel_regularizer, constraint=self.kernel_constraint, trainable=True, dtype=self.dtype) if self.use_bias: self.bias = self.add_weight( name='bias', shape=(self.filters,), initializer=self.bias_initializer, regularizer=self.bias_regularizer, constraint=self.bias_constraint, trainable=True, dtype=self.dtype) else: self.bias = None channel_axis = self._get_channel_axis() self.input_spec = InputSpec(min_ndim=self.rank + 2, axes={channel_axis: input_channel}) # Convert Keras formats to TF native formats. if self.padding == 'causal': tf_padding = 'VALID' # Causal padding handled in `call`. 
elif isinstance(self.padding, str): tf_padding = self.padding.upper() else: tf_padding = self.padding tf_dilations = list(self.dilation_rate) tf_strides = list(self.strides) tf_op_name = self.__class__.__name__ if tf_op_name == 'Conv1D': tf_op_name = 'conv1d' # Backwards compat. self._convolution_op = functools.partial( nn_ops.convolution_v2, strides=tf_strides, padding=tf_padding, dilations=tf_dilations, data_format=self._tf_data_format, name=tf_op_name) self.built = True def call(self, inputs): input_shape = inputs.shape if self._is_causal: # Apply causal padding to inputs for Conv1D. inputs = array_ops.pad(inputs, self._compute_causal_padding(inputs)) outputs = self._convolution_op(inputs, self.kernel) if self.use_bias: output_rank = outputs.shape.rank if self.rank == 1 and self._channels_first: # nn.bias_add does not accept a 1D input tensor. bias = array_ops.reshape(self.bias, (1, self.filters, 1)) outputs += bias else: # Handle multiple batch dimensions. if output_rank is not None and output_rank > 2 + self.rank: def _apply_fn(o): return nn.bias_add(o, self.bias, data_format=self._tf_data_format) outputs = conv_utils.squeeze_batch_dims( outputs, _apply_fn, inner_rank=self.rank + 1) else: outputs = nn.bias_add( outputs, self.bias, data_format=self._tf_data_format) if not context.executing_eagerly(): # Infer the static output shape: out_shape = self.compute_output_shape(input_shape) outputs.set_shape(out_shape) if self.activation is not None: return self.activation(outputs) return outputs def _spatial_output_shape(self, spatial_input_shape): return [ conv_utils.conv_output_length( length, self.kernel_size[i], padding=self.padding, stride=self.strides[i], dilation=self.dilation_rate[i]) for i, length in enumerate(spatial_input_shape) ] def compute_output_shape(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape).as_list() batch_rank = len(input_shape) - self.rank - 1 if self.data_format == 'channels_last': return tensor_shape.TensorShape( 
input_shape[:batch_rank] + self._spatial_output_shape(input_shape[batch_rank:-1]) + [self.filters]) else: return tensor_shape.TensorShape( input_shape[:batch_rank] + [self.filters] + self._spatial_output_shape(input_shape[batch_rank + 1:])) def _recreate_conv_op(self, inputs): # pylint: disable=unused-argument return False def get_config(self): config = { 'filters': self.filters, 'kernel_size': self.kernel_size, 'strides': self.strides, 'padding': self.padding, 'data_format': self.data_format, 'dilation_rate': self.dilation_rate, 'groups': self.groups, 'activation': activations.serialize(self.activation), 'use_bias': self.use_bias, 'kernel_initializer': initializers.serialize(self.kernel_initializer), 'bias_initializer': initializers.serialize(self.bias_initializer), 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer), 'bias_regularizer': regularizers.serialize(self.bias_regularizer), 'activity_regularizer': regularizers.serialize(self.activity_regularizer), 'kernel_constraint': constraints.serialize(self.kernel_constraint), 'bias_constraint': constraints.serialize(self.bias_constraint) } base_config = super(Conv, self).get_config() return dict(list(base_config.items()) + list(config.items())) def _compute_causal_padding(self, inputs): """Calculates padding for 'causal' option for 1-d conv layers.""" left_pad = self.dilation_rate[0] * (self.kernel_size[0] - 1) if getattr(inputs.shape, 'ndims', None) is None: batch_rank = 1 else: batch_rank = len(inputs.shape) - 2 if self.data_format == 'channels_last': causal_padding = [[0, 0]] * batch_rank + [[left_pad, 0], [0, 0]] else: causal_padding = [[0, 0]] * batch_rank + [[0, 0], [left_pad, 0]] return causal_padding def _get_channel_axis(self): if self.data_format == 'channels_first': return -1 - self.rank else: return -1 def _get_input_channel(self, input_shape): channel_axis = self._get_channel_axis() if input_shape.dims[channel_axis].value is None: raise ValueError('The channel dimension of the inputs ' 
'should be defined. Found `None`.') return int(input_shape[channel_axis]) def _get_padding_op(self): if self.padding == 'causal': op_padding = 'valid' else: op_padding = self.padding if not isinstance(op_padding, (list, tuple)): op_padding = op_padding.upper() return op_padding
Conv
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_protect04.py
{ "start": 315, "end": 1098 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("protect04.xlsx") def test_create_file(self): """Test the a simple XlsxWriter file with worksheet protection.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() unlocked = workbook.add_format({"locked": 0, "hidden": 0}) hidden = workbook.add_format({"locked": 0, "hidden": 1}) worksheet.protect() worksheet.unprotect_range("A1") worksheet.write("A1", 1) worksheet.write("A2", 2, unlocked) worksheet.write("A3", 3, hidden) workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
networkx__networkx
networkx/algorithms/link_analysis/tests/test_pagerank.py
{ "start": 6233, "end": 7282 }
class ____(TestPageRank): def test_scipy_pagerank(self): G = self.G p = _pagerank_scipy(G, alpha=0.9, tol=1.0e-08) for n in G: assert p[n] == pytest.approx(G.pagerank[n], abs=1e-4) personalize = {n: random.random() for n in G} p = _pagerank_scipy(G, alpha=0.9, tol=1.0e-08, personalization=personalize) nstart = {n: random.random() for n in G} p = _pagerank_scipy(G, alpha=0.9, tol=1.0e-08, nstart=nstart) for n in G: assert p[n] == pytest.approx(G.pagerank[n], abs=1e-4) def test_scipy_pagerank_max_iter(self): with pytest.raises(nx.PowerIterationFailedConvergence): _pagerank_scipy(self.G, max_iter=0) def test_dangling_scipy_pagerank(self): pr = _pagerank_scipy(self.G, dangling=self.dangling_edges) for n in self.G: assert pr[n] == pytest.approx(self.G.dangling_pagerank[n], abs=1e-4) def test_empty_scipy(self): G = nx.Graph() assert _pagerank_scipy(G) == {}
TestPageRankScipy
python
Lightning-AI__lightning
examples/pytorch/domain_templates/computer_vision_fine_tuning.py
{ "start": 6046, "end": 9565 }
class ____(LightningModule): def __init__( self, backbone: str = "resnet50", train_bn: bool = False, milestones: tuple = (2, 4), batch_size: int = 32, lr: float = 1e-3, lr_scheduler_gamma: float = 1e-1, num_workers: int = 6, **kwargs, ) -> None: """TransferLearningModel. Args: backbone: Name (as in ``torchvision.models``) of the feature extractor train_bn: Whether the BatchNorm layers should be trainable milestones: List of two epochs milestones lr: Initial learning rate lr_scheduler_gamma: Factor by which the learning rate is reduced at each milestone """ super().__init__() self.backbone = backbone self.train_bn = train_bn self.milestones = milestones self.batch_size = batch_size self.lr = lr self.lr_scheduler_gamma = lr_scheduler_gamma self.num_workers = num_workers self.__build_model() self.train_acc = Accuracy(task="binary") self.valid_acc = Accuracy(task="binary") self.save_hyperparameters() def __build_model(self): """Define model layers & loss.""" # 1. Load pre-trained network: backbone = get_torchvision_model(self.backbone, weights="DEFAULT") _layers = list(backbone.children())[:-1] self.feature_extractor = nn.Sequential(*_layers) # 2. Classifier: _fc_layers = [nn.Linear(2048, 256), nn.ReLU(), nn.Linear(256, 32), nn.Linear(32, 1)] self.fc = nn.Sequential(*_fc_layers) # 3. Loss: self.loss_func = F.binary_cross_entropy_with_logits def forward(self, x): """Forward pass. Returns logits. """ # 1. Feature extraction: x = self.feature_extractor(x) x = x.squeeze(-1).squeeze(-1) # 2. Classifier (returns logits): return self.fc(x) def loss(self, logits, labels): return self.loss_func(input=logits, target=labels) def training_step(self, batch, batch_idx): # 1. Forward pass: x, y = batch y_logits = self.forward(x) y_scores = torch.sigmoid(y_logits) y_true = y.view((-1, 1)).type_as(x) # 2. Compute loss train_loss = self.loss(y_logits, y_true) # 3. 
Compute accuracy: self.log("train_acc", self.train_acc(y_scores, y_true.int()), prog_bar=True) return train_loss def validation_step(self, batch, batch_idx): # 1. Forward pass: x, y = batch y_logits = self.forward(x) y_scores = torch.sigmoid(y_logits) y_true = y.view((-1, 1)).type_as(x) # 2. Compute loss self.log("val_loss", self.loss(y_logits, y_true), prog_bar=True) # 3. Compute accuracy: self.log("val_acc", self.valid_acc(y_scores, y_true.int()), prog_bar=True) def configure_optimizers(self): parameters = list(self.parameters()) trainable_parameters = list(filter(lambda p: p.requires_grad, parameters)) rank_zero_info( f"The model will start training with only {len(trainable_parameters)} " f"trainable parameters out of {len(parameters)}." ) optimizer = optim.Adam(trainable_parameters, lr=self.lr) scheduler = MultiStepLR(optimizer, milestones=self.milestones, gamma=self.lr_scheduler_gamma) return [optimizer], [scheduler]
TransferLearningModel
python
facebook__pyre-check
source/interprocedural_analyses/taint/test/integration/attributes.py
{ "start": 2149, "end": 2411 }
class ____: dictionary = {"text": "modelled as tainted", "other": "benign"} def test_issue_with_text_key_of_dictionary(c: C): _test_sink(c.dictionary["text"]) def test_no_issue_with_other_key_of_dictionary(c: C): _test_sink(c.dictionary["other"])
C
python
spyder-ide__spyder
spyder/utils/installers.py
{ "start": 1706, "end": 2147 }
class ____(SpyderInstallerError): """Error for PyLSP issues""" def _msg(self, msg): files = glob.glob(os.path.join(get_conf_path('lsp_logs'), '*.log')) cat = '' for file in files: cat += f'{file}\n' with open(file, 'r') as f: cat += textwrap.indent(f.read(), ' ') msg = f'PyLSP Error: {msg}\n' + textwrap.indent(cat, ' ') return msg
InstallerPylspError
python
tensorflow__tensorflow
tensorflow/python/checkpoint/saveable_compat_test.py
{ "start": 1349, "end": 2892 }
class ____(test.TestCase): def test_lookup_table_compatibility(self): saveable_compat.force_checkpoint_conversion(False) table_module = generate_checkpoint.TableModule() ckpt = checkpoint.Checkpoint(table_module) checkpoint_directory = self.get_temp_dir() checkpoint_path = os.path.join(checkpoint_directory, "ckpt") ckpt.write(checkpoint_path) # Ensure that the checkpoint metadata and keys are the same. legacy_metadata = checkpoint.object_metadata(_LEGACY_TABLE_CHECKPOINT_PATH) metadata = checkpoint.object_metadata(checkpoint_path) def _get_table_node(object_metadata): for child in object_metadata.nodes[0].children: if child.local_name == "lookup_table": return object_metadata.nodes[child.node_id] table_proto = _get_table_node(metadata) legacy_table_proto = _get_table_node(legacy_metadata) self.assertAllEqual( [table_proto.attributes[0].name, table_proto.attributes[0].checkpoint_key], [legacy_table_proto.attributes[0].name, legacy_table_proto.attributes[0].checkpoint_key]) legacy_reader = checkpoint_utils.load_checkpoint( _LEGACY_TABLE_CHECKPOINT_PATH) reader = checkpoint_utils.load_checkpoint(checkpoint_path) self.assertEqual( legacy_reader.get_variable_to_shape_map().keys(), reader.get_variable_to_shape_map().keys()) # Ensure that previous checkpoint can be loaded into current table. ckpt.read(_LEGACY_TABLE_CHECKPOINT_PATH).assert_consumed()
SaveableCompatTest
python
getsentry__sentry
tests/sentry/snuba/test_tasks.py
{ "start": 19645, "end": 45741 }
class ____(TestCase): aggregate_mappings: dict[SnubaQuery.Type, dict[Dataset, dict[str, Any]]] = { SnubaQuery.Type.ERROR: { Dataset.Events: { "count_unique(user)": lambda org_id: [ Function( function="uniq", parameters=[ Column( name="tags[sentry:user]", entity=Entity(Dataset.Events.value, alias=Dataset.Events.value), ) ], alias="count_unique_user", ) ] }, }, SnubaQuery.Type.PERFORMANCE: { Dataset.Transactions: { "count_unique(user)": lambda org_id, **kwargs: [ Function( function="uniq", parameters=[Column(name="user")], alias="count_unique_user", ) ], "percentile(transaction.duration,.95)": lambda org_id, **kwargs: [ Function( "quantile(0.95)", parameters=[Column(name="duration")], alias="percentile_transaction_duration__95", ) ], "p95()": lambda org_id, **kwargs: [ Function( "quantile(0.95)", parameters=[Column(name="duration")], alias="p95", ) ], }, Dataset.Metrics: { "count_unique(user)": lambda org_id, metric_id, **kwargs: [ Function( function="uniqIf", parameters=[ Column(name="value"), Function( function="equals", parameters=[Column(name="metric_id"), metric_id], ), ], alias="count_unique_user", ) ], "percentile(transaction.duration,.95)": lambda org_id, metric_id, **kwargs: [ Function( "arrayElement", parameters=[ Function( "quantilesIf(0.95)", parameters=[ Column("value"), Function( "equals", parameters=[Column("metric_id"), metric_id], ), ], ), 1, ], alias="percentile_transaction_duration__95", ) ], "p95()": lambda org_id, metric_id, **kwargs: [ Function( "arrayElement", parameters=[ Function( "quantilesIf(0.95)", parameters=[ Column(name="value"), Function( "equals", parameters=[ Column(name="metric_id"), metric_id, ], ), ], ), 1, ], alias="p95", ) ], }, }, SnubaQuery.Type.CRASH_RATE: { Dataset.Sessions: { "percentage(sessions_crashed, sessions) as _crash_rate_alert_aggregate": lambda org_id, **kwargs: [ Function( function="if", parameters=[ Function( function="greater", parameters=[Column(name="sessions"), 0], ), Function( function="divide", 
parameters=[ Column(name="sessions_crashed"), Column(name="sessions"), ], ), None, ], alias="_crash_rate_alert_aggregate", ) ], "percentage(users_crashed, users) as _crash_rate_alert_aggregate": lambda org_id, **kwargs: [ Function( function="if", parameters=[ Function(function="greater", parameters=[Column(name="users"), 0]), Function( function="divide", parameters=[ Column(name="users_crashed"), Column(name="users"), ], ), None, ], alias="_crash_rate_alert_aggregate", ) ], }, Dataset.Metrics: { "percentage(sessions_crashed, sessions) as _crash_rate_alert_aggregate": lambda org_id, metric_mri, **kwargs: [ Function( function="sumIf", parameters=[ Column(name="value"), Function( function="and", parameters=[ Function( function="equals", parameters=[ Column(name="metric_id"), resolve_tag_value( UseCaseKey.RELEASE_HEALTH, org_id, metric_mri, ), ], alias=None, ), Function( function="equals", parameters=[ Column( name=resolve_tag_key( UseCaseKey.RELEASE_HEALTH, org_id, "session.status", ) ), resolve_tag_value( UseCaseKey.RELEASE_HEALTH, org_id, "init", ), ], ), ], ), ], alias="count", ), Function( function="sumIf", parameters=[ Column(name="value"), Function( function="and", parameters=[ Function( function="equals", parameters=[ Column(name="metric_id"), resolve_tag_value( UseCaseKey.RELEASE_HEALTH, org_id, metric_mri, ), ], alias=None, ), Function( function="equals", parameters=[ Column( name=resolve_tag_key( UseCaseKey.RELEASE_HEALTH, org_id, "session.status", ) ), resolve_tag_value( UseCaseKey.RELEASE_HEALTH, org_id, "crashed", ), ], ), ], ), ], alias="crashed", ), ], "percentage(users_crashed, users) AS _crash_rate_alert_aggregate": lambda org_id, metric_mri, **kwargs: [ Function( function="uniqIf", parameters=[ Column(name="value"), Function( function="equals", parameters=[ Column(name="metric_id"), resolve_tag_value( UseCaseKey.RELEASE_HEALTH, org_id, metric_mri ), ], alias=None, ), ], alias="count", ), Function( function="uniqIf", parameters=[ Column(name="value"), 
Function( function="and", parameters=[ Function( function="equals", parameters=[ Column(name="metric_id"), resolve_tag_value( UseCaseKey.RELEASE_HEALTH, org_id, metric_mri, ), ], alias=None, ), Function( function="equals", parameters=[ Column( name=resolve_tag_key( UseCaseKey.RELEASE_HEALTH, org_id, "session.status", ) ), resolve_tag_value( UseCaseKey.RELEASE_HEALTH, org_id, "crashed", ), ], ), ], ), ], alias="crashed", ), ], }, }, } aggregate_mappings_fallback = { "count()": lambda org_id, **kwargs: [Function("count", parameters=[], alias="count")], } def run_test( self, query_type, dataset, aggregate, query, expected_conditions, entity_extra_fields=None, environment=None, granularity=None, aggregate_kwargs=None, # This flag is used to expect None clauses instead of [], it has been done in order to account for how the # metrics layer generates snql. use_none_clauses=False, expected_match=None, ): aggregate_kwargs = aggregate_kwargs if aggregate_kwargs else {} time_window = 3600 entity_subscription = get_entity_subscription( query_type=query_type, dataset=dataset, aggregate=aggregate, time_window=time_window, extra_fields=entity_extra_fields, ) query_builder = entity_subscription.build_query_builder( query=query, project_ids=[self.project.id], environment=environment, params={ "organization_id": self.organization.id, "project_id": [self.project.id], }, ) snql_query = query_builder.get_snql_query() select = self.string_aggregate_to_snql(query_type, dataset, aggregate, aggregate_kwargs) if dataset == Dataset.Sessions: col_name = "sessions" if "sessions" in aggregate else "users" select.insert( 0, Function( function="identity", parameters=[Column(name=col_name)], alias="_total_count", ), ) # Select order seems to be unstable, so just arbitrarily sort by name, alias so that it's consistent snql_query.query.select.sort(key=lambda q: (q.function, q.alias)) if expected_match is None: entity_name = get_entity_key_from_query_builder(query_builder).value entity_args = 
{"name": entity_name} if dataset == Dataset.Events: entity_args["alias"] = entity_name expected_match = Entity(**entity_args) expected_query = Query( match=expected_match, select=select, where=expected_conditions, groupby=None if use_none_clauses else [], having=[], orderby=None if use_none_clauses else [], ) if granularity is not None: expected_query = expected_query.set_granularity(granularity) assert snql_query.query == expected_query def string_aggregate_to_snql(self, query_type, dataset, aggregate, aggregate_kwargs): aggregate_builder_func = self.aggregate_mappings[query_type][dataset].get( aggregate, self.aggregate_mappings_fallback.get(aggregate, lambda org_id, **kwargs: []), ) return sorted( aggregate_builder_func(self.organization.id, **aggregate_kwargs), key=lambda val: (val.function, val.alias), ) def test_simple_events(self) -> None: entity = Entity(Dataset.Events.value, alias=Dataset.Events.value) self.run_test( SnubaQuery.Type.ERROR, Dataset.Events, "count_unique(user)", "", [ Condition(Column(name="type", entity=entity), Op.EQ, "error"), Condition(Column(name="project_id", entity=entity), Op.IN, [self.project.id]), ], ) def test_join_status(self) -> None: entity = Entity(Dataset.Events.value, alias=Dataset.Events.value) g_entity = Entity("group_attributes", alias="ga") self.run_test( SnubaQuery.Type.ERROR, Dataset.Events, "count_unique(user)", "status:unresolved", [ And( [ Condition(Column("type", entity=entity), Op.EQ, "error"), Condition( Column("group_status", entity=g_entity), Op.IN, [GroupStatus.UNRESOLVED], ), ] ), Condition(Column(name="project_id", entity=entity), Op.IN, [self.project.id]), Condition( Column(name="project_id", entity=g_entity), Op.IN, [self.project.id], ), ], expected_match=Join([Relationship(entity, "attributes", g_entity)]), ) def test_simple_performance_transactions(self) -> None: self.run_test( SnubaQuery.Type.PERFORMANCE, Dataset.Transactions, "count_unique(user)", "", [ Condition(Column(name="project_id"), Op.IN, 
[self.project.id]), ], ) def test_aliased_query_events(self) -> None: self.create_release(self.project, version="something") entity = Entity(Dataset.Events.value, alias=Dataset.Events.value) expected_conditions = [ And( conditions=[ Condition(Column(name="type", entity=entity), Op.EQ, "error"), Condition( Function( function="ifNull", parameters=[ Column(name="tags[sentry:release]", entity=entity), "", ], ), Op.IN, ["something"], ), ] ), Condition(Column(name="project_id", entity=entity), Op.IN, [self.project.id]), ] self.run_test( SnubaQuery.Type.ERROR, Dataset.Events, "count_unique(user)", "release:latest", expected_conditions, ) def test_aliased_query_performance_transactions(self) -> None: self.create_release(self.project, version="something") expected_conditions = [ Condition(Column("release"), Op.IN, ["something"]), Condition(Column("project_id"), Op.IN, [self.project.id]), ] self.run_test( SnubaQuery.Type.PERFORMANCE, Dataset.Transactions, "percentile(transaction.duration,.95)", "release:latest", expected_conditions, ) def test_user_query(self) -> None: entity = Entity(Dataset.Events.value, alias=Dataset.Events.value) expected_conditions = [ And( conditions=[ Condition(Column(name="type", entity=entity), Op.EQ, "error"), Condition( Function( function="ifNull", parameters=[ Column(name="tags[sentry:user]", entity=entity), "", ], ), Op.EQ, "anengineer@work.io", ), ] ), Condition(Column(name="project_id", entity=entity), Op.IN, [self.project.id]), ] self.run_test( SnubaQuery.Type.ERROR, Dataset.Events, "count()", "user:anengineer@work.io", expected_conditions, ) def test_user_query_performance_transactions(self) -> None: expected_conditions = [ Condition(Column("user"), Op.EQ, "anengineer@work.io"), Condition(Column("project_id"), Op.IN, [self.project.id]), ] self.run_test( SnubaQuery.Type.PERFORMANCE, Dataset.Transactions, "p95()", "user:anengineer@work.io", expected_conditions, ) def test_boolean_query(self) -> None: self.create_release(self.project, 
version="something") entity = Entity(Dataset.Events.value, alias=Dataset.Events.value) expected_conditions = [ And( [ Condition(Column(name="type", entity=entity), Op.EQ, "error"), Or( [ Condition( Function( "ifNull", parameters=[ Column(name="tags[sentry:release]", entity=entity), "", ], ), Op.IN, ["something"], ), Condition( Function( "ifNull", parameters=[ Column(name="tags[sentry:release]", entity=entity), "", ], ), Op.IN, ["123"], ), ] ), ] ), Condition(Column(name="project_id", entity=entity), Op.IN, [self.project.id]), ] self.run_test( SnubaQuery.Type.ERROR, Dataset.Events, "count_unique(user)", "release:latest OR release:123", expected_conditions, ) def test_event_types(self) -> None: self.create_release(self.project, version="something") entity = Entity(Dataset.Events.value, alias=Dataset.Events.value) expected_conditions = [ And( [ Or( [ Condition(Column(name="type", entity=entity), Op.EQ, "error"), Condition(Column(name="type", entity=entity), Op.EQ, "default"), ] ), Or( [ Condition( Function( "ifNull", parameters=[ Column(name="tags[sentry:release]", entity=entity), "", ], ), Op.IN, ["something"], ), Condition( Function( "ifNull", parameters=[ Column(name="tags[sentry:release]", entity=entity), "", ], ), Op.IN, ["123"], ), ] ), ] ), Condition(Column(name="project_id", entity=entity), Op.IN, [self.project.id]), ] self.run_test( SnubaQuery.Type.ERROR, Dataset.Events, "count_unique(user)", "release:latest OR release:123", expected_conditions, entity_extra_fields={ "event_types": [ SnubaQueryEventType.EventType.ERROR, SnubaQueryEventType.EventType.DEFAULT, ] }, ) def test_issue_id_snql(self) -> None: entity = Entity(Dataset.Events.value, alias=Dataset.Events.value) expected_conditions = [ And( [ Condition(Column(name="type", entity=entity), Op.EQ, "error"), Condition( Column(name="group_id", entity=entity), Op.IN, [self.group.id, 2], ), ] ), Condition(Column(name="project_id", entity=entity), Op.IN, [self.project.id]), ] self.run_test( 
SnubaQuery.Type.ERROR, Dataset.Events, "count_unique(user)", f"issue.id:[{self.group.id}, 2]", expected_conditions, )
BuildSnqlQueryTest
python
pandas-dev__pandas
pandas/tests/indexes/datetimes/test_indexing.py
{ "start": 22808, "end": 25245 }
class ____: @pytest.mark.parametrize("box", [date, datetime, Timestamp]) @pytest.mark.parametrize("side, expected", [("left", 4), ("right", 5)]) def test_get_slice_bounds_datetime_within( self, box, side, expected, tz_aware_fixture ): # GH 35690 tz = tz_aware_fixture index = bdate_range("2000-01-03", "2000-02-11").tz_localize(tz) key = box(year=2000, month=1, day=7) warn = None if box is not date else Pandas4Warning msg = "Slicing with a datetime.date object is deprecated" with tm.assert_produces_warning(warn, match=msg): if tz is not None: with pytest.raises(TypeError, match="Cannot compare tz-naive"): # GH#36148 we require tzawareness-compat as of 2.0 index.get_slice_bound(key, side=side) else: result = index.get_slice_bound(key, side=side) assert result == expected @pytest.mark.parametrize("box", [datetime, Timestamp]) @pytest.mark.parametrize("side", ["left", "right"]) @pytest.mark.parametrize("year, expected", [(1999, 0), (2020, 30)]) def test_get_slice_bounds_datetime_outside( self, box, side, year, expected, tz_aware_fixture ): # GH 35690 tz = tz_aware_fixture index = bdate_range("2000-01-03", "2000-02-11").tz_localize(tz) key = box(year=year, month=1, day=7) if tz is not None: with pytest.raises(TypeError, match="Cannot compare tz-naive"): # GH#36148 we require tzawareness-compat as of 2.0 index.get_slice_bound(key, side=side) else: result = index.get_slice_bound(key, side=side) assert result == expected @pytest.mark.parametrize("box", [datetime, Timestamp]) def test_slice_datetime_locs(self, box, tz_aware_fixture): # GH 34077 tz = tz_aware_fixture index = DatetimeIndex(["2010-01-01", "2010-01-03"]).tz_localize(tz) key = box(2010, 1, 1) if tz is not None: with pytest.raises(TypeError, match="Cannot compare tz-naive"): # GH#36148 we require tzawareness-compat as of 2.0 index.slice_locs(key, box(2010, 1, 2)) else: result = index.slice_locs(key, box(2010, 1, 2)) expected = (0, 1) assert result == expected
TestGetSliceBounds
python
falconry__falcon
falcon/_typing.py
{ "start": 1861, "end": 2340 }
class ____(Enum): UNSET = auto() _T = TypeVar('_T') _UNSET = _Unset.UNSET UnsetOr = Union[Literal[_Unset.UNSET], _T] _ReqT = TypeVar('_ReqT', bound='Request', contravariant=True) _RespT = TypeVar('_RespT', bound='Response', contravariant=True) _AReqT = TypeVar('_AReqT', bound='AsgiRequest', contravariant=True) _ARespT = TypeVar('_ARespT', bound='AsgiResponse', contravariant=True) Link = dict[str, str] CookieArg = Mapping[str, Union[str, Cookie]] # Error handlers
_Unset
python
django__django
tests/auth_tests/test_management.py
{ "start": 59514, "end": 60027 }
class ____(TestCase): databases = {"default", "other"} def test_set_permissions_fk_to_using_parameter(self): Permission.objects.using("other").delete() with self.assertNumQueries(4, using="other") as captured_queries: create_permissions(apps.get_app_config("auth"), verbosity=0, using="other") self.assertIn("INSERT INTO", captured_queries[-1]["sql"].upper()) self.assertGreater(Permission.objects.using("other").count(), 0)
CreatePermissionsMultipleDatabasesTests
python
xlwings__xlwings
xlwings/constants.py
{ "start": 1186, "end": 8394 }
class ____: xlNextToAxis = 4 # from enum Constants xlNoDocuments = 3 # from enum Constants xlNone = -4142 # from enum Constants xlNotes = -4144 # from enum Constants xlOff = -4146 # from enum Constants xl3DEffects1 = 13 # from enum Constants xl3DBar = -4099 # from enum Constants xl3DEffects2 = 14 # from enum Constants xl3DSurface = -4103 # from enum Constants xlAbove = 0 # from enum Constants xlAccounting1 = 4 # from enum Constants xlAccounting2 = 5 # from enum Constants xlAccounting3 = 6 # from enum Constants xlAccounting4 = 17 # from enum Constants xlAdd = 2 # from enum Constants xlAll = -4104 # from enum Constants xlAllExceptBorders = 7 # from enum Constants xlAutomatic = -4105 # from enum Constants xlBar = 2 # from enum Constants xlBelow = 1 # from enum Constants xlBidi = -5000 # from enum Constants xlBidiCalendar = 3 # from enum Constants xlBoth = 1 # from enum Constants xlBottom = -4107 # from enum Constants xlCascade = 7 # from enum Constants xlCenter = -4108 # from enum Constants xlCenterAcrossSelection = 7 # from enum Constants xlChart4 = 2 # from enum Constants xlChartSeries = 17 # from enum Constants xlChartShort = 6 # from enum Constants xlChartTitles = 18 # from enum Constants xlChecker = 9 # from enum Constants xlCircle = 8 # from enum Constants xlClassic1 = 1 # from enum Constants xlClassic2 = 2 # from enum Constants xlClassic3 = 3 # from enum Constants xlClosed = 3 # from enum Constants xlColor1 = 7 # from enum Constants xlColor2 = 8 # from enum Constants xlColor3 = 9 # from enum Constants xlColumn = 3 # from enum Constants xlCombination = -4111 # from enum Constants xlComplete = 4 # from enum Constants xlConstants = 2 # from enum Constants xlContents = 2 # from enum Constants xlContext = -5002 # from enum Constants xlCorner = 2 # from enum Constants xlCrissCross = 16 # from enum Constants xlCross = 4 # from enum Constants xlCustom = -4114 # from enum Constants xlDebugCodePane = 13 # from enum Constants xlDefaultAutoFormat = -1 # from enum Constants 
xlDesktop = 9 # from enum Constants xlDiamond = 2 # from enum Constants xlDirect = 1 # from enum Constants xlDistributed = -4117 # from enum Constants xlDivide = 5 # from enum Constants xlDoubleAccounting = 5 # from enum Constants xlDoubleClosed = 5 # from enum Constants xlDoubleOpen = 4 # from enum Constants xlDoubleQuote = 1 # from enum Constants xlDrawingObject = 14 # from enum Constants xlEntireChart = 20 # from enum Constants xlExcelMenus = 1 # from enum Constants xlExtended = 3 # from enum Constants xlFill = 5 # from enum Constants xlFirst = 0 # from enum Constants xlFixedValue = 1 # from enum Constants xlFloating = 5 # from enum Constants xlFormats = -4122 # from enum Constants xlFormula = 5 # from enum Constants xlFullScript = 1 # from enum Constants xlGeneral = 1 # from enum Constants xlGray16 = 17 # from enum Constants xlGray25 = -4124 # from enum Constants xlGray50 = -4125 # from enum Constants xlGray75 = -4126 # from enum Constants xlGray8 = 18 # from enum Constants xlGregorian = 2 # from enum Constants xlGrid = 15 # from enum Constants xlGridline = 22 # from enum Constants xlHigh = -4127 # from enum Constants xlHindiNumerals = 3 # from enum Constants xlIcons = 1 # from enum Constants xlImmediatePane = 12 # from enum Constants xlInside = 2 # from enum Constants xlInteger = 2 # from enum Constants xlJustify = -4130 # from enum Constants xlLTR = -5003 # from enum Constants xlLast = 1 # from enum Constants xlLastCell = 11 # from enum Constants xlLatin = -5001 # from enum Constants xlLeft = -4131 # from enum Constants xlLeftToRight = 2 # from enum Constants xlLightDown = 13 # from enum Constants xlLightHorizontal = 11 # from enum Constants xlLightUp = 14 # from enum Constants xlLightVertical = 12 # from enum Constants xlList1 = 10 # from enum Constants xlList2 = 11 # from enum Constants xlList3 = 12 # from enum Constants xlLocalFormat1 = 15 # from enum Constants xlLocalFormat2 = 16 # from enum Constants xlLogicalCursor = 1 # from enum Constants xlLong = 3 # 
from enum Constants xlLotusHelp = 2 # from enum Constants xlLow = -4134 # from enum Constants xlMacrosheetCell = 7 # from enum Constants xlManual = -4135 # from enum Constants xlMaximum = 2 # from enum Constants xlMinimum = 4 # from enum Constants xlMinusValues = 3 # from enum Constants xlMixed = 2 # from enum Constants xlMixedAuthorizedScript = 4 # from enum Constants xlMixedScript = 3 # from enum Constants xlModule = -4141 # from enum Constants xlMultiply = 4 # from enum Constants xlNarrow = 1 # from enum Constants xlOn = 1 # from enum Constants xlOpaque = 3 # from enum Constants xlOpen = 2 # from enum Constants xlOutside = 3 # from enum Constants xlPartial = 3 # from enum Constants xlPartialScript = 2 # from enum Constants xlPercent = 2 # from enum Constants xlPlus = 9 # from enum Constants xlPlusValues = 2 # from enum Constants xlRTL = -5004 # from enum Constants xlReference = 4 # from enum Constants xlRight = -4152 # from enum Constants xlScale = 3 # from enum Constants xlSemiGray75 = 10 # from enum Constants xlSemiautomatic = 2 # from enum Constants xlShort = 1 # from enum Constants xlShowLabel = 4 # from enum Constants xlShowLabelAndPercent = 5 # from enum Constants xlShowPercent = 3 # from enum Constants xlShowValue = 2 # from enum Constants xlSimple = -4154 # from enum Constants xlSingle = 2 # from enum Constants xlSingleAccounting = 4 # from enum Constants xlSingleQuote = 2 # from enum Constants xlSolid = 1 # from enum Constants xlSquare = 1 # from enum Constants xlStError = 4 # from enum Constants xlStar = 5 # from enum Constants xlStrict = 2 # from enum Constants xlSubtract = 3 # from enum Constants xlSystem = 1 # from enum Constants xlTextBox = 16 # from enum Constants xlTiled = 1 # from enum Constants xlTitleBar = 8 # from enum Constants xlToolbar = 1 # from enum Constants xlToolbarButton = 2 # from enum Constants xlTop = -4160 # from enum Constants xlTopToBottom = 1 # from enum Constants xlTransparent = 2 # from enum Constants xlTriangle = 3 # from 
enum Constants xlVeryHidden = 2 # from enum Constants xlVisible = 12 # from enum Constants xlVisualCursor = 2 # from enum Constants xlWatchPane = 11 # from enum Constants xlWide = 3 # from enum Constants xlWorkbookTab = 6 # from enum Constants xlWorksheet4 = 1 # from enum Constants xlWorksheetCell = 3 # from enum Constants xlWorksheetShort = 5 # from enum Constants
Constants
python
python-poetry__poetry
tests/types.py
{ "start": 3429, "end": 3707 }
class ____(Protocol): def __call__( self, transformer_or_suffix: NormalizedNameTransformer | str, repository_name: str = "special", repository_url: str = "https://legacy.foo.bar", ) -> LegacyRepository: ...
SpecializedLegacyRepositoryMocker
python
facebook__pyre-check
source/interprocedural_analyses/taint/test/integration/port.py
{ "start": 1096, "end": 1334 }
class ____: @property def some_source(): return _test_source() def refer_to_method_as_field(foo: Foo): # This comes up in Instagram due to @cached_property decorators taint = foo.some_source _test_sink(taint)
Foo
python
pydantic__pydantic
pydantic/networks.py
{ "start": 28956, "end": 29437 }
class ____(AnyUrl): """A type that will accept any ClickHouse DSN. * User info required * TLD not required * Host not required """ _constraints = UrlConstraints( allowed_schemes=[ 'clickhouse+native', 'clickhouse+asynch', 'clickhouse+http', 'clickhouse', 'clickhouses', 'clickhousedb', ], default_host='localhost', default_port=9000, )
ClickHouseDsn
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/paramSpec53.py
{ "start": 374, "end": 476 }
class ____(Mixin): def __init__(self, data: int) -> None: pass Next.factory("", data=2)
Next
python
getsentry__sentry
src/sentry/api/serializers/models/group.py
{ "start": 3218, "end": 3288 }
class ____(TypedDict): displayName: str url: str
GroupAnnotation
python
wandb__wandb
wandb/apis/public/registries/_freezable_list.py
{ "start": 5887, "end": 6323 }
class ____(FreezableList[str]): def remove(self, value: str) -> None: try: super().remove(value) except ValueError: raise ValueError( f"Cannot remove artifact type: {value!r} that has been saved to the registry" ) def __repr__(self) -> str: return f"{nameof(type(self))}(saved={list(self._frozen)!r}, draft={list(self._draft)!r})"
AddOnlyArtifactTypesList
python
tensorflow__tensorflow
tensorflow/python/keras/layers/pooling.py
{ "start": 34330, "end": 36865 }
class ____(GlobalPooling1D): """Global average pooling operation for temporal data. Examples: >>> input_shape = (2, 3, 4) >>> x = tf.random.normal(input_shape) >>> y = tf.keras.layers.GlobalAveragePooling1D()(x) >>> print(y.shape) (2, 4) Args: data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, steps, features)` while `channels_first` corresponds to inputs with shape `(batch, features, steps)`. keepdims: A boolean, whether to keep the temporal dimension or not. If `keepdims` is `False` (default), the rank of the tensor is reduced for spatial dimensions. If `keepdims` is `True`, the temporal dimension are retained with length 1. The behavior is the same as for `tf.reduce_mean` or `np.mean`. Call arguments: inputs: A 3D tensor. mask: Binary tensor of shape `(batch_size, steps)` indicating whether a given step should be masked (excluded from the average). Input shape: - If `data_format='channels_last'`: 3D tensor with shape: `(batch_size, steps, features)` - If `data_format='channels_first'`: 3D tensor with shape: `(batch_size, features, steps)` Output shape: - If `keepdims`=False: 2D tensor with shape `(batch_size, features)`. 
- If `keepdims`=True: - If `data_format='channels_last'`: 3D tensor with shape `(batch_size, 1, features)` - If `data_format='channels_first'`: 3D tensor with shape `(batch_size, features, 1)` """ def __init__(self, data_format='channels_last', **kwargs): super(GlobalAveragePooling1D, self).__init__(data_format=data_format, **kwargs) self.supports_masking = True def call(self, inputs, mask=None): steps_axis = 1 if self.data_format == 'channels_last' else 2 if mask is not None: mask = math_ops.cast(mask, inputs[0].dtype) mask = array_ops.expand_dims( mask, 2 if self.data_format == 'channels_last' else 1) inputs *= mask return backend.sum( inputs, axis=steps_axis, keepdims=self.keepdims) / math_ops.reduce_sum( mask, axis=steps_axis, keepdims=self.keepdims) else: return backend.mean(inputs, axis=steps_axis, keepdims=self.keepdims) def compute_mask(self, inputs, mask=None): return None
GlobalAveragePooling1D
python
sympy__sympy
sympy/printing/aesaracode.py
{ "start": 2818, "end": 18921 }
class ____(Printer): """ .. deprecated:: 1.14. The ``Aesara Code printing`` is deprecated.See its documentation for more information. See :ref:`deprecated-aesaraprinter` for details. Code printer which creates Aesara symbolic expression graphs. Parameters ========== cache : dict Cache dictionary to use. If None (default) will use the global cache. To create a printer which does not depend on or alter global state pass an empty dictionary. Note: the dictionary is not copied on initialization of the printer and will be updated in-place, so using the same dict object when creating multiple printers or making multiple calls to :func:`.aesara_code` or :func:`.aesara_function` means the cache is shared between all these applications. Attributes ========== cache : dict A cache of Aesara variables which have been created for SymPy symbol-like objects (e.g. :class:`sympy.core.symbol.Symbol` or :class:`sympy.matrices.expressions.MatrixSymbol`). This is used to ensure that all references to a given symbol in an expression (or multiple expressions) are printed as the same Aesara variable, which is created only once. Symbols are differentiated only by name and type. The format of the cache's contents should be considered opaque to the user. """ printmethod = "_aesara" def __init__(self, *args, **kwargs): self.cache = kwargs.pop('cache', {}) super().__init__(*args, **kwargs) def _get_key(self, s, name=None, dtype=None, broadcastable=None): """ Get the cache key for a SymPy object. Parameters ========== s : sympy.core.basic.Basic SymPy object to get key for. name : str Name of object, if it does not have a ``name`` attribute. """ if name is None: name = s.name return (name, type(s), s.args, dtype, broadcastable) def _get_or_create(self, s, name=None, dtype=None, broadcastable=None): """ Get the Aesara variable for a SymPy symbol from the cache, or create it if it does not exist. 
""" # Defaults if name is None: name = s.name if dtype is None: dtype = 'floatX' if broadcastable is None: broadcastable = () key = self._get_key(s, name, dtype=dtype, broadcastable=broadcastable) if key in self.cache: return self.cache[key] value = aet.tensor(name=name, dtype=dtype, shape=broadcastable) self.cache[key] = value return value def _print_Symbol(self, s, **kwargs): dtype = kwargs.get('dtypes', {}).get(s) bc = kwargs.get('broadcastables', {}).get(s) return self._get_or_create(s, dtype=dtype, broadcastable=bc) def _print_AppliedUndef(self, s, **kwargs): name = str(type(s)) + '_' + str(s.args[0]) dtype = kwargs.get('dtypes', {}).get(s) bc = kwargs.get('broadcastables', {}).get(s) return self._get_or_create(s, name=name, dtype=dtype, broadcastable=bc) def _print_Basic(self, expr, **kwargs): op = mapping[type(expr)] children = [self._print(arg, **kwargs) for arg in expr.args] return op(*children) def _print_Number(self, n, **kwargs): # Integers already taken care of below, interpret as float return float(n.evalf()) def _print_MatrixSymbol(self, X, **kwargs): dtype = kwargs.get('dtypes', {}).get(X) return self._get_or_create(X, dtype=dtype, broadcastable=(None, None)) def _print_DenseMatrix(self, X, **kwargs): if not hasattr(aet, 'stacklists'): raise NotImplementedError( "Matrix translation not yet supported in this version of Aesara") return aet.stacklists([ [self._print(arg, **kwargs) for arg in L] for L in X.tolist() ]) _print_ImmutableMatrix = _print_ImmutableDenseMatrix = _print_DenseMatrix def _print_MatMul(self, expr, **kwargs): children = [self._print(arg, **kwargs) for arg in expr.args] result = children[0] for child in children[1:]: result = aet.dot(result, child) return result def _print_MatPow(self, expr, **kwargs): children = [self._print(arg, **kwargs) for arg in expr.args] result = 1 if isinstance(children[1], int) and children[1] > 0: for i in range(children[1]): result = aet.dot(result, children[0]) else: raise NotImplementedError('''Only 
non-negative integer powers of matrices can be handled by Aesara at the moment''') return result def _print_MatrixSlice(self, expr, **kwargs): parent = self._print(expr.parent, **kwargs) rowslice = self._print(slice(*expr.rowslice), **kwargs) colslice = self._print(slice(*expr.colslice), **kwargs) return parent[rowslice, colslice] def _print_BlockMatrix(self, expr, **kwargs): nrows, ncols = expr.blocks.shape blocks = [[self._print(expr.blocks[r, c], **kwargs) for c in range(ncols)] for r in range(nrows)] return aet.join(0, *[aet.join(1, *row) for row in blocks]) def _print_slice(self, expr, **kwargs): return slice(*[self._print(i, **kwargs) if isinstance(i, sympy.Basic) else i for i in (expr.start, expr.stop, expr.step)]) def _print_Pi(self, expr, **kwargs): return math.pi def _print_Piecewise(self, expr, **kwargs): import numpy as np e, cond = expr.args[0].args # First condition and corresponding value # Print conditional expression and value for first condition p_cond = self._print(cond, **kwargs) p_e = self._print(e, **kwargs) # One condition only if len(expr.args) == 1: # Return value if condition else NaN return aet.switch(p_cond, p_e, np.nan) # Return value_1 if condition_1 else evaluate remaining conditions p_remaining = self._print(sympy.Piecewise(*expr.args[1:]), **kwargs) return aet.switch(p_cond, p_e, p_remaining) def _print_Rational(self, expr, **kwargs): return true_divide(self._print(expr.p, **kwargs), self._print(expr.q, **kwargs)) def _print_Integer(self, expr, **kwargs): return expr.p def _print_factorial(self, expr, **kwargs): return self._print(sympy.gamma(expr.args[0] + 1), **kwargs) def _print_Derivative(self, deriv, **kwargs): from aesara.gradient import Rop rv = self._print(deriv.expr, **kwargs) for var in deriv.variables: var = self._print(var, **kwargs) rv = Rop(rv, var, aet.ones_like(var)) return rv def emptyPrinter(self, expr): return expr def doprint(self, expr, dtypes=None, broadcastables=None): """ Convert a SymPy expression to a 
Aesara graph variable. The ``dtypes`` and ``broadcastables`` arguments are used to specify the data type, dimension, and broadcasting behavior of the Aesara variables corresponding to the free symbols in ``expr``. Each is a mapping from SymPy symbols to the value of the corresponding argument to ``aesara.tensor.var.TensorVariable``. See the corresponding `documentation page`__ for more information on broadcasting in Aesara. .. __: https://aesara.readthedocs.io/en/latest/reference/tensor/shapes.html#broadcasting Parameters ========== expr : sympy.core.expr.Expr SymPy expression to print. dtypes : dict Mapping from SymPy symbols to Aesara datatypes to use when creating new Aesara variables for those symbols. Corresponds to the ``dtype`` argument to ``aesara.tensor.var.TensorVariable``. Defaults to ``'floatX'`` for symbols not included in the mapping. broadcastables : dict Mapping from SymPy symbols to the value of the ``broadcastable`` argument to ``aesara.tensor.var.TensorVariable`` to use when creating Aesara variables for those symbols. Defaults to the empty tuple for symbols not included in the mapping (resulting in a scalar). Returns ======= aesara.graph.basic.Variable A variable corresponding to the expression's value in a Aesara symbolic expression graph. """ if dtypes is None: dtypes = {} if broadcastables is None: broadcastables = {} return self._print(expr, dtypes=dtypes, broadcastables=broadcastables) global_cache: dict[Any, Any] = {} def aesara_code(expr, cache=None, **kwargs): """ Convert a SymPy expression into a Aesara graph variable. Parameters ========== expr : sympy.core.expr.Expr SymPy expression object to convert. cache : dict Cached Aesara variables (see :class:`AesaraPrinter.cache <AesaraPrinter>`). Defaults to the module-level global cache. dtypes : dict Passed to :meth:`.AesaraPrinter.doprint`. broadcastables : dict Passed to :meth:`.AesaraPrinter.doprint`. 
Returns ======= aesara.graph.basic.Variable A variable corresponding to the expression's value in a Aesara symbolic expression graph. """ sympy_deprecation_warning( """ The aesara_code function is deprecated. """, deprecated_since_version="1.14", active_deprecations_target='deprecated-aesaraprinter', ) if not aesara: raise ImportError("aesara is required for aesara_code") if cache is None: cache = global_cache return AesaraPrinter(cache=cache, settings={}).doprint(expr, **kwargs) def dim_handling(inputs, dim=None, dims=None, broadcastables=None): r""" Get value of ``broadcastables`` argument to :func:`.aesara_code` from keyword arguments to :func:`.aesara_function`. Included for backwards compatibility. Parameters ========== inputs Sequence of input symbols. dim : int Common number of dimensions for all inputs. Overrides other arguments if given. dims : dict Mapping from input symbols to number of dimensions. Overrides ``broadcastables`` argument if given. broadcastables : dict Explicit value of ``broadcastables`` argument to :meth:`.AesaraPrinter.doprint`. If not None function will return this value unchanged. Returns ======= dict Dictionary mapping elements of ``inputs`` to their "broadcastable" values (tuple of ``bool``\ s). """ if dim is not None: return dict.fromkeys(inputs, (False,) * dim) if dims is not None: maxdim = max(dims.values()) return { s: (False,) * d + (True,) * (maxdim - d) for s, d in dims.items() } if broadcastables is not None: return broadcastables return {} def aesara_function(inputs, outputs, scalar=False, *, dim=None, dims=None, broadcastables=None, **kwargs): """ Create a Aesara function from SymPy expressions. The inputs and outputs are converted to Aesara variables using :func:`.aesara_code` and then passed to ``aesara.function``. Parameters ========== inputs Sequence of symbols which constitute the inputs of the function. outputs Sequence of expressions which constitute the outputs(s) of the function. 
The free symbols of each expression must be a subset of ``inputs``. scalar : bool Convert 0-dimensional arrays in output to scalars. This will return a Python wrapper function around the Aesara function object. cache : dict Cached Aesara variables (see :class:`AesaraPrinter.cache <AesaraPrinter>`). Defaults to the module-level global cache. dtypes : dict Passed to :meth:`.AesaraPrinter.doprint`. broadcastables : dict Passed to :meth:`.AesaraPrinter.doprint`. dims : dict Alternative to ``broadcastables`` argument. Mapping from elements of ``inputs`` to integers indicating the dimension of their associated arrays/tensors. Overrides ``broadcastables`` argument if given. dim : int Another alternative to the ``broadcastables`` argument. Common number of dimensions to use for all arrays/tensors. ``aesara_function([x, y], [...], dim=2)`` is equivalent to using ``broadcastables={x: (False, False), y: (False, False)}``. Returns ======= callable A callable object which takes values of ``inputs`` as positional arguments and returns an output array for each of the expressions in ``outputs``. If ``outputs`` is a single expression the function will return a Numpy array, if it is a list of multiple expressions the function will return a list of arrays. See description of the ``squeeze`` argument above for the behavior when a single output is passed in a list. The returned object will either be an instance of ``aesara.compile.function.types.Function`` or a Python wrapper function around one. In both cases, the returned value will have a ``aesara_function`` attribute which points to the return value of ``aesara.function``. 
Examples ======== >>> from sympy.abc import x, y, z >>> from sympy.printing.aesaracode import aesara_function A simple function with one input and one output: >>> f1 = aesara_function([x], [x**2 - 1], scalar=True) >>> f1(3) 8.0 A function with multiple inputs and one output: >>> f2 = aesara_function([x, y, z], [(x**z + y**z)**(1/z)], scalar=True) >>> f2(3, 4, 2) 5.0 A function with multiple inputs and multiple outputs: >>> f3 = aesara_function([x, y], [x**2 + y**2, x**2 - y**2], scalar=True) >>> f3(2, 3) [13.0, -5.0] See also ======== dim_handling """ sympy_deprecation_warning( """ The aesara_function function is deprecated. """, deprecated_since_version="1.14", active_deprecations_target='deprecated-aesaraprinter', ) if not aesara: raise ImportError("Aesara is required for aesara_function") # Pop off non-aesara keyword args cache = kwargs.pop('cache', {}) dtypes = kwargs.pop('dtypes', {}) broadcastables = dim_handling( inputs, dim=dim, dims=dims, broadcastables=broadcastables, ) # Print inputs/outputs code = partial(aesara_code, cache=cache, dtypes=dtypes, broadcastables=broadcastables) tinputs = list(map(code, inputs)) toutputs = list(map(code, outputs)) #fix constant expressions as variables toutputs = [output if isinstance(output, aesara.graph.basic.Variable) else aet.as_tensor_variable(output) for output in toutputs] if len(toutputs) == 1: toutputs = toutputs[0] # Compile aesara func func = aesara.function(tinputs, toutputs, **kwargs) is_0d = [len(o.variable.broadcastable) == 0 for o in func.outputs] # No wrapper required if not scalar or not any(is_0d): func.aesara_function = func return func # Create wrapper to convert 0-dimensional outputs to scalars def wrapper(*args): out = func(*args) # out can be array(1.0) or [array(1.0), array(2.0)] if is_sequence(out): return [o[()] if is_0d[i] else o for i, o in enumerate(out)] else: return out[()] wrapper.__wrapped__ = func wrapper.__doc__ = func.__doc__ wrapper.aesara_function = func return wrapper
AesaraPrinter
python
tiangolo__fastapi
docs_src/dependencies/tutorial003.py
{ "start": 167, "end": 635 }
class ____: def __init__(self, q: Union[str, None] = None, skip: int = 0, limit: int = 100): self.q = q self.skip = skip self.limit = limit @app.get("/items/") async def read_items(commons=Depends(CommonQueryParams)): response = {} if commons.q: response.update({"q": commons.q}) items = fake_items_db[commons.skip : commons.skip + commons.limit] response.update({"items": items}) return response
CommonQueryParams
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/operators/tasks.py
{ "start": 13302, "end": 16865 }
class ____(GoogleCloudBaseOperator): """ Lists queues from Cloud Tasks. .. seealso:: For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:CloudTasksQueuesListOperator` :param location: The location name in which the queues were created. :param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks. If set to None or missing, the default project_id from the Google Cloud connection is used. :param results_filter: (Optional) Filter used to specify a subset of queues. :param page_size: (Optional) The maximum number of resources contained in the underlying API response. :param retry: (Optional) A retry object used to retry requests. If None is specified, requests will not be retried. :param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete. Note that if retry is specified, the timeout applies to each individual attempt. :param metadata: (Optional) Additional metadata that is provided to the method. :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud. :param impersonation_chain: Optional service account to impersonate using short-term credentials, or chained list of accounts required to get the access_token of the last account in the list, which will be impersonated in the request. If set as a string, the account must grant the originating account the Service Account Token Creator IAM role. If set as a sequence, the identities from the list must grant Service Account Token Creator IAM role to the directly preceding identity, with first account from the list granting this role to the originating account (templated). 
""" template_fields: Sequence[str] = ( "location", "project_id", "gcp_conn_id", "impersonation_chain", ) operator_extra_links = (CloudTasksLink(),) def __init__( self, *, location: str, project_id: str = PROVIDE_PROJECT_ID, results_filter: str | None = None, page_size: int | None = None, retry: Retry | _MethodDefault = DEFAULT, timeout: float | None = None, metadata: MetaData = (), gcp_conn_id: str = "google_cloud_default", impersonation_chain: str | Sequence[str] | None = None, **kwargs, ) -> None: super().__init__(**kwargs) self.location = location self.project_id = project_id self.results_filter = results_filter self.page_size = page_size self.retry = retry self.timeout = timeout self.metadata = metadata self.gcp_conn_id = gcp_conn_id self.impersonation_chain = impersonation_chain def execute(self, context: Context): hook = CloudTasksHook( gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain, ) queues = hook.list_queues( location=self.location, project_id=self.project_id, results_filter=self.results_filter, page_size=self.page_size, retry=self.retry, timeout=self.timeout, metadata=self.metadata, ) CloudTasksLink.persist( context=context, project_id=self.project_id or hook.project_id, ) return [Queue.to_dict(q) for q in queues]
CloudTasksQueuesListOperator
python
skorch-dev__skorch
skorch/tests/callbacks/test_logging.py
{ "start": 12866, "end": 15265 }
class ____: @pytest.fixture def net_cls(self): from skorch import NeuralNetClassifier return NeuralNetClassifier @pytest.fixture def data(self, classifier_data): X, y = classifier_data # accelerate training since we don't care for the loss X, y = X[:40], y[:40] return X, y @pytest.fixture def wandb_logger_cls(self): from skorch.callbacks import WandbLogger return WandbLogger @pytest.fixture def wandb_run_cls(self): import wandb os.environ['WANDB_MODE'] = 'dryrun' # run offline wandb_version = tuple(map(int, wandb.__version__.split('.')[:2])) if wandb_version >= (0, 10): return wandb.init(anonymous="allow") else: with wandb.init(anonymous="allow") as run: return run @pytest.fixture def mock_run(self): mock = Mock() mock.log = Mock() mock.watch = Mock() mock.dir = '.' return mock def test_ignore_keys( self, net_cls, classifier_module, data, wandb_logger_cls, mock_run, ): # ignore 'dur' and 'valid_loss', 'unknown' doesn't exist but # this should not cause a problem wandb_cb = wandb_logger_cls( mock_run, keys_ignored=['dur', 'valid_loss', 'unknown']) net_cls( classifier_module, callbacks=[wandb_cb], max_epochs=3, ).fit(*data) # 3 epochs = 3 calls assert mock_run.log.call_count == 3 assert mock_run.watch.call_count == 1 call_args = [args[0][0] for args in mock_run.log.call_args_list] assert 'valid_loss' not in call_args def test_keys_ignored_is_string(self, wandb_logger_cls, mock_run): wandb_cb = wandb_logger_cls( mock_run, keys_ignored='a-key').initialize() expected = {'a-key', 'batches'} assert wandb_cb.keys_ignored_ == expected def test_fit_with_real_experiment( self, net_cls, classifier_module, data, wandb_logger_cls, wandb_run_cls, ): net = net_cls( classifier_module, callbacks=[wandb_logger_cls(wandb_run_cls)], max_epochs=5, ) net.fit(*data)
TestWandb
python
pytorch__pytorch
test/jit/test_batch_mm.py
{ "start": 209, "end": 10132 }
class ____(JitTestCase): @staticmethod def _get_test_tensors(n: int): return [ ( torch.tensor([[1 + x, 2 + x, 3 + x], [4 + x, 5 + x, 6 + x]]) if x % 2 == 0 else torch.tensor([[1 + x, 2 + x], [3 + x, 4 + x], [5 + x, 6 + x]]) ) for x in range(n) ] def test_batch_mm_no_mutation(self): def test_batch_mm( T1: torch.Tensor, T2: torch.Tensor, T3: torch.Tensor, T4: torch.Tensor, T5: torch.Tensor, T6: torch.Tensor, T7: torch.Tensor, T8: torch.Tensor, ): return ( torch.mm(T1, T2) + torch.mm(T3, T4) + torch.mm(T5, T6) + torch.mm(T7, T8) ) test_batch_mm_scripted = torch.jit.script(test_batch_mm) tensors = TestBatchMM._get_test_tensors(8) expected = test_batch_mm(*tensors) FileCheck().check_count("aten::mm", 4, exactly=True).run( test_batch_mm_scripted.graph ) self.run_pass("batch_mm", test_batch_mm_scripted.graph) FileCheck().check_count("prim::MMTreeReduce", 1, exactly=True).run( test_batch_mm_scripted.graph ) actual = test_batch_mm_scripted(*tensors) self.assertEqual(expected, actual, atol=1e-9, rtol=1e-9) def test_batch_mm_permitted_mutation(self): def test_batch_mm( T1: torch.Tensor, T2: torch.Tensor, T3: torch.Tensor, T4: torch.Tensor, T5: torch.Tensor, T6: torch.Tensor, T7: torch.Tensor, T8: torch.Tensor, ): result = {} result["product"] = ( torch.mm(T1, T2) + torch.mm(T3, T4) + torch.mm(T5, T6) + torch.mm(T7, T8) ) result["constant"] = torch.tensor([42.0]) return result test_batch_mm_scripted = torch.jit.script(test_batch_mm) tensors = TestBatchMM._get_test_tensors(8) expected = test_batch_mm(*tensors) FileCheck().check_count("aten::mm", 4, exactly=True).run( test_batch_mm_scripted.graph ) self.run_pass("batch_mm", test_batch_mm_scripted.graph) FileCheck().check_count("prim::MMTreeReduce", 1, exactly=True).run( test_batch_mm_scripted.graph ) actual = test_batch_mm_scripted(*tensors) self.assertEqual(expected, actual, atol=1e-9, rtol=1e-9) def test_batch_mm_prohibited_mutation(self): @torch.jit.script def test_batch_mm(n: int): T1 = torch.zeros((n, n)) T2 = 
torch.zeros((n, n)) T3 = torch.zeros((n, n)) T4 = torch.zeros((n, n)) T5 = torch.zeros((n, n)) T6 = torch.zeros((n, n)) T7 = torch.zeros((n, n)) T8 = torch.zeros((n, n)) torch.relu_(T1) result = ( torch.mm(T1, T2) + torch.mm(T3, T4) + torch.mm(T5, T6) + torch.mm(T7, T8) ) return result FileCheck().check_count("aten::mm", 4, exactly=True).run(test_batch_mm.graph) self.run_pass("batch_mm", test_batch_mm.graph) FileCheck().check_count("aten::mm", 4, exactly=True).check_not( "prim::MMTreeReduce" ).run(test_batch_mm.graph) def test_batch_mm_prohibited_mutation_multiple_adds(self): @torch.jit.script def test_batch_mm(n: int): T1 = torch.zeros((n, n)) T2 = torch.zeros((n, n)) T3 = torch.zeros((n, n)) T4 = torch.zeros((n, n)) T5 = torch.zeros((n, n)) T6 = torch.zeros((n, n)) T7 = torch.zeros((n, n)) T8 = torch.zeros((n, n)) T9 = torch.zeros((n, n)) T10 = torch.zeros((n, n)) torch.relu_(T1) result = {} result["no_mutated_parameters"] = ( torch.mm(T2, T3) + torch.mm(T4, T5) + torch.mm(T6, T7) + torch.mm(T8, T9) ) result["all_parameters"] = ( torch.mm(T1, T2) + torch.mm(T3, T4) + torch.mm(T5, T6) + torch.mm(T7, T8) + torch.mm(T9, T10) ) return result self.run_pass("batch_mm", test_batch_mm.graph) FileCheck().check_count("prim::MMTreeReduce", 1, exactly=True).check_count( "aten::mm", 5, exactly=True ).run(test_batch_mm.graph) def test_batch_mm_prohibited_mutation_if_node(self): @torch.jit.script def test_batch_mm(n: int, use_t1: bool): T1 = torch.zeros((n, n)) T2 = torch.zeros((n, n)) T3 = torch.zeros((n, n)) T4 = torch.zeros((n, n)) T5 = torch.zeros((n, n)) T6 = torch.zeros((n, n)) T7 = torch.zeros((n, n)) T8 = torch.zeros((n, n)) T9 = torch.zeros((n, n)) T10 = torch.zeros((n, n)) if use_t1: torch.relu_(T1) return ( torch.mm(T1, T2) + torch.mm(T3, T4) + torch.mm(T5, T6) + torch.mm(T7, T8) + torch.mm(T9, T10) ) else: return ( torch.mm(T2, T3) + torch.mm(T4, T5) + torch.mm(T6, T7) + torch.mm(T8, T9) ) self.run_pass("batch_mm", test_batch_mm.graph) 
FileCheck().check_count("aten::mm", 5, exactly=True).check_count( "prim::MMTreeReduce", 1, exactly=True ).run(test_batch_mm.graph) def test_batch_mm_side_permitted_mutation(self): @torch.jit.script def test_batch_mm(n: int): result = {} A = torch.zeros((n, n)) T1 = torch.zeros((n, n)) T2 = torch.zeros((n, n)) T3 = torch.zeros((n, n)) T4 = torch.zeros((n, n)) T5 = torch.zeros((n, n)) T6 = torch.zeros((n, n)) T7 = torch.zeros((n, n)) T8 = torch.zeros((n, n)) result["T1"] = torch.mm(A, T1) result["T2"] = torch.mm(A, T2) result["T3"] = torch.mm(A, T3) result["T4"] = torch.mm(A, T4) result["T5"] = torch.mm(A, T5) result["T6"] = torch.mm(A, T6) result["T7"] = torch.mm(A, T7) result["T8"] = torch.mm(A, T8) return result FileCheck().check_count("aten::mm", 8, exactly=True).run(test_batch_mm.graph) self.run_pass("batch_mm", test_batch_mm.graph) FileCheck().check_count("prim::MMBatchSide", 1, exactly=True).check_not( "aten::mm" ).run(test_batch_mm.graph) def test_batch_mm_side_prohibited_mutation_uncommon_side(self): @torch.jit.script def test_batch_mm(n: int): A = torch.zeros((n, n)) T1 = torch.zeros((n, n)) T2 = torch.zeros((n, n)) T3 = torch.zeros((n, n)) T4 = torch.zeros((n, n)) T5 = torch.zeros((n, n)) T6 = torch.zeros((n, n)) T7 = torch.zeros((n, n)) T8 = torch.zeros((n, n)) T9 = torch.zeros((n, n)) T10 = torch.zeros((n, n)) torch.relu_(T1) result = {} result["T1"] = torch.mm(A, T1) result["T2"] = torch.mm(A, T2) result["T3"] = torch.mm(A, T3) result["T4"] = torch.mm(A, T4) result["T5"] = torch.mm(A, T5) result["T6"] = torch.mm(A, T6) result["T7"] = torch.mm(A, T7) result["T8"] = torch.mm(A, T8) result["T9"] = torch.mm(A, T9) result["T10"] = torch.mm(A, T10) return result FileCheck().check_count("aten::mm", 10, exactly=True).run(test_batch_mm.graph) self.run_pass("batch_mm", test_batch_mm.graph) FileCheck().check_count("aten::mm", 1, exactly=True).run(test_batch_mm.graph) FileCheck().check_count("prim::MMBatchSide", 1, exactly=True).run( test_batch_mm.graph ) def 
test_batch_mm_side_prohibited_mutation_common_side(self): @torch.jit.script def test_batch_mm(n: int): A = torch.zeros((n, n)) T1 = torch.zeros((n, n)) T2 = torch.zeros((n, n)) T3 = torch.zeros((n, n)) T4 = torch.zeros((n, n)) T5 = torch.zeros((n, n)) T6 = torch.zeros((n, n)) T7 = torch.zeros((n, n)) T8 = torch.zeros((n, n)) T9 = torch.zeros((n, n)) T10 = torch.zeros((n, n)) torch.relu_(A) result = {} result["T1"] = torch.mm(A, T1) result["T2"] = torch.mm(A, T2) result["T3"] = torch.mm(A, T3) result["T4"] = torch.mm(A, T4) result["T5"] = torch.mm(A, T5) result["T6"] = torch.mm(A, T6) result["T7"] = torch.mm(A, T7) result["T8"] = torch.mm(A, T8) result["T9"] = torch.mm(A, T9) result["T10"] = torch.mm(A, T10) return result FileCheck().check_count("aten::mm", 10, exactly=True).run(test_batch_mm.graph) self.run_pass("batch_mm", test_batch_mm.graph) FileCheck().check_count("aten::mm", 10, exactly=True).check_not( "prim::MMBatchSide" ).run(test_batch_mm.graph) if __name__ == "__main__": raise_on_run_directly("test/test_jit.py")
TestBatchMM
python
astropy__astropy
astropy/coordinates/builtin_frames/galactocentric.py
{ "start": 1730, "end": 17997 }
class ____(ScienceState): """Global setting of default values for the frame attributes in the `~astropy.coordinates.Galactocentric` frame. These constancts may be updated in future versions of ``astropy``. Note that when using `~astropy.coordinates.Galactocentric`, changing values here will not affect any attributes that are set explicitly by passing values in to the `~astropy.coordinates.Galactocentric` initializer. Modifying these defaults will only affect the frame attribute values when using the frame as, e.g., ``Galactocentric`` or ``Galactocentric()`` with no explicit arguments. This class controls the parameter settings by specifying a string name, with the following pre-specified options: - 'pre-v4.0': The current default value, which sets the default frame attribute values to their original (pre-astropy-v4.0) values. - 'v4.0': The attribute values as updated in Astropy version 4.0. - 'latest': An alias of the most recent parameter set (currently: 'v4.0') Alternatively, user-defined parameter settings may be registered, with :meth:`~astropy.coordinates.galactocentric_frame_defaults.register`, and used identically as pre-specified parameter sets. At minimum, registrations must have unique names and a dictionary of parameters with keys "galcen_coord", "galcen_distance", "galcen_v_sun", "z_sun", "roll". See examples below. This class also tracks the references for all parameter values in the attribute ``references``, as well as any further information the registry. The pre-specified options can be extended to include similar state information as user-defined parameter settings -- for example, to add parameter uncertainties. The preferred method for getting a parameter set and metadata, by name, is :meth:`~astropy.coordinates.galactocentric_frame_defaults.get_from_registry` since it ensures the immutability of the registry. See :ref:`astropy:astropy-coordinates-galactocentric-defaults` for more information. 
Examples -------- The default `~astropy.coordinates.Galactocentric` frame parameters can be modified globally:: >>> from astropy.coordinates import galactocentric_frame_defaults >>> _ = galactocentric_frame_defaults.set('v4.0') # doctest: +SKIP >>> Galactocentric() # doctest: +SKIP <Galactocentric Frame (galcen_coord=<ICRS Coordinate: (ra, dec) in deg (266.4051, -28.936175)>, galcen_distance=8.122 kpc, galcen_v_sun=(12.9, 245.6, 7.78) km / s, z_sun=20.8 pc, roll=0.0 deg)> >>> _ = galactocentric_frame_defaults.set('pre-v4.0') # doctest: +SKIP >>> Galactocentric() # doctest: +SKIP <Galactocentric Frame (galcen_coord=<ICRS Coordinate: (ra, dec) in deg (266.4051, -28.936175)>, galcen_distance=8.3 kpc, galcen_v_sun=(11.1, 232.24, 7.25) km / s, z_sun=27.0 pc, roll=0.0 deg)> The default parameters can also be updated by using this class as a context manager:: >>> with galactocentric_frame_defaults.set('pre-v4.0'): ... print(Galactocentric()) # doctest: +FLOAT_CMP <Galactocentric Frame (galcen_coord=<ICRS Coordinate: (ra, dec) in deg (266.4051, -28.936175)>, galcen_distance=8.3 kpc, galcen_v_sun=(11.1, 232.24, 7.25) km / s, z_sun=27.0 pc, roll=0.0 deg)> Again, changing the default parameter values will not affect frame attributes that are explicitly specified:: >>> import astropy.units as u >>> with galactocentric_frame_defaults.set('pre-v4.0'): ... print(Galactocentric(galcen_distance=8.0*u.kpc)) # doctest: +FLOAT_CMP <Galactocentric Frame (galcen_coord=<ICRS Coordinate: (ra, dec) in deg (266.4051, -28.936175)>, galcen_distance=8.0 kpc, galcen_v_sun=(11.1, 232.24, 7.25) km / s, z_sun=27.0 pc, roll=0.0 deg)> Additional parameter sets may be registered, for instance to use the Dehnen & Binney (1998) measurements of the solar motion. We can also add metadata, such as the 1-sigma errors. 
In this example we will modify the required key "parameters", change the recommended key "references" to match "parameters", and add the extra key "error" (any key can be added):: >>> state = galactocentric_frame_defaults.get_from_registry("v4.0") >>> state["parameters"]["galcen_v_sun"] = (10.00, 225.25, 7.17) * (u.km / u.s) >>> state["references"]["galcen_v_sun"] = "https://ui.adsabs.harvard.edu/full/1998MNRAS.298..387D" >>> state["error"] = {"galcen_v_sun": (0.36, 0.62, 0.38) * (u.km / u.s)} >>> galactocentric_frame_defaults.register(name="DB1998", **state) Just as in the previous examples, the new parameter set can be retrieved with:: >>> state = galactocentric_frame_defaults.get_from_registry("DB1998") >>> print(state["error"]["galcen_v_sun"]) # doctest: +FLOAT_CMP [0.36 0.62 0.38] km / s """ _latest_value = "v4.0" _value = None _references = None _state = {} # all other data # Note: _StateProxy() produces read-only view of enclosed mapping. _registry = { "v4.0": { "parameters": _StateProxy( { "galcen_coord": ICRS( ra=266.4051 * u.degree, dec=-28.936175 * u.degree ), "galcen_distance": 8.122 * u.kpc, "galcen_v_sun": r.CartesianRepresentation( [12.9, 245.6, 7.78] * (u.km / u.s) ), "z_sun": 20.8 * u.pc, "roll": 0 * u.deg, } ), "references": _StateProxy( { "galcen_coord": ( "https://ui.adsabs.harvard.edu/abs/2004ApJ...616..872R" ), "galcen_distance": ( "https://ui.adsabs.harvard.edu/abs/2018A%26A...615L..15G" ), "galcen_v_sun": [ "https://ui.adsabs.harvard.edu/abs/2018RNAAS...2..210D", "https://ui.adsabs.harvard.edu/abs/2018A%26A...615L..15G", "https://ui.adsabs.harvard.edu/abs/2004ApJ...616..872R", ], "z_sun": "https://ui.adsabs.harvard.edu/abs/2019MNRAS.482.1417B", "roll": None, } ), }, "pre-v4.0": { "parameters": _StateProxy( { "galcen_coord": ICRS( ra=266.4051 * u.degree, dec=-28.936175 * u.degree ), "galcen_distance": 8.3 * u.kpc, "galcen_v_sun": r.CartesianRepresentation( [11.1, 220 + 12.24, 7.25] * (u.km / u.s) ), "z_sun": 27.0 * u.pc, "roll": 0 * u.deg, } 
), "references": _StateProxy( { "galcen_coord": ( "https://ui.adsabs.harvard.edu/abs/2004ApJ...616..872R" ), "galcen_distance": ( "https://ui.adsabs.harvard.edu/#abs/2009ApJ...692.1075G" ), "galcen_v_sun": [ "https://ui.adsabs.harvard.edu/#abs/2010MNRAS.403.1829S", "https://ui.adsabs.harvard.edu/#abs/2015ApJS..216...29B", ], "z_sun": "https://ui.adsabs.harvard.edu/#abs/2001ApJ...553..184C", "roll": None, } ), }, } @classproperty # read-only def parameters(cls): return cls._value @classproperty # read-only def references(cls): return cls._references @classmethod def get_from_registry(cls, name: str) -> dict[str, dict]: """ Return Galactocentric solar parameters and metadata given string names for the parameter sets. This method ensures the returned state is a mutable copy, so any changes made do not affect the registry state. Returns ------- state : dict Copy of the registry for the string name. Should contain, at minimum: - "parameters": dict Galactocentric solar parameters - "references" : Dict[str, Union[str, Sequence[str]]] References for "parameters". Fields are str or sequence of str. Raises ------ KeyError If invalid string input to registry to retrieve solar parameters for Galactocentric frame. """ # Resolve the meaning of 'latest': latest parameter set is from v4.0 # - update this as newer parameter choices are added if name == "latest": name = cls._latest_value # Get the state from the registry. # Copy to ensure registry is immutable to modifications of "_value". # Raises KeyError if `name` is invalid string input to registry # to retrieve solar parameters for Galactocentric frame. 
return copy.deepcopy(cls._registry[name]) # ensure mutable @classmethod def validate(cls, value): if value is None: value = cls._latest_value if isinstance(value, str): state = cls.get_from_registry(value) cls._references = state["references"] cls._state = state parameters = state["parameters"] elif isinstance(value, dict): parameters = value elif isinstance(value, Galactocentric): # turn the frame instance into a dict of frame attributes parameters = {} for k in value.frame_attributes: parameters[k] = getattr(value, k) cls._references = value.frame_attribute_references.copy() cls._state = {"parameters": parameters, "references": cls._references} else: raise ValueError( "Invalid input to retrieve solar parameters for Galactocentric frame:" " input must be a string, dict, or Galactocentric instance" ) return parameters @classmethod def register( cls, name: str, parameters: dict, references=None, **meta: dict ) -> None: """Register a set of parameters. Parameters ---------- name : str The registration name for the parameter and metadata set. parameters : dict The solar parameters for Galactocentric frame. references : dict or None, optional References for contents of `parameters`. None becomes empty dict. **meta : dict, optional Any other properties to register. """ # check on contents of `parameters` must_have = {"galcen_coord", "galcen_distance", "galcen_v_sun", "z_sun", "roll"} missing = must_have.difference(parameters) if missing: raise ValueError(f"Missing parameters: {missing}") references = references or {} # None -> {} state = {"parameters": parameters, "references": references} state.update(meta) # meta never has keys "parameters" or "references" cls._registry[name] = state doc_components = """ x : `~astropy.units.Quantity`, optional Cartesian, Galactocentric :math:`x` position component. y : `~astropy.units.Quantity`, optional Cartesian, Galactocentric :math:`y` position component. 
z : `~astropy.units.Quantity`, optional Cartesian, Galactocentric :math:`z` position component. v_x : `~astropy.units.Quantity`, optional Cartesian, Galactocentric :math:`v_x` velocity component. v_y : `~astropy.units.Quantity`, optional Cartesian, Galactocentric :math:`v_y` velocity component. v_z : `~astropy.units.Quantity`, optional Cartesian, Galactocentric :math:`v_z` velocity component. """ doc_footer = """ Other parameters ---------------- galcen_coord : `~astropy.coordinates.ICRS`, optional, keyword-only The ICRS coordinates of the Galactic center. galcen_distance : `~astropy.units.Quantity`, optional, keyword-only The distance from the sun to the Galactic center. galcen_v_sun : `~astropy.coordinates.CartesianRepresentation`, `~astropy.units.Quantity` ['speed'], optional, keyword-only The velocity of the sun *in the Galactocentric frame* as Cartesian velocity components. z_sun : `~astropy.units.Quantity` ['length'], optional, keyword-only The distance from the sun to the Galactic midplane. roll : `~astropy.coordinates.Angle`, optional, keyword-only The angle to rotate about the final x-axis, relative to the orientation for Galactic. For example, if this roll angle is 0, the final x-z plane will align with the Galactic coordinates x-z plane. Unless you really know what this means, you probably should not change this! .. versionchanged :: 8.0 ``galcen_v_sun`` is now a `~astropy.coordinates.CartesianRepresentation` rather than a `~astropy.coordinates.CartesianDifferential`. Examples -------- To transform to the Galactocentric frame with the default frame attributes, pass the uninstantiated class name to the ``transform_to()`` method of a `~astropy.coordinates.SkyCoord` object:: >>> import astropy.units as u >>> import astropy.coordinates as coord >>> c = coord.SkyCoord(ra=[158.3122, 24.5] * u.degree, ... dec=[-17.3, 81.52] * u.degree, ... distance=[11.5, 24.12] * u.kpc, ... 
frame='icrs') >>> c.transform_to(coord.Galactocentric) # doctest: +FLOAT_CMP <SkyCoord (Galactocentric: galcen_coord=<ICRS Coordinate: (ra, dec) in deg (266.4051, -28.936175)>, galcen_distance=8.122 kpc, galcen_v_sun=(12.9, 245.6, 7.78) km / s, z_sun=20.8 pc, roll=0.0 deg): (x, y, z) in kpc [( -9.43489286, -9.40062188, 6.51345359), (-21.11044918, 18.76334013, 7.83175149)]> To specify a custom set of parameters, you have to include extra keyword arguments when initializing the Galactocentric frame object:: >>> c.transform_to(coord.Galactocentric(galcen_distance=8.1*u.kpc)) # doctest: +FLOAT_CMP <SkyCoord (Galactocentric: galcen_coord=<ICRS Coordinate: (ra, dec) in deg (266.4051, -28.936175)>, galcen_distance=8.1 kpc, galcen_v_sun=(12.9, 245.6, 7.78) km / s, z_sun=20.8 pc, roll=0.0 deg): (x, y, z) in kpc [( -9.41284763, -9.40062188, 6.51346272), (-21.08839478, 18.76334013, 7.83184184)]> Similarly, transforming from the Galactocentric frame to another coordinate frame:: >>> c = coord.SkyCoord(x=[-8.3, 4.5] * u.kpc, ... y=[0., 81.52] * u.kpc, ... z=[0.027, 24.12] * u.kpc, ... frame=coord.Galactocentric) >>> c.transform_to(coord.ICRS) # doctest: +FLOAT_CMP <SkyCoord (ICRS): (ra, dec, distance) in (deg, deg, kpc) [( 88.22423301, 29.88672864, 0.17813456), (289.72864549, 49.9865043 , 85.93949064)]> Or, with custom specification of the Galactic center:: >>> c = coord.SkyCoord(x=[-8.0, 4.5] * u.kpc, ... y=[0., 81.52] * u.kpc, ... z=[21.0, 24120.0] * u.pc, ... frame=coord.Galactocentric, ... z_sun=21 * u.pc, galcen_distance=8. * u.kpc) >>> c.transform_to(coord.ICRS) # doctest: +FLOAT_CMP <SkyCoord (ICRS): (ra, dec, distance) in (deg, deg, kpc) [( 86.2585249 , 28.85773187, 2.75625475e-05), (289.77285255, 50.06290457, 8.59216010e+01)]> """ @format_doc(base_doc, components=doc_components, footer=doc_footer)
galactocentric_frame_defaults
python
walkccc__LeetCode
solutions/1007. Minimum Domino Rotations For Equal Row/1007.py
{ "start": 0, "end": 260 }
class ____: def minDominoRotations(self, tops: list[int], bottoms: list[int]) -> int: for num in range(1, 7): if all(num in pair for pair in zip(tops, bottoms)): return len(tops) - max(tops.count(num), bottoms.count(num)) return -1
Solution
python
pypa__pip
src/pip/_internal/exceptions.py
{ "start": 1626, "end": 1685 }
class ____(Exception): """The base pip error."""
PipError
python
celery__celery
t/unit/worker/test_state.py
{ "start": 797, "end": 864 }
class ____(state.Persistent): storage = MockShelve()
MyPersistent
python
qdrant__qdrant-client
qdrant_client/http/api_client.py
{ "start": 2016, "end": 2944 }
class ____(Generic[ClientT]): def __init__(self, host: str, **kwargs: Any): self.client = ApiClient(host, **kwargs) self.aliases_api = SyncAliasesApi(self.client) self.beta_api = SyncBetaApi(self.client) self.collections_api = SyncCollectionsApi(self.client) self.distributed_api = SyncDistributedApi(self.client) self.indexes_api = SyncIndexesApi(self.client) self.points_api = SyncPointsApi(self.client) self.search_api = SyncSearchApi(self.client) self.service_api = SyncServiceApi(self.client) self.snapshots_api = SyncSnapshotsApi(self.client) def close(self) -> None: self.client.close() T = TypeVar("T") Send = Callable[[Request], Response] SendAsync = Callable[[Request], Awaitable[Response]] MiddlewareT = Callable[[Request, Send], Response] AsyncMiddlewareT = Callable[[Request, SendAsync], Awaitable[Response]]
SyncApis
python
wandb__wandb
wandb/sdk/artifacts/_generated/input_types.py
{ "start": 1718, "end": 2139 }
class ____(GQLInput): graphql: Optional[int] = None sdk_graphql: Optional[int] = Field(alias="sdkGraphql", default=None) filestream_count: Optional[int] = Field(alias="filestreamCount", default=None) filestream_size: Optional[int] = Field(alias="filestreamSize", default=None) sdk_graphql_query_seconds: Optional[float] = Field( alias="sdkGraphqlQuerySeconds", default=None )
RateLimitsInput
python
getsentry__sentry
src/sentry/constants.py
{ "start": 20154, "end": 32988 }
class ____(Enum): HTTP = "http" DB = "db" ASSETS = "assets" # previously named resources APP_START = "app_start" SCREEN_LOAD = "screen_load" VITAL = "vital" CACHE = "cache" QUEUE = "queue" AGENTS = "agents" MCP = "mcp" StatsPeriod = namedtuple("StatsPeriod", ("segments", "interval")) # We need to limit the range of valid timestamps of an event because that # timestamp is used to control data retention. MAX_SECS_IN_FUTURE = 60 MAX_SECS_IN_PAST = 2592000 # 30 days ALLOWED_FUTURE_DELTA = timedelta(seconds=MAX_SECS_IN_FUTURE) DEFAULT_STORE_NORMALIZER_ARGS = dict( geoip_lookup=rust_geoip, max_secs_in_future=MAX_SECS_IN_FUTURE, max_secs_in_past=MAX_SECS_IN_PAST, enable_trimming=True, ) INTERNAL_INTEGRATION_TOKEN_COUNT_MAX = 20 ALL_ACCESS_PROJECTS = {-1} ALL_ACCESS_PROJECT_ID = -1 ALL_ACCESS_PROJECTS_SLUG = "$all" # Most number of events for the top-n graph MAX_TOP_EVENTS = 10 # org option default values REQUIRE_SCRUB_DATA_DEFAULT = False REQUIRE_SCRUB_DEFAULTS_DEFAULT = False ATTACHMENTS_ROLE_DEFAULT = settings.SENTRY_DEFAULT_ROLE DEBUG_FILES_ROLE_DEFAULT = "admin" EVENTS_ADMIN_ROLE_DEFAULT = settings.SENTRY_DEFAULT_ROLE REQUIRE_SCRUB_IP_ADDRESS_DEFAULT = False SCRAPE_JAVASCRIPT_DEFAULT = True JOIN_REQUESTS_DEFAULT = True HIDE_AI_FEATURES_DEFAULT = False GITHUB_COMMENT_BOT_DEFAULT = False GITLAB_COMMENT_BOT_DEFAULT = False ISSUE_ALERTS_THREAD_DEFAULT = True METRIC_ALERTS_THREAD_DEFAULT = True DATA_CONSENT_DEFAULT = False UPTIME_AUTODETECTION = True TARGET_SAMPLE_RATE_DEFAULT = 1.0 SAMPLING_MODE_DEFAULT = "organization" ROLLBACK_ENABLED_DEFAULT = True DEFAULT_AUTOFIX_AUTOMATION_TUNING_DEFAULT = AutofixAutomationTuningSettings.OFF DEFAULT_SEER_SCANNER_AUTOMATION_DEFAULT = True ENABLE_SEER_ENHANCED_ALERTS_DEFAULT = True ENABLE_SEER_CODING_DEFAULT = True ENABLED_CONSOLE_PLATFORMS_DEFAULT: list[str] = [] ENABLE_PR_REVIEW_TEST_GENERATION_DEFAULT = False INGEST_THROUGH_TRUSTED_RELAYS_ONLY_DEFAULT = "disabled" # `sentry:events_member_admin` - controls whether the 'member' role 
gets the event:admin scope EVENTS_MEMBER_ADMIN_DEFAULT = True ALERTS_MEMBER_WRITE_DEFAULT = True # Defined at https://github.com/getsentry/relay/blob/master/py/sentry_relay/consts.py DataCategory = sentry_relay.consts.DataCategory CRASH_RATE_ALERT_SESSION_COUNT_ALIAS = "_total_count" CRASH_RATE_ALERT_AGGREGATE_ALIAS = "_crash_rate_alert_aggregate" # Dynamic sampling denylist composed manually from # 1. `src/sentry/event_manager.py:save`. We have function # `_derive_interface_tags_many(jobs)` which iterates across all interfaces # and execute `iter_tags`, so i've searched usage of `iter_tags`. # 2. `src/sentry/event_manager.py:_pull_out_data` we have `set_tag`. # 3. `src/sentry/event_manager.py:_get_event_user_many` we have `set_tag`. # 4. `src/sentry/event_manager.py:_get_or_create_release_many` we have `set_tag`. # 5. `src/sentry/interfaces/exception.py:Mechanism` we have `iter_tags`. # 6. `src/sentry/plugins/sentry_urls/models.py:UrlsPlugin`. # 7. `sentry/src/sentry/plugins/sentry_interface_types/models.py`. # 8. `src/sentry/plugins/sentry_useragents/models.py:UserAgentPlugin`. 
# Note: # should be sorted alphabetically so that it is easy to maintain in future # if you update this list please add explanation or source of it DS_DENYLIST = frozenset( [ "app.device", "browser", "browser.name", "device", "device.family", "environment", "gpu.name", "gpu.vendor", "handled", "interface_type", "level", "logger", "mechanism", "monitor.id", "os", "os.name", "os.rooted", "runtime", "runtime.name", "sentry:dist", "sentry:release", "sentry:user", "transaction", "url", ] ) # DESCRIBES the globs used to check if a transaction is for a healthcheck endpoint # https://kubernetes.io/docs/reference/using-api/health-checks/ # Also it covers: livez, readyz HEALTH_CHECK_GLOBS = [ "*healthcheck*", "*heartbeat*", "*/health{/,}", "*/healthy{/,}", "*/healthz{/,}", "*/health_check{/,}", "*/_health{/,}", r"*/\[_health\]{/,}", "*/live{/,}", "*/livez{/,}", "*/ready{/,}", "*/readyz{/,}", "*/ping{/,}", "*/up{/,}", ] NEL_CULPRITS = { # https://w3c.github.io/network-error-logging/#predefined-network-error-types "dns.unreachable": "DNS server is unreachable", "dns.name_not_resolved": "DNS server responded but is unable to resolve the address", "dns.failed": "Request to the DNS server failed due to reasons not covered by previous errors", "dns.address_changed": "Indicates that the resolved IP address for a request's origin has changed since the corresponding NEL policy was received", "tcp.timed_out": "TCP connection to the server timed out", "tcp.closed": "The TCP connection was closed by the server", "tcp.reset": "The TCP connection was reset", "tcp.refused": "The TCP connection was refused by the server", "tcp.aborted": "The TCP connection was aborted", "tcp.address_invalid": "The IP address is invalid", "tcp.address_unreachable": "The IP address is unreachable", "tcp.failed": "The TCP connection failed due to reasons not covered by previous errors", "tls.version_or_cipher_mismatch": "The TLS connection was aborted due to version or cipher mismatch", 
"tls.bad_client_auth_cert": "The TLS connection was aborted due to invalid client certificate", "tls.cert.name_invalid": "The TLS connection was aborted due to invalid name", "tls.cert.date_invalid": "The TLS connection was aborted due to invalid certificate date", "tls.cert.authority_invalid": "The TLS connection was aborted due to invalid issuing authority", "tls.cert.invalid": "The TLS connection was aborted due to invalid certificate", "tls.cert.revoked": "The TLS connection was aborted due to revoked server certificate", "tls.cert.pinned_key_not_in_cert_chain": "The TLS connection was aborted due to a key pinning error", "tls.protocol.error": "The TLS connection was aborted due to a TLS protocol error", "tls.failed": "The TLS connection failed due to reasons not covered by previous errors", "http.error": "The user agent successfully received a response, but it had a {} status code", "http.protocol.error": "The connection was aborted due to an HTTP protocol error", "http.response.invalid": "Response is empty, has a content-length mismatch, has improper encoding, and/or other conditions that prevent user agent from processing the response", "http.response.redirect_loop": "The request was aborted due to a detected redirect loop", "http.failed": "The connection failed due to errors in HTTP protocol not covered by previous errors", "abandoned": "User aborted the resource fetch before it is complete", "unknown": "error type is unknown", # Chromium-specific errors, not documented in the spec # https://chromium.googlesource.com/chromium/src/+/HEAD/net/network_error_logging/network_error_logging_service.cc "dns.protocol": "ERR_DNS_MALFORMED_RESPONSE", "dns.server": "ERR_DNS_SERVER_FAILED", "tls.unrecognized_name_alert": "ERR_SSL_UNRECOGNIZED_NAME_ALERT", "h2.ping_failed": "ERR_HTTP2_PING_FAILED", "h2.protocol.error": "ERR_HTTP2_PROTOCOL_ERROR", "h3.protocol.error": "ERR_QUIC_PROTOCOL_ERROR", "http.response.invalid.empty": "ERR_EMPTY_RESPONSE", 
"http.response.invalid.content_length_mismatch": "ERR_CONTENT_LENGTH_MISMATCH", "http.response.invalid.incomplete_chunked_encoding": "ERR_INCOMPLETE_CHUNKED_ENCODING", "http.response.invalid.invalid_chunked_encoding": "ERR_INVALID_CHUNKED_ENCODING", "http.request.range_not_satisfiable": "ERR_REQUEST_RANGE_NOT_SATISFIABLE", "http.response.headers.truncated": "ERR_RESPONSE_HEADERS_TRUNCATED", "http.response.headers.multiple_content_disposition": "ERR_RESPONSE_HEADERS_MULTIPLE_CONTENT_DISPOSITION", "http.response.headers.multiple_content_length": "ERR_RESPONSE_HEADERS_MULTIPLE_CONTENT_LENGTH", } # Generated from https://raw.githubusercontent.com/github-linguist/linguist/master/lib/linguist/languages.yml and our list of platforms/languages EXTENSION_LANGUAGE_MAP = { "c": "c", "cats": "c", "h": "objective-c", "idc": "c", "cs": "c#", "cake": "coffeescript", "csx": "c#", "linq": "c#", "cpp": "c++", "c++": "c++", "cc": "c++", "cp": "c++", "cppm": "c++", "cxx": "c++", "h++": "c++", "hh": "c++", "hpp": "c++", "hxx": "c++", "inc": "php", "inl": "c++", "ino": "c++", "ipp": "c++", "ixx": "c++", "re": "c++", "tcc": "c++", "tpp": "c++", "txx": "c++", "chs": "c2hs haskell", "clj": "clojure", "bb": "clojure", "boot": "clojure", "cl2": "clojure", "cljc": "clojure", "cljs": "clojure", "cljs.hl": "clojure", "cljscm": "clojure", "cljx": "clojure", "hic": "clojure", "coffee": "coffeescript", "_coffee": "coffeescript", "cjsx": "coffeescript", "iced": "coffeescript", "cfm": "coldfusion", "cfml": "coldfusion", "cfc": "coldfusion cfc", "cr": "crystal", "dart": "dart", "ex": "elixir", "exs": "elixir", "fs": "f#", "fsi": "f#", "fsx": "f#", "go": "go", "groovy": "groovy", "grt": "groovy", "gtpl": "groovy", "gvy": "groovy", "gsp": "groovy server pages", "hcl": "hcl", "nomad": "hcl", "tf": "hcl", "tfvars": "hcl", "workflow": "hcl", "hs": "haskell", "hs-boot": "haskell", "hsc": "haskell", "java": "java", "jav": "java", "jsh": "java", "jsp": "java server pages", "tag": "java server pages", "js": 
"javascript", "_js": "javascript", "bones": "javascript", "cjs": "javascript", "es": "javascript", "es6": "javascript", "frag": "javascript", "gs": "javascript", "jake": "javascript", "javascript": "javascript", "jsb": "javascript", "jscad": "javascript", "jsfl": "javascript", "jslib": "javascript", "jsm": "javascript", "jspre": "javascript", "jss": "javascript", "jsx": "javascript", "mjs": "javascript", "njs": "javascript", "pac": "javascript", "sjs": "javascript", "ssjs": "javascript", "xsjs": "javascript", "xsjslib": "javascript", "js.erb": "javascript+erb", "kt": "kotlin", "ktm": "kotlin", "kts": "kotlin", "litcoffee": "literate coffeescript", "coffee.md": "literate coffeescript", "lhs": "literate haskell", "lua": "lua", "fcgi": "ruby", "nse": "lua", "p8": "lua", "pd_lua": "lua", "rbxs": "lua", "rockspec": "lua", "wlua": "lua", "numpy": "numpy", "numpyw": "numpy", "numsc": "numpy", "ml": "ocaml", "eliom": "ocaml", "eliomi": "ocaml", "ml4": "ocaml", "mli": "ocaml", "mll": "ocaml", "mly": "ocaml", "m": "objective-c", "mm": "objective-c++", "cl": "opencl", "opencl": "opencl", "php": "php", "aw": "php", "ctp": "php", "php3": "php", "php4": "php", "php5": "php", "phps": "php", "phpt": "php", "pl": "perl", "al": "perl", "cgi": "python", "perl": "perl", "ph": "perl", "plx": "perl", "pm": "perl", "psgi": "perl", "t": "perl", "ps1": "powershell", "psd1": "powershell", "psm1": "powershell", "py": "python", "gyp": "python", "gypi": "python", "lmi": "python", "py3": "python", "pyde": "python", "pyi": "python", "pyp": "python", "pyt": "python", "pyw": "python", "rpy": "python", "spec": "ruby", "tac": "python", "wsgi": "python", "xpy": "python", "rb": "ruby", "builder": "ruby", "eye": "ruby", "gemspec": "ruby", "god": "ruby", "jbuilder": "ruby", "mspec": "ruby", "pluginspec": "ruby", "podspec": "ruby", "prawn": "ruby", "rabl": "ruby", "rake": "ruby", "rbi": "ruby", "rbuild": "ruby", "rbw": "ruby", "rbx": "ruby", "ru": "ruby", "ruby": "ruby", "thor": "ruby", "watchr": "ruby", 
"rs": "rust", "rs.in": "rust", "scala": "scala", "kojo": "scala", "sbt": "scala", "sc": "scala", "smk": "snakemake", "snakefile": "snakemake", "swift": "swift", "tsx": "tsx", "ts": "typescript", "cts": "typescript", "mts": "typescript", "upc": "unified parallel c", "vb": "visual basic .net", "vbhtml": "visual basic .net", "bas": "visual basic 6.0", "cls": "visual basic 6.0", "ctl": "visual basic 6.0", "dsr": "visual basic 6.0", "frm": "visual basic 6.0", }
InsightModules
python
ray-project__ray
python/ray/data/block.py
{ "start": 7700, "end": 8705 }
class BlockMetadataWithSchema(BlockMetadata):
    """BlockMetadata extended with the block's schema.

    Carries the same fields as BlockMetadata (input files, size, row count,
    exec stats) plus the schema derived from the block's data.
    """

    schema: Optional[Schema] = None

    def __init__(self, metadata: BlockMetadata, schema: Optional["Schema"] = None):
        # Copy the plain-metadata fields, then attach the schema.
        super().__init__(
            input_files=metadata.input_files,
            size_bytes=metadata.size_bytes,
            num_rows=metadata.num_rows,
            exec_stats=metadata.exec_stats,
        )
        self.schema = schema

    # NOTE(review): defined without a decorator yet takes no ``self`` and is
    # used as a factory — presumably meant to be a @staticmethod (decorator
    # possibly lost in extraction); confirm against the full file.
    def from_block(
        block: Block, stats: Optional["BlockExecStats"] = None
    ) -> "BlockMetadataWithSchema":
        # Build metadata + schema for ``block`` via its accessor, optionally
        # attaching execution stats.
        accessor = BlockAccessor.for_block(block)
        meta = accessor.get_metadata(exec_stats=stats)
        schema = accessor.schema()
        return BlockMetadataWithSchema(metadata=meta, schema=schema)

    @property
    def metadata(self) -> BlockMetadata:
        # Project back down to a plain BlockMetadata (schema dropped).
        return BlockMetadata(
            num_rows=self.num_rows,
            size_bytes=self.size_bytes,
            exec_stats=self.exec_stats,
            input_files=self.input_files,
        )


# Decorator for the next definition, which lies outside this chunk.
@DeveloperAPI
BlockMetadataWithSchema
python
realpython__materials
top-python-game-engines/pygame/pygame_game.py
{ "start": 1422, "end": 5047 }
class ____(pygame.sprite.Sprite): def __init__(self): """Initialize the coin sprite""" super(Coin, self).__init__() # Get the image to draw for the coin coin_image = str(Path.cwd() / "images" / "coin_gold.png") # Load the image, preserve alpha channel for transparency self.surf = pygame.image.load(coin_image).convert_alpha() # The starting position is randomly generated self.rect = self.surf.get_rect( center=( randint(10, WIDTH - 10), randint(10, HEIGHT - 10), ) ) # Initialize the Pygame engine pygame.init() # Set up the drawing window screen = pygame.display.set_mode(size=[WIDTH, HEIGHT]) # Hide the mouse cursor pygame.mouse.set_visible(False) # Set up the clock for a decent frame rate clock = pygame.time.Clock() # Create a custom event for adding a new coin ADDCOIN = pygame.USEREVENT + 1 pygame.time.set_timer(ADDCOIN, coin_countdown) # Set up the coin_list coin_list = pygame.sprite.Group() # Initialize the score score = 0 # Set up the coin pickup sound coin_pickup_sound = pygame.mixer.Sound( str(Path.cwd() / "sounds" / "coin_pickup.wav") ) # Create a player sprite and set its initial position player = Player() player.update(pygame.mouse.get_pos()) # Run until you get to an end condition running = True while running: # Did the user click the window close button? for event in pygame.event.get(): if event.type == pygame.QUIT: running = False # Should you add a new coin? 
elif event.type == ADDCOIN: # Create a new coin and add it to the coin_list new_coin = Coin() coin_list.add(new_coin) # Speed things up if fewer than three coins are on-screen if len(coin_list) < 3: coin_countdown -= coin_interval # Need to have some interval if coin_countdown < 100: coin_countdown = 100 # Stop the previous timer by setting the interval to 0 pygame.time.set_timer(ADDCOIN, 0) # Start a new timer pygame.time.set_timer(ADDCOIN, coin_countdown) # Update the player position player.update(pygame.mouse.get_pos()) # Check if the player has collided with a coin, removing the coin if so coins_collected = pygame.sprite.spritecollide( sprite=player, group=coin_list, dokill=True ) for coin in coins_collected: # Each coin is worth 10 points score += 10 # Play the coin collected sound coin_pickup_sound.play() # Are there too many coins on the screen? if len(coin_list) >= COIN_COUNT: # This counts as an end condition, so you end your game loop running = False # To render the screen, first fill the background with pink screen.fill((255, 170, 164)) # Draw the coins next for coin in coin_list: screen.blit(coin.surf, coin.rect) # Then draw the player screen.blit(player.surf, player.rect) # Finally, draw the score at the bottom left score_font = pygame.font.SysFont("any_font", 36) score_block = score_font.render(f"Score: {score}", False, (0, 0, 0)) screen.blit(score_block, (50, HEIGHT - 50)) # Flip the display to make everything appear pygame.display.flip() # Ensure you maintain a 30 frames per second rate clock.tick(30) # Done! Print the final score print(f"Game over! Final score: {score}") # Make the mouse visible again pygame.mouse.set_visible(True) # Quit the game pygame.quit()
Coin
python
dateutil__dateutil
src/dateutil/parser/_parser.py
{ "start": 2002, "end": 8054 }
class ____(object): # Fractional seconds are sometimes split by a comma _split_decimal = re.compile("([.,])") def __init__(self, instream): if isinstance(instream, (bytes, bytearray)): instream = instream.decode() if isinstance(instream, text_type): instream = StringIO(instream) elif getattr(instream, 'read', None) is None: raise TypeError('Parser must be a string or character stream, not ' '{itype}'.format(itype=instream.__class__.__name__)) self.instream = instream self.charstack = [] self.tokenstack = [] self.eof = False def get_token(self): """ This function breaks the time string into lexical units (tokens), which can be parsed by the parser. Lexical units are demarcated by changes in the character set, so any continuous string of letters is considered one unit, any continuous string of numbers is considered one unit. The main complication arises from the fact that dots ('.') can be used both as separators (e.g. "Sep.20.2009") or decimal points (e.g. "4:30:21.447"). As such, it is necessary to read the full context of any dot-separated strings before breaking it into tokens; as such, this function maintains a "token stack", for when the ambiguous context demands that multiple tokens be parsed at once. """ if self.tokenstack: return self.tokenstack.pop(0) seenletters = False token = None state = None while not self.eof: # We only realize that we've reached the end of a token when we # find a character that's not part of the current token - since # that character may be part of the next token, it's stored in the # charstack. if self.charstack: nextchar = self.charstack.pop(0) else: nextchar = self.instream.read(1) while nextchar == '\x00': nextchar = self.instream.read(1) if not nextchar: self.eof = True break elif not state: # First character of the token - determines if we're starting # to parse a word, a number or something else. 
token = nextchar if self.isword(nextchar): state = 'a' elif self.isnum(nextchar): state = '0' elif self.isspace(nextchar): token = ' ' break # emit token else: break # emit token elif state == 'a': # If we've already started reading a word, we keep reading # letters until we find something that's not part of a word. seenletters = True if self.isword(nextchar): token += nextchar elif nextchar == '.': token += nextchar state = 'a.' else: self.charstack.append(nextchar) break # emit token elif state == '0': # If we've already started reading a number, we keep reading # numbers until we find something that doesn't fit. if self.isnum(nextchar): token += nextchar elif nextchar == '.' or (nextchar == ',' and len(token) >= 2): token += nextchar state = '0.' else: self.charstack.append(nextchar) break # emit token elif state == 'a.': # If we've seen some letters and a dot separator, continue # parsing, and the tokens will be broken up later. seenletters = True if nextchar == '.' or self.isword(nextchar): token += nextchar elif self.isnum(nextchar) and token[-1] == '.': token += nextchar state = '0.' else: self.charstack.append(nextchar) break # emit token elif state == '0.': # If we've seen at least one dot separator, keep going, we'll # break up the tokens later. if nextchar == '.' or self.isnum(nextchar): token += nextchar elif self.isword(nextchar) and token[-1] == '.': token += nextchar state = 'a.' else: self.charstack.append(nextchar) break # emit token if (state in ('a.', '0.') and (seenletters or token.count('.') > 1 or token[-1] in '.,')): l = self._split_decimal.split(token) token = l[0] for tok in l[1:]: if tok: self.tokenstack.append(tok) if state == '0.' 
and token.count('.') == 0: token = token.replace(',', '.') return token def __iter__(self): return self def __next__(self): token = self.get_token() if token is None: raise StopIteration return token def next(self): return self.__next__() # Python 2.x support @classmethod def split(cls, s): return list(cls(s)) @classmethod def isword(cls, nextchar): """ Whether or not the next character is part of a word """ return nextchar.isalpha() @classmethod def isnum(cls, nextchar): """ Whether the next character is part of a number """ return nextchar.isdigit() @classmethod def isspace(cls, nextchar): """ Whether the next character is whitespace """ return nextchar.isspace()
_timelex
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/sql/type_api.py
{ "start": 42134, "end": 49067 }
class ____(TypeEngineMixin): """mixin that defines attributes and behaviors specific to third-party datatypes. "Third party" refers to datatypes that are defined outside the scope of SQLAlchemy within either end-user application code or within external extensions to SQLAlchemy. Subclasses currently include :class:`.TypeDecorator` and :class:`.UserDefinedType`. .. versionadded:: 1.4.28 """ cache_ok: Optional[bool] = None '''Indicate if statements using this :class:`.ExternalType` are "safe to cache". The default value ``None`` will emit a warning and then not allow caching of a statement which includes this type. Set to ``False`` to disable statements using this type from being cached at all without a warning. When set to ``True``, the object's class and selected elements from its state will be used as part of the cache key. For example, using a :class:`.TypeDecorator`:: class MyType(TypeDecorator): impl = String cache_ok = True def __init__(self, choices): self.choices = tuple(choices) self.internal_only = True The cache key for the above type would be equivalent to:: >>> MyType(["a", "b", "c"])._static_cache_key (<class '__main__.MyType'>, ('choices', ('a', 'b', 'c'))) The caching scheme will extract attributes from the type that correspond to the names of parameters in the ``__init__()`` method. Above, the "choices" attribute becomes part of the cache key but "internal_only" does not, because there is no parameter named "internal_only". The requirements for cacheable elements is that they are hashable and also that they indicate the same SQL rendered for expressions using this type every time for a given cache value. To accommodate for datatypes that refer to unhashable structures such as dictionaries, sets and lists, these objects can be made "cacheable" by assigning hashable structures to the attributes whose names correspond with the names of the arguments. 
For example, a datatype which accepts a dictionary of lookup values may publish this as a sorted series of tuples. Given a previously un-cacheable type as:: class LookupType(UserDefinedType): """a custom type that accepts a dictionary as a parameter. this is the non-cacheable version, as "self.lookup" is not hashable. """ def __init__(self, lookup): self.lookup = lookup def get_col_spec(self, **kw): return "VARCHAR(255)" def bind_processor(self, dialect): ... # works with "self.lookup" ... Where "lookup" is a dictionary. The type will not be able to generate a cache key:: >>> type_ = LookupType({"a": 10, "b": 20}) >>> type_._static_cache_key <stdin>:1: SAWarning: UserDefinedType LookupType({'a': 10, 'b': 20}) will not produce a cache key because the ``cache_ok`` flag is not set to True. Set this flag to True if this type object's state is safe to use in a cache key, or False to disable this warning. symbol('no_cache') If we **did** set up such a cache key, it wouldn't be usable. We would get a tuple structure that contains a dictionary inside of it, which cannot itself be used as a key in a "cache dictionary" such as SQLAlchemy's statement cache, since Python dictionaries aren't hashable:: >>> # set cache_ok = True >>> type_.cache_ok = True >>> # this is the cache key it would generate >>> key = type_._static_cache_key >>> key (<class '__main__.LookupType'>, ('lookup', {'a': 10, 'b': 20})) >>> # however this key is not hashable, will fail when used with >>> # SQLAlchemy statement cache >>> some_cache = {key: "some sql value"} Traceback (most recent call last): File "<stdin>", line 1, in <module> TypeError: unhashable type: 'dict' The type may be made cacheable by assigning a sorted tuple of tuples to the ".lookup" attribute:: class LookupType(UserDefinedType): """a custom type that accepts a dictionary as a parameter. 
The dictionary is stored both as itself in a private variable, and published in a public variable as a sorted tuple of tuples, which is hashable and will also return the same value for any two equivalent dictionaries. Note it assumes the keys and values of the dictionary are themselves hashable. """ cache_ok = True def __init__(self, lookup): self._lookup = lookup # assume keys/values of "lookup" are hashable; otherwise # they would also need to be converted in some way here self.lookup = tuple((key, lookup[key]) for key in sorted(lookup)) def get_col_spec(self, **kw): return "VARCHAR(255)" def bind_processor(self, dialect): ... # works with "self._lookup" ... Where above, the cache key for ``LookupType({"a": 10, "b": 20})`` will be:: >>> LookupType({"a": 10, "b": 20})._static_cache_key (<class '__main__.LookupType'>, ('lookup', (('a', 10), ('b', 20)))) .. versionadded:: 1.4.14 - added the ``cache_ok`` flag to allow some configurability of caching for :class:`.TypeDecorator` classes. .. versionadded:: 1.4.28 - added the :class:`.ExternalType` mixin which generalizes the ``cache_ok`` flag to both the :class:`.TypeDecorator` and :class:`.UserDefinedType` classes. .. seealso:: :ref:`sql_caching` ''' # noqa: E501 @util.non_memoized_property def _static_cache_key( self, ) -> Union[CacheConst, Tuple[Any, ...]]: cache_ok = self.__class__.__dict__.get("cache_ok", None) if cache_ok is None: for subtype in self.__class__.__mro__: if ExternalType in subtype.__bases__: break else: subtype = self.__class__.__mro__[1] util.warn( "%s %r will not produce a cache key because " "the ``cache_ok`` attribute is not set to True. This can " "have significant performance implications including some " "performance degradations in comparison to prior SQLAlchemy " "versions. Set this attribute to True if this type object's " "state is safe to use in a cache key, or False to " "disable this warning." 
% (subtype.__name__, self), code="cprf", ) elif cache_ok is True: return super()._static_cache_key return NO_CACHE
ExternalType
python
huggingface__transformers
src/transformers/models/pix2struct/modeling_pix2struct.py
{ "start": 24442, "end": 25122 }
class Pix2StructTextLayerFF(nn.Module):
    """Pre-norm feed-forward sublayer of the Pix2Struct text decoder.

    Applies layer norm, the gated dense block, dropout, then a residual
    connection, in that order.
    """

    def __init__(self, config: Pix2StructTextConfig):
        super().__init__()
        # Attribute names are part of the checkpoint layout — do not rename.
        self.DenseReluDense = Pix2StructTextDenseGatedActDense(config)
        self.layer_norm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_epsilon)
        self.dropout = nn.Dropout(config.dropout_rate)

    # Copied from transformers.models.t5.modeling_t5.T5LayerFF.forward
    def forward(self, hidden_states):
        normalized = self.layer_norm(hidden_states)
        projected = self.DenseReluDense(normalized)
        return hidden_states + self.dropout(projected)
Pix2StructTextLayerFF
python
django-mptt__django-mptt
tests/myapp/tests.py
{ "start": 50056, "end": 52388 }
class ____(TreeTestCase): """ Tests for the ``recursetree`` template filter. """ fixtures = ["categories.json"] template = re.sub( r"(?m)^[\s]+", "", """ {% load mptt_tags %} <ul> {% recursetree nodes %} <li> {{ node.name }} {% if not node.is_leaf_node %} <ul class="children"> {{ children }} </ul> {% endif %} </li> {% endrecursetree %} </ul> """, ) def test_leaf_html(self): html = ( Template(self.template) .render( Context( { "nodes": Category.objects.filter(pk=10), } ) ) .replace("\n", "") ) self.assertEqual(html, "<ul><li>Hardware &amp; Accessories</li></ul>") def test_nonleaf_html(self): qs = Category.objects.get(pk=8).get_descendants(include_self=True) html = ( Template(self.template) .render( Context( { "nodes": qs, } ) ) .replace("\n", "") ) self.assertEqual( html, ( '<ul><li>PlayStation 3<ul class="children">' "<li>Games</li><li>Hardware &amp; Accessories</li></ul></li></ul>" ), ) def test_parsing_fail(self): self.assertRaises( TemplateSyntaxError, Template, "{% load mptt_tags %}{% recursetree %}{% endrecursetree %}", ) def test_cached_ancestors(self): template = Template( """ {% load mptt_tags %} {% recursetree nodes %} {{ node.get_ancestors|join:" > " }} {{ node.name }} {% if not node.is_leaf_node %} {{ children }} {% endif %} {% endrecursetree %} """ ) with self.assertNumQueries(1): qs = Category.objects.all() template.render(Context({"nodes": qs}))
RecurseTreeTestCase
python
networkx__networkx
networkx/algorithms/tree/tests/test_distance_measures.py
{ "start": 52, "end": 1922 }
class TestCenter:
    """Tests for ``nx.tree.center``."""

    @pytest.mark.parametrize("graph_type", (nx.Graph, nx.MultiGraph))
    def test_center_simple_tree(self, graph_type):
        G = nx.Graph([(1, 2), (1, 3), (2, 4), (2, 5)], create_using=graph_type)
        assert set(nx.tree.center(G)) == {1, 2}

    @pytest.mark.parametrize("r", range(2, 5))
    @pytest.mark.parametrize("h", range(1, 5))
    def test_center_balanced_tree(self, r, h):
        # Balanced trees are centered on their root (node 0).
        G = nx.balanced_tree(r, h)
        assert nx.tree.center(G) == [0]

    @pytest.mark.parametrize("n", [1, 2, 99, 100])
    def test_center_path_graph(self, n):
        # Paths have one middle node (odd n) or two adjacent ones (even n).
        G = nx.path_graph(n)
        expected = {(n - 1) // 2, math.ceil((n - 1) / 2)}
        assert set(nx.tree.center(G)) == expected

    @pytest.mark.parametrize("n", [0, 2, 3, 5, 99, 100])
    def test_center_star_graph(self, n):
        G = nx.star_graph(n)
        assert nx.tree.center(G) == [0]

    @pytest.mark.parametrize(
        "G",
        (
            nx.cycle_graph(5),
            nx.complete_graph(5),
            nx.Graph([(0, 1), (2, 3)]),
            nx.empty_graph(2),
            nx.Graph(),
            nx.MultiGraph([(0, 1), (0, 1)]),
            nx.Graph([(0, 1), (1, 2), (3, 4)]),
            nx.Graph([(0, 1), (1, 2), (3, 4), (4, 5)]),
            pytest.param(
                nx.Graph([(0, 0)]),
                marks=pytest.mark.xfail(reason="no check for self-loops"),
            ),
        ),
    )
    def test_center_non_tree(self, G):
        # Cycles, forests, and empty graphs must raise NotATree.
        with pytest.raises(nx.NotATree, match=r"not a tree"):
            nx.tree.center(G)

    @pytest.mark.parametrize("graph_type", (nx.DiGraph, nx.MultiDiGraph))
    def test_center_directed(self, graph_type):
        # Directed graphs are rejected outright.
        G = nx.path_graph(4, create_using=graph_type)
        with pytest.raises(
            nx.NetworkXNotImplemented, match=r"not implemented for directed"
        ):
            nx.tree.center(G)
TestCenter
python
pypa__installer
tests/test_utils.py
{ "start": 440, "end": 974 }
class TestParseMetadata:
    """Tests for ``parse_metadata_file``."""

    def test_basics(self):
        # Parse a minimal METADATA payload containing a repeated field.
        result = parse_metadata_file(
            textwrap.dedent(
                """\
                Name: package
                Version: 1.0.0
                Multi-Use-Field: 1
                Multi-Use-Field: 2
                Multi-Use-Field: 3
                """
            )
        )
        # RFC 822-style parsing yields an email.message.Message.
        assert isinstance(result, Message)
        # Header lookup is case-insensitive...
        assert result.get("Name") == "package"
        assert result.get("version") == "1.0.0"
        # ...and repeated fields are all preserved, in order.
        assert result.get_all("MULTI-USE-FIELD") == ["1", "2", "3"]
TestParseMetadata
python
django-debug-toolbar__django-debug-toolbar
tests/panels/test_profiling.py
{ "start": 3689, "end": 4165 }
class ProfilingPanelIntegrationTestCase(IntegrationTestCase):
    """End-to-end check that profiling a view does not re-execute it."""

    def test_view_executed_once(self):
        # Sanity: start from an empty user table.
        self.assertEqual(User.objects.count(), 0)

        # First request creates exactly one user and renders the toolbar
        # (the "Profiling" panel title appears in the response).
        response = self.client.get("/new_user/")
        self.assertContains(response, "Profiling")
        self.assertEqual(User.objects.count(), 1)

        # A second request raises IntegrityError (presumably a duplicate-key
        # constraint on the created user — confirm against the view); the
        # atomic block contains the failed transaction, and the count stays
        # at 1, i.e. profiling never ran the view body an extra time.
        with self.assertRaises(IntegrityError), transaction.atomic():
            response = self.client.get("/new_user/")
        self.assertEqual(User.objects.count(), 1)
ProfilingPanelIntegrationTestCase
python
huggingface__transformers
tests/models/ovis2/test_processor_ovis2.py
{ "start": 963, "end": 4675 }
class ____(ProcessorTesterMixin, unittest.TestCase): processor_class = Ovis2Processor @classmethod def _setup_tokenizer(cls): tokenizer_class = cls._get_component_class_from_processor("tokenizer") return tokenizer_class.from_pretrained("thisisiron/Ovis2-1B-hf") @staticmethod def prepare_processor_dict(): return { "chat_template": "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n'}}{% if message['content'] is string %}{{ message['content'] }}{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' %}{{ '<image>\n' }}{% elif content['type'] == 'text' %}{{ content['text'] }}{% endif %}{% endfor %}{% endif %}{{'<|im_end|>\n'}}{% endfor %}{% if add_generation_prompt %}{{'<|im_start|>assistant\n' }}{% endif %}", } # fmt: skip def test_processor_to_json_string(self): processor = self.get_processor() obj = json.loads(processor.to_json_string()) for key, value in self.prepare_processor_dict().items(): # chat_tempalate are tested as a separate test because they are saved in separate files if key != "chat_template": self.assertEqual(obj[key], value) self.assertEqual(getattr(processor, key, None), value) def test_chat_template_is_saved(self): processor_loaded = self.processor_class.from_pretrained(self.tmpdirname) processor_dict_loaded = json.loads(processor_loaded.to_json_string()) # chat templates aren't serialized to json in processors self.assertFalse("chat_template" in processor_dict_loaded) # they have to be saved as separate file and loaded back from that file # so we check if the same template is loaded processor_dict = self.prepare_processor_dict() self.assertTrue(processor_loaded.chat_template == processor_dict.get("chat_template", None)) def test_chat_template(self): processor = AutoProcessor.from_pretrained("thisisiron/Ovis2-1B-hf") expected_prompt = "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\n<image>\nWhat is shown in this 
image?<|im_end|>\n<|im_start|>assistant\n" messages = [ { "role": "user", "content": [ {"type": "image"}, {"type": "text", "text": "What is shown in this image?"}, ], }, ] formatted_prompt = processor.apply_chat_template(messages, add_generation_prompt=True) self.assertEqual(expected_prompt, formatted_prompt) @require_av def test_chat_template_dict(self): processor = AutoProcessor.from_pretrained("thisisiron/Ovis2-1B-hf") messages = [ { "role": "user", "content": [ {"type": "image"}, {"type": "text", "text": "What is shown in this image?"}, ], }, ] formatted_prompt_tokenized = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True) expected_output = [[151644, 8948, 198, 2610, 525, 264, 10950, 17847, 13, 151645, 198, 151644, 872, 198, 27, 1805, 397, 3838, 374, 6839, 304, 419, 2168, 30, 151645, 198, 151644, 77091, 198]] # fmt: skip self.assertListEqual(expected_output, formatted_prompt_tokenized) out_dict = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True, return_dict=True) self.assertListEqual(list(out_dict.keys()), ["input_ids", "attention_mask"])
Ovis2ProcessorTest
python
astropy__astropy
astropy/modeling/projections.py
{ "start": 2133, "end": 2738 }
class ____(Parameter): """ Same as `Parameter` but can indicate its modified status via the ``dirty`` property. This flag also gets set automatically when a parameter is modified. This ability to track parameter's modified status is needed for automatic update of WCSLIB's prjprm structure (which may be a more-time intensive operation) *only as required*. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.dirty = True def validate(self, value): super().validate(value) self.dirty = True
_ParameterDS
python
allegroai__clearml
clearml/backend_api/services/v2_13/models.py
{ "start": 126783, "end": 133152 }
class ____(Request): """ Create or update a new model for a task :param task: Task id :type task: str :param uri: URI for the model. Exactly one of uri or override_model_id is a required. :type uri: str :param name: Model name Unique within the company. :type name: str :param comment: Model comment :type comment: str :param tags: User-defined tags list :type tags: Sequence[str] :param system_tags: System tags list. This field is reserved for system use, please don't use it. :type system_tags: Sequence[str] :param override_model_id: Override model ID. If provided, this model is updated in the task. Exactly one of override_model_id or uri is required. :type override_model_id: str :param iteration: Iteration (used to update task statistics) :type iteration: int """ _service = "models" _action = "update_for_task" _version = "2.13" _schema = { "definitions": {}, "properties": { "comment": {"description": "Model comment", "type": "string"}, "iteration": { "description": "Iteration (used to update task statistics)", "type": "integer", }, "name": { "description": "Model name Unique within the company.", "type": "string", }, "override_model_id": { "description": "Override model ID. If provided, this model is updated in the task. Exactly one of override_model_id or uri is required.", "type": "string", }, "system_tags": { "description": "System tags list. This field is reserved for system use, please don't use it.", "items": {"type": "string"}, "type": "array", }, "tags": { "description": "User-defined tags list", "items": {"type": "string"}, "type": "array", }, "task": {"description": "Task id", "type": "string"}, "uri": { "description": "URI for the model. 
Exactly one of uri or override_model_id is a required.", "type": "string", }, }, "required": ["task"], "type": "object", } def __init__( self, task: str, uri: Optional[str] = None, name: Optional[str] = None, comment: Optional[str] = None, tags: Optional[List[str]] = None, system_tags: Optional[List[str]] = None, override_model_id: Optional[str] = None, iteration: Optional[int] = None, **kwargs: Any ) -> None: super(UpdateForTaskRequest, self).__init__(**kwargs) self.task = task self.uri = uri self.name = name self.comment = comment self.tags = tags self.system_tags = system_tags self.override_model_id = override_model_id self.iteration = iteration @schema_property("task") def task(self) -> str: return self._property_task @task.setter def task(self, value: str) -> None: if value is None: self._property_task = None return self.assert_isinstance(value, "task", six.string_types) self._property_task = value @schema_property("uri") def uri(self) -> Optional[str]: return self._property_uri @uri.setter def uri(self, value: Optional[str]) -> None: if value is None: self._property_uri = None return self.assert_isinstance(value, "uri", six.string_types) self._property_uri = value @schema_property("name") def name(self) -> Optional[str]: return self._property_name @name.setter def name(self, value: Optional[str]) -> None: if value is None: self._property_name = None return self.assert_isinstance(value, "name", six.string_types) self._property_name = value @schema_property("comment") def comment(self) -> Optional[str]: return self._property_comment @comment.setter def comment(self, value: Optional[str]) -> None: if value is None: self._property_comment = None return self.assert_isinstance(value, "comment", six.string_types) self._property_comment = value @schema_property("tags") def tags(self) -> Optional[List[str]]: return self._property_tags @tags.setter def tags(self, value: Optional[List[str]]) -> None: if value is None: self._property_tags = None return 
self.assert_isinstance(value, "tags", (list, tuple)) self.assert_isinstance(value, "tags", six.string_types, is_array=True) self._property_tags = value @schema_property("system_tags") def system_tags(self) -> Optional[List[str]]: return self._property_system_tags @system_tags.setter def system_tags(self, value: Optional[List[str]]) -> None: if value is None: self._property_system_tags = None return self.assert_isinstance(value, "system_tags", (list, tuple)) self.assert_isinstance(value, "system_tags", six.string_types, is_array=True) self._property_system_tags = value @schema_property("override_model_id") def override_model_id(self) -> Optional[str]: return self._property_override_model_id @override_model_id.setter def override_model_id(self, value: Optional[str]) -> None: if value is None: self._property_override_model_id = None return self.assert_isinstance(value, "override_model_id", six.string_types) self._property_override_model_id = value @schema_property("iteration") def iteration(self) -> Optional[int]: return self._property_iteration @iteration.setter def iteration(self, value: Optional[int]) -> None: if value is None: self._property_iteration = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "iteration", six.integer_types) self._property_iteration = value
UpdateForTaskRequest
python
coleifer__peewee
tests/regressions.py
{ "start": 55384, "end": 55455 }
class ____(TestModel): name = TextField() value = IntegerField()
DF
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_autofilter11.py
{ "start": 315, "end": 2462 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("autofilter11.xlsx") self.set_text_file("autofilter_data.txt") def test_create_file(self): """ Test the creation of a simple XlsxWriter file with an autofilter. """ workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() # Set the autofilter. worksheet.autofilter("A1:D51") # Add filter criteria. worksheet.filter_column_list("C", [3000, 5000, 8000]) # Open a text file with autofilter example data. textfile = open(self.txt_filename) # Read the headers from the first line of the input file. headers = textfile.readline().strip("\n").split() # Write out the headers. worksheet.write_row("A1", headers) # Start writing data after the headers. row = 1 # Read the rest of the text file and write it to the worksheet. for line in textfile: # Split the input data based on whitespace. data = line.strip("\n").split() # Convert the number data from the text file. for i, item in enumerate(data): try: data[i] = float(item) except ValueError: pass # Get some of the field data. region = data[0] volume = int(data[2]) # Check for rows that match the filter. if volume == 3000 or volume == 5000 or volume == 8000: # Row matches the filter, no further action required. pass else: # We need to hide rows that don't match the filter. worksheet.set_row(row, options={"hidden": True}) # Write out the row data. worksheet.write_row(row, 0, data) # Move on to the next worksheet row. row += 1 textfile.close() workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
apache__airflow
task-sdk/src/airflow/sdk/definitions/xcom_arg.py
{ "start": 16195, "end": 16948 }
class ____(Sequence): values: Sequence[Sequence | dict] fillvalue: Any = attrs.field(default=NOTSET, kw_only=True) @staticmethod def _get_or_fill(container: Sequence | dict, index: Any, fillvalue: Any) -> Any: try: return container[index] except (IndexError, KeyError): return fillvalue def __getitem__(self, index: Any) -> Any: if index >= len(self): raise IndexError(index) return tuple(self._get_or_fill(value, index, self.fillvalue) for value in self.values) def __len__(self) -> int: lengths = (len(v) for v in self.values) if is_arg_set(self.fillvalue): return max(lengths) return min(lengths) @attrs.define
_ZipResult
python
ansible__ansible
lib/ansible/module_utils/urls.py
{ "start": 6891, "end": 6978 }
class ____(Exception): """Failed to connect to the server""" pass
ConnectionError
python
plotly__plotly.py
plotly/io/_base_renderers.py
{ "start": 5827, "end": 6807 }
class ____(ImageRenderer): """ Renderer to display figures as static PDF images. This renderer requires either the kaleido package or the orca command-line utility and is compatible with JupyterLab and the LaTeX-based nbconvert export to PDF. mime type: 'application/pdf' """ def __init__(self, width=None, height=None, scale=None, engine=None): super(PdfRenderer, self).__init__( mime_type="application/pdf", b64_encode=True, format="pdf", width=width, height=height, scale=scale, engine=engine, ) # HTML # Build script to set global PlotlyConfig object. This must execute before # plotly.js is loaded. _window_plotly_config = """\ window.PlotlyConfig = {MathJaxConfig: 'local'};""" _mathjax_config = """\ if (window.MathJax && window.MathJax.Hub && window.MathJax.Hub.Config) {window.MathJax.Hub.Config({SVG: {font: "STIX-Web"}});}"""
PdfRenderer
python
huggingface__transformers
tests/models/videomae/test_image_processing_videomae.py
{ "start": 3010, "end": 9531 }
class ____(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = VideoMAEImageProcessor if is_vision_available() else None def setUp(self): super().setUp() self.image_processor_tester = VideoMAEImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "do_center_crop")) self.assertTrue(hasattr(image_processing, "size")) def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"shortest_edge": 18}) self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18}) image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84) self.assertEqual(image_processor.size, {"shortest_edge": 42}) self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84}) def test_call_pil(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL videos video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False) for video in video_inputs: self.assertIsInstance(video, list) self.assertIsInstance(video[0], Image.Image) # Test not batched input encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values expected_output_video_shape = self.image_processor_tester.expected_output_image_shape([encoded_videos[0]]) self.assertEqual(tuple(encoded_videos.shape), (1, *expected_output_video_shape)) # Test batched encoded_videos 
= image_processing(video_inputs, return_tensors="pt").pixel_values expected_output_video_shape = self.image_processor_tester.expected_output_image_shape(encoded_videos) self.assertEqual( tuple(encoded_videos.shape), (self.image_processor_tester.batch_size, *expected_output_video_shape) ) def test_call_numpy(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False, numpify=True) for video in video_inputs: self.assertIsInstance(video, list) self.assertIsInstance(video[0], np.ndarray) # Test not batched input encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values expected_output_video_shape = self.image_processor_tester.expected_output_image_shape([encoded_videos[0]]) self.assertEqual(tuple(encoded_videos.shape), (1, *expected_output_video_shape)) # Test batched encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values expected_output_video_shape = self.image_processor_tester.expected_output_image_shape(encoded_videos) self.assertEqual( tuple(encoded_videos.shape), (self.image_processor_tester.batch_size, *expected_output_video_shape) ) def test_call_numpy_4_channels(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors self.image_processor_tester.num_channels = 4 video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False, numpify=True) for video in video_inputs: self.assertIsInstance(video, list) self.assertIsInstance(video[0], np.ndarray) # Test not batched input encoded_videos = image_processing( video_inputs[0], return_tensors="pt", image_mean=(0.0, 0.0, 0.0, 0.0), image_std=(1.0, 1.0, 1.0, 1.0), input_data_format="channels_first", ).pixel_values expected_output_video_shape = 
self.image_processor_tester.expected_output_image_shape([encoded_videos[0]]) self.assertEqual(tuple(encoded_videos.shape), (1, *expected_output_video_shape)) # Test batched encoded_videos = image_processing( video_inputs, return_tensors="pt", image_mean=(0.0, 0.0, 0.0, 0.0), image_std=(1.0, 1.0, 1.0, 1.0), input_data_format="channels_first", ).pixel_values expected_output_video_shape = self.image_processor_tester.expected_output_image_shape(encoded_videos) self.assertEqual( tuple(encoded_videos.shape), (self.image_processor_tester.batch_size, *expected_output_video_shape) ) self.image_processor_tester.num_channels = 3 def test_call_pytorch(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False, torchify=True) for video in video_inputs: self.assertIsInstance(video, list) self.assertIsInstance(video[0], torch.Tensor) # Test not batched input encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values expected_output_video_shape = self.image_processor_tester.expected_output_image_shape([encoded_videos[0]]) self.assertEqual(tuple(encoded_videos.shape), (1, *expected_output_video_shape)) # Test batched encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values expected_output_video_shape = self.image_processor_tester.expected_output_image_shape(encoded_videos) self.assertEqual( tuple(encoded_videos.shape), (self.image_processor_tester.batch_size, *expected_output_video_shape) )
VideoMAEImageProcessingTest
python
keras-team__keras
keras/src/layers/pooling/average_pooling_test.py
{ "start": 4318, "end": 7860 }
class ____(testing.TestCase): @parameterized.parameters( (2, 1, "valid", "channels_last", (3, 5, 4), (3, 4, 4)), (2, 1, "same", "channels_first", (3, 5, 4), (3, 5, 4)), ((2,), (2,), "valid", "channels_last", (3, 5, 4), (3, 2, 4)), ) def test_average_pooling1d( self, pool_size, strides, padding, data_format, input_shape, output_shape, ): self.run_layer_test( layers.AveragePooling1D, init_kwargs={ "pool_size": pool_size, "strides": strides, "padding": padding, "data_format": data_format, }, input_shape=input_shape, expected_output_shape=output_shape, expected_num_trainable_weights=0, expected_num_non_trainable_weights=0, expected_num_losses=0, supports_masking=False, assert_built_after_instantiation=True, ) @parameterized.parameters( (2, 1, "valid", "channels_last", (3, 5, 5, 4), (3, 4, 4, 4)), (2, 1, "same", "channels_last", (3, 5, 5, 4), (3, 5, 5, 4)), (2, 1, "valid", "channels_first", (3, 5, 5, 4), (3, 5, 4, 3)), (2, 1, "same", "channels_first", (3, 5, 5, 4), (3, 5, 5, 4)), ((2, 3), (2, 2), "valid", "channels_last", (3, 5, 5, 4), (3, 2, 2, 4)), ((2, 3), (2, 2), "same", "channels_last", (3, 5, 5, 4), (3, 3, 3, 4)), ((2, 3), (3, 3), "same", "channels_first", (3, 5, 5, 4), (3, 5, 2, 2)), ) def test_average_pooling2d( self, pool_size, strides, padding, data_format, input_shape, output_shape, ): self.run_layer_test( layers.AveragePooling2D, init_kwargs={ "pool_size": pool_size, "strides": strides, "padding": padding, "data_format": data_format, }, input_shape=input_shape, expected_output_shape=output_shape, expected_num_trainable_weights=0, expected_num_non_trainable_weights=0, expected_num_losses=0, supports_masking=False, assert_built_after_instantiation=True, ) @parameterized.parameters( (2, 1, "valid", "channels_last", (3, 5, 5, 5, 4), (3, 4, 4, 4, 4)), (2, 1, "same", "channels_first", (3, 5, 5, 5, 4), (3, 5, 5, 5, 4)), ( (2, 3, 2), (2, 2, 1), "valid", "channels_last", (3, 5, 5, 5, 4), (3, 2, 2, 4, 4), ), ) def test_average_pooling3d( self, pool_size, strides, 
padding, data_format, input_shape, output_shape, ): self.run_layer_test( layers.AveragePooling3D, init_kwargs={ "pool_size": pool_size, "strides": strides, "padding": padding, "data_format": data_format, }, input_shape=input_shape, expected_output_shape=output_shape, expected_num_trainable_weights=0, expected_num_non_trainable_weights=0, expected_num_losses=0, supports_masking=False, # Incomplete op support on tensorflow. run_mixed_precision_check=False, assert_built_after_instantiation=True, )
AveragePoolingBasicTest
python
spyder-ide__spyder
external-deps/spyder-kernels/spyder_kernels/customize/umr.py
{ "start": 258, "end": 3554 }
class ____: """ User Module Reloader (UMR) aims at deleting user modules to force Python to deeply reload them during import pathlist [list]: blacklist in terms of module path namelist [list]: blacklist in terms of module name """ def __init__(self, namelist=None, pathlist=None, shell=None): if namelist is None: namelist = [] else: try: namelist = namelist.split(',') except Exception: namelist = [] # Spyder modules spy_modules = ['spyder_kernels'] # Matplotlib modules mpl_modules = ['matplotlib', 'tkinter', 'Tkinter'] # Add other, necessary modules to the UMR blacklist # astropy: See spyder-ide/spyder#6962 # pytorch: See spyder-ide/spyder#7041 # fastmat: See spyder-ide/spyder#7190 # pythoncom: See spyder-ide/spyder#7190 # tensorflow: See spyder-ide/spyder#8697 other_modules = ['pytorch', 'pythoncom', 'tensorflow'] self.namelist = namelist + spy_modules + mpl_modules + other_modules self.pathlist = pathlist self._shell = shell # List of previously loaded modules self.previous_modules = list(sys.modules.keys()) # Check if the UMR is enabled or not enabled = os.environ.get("SPY_UMR_ENABLED", "") self.enabled = enabled.lower() == "true" # Check if the UMR should print the list of reloaded modules or not verbose = os.environ.get("SPY_UMR_VERBOSE", "") self.verbose = verbose.lower() == "true" def is_module_reloadable(self, module, modname): """Decide if a module is reloadable or not.""" if ( path_is_library(getattr(module, '__file__', None), self.pathlist) or self.is_module_in_namelist(modname) ): return False else: return True def is_module_in_namelist(self, modname): """Decide if a module can be reloaded or not according to its name.""" return set(modname.split('.')) & set(self.namelist) def run(self): """ Delete user modules to force Python to deeply reload them Do not del modules which are considered as system modules, i.e. 
modules installed in subdirectories of Python interpreter's binary Do not del C modules """ modnames_to_reload = [] for modname, module in list(sys.modules.items()): if modname not in self.previous_modules: # Decide if a module can be reloaded or not if self.is_module_reloadable(module, modname): modnames_to_reload.append(modname) del sys.modules[modname] else: continue # Report reloaded modules if self.verbose and modnames_to_reload: modnames = modnames_to_reload colors = {"dark": "33", "light": "31"} color = colors["dark"] if self._shell: color = colors[self._shell.get_spyder_theme()] content = ": "+", ".join(modnames) print(f"\x1b[4;{color}mReloaded modules\x1b[24m{content}\x1b[0m") return modnames_to_reload
UserModuleReloader
python
anthropics__anthropic-sdk-python
tests/lib/streaming/test_messages.py
{ "start": 6388, "end": 12217 }
class ____: @pytest.mark.asyncio @pytest.mark.respx(base_url=base_url) async def test_basic_response(self, respx_mock: MockRouter) -> None: respx_mock.post("/v1/messages").mock( return_value=httpx.Response(200, content=to_async_iter(get_response("basic_response.txt"))) ) async with async_client.messages.stream( max_tokens=1024, messages=[ { "role": "user", "content": "Say hello there!", } ], model="claude-3-opus-latest", ) as stream: with pytest.warns(DeprecationWarning): assert isinstance(cast(Any, stream), AsyncStream) assert_basic_response([event async for event in stream], await stream.get_final_message()) @pytest.mark.asyncio @pytest.mark.respx(base_url=base_url) async def test_context_manager(self, respx_mock: MockRouter) -> None: respx_mock.post("/v1/messages").mock( return_value=httpx.Response(200, content=to_async_iter(get_response("basic_response.txt"))) ) async with async_client.messages.stream( max_tokens=1024, messages=[ { "role": "user", "content": "Say hello there!", } ], model="claude-3-opus-latest", ) as stream: assert not stream.response.is_closed # response should be closed even if the body isn't read assert stream.response.is_closed @pytest.mark.asyncio @pytest.mark.respx(base_url=base_url) async def test_deprecated_model_warning_stream(self, respx_mock: MockRouter) -> None: for deprecated_model in DEPRECATED_MODELS: respx_mock.post("/v1/messages").mock( return_value=httpx.Response(200, content=to_async_iter(get_response("basic_response.txt"))) ) with pytest.warns(DeprecationWarning, match=f"The model '{deprecated_model}' is deprecated"): async with async_client.messages.stream( max_tokens=1024, messages=[{"role": "user", "content": "Hello"}], model=deprecated_model, ) as stream: # Consume the stream to ensure the warning is triggered await stream.get_final_message() @pytest.mark.asyncio @pytest.mark.respx(base_url=base_url) async def test_tool_use(self, respx_mock: MockRouter) -> None: respx_mock.post("/v1/messages").mock( 
return_value=httpx.Response(200, content=to_async_iter(get_response("tool_use_response.txt"))) ) async with async_client.messages.stream( max_tokens=1024, messages=[ { "role": "user", "content": "Say hello there!", } ], model="claude-sonnet-4-20250514", ) as stream: with pytest.warns(DeprecationWarning): assert isinstance(cast(Any, stream), AsyncStream) assert_tool_use_response([event async for event in stream], await stream.get_final_message()) @pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) def test_stream_method_definition_in_sync(sync: bool) -> None: client: Anthropic | AsyncAnthropic = sync_client if sync else async_client sig = inspect.signature(client.messages.stream) generated_sig = inspect.signature(client.messages.create) errors: list[str] = [] for name, generated_param in generated_sig.parameters.items(): if name == "stream": # intentionally excluded continue custom_param = sig.parameters.get(name) if not custom_param: errors.append(f"the `{name}` param is missing") continue if custom_param.annotation != generated_param.annotation: errors.append( f"types for the `{name}` param are do not match; generated={repr(generated_param.annotation)} custom={repr(custom_param.annotation)}" ) continue if errors: raise AssertionError( f"{len(errors)} errors encountered with the {'sync' if sync else 'async'} client `messages.stream()` method:\n\n" + "\n\n".join(errors) ) # go through all the ContentBlock types to make sure the type alias is up to date # with any type that has an input property of type object @pytest.mark.skipif(PYDANTIC_V1, reason="only applicable in pydantic v2") def test_tracks_tool_input_type_alias_is_up_to_date() -> None: from typing import get_args from pydantic import BaseModel from anthropic.types.content_block import ContentBlock # Get the content block union type content_block_union = get_args(ContentBlock)[0] # Get all types from ContentBlock union content_block_types = get_args(content_block_union) # Types that should 
have an input property types_with_input: Set[Any] = set() # Check each type to see if it has an input property in its model_fields for block_type in content_block_types: if issubclass(block_type, BaseModel) and "input" in block_type.model_fields: types_with_input.add(block_type) # Get the types included in TRACKS_TOOL_INPUT tracked_types = TRACKS_TOOL_INPUT # Make sure all types with input are tracked for block_type in types_with_input: assert block_type in tracked_types, ( f"ContentBlock type {block_type.__name__} has an input property, " f"but is not included in TRACKS_TOOL_INPUT. You probably need to update the TRACKS_TOOL_INPUT type alias." )
TestAsyncMessages
python
psf__black
tests/data/miscellaneous/force_pyi.py
{ "start": 223, "end": 270 }
class ____: ... @baz def foo() -> None: ...
E
python
getsentry__sentry
tests/sentry/snuba/test_query_subscription_consumer.py
{ "start": 4869, "end": 7006 }
class ____(BaseQuerySubscriptionTest, unittest.TestCase): def run_test(self, message): parse_message_value(json.dumps(message).encode(), self.jsoncodec) def run_invalid_schema_test(self, message): with pytest.raises(InvalidSchemaError): self.run_test(message) def run_invalid_payload_test(self, remove_fields=None, update_fields=None): payload = deepcopy(self.valid_payload) if remove_fields: for field in remove_fields: payload.pop(field) if update_fields: payload.update(update_fields) self.run_invalid_schema_test({"version": 3, "payload": payload}) def test_invalid_payload(self) -> None: self.run_invalid_payload_test(remove_fields=["subscription_id"]) self.run_invalid_payload_test(remove_fields=["result"]) self.run_invalid_payload_test(remove_fields=["timestamp"]) self.run_invalid_payload_test(remove_fields=["entity"]) self.run_invalid_payload_test(update_fields={"subscription_id": ""}) self.run_invalid_payload_test(update_fields={"result": {}}) self.run_invalid_payload_test(update_fields={"result": {"hello": "hi"}}) self.run_invalid_payload_test(update_fields={"timestamp": -1}) self.run_invalid_payload_test(update_fields={"entity": -1}) def test_invalid_version(self) -> None: with pytest.raises(InvalidSchemaError) as excinfo: self.run_test({"version": 50, "payload": self.valid_payload}) assert str(excinfo.value) == "Message wrapper does not match schema" def test_valid(self) -> None: self.run_test({"version": 3, "payload": self.valid_payload}) def test_valid_nan(self) -> None: payload = deepcopy(self.valid_payload) payload["result"]["data"][0]["hello"] = float("nan") self.run_test({"version": 3, "payload": payload}) def test_invalid_wrapper(self) -> None: self.run_invalid_schema_test({}) self.run_invalid_schema_test({"version": 1}) self.run_invalid_schema_test({"payload": self.valid_payload})
ParseMessageValueTest
python
falconry__falcon
falcon/asgi_spec.py
{ "start": 1637, "end": 2056 }
class ____: """WebSocket close codes used by the Falcon ASGI framework. See also: https://tools.ietf.org/html/rfc6455#section-7.4 """ NORMAL = 1000 SERVER_ERROR = 1011 FORBIDDEN = 3403 PATH_NOT_FOUND = 3404 HANDLER_NOT_FOUND = 3405 # TODO: use a typed dict for event dicts AsgiEvent = Mapping[str, Any] # TODO: use a typed dict for send msg dicts AsgiSendMsg = dict[str, Any]
WSCloseCode
python
tox-dev__tox
src/tox/config/source/toml_pyproject.py
{ "start": 1785, "end": 1857 }
class ____(TomlSection): PREFIX = ("tool", "tox")
TomlPyProjectSection
python
sqlalchemy__sqlalchemy
test/dialect/postgresql/test_types.py
{ "start": 58609, "end": 61628 }
class ____(fixtures.TablesTest): __backend__ = True """Test timezone-aware datetimes. psycopg will return a datetime with a tzinfo attached to it, if postgresql returns it. python then will not let you compare a datetime with a tzinfo to a datetime that doesn't have one. this test illustrates two ways to have datetime types with and without timezone info. """ __only_on__ = "postgresql" @classmethod def define_tables(cls, metadata): # current_timestamp() in postgresql is assumed to return # TIMESTAMP WITH TIMEZONE Table( "tztable", metadata, Column("id", Integer, primary_key=True), Column( "date", DateTime(timezone=True), onupdate=func.current_timestamp(), ), Column("name", String(20)), ) Table( "notztable", metadata, Column("id", Integer, primary_key=True), Column( "date", DateTime(timezone=False), onupdate=cast( func.current_timestamp(), DateTime(timezone=False) ), ), Column("name", String(20)), ) def test_with_timezone(self, connection): tztable, notztable = self.tables("tztable", "notztable") # get a date with a tzinfo somedate = connection.scalar(func.current_timestamp().select()) assert somedate.tzinfo connection.execute( tztable.insert(), dict(id=1, name="row1", date=somedate) ) row = connection.execute( select(tztable.c.date).where(tztable.c.id == 1) ).first() eq_(row[0], somedate) eq_( somedate.tzinfo.utcoffset(somedate), row[0].tzinfo.utcoffset(row[0]), ) result = connection.execute( tztable.update() .where(tztable.c.id == 1) .returning(tztable.c.date), dict( name="newname", ), ) row = result.first() assert row[0] >= somedate def test_without_timezone(self, connection): # get a date without a tzinfo tztable, notztable = self.tables("tztable", "notztable") somedate = datetime.datetime(2005, 10, 20, 11, 52, 0) assert not somedate.tzinfo connection.execute( notztable.insert(), dict(id=1, name="row1", date=somedate) ) row = connection.execute( select(notztable.c.date).where(notztable.c.id == 1) ).first() eq_(row[0], somedate) eq_(row[0].tzinfo, None) result = 
connection.execute( notztable.update() .where(notztable.c.id == 1) .returning(notztable.c.date), dict( name="newname", ), ) row = result.first() assert row[0] >= somedate
TimezoneTest
python
getlogbook__logbook
src/logbook/ticketing.py
{ "start": 1624, "end": 1925 }
class ____(LogRecord): """Represents an occurrence of a ticket.""" def __init__(self, db, row): self.update_from_dict(json.loads(row.data)) self.db = db self.time = row.time self.ticket_id = row.ticket_id self.occurrence_id = row.occurrence_id
Occurrence
python
coleifer__peewee
tests/regressions.py
{ "start": 35113, "end": 37056 }
class ____(ModelTestCase): requires = [Site, Page, PageItem] def setUp(self): super(TestModelFilterJoinOrdering, self).setUp() with self.database.atomic(): s1, s2 = [Site.create(url=s) for s in ('s1', 's2')] p11, p12, p21 = [Page.create(site=s, title=t) for s, t in ((s1, 'p1-1'), (s1, 'p1-2'), (s2, 'p2-1'))] items = ( (p11, 's1p1i1'), (p11, 's1p1i2'), (p11, 's1p1i3'), (p12, 's1p2i1'), (p21, 's2p1i1')) PageItem.insert_many(items).execute() def test_model_filter_join_ordering(self): q = PageItem.filter(page__site__url='s1').order_by(PageItem.content) self.assertSQL(q, ( 'SELECT "t1"."id", "t1"."page_id", "t1"."content" ' 'FROM "page_item" AS "t1" ' 'INNER JOIN "page" AS "t2" ON ("t1"."page_id" = "t2"."id") ' 'INNER JOIN "site" AS "t3" ON ("t2"."site_id" = "t3"."id") ' 'WHERE ("t3"."url" = ?) ORDER BY "t1"."content"'), ['s1']) def assertQ(q): with self.assertQueryCount(1): self.assertEqual([pi.content for pi in q], ['s1p1i1', 's1p1i2', 's1p1i3', 's1p2i1']) assertQ(q) sid = Site.get(Site.url == 's1').id q = (PageItem .filter(page__site__url='s1', page__site__id=sid) .order_by(PageItem.content)) assertQ(q) q = (PageItem .filter(page__site__id=sid) .filter(page__site__url='s1') .order_by(PageItem.content)) assertQ(q) q = (PageItem .filter(page__site__id=sid) .filter(DQ(page__title='p1-1') | DQ(page__title='p1-2')) .filter(page__site__url='s1') .order_by(PageItem.content)) assertQ(q)
TestModelFilterJoinOrdering
python
celery__celery
t/unit/utils/test_saferepr.py
{ "start": 2005, "end": 2045 }
class ____(frozenset): pass
frozenset2
python
django__django
tests/order_with_respect_to/tests.py
{ "start": 407, "end": 1080 }
class ____(SimpleTestCase): @isolate_apps("order_with_respect_to") def test_duplicate_order_field(self): class Bar(models.Model): class Meta: app_label = "order_with_respect_to" class Foo(models.Model): bar = models.ForeignKey(Bar, models.CASCADE) order = models.OrderWrt() class Meta: order_with_respect_to = "bar" app_label = "order_with_respect_to" count = 0 for field in Foo._meta.local_fields: if isinstance(field, models.OrderWrt): count += 1 self.assertEqual(count, 1)
OrderWithRespectToTests
python
sanic-org__sanic
sanic/logging/formatter.py
{ "start": 3989, "end": 6069 }
class ____(AutoFormatter): """ The DebugFormatter is used for development and debugging purposes. It can be used directly, or it will be automatically selected if the environment is set up for development and is using the AutoFormatter. """ IDENT_LIMIT = 5 MESSAGE_START = 23 DATE_FORMAT = "%H:%M:%S" def _set_levelname(self, record: logging.LogRecord) -> None: if len(record.levelname) > 5: record.levelname = record.levelname[:4] super()._set_levelname(record) def formatException(self, ei): # no cov orig = super().formatException(ei) if not self.ATTY or self.NO_COLOR: return orig colored_traceback = [] lines = orig.splitlines() for idx, line in enumerate(lines): if line.startswith(" File"): line = self._color_file_line(line) elif line.startswith(" "): line = self._color_code_line(line) elif ( "Error" in line or "Exception" in line or len(lines) - 1 == idx ): line = self._color_exception_line(line) colored_traceback.append(line) return "\n".join(colored_traceback) def _color_exception_line(self, line: str) -> str: # no cov match = EXCEPTION_LINE_RE.match(line) if not match: return line exc = match.group("exc") message = match.group("message") return f"{c.SANIC}{c.BOLD}{exc}{c.END}: {c.BOLD}{message}{c.END}" def _color_file_line(self, line: str) -> str: # no cov match = FILE_LINE_RE.search(line) if not match: return line path = match.group("path") line_num = match.group("line_num") location = match.group("location") return ( f' File "{path}", line {c.CYAN}{c.BOLD}{line_num}{c.END}, ' f"in {c.BLUE}{c.BOLD}{location}{c.END}" ) def _color_code_line(self, line: str) -> str: # no cov return f"{c.YELLOW}{line}{c.END}"
DebugFormatter
python
django__django
tests/db_functions/math/test_acos.py
{ "start": 269, "end": 2346 }
class ____(TestCase): def test_null(self): IntegerModel.objects.create() obj = IntegerModel.objects.annotate(null_acos=ACos("normal")).first() self.assertIsNone(obj.null_acos) def test_decimal(self): DecimalModel.objects.create(n1=Decimal("-0.9"), n2=Decimal("0.6")) obj = DecimalModel.objects.annotate( n1_acos=ACos("n1"), n2_acos=ACos("n2") ).first() self.assertIsInstance(obj.n1_acos, Decimal) self.assertIsInstance(obj.n2_acos, Decimal) self.assertAlmostEqual(obj.n1_acos, Decimal(math.acos(obj.n1))) self.assertAlmostEqual(obj.n2_acos, Decimal(math.acos(obj.n2))) def test_float(self): FloatModel.objects.create(f1=-0.5, f2=0.33) obj = FloatModel.objects.annotate( f1_acos=ACos("f1"), f2_acos=ACos("f2") ).first() self.assertIsInstance(obj.f1_acos, float) self.assertIsInstance(obj.f2_acos, float) self.assertAlmostEqual(obj.f1_acos, math.acos(obj.f1)) self.assertAlmostEqual(obj.f2_acos, math.acos(obj.f2)) def test_integer(self): IntegerModel.objects.create(small=0, normal=1, big=-1) obj = IntegerModel.objects.annotate( small_acos=ACos("small"), normal_acos=ACos("normal"), big_acos=ACos("big"), ).first() self.assertIsInstance(obj.small_acos, float) self.assertIsInstance(obj.normal_acos, float) self.assertIsInstance(obj.big_acos, float) self.assertAlmostEqual(obj.small_acos, math.acos(obj.small)) self.assertAlmostEqual(obj.normal_acos, math.acos(obj.normal)) self.assertAlmostEqual(obj.big_acos, math.acos(obj.big)) def test_transform(self): with register_lookup(DecimalField, ACos): DecimalModel.objects.create(n1=Decimal("0.5"), n2=Decimal("0")) DecimalModel.objects.create(n1=Decimal("-0.9"), n2=Decimal("0")) obj = DecimalModel.objects.filter(n1__acos__lt=2).get() self.assertEqual(obj.n1, Decimal("0.5"))
ACosTests
python
cherrypy__cherrypy
cherrypy/test/test_core.py
{ "start": 522, "end": 28903 }
class ____(helper.CPWebCase): @staticmethod def setup_server(): class Root: @cherrypy.expose def index(self): return 'hello' favicon_ico = tools.staticfile.handler(filename=favicon_path) @cherrypy.expose def defct(self, newct): newct = 'text/%s' % newct cherrypy.config.update( { 'tools.response_headers.on': True, 'tools.response_headers.headers': [ ('Content-Type', newct), ], }, ) @cherrypy.expose def baseurl(self, path_info, relative=None): return cherrypy.url(path_info, relative=bool(relative)) root = Root() root.expose_dec = ExposeExamples() class TestType(type): """Metaclass which automatically exposes all functions in each subclass, and adds an instance of the subclass as an attribute of root. """ def __init__(cls, name, bases, dct): type.__init__(cls, name, bases, dct) for value in dct.values(): if isinstance(value, types.FunctionType): value.exposed = True setattr(root, name.lower(), cls()) Test = TestType('Test', (object,), {}) @cherrypy.config(**{'tools.trailing_slash.on': False}) class URL(Test): def index(self, path_info, relative=None): if relative != 'server': relative = bool(relative) return cherrypy.url(path_info, relative=relative) def leaf(self, path_info, relative=None): if relative != 'server': relative = bool(relative) return cherrypy.url(path_info, relative=relative) def qs(self, qs): return cherrypy.url(qs=qs) def log_status(): Status.statuses.append(cherrypy.response.status) cherrypy.tools.log_status = cherrypy.Tool( 'on_end_resource', log_status, ) class Status(Test): def index(self): return 'normal' def blank(self): cherrypy.response.status = '' # According to RFC 2616, new status codes are OK as long as they # are between 100 and 599. # Here is an illegal code... def illegal(self): cherrypy.response.status = 781 return 'oops' # ...and here is an unknown but legal code. 
def unknown(self): cherrypy.response.status = '431 My custom error' return 'funky' # Non-numeric code def bad(self): cherrypy.response.status = 'error' return 'bad news' statuses = [] @cherrypy.config(**{'tools.log_status.on': True}) def on_end_resource_stage(self): return repr(self.statuses) class Redirect(Test): @cherrypy.config( **{ 'tools.err_redirect.on': True, 'tools.err_redirect.url': '/errpage', 'tools.err_redirect.internal': False, }, ) class Error: @cherrypy.expose def index(self): raise NameError('redirect_test') error = Error() def index(self): return 'child' def custom(self, url, code): raise cherrypy.HTTPRedirect(url, code) @cherrypy.config(**{'tools.trailing_slash.extra': True}) def by_code(self, code): raise cherrypy.HTTPRedirect('somewhere%20else', code) def nomodify(self): raise cherrypy.HTTPRedirect('', 304) def proxy(self): raise cherrypy.HTTPRedirect('proxy', 305) def stringify(self): return str(cherrypy.HTTPRedirect('/')) def fragment(self, frag): raise cherrypy.HTTPRedirect('/some/url#%s' % frag) def url_with_quote(self): raise cherrypy.HTTPRedirect('/some"url/that\'we/want') def url_with_xss(self): raise cherrypy.HTTPRedirect( "/some<script>alert(1);</script>url/that'we/want", ) def url_with_unicode(self): raise cherrypy.HTTPRedirect(ntou('тест', 'utf-8')) def login_redir(): if not getattr(cherrypy.request, 'login', None): raise cherrypy.InternalRedirect('/internalredirect/login') tools.login_redir = _cptools.Tool('before_handler', login_redir) def redir_custom(): raise cherrypy.InternalRedirect('/internalredirect/custom_err') class InternalRedirect(Test): def index(self): raise cherrypy.InternalRedirect('/') @cherrypy.expose @cherrypy.config(**{'hooks.before_error_response': redir_custom}) def choke(self): return 3 / 0 def relative(self, a, b): raise cherrypy.InternalRedirect('cousin?t=6') def cousin(self, t): assert cherrypy.request.prev.closed return cherrypy.request.prev.query_string def petshop(self, user_id): if user_id == 'parrot': # 
Trade it for a slug when redirecting raise cherrypy.InternalRedirect( '/image/getImagesByUser?user_id=slug', ) elif user_id == 'terrier': # Trade it for a fish when redirecting raise cherrypy.InternalRedirect( '/image/getImagesByUser?user_id=fish', ) else: # This should pass the user_id through to getImagesByUser raise cherrypy.InternalRedirect( '/image/getImagesByUser?user_id=%s' % str(user_id), ) # We support Python 2.3, but the @-deco syntax would look like # this: # @tools.login_redir() def secure(self): return 'Welcome!' secure = tools.login_redir()(secure) # Since calling the tool returns the same function you pass in, # you could skip binding the return value, and just write: # tools.login_redir()(secure) def login(self): return 'Please log in' def custom_err(self): return 'Something went horribly wrong.' @cherrypy.config(**{'hooks.before_request_body': redir_custom}) def early_ir(self, arg): return 'whatever' class Image(Test): def getImagesByUser(self, user_id): return '0 images for %s' % user_id class Flatten(Test): def as_string(self): return 'content' def as_list(self): return ['con', 'tent'] def as_yield(self): yield b'content' @cherrypy.config(**{'tools.flatten.on': True}) def as_dblyield(self): yield self.as_yield() def as_refyield(self): for chunk in self.as_yield(): yield chunk class Ranges(Test): def get_ranges(self, bytes): return repr(httputil.get_ranges('bytes=%s' % bytes, 8)) def slice_file(self): path = os.path.join(os.getcwd(), os.path.dirname(__file__)) return static.serve_file( os.path.join(path, 'static/index.html'), ) class Cookies(Test): def single(self, name): cookie = cherrypy.request.cookie[name] # Python2's SimpleCookie.__setitem__ won't take unicode keys. 
cherrypy.response.cookie[str(name)] = cookie.value def multiple(self, names): list(map(self.single, names)) def append_headers(header_list, debug=False): if debug: cherrypy.log( 'Extending response headers with %s' % repr(header_list), 'TOOLS.APPEND_HEADERS', ) cherrypy.serving.response.header_list.extend(header_list) cherrypy.tools.append_headers = cherrypy.Tool( 'on_end_resource', append_headers, ) class MultiHeader(Test): def header_list(self): pass header_list = cherrypy.tools.append_headers( header_list=[ (b'WWW-Authenticate', b'Negotiate'), (b'WWW-Authenticate', b'Basic realm="foo"'), ], )(header_list) def commas(self): cherrypy.response.headers['WWW-Authenticate'] = ( 'Negotiate,Basic realm="foo"' ) cherrypy.tree.mount(root) def testStatus(self): self.getPage('/status/') self.assertBody('normal') self.assertStatus(200) self.getPage('/status/blank') self.assertBody('') self.assertStatus(200) self.getPage('/status/illegal') self.assertStatus(500) msg = 'Illegal response status from server (781 is out of range).' self.assertErrorPage(500, msg) if not getattr(cherrypy.server, 'using_apache', False): self.getPage('/status/unknown') self.assertBody('funky') self.assertStatus(431) self.getPage('/status/bad') self.assertStatus(500) msg = "Illegal response status from server ('error' is non-numeric)." self.assertErrorPage(500, msg) def test_on_end_resource_status(self): self.getPage('/status/on_end_resource_stage') self.assertBody('[]') self.getPage('/status/on_end_resource_stage') self.assertBody(repr(['200 OK'])) def testSlashes(self): # Test that requests for index methods without a trailing slash # get redirected to the same URI path with a trailing slash. # Make sure GET params are preserved. 
self.getPage('/redirect?id=3') self.assertStatus(301) self.assertMatchesBody( '<a href=([\'"])%s/redirect/[?]id=3\\1>' '%s/redirect/[?]id=3</a>' % (self.base(), self.base()), ) if self.prefix(): # Corner case: the "trailing slash" redirect could be tricky if # we're using a virtual root and the URI is "/vroot" (no slash). self.getPage('') self.assertStatus(301) self.assertMatchesBody( '<a href=([\'"])%s/\\1>%s/</a>' % (self.base(), self.base()), ) # Test that requests for NON-index methods WITH a trailing slash # get redirected to the same URI path WITHOUT a trailing slash. # Make sure GET params are preserved. self.getPage('/redirect/by_code/?code=307') self.assertStatus(301) self.assertMatchesBody( '<a href=([\'"])%s/redirect/by_code[?]code=307\\1>' '%s/redirect/by_code[?]code=307</a>' % (self.base(), self.base()), ) # If the trailing_slash tool is off, CP should just continue # as if the slashes were correct. But it needs some help # inside cherrypy.url to form correct output. self.getPage('/url?path_info=page1') self.assertBody('%s/url/page1' % self.base()) self.getPage('/url/leaf/?path_info=page1') self.assertBody('%s/url/page1' % self.base()) def testRedirect(self): self.getPage('/redirect/') self.assertBody('child') self.assertStatus(200) self.getPage('/redirect/by_code?code=300') self.assertMatchesBody( r"<a href=(['\"])(.*)somewhere%20else\1>\2somewhere%20else</a>", ) self.assertStatus(300) self.getPage('/redirect/by_code?code=301') self.assertMatchesBody( r"<a href=(['\"])(.*)somewhere%20else\1>\2somewhere%20else</a>", ) self.assertStatus(301) self.getPage('/redirect/by_code?code=302') self.assertMatchesBody( r"<a href=(['\"])(.*)somewhere%20else\1>\2somewhere%20else</a>", ) self.assertStatus(302) self.getPage('/redirect/by_code?code=303') self.assertMatchesBody( r"<a href=(['\"])(.*)somewhere%20else\1>\2somewhere%20else</a>", ) self.assertStatus(303) self.getPage('/redirect/by_code?code=307') self.assertMatchesBody( r"<a 
href=(['\"])(.*)somewhere%20else\1>\2somewhere%20else</a>", ) self.assertStatus(307) self.getPage('/redirect/by_code?code=308') self.assertMatchesBody( r"<a href=(['\"])(.*)somewhere%20else\1>\2somewhere%20else</a>", ) self.assertStatus(308) self.getPage('/redirect/nomodify') self.assertBody('') self.assertStatus(304) self.getPage('/redirect/proxy') self.assertBody('') self.assertStatus(305) # HTTPRedirect on error self.getPage('/redirect/error/') self.assertStatus(('302 Found', '303 See Other')) self.assertInBody('/errpage') # Make sure str(HTTPRedirect()) works. self.getPage('/redirect/stringify', protocol='HTTP/1.0') self.assertStatus(200) self.assertBody("(['%s/'], 302)" % self.base()) if cherrypy.server.protocol_version == 'HTTP/1.1': self.getPage('/redirect/stringify', protocol='HTTP/1.1') self.assertStatus(200) self.assertBody("(['%s/'], 303)" % self.base()) # check that #fragments are handled properly # http://skrb.org/ietf/http_errata.html#location-fragments frag = 'foo' self.getPage('/redirect/fragment/%s' % frag) self.assertMatchesBody( r"<a href=(['\"])(.*)\/some\/url\#%s\1>\2\/some\/url\#%s</a>" % (frag, frag), ) loc = self.assertHeader('Location') assert loc.endswith('#%s' % frag) self.assertStatus(('302 Found', '303 See Other')) # check injection protection # See https://github.com/cherrypy/cherrypy/issues/1003 self.getPage( '/redirect/custom?' 'code=303&url=/foobar/%0d%0aSet-Cookie:%20somecookie=someval', ) self.assertStatus(303) loc = self.assertHeader('Location') assert 'Set-Cookie' in loc self.assertNoHeader('Set-Cookie') def assertValidXHTML(): from xml.etree import ElementTree try: ElementTree.fromstring( '<html><body>%s</body></html>' % self.body.decode('utf-8'), ) except ElementTree.ParseError: self._handlewebError( 'automatically generated redirect did not ' 'generate well-formed html', ) # check redirects to URLs generated valid HTML - we check this # by seeing if it appears as valid XHTML. 
self.getPage('/redirect/by_code?code=303') self.assertStatus(303) assertValidXHTML() # do the same with a url containing quote characters. self.getPage('/redirect/url_with_quote') self.assertStatus(303) assertValidXHTML() def test_redirect_with_xss(self): """A redirect to a URL with HTML injected should result in page contents escaped.""" self.getPage('/redirect/url_with_xss') self.assertStatus(303) assert b'<script>' not in self.body assert b'&lt;script&gt;' in self.body def test_redirect_with_unicode(self): """ A redirect to a URL with Unicode should return a Location header containing that Unicode URL. """ # test disabled due to #1440 return self.getPage('/redirect/url_with_unicode') self.assertStatus(303) loc = self.assertHeader('Location') assert ntou('тест', encoding='utf-8') in loc def test_InternalRedirect(self): # InternalRedirect self.getPage('/internalredirect/') self.assertBody('hello') self.assertStatus(200) # Test passthrough self.getPage( '/internalredirect/petshop?user_id=Sir-not-appearing-in-this-film', ) self.assertBody('0 images for Sir-not-appearing-in-this-film') self.assertStatus(200) # Test args self.getPage('/internalredirect/petshop?user_id=parrot') self.assertBody('0 images for slug') self.assertStatus(200) # Test POST self.getPage( '/internalredirect/petshop', method='POST', body='user_id=terrier', ) self.assertBody('0 images for fish') self.assertStatus(200) # Test ir before body read self.getPage( '/internalredirect/early_ir', method='POST', body='arg=aha!', ) self.assertBody('Something went horribly wrong.') self.assertStatus(200) self.getPage('/internalredirect/secure') self.assertBody('Please log in') self.assertStatus(200) # Relative path in InternalRedirect. # Also tests request.prev. 
self.getPage('/internalredirect/relative?a=3&b=5') self.assertBody('a=3&b=5') self.assertStatus(200) # InternalRedirect on error self.getPage('/internalredirect/choke') self.assertStatus(200) self.assertBody('Something went horribly wrong.') def testFlatten(self): for url in [ '/flatten/as_string', '/flatten/as_list', '/flatten/as_yield', '/flatten/as_dblyield', '/flatten/as_refyield', ]: self.getPage(url) self.assertBody('content') def testRanges(self): self.getPage('/ranges/get_ranges?bytes=3-6') self.assertBody('[(3, 7)]') # Test multiple ranges and a suffix-byte-range-spec, for good measure. self.getPage('/ranges/get_ranges?bytes=2-4,-1') self.assertBody('[(2, 5), (7, 8)]') # Test a suffix-byte-range longer than the content # length. Note that in this test, the content length # is 8 bytes. self.getPage('/ranges/get_ranges?bytes=-100') self.assertBody('[(0, 8)]') # Get a partial file. if cherrypy.server.protocol_version == 'HTTP/1.1': self.getPage('/ranges/slice_file', [('Range', 'bytes=2-5')]) self.assertStatus(206) self.assertHeader('Content-Type', 'text/html;charset=utf-8') self.assertHeader('Content-Range', 'bytes 2-5/14') self.assertBody('llo,') # What happens with overlapping ranges (and out of order, too)? 
self.getPage('/ranges/slice_file', [('Range', 'bytes=4-6,2-5')]) self.assertStatus(206) ct = self.assertHeader('Content-Type') expected_type = 'multipart/byteranges; boundary=' assert ct.startswith(expected_type) boundary = ct[len(expected_type) :] expected_body = ( '\r\n--%s\r\n' 'Content-type: text/html\r\n' 'Content-range: bytes 4-6/14\r\n' '\r\n' 'o, \r\n' '--%s\r\n' 'Content-type: text/html\r\n' 'Content-range: bytes 2-5/14\r\n' '\r\n' 'llo,\r\n' '--%s--\r\n' % (boundary, boundary, boundary) ) self.assertBody(expected_body) self.assertHeader('Content-Length') # Test "416 Requested Range Not Satisfiable" self.getPage('/ranges/slice_file', [('Range', 'bytes=2300-2900')]) self.assertStatus(416) # "When this status code is returned for a byte-range request, # the response SHOULD include a Content-Range entity-header # field specifying the current length of the selected resource" self.assertHeader('Content-Range', 'bytes */14') elif cherrypy.server.protocol_version == 'HTTP/1.0': # Test Range behavior with HTTP/1.0 request self.getPage('/ranges/slice_file', [('Range', 'bytes=2-5')]) self.assertStatus(200) self.assertBody('Hello, world\r\n') def testFavicon(self): # favicon.ico is served by staticfile. 
icofilename = os.path.join(localDir, '../favicon.ico') with open(icofilename, 'rb') as icofile: data = icofile.read() self.getPage('/favicon.ico') self.assertBody(data) def skip_if_bad_cookies(self): """ cookies module fails to reject invalid cookies https://github.com/cherrypy/cherrypy/issues/1405 """ cookies = sys.modules.get('http.cookies') _is_legal_key = getattr(cookies, '_is_legal_key', lambda x: False) if not _is_legal_key(','): return issue = 'http://bugs.python.org/issue26302' tmpl = 'Broken cookies module ({issue})' self.skip(tmpl.format(**locals())) def testCookies(self): self.skip_if_bad_cookies() self.getPage( '/cookies/single?name=First', [('Cookie', 'First=Dinsdale;')], ) self.assertHeader('Set-Cookie', 'First=Dinsdale') self.getPage( '/cookies/multiple?names=First&names=Last', [ ('Cookie', 'First=Dinsdale; Last=Piranha;'), ], ) self.assertHeader('Set-Cookie', 'First=Dinsdale') self.assertHeader('Set-Cookie', 'Last=Piranha') self.getPage( '/cookies/single?name=Something-With%2CComma', [('Cookie', 'Something-With,Comma=some-value')], ) self.assertStatus(400) def testDefaultContentType(self): self.getPage('/') self.assertHeader('Content-Type', 'text/html;charset=utf-8') self.getPage('/defct/plain') self.getPage('/') self.assertHeader('Content-Type', 'text/plain;charset=utf-8') self.getPage('/defct/html') def test_multiple_headers(self): self.getPage('/multiheader/header_list') self.assertEqual( [(k, v) for k, v in self.headers if k == 'WWW-Authenticate'], [ ('WWW-Authenticate', 'Negotiate'), ('WWW-Authenticate', 'Basic realm="foo"'), ], ) self.getPage('/multiheader/commas') self.assertHeader('WWW-Authenticate', 'Negotiate,Basic realm="foo"') def test_cherrypy_url(self): # Input relative to current self.getPage('/url/leaf?path_info=page1') self.assertBody('%s/url/page1' % self.base()) self.getPage('/url/?path_info=page1') self.assertBody('%s/url/page1' % self.base()) # Other host header host = 'www.mydomain.example' 
self.getPage('/url/leaf?path_info=page1', headers=[('Host', host)]) self.assertBody('%s://%s/url/page1' % (self.scheme, host)) # Input is 'absolute'; that is, relative to script_name self.getPage('/url/leaf?path_info=/page1') self.assertBody('%s/page1' % self.base()) self.getPage('/url/?path_info=/page1') self.assertBody('%s/page1' % self.base()) # Single dots self.getPage('/url/leaf?path_info=./page1') self.assertBody('%s/url/page1' % self.base()) self.getPage('/url/leaf?path_info=other/./page1') self.assertBody('%s/url/other/page1' % self.base()) self.getPage('/url/?path_info=/other/./page1') self.assertBody('%s/other/page1' % self.base()) self.getPage('/url/?path_info=/other/././././page1') self.assertBody('%s/other/page1' % self.base()) # Double dots self.getPage('/url/leaf?path_info=../page1') self.assertBody('%s/page1' % self.base()) self.getPage('/url/leaf?path_info=other/../page1') self.assertBody('%s/url/page1' % self.base()) self.getPage('/url/leaf?path_info=/other/../page1') self.assertBody('%s/page1' % self.base()) self.getPage('/url/leaf?path_info=/other/../../../page1') self.assertBody('%s/page1' % self.base()) self.getPage('/url/leaf?path_info=/other/../../../../../page1') self.assertBody('%s/page1' % self.base()) # qs param is not normalized as a path self.getPage('/url/qs?qs=/other') self.assertBody('%s/url/qs?/other' % self.base()) self.getPage('/url/qs?qs=/other/../page1') self.assertBody('%s/url/qs?/other/../page1' % self.base()) self.getPage('/url/qs?qs=../page1') self.assertBody('%s/url/qs?../page1' % self.base()) self.getPage('/url/qs?qs=../../page1') self.assertBody('%s/url/qs?../../page1' % self.base()) # Output relative to current path or script_name self.getPage('/url/?path_info=page1&relative=True') self.assertBody('page1') self.getPage('/url/leaf?path_info=/page1&relative=True') self.assertBody('../page1') self.getPage('/url/leaf?path_info=page1&relative=True') self.assertBody('page1') 
self.getPage('/url/leaf?path_info=leaf/page1&relative=True') self.assertBody('leaf/page1') self.getPage('/url/leaf?path_info=../page1&relative=True') self.assertBody('../page1') self.getPage('/url/?path_info=other/../page1&relative=True') self.assertBody('page1') # Output relative to / self.getPage('/baseurl?path_info=ab&relative=True') self.assertBody('ab') # Output relative to / self.getPage('/baseurl?path_info=/ab&relative=True') self.assertBody('ab') # absolute-path references ("server-relative") # Input relative to current self.getPage('/url/leaf?path_info=page1&relative=server') self.assertBody('/url/page1') self.getPage('/url/?path_info=page1&relative=server') self.assertBody('/url/page1') # Input is 'absolute'; that is, relative to script_name self.getPage('/url/leaf?path_info=/page1&relative=server') self.assertBody('/page1') self.getPage('/url/?path_info=/page1&relative=server') self.assertBody('/page1') def test_expose_decorator(self): # Test @expose self.getPage('/expose_dec/no_call') self.assertStatus(200) self.assertBody('Mr E. R. Bradshaw') # Test @expose() self.getPage('/expose_dec/call_empty') self.assertStatus(200) self.assertBody('Mrs. B.J. Smegma') # Test @expose("alias") self.getPage('/expose_dec/call_alias') self.assertStatus(200) self.assertBody('Mr Nesbitt') # Does the original name work? self.getPage('/expose_dec/nesbitt') self.assertStatus(200) self.assertBody('Mr Nesbitt') # Test @expose(["alias1", "alias2"]) self.getPage('/expose_dec/alias1') self.assertStatus(200) self.assertBody('Mr Ken Andrews') self.getPage('/expose_dec/alias2') self.assertStatus(200) self.assertBody('Mr Ken Andrews') # Does the original name work? self.getPage('/expose_dec/andrews') self.assertStatus(200) self.assertBody('Mr Ken Andrews') # Test @expose(alias="alias") self.getPage('/expose_dec/alias3') self.assertStatus(200) self.assertBody('Mr. and Mrs. Watson')
CoreRequestHandlingTest
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/callbackProtocol11.py
{ "start": 277, "end": 439 }
class ____(Protocol): def __call__(self, x: T) -> A[list[T], T]: ... def func1() -> BProto: def make_a(x: T) -> A[list[T], T]: ... return make_a
BProto
python
realpython__materials
tic-tac-toe-ai-python/source_code_bonus/tic-tac-toe/library/src/tic_tac_toe/game/players_async.py
{ "start": 206, "end": 829 }
class ____(metaclass=abc.ABCMeta): def __init__(self, mark: Mark) -> None: self.mark = mark async def make_move(self, game_state: GameState) -> GameState: if self.mark is game_state.current_mark: if move := await self.get_move(game_state): return move.after_state raise InvalidMove("No more possible moves") else: raise InvalidMove("It's the other player's turn") @abc.abstractmethod async def get_move(self, game_state: GameState) -> Move | None: """Return the current player's move in the given game state."""
AsyncPlayer
python
davidhalter__jedi
jedi/file_io.py
{ "start": 2255, "end": 2337 }
class ____(file_io.KnownContentFileIO, FileIOFolderMixin): pass
KnownContentFileIO
python
getsentry__sentry
tests/sentry/integrations/slack/actions/notification/test_slack_notify_service_action.py
{ "start": 867, "end": 22974 }
class ____(RuleTestCase): rule_cls = SlackNotifyServiceAction def setUp(self) -> None: with assume_test_silo_mode(SiloMode.REGION): self.organization = self.create_organization(id=1, owner=self.user) self.project = self.create_project(organization=self.organization) with assume_test_silo_mode(SiloMode.CONTROL): self.integration = self.create_integration( organization=self.organization, name="slack", provider="slack", external_id="slack:1", metadata={"access_token": "xoxb-access-token"}, ) self.uuid = "5bac5dcc-e201-4cb2-8da2-bac39788a13d" self.action_data = { "workspace": str(self.integration.id), "id": "sentry.integrations.slack.notify_action.SlackNotifyServiceAction", "channel_id": "C0123456789", "tags": "", "channel": "test-notifications", "uuid": self.uuid, } self.rule = self.create_project_rule(project=self.project, action_data=[self.action_data]) self.notification_uuid = str(uuid4()) self.event = self.store_event( data={ "message": "Hello world", "level": "warning", "platform": "python", "culprit": "foo.bar", }, project_id=self.project.id, ) assert self.event.group is not None self.rule_fire_history = RuleFireHistory.objects.create( project=self.project, rule=self.rule, group=self.event.group, event_id=self.event.event_id, notification_uuid=self.notification_uuid, ) self.action = self.create_action() def test_when_rule_fire_history_is_passed_in(self) -> None: instance = SlackNotifyServiceAction( self.project, data={}, rule=self.rule, rule_fire_history=self.rule_fire_history ) assert instance.rule_fire_history is not None def test_when_rule_fire_history_is_not_passed_in(self) -> None: instance = SlackNotifyServiceAction(self.project, data={}, rule=self.rule) assert instance.rule_fire_history is None def test_when_rule_fire_history_is_none(self) -> None: instance = SlackNotifyServiceAction( self.project, data={}, rule=self.rule, rule_fire_history=None ) assert instance.rule_fire_history is None 
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event") @patch("sentry.integrations.slack.sdk_client.SlackSdkClient.chat_postMessage") @patch("slack_sdk.web.client.WebClient._perform_urllib_http_request") def test_after( self, mock_api_call: MagicMock, mock_post: MagicMock, mock_record: MagicMock ) -> None: mock_api_call.return_value = { "body": orjson.dumps({"ok": True}).decode(), "headers": {}, "status": 200, } rule = self.get_rule(data=self.action_data) results = list(rule.after(event=self.event)) assert len(results) == 1 results[0].callback(self.event, futures=[]) blocks = mock_post.call_args.kwargs["blocks"] blocks = orjson.loads(blocks) assert ( blocks[0]["text"]["text"] == f":large_yellow_circle: <http://testserver/organizations/{self.organization.slug}/issues/{self.event.group.id}/?referrer=slack|*Hello world*>" ) assert NotificationMessage.objects.all().count() == 0 assert len(mock_record.mock_calls) == 4 thread_ts_start, thread_ts_success, send_notification_start, send_notification_success = ( mock_record.mock_calls ) assert thread_ts_start.args[0] == EventLifecycleOutcome.STARTED assert thread_ts_success.args[0] == EventLifecycleOutcome.SUCCESS assert send_notification_start.args[0] == EventLifecycleOutcome.STARTED assert send_notification_success.args[0] == EventLifecycleOutcome.SUCCESS @patch("sentry.integrations.utils.metrics.EventLifecycle.record_event") @patch("sentry.integrations.slack.sdk_client.SlackSdkClient.chat_postMessage") def test_after_slo_halt(self, mock_post: MagicMock, mock_record: MagicMock) -> None: mock_post.side_effect = SlackApiError( message="account_inactive", response=SlackResponse( client=None, http_verb="POST", api_url="https://slack.com/api/chat.postMessage", req_args={}, data={"ok": False, "error": "account_inactive"}, headers={}, status_code=200, ), ) rule = self.get_rule(data=self.action_data) results = list(rule.after(event=self.event)) assert len(results) == 1 results[0].callback(self.event, futures=[]) 
blocks = mock_post.call_args.kwargs["blocks"] blocks = orjson.loads(blocks) assert ( blocks[0]["text"]["text"] == f":large_yellow_circle: <http://testserver/organizations/{self.organization.slug}/issues/{self.event.group.id}/?referrer=slack|*Hello world*>" ) assert NotificationMessage.objects.all().count() == 0 assert len(mock_record.mock_calls) == 4 thread_ts_start, thread_ts_success, send_notification_start, send_notification_success = ( mock_record.mock_calls ) assert thread_ts_start.args[0] == EventLifecycleOutcome.STARTED assert thread_ts_success.args[0] == EventLifecycleOutcome.SUCCESS assert send_notification_start.args[0] == EventLifecycleOutcome.STARTED assert send_notification_success.args[0] == EventLifecycleOutcome.HALTED @patch("sentry.integrations.utils.metrics.EventLifecycle.record_event") @patch("sentry.integrations.slack.sdk_client.metrics") def test_after_error(self, mock_metrics: MagicMock, mock_record: MagicMock) -> None: # tests error flow because we're actually trying to POST rule = self.get_rule(data=self.action_data) results = list(rule.after(event=self.event)) assert len(results) == 1 results[0].callback(self.event, futures=[]) mock_metrics.incr.assert_called_with( SLACK_DATADOG_METRIC, sample_rate=1.0, tags={"ok": False, "status": 200} ) assert NotificationMessage.objects.all().count() == 0 assert len(mock_record.mock_calls) == 4 thread_ts_start, thread_ts_failure, send_notification_start, send_notification_failure = ( mock_record.mock_calls ) assert thread_ts_start.args[0] == EventLifecycleOutcome.STARTED assert thread_ts_failure.args[0] == EventLifecycleOutcome.SUCCESS assert send_notification_start.args[0] == EventLifecycleOutcome.STARTED assert send_notification_failure.args[0] == EventLifecycleOutcome.FAILURE assert_failure_metric(mock_record, IntegrationError()) @patch("sentry.integrations.utils.metrics.EventLifecycle.record_event") @patch("sentry.integrations.slack.sdk_client.SlackSdkClient.chat_postMessage") 
@patch("slack_sdk.web.client.WebClient._perform_urllib_http_request") def test_after_with_threads( self, mock_api_call: MagicMock, mock_post: MagicMock, mock_record: MagicMock ) -> None: mock_api_call.return_value = { "body": orjson.dumps({"ok": True}).decode(), "headers": {}, "status": 200, } rule = self.get_rule(data=self.action_data, rule_fire_history=self.rule_fire_history) results = list(rule.after(event=self.event)) assert len(results) == 1 results[0].callback(self.event, futures=[RuleFuture(rule=self.rule, kwargs={})]) blocks = mock_post.call_args.kwargs["blocks"] blocks = orjson.loads(blocks) assert ( blocks[0]["text"]["text"] == f":large_yellow_circle: <http://testserver/organizations/{self.organization.slug}/issues/{self.event.group.id}/?referrer=slack&alert_rule_id={self.rule.id}&alert_type=issue|*Hello world*>" ) assert NotificationMessage.objects.all().count() == 1 assert len(mock_record.mock_calls) == 4 thread_ts_start, thread_ts_success, send_notification_start, send_notification_success = ( mock_record.mock_calls ) assert thread_ts_start.args[0] == EventLifecycleOutcome.STARTED assert thread_ts_success.args[0] == EventLifecycleOutcome.SUCCESS assert send_notification_start.args[0] == EventLifecycleOutcome.STARTED assert send_notification_success.args[0] == EventLifecycleOutcome.SUCCESS @patch("sentry.integrations.utils.metrics.EventLifecycle.record_event") @patch("sentry.integrations.slack.sdk_client.SlackSdkClient.chat_postMessage") @patch("slack_sdk.web.client.WebClient._perform_urllib_http_request") def test_after_reply_in_thread( self, mock_api_call: MagicMock, mock_post: MagicMock, mock_record: MagicMock ) -> None: mock_api_call.return_value = { "body": orjson.dumps({"ok": True}).decode(), "headers": {}, "status": 200, } with assume_test_silo_mode(SiloMode.REGION): msg = NotificationMessage.objects.create( rule_fire_history_id=self.rule_fire_history.id, rule_action_uuid=self.uuid, ) event = self.store_event( data={ "message": "Hello world", 
"level": "warning", "platform": "python", "culprit": "foo.bar", }, project_id=self.project.id, ) rule_fire_history = RuleFireHistory.objects.create( project=self.project, rule=self.rule, group=self.event.group, event_id=event.event_id, notification_uuid=self.notification_uuid, ) rule = self.get_rule(data=self.action_data, rule_fire_history=rule_fire_history) results = list(rule.after(event=event)) assert len(results) == 1 results[0].callback(self.event, futures=[RuleFuture(rule=self.rule, kwargs={})]) blocks = mock_post.call_args.kwargs["blocks"] blocks = orjson.loads(blocks) assert ( blocks[0]["text"]["text"] == f":large_yellow_circle: <http://testserver/organizations/{self.organization.slug}/issues/{self.event.group.id}/?referrer=slack&alert_rule_id={self.rule.id}&alert_type=issue|*Hello world*>" ) assert NotificationMessage.objects.all().count() == 2 assert ( NotificationMessage.objects.filter(parent_notification_message_id=msg.id).count() == 1 ) assert len(mock_record.mock_calls) == 4 thread_ts_start, thread_ts_success, send_notification_start, send_notification_success = ( mock_record.mock_calls ) assert thread_ts_start.args[0] == EventLifecycleOutcome.STARTED assert thread_ts_success.args[0] == EventLifecycleOutcome.SUCCESS assert send_notification_start.args[0] == EventLifecycleOutcome.STARTED assert send_notification_success.args[0] == EventLifecycleOutcome.SUCCESS @with_feature("organizations:workflow-engine-trigger-actions") @patch("sentry.integrations.utils.metrics.EventLifecycle.record_event") @patch("sentry.integrations.slack.sdk_client.SlackSdkClient.chat_postMessage") @patch("slack_sdk.web.client.WebClient._perform_urllib_http_request") def test_after_noa( self, mock_api_call: MagicMock, mock_post: MagicMock, mock_record: MagicMock ) -> None: mock_api_call.return_value = { "body": orjson.dumps({"ok": True}).decode(), "headers": {}, "status": 200, } action_data = self.action_data.copy() action_data["legacy_rule_id"] = "123" rule = 
self.create_project_rule(project=self.project, action_data=[action_data]) rule.id = self.action.id rule.environment_id = None rule_cls_instance = self.get_rule(data=action_data) results = list(rule_cls_instance.after(event=self.event)) assert len(results) == 1 results[0].callback(self.event, futures=[RuleFuture(rule=rule, kwargs={})]) blocks = mock_post.call_args.kwargs["blocks"] blocks = orjson.loads(blocks) assert ( blocks[0]["text"]["text"] == f":large_yellow_circle: <http://testserver/organizations/{self.organization.slug}/issues/{self.event.group.id}/?referrer=slack&alert_rule_id={action_data['legacy_rule_id']}&alert_type=issue|*Hello world*>" ) assert NotificationMessage.objects.all().count() == 1 assert len(mock_record.mock_calls) == 4 thread_ts_start, thread_ts_success, send_notification_start, send_notification_success = ( mock_record.mock_calls ) assert thread_ts_start.args[0] == EventLifecycleOutcome.STARTED assert thread_ts_success.args[0] == EventLifecycleOutcome.SUCCESS assert send_notification_start.args[0] == EventLifecycleOutcome.STARTED assert send_notification_success.args[0] == EventLifecycleOutcome.SUCCESS @with_feature("organizations:workflow-engine-trigger-actions") @patch("sentry.integrations.utils.metrics.EventLifecycle.record_event") @patch("sentry.integrations.slack.sdk_client.SlackSdkClient.chat_postMessage") @patch("slack_sdk.web.client.WebClient._perform_urllib_http_request") def test_after_noa_test_action( self, mock_api_call: MagicMock, mock_post: MagicMock, mock_record: MagicMock ) -> None: mock_api_call.return_value = { "body": orjson.dumps({"ok": True}).decode(), "headers": {}, "status": 200, } action_data = self.action_data.copy() action_data["legacy_rule_id"] = "123" rule = self.create_project_rule(project=self.project, action_data=[action_data]) # Represents a test action rule.id = -1 rule.environment_id = None rule_cls_instance = self.get_rule(data=action_data) results = list(rule_cls_instance.after(event=self.event)) assert 
len(results) == 1 results[0].callback(self.event, futures=[RuleFuture(rule=rule, kwargs={})]) blocks = mock_post.call_args.kwargs["blocks"] blocks = orjson.loads(blocks) assert ( blocks[0]["text"]["text"] == f":large_yellow_circle: <http://testserver/organizations/{self.organization.slug}/issues/{self.event.group.id}/?referrer=slack&alert_rule_id={action_data['legacy_rule_id']}&alert_type=issue|*Hello world*>" ) # Test action should not create a notification message assert NotificationMessage.objects.all().count() == 0 assert len(mock_record.mock_calls) == 2 thread_ts_start, thread_ts_success = mock_record.mock_calls assert thread_ts_start.args[0] == EventLifecycleOutcome.STARTED assert thread_ts_success.args[0] == EventLifecycleOutcome.SUCCESS @with_feature("organizations:workflow-engine-trigger-actions") @with_feature("organizations:workflow-engine-ui-links") @patch("sentry.integrations.utils.metrics.EventLifecycle.record_event") @patch("sentry.integrations.slack.sdk_client.SlackSdkClient.chat_postMessage") @patch("slack_sdk.web.client.WebClient._perform_urllib_http_request") def test_after_noa_new_ui( self, mock_api_call: MagicMock, mock_post: MagicMock, mock_record: MagicMock ) -> None: mock_api_call.return_value = { "body": orjson.dumps({"ok": True}).decode(), "headers": {}, "status": 200, } action_data = self.action_data.copy() action_data["workflow_id"] = "123" rule = self.create_project_rule(project=self.project, action_data=[action_data]) rule.id = self.action.id rule.environment_id = None rule_cls_instance = self.get_rule(data=action_data) results = list(rule_cls_instance.after(event=self.event)) assert len(results) == 1 results[0].callback(self.event, futures=[RuleFuture(rule=rule, kwargs={})]) blocks = mock_post.call_args.kwargs["blocks"] blocks = orjson.loads(blocks) assert ( blocks[0]["text"]["text"] == f":large_yellow_circle: 
<http://testserver/organizations/{self.organization.slug}/issues/{self.event.group.id}/?referrer=slack&workflow_id={action_data['workflow_id']}&alert_type=issue|*Hello world*>" ) assert NotificationMessage.objects.all().count() == 1 assert len(mock_record.mock_calls) == 4 thread_ts_start, thread_ts_success, send_notification_start, send_notification_success = ( mock_record.mock_calls ) assert thread_ts_start.args[0] == EventLifecycleOutcome.STARTED assert thread_ts_success.args[0] == EventLifecycleOutcome.SUCCESS assert send_notification_start.args[0] == EventLifecycleOutcome.STARTED assert send_notification_success.args[0] == EventLifecycleOutcome.SUCCESS @with_feature("organizations:workflow-engine-trigger-actions") @patch("sentry.integrations.utils.metrics.EventLifecycle.record_event") @patch("sentry.integrations.slack.sdk_client.SlackSdkClient.chat_postMessage") @patch("slack_sdk.web.client.WebClient._perform_urllib_http_request") def test_after_with_threads_noa( self, mock_api_call: MagicMock, mock_post: MagicMock, mock_record: MagicMock ) -> None: mock_api_call.return_value = { "body": orjson.dumps({"ok": True}).decode(), "headers": {}, "status": 200, } action_data = self.action_data.copy() action_data["legacy_rule_id"] = "123" rule_cls_instance = self.get_rule(data=action_data) results = list(rule_cls_instance.after(event=self.event)) assert len(results) == 1 rule = self.create_project_rule(project=self.project, action_data=[action_data]) rule.id = self.action.id rule.environment_id = None results[0].callback(self.event, futures=[RuleFuture(rule=rule, kwargs={})]) blocks = mock_post.call_args.kwargs["blocks"] blocks = orjson.loads(blocks) assert ( blocks[0]["text"]["text"] == f":large_yellow_circle: <http://testserver/organizations/{self.organization.slug}/issues/{self.event.group.id}/?referrer=slack&alert_rule_id={action_data['legacy_rule_id']}&alert_type=issue|*Hello world*>" ) assert NotificationMessage.objects.all().count() == 1 assert 
len(mock_record.mock_calls) == 4 thread_ts_start, thread_ts_success, send_notification_start, send_notification_success = ( mock_record.mock_calls ) assert thread_ts_start.args[0] == EventLifecycleOutcome.STARTED assert thread_ts_success.args[0] == EventLifecycleOutcome.SUCCESS assert send_notification_start.args[0] == EventLifecycleOutcome.STARTED assert send_notification_success.args[0] == EventLifecycleOutcome.SUCCESS @with_feature("organizations:workflow-engine-trigger-actions") @patch("sentry.integrations.utils.metrics.EventLifecycle.record_event") @patch("sentry.integrations.slack.sdk_client.SlackSdkClient.chat_postMessage") @patch("slack_sdk.web.client.WebClient._perform_urllib_http_request") def test_after_reply_in_thread_noa( self, mock_api_call: MagicMock, mock_post: MagicMock, mock_record: MagicMock ) -> None: mock_api_call.return_value = { "body": orjson.dumps({"ok": True}).decode(), "headers": {}, "status": 200, } with assume_test_silo_mode(SiloMode.REGION): msg = NotificationMessage.objects.create( action_id=self.action.id, group_id=self.event.group.id, ) event = self.store_event( data={ "message": "Hello world", "level": "warning", "platform": "python", "culprit": "foo.bar", }, project_id=self.project.id, ) action_data = self.action_data.copy() action_data["legacy_rule_id"] = "123" rule_cls_instance = self.get_rule(data=action_data) results = list(rule_cls_instance.after(event=event)) assert len(results) == 1 rule = self.create_project_rule(project=self.project, action_data=[action_data]) rule.id = self.action.id rule.environment_id = None results[0].callback(self.event, futures=[RuleFuture(rule=rule, kwargs={})]) blocks = mock_post.call_args.kwargs["blocks"] blocks = orjson.loads(blocks) assert ( blocks[0]["text"]["text"] == f":large_yellow_circle: <http://testserver/organizations/{self.organization.slug}/issues/{self.event.group.id}/?referrer=slack&alert_rule_id={action_data['legacy_rule_id']}&alert_type=issue|*Hello world*>" ) assert 
NotificationMessage.objects.all().count() == 2 assert ( NotificationMessage.objects.filter(parent_notification_message_id=msg.id).count() == 1 ) assert len(mock_record.mock_calls) == 4 thread_ts_start, thread_ts_success, send_notification_start, send_notification_success = ( mock_record.mock_calls ) assert thread_ts_start.args[0] == EventLifecycleOutcome.STARTED assert thread_ts_success.args[0] == EventLifecycleOutcome.SUCCESS assert send_notification_start.args[0] == EventLifecycleOutcome.STARTED assert send_notification_success.args[0] == EventLifecycleOutcome.SUCCESS
TestInit
python
scrapy__scrapy
tests/test_addons.py
{ "start": 1032, "end": 1541 }
class ____: def test_update_settings(self): settings = BaseSettings() settings.set("KEY1", "default", priority="default") settings.set("KEY2", "project", priority="project") addon_config = {"KEY1": "addon", "KEY2": "addon", "KEY3": "addon"} testaddon = get_addon_cls(addon_config)() testaddon.update_settings(settings) assert settings["KEY1"] == "addon" assert settings["KEY2"] == "project" assert settings["KEY3"] == "addon"
TestAddon
python
pytorch__pytorch
torch/ao/quantization/pt2e/_numeric_debugger.py
{ "start": 7044, "end": 8412 }
class ____: actual: torch.Tensor ref: torch.Tensor @property def mse_loss(self) -> object: return self.loss(F.mse_loss) @property def sqnr(self) -> object: return self.loss(compute_sqnr) def loss( self, loss_function: Callable[[torch.Tensor, torch.Tensor], torch.Tensor] ) -> object: return _loss_fn(loss_function, self.actual, self.ref) def __repr__(self) -> str: # Don't include the tensors themselves as they are quite large to print # out. return ( f"QuantizationComparisonResult(mse_loss={self.mse_loss}, sqnr={self.sqnr})" ) def __post_init__(self) -> None: if not isinstance(self.actual, (torch.Tensor, list, tuple, dict)): raise ValueError( f"`self.actual` value must be a Tensor, list, tuple or dict, got: {self.actual}" ) if not isinstance(self.ref, (torch.Tensor, list, tuple, dict)): raise ValueError( f"`self.ref` value must be a Tensor, list, tuple or dict, got: {self.ref}" ) if not _tensor_shape_equals(self.ref, self.actual): raise ValueError( f"Cannot compare tensors with different shapes: ref={self.ref} vs actual={self.actual}" ) @dataclass(frozen=True)
QuantizationComparisonResult
python
dask__dask
dask/_task_spec.py
{ "start": 29362, "end": 30545 }
class ____(MutableMapping): def __init__(self, dsk): self.dsk = dsk self._removed = set() # Set a copy of dsk to avoid dct resizing self._cache = dsk.copy() self._cache.clear() def __getitem__(self, key): if (val := self._cache.get(key)) is not None: return val else: v = self.dsk[key] try: deps = v.dependencies except AttributeError: from dask.core import get_dependencies deps = get_dependencies(self.dsk, task=v) if self._removed: # deps is a frozenset but for good measure, let's not use -= since # that _may_ perform an inplace mutation deps = deps - self._removed self._cache[key] = deps return deps def __iter__(self): return iter(self.dsk) def __delitem__(self, key: Any) -> None: self._cache.clear() self._removed.add(key) def __setitem__(self, key: Any, value: Any) -> None: raise NotImplementedError def __len__(self) -> int: return len(self.dsk)
DependenciesMapping
python
pytorch__pytorch
test/dynamo/test_repros.py
{ "start": 21081, "end": 22567 }
class ____(collections.OrderedDict): """based on file_utils.py in HuggingFace""" def __getitem__(self, k): if isinstance(k, str): inner_dict = dict(self.items()) return inner_dict[k] else: return self.to_tuple()[k] def __setattr__(self, name, value): if name in self.keys() and value is not None: # Don't call self.__setitem__ to avoid recursion errors super().__setitem__(name, value) super().__setattr__(name, value) def __setitem__(self, key, value): # Will raise a KeyException if needed super().__setitem__(key, value) # Don't call self.__setattr__ to avoid recursion errors super().__setattr__(key, value) def to_tuple(self): return tuple(self[k] for k in self.keys()) def create_rand_mask_from_inputs( from_blocked_mask, to_blocked_mask, rand_attn, num_attention_heads, num_rand_blocks, batch_size, from_seq_length, from_block_size, ): """taken from HF modeling_big_bird.py""" num_windows = from_seq_length // from_block_size - 2 rand_mask = torch.stack( [p1[i1.flatten()] for p1, i1 in zip(to_blocked_mask, rand_attn)] ) rand_mask = rand_mask.view( batch_size, num_attention_heads, num_windows, num_rand_blocks * from_block_size ) rand_mask = torch.einsum("blq,bhlk->bhlqk", from_blocked_mask[:, 1:-1], rand_mask) return rand_mask
ModelOutput
python
pytest-dev__pytest
testing/python/collect.py
{ "start": 4931, "end": 10714 }
class ____: def test_class_with_init_warning(self, pytester: Pytester) -> None: pytester.makepyfile( """ class TestClass1(object): def __init__(self): pass """ ) result = pytester.runpytest() result.stdout.fnmatch_lines( [ "*cannot collect test class 'TestClass1' because it has " "a __init__ constructor (from: test_class_with_init_warning.py)" ] ) def test_class_with_new_warning(self, pytester: Pytester) -> None: pytester.makepyfile( """ class TestClass1(object): def __new__(self): pass """ ) result = pytester.runpytest() result.stdout.fnmatch_lines( [ "*cannot collect test class 'TestClass1' because it has " "a __new__ constructor (from: test_class_with_new_warning.py)" ] ) def test_class_subclassobject(self, pytester: Pytester) -> None: pytester.getmodulecol( """ class test(object): pass """ ) result = pytester.runpytest() result.stdout.fnmatch_lines(["*collected 0*"]) def test_static_method(self, pytester: Pytester) -> None: """Support for collecting staticmethod tests (#2528, #2699)""" pytester.getmodulecol( """ import pytest class Test(object): @staticmethod def test_something(): pass @pytest.fixture def fix(self): return 1 @staticmethod def test_fix(fix): assert fix == 1 """ ) result = pytester.runpytest() result.stdout.fnmatch_lines(["*collected 2 items*", "*2 passed in*"]) def test_setup_teardown_class_as_classmethod(self, pytester: Pytester) -> None: pytester.makepyfile( test_mod1=""" class TestClassMethod(object): @classmethod def setup_class(cls): pass def test_1(self): pass @classmethod def teardown_class(cls): pass """ ) result = pytester.runpytest() result.stdout.fnmatch_lines(["*1 passed*"]) def test_issue1035_obj_has_getattr(self, pytester: Pytester) -> None: modcol = pytester.getmodulecol( """ class Chameleon(object): def __getattr__(self, name): return True chameleon = Chameleon() """ ) colitems = modcol.collect() assert len(colitems) == 0 def test_issue1579_namedtuple(self, pytester: Pytester) -> None: pytester.makepyfile( """ import collections 
TestCase = collections.namedtuple('TestCase', ['a']) """ ) result = pytester.runpytest() result.stdout.fnmatch_lines( "*cannot collect test class 'TestCase' " "because it has a __new__ constructor*" ) def test_issue2234_property(self, pytester: Pytester) -> None: pytester.makepyfile( """ class TestCase(object): @property def prop(self): raise NotImplementedError() """ ) result = pytester.runpytest() assert result.ret == ExitCode.NO_TESTS_COLLECTED def test_does_not_discover_properties(self, pytester: Pytester) -> None: """Regression test for #12446.""" pytester.makepyfile( """\ class TestCase: @property def oops(self): raise SystemExit('do not call me!') """ ) result = pytester.runpytest() assert result.ret == ExitCode.NO_TESTS_COLLECTED def test_does_not_discover_instance_descriptors(self, pytester: Pytester) -> None: """Regression test for #12446.""" pytester.makepyfile( """\ # not `@property`, but it acts like one # this should cover the case of things like `@cached_property` / etc. class MyProperty: def __init__(self, func): self._func = func def __get__(self, inst, owner): if inst is None: return self else: return self._func.__get__(inst, owner)() class TestCase: @MyProperty def oops(self): raise SystemExit('do not call me!') """ ) result = pytester.runpytest() assert result.ret == ExitCode.NO_TESTS_COLLECTED def test_abstract_class_is_not_collected(self, pytester: Pytester) -> None: """Regression test for #12275 (non-unittest version).""" pytester.makepyfile( """ import abc class TestBase(abc.ABC): @abc.abstractmethod def abstract1(self): pass @abc.abstractmethod def abstract2(self): pass def test_it(self): pass class TestPartial(TestBase): def abstract1(self): pass class TestConcrete(TestPartial): def abstract2(self): pass """ ) result = pytester.runpytest() assert result.ret == ExitCode.OK result.assert_outcomes(passed=1)
TestClass
python
PrefectHQ__prefect
tests/test_futures.py
{ "start": 6905, "end": 8478 }
class ____: def test_wait_with_timeout(self): wrapped_future = Future() future = PrefectConcurrentFuture(uuid.uuid4(), wrapped_future) future.wait(timeout=0.01) # should not raise a TimeoutError assert ( future.state.is_pending() ) # should return a Pending state when task run is not found def test_wait_without_timeout(self): wrapped_future = Future() future = PrefectConcurrentFuture(uuid.uuid4(), wrapped_future) wrapped_future.set_result(Completed()) future.wait(timeout=0) assert future.state.is_completed() def test_result_with_final_state(self): final_state = Completed(data=42) wrapped_future = Future() future = PrefectConcurrentFuture(uuid.uuid4(), wrapped_future) wrapped_future.set_result(final_state) result = future.result() assert result == 42 def test_result_without_final_state(self): wrapped_future = Future() future = PrefectConcurrentFuture(uuid.uuid4(), wrapped_future) wrapped_future.set_result(42) result = future.result() assert result == 42 def test_result_with_final_state_and_raise_on_failure(self): final_state = Failed(data=ValueError("oops")) wrapped_future = Future() future = PrefectConcurrentFuture(uuid.uuid4(), wrapped_future) wrapped_future.set_result(final_state) with pytest.raises(ValueError, match="oops"): future.result(raise_on_failure=True)
TestPrefectConcurrentFuture
python
mahmoud__glom
glom/reduction.py
{ "start": 8567, "end": 11648 }
class ____(Fold): """By default, Merge turns an iterable of mappings into a single, merged :class:`dict`, leveraging the behavior of the :meth:`~dict.update` method. The start state can be customized with *init*, as well as the update operation, with *op*. Args: subspec: The location of the iterable of mappings. Defaults to ``T``. init (callable): A type or callable which returns a base instance into which all other values will be merged. op (callable): A callable, which takes two arguments, and performs a merge of the second into the first. Can also be the string name of a method to fetch on the instance created from *init*. Defaults to ``"update"``. .. note:: Besides the differing defaults, the primary difference between :class:`Merge` and other :class:`Fold` subtypes is that its *op* argument is assumed to be a two-argument function which has no return value and modifies the left parameter in-place. Because the initial state is a new object created with the *init* parameter, none of the target values are modified. """ def __init__(self, subspec=T, init=dict, op=None): if op is None: op = 'update' if isinstance(op, basestring): test_init = init() op = getattr(type(test_init), op, None) if not callable(op): raise ValueError('expected callable "op" arg or an "init" with an .update()' ' method not %r and %r' % (op, init)) super().__init__(subspec=subspec, init=init, op=op) def _fold(self, iterator): # the difference here is that ret is mutated in-place, the # variable not being reassigned, as in base Fold. ret, op = self.init(), self.op for v in iterator: op(ret, v) return ret def _agg(self, target, tree): if self not in tree: acc = tree[self] = self.init() else: acc = tree[self] self.op(acc, target) return acc def merge(target, **kwargs): """By default, ``merge()`` turns an iterable of mappings into a single, merged :class:`dict`, leveraging the behavior of the :meth:`~dict.update` method. A new mapping is created and none of the passed mappings are modified. 
>>> target = [{'a': 'alpha'}, {'b': 'B'}, {'a': 'A'}] >>> res = merge(target) >>> pprint(res) {'a': 'A', 'b': 'B'} Args: target: The list of dicts, or some other iterable of mappings. The start state can be customized with the *init* keyword argument, as well as the update operation, with the *op* keyword argument. For more on those customizations, see the :class:`Merge` spec. """ subspec = kwargs.pop('spec', T) init = kwargs.pop('init', dict) op = kwargs.pop('op', None) if kwargs: raise TypeError('unexpected keyword args: %r' % sorted(kwargs.keys())) spec = Merge(subspec, init, op) return glom(target, spec)
Merge
python
numba__llvmlite
llvmlite/tests/test_ir.py
{ "start": 84867, "end": 95255 }
class ____(TestBase): """ Test various other features of the IRBuilder class. """ def test_attributes(self): block = self.block(name='start') builder = ir.IRBuilder(block) self.assertIs(builder.function, block.parent) self.assertIsInstance(builder.function, ir.Function) self.assertIs(builder.module, block.parent.module) self.assertIsInstance(builder.module, ir.Module) def test_goto_block(self): block = self.block(name='my_block') builder = ir.IRBuilder(block) a, b = builder.function.args[:2] builder.add(a, b, 'c') bb_new = builder.append_basic_block(name='foo') with builder.goto_block(bb_new): builder.fadd(a, b, 'd') with builder.goto_entry_block(): builder.sub(a, b, 'e') builder.fsub(a, b, 'f') builder.branch(bb_new) builder.mul(a, b, 'g') with builder.goto_block(bb_new): builder.fmul(a, b, 'h') self.check_block(block, """\ my_block: %"c" = add i32 %".1", %".2" %"e" = sub i32 %".1", %".2" %"g" = mul i32 %".1", %".2" """) self.check_block(bb_new, """\ foo: %"d" = fadd i32 %".1", %".2" %"f" = fsub i32 %".1", %".2" %"h" = fmul i32 %".1", %".2" br label %"foo" """) def test_if_then(self): block = self.block(name='one') builder = ir.IRBuilder(block) z = ir.Constant(int1, 0) a = builder.add(z, z, 'a') with builder.if_then(a) as bbend: builder.add(z, z, 'b') # Block will be terminated implicitly self.assertIs(builder.block, bbend) c = builder.add(z, z, 'c') with builder.if_then(c): builder.add(z, z, 'd') builder.branch(block) # No implicit termination self.check_func_body(builder.function, """\ one: %"a" = add i1 0, 0 br i1 %"a", label %"one.if", label %"one.endif" one.if: %"b" = add i1 0, 0 br label %"one.endif" one.endif: %"c" = add i1 0, 0 br i1 %"c", label %"one.endif.if", label %"one.endif.endif" one.endif.if: %"d" = add i1 0, 0 br label %"one" one.endif.endif: """) def test_if_then_nested(self): # Implicit termination in a nested if/then block = self.block(name='one') builder = ir.IRBuilder(block) z = ir.Constant(int1, 0) a = builder.add(z, z, 'a') with 
builder.if_then(a): b = builder.add(z, z, 'b') with builder.if_then(b): builder.add(z, z, 'c') builder.ret_void() self.check_func_body(builder.function, """\ one: %"a" = add i1 0, 0 br i1 %"a", label %"one.if", label %"one.endif" one.if: %"b" = add i1 0, 0 br i1 %"b", label %"one.if.if", label %"one.if.endif" one.endif: ret void one.if.if: %"c" = add i1 0, 0 br label %"one.if.endif" one.if.endif: br label %"one.endif" """) def test_if_then_long_label(self): full_label = 'Long' * 20 block = self.block(name=full_label) builder = ir.IRBuilder(block) z = ir.Constant(int1, 0) a = builder.add(z, z, 'a') with builder.if_then(a): b = builder.add(z, z, 'b') with builder.if_then(b): builder.add(z, z, 'c') builder.ret_void() self.check_func_body(builder.function, """\ {full_label}: %"a" = add i1 0, 0 br i1 %"a", label %"{label}.if", label %"{label}.endif" {label}.if: %"b" = add i1 0, 0 br i1 %"b", label %"{label}.if.if", label %"{label}.if.endif" {label}.endif: ret void {label}.if.if: %"c" = add i1 0, 0 br label %"{label}.if.endif" {label}.if.endif: br label %"{label}.endif" """.format(full_label=full_label, label=full_label[:25] + '..')) def test_if_then_likely(self): def check(likely): block = self.block(name='one') builder = ir.IRBuilder(block) z = ir.Constant(int1, 0) with builder.if_then(z, likely=likely): pass self.check_block(block, """\ one: br i1 0, label %"one.if", label %"one.endif", !prof !0 """) return builder builder = check(True) self.check_metadata(builder.module, """\ !0 = !{ !"branch_weights", i32 99, i32 1 } """) builder = check(False) self.check_metadata(builder.module, """\ !0 = !{ !"branch_weights", i32 1, i32 99 } """) def test_if_else(self): block = self.block(name='one') builder = ir.IRBuilder(block) z = ir.Constant(int1, 0) a = builder.add(z, z, 'a') with builder.if_else(a) as (then, otherwise): with then: builder.add(z, z, 'b') with otherwise: builder.add(z, z, 'c') # Each block will be terminated implicitly with builder.if_else(a) as (then, 
otherwise): with then: builder.branch(block) with otherwise: builder.ret_void() # No implicit termination self.check_func_body(builder.function, """\ one: %"a" = add i1 0, 0 br i1 %"a", label %"one.if", label %"one.else" one.if: %"b" = add i1 0, 0 br label %"one.endif" one.else: %"c" = add i1 0, 0 br label %"one.endif" one.endif: br i1 %"a", label %"one.endif.if", label %"one.endif.else" one.endif.if: br label %"one" one.endif.else: ret void one.endif.endif: """) def test_if_else_likely(self): def check(likely): block = self.block(name='one') builder = ir.IRBuilder(block) z = ir.Constant(int1, 0) with builder.if_else(z, likely=likely) as (then, otherwise): with then: builder.branch(block) with otherwise: builder.ret_void() self.check_func_body(builder.function, """\ one: br i1 0, label %"one.if", label %"one.else", !prof !0 one.if: br label %"one" one.else: ret void one.endif: """) return builder builder = check(True) self.check_metadata(builder.module, """\ !0 = !{ !"branch_weights", i32 99, i32 1 } """) builder = check(False) self.check_metadata(builder.module, """\ !0 = !{ !"branch_weights", i32 1, i32 99 } """) def test_positioning(self): """ Test IRBuilder.position_{before,after,at_start,at_end}. 
""" func = self.function() builder = ir.IRBuilder() z = ir.Constant(int32, 0) bb_one = func.append_basic_block(name='one') bb_two = func.append_basic_block(name='two') bb_three = func.append_basic_block(name='three') # .at_start(empty block) builder.position_at_start(bb_one) builder.add(z, z, 'a') # .at_end(empty block) builder.position_at_end(bb_two) builder.add(z, z, 'm') builder.add(z, z, 'n') # .at_start(block) builder.position_at_start(bb_two) o = builder.add(z, z, 'o') builder.add(z, z, 'p') # .at_end(block) builder.position_at_end(bb_one) b = builder.add(z, z, 'b') # .after(instr) builder.position_after(o) builder.add(z, z, 'q') # .before(instr) builder.position_before(b) builder.add(z, z, 'c') self.check_block(bb_one, """\ one: %"a" = add i32 0, 0 %"c" = add i32 0, 0 %"b" = add i32 0, 0 """) self.check_block(bb_two, """\ two: %"o" = add i32 0, 0 %"q" = add i32 0, 0 %"p" = add i32 0, 0 %"m" = add i32 0, 0 %"n" = add i32 0, 0 """) self.check_block(bb_three, """\ three: """) def test_instruction_removal(self): func = self.function() builder = ir.IRBuilder() blk = func.append_basic_block(name='entry') builder.position_at_end(blk) k = ir.Constant(int32, 1234) a = builder.add(k, k, 'a') retvoid = builder.ret_void() self.assertTrue(blk.is_terminated) builder.remove(retvoid) self.assertFalse(blk.is_terminated) b = builder.mul(a, a, 'b') c = builder.add(b, b, 'c') builder.remove(c) builder.ret_void() self.assertTrue(blk.is_terminated) self.check_block(blk, """\ entry: %"a" = add i32 1234, 1234 %"b" = mul i32 %"a", %"a" ret void """) def test_metadata(self): block = self.block(name='my_block') builder = ir.IRBuilder(block) builder.debug_metadata = builder.module.add_metadata([]) builder.alloca(ir.PointerType(int32), name='c') if not ir_layer_typed_pointers_enabled: self.check_block(block, """\ my_block: %"c" = alloca ptr, !dbg !0 """) else: self.check_block(block, """\ my_block: %"c" = alloca i32*, !dbg !0 """)
TestBuilderMisc
python
sphinx-doc__sphinx
sphinx/ext/viewcode.py
{ "start": 1143, "end": 7757 }
class ____(Element): """Node for viewcode anchors. This node will be processed in the resolving phase. For viewcode supported builders, they will be all converted to the anchors. For not supported builders, they will be removed. """ def _get_full_modname(modname: str, attribute: str) -> str | None: if modname is None: # Prevents a TypeError: if the last getattr() call will return None # then it's better to return it directly return None try: # Attempt to find full path of module module_path = modname.split('.') num_parts = len(module_path) for i in range(num_parts, 0, -1): mod_root = '.'.join(module_path[:i]) try: # import_module() caches the module in sys.modules module = importlib.import_module(mod_root) break except ModuleNotFoundError: continue except BaseException as exc: # Importing modules may cause any side effects, including # SystemExit, so we need to catch all errors. msg = f"viewcode failed to import '{mod_root}'." raise ImportError(msg) from exc else: return None if i != num_parts: for mod in module_path[i:]: module = getattr(module, mod) # Allow an attribute to have multiple parts and incidentally allow # repeated .s in the attribute. value = module for attr in attribute.split('.'): if attr: value = getattr(value, attr) return getattr(value, '__module__', None) except AttributeError: # sphinx.ext.viewcode can't follow class instance attribute # then AttributeError logging output only debug mode. logger.debug("Didn't find %s in %s", attribute, modname) return None except Exception as e: # sphinx.ext.viewcode follow python domain directives. # because of that, if there are no real modules exists that specified # by py:function or other directives, viewcode emits a lot of warnings. # It should be displayed only verbose mode. 
logger.verbose(traceback.format_exc().rstrip()) logger.verbose('viewcode can\'t import %s, failed with error "%s"', modname, e) return None def is_supported_builder(builder: type[Builder], viewcode_enable_epub: bool) -> bool: return ( builder.format == 'html' and builder.name != 'singlehtml' and (not builder.name.startswith('epub') or viewcode_enable_epub) ) def doctree_read(app: Sphinx, doctree: Node) -> None: env = app.env events = app.events if not hasattr(env, '_viewcode_modules'): env._viewcode_modules = {} # type: ignore[attr-defined] def has_tag(modname: str, fullname: str, docname: str, refname: str) -> bool: entry = env._viewcode_modules.get(modname, None) # type: ignore[attr-defined] if entry is False: return False code_tags = events.emit_firstresult('viewcode-find-source', modname) if code_tags is None: try: analyzer = ModuleAnalyzer.for_module(modname) analyzer.find_tags() except Exception: env._viewcode_modules[modname] = False # type: ignore[attr-defined] return False code = analyzer.code tags = analyzer.tags else: code, tags = code_tags if entry is None or entry[0] != code: entry = code, tags, {}, refname env._viewcode_modules[modname] = entry # type: ignore[attr-defined] _, tags, used, _ = entry if fullname in tags: used[fullname] = docname return True return False for objnode in list(doctree.findall(addnodes.desc)): if objnode.get('domain') != 'py': continue names: set[str] = set() for signode in objnode: if not isinstance(signode, addnodes.desc_signature): continue modname = signode.get('module') fullname = signode.get('fullname') refname = modname if env.config.viewcode_follow_imported_members: new_modname = events.emit_firstresult( 'viewcode-follow-imported', modname, fullname ) if not new_modname: new_modname = _get_full_modname(modname, fullname) modname = new_modname if not modname: continue fullname = signode.get('fullname') if not has_tag(modname, fullname, env.current_document.docname, refname): continue if fullname in names: # only one 
link per name, please continue names.add(fullname) pagename = posixpath.join(OUTPUT_DIRNAME, modname.replace('.', '/')) signode += viewcode_anchor( reftarget=pagename, refid=fullname, refdoc=env.current_document.docname ) def env_merge_info( app: Sphinx, env: BuildEnvironment, docnames: Set[str], other: BuildEnvironment ) -> None: if not hasattr(other, '_viewcode_modules'): return # create a _viewcode_modules dict on the main environment if not hasattr(env, '_viewcode_modules'): env._viewcode_modules = {} # type: ignore[attr-defined] # now merge in the information from the subprocess for modname, entry in other._viewcode_modules.items(): if modname not in env._viewcode_modules: # type: ignore[attr-defined] env._viewcode_modules[modname] = entry # type: ignore[attr-defined] else: if env._viewcode_modules[modname]: # type: ignore[attr-defined] used = env._viewcode_modules[modname][2] # type: ignore[attr-defined] for fullname, docname in entry[2].items(): if fullname not in used: used[fullname] = docname def env_purge_doc(app: Sphinx, env: BuildEnvironment, docname: str) -> None: modules = getattr(env, '_viewcode_modules', {}) for modname, entry in list(modules.items()): if entry is False: continue _code, _tags, used, _refname = entry for fullname in list(used): if used[fullname] == docname: used.pop(fullname) if len(used) == 0: modules.pop(modname)
viewcode_anchor
python
jazzband__django-oauth-toolkit
tests/test_models.py
{ "start": 12148, "end": 13453 }
class ____(BaseTestModels): def test_str(self): access_token = AccessToken(token="test_token") self.assertEqual("%s" % access_token, access_token.token) def test_user_can_be_none(self): app = Application.objects.create( name="test_app", redirect_uris="http://localhost http://example.com http://example.org", user=self.user, client_type=Application.CLIENT_CONFIDENTIAL, authorization_grant_type=Application.GRANT_AUTHORIZATION_CODE, ) access_token = AccessToken.objects.create(token="test_token", application=app, expires=timezone.now()) self.assertIsNone(access_token.user) def test_expires_can_be_none(self): access_token = AccessToken(token="test_token") self.assertIsNone(access_token.expires) self.assertTrue(access_token.is_expired()) def test_token_checksum_field(self): token = secrets.token_urlsafe(32) access_token = AccessToken.objects.create( user=self.user, token=token, expires=timezone.now() + timedelta(hours=1), ) expected_checksum = hashlib.sha256(token.encode()).hexdigest() self.assertEqual(access_token.token_checksum, expected_checksum)
TestAccessTokenModel
python
mlflow__mlflow
mlflow/types/chat.py
{ "start": 5808, "end": 5953 }
class ____(BaseModel): role: str | None = None content: str | None = None tool_calls: list[ToolCallDelta] | None = None
ChatChoiceDelta
python
tensorflow__tensorflow
tensorflow/python/ops/image_grad_test_base.py
{ "start": 22299, "end": 25755 }
class ____(test.TestCase): TYPES = [np.float32, np.float64] def testShapeIsCorrectAfterOp(self): in_shape = [2, 20, 30, 3] out_shape = [2, 20, 30, 3] for nptype in self.TYPES: x = np.random.randint(0, high=255, size=[2, 20, 30, 3]).astype(nptype) rgb_input_tensor = constant_op.constant(x, shape=in_shape) hsv_out = gen_image_ops.rgb_to_hsv(rgb_input_tensor) with self.cached_session(): self.assertEqual(out_shape, list(hsv_out.get_shape())) hsv_out = self.evaluate(hsv_out) self.assertEqual(out_shape, list(hsv_out.shape)) def testRGBToHSVGradSimpleCase(self): def f(x): return gen_image_ops.rgb_to_hsv(x) for nptype in self.TYPES: # Building a simple input tensor to avoid any discontinuity x = np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6], [0.7, 0.8, 0.9]]).astype(nptype) rgb_input_tensor = constant_op.constant(x, shape=x.shape) # Computing Analytical and Numerical gradients of f(x) analytical, numerical = gradient_checker_v2.compute_gradient( f, [rgb_input_tensor]) self.assertAllClose(numerical, analytical, atol=1e-4) def testRGBToHSVGradRandomCase(self): def f(x): return gen_image_ops.rgb_to_hsv(x) np.random.seed(0) # Building a simple input tensor to avoid any discontinuity x = np.random.rand(1, 5, 5, 3).astype(np.float32) rgb_input_tensor = constant_op.constant(x, shape=x.shape) # Computing Analytical and Numerical gradients of f(x) self.assertLess( gradient_checker_v2.max_error( *gradient_checker_v2.compute_gradient(f, [rgb_input_tensor])), 1e-4) def testRGBToHSVGradSpecialCaseRGreatest(self): # This test tests a specific subset of the input space # with a dummy function implemented with native TF operations. in_shape = [2, 10, 20, 3] def f(x): return gen_image_ops.rgb_to_hsv(x) def f_dummy(x): # This dummy function is a implementation of RGB to HSV using # primitive TF functions for one particular case when R>G>B. r = x[..., 0] g = x[..., 1] b = x[..., 2] # Since MAX = r and MIN = b, we get the following h,s,v values. 
v = r s = 1 - math_ops.div_no_nan(b, r) h = 60 * math_ops.div_no_nan(g - b, r - b) h = h / 360 return array_ops_stack.stack([h, s, v], axis=-1) # Building a custom input tensor where R>G>B x_reds = np.ones((in_shape[0], in_shape[1], in_shape[2])).astype(np.float32) x_greens = 0.5 * np.ones( (in_shape[0], in_shape[1], in_shape[2])).astype(np.float32) x_blues = 0.2 * np.ones( (in_shape[0], in_shape[1], in_shape[2])).astype(np.float32) x = np.stack([x_reds, x_greens, x_blues], axis=-1) rgb_input_tensor = constant_op.constant(x, shape=in_shape) # Computing Analytical and Numerical gradients of f(x) analytical, numerical = gradient_checker_v2.compute_gradient( f, [rgb_input_tensor]) # Computing Analytical and Numerical gradients of f_dummy(x) analytical_dummy, numerical_dummy = gradient_checker_v2.compute_gradient( f_dummy, [rgb_input_tensor]) self.assertAllClose(numerical, analytical, atol=1e-4) self.assertAllClose(analytical_dummy, analytical, atol=1e-4) self.assertAllClose(numerical_dummy, numerical, atol=1e-4) if __name__ == '__main__': test.main()
RGBToHSVOpTestBase
python
Textualize__textual
tests/text_area/test_edit_via_api.py
{ "start": 593, "end": 18163 }
class ____(App): def compose(self) -> ComposeResult: text_area = TextArea() text_area.load_text(TEXT) yield text_area async def test_insert_text_start_maintain_selection_offset(): """Ensure that we can maintain the offset between the location an insert happens and the location of the selection.""" app = TextAreaApp() async with app.run_test(): text_area = app.query_one(TextArea) text_area.move_cursor((0, 5)) text_area.insert("Hello", location=(0, 0)) assert text_area.text == "Hello" + TEXT assert text_area.selection == Selection.cursor((0, 10)) async def test_insert_text_start(): """The document is correctly updated on inserting at the start. If we don't maintain the selection offset, the cursor jumps to the end of the edit and the selection is empty.""" app = TextAreaApp() async with app.run_test(): text_area = app.query_one(TextArea) text_area.move_cursor((0, 5)) text_area.insert("Hello", location=(0, 0), maintain_selection_offset=False) assert text_area.text == "Hello" + TEXT assert text_area.selection == Selection.cursor((0, 5)) async def test_insert_empty_string(): app = TextAreaApp() async with app.run_test(): text_area = app.query_one(TextArea) text_area.load_text("0123456789") text_area.insert("", location=(0, 3)) assert text_area.text == "0123456789" async def test_replace_empty_string(): app = TextAreaApp() async with app.run_test(): text_area = app.query_one(TextArea) text_area.load_text("0123456789") text_area.replace("", start=(0, 3), end=(0, 7)) assert text_area.text == "012789" @pytest.mark.parametrize( "cursor_location,insert_location,cursor_destination", [ ((0, 3), (0, 2), (0, 4)), # API insert just before cursor ((0, 3), (0, 3), (0, 4)), # API insert at cursor location ((0, 3), (0, 4), (0, 3)), # API insert just after cursor ((0, 3), (0, 5), (0, 3)), # API insert just after cursor ], ) async def test_insert_character_near_cursor_maintain_selection_offset( cursor_location, insert_location, cursor_destination, ): app = TextAreaApp() async with 
app.run_test(): text_area = app.query_one(TextArea) text_area.load_text("012345") text_area.move_cursor(cursor_location) text_area.insert("X", location=insert_location) assert text_area.selection == Selection.cursor(cursor_destination) @pytest.mark.parametrize( "cursor_location,insert_location,cursor_destination", [ ((1, 0), (0, 0), (2, 0)), # API insert before cursor row ((0, 0), (0, 0), (1, 0)), # API insert right at cursor row ((0, 0), (1, 0), (0, 0)), # API insert after cursor row ], ) async def test_insert_newline_around_cursor_maintain_selection_offset( cursor_location, insert_location, cursor_destination ): app = TextAreaApp() async with app.run_test(): text_area = app.query_one(TextArea) text_area.move_cursor(cursor_location) text_area.insert("X\n", location=insert_location) assert text_area.selection == Selection.cursor(cursor_destination) async def test_insert_newlines_start(): app = TextAreaApp() async with app.run_test(): text_area = app.query_one(TextArea) text_area.insert("\n\n\n") assert text_area.text == "\n\n\n" + TEXT assert text_area.selection == Selection.cursor((3, 0)) async def test_insert_newlines_end(): app = TextAreaApp() async with app.run_test(): text_area = app.query_one(TextArea) text_area.insert("\n\n\n", location=(4, 0)) assert text_area.text == TEXT + "\n\n\n" async def test_insert_windows_newlines(): app = TextAreaApp() async with app.run_test(): text_area = app.query_one(TextArea) # Although we're inserting windows newlines, the configured newline on # the Document inside the TextArea will be "\n", so when we check TextArea.text # we expect to see "\n". 
text_area.insert("\r\n\r\n\r\n") assert text_area.text == "\n\n\n" + TEXT async def test_insert_old_mac_newlines(): app = TextAreaApp() async with app.run_test(): text_area = app.query_one(TextArea) text_area.insert("\r\r\r") assert text_area.text == "\n\n\n" + TEXT async def test_insert_text_non_cursor_location(): app = TextAreaApp() async with app.run_test(): text_area = app.query_one(TextArea) text_area.insert("Hello", location=(4, 0)) assert text_area.text == TEXT + "Hello" assert text_area.selection == Selection.cursor((0, 0)) async def test_insert_text_non_cursor_location_dont_maintain_offset(): app = TextAreaApp() async with app.run_test(): text_area = app.query_one(TextArea) text_area.selection = Selection((2, 3), (3, 5)) result = text_area.insert( "Hello", location=(4, 0), maintain_selection_offset=False, ) assert result == EditResult( end_location=(4, 5), replaced_text="", ) assert text_area.text == TEXT + "Hello" # Since maintain_selection_offset is False, the selection # is reset to a cursor and goes to the end of the insert. assert text_area.selection == Selection.cursor((4, 5)) async def test_insert_multiline_text(): app = TextAreaApp() async with app.run_test(): text_area = app.query_one(TextArea) text_area.move_cursor((2, 5)) text_area.insert("Hello,\nworld!", maintain_selection_offset=False) expected_content = """\ I must not fear. Fear is the mind-killer. Fear Hello, world!is the little-death that brings total obliteration. I will face my fear. 
""" assert text_area.cursor_location == (3, 6) # Cursor moved to end of insert assert text_area.text == expected_content async def test_insert_multiline_text_maintain_offset(): app = TextAreaApp() async with app.run_test(): text_area = app.query_one(TextArea) text_area.move_cursor((2, 5)) result = text_area.insert("Hello,\nworld!") assert result == EditResult( end_location=(3, 6), replaced_text="", ) # The insert happens at the cursor (default location) # Offset is maintained - we inserted 1 line so cursor shifts # down 1 line, and along by the length of the last insert line. assert text_area.cursor_location == (3, 6) expected_content = """\ I must not fear. Fear is the mind-killer. Fear Hello, world!is the little-death that brings total obliteration. I will face my fear. """ assert text_area.text == expected_content async def test_replace_multiline_text(): app = TextAreaApp() async with app.run_test(): text_area = app.query_one(TextArea) # replace "Fear is the mind-killer\nFear is the little death...\n" # with "Hello,\nworld!\n" result = text_area.replace("Hello,\nworld!\n", start=(1, 0), end=(3, 0)) expected_replaced_text = """\ Fear is the mind-killer. Fear is the little-death that brings total obliteration. """ assert result == EditResult( end_location=(3, 0), replaced_text=expected_replaced_text, ) expected_content = """\ I must not fear. Hello, world! I will face my fear. """ assert text_area.selection == Selection.cursor((0, 0)) # cursor didnt move assert text_area.text == expected_content async def test_replace_multiline_text_maintain_selection(): app = TextAreaApp() async with app.run_test(): text_area = app.query_one(TextArea) # To begin with, the user selects the word "face" text_area.selection = Selection((3, 7), (3, 11)) assert text_area.selected_text == "face" # Text is inserted via the API in a way that shifts # the start and end locations of the word "face" in # both the horizontal and vertical directions. 
text_area.replace( "Hello,\nworld!\n123\n456", start=(1, 0), end=(3, 0), ) expected_content = """\ I must not fear. Hello, world! 123 456I will face my fear. """ # Despite this insert, the selection locations are updated # and the word face is still highlighted. This ensures that # if text is insert programmatically, a user that is typing # won't lose their place - the cursor will maintain the same # relative position in the document as before. assert text_area.selected_text == "face" assert text_area.selection == Selection((4, 10), (4, 14)) assert text_area.text == expected_content async def test_delete_within_line(): app = TextAreaApp() async with app.run_test(): text_area = app.query_one(TextArea) text_area.selection = Selection((0, 11), (0, 15)) assert text_area.selected_text == "fear" # Delete some text before the selection location. result = text_area.delete((0, 6), (0, 10)) # Even though the word has 'shifted' left, it's still selected. assert text_area.selection == Selection((0, 7), (0, 11)) assert text_area.selected_text == "fear" # We've recorded exactly what text was replaced in the EditResult assert result == EditResult( end_location=(0, 6), replaced_text=" not", ) expected_text = """\ I must fear. Fear is the mind-killer. Fear is the little-death that brings total obliteration. I will face my fear. """ assert text_area.text == expected_text async def test_delete_within_line_dont_maintain_offset(): app = TextAreaApp() async with app.run_test(): text_area = app.query_one(TextArea) text_area.delete((0, 6), (0, 10), maintain_selection_offset=False) expected_text = """\ I must fear. Fear is the mind-killer. Fear is the little-death that brings total obliteration. I will face my fear. 
""" assert text_area.selection == Selection.cursor((0, 6)) # cursor moved assert text_area.text == expected_text async def test_delete_multiple_lines_selection_above(): app = TextAreaApp() async with app.run_test(): text_area = app.query_one(TextArea) # User has selected text on the first line... text_area.selection = Selection((0, 2), (0, 6)) assert text_area.selected_text == "must" # Some lines below are deleted... result = text_area.delete((1, 0), (3, 0)) # The selection is not affected at all. assert text_area.selection == Selection((0, 2), (0, 6)) # We've recorded the text that was deleted in the ReplaceResult. # Lines of index 1 and 2 were deleted. Since the end # location of the selection is (3, 0), the newline # marker is included in the deletion. expected_replaced_text = """\ Fear is the mind-killer. Fear is the little-death that brings total obliteration. """ assert result == EditResult( end_location=(1, 0), replaced_text=expected_replaced_text, ) assert ( text_area.text == """\ I must not fear. I will face my fear. """ ) async def test_delete_empty_document(): app = TextAreaApp() async with app.run_test(): text_area = app.query_one(TextArea) text_area.load_text("") result = text_area.delete((0, 0), (1, 0)) assert result.replaced_text == "" assert text_area.text == "" async def test_clear(): app = TextAreaApp() async with app.run_test(): text_area = app.query_one(TextArea) text_area.clear() async def test_clear_empty_document(): app = TextAreaApp() async with app.run_test(): text_area = app.query_one(TextArea) text_area.load_text("") text_area.clear() @pytest.mark.parametrize( "select_from,select_to", [ [(0, 3), (2, 1)], [(2, 1), (0, 3)], # Ensuring independence from selection direction. ], ) async def test_insert_text_multiline_selection_top(select_from, select_to): """ An example to attempt to explain what we're testing here... 
X = edit range, * = character in TextArea, S = selection *********XX XXXXX***SSS SSSSSSSSSSS SSSS******* If an edit happens at XXXX, we need to ensure that the SSS on the same line is adjusted appropriately so that it's still highlighting the same characters as before. """ app = TextAreaApp() async with app.run_test(): # ABCDE # FGHIJ # KLMNO # PQRST # UVWXY # Z text_area = app.query_one(TextArea) text_area.load_text(SIMPLE_TEXT) text_area.selection = Selection(select_from, select_to) # Check what text is selected. expected_selected_text = "DE\nFGHIJ\nK" assert text_area.selected_text == expected_selected_text result = text_area.replace( "Hello", start=(0, 0), end=(0, 2), ) assert result == EditResult(end_location=(0, 5), replaced_text="AB") # The edit range has grown from width 2 to width 5, so the # top line of the selection was adjusted (column+=3) such that the # same characters are highlighted: # ... the selection is not changed after programmatic insert # ... the same text is selected as before. assert text_area.selected_text == expected_selected_text # The resulting text in the TextArea is correct. assert text_area.text == "HelloCDE\nFGHIJ\nKLMNO\nPQRST\nUVWXY\nZ\n" @pytest.mark.parametrize( "select_from,select_to", [ [(0, 3), (2, 5)], [(2, 5), (0, 3)], # Ensuring independence from selection direction. ], ) async def test_insert_text_multiline_selection_bottom(select_from, select_to): """ The edited text is within the selected text on the bottom line of the selection. The bottom of the selection should be adjusted such that any text that was previously selected is still selected. """ app = TextAreaApp() async with app.run_test(): # ABCDE # FGHIJ # KLMNO # PQRST # UVWXY # Z text_area = app.query_one(TextArea) text_area.load_text(SIMPLE_TEXT) text_area.selection = Selection(select_from, select_to) # Check what text is selected. 
assert text_area.selected_text == "DE\nFGHIJ\nKLMNO" result = text_area.replace( "*", start=(2, 0), end=(2, 3), ) assert result == EditResult(end_location=(2, 1), replaced_text="KLM") # The 'NO' from the selection is still available on the # bottom selection line, however the 'KLM' is replaced # with '*'. Since 'NO' is still available, it's maintained # within the selection. assert text_area.selected_text == "DE\nFGHIJ\n*NO" # The resulting text in the TextArea is correct. # 'KLM' replaced with '*' assert text_area.text == "ABCDE\nFGHIJ\n*NO\nPQRST\nUVWXY\nZ\n" async def test_delete_fully_within_selection(): """User-facing selection should be best-effort adjusted when a programmatic replacement is made to the document.""" app = TextAreaApp() async with app.run_test(): text_area = app.query_one(TextArea) text_area.load_text("0123456789") text_area.selection = Selection((0, 2), (0, 7)) assert text_area.selected_text == "23456" result = text_area.delete((0, 4), (0, 6)) assert result == EditResult( replaced_text="45", end_location=(0, 4), ) # We deleted 45, but the other characters are still available assert text_area.selected_text == "236" assert text_area.text == "01236789" async def test_replace_fully_within_selection(): """Adjust the selection when a replacement happens inside it.""" app = TextAreaApp() async with app.run_test(): text_area = app.query_one(TextArea) text_area.load_text("0123456789") text_area.selection = Selection((0, 2), (0, 7)) assert text_area.selected_text == "23456" result = text_area.replace("XX", start=(0, 2), end=(0, 5)) assert result == EditResult( replaced_text="234", end_location=(0, 4), ) assert text_area.selected_text == "XX56" async def test_text_setter(): app = TextAreaApp() async with app.run_test(): text_area = app.query_one(TextArea) new_text = "hello\nworld\n" text_area.text = new_text assert text_area.text == new_text async def test_edits_on_read_only_mode(): """API edits should still be permitted on read-only mode.""" app = 
TextAreaApp() async with app.run_test(): text_area = app.query_one(TextArea) text_area.text = "0123456789" text_area.read_only = True text_area.replace("X", (0, 1), (0, 5)) assert text_area.text == "0X56789" text_area.insert("X") assert text_area.text == "X0X56789" text_area.delete((0, 0), (0, 2)) assert text_area.text == "X56789"
TextAreaApp
python
facebook__pyre-check
tools/pysa_integration_tests/runner_lib.py
{ "start": 13163, "end": 18050 }
class ____: def __init__(self) -> None: self.function_annotations: Dict[str, FunctionTestAnnotations] = {} def set(self, function: str, annotations: FunctionTestAnnotations) -> None: self.function_annotations[function] = annotations def add(self, other: "DirectoryTestAnnotations") -> None: for name, annotations in other.function_annotations.items(): if name in self.function_annotations: raise AssertionError( f"Could NOT merge annotations with conflicting definitions: {name}" ) self.function_annotations[name] = annotations def number_annotations(self) -> int: return sum( len(function_annotations.annotations) for function_annotations in self.function_annotations.values() ) def dump(self, output: IO[str]) -> None: output.write( json.dumps( { name: [ annotation.asdict() for annotation in function_annotations.annotations ] for name, function_annotations in self.function_annotations.items() } ) ) output.write("\n") def parse_test_annotation( decorator: ast.Call, decorated_function: str ) -> Optional[TestAnnotation]: if not isinstance(decorator.func, ast.Name): return None expected: bool = True if decorator.func.id == "ExpectIssue": expected = True elif decorator.func.id == "ExpectNoIssue": # pyre-ignore expected = False else: # Not a test annotation. 
return None code: Optional[int] = None line: Optional[int] = None task: Optional[str] = None currently_found: Optional[bool] = None if len(decorator.args) > 0: raise TestConfigurationException( f"Invalid annotation `{decorator.func.id}` on `{decorated_function}`: " + "Unsupported positional argument" ) for keyword_argument in decorator.keywords: if keyword_argument.arg is None: raise TestConfigurationException( f"Invalid annotation `{decorator.func.id}` on `{decorated_function}`: " + "Unsupported **kwargs" ) if keyword_argument.arg == "code": if not isinstance(keyword_argument.value, ast.Constant) or not isinstance( keyword_argument.value.value, int ): raise TestConfigurationException( f"Invalid annotation `{decorator.func.id}` on `{decorated_function}`: " + f"Invalid type for parameter {keyword_argument.arg}" ) code = keyword_argument.value.value elif keyword_argument.arg == "line": if not isinstance(keyword_argument.value, ast.Constant) or not isinstance( keyword_argument.value.value, int ): raise TestConfigurationException( f"Invalid annotation `{decorator.func.id}` on `{decorated_function}`: " + f"Invalid type for parameter {keyword_argument.arg}" ) line = keyword_argument.value.value elif keyword_argument.arg == "task": if not isinstance(keyword_argument.value, ast.Constant) or not isinstance( keyword_argument.value.value, str ): raise TestConfigurationException( f"Invalid annotation `{decorator.func.id}` on `{decorated_function}`: " + f"Invalid type for parameter {keyword_argument.arg}" ) task = keyword_argument.value.value elif keyword_argument.arg == "currently_found": if not isinstance(keyword_argument.value, ast.Constant) or not isinstance( keyword_argument.value.value, bool ): raise TestConfigurationException( f"Invalid annotation `{decorator.func.id}` on `{decorated_function}`: " + f"Invalid type for parameter {keyword_argument.arg}" ) currently_found = keyword_argument.value.value else: raise TestConfigurationException( f"Invalid annotation 
`{decorator.func.id}` on `{decorated_function}`: " + f"Unexpected parameter {keyword_argument.arg}" ) if code is None: raise TestConfigurationException( f"Invalid annotation `{decorator.func.id}` on `{decorated_function}`: " + "Missing required parameter 'code'" ) return TestAnnotation( expected=expected, code=code, line=line, task=task, currently_found=currently_found, )
DirectoryTestAnnotations
python
huggingface__transformers
src/transformers/trainer_callback.py
{ "start": 1000, "end": 8580 }
class ____: """ A class containing the [`Trainer`] inner state that will be saved along the model and optimizer when checkpointing and passed to the [`TrainerCallback`]. <Tip> In all this class, one step is to be understood as one update step. When using gradient accumulation, one update step may require several forward and backward passes: if you use `gradient_accumulation_steps=n`, then one update step requires going through *n* batches. </Tip> Args: epoch (`float`, *optional*): Only set during training, will represent the epoch the training is at (the decimal part being the percentage of the current epoch completed). global_step (`int`, *optional*, defaults to 0): During training, represents the number of update steps completed. max_steps (`int`, *optional*, defaults to 0): The number of update steps to do during the current training. logging_steps (`int`, *optional*, defaults to 500): Log every X updates steps eval_steps (`int`, *optional*): Run an evaluation every X steps. save_steps (`int`, *optional*, defaults to 500): Save checkpoint every X updates steps. train_batch_size (`int`, *optional*): The batch size for the training dataloader. Only needed when `auto_find_batch_size` has been used. num_input_tokens_seen (`int`, *optional*, defaults to 0): When tracking the inputs tokens, the number of tokens seen during training (number of input tokens, not the number of prediction tokens). total_flos (`float`, *optional*, defaults to 0): The total number of floating operations done by the model since the beginning of training (stored as floats to avoid overflow). log_history (`list[dict[str, float]]`, *optional*): The list of logs done since the beginning of training. best_metric (`float`, *optional*): When tracking the best model, the value of the best metric encountered so far. best_global_step (`int`, *optional*): When tracking the best model, the step at which the best metric was encountered. Used for setting `best_model_checkpoint`. 
best_model_checkpoint (`str`, *optional*): When tracking the best model, the value of the name of the checkpoint for the best model encountered so far. is_local_process_zero (`bool`, *optional*, defaults to `True`): Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several machines) main process. is_world_process_zero (`bool`, *optional*, defaults to `True`): Whether or not this process is the global main process (when training in a distributed fashion on several machines, this is only going to be `True` for one process). is_hyper_param_search (`bool`, *optional*, defaults to `False`): Whether we are in the process of a hyper parameter search using Trainer.hyperparameter_search. This will impact the way data will be logged in TensorBoard. stateful_callbacks (`list[StatefulTrainerCallback]`, *optional*): Callbacks attached to the `Trainer` that should have their states be saved or restored. Relevant callbacks should implement a `state` and `from_state` function. 
""" epoch: float | None = None global_step: int = 0 max_steps: int = 0 logging_steps: int = 500 eval_steps: int = 500 save_steps: int = 500 train_batch_size: int | None = None num_train_epochs: int = 0 num_input_tokens_seen: int = 0 total_flos: float = 0 log_history: list[dict[str, float]] = None best_metric: float | None = None best_global_step: int | None = None best_model_checkpoint: str | None = None is_local_process_zero: bool = True is_world_process_zero: bool = True is_hyper_param_search: bool = False trial_name: str | None = None trial_params: dict[str, str | float | int | bool] | None = None stateful_callbacks: list["TrainerCallback"] | None = None def __post_init__(self): if self.log_history is None: self.log_history = [] if self.stateful_callbacks is None: self.stateful_callbacks = {} elif isinstance(self.stateful_callbacks, dict): # We are loading the callbacks in from the state file, no need to process them pass else: # Saveable callbacks get stored as dict of kwargs stateful_callbacks = {} for callback in self.stateful_callbacks: if not isinstance(callback, (ExportableState)): raise TypeError( f"All callbacks passed to be saved must inherit `ExportableState`, but received {type(callback)}" ) name = callback.__class__.__name__ if name in stateful_callbacks: # We can have multiple versions of the same callback # if so, we store them as a list of states to restore if not isinstance(stateful_callbacks[name], list): stateful_callbacks[name] = [stateful_callbacks[name]] stateful_callbacks[name].append(callback.state()) else: stateful_callbacks[name] = callback.state() self.stateful_callbacks = stateful_callbacks def save_to_json(self, json_path: str): """Save the content of this instance in JSON format inside `json_path`.""" json_string = json.dumps(dataclasses.asdict(self), indent=2, sort_keys=True) + "\n" with open(json_path, "w", encoding="utf-8") as f: f.write(json_string) @classmethod def load_from_json(cls, json_path: str): """Create an instance from 
the content of `json_path`.""" with open(json_path, encoding="utf-8") as f: text = f.read() return cls(**json.loads(text)) def compute_steps(self, args, max_steps): """ Calculates and stores the absolute value for logging, eval, and save steps based on if it was a proportion or not. """ for step_kind in ("logging", "eval", "save"): num_steps = getattr(args, f"{step_kind}_steps") if num_steps is not None: if num_steps < 1: num_steps = math.ceil(max_steps * num_steps) setattr(self, f"{step_kind}_steps", num_steps) def init_training_references(self, trainer, max_steps, num_train_epochs, trial): """ Stores the initial training references needed in `self` """ if trainer.hp_name is not None and trainer._trial is not None: # use self._trial because the Optuna hpo only call `_hp_search_setup(trial)` instead of passing trial # parameter to Train when using DDP. self.trial_name = trainer.hp_name(trainer._trial) self.trial_params = None if trial is not None: from transformers.integrations import hp_params self.trial_params = hp_params(trial) self.max_steps = max_steps self.num_train_epochs = num_train_epochs self.is_local_process_zero = trainer.is_local_process_zero() self.is_world_process_zero = trainer.is_world_process_zero()
TrainerState
python
pydantic__pydantic
tests/mypy/modules/plugin_success.py
{ "start": 3377, "end": 3557 }
class ____(FrozenModel, frozen=False, from_attributes=True): a: int = 1 KwargsNotFrozenModel(x=1).x = 2 KwargsNotFrozenModel.model_validate(model.__dict__)
KwargsNotFrozenModel
python
scipy__scipy
scipy/linalg/_matfuncs_sqrtm.py
{ "start": 215, "end": 3423 }
class ____(np.linalg.LinAlgError): pass from ._matfuncs_sqrtm_triu import within_block_loop # noqa: E402 def _sqrtm_triu(T, blocksize=64): """ Matrix square root of an upper triangular matrix. This is a helper function for `sqrtm` and `logm`. Parameters ---------- T : (N, N) array_like upper triangular Matrix whose square root to evaluate blocksize : int, optional If the blocksize is not degenerate with respect to the size of the input array, then use a blocked algorithm. (Default: 64) Returns ------- sqrtm : (N, N) ndarray Value of the sqrt function at `T` References ---------- .. [1] Edvin Deadman, Nicholas J. Higham, Rui Ralha (2013) "Blocked Schur Algorithms for Computing the Matrix Square Root, Lecture Notes in Computer Science, 7782. pp. 171-182. """ T_diag = np.diag(T) keep_it_real = np.isrealobj(T) and np.min(T_diag, initial=0.) >= 0 # Cast to complex as necessary + ensure double precision if not keep_it_real: T = np.asarray(T, dtype=np.complex128, order="C") T_diag = np.asarray(T_diag, dtype=np.complex128) else: T = np.asarray(T, dtype=np.float64, order="C") T_diag = np.asarray(T_diag, dtype=np.float64) R = np.diag(np.sqrt(T_diag)) # Compute the number of blocks to use; use at least one block. n, n = T.shape nblocks = max(n // blocksize, 1) # Compute the smaller of the two sizes of blocks that # we will actually use, and compute the number of large blocks. bsmall, nlarge = divmod(n, nblocks) blarge = bsmall + 1 nsmall = nblocks - nlarge if nsmall * bsmall + nlarge * blarge != n: raise Exception('internal inconsistency') # Define the index range covered by each block. 
start_stop_pairs = [] start = 0 for count, size in ((nsmall, bsmall), (nlarge, blarge)): for i in range(count): start_stop_pairs.append((start, start + size)) start += size # Within-block interactions (Cythonized) try: within_block_loop(R, T, start_stop_pairs, nblocks) except RuntimeError as e: raise SqrtmError(*e.args) from e # Between-block interactions (Cython would give no significant speedup) for j in range(nblocks): jstart, jstop = start_stop_pairs[j] for i in range(j-1, -1, -1): istart, istop = start_stop_pairs[i] S = T[istart:istop, jstart:jstop] if j - i > 1: S = S - R[istart:istop, istop:jstart].dot(R[istop:jstart, jstart:jstop]) # Invoke LAPACK. # For more details, see the solve_sylvester implementation # and the fortran dtrsyl and ztrsyl docs. Rii = R[istart:istop, istart:istop] Rjj = R[jstart:jstop, jstart:jstop] if keep_it_real: x, scale, info = dtrsyl(Rii, Rjj, S) else: x, scale, info = ztrsyl(Rii, Rjj, S) R[istart:istop, jstart:jstop] = x * scale # Return the matrix square root. return R
SqrtmError
python
xlwings__xlwings
xlwings/base_classes.py
{ "start": 17944, "end": 18238 }
class ____: @property def api(self): raise NotImplementedError() @property def text(self): raise NotImplementedError() @text.setter def text(self, value): raise NotImplementedError() def delete(self): raise NotImplementedError()
Note
python
sqlalchemy__sqlalchemy
test/dialect/mssql/test_deprecations.py
{ "start": 839, "end": 5777 }
class ____(fixtures.TestBase, AssertsCompiledSQL): """Legacy behavior tried to prevent schema-qualified tables from being rendered as dotted names, and were instead aliased. This behavior no longer seems to be required. """ def setup_test(self): metadata = MetaData() self.t1 = table( "t1", column("a", Integer), column("b", String), column("c", String), ) self.t2 = Table( "t2", metadata, Column("a", Integer), Column("b", Integer), Column("c", Integer), schema="schema", ) def _assert_sql(self, element, legacy_sql, modern_sql=None): dialect = self._legacy_dialect() self.assert_compile(element, legacy_sql, dialect=dialect) dialect = mssql.dialect() self.assert_compile(element, modern_sql or "foob", dialect=dialect) def _legacy_dialect(self): with _legacy_schema_aliasing_warning(): return mssql.dialect(legacy_schema_aliasing=True) @testing.combinations( ( { "sqlalchemy.url": "mssql+pyodbc://foodsn", "sqlalchemy.legacy_schema_aliasing": "true", }, True, ), ( { "sqlalchemy.url": "mssql+pyodbc://foodsn", "sqlalchemy.legacy_schema_aliasing": "false", }, False, ), ) def test_legacy_schema_flag(self, cfg, expected): with testing.expect_deprecated("The legacy_schema_aliasing parameter"): e = engine_from_config( cfg, module=Mock(version="MS SQL Server 11.0.92") ) is_(e.dialect.legacy_schema_aliasing, expected) def test_result_map(self): s = self.t2.select() c = s.compile(dialect=self._legacy_dialect()) assert self.t2.c.a in set(c._create_result_map()["a"][1]) def test_result_map_use_labels(self): s = self.t2.select().set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL) c = s.compile(dialect=self._legacy_dialect()) assert self.t2.c.a in set(c._create_result_map()["schema_t2_a"][1]) def test_straight_select(self): self._assert_sql( self.t2.select(), "SELECT t2_1.a, t2_1.b, t2_1.c FROM [schema].t2 AS t2_1", "SELECT [schema].t2.a, [schema].t2.b, " "[schema].t2.c FROM [schema].t2", ) def test_straight_select_use_labels(self): self._assert_sql( 
self.t2.select().set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL), "SELECT t2_1.a AS schema_t2_a, t2_1.b AS schema_t2_b, " "t2_1.c AS schema_t2_c FROM [schema].t2 AS t2_1", "SELECT [schema].t2.a AS schema_t2_a, " "[schema].t2.b AS schema_t2_b, " "[schema].t2.c AS schema_t2_c FROM [schema].t2", ) def test_join_to_schema(self): t1, t2 = self.t1, self.t2 self._assert_sql( t1.join(t2, t1.c.a == t2.c.a).select(), "SELECT t1.a, t1.b, t1.c, t2_1.a AS a_1, t2_1.b AS b_1, " "t2_1.c AS c_1 FROM t1 " "JOIN [schema].t2 AS t2_1 ON t2_1.a = t1.a", "SELECT t1.a, t1.b, t1.c, [schema].t2.a AS a_1, " "[schema].t2.b AS b_1, " "[schema].t2.c AS c_1 FROM t1 JOIN [schema].t2 " "ON [schema].t2.a = t1.a", ) def test_union_schema_to_non(self): t1, t2 = self.t1, self.t2 s = ( select(t2.c.a, t2.c.b) .set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL) .union( select(t1.c.a, t1.c.b).set_label_style( LABEL_STYLE_TABLENAME_PLUS_COL ) ) .alias() .select() ) self._assert_sql( s, "SELECT anon_1.schema_t2_a, anon_1.schema_t2_b FROM " "(SELECT t2_1.a AS schema_t2_a, t2_1.b AS schema_t2_b " "FROM [schema].t2 AS t2_1 UNION SELECT t1.a AS t1_a, " "t1.b AS t1_b FROM t1) AS anon_1", "SELECT anon_1.schema_t2_a, anon_1.schema_t2_b FROM " "(SELECT [schema].t2.a AS schema_t2_a, [schema].t2.b AS " "schema_t2_b FROM [schema].t2 UNION SELECT t1.a AS t1_a, " "t1.b AS t1_b FROM t1) AS anon_1", ) def test_column_subquery_to_alias(self): a1 = self.t2.alias("a1") s = select(self.t2, select(a1.c.a).scalar_subquery()) self._assert_sql( s, "SELECT t2_1.a, t2_1.b, t2_1.c, " "(SELECT a1.a FROM [schema].t2 AS a1) " "AS anon_1 FROM [schema].t2 AS t2_1", "SELECT [schema].t2.a, [schema].t2.b, [schema].t2.c, " "(SELECT a1.a FROM [schema].t2 AS a1) AS anon_1 FROM [schema].t2", )
LegacySchemaAliasingTest
python
getsentry__sentry
src/sentry/grouping/fingerprinting/utils.py
{ "start": 924, "end": 972 }
class ____(TypedDict): family: str
_FamilyInfo
python
bokeh__bokeh
src/bokeh/server/server.py
{ "start": 17852, "end": 20870 }
class ____(Options): num_procs: int = Int(default=1, help=""" The number of worker processes to start for the HTTP server. If an explicit ``io_loop`` is also configured, then ``num_procs=1`` is the only compatible value. Use ``BaseServer`` to coordinate an explicit ``IOLoop`` with a multi-process HTTP server. A value of 0 will auto detect number of cores. Note that due to limitations inherent in Tornado, Windows does not support ``num_procs`` values greater than one! In this case consider running multiple Bokeh server instances behind a load balancer. """) # type: ignore[assignment] address : str | None = Nullable(String, help=""" The address the server should listen on for HTTP requests. """) # type: ignore[assignment] port: int = Int(default=DEFAULT_SERVER_PORT, help=""" The port number the server should listen on for HTTP requests. """) # type: ignore[assignment] unix_socket : str | None = Nullable(String, help=""" The unix socket the server should bind to. Other network args such as port, address, ssl options etc are incompatible with unix sockets. Unix socket support is not available on windows. """) # type: ignore[assignment] prefix: str = String(default="", help=""" A URL prefix to use for all Bokeh server paths. """) # type: ignore[assignment] index: str | None = Nullable(String, help=""" A path to a Jinja2 template to use for the index "/" """) # type: ignore[assignment] allow_websocket_origin: list[str] | None = Nullable(List(String), help=""" A list of hosts that can connect to the websocket. This is typically required when embedding a Bokeh server app in an external web site using :func:`~bokeh.embed.server_document` or similar. If None, "localhost" is used. """) # type: ignore[assignment] use_xheaders: bool = Bool(default=False, help=""" Whether to have the Bokeh server override the remote IP and URI scheme and protocol for all requests with ``X-Real-Ip``, ``X-Forwarded-For``, ``X-Scheme``, ``X-Forwarded-Proto`` headers (if they are provided). 
""") # type: ignore[assignment] ssl_certfile: str | None = Nullable(String, help=""" The path to a certificate file for SSL termination. """) # type: ignore[assignment] ssl_keyfile: str | None = Nullable(String, help=""" The path to a private key file for SSL termination. """) # type: ignore[assignment] ssl_password: str | None = Nullable(String, help=""" A password to decrypt the SSL keyfile, if necessary. """) # type: ignore[assignment] websocket_max_message_size: int = Int(default=DEFAULT_WEBSOCKET_MAX_MESSAGE_SIZE_BYTES, help=""" Set the Tornado ``websocket_max_message_size`` value. """) # type: ignore[assignment] #----------------------------------------------------------------------------- # Code #-----------------------------------------------------------------------------
_ServerOpts
python
modin-project__modin
modin/tests/test_logging.py
{ "start": 889, "end": 4880 }
class ____: _loggers = {} def __init__(self, namespace): self.messages = collections.defaultdict(list) self.namespace = namespace def log(self, log_level, message, *args, **kw): self.messages[log_level].append(message.format(*args, **kw)) def exception(self, message, *args, **kw): self.messages["exception"].append(message.format(*args, **kw)) @classmethod def make(cls, namespace): return cls._loggers.setdefault(namespace, cls(namespace)) @classmethod def get(cls, namespace="modin.logger.default"): return cls._loggers[namespace].messages @classmethod def clear(cls): cls._loggers = {} def _get_logger(namespace="modin.logger.default"): return _FakeLogger.make(namespace) def mock_get_logger(ctx): ctx.setattr(logging, "getLogger", _get_logger) @pytest.fixture def get_log_messages(): old = LogMode.get() LogMode.enable() modin.logging.get_logger() # initialize the logging pior to mocking getLogger() yield _FakeLogger.get _FakeLogger.clear() LogMode.put(old) def test_function_decorator(monkeypatch, get_log_messages): @modin.logging.enable_logging def func(do_raise): if do_raise: raise ValueError() with monkeypatch.context() as ctx: # NOTE: we cannot patch in the fixture as mockin logger.getLogger() # without monkeypatch.context() breaks pytest mock_get_logger(ctx) func(do_raise=False) with pytest.raises(ValueError): func(do_raise=True) assert "func" in get_log_messages()[logging.INFO][0] assert "START" in get_log_messages()[logging.INFO][0] assert get_log_messages("modin.logger.errors")["exception"] == [ "STOP::PANDAS-API::func" ] def test_function_decorator_on_outer_function_6237(monkeypatch, get_log_messages): @modin.logging.enable_logging def inner_func(): raise ValueError() @modin.logging.enable_logging def outer_func(): inner_func() with monkeypatch.context() as ctx: # NOTE: we cannot patch in the fixture as mockin logger.getLogger() # without monkeypatch.context() breaks pytest mock_get_logger(ctx) with pytest.raises(ValueError): outer_func() assert 
get_log_messages("modin.logger.errors")["exception"] == [ "STOP::PANDAS-API::inner_func" ] def test_class_decorator(monkeypatch, get_log_messages): @modin.logging.enable_logging("CUSTOM") class Foo: def method1(self): pass @classmethod def method2(cls): pass @staticmethod def method3(): pass class Bar(Foo): def method4(self): pass with monkeypatch.context() as ctx: mock_get_logger(ctx) Foo().method1() Foo.method2() Foo.method3() Bar().method1() Bar().method4() assert get_log_messages()[logging.INFO] == [ "START::CUSTOM::Foo.method1", "STOP::CUSTOM::Foo.method1", "START::CUSTOM::Foo.method2", "STOP::CUSTOM::Foo.method2", "START::CUSTOM::Foo.method3", "STOP::CUSTOM::Foo.method3", "START::CUSTOM::Foo.method1", "STOP::CUSTOM::Foo.method1", ] def test_class_inheritance(monkeypatch, get_log_messages): class Foo(modin.logging.ClassLogger, modin_layer="CUSTOM"): def method1(self): pass class Bar(Foo): def method2(self): pass with monkeypatch.context() as ctx: mock_get_logger(ctx) Foo().method1() Bar().method1() Bar().method2() assert get_log_messages()[logging.INFO] == [ "START::CUSTOM::Foo.method1", "STOP::CUSTOM::Foo.method1", "START::CUSTOM::Foo.method1", "STOP::CUSTOM::Foo.method1", "START::CUSTOM::Bar.method2", "STOP::CUSTOM::Bar.method2", ]
_FakeLogger
python
scipy__scipy
scipy/fftpack/tests/test_real_transforms.py
{ "start": 9938, "end": 10070 }
class ____(_TestDCTIVBase): def setup_method(self): self.rdt = int self.dec = 5 self.type = 3
TestDCTIVInt
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/ruff/RUF009_attrs_auto_attribs.py
{ "start": 597, "end": 731 }
class ____: a: str = 0 b = field() c: int = foo() d = list() @frozen(auto_attribs=None) # auto_attribs = None => True
C
python
huggingface__transformers
tests/models/idefics2/test_modeling_idefics2.py
{ "start": 5658, "end": 15704 }
class ____(ModelTesterMixin, unittest.TestCase): """ Model tester for `Idefics2`. """ all_model_classes = (Idefics2Model,) if is_torch_available() else () test_resize_embeddings = True _is_composite = True def setUp(self): self.model_tester = Idefics2VisionText2TextModelTester(self) self.config_tester = ConfigTester( self, config_class=Idefics2Config, has_text_modality=False, common_properties=["image_token_id"] ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="input_embeds cannot be passed in without input_ids") def test_inputs_embeds(): pass @unittest.skip(reason="input_embeds cannot be passed in without input_ids") def test_inputs_embeds_matches_input_ids(self): pass @unittest.skip(reason="Model does not support padding right") def test_flash_attn_2_generate_padding_right(self): pass @unittest.skip(reason="Model does not support padding right") def test_flash_attn_2_inference_padding_right(self): pass # We need to override as we need to prepare such that the image token is the last token def test_resize_tokens_embeddings(self): (original_config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config) model.to(torch_device) if self.model_tester.is_training is False: model.eval() model_vocab_size = config.text_config.vocab_size # Retrieve the embeddings and clone theme model_embed = model.resize_token_embeddings(model_vocab_size) cloned_embeddings = model_embed.weight.clone() # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_embed = model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.text_config.vocab_size, model_vocab_size + 10) # Check that it actually resizes the embeddings matrix self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10) # Check that the model can still do a forward pass 
successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model_embed = model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.text_config.vocab_size, model_vocab_size - 15) # Check that it actually resizes the embeddings matrix self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15) # Ignore copy # Check that the model can still do a forward pass successfully (every parameter should be resized) # Input ids should be clamped to the maximum size of the vocabulary - 1 and the image token should be the last token inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 2) n_images = self.model_tester.num_images * self.model_tester.perceiver_config["resampler_n_latents"] model.image_token_id = model_vocab_size - 15 - 1 inputs_dict["input_ids"][:, -n_images:] = model.image_token_id # make sure that decoder_input_ids are resized as well if "decoder_input_ids" in inputs_dict: inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that adding and removing tokens has not modified the first part of the embedding matrix. 
models_equal = True for p1, p2 in zip(cloned_embeddings, model_embed.weight): if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) config = copy.deepcopy(original_config) model = model_class(config) model.to(torch_device) model_vocab_size = config.text_config.vocab_size model.resize_token_embeddings(model_vocab_size + 10, pad_to_multiple_of=1) self.assertTrue(model.config.text_config.vocab_size + 10, model_vocab_size) model_embed = model.resize_token_embeddings(model_vocab_size, pad_to_multiple_of=64) self.assertTrue(model_embed.weight.shape[0] // 64, 0) self.assertTrue(model_embed.weight.shape[0], model.config.text_config.vocab_size) self.assertTrue(model.config.text_config.vocab_size, model.vocab_size) model_embed = model.resize_token_embeddings(model_vocab_size + 13, pad_to_multiple_of=64) self.assertTrue(model_embed.weight.shape[0] // 64, 0) # Check that resizing a model to a multiple of pad_to_multiple leads to a model of exactly that size target_dimension = 128 model_embed = model.resize_token_embeddings(target_dimension, pad_to_multiple_of=64) self.assertTrue(model_embed.weight.shape[0], target_dimension) with self.assertRaisesRegex( ValueError, "Asking to pad the embedding matrix to a multiple of `1.3`, which is not and integer. 
Please make sure to pass an integer", ): model.resize_token_embeddings(model_vocab_size, pad_to_multiple_of=1.3) # We need to override as we need to prepare such that the image token is the last token def test_resize_embeddings_untied(self): (original_config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common() original_config.tie_word_embeddings = False for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config).to(torch_device) model.eval() # if no output embeddings -> leave test if model.get_output_embeddings() is None: continue # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_vocab_size = config.text_config.vocab_size model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.text_config.vocab_size, model_vocab_size + 10) output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10) # Check bias if present if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.text_config.vocab_size, model_vocab_size - 15) # Check that it actually resizes the embeddings matrix output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15) # Check bias if present if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15) # Check that the model can still do a forward pass successfully (every parameter should be resized) # Input ids should be clamped to the maximum size of the vocabulary - 1 and 
the image token should be the last token inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 2) n_images = self.model_tester.num_images * self.model_tester.perceiver_config["resampler_n_latents"] model.image_token_id = model_vocab_size - 15 - 1 inputs_dict["input_ids"][:, -n_images:] = model.image_token_id # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) def test_sdpa_can_dispatch_composite_models(self): for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_sdpa = model_class.from_pretrained(tmpdirname) model_sdpa = model_sdpa.eval().to(torch_device) self.assertTrue(model_sdpa.config._attn_implementation == "sdpa") self.assertTrue(model_sdpa.vision_model.config._attn_implementation == "sdpa") self.assertTrue(model_sdpa.connector.perceiver_resampler.config._attn_implementation == "sdpa") model_eager = model_class.from_pretrained(tmpdirname, attn_implementation="eager") model_eager = model_eager.eval().to(torch_device) self.assertTrue(model_eager.config._attn_implementation == "eager") self.assertTrue(model_eager.vision_model.config._attn_implementation == "eager") self.assertTrue(model_eager.connector.perceiver_resampler.config._attn_implementation == "eager") for name, submodule in model_eager.named_modules(): class_name = submodule.__class__.__name__ if "SdpaAttention" in class_name or "SdpaSelfAttention" in class_name: raise ValueError("The eager model should not have SDPA attention layers") @require_torch
Idefics2ModelTest