language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
davidhalter__jedi
test/completion/classes.py
{ "start": 7547, "end": 7640 }
class ____(object): a = 3 def return_sup(self): return 1 SuperCopy = Super
Super
python
huggingface__transformers
tests/models/bridgetower/test_modeling_bridgetower.py
{ "start": 20297, "end": 24349 }
class ____(unittest.TestCase): all_training_supported_model_classes = ( (BridgeTowerForImageAndTextRetrieval, BridgeTowerForMaskedLM, BridgeTowerForContrastiveLearning) if is_torch_available() else () ) def setUp(self): self.model_tester = BridgeTowerModelTester(self) self.config_tester = ConfigTester(self, config_class=BridgeTowerConfig, hidden_size=37, vocab_size=99) def _prepare_inputs_for_training(self, model_class): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() if model_class == BridgeTowerForMaskedLM: inputs_dict["labels"] = inputs_dict["input_ids"] elif model_class == BridgeTowerForImageAndTextRetrieval: inputs_dict["labels"] = ids_tensor([1], 2) elif model_class == BridgeTowerForContrastiveLearning: inputs_dict["return_loss"] = True return config, inputs_dict def _get_non_used_layer_names(self, model_class): non_used_layer_names = ["text_model.pooler"] if model_class == BridgeTowerForMaskedLM: non_used_layer_names = non_used_layer_names + [ # This number `1` actually depends on the number of layers in `cross_modal_image_layers` (by minus 1) "cross_modal_image_layers.1", "cross_modal_image_pooler", "cross_modal_text_pooler", ] return non_used_layer_names def _is_layer_used(self, model_class, layer_name): non_used_layer_names = self._get_non_used_layer_names(model_class) for non_used_layer_name in non_used_layer_names: if non_used_layer_name in layer_name: return False return True def test_training(self): for model_class in self.all_training_supported_model_classes: config, inputs_dict = self._prepare_inputs_for_training(model_class) model = model_class(config) model.to(torch_device) model.train() loss = model(**inputs_dict).loss loss.backward() # verify the gradients of used layers' weight are not None for name, param in model.named_parameters(): if self._is_layer_used(model_class, name): self.assertIsNotNone(param.grad, f"Gradients should not be None - got {param.grad} for {name}") @slow def 
test_inference_interpolate_pos_encoding(self): # ViT models have an `interpolate_pos_encoding` argument in their forward method, # allowing to interpolate the pre-trained position embeddings in order to use # the model on higher resolutions. The DINO model by Facebook AI leverages this # to visualize self-attention on higher resolution images. model_name = "BridgeTower/bridgetower-base" model = BridgeTowerModel.from_pretrained(model_name).to(torch_device) image_processor = BridgeTowerProcessor.from_pretrained(model_name, size={"shortest_edge": 180}) image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") inputs = image_processor(text="what's in the image", images=image, return_tensors="pt").to(torch_device) # interpolate_pos_encodiung false should return value error with self.assertRaises(ValueError, msg="doesn't match model"): with torch.no_grad(): model(**inputs, interpolate_pos_encoding=False) # forward pass with torch.no_grad(): outputs = model(**inputs, interpolate_pos_encoding=True) # verify the logits expected_shape = torch.Size((1, 122, 768)) self.assertEqual(outputs.image_features.shape, expected_shape) expected_slice = torch.tensor( [[-0.6518, 0.4978, -0.4544], [-2.6672, -0.0843, -0.4210], [-2.4510, -0.1002, -0.3458]] ).to(torch_device) torch.testing.assert_close(outputs.image_features[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
BridgeTowerModelTrainingTest
python
huggingface__transformers
src/transformers/models/pegasus_x/modeling_pegasus_x.py
{ "start": 66869, "end": 67416 }
class ____(PegasusXPreTrainedModel): """ This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is used in combination with the [`EncoderDecoderModel`] framework. """ def __init__(self, config): super().__init__(config) self.decoder = PegasusXDecoder(config) def forward(self, *args, **kwargs): return self.decoder(*args, **kwargs) __all__ = ["PegasusXForConditionalGeneration", "PegasusXModel", "PegasusXPreTrainedModel"]
PegasusXDecoderWrapper
python
google__pytype
pytype/tests/test_reingest1.py
{ "start": 6546, "end": 9198 }
class ____(test_base.BaseTest): """Tests for strict none.""" def setUp(self): super().setUp() self.options.tweak(strict_none_binding=False) def test_pyi_return_constant(self): foo = self.Infer(""" x = None def f(): return x """) with test_utils.Tempdir() as d: d.create_file("foo.pyi", pytd_utils.Print(foo)) self.Check( """ import foo def g(): return foo.f().upper() """, pythonpath=[d.path], ) def test_pyi_yield_constant(self): foo = self.Infer(""" x = None def f(): yield x """) with test_utils.Tempdir() as d: d.create_file("foo.pyi", pytd_utils.Print(foo)) self.Check( """ import foo def g(): return [v.upper() for v in foo.f()] """, pythonpath=[d.path], ) def test_pyi_return_contained_constant(self): foo = self.Infer(""" x = None def f(): return [x] """) with test_utils.Tempdir() as d: d.create_file("foo.pyi", pytd_utils.Print(foo)) self.Check( """ import foo def g(): return [v.upper() for v in foo.f()] """, pythonpath=[d.path], ) def test_pyi_return_attribute(self): foo = self.Infer(""" class Foo: x = None def f(): return Foo.x """) with test_utils.Tempdir() as d: d.create_file("foo.pyi", pytd_utils.Print(foo)) self.Check( """ import foo def g(): return foo.f().upper() """, pythonpath=[d.path], ) def test_no_return(self): foo = self.Infer(""" def fail(): raise ValueError() """) with test_utils.Tempdir() as d: d.create_file("foo.pyi", pytd_utils.Print(foo)) self.Check( """ import foo def g(): x = "hello" if __random__ else None if x is None: foo.fail() return x.upper() """, pythonpath=[d.path], ) def test_context_manager_subclass(self): foo = self.Infer(""" class Foo: def __enter__(self): return self def __exit__(self, type, value, traceback): return None """) with test_utils.Tempdir() as d: d.create_file("foo.pyi", pytd_utils.Print(foo)) self.Check( """ import foo class Bar(foo.Foo): x = None with Bar() as bar: bar.x """, pythonpath=[d.path], ) if __name__ == "__main__": test_base.main()
StrictNoneTest
python
django__django
tests/queries/models.py
{ "start": 14148, "end": 14433 }
class ____(models.Model): parent = models.ForeignKey( "self", models.SET_NULL, null=True, blank=True, related_name="children" ) data = models.CharField(max_length=100) created_at = models.DateTimeField(auto_now_add=True) # Models for #17600 regressions
MyObject
python
apache__airflow
providers/amazon/src/airflow/providers/amazon/aws/operators/rds.py
{ "start": 11373, "end": 13767 }
class ____(RdsBaseOperator): """ Deletes a DB instance or cluster snapshot or terminating the copy operation. .. seealso:: For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:RdsDeleteDbSnapshotOperator` :param db_type: Type of the DB - either "instance" or "cluster" :param db_snapshot_identifier: The identifier for the DB instance or DB cluster snapshot :param region_name: AWS region_name. If not specified then the default boto3 behaviour is used. :param verify: Whether or not to verify SSL certificates. See: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html :param botocore_config: Configuration dictionary (key-values) for botocore client. See: https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html """ template_fields = aws_template_fields( "db_snapshot_identifier", ) def __init__( self, *, db_type: str, db_snapshot_identifier: str, wait_for_completion: bool = True, **kwargs, ): super().__init__(**kwargs) self.db_type = RdsDbType(db_type) self.db_snapshot_identifier = db_snapshot_identifier self.wait_for_completion = wait_for_completion def execute(self, context: Context) -> str: self.log.info("Starting to delete snapshot '%s'", self.db_snapshot_identifier) if self.db_type.value == "instance": delete_instance_snap = self.hook.conn.delete_db_snapshot( DBSnapshotIdentifier=self.db_snapshot_identifier, ) delete_response = json.dumps(delete_instance_snap, default=str) if self.wait_for_completion: self.hook.wait_for_db_snapshot_state(self.db_snapshot_identifier, target_state="deleted") else: delete_cluster_snap = self.hook.conn.delete_db_cluster_snapshot( DBClusterSnapshotIdentifier=self.db_snapshot_identifier, ) delete_response = json.dumps(delete_cluster_snap, default=str) if self.wait_for_completion: self.hook.wait_for_db_cluster_snapshot_state( self.db_snapshot_identifier, target_state="deleted" ) return delete_response
RdsDeleteDbSnapshotOperator
python
getsentry__sentry
src/sentry/types/ratelimit.py
{ "start": 1788, "end": 2032 }
class ____: """ Rate Limit metadata for Snuba's RateLimitExceeded error """ policy: str | None quota_unit: str | None quota_used: int | None rejection_threshold: int | None storage_key: str | None
SnubaRateLimitMeta
python
huggingface__transformers
src/transformers/models/qwen3_vl_moe/modeling_qwen3_vl_moe.py
{ "start": 18781, "end": 20070 }
class ____(PreTrainedModel): config: Qwen3VLMoeConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["Qwen3VLMoeTextDecoderLayer", "Qwen3VLMoeVisionBlock"] _skip_keys_device_placement = ["past_key_values"] _supports_flash_attn = True _supports_sdpa = True _supports_flex_attn = True _can_compile_fullgraph = False # MoE models don't work with torch.compile (`torch.where(condition)` not supported) _supports_attention_backend = True _can_record_outputs = { "router_logits": OutputRecorder(Qwen3VLMoeTextTopKRouter, layer_name="mlp.router", index=0), "hidden_states": Qwen3VLMoeTextDecoderLayer, "attentions": Qwen3VLMoeTextAttention, } @torch.no_grad() def _init_weights(self, module): """Initialize the weights.""" super()._init_weights(module) if hasattr(self.config, "initializer_range"): std = self.config.initializer_range else: std = getattr(self.config.get_text_config(), "initializer_range", 0.02) if isinstance(module, Qwen3VLMoeTextExperts): init.normal_(module.gate_up_proj, mean=0.0, std=std) init.normal_(module.down_proj, mean=0.0, std=std)
Qwen3VLMoePreTrainedModel
python
dagster-io__dagster
python_modules/dagster-graphql/dagster_graphql/schema/logs/events.py
{ "start": 18051, "end": 18314 }
class ____(graphene.ObjectType): class Meta: interfaces = (GrapheneMessageEvent, GrapheneStepEvent) name = "ObjectStoreOperationEvent" operation_result = graphene.NonNull(GrapheneObjectStoreOperationResult)
GrapheneObjectStoreOperationEvent
python
kamyu104__LeetCode-Solutions
Python/construct-string-with-minimum-cost-easy.py
{ "start": 1972, "end": 3632 }
class ____(object): def minimumCost(self, target, words, costs): """ :type target: str :type words: List[str] :type costs: List[int] :rtype: int """ INF = float("inf") class Trie(object): def __init__(self): self.__nodes = [] self.__mns = [] self.__new_node() def __new_node(self): self.__nodes.append([-1]*26) self.__mns.append(INF) return len(self.__nodes)-1 def add(self, w, c): curr = 0 for x in w: x = ord(x)-ord('a') if self.__nodes[curr][x] == -1: self.__nodes[curr][x] = self.__new_node() curr = self.__nodes[curr][x] self.__mns[curr] = min(self.__mns[curr], c) def query(self, i): curr = 0 for j in xrange(i, len(target)): x = ord(target[j])-ord('a') if self.__nodes[curr][x] == -1: break curr = self.__nodes[curr][x] if self.__mns[curr] != INF: dp[j+1] = min(dp[j+1], dp[i]+self.__mns[curr]) trie = Trie() for w, c in itertools.izip(words, costs): trie.add(w, c) dp = [INF]*(len(target)+1) dp[0] = 0 for i in xrange(len(target)): if dp[i] == INF: continue trie.query(i) return dp[-1] if dp[-1] != INF else -1
Solution3
python
scipy__scipy
benchmarks/benchmarks/cython_special.py
{ "start": 510, "end": 1489 }
class ____(type): """ Add time_* benchmarks corresponding to cython_special._bench_*_cy """ def __new__(cls, cls_name, bases, dct): params = [(10, 100, 1000), ('python', 'numpy', 'cython')] param_names = ['N', 'api'] def get_time_func(name, args): @with_attributes(params=[(name,), (args,)] + params, param_names=['name', 'argument'] + param_names) def func(self, name, args, N, api): if api == 'python': self.py_func(N, *args) elif api == 'numpy': self.np_func(*self.obj) else: self.cy_func(N, *args) func.__name__ = 'time_' + name return func for name in FUNC_ARGS.keys(): func = get_time_func(name, FUNC_ARGS[name]) dct[func.__name__] = func return type.__new__(cls, cls_name, bases, dct)
_CythonSpecialMeta
python
numba__numba
numba/tests/test_datamodel.py
{ "start": 1237, "end": 1329 }
class ____(test_factory()): fe_type = types.UniTuple(types.int32, 2)
TestUniTupleOf2xInt32
python
PrefectHQ__prefect
tests/utilities/schema_tools/test_validation.py
{ "start": 19721, "end": 23642 }
class ____: @pytest.fixture def schema(self) -> dict: return { "title": "Parameters", "type": "object", "properties": { "param": { "title": "param", "position": 0, "allOf": [{"$ref": "#/definitions/City"}], } }, "required": ["param"], "definitions": { "State": { "title": "State", "type": "object", "properties": { "name": {"title": "Name", "type": "string"}, "bird": {"title": "Bird", "type": "string"}, }, "required": ["name", "bird"], }, "City": { "title": "City", "type": "object", "properties": { "population": {"title": "Population", "type": "integer"}, "name": {"title": "Name", "type": "string"}, "state": {"$ref": "#/definitions/State"}, }, "required": ["population", "name", "state"], }, }, } @pytest.mark.parametrize( "obj, expected", [ ( { "param": { "population": 100000, "name": "Raccoon City", "state": {"name": "South Dakota", "bird": "Blue Jay"}, } }, True, ), ( { "param": { "population": "not an integer", "name": "Raccoon City", "state": {"name": "South Dakota", "bird": "Blue Jay"}, } }, False, ), ( { "param": { "population": 100000, "name": "Raccoon City", "state": {"name": "South Dakota"}, } }, False, ), # Missing required state property 'bird' ({}, False), # Missing required 'param' ({"param": None}, False), ], ) def test_is_valid(self, schema, obj, expected): assert is_valid(obj, schema) == expected @pytest.mark.parametrize( "obj, expected_errors", [ ( { "param": { "population": 100000, "name": "Raccoon City", "state": {"name": "South Dakota", "bird": "Blue Jay"}, } }, [], ), ( { "param": { "population": "not an integer", "name": "Raccoon City", "state": {"name": "South Dakota", "bird": "Blue Jay"}, } }, ["'not an integer' is not of type 'integer'"], ), ( { "param": { "population": 100000, "name": "Raccoon City", "state": {"name": "South Dakota"}, } }, ["'bird' is a required property"], ), ({}, ["'param' is a required property"]), ({"param": None}, ["None is not of type 'object'"]), ], ) def test_validate(self, schema, obj, expected_errors): errors = 
validate(obj, schema) assert [e.message for e in errors] == expected_errors
TestNestedObject
python
tensorflow__tensorflow
tensorflow/python/data/ops/rebatch_op.py
{ "start": 1331, "end": 5969 }
class ____(dataset_ops.UnaryDataset): """A `Dataset` that rebatches elements from its input into new batch sizes. `_RebatchDataset(input_dataset, batch_sizes)` is functionally equivalent to `input_dataset.unbatch().batch(N)`, where the value of N cycles through the `batch_sizes` input list. The elements produced by this dataset have the same rank as the elements of the input dataset. """ def __init__(self, input_dataset, batch_sizes, drop_remainder=False, name=None): """See `Dataset.rebatch` for details.""" self._input_dataset = input_dataset self._batch_sizes = ops.convert_to_tensor( batch_sizes, dtype=dtypes.int64, name="batch_sizes") self._drop_remainder = ops.convert_to_tensor( drop_remainder, dtype=dtypes.bool, name="drop_remainder") self._name = name new_batch_dim = self._compute_static_batch_dim() # pylint: disable=protected-access self._element_spec = nest.map_structure( lambda ts: ts._unbatch()._batch(new_batch_dim), dataset_ops.get_structure(input_dataset)) # pylint: enable=protected-access # auto_shard rewrite assumes that there's normalize_to_dense before # rebatch_dataset. # LINT.IfChange input_dataset = dataset_ops.normalize_to_dense(input_dataset) variant_tensor = ged_ops.rebatch_dataset_v2( input_dataset._variant_tensor, # pylint: disable=protected-access batch_sizes=batch_sizes, drop_remainder=drop_remainder, **self._flat_structure) # LINT.ThenChange(//tensorflow/core/grappler/optimizers/data/auto_shard.cc) super().__init__(input_dataset, variant_tensor) def _compute_static_batch_dim(self): """Computes the static batch dimension of a dataset if it can be determined. Given the RebatchDataset parameters, determines the batch dimension of this dataset statically. Returns None if this cannot be determined or is variable. Returns: An integer representing the batch dimension of the dataset. If it cannot be determined statically, returns None. 
Raises: ValueError: The batch_sizes parameter is malformed, input_dataset is not batched, or input_dataset batch sizes are incompatible with each other. """ new_batch_dim = tensor_util.constant_value(self._batch_sizes) if new_batch_dim is None: return None if isinstance(new_batch_dim, np.ndarray): if len(new_batch_dim.shape) == 1: if np.all(new_batch_dim == new_batch_dim[0]): new_batch_dim = new_batch_dim[0] else: return None elif len(new_batch_dim.shape) > 1: raise ValueError( f"Invalid `batch_sizes`. Expected `batch_sizes` to be a scalar or " f"a vector. Received `batch_sizes` of rank " f"{len(new_batch_dim.shape)}.") if self._may_form_partial_batches(new_batch_dim): return None return new_batch_dim def _may_form_partial_batches(self, desired_batch_size): """Returns whether this dataset may form partial batches.""" if tensor_util.constant_value(self._drop_remainder): return False def get_batch_dim(type_spec): try: shape = type_spec._to_legacy_output_shapes() # pylint: disable=protected-access except NotImplementedError: return None if not isinstance(shape, tensor_shape.TensorShape): return None if shape.rank is None: return None if len(shape) < 1: raise ValueError("Invalid `batch_sizes`. Expected dataset with " "rank of >= 1 but found a dataset with " "scalar elements. 
Fix the issue by adding the `batch` " "transformation to the dataset.") return shape.dims[0].value input_batch_dims = [ get_batch_dim(ts) for ts in nest.flatten(dataset_ops.get_structure(self._input_dataset)) ] known_input_batch_dims = [d for d in input_batch_dims if d is not None] if not known_input_batch_dims: return True known_input_batch_dims = numpy_compat.np_asarray(known_input_batch_dims) if not np.all(known_input_batch_dims == known_input_batch_dims[0]): raise ValueError( f"Invalid `input_dataset.` The batch dimension of component 0 " f"is {known_input_batch_dims[0]}, while the batch dimension " f"of component i is {known_input_batch_dims}.") return known_input_batch_dims[0] % desired_batch_size != 0 @property def element_spec(self): return self._element_spec
_RebatchDataset
python
kubernetes-client__python
kubernetes/client/models/v2_horizontal_pod_autoscaler.py
{ "start": 383, "end": 7586 }
class ____(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'api_version': 'str', 'kind': 'str', 'metadata': 'V1ObjectMeta', 'spec': 'V2HorizontalPodAutoscalerSpec', 'status': 'V2HorizontalPodAutoscalerStatus' } attribute_map = { 'api_version': 'apiVersion', 'kind': 'kind', 'metadata': 'metadata', 'spec': 'spec', 'status': 'status' } def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501 """V2HorizontalPodAutoscaler - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._api_version = None self._kind = None self._metadata = None self._spec = None self._status = None self.discriminator = None if api_version is not None: self.api_version = api_version if kind is not None: self.kind = kind if metadata is not None: self.metadata = metadata if spec is not None: self.spec = spec if status is not None: self.status = status @property def api_version(self): """Gets the api_version of this V2HorizontalPodAutoscaler. # noqa: E501 APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501 :return: The api_version of this V2HorizontalPodAutoscaler. # noqa: E501 :rtype: str """ return self._api_version @api_version.setter def api_version(self, api_version): """Sets the api_version of this V2HorizontalPodAutoscaler. 
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501 :param api_version: The api_version of this V2HorizontalPodAutoscaler. # noqa: E501 :type: str """ self._api_version = api_version @property def kind(self): """Gets the kind of this V2HorizontalPodAutoscaler. # noqa: E501 Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501 :return: The kind of this V2HorizontalPodAutoscaler. # noqa: E501 :rtype: str """ return self._kind @kind.setter def kind(self, kind): """Sets the kind of this V2HorizontalPodAutoscaler. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501 :param kind: The kind of this V2HorizontalPodAutoscaler. # noqa: E501 :type: str """ self._kind = kind @property def metadata(self): """Gets the metadata of this V2HorizontalPodAutoscaler. # noqa: E501 :return: The metadata of this V2HorizontalPodAutoscaler. # noqa: E501 :rtype: V1ObjectMeta """ return self._metadata @metadata.setter def metadata(self, metadata): """Sets the metadata of this V2HorizontalPodAutoscaler. :param metadata: The metadata of this V2HorizontalPodAutoscaler. # noqa: E501 :type: V1ObjectMeta """ self._metadata = metadata @property def spec(self): """Gets the spec of this V2HorizontalPodAutoscaler. 
# noqa: E501 :return: The spec of this V2HorizontalPodAutoscaler. # noqa: E501 :rtype: V2HorizontalPodAutoscalerSpec """ return self._spec @spec.setter def spec(self, spec): """Sets the spec of this V2HorizontalPodAutoscaler. :param spec: The spec of this V2HorizontalPodAutoscaler. # noqa: E501 :type: V2HorizontalPodAutoscalerSpec """ self._spec = spec @property def status(self): """Gets the status of this V2HorizontalPodAutoscaler. # noqa: E501 :return: The status of this V2HorizontalPodAutoscaler. # noqa: E501 :rtype: V2HorizontalPodAutoscalerStatus """ return self._status @status.setter def status(self, status): """Sets the status of this V2HorizontalPodAutoscaler. :param status: The status of this V2HorizontalPodAutoscaler. # noqa: E501 :type: V2HorizontalPodAutoscalerStatus """ self._status = status def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V2HorizontalPodAutoscaler): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V2HorizontalPodAutoscaler): return True return self.to_dict() != other.to_dict()
V2HorizontalPodAutoscaler
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI036.py
{ "start": 846, "end": 1139 }
class ____: def __exit__(self, typ: type[BaseException] | None, exc: BaseException | None, tb: TracebackType | None) -> None: ... async def __aexit__(self, typ: type[BaseException] | None, exc: BaseException | None, tb: types.TracebackType | None, *args: list[None]) -> None: ...
GoodFour
python
pytorch__pytorch
torch/nn/modules/batchnorm.py
{ "start": 15640, "end": 19366 }
class ____(_BatchNorm): r"""Applies Batch Normalization over a 4D input. 4D is a mini-batch of 2D inputs with additional channel dimension. Method described in the paper `Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift <https://arxiv.org/abs/1502.03167>`__ . .. math:: y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta The mean and standard-deviation are calculated per-dimension over the mini-batches and :math:`\gamma` and :math:`\beta` are learnable parameter vectors of size `C` (where `C` is the input size). By default, the elements of :math:`\gamma` are set to 1 and the elements of :math:`\beta` are set to 0. At train time in the forward pass, the standard-deviation is calculated via the biased estimator, equivalent to ``torch.var(input, correction=0)``. However, the value stored in the moving average of the standard-deviation is calculated via the unbiased estimator, equivalent to ``torch.var(input, correction=1)``. Also by default, during training this layer keeps running estimates of its computed mean and variance, which are then used for normalization during evaluation. The running estimates are kept with a default :attr:`momentum` of 0.1. If :attr:`track_running_stats` is set to ``False``, this layer then does not keep running estimates, and batch statistics are instead used during evaluation time as well. .. note:: This :attr:`momentum` argument is different from one used in optimizer classes and the conventional notion of momentum. Mathematically, the update rule for running statistics here is :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`, where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the new observed value. Because the Batch Normalization is done over the `C` dimension, computing statistics on `(N, H, W)` slices, it's common terminology to call this Spatial Batch Normalization. 
Args: num_features: :math:`C` from an expected input of size :math:`(N, C, H, W)` eps: a value added to the denominator for numerical stability. Default: 1e-5 momentum: the value used for the running_mean and running_var computation. Can be set to ``None`` for cumulative moving average (i.e. simple average). Default: 0.1 affine: a boolean value that when set to ``True``, this module has learnable affine parameters. Default: ``True`` track_running_stats: a boolean value that when set to ``True``, this module tracks the running mean and variance, and when set to ``False``, this module does not track such statistics, and initializes statistics buffers :attr:`running_mean` and :attr:`running_var` as ``None``. When these buffers are ``None``, this module always uses batch statistics. in both training and eval modes. Default: ``True`` Shape: - Input: :math:`(N, C, H, W)` - Output: :math:`(N, C, H, W)` (same shape as input) Examples:: >>> # With Learnable Parameters >>> m = nn.BatchNorm2d(100) >>> # Without Learnable Parameters >>> m = nn.BatchNorm2d(100, affine=False) >>> input = torch.randn(20, 100, 35, 45) >>> output = m(input) """ def _check_input_dim(self, input) -> None: if input.dim() != 4: raise ValueError(f"expected 4D input (got {input.dim()}D input)") # pyrefly: ignore [inconsistent-inheritance]
BatchNorm2d
python
doocs__leetcode
lcof2/剑指 Offer II 054. 所有大于等于节点的值之和/Solution.py
{ "start": 192, "end": 519 }
class ____: def convertBST(self, root: TreeNode) -> TreeNode: def dfs(root): nonlocal s if root is None: return dfs(root.right) s += root.val root.val = s dfs(root.left) s = 0 dfs(root) return root
Solution
python
ansible__ansible
lib/ansible/plugins/vars/host_group_vars.py
{ "start": 2583, "end": 5921 }
class ____(BaseVarsPlugin): REQUIRES_ENABLED = True is_stateless = True def load_found_files(self, loader, data, found_files): for found in found_files: new_data = loader.load_from_file(found, cache='all', unsafe=True, trusted_as_template=True) if new_data: # ignore empty files data = combine_vars(data, new_data) return data def get_vars(self, loader, path, entities, cache=True): """ parses the inventory file """ if not isinstance(entities, list): entities = [entities] # realpath is expensive try: realpath_basedir = CANONICAL_PATHS[path] except KeyError: CANONICAL_PATHS[path] = realpath_basedir = os.path.realpath(basedir(path)) data = {} for entity in entities: try: entity_name = entity.name except AttributeError: raise AnsibleParserError("Supplied entity must be Host or Group, got %s instead" % (type(entity))) try: first_char = entity_name[0] except (TypeError, IndexError, KeyError): raise AnsibleParserError("Supplied entity must be Host or Group, got %s instead" % (type(entity))) # avoid 'chroot' type inventory hostnames /path/to/chroot if first_char != os.path.sep: try: found_files = [] # load vars try: entity_type = entity.base_type except AttributeError: raise AnsibleParserError("Supplied entity must be Host or Group, got %s instead" % (type(entity))) if entity_type is InventoryObjectType.HOST: subdir = 'host_vars' elif entity_type is InventoryObjectType.GROUP: subdir = 'group_vars' else: raise AnsibleParserError("Supplied entity must be Host or Group, got %s instead" % (type(entity))) opath = os.path.join(realpath_basedir, subdir) key = '%s.%s' % (entity_name, opath) if cache: if opath in NAK: continue if key in FOUND: data = self.load_found_files(loader, data, FOUND[key]) continue if os.path.isdir(opath): self._display.debug("\tprocessing dir %s" % opath) FOUND[key] = found_files = loader.find_vars_files(opath, entity_name) elif not os.path.exists(opath): # cache missing dirs so we don't have to keep looking for things beneath the NAK.add(opath) else: 
self._display.warning("Found %s that is not a directory, skipping: %s" % (subdir, opath)) # cache non-directory matches NAK.add(opath) data = self.load_found_files(loader, data, found_files) except Exception as e: raise AnsibleParserError(to_native(e)) return data
VarsModule
python
psf__black
src/black/lines.py
{ "start": 17675, "end": 17864 }
class ____: """Intermediate split result from a right hand split.""" head: Line body: Line tail: Line opening_bracket: Leaf closing_bracket: Leaf @dataclass
RHSResult
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_textbox31.py
{ "start": 315, "end": 1035 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("textbox31.xlsx") self.ignore_elements = {"xl/drawings/drawing1.xml": ["<a:pPr/>"]} def test_create_file(self): """Test the creation of a simple XlsxWriter file with textbox(s).""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() worksheet.write("A1", "This is some text") worksheet.insert_textbox( "E9", "This is some text", {"textlink": "=$A$1", "font": {"bold": True}} ) workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
modin-project__modin
modin/experimental/core/io/sql/sql_dispatcher.py
{ "start": 969, "end": 4779 }
class ____(SQLDispatcher): """Class handles experimental utils for reading SQL queries or database tables.""" __read_sql_with_offset = None @classmethod def preprocess_func(cls): # noqa: RT01 """Prepare a function for transmission to remote workers.""" if cls.__read_sql_with_offset is None: # sql deps are optional, so import only when needed from modin.experimental.core.io.sql.utils import read_sql_with_offset cls.__read_sql_with_offset = cls.put(read_sql_with_offset) return cls.__read_sql_with_offset @classmethod def _read( cls, sql, con, index_col, coerce_float, params, parse_dates, columns, chunksize, dtype_backend, dtype, partition_column, lower_bound, upper_bound, max_sessions, ): # noqa: PR01 """ Read SQL query or database table into a DataFrame. Documentation for parameters can be found at `modin.read_sql`. Returns ------- BaseQueryCompiler A new query compiler with imported data for further processing. """ # sql deps are optional, so import only when needed from modin.experimental.core.io.sql.utils import get_query_info, is_distributed if not is_distributed(partition_column, lower_bound, upper_bound): message = "Defaulting to Modin core implementation; \ 'partition_column', 'lower_bound', 'upper_bound' must be different from None" warnings.warn(message) return cls.base_read( sql, con, index_col, coerce_float=coerce_float, params=params, parse_dates=parse_dates, columns=columns, chunksize=chunksize, dtype_backend=dtype_backend, dtype=dtype, ) # starts the distributed alternative cols_names, query = get_query_info(sql, con, partition_column) num_parts = min(NPartitions.get(), max_sessions if max_sessions else 1) num_splits = min(len(cols_names), num_parts) diff = (upper_bound - lower_bound) + 1 min_size = diff // num_parts rest = diff % num_parts partition_ids = [] index_ids = [] end = lower_bound - 1 func = cls.preprocess_func() for part in range(num_parts): if rest: size = min_size + 1 rest -= 1 else: size = min_size start = end + 1 end = start + size - 1 
partition_id = cls.deploy( func, f_args=( partition_column, start, end, num_splits, query, con, index_col, coerce_float, params, parse_dates, columns, chunksize, dtype_backend, dtype, ), num_returns=num_splits + 1, ) partition_ids.append( [cls.frame_partition_cls(obj) for obj in partition_id[:-1]] ) index_ids.append(partition_id[-1]) new_index = pandas.RangeIndex(sum(cls.materialize(index_ids))) new_query_compiler = cls.query_compiler_cls( cls.frame_cls(np.array(partition_ids), new_index, cols_names) ) new_query_compiler._modin_frame.synchronize_labels(axis=0) return new_query_compiler
ExperimentalSQLDispatcher
python
ray-project__ray
doc/source/ray-core/doc_code/streaming_generator.py
{ "start": 978, "end": 1068 }
class ____: def f(self): for i in range(5): yield i @ray.remote
Actor
python
buildout__buildout
src/zc/buildout/easy_install.py
{ "start": 82809, "end": 94276 }
class ____(Wheel): """Extension for Wheel class to get the actual project name.""" def get_project_name(self): """Get project name by looking in the .dist-info of the wheel. This is adapted from the Wheel.install_as_egg method and the methods it calls. Ideally, this would be the same as self.project_name. """ with zipfile.ZipFile(self.filename) as zf: dist_info = self.get_dist_info(zf) with zf.open(posixpath.join(dist_info, 'METADATA')) as fp: value = fp.read().decode('utf-8') metadata = email.parser.Parser().parsestr(value) return metadata.get("Name") def _maybe_copy_and_rename_wheel(dist, dest): """Maybe copy and rename wheel. Return the new dist or None. So why do we do this? We need to check a special case: - zest_releaser-9.4.0-py3-none-any.whl with an underscore results in: zest_releaser-9.4.0-py3.13.egg In the resulting `bin/fullrease` script the zest.releaser distribution is not found. - So in this function we copy and rename the wheel to: zest.releaser-9.4.0-py3-none-any.whl with a dot, which results in: zest.releaser-9.4.0-py3.13.egg The resulting `bin/fullrease` script works fine. See https://github.com/buildout/buildout/issues/686 So check if we should rename the wheel before handling it. At first, source dists seemed to not have this problem. Or not anymore, after some fixes in Buildout last year: - zest_releaser-9.4.0.tar.gz with an underscore results in (in my case): zest_releaser-9.4.0-py3.13-macosx-14.7-x86_64.egg And this works fine, despite having an underscore. - But: products_cmfplone-6.1.1.tar.gz with an underscore leads to products_cmfplone-6.1.1-py3.13-macosx-14.7-x86_64.egg and with this, a Plone instance totally fails to start. Ah, but this is only because the generated zope.conf contains a temporarystorage option which is added because plone.recipe.zope2instance could not determine the Products.CMFPlone version. If I work around that, the instance actually starts. 
The zest.releaser egg generated from the source dist has a dist-info directory: zest_releaser-9.4.0-py3.13-macosx-14.7-x86_64.dist-info The egg generated from any of the two wheels only has an EGG-INFO directory. I guess the dist-info directory somehow helps. It is there because our make_egg_after_pip_install function, which only gets called after installing a source dist, has its own home grown way of creating an egg. """ wheel = BuildoutWheel(dist.location) actual_project_name = wheel.get_project_name() if actual_project_name and wheel.project_name == actual_project_name: return filename = os.path.basename(dist.location) new_filename = filename.replace(wheel.project_name, actual_project_name) if filename == new_filename: return logger.debug("Renaming wheel %s to %s", dist.location, new_filename) tmp_wheeldir = tempfile.mkdtemp() try: new_location = os.path.join(tmp_wheeldir, new_filename) shutil.copy(dist.location, new_location) # Now we create a clone of the original distribution, # but with the new location and the wanted project name. new_dist = Distribution( new_location, project_name=actual_project_name, version=dist.version, py_version=dist.py_version, platform=dist.platform, precedence=dist.precedence, ) # We were called by _move_to_eggs_dir_and_compile. # Now we call it again with the new dist. # I tried simply returning new_dist, but then it immediately # got removed because we remove its temporary directory. return _move_to_eggs_dir_and_compile(new_dist, dest) finally: # Remember that temporary directories must be removed zc.buildout.rmtree.rmtree(tmp_wheeldir) def _move_to_eggs_dir_and_compile(dist, dest): """Move distribution to the eggs destination directory. Originally we compiled the py files if we actually moved the dist. But this was never updated for Python 3, so it had no effect. So we removed this part. 
See https://github.com/buildout/buildout/issues/699 Its new location is expected not to exist there yet, otherwise we would not be calling this function: the egg is already there. But the new location might exist at this point if another buildout is running in parallel. So we copy to a temporary directory first. See discussion at https://github.com/buildout/buildout/issues/307 We return the new distribution with properly loaded metadata. """ # First make sure the destination directory exists. This could suffer from # the same kind of race condition as the rest: if we check that it does not # exist, and we then create it, it will fail when a second buildout is # doing the same thing. try: os.makedirs(dest) except OSError: if not os.path.isdir(dest): # Unknown reason. Reraise original error. raise logger.debug( "Turning dist %s (%s) into egg, and moving to eggs dir (%s).", dist, dist.location, dest, ) tmp_dest = tempfile.mkdtemp(dir=dest) try: if (os.path.isdir(dist.location) and dist.precedence >= pkg_resources.BINARY_DIST): # We got a pre-built directory. It must have been obtained locally. # Just copy it. logger.debug("dist is pre-built directory.") # TODO Can we still support this? Do we need to? Maybe warn, or let pip install this. tmp_loc = os.path.join(tmp_dest, os.path.basename(dist.location)) shutil.copytree(dist.location, tmp_loc) else: # It is an archive of some sort. # Figure out how to unpack it, or fall back to easy_install. basename, ext = os.path.splitext(dist.location) if ext == ".gz" and basename.endswith(".tar"): basename = basename[:-4] # Set new location with name ending in '.experimental'. # XXX TODO some our code or tests expects '.egg' at the end. # tmp_loc = os.path.join(tmp_dest, os.path.basename(basename) + ".experimental") tmp_loc = os.path.join(tmp_dest, os.path.basename(basename) + ".egg") if ext in UNPACKERS: # TODO Maybe simply always call pip install for all dists, without # checking for unpackers. 
# TODO Maybe never rename a wheel or other dist anymore. # if ext == '.whl': # logger.debug("Checking if wheel needs to be renamed.") # new_dist = _maybe_copy_and_rename_wheel(dist, dest) # if new_dist is not None: # logger.debug("Found dist after renaming wheel: %s", new_dist) # return new_dist # logger.debug("Renaming wheel was not needed or did not help.") unpacker = UNPACKERS[ext] logger.debug("Calling unpacker for %s on %s", ext, dist.location) unpacker(dist.location, tmp_loc) else: logger.debug("Calling pip install for %s on %s", ext, dist.location) [tmp_loc] = call_pip_install(dist.location, tmp_dest) # We have installed the dist. Now try to rename/move it. logger.debug("Egg for %s installed at %s", dist, tmp_loc) newloc = os.path.join(dest, os.path.basename(tmp_loc)) try: os.rename(tmp_loc, newloc) except OSError: logger.error( "Moving/renaming egg for %s (%s) to %s failed.", dist, dist.location, newloc, ) # Might be for various reasons. If it is because newloc already # exists, we can investigate. if not os.path.exists(newloc): # No, it is a different reason. Give up. logger.error("New location %s does not exist.", newloc) raise # Try to use it as environment and check if our project is in it. newdist = _get_matching_dist_in_location(dist, newloc) if newdist is None: # Path exists, but is not our package. We could # try something, but it seems safer to bail out # with the original error. logger.error( "New location %s exists, but has no distribution for %s", newloc, dist) raise # newloc looks okay to use. # This may happen more often on Mac, and is the reason why we # override Environment.can_add, see above. # Do print a warning. logger.warning( "Path %s unexpectedly already exists.\n" "It contains the expected distribution for %s.\n" "Maybe a buildout running in parallel has added it. " "We will accept it.\n" "If this contains a wrong package, please remove it yourself.", newloc, dist) else: # There were no problems during the rename. 
newdist = _get_matching_dist_in_location(dist, newloc) if newdist is None: raise AssertionError(f"{newloc} has no distribution for {dist}") # The new dist automatically has precedence DEVELOP_DIST, which sounds # wrong. And this interferes with a check for printing picked versions. # So set it to EGG_DIST. We already did this for a long time, then I # removed it because I thought it would no longer be needed, but it is. # Also, we used to do this only when the dist was installed by pip, # but it seems needed always, otherwise dists installed from eggs won't # be reported in picked versions either. It could be that we report # too much then, but we will see. newdist.precedence = pkg_resources.EGG_DIST finally: # Remember that temporary directories must be removed zc.buildout.rmtree.rmtree(tmp_dest) return newdist def sort_working_set(ws, eggs_dir, develop_eggs_dir): develop_paths = set() pattern = os.path.join(develop_eggs_dir, '*.egg-link') for egg_link in glob.glob(pattern): with open(egg_link, 'rt') as f: path = f.readline().strip() if path: develop_paths.add(path) sorted_paths = [] egg_paths = [] other_paths = [] for dist in ws: path = dist.location if path in develop_paths: sorted_paths.append(path) elif os.path.commonprefix([path, eggs_dir]) == eggs_dir: egg_paths.append(path) else: other_paths.append(path) sorted_paths.extend(egg_paths) sorted_paths.extend(other_paths) return pkg_resources.WorkingSet(sorted_paths) NOT_PICKED_AND_NOT_ALLOWED = """\ Picked: {name} = {version} The `{name}` egg does not have a version pin and `allow-picked-versions = false`. To resolve this, add {name} = {version} to the [versions] section, OR set `allow-picked-versions = true`."""
BuildoutWheel
python
allegroai__clearml
clearml/backend_api/services/v2_9/tasks.py
{ "start": 276237, "end": 278130 }
class ____(Response): """ Response of tasks.set_requirements endpoint. :param updated: Number of tasks updated (0 or 1) :type updated: int :param fields: Updated fields names and values :type fields: dict """ _service = "tasks" _action = "set_requirements" _version = "2.9" _schema = { "definitions": {}, "properties": { "fields": { "additionalProperties": True, "description": "Updated fields names and values", "type": ["object", "null"], }, "updated": { "description": "Number of tasks updated (0 or 1)", "enum": [0, 1], "type": ["integer", "null"], }, }, "type": "object", } def __init__(self, updated: Optional[int] = None, fields: Optional[dict] = None, **kwargs: Any) -> None: super(SetRequirementsResponse, self).__init__(**kwargs) self.updated = updated self.fields = fields @schema_property("updated") def updated(self) -> Optional[int]: return self._property_updated @updated.setter def updated(self, value: Optional[int]) -> None: if value is None: self._property_updated = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "updated", six.integer_types) self._property_updated = value @schema_property("fields") def fields(self) -> Optional[dict]: return self._property_fields @fields.setter def fields(self, value: Optional[dict]) -> None: if value is None: self._property_fields = None return self.assert_isinstance(value, "fields", (dict,)) self._property_fields = value
SetRequirementsResponse
python
conda__conda
conda/exceptions.py
{ "start": 38952, "end": 39203 }
class ____(InvalidSpec): def __init__(self, invalid_spec: str | MatchSpec, details: str): message = "Invalid spec '%(invalid_spec)s': %(details)s" super().__init__(message, invalid_spec=invalid_spec, details=details)
InvalidMatchSpec
python
wandb__wandb
wandb/vendor/pygments/lexers/haskell.py
{ "start": 22059, "end": 22758 }
class ____(LiterateLexer): """ For Literate Idris (Bird-style or LaTeX) source. Additional options accepted: `litstyle` If given, must be ``"bird"`` or ``"latex"``. If not given, the style is autodetected: if the first non-whitespace character in the source is a backslash or percent character, LaTeX is assumed, else Bird. .. versionadded:: 2.0 """ name = 'Literate Idris' aliases = ['lidr', 'literate-idris', 'lidris'] filenames = ['*.lidr'] mimetypes = ['text/x-literate-idris'] def __init__(self, **options): hslexer = IdrisLexer(**options) LiterateLexer.__init__(self, hslexer, **options)
LiterateIdrisLexer
python
mlflow__mlflow
mlflow/metrics/genai/base.py
{ "start": 112, "end": 3861 }
class ____: """ Stores the sample example during few shot learning during LLM evaluation Args: input: The input provided to the model output: The output generated by the model score: The score given by the evaluator justification: The justification given by the evaluator grading_context: The grading_context provided to the evaluator for evaluation. Either a dictionary of grading context column names and grading context strings or a single grading context string. .. code-block:: python :caption: Example for creating an EvaluationExample from mlflow.metrics.genai import EvaluationExample example = EvaluationExample( input="What is MLflow?", output="MLflow is an open-source platform for managing machine " "learning workflows, including experiment tracking, model packaging, " "versioning, and deployment, simplifying the ML lifecycle.", score=4, justification="The definition effectively explains what MLflow is " "its purpose, and its developer. It could be more concise for a 5-score.", grading_context={ "ground_truth": "MLflow is an open-source platform for managing " "the end-to-end machine learning (ML) lifecycle. It was developed by Databricks, " "a company that specializes in big data and machine learning solutions. MLflow is " "designed to address the challenges that data scientists and machine learning " "engineers face when developing, training, and deploying machine learning models." }, ) print(str(example)) .. code-block:: text :caption: Output Input: What is MLflow? Provided output: "MLflow is an open-source platform for managing machine " "learning workflows, including experiment tracking, model packaging, " "versioning, and deployment, simplifying the ML lifecycle." Provided ground_truth: "MLflow is an open-source platform for managing " "the end-to-end machine learning (ML) lifecycle. It was developed by Databricks, " "a company that specializes in big data and machine learning solutions. 
MLflow is " "designed to address the challenges that data scientists and machine learning " "engineers face when developing, training, and deploying machine learning models." Score: 4 Justification: "The definition effectively explains what MLflow is " "its purpose, and its developer. It could be more concise for a 5-score." """ output: str score: float justification: str input: str | None = None grading_context: dict[str, str] | str | None = None def _format_grading_context(self): if isinstance(self.grading_context, dict): return "\n".join( [f"key: {key}\nvalue:\n{value}" for key, value in self.grading_context.items()] ) else: return self.grading_context def __str__(self) -> str: return PromptTemplate( [ """ Example Input: {input} """, """ Example Output: {output} """, """ Additional information used by the model: {grading_context} """, """ Example score: {score} Example justification: {justification} """, ] ).format( input=self.input, output=self.output, grading_context=self._format_grading_context(), score=self.score, justification=self.justification, )
EvaluationExample
python
huggingface__transformers
src/transformers/models/aimv2/modeling_aimv2.py
{ "start": 14396, "end": 15897 }
class ____(nn.Module): def __init__(self, config: Aimv2VisionConfig): super().__init__() self.hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.k_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=config.qkv_bias) self.v_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=config.qkv_bias) self.cls_token = nn.Parameter(torch.zeros(1, 1, self.hidden_size)) self.output_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=True) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: batch_size, seq_len, hidden_dim = hidden_states.shape cls_token = self.cls_token.expand(batch_size, -1, -1) key = self.k_proj(hidden_states).reshape(batch_size, seq_len, self.num_heads, hidden_dim // self.num_heads) value = self.v_proj(hidden_states).reshape(batch_size, seq_len, self.num_heads, hidden_dim // self.num_heads) query = cls_token.reshape(batch_size, 1, self.num_heads, hidden_dim // self.num_heads) key = key.permute(0, 2, 1, 3) value = value.permute(0, 2, 1, 3) query = query.permute(0, 2, 1, 3) attn_output = F.scaled_dot_product_attention(query, key, value) attn_output = attn_output.transpose(1, 2).reshape(batch_size, 1, hidden_dim) attn_output = attn_output.mean(dim=1) output = self.output_proj(attn_output) return output @auto_docstring
Aimv2AttentionPoolingHead
python
django__django
tests/auth_tests/test_management.py
{ "start": 5809, "end": 9905 }
class ____(TestCase): @classmethod def setUpTestData(cls): cls.user = User.objects.create_user(username="joe", password="qwerty") def setUp(self): self.stdout = StringIO() self.addCleanup(self.stdout.close) self.stderr = StringIO() self.addCleanup(self.stderr.close) @mock.patch.object(getpass, "getpass", return_value="password") def test_get_pass(self, mock_get_pass): call_command("changepassword", username="joe", stdout=self.stdout) self.assertIs(User.objects.get(username="joe").check_password("password"), True) @mock.patch.object(getpass, "getpass", return_value="") def test_get_pass_no_input(self, mock_get_pass): with self.assertRaisesMessage(CommandError, "aborted"): call_command("changepassword", username="joe", stdout=self.stdout) @mock.patch.object(changepassword.Command, "_get_pass", return_value="new_password") def test_system_username(self, mock_get_pass): """The system username is used if --username isn't provided.""" username = getpass.getuser() User.objects.create_user(username=username, password="qwerty") call_command("changepassword", stdout=self.stdout) self.assertIs( User.objects.get(username=username).check_password("new_password"), True ) def test_nonexistent_username(self): with self.assertRaisesMessage(CommandError, "user 'test' does not exist"): call_command("changepassword", username="test", stdout=self.stdout) @mock.patch.object(changepassword.Command, "_get_pass", return_value="not qwerty") def test_that_changepassword_command_changes_joes_password(self, mock_get_pass): """ Executing the changepassword management command should change joe's password """ self.assertTrue(self.user.check_password("qwerty")) call_command("changepassword", username="joe", stdout=self.stdout) command_output = self.stdout.getvalue().strip() self.assertEqual( command_output, "Changing password for user 'joe'\n" "Password changed successfully for user 'joe'", ) self.assertTrue(User.objects.get(username="joe").check_password("not qwerty")) @mock.patch.object( 
changepassword.Command, "_get_pass", side_effect=lambda *args: str(args) ) def test_that_max_tries_exits_1(self, mock_get_pass): """ A CommandError should be thrown by handle() if the user enters in mismatched passwords three times. """ msg = "Aborting password change for user 'joe' after 3 attempts" with self.assertRaisesMessage(CommandError, msg): call_command( "changepassword", username="joe", stdout=self.stdout, stderr=self.stderr ) @mock.patch.object(changepassword.Command, "_get_pass", return_value="1234567890") def test_password_validation(self, mock_get_pass): """ A CommandError should be raised if the user enters in passwords which fail validation three times. """ abort_msg = "Aborting password change for user 'joe' after 3 attempts" with self.assertRaisesMessage(CommandError, abort_msg): call_command( "changepassword", username="joe", stdout=self.stdout, stderr=self.stderr ) self.assertIn("This password is entirely numeric.", self.stderr.getvalue()) @mock.patch.object(changepassword.Command, "_get_pass", return_value="not qwerty") def test_that_changepassword_command_works_with_nonascii_output( self, mock_get_pass ): """ #21627 -- Executing the changepassword management command should allow non-ASCII characters from the User object representation. """ # 'Julia' with accented 'u': User.objects.create_user(username="J\xfalia", password="qwerty") call_command("changepassword", username="J\xfalia", stdout=self.stdout)
ChangepasswordManagementCommandTestCase
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/flake8_pie/PIE790.py
{ "start": 1580, "end": 1760 }
class ____(Protocol): def func(self) -> str: """Docstring""" ... def impl(self) -> str: """Docstring""" return self.func() import abc
Repro
python
pypa__pip
src/pip/_vendor/rich/live.py
{ "start": 1249, "end": 15180 }
class ____(JupyterMixin, RenderHook): """Renders an auto-updating live display of any given renderable. Args: renderable (RenderableType, optional): The renderable to live display. Defaults to displaying nothing. console (Console, optional): Optional Console instance. Defaults to an internal Console instance writing to stdout. screen (bool, optional): Enable alternate screen mode. Defaults to False. auto_refresh (bool, optional): Enable auto refresh. If disabled, you will need to call `refresh()` or `update()` with refresh flag. Defaults to True refresh_per_second (float, optional): Number of times per second to refresh the live display. Defaults to 4. transient (bool, optional): Clear the renderable on exit (has no effect when screen=True). Defaults to False. redirect_stdout (bool, optional): Enable redirection of stdout, so ``print`` may be used. Defaults to True. redirect_stderr (bool, optional): Enable redirection of stderr. Defaults to True. vertical_overflow (VerticalOverflowMethod, optional): How to handle renderable when it is too tall for the console. Defaults to "ellipsis". get_renderable (Callable[[], RenderableType], optional): Optional callable to get renderable. Defaults to None. 
""" def __init__( self, renderable: Optional[RenderableType] = None, *, console: Optional[Console] = None, screen: bool = False, auto_refresh: bool = True, refresh_per_second: float = 4, transient: bool = False, redirect_stdout: bool = True, redirect_stderr: bool = True, vertical_overflow: VerticalOverflowMethod = "ellipsis", get_renderable: Optional[Callable[[], RenderableType]] = None, ) -> None: assert refresh_per_second > 0, "refresh_per_second must be > 0" self._renderable = renderable self.console = console if console is not None else get_console() self._screen = screen self._alt_screen = False self._redirect_stdout = redirect_stdout self._redirect_stderr = redirect_stderr self._restore_stdout: Optional[IO[str]] = None self._restore_stderr: Optional[IO[str]] = None self._lock = RLock() self.ipy_widget: Optional[Any] = None self.auto_refresh = auto_refresh self._started: bool = False self.transient = True if screen else transient self._refresh_thread: Optional[_RefreshThread] = None self.refresh_per_second = refresh_per_second self.vertical_overflow = vertical_overflow self._get_renderable = get_renderable self._live_render = LiveRender( self.get_renderable(), vertical_overflow=vertical_overflow ) self._nested = False @property def is_started(self) -> bool: """Check if live display has been started.""" return self._started def get_renderable(self) -> RenderableType: renderable = ( self._get_renderable() if self._get_renderable is not None else self._renderable ) return renderable or "" def start(self, refresh: bool = False) -> None: """Start live rendering display. Args: refresh (bool, optional): Also refresh. Defaults to False. 
""" with self._lock: if self._started: return self._started = True if not self.console.set_live(self): self._nested = True return if self._screen: self._alt_screen = self.console.set_alt_screen(True) self.console.show_cursor(False) self._enable_redirect_io() self.console.push_render_hook(self) if refresh: try: self.refresh() except Exception: # If refresh fails, we want to stop the redirection of sys.stderr, # so the error stacktrace is properly displayed in the terminal. # (or, if the code that calls Rich captures the exception and wants to display something, # let this be displayed in the terminal). self.stop() raise if self.auto_refresh: self._refresh_thread = _RefreshThread(self, self.refresh_per_second) self._refresh_thread.start() def stop(self) -> None: """Stop live rendering display.""" with self._lock: if not self._started: return self._started = False self.console.clear_live() if self._nested: if not self.transient: self.console.print(self.renderable) return if self.auto_refresh and self._refresh_thread is not None: self._refresh_thread.stop() self._refresh_thread = None # allow it to fully render on the last even if overflow self.vertical_overflow = "visible" with self.console: try: if not self._alt_screen and not self.console.is_jupyter: self.refresh() finally: self._disable_redirect_io() self.console.pop_render_hook() if not self._alt_screen and self.console.is_terminal: self.console.line() self.console.show_cursor(True) if self._alt_screen: self.console.set_alt_screen(False) if self.transient and not self._alt_screen: self.console.control(self._live_render.restore_cursor()) if self.ipy_widget is not None and self.transient: self.ipy_widget.close() # pragma: no cover def __enter__(self) -> Self: self.start(refresh=self._renderable is not None) return self def __exit__( self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType], ) -> None: self.stop() def _enable_redirect_io(self) -> None: """Enable 
redirecting of stdout / stderr.""" if self.console.is_terminal or self.console.is_jupyter: if self._redirect_stdout and not isinstance(sys.stdout, FileProxy): self._restore_stdout = sys.stdout sys.stdout = cast("TextIO", FileProxy(self.console, sys.stdout)) if self._redirect_stderr and not isinstance(sys.stderr, FileProxy): self._restore_stderr = sys.stderr sys.stderr = cast("TextIO", FileProxy(self.console, sys.stderr)) def _disable_redirect_io(self) -> None: """Disable redirecting of stdout / stderr.""" if self._restore_stdout: sys.stdout = cast("TextIO", self._restore_stdout) self._restore_stdout = None if self._restore_stderr: sys.stderr = cast("TextIO", self._restore_stderr) self._restore_stderr = None @property def renderable(self) -> RenderableType: """Get the renderable that is being displayed Returns: RenderableType: Displayed renderable. """ live_stack = self.console._live_stack renderable: RenderableType if live_stack and self is live_stack[0]: # The first Live instance will render everything in the Live stack renderable = Group(*[live.get_renderable() for live in live_stack]) else: renderable = self.get_renderable() return Screen(renderable) if self._alt_screen else renderable def update(self, renderable: RenderableType, *, refresh: bool = False) -> None: """Update the renderable that is being displayed Args: renderable (RenderableType): New renderable to use. refresh (bool, optional): Refresh the display. Defaults to False. 
""" if isinstance(renderable, str): renderable = self.console.render_str(renderable) with self._lock: self._renderable = renderable if refresh: self.refresh() def refresh(self) -> None: """Update the display of the Live Render.""" with self._lock: self._live_render.set_renderable(self.renderable) if self._nested: if self.console._live_stack: self.console._live_stack[0].refresh() return if self.console.is_jupyter: # pragma: no cover try: from IPython.display import display from ipywidgets import Output except ImportError: import warnings warnings.warn('install "ipywidgets" for Jupyter support') else: if self.ipy_widget is None: self.ipy_widget = Output() display(self.ipy_widget) with self.ipy_widget: self.ipy_widget.clear_output(wait=True) self.console.print(self._live_render.renderable) elif self.console.is_terminal and not self.console.is_dumb_terminal: with self.console: self.console.print(Control()) elif ( not self._started and not self.transient ): # if it is finished allow files or dumb-terminals to see final result with self.console: self.console.print(Control()) def process_renderables( self, renderables: List[ConsoleRenderable] ) -> List[ConsoleRenderable]: """Process renderables to restore cursor and display progress.""" self._live_render.vertical_overflow = self.vertical_overflow if self.console.is_interactive: # lock needs acquiring as user can modify live_render renderable at any time unlike in Progress. 
with self._lock: reset = ( Control.home() if self._alt_screen else self._live_render.position_cursor() ) renderables = [reset, *renderables, self._live_render] elif ( not self._started and not self.transient ): # if it is finished render the final output for files or dumb_terminals renderables = [*renderables, self._live_render] return renderables if __name__ == "__main__": # pragma: no cover import random import time from itertools import cycle from typing import Dict, List, Tuple from .align import Align from .console import Console from .live import Live as Live from .panel import Panel from .rule import Rule from .syntax import Syntax from .table import Table console = Console() syntax = Syntax( '''def loop_last(values: Iterable[T]) -> Iterable[Tuple[bool, T]]: """Iterate and generate a tuple with a flag for last value.""" iter_values = iter(values) try: previous_value = next(iter_values) except StopIteration: return for value in iter_values: yield False, previous_value previous_value = value yield True, previous_value''', "python", line_numbers=True, ) table = Table("foo", "bar", "baz") table.add_row("1", "2", "3") progress_renderables = [ "You can make the terminal shorter and taller to see the live table hide" "Text may be printed while the progress bars are rendering.", Panel("In fact, [i]any[/i] renderable will work"), "Such as [magenta]tables[/]...", table, "Pretty printed structures...", {"type": "example", "text": "Pretty printed"}, "Syntax...", syntax, Rule("Give it a try!"), ] examples = cycle(progress_renderables) exchanges = [ "SGD", "MYR", "EUR", "USD", "AUD", "JPY", "CNH", "HKD", "CAD", "INR", "DKK", "GBP", "RUB", "NZD", "MXN", "IDR", "TWD", "THB", "VND", ] with Live(console=console) as live_table: exchange_rate_dict: Dict[Tuple[str, str], float] = {} for index in range(100): select_exchange = exchanges[index % len(exchanges)] for exchange in exchanges: if exchange == select_exchange: continue time.sleep(0.4) if random.randint(0, 10) < 1: 
console.log(next(examples)) exchange_rate_dict[(select_exchange, exchange)] = 200 / ( (random.random() * 320) + 1 ) if len(exchange_rate_dict) > len(exchanges) - 1: exchange_rate_dict.pop(list(exchange_rate_dict.keys())[0]) table = Table(title="Exchange Rates") table.add_column("Source Currency") table.add_column("Destination Currency") table.add_column("Exchange Rate") for (source, dest), exchange_rate in exchange_rate_dict.items(): table.add_row( source, dest, Text( f"{exchange_rate:.4f}", style="red" if exchange_rate < 1.0 else "green", ), ) live_table.update(Align.center(table))
Live
python
apache__thrift
lib/py/src/transport/THeaderTransport.py
{ "start": 2081, "end": 12931 }
class ____(TTransportBase, CReadableTransport): def __init__(self, transport, allowed_client_types, default_protocol=THeaderSubprotocolID.BINARY): self._transport = transport self._client_type = THeaderClientType.HEADERS self._allowed_client_types = allowed_client_types self._read_buffer = BytesIO(b"") self._read_headers = {} self._write_buffer = BytesIO() self._write_headers = {} self._write_transforms = [] self.flags = 0 self.sequence_id = 0 self._protocol_id = default_protocol self._max_frame_size = HARD_MAX_FRAME_SIZE def isOpen(self): return self._transport.isOpen() def open(self): return self._transport.open() def close(self): return self._transport.close() def get_headers(self): return self._read_headers def set_header(self, key, value): if not isinstance(key, bytes): raise ValueError("header names must be bytes") if not isinstance(value, bytes): raise ValueError("header values must be bytes") self._write_headers[key] = value def clear_headers(self): self._write_headers.clear() def add_transform(self, transform_id): if transform_id not in WRITE_TRANSFORMS_BY_ID: raise ValueError("unknown transform") self._write_transforms.append(transform_id) def set_max_frame_size(self, size): if not 0 < size < HARD_MAX_FRAME_SIZE: raise ValueError("maximum frame size should be < %d and > 0" % HARD_MAX_FRAME_SIZE) self._max_frame_size = size @property def protocol_id(self): if self._client_type == THeaderClientType.HEADERS: return self._protocol_id elif self._client_type in (THeaderClientType.FRAMED_BINARY, THeaderClientType.UNFRAMED_BINARY): return THeaderSubprotocolID.BINARY elif self._client_type in (THeaderClientType.FRAMED_COMPACT, THeaderClientType.UNFRAMED_COMPACT): return THeaderSubprotocolID.COMPACT else: raise TTransportException( TTransportException.INVALID_CLIENT_TYPE, "Protocol ID not know for client type %d" % self._client_type, ) def read(self, sz): # if there are bytes left in the buffer, produce those first. 
bytes_read = self._read_buffer.read(sz) bytes_left_to_read = sz - len(bytes_read) if bytes_left_to_read == 0: return bytes_read # if we've determined this is an unframed client, just pass the read # through to the underlying transport until we're reset again at the # beginning of the next message. if self._client_type in (THeaderClientType.UNFRAMED_BINARY, THeaderClientType.UNFRAMED_COMPACT): return bytes_read + self._transport.read(bytes_left_to_read) # we're empty and (maybe) framed. fill the buffers with the next frame. self.readFrame(bytes_left_to_read) return bytes_read + self._read_buffer.read(bytes_left_to_read) def _set_client_type(self, client_type): if client_type not in self._allowed_client_types: raise TTransportException( TTransportException.INVALID_CLIENT_TYPE, "Client type %d not allowed by server." % client_type, ) self._client_type = client_type def readFrame(self, req_sz): # the first word could either be the length field of a framed message # or the first bytes of an unframed message. first_word = self._transport.readAll(I32.size) frame_size, = I32.unpack(first_word) is_unframed = False if frame_size & TBinaryProtocol.VERSION_MASK == TBinaryProtocol.VERSION_1: self._set_client_type(THeaderClientType.UNFRAMED_BINARY) is_unframed = True elif (first_word[0] == TCompactProtocol.PROTOCOL_ID and first_word[1] & TCompactProtocol.VERSION_MASK == TCompactProtocol.VERSION): self._set_client_type(THeaderClientType.UNFRAMED_COMPACT) is_unframed = True if is_unframed: bytes_left_to_read = req_sz - I32.size if bytes_left_to_read > 0: rest = self._transport.read(bytes_left_to_read) else: rest = b"" self._read_buffer = BytesIO(first_word + rest) return # ok, we're still here so we're framed. 
if frame_size > self._max_frame_size: raise TTransportException( TTransportException.SIZE_LIMIT, "Frame was too large.", ) read_buffer = BytesIO(self._transport.readAll(frame_size)) # the next word is either going to be the version field of a # binary/compact protocol message or the magic value + flags of a # header protocol message. second_word = read_buffer.read(I32.size) version, = I32.unpack(second_word) read_buffer.seek(0) if version >> 16 == HEADER_MAGIC: self._set_client_type(THeaderClientType.HEADERS) self._read_buffer = self._parse_header_format(read_buffer) elif version & TBinaryProtocol.VERSION_MASK == TBinaryProtocol.VERSION_1: self._set_client_type(THeaderClientType.FRAMED_BINARY) self._read_buffer = read_buffer elif (second_word[0] == TCompactProtocol.PROTOCOL_ID and second_word[1] & TCompactProtocol.VERSION_MASK == TCompactProtocol.VERSION): self._set_client_type(THeaderClientType.FRAMED_COMPACT) self._read_buffer = read_buffer else: raise TTransportException( TTransportException.INVALID_CLIENT_TYPE, "Could not detect client transport type.", ) def _parse_header_format(self, buffer): # make BytesIO look like TTransport for varint helpers buffer_transport = TMemoryBuffer() buffer_transport._buffer = buffer buffer.read(2) # discard the magic bytes self.flags, = U16.unpack(buffer.read(U16.size)) self.sequence_id, = I32.unpack(buffer.read(I32.size)) header_length = U16.unpack(buffer.read(U16.size))[0] * 4 end_of_headers = buffer.tell() + header_length if end_of_headers > len(buffer.getvalue()): raise TTransportException( TTransportException.SIZE_LIMIT, "Header size is larger than whole frame.", ) self._protocol_id = readVarint(buffer_transport) transforms = [] transform_count = readVarint(buffer_transport) for _ in range(transform_count): transform_id = readVarint(buffer_transport) if transform_id not in READ_TRANSFORMS_BY_ID: raise TApplicationException( TApplicationException.INVALID_TRANSFORM, "Unknown transform: %d" % transform_id, ) 
transforms.append(transform_id) transforms.reverse() headers = {} while buffer.tell() < end_of_headers: header_type = readVarint(buffer_transport) if header_type == TInfoHeaderType.KEY_VALUE: count = readVarint(buffer_transport) for _ in range(count): key = _readString(buffer_transport) value = _readString(buffer_transport) headers[key] = value else: break # ignore unknown headers self._read_headers = headers # skip padding / anything we didn't understand buffer.seek(end_of_headers) payload = buffer.read() for transform_id in transforms: transform_fn = READ_TRANSFORMS_BY_ID[transform_id] payload = transform_fn(payload) return BytesIO(payload) def write(self, buf): self._write_buffer.write(buf) def flush(self): payload = self._write_buffer.getvalue() self._write_buffer = BytesIO() buffer = BytesIO() if self._client_type == THeaderClientType.HEADERS: for transform_id in self._write_transforms: transform_fn = WRITE_TRANSFORMS_BY_ID[transform_id] payload = transform_fn(payload) headers = BytesIO() writeVarint(headers, self._protocol_id) writeVarint(headers, len(self._write_transforms)) for transform_id in self._write_transforms: writeVarint(headers, transform_id) if self._write_headers: writeVarint(headers, TInfoHeaderType.KEY_VALUE) writeVarint(headers, len(self._write_headers)) for key, value in self._write_headers.items(): _writeString(headers, key) _writeString(headers, value) self._write_headers = {} padding_needed = (4 - (len(headers.getvalue()) % 4)) % 4 headers.write(b"\x00" * padding_needed) header_bytes = headers.getvalue() buffer.write(I32.pack(10 + len(header_bytes) + len(payload))) buffer.write(U16.pack(HEADER_MAGIC)) buffer.write(U16.pack(self.flags)) buffer.write(I32.pack(self.sequence_id)) buffer.write(U16.pack(len(header_bytes) // 4)) buffer.write(header_bytes) buffer.write(payload) elif self._client_type in (THeaderClientType.FRAMED_BINARY, THeaderClientType.FRAMED_COMPACT): buffer.write(I32.pack(len(payload))) buffer.write(payload) elif 
self._client_type in (THeaderClientType.UNFRAMED_BINARY, THeaderClientType.UNFRAMED_COMPACT): buffer.write(payload) else: raise TTransportException( TTransportException.INVALID_CLIENT_TYPE, "Unknown client type.", ) # the frame length field doesn't count towards the frame payload size frame_bytes = buffer.getvalue() frame_payload_size = len(frame_bytes) - 4 if frame_payload_size > self._max_frame_size: raise TTransportException( TTransportException.SIZE_LIMIT, "Attempting to send frame that is too large.", ) self._transport.write(frame_bytes) self._transport.flush() @property def cstringio_buf(self): return self._read_buffer def cstringio_refill(self, partialread, reqlen): result = bytearray(partialread) while len(result) < reqlen: result += self.read(reqlen - len(result)) self._read_buffer = BytesIO(result) return self._read_buffer
THeaderTransport
python
google__jax
tests/pallas/mgpu_matmul_test.py
{ "start": 1610, "end": 2725 }
class ____(jtu.JaxTestCase): def setUp(self): super().setUp() if not jtu.test_device_matches(["cuda"]): self.skipTest("Test requires an NVIDIA GPU") self.enter_context(pallas_call._PALLAS_USE_MOSAIC_GPU(True)) @parameterized.product( m=(1024, 4096), k=(1024, 4096), n=(1024, 4096), dtype=(jnp.float16,), ) def test_blackwell_matmul( self, m, n, k, dtype, ): if not jtu.is_cuda_compute_capability_equal("10.0"): self.skipTest("Only works on GPU with capability sm100a") k1, k2, = jax.random.split(jax.random.key(42), 2) a = jax.random.normal(k1, (m, k), dtype) b = jax.random.normal(k2, (k, n), dtype) out = blackwell_matmul_mgpu.matmul_kernel( a, b, blackwell_matmul_mgpu.TuningConfig( tile_m=128, tile_n=128, tile_k=128, max_concurrent_steps=2, collective=False, ), ) out_ref = a @ b np.testing.assert_allclose(out, out_ref, atol=2e-3, rtol=1e-3) @jtu.with_config(jax_traceback_filtering="off")
MatrixMultiplicationSm100ATest
python
readthedocs__readthedocs.org
readthedocs/organizations/filters.py
{ "start": 6111, "end": 8276 }
class ____(OrganizationFilterSet): """ Filter and sorting set for organization member listing page. This filter set's underlying queryset from the member listing view is the manager method ``Organization.members``. The model described in this filter is effectively ``User``, but through a union of ``TeamMembers.user`` and ``Organizations.owners``. This filter set will result in the following filters in the UI: Team A list of ``Team`` names, using ``Team.slug`` as the lookup field. This is linked to directly from the team listing page, to show the users that are members of a particular team. Access This is an extension of ``Team.access`` in a way, but with a new option (``ACCESS_OWNER``) to describe ownership privileges through organization ownership. Our modeling is not ideal here, so instead of aiming for model purity and a confusing UI/UX, this aims for hiding confusing modeling from the user with clear UI/UX. Otherwise, two competing filters are required for "user has privileges granted through a team" and "user has privileges granted through ownership". """ ACCESS_OWNER = "owner" teams__slug = FilteredModelChoiceFilter( label=_("Team"), empty_label=_("All teams"), field_name="teams", to_field_name="slug", queryset_method="get_team_queryset", label_attribute="name", ) access = ChoiceFilter( label=_("Access"), empty_label=_("All access levels"), choices=ACCESS_LEVELS + ((ACCESS_OWNER, _("Owner")),), method="get_access", ) def get_access(self, queryset, field_name, value): # Note: the queryset here is effectively against the ``User`` model, and # is from Organization.members, a union of TeamMember.user and # Organization.owners. if value == self.ACCESS_OWNER: return queryset.filter(owner_organizations=self.organization) if value is not None: return queryset.filter(teams__access=value) return queryset
OrganizationTeamMemberListFilterSet
python
sphinx-doc__sphinx
tests/roots/test-ext-autodoc/target/typed_vars.py
{ "start": 80, "end": 226 }
class ____: def __init__(self, name): self.__doc__ = f'This is {name}' def __get__(self): # NoQA: PLE0302 pass
_Descriptor
python
tornadoweb__tornado
tornado/test/netutil_test.py
{ "start": 1808, "end": 2107 }
class ____(_ResolverTestMixin): def setUp(self): super().setUp() self.resolver = BlockingResolver() # getaddrinfo-based tests need mocking to reliably generate errors; # some configurations are slow to produce errors and take longer than # our default timeout.
BlockingResolverTest
python
openai__openai-python
src/openai/resources/responses/responses.py
{ "start": 156373, "end": 157380 }
class ____: def __init__(self, responses: AsyncResponses) -> None: self._responses = responses self.create = _legacy_response.async_to_raw_response_wrapper( responses.create, ) self.retrieve = _legacy_response.async_to_raw_response_wrapper( responses.retrieve, ) self.delete = _legacy_response.async_to_raw_response_wrapper( responses.delete, ) self.cancel = _legacy_response.async_to_raw_response_wrapper( responses.cancel, ) self.parse = _legacy_response.async_to_raw_response_wrapper( responses.parse, ) @cached_property def input_items(self) -> AsyncInputItemsWithRawResponse: return AsyncInputItemsWithRawResponse(self._responses.input_items) @cached_property def input_tokens(self) -> AsyncInputTokensWithRawResponse: return AsyncInputTokensWithRawResponse(self._responses.input_tokens)
AsyncResponsesWithRawResponse
python
scikit-learn__scikit-learn
sklearn/model_selection/tests/test_validation.py
{ "start": 4710, "end": 5292 }
class ____(BaseEstimator): """Dummy classifier to test the validation curve""" def __init__(self, param=0.5): self.X_subset = None self.param = param def fit(self, X_subset, y_subset): self.X_subset = X_subset self.train_sizes = X_subset.shape[0] return self def predict(self, X): raise NotImplementedError def score(self, X=None, y=None): return self.param if self._is_training_data(X) else 1 - self.param def _is_training_data(self, X): return X is self.X_subset
MockEstimatorWithParameter
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 964634, "end": 966355 }
class ____(sgqlc.types.Type): """An individual vulnerability within an Advisory""" __schema__ = github_schema __field_names__ = ("advisory", "first_patched_version", "package", "severity", "updated_at", "vulnerable_version_range") advisory = sgqlc.types.Field(sgqlc.types.non_null("SecurityAdvisory"), graphql_name="advisory") """The Advisory associated with this Vulnerability""" first_patched_version = sgqlc.types.Field(SecurityAdvisoryPackageVersion, graphql_name="firstPatchedVersion") """The first version containing a fix for the vulnerability""" package = sgqlc.types.Field(sgqlc.types.non_null(SecurityAdvisoryPackage), graphql_name="package") """A description of the vulnerable package""" severity = sgqlc.types.Field(sgqlc.types.non_null(SecurityAdvisorySeverity), graphql_name="severity") """The severity of the vulnerability within this package""" updated_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="updatedAt") """When the vulnerability was last updated""" vulnerable_version_range = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="vulnerableVersionRange") """A string that describes the vulnerable package versions. This string follows a basic syntax with a few forms. + `= 0.2.0` denotes a single vulnerable version. + `<= 1.0.8` denotes a version range up to and including the specified version + `< 0.1.11` denotes a version range up to, but excluding, the specified version + `>= 4.3.0, < 4.3.5` denotes a version range with a known minimum and maximum version. + `>= 0.0.1` denotes a version range with a known minimum, but no known maximum """
SecurityVulnerability
python
bokeh__bokeh
src/bokeh/models/tools.py
{ "start": 31097, "end": 35116 }
class ____(Tap, SelectTool): ''' *toolbar icon*: |tap_icon| The tap selection tool allows the user to select at single points by left-clicking a mouse, or tapping with a finger. See :ref:`ug_styling_plots_selected_unselected_glyphs` for information on styling selected and unselected glyphs. .. |tap_icon| image:: /_images/icons/tap.svg :height: 24px :alt: Icon of two concentric circles with a + in the lower right representing the tap tool in the toolbar. .. note:: Selections can be comprised of multiple regions, even those made by different selection tools. Hold down the SHIFT key while making a selection to append the new selection to any previous selection that might exist. ''' # explicit __init__ to support Init signatures def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) mode = Enum(SelectionMode, default="toggle", help=""" Defines what should happen when a new selection is made. The default is to toggle the existing selection. Other options are to replace the selection, append to it, intersect with it, subtract from it or compute a symmetric difference with it. """) behavior = Enum("select", "inspect", default="select", help=""" This tool can be configured to either make selections or inspections on associated data sources. The difference is that selection changes propagate across bokeh and other components (e.g. selection glyph) will be notified. Inspections don't act like this, so it's useful to configure `callback` when setting `behavior='inspect'`. """) gesture = Enum("tap", "doubletap", default="tap", help=""" Specifies which kind of gesture will be used to trigger the tool, either a single or double tap. """) modifiers = Modifiers(default={}, help=""" Allows to configure a combination of modifier keys, which need to be pressed during the selected gesture for this tool to trigger. For example, to accept tap events only when ``Ctrl`` and ``Shift`` keys are pressed, use: .. 
code-block:: python tool = TapTool(modifiers=dict(ctrl=True, shift=True)) plot.add_tools(tool) or alternatively using a concise syntax: .. code-block:: python tool = TapTool(modifiers="ctrl+shift") plot.add_tools(tool) .. warning:: Configuring modifiers is a platform dependent feature and can make this tool unusable for example on mobile devices. """).accepts(String, _parse_modifiers) callback = Nullable(Instance(Callback), help=""" A callback to execute *whenever a glyph is "hit"* by a mouse click or tap. This is often useful with the :class:`~bokeh.models.callbacks.OpenURL` model to open URLs based on a user clicking or tapping a specific glyph. However, it may also be a :class:`~bokeh.models.callbacks.CustomJS` which can execute arbitrary JavaScript code in response to clicking or tapping glyphs. The callback will be executed for each individual glyph that is it hit by a click or tap, and will receive the ``TapTool`` model as ``cb_obj``. The optional ``cb_data`` will have the data source as its ``.source`` attribute and the selection geometry as its ``.geometries`` attribute. The ``.geometries`` attribute has 5 members. ``.type`` is the geometry type, which always a ``.point`` for a tap event. ``.sx`` and ``.sy`` are the screen X and Y coordinates where the tap occurred. ``.x`` and ``.y`` are the converted data coordinates for the item that has been selected. The ``.x`` and ``.y`` values are based on the axis assigned to that glyph. .. note:: This callback does *not* execute on every tap, only when a glyph is "hit". If you would like to execute a callback on every mouse tap, please see :ref:`ug_interaction_js_callbacks_customjs_js_on_event`. """)
TapTool
python
python-openxml__python-docx
src/docx/image/tiff.py
{ "start": 9706, "end": 10379 }
class ____(_IfdEntry): """IFD entry expressed as a numerator, denominator pair.""" @classmethod def _parse_value(cls, stream_rdr, offset, value_count, value_offset): """Return the rational (numerator / denominator) value at `value_offset` in `stream_rdr` as a floating-point number. Only supports single values at present. """ if value_count == 1: numerator = stream_rdr.read_long(value_offset) denominator = stream_rdr.read_long(value_offset, 4) return numerator / denominator else: # pragma: no cover return "Multi-value Rational NOT IMPLEMENTED"
_RationalIfdEntry
python
getsentry__sentry
tests/sentry/api/endpoints/test_event_committers.py
{ "start": 613, "end": 13117 }
class ____(APITestCase): def test_simple(self) -> None: self.login_as(user=self.user) project = self.create_project() min_ago = before_now(minutes=1).isoformat() event = self.store_event( data={ "fingerprint": ["group1"], "timestamp": min_ago, }, project_id=project.id, default_event_type=EventType.DEFAULT, ) # Create a commit and GroupOwner to simulate SCM-based suspect commit detection repo = self.create_repo(project=project, name="example/repo") commit = self.create_commit(project=project, repo=repo) assert event.group is not None GroupOwner.objects.create( group_id=event.group.id, project=project, organization_id=project.organization_id, type=GroupOwnerType.SUSPECT_COMMIT.value, user_id=self.user.id, context={ "commitId": commit.id, "suspectCommitStrategy": SuspectCommitStrategy.RELEASE_BASED, }, ) url = reverse( "sentry-api-0-event-file-committers", kwargs={ "event_id": event.event_id, "project_id_or_slug": event.project.slug, "organization_id_or_slug": event.project.organization.slug, }, ) response = self.client.get(url, format="json") assert response.status_code == 200, response.content assert len(response.data["committers"]) == 1 assert response.data["committers"][0]["author"]["username"] == "admin@localhost" commits = response.data["committers"][0]["commits"] assert len(commits) == 1 assert commits[0]["message"] == commit.message assert commits[0]["suspectCommitType"] == "via commit in release" group_owner = GroupOwner.objects.get( group=event.group, type=GroupOwnerType.SUSPECT_COMMIT.value ) assert "group_owner_id" in response.data["committers"][0] assert response.data["committers"][0]["group_owner_id"] == group_owner.id def test_no_group(self) -> None: self.login_as(user=self.user) project = self.create_project() min_ago = before_now(minutes=1).isoformat() event_data = load_data("transaction") event_data["start_timestamp"] = min_ago event_data["timestamp"] = min_ago event = self.store_event(data=event_data, project_id=project.id) url = reverse( 
"sentry-api-0-event-file-committers", kwargs={ "event_id": event.event_id, "project_id_or_slug": event.project.slug, "organization_id_or_slug": event.project.organization.slug, }, ) response = self.client.get(url, format="json") assert response.status_code == 404, response.content assert response.data["detail"] == "Issue not found" def test_no_committers(self) -> None: """Test that events without GroupOwners return 404""" self.login_as(user=self.user) project = self.create_project() min_ago = before_now(minutes=1).isoformat() event = self.store_event( data={"fingerprint": ["group1"], "timestamp": min_ago}, project_id=project.id ) url = reverse( "sentry-api-0-event-file-committers", kwargs={ "event_id": event.event_id, "project_id_or_slug": event.project.slug, "organization_id_or_slug": event.project.organization.slug, }, ) response = self.client.get(url, format="json") assert response.status_code == 404, response.content assert response.data["detail"] == "No committers found" def test_with_committers(self) -> None: self.login_as(user=self.user) self.repo = Repository.objects.create( organization_id=self.organization.id, name="example", integration_id=self.integration.id, ) self.commit = self.create_commit( project=self.project, repo=self.repo, author=self.create_commit_author(project=self.project, user=self.user), key="asdfwreqr", message="placeholder commit message", ) event = self.store_event( data={ "fingerprint": ["group1"], "timestamp": before_now(minutes=1).isoformat(), }, project_id=self.project.id, ) assert event.group is not None GroupOwner.objects.create( group=event.group, user_id=self.user.id, project=self.project, organization=self.organization, type=GroupOwnerType.SUSPECT_COMMIT.value, context={"commitId": self.commit.id}, ) url = reverse( "sentry-api-0-event-file-committers", kwargs={ "event_id": event.event_id, "project_id_or_slug": event.project.slug, "organization_id_or_slug": event.project.organization.slug, }, ) response = self.client.get(url, 
format="json") assert response.status_code == 200, response.content assert len(response.data["committers"]) == 1 assert response.data["committers"][0]["author"]["username"] == "admin@localhost" commits = response.data["committers"][0]["commits"] assert len(commits) == 1 assert commits[0]["message"] == "placeholder commit message" assert commits[0]["suspectCommitType"] == "via SCM integration" group_owner = GroupOwner.objects.get( group=event.group, type=GroupOwnerType.SUSPECT_COMMIT.value ) assert "group_owner_id" in response.data["committers"][0] assert response.data["committers"][0]["group_owner_id"] == group_owner.id def test_with_commit_context_pull_request(self) -> None: self.login_as(user=self.user) self.repo = Repository.objects.create( organization_id=self.organization.id, name="example", integration_id=self.integration.id, ) commit_author = self.create_commit_author(project=self.project, user=self.user) self.commit = self.create_commit( project=self.project, repo=self.repo, author=commit_author, key="asdfwreqr", message="placeholder commit message", ) pull_request = PullRequest.objects.create( organization_id=self.organization.id, repository_id=self.repo.id, key="9", author=commit_author, message="waddap", title="cool pr", merge_commit_sha=self.commit.key, ) event = self.store_event( data={ "fingerprint": ["group1"], "timestamp": before_now(minutes=1).isoformat(), }, project_id=self.project.id, ) assert event.group is not None GroupOwner.objects.create( group=event.group, user_id=self.user.id, project=self.project, organization=self.organization, type=GroupOwnerType.SUSPECT_COMMIT.value, context={"commitId": self.commit.id}, ) url = reverse( "sentry-api-0-event-file-committers", kwargs={ "event_id": event.event_id, "project_id_or_slug": event.project.slug, "organization_id_or_slug": event.project.organization.slug, }, ) response = self.client.get(url, format="json") assert response.status_code == 200, response.content commits = 
response.data["committers"][0]["commits"] assert len(commits) == 1 assert "pullRequest" in commits[0] assert commits[0]["pullRequest"]["id"] == pull_request.key assert commits[0]["suspectCommitType"] == "via SCM integration" def test_endpoint_with_no_user_groupowner(self) -> None: """Test API endpoint returns commit author fallback for GroupOwner with user_id=None.""" self.login_as(user=self.user) project = self.create_project() min_ago = before_now(minutes=1).isoformat() event = self.store_event( data={"fingerprint": ["group1"], "timestamp": min_ago}, project_id=project.id, default_event_type=EventType.DEFAULT, ) # Create commit with external author and GroupOwner with user_id=None repo = self.create_repo(project=project, name="example/repo") commit_author = CommitAuthor.objects.create( organization_id=project.organization_id, name="External Dev", email="external@example.com", ) commit = self.create_commit(project=project, repo=repo, author=commit_author) assert event.group is not None GroupOwner.objects.create( group_id=event.group.id, project=project, organization_id=project.organization_id, type=GroupOwnerType.SUSPECT_COMMIT.value, user_id=None, # No Sentry user mapping context={ "commitId": commit.id, "suspectCommitStrategy": SuspectCommitStrategy.RELEASE_BASED, }, ) url = reverse( "sentry-api-0-event-file-committers", kwargs={ "event_id": event.event_id, "project_id_or_slug": event.project.slug, "organization_id_or_slug": event.project.organization.slug, }, ) response = self.client.get(url, format="json") assert response.status_code == 200, response.content # Should return commit author fallback author = response.data["committers"][0]["author"] assert author["email"] == "external@example.com" assert author["name"] == "External Dev" assert "username" not in author # No Sentry user fields assert "id" not in author # No Sentry user fields group_owner = GroupOwner.objects.get( group_id=event.group.id, type=GroupOwnerType.SUSPECT_COMMIT.value ) assert 
"group_owner_id" in response.data["committers"][0] assert response.data["committers"][0]["group_owner_id"] == group_owner.id def test_release_based_suspect_commit_displayed(self) -> None: """Test that RELEASE_BASED suspect commits are displayed via the endpoint.""" self.login_as(user=self.user) project = self.create_project() repo = self.create_repo(project=project, name="example/repo") release = self.create_release(project=project, version="v1.0") commit = self.create_commit(project=project, repo=repo) release.set_commits([{"id": commit.key, "repository": repo.name}]) min_ago = before_now(minutes=1).isoformat() event = self.store_event( data={"fingerprint": ["group1"], "timestamp": min_ago}, project_id=project.id, default_event_type=EventType.DEFAULT, ) assert event.group is not None GroupOwner.objects.create( group_id=event.group.id, project=project, organization_id=project.organization_id, type=GroupOwnerType.SUSPECT_COMMIT.value, user_id=self.user.id, context={ "commitId": commit.id, "suspectCommitStrategy": SuspectCommitStrategy.RELEASE_BASED, }, ) url = reverse( "sentry-api-0-event-file-committers", kwargs={ "event_id": event.event_id, "project_id_or_slug": event.project.slug, "organization_id_or_slug": event.project.organization.slug, }, ) response = self.client.get(url, format="json") assert response.status_code == 200, response.content assert len(response.data["committers"]) == 1 commits = response.data["committers"][0]["commits"] assert len(commits) == 1 assert commits[0]["id"] == commit.key assert commits[0]["suspectCommitType"] == "via commit in release"
EventCommittersTest
python
pytest-dev__pytest
src/_pytest/logging.py
{ "start": 34930, "end": 35262 }
class ____(logging.NullHandler): """A logging handler used when live logging is disabled.""" def reset(self) -> None: pass def set_when(self, when: str) -> None: pass def handleError(self, record: logging.LogRecord) -> None: # Handled by LogCaptureHandler. pass
_LiveLoggingNullHandler
python
huggingface__transformers
src/transformers/models/layoutlm/modeling_layoutlm.py
{ "start": 11187, "end": 12678 }
class ____(GradientCheckpointingLayer): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = LayoutLMAttention(config) self.intermediate = LayoutLMIntermediate(config) self.output = LayoutLMOutput(config) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, **kwargs, ) -> tuple[torch.Tensor]: self_attention_outputs = self.attention( hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, **kwargs, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output # Copied from transformers.models.align.modeling_align.AlignTextEncoder with AlignText->LayoutLM
LayoutLMLayer
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/protocol6.py
{ "start": 733, "end": 1215 }
class ____: species: str attributes: list[str] type_of_hooves: str a: Mammal[str] = Sloth() # This should generate an error because Armadillo # uses bytes for its attributes, not str. b: Mammal[str] = Armadillo() # This should generate an error because Tapir # doesn't provide an attributes. c: Mammal[str] = Tapir() # This should generate an error because "species" # is incompatible. d: Ungulate[bytes] = Camel() e: Ungulate[str] = Cow() f: CamelLike = Camel()
Cow
python
getsentry__sentry
src/sentry/models/transaction_threshold.py
{ "start": 3069, "end": 4054 }
class ____(DefaultFieldsModelExisting): __relocation_scope__ = RelocationScope.Excluded project = FlexibleForeignKey("sentry.Project", unique=True, db_constraint=False) organization = FlexibleForeignKey("sentry.Organization") threshold = models.IntegerField() metric = models.PositiveSmallIntegerField(default=TransactionMetric.DURATION.value) edited_by_id = HybridCloudForeignKey("sentry.User", null=True, on_delete="SET_NULL") class Meta: app_label = "sentry" db_table = "sentry_projecttransactionthreshold" @classmethod def filter(cls, project_ids, organization_id, order_by, value_list): cache_key = get_project_threshold_cache_key( "sentry_projecttransactionthreshold", project_ids, organization_id, order_by, value_list, ) return _filter_and_cache(cls, cache_key, project_ids, organization_id, order_by, value_list)
ProjectTransactionThreshold
python
great-expectations__great_expectations
great_expectations/expectations/core/expect_column_median_to_be_between.py
{ "start": 2711, "end": 16008 }
class ____(ColumnAggregateExpectation): __doc__ = f"""{EXPECTATION_SHORT_DESCRIPTION} ExpectColumnMedianToBeBetween is a \ Column Aggregate Expectation. Column Aggregate Expectations are one of the most common types of Expectation. They are evaluated for a single column, and produce an aggregate Metric, such as a mean, standard deviation, number of unique values, column type, etc. If that Metric meets the conditions you set, the Expectation considers that data valid. Args: column (str): \ {COLUMN_DESCRIPTION} min_value (int or None): \ {MIN_VALUE_DESCRIPTION} max_value (int or None): \ {MAX_VALUE_DESCRIPTION} strict_min (boolean): \ {STRICT_MIN_DESCRIPTION} strict_max (boolean): \ {STRICT_MAX_DESCRIPTION} Other Parameters: result_format (str or None): \ Which output mode to use: BOOLEAN_ONLY, BASIC, COMPLETE, or SUMMARY. \ For more detail, see [result_format](https://docs.greatexpectations.io/docs/reference/expectations/result_format). catch_exceptions (boolean or None): \ If True, then catch exceptions and include them as part of the result object. \ For more detail, see [catch_exceptions](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#catch_exceptions). meta (dict or None): \ A JSON-serializable dictionary (nesting allowed) that will be included in the output without \ modification. For more detail, see [meta](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#meta). severity (str or None): \ {FAILURE_SEVERITY_DESCRIPTION} \ For more detail, see [failure severity](https://docs.greatexpectations.io/docs/cloud/expectations/expectations_overview/#failure-severity). Returns: An [ExpectationSuiteValidationResult](https://docs.greatexpectations.io/docs/terms/validation_result) Exact fields vary depending on the values passed to result_format, catch_exceptions, and meta. Notes: * min_value and max_value are both inclusive unless strict_min or strict_max are set to True. 
* If min_value is None, then max_value is treated as an upper bound * If max_value is None, then min_value is treated as a lower bound * observed_value field in the result object is customized for this expectation to be a float \ representing the true median for the column See Also: [ExpectColumnMeanToBeBetween](https://greatexpectations.io/expectations/expect_column_mean_to_be_between) [ExpectColumnStdevToBeBetween](https://greatexpectations.io/expectations/expect_column_stdev_to_be_between) Supported Data Sources: [{SUPPORTED_DATA_SOURCES[0]}](https://docs.greatexpectations.io/docs/application_integration_support/) [{SUPPORTED_DATA_SOURCES[1]}](https://docs.greatexpectations.io/docs/application_integration_support/) [{SUPPORTED_DATA_SOURCES[2]}](https://docs.greatexpectations.io/docs/application_integration_support/) [{SUPPORTED_DATA_SOURCES[3]}](https://docs.greatexpectations.io/docs/application_integration_support/) [{SUPPORTED_DATA_SOURCES[4]}](https://docs.greatexpectations.io/docs/application_integration_support/) [{SUPPORTED_DATA_SOURCES[5]}](https://docs.greatexpectations.io/docs/application_integration_support/) [{SUPPORTED_DATA_SOURCES[6]}](https://docs.greatexpectations.io/docs/application_integration_support/) [{SUPPORTED_DATA_SOURCES[7]}](https://docs.greatexpectations.io/docs/application_integration_support/) [{SUPPORTED_DATA_SOURCES[8]}](https://docs.greatexpectations.io/docs/application_integration_support/) [{SUPPORTED_DATA_SOURCES[9]}](https://docs.greatexpectations.io/docs/application_integration_support/) [{SUPPORTED_DATA_SOURCES[10]}](https://docs.greatexpectations.io/docs/application_integration_support/) [{SUPPORTED_DATA_SOURCES[11]}](https://docs.greatexpectations.io/docs/application_integration_support/) [{SUPPORTED_DATA_SOURCES[12]}](https://docs.greatexpectations.io/docs/application_integration_support/) Data Quality Issues: {DATA_QUALITY_ISSUES[0]} Example Data: test test2 0 1 1 1 1.3 7 2 .8 2.5 3 2 3 Code Examples: Passing Case: Input: 
ExpectColumnMedianToBeBetween( column="test", min_value=1, max_value=3 ) Output: {{ "exception_info": {{ "raised_exception": false, "exception_traceback": null, "exception_message": null }}, "result": {{ "observed_value": 1.15 }}, "meta": {{}}, "success": true }} Failing Case: Input: ExpectColumnMedianToBeBetween( column="test2", min_value=3, max_value=5 ) Output: {{ "exception_info": {{ "raised_exception": false, "exception_traceback": null, "exception_message": null }}, "result": {{ "observed_value": 2.75 }}, "meta": {{}}, "success": false }} """ # noqa: E501 # FIXME CoP min_value: Optional[Comparable] = pydantic.Field( default=None, description=MIN_VALUE_DESCRIPTION ) max_value: Optional[Comparable] = pydantic.Field( default=None, description=MAX_VALUE_DESCRIPTION ) strict_min: Union[bool, SuiteParameterDict] = pydantic.Field( default=False, description=STRICT_MIN_DESCRIPTION ) strict_max: Union[bool, SuiteParameterDict] = pydantic.Field( default=False, description=STRICT_MAX_DESCRIPTION ) library_metadata: ClassVar[Dict[str, Union[str, list, bool]]] = { "maturity": "production", "tags": ["core expectation", "column aggregate expectation"], "contributors": ["@great_expectations"], "requirements": [], "has_full_test_suite": True, "manually_reviewed_code": True, } _library_metadata = library_metadata # Setting necessary computation metric dependencies and defining kwargs, as well as assigning kwargs default values\ # noqa: E501 # FIXME CoP metric_dependencies = ("column.median",) success_keys = ( "min_value", "strict_min", "max_value", "strict_max", ) args_keys = ( "column", "min_value", "max_value", "strict_min", "strict_max", ) class Config: title = "Expect column median to be between" @staticmethod def schema_extra( schema: Dict[str, Any], model: Type[ExpectColumnMedianToBeBetween] ) -> None: ColumnAggregateExpectation.Config.schema_extra(schema, model) schema["properties"]["metadata"]["properties"].update( { "data_quality_issues": { "title": "Data Quality 
Issues", "type": "array", "const": DATA_QUALITY_ISSUES, }, "library_metadata": { "title": "Library Metadata", "type": "object", "const": model._library_metadata, }, "short_description": { "title": "Short Description", "type": "string", "const": EXPECTATION_SHORT_DESCRIPTION, }, "supported_data_sources": { "title": "Supported Data Sources", "type": "array", "const": SUPPORTED_DATA_SOURCES, }, } ) @classmethod @override def _prescriptive_template( cls, renderer_configuration: RendererConfiguration, ) -> RendererConfiguration: add_param_args: AddParamArgs = ( ("column", RendererValueType.STRING), ("min_value", [RendererValueType.NUMBER, RendererValueType.DATETIME]), ("max_value", [RendererValueType.NUMBER, RendererValueType.DATETIME]), ("strict_min", RendererValueType.BOOLEAN), ("strict_max", RendererValueType.BOOLEAN), ) for name, param_type in add_param_args: renderer_configuration.add_param(name=name, param_type=param_type) params = renderer_configuration.params if not params.min_value and not params.max_value: template_str = "median may have any numerical value." else: at_least_str = "greater than or equal to" if params.strict_min: at_least_str = cls._get_strict_min_string( renderer_configuration=renderer_configuration ) at_most_str = "less than or equal to" if params.strict_max: at_most_str = cls._get_strict_max_string( renderer_configuration=renderer_configuration ) if params.min_value and params.max_value: template_str = ( f"median must be {at_least_str} $min_value and {at_most_str} $max_value." ) elif not params.min_value: template_str = f"median must be {at_most_str} $max_value." else: template_str = f"median must be {at_least_str} $min_value." 
if renderer_configuration.include_column_name: template_str = f"$column {template_str}" renderer_configuration.template_str = template_str return renderer_configuration @classmethod @override @renderer(renderer_type=LegacyRendererType.PRESCRIPTIVE) @render_suite_parameter_string def _prescriptive_renderer( # type: ignore[override] # TODO: Fix this type ignore cls, configuration: ExpectationConfiguration, result: Optional[ExpectationValidationResult] = None, runtime_configuration: Optional[dict] = None, **kwargs, ): runtime_configuration = runtime_configuration or {} include_column_name = runtime_configuration.get("include_column_name") is not False styling = runtime_configuration.get("styling") params = substitute_none_for_missing( configuration.kwargs, [ "column", "min_value", "max_value", "row_condition", "condition_parser", "strict_min", "strict_max", ], ) if (params["min_value"] is None) and (params["max_value"] is None): template_str = "median may have any numerical value." else: at_least_str, at_most_str = handle_strict_min_max(params) if params["min_value"] is not None and params["max_value"] is not None: template_str = ( f"median must be {at_least_str} $min_value and {at_most_str} $max_value." ) elif params["min_value"] is None: template_str = f"median must be {at_most_str} $max_value." elif params["max_value"] is None: template_str = f"median must be {at_least_str} $min_value." 
else: raise ValueError("unresolvable template_str") # noqa: TRY003 # FIXME CoP if include_column_name: template_str = f"$column {template_str}" if params["row_condition"] is not None: conditional_template_str = parse_row_condition_string(params["row_condition"]) template_str, styling = _style_row_condition( conditional_template_str, template_str, params, styling, ) return [ RenderedStringTemplateContent( content_block_type="string_template", string_template={ "template": template_str, "params": params, "styling": styling, }, ) ] @override def _validate( self, metrics: Dict, runtime_configuration: Optional[dict] = None, execution_engine: Optional[ExecutionEngine] = None, ): return self._validate_metric_value_between( metric_name="column.median", metrics=metrics, runtime_configuration=runtime_configuration, execution_engine=execution_engine, )
ExpectColumnMedianToBeBetween
python
scipy__scipy
scipy/sparse/tests/test_base.py
{ "start": 216169, "end": 217435 }
class ____(_MatrixMixin, TestBSR): spcreator = bsr_matrix TestBSR.init_class() TestBSRMatrix.init_class() #------------------------------------------------------------------------------ # Tests for non-canonical representations (with duplicates, unsorted indices) #------------------------------------------------------------------------------ def _same_sum_duplicate(data, *inds, **kwargs): """Duplicates entries to produce the same matrix""" indptr = kwargs.pop('indptr', None) if np.issubdtype(data.dtype, np.bool_) or \ np.issubdtype(data.dtype, np.unsignedinteger): if indptr is None: return (data,) + inds else: return (data,) + inds + (indptr,) zeros_pos = (data == 0).nonzero() # duplicate data data = data.repeat(2, axis=0) data[::2] -= 1 data[1::2] = 1 # don't spoil all explicit zeros if zeros_pos[0].size > 0: pos = tuple(p[0] for p in zeros_pos) pos1 = (2*pos[0],) + pos[1:] pos2 = (2*pos[0]+1,) + pos[1:] data[pos1] = 0 data[pos2] = 0 inds = tuple(indices.repeat(2) for indices in inds) if indptr is None: return (data,) + inds else: return (data,) + inds + (indptr * 2,)
TestBSRMatrix
python
openai__openai-python
src/openai/types/responses/response_input_item_param.py
{ "start": 4784, "end": 5268 }
class ____(TypedDict, total=False): id: Required[str] """The unique ID of the image generation call.""" result: Required[Optional[str]] """The generated image encoded in base64.""" status: Required[Literal["in_progress", "completed", "generating", "failed"]] """The status of the image generation call.""" type: Required[Literal["image_generation_call"]] """The type of the image generation call. Always `image_generation_call`."""
ImageGenerationCall
python
Netflix__metaflow
metaflow/plugins/argo/argo_workflows_decorator.py
{ "start": 297, "end": 8282 }
class ____(StepDecorator): name = "argo_workflows_internal" defaults = {"auto-emit-argo-events": True} def task_pre_step( self, step_name, task_datastore, metadata, run_id, task_id, flow, graph, retry_count, max_user_code_retries, ubf_context, inputs, ): self.task_id = task_id self.run_id = run_id triggers = [] # Expose event triggering metadata through current singleton for key, payload in os.environ.items(): if key.startswith("METAFLOW_ARGO_EVENT_PAYLOAD_"): if payload != "null": # Argo-Workflow's None try: payload = json.loads(payload) except (TypeError, ValueError): # There could be arbitrary events that Metaflow doesn't know of payload = {} triggers.append( { "timestamp": payload.get("timestamp"), "id": payload.get("id"), "name": payload.get("name"), # will exist since filter "type": key[len("METAFLOW_ARGO_EVENT_PAYLOAD_") :].split( "_", 1 )[ 0 ], # infer type from env var key # Add more event metadata here in the future } ) meta = {} if triggers: # Enable current.trigger current._update_env({"trigger": Trigger(triggers)}) # Luckily there aren't many events for us to be concerned about the # size of the metadata field yet! However we don't really need this # metadata outside of the start step so we can save a few bytes in the # db. if step_name == "start": meta["execution-triggers"] = json.dumps(triggers) meta["argo-workflow-template"] = os.environ["ARGO_WORKFLOW_TEMPLATE"] meta["argo-workflow-name"] = os.environ["ARGO_WORKFLOW_NAME"] meta["argo-workflow-namespace"] = os.environ["ARGO_WORKFLOW_NAMESPACE"] meta["auto-emit-argo-events"] = self.attributes["auto-emit-argo-events"] meta["argo-workflow-template-owner"] = os.environ["METAFLOW_OWNER"] entries = [ MetaDatum( field=k, value=v, type=k, tags=["attempt_id:{0}".format(retry_count)] ) for k, v in meta.items() ] # Register book-keeping metadata for debugging. 
metadata.register_metadata(run_id, step_name, task_id, entries) def task_finished( self, step_name, flow: FlowSpec, graph: FlowGraph, is_task_ok, retry_count, max_user_code_retries, ): if not is_task_ok: # The task finished with an exception - execution won't # continue so no need to do anything here. return # For `foreach`s, we need to dump the cardinality of the fanout # into a file so that Argo Workflows can properly configure # the subsequent fanout task via an Output parameter # # Docker and PNS workflow executors can get output parameters from the base # layer (e.g. /tmp), but the Kubelet nor the K8SAPI nor the emissary executors # can. It is also unlikely we can get output parameters from the base layer if # we run pods with a security context. We work around this constraint by # mounting an emptyDir volume. if graph[step_name].type == "foreach": if graph[step_name].parallel_foreach: # If a node is marked as a `parallel_foreach`, pass down the value of # `num_parallel` to the subsequent steps. with open("/mnt/out/num_parallel", "w") as f: json.dump(flow._parallel_ubf_iter.num_parallel, f) # Set splits to 1 since parallelism is handled by JobSet. flow._foreach_num_splits = 1 with open("/mnt/out/task_id_entropy", "w") as file: import uuid file.write(uuid.uuid4().hex[:6]) with open("/mnt/out/splits", "w") as file: json.dump(list(range(flow._foreach_num_splits)), file) with open("/mnt/out/split_cardinality", "w") as file: json.dump(flow._foreach_num_splits, file) # For conditional branches we need to record the value of the switch to disk, in order to pass it as an # output from the switching step to be used further down the DAG if graph[step_name].type == "split-switch": # TODO: A nicer way to access the chosen step? _out_funcs, _ = flow._transition chosen_step = _out_funcs[0] with open("/mnt/out/switch_step", "w") as file: file.write(chosen_step) # For steps that have a `@parallel` decorator set to them, we will be relying on Jobsets # to run the task. 
In this case, we cannot set anything in the # `/mnt/out` directory, since such form of output mounts are not available to Jobset executions. if not graph[step_name].parallel_step: # Unfortunately, we can't always use pod names as task-ids since the pod names # are not static across retries. We write the task-id to a file that is read # by the next task here. with open("/mnt/out/task_id", "w") as file: file.write(self.task_id) # Emit Argo Events given that the flow has succeeded. Given that we only # emit events when the task succeeds, we can piggy back on this decorator # hook which is guaranteed to execute only after rest of the task has # finished execution. if self.attributes["auto-emit-argo-events"]: # Event name is set to metaflow.project.branch.step so that users can # place explicit dependencies on namespaced events. Also, argo events # sensors don't allow for filtering against absent fields - which limits # our ability to subset non-project namespaced events. # TODO: Check length limits for fields in Argo Events event = ArgoEvent( name="metaflow.%s.%s" % (current.get("project_flow_name", flow.name), step_name) ) # There should only be one event generated even when the task is retried. # Take care to only add to the list and not modify existing values. event.add_to_payload("id", current.pathspec) event.add_to_payload("pathspec", current.pathspec) event.add_to_payload("flow_name", flow.name) event.add_to_payload("run_id", self.run_id) event.add_to_payload("step_name", step_name) event.add_to_payload("task_id", self.task_id) # Add @project decorator related fields. These are used to subset # @trigger_on_finish related filters. for key in ( "project_name", "branch_name", "is_user_branch", "is_production", "project_flow_name", ): if current.get(key): event.add_to_payload(key, current.get(key)) # Add more fields here... 
event.add_to_payload("auto-generated-by-metaflow", True) # Keep in mind that any errors raised here will fail the run but the task # will still be marked as success. That's why we explicitly swallow any # errors and instead print them to std.err. event.safe_publish(ignore_errors=True)
ArgoWorkflowsInternalDecorator
python
allegroai__clearml
clearml/backend_api/services/v2_9/events.py
{ "start": 51079, "end": 54346 }
class GetMultiTaskPlotsResponse(Response):
    """
    Response of events.get_multi_task_plots endpoint.

    :param plots: Plots mapping (keyed by task name)
    :type plots: dict
    :param returned: Number of results returned
    :type returned: int
    :param total: Total number of results available for this query
    :type total: float
    :param scroll_id: Scroll ID for getting more results
    :type scroll_id: str
    """

    _service = "events"
    _action = "get_multi_task_plots"
    _version = "2.9"
    _schema = {
        "definitions": {},
        "properties": {
            "plots": {
                "description": "Plots mapping (keyed by task name)",
                "type": ["object", "null"],
            },
            "returned": {
                "description": "Number of results returned",
                "type": ["integer", "null"],
            },
            "scroll_id": {
                "description": "Scroll ID for getting more results",
                "type": ["string", "null"],
            },
            "total": {
                "description": "Total number of results available for this query",
                "type": ["number", "null"],
            },
        },
        "type": "object",
    }

    def __init__(
        self,
        plots: Optional[dict] = None,
        returned: Optional[int] = None,
        total: Optional[float] = None,
        scroll_id: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        super(GetMultiTaskPlotsResponse, self).__init__(**kwargs)
        self.plots = plots
        self.returned = returned
        self.total = total
        self.scroll_id = scroll_id

    @schema_property("plots")
    def plots(self) -> Optional[dict]:
        return self._property_plots

    @plots.setter
    def plots(self, value: Optional[dict]) -> None:
        if value is None:
            self._property_plots = None
            return
        self.assert_isinstance(value, "plots", (dict,))
        self._property_plots = value

    @schema_property("returned")
    def returned(self) -> Optional[int]:
        return self._property_returned

    @returned.setter
    def returned(self, value: Optional[int]) -> None:
        if value is None:
            self._property_returned = None
            return
        # Whole-valued floats (typical of JSON-deserialized numbers) are
        # coerced to int before the integer type check.
        if isinstance(value, float) and value.is_integer():
            value = int(value)
        self.assert_isinstance(value, "returned", six.integer_types)
        self._property_returned = value

    @schema_property("total")
    def total(self) -> Optional[float]:
        return self._property_total

    @total.setter
    def total(self, value: Optional[float]) -> None:
        if value is None:
            self._property_total = None
            return
        self.assert_isinstance(value, "total", six.integer_types + (float,))
        self._property_total = value

    @schema_property("scroll_id")
    def scroll_id(self) -> Optional[str]:
        return self._property_scroll_id

    @scroll_id.setter
    def scroll_id(self, value: Optional[str]) -> None:
        if value is None:
            self._property_scroll_id = None
            return
        self.assert_isinstance(value, "scroll_id", six.string_types)
        self._property_scroll_id = value
GetMultiTaskPlotsResponse
python
numba__numba
numba/cuda/simulator/kernelapi.py
{ "start": 1544, "end": 3628 }
class ____(object): ''' CUDA Shared arrays. Limitations: assumes that only one call to cuda.shared.array is on a line, and that that line is only executed once per thread. i.e.:: a = cuda.shared.array(...); b = cuda.shared.array(...) will erroneously alias a and b, and:: for i in range(10): sharedarrs[i] = cuda.shared.array(...) will alias all arrays created at that point (though it is not certain that this would be supported by Numba anyway). ''' def __init__(self, dynshared_size): self._allocations = {} self._dynshared_size = dynshared_size self._dynshared = np.zeros(dynshared_size, dtype=np.byte) def array(self, shape, dtype): if isinstance(dtype, types.Type): dtype = numpy_support.as_dtype(dtype) # Dynamic shared memory is requested with size 0 - this all shares the # same underlying memory if shape == 0: # Count must be the maximum number of whole elements that fit in the # buffer (Numpy complains if the buffer is not a multiple of the # element size) count = self._dynshared_size // dtype.itemsize return np.frombuffer(self._dynshared.data, dtype=dtype, count=count) # Otherwise, identify allocations by source file and line number # We pass the reference frame explicitly to work around # http://bugs.python.org/issue25108 stack = traceback.extract_stack(sys._getframe()) caller = stack[-2][0:2] res = self._allocations.get(caller) if res is None: res = np.empty(shape, dtype) self._allocations[caller] = res return res addlock = threading.Lock() sublock = threading.Lock() andlock = threading.Lock() orlock = threading.Lock() xorlock = threading.Lock() maxlock = threading.Lock() minlock = threading.Lock() compare_and_swaplock = threading.Lock() caslock = threading.Lock() inclock = threading.Lock() declock = threading.Lock() exchlock = threading.Lock()
FakeCUDAShared
python
pypa__warehouse
tests/unit/utils/test_paginate.py
{ "start": 2238, "end": 4065 }
class ____: def test_slices_and_length(self): wrapper = paginate._OpenSearchWrapper(FakeQuery([1, 2, 3, 4, 5, 6])) assert wrapper[1:3] == [2, 3] assert len(wrapper) == 6 def test_slice_start_clamps_to_max(self): wrapper = paginate._OpenSearchWrapper(FakeQuery([1, 2, 3, 4, 5, 6])) wrapper.max_results = 5 assert wrapper[6:10] == [] assert len(wrapper) == 5 def test_slice_end_clamps_to_max(self): wrapper = paginate._OpenSearchWrapper(FakeQuery([1, 2, 3, 4, 5, 6])) wrapper.max_results = 5 assert wrapper[1:10] == [2, 3, 4, 5] assert len(wrapper) == 5 def test_second_slice_fails(self): wrapper = paginate._OpenSearchWrapper(FakeQuery([1, 2, 3, 4, 5, 6])) wrapper[1:3] with pytest.raises(RuntimeError): wrapper[1:3] def test_len_before_slice_fails(self): wrapper = paginate._OpenSearchWrapper(FakeQuery([1, 2, 3, 4, 5, 6])) with pytest.raises(RuntimeError): len(wrapper) def test_best_guess_suggestion(self): fake_option = pretend.stub() query = FakeSuggestQuery([1, 2, 3, 4, 5, 6], options=[fake_option]) wrapper = paginate._OpenSearchWrapper(query) wrapper[1:3] assert wrapper.best_guess == fake_option def test_best_guess_suggestion_no_suggestions(self): query = FakeSuggestQuery([1, 2, 3, 4, 5, 6], suggestion=[]) wrapper = paginate._OpenSearchWrapper(query) wrapper[1:3] assert wrapper.best_guess is None def test_best_guess_suggestion_no_options(self): query = FakeSuggestQuery([1, 2, 3, 4, 5, 6], options=[]) wrapper = paginate._OpenSearchWrapper(query) wrapper[1:3] assert wrapper.best_guess is None
TestOpenSearchWrapper
python
pytorch__pytorch
test/dynamo/test_fx_passes_pre_grad.py
{ "start": 183, "end": 1169 }
class ____(torch._dynamo.test_case.TestCase): @mock.patch("torch._inductor.utils.ShapeProp.propagate") def test_pass_execution_and_save(self, mock_shape_prop): class TestModule(torch.nn.Module): def __init__(self) -> None: super().__init__() self.param = torch.nn.Parameter(torch.ones(4, 4)) def forward(self, x: torch.Tensor) -> torch.Tensor: return self.param + x def fx_pass(graph: torch.fx.GraphModule) -> None: return sample_input = torch.randn(4, 4) m = TestModule() m(sample_input) exported_program = torch.export.export(m, (sample_input,), strict=True) gm = exported_program.graph_module pass_execution_and_save(fx_pass, gm, sample_input, "Apply testing pass") mock_shape_prop.assert_called_once() if __name__ == "__main__": from torch._dynamo.test_case import run_tests run_tests()
FxPassesPreGradTests
python
walkccc__LeetCode
solutions/897. Increasing Order Search Tree/897.py
{ "start": 0, "end": 266 }
class ____: def increasingBST(self, root: TreeNode, tail: TreeNode = None) -> TreeNode: if not root: return tail res = self.increasingBST(root.left, root) root.left = None root.right = self.increasingBST(root.right, tail) return res
Solution
python
PrefectHQ__prefect
tests/test_tasks.py
{ "start": 6192, "end": 17505 }
# NOTE(review): line-collapsed dump of Prefect's task-calling test suite; code
# below is kept byte-identical. It covers: sync/async tasks inside sync/async
# flows, debug mode, task dependencies, variadic args/kwargs, argument
# non-mutation, failure propagation, callable-object tasks, instance/class/
# static-method tasks (incl. Python 3.13 @classmethod chaining differences),
# task_run_name templating, and cache_result_in_memory behavior.
# NOTE(review): the masked class name `____` is TestTaskCall per the dataset
# target row — left untouched here since this is a documentation-only pass.
class ____: def test_sync_task_called_inside_sync_flow(self): @task def foo(x): return x @flow def bar(): return foo(1) assert bar() == 1 async def test_async_task_called_inside_async_flow(self): @task async def foo(x): return x @flow async def bar(): return await foo(1) assert await bar() == 1 async def test_sync_task_called_inside_async_flow(self): @task def foo(x): return x @flow async def bar(): return foo(1) assert await bar() == 1 def test_task_call_with_debug_mode(self): @task def foo(x): return x @flow def bar(): return foo(1) with temporary_settings({PREFECT_DEBUG_MODE: True}): assert bar() == 1 def test_task_called_with_task_dependency(self): @task def foo(x): return x @task def bar(y): return y + 1 @flow def test_flow(): return bar(foo(1)) assert test_flow() == 2 def test_task_with_variadic_args(self): @task def foo(*foo, bar): return foo, bar @flow def test_flow(): return foo(1, 2, 3, bar=4) assert test_flow() == ((1, 2, 3), 4) def test_task_with_variadic_keyword_args(self): @task def foo(foo, bar, **foobar): return foo, bar, foobar @flow def test_flow(): return foo(1, 2, x=3, y=4, z=5) assert test_flow() == (1, 2, dict(x=3, y=4, z=5)) def test_task_doesnt_modify_args(self): @task def identity(x): return x @task def appender(x): x.append(3) return x val = [1, 2] assert identity(val) is val assert val == [1, 2] assert appender(val) is val assert val == [1, 2, 3] async def test_task_failure_raises_in_flow(self): @task def foo(): raise ValueError("Test") @flow def bar(): foo() return "bar" state = bar(return_state=True) assert state.is_failed() with pytest.raises(ValueError, match="Test"): await state.result() def test_task_with_name_supports_callable_objects(self): class A: def __call__(self, *_args: Any, **_kwargs: Any) -> Any: return "hello" a = A() task = Task(fn=a, name="Task") assert task.fn is a def test_task_supports_callable_objects(self): class A: def __call__(self, *_args: Any, **_kwargs: Any) -> Any: return "hello" a = A() task = Task(fn=a) 
assert task.fn is a def test_task_run_with_name_from_callable_object(self): class Foo: message = "hello" def __call__(self, prefix: str, suffix: str) -> Any: return prefix + self.message + suffix obj = Foo() foo = Task(fn=obj, name="Task") @flow def bar(): return foo("a", suffix="b") assert bar() == "ahellob" def test_task_run_from_callable_object(self): class Foo: message = "hello" def __call__(self, prefix: str, suffix: str) -> Any: return prefix + self.message + suffix obj = Foo() foo = Task(fn=obj) @flow def bar(): return foo("a", suffix="b") assert bar() == "ahellob" class BaseFooModel(pydantic.BaseModel): model_config = pydantic.ConfigDict(ignored_types=(Task,)) x: int class BaseFoo: def __init__(self, x: int): self.x = x @pytest.mark.parametrize("T", [BaseFoo, BaseFooModel]) def test_task_supports_instance_methods(self, T): class Foo(T): @task def instance_method(self): return self.x f = Foo(x=1) assert Foo(x=5).instance_method() == 5 # ensure the instance binding is not global assert f.instance_method() == 1 assert isinstance(Foo(x=10).instance_method, Task) @pytest.mark.parametrize("T", [BaseFoo, BaseFooModel]) def test_task_supports_instance_methods_called_with_instance(self, T): """ Regression test for https://github.com/PrefectHQ/prefect/issues/17649 """ class Foo(T): @task def instance_method(self): return self.x f = Foo(x=1) # call like a class method with provided instance assert Foo.instance_method(f) == 1 # call as instance method to ensure there was no class binding in above call assert f.instance_method() == 1 @pytest.mark.parametrize("T", [BaseFoo, BaseFooModel]) def test_task_supports_class_methods(self, T): class Foo(T): @task @classmethod def class_method(cls): return cls.__name__ @classmethod @task def class_method_of_a_different_order(cls): return cls.__name__ assert Foo.class_method() == "Foo" assert isinstance(Foo.class_method, Task) if sys.version_info < (3, 13): assert Foo.class_method_of_a_different_order() == "Foo" assert 
isinstance(Foo.class_method_of_a_different_order, Task) else: assert Foo.class_method_of_a_different_order() == "Foo" # Doesn't show up as a task because @classmethod isn't chainable in Python 3.13+ assert not isinstance(Foo.class_method_of_a_different_order, Task) @pytest.mark.parametrize("T", [BaseFoo, BaseFooModel]) def test_task_supports_static_methods(self, T): class Foo(T): @staticmethod @task def static_method(): return "static" @staticmethod @task def static_method_of_a_different_order(): return "static" assert Foo.static_method() == "static" assert isinstance(Foo.static_method, Task) assert Foo.static_method_of_a_different_order() == "static" assert isinstance(Foo.static_method_of_a_different_order, Task) def test_instance_method_doesnt_create_copy_of_self(self): class Foo(pydantic.BaseModel): model_config = dict( ignored_types=(prefect.Flow, prefect.Task), ) @task def get_x(self): return self f = Foo() # assert that the value is equal to the original assert f.get_x() == f # assert that the value IS the original and was never copied assert f.get_x() is f def test_instance_method_doesnt_create_copy_of_args(self): class Foo(pydantic.BaseModel): model_config = dict( ignored_types=(prefect.Flow, prefect.Task), ) x: dict @task def get_x(self): return self.x val = dict(a=1) f = Foo(x=val) # this is surprising but pydantic sometimes copies values during # construction/validation (it doesn't for nested basemodels, by default) # Therefore this assert is to set a baseline for the test, because if # you try to write the test as `assert f.get_x() is val` it will fail # and it's not Prefect's fault. 
assert f.x is not val # assert that the value is equal to the original assert f.get_x() == f.x # assert that the value IS the original and was never copied assert f.get_x() is f.x def test_task_run_name_can_access_self_arg_for_instance_methods(self): class Foo: a = 10 @task(task_run_name="{self.a}|{x}") def instance_method(self, x): return TaskRunContext.get() f = Foo() context = f.instance_method(x=5) assert context.task_run.name == "10|5" @pytest.mark.parametrize("T", [BaseFoo, BaseFooModel]) async def test_task_supports_async_instance_methods(self, T): class Foo(T): @task async def instance_method(self): return self.x f = Foo(x=1) assert await Foo(x=5).instance_method() == 5 # ensure the instance binding is not global assert await f.instance_method() == 1 assert isinstance(Foo(x=10).instance_method, Task) @pytest.mark.parametrize("T", [BaseFoo, BaseFooModel]) async def test_task_supports_async_class_methods(self, T): class Foo(T): @task @classmethod async def class_method(cls): return cls.__name__ @classmethod @task async def class_method_of_a_different_order(cls): return cls.__name__ assert await Foo.class_method() == "Foo" assert isinstance(Foo.class_method, Task) if sys.version_info < (3, 13): assert await Foo.class_method_of_a_different_order() == "Foo" assert isinstance(Foo.class_method_of_a_different_order, Task) else: assert await Foo.class_method_of_a_different_order() == "Foo" # Doesn't show up as a task because @classmethod isn't chainable in Python 3.13+ assert not isinstance(Foo.class_method_of_a_different_order, Task) @pytest.mark.parametrize("T", [BaseFoo, BaseFooModel]) async def test_task_supports_async_static_methods(self, T): class Foo(T): @task @staticmethod async def static_method(): return "static" @staticmethod @task async def static_method_of_a_different_order(): return "static" assert await Foo.static_method() == "static" assert isinstance(Foo.static_method, Task) assert await Foo.static_method_of_a_different_order() == "static" assert 
isinstance(Foo.static_method_of_a_different_order, Task) def test_returns_when_cache_result_in_memory_is_false_sync_task(self): @task(cache_result_in_memory=False) def my_task(): return 42 assert my_task() == 42 async def test_returns_when_cache_result_in_memory_is_false_async_task(self): @task(cache_result_in_memory=False) async def my_task(): return 42 assert await my_task() == 42 def test_raises_correct_error_when_cache_result_in_memory_is_false_sync_task(self): @task(cache_result_in_memory=False) def my_task(): raise ValueError("Test") with pytest.raises(ValueError, match="Test"): my_task() async def test_raises_correct_error_when_cache_result_in_memory_is_false_async_task( self, ): @task(cache_result_in_memory=False) async def my_task(): raise ValueError("Test") with pytest.raises(ValueError, match="Test"): await my_task()
TestTaskCall
python
getsentry__sentry
src/sentry/lang/native/sources.py
{ "start": 7429, "end": 29662 }
# NOTE(review): line-collapsed dump of sentry.lang.native.sources symbol-source
# helpers; code below is kept byte-identical. Contents: the InvalidSourcesError
# exception (masked name `____` per the dataset target row), internal-source
# URL construction, scraping config, source validation/parsing/backfill,
# secret redaction, per-project source resolution (incl. GCS token
# impersonation with a TTL cache), alias reversal, killswitch filtering,
# internal-source redaction of symbolication responses, and Apple symbol
# statistics collection. Line breaks fall mid-docstring in several places, so
# no inline comments are inserted — only this header.
class ____(Exception): pass def get_internal_url_prefix() -> str: """ Returns the `internal-url-prefix` normalized in such a way that it works in local development environments. """ internal_url_prefix = options.get("system.internal-url-prefix") if not internal_url_prefix: internal_url_prefix = options.get("system.url-prefix") replacements = ["localhost", "127.0.0.1"] if "DJANGO_LIVE_TEST_SERVER_ADDRESS" in os.environ: replacements.append(os.environ["DJANGO_LIVE_TEST_SERVER_ADDRESS"]) for replacement in replacements: internal_url_prefix = internal_url_prefix.replace(replacement, "host.docker.internal") assert internal_url_prefix return internal_url_prefix.rstrip("/") def get_internal_source(project: Project): """ Returns the source configuration for a Sentry project. """ sentry_source_url = "{}{}".format( get_internal_url_prefix(), reverse( "sentry-api-0-dsym-files", kwargs={ "organization_id_or_slug": project.organization.slug, "project_id_or_slug": project.slug, }, ), ) if last_upload := get_last_upload(project.id): # Adding a random query string parameter here makes sure that the # Symbolicator-internal `list_files` cache that is querying this API # is not being hit. This means that uploads will be immediately visible # to Symbolicator, and not depending on its internal cache TTL. sentry_source_url += f"?_last_upload={last_upload}" return { "type": "sentry", "id": INTERNAL_SOURCE_NAME, "url": sentry_source_url, "token": get_system_token(), } def get_internal_artifact_lookup_source_url(project: Project): """ Returns the url used as a part of source configuration for the Sentry artifact-lookup API. 
""" return "{}{}".format( get_internal_url_prefix(), reverse( "sentry-api-0-project-artifact-lookup", kwargs={ "organization_id_or_slug": project.organization.slug, "project_id_or_slug": project.slug, }, ), ) def get_scraping_config(project: Project) -> dict[str, Any]: allow_scraping_org_level = project.organization.get_option("sentry:scrape_javascript", True) allow_scraping_project_level = project.get_option("sentry:scrape_javascript", True) allow_scraping = allow_scraping_org_level and allow_scraping_project_level verify_ssl = project.get_option("sentry:verify_ssl", True) allowed_origins = [] scraping_headers = {} if allow_scraping: allowed_origins = list(get_origins(project)) token = project.get_option("sentry:token") if token: token_header = project.get_option("sentry:token_header") or "X-Sentry-Token" scraping_headers[token_header] = token return { "enabled": allow_scraping, "headers": scraping_headers, "allowed_origins": allowed_origins, "verify_ssl": verify_ssl, } def get_internal_artifact_lookup_source(project: Project): """ Returns the source configuration for the Sentry artifact-lookup API. """ return { "type": "sentry", "id": INTERNAL_SOURCE_NAME, "url": get_internal_artifact_lookup_source_url(project), "token": get_system_token(), } def is_internal_source_id(source_id: str): """Determines if a DIF object source identifier is reserved for internal sentry use. This is trivial, but multiple functions in this file need to use the same definition. """ return source_id.startswith("sentry") def normalize_user_source(source): """Sources supplied from the user frontend might not match the format that symbolicator expects. For instance we currently do not permit headers to be configured in the UI, but we allow basic auth to be configured for HTTP. This means that we need to convert from username/password into the HTTP basic auth header. 
""" if source.get("type") == "http": username = source.pop("username", None) password = source.pop("password", None) if username or password: auth = base64.b64encode( ("{}:{}".format(username or "", password or "")).encode("utf-8") ) source["headers"] = { "authorization": "Basic %s" % auth.decode("ascii"), } return source def secret_fields(source_type): """ Returns a string list of all of the fields that contain a secret in a given source. """ if source_type == "appStoreConnect": yield from ["appconnectPrivateKey"] elif source_type == "http": yield "password" elif source_type == "s3": yield "secret_key" elif source_type == "gcs": yield "private_key" yield from [] def validate_sources(sources, schema=SOURCES_WITHOUT_APPSTORE_CONNECT): """ Validates sources against the JSON schema and checks that their IDs are ok. """ try: jsonschema.validate(sources, schema) except jsonschema.ValidationError: raise InvalidSourcesError(f"Failed to validate source {redact_source_secrets(sources)}") ids = set() for source in sources: if is_internal_source_id(source["id"]): raise InvalidSourcesError('Source ids must not start with "sentry:"') if source["id"] in ids: raise InvalidSourcesError("Duplicate source id: {}".format(source["id"])) ids.add(source["id"]) def parse_sources(config, filter_appconnect): """ Parses the given sources in the config string (from JSON). """ if not config: return [] try: sources = orjson.loads(config) except Exception as e: raise InvalidSourcesError("Sources are not valid serialised JSON") from e # remove App Store Connect sources (we don't need them in Symbolicator) if filter_appconnect: sources = [src for src in sources if src.get("type") != "appStoreConnect"] validate_sources(sources) return sources def parse_backfill_sources(sources_json, original_sources): """ Parses a json string of sources passed in from a client and backfills any redacted secrets by finding their previous values stored in original_sources. 
""" if not sources_json: return [] try: sources = orjson.loads(sources_json) except Exception as e: raise InvalidSourcesError("Sources are not valid serialised JSON") from e orig_by_id = {src["id"]: src for src in original_sources} for source in sources: backfill_source(source, orig_by_id) validate_sources(sources, schema=SOURCES_SCHEMA) return sources def backfill_source(source, original_sources_by_id): """ Backfills redacted secrets in a source by finding their previous values stored in original_sources_by_id. """ for secret in secret_fields(source["type"]): if secret in source and source[secret] == {"hidden-secret": True}: secret_value = safe.get_path(original_sources_by_id, source["id"], secret) if secret_value is None: with sentry_sdk.isolation_scope(): sentry_sdk.set_tag("missing_secret", secret) sentry_sdk.set_tag("source_id", source["id"]) sentry_sdk.capture_message( "Obfuscated symbol source secret does not have a corresponding saved value in project options" ) raise InvalidSourcesError("Hidden symbol source secret is missing a value") else: source[secret] = secret_value def redact_source_secrets(config_sources: Any) -> Any: """ Returns a json data with all of the secrets redacted from every source. The original value is not mutated in the process; A clone is created and returned by this function. """ redacted_sources = deepcopy(config_sources) for source in redacted_sources: for secret in secret_fields(source["type"]): if secret in source: source[secret] = {"hidden-secret": True} return redacted_sources def get_sources_for_project(project): """ Returns a list of symbol sources for this project. """ sources = [] # The symbolicator evaluates sources in the order they are declared. Always # try to download symbols from Sentry first. project_source = get_internal_source(project) sources.append(project_source) # Check that the organization still has access to symbol sources. This # controls both builtin and external sources. 
organization = project.organization if not features.has("organizations:symbol-sources", organization): return sources # Custom sources have their own feature flag. Check them independently. if features.has("organizations:custom-symbol-sources", organization): sources_config = project.get_option("sentry:symbol_sources") else: sources_config = None if sources_config: try: custom_sources = parse_sources(sources_config, filter_appconnect=True) sources.extend( normalize_user_source(source) for source in custom_sources if source["type"] != "appStoreConnect" ) except InvalidSourcesError: # Source configs should be validated when they are saved. If this # did not happen, this indicates a bug. Record this, but do not stop # processing at this point. logger.exception("Invalid symbolicator source config") def resolve_alias(source, organization): for key in source.get("sources") or (): other_source = settings.SENTRY_BUILTIN_SOURCES.get(key) if other_source: if other_source.get("type") == "alias": yield from resolve_alias(other_source, organization) else: yield fetch_token_for_gcp_source_if_necessary(other_source, organization) def fetch_token_for_gcp_source_if_necessary(source, organization): if source.get("type") == "gcs": if "client_email" in source and "private_key" in source: return source else: client_email = source.get("client_email") token = get_gcp_token(client_email) # if target_credentials.token is None it means that the # token could not be fetched successfully if token is not None: # Create a new dict to avoid reference issues source = deepcopy(source) source["bearer_token"] = token # Remove other credentials if we have a token if "client_email" in source: del source["client_email"] if "private_key" in source: del source["private_key"] return source # Add builtin sources last to ensure that custom sources have precedence # over our defaults. 
builtin_sources = project.get_option("sentry:builtin_symbol_sources") for key, source in settings.SENTRY_BUILTIN_SOURCES.items(): if key not in builtin_sources: continue # special internal alias type expands to more than one item. This # is used to make `apple` expand to `ios`/`macos` and other # sources if configured as such. if source.get("type") == "alias": sources.extend(resolve_alias(source, organization)) else: sources.append(fetch_token_for_gcp_source_if_necessary(source, organization)) return sources # Expire the cached token 10 minutes earlier so that we can confidently pass it # to symbolicator with its configured timeout of 5 minutes @ttl_cache(ttl=TOKEN_TTL_SECONDS - 600) def get_gcp_token(client_email): # Fetch the regular credentials for GCP source_credentials, _ = google.auth.default() if source_credentials is None: return None # Impersonate the service account to give the token for symbolicator a proper scope target_credentials = impersonated_credentials.Credentials( source_credentials=source_credentials, target_principal=client_email, target_scopes=["https://www.googleapis.com/auth/cloud-platform"], lifetime=TOKEN_TTL_SECONDS, ) target_credentials.refresh(Request()) if target_credentials.token is None: return None return target_credentials.token def reverse_aliases_map(builtin_sources): """Returns a map of source IDs to their original un-aliased source ID. :param builtin_sources: The value of `settings.SENTRY_BUILTIN_SOURCES`. 
""" reverse_aliases = dict() for key, source in builtin_sources.items(): if source.get("type") != "alias": continue try: self_id = source["id"] except KeyError: continue for aliased_source in source.get("sources", []): try: aliased_source = builtin_sources[aliased_source] aliased_id = aliased_source["id"] except KeyError: continue reverse_aliases[aliased_id] = self_id return reverse_aliases def filter_ignored_sources(sources, reversed_alias_map=None): """ Filters out sources that are meant to be blocked based on a global killswitch. If any sources were de-aliased, a reverse mapping of { unaliased id: alias } should be provided for this to also recognize and filter out aliased sources. """ ignored_source_ids = options.get("symbolicator.ignored_sources") if not ignored_source_ids: return sources filtered = [] for src in sources: resolved = src["id"] alias = reversed_alias_map is not None and reversed_alias_map.get(resolved) or resolved # This covers three scenarios: # 1. The source had an alias, and the config may have used that alias to block it (alias map # lookup resolved) # 2. The source had no alias, and the config may have used the source's ID to block it # (alias map lookup returned None and fell back to resolved) # 3. The source had an alias, but the config used the source's internal unaliased ID to # block it (alias map lookup resolved but not in ignored_source_ids, resolved is in # ignored_source_ids) if alias not in ignored_source_ids and resolved not in ignored_source_ids: filtered.append(src) return filtered def redact_internal_sources(response): """Redacts information about internal sources from a response. Symbolicator responses can contain a section about DIF object file candidates where were attempted to be downloaded from the sources. This includes a full URI of where the download was attempted from. For internal sources we want to redact this in order to not leak any internal details. 
Note that this modifies the argument passed in, thus redacting in-place. It still returns the modified response. """ for module in response.get("modules", []): redact_internal_sources_from_module(module) def redact_internal_sources_from_module(module): """Redacts information about internal sources from a single module. This in-place redacts candidates from only a single module of the symbolicator response. The strategy here is for each internal source to replace the location with the DebugID. Furthermore if there are any "notfound" entries collapse them into a single entry and only show this entry if there are no entries with another status. """ sources_notfound = set() sources_other = set() new_candidates = [] for candidate in module.get("candidates", []): source_id = candidate["source"] if is_internal_source_id(source_id): # Only keep location for sentry:project. if source_id != "sentry:project": candidate.pop("location", None) # Collapse nofound statuses, collect info on sources which both have a notfound # as well as other statusses. This allows us to later filter the notfound ones. try: status = candidate.get("download", {})["status"] except KeyError: pass else: if status == "notfound": candidate.pop("location", None) # This location is bogus, remove it. if source_id in sources_notfound: continue else: sources_notfound.add(source_id) else: sources_other.add(source_id) new_candidates.append(candidate) def should_keep(candidate): """Returns `False` if the candidate should be kept in the list of candidates. This removes the candidates with a status of ``notfound`` *if* they also have another status. 
""" source_id = candidate["source"] status = candidate.get("download", {}).get("status") return status != "notfound" or source_id not in sources_other if "candidates" in module: module["candidates"] = [c for c in new_candidates if should_keep(c)] def sources_for_symbolication(project): """ Returns a list of symbol sources to attach to a native symbolication request, as well as a closure to post-process the resulting JSON response. """ sources = get_sources_for_project(project) or [] # Build some maps for use in _process_response() reverse_source_aliases = reverse_aliases_map(settings.SENTRY_BUILTIN_SOURCES) source_names = {source["id"]: source.get("name", "unknown") for source in sources} # Add a name for the special "sentry:project" source. source_names[INTERNAL_SOURCE_NAME] = "Sentry" # Add names for aliased sources. for source in settings.SENTRY_BUILTIN_SOURCES.values(): if source.get("type") == "alias": source_names[source["id"]] = source.get("name", "unknown") # Remove sources that should be ignored. This leaves a few extra entries in the alias # maps and source names maps, but that's fine. The orphaned entries in the maps will just # never be used. sources = filter_ignored_sources(sources, reverse_source_aliases) def _process_response(json): """Post-processes the JSON response. This modifies the candidates list from Symbolicator responses to undo aliased sources, hide information about unknown sources and add names to sources rather then just have their IDs. """ try: collect_apple_symbol_stats(json) except Exception as e: sentry_sdk.capture_exception(e) for module in json.get("modules") or (): for candidate in module.get("candidates") or (): # Reverse internal source aliases from the response. source_id = candidate["source"] original_source_id = reverse_source_aliases.get(source_id) if original_source_id is not None: candidate["source"] = original_source_id source_id = original_source_id # Add a "source_name" field to save the UI a lookup. 
candidate["source_name"] = source_names.get(source_id, "unknown") redact_internal_sources(json) return json return (sources, _process_response) def collect_apple_symbol_stats(json): eligible_symbols = 0 neither_has_symbol = 0 both_have_symbol = 0 # Done to temporally collect information about the events for which we don't find symbols in symx: old_has_symbol = [] symx_has_symbol = 0 for module in json.get("modules") or (): if ( module.get("debug_status", "unused") == "unused" and module.get("unwind_status", "unused") == "unused" ): continue if module["type"] != "macho": continue eligible_symbols += 1 old_found_source = None symx_has_this_symbol = False for candidate in module.get("candidates") or (): if candidate["download"]["status"] == "ok": source_id = candidate["source"] if source_id.startswith("sentry:symx"): symx_has_this_symbol = True # only compare symx to the system symbol source elif ( source_id.startswith("sentry:") and not source_id.startswith("sentry:symbol-collector") and source_id.endswith("os-source") ): old_found_source = source_id if symx_has_this_symbol: if old_found_source: both_have_symbol += 1 else: symx_has_symbol += 1 elif old_found_source: old_has_symbol.append( { "arch": module.get("arch"), "code_file": module.get("code_file"), "debug_id": module.get("debug_id"), "found_in": old_found_source, } ) else: neither_has_symbol += 1 # NOTE: It might be possible to apply a heuristic based on `code_file` here to figure out if this is # supposed to be a system symbol, and maybe also log those cases specifically as internal messages. For # now, we are only interested in rough numbers. if eligible_symbols: apple_symbol_stats = { "both": both_have_symbol, "neither": neither_has_symbol, "symx": symx_has_symbol, "old": old_has_symbol, } json["apple_symbol_stats"] = apple_symbol_stats
InvalidSourcesError
python
realpython__materials
rp-portfolio/projects/apps.py
{ "start": 36, "end": 148 }
class ____(AppConfig): default_auto_field = "django.db.models.BigAutoField" name = "projects"
ProjectsConfig
python
getsentry__sentry
src/sentry_plugins/bitbucket/endpoints/webhook.py
{ "start": 3317, "end": 5647 }
class ____(View): _handlers = {"repo:push": PushEventWebhook} def get_handler(self, event_type): return self._handlers.get(event_type) @method_decorator(csrf_exempt) def dispatch(self, request: HttpRequest, *args, **kwargs) -> HttpResponseBase: if request.method != "POST": return HttpResponse(status=405) return super().dispatch(request, *args, **kwargs) def post(self, request: Request, organization_id: int): org_exists = organization_service.check_organization_by_id( id=organization_id, only_visible=True ) if not org_exists: logger.error( "bitbucket.webhook.invalid-organization", extra={"organization_id": organization_id} ) return HttpResponse(status=400) body = bytes(request.body) if not body: logger.error( "bitbucket.webhook.missing-body", extra={"organization_id": organization_id} ) return HttpResponse(status=400) try: handler = self.get_handler(request.META["HTTP_X_EVENT_KEY"]) except KeyError: logger.exception( "bitbucket.webhook.missing-event", extra={"organization_id": organization_id} ) return HttpResponse(status=400) if not handler: return HttpResponse(status=204) address_string = str(request.META["REMOTE_ADDR"]) ip = ipaddress.ip_address(address_string) valid_ip = False for ip_range in BITBUCKET_IP_RANGES: if ip in ip_range: valid_ip = True break if not valid_ip and address_string not in BITBUCKET_IPS: logger.error( "bitbucket.webhook.invalid-ip-range", extra={"organization_id": organization_id} ) return HttpResponse(status=401) try: event = json.loads(body.decode("utf-8")) except json.JSONDecodeError: logger.exception( "bitbucket.webhook.invalid-json", extra={"organization_id": organization_id}, ) return HttpResponse(status=400) handler()(organization_id, event) return HttpResponse(status=204)
BitbucketPluginWebhookEndpoint
python
spack__spack
lib/spack/spack/llnl/util/lang.py
{ "start": 13142, "end": 20111 }
class ____(typing.MutableMapping[K, V]): """This is a hashable, comparable dictionary. Hash is performed on a tuple of the values in the dictionary.""" __slots__ = ("dict",) def __init__(self): self.dict: Dict[K, V] = {} def __getitem__(self, key: K) -> V: return self.dict[key] def __setitem__(self, key: K, value: V) -> None: self.dict[key] = value def __iter__(self) -> Iterator[K]: return iter(self.dict) def __len__(self) -> int: return len(self.dict) def __delitem__(self, key: K) -> None: del self.dict[key] def _cmp_iter(self): for _, v in sorted(self.items()): yield v def match_predicate(*args): """Utility function for making string matching predicates. Each arg can be a: * regex * list or tuple of regexes * predicate that takes a string. This returns a predicate that is true if: * any arg regex matches * any regex in a list or tuple of regexes matches. * any predicate in args matches. """ def match(string): for arg in args: if isinstance(arg, str): if re.search(arg, string): return True elif isinstance(arg, list) or isinstance(arg, tuple): if any(re.search(i, string) for i in arg): return True elif callable(arg): if arg(string): return True else: raise ValueError( "args to match_predicate must be regex, " "list of regexes, or callable." ) return False return match def dedupe(sequence, key=None): """Yields a stable de-duplication of an hashable sequence by key Args: sequence: hashable sequence to be de-duplicated key: callable applied on values before uniqueness test; identity by default. Returns: stable de-duplication of the sequence Examples: Dedupe a list of integers:: [x for x in dedupe([1, 2, 1, 3, 2])] == [1, 2, 3] [x for x in spack.llnl.util.lang.dedupe([1,-2,1,3,2], key=abs)] == [1, -2, 3] """ seen = set() for x in sequence: x_key = x if key is None else key(x) if x_key not in seen: yield x seen.add(x_key) def pretty_date(time: Union[datetime, int], now: Optional[datetime] = None) -> str: """Convert a datetime or timestamp to a pretty, relative date. 
Args: time: date to print prettily now: the date the pretty date is relative to (default is ``datetime.now()``) Returns: pretty string like "an hour ago", "Yesterday", "3 months ago", "just now", etc. Adapted from https://stackoverflow.com/questions/1551382. """ if now is None: now = datetime.now() if type(time) is int: diff = now - datetime.fromtimestamp(time) elif isinstance(time, datetime): diff = now - time else: raise ValueError("pretty_date requires a timestamp or datetime") second_diff = diff.seconds day_diff = diff.days if day_diff < 0: return "" if day_diff == 0: if second_diff < 10: return "just now" if second_diff < 60: return f"{second_diff} seconds ago" if second_diff < 120: return "a minute ago" if second_diff < 3600: return f"{second_diff // 60} minutes ago" if second_diff < 7200: return "an hour ago" if second_diff < 86400: return f"{second_diff // 3600} hours ago" if day_diff == 1: return "yesterday" if day_diff < 7: return f"{day_diff} days ago" if day_diff < 28: weeks = day_diff // 7 if weeks == 1: return "a week ago" else: return f"{day_diff // 7} weeks ago" if day_diff < 365: months = day_diff // 30 if months == 1: return "a month ago" elif months == 12: months -= 1 return f"{months} months ago" year_diff = day_diff // 365 if year_diff == 1: return "a year ago" return f"{year_diff} years ago" def pretty_string_to_date(date_str: str, now: Optional[datetime] = None) -> datetime: """Parses a string representing a date and returns a datetime object. Args: date_str: string representing a date. 
This string might be in different format (like ``YYYY``, ``YYYY-MM``, ``YYYY-MM-DD``, ``YYYY-MM-DD HH:MM``, ``YYYY-MM-DD HH:MM:SS``) or be a *pretty date* (like ``yesterday`` or ``two months ago``) Returns: datetime object corresponding to ``date_str`` """ pattern = {} now = now or datetime.now() # datetime formats pattern[re.compile(r"^\d{4}$")] = lambda x: datetime.strptime(x, "%Y") pattern[re.compile(r"^\d{4}-\d{2}$")] = lambda x: datetime.strptime(x, "%Y-%m") pattern[re.compile(r"^\d{4}-\d{2}-\d{2}$")] = lambda x: datetime.strptime(x, "%Y-%m-%d") pattern[re.compile(r"^\d{4}-\d{2}-\d{2} \d{2}:\d{2}$")] = lambda x: datetime.strptime( x, "%Y-%m-%d %H:%M" ) pattern[re.compile(r"^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}$")] = lambda x: datetime.strptime( x, "%Y-%m-%d %H:%M:%S" ) pretty_regex = re.compile(r"(a|\d+)\s*(year|month|week|day|hour|minute|second)s?\s*ago") def _n_xxx_ago(x): how_many, time_period = pretty_regex.search(x).groups() how_many = 1 if how_many == "a" else int(how_many) # timedelta natively supports time periods up to 'weeks'. 
# To apply month or year we convert to 30 and 365 days if time_period == "month": how_many *= 30 time_period = "day" elif time_period == "year": how_many *= 365 time_period = "day" kwargs = {(time_period + "s"): how_many} return now - timedelta(**kwargs) pattern[pretty_regex] = _n_xxx_ago # yesterday callback = lambda x: now - timedelta(days=1) pattern[re.compile("^yesterday$")] = callback for regexp, parser in pattern.items(): if bool(regexp.match(date_str)): return parser(date_str) raise ValueError(f'date "{date_str}" does not match any valid format') def pretty_seconds_formatter(seconds): if seconds >= 1: multiplier, unit = 1, "s" elif seconds >= 1e-3: multiplier, unit = 1e3, "ms" elif seconds >= 1e-6: multiplier, unit = 1e6, "us" else: multiplier, unit = 1e9, "ns" return lambda s: "%.3f%s" % (multiplier * s, unit) def pretty_seconds(seconds): """Seconds to string with appropriate units Arguments: seconds (float): Number of seconds Returns: str: Time string with units """ return pretty_seconds_formatter(seconds)(seconds)
HashableMap
python
sympy__sympy
sympy/core/function.py
{ "start": 27619, "end": 28681 }
class ____(Function): """ Base class for expressions resulting from the application of an undefined function. """ is_number = False name: str def __new__(cls, *args, **options) -> Expr: # type: ignore args = tuple(map(sympify, args)) u = [a.name for a in args if isinstance(a, UndefinedFunction)] if u: raise TypeError('Invalid argument: expecting an expression, not UndefinedFunction%s: %s' % ( 's'*(len(u) > 1), ', '.join(u))) obj: Expr = super().__new__(cls, *args, **options) # type: ignore return obj def _eval_as_leading_term(self, x, logx, cdir): return self @property def _diff_wrt(self): """ Allow derivatives wrt to undefined functions. Examples ======== >>> from sympy import Function, Symbol >>> f = Function('f') >>> x = Symbol('x') >>> f(x)._diff_wrt True >>> f(x).diff(x) Derivative(f(x), x) """ return True
AppliedUndef
python
gevent__gevent
src/gevent/_fileobjectcommon.py
{ "start": 2094, "end": 2822 }
class ____(object): def writeall(self, value): """ Similar to :meth:`socket.socket.sendall`, ensures that all the contents of *value* have been written (though not necessarily flushed) before returning. Returns the length of *value*. .. versionadded:: 20.12.0 """ # Do we need to play the same get_memory games we do with sockets? # And what about chunking for large values? See _socketcommon.py write = super(WriteallMixin, self).write total = len(value) while value: l = len(value) w = write(value) if w == l: break value = value[w:] return total
WriteallMixin
python
openai__openai-python
src/openai/types/beta/assistant_stream_event.py
{ "start": 5591, "end": 5820 }
class ____(BaseModel): data: Message """ Represents a message within a [thread](https://platform.openai.com/docs/api-reference/threads). """ event: Literal["thread.message.completed"]
ThreadMessageCompleted
python
jazzband__prettytable
tests/test_prettytable.py
{ "start": 11290, "end": 11962 }
class ____: """Make sure all options are properly overwritten by get_string.""" def test_border(self, city_data: PrettyTable) -> None: assert city_data.get_string() != city_data.get_string(border=False) def test_header(self, city_data: PrettyTable) -> None: assert city_data.get_string() != city_data.get_string(header=False) def test_hrules_all(self, city_data: PrettyTable) -> None: assert city_data.get_string() != city_data.get_string(hrules=HRuleStyle.ALL) def test_hrules_none(self, city_data: PrettyTable) -> None: assert city_data.get_string() != city_data.get_string(hrules=HRuleStyle.NONE)
TestOptionOverride
python
tensorflow__tensorflow
tensorflow/python/distribute/input_ops_test.py
{ "start": 10203, "end": 12069 }
class ____(test.TestCase): def _assert_datasets_equal(self, ds1, ds2): # First lets assert the structure is the same. self.assertTrue( structure.are_compatible(ds1.element_spec, ds2.element_spec)) # Now create iterators on both and assert they produce the same values. it1 = dataset_ops.make_initializable_iterator(ds1) it2 = dataset_ops.make_initializable_iterator(ds2) get_next1 = it1.get_next() get_next2 = it2.get_next() with self.cached_session(): self.evaluate([it1.initializer, it2.initializer]) val1, val2 = self.evaluate([get_next1, get_next2]) self.assertEqual(val1, val2) @test_util.run_deprecated_v1 def testOnlySource(self): ds = dataset_ops.Dataset.range(10) cloned_ds = input_ops._clone_dataset(ds) self._assert_datasets_equal(ds, cloned_ds) @test_util.run_deprecated_v1 def testSimplePipeline(self): ds = dataset_ops.Dataset.range(10).map(math_ops.square) cloned_ds = input_ops._clone_dataset(ds) self._assert_datasets_equal(ds, cloned_ds) @test_util.run_deprecated_v1 def testConcat(self): ds1 = dataset_ops.Dataset.range(10) ds2 = dataset_ops.Dataset.range(10) ds = ds1.concatenate(ds2) cloned_ds = input_ops._clone_dataset(ds) self._assert_datasets_equal(ds, cloned_ds) @test_util.run_deprecated_v1 def testZip(self): ds1 = dataset_ops.Dataset.range(10) ds2 = dataset_ops.Dataset.range(10) ds = dataset_ops.Dataset.zip((ds1, ds2)) cloned_ds = input_ops._clone_dataset(ds) self._assert_datasets_equal(ds, cloned_ds) @test_util.run_deprecated_v1 def testMultipleVariantTensors(self): ds = dataset_ops.Dataset.range(10) ds = _TestDataset(ds) cloned_ds = input_ops._clone_dataset(ds) self._assert_datasets_equal(ds, cloned_ds) if __name__ == "__main__": test.main()
CloneDatasetTest
python
numba__numba
numba/experimental/function_type.py
{ "start": 1803, "end": 12475 }
class ____(models.StructModel): """FunctionModel holds addresses of function implementations """ def __init__(self, dmm, fe_type): members = [ # Address of cfunc wrapper function. # This uses a C callconv and doesn't not support exceptions. ('c_addr', types.voidptr), # Address of PyObject* referencing the Python function # object: ('py_addr', types.voidptr), # Address of the underlying function object. # Calling through this function pointer supports all features of # regular numba function as it follows the same Numba callconv. ('jit_addr', types.voidptr), ] super(FunctionModel, self).__init__(dmm, fe_type, members) @lower_constant(types.Dispatcher) def lower_constant_dispatcher(context, builder, typ, pyval): return context.add_dynamic_addr(builder, id(pyval), info=type(pyval).__name__) @lower_constant(FunctionType) def lower_constant_function_type(context, builder, typ, pyval): typ = typ.get_precise() if isinstance(pyval, CFunc): addr = pyval._wrapper_address sfunc = cgutils.create_struct_proxy(typ)(context, builder) sfunc.c_addr = context.add_dynamic_addr(builder, addr, info=str(typ)) sfunc.py_addr = context.add_dynamic_addr(builder, id(pyval), info=type(pyval).__name__) return sfunc._getvalue() if isinstance(pyval, Dispatcher): sfunc = cgutils.create_struct_proxy(typ)(context, builder) sfunc.py_addr = context.add_dynamic_addr(builder, id(pyval), info=type(pyval).__name__) return sfunc._getvalue() if isinstance(pyval, WrapperAddressProtocol): addr = pyval.__wrapper_address__() assert typ.check_signature(pyval.signature()) sfunc = cgutils.create_struct_proxy(typ)(context, builder) sfunc.c_addr = context.add_dynamic_addr(builder, addr, info=str(typ)) sfunc.py_addr = context.add_dynamic_addr(builder, id(pyval), info=type(pyval).__name__) return sfunc._getvalue() # TODO: implement support for pytypes.FunctionType, ctypes.CFUNCTYPE raise NotImplementedError( 'lower_constant_struct_function_type({}, {}, {}, {})' .format(context, builder, typ, pyval)) def 
_get_wrapper_address(func, sig): """Return the address of a compiled cfunc wrapper function of `func`. Warning: The compiled function must be compatible with the given signature `sig`. If it is not, then result of calling the compiled function is undefined. The compatibility is ensured when passing in a first-class function to a Numba njit compiled function either as an argument or via namespace scoping. Parameters ---------- func : object A Numba cfunc or jit decoreated function or an object that implements the wrapper address protocol (see note below). sig : Signature The expected function signature. Returns ------- addr : int An address in memory (pointer value) of the compiled function corresponding to the specified signature. Note: wrapper address protocol ------------------------------ An object implements the wrapper address protocol iff the object provides a callable attribute named __wrapper_address__ that takes a Signature instance as the argument, and returns an integer representing the address or pointer value of a compiled function for the given signature. """ if not sig.is_precise(): # addr==-1 will indicate that no implementation is available # for cases where type-inference did not identified the # function type. For example, the type of an unused # jit-decorated function argument will be undefined but also # irrelevant. addr = -1 elif hasattr(func, '__wrapper_address__'): # func can be any object that implements the # __wrapper_address__ protocol. 
addr = func.__wrapper_address__() elif isinstance(func, CFunc): assert sig == func._sig addr = func.address elif isinstance(func, Dispatcher): cres = func.get_compile_result(sig) wrapper_name = cres.fndesc.llvm_cfunc_wrapper_name addr = cres.library.get_pointer_to_function(wrapper_name) else: raise NotImplementedError( f'get wrapper address of {type(func)} instance with {sig!r}') if not isinstance(addr, int): raise TypeError( f'wrapper address must be integer, got {type(addr)} instance') if addr <= 0 and addr != -1: raise ValueError(f'wrapper address of {type(func)} instance must be' f' a positive integer but got {addr} [sig={sig}]') # print(f'_get_wrapper_address[{func}]({sig=}) -> {addr}') return addr def _get_jit_address(func, sig): """Similar to ``_get_wrapper_address()`` but get the `.jit_addr` instead. """ if isinstance(func, Dispatcher): cres = func.get_compile_result(sig) jit_name = cres.fndesc.llvm_func_name addr = cres.library.get_pointer_to_function(jit_name) else: addr = 0 if not isinstance(addr, int): raise TypeError( f'jit address must be integer, got {type(addr)} instance') return addr def _lower_get_address(context, builder, func, sig, failure_mode, *, function_name): """Low-level call to <function_name>(func, sig). When calling this function, GIL must be acquired. """ pyapi = context.get_python_api(builder) # Get the cfunc wrapper address. The code below trusts that the # function numba.function._get_wrapper_address exists and can be # called with two arguments. However, if an exception is raised in # the function, then it will be caught and propagated to the # caller. 
modname = context.insert_const_string(builder.module, __name__) numba_mod = pyapi.import_module(modname) numba_func = pyapi.object_getattr_string(numba_mod, function_name) pyapi.decref(numba_mod) sig_obj = pyapi.unserialize(pyapi.serialize_object(sig)) addr = pyapi.call_function_objargs(numba_func, (func, sig_obj)) if failure_mode != 'ignore': with builder.if_then(cgutils.is_null(builder, addr), likely=False): # *function_name* has raised an exception, propagate it # to the caller. if failure_mode == 'return_exc': context.call_conv.return_exc(builder) elif failure_mode == 'return_null': builder.ret(pyapi.get_null_object()) else: raise NotImplementedError(failure_mode) # else the caller will handle addr == NULL return addr # new reference or NULL lower_get_wrapper_address = partial( _lower_get_address, function_name="_get_wrapper_address", ) lower_get_jit_address = partial( _lower_get_address, function_name="_get_jit_address", ) @unbox(FunctionType) def unbox_function_type(typ, obj, c): typ = typ.get_precise() sfunc = cgutils.create_struct_proxy(typ)(c.context, c.builder) addr = lower_get_wrapper_address( c.context, c.builder, obj, typ.signature, failure_mode='return_null') sfunc.c_addr = c.pyapi.long_as_voidptr(addr) c.pyapi.decref(addr) llty = c.context.get_value_type(types.voidptr) sfunc.py_addr = c.builder.ptrtoint(obj, llty) addr = lower_get_jit_address( c.context, c.builder, obj, typ.signature, failure_mode='return_null') sfunc.jit_addr = c.pyapi.long_as_voidptr(addr) c.pyapi.decref(addr) return NativeValue(sfunc._getvalue()) @box(FunctionType) def box_function_type(typ, val, c): typ = typ.get_precise() sfunc = cgutils.create_struct_proxy(typ)(c.context, c.builder, value=val) pyaddr_ptr = cgutils.alloca_once(c.builder, c.pyapi.pyobj) raw_ptr = c.builder.inttoptr(sfunc.py_addr, c.pyapi.pyobj) with c.builder.if_then(cgutils.is_null(c.builder, raw_ptr), likely=False): cstr = f"first-class function {typ} parent object not set" 
c.pyapi.err_set_string("PyExc_MemoryError", cstr) c.builder.ret(c.pyapi.get_null_object()) c.builder.store(raw_ptr, pyaddr_ptr) cfunc = c.builder.load(pyaddr_ptr) c.pyapi.incref(cfunc) return cfunc @lower_cast(UndefinedFunctionType, FunctionType) def lower_cast_function_type_to_function_type( context, builder, fromty, toty, val): return val @lower_cast(types.Dispatcher, FunctionType) def lower_cast_dispatcher_to_function_type(context, builder, fromty, toty, val): toty = toty.get_precise() sig = toty.signature dispatcher = fromty.dispatcher llvoidptr = context.get_value_type(types.voidptr) sfunc = cgutils.create_struct_proxy(toty)(context, builder) # Always store the python function sfunc.py_addr = builder.ptrtoint(val, llvoidptr) # Attempt to compile the Dispatcher to the expected function type try: cres = dispatcher.get_compile_result(sig) except errors.NumbaError: cres = None # If compilation is successful, we can by-pass using GIL to get the cfunc if cres is not None: # Declare cfunc in the current module wrapper_name = cres.fndesc.llvm_cfunc_wrapper_name llfnptr = context.get_value_type(toty.ftype) llfnty = llfnptr.pointee fn = cgutils.get_or_insert_function( builder.module, llfnty, wrapper_name, ) addr = builder.bitcast(fn, llvoidptr) # Store the cfunc sfunc.c_addr = addr # Store the jit func fn = context.declare_function(builder.module, cres.fndesc) sfunc.jit_addr = builder.bitcast(fn, llvoidptr) # Link-in the dispatcher library context.active_code_library.add_linking_library(cres.library) else: # Use lower_get_wrapper_address() to get the cfunc lower_get_wrapper_address pyapi = context.get_python_api(builder) gil_state = pyapi.gil_ensure() addr = lower_get_wrapper_address( context, builder, val, toty.signature, failure_mode='return_exc') sfunc.c_addr = pyapi.long_as_voidptr(addr) pyapi.decref(addr) pyapi.gil_release(gil_state) return sfunc._getvalue()
FunctionModel
python
ray-project__ray
python/ray/serve/_private/common.py
{ "start": 26089, "end": 26225 }
class ____: """Sent from the GRPC proxy to replicas on both unary and streaming codepaths.""" user_request_proto: Any
gRPCRequest
python
langchain-ai__langchain
libs/core/langchain_core/runnables/base.py
{ "start": 214416, "end": 217272 }
class ____(Protocol[Input, Output]): def __call__( self, _in: AsyncIterator[Input], /, *, config: RunnableConfig ) -> AsyncIterator[Output]: ... RunnableLike = ( Runnable[Input, Output] | Callable[[Input], Output] | Callable[[Input], Awaitable[Output]] | Callable[[Iterator[Input]], Iterator[Output]] | Callable[[AsyncIterator[Input]], AsyncIterator[Output]] | _RunnableCallableSync[Input, Output] | _RunnableCallableAsync[Input, Output] | _RunnableCallableIterator[Input, Output] | _RunnableCallableAsyncIterator[Input, Output] | Mapping[str, Any] ) def coerce_to_runnable(thing: RunnableLike) -> Runnable[Input, Output]: """Coerce a `Runnable`-like object into a `Runnable`. Args: thing: A `Runnable`-like object. Returns: A `Runnable`. Raises: TypeError: If the object is not `Runnable`-like. """ if isinstance(thing, Runnable): return thing if is_async_generator(thing) or inspect.isgeneratorfunction(thing): return RunnableGenerator(thing) if callable(thing): return RunnableLambda(cast("Callable[[Input], Output]", thing)) if isinstance(thing, dict): return cast("Runnable[Input, Output]", RunnableParallel(thing)) msg = ( f"Expected a Runnable, callable or dict." f"Instead got an unsupported type: {type(thing)}" ) raise TypeError(msg) @overload def chain( func: Callable[[Input], Coroutine[Any, Any, Output]], ) -> Runnable[Input, Output]: ... @overload def chain( func: Callable[[Input], Iterator[Output]], ) -> Runnable[Input, Output]: ... @overload def chain( func: Callable[[Input], AsyncIterator[Output]], ) -> Runnable[Input, Output]: ... @overload def chain( func: Callable[[Input], Output], ) -> Runnable[Input, Output]: ... def chain( func: Callable[[Input], Output] | Callable[[Input], Iterator[Output]] | Callable[[Input], Coroutine[Any, Any, Output]] | Callable[[Input], AsyncIterator[Output]], ) -> Runnable[Input, Output]: """Decorate a function to make it a `Runnable`. Sets the name of the `Runnable` to the name of the function. 
Any runnables called by the function will be traced as dependencies. Args: func: A `Callable`. Returns: A `Runnable`. Example: ```python from langchain_core.runnables import chain from langchain_core.prompts import PromptTemplate from langchain_openai import OpenAI @chain def my_func(fields): prompt = PromptTemplate("Hello, {name}!") model = OpenAI() formatted = prompt.invoke(**fields) for chunk in model.stream(formatted): yield chunk ``` """ return RunnableLambda(func)
_RunnableCallableAsyncIterator
python
django__django
django/tasks/exceptions.py
{ "start": 339, "end": 437 }
class ____(TaskException): """The requested TaskResult does not exist."""
TaskResultDoesNotExist
python
django__django
tests/admin_views/admin.py
{ "start": 24921, "end": 25043 }
class ____(admin.ModelAdmin): def get_queryset(self, request): return FilteredManager.objects
CustomManagerAdmin
python
streamlit__streamlit
lib/tests/streamlit/components/v2/test_bidi_presentation.py
{ "start": 962, "end": 1283 }
class ____: def __init__(self) -> None: self.widget_metadata: dict[str, Any] = {} self._payloads: dict[str, Any] = {} def __getitem__(self, k: str) -> Any: # emulate WStates __getitem__ if k not in self._payloads: raise KeyError(k) return self._payloads[k]
_FakeWStates
python
huggingface__transformers
src/transformers/models/emu3/modeling_emu3.py
{ "start": 49998, "end": 53011 }
class ____(Emu3PreTrainedModel, GenerationMixin): _tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"} _tp_plan = {"lm_head": "colwise_rep"} _pp_plan = {"lm_head": (["hidden_states"], ["logits"])} config: Emu3TextConfig def __init__(self, config): super().__init__(config) self.model = Emu3TextModel(config) self.vocab_size = config.vocab_size self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, logits_to_keep: Union[int, torch.Tensor] = 0, **kwargs: Unpack[TransformersKwargs], ) -> CausalLMOutputWithPast: r""" Example: ```python >>> from transformers import Emu3Processor, Emu3ForConditionalGeneration >>> import torch >>> import requests >>> from PIL import Image >>> model = Emu3ForCausalLM.from_pretrained("BAAI/Emu3-Chat-hf", dtype=torch.bfloat16) >>> processor = Emu3Processor.from_pretrained("BAAI/Emu3-Chat-hf") >>> inputs = processor(text=["Can you write me a poem about winter."], return_tensors="pt").to(model.device) >>> generated_ids = model.generate(**inputs, max_new_tokens=100, do_sample=False) >>> processor.batch_decode(generated_ids, skip_special_tokens=True)[0] ```""" outputs: BaseModelOutputWithPast = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, cache_position=cache_position, **kwargs, ) hidden_states = outputs.last_hidden_state # Only compute necessary logits, and do not upcast them to float if we are not computing the 
loss slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) loss = None if labels is not None: loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs) return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
Emu3ForCausalLM
python
conda__conda
conda/exceptions.py
{ "start": 13824, "end": 13994 }
class ____(CondaError, EnvironmentError): def __init__(self, message: str, *args): msg = f"{message}" super().__init__(msg, *args)
CondaEnvironmentError
python
GokuMohandas__MadeWithML
madewithml/data.py
{ "start": 5036, "end": 5666 }
class ____: """Custom preprocessor class.""" def __init__(self, class_to_index={}): self.class_to_index = class_to_index or {} # mutable defaults self.index_to_class = {v: k for k, v in self.class_to_index.items()} def fit(self, ds): tags = ds.unique(column="tag") self.class_to_index = {tag: i for i, tag in enumerate(tags)} self.index_to_class = {v: k for k, v in self.class_to_index.items()} return self def transform(self, ds): return ds.map_batches(preprocess, fn_kwargs={"class_to_index": self.class_to_index}, batch_format="pandas")
CustomPreprocessor
python
pennersr__django-allauth
allauth/templatetags/allauth.py
{ "start": 1115, "end": 2182 }
class ____(template.Node): def __init__(self, name, nodelist): self.name = name self.nodelist = nodelist def render(self, context): slots = context.render_context.get(SLOTS_CONTEXT_KEY) with context.push(): if slots is None: if self.name in context["slots"]: return "".join(context["slots"][self.name]) return self.nodelist.render(context) else: result = self.nodelist.render(context) slot_list = slots.setdefault(self.name, []) slot_list.append(result) return "" @register.tag(name="element") def do_element(parser, token): nodelist = parser.parse(("endelement",)) tag_name, args, kwargs = parse_tag(token, parser) usage = f'{{% {tag_name} "element" argument=value %}} ... {{% end{tag_name} %}}' if len(args) > 1: raise template.TemplateSyntaxError("Usage: %s" % usage) parser.delete_first_token() return ElementNode(nodelist, args[0], kwargs)
SlotNode
python
joke2k__faker
faker/cli.py
{ "start": 3705, "end": 9113 }
class ____: def __init__(self, argv: Optional[str] = None) -> None: self.argv = argv or sys.argv[:] self.prog_name = Path(self.argv[0]).name def execute(self) -> None: """ Given the command-line arguments, this creates a parser appropriate to that command, and runs it. """ # retrieve default language from system environment default_locale = os.environ.get("LANG", "en_US").split(".")[0] if default_locale not in AVAILABLE_LOCALES: default_locale = DEFAULT_LOCALE epilog = f"""supported locales: {', '.join(sorted(AVAILABLE_LOCALES))} Faker can take a locale as an optional argument, to return localized data. If no locale argument is specified, the factory falls back to the user's OS locale as long as it is supported by at least one of the providers. - for this user, the default locale is {default_locale}. If the optional argument locale and/or user's default locale is not available for the specified provider, the factory falls back to faker's default locale, which is {DEFAULT_LOCALE}. examples: $ faker address 968 Bahringer Garden Apt. 722 Kristinaland, NJ 09890 $ faker -l de_DE address Samira-Niemeier-Allee 56 94812 Biedenkopf $ faker profile ssn,birthdate {{'ssn': u'628-10-1085', 'birthdate': '2008-03-29'}} $ faker -r=3 -s=";" name Willam Kertzmann; Josiah Maggio; Gayla Schmitt; """ formatter_class = argparse.RawDescriptionHelpFormatter parser = argparse.ArgumentParser( prog=self.prog_name, description=f"{self.prog_name} version {VERSION}", epilog=epilog, formatter_class=formatter_class, ) parser.add_argument("--version", action="version", version=f"%(prog)s {VERSION}") parser.add_argument( "-v", "--verbose", action="store_true", help="show INFO logging events instead " "of CRITICAL, which is the default. 
These logging " "events provide insight into localization of " "specific providers.", ) parser.add_argument( "-o", metavar="output", type=argparse.FileType("w"), default=sys.stdout, help="redirect output to a file", ) parser.add_argument( "-l", "--lang", choices=AVAILABLE_LOCALES, default=default_locale, metavar="LOCALE", help="specify the language for a localized provider (e.g. de_DE)", ) parser.add_argument( "-r", "--repeat", default=1, type=int, help="generate the specified number of outputs", ) parser.add_argument( "-s", "--sep", default="\n", help="use the specified separator after each output", ) parser.add_argument( "--seed", metavar="SEED", type=int, help="specify a seed for the random generator so " "that results are repeatable. Also compatible " "with 'repeat' option", ) parser.add_argument( "-i", "--include", default=META_PROVIDERS_MODULES, nargs="*", help="list of additional custom providers to " "user, given as the import path of the module " "containing your Provider class (not the provider " "class itself)", ) parser.add_argument( "fake", action="store", nargs="?", help="name of the fake to generate output for (e.g. profile)", ) parser.add_argument( "fake_args", metavar="fake argument", action="store", nargs="*", help="optional arguments to pass to the fake " "(e.g. 
the profile fake takes an optional " "list of comma separated field names as the " "first argument)", ) arguments = parser.parse_args(self.argv[1:]) if arguments.verbose: logging.basicConfig(level=logging.DEBUG) else: logging.basicConfig(level=logging.CRITICAL) random.seed(arguments.seed) seeds = [random.random() for _ in range(arguments.repeat)] for i in range(arguments.repeat): print_doc( arguments.fake, arguments.fake_args, lang=arguments.lang, output=arguments.o, seed=seeds[i], includes=arguments.include, ) print(arguments.sep, file=arguments.o) if not arguments.fake: # repeat not supported for all docs break def execute_from_command_line(argv: Optional[str] = None) -> None: """A simple method that runs a Command.""" if sys.stdout.encoding is None: print( "please set python env PYTHONIOENCODING=UTF-8, example: " "export PYTHONIOENCODING=UTF-8, when writing to stdout", file=sys.stderr, ) exit(1) command = Command(argv) command.execute() if __name__ == "__main__": execute_from_command_line()
Command
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/flake8_type_checking/runtime_evaluated_decorators_2.py
{ "start": 289, "end": 335 }
class ____: x: pandas.DataFrame @dataclass
C
python
viewflow__viewflow
viewflow/views/list.py
{ "start": 4003, "end": 6334 }
class ____(BaseColumn): """ Retrieve attribute value from external data source. Data source attribute could be a property or callable. For a callable, to get the value it would be called with model instance. """ def __init__(self, data_source, attr_name, verbose_name=None): super().__init__(attr_name) self.verbose_name = verbose_name self.data_source = data_source def _get_attr_boolean(self): return _get_method_attr(self.data_source, self.attr_name, "boolean", False) def _get_attr_empty_value(self): return _get_method_attr(self.data_source, self.attr_name, "empty_value") def get_value(self, obj): attr = getattr(self.data_source, self.attr_name) if callable(attr): attr = attr(obj) if attr is None: attr = self._get_attr_empty_value() return attr def header(self): if self.verbose_name is not None: return self.verbose_name attr = getattr(self.data_source, self.attr_name) if hasattr(attr, "short_description"): return attr.short_description elif isinstance(attr, property) and hasattr(attr, "fget"): if hasattr(attr.fget, "short_description"): return attr.fget.short_description else: return pretty_name(self.attr_name) elif callable(attr): return "--" if attr.__name__ == "<lambda>" else pretty_name(attr.__name__) else: return pretty_name(self.attr_name) def column_type(self): is_boolean = _get_method_attr(self.data_source, self.attr_name, "boolean", None) if is_boolean: return "boolean" return _get_method_attr(self.data_source, self.attr_name, "column_type", "text") def orderby(self): return _get_method_attr( self.data_source, self.attr_name, "orderby_column", None ) def format_value(self, obj, value): if self._get_attr_boolean(): if value is None: return " " # Icon("indeterminate_check_box") elif value is True: return Icon("check") else: return Icon("close") else: return super().format_value(obj, value)
DataSourceColumn
python
davidhalter__jedi
test/completion/dynamic_params.py
{ "start": 2123, "end": 2199 }
class ____(): #? str() x_method = lambda self, a: a X().x_method('')
X
python
getsentry__sentry
src/sentry/hybridcloud/rpc/__init__.py
{ "start": 4738, "end": 8491 }
class ____(Generic[ServiceInterface]): """ It is possible to run monolith mode in a split database scenario -- in this case, the silo mode does not help select the correct implementation to ensure non mingled transactions. This helper picks a backing implementation by checking if an open transaction exists for the routing of the given model for a backend implementation. If no transactions are open, it uses a given default implementation instead. """ _constructors: Mapping[type[Model], Callable[[], ServiceInterface]] _default: Callable[[], ServiceInterface] def __init__( self, mapping: Mapping[type[Model], Callable[[], ServiceInterface]], default: Callable[[], ServiceInterface], ): self._constructors = mapping self._default = default def __getattr__(self, item: str) -> Any: for model, constructor in self._constructors.items(): if in_test_environment(): from sentry.testutils.hybrid_cloud import ( # NOQA:S007 simulated_transaction_watermarks, ) open_transaction = ( simulated_transaction_watermarks.connection_transaction_depth_above_watermark( using=router.db_for_write(model) ) > 0 ) else: open_transaction = transaction.get_connection( router.db_for_write(model) ).in_atomic_block if open_transaction: return getattr(constructor(), item) return getattr(self._default(), item) def silo_mode_delegation( mapping: Mapping[SiloMode, Callable[[], ServiceInterface]], ) -> ServiceInterface: """ Simply creates a DelegatedBySiloMode from a mapping object, but casts it as a ServiceInterface matching the mapping values. In split database mode, it will also inject DelegatedByOpenTransaction in for the monolith mode implementation. 
""" return cast(ServiceInterface, DelegatedBySiloMode(get_delegated_constructors(mapping))) def get_delegated_constructors( mapping: Mapping[SiloMode, Callable[[], ServiceInterface]], ) -> Mapping[SiloMode, Callable[[], ServiceInterface]]: """ Creates a new constructor mapping by replacing the monolith constructor with a DelegatedByOpenTransaction that intelligently selects the correct service implementation based on the call site. """ def delegator() -> ServiceInterface: from sentry.models.organization import Organization from sentry.users.models.user import User return cast( ServiceInterface, DelegatedByOpenTransaction( { User: mapping[SiloMode.CONTROL], Organization: mapping[SiloMode.REGION], }, mapping[SiloMode.MONOLITH], ), ) # We need to retain a closure around the original mapping passed in, so we'll use a new variable here final_mapping: Mapping[SiloMode, Callable[[], ServiceInterface]] = { SiloMode.MONOLITH: delegator, **({k: v for k, v in mapping.items() if k != SiloMode.MONOLITH}), } return final_mapping def coerce_id_from(m: object | int | None) -> int | None: if m is None: return None if isinstance(m, int): return m if hasattr(m, "id"): return m.id raise ValueError(f"Cannot coerce {m!r} into id!") def extract_id_from(m: object | int) -> int: if isinstance(m, int): return m if hasattr(m, "id"): return m.id raise ValueError(f"Cannot extract {m!r} from id!")
DelegatedByOpenTransaction
python
jazzband__django-model-utils
tests/test_managers/test_softdelete_manager.py
{ "start": 114, "end": 1056 }
class ____(TestCase): def test_custom_manager_empty(self) -> None: qs = CustomSoftDelete.available_objects.only_read() self.assertEqual(qs.count(), 0) def test_custom_qs_empty(self) -> None: qs = CustomSoftDelete.available_objects.all().only_read() self.assertEqual(qs.count(), 0) def test_is_read(self) -> None: for is_read in [True, False, True, False]: CustomSoftDelete.available_objects.create(is_read=is_read) qs = CustomSoftDelete.available_objects.only_read() self.assertEqual(qs.count(), 2) def test_is_read_removed(self) -> None: for is_read, is_removed in [(True, True), (True, False), (False, False), (False, True)]: CustomSoftDelete.available_objects.create(is_read=is_read, is_removed=is_removed) qs = CustomSoftDelete.available_objects.only_read() self.assertEqual(qs.count(), 1)
CustomSoftDeleteManagerTests
python
huggingface__transformers
src/transformers/models/sam2_video/configuration_sam2_video.py
{ "start": 6705, "end": 20462 }
class ____(PreTrainedConfig): r""" [`Sam2Config`] is the configuration class to store the configuration of a [`Sam2Model`]. It is used to instantiate a SAM2 model according to the specified arguments, defining the memory attention, memory encoder, and image encoder configs. Instantiating a configuration defaults will yield a similar configuration to that of the SAM 2.1 Hiera-tiny [facebook/sam2.1-hiera-tiny](https://huggingface.co/facebook/sam2.1-hiera-tiny) architecture. Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: vision_config (Union[`dict`, `Sam2VisionConfig`], *optional*): Dictionary of configuration options used to initialize [`Sam2VisionConfig`]. prompt_encoder_config (Union[`dict`, `Sam2PromptEncoderConfig`], *optional*): Dictionary of configuration options used to initialize [`Sam2PromptEncoderConfig`]. mask_decoder_config (Union[`dict`, `Sam2MaskDecoderConfig`], *optional*): Dictionary of configuration options used to initialize [`Sam2MaskDecoderConfig`]. initializer_range (`float`, *optional*, defaults to 0.02): Standard deviation for parameter initialization. num_maskmem (`int`, *optional*, defaults to 7): The number of memory slots for the mask memory. image_size (`int`, *optional*, defaults to 1024): The size of the input images. sigmoid_scale_for_mem_enc (`float`, *optional*, defaults to 20.0): Scale factor for the sigmoid function in the memory encoder. sigmoid_bias_for_mem_enc (`float`, *optional*, defaults to -10.0): Bias for the sigmoid function in the memory encoder. enable_occlusion_spatial_embedding (`bool`, *optional*, defaults to `True`): Whether to enable spatial embedding for occlusions. multimask_output_in_sam (`bool`, *optional*, defaults to `True`): Whether to output multiple masks from the SAM head. 
multimask_min_pt_num (`int`, *optional*, defaults to 0): The minimum number of points to trigger multimask output. multimask_max_pt_num (`int`, *optional*, defaults to 1): The maximum number of points to trigger multimask output. multimask_output_for_tracking (`bool`, *optional*, defaults to `True`): Whether to use multimask output for tracking. max_object_pointers_in_encoder (`int`, *optional*, defaults to 16): The maximum number of object pointers in the encoder. max_cond_frame_num (`int`, *optional*, defaults to -1): Maximum number of conditioning frames to use in memory attention. Set to -1 to use all conditioning frames. enable_temporal_pos_encoding_for_object_pointers (`bool`, *optional*, defaults to `True`): Whether to enable temporal positional encoding for object pointers. memory_attention_hidden_size (`int`, *optional*, defaults to 256): Dimensionality of the memory attention hidden states. memory_attention_num_layers (`int`, *optional*, defaults to 4): The number of layers in the memory attention module. memory_attention_num_attention_heads (`int`, *optional*, defaults to 1): Number of attention heads for each attention layer in the memory attention. memory_attention_downsample_rate (`int`, *optional*, defaults to 1): The downsample rate for the attention layers. memory_attention_feed_forward_hidden_size (`int`, *optional*, defaults to 2048): The dimension of the feedforward network in the memory attention module. memory_attention_feed_forward_hidden_act (`str`, *optional*, defaults to `"relu"`): The non-linear activation function in the feedforward network in the memory attention module. memory_attention_dropout (`float`, *optional*, defaults to 0.1): The dropout rate for the memory attention module. memory_attention_rope_theta (`float`, *optional*, defaults to 10000): The Rope theta parameter. memory_attention_rope_feat_sizes (`list[int]`, *optional*, defaults to `[64, 64]`): The feature sizes for the Rope positional encoding. 
memory_attention_rope_dropout (`float`, *optional*, defaults to 0.1): The dropout rate for the Rope positional encoding. memory_encoder_hidden_size (`int`, *optional*, defaults to 256): Dimensionality of the memory encoder hidden states. memory_encoder_output_channels (`int`, *optional*, defaults to 64): The number of output channels for the memory encoder. mask_downsampler_embed_dim (`int`, *optional*, defaults to 256): The dimension of the mask downsampler embedding. mask_downsampler_kernel_size (`int`, *optional*, defaults to 3): The kernel size for the mask downsampler. mask_downsampler_stride (`int`, *optional*, defaults to 2): The stride for the mask downsampler. mask_downsampler_padding (`int`, *optional*, defaults to 1): The padding for the mask downsampler. mask_downsampler_total_stride (`int`, *optional*, defaults to 16): The total stride for the mask downsampler. mask_downsampler_hidden_act (`str`, *optional*, defaults to `"gelu"`): The non-linear activation function in the mask downsampler. memory_fuser_num_layers (`int`, *optional*, defaults to 2): The number of layers in the memory fuser. memory_fuser_embed_dim (`int`, *optional*, defaults to 256): The dimension of the embedding layer in the memory fuser. memory_fuser_intermediate_dim (`int`, *optional*, defaults to 1024): The dimension of the intermediate layer in the memory fuser. memory_fuser_kernel_size (`int`, *optional*, defaults to 7): The kernel size for the memory fuser. memory_fuser_padding (`int`, *optional*, defaults to 3): The padding for the memory fuser. memory_fuser_layer_scale_init_value (`float`, *optional*, defaults to 1e-06): The initial value for the layer scale in the memory fuser. memory_fuser_hidden_act (`str`, *optional*, defaults to `"gelu"`): The non-linear activation function in the memory fuser. kwargs (*optional*): Dictionary of keyword arguments. Example: ```python >>> from transformers import ( ... Sam2VisionConfig, ... Sam2PromptEncoderConfig, ... 
Sam2MaskDecoderConfig, ... Sam2Model, ... ) >>> # Initializing a Sam2Config with `"facebook/sam2.1_hiera_tiny"` style configuration >>> configuration = Sam2config() >>> # Initializing a Sam2Model (with random weights) from the `"facebook/sam2.1_hiera_tiny"` style configuration >>> model = Sam2Model(configuration) >>> # Accessing the model configuration >>> configuration = model.config >>> # We can also initialize a Sam2Config from a Sam2VisionConfig, Sam2PromptEncoderConfig, and Sam2MaskDecoderConfig >>> # Initializing SAM2 vision encoder, memory attention, and memory encoder configurations >>> vision_config = Sam2VisionConfig() >>> prompt_encoder_config = Sam2PromptEncoderConfig() >>> mask_decoder_config = Sam2MaskDecoderConfig() >>> config = Sam2Config(vision_config, prompt_encoder_config, mask_decoder_config) ```""" model_type = "sam2_video" sub_configs = { "vision_config": AutoConfig, "prompt_encoder_config": Sam2VideoPromptEncoderConfig, "mask_decoder_config": Sam2VideoMaskDecoderConfig, } def __init__( self, vision_config=None, prompt_encoder_config=None, mask_decoder_config=None, initializer_range=0.02, num_maskmem=7, image_size=1024, sigmoid_scale_for_mem_enc=20.0, sigmoid_bias_for_mem_enc=-10.0, enable_occlusion_spatial_embedding=True, multimask_output_in_sam=True, multimask_min_pt_num=0, multimask_max_pt_num=1, multimask_output_for_tracking=True, max_object_pointers_in_encoder=16, max_cond_frame_num=-1, enable_temporal_pos_encoding_for_object_pointers=True, # memory attention memory_attention_hidden_size=256, memory_attention_num_layers=4, memory_attention_num_attention_heads=1, memory_attention_downsample_rate=1, memory_attention_feed_forward_hidden_size=2048, memory_attention_feed_forward_hidden_act="relu", memory_attention_dropout=0.1, memory_attention_rope_theta=10000, memory_attention_rope_feat_sizes=None, memory_attention_rope_dropout=0.1, # memory encoder memory_encoder_hidden_size=256, memory_encoder_output_channels=64, 
mask_downsampler_embed_dim=256, mask_downsampler_kernel_size=3, mask_downsampler_stride=2, mask_downsampler_padding=1, mask_downsampler_total_stride=16, mask_downsampler_hidden_act="gelu", memory_fuser_num_layers=2, memory_fuser_embed_dim=256, memory_fuser_intermediate_dim=1024, memory_fuser_kernel_size=7, memory_fuser_padding=3, memory_fuser_layer_scale_init_value=1e-6, memory_fuser_hidden_act="gelu", **kwargs, ): super().__init__(**kwargs) vision_config = vision_config if vision_config is not None else {} prompt_encoder_config = prompt_encoder_config if prompt_encoder_config is not None else {} mask_decoder_config = mask_decoder_config if mask_decoder_config is not None else {} memory_attention_rope_feat_sizes = ( [64, 64] if memory_attention_rope_feat_sizes is None else memory_attention_rope_feat_sizes ) if isinstance(vision_config, dict): vision_config["model_type"] = vision_config.get("model_type", "sam2_vision_model") vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config) if isinstance(prompt_encoder_config, Sam2VideoPromptEncoderConfig): prompt_encoder_config = prompt_encoder_config.to_dict() if isinstance(mask_decoder_config, Sam2VideoMaskDecoderConfig): mask_decoder_config = mask_decoder_config.to_dict() self.vision_config = vision_config self.prompt_encoder_config = Sam2VideoPromptEncoderConfig(**prompt_encoder_config) self.mask_decoder_config = Sam2VideoMaskDecoderConfig(**mask_decoder_config) self.initializer_range = initializer_range self.num_maskmem = num_maskmem # default 1 input frame + 6 previous frames self.image_size = image_size self.sigmoid_scale_for_mem_enc = sigmoid_scale_for_mem_enc self.sigmoid_bias_for_mem_enc = sigmoid_bias_for_mem_enc self.multimask_output_in_sam = multimask_output_in_sam self.multimask_min_pt_num = multimask_min_pt_num self.multimask_max_pt_num = multimask_max_pt_num self.multimask_output_for_tracking = multimask_output_for_tracking self.max_object_pointers_in_encoder = 
max_object_pointers_in_encoder self.max_cond_frame_num = max_cond_frame_num # The next 4 are True for sam2.1 and False for sam2 self.enable_occlusion_spatial_embedding = enable_occlusion_spatial_embedding self.enable_temporal_pos_encoding_for_object_pointers = enable_temporal_pos_encoding_for_object_pointers # memory attention self.memory_attention_hidden_size = memory_attention_hidden_size self.memory_attention_num_layers = memory_attention_num_layers self.memory_attention_num_attention_heads = memory_attention_num_attention_heads self.memory_attention_downsample_rate = memory_attention_downsample_rate self.memory_attention_feed_forward_hidden_size = memory_attention_feed_forward_hidden_size self.memory_attention_feed_forward_hidden_act = memory_attention_feed_forward_hidden_act self.memory_attention_dropout = memory_attention_dropout self.memory_attention_rope_theta = memory_attention_rope_theta self.memory_attention_rope_feat_sizes = memory_attention_rope_feat_sizes self.memory_attention_rope_dropout = memory_attention_rope_dropout # memory encoder self.memory_encoder_hidden_size = memory_encoder_hidden_size self.memory_encoder_output_channels = memory_encoder_output_channels self.mask_downsampler_embed_dim = mask_downsampler_embed_dim self.mask_downsampler_kernel_size = mask_downsampler_kernel_size self.mask_downsampler_stride = mask_downsampler_stride self.mask_downsampler_padding = mask_downsampler_padding self.mask_downsampler_total_stride = mask_downsampler_total_stride self.mask_downsampler_hidden_act = mask_downsampler_hidden_act self.memory_fuser_num_layers = memory_fuser_num_layers self.memory_fuser_embed_dim = memory_fuser_embed_dim self.memory_fuser_intermediate_dim = memory_fuser_intermediate_dim self.memory_fuser_kernel_size = memory_fuser_kernel_size self.memory_fuser_padding = memory_fuser_padding self.memory_fuser_layer_scale_init_value = memory_fuser_layer_scale_init_value self.memory_fuser_hidden_act = memory_fuser_hidden_act __all__ = 
["Sam2VideoMaskDecoderConfig", "Sam2VideoPromptEncoderConfig", "Sam2VideoConfig"]
Sam2VideoConfig
python
scikit-learn__scikit-learn
sklearn/impute/tests/test_base.py
{ "start": 341, "end": 549 }
class ____(_BaseImputer): def fit(self, X, y=None): return self def transform(self, X, y=None): return self._concatenate_indicator(X, self._transform_indicator(X))
NoFitIndicatorImputer
python
pytorch__pytorch
torch/cuda/memory.py
{ "start": 51180, "end": 54129 }
class ____(_MemPool): r"""MemPool represents a pool of memory in a caching allocator. Currently, it's just the ID of the pool object maintained in the CUDACachingAllocator. Args: allocator(torch._C._cuda_CUDAAllocator, optional): a torch._C._cuda_CUDAAllocator object that can be used to define how memory gets allocated in the pool. If :attr:`allocator` is ``None`` (default), memory allocation follows the default/ current configuration of the CUDACachingAllocator. use_on_oom(bool): a bool that indicates if this pool can be used as a last resort if a memory allocation outside of the pool fails due to Out Of Memory. This is False by default. """ def __init__( self, allocator: Optional[_cuda_CUDAAllocator] = None, use_on_oom: bool = False, ): super().__init__(allocator, True, use_on_oom) @property def id(self) -> tuple[int, int]: r"""Returns the ID of this pool as a tuple of two ints.""" return super().id @property def allocator(self) -> Optional[_cuda_CUDAAllocator]: r"""Returns the allocator this MemPool routes allocations to.""" return super().allocator def use_count(self) -> int: r"""Returns the reference count of this pool.""" return super().use_count() def snapshot(self): r"""Return a snapshot of the CUDA memory allocator pool state across all devices. Interpreting the output of this function requires familiarity with the memory allocator internals. .. note:: See :ref:`cuda-memory-management` for more details about GPU memory management. """ snapshot = torch.cuda.memory_snapshot(self.id) return snapshot @contextlib.contextmanager def use_mem_pool(pool: MemPool, device: "Device" = None): r"""A context manager that routes allocations to a given pool. Args: pool(torch.cuda.MemPool): a MemPool object to be made active so that allocations route to this pool. device (torch.device or int, optional): selected device. Uses MemPool on the current device, given by :func:`~torch.cuda.current_device`, if :attr:`device` is ``None`` (default). .. 
note:: This context manager makes only current thread's allocations route to the given pool. If a new thread is spawned inside the context manager (e.g. by calling backward) the allocations in that thread will not route to the given pool. """ device_index = ( torch.cuda.current_device() if device is None else _get_device_index(device) ) _cuda_beginAllocateCurrentThreadToPool(device_index, pool.id) try: yield finally: _cuda_endAllocateToPool(device_index, pool.id) _cuda_releasePool(device_index, pool.id)
MemPool
python
scrapy__scrapy
tests/test_spiderloader/test_spiders/spider0.py
{ "start": 36, "end": 112 }
class ____(Spider): allowed_domains = ["scrapy1.org", "scrapy3.org"]
Spider0
python
python-attrs__attrs
tests/test_make.py
{ "start": 34939, "end": 34989 }
class ____: @attr.s class D: pass
GC
python
python__mypy
mypyc/ir/rtypes.py
{ "start": 32140, "end": 34974 }
class ____(RType): """union[x, ..., y]""" is_unboxed = False def __init__(self, items: list[RType]) -> None: self.name = "union" self.items = items self.items_set = frozenset(items) self._ctype = "PyObject *" @staticmethod def make_simplified_union(items: list[RType]) -> RType: """Return a normalized union that covers the given items. Flatten nested unions and remove duplicate items. Overlapping items are *not* simplified. For example, [object, str] will not be simplified. """ items = flatten_nested_unions(items) assert items unique_items = dict.fromkeys(items) if len(unique_items) > 1: return RUnion(list(unique_items)) else: return next(iter(unique_items)) def accept(self, visitor: RTypeVisitor[T]) -> T: return visitor.visit_runion(self) @property def may_be_immortal(self) -> bool: return any(item.may_be_immortal for item in self.items) def __repr__(self) -> str: return "<RUnion %s>" % ", ".join(str(item) for item in self.items) def __str__(self) -> str: return "union[%s]" % ", ".join(str(item) for item in self.items) # We compare based on the set because order in a union doesn't matter def __eq__(self, other: object) -> TypeGuard[RUnion]: return isinstance(other, RUnion) and self.items_set == other.items_set def __hash__(self) -> int: return hash(("union", self.items_set)) def serialize(self) -> JsonDict: types = [x.serialize() for x in self.items] return {".class": "RUnion", "types": types} @classmethod def deserialize(cls, data: JsonDict, ctx: DeserMaps) -> RUnion: types = [deserialize_type(t, ctx) for t in data["types"]] return RUnion(types) def flatten_nested_unions(types: list[RType]) -> list[RType]: if not any(isinstance(t, RUnion) for t in types): return types # Fast path flat_items: list[RType] = [] for t in types: if isinstance(t, RUnion): flat_items.extend(flatten_nested_unions(t.items)) else: flat_items.append(t) return flat_items def optional_value_type(rtype: RType) -> RType | None: """If rtype is the union of none_rprimitive and another type X, 
return X. Otherwise, return None. """ if isinstance(rtype, RUnion) and len(rtype.items) == 2: if rtype.items[0] == none_rprimitive: return rtype.items[1] elif rtype.items[1] == none_rprimitive: return rtype.items[0] return None def is_optional_type(rtype: RType) -> TypeGuard[RUnion]: """Is rtype an optional type with exactly two union items?""" return optional_value_type(rtype) is not None @final
RUnion
python
python-pillow__Pillow
src/PIL/DcxImagePlugin.py
{ "start": 977, "end": 2145 }
class ____(PcxImageFile): format = "DCX" format_description = "Intel DCX" _close_exclusive_fp_after_loading = False def _open(self) -> None: # Header s = self.fp.read(4) if not _accept(s): msg = "not a DCX file" raise SyntaxError(msg) # Component directory self._offset = [] for i in range(1024): offset = i32(self.fp.read(4)) if not offset: break self._offset.append(offset) self._fp = self.fp self.frame = -1 self.n_frames = len(self._offset) self.is_animated = self.n_frames > 1 self.seek(0) def seek(self, frame: int) -> None: if not self._seek_check(frame): return if isinstance(self._fp, DeferredError): raise self._fp.ex self.frame = frame self.fp = self._fp self.fp.seek(self._offset[frame]) PcxImageFile._open(self) def tell(self) -> int: return self.frame Image.register_open(DcxImageFile.format, DcxImageFile, _accept) Image.register_extension(DcxImageFile.format, ".dcx")
DcxImageFile
python
python-markdown__markdown
tests/test_extensions.py
{ "start": 1009, "end": 2501 }
class ____(unittest.TestCase): """ Test markdown.extensions.Extension. """ def setUp(self): class TestExtension(markdown.extensions.Extension): config = { 'foo': ['bar', 'Description of foo'], 'bar': ['baz', 'Description of bar'] } self.ext = TestExtension() self.ExtKlass = TestExtension def testGetConfig(self): self.assertEqual(self.ext.getConfig('foo'), 'bar') def testGetConfigDefault(self): self.assertEqual(self.ext.getConfig('baz'), '') self.assertEqual(self.ext.getConfig('baz', default='missing'), 'missing') def testGetConfigs(self): self.assertEqual(self.ext.getConfigs(), {'foo': 'bar', 'bar': 'baz'}) def testGetConfigInfo(self): self.assertEqual( dict(self.ext.getConfigInfo()), dict([ ('foo', 'Description of foo'), ('bar', 'Description of bar') ]) ) def testSetConfig(self): self.ext.setConfig('foo', 'baz') self.assertEqual(self.ext.getConfigs(), {'foo': 'baz', 'bar': 'baz'}) def testSetConfigWithBadKey(self): # `self.ext.setConfig('bad', 'baz)` => `KeyError` self.assertRaises(KeyError, self.ext.setConfig, 'bad', 'baz') def testConfigAsKwargsOnInit(self): ext = self.ExtKlass(foo='baz', bar='blah') self.assertEqual(ext.getConfigs(), {'foo': 'baz', 'bar': 'blah'})
TestExtensionClass
python
viewflow__viewflow
viewflow/this_object.py
{ "start": 1390, "end": 2555 }
class ____(object): """ Helper for forward references to class attributes. This class is used to defer the resolution of an attribute reference until the class is fully constructed. This allows for the use of class attributes before they are defined. Attributes: name (str): The name of the attribute to resolve. """ def __init__(self, name: str) -> None: # noqa D102 self.name = name def resolve(self, instance: object) -> Any: """ Resolve the attribute reference on the given instance. Args: instance (object): The instance on which to resolve the attribute. Returns: Any: The resolved attribute. Raises: AttributeError: If the attribute does not exist on the instance. """ return getattr(instance, self.name) # def __copy__(self): # return super().__copy__() # def __deepcopy__(self, memo): # return super().__deepcopy__(memo) def __getattr__(self, name): if name.startswith("__"): super().__getattr__(name) return ThisMethod(self.name, name)
ThisObject
python
tensorflow__tensorflow
tensorflow/python/framework/tensor_shape_test.py
{ "start": 10304, "end": 10826 }
class ____(test_util.TensorFlowTestCase): def testSerialization(self): shape_1 = tensor_shape.TensorShape([1, 2, 3]) shape_2 = tensor_shape.TensorShape([None, 2, None]) shape_3 = tensor_shape.TensorShape(None) self.assertEqual( trace_type.deserialize(trace_type.serialize(shape_1)), shape_1) self.assertEqual( trace_type.deserialize(trace_type.serialize(shape_2)), shape_2) self.assertEqual( trace_type.deserialize(trace_type.serialize(shape_3)), shape_3)
SerilizationTest
python
readthedocs__readthedocs.org
readthedocs/api/v3/filters.py
{ "start": 417, "end": 1002 }
class ____(filters.FilterSet): # TODO this is copying the patterns from other filter sets, where the fields # are all ``icontains`` lookups by default. We discussed reversing this # pattern in the future though, see: # https://github.com/readthedocs/readthedocs.org/issues/9862 name = filters.CharFilter(lookup_expr="icontains") slug = filters.CharFilter(lookup_expr="icontains") class Meta: model = Project fields = [ "name", "slug", "language", "programming_language", ]
ProjectFilter
python
mlflow__mlflow
dev/clint/src/clint/rules/unparameterized_generic_type.py
{ "start": 84, "end": 902 }
class ____(Rule): def __init__(self, type_hint: str) -> None: self.type_hint = type_hint @staticmethod def is_generic_type(node: ast.Name | ast.Attribute, resolver: Resolver) -> bool: if names := resolver.resolve(node): return tuple(names) in { ("typing", "Callable"), ("typing", "Sequence"), } elif isinstance(node, ast.Name): return node.id in { "dict", "list", "set", "tuple", "frozenset", } return False def _message(self) -> str: return ( f"Generic type `{self.type_hint}` must be parameterized " "(e.g., `list[str]` rather than `list`)." )
UnparameterizedGenericType
python
getsentry__sentry
src/sentry/sentry_apps/utils/webhooks.py
{ "start": 690, "end": 791 }
class ____(SentryAppActionType): CREATED = "created" DELETED = "deleted"
InstallationActionType
python
ray-project__ray
python/ray/data/context.py
{ "start": 11505, "end": 35651 }
class ____: """Global settings for Ray Data. Configure this class to enable advanced features and tune performance. .. warning:: Apply changes before creating a :class:`~ray.data.Dataset`. Changes made after won't take effect. .. note:: This object is automatically propagated to workers. Access it from the driver and remote workers with :meth:`DataContext.get_current()`. Examples: >>> from ray.data import DataContext >>> DataContext.get_current().enable_progress_bars = False Args: target_max_block_size: The max target block size in bytes for reads and transformations. If `None`, this means the block size is infinite. target_min_block_size: Ray Data avoids creating blocks smaller than this size in bytes on read. This takes precedence over ``read_op_min_num_blocks``. streaming_read_buffer_size: Buffer size when doing streaming reads from local or remote storage. enable_pandas_block: Whether pandas block format is enabled. actor_prefetcher_enabled: Whether to use actor based block prefetcher. autoscaling_config: Autoscaling configuration. use_push_based_shuffle: Whether to use push-based shuffle. pipeline_push_based_shuffle_reduce_tasks: scheduling_strategy: The global scheduling strategy. For tasks with large args, ``scheduling_strategy_large_args`` takes precedence. scheduling_strategy_large_args: Scheduling strategy for tasks with large args. large_args_threshold: Size in bytes after which point task arguments are considered large. Choose a value so that the data transfer overhead is significant in comparison to task scheduling (i.e., low tens of ms). use_polars: Whether to use Polars for tabular dataset sorts, groupbys, and aggregations. eager_free: Whether to eagerly free memory. decoding_size_estimation: Whether to estimate in-memory decoding data size for data source. min_parallelism: This setting is deprecated. Use ``read_op_min_num_blocks`` instead. read_op_min_num_blocks: Minimum number of read output blocks for a dataset. 
enable_tensor_extension_casting: Whether to automatically cast NumPy ndarray columns in Pandas DataFrames to tensor extension columns. use_arrow_tensor_v2: Config enabling V2 version of ArrowTensorArray supporting tensors > 2Gb in size (off by default) enable_fallback_to_arrow_object_ext_type: Enables fallback to serialize column values not suppported by Arrow natively (like user-defined custom Python classes for ex, etc) using `ArrowPythonObjectType` (simply serializing these as bytes) enable_auto_log_stats: Whether to automatically log stats after execution. If disabled, you can still manually print stats with ``Dataset.stats()``. verbose_stats_logs: Whether stats logs should be verbose. This includes fields such as `extra_metrics` in the stats output, which are excluded by default. trace_allocations: Whether to trace allocations / eager free. This adds significant performance overheads and should only be used for debugging. execution_options: The :class:`~ray.data._internal.execution.interfaces.execution_options.ExecutionOptions` to use. use_ray_tqdm: Whether to enable distributed tqdm. enable_progress_bars: Whether to enable progress bars. enable_operator_progress_bars: Whether to enable progress bars for individual operators during execution. enable_progress_bar_name_truncation: If True, the name of the progress bar (often the operator name) will be truncated if it exceeds `ProgressBar.MAX_NAME_LENGTH`. Otherwise, the full operator name is shown. enable_rich_progress_bars: Whether to use the new rich progress bars instead of the tqdm TUI. enable_get_object_locations_for_metrics: Whether to enable ``get_object_locations`` for metrics. This is useful for tracking whether the object input of a task is local (cache hit) or not local (cache miss) to the node that task is running on. write_file_retry_on_errors: A list of substrings of error messages that should trigger a retry when writing files. 
This is useful for handling transient errors when writing to remote storage systems. warn_on_driver_memory_usage_bytes: If driver memory exceeds this threshold, Ray Data warns you. For now, this only applies to shuffle ops because most other ops are unlikely to use as much driver memory. actor_task_retry_on_errors: The application-level errors that actor task should retry. This follows same format as :ref:`retry_exceptions <task-retries>` in Ray Core. Default to `False` to not retry on any errors. Set to `True` to retry all errors, or set to a list of errors to retry. op_resource_reservation_enabled: Whether to enable resource reservation for operators to prevent resource contention. op_resource_reservation_ratio: The ratio of the total resources to reserve for each operator. max_errored_blocks: Max number of blocks that are allowed to have errors, unlimited if negative. This option allows application-level exceptions in block processing tasks. These exceptions may be caused by UDFs (e.g., due to corrupted data samples) or IO errors. Data in the failed blocks are dropped. This option can be useful to prevent a long-running job from failing due to a small number of bad blocks. log_internal_stack_trace_to_stdout: Whether to include internal Ray Data/Ray Core code stack frames when logging to stdout. The full stack trace is always written to the Ray Data log file. raise_original_map_exception: Whether to raise the original exception encountered in map UDF instead of wrapping it in a `UserCodeException`. print_on_execution_start: If ``True``, print execution information when execution starts. s3_try_create_dir: If ``True``, try to create directories on S3 when a write call is made with a S3 URI. wait_for_min_actors_s: The default time to wait for minimum requested actors to start before raising a timeout, in seconds. max_tasks_in_flight_per_actor: Max number of tasks that could be submitted for execution to individual actor at the same time. 
Note that only up to `max_concurrency` number of these tasks will be executing concurrently while remaining ones will be waiting in the Actor's queue. Buffering tasks in the queue allows us to overlap pulling of the blocks (which are tasks arguments) with the execution of the prior tasks maximizing individual Actor's utilization retried_io_errors: A list of substrings of error messages that should trigger a retry when reading or writing files. This is useful for handling transient errors when reading from remote storage systems. default_hash_shuffle_parallelism: Default parallelism level for hash-based shuffle operations if the number of partitions is unspecifed. max_hash_shuffle_aggregators: Maximum number of aggregating actors that can be provisioned for hash-shuffle aggregations. min_hash_shuffle_aggregator_wait_time_in_s: Minimum time to wait for hash shuffle aggregators to become available, in seconds. hash_shuffle_aggregator_health_warning_interval_s: Interval for health warning checks on hash shuffle aggregators, in seconds. max_hash_shuffle_finalization_batch_size: Maximum batch size for concurrent hash-shuffle finalization tasks. If `None`, defaults to `max_hash_shuffle_aggregators`. join_operator_actor_num_cpus_per_partition_override: Override CPU allocation per partition for join operator actors. hash_shuffle_operator_actor_num_cpus_per_partition_override: Override CPU allocation per partition for hash shuffle operator actors. hash_aggregate_operator_actor_num_cpus_per_partition_override: Override CPU allocation per partition for hash aggregate operator actors. use_polars_sort: Whether to use Polars for tabular dataset sorting operations. enable_per_node_metrics: Enable per node metrics reporting for Ray Data, disabled by default. override_object_store_memory_limit_fraction: Override the fraction of object store memory limit. If `None`, uses Ray's default. memory_usage_poll_interval_s: The interval to poll the USS of map tasks. 
If `None`, map tasks won't record memory stats. dataset_logger_id: Optional logger ID for dataset operations. If `None`, uses default logging configuration. issue_detectors_config: Configuration for issue detection and monitoring during dataset operations. downstream_capacity_backpressure_ratio: Ratio for downstream capacity backpressure control. A higher ratio causes backpressure to kick-in later. If `None`, this type of backpressure is disabled. downstream_capacity_backpressure_max_queued_bundles: Maximum number of queued bundles before applying backpressure. If `None`, no limit is applied. enable_dynamic_output_queue_size_backpressure: Whether to cap the concurrency of an operator based on it's and downstream's queue size. enforce_schemas: Whether to enforce schema consistency across dataset operations. pandas_block_ignore_metadata: Whether to ignore pandas metadata when converting between Arrow and pandas formats for better type inference. """ # `None` means the block size is infinite. 
target_max_block_size: Optional[int] = DEFAULT_TARGET_MAX_BLOCK_SIZE target_min_block_size: int = DEFAULT_TARGET_MIN_BLOCK_SIZE streaming_read_buffer_size: int = DEFAULT_STREAMING_READ_BUFFER_SIZE enable_pandas_block: bool = DEFAULT_ENABLE_PANDAS_BLOCK actor_prefetcher_enabled: bool = DEFAULT_ACTOR_PREFETCHER_ENABLED autoscaling_config: AutoscalingConfig = field(default_factory=AutoscalingConfig) ################################################################ # Sort-based shuffling configuration ################################################################ use_push_based_shuffle: bool = DEFAULT_USE_PUSH_BASED_SHUFFLE _shuffle_strategy: ShuffleStrategy = _deduce_default_shuffle_algorithm() pipeline_push_based_shuffle_reduce_tasks: bool = True ################################################################ # Hash-based shuffling configuration ################################################################ # Default hash-shuffle parallelism level (will be used when not # provided explicitly) default_hash_shuffle_parallelism: int = DEFAULT_MIN_PARALLELISM # Max number of aggregators (actors) that could be provisioned # to perform aggregations on partitions produced during hash-shuffling # # When unset defaults to the smaller of # - Total # of CPUs available in the cluster * 2 # - DEFAULT_MAX_HASH_SHUFFLE_AGGREGATORS (128 by default) max_hash_shuffle_aggregators: Optional[int] = None min_hash_shuffle_aggregator_wait_time_in_s: int = ( DEFAULT_MIN_HASH_SHUFFLE_AGGREGATOR_WAIT_TIME_IN_S ) hash_shuffle_aggregator_health_warning_interval_s: int = ( DEFAULT_HASH_SHUFFLE_AGGREGATOR_HEALTH_WARNING_INTERVAL_S ) # Max number of *concurrent* hash-shuffle finalization tasks running # at the same time. 
This config is helpful to control concurrency of # finalization tasks to prevent single aggregator running multiple tasks # concurrently (for ex, to prevent it failing w/ OOM) # # When unset defaults to `DataContext.max_hash_shuffle_aggregators` max_hash_shuffle_finalization_batch_size: Optional[int] = None # (Advanced) Following configuration allows to override `num_cpus` allocation for the # Join/Aggregate/Shuffle workers (utilizing hash-shuffle) join_operator_actor_num_cpus_override: float = None hash_shuffle_operator_actor_num_cpus_override: float = None hash_aggregate_operator_actor_num_cpus_override: float = None scheduling_strategy: SchedulingStrategyT = DEFAULT_SCHEDULING_STRATEGY scheduling_strategy_large_args: SchedulingStrategyT = ( DEFAULT_SCHEDULING_STRATEGY_LARGE_ARGS ) large_args_threshold: int = DEFAULT_LARGE_ARGS_THRESHOLD use_polars: bool = DEFAULT_USE_POLARS use_polars_sort: bool = DEFAULT_USE_POLARS_SORT eager_free: bool = DEFAULT_EAGER_FREE decoding_size_estimation: bool = DEFAULT_DECODING_SIZE_ESTIMATION_ENABLED min_parallelism: int = DEFAULT_MIN_PARALLELISM read_op_min_num_blocks: int = DEFAULT_READ_OP_MIN_NUM_BLOCKS enable_tensor_extension_casting: bool = DEFAULT_ENABLE_TENSOR_EXTENSION_CASTING use_arrow_tensor_v2: bool = DEFAULT_USE_ARROW_TENSOR_V2 enable_fallback_to_arrow_object_ext_type: Optional[bool] = None enable_auto_log_stats: bool = DEFAULT_AUTO_LOG_STATS verbose_stats_logs: bool = DEFAULT_VERBOSE_STATS_LOG trace_allocations: bool = DEFAULT_TRACE_ALLOCATIONS execution_options: "ExecutionOptions" = field( default_factory=_execution_options_factory ) use_ray_tqdm: bool = DEFAULT_USE_RAY_TQDM enable_progress_bars: bool = DEFAULT_ENABLE_PROGRESS_BARS # By default, enable the progress bar for operator-level progress. # In __post_init__(), we disable operator-level progress # bars when running in a Ray job. 
enable_operator_progress_bars: bool = True enable_progress_bar_name_truncation: bool = ( DEFAULT_ENABLE_PROGRESS_BAR_NAME_TRUNCATION ) enable_rich_progress_bars: bool = DEFAULT_ENABLE_RICH_PROGRESS_BARS enable_get_object_locations_for_metrics: bool = ( DEFAULT_ENABLE_GET_OBJECT_LOCATIONS_FOR_METRICS ) write_file_retry_on_errors: List[str] = DEFAULT_WRITE_FILE_RETRY_ON_ERRORS warn_on_driver_memory_usage_bytes: int = DEFAULT_WARN_ON_DRIVER_MEMORY_USAGE_BYTES actor_task_retry_on_errors: Union[ bool, List[BaseException] ] = DEFAULT_ACTOR_TASK_RETRY_ON_ERRORS op_resource_reservation_enabled: bool = DEFAULT_ENABLE_OP_RESOURCE_RESERVATION op_resource_reservation_ratio: float = DEFAULT_OP_RESOURCE_RESERVATION_RATIO max_errored_blocks: int = DEFAULT_MAX_ERRORED_BLOCKS log_internal_stack_trace_to_stdout: bool = ( DEFAULT_LOG_INTERNAL_STACK_TRACE_TO_STDOUT ) raise_original_map_exception: bool = DEFAULT_RAY_DATA_RAISE_ORIGINAL_MAP_EXCEPTION print_on_execution_start: bool = True s3_try_create_dir: bool = DEFAULT_S3_TRY_CREATE_DIR # Timeout threshold (in seconds) for how long it should take for actors in the # Actor Pool to start up. Exceeding this threshold will lead to execution being # terminated with exception due to inability to secure min required capacity. # # Setting non-positive value here (ie <= 0) disables this functionality # (defaults to -1). wait_for_min_actors_s: int = DEFAULT_WAIT_FOR_MIN_ACTORS_S # This setting serves as a global override max_tasks_in_flight_per_actor: Optional[int] = None retried_io_errors: List[str] = field( default_factory=lambda: list(DEFAULT_RETRIED_IO_ERRORS) ) enable_per_node_metrics: bool = DEFAULT_ENABLE_PER_NODE_METRICS override_object_store_memory_limit_fraction: float = None memory_usage_poll_interval_s: Optional[float] = 1 dataset_logger_id: Optional[str] = None # This is a temporary workaround to allow actors to perform cleanup # until https://github.com/ray-project/ray/issues/53169 is fixed. 
# This hook is known to have a race condition bug in fault tolerance. # I.E., after the hook is triggered and the UDF is deleted, another # retry task may still be scheduled to this actor and it will fail. _enable_actor_pool_on_exit_hook: bool = False issue_detectors_config: "IssueDetectorsConfiguration" = field( default_factory=_issue_detectors_config_factory ) downstream_capacity_backpressure_ratio: float = None downstream_capacity_backpressure_max_queued_bundles: int = None enable_dynamic_output_queue_size_backpressure: bool = ( DEFAULT_ENABLE_DYNAMIC_OUTPUT_QUEUE_SIZE_BACKPRESSURE ) enforce_schemas: bool = DEFAULT_ENFORCE_SCHEMAS pandas_block_ignore_metadata: bool = DEFAULT_PANDAS_BLOCK_IGNORE_METADATA def __post_init__(self): # The additonal ray remote args that should be added to # the task-pool-based data tasks. self._task_pool_data_task_remote_args: Dict[str, Any] = {} # The extra key-value style configs. # These configs are managed by individual components or plugins via # `set_config`, `get_config` and `remove_config`. # The reason why we use a dict instead of individual fields is to decouple # the DataContext from the plugin implementations, as well as to avoid # circular dependencies. self._kv_configs: Dict[str, Any] = {} # Sync hash shuffle aggregator fields to its detector config self.issue_detectors_config.hash_shuffle_detector_config.detection_time_interval_s = ( self.hash_shuffle_aggregator_health_warning_interval_s ) self.issue_detectors_config.hash_shuffle_detector_config.min_wait_time_s = ( self.min_hash_shuffle_aggregator_wait_time_in_s ) self._max_num_blocks_in_streaming_gen_buffer = ( DEFAULT_MAX_NUM_BLOCKS_IN_STREAMING_GEN_BUFFER ) is_ray_job = os.environ.get("RAY_JOB_ID") is not None if is_ray_job: is_driver = ray.get_runtime_context().worker.mode != WORKER_MODE if is_driver and log_once( "ray_data_disable_operator_progress_bars_in_ray_jobs" ): logger.info( "Disabling operator-level progress bars by default in Ray Jobs. 
" "To enable progress bars for all operators, set " "`ray.data.DataContext.get_current()" ".enable_operator_progress_bars = True`." ) # Disable operator-level progress bars by default in Ray jobs. # The global progress bar for the overall Dataset execution will # still be enabled, unless the user also sets # `ray.data.DataContext.get_current().enable_progress_bars = False`. self.enable_operator_progress_bars = False else: # When not running in Ray job, operator-level progress # bars are enabled by default. self.enable_operator_progress_bars = True def __setattr__(self, name: str, value: Any) -> None: if ( name == "write_file_retry_on_errors" and value != DEFAULT_WRITE_FILE_RETRY_ON_ERRORS ): warnings.warn( "`write_file_retry_on_errors` is deprecated! Configure " "`retried_io_errors` instead.", DeprecationWarning, ) elif name == "use_push_based_shuffle": warnings.warn( "`use_push_based_shuffle` is deprecated! Configure " "`shuffle_strategy` instead.", DeprecationWarning, ) elif name == "target_shuffle_max_block_size": warnings.warn( "`target_shuffle_max_block_size` is deprecated! Configure `target_max_block_size` instead." ) self.target_max_block_size = value elif name == "use_polars": warnings.warn( "`use_polars` is deprecated, please configure " "`use_polars_sort` instead.", DeprecationWarning, ) self.use_polars_sort = value super().__setattr__(name, value) @staticmethod def get_current() -> "DataContext": """Get or create the current DataContext. When a Dataset is created, the current DataContext will be sealed. Changes to `DataContext.get_current()` will not impact existing Datasets. Examples: .. 
testcode:: import ray context = ray.data.DataContext.get_current() context.target_max_block_size = 100 * 1024 ** 2 ds1 = ray.data.range(1) context.target_max_block_size = 1 * 1024 ** 2 ds2 = ray.data.range(1) # ds1's target_max_block_size will be 100MB ds1.take_all() # ds2's target_max_block_size will be 1MB ds2.take_all() Developer notes: Avoid using `DataContext.get_current()` in data internal components, use the DataContext object captured in the Dataset and pass it around as arguments. """ global _default_context with _context_lock: if _default_context is None: _default_context = DataContext() return _default_context @staticmethod def _set_current(context: "DataContext") -> None: """Set the current context in a remote worker. This is used internally by Dataset to propagate the driver context to remote workers used for parallelization. """ global _default_context if ( not _default_context or _default_context.dataset_logger_id != context.dataset_logger_id ): update_dataset_logger_for_worker(context.dataset_logger_id) _default_context = context @property def shuffle_strategy(self) -> ShuffleStrategy: if self.use_push_based_shuffle: logger.warning( "`use_push_based_shuffle` is deprecated, please configure " "`shuffle_strategy` instead.", ) return ShuffleStrategy.SORT_SHUFFLE_PUSH_BASED return self._shuffle_strategy @shuffle_strategy.setter def shuffle_strategy(self, value: ShuffleStrategy) -> None: self._shuffle_strategy = value def get_config(self, key: str, default: Any = None) -> Any: """Get the value for a key-value style config. Args: key: The key of the config. default: The default value to return if the key is not found. Returns: The value for the key, or the default value if the key is not found. """ return self._kv_configs.get(key, default) def set_config(self, key: str, value: Any) -> None: """Set the value for a key-value style config. Args: key: The key of the config. value: The value of the config. 
""" self._kv_configs[key] = value def remove_config(self, key: str) -> None: """Remove a key-value style config. Args: key: The key of the config. """ self._kv_configs.pop(key, None) def copy(self) -> "DataContext": """Create a copy of the current DataContext.""" return copy.deepcopy(self) def set_dataset_logger_id(self, dataset_id: str) -> None: """Set the current dataset logger id. This is used internally to propagate the current dataset logger id to remote workers. """ self.dataset_logger_id = dataset_id # Backwards compatibility alias. DatasetContext = DataContext
DataContext
python
sphinx-doc__sphinx
tests/test_util/typing_test_data.py
{ "start": 1014, "end": 1907 }
class ____: def __repr__(self): return 'CustomAnnotation' def f11(x: CustomAnnotation(), y: 123) -> None: pass def f12() -> Tuple[int, str, int]: pass def f13() -> Optional[str]: pass def f14() -> Any: pass def f15(x: 'Unknown', y: 'int') -> Any: # NoQA: F821 # type: ignore[attr-defined] pass def f16(arg1, arg2, *, arg3=None, arg4=None): pass def f17(*, arg3, arg4): pass def f18(self, arg1: Union[int, Tuple] = 10) -> List[Dict]: pass def f19(*args: int, **kwargs: str): pass def f20() -> Optional[Union[int, str]]: pass def f21(arg1='whatever', arg2=Signature.empty): pass def f22(*, a, b): pass def f23(a, b, /, c, d): pass def f24(a, /, *, b): pass def f25(a, b, /): pass def f26(x: Literal[1, 2, 3] = 1, y: Union[Literal['a'], Literal['b']] = 'a') -> None: pass
CustomAnnotation
python
has2k1__plotnine
plotnine/themes/theme_light.py
{ "start": 140, "end": 1415 }
class ____(theme_gray): """ A theme similar to [](`~plotnine.themes.theme_linedraw.theme_linedraw`) Has light grey lines lines and axes to direct more attention towards the data. Parameters ---------- base_size : int Base font size. All text sizes are a scaled versions of the base font size. base_family : str Base font family. If `None`, use [](`plotnine.options.base_family`). """ def __init__(self, base_size=11, base_family=None): super().__init__(base_size, base_family) self += theme( axis_ticks=element_line(color="#B3B3B3", size=0.5), axis_ticks_minor=element_blank(), legend_key=element_rect(color="#7F7F7F", size=0.72), panel_background=element_rect(fill="white"), panel_border=element_rect(fill="none", color="#B3B3B3", size=1), panel_grid_major=element_line(color="#D9D9D9", size=0.5), panel_grid_minor=element_line(color="#EDEDED", size=0.25), strip_background=element_rect( fill="#B3B3B3", color="#B3B3B3", size=1 ), strip_text_x=element_text(color="white"), strip_text_y=element_text(color="white", angle=-90), )
theme_light
python
geekcomputers__Python
venv/Lib/site-packages/pip/_vendor/rich/panel.py
{ "start": 467, "end": 10705 }
class ____(JupyterMixin): """A console renderable that draws a border around its contents. Example: >>> console.print(Panel("Hello, World!")) Args: renderable (RenderableType): A console renderable object. box (Box, optional): A Box instance that defines the look of the border (see :ref:`appendix_box`. Defaults to box.ROUNDED. safe_box (bool, optional): Disable box characters that don't display on windows legacy terminal with *raster* fonts. Defaults to True. expand (bool, optional): If True the panel will stretch to fill the console width, otherwise it will be sized to fit the contents. Defaults to True. style (str, optional): The style of the panel (border and contents). Defaults to "none". border_style (str, optional): The style of the border. Defaults to "none". width (Optional[int], optional): Optional width of panel. Defaults to None to auto-detect. height (Optional[int], optional): Optional height of panel. Defaults to None to auto-detect. padding (Optional[PaddingDimensions]): Optional padding around renderable. Defaults to 0. highlight (bool, optional): Enable automatic highlighting of panel title (if str). Defaults to False. 
""" def __init__( self, renderable: "RenderableType", box: Box = ROUNDED, *, title: Optional[TextType] = None, title_align: AlignMethod = "center", subtitle: Optional[TextType] = None, subtitle_align: AlignMethod = "center", safe_box: Optional[bool] = None, expand: bool = True, style: StyleType = "none", border_style: StyleType = "none", width: Optional[int] = None, height: Optional[int] = None, padding: PaddingDimensions = (0, 1), highlight: bool = False, ) -> None: self.renderable = renderable self.box = box self.title = title self.title_align: AlignMethod = title_align self.subtitle = subtitle self.subtitle_align = subtitle_align self.safe_box = safe_box self.expand = expand self.style = style self.border_style = border_style self.width = width self.height = height self.padding = padding self.highlight = highlight @classmethod def fit( cls, renderable: "RenderableType", box: Box = ROUNDED, *, title: Optional[TextType] = None, title_align: AlignMethod = "center", subtitle: Optional[TextType] = None, subtitle_align: AlignMethod = "center", safe_box: Optional[bool] = None, style: StyleType = "none", border_style: StyleType = "none", width: Optional[int] = None, height: Optional[int] = None, padding: PaddingDimensions = (0, 1), highlight: bool = False, ) -> "Panel": """An alternative constructor that sets expand=False.""" return cls( renderable, box, title=title, title_align=title_align, subtitle=subtitle, subtitle_align=subtitle_align, safe_box=safe_box, style=style, border_style=border_style, width=width, height=height, padding=padding, highlight=highlight, expand=False, ) @property def _title(self) -> Optional[Text]: if self.title: title_text = ( Text.from_markup(self.title) if isinstance(self.title, str) else self.title.copy() ) title_text.end = "" title_text.plain = title_text.plain.replace("\n", " ") title_text.no_wrap = True title_text.expand_tabs() title_text.pad(1) return title_text return None @property def _subtitle(self) -> Optional[Text]: if 
self.subtitle: subtitle_text = ( Text.from_markup(self.subtitle) if isinstance(self.subtitle, str) else self.subtitle.copy() ) subtitle_text.end = "" subtitle_text.plain = subtitle_text.plain.replace("\n", " ") subtitle_text.no_wrap = True subtitle_text.expand_tabs() subtitle_text.pad(1) return subtitle_text return None def __rich_console__( self, console: "Console", options: "ConsoleOptions" ) -> "RenderResult": _padding = Padding.unpack(self.padding) renderable = ( Padding(self.renderable, _padding) if any(_padding) else self.renderable ) style = console.get_style(self.style) border_style = style + console.get_style(self.border_style) width = ( options.max_width if self.width is None else min(options.max_width, self.width) ) safe_box: bool = console.safe_box if self.safe_box is None else self.safe_box box = self.box.substitute(options, safe=safe_box) def align_text( text: Text, width: int, align: str, character: str, style: Style ) -> Text: """Gets new aligned text. Args: text (Text): Title or subtitle text. width (int): Desired width. align (str): Alignment. character (str): Character for alignment. 
style (Style): Border style Returns: Text: New text instance """ text = text.copy() text.truncate(width) excess_space = width - cell_len(text.plain) if excess_space: if align == "left": return Text.assemble( text, (character * excess_space, style), no_wrap=True, end="", ) elif align == "center": left = excess_space // 2 return Text.assemble( (character * left, style), text, (character * (excess_space - left), style), no_wrap=True, end="", ) else: return Text.assemble( (character * excess_space, style), text, no_wrap=True, end="", ) return text title_text = self._title if title_text is not None: title_text.stylize_before(border_style) child_width = ( width - 2 if self.expand else console.measure( renderable, options=options.update_width(width - 2) ).maximum ) child_height = self.height or options.height or None if child_height: child_height -= 2 if title_text is not None: child_width = min( options.max_width - 2, max(child_width, title_text.cell_len + 2) ) width = child_width + 2 child_options = options.update( width=child_width, height=child_height, highlight=self.highlight ) lines = console.render_lines(renderable, child_options, style=style) line_start = Segment(box.mid_left, border_style) line_end = Segment(f"{box.mid_right}", border_style) new_line = Segment.line() if title_text is None or width <= 4: yield Segment(box.get_top([width - 2]), border_style) else: title_text = align_text( title_text, width - 4, self.title_align, box.top, border_style, ) yield Segment(box.top_left + box.top, border_style) yield from console.render(title_text, child_options.update_width(width - 4)) yield Segment(box.top + box.top_right, border_style) yield new_line for line in lines: yield line_start yield from line yield line_end yield new_line subtitle_text = self._subtitle if subtitle_text is not None: subtitle_text.stylize_before(border_style) if subtitle_text is None or width <= 4: yield Segment(box.get_bottom([width - 2]), border_style) else: subtitle_text = align_text( 
subtitle_text, width - 4, self.subtitle_align, box.bottom, border_style, ) yield Segment(box.bottom_left + box.bottom, border_style) yield from console.render( subtitle_text, child_options.update_width(width - 4) ) yield Segment(box.bottom + box.bottom_right, border_style) yield new_line def __rich_measure__( self, console: "Console", options: "ConsoleOptions" ) -> "Measurement": _title = self._title _, right, _, left = Padding.unpack(self.padding) padding = left + right renderables = [self.renderable, _title] if _title else [self.renderable] if self.width is None: width = ( measure_renderables( console, options.update_width(options.max_width - padding - 2), renderables, ).maximum + padding + 2 ) else: width = self.width return Measurement(width, width) if __name__ == "__main__": # pragma: no cover from .console import Console c = Console() from .box import DOUBLE, ROUNDED from .padding import Padding p = Panel( "Hello, World!", title="rich.Panel", style="white on blue", box=DOUBLE, padding=1, ) c.print() c.print(p)
Panel
python
langchain-ai__langchain
libs/core/langchain_core/runnables/passthrough.py
{ "start": 20852, "end": 26230 }
class ____(RunnableSerializable[dict[str, Any], Any]):
    """`Runnable` that picks keys from `dict[str, Any]` inputs.

    `RunnablePick` class represents a `Runnable` that selectively picks keys from
    a dictionary input. It allows you to specify one or more keys to extract
    from the input dictionary.

    !!! note "Return Type Behavior"

        The return type depends on the `keys` parameter:

        - When `keys` is a `str`: Returns the single value associated with that key
        - When `keys` is a `list`: Returns a dictionary containing only the
          selected keys

    Example:
        ```python
        from langchain_core.runnables.passthrough import RunnablePick

        input_data = {
            "name": "John",
            "age": 30,
            "city": "New York",
            "country": "USA",
        }

        # Single key - returns the value directly
        runnable_single = RunnablePick(keys="name")
        result_single = runnable_single.invoke(input_data)
        print(result_single)  # Output: "John"

        # Multiple keys - returns a dictionary
        runnable_multiple = RunnablePick(keys=["name", "age"])
        result_multiple = runnable_multiple.invoke(input_data)
        print(result_multiple)  # Output: {'name': 'John', 'age': 30}
        ```
    """

    # A single key (returns the bare value) or a list of keys (returns an
    # AddableDict restricted to the keys that are present).
    keys: str | list[str]

    def __init__(self, keys: str | list[str], **kwargs: Any) -> None:
        """Create a `RunnablePick`.

        Args:
            keys: A single key or a list of keys to pick from the input
                dictionary.
        """
        super().__init__(keys=keys, **kwargs)

    @classmethod
    @override
    def is_lc_serializable(cls) -> bool:
        """Return `True` as this class is serializable."""
        return True

    @classmethod
    @override
    def get_lc_namespace(cls) -> list[str]:
        """Get the namespace of the LangChain object.

        Returns:
            `["langchain", "schema", "runnable"]`
        """
        return ["langchain", "schema", "runnable"]

    @override
    def get_name(self, suffix: str | None = None, *, name: str | None = None) -> str:
        """Return a display name including the picked keys, e.g. `RunnablePick<a,b>`."""
        name = (
            name
            or self.name
            or "RunnablePick"  # implicit concatenation with the f-string below
            f"<{','.join([self.keys] if isinstance(self.keys, str) else self.keys)}>"
        )
        return super().get_name(suffix, name=name)

    def _pick(self, value: dict[str, Any]) -> Any:
        """Extract the configured key(s) from `value`.

        Returns the bare value for a single string key (or `None` if absent);
        for a list of keys, returns an `AddableDict` of the keys that are
        present, or `None` when none of them are.

        Raises:
            ValueError: If `value` is not a dict.
        """
        if not isinstance(value, dict):
            # BUG FIX: the original message named RunnablePassthrough.assign(),
            # a copy-paste from that class; this error is raised by RunnablePick.
            msg = "The input to RunnablePick must be a dict."
            raise ValueError(msg)  # noqa: TRY004

        if isinstance(self.keys, str):
            return value.get(self.keys)

        picked = {k: value.get(k) for k in self.keys if k in value}
        if picked:
            return AddableDict(picked)
        return None

    def _invoke(
        self,
        value: dict[str, Any],
    ) -> dict[str, Any]:
        return self._pick(value)

    @override
    def invoke(
        self,
        input: dict[str, Any],
        config: RunnableConfig | None = None,
        **kwargs: Any,
    ) -> dict[str, Any]:
        return self._call_with_config(self._invoke, input, config, **kwargs)

    async def _ainvoke(
        self,
        value: dict[str, Any],
    ) -> dict[str, Any]:
        return self._pick(value)

    @override
    async def ainvoke(
        self,
        input: dict[str, Any],
        config: RunnableConfig | None = None,
        **kwargs: Any,
    ) -> dict[str, Any]:
        return await self._acall_with_config(self._ainvoke, input, config, **kwargs)

    def _transform(
        self,
        chunks: Iterator[dict[str, Any]],
    ) -> Iterator[dict[str, Any]]:
        # Chunks where none of the requested keys appear are dropped, not
        # forwarded as None.
        for chunk in chunks:
            picked = self._pick(chunk)
            if picked is not None:
                yield picked

    @override
    def transform(
        self,
        input: Iterator[dict[str, Any]],
        config: RunnableConfig | None = None,
        **kwargs: Any,
    ) -> Iterator[dict[str, Any]]:
        yield from self._transform_stream_with_config(
            input, self._transform, config, **kwargs
        )

    async def _atransform(
        self,
        chunks: AsyncIterator[dict[str, Any]],
    ) -> AsyncIterator[dict[str, Any]]:
        async for chunk in chunks:
            picked = self._pick(chunk)
            if picked is not None:
                yield picked

    @override
    async def atransform(
        self,
        input: AsyncIterator[dict[str, Any]],
        config: RunnableConfig | None = None,
        **kwargs: Any,
    ) -> AsyncIterator[dict[str, Any]]:
        async for chunk in self._atransform_stream_with_config(
            input, self._atransform, config, **kwargs
        ):
            yield chunk

    @override
    def stream(
        self,
        input: dict[str, Any],
        config: RunnableConfig | None = None,
        **kwargs: Any,
    ) -> Iterator[dict[str, Any]]:
        return self.transform(iter([input]), config, **kwargs)

    @override
    async def astream(
        self,
        input: dict[str, Any],
        config: RunnableConfig | None = None,
        **kwargs: Any,
    ) -> AsyncIterator[dict[str, Any]]:
        async def input_aiter() -> AsyncIterator[dict[str, Any]]:
            yield input

        async for chunk in self.atransform(input_aiter(), config, **kwargs):
            yield chunk
RunnablePick