language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 130062, "end": 130755 }
class ____(sgqlc.types.Input):
    """Autogenerated input type of AddLabelsToLabelable"""

    # NOTE: this class is autogenerated from the GitHub GraphQL schema; do not
    # hand-edit field definitions.
    __schema__ = github_schema
    __field_names__ = ("labelable_id", "label_ids", "client_mutation_id")

    labelable_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="labelableId")
    """The id of the labelable object to add labels to."""

    label_ids = sgqlc.types.Field(sgqlc.types.non_null(sgqlc.types.list_of(sgqlc.types.non_null(ID))), graphql_name="labelIds")
    """The ids of the labels to add."""

    client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
    """A unique identifier for the client performing the mutation."""
AddLabelsToLabelableInput
python
huggingface__transformers
src/transformers/models/xmod/configuration_xmod.py
{ "start": 848, "end": 7744 }
class ____(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`XmodModel`]. It is used to instantiate an X-MOD
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the
    [facebook/xmod-base](https://huggingface.co/facebook/xmod-base) architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 30522):
            Vocabulary size of the X-MOD model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`XmodModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        type_vocab_size (`int`, *optional*, defaults to 2):
            The vocabulary size of the `token_type_ids` passed when calling [`XmodModel`].
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        is_decoder (`bool`, *optional*, defaults to `False`):
            Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        classifier_dropout (`float`, *optional*):
            The dropout ratio for the classification head.
        pre_norm (`bool`, *optional*, defaults to `False`):
            Whether to apply layer normalization before each block.
        adapter_reduction_factor (`int` or `float`, *optional*, defaults to 2):
            The factor by which the dimensionality of the adapter is reduced relative to `hidden_size`.
        adapter_layer_norm (`bool`, *optional*, defaults to `False`):
            Whether to apply a new layer normalization before the adapter modules (shared across all adapters).
        adapter_reuse_layer_norm (`bool`, *optional*, defaults to `True`):
            Whether to reuse the second layer normalization and apply it before the adapter modules as well.
        ln_before_adapter (`bool`, *optional*, defaults to `True`):
            Whether to apply the layer normalization before the residual connection around the adapter module.
        languages (`Iterable[str]`, *optional*, defaults to `["en_XX"]`):
            An iterable of language codes for which adapter modules should be initialized.
        default_language (`str`, *optional*):
            Language code of a default language. It will be assumed that the input is in this language if no language
            codes are explicitly passed to the forward method.

    Examples:

    ```python
    >>> from transformers import XmodConfig, XmodModel

    >>> # Initializing an X-MOD facebook/xmod-base style configuration
    >>> configuration = XmodConfig()

    >>> # Initializing a model (with random weights) from the facebook/xmod-base style configuration
    >>> model = XmodModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        # Special-token ids are forwarded to the base config; remaining kwargs
        # cover generic PreTrainedConfig options (e.g. is_decoder).
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        # Copy into a list so a mutable, serializable container is stored even
        # when a tuple/iterator is passed.
        self.languages = list(languages)
        self.default_language = default_language


__all__ = ["XmodConfig"]
XmodConfig
python
python-pillow__Pillow
src/PIL/ImageShow.py
{ "start": 6555, "end": 7337 }
class ____(UnixViewer):
    """
    The ImageMagick ``display`` command.
    This viewer supports the ``title`` parameter.
    """

    def get_command_ex(
        self, file: str, title: str | None = None, **options: Any
    ) -> tuple[str, str]:
        # Build the shell command string used by the generic UnixViewer
        # machinery. The title is shell-quoted because this string may be
        # interpreted by a shell.
        command = executable = "display"
        if title:
            command += f" -title {quote(title)}"
        return command, executable

    def show_file(self, path: str, **options: Any) -> int:
        """
        Display given file.

        Args:
            path: Path of the image file to show.
            **options: May contain ``title``, used as the window title.

        Returns:
            ``1`` (the viewer process is launched asynchronously).

        Raises:
            FileNotFoundError: If ``path`` does not exist.
        """
        if not os.path.exists(path):
            # Include the offending path so callers can tell which file was
            # missing (previously a bare FileNotFoundError with no message).
            raise FileNotFoundError(path)
        args = ["display"]
        title = options.get("title")
        if title:
            # No shell quoting here: the argument list is passed directly to
            # the process, without a shell.
            args += ["-title", title]
        args.append(path)
        subprocess.Popen(args)
        return 1
DisplayViewer
python
weaviate__weaviate-python-client
weaviate/collections/aggregations/near_vector/executor.py
{ "start": 736, "end": 7569 }
class ____(Generic[ConnectionType], _BaseExecutor[ConnectionType]):
    # Typed overloads: the concrete return type depends on whether `group_by`
    # is provided (grouped vs. ungrouped aggregation result).
    @overload
    def near_vector(
        self,
        near_vector: NearVectorInputType,
        *,
        certainty: Optional[NUMBER] = None,
        distance: Optional[NUMBER] = None,
        object_limit: Optional[int] = None,
        filters: Optional[_Filters] = None,
        group_by: Literal[None] = None,
        target_vector: Optional[TargetVectorJoinType] = None,
        total_count: bool = True,
        return_metrics: Optional[PropertiesMetrics] = None,
    ) -> executor.Result[AggregateReturn]: ...

    @overload
    def near_vector(
        self,
        near_vector: NearVectorInputType,
        *,
        certainty: Optional[NUMBER] = None,
        distance: Optional[NUMBER] = None,
        object_limit: Optional[int] = None,
        filters: Optional[_Filters] = None,
        group_by: Union[str, GroupByAggregate],
        target_vector: Optional[TargetVectorJoinType] = None,
        total_count: bool = True,
        return_metrics: Optional[PropertiesMetrics] = None,
    ) -> executor.Result[AggregateGroupByReturn]: ...

    @overload
    def near_vector(
        self,
        near_vector: NearVectorInputType,
        *,
        certainty: Optional[NUMBER] = None,
        distance: Optional[NUMBER] = None,
        object_limit: Optional[int] = None,
        filters: Optional[_Filters] = None,
        group_by: Optional[Union[str, GroupByAggregate]] = None,
        target_vector: Optional[TargetVectorJoinType] = None,
        total_count: bool = True,
        return_metrics: Optional[PropertiesMetrics] = None,
    ) -> executor.Result[Union[AggregateReturn, AggregateGroupByReturn]]: ...

    def near_vector(
        self,
        near_vector: NearVectorInputType,
        *,
        certainty: Optional[NUMBER] = None,
        distance: Optional[NUMBER] = None,
        object_limit: Optional[int] = None,
        filters: Optional[_Filters] = None,
        group_by: Optional[Union[str, GroupByAggregate]] = None,
        target_vector: Optional[TargetVectorJoinType] = None,
        total_count: bool = True,
        return_metrics: Optional[PropertiesMetrics] = None,
    ) -> executor.Result[Union[AggregateReturn, AggregateGroupByReturn]]:
        """Aggregate metrics over the objects returned by a near vector search on this collection.

        At least one of `certainty`, `distance`, or `object_limit` must be specified here for the vector search.
        This method requires that the objects in the collection have associated vectors.

        Args:
            near_vector: The vector to search on.
            certainty: The minimum certainty of the vector search.
            distance: The maximum distance of the vector search.
            object_limit: The maximum number of objects to return from the vector search prior to the aggregation.
            filters: The filters to apply to the search.
            group_by: How to group the aggregation by.
            target_vector: The name(s) of the target vector(s) to search against.
            total_count: Whether to include the total number of objects that match the query in the response.
            return_metrics: A list of property metrics to aggregate together after the text search.

        Returns:
            Depending on the presence of the `group_by` argument, either a `AggregateReturn` object or a
            `AggregateGroupByReturn` that includes the aggregation objects.

        Raises:
            weaviate.exceptions.WeaviateQueryError: If an error occurs while performing the query against Weaviate.
            weaviate.exceptions.WeaviateInvalidInputError: If any of the input arguments are of the wrong type.
        """
        # Normalize a single metric into a list so downstream code can assume
        # `return_metrics` is either None or a list.
        return_metrics = (
            return_metrics
            if (return_metrics is None or isinstance(return_metrics, list))
            else [return_metrics]
        )
        if isinstance(group_by, str):
            group_by = GroupByAggregate(prop=group_by)

        if self._connection._weaviate_version.is_lower_than(1, 29, 0):
            # use gql, remove once 1.29 is the minimum supported version
            # NOTE(review): the error messages below say "<v1.28.4" while the
            # version gate checks 1.29.0 — looks inconsistent; verify intent.
            if not isinstance(near_vector, list):
                raise WeaviateInvalidInputError(
                    "A `near_vector` argument other than a list of float is not supported in <v1.28.4",
                )
            if isinstance(near_vector[0], list):
                raise WeaviateInvalidInputError(
                    "A `near_vector` argument other than a list of floats is not supported in <v1.28.4",
                )
            near_vector = cast(
                List[float], near_vector
            )  # pylance cannot type narrow the immediately above check
            if target_vector is not None and not isinstance(target_vector, str):
                raise WeaviateInvalidInputError(
                    "A `target_vector` argument other than a string is not supported in <v1.28.4",
                )

            def resp(res: dict) -> Union[AggregateReturn, AggregateGroupByReturn]:
                # Parse the GraphQL response into the matching result type.
                return (
                    self._to_aggregate_result(res, return_metrics)
                    if group_by is None
                    else self._to_group_by_result(res, return_metrics)
                )

            builder = self._base(return_metrics, filters, total_count)
            builder = self._add_groupby_to_builder(builder, group_by)
            builder = self._add_near_vector_to_builder(
                builder, near_vector, certainty, distance, object_limit, target_vector
            )
            return executor.execute(
                response_callback=resp,
                method=self._do,
                query=builder,
            )
        else:
            # use grpc
            request = self._grpc.near_vector(
                near_vector=near_vector,
                certainty=certainty,
                distance=distance,
                target_vector=target_vector,
                aggregations=(
                    [metric.to_grpc() for metric in return_metrics]
                    if return_metrics is not None
                    else []
                ),
                filters=_FilterToGRPC.convert(filters) if filters is not None else None,
                group_by=group_by._to_grpc() if group_by is not None else None,
                limit=group_by.limit if group_by is not None else None,
                objects_count=total_count,
                object_limit=object_limit,
            )

            def respGrpc(
                res: aggregate_pb2.AggregateReply,
            ) -> Union[AggregateReturn, AggregateGroupByReturn]:
                # Grouped vs. ungrouped is decided by the presence of group_by.
                return self._to_result(group_by is not None, res)

            return executor.execute(
                response_callback=respGrpc,
                method=self._connection.grpc_aggregate,
                request=request,
            )
_NearVectorExecutor
python
chroma-core__chroma
chromadb/utils/embedding_functions/google_embedding_function.py
{ "start": 282, "end": 5265 }
class ____(EmbeddingFunction[Documents]):
    def __init__(
        self,
        model_name: str,
        vertexai: Optional[bool] = None,
        project: Optional[str] = None,
        location: Optional[str] = None,
        api_key_env_var: str = "GOOGLE_API_KEY",
    ):
        """
        Initialize the GoogleGenaiEmbeddingFunction.

        Args:
            model_name (str): The name of the model to use for text embeddings.
            vertexai (bool, optional): Whether to route requests through Vertex AI
                instead of the public Gemini API. Passed through to `genai.Client`.
            project (str, optional): Google Cloud project id, passed to `genai.Client`.
            location (str, optional): Google Cloud location/region, passed to `genai.Client`.
            api_key_env_var (str, optional): Environment variable name that contains your API key for the
                Google GenAI API. Defaults to "GOOGLE_API_KEY".

        Raises:
            ValueError: If the `google-genai` package is not installed, or the
                API key environment variable is not set.
        """
        try:
            import google.genai as genai
        except ImportError:
            raise ValueError(
                "The google-genai python package is not installed. Please install it with `pip install google-genai`"
            )

        self.model_name = model_name
        self.api_key_env_var = api_key_env_var
        self.vertexai = vertexai
        self.project = project
        self.location = location
        # NOTE(review): the API key is required even when `vertexai` is set —
        # confirm that Vertex AI usage always has this env var available.
        self.api_key = os.getenv(self.api_key_env_var)
        if not self.api_key:
            raise ValueError(
                f"The {self.api_key_env_var} environment variable is not set."
            )
        self.client = genai.Client(
            api_key=self.api_key, vertexai=vertexai, project=project, location=location
        )

    def __call__(self, input: Documents) -> Embeddings:
        """
        Generate embeddings for the given documents.

        Args:
            input: Documents or images to generate embeddings for.

        Returns:
            Embeddings for the documents.

        Raises:
            ValueError: If the input is empty, not a list/tuple of strings,
                the API call fails, or the response is malformed.
        """
        if not input:
            raise ValueError("Input documents cannot be empty")
        if not isinstance(input, (list, tuple)):
            raise ValueError("Input must be a list or tuple of documents")
        if not all(isinstance(doc, str) for doc in input):
            raise ValueError("All input documents must be strings")

        try:
            response = self.client.models.embed_content(
                model=self.model_name, contents=input
            )
        except Exception as e:
            # Re-raise with context while chaining the original cause.
            raise ValueError(f"Failed to generate embeddings: {str(e)}") from e

        # Validate response structure
        if not hasattr(response, "embeddings") or not response.embeddings:
            raise ValueError("No embeddings returned from the API")

        embeddings_list = []
        for ce in response.embeddings:
            if not hasattr(ce, "values"):
                raise ValueError("Malformed embedding response: missing 'values'")
            # Chroma embeddings are float32 numpy arrays.
            embeddings_list.append(np.array(ce.values, dtype=np.float32))
        return cast(Embeddings, embeddings_list)

    @staticmethod
    def name() -> str:
        # Registry key used by build_from_config/validate_config.
        return "google_genai"

    def default_space(self) -> Space:
        return "cosine"

    def supported_spaces(self) -> List[Space]:
        return ["cosine", "l2", "ip"]

    @staticmethod
    def build_from_config(config: Dict[str, Any]) -> "EmbeddingFunction[Documents]":
        """Construct an instance from a persisted config dict."""
        model_name = config.get("model_name")
        vertexai = config.get("vertexai")
        project = config.get("project")
        location = config.get("location")
        if model_name is None:
            raise ValueError("The model name is required.")
        return GoogleGenaiEmbeddingFunction(
            model_name=model_name,
            vertexai=vertexai,
            project=project,
            location=location,
        )

    def get_config(self) -> Dict[str, Any]:
        # The API key itself is deliberately not persisted — only which env
        # var to read it from is implied by defaults.
        return {
            "model_name": self.model_name,
            "vertexai": self.vertexai,
            "project": self.project,
            "location": self.location,
        }

    def validate_config_update(
        self, old_config: Dict[str, Any], new_config: Dict[str, Any]
    ) -> None:
        """Reject updates to fields that are fixed after initialization."""
        if "model_name" in new_config:
            raise ValueError(
                "The model name cannot be changed after the embedding function has been initialized."
            )
        if "vertexai" in new_config:
            raise ValueError(
                "The vertexai cannot be changed after the embedding function has been initialized."
            )
        if "project" in new_config:
            raise ValueError(
                "The project cannot be changed after the embedding function has been initialized."
            )
        if "location" in new_config:
            raise ValueError(
                "The location cannot be changed after the embedding function has been initialized."
            )

    @staticmethod
    def validate_config(config: Dict[str, Any]) -> None:
        """
        Validate the configuration using the JSON schema.

        Args:
            config: Configuration to validate

        Raises:
            ValidationError: If the configuration does not match the schema
        """
        validate_config_schema(config, "google_genai")
GoogleGenaiEmbeddingFunction
python
numba__numba
numba/cuda/models.py
{ "start": 372, "end": 649 }
class ____(models.StructModel): def __init__(self, dmm, fe_type): members = [ ('x', types.int32), ('y', types.int32), ('z', types.int32) ] super().__init__(dmm, fe_type, members) @register_model(GridGroup)
Dim3Model
python
google__pytype
pytype/tools/xref/indexer.py
{ "start": 9556, "end": 11766 }
class ____: """A collection of namespaced symbols.""" def __init__(self, ast, scope, parent, cls): """Initialize an environment. Arguments: ast: An ast module scope: The namespace key (e.g. module:class A:function f) parent: The env of the directly enclosing namespace cls: The class currently being defined (None if we are not in a class definition) Other attributes defined: env: The dictionary holding the symbol table for this environment attrs: Attributes defined on the current class self_var: The `self` variable in method definitions ret: The `return` variable for functions """ self.ast = ast self.scope = scope self.parent = parent self.cls = cls self.env = {} self.attrs = None self.self_var = parent and parent.self_var self.ret = None def lookup(self, symbol): if symbol in self.env: return (self, self.env[symbol]) elif self.parent: return self.parent.lookup(symbol) else: return (None, None) def __getitem__(self, symbol): return self.lookup(symbol)[1] def __setitem__(self, symbol, value): self.env[symbol] = value def is_self_attr(self, node): if not self.self_var or not isinstance(node, self.ast.Attribute): return False if isinstance(node.value, self.ast.Name): name = node.value.id else: name = node.value return name == self.self_var.name def getattr(self, attrib): if self.attrs is not None and attrib in self.attrs: return self.attrs[attrib] elif self.cls and self.cls.scope != self.scope: return self.cls.getattr(attrib) else: raise AttrError("called getattr in non-class context") def setattr(self, attrib, value): if self.attrs is not None: self.attrs[attrib] = value elif self.cls is not None: return self.cls.setattr(attrib, value) else: raise AttrError("called setattr in non-class context") # pylint: disable=invalid-name # pylint: disable=missing-docstring # # Visitors use generated method names that don't follow the pylint spec. # Also names like visit_Name are self-documenting and do not need docstrings.
Env
python
rapidsai__cudf
python/cudf_polars/cudf_polars/experimental/sort.py
{ "start": 10487, "end": 14835 }
class ____:  # pragma: no cover
    """cuDF-Polars protocol for rapidsmpf shuffler."""

    @staticmethod
    def insert_partition(
        df: DataFrame,
        partition_id: int,
        partition_count: int,
        shuffler: Any,
        options: SortedShuffleOptions,
        sort_boundaries: DataFrame,
    ) -> None:
        """Add cudf-polars DataFrame chunks to an RMP shuffler."""
        from rapidsmpf.integrations.cudf.partition import split_and_pack

        # Worker context lookup differs between the dask cluster backend and
        # the single-process backend.
        if options["cluster_kind"] == "dask":
            from rapidsmpf.integrations.dask import get_worker_context
        else:
            from rapidsmpf.integrations.single import get_worker_context

        context = get_worker_context()
        by = options["by"]
        # Join the input streams so the split sees completed work from both
        # the frame and the boundary computation.
        stream = get_joined_cuda_stream(
            get_dask_cuda_stream, upstreams=(df.stream, sort_boundaries.stream)
        )
        # Row offsets at which this chunk must be split so each piece lands in
        # the correct output partition.
        splits = find_sort_splits(
            df.select(by).table,
            sort_boundaries.table,
            partition_id,
            options["order"],
            options["null_order"],
            stream=stream,
        )
        packed_inputs = split_and_pack(
            df.table,
            splits=splits,
            br=context.br,
            stream=stream,
        )
        # TODO: figure out handoff with rapidsmpf
        # https://github.com/rapidsai/cudf/issues/20337
        shuffler.insert_chunks(packed_inputs)

    @staticmethod
    def extract_partition(
        partition_id: int,
        shuffler: Any,
        options: SortedShuffleOptions,
    ) -> DataFrame:
        """Extract a finished partition from the RMP shuffler."""
        from rapidsmpf.integrations.cudf.partition import (
            unpack_and_concat,
            unspill_partitions,
        )

        if options["cluster_kind"] == "dask":
            from rapidsmpf.integrations.dask import get_worker_context
        else:
            from rapidsmpf.integrations.single import get_worker_context

        context = get_worker_context()
        # Block until all chunks for this partition have arrived.
        shuffler.wait_on(partition_id)
        column_names = options["column_names"]
        column_dtypes = options["column_dtypes"]
        stream = DEFAULT_STREAM
        # TODO: When sorting, this step should finalize with a merge (unless we
        # require stability, as cudf merge is not stable).
        # TODO: figure out handoff with rapidsmpf
        # https://github.com/rapidsai/cudf/issues/20337
        return DataFrame.from_table(
            unpack_and_concat(
                unspill_partitions(
                    shuffler.extract(partition_id),
                    br=context.br,
                    allow_overbooking=True,
                    statistics=context.statistics,
                ),
                br=context.br,
                stream=stream,
            ),
            column_names,
            column_dtypes,
            stream=stream,
        )


def _sort_partition_dataframe(
    df: DataFrame,
    partition_id: int,  # Not currently used
    partition_count: int,
    options: MutableMapping[str, Any],
    sort_boundaries: DataFrame,
) -> MutableMapping[int, DataFrame]:
    """
    Partition a sorted DataFrame for shuffling.

    Parameters
    ----------
    df
        The DataFrame to partition.
    partition_id
        The partition id of the current partition.
    partition_count
        The total number of partitions.
    options
        The sort options ``(by, order, null_order)``.
    sort_boundaries
        The global sort boundary candidates used to decide where to split.

    Returns
    -------
    Mapping from output-partition index to the corresponding DataFrame slice.
    """
    if df.num_rows == 0:  # pragma: no cover
        # Fast path for empty DataFrame
        return dict.fromkeys(range(partition_count), df)

    stream = get_joined_cuda_stream(
        get_dask_cuda_stream, upstreams=(df.stream, sort_boundaries.stream)
    )
    splits = find_sort_splits(
        df.select(options["by"]).table,
        sort_boundaries.table,
        partition_id,
        options["order"],
        options["null_order"],
        stream=stream,
    )

    # Split and return the partitioned result
    return {
        i: DataFrame.from_table(
            split,
            df.column_names,
            df.dtypes,
            stream=df.stream,
        )
        for i, split in enumerate(plc.copying.split(df.table, splits, stream=stream))
    }
RMPFIntegrationSortedShuffle
python
doocs__leetcode
solution/1900-1999/1999.Smallest Greater Multiple Made of Two Digits/Solution.py
{ "start": 0, "end": 528 }
class ____: def findInteger(self, k: int, digit1: int, digit2: int) -> int: if digit1 == 0 and digit2 == 0: return -1 if digit1 > digit2: return self.findInteger(k, digit2, digit1) q = deque([0]) while 1: x = q.popleft() if x > 2**31 - 1: return -1 if x > k and x % k == 0: return x q.append(x * 10 + digit1) if digit1 != digit2: q.append(x * 10 + digit2)
Solution
python
django__django
tests/admin_views/models.py
{ "start": 17610, "end": 18026 }
class ____(models.Model):
    # Poll-style question used by the admin_views test suite; exercises a
    # BigAutoField primary key, a self-referential M2M and a unique UUID.
    big_id = models.BigAutoField(primary_key=True)
    question = models.CharField(max_length=20)
    # Defaults to "today" at row creation time (callable default, not import time).
    posted = models.DateField(default=datetime.date.today)
    expires = models.DateTimeField(null=True, blank=True)
    # Symmetrical self-referential many-to-many.
    related_questions = models.ManyToManyField("self")
    uuid = models.UUIDField(default=uuid.uuid4, unique=True)

    def __str__(self):
        return self.question
Question
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/links/dataplex.py
{ "start": 4347, "end": 4585 }
class ____(BaseGoogleLink):
    """Helper class for constructing Dataplex Catalog Entry link."""

    # Human-readable link label shown in the UI.
    name = "Dataplex Catalog Entry"
    # XCom key under which the link parameters are stored.
    key = "dataplex_catalog_entry_key"
    # URL template filled in with the link parameters by BaseGoogleLink.
    format_str = DATAPLEX_CATALOG_ENTRY_LINK
DataplexCatalogEntryLink
python
pytorch__pytorch
torch/_inductor/template_heuristics/triton.py
{ "start": 83062, "end": 83605 }
class ____(
    ScaledBlackwellTMAConfigMixin, CUDAConfigHeuristic
):
    """Scaled Blackwell TMA template heuristic for CUDA"""

    def __init__(self) -> None:
        super().__init__()
        # Override mm_configs to use scaled_persistent_mm_configs for TMA
        # TODO: Tune scaled_persistent_mm_configs for Blackwell
        self.mm_configs = self.scaled_persistent_mm_configs


# NOTE: this decorator applies to the next definition (outside this chunk);
# it registers the heuristic for CUDA only (skipped on ROCm/HIP builds).
@register_template_heuristic(
    mm_plus_mm_template.uid,
    "cuda",
    register=torch.version.hip is None,
)
CUDAScaledBlackwellTMATemplateConfigHeuristic
python
openai__openai-python
tests/api_resources/test_batches.py
{ "start": 429, "end": 6382 }
class ____:
    # Run every test against both a "loose" and a "strict" client; the client
    # fixture is selected via indirect parametrization.
    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])

    # --- create -----------------------------------------------------------

    @parametrize
    def test_method_create(self, client: OpenAI) -> None:
        batch = client.batches.create(
            completion_window="24h",
            endpoint="/v1/responses",
            input_file_id="string",
        )
        assert_matches_type(Batch, batch, path=["response"])

    @parametrize
    def test_method_create_with_all_params(self, client: OpenAI) -> None:
        batch = client.batches.create(
            completion_window="24h",
            endpoint="/v1/responses",
            input_file_id="string",
            metadata={"foo": "string"},
            output_expires_after={
                "anchor": "created_at",
                "seconds": 3600,
            },
        )
        assert_matches_type(Batch, batch, path=["response"])

    @parametrize
    def test_raw_response_create(self, client: OpenAI) -> None:
        response = client.batches.with_raw_response.create(
            completion_window="24h",
            endpoint="/v1/responses",
            input_file_id="string",
        )

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        batch = response.parse()
        assert_matches_type(Batch, batch, path=["response"])

    @parametrize
    def test_streaming_response_create(self, client: OpenAI) -> None:
        with client.batches.with_streaming_response.create(
            completion_window="24h",
            endpoint="/v1/responses",
            input_file_id="string",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"

            batch = response.parse()
            assert_matches_type(Batch, batch, path=["response"])

        # Streaming responses are closed when the context manager exits.
        assert cast(Any, response.is_closed) is True

    # --- retrieve ---------------------------------------------------------

    @parametrize
    def test_method_retrieve(self, client: OpenAI) -> None:
        batch = client.batches.retrieve(
            "string",
        )
        assert_matches_type(Batch, batch, path=["response"])

    @parametrize
    def test_raw_response_retrieve(self, client: OpenAI) -> None:
        response = client.batches.with_raw_response.retrieve(
            "string",
        )

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        batch = response.parse()
        assert_matches_type(Batch, batch, path=["response"])

    @parametrize
    def test_streaming_response_retrieve(self, client: OpenAI) -> None:
        with client.batches.with_streaming_response.retrieve(
            "string",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"

            batch = response.parse()
            assert_matches_type(Batch, batch, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    def test_path_params_retrieve(self, client: OpenAI) -> None:
        # An empty batch id must be rejected client-side.
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"):
            client.batches.with_raw_response.retrieve(
                "",
            )

    # --- list -------------------------------------------------------------

    @parametrize
    def test_method_list(self, client: OpenAI) -> None:
        batch = client.batches.list()
        assert_matches_type(SyncCursorPage[Batch], batch, path=["response"])

    @parametrize
    def test_method_list_with_all_params(self, client: OpenAI) -> None:
        batch = client.batches.list(
            after="string",
            limit=0,
        )
        assert_matches_type(SyncCursorPage[Batch], batch, path=["response"])

    @parametrize
    def test_raw_response_list(self, client: OpenAI) -> None:
        response = client.batches.with_raw_response.list()

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        batch = response.parse()
        assert_matches_type(SyncCursorPage[Batch], batch, path=["response"])

    @parametrize
    def test_streaming_response_list(self, client: OpenAI) -> None:
        with client.batches.with_streaming_response.list() as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"

            batch = response.parse()
            assert_matches_type(SyncCursorPage[Batch], batch, path=["response"])

        assert cast(Any, response.is_closed) is True

    # --- cancel -----------------------------------------------------------

    @parametrize
    def test_method_cancel(self, client: OpenAI) -> None:
        batch = client.batches.cancel(
            "string",
        )
        assert_matches_type(Batch, batch, path=["response"])

    @parametrize
    def test_raw_response_cancel(self, client: OpenAI) -> None:
        response = client.batches.with_raw_response.cancel(
            "string",
        )

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        batch = response.parse()
        assert_matches_type(Batch, batch, path=["response"])

    @parametrize
    def test_streaming_response_cancel(self, client: OpenAI) -> None:
        with client.batches.with_streaming_response.cancel(
            "string",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"

            batch = response.parse()
            assert_matches_type(Batch, batch, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    def test_path_params_cancel(self, client: OpenAI) -> None:
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"):
            client.batches.with_raw_response.cancel(
                "",
            )
TestBatches
python
PrefectHQ__prefect
src/prefect/server/utilities/messaging/memory.py
{ "start": 2371, "end": 6513 }
class ____: """ A subscription to a topic. Messages are delivered to the subscription's queue and retried up to a maximum number of times. If a message cannot be delivered after the maximum number of retries it is moved to the dead letter queue. The dead letter queue is a directory of JSON files containing the serialized message. Messages remain in the dead letter queue until they are removed manually. Attributes: topic: The topic that the subscription receives messages from. max_retries: The maximum number of times a message will be retried for this subscription. dead_letter_queue_path: The path to the dead letter queue folder. """ def __init__( self, topic: "Topic", max_retries: int = 3, dead_letter_queue_path: Path | str | None = None, ) -> None: self.topic = topic self.max_retries = max_retries self.dead_letter_queue_path: Path = ( Path(dead_letter_queue_path) if dead_letter_queue_path else get_current_settings().home / "dlq" ) self._queue: asyncio.Queue[MemoryMessage] = asyncio.Queue(maxsize=10000) self._retry: asyncio.Queue[MemoryMessage] = asyncio.Queue(maxsize=1000) async def deliver(self, message: MemoryMessage) -> None: """ Deliver a message to the subscription's queue. Args: message: The message to deliver. """ try: self._queue.put_nowait(message) await update_metric(self.topic.name, "published") logger.debug( "Delivered message to topic=%r queue_size=%d retry_queue_size=%d", self.topic.name, self._queue.qsize(), self._retry.qsize(), ) except asyncio.QueueFull: logger.warning( "Subscription queue is full, dropping message for topic=%r queue_size=%d retry_queue_size=%d", self.topic.name, self._queue.qsize(), self._retry.qsize(), ) async def retry(self, message: MemoryMessage) -> None: """ Place a message back on the retry queue. If the message has retried more than the maximum number of times it is moved to the dead letter queue. Args: message: The message to retry. 
""" message.retry_count += 1 if message.retry_count > self.max_retries: logger.warning( "Message failed after %d retries and will be moved to the dead letter queue", message.retry_count, extra={"event_message": message}, ) await self.send_to_dead_letter_queue(message) else: await self._retry.put(message) await update_metric(self.topic.name, "retried") logger.debug( "Retried message on topic=%r retry_count=%d queue_size=%d retry_queue_size=%d", self.topic.name, message.retry_count, self._queue.qsize(), self._retry.qsize(), ) async def get(self) -> MemoryMessage: """ Get a message from the subscription's queue. """ if not self._retry.empty(): return await self._retry.get() return await self._queue.get() async def send_to_dead_letter_queue(self, message: MemoryMessage) -> None: """ Send a message to the dead letter queue. The dead letter queue is a directory of JSON files containing the serialized messages. Args: message: The message to send to the dead letter queue. """ self.dead_letter_queue_path.mkdir(parents=True, exist_ok=True) try: await anyio.Path(self.dead_letter_queue_path / uuid4().hex).write_bytes( to_json(asdict(message)) ) except Exception as e: logger.warning("Failed to write message to dead letter queue", exc_info=e)
Subscription
python
facelessuser__soupsieve
tests/test_level3/test_disabled.py
{ "start": 53, "end": 4976 }
class ____(util.TestCase): """Test disabled selectors.""" MARKUP = """ <body> <form action="#"> <fieldset id='a' disabled> <legend> Simple fieldset <input type="radio" id="1" checked> <fieldset id='b' disabled> <legend>Simple fieldset <input type="radio" id="2" checked></legend> <input type="radio" id="3" checked> <label for="radio">radio</label> </fieldset> </legend> <fieldset id='c' disabled> <legend>Simple fieldset <input type="radio" id="4" checked></legend> <input type="radio" id="5" checked> <label for="radio">radio</label> </fieldset> <input type="radio" id="6" checked> <label for="radio">radio</label> </fieldset> <optgroup> <option id="7" disabled>option</option> </optgroup> <optgroup id="8" disabled> <option id="9">option</option> </optgroup> </form> </body> """ MARKUP_NESTED = """ <body> <form action="#"> <fieldset id='a' disabled> <legend> Simple fieldset <input type="radio" id="1" checked> <fieldset id='b'> <legend>Simple fieldset <input type="radio" id="2" checked></legend> <input type="radio" id="3" checked> <label for="radio">radio</label> </fieldset> </legend> <fieldset id='c' disabled> <legend>Simple fieldset <input type="radio" id="4" checked></legend> <input type="radio" id="5" checked> <label for="radio">radio</label> </fieldset> <input type="radio" id="6" checked> <label for="radio">radio</label> </fieldset> <optgroup> <option id="7" disabled>option</option> </optgroup> <optgroup id="8" disabled> <option id="9">option</option> </optgroup> </form> </body> """ def test_disabled_html5(self): """ Test disabled for HTML5 parser. Form elements that have `disabled`. `option` that is child of disabled `optgroup`. Form elements that are children of a disabled `fieldset`, but not it's `legend`. """ self.assert_selector( self.MARKUP, ":disabled", ['3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c'], flags=util.HTML5 ) def test_disabled_lxml(self): """ Test disabled for `lxml` HTML parser. Form elements that have `disabled`. 
`option` that is child of disabled `optgroup`. Form elements that are children of a disabled `fieldset`, but not it's `legend`. """ self.assert_selector( self.MARKUP, ":disabled", ['2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c'], flags=util.LXML_HTML ) def test_disabled_python(self): """ Test disabled for the built-in HTML parser. Form elements that have `disabled`. `option` that is child of disabled `optgroup`. Form elements that are children of a disabled `fieldset`, but not it's `legend`. """ self.assert_selector( self.MARKUP, ":disabled", ['3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c'], flags=util.PYHTML ) def test_disabled_with_nested_disabled_form_html5(self): """Test disabled (with nested disabled forms) in the built-in HTML parser.""" self.assert_selector( self.MARKUP_NESTED, ":disabled", ['4', '5', '6', '7', '8', '9', 'a', 'c'], flags=util.HTML5 ) def test_disabled_with_nested_disabled_form_lxml(self): """Test disabled (with nested disabled forms) in the `lxml` HTML parser.""" self.assert_selector( self.MARKUP_NESTED, ":disabled", ['2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c'], flags=util.LXML_HTML ) def test_disabled_with_nested_disabled_form_python(self): """Test disabled (with nested disabled forms) the built-in HTML parser.""" self.assert_selector( self.MARKUP_NESTED, ":disabled", ['4', '5', '6', '7', '8', '9', 'a', 'c'], flags=util.PYHTML ) def test_disabled_with_nested_optgroup(self): """Test `:disabled` only selects `option` elements whose closest `optgroup` parent is disabled.""" self.assert_selector( """ <optgroup id="0" disabled> <option id="1"></option> <optgroup id="3"> <option id="4"></option> </optgroup> </optgroup> """, ":disabled", ['0', '1'], flags=util.HTML )
TestDisabled
python
geekcomputers__Python
venv/Lib/site-packages/pip/_vendor/rich/progress_bar.py
{ "start": 458, "end": 8156 }
class ____(JupyterMixin): """Renders a (progress) bar. Used by rich.progress. Args: total (float, optional): Number of steps in the bar. Defaults to 100. Set to None to render a pulsing animation. completed (float, optional): Number of steps completed. Defaults to 0. width (int, optional): Width of the bar, or ``None`` for maximum width. Defaults to None. pulse (bool, optional): Enable pulse effect. Defaults to False. Will pulse if a None total was passed. style (StyleType, optional): Style for the bar background. Defaults to "bar.back". complete_style (StyleType, optional): Style for the completed bar. Defaults to "bar.complete". finished_style (StyleType, optional): Style for a finished bar. Defaults to "bar.finished". pulse_style (StyleType, optional): Style for pulsing bars. Defaults to "bar.pulse". animation_time (Optional[float], optional): Time in seconds to use for animation, or None to use system time. """ def __init__( self, total: Optional[float] = 100.0, completed: float = 0, width: Optional[int] = None, pulse: bool = False, style: StyleType = "bar.back", complete_style: StyleType = "bar.complete", finished_style: StyleType = "bar.finished", pulse_style: StyleType = "bar.pulse", animation_time: Optional[float] = None, ): self.total = total self.completed = completed self.width = width self.pulse = pulse self.style = style self.complete_style = complete_style self.finished_style = finished_style self.pulse_style = pulse_style self.animation_time = animation_time self._pulse_segments: Optional[List[Segment]] = None def __repr__(self) -> str: return f"<Bar {self.completed!r} of {self.total!r}>" @property def percentage_completed(self) -> Optional[float]: """Calculate percentage complete.""" if self.total is None: return None completed = (self.completed / self.total) * 100.0 completed = min(100, max(0.0, completed)) return completed @lru_cache(maxsize=16) def _get_pulse_segments( self, fore_style: Style, back_style: Style, color_system: str, no_color: bool, 
ascii: bool = False, ) -> List[Segment]: """Get a list of segments to render a pulse animation. Returns: List[Segment]: A list of segments, one segment per character. """ bar = "-" if ascii else "━" segments: List[Segment] = [] if color_system not in ("standard", "eight_bit", "truecolor") or no_color: segments += [Segment(bar, fore_style)] * (PULSE_SIZE // 2) segments += [Segment(" " if no_color else bar, back_style)] * ( PULSE_SIZE - (PULSE_SIZE // 2) ) return segments append = segments.append fore_color = ( fore_style.color.get_truecolor() if fore_style.color else ColorTriplet(255, 0, 255) ) back_color = ( back_style.color.get_truecolor() if back_style.color else ColorTriplet(0, 0, 0) ) cos = math.cos pi = math.pi _Segment = Segment _Style = Style from_triplet = Color.from_triplet for index in range(PULSE_SIZE): position = index / PULSE_SIZE fade = 0.5 + cos((position * pi * 2)) / 2.0 color = blend_rgb(fore_color, back_color, cross_fade=fade) append(_Segment(bar, _Style(color=from_triplet(color)))) return segments def update(self, completed: float, total: Optional[float] = None) -> None: """Update progress with new values. Args: completed (float): Number of steps completed. total (float, optional): Total number of steps, or ``None`` to not change. Defaults to None. """ self.completed = completed self.total = total if total is not None else self.total def _render_pulse( self, console: Console, width: int, ascii: bool = False ) -> Iterable[Segment]: """Renders the pulse animation. Args: console (Console): Console instance. width (int): Width in characters of pulse animation. 
Returns: RenderResult: [description] Yields: Iterator[Segment]: Segments to render pulse """ fore_style = console.get_style(self.pulse_style, default="white") back_style = console.get_style(self.style, default="black") pulse_segments = self._get_pulse_segments( fore_style, back_style, console.color_system, console.no_color, ascii=ascii ) segment_count = len(pulse_segments) current_time = ( monotonic() if self.animation_time is None else self.animation_time ) segments = pulse_segments * (int(width / segment_count) + 2) offset = int(-current_time * 15) % segment_count segments = segments[offset : offset + width] yield from segments def __rich_console__( self, console: Console, options: ConsoleOptions ) -> RenderResult: width = min(self.width or options.max_width, options.max_width) ascii = options.legacy_windows or options.ascii_only should_pulse = self.pulse or self.total is None if should_pulse: yield from self._render_pulse(console, width, ascii=ascii) return completed: Optional[float] = ( min(self.total, max(0, self.completed)) if self.total is not None else None ) bar = "-" if ascii else "━" half_bar_right = " " if ascii else "╸" half_bar_left = " " if ascii else "╺" complete_halves = ( int(width * 2 * completed / self.total) if self.total and completed is not None else width * 2 ) bar_count = complete_halves // 2 half_bar_count = complete_halves % 2 style = console.get_style(self.style) is_finished = self.total is None or self.completed >= self.total complete_style = console.get_style( self.finished_style if is_finished else self.complete_style ) _Segment = Segment if bar_count: yield _Segment(bar * bar_count, complete_style) if half_bar_count: yield _Segment(half_bar_right * half_bar_count, complete_style) if not console.no_color: remaining_bars = width - bar_count - half_bar_count if remaining_bars and console.color_system is not None: if not half_bar_count and bar_count: yield _Segment(half_bar_left, style) remaining_bars -= 1 if remaining_bars: yield 
_Segment(bar * remaining_bars, style) def __rich_measure__( self, console: Console, options: ConsoleOptions ) -> Measurement: return ( Measurement(self.width, self.width) if self.width is not None else Measurement(4, options.max_width) ) if __name__ == "__main__": # pragma: no cover console = Console() bar = ProgressBar(width=50, total=100) import time console.show_cursor(False) for n in range(0, 101, 1): bar.update(n) console.print(bar) console.file.write("\r") time.sleep(0.05) console.show_cursor(True) console.print()
ProgressBar
python
django-import-export__django-import-export
tests/core/tests/test_widgets.py
{ "start": 10126, "end": 10202 }
class ____(time): """test derived instance of time""" pass
CustomTime
python
dask__dask
dask/dataframe/dask_expr/io/io.py
{ "start": 5561, "end": 7463 }
class ____(FusedIO): _parameters = ["_expr"] @functools.cached_property def _name(self): return ( funcname(type(self.operand("_expr"))).lower() + "-fused-parq-" + self.deterministic_token ) @staticmethod def _load_multiple_files( frag_filters, columns, schema, **to_pandas_kwargs, ): from dask.dataframe.dask_expr.io.parquet import ReadParquetPyarrowFS tables = ( ReadParquetPyarrowFS._fragment_to_table( frag, filter, columns, schema, ) for frag, filter in frag_filters ) table = pa.concat_tables(tables, promote_options="permissive") return ReadParquetPyarrowFS._table_to_pandas(table, **to_pandas_kwargs) def _task(self, name: str, index: int) -> Task: # type: ignore[override] expr = self.operand("_expr") bucket = self._fusion_buckets[index] fragments_filters = [] assert bucket for i in bucket: subtask = expr._filtered_task(name, i) # This is unique / same for all tasks to_pandas_kwargs = subtask.kwargs assert len(subtask.args) == 1 frag_to_table_task = subtask.args[0] fragments_filters.append( ( frag_to_table_task.kwargs["fragment_wrapper"], frag_to_table_task.kwargs["filters"], ) ) columns = frag_to_table_task.kwargs["columns"] schema = frag_to_table_task.kwargs["schema"] return Task( name, self._load_multiple_files, fragments_filters, columns, schema, **to_pandas_kwargs, _data_producer=True, )
FusedParquetIO
python
takluyver__flit
flit/__init__.py
{ "start": 342, "end": 8439 }
class ____(FileNotFoundError): pass def find_python_executable(python: Optional[str] = None) -> str: """Returns an absolute filepath to the executable of Python to use.""" if not python: python = os.environ.get("FLIT_INSTALL_PYTHON") if not python: return sys.executable if os.path.isdir(python): # Assume it's a virtual environment and look for the environment's # Python executable. This is the same behavior used by pip. # # Try both Unix and Windows paths in case of odd cases like cygwin. for exe in ("bin/python", "Scripts/python.exe"): py = os.path.join(python, exe) if os.path.exists(py): return os.path.abspath(py) if os.path.isabs(python): # sys.executable is absolute too return python # get absolute filepath of {python} # shutil.which may give a different result to the raw subprocess call # see https://github.com/pypa/flit/pull/300 and https://bugs.python.org/issue38905 resolved_python = shutil.which(python) if resolved_python is None: raise PythonNotFoundError(f"Unable to resolve Python executable {python!r}") try: return subprocess.check_output( [resolved_python, "-c", "import sys; print(sys.executable)"], universal_newlines=True, ).strip() except Exception as e: raise PythonNotFoundError( f"{e.__class__.__name__} occurred trying to find the absolute filepath " f"of Python executable {python!r} ({resolved_python!r})" ) from e def add_shared_install_options(parser: argparse.ArgumentParser): parser.add_argument('--user', action='store_true', default=None, help="Do a user-local install (default if site.ENABLE_USER_SITE is True)" ) parser.add_argument('--env', action='store_false', dest='user', help="Install into sys.prefix (default if site.ENABLE_USER_SITE is False, i.e. in virtualenvs)" ) parser.add_argument('--python', help="Target Python executable, if different from the one running flit" ) parser.add_argument('--deps', choices=['all', 'production', 'develop', 'none'], default='all', help="Which set of dependencies to install. 
If --deps=develop, the extras dev, doc, and test are installed" ) parser.add_argument('--only-deps', action='store_true', help="Install only dependencies of this package, and not the package itself" ) parser.add_argument('--extras', default=(), type=lambda l: l.split(',') if l else (), help="Install the dependencies of these (comma separated) extras additionally to the ones implied by --deps. " "--extras=all can be useful in combination with --deps=production, --deps=none precludes using --extras" ) def add_shared_build_options(parser: argparse.ArgumentParser): parser.add_argument('--format', action='append', help="Select a format to publish. Options: 'wheel', 'sdist'" ) vcs_grp = parser.add_mutually_exclusive_group() vcs_grp.add_argument('--use-vcs', action='store_true', help=("Choose which files to include in the sdist using git or hg. " "This is a convenient way to include all checked-in files, like " "tests and doc source files, in your sdist, but requires that git " "or hg is available on the command line." ) ) vcs_grp.add_argument('--no-use-vcs', action='store_true', help=("Select the files to include in the sdist without using git or hg. " "This should include all essential files to install and use your " "package; see the documentation for precisely what is included. " "This is the default for Flit version 4 and above." ) ) def main(argv=None): ap = argparse.ArgumentParser() ap.add_argument('-f', '--ini-file', type=pathlib.Path, default='pyproject.toml') ap.add_argument('-V', '--version', action='version', version='Flit '+__version__) # --repository now belongs on 'flit publish' - it's still here for # compatibility with scripts passing it before the subcommand. 
ap.add_argument('--repository', dest='deprecated_repository', help=argparse.SUPPRESS) ap.add_argument('--debug', action='store_true', help=argparse.SUPPRESS) ap.add_argument('--logo', action='store_true', help=argparse.SUPPRESS) subparsers = ap.add_subparsers(title='subcommands', dest='subcmd') # flit build -------------------------------------------- parser_build = subparsers.add_parser('build', help="Build wheel and sdist", ) add_shared_build_options(parser_build) # flit publish -------------------------------------------- parser_publish = subparsers.add_parser('publish', help="Upload wheel and sdist", ) add_shared_build_options(parser_publish) parser_publish.add_argument('--pypirc', help="The .pypirc config file to be used. DEFAULT = \"~/.pypirc\"" ) parser_publish.add_argument('--repository', help="Name of the repository to upload to (must be in the specified .pypirc file)" ) # flit install -------------------------------------------- parser_install = subparsers.add_parser('install', help="Install the package", ) parser_install.add_argument('-s', '--symlink', action='store_true', help="Symlink the module/package into site packages instead of copying it" ) parser_install.add_argument('--pth-file', action='store_true', help="Add .pth file for the module/package to site packages instead of copying it" ) add_shared_install_options(parser_install) # flit init -------------------------------------------- parser_init = subparsers.add_parser('init', help="Prepare pyproject.toml for a new package" ) args = ap.parse_args(argv) if args.ini_file.suffix == '.ini': sys.exit("flit.ini format is no longer supported. 
You can use " "'python3 -m flit.tomlify' to convert it to pyproject.toml") if args.subcmd not in {'init'} and not args.ini_file.is_file(): sys.exit(f'Config file {args.ini_file} does not exist') enable_colourful_output(logging.DEBUG if args.debug else logging.INFO) log.debug("Parsed arguments %r", args) if args.logo: from .logo import clogo print(clogo.format(version=__version__)) sys.exit(0) if args.subcmd == 'build': from .build import main try: main(args.ini_file, formats=set(args.format or []), use_vcs=args.use_vcs) except(common.NoDocstringError, common.VCSError, common.NoVersionError) as e: sys.exit(e.args[0]) elif args.subcmd == 'publish': if args.deprecated_repository: log.warning("Passing --repository before the 'upload' subcommand is deprecated: pass it after") repository = args.repository or args.deprecated_repository from .upload import main main(args.ini_file, repository, args.pypirc, formats=set(args.format or []), use_vcs=args.use_vcs) elif args.subcmd == 'install': from .install import Installer try: python = find_python_executable(args.python) installer = Installer.from_ini_path( args.ini_file, user=args.user, python=python, symlink=args.symlink, deps=args.deps, extras=args.extras, pth=args.pth_file ) if args.only_deps: installer.install_requirements() else: installer.install() except (ConfigError, PythonNotFoundError, common.NoDocstringError, common.NoVersionError) as e: sys.exit(e.args[0]) elif args.subcmd == 'init': from .init import TerminalIniter TerminalIniter().initialise() else: ap.print_help() sys.exit(1)
PythonNotFoundError
python
mlflow__mlflow
mlflow/store/model_registry/dbmodels/models.py
{ "start": 6631, "end": 7522 }
class ____(TypeDecorator): """ A custom SQLAlchemy type that encrypts data before storing in the database and decrypts it when retrieving. """ impl = String(1000) cache_ok = True def __init__(self): super().__init__() # Get encryption key from environment variable or generate one # In production, this should come from a secure key management service encryption_key = MLFLOW_WEBHOOK_SECRET_ENCRYPTION_KEY.get() or Fernet.generate_key() self.cipher = Fernet(encryption_key) def process_bind_param(self, value, dialect): if value is not None: return self.cipher.encrypt(value.encode()).decode() return value def process_result_value(self, value, dialect): if value is not None: return self.cipher.decrypt(value.encode()).decode() return value
EncryptedString
python
celery__celery
t/unit/app/test_app.py
{ "start": 2241, "end": 60243 }
class ____: def setup_method(self): self.app.add_defaults(deepcopy(self.CELERY_TEST_CONFIG)) def test_now(self): timezone_setting_value = 'US/Eastern' tz_utc = timezone.get_timezone('UTC') tz_us_eastern = timezone.get_timezone(timezone_setting_value) now = to_utc(datetime.now(datetime_timezone.utc)) app_now = self.app.now() assert app_now.tzinfo is tz_utc assert app_now - now <= timedelta(seconds=1) # Check that timezone conversion is applied from configuration self.app.conf.enable_utc = False self.app.conf.timezone = timezone_setting_value # timezone is a cached property del self.app.timezone app_now = self.app.now() assert app_now.tzinfo == tz_us_eastern diff = to_utc(datetime.now(datetime_timezone.utc)) - localize(app_now, tz_utc) assert diff <= timedelta(seconds=1) # Verify that timezone setting overrides enable_utc=on setting self.app.conf.enable_utc = True del self.app.timezone app_now = self.app.now() assert self.app.timezone == tz_us_eastern assert app_now.tzinfo == tz_us_eastern @patch('celery.app.base.set_default_app') def test_set_default(self, set_default_app): self.app.set_default() set_default_app.assert_called_with(self.app) @patch('celery.security.setup_security') def test_setup_security(self, setup_security): self.app.setup_security( {'json'}, 'key', None, 'cert', 'store', 'digest', 'serializer') setup_security.assert_called_with( {'json'}, 'key', None, 'cert', 'store', 'digest', 'serializer', app=self.app) def test_task_autofinalize_disabled(self): with self.Celery('xyzibari', autofinalize=False) as app: @app.task def ttafd(): return 42 with pytest.raises(RuntimeError): ttafd() with self.Celery('xyzibari', autofinalize=False) as app: @app.task def ttafd2(): return 42 app.finalize() assert ttafd2() == 42 def test_registry_autofinalize_disabled(self): with self.Celery('xyzibari', autofinalize=False) as app: with pytest.raises(RuntimeError): app.tasks['celery.chain'] app.finalize() assert app.tasks['celery.chain'] def test_task(self): with 
self.Celery('foozibari') as app: def fun(): pass fun.__module__ = '__main__' task = app.task(fun) assert task.name == app.main + '.fun' def test_task_too_many_args(self): with pytest.raises(TypeError): self.app.task(Mock(name='fun'), True) with pytest.raises(TypeError): self.app.task(Mock(name='fun'), True, 1, 2) def test_with_config_source(self): with self.Celery(config_source=ObjectConfig) as app: assert app.conf.FOO == 1 assert app.conf.BAR == 2 @pytest.mark.usefixtures('depends_on_current_app') def test_task_windows_execv(self): prev, _appbase.USING_EXECV = _appbase.USING_EXECV, True try: @self.app.task(shared=False) def foo(): pass assert foo._get_current_object() # is proxy finally: _appbase.USING_EXECV = prev assert not _appbase.USING_EXECV def test_task_takes_no_args(self): with pytest.raises(TypeError): @self.app.task(1) def foo(): pass def test_add_defaults(self): assert not self.app.configured _conf = {'foo': 300} def conf(): return _conf self.app.add_defaults(conf) assert conf in self.app._pending_defaults assert not self.app.configured assert self.app.conf.foo == 300 assert self.app.configured assert not self.app._pending_defaults # defaults not pickled appr = loads(dumps(self.app)) with pytest.raises(AttributeError): appr.conf.foo # add more defaults after configured conf2 = {'foo': 'BAR'} self.app.add_defaults(conf2) assert self.app.conf.foo == 'BAR' assert _conf in self.app.conf.defaults assert conf2 in self.app.conf.defaults def test_connection_or_acquire(self): with self.app.connection_or_acquire(block=True): assert self.app.pool._dirty with self.app.connection_or_acquire(pool=False): assert not self.app.pool._dirty def test_using_v1_reduce(self): self.app._using_v1_reduce = True assert loads(dumps(self.app)) def test_autodiscover_tasks_force_fixup_fallback(self): self.app.loader.autodiscover_tasks = Mock() self.app.autodiscover_tasks([], force=True) self.app.loader.autodiscover_tasks.assert_called_with( [], 'tasks', ) def 
test_autodiscover_tasks_force(self): self.app.loader.autodiscover_tasks = Mock() self.app.autodiscover_tasks(['proj.A', 'proj.B'], force=True) self.app.loader.autodiscover_tasks.assert_called_with( ['proj.A', 'proj.B'], 'tasks', ) self.app.loader.autodiscover_tasks = Mock() def lazy_list(): return ['proj.A', 'proj.B'] self.app.autodiscover_tasks( lazy_list, related_name='george', force=True, ) self.app.loader.autodiscover_tasks.assert_called_with( ['proj.A', 'proj.B'], 'george', ) def test_autodiscover_tasks_lazy(self): with patch('celery.signals.import_modules') as import_modules: def lazy_list(): return [1, 2, 3] self.app.autodiscover_tasks(lazy_list) import_modules.connect.assert_called() prom = import_modules.connect.call_args[0][0] assert isinstance(prom, promise) assert prom.fun == self.app._autodiscover_tasks assert prom.args[0](), [1, 2 == 3] def test_autodiscover_tasks__no_packages(self): fixup1 = Mock(name='fixup') fixup2 = Mock(name='fixup') self.app._autodiscover_tasks_from_names = Mock(name='auto') self.app._fixups = [fixup1, fixup2] fixup1.autodiscover_tasks.return_value = ['A', 'B', 'C'] fixup2.autodiscover_tasks.return_value = ['D', 'E', 'F'] self.app.autodiscover_tasks(force=True) self.app._autodiscover_tasks_from_names.assert_called_with( ['A', 'B', 'C', 'D', 'E', 'F'], related_name='tasks', ) def test_with_broker(self, patching): patching.setenv('CELERY_BROKER_URL', '') with self.Celery(broker='foo://baribaz') as app: assert app.conf.broker_url == 'foo://baribaz' def test_pending_configuration_non_true__kwargs(self): with self.Celery(task_create_missing_queues=False) as app: assert app.conf.task_create_missing_queues is False def test_pending_configuration__kwargs(self): with self.Celery(foo='bar') as app: assert app.conf.foo == 'bar' def test_pending_configuration__setattr(self): with self.Celery(broker='foo://bar') as app: app.conf.task_default_delivery_mode = 44 app.conf.worker_agent = 'foo:Bar' assert not app.configured assert 
app.conf.worker_agent == 'foo:Bar' assert app.conf.broker_url == 'foo://bar' assert app._preconf['worker_agent'] == 'foo:Bar' assert app.configured reapp = pickle.loads(pickle.dumps(app)) assert reapp._preconf['worker_agent'] == 'foo:Bar' assert not reapp.configured assert reapp.conf.worker_agent == 'foo:Bar' assert reapp.configured assert reapp.conf.broker_url == 'foo://bar' assert reapp._preconf['worker_agent'] == 'foo:Bar' def test_pending_configuration__update(self): with self.Celery(broker='foo://bar') as app: app.conf.update( task_default_delivery_mode=44, worker_agent='foo:Bar', ) assert not app.configured assert app.conf.worker_agent == 'foo:Bar' assert app.conf.broker_url == 'foo://bar' assert app._preconf['worker_agent'] == 'foo:Bar' def test_pending_configuration__compat_settings(self): with self.Celery(broker='foo://bar', backend='foo') as app: app.conf.update( CELERY_ALWAYS_EAGER=4, CELERY_DEFAULT_DELIVERY_MODE=63, CELERYD_AGENT='foo:Barz', ) assert app.conf.task_always_eager == 4 assert app.conf.task_default_delivery_mode == 63 assert app.conf.worker_agent == 'foo:Barz' assert app.conf.broker_url == 'foo://bar' assert app.conf.result_backend == 'foo' def test_pending_configuration__compat_settings_mixing(self): with self.Celery(broker='foo://bar', backend='foo') as app: app.conf.update( CELERY_ALWAYS_EAGER=4, CELERY_DEFAULT_DELIVERY_MODE=63, CELERYD_AGENT='foo:Barz', worker_consumer='foo:Fooz', ) with pytest.raises(ImproperlyConfigured): assert app.conf.task_always_eager == 4 def test_pending_configuration__django_settings(self): with self.Celery(broker='foo://bar', backend='foo') as app: app.config_from_object(DictAttribute(Bunch( CELERY_TASK_ALWAYS_EAGER=4, CELERY_TASK_DEFAULT_DELIVERY_MODE=63, CELERY_WORKER_AGENT='foo:Barz', CELERY_RESULT_SERIALIZER='pickle', )), namespace='CELERY') assert app.conf.result_serializer == 'pickle' assert app.conf.CELERY_RESULT_SERIALIZER == 'pickle' assert app.conf.task_always_eager == 4 assert 
app.conf.task_default_delivery_mode == 63 assert app.conf.worker_agent == 'foo:Barz' assert app.conf.broker_url == 'foo://bar' assert app.conf.result_backend == 'foo' def test_pending_configuration__compat_settings_mixing_new(self): with self.Celery(broker='foo://bar', backend='foo') as app: app.conf.update( task_always_eager=4, task_default_delivery_mode=63, worker_agent='foo:Barz', CELERYD_CONSUMER='foo:Fooz', CELERYD_AUTOSCALER='foo:Xuzzy', ) with pytest.raises(ImproperlyConfigured): assert app.conf.worker_consumer == 'foo:Fooz' def test_pending_configuration__compat_settings_mixing_alt(self): with self.Celery(broker='foo://bar', backend='foo') as app: app.conf.update( task_always_eager=4, task_default_delivery_mode=63, worker_agent='foo:Barz', CELERYD_CONSUMER='foo:Fooz', worker_consumer='foo:Fooz', CELERYD_AUTOSCALER='foo:Xuzzy', worker_autoscaler='foo:Xuzzy' ) def test_pending_configuration__setdefault(self): with self.Celery(broker='foo://bar') as app: assert not app.configured app.conf.setdefault('worker_agent', 'foo:Bar') assert not app.configured def test_pending_configuration__iter(self): with self.Celery(broker='foo://bar') as app: app.conf.worker_agent = 'foo:Bar' assert not app.configured assert list(app.conf.keys()) assert app.configured assert 'worker_agent' in app.conf assert dict(app.conf) def test_pending_configuration__raises_ImproperlyConfigured(self): with self.Celery(set_as_current=False) as app: app.conf.worker_agent = 'foo://bar' app.conf.task_default_delivery_mode = 44 app.conf.CELERY_ALWAYS_EAGER = 5 with pytest.raises(ImproperlyConfigured): app.finalize() with self.Celery() as app: assert not self.app.conf.task_always_eager def test_pending_configuration__ssl_settings(self): with self.Celery(broker='foo://bar', broker_use_ssl={ 'ssl_cert_reqs': ssl.CERT_REQUIRED, 'ssl_ca_certs': '/path/to/ca.crt', 'ssl_certfile': '/path/to/client.crt', 'ssl_keyfile': '/path/to/client.key'}, redis_backend_use_ssl={ 'ssl_cert_reqs': ssl.CERT_REQUIRED, 
'ssl_ca_certs': '/path/to/ca.crt', 'ssl_certfile': '/path/to/client.crt', 'ssl_keyfile': '/path/to/client.key'}) as app: assert not app.configured assert app.conf.broker_url == 'foo://bar' assert app.conf.broker_use_ssl['ssl_certfile'] == \ '/path/to/client.crt' assert app.conf.broker_use_ssl['ssl_keyfile'] == \ '/path/to/client.key' assert app.conf.broker_use_ssl['ssl_ca_certs'] == \ '/path/to/ca.crt' assert app.conf.broker_use_ssl['ssl_cert_reqs'] == \ ssl.CERT_REQUIRED assert app.conf.redis_backend_use_ssl['ssl_certfile'] == \ '/path/to/client.crt' assert app.conf.redis_backend_use_ssl['ssl_keyfile'] == \ '/path/to/client.key' assert app.conf.redis_backend_use_ssl['ssl_ca_certs'] == \ '/path/to/ca.crt' assert app.conf.redis_backend_use_ssl['ssl_cert_reqs'] == \ ssl.CERT_REQUIRED def test_repr(self): assert repr(self.app) def test_custom_task_registry(self): with self.Celery(tasks=self.app.tasks) as app2: assert app2.tasks is self.app.tasks def test_include_argument(self): with self.Celery(include=('foo', 'bar.foo')) as app: assert app.conf.include, ('foo' == 'bar.foo') def test_set_as_current(self): current = _state._tls.current_app try: app = self.Celery(set_as_current=True) assert _state._tls.current_app is app finally: _state._tls.current_app = current def test_current_task(self): @self.app.task def foo(shared=False): pass _state._task_stack.push(foo) try: assert self.app.current_task.name == foo.name finally: _state._task_stack.pop() def test_task_not_shared(self): with patch('celery.app.base.connect_on_app_finalize') as sh: @self.app.task(shared=False) def foo(): pass sh.assert_not_called() def test_task_compat_with_filter(self): with self.Celery() as app: check = Mock() def filter(task): check(task) return task @app.task(filter=filter, shared=False) def foo(): pass check.assert_called_with(foo) def test_task_with_filter(self): with self.Celery() as app: check = Mock() def filter(task): check(task) return task assert not _appbase.USING_EXECV 
@app.task(filter=filter, shared=False) def foo(): pass check.assert_called_with(foo) def test_task_with_pydantic_with_no_args(self): """Test a pydantic task with no arguments or return value.""" with self.Celery() as app: check = Mock() @app.task(pydantic=True) def foo(): check() assert foo() is None check.assert_called_once() def test_task_with_pydantic_with_arg_and_kwarg(self): """Test a pydantic task with simple (non-pydantic) arg/kwarg and return value.""" with self.Celery() as app: check = Mock() @app.task(pydantic=True) def foo(arg: int, kwarg: bool = True) -> int: check(arg, kwarg=kwarg) return 1 assert foo(0) == 1 check.assert_called_once_with(0, kwarg=True) def test_task_with_pydantic_with_optional_args(self): """Test pydantic task receiving and returning an optional argument.""" with self.Celery() as app: check = Mock() @app.task(pydantic=True) def foo(arg: Optional[int], kwarg: Optional[bool] = True) -> Optional[int]: check(arg, kwarg=kwarg) if isinstance(arg, int): return 1 return 2 assert foo(0) == 1 check.assert_called_once_with(0, kwarg=True) assert foo(None) == 2 check.assert_called_with(None, kwarg=True) @pytest.mark.skipif(sys.version_info < (3, 9), reason="Notation is only supported in Python 3.9 or newer.") def test_task_with_pydantic_with_dict_args(self): """Test pydantic task receiving and returning a generic dict argument.""" with self.Celery() as app: check = Mock() @app.task(pydantic=True) def foo(arg: dict[str, str], kwarg: dict[str, str]) -> dict[str, str]: check(arg, kwarg=kwarg) return {'x': 'y'} assert foo({'a': 'b'}, kwarg={'c': 'd'}) == {'x': 'y'} check.assert_called_once_with({'a': 'b'}, kwarg={'c': 'd'}) @pytest.mark.skipif(sys.version_info < (3, 9), reason="Notation is only supported in Python 3.9 or newer.") def test_task_with_pydantic_with_list_args(self): """Test pydantic task receiving and returning a generic dict argument.""" with self.Celery() as app: check = Mock() @app.task(pydantic=True) def foo(arg: list[str], kwarg: 
list[str] = True) -> list[str]: check(arg, kwarg=kwarg) return ['x'] assert foo(['a'], kwarg=['b']) == ['x'] check.assert_called_once_with(['a'], kwarg=['b']) def test_task_with_pydantic_with_pydantic_arg_and_default_kwarg(self): """Test a pydantic task with pydantic arg/kwarg and return value.""" class ArgModel(BaseModel): arg_value: int class KwargModel(BaseModel): kwarg_value: int kwarg_default = KwargModel(kwarg_value=1) class ReturnModel(BaseModel): ret_value: int with self.Celery() as app: check = Mock() @app.task(pydantic=True) def foo(arg: ArgModel, kwarg: KwargModel = kwarg_default) -> ReturnModel: check(arg, kwarg=kwarg) return ReturnModel(ret_value=2) assert foo({'arg_value': 0}) == {'ret_value': 2} check.assert_called_once_with(ArgModel(arg_value=0), kwarg=kwarg_default) check.reset_mock() # Explicitly pass kwarg (but as argument) assert foo({'arg_value': 3}, {'kwarg_value': 4}) == {'ret_value': 2} check.assert_called_once_with(ArgModel(arg_value=3), kwarg=KwargModel(kwarg_value=4)) check.reset_mock() # Explicitly pass all arguments as kwarg assert foo(arg={'arg_value': 5}, kwarg={'kwarg_value': 6}) == {'ret_value': 2} check.assert_called_once_with(ArgModel(arg_value=5), kwarg=KwargModel(kwarg_value=6)) def test_task_with_pydantic_with_non_strict_validation(self): """Test a pydantic task with where Pydantic has to apply non-strict validation.""" class Model(BaseModel): value: timedelta with self.Celery() as app: check = Mock() @app.task(pydantic=True) def foo(arg: Model) -> Model: check(arg) return Model(value=timedelta(days=arg.value.days * 2)) assert foo({'value': timedelta(days=1)}) == {'value': 'P2D'} check.assert_called_once_with(Model(value=timedelta(days=1))) check.reset_mock() # Pass a serialized value to the task assert foo({'value': 'P3D'}) == {'value': 'P6D'} check.assert_called_once_with(Model(value=timedelta(days=3))) def test_task_with_pydantic_with_optional_pydantic_args(self): """Test pydantic task receiving and returning an optional 
argument.""" class ArgModel(BaseModel): arg_value: int class KwargModel(BaseModel): kwarg_value: int class ReturnModel(BaseModel): ret_value: int with self.Celery() as app: check = Mock() @app.task(pydantic=True) def foo(arg: Optional[ArgModel], kwarg: Optional[KwargModel] = None) -> Optional[ReturnModel]: check(arg, kwarg=kwarg) if isinstance(arg, ArgModel): return ReturnModel(ret_value=1) return None assert foo(None) is None check.assert_called_once_with(None, kwarg=None) assert foo({'arg_value': 1}, kwarg={'kwarg_value': 2}) == {'ret_value': 1} check.assert_called_with(ArgModel(arg_value=1), kwarg=KwargModel(kwarg_value=2)) @pytest.mark.skipif(sys.version_info < (3, 9), reason="Notation is only supported in Python 3.9 or newer.") def test_task_with_pydantic_with_generic_return_value(self): """Test pydantic task receiving and returning an optional argument.""" class ReturnModel(BaseModel): ret_value: int with self.Celery() as app: check = Mock() @app.task(pydantic=True) def foo() -> dict[str, str]: check() return ReturnModel(ret_value=1) # type: ignore # whole point here is that this doesn't match assert foo() == ReturnModel(ret_value=1) check.assert_called_once_with() def test_task_with_pydantic_with_task_name_in_context(self): """Test that the task name is passed to as additional context.""" class ArgModel(BaseModel): value: int @model_validator(mode='after') def validate_context(self, info: ValidationInfo): context = info.context assert context assert context.get('celery_task_name') == 't.unit.app.test_app.task' return self with self.Celery() as app: check = Mock() @app.task(pydantic=True) def task(arg: ArgModel): check(arg) return 1 assert task({'value': 1}) == 1 def test_task_with_pydantic_with_strict_validation(self): """Test a pydantic task with/without strict model validation.""" class ArgModel(BaseModel): value: int with self.Celery() as app: check = Mock() @app.task(pydantic=True, pydantic_strict=True) def strict(arg: ArgModel): check(arg) 
@app.task(pydantic=True, pydantic_strict=False) def loose(arg: ArgModel): check(arg) # In Pydantic, passing an "exact int" as float works without strict validation assert loose({'value': 1.0}) is None check.assert_called_once_with(ArgModel(value=1)) check.reset_mock() # ... but a non-strict value will raise an exception with pytest.raises(ValueError): loose({'value': 1.1}) check.assert_not_called() # ... with strict validation, even an "exact int" will not work: with pytest.raises(ValueError): strict({'value': 1.0}) check.assert_not_called() def test_task_with_pydantic_with_extra_context(self): """Test passing additional validation context to the model.""" class ArgModel(BaseModel): value: int @model_validator(mode='after') def validate_context(self, info: ValidationInfo): context = info.context assert context, context assert context.get('foo') == 'bar' return self with self.Celery() as app: check = Mock() @app.task(pydantic=True, pydantic_context={'foo': 'bar'}) def task(arg: ArgModel): check(arg.value) return 1 assert task({'value': 1}) == 1 check.assert_called_once_with(1) def test_task_with_pydantic_with_dump_kwargs(self): """Test passing keyword arguments to model_dump().""" class ArgModel(BaseModel): value: int class RetModel(BaseModel): value: datetime unset_value: typing.Optional[int] = 99 # this would be in the output, if exclude_unset weren't True with self.Celery() as app: check = Mock() @app.task(pydantic=True, pydantic_dump_kwargs={'mode': 'python', 'exclude_unset': True}) def task(arg: ArgModel) -> RetModel: check(arg) return RetModel(value=datetime(2024, 5, 14, tzinfo=timezone.utc)) assert task({'value': 1}) == {'value': datetime(2024, 5, 14, tzinfo=timezone.utc)} check.assert_called_once_with(ArgModel(value=1)) def test_task_with_pydantic_with_pydantic_not_installed(self): """Test configuring a task with Pydantic when pydantic is not installed.""" with self.Celery() as app: @app.task(pydantic=True) def task(): return # mock function will raise 
ModuleNotFoundError only if pydantic is imported def import_module(name, *args, **kwargs): if name == 'pydantic': raise ModuleNotFoundError('Module not found.') return DEFAULT msg = r'^You need to install pydantic to use pydantic model serialization\.$' with patch( 'celery.app.base.importlib.import_module', side_effect=import_module, wraps=importlib.import_module ): with pytest.raises(ImproperlyConfigured, match=msg): task() def test_task_sets_main_name_MP_MAIN_FILE(self): from celery.utils import imports as _imports _imports.MP_MAIN_FILE = __file__ try: with self.Celery('xuzzy') as app: @app.task def foo(): pass assert foo.name == 'xuzzy.foo' finally: _imports.MP_MAIN_FILE = None def test_can_get_type_hints_for_tasks(self): import typing with self.Celery() as app: @app.task def foo(parameter: int) -> None: pass assert typing.get_type_hints(foo) == { 'parameter': int, 'return': type(None)} def test_annotate_decorator(self): from celery.app.task import Task class adX(Task): def run(self, y, z, x): return y, z, x check = Mock() def deco(fun): def _inner(*args, **kwargs): check(*args, **kwargs) return fun(*args, **kwargs) return _inner self.app.conf.task_annotations = { adX.name: {'@__call__': deco} } adX.bind(self.app) assert adX.app is self.app i = adX() i(2, 4, x=3) check.assert_called_with(i, 2, 4, x=3) i.annotate() i.annotate() def test_apply_async_adds_children(self): from celery._state import _task_stack @self.app.task(bind=True, shared=False) def a3cX1(self): pass @self.app.task(bind=True, shared=False) def a3cX2(self): pass _task_stack.push(a3cX1) try: a3cX1.push_request(called_directly=False) try: res = a3cX2.apply_async(add_to_parent=True) assert res in a3cX1.request.children finally: a3cX1.pop_request() finally: _task_stack.pop() def test_pickle_app(self): changes = {'THE_FOO_BAR': 'bars', 'THE_MII_MAR': 'jars'} self.app.conf.update(changes) saved = pickle.dumps(self.app) assert len(saved) < 2048 restored = pickle.loads(saved) for key, value in 
changes.items(): assert restored.conf[key] == value @patch('celery.bin.celery.celery') def test_worker_main(self, mocked_celery): self.app.worker_main(argv=['worker', '--help']) mocked_celery.main.assert_called_with( args=['worker', '--help'], standalone_mode=False) def test_config_from_envvar(self, monkeypatch): monkeypatch.setenv("CELERYTEST_CONFIG_OBJECT", 't.unit.app.test_app') self.app.config_from_envvar('CELERYTEST_CONFIG_OBJECT') assert self.app.conf.THIS_IS_A_KEY == 'this is a value' def assert_config2(self): assert self.app.conf.LEAVE_FOR_WORK assert self.app.conf.MOMENT_TO_STOP assert self.app.conf.CALL_ME_BACK == 123456789 assert not self.app.conf.WANT_ME_TO assert self.app.conf.UNDERSTAND_ME def test_config_from_object__lazy(self): conf = ObjectConfig2() self.app.config_from_object(conf) assert self.app.loader._conf is unconfigured assert self.app._config_source is conf self.assert_config2() def test_config_from_object__force(self): self.app.config_from_object(ObjectConfig2(), force=True) assert self.app.loader._conf self.assert_config2() def test_config_from_object__compat(self): class Config: CELERY_ALWAYS_EAGER = 44 CELERY_DEFAULT_DELIVERY_MODE = 30 CELERY_TASK_PUBLISH_RETRY = False self.app.config_from_object(Config) assert self.app.conf.task_always_eager == 44 assert self.app.conf.CELERY_ALWAYS_EAGER == 44 assert not self.app.conf.task_publish_retry assert self.app.conf.task_default_routing_key == 'testcelery' def test_config_from_object__supports_old_names(self): class Config: task_always_eager = 45 task_default_delivery_mode = 301 self.app.config_from_object(Config()) assert self.app.conf.CELERY_ALWAYS_EAGER == 45 assert self.app.conf.task_always_eager == 45 assert self.app.conf.CELERY_DEFAULT_DELIVERY_MODE == 301 assert self.app.conf.task_default_delivery_mode == 301 assert self.app.conf.task_default_routing_key == 'testcelery' def test_config_from_object__namespace_uppercase(self): class Config: CELERY_TASK_ALWAYS_EAGER = 44 
CELERY_TASK_DEFAULT_DELIVERY_MODE = 301 self.app.config_from_object(Config(), namespace='CELERY') assert self.app.conf.task_always_eager == 44 def test_config_from_object__namespace_lowercase(self): class Config: celery_task_always_eager = 44 celery_task_default_delivery_mode = 301 self.app.config_from_object(Config(), namespace='celery') assert self.app.conf.task_always_eager == 44 def test_config_from_object__mixing_new_and_old(self): class Config: task_always_eager = 44 worker_agent = 'foo:Agent' worker_consumer = 'foo:Consumer' beat_schedule = '/foo/schedule' CELERY_DEFAULT_DELIVERY_MODE = 301 with pytest.raises(ImproperlyConfigured) as exc: self.app.config_from_object(Config(), force=True) assert exc.args[0].startswith('CELERY_DEFAULT_DELIVERY_MODE') assert 'task_default_delivery_mode' in exc.args[0] def test_config_from_object__mixing_old_and_new(self): class Config: CELERY_ALWAYS_EAGER = 46 CELERYD_AGENT = 'foo:Agent' CELERYD_CONSUMER = 'foo:Consumer' CELERYBEAT_SCHEDULE = '/foo/schedule' task_default_delivery_mode = 301 with pytest.raises(ImproperlyConfigured) as exc: self.app.config_from_object(Config(), force=True) assert exc.args[0].startswith('task_default_delivery_mode') assert 'CELERY_DEFAULT_DELIVERY_MODE' in exc.args[0] def test_config_form_object__module_attr_does_not_exist(self): module_name = __name__ attr_name = 'bar' # the module must exist, but it should not have the config attr self.app.config_from_object(f'{module_name}.{attr_name}') with pytest.raises(ModuleNotFoundError) as exc: assert self.app.conf.broker_url is None assert module_name in exc.value.args[0] assert attr_name in exc.value.args[0] def test_config_from_cmdline(self): cmdline = ['task_always_eager=no', 'result_backend=/dev/null', 'worker_prefetch_multiplier=368', '.foobarstring=(string)300', '.foobarint=(int)300', 'database_engine_options=(dict){"foo": "bar"}'] self.app.config_from_cmdline(cmdline, namespace='worker') assert not self.app.conf.task_always_eager assert 
self.app.conf.result_backend == '/dev/null' assert self.app.conf.worker_prefetch_multiplier == 368 assert self.app.conf.worker_foobarstring == '300' assert self.app.conf.worker_foobarint == 300 assert self.app.conf.database_engine_options == {'foo': 'bar'} def test_setting__broker_transport_options(self): _args = {'foo': 'bar', 'spam': 'baz'} self.app.config_from_object(Bunch()) assert self.app.conf.broker_transport_options == \ {'polling_interval': 0.1} self.app.config_from_object(Bunch(broker_transport_options=_args)) assert self.app.conf.broker_transport_options == _args def test_Windows_log_color_disabled(self): self.app.IS_WINDOWS = True assert not self.app.log.supports_color(True) def test_WorkController(self): x = self.app.WorkController assert x.app is self.app def test_Worker(self): x = self.app.Worker assert x.app is self.app @pytest.mark.usefixtures('depends_on_current_app') def test_AsyncResult(self): x = self.app.AsyncResult('1') assert x.app is self.app r = loads(dumps(x)) # not set as current, so ends up as default app after reduce assert r.app is current_app._get_current_object() def test_get_active_apps(self): assert list(_state._get_active_apps()) app1 = self.Celery() appid = id(app1) assert app1 in _state._get_active_apps() app1.close() del (app1) gc.collect() # weakref removed from list when app goes out of scope. 
with pytest.raises(StopIteration): next(app for app in _state._get_active_apps() if id(app) == appid) def test_config_from_envvar_more(self, key='CELERY_HARNESS_CFG1'): assert not self.app.config_from_envvar( 'HDSAJIHWIQHEWQU', force=True, silent=True) with pytest.raises(ImproperlyConfigured): self.app.config_from_envvar( 'HDSAJIHWIQHEWQU', force=True, silent=False, ) os.environ[key] = __name__ + '.object_config' assert self.app.config_from_envvar(key, force=True) assert self.app.conf['FOO'] == 1 assert self.app.conf['BAR'] == 2 os.environ[key] = 'unknown_asdwqe.asdwqewqe' with pytest.raises(ImportError): self.app.config_from_envvar(key, silent=False) assert not self.app.config_from_envvar(key, force=True, silent=True) os.environ[key] = __name__ + '.dict_config' assert self.app.config_from_envvar(key, force=True) assert self.app.conf['FOO'] == 10 assert self.app.conf['BAR'] == 20 @patch('celery.bin.celery.celery') def test_start(self, mocked_celery): self.app.start() mocked_celery.main.assert_called() @pytest.mark.parametrize('url,expected_fields', [ ('pyamqp://', { 'hostname': 'localhost', 'userid': 'guest', 'password': 'guest', 'virtual_host': '/', }), ('pyamqp://:1978/foo', { 'port': 1978, 'virtual_host': 'foo', }), ('pyamqp:////value', { 'virtual_host': '/value', }) ]) def test_amqp_get_broker_info(self, url, expected_fields): info = self.app.connection(url).info() for key, expected_value in expected_fields.items(): assert info[key] == expected_value def test_amqp_failover_strategy_selection(self): # Test passing in a string and make sure the string # gets there untouched self.app.conf.broker_failover_strategy = 'foo-bar' assert self.app.connection('amqp:////value') \ .failover_strategy == 'foo-bar' # Try passing in None self.app.conf.broker_failover_strategy = None assert self.app.connection('amqp:////value') \ .failover_strategy == itertools.cycle # Test passing in a method def my_failover_strategy(it): yield True self.app.conf.broker_failover_strategy = 
my_failover_strategy assert self.app.connection('amqp:////value') \ .failover_strategy == my_failover_strategy def test_after_fork(self): self.app._pool = Mock() self.app.on_after_fork = Mock(name='on_after_fork') self.app._after_fork() assert self.app._pool is None self.app.on_after_fork.send.assert_called_with(sender=self.app) self.app._after_fork() def test_global_after_fork(self): self.app._after_fork = Mock(name='_after_fork') _appbase._after_fork_cleanup_app(self.app) self.app._after_fork.assert_called_with() @patch('celery.app.base.logger') def test_after_fork_cleanup_app__raises(self, logger): self.app._after_fork = Mock(name='_after_fork') exc = self.app._after_fork.side_effect = KeyError() _appbase._after_fork_cleanup_app(self.app) logger.info.assert_called_with( 'after forker raised exception: %r', exc, exc_info=1) def test_ensure_after_fork__no_multiprocessing(self): prev, _appbase.register_after_fork = ( _appbase.register_after_fork, None) try: self.app._after_fork_registered = False self.app._ensure_after_fork() assert self.app._after_fork_registered finally: _appbase.register_after_fork = prev def test_canvas(self): assert self.app._canvas.Signature def test_signature(self): sig = self.app.signature('foo', (1, 2)) assert sig.app is self.app def test_timezone_none_set(self): self.app.conf.timezone = None self.app.conf.enable_utc = True assert self.app.timezone == timezone.utc del self.app.timezone self.app.conf.enable_utc = False assert self.app.timezone == timezone.local def test_use_local_timezone(self): self.app.conf.timezone = None self.app.conf.enable_utc = False self._clear_timezone_cache() try: assert isinstance(self.app.timezone, ZoneInfo) finally: self._clear_timezone_cache() @patch("celery.utils.time.get_localzone") def test_use_local_timezone_failure(self, mock_get_localzone): mock_get_localzone.side_effect = Exception("Failed to get local timezone") self.app.conf.timezone = None self.app.conf.enable_utc = False self._clear_timezone_cache() 
try: assert isinstance(self.app.timezone, LocalTimezone) finally: self._clear_timezone_cache() def _clear_timezone_cache(self): del self.app.timezone del timezone.local def test_uses_utc_timezone(self): self.app.conf.timezone = None self.app.conf.enable_utc = True assert self.app.uses_utc_timezone() is True self.app.conf.enable_utc = False del self.app.timezone assert self.app.uses_utc_timezone() is False self.app.conf.timezone = 'US/Eastern' del self.app.timezone assert self.app.uses_utc_timezone() is False self.app.conf.timezone = 'UTC' del self.app.timezone assert self.app.uses_utc_timezone() is True def test_compat_on_configure(self): _on_configure = Mock(name='on_configure') class CompatApp(Celery): def on_configure(self, *args, **kwargs): # on pypy3 if named on_configure the class function # will be called, instead of the mock defined above, # so we add the underscore. _on_configure(*args, **kwargs) with CompatApp(set_as_current=False) as app: app.loader = Mock() app.loader.conf = {} app._load_config() _on_configure.assert_called_with() def test_add_periodic_task(self): @self.app.task def add(x, y): pass assert not self.app.configured self.app.add_periodic_task( 10, self.app.signature('add', (2, 2)), name='add1', expires=3, ) assert self.app._pending_periodic_tasks assert not self.app.configured sig2 = add.s(4, 4) assert self.app.configured self.app.add_periodic_task(20, sig2, name='add2', expires=4) assert 'add1' in self.app.conf.beat_schedule assert 'add2' in self.app.conf.beat_schedule def test_add_periodic_task_expected_override(self): @self.app.task def add(x, y): pass sig = add.s(2, 2) self.app.add_periodic_task(10, sig, name='add1', expires=3) self.app.add_periodic_task(20, sig, name='add1', expires=3) assert 'add1' in self.app.conf.beat_schedule assert len(self.app.conf.beat_schedule) == 1 def test_add_periodic_task_unexpected_override(self, caplog): @self.app.task def add(x, y): pass sig = add.s(2, 2) self.app.add_periodic_task(10, sig, expires=3) 
self.app.add_periodic_task(20, sig, expires=3) assert len(self.app.conf.beat_schedule) == 1 assert caplog.records[0].message == ( "Periodic task key='t.unit.app.test_app.add(2, 2)' shadowed a" " previous unnamed periodic task. Pass a name kwarg to" " add_periodic_task to silence this warning." ) @pytest.mark.masked_modules('multiprocessing.util') def test_pool_no_multiprocessing(self, mask_modules): pool = self.app.pool assert pool is self.app._pool def test_bugreport(self): assert self.app.bugreport() @patch('celery.app.base.detect_quorum_queues', return_value=[False, ""]) def test_send_task__connection_provided(self, detect_quorum_queues): connection = Mock(name='connection') router = Mock(name='router') router.route.return_value = {} self.app.amqp = Mock(name='amqp') self.app.amqp.Producer.attach_mock(ContextMock(), 'return_value') self.app.send_task('foo', (1, 2), connection=connection, router=router) self.app.amqp.Producer.assert_called_with( connection, auto_declare=False) self.app.amqp.send_task_message.assert_called_with( self.app.amqp.Producer(), 'foo', self.app.amqp.create_task_message()) def test_send_task_sent_event(self): class Dispatcher: sent = [] def publish(self, type, fields, *args, **kwargs): self.sent.append((type, fields)) conn = self.app.connection() chan = conn.channel() try: for e in ('foo_exchange', 'moo_exchange', 'bar_exchange'): chan.exchange_declare(e, 'direct', durable=True) chan.queue_declare(e, durable=True) chan.queue_bind(e, e, e) finally: chan.close() assert conn.transport_cls == 'memory' message = self.app.amqp.create_task_message( 'id', 'footask', (), {}, create_sent_event=True, ) prod = self.app.amqp.Producer(conn) dispatcher = Dispatcher() self.app.amqp.send_task_message( prod, 'footask', message, exchange='moo_exchange', routing_key='moo_exchange', event_dispatcher=dispatcher, ) assert dispatcher.sent assert dispatcher.sent[0][0] == 'task-sent' self.app.amqp.send_task_message( prod, 'footask', message, 
event_dispatcher=dispatcher, exchange='bar_exchange', routing_key='bar_exchange', ) def test_select_queues(self): self.app.amqp = Mock(name='amqp') self.app.select_queues({'foo', 'bar'}) self.app.amqp.queues.select.assert_called_with({'foo', 'bar'}) def test_Beat(self): from celery.apps.beat import Beat beat = self.app.Beat() assert isinstance(beat, Beat) def test_registry_cls(self): class TaskRegistry(self.app.registry_cls): pass class CustomCelery(type(self.app)): registry_cls = TaskRegistry app = CustomCelery(set_as_current=False) assert isinstance(app.tasks, TaskRegistry) def test_oid(self): # Test that oid is global value. oid1 = self.app.oid oid2 = self.app.oid uuid.UUID(oid1) uuid.UUID(oid2) assert oid1 == oid2 def test_global_oid(self): # Test that oid is global value also within threads main_oid = self.app.oid uuid.UUID(main_oid) from concurrent.futures import ThreadPoolExecutor with ThreadPoolExecutor(max_workers=1) as executor: future = executor.submit(lambda: self.app.oid) thread_oid = future.result() uuid.UUID(thread_oid) assert main_oid == thread_oid def test_thread_oid(self): # Test that thread_oid is global value in single thread. 
oid1 = self.app.thread_oid oid2 = self.app.thread_oid uuid.UUID(oid1) uuid.UUID(oid2) assert oid1 == oid2 def test_backend(self): # Test that app.backend returns the same backend in single thread backend1 = self.app.backend backend2 = self.app.backend assert isinstance(backend1, Backend) assert isinstance(backend2, Backend) assert backend1 is backend2 def test_thread_backend(self): # Test that app.backend returns the new backend for each thread main_backend = self.app.backend from concurrent.futures import ThreadPoolExecutor with ThreadPoolExecutor(max_workers=1) as executor: future = executor.submit(lambda: self.app.backend) thread_backend = future.result() assert isinstance(main_backend, Backend) assert isinstance(thread_backend, Backend) assert main_backend is not thread_backend def test_thread_oid_is_local(self): # Test that thread_oid is local to thread. main_oid = self.app.thread_oid uuid.UUID(main_oid) from concurrent.futures import ThreadPoolExecutor with ThreadPoolExecutor(max_workers=1) as executor: future = executor.submit(lambda: self.app.thread_oid) thread_oid = future.result() uuid.UUID(thread_oid) assert main_oid != thread_oid def test_thread_backend_thread_safe(self): # Should share the backend object across threads from concurrent.futures import ThreadPoolExecutor with self.Celery() as app: app.conf.update(result_backend_thread_safe=True) main_backend = app.backend with ThreadPoolExecutor(max_workers=1) as executor: future = executor.submit(lambda: app.backend) thread_backend = future.result() assert isinstance(main_backend, Backend) assert isinstance(thread_backend, Backend) assert main_backend is thread_backend def test_send_task_expire_as_string(self): try: self.app.send_task( 'foo', (1, 2), expires='2023-03-16T17:21:20.663973') except TypeError as e: pytest.fail(f'raise unexcepted error {e}') @patch('celery.app.base.detect_quorum_queues', return_value=[True, "testcelery"]) def test_native_delayed_delivery_countdown(self, detect_quorum_queues): 
        # detect_quorum_queues is patched (decorator above) to report quorum
        # queues, and the route resolves to a topic-exchange queue, so native
        # delayed delivery should kick in for the countdown below.
        self.app.amqp = MagicMock(name='amqp')
        self.app.amqp.router.route.return_value = {
            'queue': Queue(
                'testcelery',
                routing_key='testcelery',
                exchange=Exchange('testcelery', type='topic')
            )
        }

        self.app.send_task('foo', (1, 2), countdown=30)

        # The message must be published to the delayed-delivery topic exchange,
        # with the delay spelled out bit-by-bit in the routing key
        # (...1.1.1.1.0 == 0b11110 == 30 seconds) followed by the target queue name.
        exchange = Exchange(
            'celery_delayed_27',
            type='topic',
        )
        self.app.amqp.send_task_message.assert_called_once_with(
            ANY, ANY, ANY,
            exchange=exchange,
            routing_key='0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.1.1.1.1.0.testcelery'
        )
        driver_type_stub = self.app.amqp.producer_pool.connections.connection.transport.driver_type
        detect_quorum_queues.assert_called_once_with(self.app, driver_type_stub)

    @patch('celery.app.base.detect_quorum_queues', return_value=[True, "testcelery"])
    def test_native_delayed_delivery__no_queue_arg__no_eta(self, detect_quorum_queues):
        """Routing options without a ``queue`` entry and with no ETA/countdown
        must be forwarded untouched, and quorum-queue detection skipped."""
        self.app.amqp = MagicMock(name='amqp')
        options = {
            'routing_key': 'testcelery',
            'exchange': 'testcelery',
            'exchange_type': 'topic',
        }
        self.app.amqp.router.route.return_value = options

        self.app.send_task(
            name='foo',
            args=(1, 2),
        )

        # Original routing options pass straight through ...
        self.app.amqp.send_task_message.assert_called_once_with(
            ANY, ANY, ANY,
            **options,
        )
        # ... and without a delay there is no reason to probe for quorum queues.
        assert not detect_quorum_queues.called

    @patch('celery.app.base.detect_quorum_queues', return_value=[True, "testcelery"])
    def test_native_delayed_delivery__no_queue_arg__with_countdown(self, detect_quorum_queues):
        """With a countdown but no ``queue`` routing entry, the exchange and
        routing key are rewritten for native delayed delivery."""
        self.app.amqp = MagicMock(name='amqp')
        options = {
            'routing_key': 'testcelery',
            'exchange': 'testcelery',
            'exchange_type': 'topic',
        }
        self.app.amqp.router.route.return_value = options

        self.app.send_task(
            name='foo',
            args=(1, 2),
            countdown=30,
        )

        # Same binary delay encoding as above: 0b11110 == 30 seconds.
        exchange = Exchange(
            'celery_delayed_27',
            type='topic',
        )
        self.app.amqp.send_task_message.assert_called_once_with(
            ANY, ANY, ANY,
            exchange=exchange,
            routing_key='0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.1.1.1.1.0.testcelery',
            exchange_type="topic",
        )
        driver_type_stub = self.app.amqp.producer_pool.connections.connection.transport.driver_type
        detect_quorum_queues.assert_called_once_with(self.app, driver_type_stub)
@patch('celery.app.base.detect_quorum_queues', return_value=[True, "testcelery"]) def test_native_delayed_delivery_eta_datetime(self, detect_quorum_queues): self.app.amqp = MagicMock(name='amqp') self.app.amqp.router.route.return_value = { 'queue': Queue( 'testcelery', routing_key='testcelery', exchange=Exchange('testcelery', type='topic') ) } self.app.now = Mock(return_value=datetime(2024, 8, 24, tzinfo=datetime_timezone.utc)) self.app.send_task('foo', (1, 2), eta=datetime(2024, 8, 25)) exchange = Exchange( 'celery_delayed_27', type='topic', ) self.app.amqp.send_task_message.assert_called_once_with( ANY, ANY, ANY, exchange=exchange, routing_key='0.0.0.0.0.0.0.0.0.0.0.1.0.1.0.1.0.0.0.1.1.0.0.0.0.0.0.0.testcelery' ) @patch('celery.app.base.detect_quorum_queues', return_value=[True, "testcelery"]) def test_native_delayed_delivery_eta_str(self, detect_quorum_queues): self.app.amqp = MagicMock(name='amqp') self.app.amqp.router.route.return_value = { 'queue': Queue( 'testcelery', routing_key='testcelery', exchange=Exchange('testcelery', type='topic') ) } self.app.now = Mock(return_value=datetime(2024, 8, 24, tzinfo=datetime_timezone.utc)) self.app.send_task('foo', (1, 2), eta=datetime(2024, 8, 25).isoformat()) exchange = Exchange( 'celery_delayed_27', type='topic', ) self.app.amqp.send_task_message.assert_called_once_with( ANY, ANY, ANY, exchange=exchange, routing_key='0.0.0.0.0.0.0.0.0.0.0.1.0.1.0.1.0.0.0.1.1.0.0.0.0.0.0.0.testcelery', ) @patch('celery.app.base.detect_quorum_queues', return_value=[True, "testcelery"]) def test_native_delayed_delivery_no_eta_or_countdown(self, detect_quorum_queues): self.app.amqp = MagicMock(name='amqp') self.app.amqp.router.route.return_value = {'queue': Queue('testcelery', routing_key='testcelery')} self.app.send_task('foo', (1, 2), countdown=-10) self.app.amqp.send_task_message.assert_called_once_with( ANY, ANY, ANY, queue=Queue( 'testcelery', routing_key='testcelery' ) ) @patch('celery.app.base.detect_quorum_queues', 
return_value=[True, "testcelery"]) def test_native_delayed_delivery_countdown_in_the_past(self, detect_quorum_queues): self.app.amqp = MagicMock(name='amqp') self.app.amqp.router.route.return_value = { 'queue': Queue( 'testcelery', routing_key='testcelery', exchange=Exchange('testcelery', type='topic') ) } self.app.send_task('foo', (1, 2)) self.app.amqp.send_task_message.assert_called_once_with( ANY, ANY, ANY, queue=Queue( 'testcelery', routing_key='testcelery', exchange=Exchange('testcelery', type='topic') ) ) @patch('celery.app.base.detect_quorum_queues', return_value=[True, "testcelery"]) def test_native_delayed_delivery_eta_in_the_past(self, detect_quorum_queues): self.app.amqp = MagicMock(name='amqp') self.app.amqp.router.route.return_value = { 'queue': Queue( 'testcelery', routing_key='testcelery', exchange=Exchange('testcelery', type='topic') ) } self.app.now = Mock(return_value=datetime(2024, 8, 24, tzinfo=datetime_timezone.utc)) self.app.send_task('foo', (1, 2), eta=datetime(2024, 8, 23).isoformat()) self.app.amqp.send_task_message.assert_called_once_with( ANY, ANY, ANY, queue=Queue( 'testcelery', routing_key='testcelery', exchange=Exchange('testcelery', type='topic') ) ) @patch('celery.app.base.detect_quorum_queues', return_value=[True, "testcelery"]) def test_native_delayed_delivery_direct_exchange(self, detect_quorum_queues, caplog): self.app.amqp = MagicMock(name='amqp') self.app.amqp.router.route.return_value = { 'queue': Queue( 'testcelery', routing_key='testcelery', exchange=Exchange('testcelery', type='direct') ) } self.app.send_task('foo', (1, 2), countdown=10) self.app.amqp.send_task_message.assert_called_once_with( ANY, ANY, ANY, queue=Queue( 'testcelery', routing_key='testcelery', exchange=Exchange('testcelery', type='direct') ) ) assert len(caplog.records) == 1 record: LogRecord = caplog.records[0] assert record.levelname == "WARNING" assert record.message == ( "Direct exchanges are not supported with native delayed delivery.\n" "testcelery is 
a direct exchange but should be a topic exchange or " "a fanout exchange in order for native delayed delivery to work properly.\n" "If quorum queues are used, this task may block the worker process until the ETA arrives." )
test_App
python
ray-project__ray
python/ray/train/v2/_internal/state/schema.py
{ "start": 4107, "end": 4508 }
class ____(BaseModel): """CPU and memory statistics for a process.""" cpuPercent: float = Field(description="The percentage of CPU usage.") mem: Optional[List[int]] = Field( description="Memory statistics, including total memory, free memory, " "and memory usage ratio." ) memoryInfo: MemoryInfo = Field(description="Detailed memory usage information.")
ProcessStats
python
pypa__warehouse
warehouse/oidc/views.py
{ "start": 1790, "end": 15308 }
class ____(BaseModel): token: StrictStr def _ratelimiters(request: Request) -> dict[str, IRateLimiter]: return { "user.oidc": request.find_service( IRateLimiter, name="user_oidc.publisher.register" ), "ip.oidc": request.find_service( IRateLimiter, name="ip_oidc.publisher.register" ), } def _invalid(errors: list[Error], request: Request) -> JsonResponse: request.response.status = 422 return { "message": "Token request failed", "errors": errors, } @view_config( route_name="oidc.audience", require_methods=["GET"], renderer="json", require_csrf=False, has_translations=False, ) def oidc_audience(request: Request): if request.flags.disallow_oidc(): return HTTPForbidden( json={"message": "Trusted publishing functionality not enabled"} ) audience: str = request.registry.settings["warehouse.oidc.audience"] return {"audience": audience} @view_config( route_name="oidc.github.mint_token", require_methods=["POST"], renderer="json", require_csrf=False, ) @view_config( route_name="oidc.mint_token", require_methods=["POST"], renderer="json", require_csrf=False, ) def mint_token_from_oidc(request: Request): try: payload = TokenPayload.model_validate_json(request.body) unverified_jwt = payload.token except ValidationError as exc: return _invalid( errors=[{"code": "invalid-payload", "description": str(exc)}], request=request, ) # We currently have an **unverified** JWT. To verify it, we need to # know which OIDC service's keyring to check it against. # To do this, we gingerly peek into the unverified claims and # use the `iss` to key into the right `OIDCPublisherService`. 
try: unverified_claims = jwt.decode( unverified_jwt, options=dict(verify_signature=False) ) unverified_issuer: str = unverified_claims["iss"] except Exception as e: metrics = request.find_service(IMetricsService, context=None) metrics.increment("warehouse.oidc.mint_token_from_oidc.malformed_jwt") # We expect only PyJWTError and KeyError; anything else indicates # an abstraction leak in jwt that we'll log for upstream reporting. if not isinstance(e, (jwt.PyJWTError, KeyError)): with sentry_sdk.new_scope() as scope: scope.fingerprint = [e] sentry_sdk.capture_message(f"jwt.decode raised generic error: {e}") return _invalid( errors=[{"code": "invalid-payload", "description": "malformed JWT"}], request=request, ) # Associate the given issuer claim with Warehouse's OIDCPublisherService. # First, try the standard issuers service_name = OIDC_ISSUER_SERVICE_NAMES.get(unverified_issuer) # If not in global mapping, check for organization-specific custom issuer if not service_name: service_name = lookup_custom_issuer_type(request.db, unverified_issuer) if not service_name: request.metrics.increment( "warehouse.oidc.mint_token_from_oidc.unknown_issuer", tags=[f"issuer_url:{unverified_issuer}"], ) return _invalid( errors=[ { "code": "invalid-payload", "description": "unknown trusted publishing issuer", } ], request=request, ) if request.flags.disallow_oidc(OIDC_ISSUER_ADMIN_FLAGS.get(unverified_issuer)): return _invalid( errors=[ { "code": "not-enabled", "description": f"{service_name} trusted publishing functionality not enabled", # noqa: E501 } ], request=request, ) oidc_service: OIDCPublisherService = request.find_service( IOIDCPublisherService, name=service_name ) return mint_token(oidc_service, unverified_jwt, unverified_issuer, request) def mint_token( oidc_service: OIDCPublisherService, unverified_jwt: str, unverified_issuer: str, request: Request, ) -> JsonResponse: claims = oidc_service.verify_jwt_signature(unverified_jwt, unverified_issuer) if not claims: return 
_invalid( errors=[ {"code": "invalid-token", "description": "malformed or invalid token"} ], request=request, ) # First, try to find a pending publisher. try: pending_publisher = oidc_service.find_publisher(claims, pending=True) factory = ProjectFactory(request) if isinstance(pending_publisher, PendingOIDCPublisher): # If the project already exists, this pending publisher is no longer # valid and needs to be removed. # NOTE: This is mostly a sanity check, since we dispose of invalidated # pending publishers below. if pending_publisher.project_name in factory: request.db.delete(pending_publisher) return _invalid( errors=[ { "code": "invalid-pending-publisher", "description": "valid token, but project already exists", } ], request=request, ) # Try creating the new project project_service = request.find_service(IProjectService) try: new_project = project_service.create_project( pending_publisher.project_name, pending_publisher.added_by, request, creator_is_owner=pending_publisher.organization_id is None, ratelimited=False, organization_id=pending_publisher.organization_id, ) except HTTPException as exc: return _invalid( errors=[{"code": "invalid-payload", "description": str(exc)}], request=request, ) # Reify the pending publisher against the newly created project reified_publisher = oidc_service.reify_pending_publisher( pending_publisher, new_project ) request.db.flush() # To get the reified_publisher.id new_project.record_event( tag=EventTag.Project.OIDCPublisherAdded, request=request, additional={ "publisher": reified_publisher.publisher_name, "id": str(reified_publisher.id), "specifier": str(reified_publisher), "url": reified_publisher.publisher_url(), "submitted_by": "OpenID created token", "reified_from_pending_publisher": True, "constrained_from_existing_publisher": False, }, ) # Successfully converting a pending publisher into a normal publisher # is a positive signal, so we reset the associated ratelimits. 
ratelimiters = _ratelimiters(request) ratelimiters["user.oidc"].clear(pending_publisher.added_by.id) ratelimiters["ip.oidc"].clear(request.remote_addr) except InvalidPublisherError: # If the claim set isn't valid for a pending publisher, it's OK, we # will try finding a regular publisher pass # We either don't have a pending OIDC publisher, or we *did* # have one and we've just converted it. Either way, look for a full publisher # to actually do the macaroon minting with. try: publisher = oidc_service.find_publisher(claims, pending=False) # NOTE: assert to persuade mypy of the correct type here. assert isinstance(publisher, OIDCPublisher) except ReusedTokenError: return _invalid( errors=[ { "code": "invalid-reuse-token", "description": "invalid token: already used", } ], request=request, ) except InvalidPublisherError as e: return _invalid( errors=[ { "code": "invalid-publisher", "description": f"valid token, but no corresponding publisher ({e})", } ], request=request, ) # At this point, we've verified that the given JWT is valid for the given # project. All we need to do is mint a new token. # NOTE: For OIDC-minted API tokens, the Macaroon's description string # is purely an implementation detail and is not displayed to the user. macaroon_service: DatabaseMacaroonService = request.find_service( IMacaroonService, context=None ) not_before = int(time.time()) expires_at = not_before + 900 serialized, dm = macaroon_service.create_macaroon( request.domain, ( f"OpenID token: {str(publisher)} " f"({datetime.fromtimestamp(not_before).isoformat()})" ), [ caveats.OIDCPublisher( oidc_publisher_id=str(publisher.id), ), caveats.ProjectID(project_ids=[str(p.id) for p in publisher.projects]), caveats.Expiration(expires_at=expires_at, not_before=not_before), ], oidc_publisher_id=str(publisher.id), additional={"oidc": publisher.stored_claims(claims)}, ) # We have used the given JWT to mint a new token. Let now store it to prevent # its reuse if the claims contain a JTI. 
Of note, exp is coming from a trusted # source here, so we don't validate it if jwt_identifier := claims.get("jti"): expiration = cast(int, claims.get("exp")) oidc_service.store_jwt_identifier(jwt_identifier, expiration) for project in publisher.projects: project.record_event( tag=EventTag.Project.ShortLivedAPITokenAdded, request=request, additional={ "expires": expires_at, "publisher_name": publisher.publisher_name, "publisher_url": publisher.publisher_url(), "reusable_workflow_used": is_from_reusable_workflow(publisher, claims), }, ) # Send a warning email to the owners of the project using the Trusted Publisher if # the TP has no environment configured but the OIDC claims contain one. # The email contains a link to change the TP so that it only accepts the # environment seen in the current OIDC claims. # # Note: currently we only send the email if the Trusted Publisher is used in only # a single project, since multiple projects using the same TP might mean they don't # use a single environment. if len(publisher.projects) == 1 and should_send_environment_warning_email( publisher, claims ): send_environment_ignored_in_trusted_publisher_email( request, set(publisher.projects[0].owners), project_name=publisher.projects[0].name, publisher=publisher, environment_name=claims["environment"], ) # NOTE: This is for temporary metrics collection of GitHub Trusted Publishers # that use reusable workflows. Since support for reusable workflows is accidental # and not correctly implemented, we need to understand how widely it's being # used before changing its behavior. 
# ref: https://github.com/pypi/warehouse/pull/16364 if claims and is_from_reusable_workflow(publisher, claims): metrics = request.find_service(IMetricsService, context=None) metrics.increment("warehouse.oidc.mint_token.github_reusable_workflow") return {"success": True, "token": serialized, "expires": expires_at} def is_from_reusable_workflow( publisher: OIDCPublisher | None, claims: SignedClaims ) -> bool: """Detect if the claims are originating from a reusable workflow.""" if not isinstance(publisher, GitHubPublisher): return False job_workflow_ref = claims.get("job_workflow_ref") workflow_ref = claims.get("workflow_ref") # When using reusable workflows, `job_workflow_ref` contains the reusable ( # called) workflow and `workflow_ref` contains the parent (caller) workflow. # With non-reusable workflows they are the same, so we count reusable # workflows by checking if they are different. return bool(job_workflow_ref and workflow_ref and job_workflow_ref != workflow_ref) def should_send_environment_warning_email( publisher: OIDCPublisher, claims: SignedClaims ) -> bool: """ Determine if the claims contain an environment but the publisher doesn't If the publisher does not have an environment configured but the claims contain one, it means the project can easily improve security by constraining the Trusted Publisher to only that environment. This currently only applies to GitHub and GitLab publishers. """ if not isinstance(publisher, (GitHubPublisher, GitLabPublisher)): return False claims_env = claims.get("environment") return publisher.environment == "" and claims_env is not None and claims_env != ""
TokenPayload
python
ansible__ansible
test/units/module_utils/facts/test_collector.py
{ "start": 20473, "end": 25491 }
class ____(unittest.TestCase): maxDiff = None def _classes(self, all_collector_classes=None, valid_subsets=None, minimal_gather_subset=None, gather_subset=None, gather_timeout=None, platform_info=None): platform_info = platform_info or {'system': 'Linux'} return collector.collector_classes_from_gather_subset(all_collector_classes=all_collector_classes, valid_subsets=valid_subsets, minimal_gather_subset=minimal_gather_subset, gather_subset=gather_subset, gather_timeout=gather_timeout, platform_info=platform_info) def test_no_args(self): res = self._classes() self.assertIsInstance(res, list) self.assertEqual(res, []) def test_not_all(self): res = self._classes(all_collector_classes=default_collectors.collectors, gather_subset=['!all']) self.assertIsInstance(res, list) self.assertEqual(res, []) def test_all(self): res = self._classes(all_collector_classes=default_collectors.collectors, gather_subset=['all']) self.assertIsInstance(res, list) def test_hardware(self): res = self._classes(all_collector_classes=default_collectors.collectors, gather_subset=['hardware']) self.assertIsInstance(res, list) self.assertIn(default_collectors.PlatformFactCollector, res) self.assertIn(default_collectors.LinuxHardwareCollector, res) assert res.index(default_collectors.LinuxHardwareCollector) > res.index(default_collectors.PlatformFactCollector) def test_network(self): res = self._classes(all_collector_classes=default_collectors.collectors, gather_subset=['network']) self.assertIsInstance(res, list) self.assertIn(default_collectors.DistributionFactCollector, res) self.assertIn(default_collectors.PlatformFactCollector, res) self.assertIn(default_collectors.LinuxNetworkCollector, res) assert res.index(default_collectors.LinuxNetworkCollector) > res.index(default_collectors.PlatformFactCollector) assert res.index(default_collectors.LinuxNetworkCollector) > res.index(default_collectors.DistributionFactCollector) def test_env(self): res = 
self._classes(all_collector_classes=default_collectors.collectors, gather_subset=['env']) self.assertIsInstance(res, list) self.assertEqual(res, [default_collectors.EnvFactCollector]) def test_facter(self): res = self._classes(all_collector_classes=default_collectors.collectors, gather_subset=set(['env', 'facter'])) self.assertIsInstance(res, list) self.assertEqual(set(res), set([default_collectors.EnvFactCollector, default_collectors.FacterFactCollector])) def test_facter_ohai(self): res = self._classes(all_collector_classes=default_collectors.collectors, gather_subset=set(['env', 'facter', 'ohai'])) self.assertIsInstance(res, list) self.assertEqual(set(res), set([default_collectors.EnvFactCollector, default_collectors.FacterFactCollector, default_collectors.OhaiFactCollector])) def test_just_facter(self): res = self._classes(all_collector_classes=default_collectors.collectors, gather_subset=set(['facter'])) self.assertIsInstance(res, list) self.assertEqual(set(res), set([default_collectors.FacterFactCollector])) def test_collector_specified_multiple_times(self): res = self._classes(all_collector_classes=default_collectors.collectors, gather_subset=['platform', 'all', 'machine']) self.assertIsInstance(res, list) self.assertIn(default_collectors.PlatformFactCollector, res) def test_unknown_collector(self): # something claims 'unknown_collector' is a valid gather_subset, but there is # no FactCollector mapped to 'unknown_collector' self.assertRaisesRegex(TypeError, r'Bad subset.*unknown_collector.*given to Ansible.*allowed\:.*all,.*env.*', self._classes, all_collector_classes=default_collectors.collectors, gather_subset=['env', 'unknown_collector'])
TestCollectorClassesFromGatherSubset
python
kamyu104__LeetCode-Solutions
Python/most-beautiful-item-for-each-query.py
{ "start": 57, "end": 536 }
class ____(object): def maximumBeauty(self, items, queries): """ :type items: List[List[int]] :type queries: List[int] :rtype: List[int] """ items.sort() for i in xrange(len(items)-1): items[i+1][1] = max(items[i+1][1], items[i][1]) result = [] for q in queries: i = bisect.bisect_left(items, [q+1]) result.append(items[i-1][1] if i else 0) return result
Solution
python
getsentry__sentry
tests/sentry/data_export/test_models.py
{ "start": 544, "end": 7994 }
class ____(TestCase): TEST_STRING = b"A bunch of test data..." def setUp(self) -> None: super().setUp() self.user = self.create_user() self.organization = self.create_organization() self.data_export = ExportedData.objects.create( user_id=self.user.id, organization=self.organization, query_type=0, query_info={"env": "test"}, ) self.file1 = File.objects.create( name="tempfile-data-export", type="export.csv", headers={"Content-Type": "text/csv"} ) self.file2 = File.objects.create( name="tempfile-data-export", type="export.csv", headers={"Content-Type": "text/csv"} ) def test_status_property(self) -> None: def _assert_status(status: ExportStatus) -> None: assert self.data_export.status == status _assert_status(ExportStatus.Early) self.data_export.update( date_expired=timezone.now() + timedelta(weeks=2), date_finished=timezone.now() - timedelta(weeks=2), ) _assert_status(ExportStatus.Valid) self.data_export.update(date_expired=timezone.now() - timedelta(weeks=1)) _assert_status(ExportStatus.Expired) def test_payload_property(self) -> None: assert isinstance(self.data_export.payload, dict) keys = list(self.data_export.query_info.keys()) + ["export_type"] assert sorted(self.data_export.payload.keys()) == sorted(keys) def test_file_name_property(self) -> None: assert isinstance(self.data_export.file_name, str) file_name = self.data_export.file_name assert file_name.startswith(ExportQueryType.as_str(self.data_export.query_type)) assert file_name.endswith(str(self.data_export.id) + ".csv") def test_format_date(self) -> None: assert ExportedData.format_date(self.data_export.date_finished) is None assert isinstance(ExportedData.format_date(self.data_export.date_added), str) def test_delete_file(self) -> None: # Empty call should have no effect assert self.data_export.file_id is None self.data_export.delete_file() assert self.data_export.file_id is None # Real call should delete the file assert File.objects.filter(id=self.file1.id).exists() 
self.data_export.update(file_id=self.file1.id) assert isinstance(self.data_export._get_file(), File) self.data_export.delete_file() assert self.data_export._get_file() is None assert not File.objects.filter(id=self.file1.id).exists() # The ExportedData should be unaffected assert ExportedData.objects.filter(id=self.data_export.id).exists() assert ExportedData.objects.get(id=self.data_export.id)._get_file() is None def test_delete(self) -> None: self.data_export.finalize_upload(file=self.file1) assert ExportedData.objects.filter(id=self.data_export.id).exists() assert File.objects.filter(id=self.file1.id).exists() self.data_export.delete() assert not ExportedData.objects.filter(id=self.data_export.id).exists() assert not File.objects.filter(id=self.file1.id).exists() def test_finalize_upload(self) -> None: # With default expiration with tempfile.TemporaryFile() as tf: tf.write(self.TEST_STRING) tf.seek(0) self.file1.putfile(tf) self.data_export.finalize_upload(file=self.file1) file = self.data_export._get_file() assert isinstance(file, File) assert file.getfile().read() == self.TEST_STRING assert self.data_export.date_finished is not None assert self.data_export.date_expired is not None assert self.data_export.date_expired == self.data_export.date_finished + DEFAULT_EXPIRATION # With custom expiration with tempfile.TemporaryFile() as tf: tf.write(self.TEST_STRING + self.TEST_STRING) tf.seek(0) self.file2.putfile(tf) self.data_export.finalize_upload(file=self.file2, expiration=timedelta(weeks=2)) file = self.data_export._get_file() assert isinstance(file, File) assert file.getfile().read() == self.TEST_STRING + self.TEST_STRING # Ensure the first file is deleted assert not File.objects.filter(id=self.file1.id).exists() assert self.data_export.date_expired == self.data_export.date_finished + timedelta(weeks=2) def test_email_success(self) -> None: # Shouldn't send if ExportedData is incomplete with self.tasks(): self.data_export.email_success() assert len(mail.outbox) 
== 0 # Should send one email if complete self.data_export.finalize_upload(file=self.file1) with self.tasks(): self.data_export.email_success() assert len(mail.outbox) == 1 @with_feature("system:multi-region") def test_email_success_customer_domains(self) -> None: self.data_export.finalize_upload(file=self.file1) with self.tasks(): self.data_export.email_success() assert len(mail.outbox) == 1 msg = mail.outbox[0] assert msg.subject == "Your data is ready." assert ( self.organization.absolute_url(f"/organizations/{self.organization.slug}/data-export/") in msg.body ) @patch("sentry.utils.email.MessageBuilder") def test_email_success_content(self, builder: MagicMock) -> None: self.data_export.finalize_upload(file=self.file1) with self.tasks(): self.data_export.email_success() expected_url = absolute_uri( reverse( "sentry-data-export-details", args=[self.organization.slug, self.data_export.id] ) ) expected_email_args = { "subject": "Your data is ready.", "context": { "url": expected_url, "expiration": ExportedData.format_date(date=self.data_export.date_expired), }, "type": "organization.export-data", "template": "sentry/emails/data-export-success.txt", "html_template": "sentry/emails/data-export-success.html", } builder.assert_called_with(**expected_email_args) def test_email_failure(self) -> None: with self.tasks(): self.data_export.email_failure("failed to export data!") assert len(mail.outbox) == 1 assert not ExportedData.objects.filter(id=self.data_export.id).exists() @patch("sentry.utils.email.MessageBuilder") def test_email_failure_content(self, builder: MagicMock) -> None: with self.tasks(): self.data_export.email_failure("failed to export data!") expected_email_args = { "subject": "We couldn't export your data.", "context": { "creation": ExportedData.format_date(date=self.data_export.date_added), "error_message": "failed to export data!", "payload": json.dumps(self.data_export.payload), }, "type": "organization.export-data", "template": 
"sentry/emails/data-export-failure.txt", "html_template": "sentry/emails/data-export-failure.html", } builder.assert_called_with(**expected_email_args)
ExportedDataTest
python
Netflix__metaflow
metaflow/_vendor/click/types.py
{ "start": 3415, "end": 4127 }
class ____(ParamType): name = "text" def convert(self, value, param, ctx): if isinstance(value, bytes): enc = _get_argv_encoding() try: value = value.decode(enc) except UnicodeError: fs_enc = get_filesystem_encoding() if fs_enc != enc: try: value = value.decode(fs_enc) except UnicodeError: value = value.decode("utf-8", "replace") else: value = value.decode("utf-8", "replace") return value return value def __repr__(self): return "STRING"
StringParamType
python
sqlalchemy__sqlalchemy
test/ext/test_mutable.py
{ "start": 29739, "end": 31113 }
class ____(_MutableDictTestBase, fixtures.MappedTest): @classmethod def define_tables(cls, metadata): import json class JSONEncodedDict(TypeDecorator): impl = VARCHAR(50) cache_ok = True def process_bind_param(self, value, dialect): if value is not None: value = json.dumps(value) return value def process_result_value(self, value, dialect): if value is not None: value = json.loads(value) return value MutableDict = cls._type_fixture() Base = declarative_base(metadata=metadata) class AbstractFoo(Base): __abstract__ = True id = Column( Integer, primary_key=True, test_needs_autoincrement=True ) data = Column(MutableDict.as_mutable(JSONEncodedDict)) non_mutable_data = Column(JSONEncodedDict) unrelated_data = Column(String(50)) class Foo(AbstractFoo): __tablename__ = "foo" column_prop = column_property( func.lower(AbstractFoo.unrelated_data) ) assert Foo.data.property.columns[0].type is not AbstractFoo.data.type def test_non_mutable(self): self._test_non_mutable()
MutableColumnCopyJSONTest
python
scipy__scipy
scipy/optimize/tests/test_optimize.py
{ "start": 126502, "end": 137413 }
class ____: def test_optimize_result_attributes(self): def func(x): return x ** 2 # Note that `brute` solver does not return `OptimizeResult` results = [optimize.basinhopping(func, x0=1), optimize.differential_evolution(func, [(-4, 4)]), optimize.shgo(func, [(-4, 4)]), optimize.dual_annealing(func, [(-4, 4)]), optimize.direct(func, [(-4, 4)]), ] for result in results: assert isinstance(result, optimize.OptimizeResult) assert hasattr(result, "x") assert hasattr(result, "success") assert hasattr(result, "message") assert hasattr(result, "fun") assert hasattr(result, "nfev") assert hasattr(result, "nit") def test_approx_fprime(): # check that approx_fprime (serviced by approx_derivative) works for # jac and hess g = optimize.approx_fprime(himmelblau_x0, himmelblau) assert_allclose(g, himmelblau_grad(himmelblau_x0), rtol=5e-6) h = optimize.approx_fprime(himmelblau_x0, himmelblau_grad) assert_allclose(h, himmelblau_hess(himmelblau_x0), rtol=5e-6) def test_gh12594(): # gh-12594 reported an error in `_linesearch_powell` and # `_line_for_search` when `Bounds` was passed lists instead of arrays. # Check that results are the same whether the inputs are lists or arrays. def f(x): return x[0]**2 + (x[1] - 1)**2 bounds = Bounds(lb=[-10, -10], ub=[10, 10]) res = optimize.minimize(f, x0=(0, 0), method='Powell', bounds=bounds) bounds = Bounds(lb=np.array([-10, -10]), ub=np.array([10, 10])) ref = optimize.minimize(f, x0=(0, 0), method='Powell', bounds=bounds) assert_allclose(res.fun, ref.fun) assert_allclose(res.x, ref.x) def test_gh12513_trustregion_exact_infinite_loop(): # gh-12513 reported that optimize.minimize might hang when # method='trust-exact', using the option ``subproblem_maxiter``, # this can be avoided. 
H = np.array( [[3.67335930e01, -2.52334820e02, 1.15477558e01, -1.19933725e-03, -2.06408851e03, -2.05821411e00, -2.52334820e02, -6.52076924e02, -2.71362566e-01, -1.98885126e00, 1.22085415e00, 2.30220713e00, -9.71278532e-02, -5.11210123e-01, -1.00399562e00, 1.43319679e-01, 6.03815471e00, -6.38719934e-02, 1.65623929e-01], [-2.52334820e02, 1.76757312e03, -9.92814996e01, 1.06533600e-02, 1.44442941e04, 1.43811694e01, 1.76757312e03, 4.56694461e03, 2.22263363e00, 1.62977318e01, -7.81539315e00, -1.24938012e01, 6.74029088e-01, 3.22802671e00, 5.14978971e00, -9.58561209e-01, -3.92199895e01, 4.47201278e-01, -1.17866744e00], [1.15477558e01, -9.92814996e01, 3.63872363e03, -4.40007197e-01, -9.55435081e02, -1.13985105e00, -9.92814996e01, -2.58307255e02, -5.21335218e01, -3.77485107e02, -6.75338369e01, -1.89457169e02, 5.67828623e00, 5.82402681e00, 1.72734354e01, -4.29114840e00, -7.84885258e01, 3.17594634e00, 2.45242852e00], [-1.19933725e-03, 1.06533600e-02, -4.40007197e-01, 5.73576663e-05, 1.01563710e-01, 1.18838745e-04, 1.06533600e-02, 2.76535767e-02, 6.25788669e-03, 4.50699620e-02, 8.64152333e-03, 2.27772377e-02, -8.51026855e-04, 1.65316383e-04, 1.38977551e-03, 5.51629259e-04, 1.38447755e-02, -5.17956723e-04, -1.29260347e-04], [-2.06408851e03, 1.44442941e04, -9.55435081e02, 1.01563710e-01, 1.23101825e05, 1.26467259e02, 1.44442941e04, 3.74590279e04, 2.18498571e01, 1.60254460e02, -7.52977260e01, -1.17989623e02, 6.58253160e00, 3.14949206e01, 4.98527190e01, -9.33338661e00, -3.80465752e02, 4.33872213e00, -1.14768816e01], [-2.05821411e00, 1.43811694e01, -1.13985105e00, 1.18838745e-04, 1.26467259e02, 1.46226198e-01, 1.43811694e01, 3.74509252e01, 2.76928748e-02, 2.03023837e-01, -8.84279903e-02, -1.29523344e-01, 8.06424434e-03, 3.83330661e-02, 5.81579023e-02, -1.12874980e-02, -4.48118297e-01, 5.15022284e-03, -1.41501894e-02], [-2.52334820e02, 1.76757312e03, -9.92814996e01, 1.06533600e-02, 1.44442941e04, 1.43811694e01, 1.76757312e03, 4.56694461e03, 2.22263363e00, 1.62977318e01, 
-7.81539315e00, -1.24938012e01, 6.74029088e-01, 3.22802671e00, 5.14978971e00, -9.58561209e-01, -3.92199895e01, 4.47201278e-01, -1.17866744e00], [-6.52076924e02, 4.56694461e03, -2.58307255e02, 2.76535767e-02, 3.74590279e04, 3.74509252e01, 4.56694461e03, 1.18278398e04, 5.82242837e00, 4.26867612e01, -2.03167952e01, -3.22894255e01, 1.75705078e00, 8.37153730e00, 1.32246076e01, -2.49238529e00, -1.01316422e02, 1.16165466e00, -3.09390862e00], [-2.71362566e-01, 2.22263363e00, -5.21335218e01, 6.25788669e-03, 2.18498571e01, 2.76928748e-02, 2.22263363e00, 5.82242837e00, 4.36278066e01, 3.14836583e02, -2.04747938e01, -3.05535101e01, -1.24881456e-01, 1.15775394e01, 4.06907410e01, -1.39317748e00, -3.90902798e01, -9.71716488e-02, 1.06851340e-01], [-1.98885126e00, 1.62977318e01, -3.77485107e02, 4.50699620e-02, 1.60254460e02, 2.03023837e-01, 1.62977318e01, 4.26867612e01, 3.14836583e02, 2.27255216e03, -1.47029712e02, -2.19649109e02, -8.83963155e-01, 8.28571708e01, 2.91399776e02, -9.97382920e00, -2.81069124e02, -6.94946614e-01, 7.38151960e-01], [1.22085415e00, -7.81539315e00, -6.75338369e01, 8.64152333e-03, -7.52977260e01, -8.84279903e-02, -7.81539315e00, -2.03167952e01, -2.04747938e01, -1.47029712e02, 7.83372613e01, 1.64416651e02, -4.30243758e00, -2.59579610e01, -6.25644064e01, 6.69974667e00, 2.31011701e02, -2.68540084e00, 5.44531151e00], [2.30220713e00, -1.24938012e01, -1.89457169e02, 2.27772377e-02, -1.17989623e02, -1.29523344e-01, -1.24938012e01, -3.22894255e01, -3.05535101e01, -2.19649109e02, 1.64416651e02, 3.75893031e02, -7.42084715e00, -4.56437599e01, -1.11071032e02, 1.18761368e01, 4.78724142e02, -5.06804139e00, 8.81448081e00], [-9.71278532e-02, 6.74029088e-01, 5.67828623e00, -8.51026855e-04, 6.58253160e00, 8.06424434e-03, 6.74029088e-01, 1.75705078e00, -1.24881456e-01, -8.83963155e-01, -4.30243758e00, -7.42084715e00, 9.62009425e-01, 1.53836355e00, 2.23939458e00, -8.01872920e-01, -1.92191084e01, 3.77713908e-01, -8.32946970e-01], [-5.11210123e-01, 3.22802671e00, 5.82402681e00, 
1.65316383e-04, 3.14949206e01, 3.83330661e-02, 3.22802671e00, 8.37153730e00, 1.15775394e01, 8.28571708e01, -2.59579610e01, -4.56437599e01, 1.53836355e00, 2.63851056e01, 7.34859767e01, -4.39975402e00, -1.12015747e02, 5.11542219e-01, -2.64962727e00], [-1.00399562e00, 5.14978971e00, 1.72734354e01, 1.38977551e-03, 4.98527190e01, 5.81579023e-02, 5.14978971e00, 1.32246076e01, 4.06907410e01, 2.91399776e02, -6.25644064e01, -1.11071032e02, 2.23939458e00, 7.34859767e01, 2.36535458e02, -1.09636675e01, -2.72152068e02, 6.65888059e-01, -6.29295273e00], [1.43319679e-01, -9.58561209e-01, -4.29114840e00, 5.51629259e-04, -9.33338661e00, -1.12874980e-02, -9.58561209e-01, -2.49238529e00, -1.39317748e00, -9.97382920e00, 6.69974667e00, 1.18761368e01, -8.01872920e-01, -4.39975402e00, -1.09636675e01, 1.16820748e00, 3.00817252e01, -4.51359819e-01, 9.82625204e-01], [6.03815471e00, -3.92199895e01, -7.84885258e01, 1.38447755e-02, -3.80465752e02, -4.48118297e-01, -3.92199895e01, -1.01316422e02, -3.90902798e01, -2.81069124e02, 2.31011701e02, 4.78724142e02, -1.92191084e01, -1.12015747e02, -2.72152068e02, 3.00817252e01, 1.13232557e03, -1.33695932e01, 2.22934659e01], [-6.38719934e-02, 4.47201278e-01, 3.17594634e00, -5.17956723e-04, 4.33872213e00, 5.15022284e-03, 4.47201278e-01, 1.16165466e00, -9.71716488e-02, -6.94946614e-01, -2.68540084e00, -5.06804139e00, 3.77713908e-01, 5.11542219e-01, 6.65888059e-01, -4.51359819e-01, -1.33695932e01, 4.27994168e-01, -5.09020820e-01], [1.65623929e-01, -1.17866744e00, 2.45242852e00, -1.29260347e-04, -1.14768816e01, -1.41501894e-02, -1.17866744e00, -3.09390862e00, 1.06851340e-01, 7.38151960e-01, 5.44531151e00, 8.81448081e00, -8.32946970e-01, -2.64962727e00, -6.29295273e00, 9.82625204e-01, 2.22934659e01, -5.09020820e-01, 4.09964606e00]] ) J = np.array([ -2.53298102e-07, 1.76392040e-06, 1.74776130e-06, -4.19479903e-10, 1.44167498e-05, 1.41703911e-08, 1.76392030e-06, 4.96030153e-06, -2.35771675e-07, -1.68844985e-06, 4.29218258e-07, 6.65445159e-07, -3.87045830e-08, 
-3.17236594e-07, -1.21120169e-06, 4.59717313e-08, 1.67123246e-06, 1.46624675e-08, 4.22723383e-08 ]) def fun(x): return np.dot(np.dot(x, H), x) / 2 + np.dot(x, J) def jac(x): return np.dot(x, H) + J def hess(x): return H x0 = np.zeros(19) res = optimize.minimize( fun, x0, jac=jac, hess=hess, method="trust-exact", options={"gtol": 1e-6, "subproblem_maxiter": 10}, ) assert res.success assert abs(fun(res.x)) < 1e-5 @pytest.mark.parametrize('method', ['Newton-CG', 'trust-constr']) @pytest.mark.parametrize('sparse_type', [coo_matrix, csc_matrix, csr_matrix, coo_array, csr_array, csc_array]) def test_sparse_hessian(method, sparse_type): # gh-8792 reported an error for minimization with `newton_cg` when `hess` # returns a sparse array. Check that results are the same whether `hess` # returns a dense or sparse array for optimization methods that accept # sparse Hessian matrices. def sparse_rosen_hess(x): return sparse_type(rosen_hess(x)) x0 = [2., 2.] res_sparse = optimize.minimize(rosen, x0, method=method, jac=rosen_der, hess=sparse_rosen_hess) res_dense = optimize.minimize(rosen, x0, method=method, jac=rosen_der, hess=rosen_hess) assert_allclose(res_dense.fun, res_sparse.fun) assert_allclose(res_dense.x, res_sparse.x) assert res_dense.nfev == res_sparse.nfev assert res_dense.njev == res_sparse.njev assert res_dense.nhev == res_sparse.nhev @pytest.mark.parametrize('workers', [None, 2]) @pytest.mark.parametrize( 'method', ['l-bfgs-b', 'bfgs', 'slsqp', 'trust-constr', 'Newton-CG', 'CG', 'tnc', 'trust-ncg', 'trust-krylov'])
TestGlobalOptimization
python
sdispater__pendulum
src/pendulum/testing/traveller.py
{ "start": 271, "end": 5040 }
class ____: def __init__(self, datetime_class: type[DateTime] = DateTime) -> None: self._datetime_class: type[DateTime] = datetime_class def freeze(self) -> Self: raise self._not_implemented() def travel_back(self) -> Self: raise self._not_implemented() def travel( self, years: int = 0, months: int = 0, weeks: int = 0, days: int = 0, hours: int = 0, minutes: int = 0, seconds: int = 0, microseconds: int = 0, ) -> Self: raise self._not_implemented() def travel_to(self, dt: DateTime, *, freeze: bool = False) -> Self: raise self._not_implemented() def __enter__(self) -> Self: return self def __exit__( self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType, ) -> None: ... def _not_implemented(self) -> NotImplementedError: return NotImplementedError() if not PYPY: try: import time_machine except ImportError: time_machine = None # type: ignore[assignment] if time_machine is not None: class Traveller(BaseTraveller): def __init__(self, datetime_class: type[DateTime] = DateTime) -> None: super().__init__(datetime_class) self._started: bool = False self._traveller: time_machine.travel | None = None self._coordinates: time_machine.Coordinates | None = None def freeze(self) -> Self: if self._started: cast("time_machine.Coordinates", self._coordinates).move_to( self._datetime_class.now(), tick=False ) else: self._start(freeze=True) return self def travel_back(self) -> Self: if not self._started: return self cast("time_machine.travel", self._traveller).stop() self._coordinates = None self._traveller = None self._started = False return self def travel( self, years: int = 0, months: int = 0, weeks: int = 0, days: int = 0, hours: int = 0, minutes: int = 0, seconds: int = 0, microseconds: int = 0, *, freeze: bool = False, ) -> Self: self._start(freeze=freeze) cast("time_machine.Coordinates", self._coordinates).move_to( self._datetime_class.now().add( years=years, months=months, weeks=weeks, days=days, hours=hours, minutes=minutes, 
seconds=seconds, microseconds=microseconds, ) ) return self def travel_to(self, dt: DateTime, *, freeze: bool = False) -> Self: self._start(freeze=freeze) cast("time_machine.Coordinates", self._coordinates).move_to(dt) return self def _start(self, freeze: bool = False) -> None: if self._started: return if not self._traveller: self._traveller = time_machine.travel( self._datetime_class.now(), tick=not freeze ) self._coordinates = self._traveller.start() self._started = True def __enter__(self) -> Self: self._start() return self def __exit__( self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType, ) -> None: self.travel_back() else: class Traveller(BaseTraveller): # type: ignore[no-redef] def _not_implemented(self) -> NotImplementedError: return NotImplementedError( "Time travelling is an optional feature. " 'You can add it by installing Pendulum with the "test" extra.' ) else: class Traveller(BaseTraveller): # type: ignore[no-redef] def _not_implemented(self) -> NotImplementedError: return NotImplementedError( "Time travelling is not supported on the PyPy Python implementation." )
BaseTraveller
python
astropy__astropy
astropy/io/ascii/core.py
{ "start": 1727, "end": 5417 }
class ____: """ Internal class to replace the csv writer ``writerow`` and ``writerows`` functions so that in the case of ``delimiter=' '`` and ``quoting=csv.QUOTE_MINIMAL``, the output field value is quoted for empty fields (when value == ''). This changes the API slightly in that the writerow() and writerows() methods return the output written string instead of the length of that string. Examples -------- >>> from astropy.io.ascii.core import CsvWriter >>> writer = CsvWriter(delimiter=' ') >>> print(writer.writerow(['hello', '', 'world'])) hello "" world """ # Random 16-character string that gets injected instead of any # empty fields and is then replaced post-write with doubled-quotechar. # Created with: # ''.join(random.choice(string.printable[:90]) for _ in range(16)) replace_sentinel: Final[str] = "2b=48Av%0-V3p>bX" def __init__(self, csvfile=None, **kwargs): self.csvfile = csvfile # Temporary StringIO for catching the real csv.writer() object output self.temp_out = StringIO() self.writer = csv.writer(self.temp_out, **kwargs) dialect = self.writer.dialect self.quotechar2 = dialect.quotechar * 2 self.quote_empty = (dialect.quoting == csv.QUOTE_MINIMAL) and ( dialect.delimiter == " " ) def writerow(self, values): """ Similar to csv.writer.writerow but with the custom quoting behavior. Returns the written string instead of the length of that string. """ has_empty = False # If QUOTE_MINIMAL and space-delimited then replace empty fields with # the sentinel value. if self.quote_empty: for i, value in enumerate(values): if value == "": has_empty = True values[i] = self.replace_sentinel return self._writerow(self.writer.writerow, values, has_empty) def writerows(self, values_list): """ Similar to csv.writer.writerows but with the custom quoting behavior. Returns the written string instead of the length of that string. """ has_empty = False # If QUOTE_MINIMAL and space-delimited then replace empty fields with # the sentinel value. 
if self.quote_empty: for values in values_list: for i, value in enumerate(values): if value == "": has_empty = True values[i] = self.replace_sentinel return self._writerow(self.writer.writerows, values_list, has_empty) def _writerow(self, writerow_func, values, has_empty): """ Call ``writerow_func`` (either writerow or writerows) with ``values``. If it has empty fields that have been replaced then change those sentinel strings back to quoted empty strings, e.g. ``""``. """ # Clear the temporary StringIO buffer that self.writer writes into and # then call the real csv.writer().writerow or writerows with values. self.temp_out.seek(0) self.temp_out.truncate() writerow_func(values) row_string = self.temp_out.getvalue() if self.quote_empty and has_empty: row_string = re.sub(self.replace_sentinel, self.quotechar2, row_string) # self.csvfile is defined then write the output. In practice the pure # Python writer calls with csvfile=None, while the fast writer calls with # a file-like object. if self.csvfile: self.csvfile.write(row_string) return row_string
CsvWriter
python
PrefectHQ__prefect
src/prefect/client/schemas/actions.py
{ "start": 21207, "end": 21524 }
class ____(ActionBaseModel): """Data used by the Prefect REST API to create a concurrency limit.""" tag: str = Field( default=..., description="A tag the concurrency limit is applied to." ) concurrency_limit: int = Field(default=..., description="The concurrency limit.")
ConcurrencyLimitCreate
python
django__django
tests/sessions_tests/tests.py
{ "start": 38664, "end": 47338 }
class ____(TestCase): request_factory = RequestFactory() @staticmethod def get_response_touching_session(request): request.session["hello"] = "world" return HttpResponse("Session test") @override_settings(SESSION_COOKIE_SECURE=True) def test_secure_session_cookie(self): request = self.request_factory.get("/") middleware = SessionMiddleware(self.get_response_touching_session) # Handle the response through the middleware response = middleware(request) self.assertIs(response.cookies[settings.SESSION_COOKIE_NAME]["secure"], True) @override_settings(SESSION_COOKIE_HTTPONLY=True) def test_httponly_session_cookie(self): request = self.request_factory.get("/") middleware = SessionMiddleware(self.get_response_touching_session) # Handle the response through the middleware response = middleware(request) self.assertIs(response.cookies[settings.SESSION_COOKIE_NAME]["httponly"], True) self.assertIn( cookies.Morsel._reserved["httponly"], str(response.cookies[settings.SESSION_COOKIE_NAME]), ) @override_settings(SESSION_COOKIE_SAMESITE="Strict") def test_samesite_session_cookie(self): request = self.request_factory.get("/") middleware = SessionMiddleware(self.get_response_touching_session) response = middleware(request) self.assertEqual( response.cookies[settings.SESSION_COOKIE_NAME]["samesite"], "Strict" ) @override_settings(SESSION_COOKIE_HTTPONLY=False) def test_no_httponly_session_cookie(self): request = self.request_factory.get("/") middleware = SessionMiddleware(self.get_response_touching_session) response = middleware(request) self.assertEqual(response.cookies[settings.SESSION_COOKIE_NAME]["httponly"], "") self.assertNotIn( cookies.Morsel._reserved["httponly"], str(response.cookies[settings.SESSION_COOKIE_NAME]), ) def test_session_save_on_500(self): def response_500(request): response = HttpResponse("Horrible error") response.status_code = 500 request.session["hello"] = "world" return response request = self.request_factory.get("/") SessionMiddleware(response_500)(request) 
# The value wasn't saved above. self.assertNotIn("hello", request.session.load()) def test_session_save_on_5xx(self): def response_503(request): response = HttpResponse("Service Unavailable") response.status_code = 503 request.session["hello"] = "world" return response request = self.request_factory.get("/") SessionMiddleware(response_503)(request) # The value wasn't saved above. self.assertNotIn("hello", request.session.load()) def test_session_update_error_redirect(self): def response_delete_session(request): request.session = DatabaseSession() request.session.save(must_create=True) request.session.delete() return HttpResponse() request = self.request_factory.get("/foo/") middleware = SessionMiddleware(response_delete_session) msg = ( "The request's session was deleted before the request completed. " "The user may have logged out in a concurrent request, for example." ) with self.assertRaisesMessage(SessionInterrupted, msg): # Handle the response through the middleware. It will try to save # the deleted session which will cause an UpdateError that's caught # and raised as a SessionInterrupted. middleware(request) def test_session_delete_on_end(self): def response_ending_session(request): request.session.flush() return HttpResponse("Session test") request = self.request_factory.get("/") middleware = SessionMiddleware(response_ending_session) # Before deleting, there has to be an existing cookie request.COOKIES[settings.SESSION_COOKIE_NAME] = "abc" # Handle the response through the middleware response = middleware(request) # The cookie was deleted, not recreated. 
# A deleted cookie header looks like: # "Set-Cookie: sessionid=; expires=Thu, 01 Jan 1970 00:00:00 GMT; " # "Max-Age=0; Path=/" self.assertEqual( 'Set-Cookie: {}=""; expires=Thu, 01 Jan 1970 00:00:00 GMT; ' "Max-Age=0; Path=/; SameSite={}".format( settings.SESSION_COOKIE_NAME, settings.SESSION_COOKIE_SAMESITE, ), str(response.cookies[settings.SESSION_COOKIE_NAME]), ) # SessionMiddleware sets 'Vary: Cookie' to prevent the 'Set-Cookie' # from being cached. self.assertEqual(response.headers["Vary"], "Cookie") @override_settings( SESSION_COOKIE_DOMAIN=".example.local", SESSION_COOKIE_PATH="/example/" ) def test_session_delete_on_end_with_custom_domain_and_path(self): def response_ending_session(request): request.session.flush() return HttpResponse("Session test") request = self.request_factory.get("/") middleware = SessionMiddleware(response_ending_session) # Before deleting, there has to be an existing cookie request.COOKIES[settings.SESSION_COOKIE_NAME] = "abc" # Handle the response through the middleware response = middleware(request) # The cookie was deleted, not recreated. # A deleted cookie header with a custom domain and path looks like: # Set-Cookie: sessionid=; Domain=.example.local; # expires=Thu, 01 Jan 1970 00:00:00 GMT; Max-Age=0; # Path=/example/ self.assertEqual( 'Set-Cookie: {}=""; Domain=.example.local; expires=Thu, ' "01 Jan 1970 00:00:00 GMT; Max-Age=0; Path=/example/; SameSite={}".format( settings.SESSION_COOKIE_NAME, settings.SESSION_COOKIE_SAMESITE, ), str(response.cookies[settings.SESSION_COOKIE_NAME]), ) def test_flush_empty_without_session_cookie_doesnt_set_cookie(self): def response_ending_session(request): request.session.flush() return HttpResponse("Session test") request = self.request_factory.get("/") middleware = SessionMiddleware(response_ending_session) # Handle the response through the middleware response = middleware(request) # A cookie should not be set. 
self.assertEqual(response.cookies, {}) # The session is accessed so "Vary: Cookie" should be set. self.assertEqual(response.headers["Vary"], "Cookie") def test_empty_session_saved(self): """ If a session is emptied of data but still has a key, it should still be updated. """ def response_set_session(request): # Set a session key and some data. request.session["foo"] = "bar" return HttpResponse("Session test") request = self.request_factory.get("/") middleware = SessionMiddleware(response_set_session) # Handle the response through the middleware. response = middleware(request) self.assertEqual(tuple(request.session.items()), (("foo", "bar"),)) # A cookie should be set, along with Vary: Cookie. self.assertIn( "Set-Cookie: sessionid=%s" % request.session.session_key, str(response.cookies), ) self.assertEqual(response.headers["Vary"], "Cookie") # Empty the session data. del request.session["foo"] # Handle the response through the middleware. response = HttpResponse("Session test") response = middleware.process_response(request, response) self.assertEqual(dict(request.session.values()), {}) session = Session.objects.get(session_key=request.session.session_key) self.assertEqual(session.get_decoded(), {}) # While the session is empty, it hasn't been flushed so a cookie should # still be set, along with Vary: Cookie. self.assertGreater(len(request.session.session_key), 8) self.assertIn( "Set-Cookie: sessionid=%s" % request.session.session_key, str(response.cookies), ) self.assertEqual(response.headers["Vary"], "Cookie")
SessionMiddlewareTests
python
kamyu104__LeetCode-Solutions
Python/merge-strings-alternately.py
{ "start": 33, "end": 471 }
class ____(object): def mergeAlternately(self, word1, word2): """ :type word1: str :type word2: str :rtype: str """ result = [] i = 0 while i < len(word1) or i < len(word2): if i < len(word1): result.append(word1[i]) if i < len(word2): result.append(word2[i]) i += 1 return "".join(result)
Solution
python
apache__avro
lang/py/avro/test/test_protocol.py
{ "start": 939, "end": 1374 }
class ____: """A proxy for a protocol string that provides useful test metadata.""" def __init__(self, data, name="", comment=""): if not isinstance(data, str): data = json.dumps(data) self.data = data self.name = name or data self.comment = comment def parse(self): return avro.protocol.parse(str(self)) def __str__(self): return str(self.data)
TestProtocol
python
pytorch__pytorch
torch/_dynamo/symbolic_convert.py
{ "start": 7049, "end": 8089 }
class ____: filename: str lineno: int instruction_pointer: int inst: Instruction # for debugging only _failed: bool = False error_on_graph_break: Optional[bool] = None reason: Optional[GraphCompileReason] = None def fail_and_restart_analysis(self, error_on_graph_break: bool) -> None: """ Start tracing of the current frame over again, and don't take this branch. """ self._failed = True self.error_on_graph_break = error_on_graph_break if self.reason is not None: restart_reason = self.reason.reason else: restart_reason = "Unknown fail_and_restart_analysis" raise exc.SpeculationRestartAnalysis(restart_reason=restart_reason) def failed(self, tx: InstructionTranslatorBase) -> bool: if self._failed: assert self.error_on_graph_break is not None tx.error_on_graph_break = self.error_on_graph_break return True return False @dataclasses.dataclass
SpeculationEntry
python
pytorch__pytorch
torch/ao/nn/qat/modules/linear.py
{ "start": 304, "end": 3051 }
class ____(nn.Linear): r""" A linear module attached with FakeQuantize modules for weight, used for quantization aware training. We adopt the same interface as `torch.nn.Linear`, please see https://pytorch.org/docs/stable/nn.html#torch.nn.Linear for documentation. Similar to `torch.nn.Linear`, with FakeQuantize modules initialized to default. Attributes: weight: fake quant module for weight """ _FLOAT_MODULE = nn.Linear def __init__( self, in_features, out_features, bias=True, qconfig=None, device=None, dtype=None, ) -> None: factory_kwargs = {"device": device, "dtype": dtype} super().__init__(in_features, out_features, bias, **factory_kwargs) assert qconfig, "qconfig must be provided for QAT module" self.qconfig = qconfig self.weight_fake_quant = qconfig.weight(factory_kwargs=factory_kwargs) def forward(self, input): return F.linear(input, self.weight_fake_quant(self.weight), self.bias) @classmethod def from_float(cls, mod, use_precomputed_fake_quant=False): r"""Create a qat module from a float module or qparams_dict Args: `mod` a float module, either produced by torch.ao.quantization utilities or directly from user """ assert type_before_parametrizations(mod) == cls._FLOAT_MODULE, ( " qat." 
+ cls.__name__ + ".from_float only works for " + cls._FLOAT_MODULE.__name__ ) assert hasattr(mod, "qconfig"), "Input float module must have qconfig defined" assert mod.qconfig, "Input float module must have a valid qconfig" if type_before_parametrizations(mod) == LinearReLU: mod = mod[0] qconfig = mod.qconfig qat_linear = cls( mod.in_features, mod.out_features, bias=mod.bias is not None, qconfig=qconfig, ) if is_parametrized(mod, "weight"): transfer_parametrizations_and_params(mod, qat_linear, "weight") else: qat_linear.weight = mod.weight if is_parametrized(mod, "bias"): transfer_parametrizations_and_params(mod, qat_linear, "bias") else: qat_linear.bias = mod.bias return qat_linear def to_float(self): linear = torch.nn.Linear( self.in_features, self.out_features, self.bias is not None ) linear.weight = torch.nn.Parameter(self.weight.detach()) if self.bias is not None: linear.bias = torch.nn.Parameter(self.bias.detach()) linear.train(self.training) return linear
Linear
python
ray-project__ray
python/ray/data/_internal/execution/interfaces/op_runtime_metrics.py
{ "start": 3949, "end": 4130 }
class ____: inputs: RefBundle num_outputs: int bytes_outputs: int num_rows_produced: int start_time: float cum_block_gen_time: float @dataclass
RunningTaskInfo
python
scipy__scipy
scipy/signal/tests/test_dltisys.py
{ "start": 10669, "end": 11554 }
class ____: def test_dlti_instantiation(self): # Test that lti can be instantiated. dt = 0.05 # TransferFunction s = dlti([1], [-1], dt=dt) assert isinstance(s, TransferFunction) assert isinstance(s, dlti) assert not isinstance(s, lti) assert s.dt == dt # ZerosPolesGain s = dlti(np.array([]), np.array([-1]), 1, dt=dt) assert isinstance(s, ZerosPolesGain) assert isinstance(s, dlti) assert not isinstance(s, lti) assert s.dt == dt # StateSpace s = dlti([1], [-1], 1, 3, dt=dt) assert isinstance(s, StateSpace) assert isinstance(s, dlti) assert not isinstance(s, lti) assert s.dt == dt # Number of inputs assert_raises(ValueError, dlti, 1) assert_raises(ValueError, dlti, 1, 1, 1, 1, 1)
TestDlti
python
wandb__wandb
wandb/vendor/pygments/lexers/csound.py
{ "start": 10474, "end": 12543 }
class ____(RegexLexer): """ For `Csound <http://csound.github.io>`_ documents. .. versionadded:: 2.1 """ name = 'Csound Document' aliases = ['csound-document', 'csound-csd'] filenames = ['*.csd'] # These tokens are based on those in XmlLexer in pygments/lexers/html.py. Making # CsoundDocumentLexer a subclass of XmlLexer rather than RegexLexer may seem like a # better idea, since Csound Document files look like XML files. However, Csound # Documents can contain Csound comments (preceded by //, for example) before and # after the root element, unescaped bitwise AND & and less than < operators, etc. In # other words, while Csound Document files look like XML files, they may not actually # be XML files. tokens = { 'root': [ newline, (r'/[*](.|\n)*?[*]/', Comment.Multiline), (r'[^<&;/]+', Text), (r'<\s*CsInstruments', Name.Tag, ('orchestra', 'tag')), (r'<\s*CsScore', Name.Tag, ('score', 'tag')), (r'<\s*[hH][tT][mM][lL]', Name.Tag, ('HTML', 'tag')), (r'<\s*[\w:.-]+', Name.Tag, 'tag'), (r'<\s*/\s*[\w:.-]+\s*>', Name.Tag) ], 'orchestra': [ (r'<\s*/\s*CsInstruments\s*>', Name.Tag, '#pop'), (r'(.|\n)+?(?=<\s*/\s*CsInstruments\s*>)', using(CsoundOrchestraLexer)) ], 'score': [ (r'<\s*/\s*CsScore\s*>', Name.Tag, '#pop'), (r'(.|\n)+?(?=<\s*/\s*CsScore\s*>)', using(CsoundScoreLexer)) ], 'HTML': [ (r'<\s*/\s*[hH][tT][mM][lL]\s*>', Name.Tag, '#pop'), (r'(.|\n)+?(?=<\s*/\s*[hH][tT][mM][lL]\s*>)', using(HtmlLexer)) ], 'tag': [ (r'\s+', Text), (r'[\w.:-]+\s*=', Name.Attribute, 'attr'), (r'/?\s*>', Name.Tag, '#pop') ], 'attr': [ (r'\s+', Text), (r'".*?"', String, '#pop'), (r"'.*?'", String, '#pop'), (r'[^\s>]+', String, '#pop') ] }
CsoundDocumentLexer
python
spack__spack
lib/spack/spack/database.py
{ "start": 8286, "end": 8390 }
class ____(SpackError): """Raised when an upstream DB attempts to acquire a lock"""
ForbiddenLockError
python
joke2k__faker
faker/providers/date_time/sk_SK/__init__.py
{ "start": 46, "end": 769 }
class ____(DateTimeProvider): DAY_NAMES = { "0": "nedeľa", "1": "pondelok", "2": "utorok", "3": "streda", "4": "štvrtok", "5": "piatok", "6": "sobota", } MONTH_NAMES = { "01": "január", "02": "február", "03": "marec", "04": "apríl", "05": "máj", "06": "jún", "07": "júl", "08": "august", "09": "september", "10": "október", "11": "november", "12": "december", } def day_of_week(self): day = self.date("%w") return self.DAY_NAMES[day] def month_name(self): month = self.month() return self.MONTH_NAMES[month]
Provider
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/exc.py
{ "start": 8940, "end": 9496 }
class ____(DisconnectionError): """Raised when the connection pool should invalidate all stale connections. A subclass of :class:`_exc.DisconnectionError` that indicates that the disconnect situation encountered on the connection probably means the entire pool should be invalidated, as the database has been restarted. This exception will be handled otherwise the same way as :class:`_exc.DisconnectionError`, allowing three attempts to reconnect before giving up. """ invalidate_pool: bool = True
InvalidatePoolError
python
ray-project__ray
python/ray/serve/schema.py
{ "start": 42002, "end": 45348 }
class ____(BaseModel, extra=Extra.forbid, frozen=True): """Detailed info about a Serve application.""" name: str = Field(description="Application name.") route_prefix: Optional[str] = Field( ..., description=( "This is the `route_prefix` of the ingress deployment in the application. " "Requests to paths under this HTTP path prefix will be routed to this " "application. This value may be null if the application is deploying " "and app information has not yet fully propagated in the backend; or " "if the user explicitly set the prefix to `None`, so the application isn't " "exposed over HTTP. Routing is done based on longest-prefix match, so if " 'you have deployment A with a prefix of "/a" and deployment B with a ' 'prefix of "/a/b", requests to "/a", "/a/", and "/a/c" go to A and ' 'requests to "/a/b", "/a/b/", and "/a/b/c" go to B. Routes must not end ' 'with a "/" unless they\'re the root (just "/"), which acts as a catch-all.' ), ) docs_path: Optional[str] = Field( ..., description=( "The path at which the docs for this application is served, for instance " "the `docs_url` for FastAPI-integrated applications." ), ) status: ApplicationStatus = Field( description="The current status of the application." ) message: str = Field( description="A message that gives more insight into the application status." ) last_deployed_time_s: float = Field( description="The time at which the application was deployed." ) deployed_app_config: Optional[ServeApplicationSchema] = Field( description=( "The exact copy of the application config that was submitted to the " "cluster. This will include all of, and only, the options that were " "explicitly specified in the submitted config. Default values for " "unspecified options will not be displayed, and deployments that are part " "of the application but unlisted in the config will also not be displayed. 
" "Note that default values for unspecified options are applied to the " "cluster under the hood, and deployments that were unlisted will still be " "deployed. This config simply avoids cluttering with unspecified fields " "for readability." ) ) source: APIType = Field( description=( "The type of API that the application originates from. " "This is a Developer API that is subject to change." ), ) deployments: Dict[str, DeploymentDetails] = Field( description="Details about the deployments in this application." ) external_scaler_enabled: bool = Field( description="Whether external scaling is enabled for this application.", ) application_details_route_prefix_format = validator( "route_prefix", allow_reuse=True )(_route_prefix_format) deployment_topology: Optional[DeploymentTopology] = Field( default=None, description="The deployment topology showing how deployments in this application call each other.", ) @PublicAPI(stability="stable")
ApplicationDetails
python
readthedocs__readthedocs.org
readthedocs/projects/migrations/0082_add_extra_history_fields.py
{ "start": 149, "end": 840 }
class ____(migrations.Migration): safe = Safe.after_deploy() dependencies = [ ("projects", "0081_add_another_header"), ] operations = [ migrations.AddField( model_name="historicalproject", name="extra_history_browser", field=models.CharField( blank=True, max_length=250, null=True, verbose_name="Browser user-agent" ), ), migrations.AddField( model_name="historicalproject", name="extra_history_ip", field=models.CharField( blank=True, max_length=250, null=True, verbose_name="IP address" ), ), ]
Migration
python
modin-project__modin
asv_bench/benchmarks/benchmarks.py
{ "start": 11830, "end": 12235 }
class ____(BaseTimeSetItem): params = [ get_benchmark_shapes("TimeSetItem"), [1], ["zero", "middle", "last"], [True, False], ] def time_setitem_qc(self, *args, **kwargs): self.df[self.loc] = self.item execute(self.df) def time_setitem_raw(self, *args, **kwargs): self.df[self.loc] = self.item_raw execute(self.df)
TimeSetItem
python
django-import-export__django-import-export
tests/core/tests/widget.py
{ "start": 36, "end": 192 }
class ____(widgets.CharWidget): def clean(self, value, row=None, *args, **kwargs): raise ValueError("Ова вриједност је страшна!")
HarshRussianWidget
python
apache__airflow
airflow-core/src/airflow/callbacks/callback_requests.py
{ "start": 2852, "end": 3306 }
class ____(BaseCallbackRequest): """Email notification request for task failures/retries.""" ti: ti_datamodel.TaskInstance """Simplified Task Instance representation""" email_type: Literal["failure", "retry"] = "failure" """Whether this is for a failure or retry email""" context_from_server: ti_datamodel.TIRunContext """Task execution context from the Server""" type: Literal["EmailRequest"] = "EmailRequest"
EmailRequest
python
apache__airflow
airflow-core/src/airflow/api_fastapi/core_api/datamodels/ui/dashboard.py
{ "start": 1512, "end": 1734 }
class ____(BaseModel): """Historical Metric Data serializer for responses.""" dag_run_types: DAGRunTypes dag_run_states: DAGRunStates task_instance_states: TaskInstanceStateCount
HistoricalMetricDataResponse
python
allegroai__clearml
clearml/backend_api/services/v2_23/datasets.py
{ "start": 497, "end": 2040 }
class ____(NonStrictDataModel): """ :param pattern: Pattern string (regex) :type pattern: str :param fields: List of field names :type fields: Sequence[str] """ _schema = { "properties": { "fields": { "description": "List of field names", "items": {"type": "string"}, "type": ["array", "null"], }, "pattern": { "description": "Pattern string (regex)", "type": ["string", "null"], }, }, "type": "object", } def __init__(self, pattern=None, fields=None, **kwargs): super(MultiFieldPatternData, self).__init__(**kwargs) self.pattern = pattern self.fields = fields @schema_property("pattern") def pattern(self): return self._property_pattern @pattern.setter def pattern(self, value): if value is None: self._property_pattern = None return self.assert_isinstance(value, "pattern", six.string_types) self._property_pattern = value @schema_property("fields") def fields(self): return self._property_fields @fields.setter def fields(self, value): if value is None: self._property_fields = None return self.assert_isinstance(value, "fields", (list, tuple)) self.assert_isinstance(value, "fields", six.string_types, is_array=True) self._property_fields = value
MultiFieldPatternData
python
pytorch__pytorch
torch/_functorch/_aot_autograd/functional_utils.py
{ "start": 15696, "end": 23437 }
class ____: def __init__(self, tensor: FunctionalTensor) -> None: assert torch._is_functional_tensor(tensor.elem) self.sequence = _functionalization.get_view_meta_sequence(tensor.elem) self.metadata = MetadataKey.make(tensor) def __repr__(self) -> str: suffix = len("_ViewMeta") types = ", ".join(type(vm).__name__[:-suffix] for vm in self.sequence) return f"ViewMetaSequence({types})" def __eq__(self, other: object) -> bool: # If other is None, then it probably means that we weren't able to recreate # the ViewMeta sequence. One example is when we update the view metadata by # calling: create_synthetic_base_metadata. if other is None: return True # Comparison against any other type is not implemented. if not isinstance(other, ViewMetaSequence): return NotImplemented return self.metadata == other.metadata # new_arg and arg here are either: # (1) both a FakeTensor # (2) both a traceable tensor subclass that holds a FakeTensor # Pre-condition: the two args are the "old" and "new" inputs from running functionalization. # When we run functionalization and wrap our inputs into FunctionalTensors, # we can detect whether or not an input was mutated by checking to see if the inner tensor has changed # # Normally it would be enough just to check if arg is new_arg, which is normally enough for functionalization # to confirm that inputs were not mutated when running the user's model with functionalization on. 
# But when we have subclass inputs, we can't rely on that: # `from_fun(to_fun(x)) is x` will return False, because the call to `from_fun` constructs # a brand new subclass instance: we are calling __tensor_unflatten__, and going # from Subclass(FakeTensor) to Subclass(FunctionalTensor(FakeTensor)) def was_tensor_updated(arg, new_arg): if is_traceable_wrapper_subclass(arg): assert is_traceable_wrapper_subclass(new_arg) attrs, _ = arg.__tensor_flatten__() new_attrs, _ = new_arg.__tensor_flatten__() assert attrs == new_attrs # A tensor subclass was updated if any of its inner elements were updated return any( was_tensor_updated(getattr(arg, attr), getattr(new_arg, attr)) for attr in attrs ) else: return arg is not new_arg # new_arg and arg here are either: # (1) both a FakeTensor # (2) both a traceable tensor subclass that holds a FakeTensor # Pre-condition: the two args are the "old" and "new" inputs from running functionalization. # When we run functionalization and wrap our inputs into FunctionalTensors, # we can detect whether or not an input was mutated by checking to see if the inner tensor has changed, # but shares storage with the old input def was_tensor_metadata_updated(arg, new_arg): if is_traceable_wrapper_subclass(arg): assert is_traceable_wrapper_subclass(new_arg) attrs, _ = arg.__tensor_flatten__() new_attrs, _ = new_arg.__tensor_flatten__() assert attrs == new_attrs # A tensor subclass was updated if any of its inner elements were updated return any( was_tensor_metadata_updated(getattr(arg, attr), getattr(new_arg, attr)) for attr in attrs ) else: return arg is not new_arg and StorageWeakRef( arg.untyped_storage() ) == StorageWeakRef(new_arg.untyped_storage()) # Returns the number of detected copy_ def assert_functional_graph(fx_g: torch.fx.Graph) -> int: allowed_mutation_ops = [ torch.ops.aten.copy_.default, torch.ops.aten.set_.source_Tensor, ] if hasattr(torch.ops.fsdp, "copy_"): allowed_mutation_ops.append(torch.ops.fsdp.copy_.default) placeholders = 
set() mutation_count = 0 # NB: It would also be nice to verify that the mutations all happen at the # end, but we also do some administrative views after mutations so this # isn't actually true. (TODO: Could this cause problems for Inductor?) for n in fx_g.nodes: if n.op == "placeholder": placeholders.add(n) if isinstance(n.target, torch._ops.OpOverload): if n.target in allowed_mutation_ops: # Can only copy_/set_ into an input # this is mostly a hack to avoid failing XLA tests. # See https://github.com/pytorch/pytorch/pull/122434#issuecomment-2101012113 if "set_buffer_donor_" not in str(n.args[0]): assert n.args[0] in placeholders, ( f"n={str(n)}, n.args[0]={str(n.args[0])}, placeholders={str(placeholders)}, graph={str(fx_g)}" ) mutation_count += 1 else: assert not n.target._schema.is_mutable, ( f"aot_autograd expected to have an entirely functional graph, but found {n.format_node()}" ) return mutation_count def propagate_input_mutation_stacktraces(fx_g: torch.fx.Graph) -> None: placeholders = set() for n in fx_g.nodes: if n.op == "placeholder": placeholders.add(n) if isinstance(n.target, torch._ops.OpOverload): if n.target is torch.ops.aten.copy_.default: # Can only copy_ into an input, and can only do so once if "set_buffer_donor_" not in str(n.args[0]): assert n.args[0] in placeholders, ( f"n={str(n)}, n.args[0]={str(n.args[0])}, placeholders={str(placeholders)}, graph={str(fx_g)}" ) placeholders.remove(n.args[0]) copy_from_node = n.args[1] # Pre-condition: every node has a "stack_trace" field in its meta, # but copy_() nodes do not (since we manually added them during functionalization). # Instead, we manually propagate here. 
if "stack_trace" in copy_from_node.meta: n.meta["stack_trace"] = copy_from_node.meta["stack_trace"] def _check_if_mutation_can_be_in_graph( keep_input_mutations: bool, mutates_data, mutates_metadata, mutations_hidden_from_autograd, mutations_under_no_grad_or_inference_mode, mutates_storage_metadata, mutation_inductor_storage_resize, requires_grad, ): if keep_input_mutations: in_graph = ( mutates_data or mutates_storage_metadata or mutation_inductor_storage_resize ) and ( (not mutates_metadata and not requires_grad) or mutations_hidden_from_autograd or mutations_under_no_grad_or_inference_mode ) else: in_graph = False # See Note [set_() Input Mutations in AOTAutograd] # If there was a `set_()`, we require that all mutations were under no_grad, # so we can (safely) emit the set_() in the graph at runtime # resize_() gets the same treatment if mutation_inductor_storage_resize or mutates_storage_metadata: op_name = "resize_" if mutation_inductor_storage_resize else "set_" assert in_graph, f"""\ Encountered a {op_name} on a graph input, but the input has other mutations that we cannot keep in the graph. This is not supported today. Current state: keep_input_mutations={keep_input_mutations} mutates_data={mutates_data} mutates_metadata={mutates_metadata} mutations_hidden_from_autograd={mutations_hidden_from_autograd} mutations_under_no_grad_or_inference_mode={mutations_under_no_grad_or_inference_mode} mutation_inductor_storage_resize={mutation_inductor_storage_resize} requires_grad={requires_grad}""" return in_graph
ViewMetaSequence
python
jazzband__django-polymorphic
src/polymorphic/tests/models.py
{ "start": 662, "end": 799 }
class ____(ShowFieldType, PolymorphicModel): field1 = models.CharField(max_length=30) polymorphic_showfield_deferred = True
Model2A
python
milvus-io__pymilvus
pymilvus/exceptions.py
{ "start": 3284, "end": 3369 }
class ____(MilvusException): """Raise when fields is invalid"""
FieldsTypeException
python
dask__distributed
distributed/client.py
{ "start": 4341, "end": 4912 }
class ____(CancelledError): error_groups: list[CancelledFuturesGroup] def __init__(self, error_groups: list[CancelledFuturesGroup]): self.error_groups = sorted( error_groups, key=lambda group: len(group.errors), reverse=True ) def __str__(self): count = sum(map(lambda group: len(group.errors), self.error_groups)) result = f"{count} Future{'s' if count > 1 else ''} cancelled:" return "\n".join( [result, "Reasons:"] + [str(group) for group in self.error_groups] )
FuturesCancelledError
python
Netflix__metaflow
metaflow/runner/click_api.py
{ "start": 10751, "end": 25936 }
class ____(object): def __init__(self, parent=None, flow_cls=None, config_input=None, **kwargs): self._parent = parent self._chain = [{self._API_NAME: kwargs}] self._flow_cls = flow_cls self._config_input = config_input self._cached_computed_parameters = None @property def parent(self): if self._parent: return self._parent return None @property def chain(self): return self._chain @property def name(self): return self._API_NAME @classmethod def from_cli(cls, flow_file: str, cli_collection: Callable) -> Callable: flow_cls = extract_flow_class_from_file(flow_file) with flow_context(flow_cls) as _: cli_collection, config_input = config_options_with_config_input( cli_collection ) cli_collection = add_decorator_options(cli_collection) def getattr_wrapper(_self, name): # Functools.partial do not automatically bind self (no __get__) with flow_context(flow_cls) as _: # We also wrap this in the proper flow context because since commands # are loaded lazily, we need the proper flow context to compute things # like parameters. If we do not do this, the outer flow's context will # be used. 
return _self._internal_getattr(_self, name) class_dict = { "__module__": "metaflow", "_API_NAME": flow_file, "_internal_getattr": staticmethod( functools.partial( _lazy_load_command, cli_collection, "_compute_flow_parameters" ) ), "__getattr__": getattr_wrapper, } to_return = type(flow_file, (MetaflowAPI,), class_dict) to_return.__name__ = flow_file ( params_sigs, possible_arg_params, possible_opt_params, annotations, defaults, ) = extract_all_params(cli_collection) def _method(_self, *args, **kwargs): method_params = _method_sanity_check( possible_arg_params, possible_opt_params, annotations, defaults, **kwargs, ) return to_return( parent=None, flow_cls=flow_cls, config_input=config_input, **method_params, ) m = _method m.__name__ = cli_collection.name m.__doc__ = getattr(cli_collection, "help", None) m.__signature__ = inspect.signature(_method).replace( parameters=params_sigs.values() ) m.__annotations__ = annotations m.__defaults__ = tuple(defaults.values()) return m def execute(self) -> List[str]: parents = [] current = self while current.parent: parents.append(current.parent) current = current.parent parents.reverse() final_chain = list(itertools.chain.from_iterable([p.chain for p in parents])) final_chain.extend(self.chain) components = [] for each_cmd in final_chain: for cmd, params in each_cmd.items(): components.append(cmd) args = params.pop("args", {}) options = params.pop("options", {}) for _, v in args.items(): if v is None: continue if isinstance(v, (list, tuple)): for i in v: components.append(i) else: components.append(v) for k, v in options.items(): if v is None: continue if isinstance(v, list): for i in v: if isinstance(i, tuple): components.append("--%s" % k) components.extend(map(str, i)) else: components.append("--%s" % k) components.append(str(i)) elif v is None: continue # Skip None values -- they are defaults and converting # them to string will not be what the user wants else: components.append("--%s" % k) if v != "flag": 
components.append(str(v)) return components def _compute_flow_parameters(self): if ( self._flow_cls is None or self._config_input is None or self._parent is not None ): raise RuntimeError( "Computing flow-level parameters for a non start API. " "Please report to the Metaflow team." ) if self._cached_computed_parameters is not None: return self._cached_computed_parameters self._cached_computed_parameters = [] config_options = None if CLICK_API_PROCESS_CONFIG: with flow_context(self._flow_cls) as _: # We are going to resolve the configs first and then get the parameters. # Note that configs may update/add parameters so the order is important # Since part of the processing of configs happens by click, we need to # "fake" it. # Extract any config options as well as datastore and quiet options method_params = self._chain[0][self._API_NAME] opts = method_params["options"] defaults = method_params["defaults"] ds = opts.get("datastore", defaults["datastore"]) quiet = opts.get("quiet", defaults["quiet"]) # Order to find config or config_value: # 1. Passed directly to the Click API # 2. If not found, check if passed through an environment variable # 3. If not found, use the default value is_default = False config_file = opts.get("config") if config_file is None: # Check if it was set through an environment variable -- we # don't have click process them here so we need to "fake" it. 
env_config_file = os.environ.get("METAFLOW_FLOW_CONFIG") if env_config_file: # Convert dict items to list of tuples config_file = list(json.loads(env_config_file).items()) is_default = False else: is_default = True config_file = defaults.get("config") if config_file: config_file = dict( map( lambda x: ( x[0], ConvertPath.convert_value(x[1], is_default), ), config_file, ) ) is_default = False config_value = opts.get("config-value") if config_value is None: env_config_value = os.environ.get("METAFLOW_FLOW_CONFIG_VALUE") if env_config_value: # Parse environment variable using MultipleTuple logic loaded = json.loads(env_config_value) # Convert dict items to list of tuples with JSON-serialized values config_value = [ (k, json.dumps(v) if not isinstance(v, str) else v) for k, v in loaded.items() ] is_default = False else: is_default = True config_value = defaults.get("config_value") if config_value: config_value = dict( map( lambda x: ( x[0], ConvertDictOrStr.convert_value(x[1], is_default), ), config_value, ) ) if (config_file is None) ^ (config_value is None): # If we have one, we should have the other raise MetaflowException( "Options were not properly set -- this is an internal error." ) if config_file: # Process both configurations; the second one will return all the merged # configuration options properly processed. self._config_input.process_configs( self._flow_cls.__name__, "config", config_file, quiet, ds ) config_options = self._config_input.process_configs( self._flow_cls.__name__, "config_value", config_value, quiet, ds ) # At this point, we are like in start() in cli.py -- we obtained the # properly processed config_options which we can now use to process # the config decorators (including StepMutator/FlowMutator) # Note that if CLICK_API_PROCESS_CONFIG is False, we still do this because # it will init all parameters (config_options will be None) # We ignore any errors if we don't check the configs in the click API. 
# Init all values in the flow mutators and then process them for decorator in self._flow_cls._flow_state[FlowStateItems.FLOW_MUTATORS]: decorator.external_init() new_cls = self._flow_cls._process_config_decorators( config_options, process_configs=CLICK_API_PROCESS_CONFIG ) if new_cls: self._flow_cls = new_cls for _, param in self._flow_cls._get_parameters(): if param.IS_CONFIG_PARAMETER: continue self._cached_computed_parameters.append(param) return self._cached_computed_parameters def extract_all_params(cmd_obj: Union[click.Command, click.Group]): arg_params_sigs = OrderedDict() opt_params_sigs = OrderedDict() params_sigs = OrderedDict() arg_parameters = OrderedDict() opt_parameters = OrderedDict() annotations = OrderedDict() defaults = OrderedDict() for each_param in cmd_obj.params: if isinstance(each_param, click.Argument): ( arg_params_sigs[each_param.name], annotations[each_param.name], ) = get_inspect_param_obj(each_param, inspect.Parameter.POSITIONAL_ONLY) arg_parameters[each_param.name] = each_param elif isinstance(each_param, click.Option): ( opt_params_sigs[each_param.name], annotations[each_param.name], ) = get_inspect_param_obj(each_param, inspect.Parameter.KEYWORD_ONLY) opt_parameters[each_param.name] = each_param defaults[each_param.name] = each_param.default # first, fill in positional arguments for name, each_arg_param in arg_params_sigs.items(): params_sigs[name] = each_arg_param # then, fill in keyword arguments for name, each_opt_param in opt_params_sigs.items(): params_sigs[name] = each_opt_param return params_sigs, arg_parameters, opt_parameters, annotations, defaults def extract_group(cmd_obj: click.Group, flow_parameters: List[Parameter]) -> Callable: class_dict = {"__module__": "metaflow", "_API_NAME": cmd_obj.name} for _, sub_cmd_obj in cmd_obj.commands.items(): if isinstance(sub_cmd_obj, click.Group): # recursion class_dict[sub_cmd_obj.name] = extract_group(sub_cmd_obj, flow_parameters) elif isinstance(sub_cmd_obj, click.Command): 
class_dict[sub_cmd_obj.name] = extract_command(sub_cmd_obj, flow_parameters) else: raise RuntimeError( "Cannot handle %s of type %s" % (sub_cmd_obj.name, type(sub_cmd_obj)) ) resulting_class = type(cmd_obj.name, (MetaflowAPI,), class_dict) resulting_class.__name__ = cmd_obj.name ( params_sigs, possible_arg_params, possible_opt_params, annotations, defaults, ) = extract_all_params(cmd_obj) def _method(_self, *args, **kwargs): method_params = _method_sanity_check( possible_arg_params, possible_opt_params, annotations, defaults, **kwargs ) return resulting_class(parent=_self, flow_cls=None, **method_params) m = _method m.__name__ = cmd_obj.name m.__doc__ = getattr(cmd_obj, "help", None) m.__signature__ = inspect.signature(_method).replace( parameters=params_sigs.values() ) m.__annotations__ = annotations m.__defaults__ = tuple(defaults.values()) return m def extract_command( cmd_obj: click.Command, flow_parameters: List[Parameter] ) -> Callable: if getattr(cmd_obj, "has_flow_params", False): for p in flow_parameters[::-1]: cmd_obj.params.insert(0, click.Option(("--" + p.name,), **p.kwargs)) ( params_sigs, possible_arg_params, possible_opt_params, annotations, defaults, ) = extract_all_params(cmd_obj) def _method(_self, *args, **kwargs): method_params = _method_sanity_check( possible_arg_params, possible_opt_params, annotations, defaults, **kwargs ) _self._chain.append({cmd_obj.name: method_params}) return _self.execute() m = _method m.__name__ = cmd_obj.name m.__doc__ = getattr(cmd_obj, "help", None) m.__signature__ = inspect.signature(_method).replace( parameters=params_sigs.values() ) m.__annotations__ = annotations m.__defaults__ = tuple(defaults.values()) return m if __name__ == "__main__": from metaflow.cli import start api = MetaflowAPI.from_cli("../try.py", start) command = api(metadata="local").run( tags=["abc", "def"], decospecs=["kubernetes"], max_workers=5, alpha=3, myfile="path/to/file", ) print(" ".join(command)) command = ( api(metadata="local") 
.kubernetes() .step( step_name="process", code_package_metadata="some_version", code_package_sha="some_sha", code_package_url="some_url", ) ) print(" ".join(command)) command = api().tag().add(tags=["abc", "def"]) print(" ".join(command)) command = getattr(api(decospecs=["retry"]), "argo-workflows")().create() print(" ".join(command))
MetaflowAPI
python
realpython__materials
python-sqlite-sqlalchemy/project/examples/example_3/app/models.py
{ "start": 88, "end": 313 }
class ____(db.Model): __tablename__ = "artists" artist_id = db.Column("ArtistId", db.Integer, primary_key=True) name = db.Column("Name", db.String(120)) albums = db.relationship("Album", backref="artist")
Artist
python
pdm-project__pdm
src/pdm/cli/commands/run.py
{ "start": 18735, "end": 21980 }
class ____(BaseCommand): """Run commands or scripts with local packages loaded""" arguments = (*BaseCommand.arguments, skip_option, venv_option) def add_arguments(self, parser: argparse.ArgumentParser) -> None: action = parser.add_mutually_exclusive_group() action.add_argument( "-l", "--list", action="store_true", help="Show all available scripts defined in pyproject.toml", ) action.add_argument( "-j", "--json", action="store_true", help="Output all scripts infos in JSON", ) exec = parser.add_argument_group("Execution parameters") exec.add_argument( "-s", "--site-packages", action="store_true", help="Load site-packages from the selected interpreter", ) exec.add_argument( "--recreate", action="store_true", help="Recreate the script environment for self-contained scripts" ) exec.add_argument("script", nargs="?", help="The command to run") exec.add_argument( "args", nargs=argparse.REMAINDER, help="Arguments that will be passed to the command", ) def get_runner(self, project: Project, hooks: HookManager, options: argparse.Namespace) -> TaskRunner: if (runner_cls := getattr(self, "runner_cls", None)) is not None: # pragma: no cover deprecation_warning("runner_cls attribute is deprecated, use get_runner method instead.") runner = cast("type[TaskRunner]", runner_cls)(project, hooks) else: runner = TaskRunner(project, hooks) runner.recreate_env = options.recreate if options.site_packages: runner.global_options["site_packages"] = True return runner def handle(self, project: Project, options: argparse.Namespace) -> None: hooks = HookManager(project, options.skip) runner = self.get_runner(project, hooks, options) if options.list: return runner.show_list() if options.json: return print_json(data=runner.as_json()) if not options.script: project.core.ui.warn("No command is given, default to the Python REPL.") options.script = "python" hooks.try_emit("pre_run", script=options.script, args=options.args) exit_code = runner.run(options.script, options.args) 
hooks.try_emit("post_run", script=options.script, args=options.args) sys.exit(exit_code) def run_script_if_present(script_name: str, sender: Project, hooks: HookManager, **kwargs: Any) -> None: """A signal handler to run a script if present in the project.""" runner = TaskRunner(sender, hooks) task = runner.get_task(script_name) if task is None: return exit_code = runner.run_task(task) if exit_code != 0: sys.exit(exit_code) # reload project files in case the script modified them sender.project_config.reload() sender.pyproject.reload() sender.lockfile.reload() for hook in pdm_signals: pdm_signals.signal(hook).connect(partial(run_script_if_present, hook), weak=False)
Command
python
getsentry__sentry
src/sentry/users/services/lost_password_hash/impl.py
{ "start": 270, "end": 1112 }
class ____(LostPasswordHashService): def get_or_create( self, user_id: int, ) -> RpcLostPasswordHash: # NOTE(mattrobenolt): Some security people suggest we invalidate # existing password hashes, but this opens up the possibility # of a DoS vector where then password resets are continually # requested, thus preventing someone from actually resetting # their password. # See: https://github.com/getsentry/sentry/pull/17299 password_hash, created = LostPasswordHash.objects.get_or_create(user_id=user_id) if not password_hash.is_valid(): password_hash.date_added = datetime.datetime.now() password_hash.set_hash() password_hash.save() return serialize_lostpasswordhash(password_hash)
DatabaseLostPasswordHashService
python
django__django
tests/template_tests/syntax_tests/test_include.py
{ "start": 8261, "end": 14908 }
class ____(SimpleTestCase): def test_include_missing_template(self): """ The correct template is identified as not existing when {% include %} specifies a template that does not exist. """ engine = Engine(app_dirs=True, debug=True) template = engine.get_template("test_include_error.html") with self.assertRaisesMessage(TemplateDoesNotExist, "missing.html"): template.render(Context()) def test_extends_include_missing_baseloader(self): """ #12787 -- The correct template is identified as not existing when {% extends %} specifies a template that does exist, but that template has an {% include %} of something that does not exist. """ engine = Engine(app_dirs=True, debug=True) template = engine.get_template("test_extends_error.html") with self.assertRaisesMessage(TemplateDoesNotExist, "missing.html"): template.render(Context()) def test_extends_include_missing_cachedloader(self): engine = Engine( debug=True, loaders=[ ( "django.template.loaders.cached.Loader", [ "django.template.loaders.app_directories.Loader", ], ), ], ) template = engine.get_template("test_extends_error.html") with self.assertRaisesMessage(TemplateDoesNotExist, "missing.html"): template.render(Context()) # Repeat to ensure it still works when loading from the cache template = engine.get_template("test_extends_error.html") with self.assertRaisesMessage(TemplateDoesNotExist, "missing.html"): template.render(Context()) def test_include_template_argument(self): """ Support any render() supporting object """ engine = Engine() ctx = Context( { "tmpl": engine.from_string("This worked!"), } ) outer_tmpl = engine.from_string("{% include tmpl %}") output = outer_tmpl.render(ctx) self.assertEqual(output, "This worked!") def test_include_template_iterable(self): engine = Engine.get_default() outer_temp = engine.from_string("{% include var %}") tests = [ ("admin/fail.html", "index.html"), ["admin/fail.html", "index.html"], ] for template_names in tests: with self.subTest(template_names): output = 
outer_temp.render(Context({"var": template_names})) self.assertEqual(output, "index\n") def test_include_template_none(self): engine = Engine.get_default() outer_temp = engine.from_string("{% include var %}") ctx = Context({"var": None}) msg = "No template names provided" with self.assertRaisesMessage(TemplateDoesNotExist, msg): outer_temp.render(ctx) def test_include_from_loader_get_template(self): tmpl = loader.get_template("include_tpl.html") # {% include tmpl %} output = tmpl.render({"tmpl": loader.get_template("index.html")}) self.assertEqual(output, "index\n\n") def test_include_immediate_missing(self): """ #16417 -- Include tags pointing to missing templates should not raise an error at parsing time. """ Engine(debug=True).from_string('{% include "this_does_not_exist.html" %}') def test_include_recursive(self): comments = [ { "comment": "A1", "children": [ {"comment": "B1", "children": []}, {"comment": "B2", "children": []}, {"comment": "B3", "children": [{"comment": "C1", "children": []}]}, ], } ] with self.subTest(template="recursive_include.html"): engine = Engine(app_dirs=True) t = engine.get_template("recursive_include.html") self.assertEqual( "Recursion! A1 Recursion! B1 B2 B3 Recursion! C1", t.render(Context({"comments": comments})) .replace(" ", "") .replace("\n", " ") .strip(), ) with self.subTest(template="recursive_relative_include.html"): engine = Engine(app_dirs=True) t = engine.get_template("recursive_relative_include.html") self.assertEqual( "Recursion! A1 Recursion! B1 B2 B3 Recursion! C1", t.render(Context({"comments": comments})) .replace(" ", "") .replace("\n", " ") .strip(), ) with self.subTest(template="tmpl"): engine = Engine() template = """ Recursion! 
{% for c in comments %} {{ c.comment }} {% if c.children %}{% include tmpl with comments=c.children %}{% endif %} {% endfor %} """ outer_tmpl = engine.from_string("{% include tmpl %}") output = outer_tmpl.render( Context({"tmpl": engine.from_string(template), "comments": comments}) ) self.assertEqual( "Recursion! A1 Recursion! B1 B2 B3 Recursion! C1", output.replace(" ", "").replace("\n", " ").strip(), ) def test_include_cache(self): """ {% include %} keeps resolved templates constant (#27974). The CounterNode object in the {% counter %} template tag is created once if caching works properly. Each iteration increases the counter instead of restarting it. This works as a regression test only if the cached loader isn't used, so the @setup decorator isn't used. """ engine = Engine( loaders=[ ( "django.template.loaders.locmem.Loader", { "template": ( '{% for x in vars %}{% include "include" %}{% endfor %}' ), "include": '{% include "next" %}', "next": "{% load custom %}{% counter %}", }, ), ], libraries={"custom": "template_tests.templatetags.custom"}, ) output = engine.render_to_string("template", {"vars": range(9)}) self.assertEqual(output, "012345678")
IncludeTests
python
walkccc__LeetCode
solutions/992. Subarrays with K Different Integers/992.py
{ "start": 0, "end": 596 }
class ____: def subarraysWithKDistinct(self, nums: list[int], k: int) -> int: def subarraysWithAtMostKDistinct(k: int) -> int: res = 0 count = collections.Counter() l = 0 for r, num in enumerate(nums): count[num] += 1 if count[num] == 1: k -= 1 while k < 0: count[nums[l]] -= 1 if count[nums[l]] == 0: k += 1 l += 1 res += r - l + 1 # nums[l..r], nums[l + 1..r], ..., nums[r] return res return subarraysWithAtMostKDistinct(k) - subarraysWithAtMostKDistinct(k - 1)
Solution
python
pytorch__pytorch
torch/utils/checkpoint.py
{ "start": 53794, "end": 54666 }
class ____: """ Context passed to policy function during selective checkpointing. This class is used to pass relevant metadata to the policy function during selective checkpointing. The metadata includes whether the current invocation of the policy function is during recomputation or not. Example: >>> # xdoctest: +SKIP(stub) >>> >>> def policy_fn(ctx, op, *args, **kwargs): >>> print(ctx.is_recompute) >>> >>> context_fn = functools.partial(create_selective_checkpoint_contexts, policy_fn) >>> >>> out = torch.utils.checkpoint.checkpoint( >>> fn, x, y, >>> use_reentrant=False, >>> context_fn=context_fn, >>> ) """ def __init__(self, *, is_recompute) -> None: self.is_recompute = is_recompute
SelectiveCheckpointContext
python
PrefectHQ__prefect
tests/workers/test_base_worker.py
{ "start": 46339, "end": 54046 }
class ____: def test_defaults(self): class WorkerImplNoCustomization(BaseWorker): type = "test-no-customization" async def run(self): pass async def verify_submitted_deployment(self, deployment): pass assert WorkerImplNoCustomization.get_logo_url() == "" assert WorkerImplNoCustomization.get_documentation_url() == "" assert WorkerImplNoCustomization.get_description() == "" assert WorkerImplNoCustomization.get_default_base_job_template() == { "job_configuration": { "command": "{{ command }}", "env": "{{ env }}", "labels": "{{ labels }}", "name": "{{ name }}", }, "variables": { "properties": { "command": { "anyOf": [{"type": "string"}, {"type": "null"}], "title": "Command", "default": None, "description": ( "The command to use when starting a flow run. " "In most cases, this should be left blank and the command " "will be automatically generated by the worker." ), }, "env": { "title": "Environment Variables", "type": "object", "additionalProperties": { "anyOf": [{"type": "string"}, {"type": "null"}] }, "description": ( "Environment variables to set when starting a flow run." ), }, "labels": { "title": "Labels", "type": "object", "additionalProperties": {"type": "string"}, "description": ( "Labels applied to infrastructure created by the worker" " using this job configuration." ), }, "name": { "anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name", "default": None, "description": ( "Name given to infrastructure created by the worker using " "this job configuration." 
), }, }, "type": "object", }, } def test_custom_logo_url(self): class WorkerImplWithLogoUrl(BaseWorker): type = "test-with-logo-url" job_configuration = BaseJobConfiguration _logo_url = "https://example.com/logo.png" async def run(self): pass async def verify_submitted_deployment(self, deployment): pass assert WorkerImplWithLogoUrl.get_logo_url() == "https://example.com/logo.png" def test_custom_documentation_url(self): class WorkerImplWithDocumentationUrl(BaseWorker): type = "test-with-documentation-url" job_configuration = BaseJobConfiguration _documentation_url = "https://example.com/docs" async def run(self): pass async def verify_submitted_deployment(self, deployment): pass assert ( WorkerImplWithDocumentationUrl.get_documentation_url() == "https://example.com/docs" ) def test_custom_description(self): class WorkerImplWithDescription(BaseWorker): type = "test-with-description" job_configuration = BaseJobConfiguration _description = "Custom Worker Description" async def run(self): pass async def verify_submitted_deployment(self, deployment): pass assert ( WorkerImplWithDescription.get_description() == "Custom Worker Description" ) def test_custom_base_job_configuration(self): class CustomBaseJobConfiguration(BaseJobConfiguration): var1: str = Field(json_schema_extra={"template": "{{ var1 }}"}) var2: int = Field(json_schema_extra={"template": "{{ var2 }}"}) class CustomBaseVariables(BaseVariables): var1: str = Field(default=...) 
var2: int = Field(default=1) class WorkerImplWithCustomBaseJobConfiguration(BaseWorker): type = "test-with-base-job-configuration" job_configuration = CustomBaseJobConfiguration job_configuration_variables = CustomBaseVariables async def run(self): pass async def verify_submitted_deployment(self, deployment): pass assert ( WorkerImplWithCustomBaseJobConfiguration.get_default_base_job_template() == { "job_configuration": { "command": "{{ command }}", "env": "{{ env }}", "labels": "{{ labels }}", "name": "{{ name }}", "var1": "{{ var1 }}", "var2": "{{ var2 }}", }, "variables": { "properties": { "command": { "title": "Command", "anyOf": [{"type": "string"}, {"type": "null"}], "default": None, "description": ( "The command to use when starting a flow run. " "In most cases, this should be left blank and the command " "will be automatically generated by the worker." ), }, "env": { "title": "Environment Variables", "type": "object", "additionalProperties": { "anyOf": [{"type": "string"}, {"type": "null"}] }, "description": ( "Environment variables to set when starting a flow run." ), }, "labels": { "title": "Labels", "type": "object", "additionalProperties": {"type": "string"}, "description": ( "Labels applied to infrastructure created by a worker." ), }, "name": { "title": "Name", "anyOf": [{"type": "string"}, {"type": "null"}], "default": None, "description": ( "Name given to infrastructure created by a worker." ), }, "var1": {"title": "Var1", "type": "string"}, "var2": {"title": "Var2", "type": "integer", "default": 1}, }, "required": ["var1"], "type": "object", }, } )
TestWorkerProperties
python
pypa__warehouse
warehouse/organizations/models.py
{ "start": 2502, "end": 3370 }
class ____(db.Model): __tablename__ = "organization_projects" __table_args__ = ( Index("organization_projects_organization_id_idx", "organization_id"), Index("organization_projects_project_id_idx", "project_id"), UniqueConstraint( "organization_id", "project_id", name="_organization_projects_organization_project_uc", ), ) __repr__ = make_repr("project_id", "organization_id") organization_id: Mapped[UUID] = mapped_column( ForeignKey("organizations.id", onupdate="CASCADE", ondelete="CASCADE"), ) project_id: Mapped[UUID] = mapped_column( ForeignKey("projects.id", onupdate="CASCADE", ondelete="CASCADE"), ) organization: Mapped[Organization] = relationship(lazy=False) project: Mapped[Project] = relationship(lazy=False)
OrganizationProject
python
fastapi__sqlmodel
docs_src/tutorial/many_to_many/tutorial001_py310.py
{ "start": 295, "end": 536 }
class ____(SQLModel, table=True): id: int | None = Field(default=None, primary_key=True) name: str = Field(index=True) headquarters: str heroes: list["Hero"] = Relationship(back_populates="teams", link_model=HeroTeamLink)
Team
python
django__django
django/db/models/fields/related_descriptors.py
{ "start": 5351, "end": 15160 }
class ____: """ Accessor to the related object on the forward side of a many-to-one or one-to-one (via ForwardOneToOneDescriptor subclass) relation. In the example:: class Child(Model): parent = ForeignKey(Parent, related_name='children') ``Child.parent`` is a ``ForwardManyToOneDescriptor`` instance. """ def __init__(self, field_with_rel): self.field = field_with_rel @cached_property def RelatedObjectDoesNotExist(self): # The exception can't be created at initialization time since the # related model might not be resolved yet; `self.field.model` might # still be a string model reference. return type( "RelatedObjectDoesNotExist", (self.field.remote_field.model.DoesNotExist, AttributeError), { "__module__": self.field.model.__module__, "__qualname__": "%s.%s.RelatedObjectDoesNotExist" % ( self.field.model.__qualname__, self.field.name, ), }, ) def is_cached(self, instance): return self.field.is_cached(instance) def get_queryset(self, *, instance): return self.field.remote_field.model._base_manager.db_manager( hints={"instance": instance} ).fetch_mode(instance._state.fetch_mode) def get_prefetch_querysets(self, instances, querysets=None): if querysets and len(querysets) != 1: raise ValueError( "querysets argument of get_prefetch_querysets() should have a length " "of 1." ) queryset = ( querysets[0] if querysets else self.get_queryset(instance=instances[0]) ) rel_obj_attr = self.field.get_foreign_related_value instance_attr = self.field.get_local_related_value instances_dict = {instance_attr(inst): inst for inst in instances} remote_field = self.field.remote_field related_fields = [ queryset.query.resolve_ref(field.name).target for field in self.field.foreign_related_fields ] queryset = queryset.filter( TupleIn( ColPairs( queryset.model._meta.db_table, related_fields, related_fields, self.field, ), list(instances_dict), ) ) # There can be only one object prefetched for each instance so clear # ordering if the query allows it without side effects. 
queryset.query.clear_ordering() # Since we're going to assign directly in the cache, # we must manage the reverse relation cache manually. if not remote_field.multiple: for rel_obj in queryset: instance = instances_dict[rel_obj_attr(rel_obj)] remote_field.set_cached_value(rel_obj, instance) return ( queryset, rel_obj_attr, instance_attr, True, self.field.cache_name, False, ) def get_object(self, instance): qs = self.get_queryset(instance=instance) # Assuming the database enforces foreign keys, this won't fail. return qs.get(self.field.get_reverse_related_filter(instance)) def __get__(self, instance, cls=None): """ Get the related instance through the forward relation. With the example above, when getting ``child.parent``: - ``self`` is the descriptor managing the ``parent`` attribute - ``instance`` is the ``child`` instance - ``cls`` is the ``Child`` class (we don't need it) """ if instance is None: return self # The related instance is loaded from the database and then cached # by the field on the model instance state. It can also be pre-cached # by the reverse accessor (ReverseOneToOneDescriptor). try: rel_obj = self.field.get_cached_value(instance) except KeyError: rel_obj = None has_value = None not in self.field.get_local_related_value(instance) if has_value: model = self.field.model for current_instance, ancestor in _traverse_ancestors(model, instance): if ancestor: # The value might be cached on an ancestor if the # instance originated from walking down the inheritance # chain. rel_obj = self.field.get_cached_value(ancestor, default=None) if rel_obj is not None: break if rel_obj is None and has_value: instance._state.fetch_mode.fetch(self, instance) return self.field.get_cached_value(instance) self.field.set_cached_value(instance, rel_obj) if rel_obj is None and not self.field.null: raise self.RelatedObjectDoesNotExist( "%s has no %s." 
% (self.field.model.__name__, self.field.name) ) else: return rel_obj def fetch_one(self, instance): rel_obj = self.get_object(instance) self.field.set_cached_value(instance, rel_obj) # If this is a one-to-one relation, set the reverse accessor cache on # the related object to the current instance to avoid an extra SQL # query if it's accessed later on. remote_field = self.field.remote_field if not remote_field.multiple: remote_field.set_cached_value(rel_obj, instance) def fetch_many(self, instances): is_cached = self.is_cached missing_instances = [i for i in instances if not is_cached(i)] prefetch_related_objects(missing_instances, self.field.name) def __set__(self, instance, value): """ Set the related instance through the forward relation. With the example above, when setting ``child.parent = parent``: - ``self`` is the descriptor managing the ``parent`` attribute - ``instance`` is the ``child`` instance - ``value`` is the ``parent`` instance on the right of the equal sign """ # An object must be an instance of the related class. if value is not None and not isinstance( value, self.field.remote_field.model._meta.concrete_model ): raise ValueError( 'Cannot assign "%r": "%s.%s" must be a "%s" instance.' % ( value, instance._meta.object_name, self.field.name, self.field.remote_field.model._meta.object_name, ) ) elif value is not None: if instance._state.db is None: instance._state.db = router.db_for_write( instance.__class__, instance=value ) if value._state.db is None: value._state.db = router.db_for_write( value.__class__, instance=instance ) if not router.allow_relation(value, instance): raise ValueError( 'Cannot assign "%r": the current database router prevents this ' "relation." % value ) remote_field = self.field.remote_field # If we're setting the value of a OneToOneField to None, we need to # clear out the cache on any old related object. Otherwise, deleting # the previously-related object will also cause this object to be # deleted, which is wrong. 
if value is None: # Look up the previously-related object, which may still be # available since we've not yet cleared out the related field. Use # the cache directly, instead of the accessor; if we haven't # populated the cache, then we don't care - we're only accessing # the object to invalidate the accessor cache, so there's no need # to populate the cache just to expire it again. related = self.field.get_cached_value(instance, default=None) # If we've got an old related object, we need to clear out its # cache. This cache also might not exist if the related object # hasn't been accessed yet. if related is not None: remote_field.set_cached_value(related, None) for lh_field, rh_field in self.field.related_fields: setattr(instance, lh_field.attname, None) # Set the values of the related field. else: for lh_field, rh_field in self.field.related_fields: setattr(instance, lh_field.attname, getattr(value, rh_field.attname)) # Set the related instance cache used by __get__ to avoid an SQL query # when accessing the attribute we just set. self.field.set_cached_value(instance, value) # If this is a one-to-one relation, set the reverse accessor cache on # the related object to the current instance to avoid an extra SQL # query if it's accessed later on. if value is not None and not remote_field.multiple: remote_field.set_cached_value(value, instance) def __reduce__(self): """ Pickling should return the instance attached by self.field on the model, not a new copy of that descriptor. Use getattr() to retrieve the instance directly from the model. """ return getattr, (self.field.model, self.field.name)
ForwardManyToOneDescriptor
python
cherrypy__cherrypy
cherrypy/test/test_misc_tools.py
{ "start": 6820, "end": 7127 }
class ____(helper.CPWebCase): setup_server = staticmethod(setup_server) def testAutoVary(self): self.getPage('/autovary/') self.assertHeader( 'Vary', 'Accept, Accept-Charset, Accept-Encoding, ' 'Host, If-Modified-Since, Range', )
AutoVaryTest
python
scikit-learn__scikit-learn
sklearn/impute/_iterative.py
{ "start": 1640, "end": 38719 }
class ____(_BaseImputer): """Multivariate imputer that estimates each feature from all the others. A strategy for imputing missing values by modeling each feature with missing values as a function of other features in a round-robin fashion. Read more in the :ref:`User Guide <iterative_imputer>`. .. versionadded:: 0.21 .. note:: This estimator is still **experimental** for now: the predictions and the API might change without any deprecation cycle. To use it, you need to explicitly import `enable_iterative_imputer`:: >>> # explicitly require this experimental feature >>> from sklearn.experimental import enable_iterative_imputer # noqa >>> # now you can import normally from sklearn.impute >>> from sklearn.impute import IterativeImputer Parameters ---------- estimator : estimator object, default=BayesianRidge() The estimator to use at each step of the round-robin imputation. If `sample_posterior=True`, the estimator must support `return_std` in its `predict` method. missing_values : int or np.nan, default=np.nan The placeholder for the missing values. All occurrences of `missing_values` will be imputed. For pandas' dataframes with nullable integer dtypes with missing values, `missing_values` should be set to `np.nan`, since `pd.NA` will be converted to `np.nan`. sample_posterior : bool, default=False Whether to sample from the (Gaussian) predictive posterior of the fitted estimator for each imputation. Estimator must support `return_std` in its `predict` method if set to `True`. Set to `True` if using `IterativeImputer` for multiple imputations. max_iter : int, default=10 Maximum number of imputation rounds to perform before returning the imputations computed during the final round. A round is a single imputation of each feature with missing values. The stopping criterion is met once `max(abs(X_t - X_{t-1}))/max(abs(X[known_vals])) < tol`, where `X_t` is `X` at iteration `t`. Note that early stopping is only applied if `sample_posterior=False`. 
tol : float, default=1e-3 Tolerance of the stopping condition. n_nearest_features : int, default=None Number of other features to use to estimate the missing values of each feature column. Nearness between features is measured using the absolute correlation coefficient between each feature pair (after initial imputation). To ensure coverage of features throughout the imputation process, the neighbor features are not necessarily nearest, but are drawn with probability proportional to correlation for each imputed target feature. Can provide significant speed-up when the number of features is huge. If `None`, all features will be used. initial_strategy : {'mean', 'median', 'most_frequent', 'constant'}, \ default='mean' Which strategy to use to initialize the missing values. Same as the `strategy` parameter in :class:`~sklearn.impute.SimpleImputer`. fill_value : str or numerical value, default=None When `strategy="constant"`, `fill_value` is used to replace all occurrences of missing_values. For string or object data types, `fill_value` must be a string. If `None`, `fill_value` will be 0 when imputing numerical data and "missing_value" for strings or object data types. .. versionadded:: 1.3 imputation_order : {'ascending', 'descending', 'roman', 'arabic', \ 'random'}, default='ascending' The order in which the features will be imputed. Possible values: - `'ascending'`: From features with fewest missing values to most. - `'descending'`: From features with most missing values to fewest. - `'roman'`: Left to right. - `'arabic'`: Right to left. - `'random'`: A random order for each round. skip_complete : bool, default=False If `True` then features with missing values during :meth:`transform` which did not have any missing values during :meth:`fit` will be imputed with the initial imputation method only. Set to `True` if you have many features with no missing values at both :meth:`fit` and :meth:`transform` time to save compute. 
min_value : float or array-like of shape (n_features,), default=-np.inf Minimum possible imputed value. Broadcast to shape `(n_features,)` if scalar. If array-like, expects shape `(n_features,)`, one min value for each feature. The default is `-np.inf`. .. versionchanged:: 0.23 Added support for array-like. max_value : float or array-like of shape (n_features,), default=np.inf Maximum possible imputed value. Broadcast to shape `(n_features,)` if scalar. If array-like, expects shape `(n_features,)`, one max value for each feature. The default is `np.inf`. .. versionchanged:: 0.23 Added support for array-like. verbose : int, default=0 Verbosity flag, controls the debug messages that are issued as functions are evaluated. The higher, the more verbose. Can be 0, 1, or 2. random_state : int, RandomState instance or None, default=None The seed of the pseudo random number generator to use. Randomizes selection of estimator features if `n_nearest_features` is not `None`, the `imputation_order` if `random`, and the sampling from posterior if `sample_posterior=True`. Use an integer for determinism. See :term:`the Glossary <random_state>`. add_indicator : bool, default=False If `True`, a :class:`MissingIndicator` transform will stack onto output of the imputer's transform. This allows a predictive estimator to account for missingness despite imputation. If a feature has no missing values at fit/train time, the feature won't appear on the missing indicator even if there are missing values at transform/test time. keep_empty_features : bool, default=False If True, features that consist exclusively of missing values when `fit` is called are returned in results when `transform` is called. The imputed value is always `0` except when `initial_strategy="constant"` in which case `fill_value` will be used instead. .. versionadded:: 1.2 Attributes ---------- initial_imputer_ : object of type :class:`~sklearn.impute.SimpleImputer` Imputer used to initialize the missing values. 
imputation_sequence_ : list of tuples Each tuple has `(feat_idx, neighbor_feat_idx, estimator)`, where `feat_idx` is the current feature to be imputed, `neighbor_feat_idx` is the array of other features used to impute the current feature, and `estimator` is the trained estimator used for the imputation. Length is `self.n_features_with_missing_ * self.n_iter_`. n_iter_ : int Number of iteration rounds that occurred. Will be less than `self.max_iter` if early stopping criterion was reached. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 n_features_with_missing_ : int Number of features with missing values. indicator_ : :class:`~sklearn.impute.MissingIndicator` Indicator used to add binary indicators for missing values. `None` if `add_indicator=False`. random_state_ : RandomState instance RandomState instance that is generated either from a seed, the random number generator or by `np.random`. See Also -------- SimpleImputer : Univariate imputer for completing missing values with simple strategies. KNNImputer : Multivariate imputer that estimates missing features using nearest samples. Notes ----- To support imputation in inductive mode we store each feature's estimator during the :meth:`fit` phase, and predict without refitting (in order) during the :meth:`transform` phase. Features which contain all missing values at :meth:`fit` are discarded upon :meth:`transform`. Using defaults, the imputer scales in :math:`\\mathcal{O}(knp^3\\min(n,p))` where :math:`k` = `max_iter`, :math:`n` the number of samples and :math:`p` the number of features. It thus becomes prohibitively costly when the number of features increases. Setting `n_nearest_features << n_features`, `skip_complete=True` or increasing `tol` can help to reduce its computational cost. 
Depending on the nature of missing values, simple imputers can be preferable in a prediction context. References ---------- .. [1] `Stef van Buuren, Karin Groothuis-Oudshoorn (2011). "mice: Multivariate Imputation by Chained Equations in R". Journal of Statistical Software 45: 1-67. <https://www.jstatsoft.org/article/view/v045i03>`_ .. [2] `S. F. Buck, (1960). "A Method of Estimation of Missing Values in Multivariate Data Suitable for use with an Electronic Computer". Journal of the Royal Statistical Society 22(2): 302-306. <https://www.jstor.org/stable/2984099>`_ Examples -------- >>> import numpy as np >>> from sklearn.experimental import enable_iterative_imputer >>> from sklearn.impute import IterativeImputer >>> imp_mean = IterativeImputer(random_state=0) >>> imp_mean.fit([[7, 2, 3], [4, np.nan, 6], [10, 5, 9]]) IterativeImputer(random_state=0) >>> X = [[np.nan, 2, 3], [4, np.nan, 6], [10, np.nan, 9]] >>> imp_mean.transform(X) array([[ 6.9584, 2. , 3. ], [ 4. , 2.6000, 6. ], [10. , 4.9999, 9. ]]) For a more detailed example see :ref:`sphx_glr_auto_examples_impute_plot_missing_values.py` or :ref:`sphx_glr_auto_examples_impute_plot_iterative_imputer_variants_comparison.py`. 
""" _parameter_constraints: dict = { **_BaseImputer._parameter_constraints, "estimator": [None, HasMethods(["fit", "predict"])], "sample_posterior": ["boolean"], "max_iter": [Interval(Integral, 0, None, closed="left")], "tol": [Interval(Real, 0, None, closed="left")], "n_nearest_features": [None, Interval(Integral, 1, None, closed="left")], "initial_strategy": [ StrOptions({"mean", "median", "most_frequent", "constant"}) ], "fill_value": "no_validation", # any object is valid "imputation_order": [ StrOptions({"ascending", "descending", "roman", "arabic", "random"}) ], "skip_complete": ["boolean"], "min_value": [None, Interval(Real, None, None, closed="both"), "array-like"], "max_value": [None, Interval(Real, None, None, closed="both"), "array-like"], "verbose": ["verbose"], "random_state": ["random_state"], } def __init__( self, estimator=None, *, missing_values=np.nan, sample_posterior=False, max_iter=10, tol=1e-3, n_nearest_features=None, initial_strategy="mean", fill_value=None, imputation_order="ascending", skip_complete=False, min_value=-np.inf, max_value=np.inf, verbose=0, random_state=None, add_indicator=False, keep_empty_features=False, ): super().__init__( missing_values=missing_values, add_indicator=add_indicator, keep_empty_features=keep_empty_features, ) self.estimator = estimator self.sample_posterior = sample_posterior self.max_iter = max_iter self.tol = tol self.n_nearest_features = n_nearest_features self.initial_strategy = initial_strategy self.fill_value = fill_value self.imputation_order = imputation_order self.skip_complete = skip_complete self.min_value = min_value self.max_value = max_value self.verbose = verbose self.random_state = random_state def _impute_one_feature( self, X_filled, mask_missing_values, feat_idx, neighbor_feat_idx, estimator=None, fit_mode=True, params=None, ): """Impute a single feature from the others provided. 
This function predicts the missing values of one of the features using the current estimates of all the other features. The `estimator` must support `return_std=True` in its `predict` method for this function to work. Parameters ---------- X_filled : ndarray Input data with the most recent imputations. mask_missing_values : ndarray Input data's missing indicator matrix. feat_idx : int Index of the feature currently being imputed. neighbor_feat_idx : ndarray Indices of the features to be used in imputing `feat_idx`. estimator : object The estimator to use at this step of the round-robin imputation. If `sample_posterior=True`, the estimator must support `return_std` in its `predict` method. If None, it will be cloned from self._estimator. fit_mode : boolean, default=True Whether to fit and predict with the estimator or just predict. params : dict Additional params routed to the individual estimator. Returns ------- X_filled : ndarray Input data with `X_filled[missing_row_mask, feat_idx]` updated. estimator : estimator with sklearn API The fitted estimator used to impute `X_filled[missing_row_mask, feat_idx]`. """ if estimator is None and fit_mode is False: raise ValueError( "If fit_mode is False, then an already-fitted " "estimator should be passed in." 
) if estimator is None: estimator = clone(self._estimator) missing_row_mask = mask_missing_values[:, feat_idx] if fit_mode: X_train = _safe_indexing( _safe_indexing(X_filled, neighbor_feat_idx, axis=1), ~missing_row_mask, axis=0, ) y_train = _safe_indexing( _safe_indexing(X_filled, feat_idx, axis=1), ~missing_row_mask, axis=0, ) estimator.fit(X_train, y_train, **params) # if no missing values, don't predict if np.sum(missing_row_mask) == 0: return X_filled, estimator # get posterior samples if there is at least one missing value X_test = _safe_indexing( _safe_indexing(X_filled, neighbor_feat_idx, axis=1), missing_row_mask, axis=0, ) if self.sample_posterior: mus, sigmas = estimator.predict(X_test, return_std=True) imputed_values = np.zeros(mus.shape, dtype=X_filled.dtype) # two types of problems: (1) non-positive sigmas # (2) mus outside legal range of min_value and max_value # (results in inf sample) positive_sigmas = sigmas > 0 imputed_values[~positive_sigmas] = mus[~positive_sigmas] mus_too_low = mus < self._min_value[feat_idx] imputed_values[mus_too_low] = self._min_value[feat_idx] mus_too_high = mus > self._max_value[feat_idx] imputed_values[mus_too_high] = self._max_value[feat_idx] # the rest can be sampled without statistical issues inrange_mask = positive_sigmas & ~mus_too_low & ~mus_too_high mus = mus[inrange_mask] sigmas = sigmas[inrange_mask] a = (self._min_value[feat_idx] - mus) / sigmas b = (self._max_value[feat_idx] - mus) / sigmas truncated_normal = stats.truncnorm(a=a, b=b, loc=mus, scale=sigmas) imputed_values[inrange_mask] = truncated_normal.rvs( random_state=self.random_state_ ) else: imputed_values = estimator.predict(X_test) imputed_values = np.clip( imputed_values, self._min_value[feat_idx], self._max_value[feat_idx] ) # update the feature _safe_assign( X_filled, imputed_values, row_indexer=missing_row_mask, column_indexer=feat_idx, ) return X_filled, estimator def _get_neighbor_feat_idx(self, n_features, feat_idx, abs_corr_mat): """Get a list 
of other features to predict `feat_idx`. If `self.n_nearest_features` is less than or equal to the total number of features, then use a probability proportional to the absolute correlation between `feat_idx` and each other feature to randomly choose a subsample of the other features (without replacement). Parameters ---------- n_features : int Number of features in `X`. feat_idx : int Index of the feature currently being imputed. abs_corr_mat : ndarray, shape (n_features, n_features) Absolute correlation matrix of `X`. The diagonal has been zeroed out and each feature has been normalized to sum to 1. Can be None. Returns ------- neighbor_feat_idx : array-like The features to use to impute `feat_idx`. """ if self.n_nearest_features is not None and self.n_nearest_features < n_features: p = abs_corr_mat[:, feat_idx] neighbor_feat_idx = self.random_state_.choice( np.arange(n_features), self.n_nearest_features, replace=False, p=p ) else: inds_left = np.arange(feat_idx) inds_right = np.arange(feat_idx + 1, n_features) neighbor_feat_idx = np.concatenate((inds_left, inds_right)) return neighbor_feat_idx def _get_ordered_idx(self, mask_missing_values): """Decide in what order we will update the features. As a homage to the MICE R package, we will have 4 main options of how to order the updates, and use a random order if anything else is specified. Also, this function skips features which have no missing values. Parameters ---------- mask_missing_values : array-like, shape (n_samples, n_features) Input data's missing indicator matrix, where `n_samples` is the number of samples and `n_features` is the number of features. Returns ------- ordered_idx : ndarray, shape (n_features,) The order in which to impute the features. 
""" frac_of_missing_values = mask_missing_values.mean(axis=0) if self.skip_complete: missing_values_idx = np.flatnonzero(frac_of_missing_values) else: missing_values_idx = np.arange(np.shape(frac_of_missing_values)[0]) if self.imputation_order == "roman": ordered_idx = missing_values_idx elif self.imputation_order == "arabic": ordered_idx = missing_values_idx[::-1] elif self.imputation_order == "ascending": n = len(frac_of_missing_values) - len(missing_values_idx) ordered_idx = np.argsort(frac_of_missing_values, kind="mergesort")[n:] elif self.imputation_order == "descending": n = len(frac_of_missing_values) - len(missing_values_idx) ordered_idx = np.argsort(frac_of_missing_values, kind="mergesort")[n:][::-1] elif self.imputation_order == "random": ordered_idx = missing_values_idx self.random_state_.shuffle(ordered_idx) return ordered_idx def _get_abs_corr_mat(self, X_filled, tolerance=1e-6): """Get absolute correlation matrix between features. Parameters ---------- X_filled : ndarray, shape (n_samples, n_features) Input data with the most recent imputations. tolerance : float, default=1e-6 `abs_corr_mat` can have nans, which will be replaced with `tolerance`. Returns ------- abs_corr_mat : ndarray, shape (n_features, n_features) Absolute correlation matrix of `X` at the beginning of the current round. The diagonal has been zeroed out and each feature's absolute correlations with all others have been normalized to sum to 1. """ n_features = X_filled.shape[1] if self.n_nearest_features is None or self.n_nearest_features >= n_features: return None with np.errstate(invalid="ignore"): # if a feature in the neighborhood has only a single value # (e.g., categorical feature), the std. dev. will be null and # np.corrcoef will raise a warning due to a division by zero abs_corr_mat = np.abs(np.corrcoef(X_filled.T)) # np.corrcoef is not defined for features with zero std abs_corr_mat[np.isnan(abs_corr_mat)] = tolerance # ensures exploration, i.e. 
at least some probability of sampling np.clip(abs_corr_mat, tolerance, None, out=abs_corr_mat) # features are not their own neighbors np.fill_diagonal(abs_corr_mat, 0) # needs to sum to 1 for np.random.choice sampling abs_corr_mat = normalize(abs_corr_mat, norm="l1", axis=0, copy=False) return abs_corr_mat def _initial_imputation(self, X, in_fit=False): """Perform initial imputation for input `X`. Parameters ---------- X : ndarray of shape (n_samples, n_features) Input data, where `n_samples` is the number of samples and `n_features` is the number of features. in_fit : bool, default=False Whether function is called in :meth:`fit`. Returns ------- Xt : ndarray of shape (n_samples, n_features) Input data, where `n_samples` is the number of samples and `n_features` is the number of features. X_filled : ndarray of shape (n_samples, n_features) Input data with the most recent imputations. mask_missing_values : ndarray of shape (n_samples, n_features) Input data's missing indicator matrix, where `n_samples` is the number of samples and `n_features` is the number of features, masked by non-missing features. X_missing_mask : ndarray, shape (n_samples, n_features) Input data's mask matrix indicating missing datapoints, where `n_samples` is the number of samples and `n_features` is the number of features. 
""" if is_scalar_nan(self.missing_values): ensure_all_finite = "allow-nan" else: ensure_all_finite = True X = validate_data( self, X, dtype=FLOAT_DTYPES, order="F", reset=in_fit, ensure_all_finite=ensure_all_finite, ) _check_inputs_dtype(X, self.missing_values) X_missing_mask = _get_mask(X, self.missing_values) mask_missing_values = X_missing_mask.copy() if self.initial_imputer_ is None: self.initial_imputer_ = SimpleImputer( missing_values=self.missing_values, strategy=self.initial_strategy, fill_value=self.fill_value, keep_empty_features=self.keep_empty_features, ).set_output(transform="default") X_filled = self.initial_imputer_.fit_transform(X) else: X_filled = self.initial_imputer_.transform(X) if in_fit: self._is_empty_feature = np.all(mask_missing_values, axis=0) if not self.keep_empty_features: # drop empty features Xt = X[:, ~self._is_empty_feature] mask_missing_values = mask_missing_values[:, ~self._is_empty_feature] else: # mark empty features as not missing and keep the original # imputation mask_missing_values[:, self._is_empty_feature] = False Xt = X Xt[:, self._is_empty_feature] = X_filled[:, self._is_empty_feature] return Xt, X_filled, mask_missing_values, X_missing_mask @staticmethod def _validate_limit( limit, limit_type, n_features, is_empty_feature, keep_empty_feature ): """Validate the limits (min/max) of the feature values. Converts scalar min/max limits to vectors of shape `(n_features,)`. Parameters ---------- limit: scalar or array-like The user-specified limit (i.e, min_value or max_value). limit_type: {'max', 'min'} Type of limit to validate. n_features: int Number of features in the dataset. is_empty_feature: ndarray, shape (n_features, ) Mask array indicating empty feature imputer has seen during fit. keep_empty_feature: bool If False, remove empty-feature indices from the limit. Returns ------- limit: ndarray, shape(n_features,) Array of limits, one for each feature. 
""" n_features_in = _num_samples(is_empty_feature) if ( limit is not None and not np.isscalar(limit) and _num_samples(limit) != n_features_in ): raise ValueError( f"'{limit_type}_value' should be of shape ({n_features_in},) when an" f" array-like is provided. Got {len(limit)}, instead." ) limit_bound = np.inf if limit_type == "max" else -np.inf limit = limit_bound if limit is None else limit if np.isscalar(limit): limit = np.full(n_features, limit) limit = check_array(limit, ensure_all_finite=False, copy=False, ensure_2d=False) # Make sure to remove the empty feature elements from the bounds if not keep_empty_feature and len(limit) == len(is_empty_feature): limit = limit[~is_empty_feature] return limit @_fit_context( # IterativeImputer.estimator is not validated yet prefer_skip_nested_validation=False ) def fit_transform(self, X, y=None, **params): """Fit the imputer on `X` and return the transformed `X`. Parameters ---------- X : array-like, shape (n_samples, n_features) Input data, where `n_samples` is the number of samples and `n_features` is the number of features. y : Ignored Not used, present for API consistency by convention. **params : dict Parameters routed to the `fit` method of the sub-estimator via the metadata routing API. .. versionadded:: 1.5 Only available if `sklearn.set_config(enable_metadata_routing=True)` is set. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- Xt : array-like, shape (n_samples, n_features) The imputed input data. 
""" _raise_for_params(params, self, "fit") routed_params = process_routing( self, "fit", **params, ) self.random_state_ = getattr( self, "random_state_", check_random_state(self.random_state) ) if self.estimator is None: from sklearn.linear_model import BayesianRidge self._estimator = BayesianRidge() else: self._estimator = clone(self.estimator) self.imputation_sequence_ = [] self.initial_imputer_ = None X, Xt, mask_missing_values, complete_mask = self._initial_imputation( X, in_fit=True ) super()._fit_indicator(complete_mask) X_indicator = super()._transform_indicator(complete_mask) if self.max_iter == 0 or np.all(mask_missing_values): self.n_iter_ = 0 return super()._concatenate_indicator(Xt, X_indicator) # Edge case: a single feature, we return the initial imputation. if Xt.shape[1] == 1: self.n_iter_ = 0 return super()._concatenate_indicator(Xt, X_indicator) self._min_value = self._validate_limit( self.min_value, "min", X.shape[1], self._is_empty_feature, self.keep_empty_features, ) self._max_value = self._validate_limit( self.max_value, "max", X.shape[1], self._is_empty_feature, self.keep_empty_features, ) if not np.all(np.greater(self._max_value, self._min_value)): raise ValueError("One (or more) features have min_value >= max_value.") # order in which to impute # note this is probably too slow for large feature data (d > 100000) # and a better way would be good. 
# see: https://goo.gl/KyCNwj and subsequent comments ordered_idx = self._get_ordered_idx(mask_missing_values) self.n_features_with_missing_ = len(ordered_idx) abs_corr_mat = self._get_abs_corr_mat(Xt) n_samples, n_features = Xt.shape if self.verbose > 0: print("[IterativeImputer] Completing matrix with shape %s" % (X.shape,)) start_t = time() if not self.sample_posterior: Xt_previous = Xt.copy() normalized_tol = self.tol * np.max(np.abs(X[~mask_missing_values])) for self.n_iter_ in range(1, self.max_iter + 1): if self.imputation_order == "random": ordered_idx = self._get_ordered_idx(mask_missing_values) for feat_idx in ordered_idx: neighbor_feat_idx = self._get_neighbor_feat_idx( n_features, feat_idx, abs_corr_mat ) Xt, estimator = self._impute_one_feature( Xt, mask_missing_values, feat_idx, neighbor_feat_idx, estimator=None, fit_mode=True, params=routed_params.estimator.fit, ) estimator_triplet = _ImputerTriplet( feat_idx, neighbor_feat_idx, estimator ) self.imputation_sequence_.append(estimator_triplet) if self.verbose > 1: print( "[IterativeImputer] Ending imputation round " "%d/%d, elapsed time %0.2f" % (self.n_iter_, self.max_iter, time() - start_t) ) if not self.sample_posterior: inf_norm = np.linalg.norm(Xt - Xt_previous, ord=np.inf, axis=None) if self.verbose > 0: print( "[IterativeImputer] Change: {}, scaled tolerance: {} ".format( inf_norm, normalized_tol ) ) if inf_norm < normalized_tol: if self.verbose > 0: print("[IterativeImputer] Early stopping criterion reached.") break Xt_previous = Xt.copy() else: if not self.sample_posterior: warnings.warn( "[IterativeImputer] Early stopping criterion not reached.", ConvergenceWarning, ) _assign_where(Xt, X, cond=~mask_missing_values) return super()._concatenate_indicator(Xt, X_indicator) def transform(self, X): """Impute all missing values in `X`. Note that this is stochastic, and that if `random_state` is not fixed, repeated calls, or permuted input, results will differ. 
Parameters ---------- X : array-like of shape (n_samples, n_features) The input data to complete. Returns ------- Xt : array-like, shape (n_samples, n_features) The imputed input data. """ check_is_fitted(self) X, Xt, mask_missing_values, complete_mask = self._initial_imputation( X, in_fit=False ) X_indicator = super()._transform_indicator(complete_mask) if self.n_iter_ == 0 or np.all(mask_missing_values): return super()._concatenate_indicator(Xt, X_indicator) imputations_per_round = len(self.imputation_sequence_) // self.n_iter_ i_rnd = 0 if self.verbose > 0: print("[IterativeImputer] Completing matrix with shape %s" % (X.shape,)) start_t = time() for it, estimator_triplet in enumerate(self.imputation_sequence_): Xt, _ = self._impute_one_feature( Xt, mask_missing_values, estimator_triplet.feat_idx, estimator_triplet.neighbor_feat_idx, estimator=estimator_triplet.estimator, fit_mode=False, ) if not (it + 1) % imputations_per_round: if self.verbose > 1: print( "[IterativeImputer] Ending imputation round " "%d/%d, elapsed time %0.2f" % (i_rnd + 1, self.n_iter_, time() - start_t) ) i_rnd += 1 _assign_where(Xt, X, cond=~mask_missing_values) return super()._concatenate_indicator(Xt, X_indicator) def fit(self, X, y=None, **fit_params): """Fit the imputer on `X` and return self. Parameters ---------- X : array-like, shape (n_samples, n_features) Input data, where `n_samples` is the number of samples and `n_features` is the number of features. y : Ignored Not used, present for API consistency by convention. **fit_params : dict Parameters routed to the `fit` method of the sub-estimator via the metadata routing API. .. versionadded:: 1.5 Only available if `sklearn.set_config(enable_metadata_routing=True)` is set. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- self : object Fitted estimator. 
""" self.fit_transform(X, **fit_params) return self def get_feature_names_out(self, input_features=None): """Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Input features. - If `input_features` is `None`, then `feature_names_in_` is used as feature names in. If `feature_names_in_` is not defined, then the following input feature names are generated: `["x0", "x1", ..., "x(n_features_in_ - 1)"]`. - If `input_features` is an array-like, then `input_features` must match `feature_names_in_` if `feature_names_in_` is defined. Returns ------- feature_names_out : ndarray of str objects Transformed feature names. """ check_is_fitted(self, "n_features_in_") input_features = _check_feature_names_in(self, input_features) names = self.initial_imputer_.get_feature_names_out(input_features) return self._concatenate_indicator_feature_names_out(names, input_features) def get_metadata_routing(self): """Get metadata routing of this object. Please check :ref:`User Guide <metadata_routing>` on how the routing mechanism works. .. versionadded:: 1.5 Returns ------- routing : MetadataRouter A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating routing information. """ router = MetadataRouter(owner=self).add( estimator=self.estimator, method_mapping=MethodMapping().add(callee="fit", caller="fit"), ) return router
IterativeImputer
python
dagster-io__dagster
python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_partition_sets.py
{ "start": 5049, "end": 8123 }
class ____(NonLaunchableGraphQLContextTestMatrix): def test_get_partition_sets_for_pipeline(self, graphql_context, snapshot): selector = infer_repository_selector(graphql_context) result = execute_dagster_graphql( graphql_context, GET_PARTITION_SETS_FOR_PIPELINE_QUERY, variables={"repositorySelector": selector, "pipelineName": "integers"}, ) assert result.data snapshot.assert_match(result.data) invalid_job_result = execute_dagster_graphql( graphql_context, GET_PARTITION_SETS_FOR_PIPELINE_QUERY, variables={"repositorySelector": selector, "pipelineName": "invalid_job"}, ) assert invalid_job_result.data snapshot.assert_match(invalid_job_result.data) def test_get_partition_set(self, graphql_context, snapshot): selector = infer_repository_selector(graphql_context) result = execute_dagster_graphql( graphql_context, GET_PARTITION_SET_QUERY, variables={ "partitionSetName": "integers_partition_set", "repositorySelector": selector, }, ) assert result.data snapshot.assert_match(result.data) invalid_partition_set_result = execute_dagster_graphql( graphql_context, GET_PARTITION_SET_QUERY, variables={"partitionSetName": "invalid_partition", "repositorySelector": selector}, ) assert ( invalid_partition_set_result.data["partitionSetOrError"]["__typename"] == "PartitionSetNotFoundError" ) assert invalid_partition_set_result.data snapshot.assert_match(invalid_partition_set_result.data) result = execute_dagster_graphql( graphql_context, GET_PARTITION_SET_QUERY, variables={ "partitionSetName": "dynamic_partitioned_assets_job_partition_set", "repositorySelector": selector, }, ) assert result.data snapshot.assert_match(result.data) def test_get_partition_tags(self, graphql_context): selector = infer_repository_selector(graphql_context) result = execute_dagster_graphql( graphql_context, GET_PARTITION_SET_TAGS_QUERY, variables={ "partitionSetName": "integers_partition_set", "repositorySelector": selector, }, ) assert not result.errors assert result.data partitions = 
result.data["partitionSetOrError"]["partitionsOrError"]["results"] assert len(partitions) == 1 sorted_items = sorted(partitions[0]["tagsOrError"]["results"], key=lambda item: item["key"]) tags = OrderedDict({item["key"]: item["value"] for item in sorted_items}) assert tags == { "foo": "0", "dagster/partition": "0", "dagster/partition_set": "integers_partition_set", }
TestPartitionSets
python
altair-viz__altair
altair/vegalite/v6/schema/_config.py
{ "start": 249281, "end": 249551 }
class ____(TypedDict, total=False): """ :class:`altair.Resolve` ``TypedDict`` wrapper. Parameters ---------- axis legend scale """ axis: AxisResolveMapKwds legend: LegendResolveMapKwds scale: ScaleResolveMapKwds
ResolveKwds
python
fastai__fastai
fastai/callback/mixup.py
{ "start": 2039, "end": 3082 }
class ____(MixHandler): "Implementation of https://arxiv.org/abs/1710.09412" def __init__(self, alpha:float=.4 # Determine `Beta` distribution in range (0.,inf] ): super().__init__(alpha) def before_batch(self): "Blend xb and yb with another random item in a second batch (xb1,yb1) with `lam` weights" lam = self.distrib.sample((self.y.size(0),)).squeeze().to(self.x.device) lam = torch.stack([lam, 1-lam], 1) self.lam = lam.max(1)[0] shuffle = torch.randperm(self.y.size(0)).to(self.x.device) xb1,self.yb1 = tuple(L(self.xb).itemgot(shuffle)),tuple(L(self.yb).itemgot(shuffle)) nx_dims = len(self.x.size()) self.learn.xb = tuple(L(xb1,self.xb).map_zip(torch.lerp,weight=unsqueeze(self.lam, n=nx_dims-1))) if not self.stack_y: ny_dims = len(self.y.size()) self.learn.yb = tuple(L(self.yb1,self.yb).map_zip(torch.lerp,weight=unsqueeze(self.lam, n=ny_dims-1))) # %% ../../nbs/19_callback.mixup.ipynb 21
MixUp
python
getsentry__sentry-python
sentry_sdk/integrations/aiohttp.py
{ "start": 1902, "end": 13007 }
class ____(Integration): identifier = "aiohttp" origin = f"auto.http.{identifier}" def __init__( self, transaction_style="handler_name", # type: str *, failed_request_status_codes=_DEFAULT_FAILED_REQUEST_STATUS_CODES, # type: Set[int] ): # type: (...) -> None if transaction_style not in TRANSACTION_STYLE_VALUES: raise ValueError( "Invalid value for transaction_style: %s (must be in %s)" % (transaction_style, TRANSACTION_STYLE_VALUES) ) self.transaction_style = transaction_style self._failed_request_status_codes = failed_request_status_codes @staticmethod def setup_once(): # type: () -> None version = parse_version(AIOHTTP_VERSION) _check_minimum_version(AioHttpIntegration, version) if not HAS_REAL_CONTEXTVARS: # We better have contextvars or we're going to leak state between # requests. raise DidNotEnable( "The aiohttp integration for Sentry requires Python 3.7+ " " or aiocontextvars package." + CONTEXTVARS_ERROR_MESSAGE ) ignore_logger("aiohttp.server") old_handle = Application._handle async def sentry_app_handle(self, request, *args, **kwargs): # type: (Any, Request, *Any, **Any) -> Any integration = sentry_sdk.get_client().get_integration(AioHttpIntegration) if integration is None: return await old_handle(self, request, *args, **kwargs) weak_request = weakref.ref(request) with sentry_sdk.isolation_scope() as scope: with track_session(scope, session_mode="request"): # Scope data will not leak between requests because aiohttp # create a task to wrap each request. scope.generate_propagation_context() scope.clear_breadcrumbs() scope.add_event_processor(_make_request_processor(weak_request)) headers = dict(request.headers) transaction = continue_trace( headers, op=OP.HTTP_SERVER, # If this transaction name makes it to the UI, AIOHTTP's # URL resolver did not find a route or died trying. 
name="generic AIOHTTP request", source=TransactionSource.ROUTE, origin=AioHttpIntegration.origin, ) with sentry_sdk.start_transaction( transaction, custom_sampling_context={"aiohttp_request": request}, ): try: response = await old_handle(self, request) except HTTPException as e: transaction.set_http_status(e.status_code) if ( e.status_code in integration._failed_request_status_codes ): _capture_exception() raise except (asyncio.CancelledError, ConnectionResetError): transaction.set_status(SPANSTATUS.CANCELLED) raise except Exception: # This will probably map to a 500 but seems like we # have no way to tell. Do not set span status. reraise(*_capture_exception()) try: # A valid response handler will return a valid response with a status. But, if the handler # returns an invalid response (e.g. None), the line below will raise an AttributeError. # Even though this is likely invalid, we need to handle this case to ensure we don't break # the application. response_status = response.status except AttributeError: pass else: transaction.set_http_status(response_status) return response Application._handle = sentry_app_handle old_urldispatcher_resolve = UrlDispatcher.resolve @wraps(old_urldispatcher_resolve) async def sentry_urldispatcher_resolve(self, request): # type: (UrlDispatcher, Request) -> UrlMappingMatchInfo rv = await old_urldispatcher_resolve(self, request) integration = sentry_sdk.get_client().get_integration(AioHttpIntegration) if integration is None: return rv name = None try: if integration.transaction_style == "handler_name": name = transaction_from_function(rv.handler) elif integration.transaction_style == "method_and_path_pattern": route_info = rv.get_info() pattern = route_info.get("path") or route_info.get("formatter") name = "{} {}".format(request.method, pattern) except Exception: pass if name is not None: sentry_sdk.get_current_scope().set_transaction_name( name, source=SOURCE_FOR_STYLE[integration.transaction_style], ) return rv UrlDispatcher.resolve = 
sentry_urldispatcher_resolve old_client_session_init = ClientSession.__init__ @ensure_integration_enabled(AioHttpIntegration, old_client_session_init) def init(*args, **kwargs): # type: (Any, Any) -> None client_trace_configs = list(kwargs.get("trace_configs") or ()) trace_config = create_trace_config() client_trace_configs.append(trace_config) kwargs["trace_configs"] = client_trace_configs return old_client_session_init(*args, **kwargs) ClientSession.__init__ = init def create_trace_config(): # type: () -> TraceConfig async def on_request_start(session, trace_config_ctx, params): # type: (ClientSession, SimpleNamespace, TraceRequestStartParams) -> None if sentry_sdk.get_client().get_integration(AioHttpIntegration) is None: return method = params.method.upper() parsed_url = None with capture_internal_exceptions(): parsed_url = parse_url(str(params.url), sanitize=False) span = sentry_sdk.start_span( op=OP.HTTP_CLIENT, name="%s %s" % (method, parsed_url.url if parsed_url else SENSITIVE_DATA_SUBSTITUTE), origin=AioHttpIntegration.origin, ) span.set_data(SPANDATA.HTTP_METHOD, method) if parsed_url is not None: span.set_data("url", parsed_url.url) span.set_data(SPANDATA.HTTP_QUERY, parsed_url.query) span.set_data(SPANDATA.HTTP_FRAGMENT, parsed_url.fragment) client = sentry_sdk.get_client() if should_propagate_trace(client, str(params.url)): for ( key, value, ) in sentry_sdk.get_current_scope().iter_trace_propagation_headers( span=span ): logger.debug( "[Tracing] Adding `{key}` header {value} to outgoing request to {url}.".format( key=key, value=value, url=params.url ) ) if key == BAGGAGE_HEADER_NAME and params.headers.get( BAGGAGE_HEADER_NAME ): # do not overwrite any existing baggage, just append to it params.headers[key] += "," + value else: params.headers[key] = value trace_config_ctx.span = span async def on_request_end(session, trace_config_ctx, params): # type: (ClientSession, SimpleNamespace, TraceRequestEndParams) -> None if trace_config_ctx.span is None: return 
span = trace_config_ctx.span span.set_http_status(int(params.response.status)) span.set_data("reason", params.response.reason) span.finish() with capture_internal_exceptions(): add_http_request_source(span) trace_config = TraceConfig() trace_config.on_request_start.append(on_request_start) trace_config.on_request_end.append(on_request_end) return trace_config def _make_request_processor(weak_request): # type: (weakref.ReferenceType[Request]) -> EventProcessor def aiohttp_processor( event, # type: Event hint, # type: dict[str, Tuple[type, BaseException, Any]] ): # type: (...) -> Event request = weak_request() if request is None: return event with capture_internal_exceptions(): request_info = event.setdefault("request", {}) request_info["url"] = "%s://%s%s" % ( request.scheme, request.host, request.path, ) request_info["query_string"] = request.query_string request_info["method"] = request.method request_info["env"] = {"REMOTE_ADDR": request.remote} request_info["headers"] = _filter_headers(dict(request.headers)) # Just attach raw data here if it is within bounds, if available. # Unfortunately there's no way to get structured data from aiohttp # without awaiting on some coroutine. 
request_info["data"] = get_aiohttp_request_data(request) return event return aiohttp_processor def _capture_exception(): # type: () -> ExcInfo exc_info = sys.exc_info() event, hint = event_from_exception( exc_info, client_options=sentry_sdk.get_client().options, mechanism={"type": "aiohttp", "handled": False}, ) sentry_sdk.capture_event(event, hint=hint) return exc_info BODY_NOT_READ_MESSAGE = "[Can't show request body due to implementation details.]" def get_aiohttp_request_data(request): # type: (Request) -> Union[Optional[str], AnnotatedValue] bytes_body = request._read_bytes if bytes_body is not None: # we have body to show if not request_body_within_bounds(sentry_sdk.get_client(), len(bytes_body)): return AnnotatedValue.removed_because_over_size_limit() encoding = request.charset or "utf-8" return bytes_body.decode(encoding, "replace") if request.can_read_body: # body exists but we can't show it return BODY_NOT_READ_MESSAGE # request has no body return None
AioHttpIntegration
python
pyinstaller__pyinstaller
bootloader/waflib/Logs.py
{ "start": 1840, "end": 2048 }
class ____(object): def __getattr__(self, a): return get_color(a) def __call__(self, a): return get_color(a) colors = color_dict() re_log = re.compile(r'(\w+): (.*)', re.M)
color_dict
python
encode__starlette
starlette/testclient.py
{ "start": 6755, "end": 13469 }
class ____(httpx.BaseTransport): def __init__( self, app: ASGI3App, portal_factory: _PortalFactoryType, raise_server_exceptions: bool = True, root_path: str = "", *, client: tuple[str, int], app_state: dict[str, Any], ) -> None: self.app = app self.raise_server_exceptions = raise_server_exceptions self.root_path = root_path self.portal_factory = portal_factory self.app_state = app_state self.client = client def handle_request(self, request: httpx.Request) -> httpx.Response: scheme = request.url.scheme netloc = request.url.netloc.decode(encoding="ascii") path = request.url.path raw_path = request.url.raw_path query = request.url.query.decode(encoding="ascii") default_port = {"http": 80, "ws": 80, "https": 443, "wss": 443}[scheme] if ":" in netloc: host, port_string = netloc.split(":", 1) port = int(port_string) else: host = netloc port = default_port # Include the 'host' header. if "host" in request.headers: headers: list[tuple[bytes, bytes]] = [] elif port == default_port: # pragma: no cover headers = [(b"host", host.encode())] else: # pragma: no cover headers = [(b"host", (f"{host}:{port}").encode())] # Include other request headers. 
headers += [(key.lower().encode(), value.encode()) for key, value in request.headers.multi_items()] scope: dict[str, Any] if scheme in {"ws", "wss"}: subprotocol = request.headers.get("sec-websocket-protocol", None) if subprotocol is None: subprotocols: Sequence[str] = [] else: subprotocols = [value.strip() for value in subprotocol.split(",")] scope = { "type": "websocket", "path": unquote(path), "raw_path": raw_path.split(b"?", 1)[0], "root_path": self.root_path, "scheme": scheme, "query_string": query.encode(), "headers": headers, "client": self.client, "server": [host, port], "subprotocols": subprotocols, "state": self.app_state.copy(), "extensions": {"websocket.http.response": {}}, } session = WebSocketTestSession(self.app, scope, self.portal_factory) raise _Upgrade(session) scope = { "type": "http", "http_version": "1.1", "method": request.method, "path": unquote(path), "raw_path": raw_path.split(b"?", 1)[0], "root_path": self.root_path, "scheme": scheme, "query_string": query.encode(), "headers": headers, "client": self.client, "server": [host, port], "extensions": {"http.response.debug": {}}, "state": self.app_state.copy(), } request_complete = False response_started = False response_complete: anyio.Event raw_kwargs: dict[str, Any] = {"stream": io.BytesIO()} template = None context = None async def receive() -> Message: nonlocal request_complete if request_complete: if not response_complete.is_set(): await response_complete.wait() return {"type": "http.disconnect"} body = request.read() if isinstance(body, str): body_bytes: bytes = body.encode("utf-8") # pragma: no cover elif body is None: body_bytes = b"" # pragma: no cover elif isinstance(body, GeneratorType): try: # pragma: no cover chunk = body.send(None) if isinstance(chunk, str): chunk = chunk.encode("utf-8") return {"type": "http.request", "body": chunk, "more_body": True} except StopIteration: # pragma: no cover request_complete = True return {"type": "http.request", "body": b""} else: body_bytes = 
body request_complete = True return {"type": "http.request", "body": body_bytes} async def send(message: Message) -> None: nonlocal raw_kwargs, response_started, template, context if message["type"] == "http.response.start": assert not response_started, 'Received multiple "http.response.start" messages.' raw_kwargs["status_code"] = message["status"] raw_kwargs["headers"] = [(key.decode(), value.decode()) for key, value in message.get("headers", [])] response_started = True elif message["type"] == "http.response.body": assert response_started, 'Received "http.response.body" without "http.response.start".' assert not response_complete.is_set(), 'Received "http.response.body" after response completed.' body = message.get("body", b"") more_body = message.get("more_body", False) if request.method != "HEAD": raw_kwargs["stream"].write(body) if not more_body: raw_kwargs["stream"].seek(0) response_complete.set() elif message["type"] == "http.response.debug": template = message["info"]["template"] context = message["info"]["context"] try: with self.portal_factory() as portal: response_complete = portal.call(anyio.Event) portal.call(self.app, scope, receive, send) except BaseException as exc: if self.raise_server_exceptions: raise exc if self.raise_server_exceptions: assert response_started, "TestClient did not receive any response." elif not response_started: raw_kwargs = { "status_code": 500, "headers": [], "stream": io.BytesIO(), } raw_kwargs["stream"] = httpx.ByteStream(raw_kwargs["stream"].read()) response = httpx.Response(**raw_kwargs, request=request) if template is not None: response.template = template # type: ignore[attr-defined] response.context = context # type: ignore[attr-defined] return response
_TestClientTransport
python
readthedocs__readthedocs.org
readthedocs/projects/admin.py
{ "start": 2315, "end": 2483 }
class ____(admin.TabularInline): """Redirect inline relationship view for :py:class:`ProjectAdmin`.""" model = Redirect classes = ["collapse"]
RedirectInline
python
walkccc__LeetCode
solutions/306. Additive Number/306.py
{ "start": 0, "end": 852 }
class ____: def isAdditiveNumber(self, num: str) -> bool: n = len(num) def dfs(firstNum: int, secondNum: int, s: int) -> bool: if s == len(num): return True thirdNum = firstNum + secondNum thirdNumStr = str(thirdNum) return (num.find(thirdNumStr, s) == s and dfs(secondNum, thirdNum, s + len(thirdNumStr))) # num[0..i] = firstNum for i in range(n // 2): if i > 0 and num[0] == '0': return False firstNum = int(num[:i + 1]) # num[i + 1..j] = secondNum # |thirdNum| >= max(|firstNum|, |secondNum|) j = i + 1 while max(i, j - i) < n - j: if j > i + 1 and num[i + 1] == '0': break secondNum = int(num[i + 1:j + 1]) if dfs(firstNum, secondNum, j + 1): return True j += 1 return False
Solution
python
tensorflow__tensorflow
tensorflow/python/debug/cli/debugger_cli_common.py
{ "start": 1304, "end": 4618 }
class ____: """Rich single-line text. Attributes: text: A plain string, the raw text represented by this object. Should not contain newlines. font_attr_segs: A list of (start, end, font attribute) triples, representing richness information applied to substrings of text. """ def __init__(self, text="", font_attr=None): """Construct a RichLine with no rich attributes or a single attribute. Args: text: Raw text string font_attr: If specified, a single font attribute to be applied to the entire text. Extending this object via concatenation allows creation of text with varying attributes. """ # TODO(ebreck) Make .text and .font_attr protected members when we no # longer need public access. self.text = text if font_attr: self.font_attr_segs = [(0, len(text), font_attr)] else: self.font_attr_segs = [] def __add__(self, other): """Concatenate two chunks of maybe rich text to make a longer rich line. Does not modify self. Args: other: Another piece of text to concatenate with this one. If it is a plain str, it will be appended to this string with no attributes. If it is a RichLine, it will be appended to this string with its attributes preserved. Returns: A new RichLine comprising both chunks of text, with appropriate attributes applied to the corresponding substrings. """ ret = RichLine() if isinstance(other, str): ret.text = self.text + other ret.font_attr_segs = self.font_attr_segs[:] return ret elif isinstance(other, RichLine): ret.text = self.text + other.text ret.font_attr_segs = self.font_attr_segs[:] old_len = len(self.text) for start, end, font_attr in other.font_attr_segs: ret.font_attr_segs.append((old_len + start, old_len + end, font_attr)) return ret else: raise TypeError("%r cannot be concatenated with a RichLine" % other) def __len__(self): return len(self.text) def rich_text_lines_from_rich_line_list(rich_text_list, annotations=None): """Convert a list of RichLine objects or strings to a RichTextLines object. 
Args: rich_text_list: a list of RichLine objects or strings annotations: annotations for the resultant RichTextLines object. Returns: A corresponding RichTextLines object. """ lines = [] font_attr_segs = {} for i, rl in enumerate(rich_text_list): if isinstance(rl, RichLine): lines.append(rl.text) if rl.font_attr_segs: font_attr_segs[i] = rl.font_attr_segs else: lines.append(rl) return RichTextLines(lines, font_attr_segs, annotations=annotations) def get_tensorflow_version_lines(include_dependency_versions=False): """Generate RichTextLines with TensorFlow version info. Args: include_dependency_versions: Include the version of TensorFlow's key dependencies, such as numpy. Returns: A formatted, multi-line `RichTextLines` object. """ lines = ["TensorFlow version: %s" % pywrap_tf_session.__version__] lines.append("") if include_dependency_versions: lines.append("Dependency version(s):") lines.append(" numpy: %s" % np.__version__) lines.append("") return RichTextLines(lines)
RichLine
python
bokeh__bokeh
src/bokeh/core/types.py
{ "start": 2151, "end": 2339 }
class ____(TypedDict): type: Literal["poly"] sx: Sequence[float] sy: Sequence[float] Geometry: TypeAlias = PointGeometry | SpanGeometry | RectGeometry | PolyGeometry
PolyGeometry
python
skorch-dev__skorch
skorch/tests/llm/test_llm_classifier.py
{ "start": 145, "end": 18103 }
class ____: @pytest.fixture(scope='class') def model(self): from transformers import AutoModelForSeq2SeqLM return AutoModelForSeq2SeqLM.from_pretrained('google/flan-t5-small') @pytest.fixture(scope='class') def tokenizer(self): from transformers import AutoTokenizer return AutoTokenizer.from_pretrained('google/flan-t5-small') @pytest.fixture(scope='class') def classifier_cls(self): from skorch.llm import ZeroShotClassifier return ZeroShotClassifier @pytest.fixture def X(self): return [ "A masterpiece, instant classic, 5 stars out of 5", "I was bored. Would not recommend.", "My friends and I really enjoyed this one. Best time of my life", ] def test_classes(self, model, tokenizer, classifier_cls): clf = classifier_cls(model=model, tokenizer=tokenizer, use_caching=False) clf.fit(None, ['positive', 'negative', 'very positive', 'foobar']) # classes_ are sorted expected = np.array(['foobar', 'negative', 'positive', 'very positive']) np.testing.assert_equal(clf.classes_, expected) def test_init_encoder_decoder_with_caching_raises( self, classifier_cls, model, tokenizer ): msg = ( "Caching is not supported for encoder-decoder models, " "initialize the model with use_caching=False." ) clf = classifier_cls(model=model, tokenizer=tokenizer, use_caching=True) with pytest.raises(ValueError, match=msg): clf.fit(None, ['positive', 'negative']) def test_init_encoder_decoder_with_caching_using_model_name_raises( self, classifier_cls, model, tokenizer ): msg = ( "Caching is not supported for encoder-decoder models, " "initialize the model with use_caching=False." ) clf = classifier_cls('google/flan-t5-small', use_caching=True) with pytest.raises(ValueError, match=msg): clf.fit(None, ['positive', 'negative']) def test_init_without_model_raises(self, classifier_cls, tokenizer): msg = ( "ZeroShotClassifier needs to be initialized with either a model name, " "or a model & tokenizer, but not both." 
) clf = classifier_cls(tokenizer=tokenizer) with pytest.raises(ValueError, match=msg): clf.fit(None, ['positive', 'negative']) def test_init_without_tokenizer_raises(self, classifier_cls, model): msg = ( "ZeroShotClassifier needs to be initialized with either a model name, " "or a model & tokenizer, but not both." ) clf = classifier_cls(model=model) with pytest.raises(ValueError, match=msg): clf.fit(None, ['positive', 'negative']) def test_init_with_model_and_model_name_raises(self, classifier_cls, model): msg = ( "ZeroShotClassifier needs to be initialized with either a model name, " "or a model & tokenizer, but not both." ) clf = classifier_cls('google/flan-t5-small', model=model) with pytest.raises(ValueError, match=msg): clf.fit(None, ['positive', 'negative']) def test_init_with_tokenizer_and_model_name_raises(self, classifier_cls, tokenizer): msg = ( "ZeroShotClassifier needs to be initialized with either a model name, " "or a model & tokenizer, but not both." ) clf = classifier_cls('google/flan-t5-small', tokenizer=tokenizer) with pytest.raises(ValueError, match=msg): clf.fit(None, ['positive', 'negative']) def test_init_wrong_error_low_prob_raises(self, classifier_cls, model, tokenizer): clf = classifier_cls(model=model, tokenizer=tokenizer, error_low_prob='foo') msg = ( "error_low_prob must be one of ignore, raise, warn, return_none; " "got foo instead" ) with pytest.raises(ValueError, match=msg): clf.fit(None, ['positive', 'negative']) def test_init_wrong_threshold_low_prob_raises( self, classifier_cls, model, tokenizer ): clf = classifier_cls(model=model, tokenizer=tokenizer, threshold_low_prob=-0.1) msg = "threshold_low_prob must be between 0 and 1, got -0.1 instead" with pytest.raises(ValueError, match=msg): clf.fit(None, ['positive', 'negative']) clf = classifier_cls(model=model, tokenizer=tokenizer, threshold_low_prob=99) msg = "threshold_low_prob must be between 0 and 1, got 99 instead" with pytest.raises(ValueError, match=msg): clf.fit(None, 
['positive', 'negative']) def test_init_no_fitting_architecture_raises(self, classifier_cls): # resnet-18 exists but cannot be used for language generation clf = classifier_cls('microsoft/resnet-18') msg = ( "Could not identify architecture for model 'microsoft/resnet-18', " "try loading model and tokenizer directly using the corresponding 'Auto' " "classes from transformers and pass them to the classifier" ) with pytest.raises(ValueError, match=msg): clf.fit(None, ['positive', 'negative']) def test_no_fit_predict_raises(self, classifier_cls, model, tokenizer, X): # When calling predict/predict_proba before fitting, an error is raised clf = classifier_cls(model=model, tokenizer=tokenizer, use_caching=False) # don't check the exact message, as it might change in the future with pytest.raises(NotFittedError): clf.predict(X) with pytest.raises(NotFittedError): clf.predict_proba(X) def test_fit_y_none_raises(self, classifier_cls, model, tokenizer): # X can be None but y should not be None clf = classifier_cls(model=model, tokenizer=tokenizer, use_caching=False) msg = "y cannot be None, as it is used to infer the existing classes" with pytest.raises(ValueError, match=msg): clf.fit(None, None) def test_fit_warning_if_y_not_strings( self, classifier_cls, model, tokenizer, recwarn ): # y should be strings but also accepts other types, but will give a # warning clf = classifier_cls(model=model, tokenizer=tokenizer, use_caching=False) clf.fit(None, [1, 2, 3]) assert len(recwarn.list) == 1 expected = ( "y should contain the name of the labels as strings, e.g. 
" "'positive' and 'negative', don't pass label-encoded targets" ) assert str(recwarn.list[0].message) == expected def test_predict(self, model, tokenizer, classifier_cls, X): clf = classifier_cls(model=model, tokenizer=tokenizer, use_caching=False) clf.fit(None, ['negative', 'positive']) y_pred = clf.predict(X) np.testing.assert_array_equal( y_pred, np.array(['positive', 'negative', 'positive']), ) def test_predict_proba(self, model, tokenizer, classifier_cls, X): clf = classifier_cls(model=model, tokenizer=tokenizer, use_caching=False) clf.fit(None, ['positive', 'negative']) y_proba = clf.predict_proba(X) assert y_proba.shape == (3, 2) assert np.allclose(y_proba.sum(axis=1), 1.0) assert (y_proba >= 0.0).all() assert (y_proba <= 1.0).all() def test_init_from_model_name(self, classifier_cls, X): clf = classifier_cls('google/flan-t5-small', use_caching=False) # check that none of the below raise clf.fit(None, ['positive', 'negative']) clf.predict_proba(X) clf.predict(X) def test_proba_for_unlikely_label_low(self, model, tokenizer, classifier_cls, X): clf = classifier_cls(model=model, tokenizer=tokenizer, use_caching=False) clf.fit(None, ['positive', 'negative', 'zip']) y_proba = clf.predict_proba(X) assert y_proba.shape == (3, 3) assert (y_proba[:, -1] < 2e-3).all() def test_predict_proba_labels_differing_num_tokens( self, model, tokenizer, classifier_cls, X ): # positive and negative have 1 token, foobar has 3 tokens clf = classifier_cls(model=model, tokenizer=tokenizer, use_caching=False) clf.fit(None, ['foobar', 'positive', 'negative']) y_proba = clf.predict_proba(X) # foobar is column 0 assert (y_proba[:, 0] < 1e-3).all() def test_predict_proba_not_normalized(self, model, tokenizer, classifier_cls, X): clf = classifier_cls( model=model, tokenizer=tokenizer, use_caching=False, probas_sum_to_1=False ) clf.fit(None, ['negative', 'positive']) y_proba = clf.predict_proba(X) assert (y_proba.sum(axis=1) < 1.0).all() def test_same_X_same_probas(self, model, tokenizer, 
classifier_cls, X): clf = classifier_cls(model=model, tokenizer=tokenizer, use_caching=False) clf.fit(None, ['foo', 'bar']) y_proba = clf.predict_proba(X) y_proba2 = clf.predict_proba(X) y_proba3 = clf.predict_proba(X) np.testing.assert_allclose(y_proba, y_proba2) np.testing.assert_allclose(y_proba, y_proba3) def test_same_X_caching(self, model, tokenizer, classifier_cls): # Check that caching is performed correctly. On first call, there is # only an uncached call. On subsequent call with the same argument, # there are 4 cached calls, 1 from the prompt and 3 for the tokens from # the label. #clf = classifier_cls(model=model, tokenizer=tokenizer) clf = classifier_cls('gpt2') clf.fit(None, ['foobar']) X = ["A masterpiece, instant classic, 5 stars out of 5"] y_proba = clf.predict_proba(X) assert clf.cached_model_._uncached_calls == 1 assert clf.cached_model_._total_calls == 1 y_proba2 = clf.predict_proba(X) assert clf.cached_model_._uncached_calls == 1 assert clf.cached_model_._total_calls == 2 np.testing.assert_allclose(y_proba, y_proba2) y_proba3 = clf.predict_proba(X) assert clf.cached_model_._uncached_calls == 1 assert clf.cached_model_._total_calls == 3 np.testing.assert_allclose(y_proba, y_proba3) def test_caching_is_faster(self, classifier_cls): # use a decoder-only model clf = classifier_cls('gpt2') # classes should have a long common prefix for caching to make a # difference X = ["A masterpiece, instant classic, 5 stars out of 5"] y = ['absolutely undoubtedly positive', 'absolutely undoubtedly negative'] clf.fit(X=None, y=y) # measure time for uncached call using timeit uncached_time = timeit.timeit(lambda: clf.predict_proba(X), number=1) cached_time = timeit.timeit(lambda: clf.predict_proba(X), number=1) # at least 1/3 faster assert cached_time < 0.1 * uncached_time def test_caching_works_shared_label_prefix_without_eos(self, classifier_cls): clf = classifier_cls('gpt2') # carefully chosen class labels so that one label has the other label as # its prefix. 
'11111' = '11' + '111'. For models that tokenize single # digits indepdentenly this is far more relevant. X = np.array(["Hey there", "No thank you"]) y = ['11', '11111'] clf.fit(X, y) y_pred_1 = clf.predict(X) y_pred_2 = clf.predict(X) # does not raise and gives the same results np.testing.assert_array_equal(y_pred_1, y_pred_2) def test_custom_prompt(self, model, tokenizer, classifier_cls, X): prompt = "Please classify my text:\n{text}\n\nLabels: {labels}\n\n" clf = classifier_cls( model=model, tokenizer=tokenizer, use_caching=False, prompt=prompt ) # just checking that this works, we don't necessarily expect the # predictions to be correct clf.fit(None, ['positive', 'negative']) clf.predict_proba(X) clf.predict(X) def test_defective_prompt_missing_key_raises( self, model, tokenizer, classifier_cls, recwarn ): # the prompt has no 'labels' placeholders prompt = "Please classify my text:\n{text}\n\n" clf = classifier_cls( model=model, tokenizer=tokenizer, use_caching=False, prompt=prompt ) msg = ( "The prompt may not be correct, it expects 2 " "placeholders: 'labels', 'text', missing keys: 'labels'" ) clf.fit(None, ['positive', 'negative']) assert str(recwarn.list[0].message) == msg def test_defective_prompt_extra_key_raises( self, model, tokenizer, classifier_cls, recwarn ): # the prompt has excess 'examples' placeholder prompt = "Please classify my text:\n{text}\n\nLabels: {labels}\n\n" prompt += "Examples: {examples}\n\n" clf = classifier_cls( model=model, tokenizer=tokenizer, use_caching=False, prompt=prompt ) msg = ( "The prompt may not be correct, it expects 2 " "placeholders: 'labels', 'text', extra keys: 'examples'" ) clf.fit(None, ['positive', 'negative']) assert str(recwarn.list[0].message) == msg def test_get_prompt(self, classifier_cls, model, tokenizer): prompt = "Foo {labels} bar {text}" clf = classifier_cls( model=model, tokenizer=tokenizer, use_caching=False, prompt=prompt ) clf.fit(None, ['label-a', 'label-b']) x = "My input" expected = "Foo 
['label-a', 'label-b'] bar My input" assert clf.get_prompt(x) == expected def test_causal_lm(self, classifier_cls, X): # flan-t5 has an encoder-decoder architecture, here we check that a pure # decoder architecture works as well. We're just interested in it # working, not if the predictions are good. name = 'gpt2' clf = classifier_cls(name, probas_sum_to_1=False, use_caching=True) clf.fit(None, ['negative', 'positive']) clf.predict_proba(X[:3]) clf.predict(X[:3]) def test_no_low_probability_no_warning( self, classifier_cls, model, tokenizer, X, recwarn ): # test to explicitly ensure that there is no false warning, as this # would go undetected otherwise clf = classifier_cls( model=model, tokenizer=tokenizer, use_caching=False, threshold_low_prob=0.000001, error_low_prob='warn', ) clf.fit(None, ['negative', 'positive']) clf.predict_proba(X) assert not recwarn.list def test_low_probability_warning( self, classifier_cls, model, tokenizer, X, recwarn ): # With a threshold of 0.993, empirically, 2 samples will fall below it # and 1 is above it. clf = classifier_cls( model=model, tokenizer=tokenizer, use_caching=False, threshold_low_prob=0.993, error_low_prob='warn', ) clf.fit(None, ['negative', 'positive']) clf.predict_proba(X) msg = "Found 2 samples to have a total probability below the threshold of 0.99" assert len(recwarn.list) == 1 # use startswith because the exact number of decimals is not clear assert str(recwarn.list[0].message).startswith(msg) def test_low_probability_error(self, classifier_cls, model, tokenizer, X): from skorch.llm.classifier import LowProbabilityError # With a threshold of 0.993, empirically, 2 samples will fall below it # and 1 is above it. 
clf = classifier_cls( model=model, tokenizer=tokenizer, use_caching=False, threshold_low_prob=0.993, error_low_prob='raise', ) clf.fit(None, ['negative', 'positive']) msg = ( r"The sum of all probabilities is \d\.\d+, " "which is below the minimum threshold of 0.99" ) with pytest.raises(LowProbabilityError, match=msg): clf.predict_proba(X) def test_low_probability_return_none(self, classifier_cls, model, tokenizer, X): clf = classifier_cls( model=model, tokenizer=tokenizer, use_caching=False, threshold_low_prob=0.993, error_low_prob='return_none', ) clf.fit(None, ['negative', 'positive']) y_pred = clf.predict(X) # With a threshold of 0.99, empirically, the first 2 samples will fall # below it and the last is above it. expected = [None, 'negative', None] np.testing.assert_array_equal(y_pred, expected) def test_repr(self, classifier_cls, model, tokenizer): expected = ( "ZeroShotClassifier(model='T5ForConditionalGeneration', " "tokenizer='T5TokenizerFast', use_caching=False)" ) clf = classifier_cls(model=model, tokenizer=tokenizer, use_caching=False) assert str(clf) == expected assert repr(clf) == expected clf.fit(None, ['positive', 'negative']) assert str(clf) == expected assert repr(clf) == expected def test_clear_model_cache(self, classifier_cls, X): clf = classifier_cls('gpt2') clf.fit(None, ['very negative', 'very positive']) cache = clf.cached_model_.cache assert not cache # empty at this point clf.predict(X) # 2 entries for each sample, one for the prompt itself, one for the # prompt + "very" assert len(cache) == 2 * len(X) clf.clear_model_cache() assert not cache # empty again
TestZeroShotClassifier
python
ray-project__ray
python/ray/exceptions.py
{ "start": 28953, "end": 29339 }
class ____(RayError): """Raised when the task cannot be scheduled. One example is that the node specified through NodeAffinitySchedulingStrategy is dead. """ def __init__(self, error_message: str): self.error_message = error_message def __str__(self): return f"The task is not schedulable: {self.error_message}" @PublicAPI
TaskUnschedulableError
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 248883, "end": 249498 }
class ____(sgqlc.types.Input): """Autogenerated input type of LockLockable""" __schema__ = github_schema __field_names__ = ("lockable_id", "lock_reason", "client_mutation_id") lockable_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="lockableId") """ID of the item to be locked.""" lock_reason = sgqlc.types.Field(LockReason, graphql_name="lockReason") """A reason for why the item will be locked.""" client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId") """A unique identifier for the client performing the mutation."""
LockLockableInput
python
modin-project__modin
modin/experimental/xgboost/xgboost.py
{ "start": 1090, "end": 8629 }
class ____: """ DMatrix holds references to partitions of Modin DataFrame. On init stage unwrapping partitions of Modin DataFrame is started. Parameters ---------- data : modin.pandas.DataFrame Data source of DMatrix. label : modin.pandas.DataFrame or modin.pandas.Series, optional Labels used for training. missing : float, optional Value in the input data which needs to be present as a missing value. If ``None``, defaults to ``np.nan``. silent : boolean, optional Whether to print messages during construction or not. feature_names : list, optional Set names for features. feature_types : list, optional Set types for features. feature_weights : array_like, optional Set feature weights for column sampling. enable_categorical : boolean, optional Experimental support of specializing for categorical features. Notes ----- Currently DMatrix doesn't support `weight`, `base_margin`, `nthread`, `group`, `qid`, `label_lower_bound`, `label_upper_bound` parameters. """ def __init__( self, data, label=None, missing=None, silent=False, feature_names=None, feature_types=None, feature_weights=None, enable_categorical=None, ): assert isinstance( data, pd.DataFrame ), f"Type of `data` is {type(data)}, but expected {pd.DataFrame}." if label is not None: assert isinstance( label, (pd.DataFrame, pd.Series) ), f"Type of `data` is {type(label)}, but expected {pd.DataFrame} or {pd.Series}." 
self.label = unwrap_partitions(label, axis=0) else: self.label = None self.data = unwrap_partitions(data, axis=0, get_ip=True) self._n_rows = data.shape[0] self._n_cols = data.shape[1] for i, dtype in enumerate(data.dtypes): if dtype == "object": raise ValueError(f"Column {i} has unsupported data type {dtype}.") self.feature_names = feature_names self.feature_types = feature_types self.missing = missing self.silent = silent self.feature_weights = feature_weights self.enable_categorical = enable_categorical self.metadata = ( data.index, data.columns, data._query_compiler._modin_frame.row_lengths, ) def __iter__(self): """ Return unwrapped `self.data` and `self.label`. Yields ------ list List of `self.data` with pairs of references to IP of row partition and row partition [(IP_ref0, partition_ref0), ..]. list List of `self.label` with references to row partitions [partition_ref0, ..]. """ yield self.data yield self.label def get_dmatrix_params(self): """ Get dict of DMatrix parameters excluding `self.data`/`self.label`. Returns ------- dict """ dmatrix_params = { "feature_names": self.feature_names, "feature_types": self.feature_types, "missing": self.missing, "silent": self.silent, "feature_weights": self.feature_weights, "enable_categorical": self.enable_categorical, } return dmatrix_params @property def feature_names(self): """ Get column labels. Returns ------- Column labels. """ return self._feature_names @feature_names.setter def feature_names(self, feature_names): """ Set column labels. Parameters ---------- feature_names : list or None Labels for columns. In the case of ``None``, existing feature names will be reset. 
""" if feature_names is not None: feature_names = ( list(feature_names) if not isinstance(feature_names, str) else [feature_names] ) if len(feature_names) != len(set(feature_names)): raise ValueError("Items in `feature_names` must be unique.") if len(feature_names) != self.num_col() and self.num_col() != 0: raise ValueError( "`feature_names` must have the same width as `self.data`." ) if not all( isinstance(f, str) and not any(x in f for x in set(("[", "]", "<"))) for f in feature_names ): raise ValueError( "Items of `feature_names` must be string and must not contain [, ] or <." ) else: feature_names = None self._feature_names = feature_names @property def feature_types(self): """ Get column types. Returns ------- Column types. """ return self._feature_types @feature_types.setter def feature_types(self, feature_types): """ Set column types. Parameters ---------- feature_types : list or None Labels for columns. In case None, existing feature names will be reset. """ if feature_types is not None: if not isinstance(feature_types, (list, str)): raise TypeError("feature_types must be string or list of strings") if isinstance(feature_types, str): feature_types = [feature_types] * self.num_col() feature_types = ( list(feature_types) if not isinstance(feature_types, str) else [feature_types] ) else: feature_types = None self._feature_types = feature_types def num_row(self): """ Get number of rows. Returns ------- int """ return self._n_rows def num_col(self): """ Get number of columns. Returns ------- int """ return self._n_cols def get_float_info(self, name): """ Get float property from the DMatrix. Parameters ---------- name : str The field name of the information. Returns ------- A NumPy array of float information of the data. """ return getattr(self, name) def set_info( self, *, label=None, feature_names=None, feature_types=None, feature_weights=None, ) -> None: """ Set meta info for DMatrix. 
Parameters ---------- label : modin.pandas.DataFrame or modin.pandas.Series, optional Labels used for training. feature_names : list, optional Set names for features. feature_types : list, optional Set types for features. feature_weights : array_like, optional Set feature weights for column sampling. """ if label is not None: self.label = label if feature_names is not None: self.feature_names = feature_names if feature_types is not None: self.feature_types = feature_types if feature_weights is not None: self.feature_weights = feature_weights
DMatrix
python
pydata__xarray
xarray/core/missing.py
{ "start": 2588, "end": 3828 }
class ____(BaseInterpolator): """One-dimensional linear interpolation. See Also -------- numpy.interp """ def __init__(self, xi, yi, method="linear", fill_value=None, period=None): if method != "linear": raise ValueError("only method `linear` is valid for the NumpyInterpolator") self.method = method self.f = np.interp self.cons_kwargs = {} self.call_kwargs = {"period": period} self._xi = xi self._yi = yi nan = np.nan if yi.dtype.kind != "c" else np.nan + np.nan * 1j if fill_value is None: self._left = nan self._right = nan elif isinstance(fill_value, Sequence) and len(fill_value) == 2: self._left = fill_value[0] self._right = fill_value[1] elif is_scalar(fill_value): self._left = fill_value self._right = fill_value else: raise ValueError(f"{fill_value} is not a valid fill_value") def __call__(self, x): return self.f( x, self._xi, self._yi, left=self._left, right=self._right, **self.call_kwargs, )
NumpyInterpolator
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_chart_doughnut02.py
{ "start": 315, "end": 1237 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("chart_doughnut02.xlsx") def test_create_file(self): """Test the creation of a simple XlsxWriter file.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() chart = workbook.add_chart({"type": "doughnut"}) data = [ [2, 4, 6], [60, 30, 10], ] worksheet.write_column("A1", data[0]) worksheet.write_column("B1", data[1]) chart.add_series( { "categories": "=Sheet1!$A$1:$A$3", "values": "=Sheet1!$B$1:$B$3", } ) chart.set_hole_size(10) worksheet.insert_chart("E9", chart) workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
getlogbook__logbook
src/logbook/base.py
{ "start": 38345, "end": 42002 }
class ____: """A LoggerGroup represents a group of loggers. It cannot emit log messages on its own but it can be used to set the disabled flag and log level of all loggers in the group. Furthermore the :meth:`process_record` method of the group is called by any logger in the group which by default calls into the :attr:`processor` callback function. """ def __init__(self, loggers=None, level=NOTSET, processor=None): #: a list of all loggers on the logger group. Use the #: :meth:`add_logger` and :meth:`remove_logger` methods to add #: or remove loggers from this list. self.loggers = [] if loggers is not None: for logger in loggers: self.add_logger(logger) #: the level of the group. This is reflected to the loggers #: in the group unless they overrode the setting. self.level = lookup_level(level) #: the disabled flag for all loggers in the group, unless #: the loggers overrode the setting. self.disabled = False #: an optional callback function that is executed to process #: the log records of all loggers in the group. self.processor = processor def add_logger(self, logger): """Adds a logger to this group.""" assert logger.group is None, "Logger already belongs to a group" logger.group = self self.loggers.append(logger) def remove_logger(self, logger): """Removes a logger from the group.""" self.loggers.remove(logger) logger.group = None def process_record(self, record): """Like :meth:`Logger.process_record` but for all loggers in the group. By default this calls into the :attr:`processor` function is it's not `None`. """ if self.processor is not None: self.processor(record) def enable(self, force=False): """Convenience method to enable this group. :param force: Force enable loggers that were explicitly set. :raises AttributeError: If ``force=True`` and the disabled property of a logger is read-only, typically because it was overridden in a subclass. .. 
versionadded:: 1.0 """ self.disabled = False if force: for logger in self.loggers: rv = getattr(logger, "_disabled", _missing) if rv is not _missing: logger.enable() def disable(self, force=False): """Convenience method to disable this group. :param force: Force disable loggers that were explicitly set. :raises AttributeError: If ``force=True`` and the disabled property of a logger is read-only, typically because it was overridden in a subclass. .. versionadded:: 1.0 """ self.disabled = True if force: for logger in self.loggers: rv = getattr(logger, "_disabled", _missing) if rv is not _missing: logger.disable() _default_dispatcher = RecordDispatcher() def dispatch_record(record): """Passes a record on to the handlers on the stack. This is useful when log records are created programmatically and already have all the information attached and should be dispatched independent of a logger. """ _default_dispatcher.call_handlers(record) # at that point we are safe to import handler from logbook.handlers import Handler # isort:skip # noqa: E402
LoggerGroup
python
pandas-dev__pandas
asv_bench/benchmarks/stat_ops.py
{ "start": 2870, "end": 3288 }
class ____: params = [["DataFrame", "Series"], [True, False]] param_names = ["constructor", "pct"] def setup(self, constructor, pct): values = np.random.randn(10**5) self.data = getattr(pd, constructor)(values) def time_rank(self, constructor, pct): self.data.rank(pct=pct) def time_average_old(self, constructor, pct): self.data.rank(pct=pct) / len(self.data)
Rank
python
bokeh__bokeh
tests/unit/bokeh/models/test_mappers.py
{ "start": 3532, "end": 3822 }
class ____: def test_basic(self) -> None: mapper = bmm.CategoricalMarkerMapper() check_properties_existence(mapper, [ "factors", "markers", "start", "end", "default_value"], )
Test_CategoricalMarkerMapper
python
scipy__scipy
scipy/fft/_pocketfft/tests/test_basic.py
{ "start": 13384, "end": 13537 }
class ____(_TestIRFFTBase): def setup_method(self): self.cdt = np.complex64 self.rdt = np.float32 self.ndec = 5
TestIRFFTSingle
python
HypothesisWorks__hypothesis
hypothesis-python/tests/cover/test_custom_reprs.py
{ "start": 1306, "end": 2009 }
class ____: def __repr__(self): raise ValueError("Oh no!") def test_errors_are_deferred_until_repr_is_calculated(): s = ( st.builds( lambda x, y: 1, st.just(IHaveABadRepr()), y=st.one_of(st.sampled_from((IHaveABadRepr(),)), st.just(IHaveABadRepr())), ) .map(lambda t: t) .filter(lambda t: True) .flatmap(lambda t: st.just(IHaveABadRepr())) ) with pytest.raises(ValueError): repr(s) @given(st.iterables(st.integers())) def test_iterables_repr_is_useful(it): # fairly hard-coded but useful; also ensures _values are inexhaustible assert repr(it) == f"iter({it._values!r})"
IHaveABadRepr
python
ansible__ansible
lib/ansible/modules/wait_for.py
{ "start": 12293, "end": 28084 }
class ____(TCPConnectionInfo): """ This is a TCP Connection Info evaluation strategy class that utilizes information from Linux's procfs. While less universal, does allow Linux targets to not require an additional library. """ platform = 'Linux' distribution = None source_file = { socket.AF_INET: '/proc/net/tcp', socket.AF_INET6: '/proc/net/tcp6' } match_all_ips = { socket.AF_INET: '00000000', socket.AF_INET6: '00000000000000000000000000000000', } ipv4_mapped_ipv6_address = { 'prefix': '0000000000000000FFFF0000', 'match_all': '0000000000000000FFFF000000000000' } local_address_field = 1 remote_address_field = 2 connection_state_field = 3 def __init__(self, module): self.module = module self.ips = _convert_host_to_hex(module.params['host']) self.port = "%0.4X" % int(module.params['port']) self.exclude_ips = self._get_exclude_ips() def _get_exclude_ips(self): exclude_hosts = self.module.params['exclude_hosts'] exclude_ips = [] if exclude_hosts is not None: for host in exclude_hosts: exclude_ips.extend(_convert_host_to_hex(host)) return exclude_ips def get_active_connections_count(self): active_connections = 0 for family in self.source_file.keys(): if not os.path.isfile(self.source_file[family]): continue try: with open(self.source_file[family]) as f: for tcp_connection in f.readlines(): tcp_connection = tcp_connection.strip().split() if tcp_connection[self.local_address_field] == 'local_address': continue if (tcp_connection[self.connection_state_field] not in [get_connection_state_id(_connection_state) for _connection_state in self.module.params['active_connection_states']]): continue (local_ip, local_port) = tcp_connection[self.local_address_field].split(':') if self.port != local_port: continue (remote_ip, remote_port) = tcp_connection[self.remote_address_field].split(':') if (family, remote_ip) in self.exclude_ips: continue if any(( (family, local_ip) in self.ips, (family, self.match_all_ips[family]) in self.ips, 
local_ip.startswith(self.ipv4_mapped_ipv6_address['prefix']) and (family, self.ipv4_mapped_ipv6_address['match_all']) in self.ips, )): active_connections += 1 except OSError: pass return active_connections def _convert_host_to_ip(host): """ Perform forward DNS resolution on host, IP will give the same IP Args: host: String with either hostname, IPv4, or IPv6 address Returns: List of tuples containing address family and IP """ addrinfo = socket.getaddrinfo(host, 80, 0, 0, socket.SOL_TCP) ips = [] for family, socktype, proto, canonname, sockaddr in addrinfo: ip = sockaddr[0] ips.append((family, ip)) if family == socket.AF_INET: ips.append((socket.AF_INET6, "::ffff:" + ip)) return ips def _convert_host_to_hex(host): """ Convert the provided host to the format in /proc/net/tcp* /proc/net/tcp uses little-endian four byte hex for ipv4 /proc/net/tcp6 uses little-endian per 4B word for ipv6 Args: host: String with either hostname, IPv4, or IPv6 address Returns: List of tuples containing address family and the little-endian converted host """ ips = [] if host is not None: for family, ip in _convert_host_to_ip(host): hexip_nf = binascii.b2a_hex(socket.inet_pton(family, ip)) hexip_hf = "" for i in range(0, len(hexip_nf), 8): ipgroup_nf = hexip_nf[i:i + 8] ipgroup_hf = socket.ntohl(int(ipgroup_nf, base=16)) hexip_hf = "%s%08X" % (hexip_hf, ipgroup_hf) ips.append((family, hexip_hf)) return ips def _timedelta_total_seconds(timedelta): return ( timedelta.microseconds + 0.0 + (timedelta.seconds + timedelta.days * 24 * 3600) * 10 ** 6) / 10 ** 6 def get_connection_state_id(state): connection_state_id = { 'ESTABLISHED': '01', 'SYN_SENT': '02', 'SYN_RECV': '03', 'FIN_WAIT1': '04', 'FIN_WAIT2': '05', 'TIME_WAIT': '06', } return connection_state_id[state] def main(): module = AnsibleModule( argument_spec=dict( host=dict(type='str', default='127.0.0.1'), timeout=dict(type='int', default=300), connect_timeout=dict(type='int', default=5), delay=dict(type='int', default=0), 
port=dict(type='int'), active_connection_states=dict(type='list', elements='str', default=['ESTABLISHED', 'FIN_WAIT1', 'FIN_WAIT2', 'SYN_RECV', 'SYN_SENT', 'TIME_WAIT']), path=dict(type='path'), search_regex=dict(type='str'), state=dict(type='str', default='started', choices=['absent', 'drained', 'present', 'started', 'stopped']), exclude_hosts=dict(type='list', elements='str'), sleep=dict(type='int', default=1), msg=dict(type='str'), ), ) host = module.params['host'] timeout = module.params['timeout'] connect_timeout = module.params['connect_timeout'] delay = module.params['delay'] port = module.params['port'] state = module.params['state'] path = module.params['path'] b_path = to_bytes(path, errors='surrogate_or_strict', nonstring='passthru') search_regex = module.params['search_regex'] b_search_regex = to_bytes(search_regex, errors='surrogate_or_strict', nonstring='passthru') msg = module.params['msg'] if search_regex is not None: try: b_compiled_search_re = re.compile(b_search_regex, re.MULTILINE) except re.error as e: module.fail_json(msg="Invalid regular expression: %s" % e) else: b_compiled_search_re = None match_groupdict = {} match_groups = () if port and path: module.fail_json(msg="port and path parameter can not both be passed to wait_for", elapsed=0) if path and state == 'stopped': module.fail_json(msg="state=stopped should only be used for checking a port in the wait_for module", elapsed=0) if path and state == 'drained': module.fail_json(msg="state=drained should only be used for checking a port in the wait_for module", elapsed=0) if module.params['exclude_hosts'] is not None and state != 'drained': module.fail_json(msg="exclude_hosts should only be with state=drained", elapsed=0) for _connection_state in module.params['active_connection_states']: try: get_connection_state_id(_connection_state) except Exception: module.fail_json(msg="unknown active_connection_state (%s) defined" % _connection_state, elapsed=0) start = datetime.now(timezone.utc) if 
delay: time.sleep(delay) if not port and not path and state != 'drained': time.sleep(timeout) elif state in ['absent', 'stopped']: # first wait for the stop condition end = start + timedelta(seconds=timeout) while datetime.now(timezone.utc) < end: if path: try: if not os.access(b_path, os.F_OK): break except OSError: break elif port: try: s = socket.create_connection((host, port), connect_timeout) s.shutdown(socket.SHUT_RDWR) s.close() except Exception: break # Conditions not yet met, wait and try again time.sleep(module.params['sleep']) else: elapsed = datetime.now(timezone.utc) - start if port: module.fail_json(msg=msg or "Timeout when waiting for %s:%s to stop." % (host, port), elapsed=elapsed.seconds) elif path: module.fail_json(msg=msg or "Timeout when waiting for %s to be absent." % (path), elapsed=elapsed.seconds) elif state in ['started', 'present']: # wait for start condition end = start + timedelta(seconds=timeout) while datetime.now(timezone.utc) < end: if path: try: os.stat(b_path) except OSError as e: # If anything except file not present, throw an error if e.errno != 2: elapsed = datetime.now(timezone.utc) - start module.fail_json(msg=msg or "Failed to stat %s, %s" % (path, e.strerror), elapsed=elapsed.seconds) # file doesn't exist yet, so continue else: # File exists. Are there additional things to check? if not b_compiled_search_re: # nope, succeed! break try: with open(b_path, 'rb') as f: try: with contextlib.closing(mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)) as mm: search = b_compiled_search_re.search(mm) if search: if search.groupdict(): match_groupdict = search.groupdict() if search.groups(): match_groups = search.groups() break except (ValueError, OSError) as e: module.debug('wait_for failed to use mmap on "%s": %s. Falling back to file read().' 
% (path, to_native(e))) # cannot mmap this file, try normal read search = re.search(b_compiled_search_re, f.read()) if search: if search.groupdict(): match_groupdict = search.groupdict() if search.groups(): match_groups = search.groups() break except Exception as e: module.warn('wait_for failed on "%s", unexpected exception(%s): %s.).' % (path, to_native(e.__class__), to_native(e))) except OSError: pass elif port: alt_connect_timeout = math.ceil( _timedelta_total_seconds(end - datetime.now(timezone.utc)), ) try: s = socket.create_connection((host, int(port)), min(connect_timeout, alt_connect_timeout)) except Exception: # Failed to connect by connect_timeout. wait and try again pass else: # Connected -- are there additional conditions? if b_compiled_search_re: b_data = b'' matched = False while datetime.now(timezone.utc) < end: max_timeout = math.ceil( _timedelta_total_seconds( end - datetime.now(timezone.utc), ), ) readable = select.select([s], [], [], max_timeout)[0] if not readable: # No new data. Probably means our timeout # expired continue response = s.recv(1024) if not response: # Server shutdown break b_data += response if b_compiled_search_re.search(b_data): matched = True break # Shutdown the client socket try: s.shutdown(socket.SHUT_RDWR) except OSError as ex: if ex.errno != errno.ENOTCONN: raise # else, the server broke the connection on its end, assume it's not ready else: s.close() if matched: # Found our string, success! break else: # Connection established, success! 
try: s.shutdown(socket.SHUT_RDWR) except OSError as ex: if ex.errno != errno.ENOTCONN: raise # else, the server broke the connection on its end, assume it's not ready else: s.close() break # Conditions not yet met, wait and try again time.sleep(module.params['sleep']) else: # while-else # Timeout expired elapsed = datetime.now(timezone.utc) - start if port: if search_regex: module.fail_json(msg=msg or "Timeout when waiting for search string %s in %s:%s" % (search_regex, host, port), elapsed=elapsed.seconds) else: module.fail_json(msg=msg or "Timeout when waiting for %s:%s" % (host, port), elapsed=elapsed.seconds) elif path: if search_regex: module.fail_json(msg=msg or "Timeout when waiting for search string %s in %s" % (search_regex, path), elapsed=elapsed.seconds) else: module.fail_json(msg=msg or "Timeout when waiting for file %s" % (path), elapsed=elapsed.seconds) elif state == 'drained': # wait until all active connections are gone end = start + timedelta(seconds=timeout) tcpconns = TCPConnectionInfo(module) while datetime.now(timezone.utc) < end: if tcpconns.get_active_connections_count() == 0: break # Conditions not yet met, wait and try again time.sleep(module.params['sleep']) else: elapsed = datetime.now(timezone.utc) - start module.fail_json(msg=msg or "Timeout when waiting for %s:%s to drain" % (host, port), elapsed=elapsed.seconds) elapsed = datetime.now(timezone.utc) - start module.exit_json(state=state, port=port, search_regex=search_regex, match_groups=match_groups, match_groupdict=match_groupdict, path=path, elapsed=elapsed.seconds) if __name__ == '__main__': main()
LinuxTCPConnectionInfo
python
getsentry__sentry
src/sentry/integrations/source_code_management/commit_context.py
{ "start": 13693, "end": 14903 }
class ____(ABC): base_url: str @abstractmethod def get_blame_for_files( self, files: Sequence[SourceLineInfo], extra: dict[str, Any] ) -> list[FileBlameInfo]: """Get the blame for a list of files. This method should include custom metrics for the specific integration implementation.""" raise NotImplementedError @abstractmethod def create_comment(self, repo: str, issue_id: str, data: dict[str, Any]) -> Any: raise NotImplementedError @abstractmethod def update_comment( self, repo: str, issue_id: str, comment_id: str, data: dict[str, Any] ) -> Any: raise NotImplementedError @abstractmethod def create_pr_comment(self, repo: Repository, pr: PullRequest, data: dict[str, Any]) -> Any: raise NotImplementedError @abstractmethod def update_pr_comment( self, repo: Repository, pr: PullRequest, pr_comment: PullRequestComment, data: dict[str, Any], ) -> Any: raise NotImplementedError @abstractmethod def get_merge_commit_sha_from_commit(self, repo: Repository, sha: str) -> str | None: raise NotImplementedError
CommitContextClient
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/exc.py
{ "start": 24462, "end": 24565 }
class ____(DatabaseError): """Wraps a DB-API OperationalError.""" code = "e3q8"
OperationalError
python
google__pytype
pytype/rewrite/function_call_helper.py
{ "start": 395, "end": 5453 }
class ____(Generic[_FrameT]): """Helper for executing function calls.""" def __init__(self, ctx: context.Context, frame: _FrameT): self._ctx = ctx self._frame = frame # Function kwnames are stored in the vm by KW_NAMES and retrieved by CALL self._kw_names: Sequence[str] = () def set_kw_names(self, kw_names: Sequence[str]) -> None: self._kw_names = kw_names def make_function_args(self, args: Sequence[_Var]) -> abstract.Args[_FrameT]: """Unpack args into posargs and kwargs (3.11+).""" if self._kw_names: n_kw = len(self._kw_names) posargs = tuple(args[:-n_kw]) kw_vals = args[-n_kw:] kwargs = datatypes.immutabledict(zip(self._kw_names, kw_vals)) else: posargs = tuple(args) kwargs = datatypes.EMPTY_MAP self._kw_names = () return abstract.Args(posargs=posargs, kwargs=kwargs, frame=self._frame) def _unpack_starargs(self, starargs: _Var) -> abstract.FunctionArgTuple: """Unpacks variable positional arguments.""" # TODO(b/331853896): This follows vm_utils.ensure_unpacked_starargs, but # does not yet handle indefinite iterables. posargs = starargs.get_atomic_value() if isinstance(posargs, abstract.FunctionArgTuple): # This has already been converted pass elif isinstance(posargs, abstract.FrozenInstance): # This is indefinite. 
posargs = abstract.FunctionArgTuple(self._ctx, indefinite=True) elif isinstance(posargs, abstract.Tuple): posargs = abstract.FunctionArgTuple(self._ctx, posargs.constant) elif isinstance(posargs, tuple): posargs = abstract.FunctionArgTuple(self._ctx, posargs) elif abstract.is_any(posargs): posargs = abstract.FunctionArgTuple(self._ctx, indefinite=True) else: assert False, f'unexpected posargs type: {posargs}: {type(posargs)}' return posargs def _unpack_starstarargs( self, starstarargs: _Var ) -> abstract.FunctionArgDict: """Unpacks variable keyword arguments.""" kwargs = starstarargs.get_atomic_value() if isinstance(kwargs, abstract.FunctionArgDict): # This has already been converted pass elif isinstance(kwargs, abstract.FrozenInstance): # This is indefinite. kwargs = abstract.FunctionArgDict(self._ctx, indefinite=True) elif isinstance(kwargs, abstract.Dict): kwargs = kwargs.to_function_arg_dict() elif abstract.is_any(kwargs): kwargs = abstract.FunctionArgDict(self._ctx, indefinite=True) else: assert False, f'unexpected kwargs type: {kwargs}: {type(kwargs)}' return kwargs def make_function_args_ex( self, starargs: _Var, starstarargs: _Var | None, ) -> abstract.Args[_FrameT]: """Makes function args from variable positional and keyword arguments.""" # Convert *args unpacked_starargs = self._unpack_starargs(starargs) if unpacked_starargs.indefinite: # We have an indefinite tuple; leave it in starargs posargs = () starargs = unpacked_starargs.to_variable() else: # We have a concrete tuple we are unpacking; move it into posargs posargs = unpacked_starargs.constant starargs = None # Convert **kwargs if starstarargs: unpacked_starstarargs = self._unpack_starstarargs(starstarargs) if unpacked_starstarargs.indefinite: kwargs = datatypes.EMPTY_MAP starstarargs = unpacked_starstarargs.to_variable() else: kwargs = unpacked_starstarargs.constant starstarargs = None else: kwargs = datatypes.EMPTY_MAP return abstract.Args( posargs=posargs, kwargs=kwargs, starargs=starargs, 
starstarargs=starstarargs, frame=self._frame, ) def build_class( self, args: abstract.Args[_FrameT] ) -> abstract.InterpreterClass: """Builds a class.""" builder = args.posargs[0].get_atomic_value( abstract.InterpreterFunction[_FrameT] ) name_var = args.posargs[1] name = abstract.get_atomic_constant(name_var, str) base_vars = args.posargs[2:] bases = [] for base_var in base_vars: try: base = base_var.get_atomic_value(abstract.SimpleClass) except ValueError as e: raise NotImplementedError('Unexpected base class') from e bases.append(base) keywords = {} for kw, var in args.kwargs.items(): try: val = var.get_atomic_value() except ValueError as e: raise NotImplementedError('Unexpected keyword value') from e keywords[kw] = val frame = builder.call(abstract.Args(frame=self._frame)) members = dict(frame.final_locals) cls = abstract.InterpreterClass( ctx=self._ctx, name=name, members=members, bases=bases, keywords=keywords, functions=frame.functions, classes=frame.classes, ) for base in bases: if base.full_name in overlays.CLASS_TRANSFORMS: overlays.CLASS_TRANSFORMS[base.full_name](self._ctx, cls) break return cls
FunctionCallHelper
python
google__jax
jax/experimental/jax2tf/jax2tf.py
{ "start": 13425, "end": 38394 }
class ____: def __init__(self, fun_jax, *, args_specs, kwargs_specs, native_serialization_platforms: Sequence[str] | None, native_serialization_disabled_checks: Sequence[DisabledSafetyCheck]): self.convert_kwargs = dict(native_serialization_platforms=native_serialization_platforms, native_serialization_disabled_checks=native_serialization_disabled_checks) if hasattr(fun_jax, "trace"): # If we have a pjit or pmap already we do not wrap with another, and we # allow shardings. fun_jit = fun_jax else: # We support convert(pjit(f_jax)) and convert(jit(f_jax)) but also # convert(f_jax), in which case a "jit" is implied. In that case we raise # an error if the lowered function contains non-replicated sharding annotations. fun_jit = jax.jit(fun_jax) self.fun_jax = fun_jit self.args_specs = args_specs self.kwargs_specs = kwargs_specs self.native_serialization_disabled_checks = native_serialization_disabled_checks self.native_serialization_platforms = native_serialization_platforms def before_conversion(self): _prev_func_list = _thread_local_state.call_tf_concrete_function_list _thread_local_state.call_tf_concrete_function_list = [] def _restore_context(): _thread_local_state.call_tf_concrete_function_list = _prev_func_list self._restore_context = _restore_context _exported_device_assignment = [None] self.exported = _export._export_internal( self.fun_jax, platforms=self.native_serialization_platforms, disabled_checks=self.native_serialization_disabled_checks, _device_assignment_for_internal_jax2tf_use_only=_exported_device_assignment, )(*self.args_specs, **self.kwargs_specs) assert(_exported_device_assignment[0] is not None) self.device_assignment = _exported_device_assignment[0] def after_conversion(self): self._restore_context() def run_fun_tf(self, args_flat_tf: Sequence[TfVal] ) -> tuple[Sequence[TfVal], Sequence[core.ShapedArray], tree_util.PyTreeDef]: results = _run_exported_as_tf(args_flat_tf, self.exported) return results, tuple(self.exported.out_avals), 
self.exported.out_tree def get_vjp_fun(self) -> tuple[Callable, Sequence[core.AbstractValue]]: return _export._get_vjp_fun(self.fun_jax, in_tree=self.exported.in_tree, in_avals=self.exported.in_avals, in_shardings_hlo=self.exported.in_shardings_hlo, out_avals=self.exported.out_avals, out_shardings_hlo=self.exported.out_shardings_hlo, device_assignment=self.device_assignment, apply_jit=True) def dtype_of_val(val: TfVal) -> DType: """Computes the TensorFlow dtype using JAX's typing rules. If the value is a tf.Tensor, it starts with its dtype. If the value is a constant it uses JAX to infer its dtype. The resulting dtype follows the JAX type inference rules, and depends on the value of the JAX_ENABLE_X64 flag. See README.md for how 64-bit values are treated. """ tval, _ = _tfval_to_tensor_jax_dtype(val) return tval.dtype @partial(api_util.api_hook, tag="jax2tf_eval_polymorphic_shapes") def eval_polymorphic_shape(fun_jax: Callable, *, polymorphic_shapes=None) -> Callable: """Evaluates the output shape in presence of shape polymorphism. This is done without lowering or executing the function, same as for `jax.eval_shape`. Args: fun_jax: target JAX function to be called. Its arguments and return value should be JAX arrays, or nested standard Python containers (tuple/list/dict) thereof (pytrees). polymorphic_shapes: Specifies input shapes to be treated polymorphically during shape evaluation. See discussion for `jax2tf.convert`. .. warning:: The shape-polymorphic lowering is an experimental feature. Returns: a function that takes `jax.ShapeDtypeStruct`s (or any values with `.shape` and `.dtype` attributes) corresponding to the inputs for `fun_jax`, and returns a tuple with: * the jax.ShapeDtypeStruct corresponding to the result, as for `jax.eval_shape`. The shape may contain symbolic dimension expressions. * the value that can be passed to `polymorphic_shapes` for a subsequent call to `jax2tf.eval_polymorphic_shape`, or `jax2tf.convert`. 
For example: >>> import jax >>> from jax.experimental import jax2tf >>> from jax import numpy as jnp >>> >>> f = lambda A, x: jnp.sin(jnp.dot(A, x)) >>> A = jax.ShapeDtypeStruct((2000, 3000), jnp.float32) >>> x = jax.ShapeDtypeStruct((3000, 1000), jnp.float32) >>> out_spec, out_poly_shape = jax2tf.eval_polymorphic_shape(f, polymorphic_shapes=["a, b", "b, c"])(A, x) >>> print(out_spec.shape) ("a", "c") >>> print(out_poly_shape) (a, c) >>> res_spec, res_poly_shape = jax2tf.eval_polymorphic_shape(lambda x: x.T, polymorphic_shapes=[out_poly_shape])(out_spec) >>> print(res_poly_shape) (c, a) """ def do_eval_polymorphic_shape(*args_specs) -> Any: args_poly_specs = export.symbolic_args_specs( args_specs, polymorphic_shapes) res_poly_spec = jax.eval_shape(fun_jax, *args_poly_specs) # TODO(necula): For now we export the polymorphic shapes using `str`. res_polymorphic_shape = tree_util.tree_map(lambda r: str(r.shape), res_poly_spec) return res_poly_spec, res_polymorphic_shape return do_eval_polymorphic_shape # Internals def flatten_fun_jax(fun_jax: Callable, in_tree, ) -> tuple[Callable, Callable]: """Wraps the function to take a (flat) list of positional args. jax2tf works better and is simpler when the JAX function takes and returns just a tuple of values (no pytrees, no kwargs). This is in part because jax.vjp does not support kwargs and we can only set tf.custom_gradient on functions with flat arguments and results Returns: * the wrapped JAX function taking and returning a flat list of arguments * a thunk that can be called after the wrapped function has been called to return the output pytree. 
""" out_tree_ref = None def fun_flat_jax(*args_flat_jax): tree_args, tree_kwargs = tree_util.tree_unflatten(in_tree, args_flat_jax) tree_res = fun_jax(*tree_args, **tree_kwargs) res_flat_jax, out_tree = tree_util.tree_flatten(tree_res) nonlocal out_tree_ref assert out_tree_ref is None or out_tree_ref == out_tree out_tree_ref = out_tree return res_flat_jax return fun_flat_jax, lambda: out_tree_ref def preprocess_arg_tf(arg_idx: int, arg_tf: TfVal) -> TfVal: """Pre-processes the TF args. Returns: a tuple with the pre-processed TF arg, the TF shape, and the JAX dtype. """ if not _is_tfval(arg_tf): msg = (f"Argument {arg_tf} of type {type(arg_tf)} of jax2tf.convert(f) should " "be NumPy array, scalar, tf.Variable, or tf.Tensor") raise TypeError(msg) # May cast the args_flat to JAX types, using JAX's interpretation # of types of constants. arg_tf, _ = _tfval_to_tensor_jax_dtype(arg_tf) # Name input tensors; do this after we have cast the arguments arg_tf = tf.identity(arg_tf, f"jax2tf_arg_{arg_idx}") return arg_tf def _make_custom_gradient_fn_tf(fun_jax, *, impl: NativeSerializationImpl, with_gradient: bool, args_specs, kwargs_specs, args_tf: Sequence[TfVal], outs_avals: Sequence[core.ShapedArray], outs_tf: Sequence[TfVal]): """Prepares the TF function to be used with tf.custom_gradient. Args: impl: the serialization implementation details with_gradient: whether to include a tf.custom_gradient args_specs, kwargs_specs: the jax.ShapeDtypeArrays for the args and kwargs args_tf: the flattened TF arguments of the primal function outs_avals: the flattened output JAX abstract values of the primal function outs_tf: the flattened TF outputs of the primal function """ def grad_fn_tf(*out_cts_flat_tf: TfVal, variables=None): if variables: raise ValueError( "Unexpected variables used in forward pass. " "This should not happen for first-order differentiation. 
" f"{variables=}") # TODO: enable higher-order gradients with tf.name_scope("jax2tf_vjp"): def fix_out_ct(out_ct_tf, out_ct_aval: core.ShapedArray, out_tf: TfVal): # If the primal function has outputs of integer or bool types, and if we are # under a tf.function context, then TF will pass None in _out_cts_flat # in place of these values. We should change these to float0 or # else JAX gets unhappy. See issue #6975. if out_ct_tf is not None: return out_ct_tf assert core.primal_dtype_to_tangent_dtype(out_ct_aval.dtype) == dtypes.float0, f"{out_ct_tf=}" # Note that out_ct_aval.shape contains dimension variable from the # primal function scope. We use tf.zeros_like to make a 0 of the right shape. return tf.zeros_like(out_tf, dtype=_tf_np_dtype_for_float0) out_cts_fixed_flat_tf = tuple(map(fix_out_ct, out_cts_flat_tf, outs_avals, outs_tf)) vjp_args_flat_tf = tuple(args_tf) + out_cts_fixed_flat_tf fun_vjp_jax, vjp_in_avals = impl.get_vjp_fun() vjp_polymorphic_shapes = tuple( str(a.shape) # Note: may be _DimExpr, not just DimVar for a in vjp_in_avals) in_cts_flat = convert( fun_vjp_jax, with_gradient=with_gradient, polymorphic_shapes=vjp_polymorphic_shapes, **impl.convert_kwargs)(*vjp_args_flat_tf) # We do not need to fix the in_cts because the TF gradient machinery # will adjust the unconnected gradients and those for integer types. return in_cts_flat return grad_fn_tf def _run_exported_as_tf(args_flat_tf: Sequence[TfVal], exported: export.Exported, ) -> Sequence[TfVal]: """Runs the `exported` as an XlaCallModule TF op. Returns: the flattened tuple of results. 
""" args_avals = exported.in_avals # TF values may be integer types for float0 def _convert_value(val, aval): # Check the shape assert all(d_aval == d_val for d_aval, d_val in zip(aval.shape, val.shape) if core.is_constant_dim(d_aval)), (aval, val) conversion_dtype = _to_tf_dtype(aval.dtype) if conversion_dtype != aval.dtype: return tf.cast(val, conversion_dtype) else: return val args_flat_tf = tuple(map(_convert_value, args_flat_tf, args_avals)) out_shapes_tf = tuple( tuple(d if core.is_constant_dim(d) else None for d in out_aval.shape) for out_aval in exported.out_avals) out_types = tuple(_to_tf_dtype(out_aval.dtype) for out_aval in exported.out_avals) kept_args_flat_tf = [atf for i, atf in enumerate(args_flat_tf) if i in exported.module_kept_var_idx] version = exported.calling_convention_version try: get_max_supported_version = tfxla.call_module_maximum_supported_version except AttributeError: get_max_supported_version = None if get_max_supported_version: max_supported_version = get_max_supported_version() else: max_supported_version = 6 if version > max_supported_version: raise NotImplementedError( "XlaCallModule from your TensorFlow installation supports up to " f"serialization version {max_supported_version} but the serialized " f"module needs version {version}. " "You should upgrade TensorFlow, e.g., to tf_nightly." ) call_module_attrs = dict( version=version, Tout=out_types, Sout=out_shapes_tf, function_list=[ concrete_fn.function_def.signature.name for concrete_fn in _thread_local_state.call_tf_concrete_function_list ] if _thread_local_state.call_tf_concrete_function_list is not None else [], # We always set has_token_input_output because it requires real tokens # for versions less than 9 and is not used starting with version 9. 
has_token_input_output=False ) call_module_attrs["platforms"] = tuple(p.upper() for p in exported.platforms) if version >= 6: call_module_attrs["disabled_checks"] = tuple( str(dc) for dc in exported.disabled_safety_checks) else: if version >= 3: if DisabledSafetyCheck.platform() in exported.disabled_safety_checks: call_module_attrs["platforms"] = () # No platform checking if version >= 10: call_module_attrs["use_shardy_partitioner"] = ( config.use_shardy_partitioner.value ) if logging.vlog_is_on(3): # We already logged the MLIR module when we exported it. logging.vlog(3, "XlaCallModule %s", str(call_module_attrs)) call_module_attrs["module"] = exported.mlir_module_serialized # Apply the shardings on arguments and results for pjit. This is redundant # because the mlir_module_text will already contain the shardings, but it # makes it easier for tools like the TPU inference converter to see the # sharding without digging into the `module` attribute of the `XlaCallModule` # op, in the same way as it is done for the legacy jax2tf conversion. # Do not apply XlaSharding for REPLICATED, on inputs and outputs. # This is an agreed convention, and also improves usability under TF eager. # See b/255511660. kept_in_shardings = [] for i in exported.module_kept_var_idx: kept_in_shardings.append(exported.in_shardings_hlo[i]) args_flat_tf = tuple( map(partial(_shard_value, skip_replicated_sharding=tf.executing_eagerly()), kept_args_flat_tf, kept_in_shardings)) res = tfxla.call_module(args_flat_tf, **call_module_attrs) # TODO(b/278940799): Replace the TF v1 API with public TF2 API. # Add the custom call tf.function into the default graph, so those functions # will be available during tf.SavedModel.save. 
if _thread_local_state.call_tf_concrete_function_list is not None: for concrete_fn in _thread_local_state.call_tf_concrete_function_list: tf.compat.v1.get_default_graph()._add_function_recursive( concrete_fn._inference_function ) res = list(map(partial(_shard_value, skip_replicated_sharding=tf.executing_eagerly()), res, exported.out_shardings_hlo)) res = tuple(map(_convert_value, res, exported.out_avals)) return res def _jax_physical_aval(aval: core.ShapedArray) -> core.ShapedArray: """Converts JAX avals from logical to physical, if relevant. JAX might have avals whose logical vs physical shape/dtype may differ, and only the physical view is expected to possibly relate to TF. TF impl rules should operate on the physical form. A JAX logical aval might even correspond, in principle, to several physical avals, but we don't support those here. Instead we assert there is only one and return it. """ physical_aval = core.physical_aval(aval) assert (len(physical_aval.shape) >= len(aval.shape) and physical_aval.shape[:len(aval.shape)] == aval.shape), (physical_aval, aval) return physical_aval def _jax_physical_dtype(dtype): # assuming () is a fine stand-in shape return _jax_physical_aval(core.ShapedArray((), dtype)).dtype def _aval_to_tf_shape(aval: core.ShapedArray) -> tuple[int | None, ...]: """Generate a TF shape, possibly containing None for polymorphic dimensions.""" aval = _jax_physical_aval(aval) return tuple(map(lambda d: None if export.is_symbolic_dim(d) else d, aval.shape)) # In the TF world, we represent float0 as zeros of this type. # We pick bool because this is what JAX uses when it lowers float0 to HLO. _tf_np_dtype_for_float0 = np.bool_ def _to_tf_dtype(jax_dtype): # Note that converting _to_tf_dtype and _to_jax_dtype are not inverses, # due to float0 and 64-bit behavior. try: jax_dtype = _jax_physical_dtype(jax_dtype) except TypeError: # `jax_dtype` isn't actually a valid jax dtype (e.g. 
it is # tf.float32), so there is no physical dtype anyway pass if jax_dtype == dtypes.float0: jax_dtype = _tf_np_dtype_for_float0 return tf.dtypes.as_dtype(jax_dtype) def _to_jax_dtype(tf_dtype): # Note that converting _to_tf_dtype and _to_jax_dtype are not inverses, # due to float0 and 64-bit behavior. dt = dtypes.canonicalize_dtype(tf_dtype.as_numpy_dtype) if dt not in dtypes._jax_dtype_set: raise TypeError(f"dtype {dt} is not a valid JAX array " "type. Only arrays of numeric types are supported by JAX.") return dt def _tfval_to_tensor_jax_dtype(val: TfVal, jax_dtype: DType | None = None, memoize_constants=False) -> tuple[TfVal, DType]: """Converts a scalar, ndarray, or tf.Tensor to a tf.Tensor with proper type. If `jax_dtype` is missing, uses JAX typing rules. See README.md for details regarding 64-bit values. Args: val: a scalar, ndarray, tf.Tensor, or tf.Variable jax_dtype: an optional dtype to use. If missing, uses JAX type inference rules for constants. memoize_constants: whether to memoize TF constants. We can't do this everywhere, we may be outside of a conversion scope. Returns: a tuple with a tf.Tensor with the type as needed by JAX, and the JAX type. """ if isinstance(val, (tf.Tensor, tf.Variable)): jax_dtype = jax_dtype or _to_jax_dtype(val.dtype) # Give JAX a chance to pick the type conversion_dtype = _to_tf_dtype(jax_dtype) if conversion_dtype != val.dtype: # May need to cast for 64-bit values return tf.cast(val, conversion_dtype), jax_dtype else: return val, jax_dtype else: # A constant jax_dtype = jax_dtype or core.abstractify(val).dtype # TODO(document): We assume that the value of a constant does not # change through the scope of the function. But it may be an ndarray, ... # JAX has the same problem when generating HLO. const_key = (id(val), jax_dtype) # Since we use id(val) as a cache key, we have to make sure that we keep # the previous `val` alive. 
Otherwise, for a ndarray, it can get garbage # collected and reused for a different value, which would create correctness # issues. We keep the `val` alive by storing in the cache the pair # `(val, tf_val)`. # Only memoize non-scalars. JAX will lift all non-scalar constants as # Jaxpr consts, to the top level of the Jaxpr. This ensures that we see them # early, when entering the Jaxpr, so we create the tf.const early and its # scope is the entire Jaxpr. do_memoize = (memoize_constants and np.size(val) > 1 and _thread_local_state.constant_cache is not None) if do_memoize: _, tf_val = _thread_local_state.constant_cache.get(const_key, (None, None)) else: tf_val = None if tf_val is None: conversion_dtype = _to_tf_dtype(jax_dtype) # The float0 type is not known to TF. if jax_dtype == dtypes.float0: val = np.zeros(np.shape(val), conversion_dtype.as_numpy_dtype) if hasattr(val, 'dtype') and dtypes.issubdtype(val.dtype, dtypes.extended): val = val.dtype._rules.physical_const(val) tf_val = tf.convert_to_tensor(val, dtype=conversion_dtype) if do_memoize: _thread_local_state.constant_cache[const_key] = (val, tf_val) return tf_val, jax_dtype PartitionsOrReplicated = Union[tuple[int, ...], None] def split_to_logical_devices(tensor: TfVal, partition_dimensions: PartitionsOrReplicated): """Like TPUMPStrategy.experimental_split_to_logical_devices. For jax2tf purposes we want to avoid needing to thread the `strategy` object through the generated computation. It seems that the original function needs the strategy object only for error checking, which we assume is done upstream by JAX. Args: tensor: Input tensor to annotate. partition_dimensions: A list of integers, with one integer per tensor dimension, specifying in how many parts the dimension should be split. The product of integers must equal the number of devices per replica. use_sharding_op: whether to use a sharding op, or not. Returns: an annotated tensor. """ # TODO: this is only for sharded_jit. 
Either remove, or implement in terms # of _shard_values. if partition_dimensions is None: return xla_sharding.replicate(tensor, use_sharding_op=True) num_partition_splits = math.prod(partition_dimensions) tile_assignment = np.arange(num_partition_splits).reshape( partition_dimensions) return xla_sharding.tile(tensor, tile_assignment, use_sharding_op=True) def _shard_value(val: TfVal, sd: xla_client.HloSharding | None, *, skip_replicated_sharding: bool) -> TfVal: """Apply sharding to a TfVal.""" if sd is None: return val sharding_proto = sd.to_proto() if (skip_replicated_sharding and op_shardings.is_hlo_sharding_replicated(sd)): return val # Tensorflow heavily relies on tile_assignment_devices proto fields specific # to V1 sharding format, falling back to this format. if ( not sharding_proto.tile_assignment_devices and sharding_proto.iota_reshape_dims ): tad = list( np.arange(math.prod(sharding_proto.tile_assignment_dimensions)) .reshape(sharding_proto.iota_reshape_dims) .transpose(sharding_proto.iota_transpose_perm) .flat ) else: tad = sharding_proto.tile_assignment_devices # type: ignore # To use xla_sharding.py, we must have a xla_data_pb2.OpSharding. xla_sharding_v1_proto: xla_data_pb2.OpSharding = xla_data_pb2.OpSharding( type=int(sharding_proto.type), tile_assignment_dimensions=sharding_proto.tile_assignment_dimensions, tile_assignment_devices=tad, replicate_on_last_tile_dim=sharding_proto.replicate_on_last_tile_dim, last_tile_dims=sharding_proto.last_tile_dims, ) # Shardy requires V2 sharding format. 
if config.use_shardy_partitioner.value: xla_sharding_v2_proto: xla_data_pb2.OpSharding = xla_data_pb2.OpSharding( type=int(sharding_proto.type), tile_assignment_dimensions=sharding_proto.tile_assignment_dimensions, tile_assignment_devices=sharding_proto.tile_assignment_devices, iota_reshape_dims=sharding_proto.iota_reshape_dims, iota_transpose_perm=sharding_proto.iota_transpose_perm, replicate_on_last_tile_dim=sharding_proto.replicate_on_last_tile_dim, last_tile_dims=sharding_proto.last_tile_dims, ) else: xla_sharding_v2_proto = None if tf_context.executing_eagerly(): raise ValueError( "A jit function with sharded arguments or results must be used under a `tf.function` context. " "See https://github.com/jax-ml/jax/blob/main/jax/experimental/jax2tf/README.md#support-for-partitioning for a discussion") tf_version = tuple(int(v) for v in tf.__version__.split(".")[:2]) # apply_to_tensor comes from a tensorflow package, check the tensorflow # version to make sure that it has the sharding_v2_proto parameter. if tf_version < (2, 20): return xla_sharding.Sharding(proto=xla_sharding_v1_proto).apply_to_tensor( val, use_sharding_op=True ) return xla_sharding.Sharding(proto=xla_sharding_v1_proto).apply_to_tensor( val, use_sharding_op=True, sharding_v2_proto=xla_sharding_v2_proto ) def _register_checkpoint_pytrees(): """Registers TF custom container types as pytrees.""" m = tf.Module() # The types here are automagically changed by TensorFlow's checkpointing # infrastructure. m.a = (tf.Module(), tf.Module()) m.b = [tf.Module(), tf.Module()] m.c = {"a": tf.Module()} tuple_wrapper = type(m.a) list_wrapper = type(m.b) dict_wrapper = type(m.c) # TF AutoTrackable swaps container types out for wrappers. 
assert tuple_wrapper is not tuple assert list_wrapper is not list assert dict_wrapper is not dict jax.tree_util.register_pytree_node(tuple_wrapper, lambda xs: (tuple(xs), None), lambda _, xs: tuple(xs)) jax.tree_util.register_pytree_node(list_wrapper, lambda xs: (tuple(xs), None), lambda _, xs: list(xs)) jax.tree_util.register_pytree_node( dict_wrapper, lambda s: (tuple(s.values()), tuple(s.keys())), lambda k, xs: dict_wrapper(zip(k, xs))) _register_checkpoint_pytrees()
NativeSerializationImpl
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_image_anchor07.py
{ "start": 315, "end": 1108 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("image_anchor07.xlsx") def test_create_file(self): """Test the creation of a simple XlsxWriter file with image(s).""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() worksheet.insert_image("A1", self.image_dir + "blue.png") worksheet.insert_image("B3", self.image_dir + "red.jpg", {"positioning": 3}) worksheet.insert_image("D5", self.image_dir + "yellow.jpg", {"positioning": 2}) worksheet.insert_image("F9", self.image_dir + "grey.png", {"positioning": 1}) workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
getsentry__sentry
src/sentry/workflow_engine/migrations/0068_migrate_anomaly_detection_alerts.py
{ "start": 556, "end": 631 }
class ____(IntEnum): LOW = 25 MEDIUM = 50 HIGH = 75
PriorityLevel
python
ansible__ansible
hacking/update-sanity-requirements.py
{ "start": 579, "end": 5744 }
class ____: name: str requirements_path: pathlib.Path source_path: pathlib.Path def freeze_requirements(self) -> None: source_requirements = [packaging.requirements.Requirement(re.sub(' #.*$', '', line)) for line in self.source_path.read_text().splitlines()] install_packages = {requirement.name for requirement in source_requirements} exclude_packages = {'distribute', 'pip', 'setuptools', 'wheel'} - install_packages with tempfile.TemporaryDirectory() as venv_dir: venv.create(venv_dir, with_pip=True) python = pathlib.Path(venv_dir, 'bin', 'python') pip = [python, '-m', 'pip', '--disable-pip-version-check'] env = dict() pip_freeze = subprocess.run(pip + ['freeze'], env=env, check=True, capture_output=True, text=True) if pip_freeze.stdout: raise Exception(f'Initial virtual environment is not empty:\n{pip_freeze.stdout}') subprocess.run(pip + ['install', '-r', self.source_path], env=env, check=True) freeze_options = ['--all'] for exclude_package in exclude_packages: freeze_options.extend(('--exclude', exclude_package)) pip_freeze = subprocess.run(pip + ['freeze'] + freeze_options, env=env, check=True, capture_output=True, text=True) self.write_requirements(pip_freeze.stdout) def update_pre_build(self) -> None: """Update requirements in place with current pre-build instructions.""" requirements = pathlib.Path(self.requirements_path).read_text() lines = requirements.splitlines(keepends=True) lines = [line for line in lines if not line.startswith('#')] requirements = ''.join(lines) self.write_requirements(requirements) def write_requirements(self, requirements: str) -> None: """Write the given test requirements to the requirements file for this test.""" pre_build = pre_build_instructions(requirements) requirements = f'# edit "{self.source_path.name}" and generate with: {SELF} --test {self.name}\n{pre_build}{requirements}' with open(self.requirements_path, 'w') as requirement_file: requirement_file.write(requirements) @staticmethod def create(path: pathlib.Path) -> 
SanityTest: return SanityTest( name=path.stem.replace('sanity.', '').replace('.requirements', ''), requirements_path=path, source_path=path.with_suffix('.in'), ) def pre_build_instructions(requirements: str) -> str: """Parse the given requirements and return any applicable pre-build instructions.""" parsed_requirements = requirements.splitlines() package_versions = { match.group('package').lower(): match.group('version') for match in (re.search('^(?P<package>.*)==(?P<version>.*)$', requirement) for requirement in parsed_requirements) if match } instructions: list[str] = [] build_constraints = ( ('pyyaml', '>= 5.4, <= 6.0', ('Cython < 3.0',)), ) for package, specifier, constraints in build_constraints: version_string = package_versions.get(package) if version_string: version = packaging.version.Version(version_string) specifier_set = packaging.specifiers.SpecifierSet(specifier) if specifier_set.contains(version): instructions.append(f'# pre-build requirement: {package} == {version}\n') for constraint in constraints: instructions.append(f'# pre-build constraint: {constraint}\n') return ''.join(instructions) def main() -> None: tests = find_tests() parser = argparse.ArgumentParser() parser.add_argument( '--test', metavar='TEST', dest='test_names', action='append', choices=[test.name for test in tests], help='test requirements to update' ) parser.add_argument( '--pre-build-only', action='store_true', help='apply pre-build instructions to existing requirements', ) if argcomplete: argcomplete.autocomplete(parser) args = parser.parse_args() test_names: set[str] = set(args.test_names or []) tests = [test for test in tests if test.name in test_names] if test_names else tests for test in tests: print(f'===[ {test.name} ]===', flush=True) if args.pre_build_only: test.update_pre_build() else: test.freeze_requirements() def find_tests() -> t.List[SanityTest]: globs = ( 'test/lib/ansible_test/_data/requirements/sanity.*.txt', 'test/sanity/code-smell/*.requirements.txt', ) tests: 
t.List[SanityTest] = [] for glob in globs: tests.extend(get_tests(pathlib.Path(glob))) return sorted(tests, key=lambda test: test.name) def get_tests(glob: pathlib.Path) -> t.List[SanityTest]: path = pathlib.Path(ROOT, glob.parent) pattern = glob.name return [SanityTest.create(item) for item in path.glob(pattern)] if __name__ == '__main__': main()
SanityTest
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 973690, "end": 974446 }
class ____(sgqlc.types.relay.Connection): """The connection type for SponsorsActivity.""" __schema__ = github_schema __field_names__ = ("edges", "nodes", "page_info", "total_count") edges = sgqlc.types.Field(sgqlc.types.list_of("SponsorsActivityEdge"), graphql_name="edges") """A list of edges.""" nodes = sgqlc.types.Field(sgqlc.types.list_of("SponsorsActivity"), graphql_name="nodes") """A list of nodes.""" page_info = sgqlc.types.Field(sgqlc.types.non_null(PageInfo), graphql_name="pageInfo") """Information to aid in pagination.""" total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount") """Identifies the total count of items in the connection."""
SponsorsActivityConnection
python
great-expectations__great_expectations
contrib/experimental/great_expectations_experimental/expectations/expect_multicolumn_sum_values_to_be_between.py
{ "start": 2663, "end": 6771 }
class ____(MulticolumnMapExpectation): """Expect a sum of values over the columns to be between max and min values. min_value <= SUM(col_a, cob_b, cob_c, ...) <= max_value Args: column_list (list of str): \ A list of 2 or more integer columns min_value (int): \ A value that the sum of values over the column must be equal to or more than the given value max_value (int): \ A value that the sum of values over the column must be equal to or less than the given value """ # These examples will be shown in the public gallery. # They will also be executed as unit tests for your Expectation. examples = [ { "data": { "col_a": [3, 6, 9, 12, 3], "col_b": [0, 3, 6, 33, 9], "col_c": [1, 5, 6, 27, 3], }, "only_for": ["pandas", "spark"], "tests": [ { "title": "multi_column_sum_to_equal_range_2-set_positive_test", "exact_match_out": False, "include_in_gallery": True, "in": { "column_list": ["col_a", "col_b"], "min_value": 1, "max_value": 50, }, "out": { "success": True, }, }, { "title": "multi_column_sum_to_equal_range_3-set_positive_test", "exact_match_out": False, "include_in_gallery": True, "in": { "column_list": ["col_a", "col_b", "col_c"], "min_value": 4, "max_value": 72, }, "out": { "success": True, }, }, { "title": "multi_column_sum_to_equal_range_2-set_negative_test", "exact_match_out": False, "include_in_gallery": True, "in": { "column_list": ["col_a", "col_b"], "min_value": 10, "max_value": 30, }, "out": { "success": False, }, }, { "title": "multi_column_sum_to_equal_range_3-set_negative_test", "exact_match_out": False, "include_in_gallery": True, "in": { "column_list": ["col_a", "col_b", "col_c"], "min_value": 10, "max_value": 20, }, "out": { "success": False, }, }, ], } ] # This is the id string of the Metric used by this Expectation. # For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above. 
map_metric = "multicolumn_values.sum_values_to_be_between_max_and_min" # This is a list of parameter names that can affect whether the Expectation evaluates to True or False success_keys = ( "min_value", "max_value", ) # This dictionary contains default values for any parameters that should have default values default_kwarg_values = {} # This object contains metadata for display in the public Gallery library_metadata = { "tags": [ "multi-column sum to be between min and max", "multi-column expectation", ], # Tags for this Expectation in the Gallery "contributors": [ # Github handles for all contributors to this Expectation. "@swittchawa", # Don't forget to add your github handle here! ], } if __name__ == "__main__": ExpectMulticolumnSumValuesToBeBetween().print_diagnostic_checklist()
ExpectMulticolumnSumValuesToBeBetween
python
sympy__sympy
sympy/categories/baseclasses.py
{ "start": 11648, "end": 15481 }
class ____(Basic): r""" An (abstract) category. Explanation =========== A category [JoyOfCats] is a quadruple `\mbox{K} = (O, \hom, id, \circ)` consisting of * a (set-theoretical) class `O`, whose members are called `K`-objects, * for each pair `(A, B)` of `K`-objects, a set `\hom(A, B)` whose members are called `K`-morphisms from `A` to `B`, * for a each `K`-object `A`, a morphism `id:A\rightarrow A`, called the `K`-identity of `A`, * a composition law `\circ` associating with every `K`-morphisms `f:A\rightarrow B` and `g:B\rightarrow C` a `K`-morphism `g\circ f:A\rightarrow C`, called the composite of `f` and `g`. Composition is associative, `K`-identities are identities with respect to composition, and the sets `\hom(A, B)` are pairwise disjoint. This class knows nothing about its objects and morphisms. Concrete cases of (abstract) categories should be implemented as classes derived from this one. Certain instances of :class:`Diagram` can be asserted to be commutative in a :class:`Category` by supplying the argument ``commutative_diagrams`` in the constructor. Examples ======== >>> from sympy.categories import Object, NamedMorphism, Diagram, Category >>> from sympy import FiniteSet >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> d = Diagram([f, g]) >>> K = Category("K", commutative_diagrams=[d]) >>> K.commutative_diagrams == FiniteSet(d) True See Also ======== Diagram """ def __new__(cls, name, objects=EmptySet, commutative_diagrams=EmptySet): if not name: raise ValueError("A Category cannot have an empty name.") if not isinstance(name, Str): name = Str(name) if not isinstance(objects, Class): objects = Class(objects) new_category = Basic.__new__(cls, name, objects, FiniteSet(*commutative_diagrams)) return new_category @property def name(self): """ Returns the name of this category. 
Examples ======== >>> from sympy.categories import Category >>> K = Category("K") >>> K.name 'K' """ return self.args[0].name @property def objects(self): """ Returns the class of objects of this category. Examples ======== >>> from sympy.categories import Object, Category >>> from sympy import FiniteSet >>> A = Object("A") >>> B = Object("B") >>> K = Category("K", FiniteSet(A, B)) >>> K.objects Class({Object("A"), Object("B")}) """ return self.args[1] @property def commutative_diagrams(self): """ Returns the :class:`~.FiniteSet` of diagrams which are known to be commutative in this category. Examples ======== >>> from sympy.categories import Object, NamedMorphism, Diagram, Category >>> from sympy import FiniteSet >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> d = Diagram([f, g]) >>> K = Category("K", commutative_diagrams=[d]) >>> K.commutative_diagrams == FiniteSet(d) True """ return self.args[2] def hom(self, A, B): raise NotImplementedError( "hom-sets are not implemented in Category.") def all_morphisms(self): raise NotImplementedError( "Obtaining the class of morphisms is not implemented in Category.")
Category
python
getsentry__sentry
tests/sentry/integrations/discord/message_builder/test_action_row.py
{ "start": 444, "end": 2193 }
class ____(TestCase): def test_empty(self) -> None: action_row = DiscordActionRow([]) result = action_row.build() assert result == { "type": 1, "components": [], } def test_non_empty(self) -> None: custom_button = DiscordButton( style=DiscordButtonStyle.PRIMARY, custom_id="test_button", label="a custom button", disabled=True, ) link_button = DiscordLinkButton( label="a link", url="https://sentry.io", ) custom_component = DiscordMessageComponent( type=9 ) # not a real type number, just testing custom component action_row = DiscordActionRow( [ link_button, custom_button, custom_component, ] ) result = action_row.build() assert result == { "type": 1, "components": [ { "type": 2, "style": 5, "label": "a link", "url": "https://sentry.io", "disabled": False, }, { "type": 2, "style": 1, "custom_id": "test_button", "label": "a custom button", "disabled": True, }, { "type": 9, }, ], } def test_action_row_error(self) -> None: nested_row = DiscordActionRow([]) with pytest.raises(DiscordActionRowError): DiscordActionRow([nested_row])
TestDiscordActionRow