Columns (name, type, value stats):

  language     stringclasses   1 value
  repo         stringclasses   346 values
  path         stringlengths   6 to 201
  class_span   dict
  source       stringlengths   21 to 2.38M
  target       stringlengths   1 to 96
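Each row below pairs a Python class body whose name has been masked with "____" (the source column) against the original class name (the target column); class_span appears to record the character offsets of that class within the file at path. A minimal sketch of putting one row back together, assuming the rows are available as plain Python dicts; the helper name restore_class_name and the abridged row literal are illustrative only, not part of any published API:

# Minimal sketch: rebuild the original class definition for the first preview
# row by substituting the target name back into the masked source. Only the
# standard library is used; the row literal is abridged from the sample above.

def restore_class_name(source: str, target: str) -> str:
    # The source column masks the class name with "____"; put the target back.
    return source.replace("____", target, 1)

row = {
    "language": "python",
    "repo": "facelessuser__pymdown-extensions",
    "path": "tests/test_extensions/test_highlight.py",
    "class_span": {"start": 14100, "end": 14857},
    "source": 'class ____(util.MdCase):\n    """Test with line numbers globally disabled with no Pygments."""',
    "target": "TestDisabledLinenumsNoPygments",
}

restored = restore_class_name(row["source"], row["target"])
print(restored.splitlines()[0])
# class TestDisabledLinenumsNoPygments(util.MdCase):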
python
facelessuser__pymdown-extensions
tests/test_extensions/test_highlight.py
{ "start": 14100, "end": 14857 }
class ____(util.MdCase): """Test with line numbers globally disabled with no Pygments.""" extension = ['pymdownx.highlight', 'pymdownx.superfences'] extension_configs = { 'pymdownx.highlight': { 'linenums': False, 'use_pygments': False } } def test_global_disable(self): """Test with line numbers globally disabled and no Pygments.""" self.check_markdown( r''' ```python linenums="1" import test test.test() ``` ''', r''' <p><code>python linenums="1" import test test.test()</code></p> ''', True )
TestDisabledLinenumsNoPygments
python
pytorch__pytorch
torch/_inductor/codegen/cpp_utils.py
{ "start": 10930, "end": 28276 }
class ____: """ This class creates a context that helps to generate code involving Inductor IR with function local buffers. These buffers are constructed during the codegen process and are used to store intermediate results such as local accumulators. We do not want to add them to `V.graph` since they are not global and we do not want to add them as function arguments either. So we patch the codegen processes under this scope to support these buffers without exposure to the outside world. """ def __init__(self, kernel_args: KernelArgs) -> None: self.kernel_args = kernel_args self.exit_stack = contextlib.ExitStack() # map local buffer name to local buffer self.local_buffers: dict[str, ir.Buffer] = {} # map global buffer name to global buffer self.global_buffers: dict[str, ir.Buffer] = {} # map global buffer name to local buffer self.global_to_local: dict[str, ir.Buffer] = {} # record the global buffers that are removed by this LocalBufferContext self.removed_buffers: OrderedSet[str] = OrderedSet() def __enter__(self): self.exit_stack.__enter__() original_get_dtype = V.graph.get_dtype def get_dtype(name): if name in self.local_buffers: return self.local_buffers[name].get_dtype() return original_get_dtype(name) self.exit_stack.enter_context(patch.object(V.graph, "get_dtype", get_dtype)) original_input = self.kernel_args.input def input(name): if name in self.local_buffers: return name return original_input(name) self.exit_stack.enter_context(patch.object(self.kernel_args, "input", input)) original_output = self.kernel_args.output def output(name): if name in self.local_buffers: return name return original_output(name) self.exit_stack.enter_context(patch.object(self.kernel_args, "output", output)) # Set current LocalBufferContext into V self.exit_stack.enter_context(V.set_local_buffer_context(self)) return self def __exit__(self, exc_type, exc_val, exc_tb): self.local_buffers.clear() self.exit_stack.__exit__(exc_type, exc_val, exc_tb) def add_local_buffer( self, local_buffer: ir.Buffer, global_buffers: Optional[list[ir.Buffer]] = None ): assert local_buffer.get_name() not in self.local_buffers self.local_buffers[local_buffer.get_name()] = local_buffer if global_buffers: for global_buffer in global_buffers: global_buffer_name = global_buffer.get_name() assert ( global_buffer_name not in self.global_buffers and global_buffer_name not in self.global_to_local ) self.global_buffers[global_buffer_name] = global_buffer self.global_to_local[global_buffer_name] = local_buffer if global_buffer_name not in V.graph.removed_buffers: # Record the global buffers that are removed by this LocalBufferContext # since which may need to restore. 
Refer to issue: # https://github.com/pytorch/pytorch/issues/144186 self.removed_buffers.add(global_buffer_name) V.graph.removed_buffers.add(global_buffer_name) def localize_function( self, fn: Callable[..., Any], rewrite_index: Callable[ ["LocalizeBufferHandler", sympy.Expr, str], sympy.Expr ] = rewrite_index_for_function, ): def inner(*args, **kwargs): with V.set_ops_handler( LocalizeBufferHandler( V.get_ops_handler(), global_to_local=self.global_to_local, rewrite_index=rewrite_index, ) ): return fn(*args, **kwargs) return inner def localize_nodes( self, nodes: list[ir.IRNode], rewrite_index: Callable[ ["LocalizeBufferHandler", sympy.Expr, str], sympy.Expr ] = rewrite_index_for_nodes, ) -> list[ir.IRNode]: """ Given `local_buf` and `global_buf` registered in current `LocalBufferContext` though the method of `add_local_buffer`, localizes the `global_buf` to `local_buf` for the given `nodes` and returns a new list of IR nodes that work on `local_buf` instead of `global_buf`, i.e., all the loads and stores are redirected to `local_buf`. This helps the fused loops to work on smaller-sized local buffers for better data locality. The data access of `local_buf` is assumed to be contiguous with the same order as the `global_buf`. """ assert len(nodes) > 0 def wrap_inner_fn_for_node(node: ir.IRNode): loops = node.data if isinstance(node, ir.ComputedBuffer) else node assert isinstance(loops, ir.Loops) new_inner_fn = self.localize_function( loops.inner_fn, rewrite_index, ) new_loops = dataclasses.replace(loops, inner_fn=new_inner_fn) if isinstance(node, ir.ComputedBuffer): new_node = ir.ComputedBuffer( name=node.get_name(), layout=node.get_layout(), data=new_loops ) else: new_node = new_loops # type: ignore[assignment] return new_node return [wrap_inner_fn_for_node(node) for node in nodes] def unify_mask_base_type( buffer: IndentedBuffer, vars: tuple[CSEVariable, ...], dtype=torch.float, ): """ Given list of cse variables, Cast each to new mask base dtype and return casted cse variable. """ new_vars = ( V.kernel.cse.generate( buffer, f"{V.kernel._get_mask_cast(var, dtype)}", ) for var in vars ) return new_vars def may_unify_binary_op_mask_type(a, b): """ Given two cse variables, when dtype is bool, unify them to the same mask dtype and return casted cse variable. 
""" if a.dtype == torch.bool: assert b.dtype == torch.bool mask_dtype = torch.int32 return unify_mask_base_type(V.kernel.compute, (a, b), mask_dtype) return a, b def codegen_rand(offset, code, rand_function, dst_dtype=torch.float32): assert is_integer_dtype(offset.dtype) code.writeline("[&]()") with code.indent(): code.writeline( f"{DTYPE_TO_CPP[offset.dtype]} offset[{V.kernel.tiling_factor}];" ) code.writeline(f"{DTYPE_TO_CPP[dst_dtype]} result[{V.kernel.tiling_factor}];") code.writeline(f"{offset}.store(offset);") code.writeline( f"for( {DTYPE_TO_CPP[offset.dtype]} offset_idx = 0; offset_idx < {V.kernel.tiling_factor}; offset_idx++ )" ) with code.indent(): code.writeline(rand_function) num_vectors = V.kernel._get_num_vectors(dtype=dst_dtype) if num_vectors == 1: code.writeline( f"return at::vec::Vectorized<{DTYPE_TO_CPP[dst_dtype]}>::loadu(result);" ) else: code.writeline( f"return at::vec::VectorizedN<{DTYPE_TO_CPP[dst_dtype]}, {num_vectors}>::loadu(result);" ) code.writeline("()") return code def get_gemm_template_output_and_compute_dtype(input_dtype): if input_dtype in [torch.uint8, torch.int8]: return (torch.int32, torch.int32) else: return (torch.float32, torch.float32) def create_epilogue_with_attr(input_buffer, attr, **kwargs): input_loader = input_buffer.make_loader() dtype = input_buffer.get_dtype() if attr == "relu": def inner_fn(index): input = input_loader(index) zero = ops.constant(0, dtype) return ops.maximum(input, zero) elif attr == "gelu": assert "algorithm" in kwargs if kwargs["algorithm"] == "none": def inner_fn(index): input = input_loader(index) if dtype != torch.float: input = ops.to_dtype(input, torch.float) half = ops.constant(0.5, torch.float) one = ops.constant(1.0, torch.float) const = ops.constant(0.7071067811865476, torch.float) result = input * half * (ops.erf(input * const) + one) if dtype != torch.float: result = ops.to_dtype(result, dtype) return result else: assert kwargs["algorithm"] == "tanh" def inner_fn(index): input = input_loader(index) if dtype != torch.float: input = ops.to_dtype(input, torch.float) half = ops.constant(0.5, torch.float) one = ops.constant(1.0, torch.float) const1 = ops.constant(0.7978845608028654, torch.float) const2 = ops.constant(0.044715, torch.float) result = ( half * input * ( one + ops.tanh(const1 * (input + const2 * input * input * input)) ) ) if dtype != torch.float: result = ops.to_dtype(result, dtype) return result elif attr == "swish": def inner_fn(index): input = input_loader(index) result = input * ops.sigmoid(input) return result elif attr == "sigmoid": def inner_fn(index): return ops.sigmoid(input_loader(index)) elif attr == "tanh": def inner_fn(index): return ops.tanh(input_loader(index)) elif attr == "hardswish" or attr == "hardsigmoid": def hardsigmoid_float(input): zero = ops.constant(0, torch.float) six = ops.constant(6, torch.float) three = ops.constant(3, torch.float) one_over_six = ops.constant(0.16666666666666666, torch.float) max = ops.maximum(input + three, zero) min = ops.minimum(max, six) return min * one_over_six def inner_fn(index): input = input_loader(index) if dtype != torch.float: input = ops.to_dtype(input, torch.float) result = hardsigmoid_float(input) if attr == "hardswish": result = input * result if dtype != torch.float: result = ops.to_dtype(result, dtype) return result elif attr == "leaky_relu": assert "scalars" in kwargs assert len(kwargs["scalars"]) == 1 negative_slope = kwargs["scalars"][0] def inner_fn(index): input = input_loader(index) if dtype != torch.float: input = 
ops.to_dtype(input, torch.float) zero = ops.constant(0, torch.float) result = ops.where( input > zero, input, input * ops.constant(negative_slope, torch.float) ) if dtype != torch.float: result = ops.to_dtype(result, dtype) return result elif attr == "hardtanh": assert "scalars" in kwargs assert len(kwargs["scalars"]) == 2 min_value = kwargs["scalars"][0] max_value = kwargs["scalars"][1] def inner_fn(index): input = input_loader(index) if dtype != torch.float: input = ops.to_dtype(input, torch.float) result = ops.minimum( ops.maximum(input, ops.constant(min_value, torch.float)), ops.constant(max_value, torch.float), ) if dtype != torch.float: result = ops.to_dtype(result, dtype) return result elif attr in ["add", "sub", "mul"]: assert "other" in kwargs other = kwargs["other"] num_input_dims = len(input_buffer.get_size()) num_other_dims = len(other.get_size()) dims_diff = num_input_dims - num_other_dims other_loader = other.make_loader() def inner_fn(index): op = getattr(ops, attr) if dims_diff != 0: return op(input_loader(index), other_loader(index[dims_diff:])) else: return op(input_loader(index), other_loader(index)) elif attr == "bias_add": assert "other" in kwargs assert "beta" in kwargs assert "dtype" in kwargs beta = kwargs["beta"] other = kwargs["other"] dtype = kwargs["dtype"] bias_loader = other.make_loader() def inner_fn(index): bias = bias_loader(index) input = input_loader(index) if beta != 1: result = ops.constant(beta, torch.float) * bias + input else: result = bias + input return result else: raise ValueError(f"Unsupported epilogue attribute: {attr}") return ir.Pointwise( device=input_buffer.get_device(), dtype=dtype, inner_fn=inner_fn, ranges=input_buffer.get_size(), ) def _get_loop_body(fn_list): if all(isinstance(fn, LoopBody) for fn in fn_list): loop_bodies = fn_list else: if hasattr(fn_list[0], "original_fn"): # For the case of local buffer, we wrap the fn with localize_function assert all(hasattr(fn, "original_fn") for fn in fn_list) assert all( isinstance(fn.original_fn.args[0]._body, LoopBody) for fn in fn_list ) loop_bodies = [fn.original_fn.args[0]._body for fn in fn_list] else: assert all(isinstance(fn, functools.partial) for fn in fn_list) assert all(isinstance(fn.args[0]._body, LoopBody) for fn in fn_list) loop_bodies = [fn.args[0]._body for fn in fn_list] assert loop_bodies is not None return loop_bodies def _get_dtype_from_loopbodies(loop_bodies): dtypes = OrderedSet[torch.dtype]() for loop_body in loop_bodies: graphs = [loop_body.root_block.graph] + [ body.graph for body in list(loop_body.subblocks.values()) ] for graph in graphs: for node in graph.nodes: if node.op != "call_method": continue dtypes.add(node.meta[OptimizationContext.key].dtype) return dtypes def template_fusion_with_epilogues_supported( template: BaseSchedulerNode, epilogues: list[BaseSchedulerNode] ) -> tuple[bool, bool]: def _get_indexes_of_template_buf_read( epilogue_node: ir.Operation, template_buf_names: list[str] ) -> list[sympy.Expr]: return [ read.index for read in epilogue_node.get_reads() if read.name in template_buf_names ] def _check_supported_and_same_indexes( index_of_template_buf_read: Sequence[sympy.Expr], epilogue_writes: OrderedSet[Dep], ) -> tuple[bool, bool]: num_indexes = len(OrderedSet(index_of_template_buf_read)) if num_indexes > 1: same_index = False supported = False # Different read indexes not supported elif num_indexes == 0: same_index = True supported = True # No reads, automatically supported elif num_indexes == 1: iotbr = index_of_template_buf_read[0] same_index 
= all(write.index == iotbr for write in epilogue_writes) # TODO: Add support of fusion when the read of template buffer and the write of epilogue output # in the epilogue node don't have the same index and change supported to True supported = same_index else: raise AssertionError("Should not reach here") return supported, same_index def _template_fusion_supported( template_outputs: Sequence[SchedulerBuffer], epilogue_nodes: list[ir.Operation] ) -> tuple[bool, bool]: template_buf_names = [x.get_name() for x in template_outputs] indexes_of_template_buf_reads = [ _get_indexes_of_template_buf_read(epilogue_node, template_buf_names) for epilogue_node in epilogue_nodes ] epilogue_nodes_writes = [ epilogue_node.get_read_writes().writes for epilogue_node in epilogue_nodes ] results = [ _check_supported_and_same_indexes(reads, writes) for reads, writes in zip( indexes_of_template_buf_reads, epilogue_nodes_writes ) ] supported, same_indexes = zip(*results) return all(supported), all(same_indexes) assert template.is_template() template_outputs = template.get_outputs() epilogue_nodes = [ n.node for epilogue in epilogues for n in epilogue.get_nodes() if n.node is not None ] return _template_fusion_supported(template_outputs, epilogue_nodes)
LocalBufferContext
python
pytorch__pytorch
torch/distributions/transforms.py
{ "start": 6657, "end": 8515 }
class ____(Transform): """ Inverts a single :class:`Transform`. This class is private; please instead use the ``Transform.inv`` property. """ def __init__(self, transform: Transform) -> None: super().__init__(cache_size=transform._cache_size) self._inv: Transform = transform # type: ignore[assignment] @constraints.dependent_property(is_discrete=False) # pyrefly: ignore [bad-override] def domain(self): assert self._inv is not None return self._inv.codomain @constraints.dependent_property(is_discrete=False) # pyrefly: ignore [bad-override] def codomain(self): assert self._inv is not None return self._inv.domain @property def bijective(self) -> bool: # type: ignore[override] assert self._inv is not None return self._inv.bijective @property def sign(self) -> int: assert self._inv is not None return self._inv.sign @property def inv(self) -> Transform: return self._inv def with_cache(self, cache_size=1): assert self._inv is not None return self.inv.with_cache(cache_size).inv def __eq__(self, other): if not isinstance(other, _InverseTransform): return False assert self._inv is not None return self._inv == other._inv def __repr__(self): return f"{self.__class__.__name__}({repr(self._inv)})" def __call__(self, x): assert self._inv is not None return self._inv._inv_call(x) def log_abs_det_jacobian(self, x, y): assert self._inv is not None return -self._inv.log_abs_det_jacobian(y, x) def forward_shape(self, shape): return self._inv.inverse_shape(shape) def inverse_shape(self, shape): return self._inv.forward_shape(shape)
_InverseTransform
python
walkccc__LeetCode
solutions/3551. Minimum Swaps to Sort by Digit Sum/3551.py
{ "start": 0, "end": 598 }
class ____: def minSwaps(self, nums: list[int]) -> int: ans = 0 seen = set() sortedNums = sorted(nums, key=lambda x: (self._getDigitSum(x), x)) numToIndex = {num: i for i, num in enumerate(sortedNums)} for i, num in enumerate(nums): if i in seen or numToIndex[num] == i: continue cycleSize = 0 j = i while j not in seen: seen.add(j) j = numToIndex[nums[j]] cycleSize += 1 ans += max(cycleSize - 1, 0) return ans def _getDigitSum(self, num: int) -> int: return sum(int(digit) for digit in str(num))
Solution
python
huggingface__transformers
src/transformers/models/sam2_video/modeling_sam2_video.py
{ "start": 43859, "end": 44536 }
class ____(nn.Module): def __init__(self, config: Sam2VideoConfig, in_channels: int, out_channels: int): super().__init__() self.conv = nn.Conv2d( in_channels, out_channels, kernel_size=config.mask_downsampler_kernel_size, stride=config.mask_downsampler_stride, padding=config.mask_downsampler_padding, ) self.layer_norm = Sam2VideoLayerNorm(out_channels, eps=1e-6, data_format="channels_first") self.activation = ACT2FN[config.mask_downsampler_hidden_act] def forward(self, x): return self.activation(self.layer_norm(self.conv(x)))
Sam2VideoMaskDownSamplerLayer
python
pytorch__pytorch
torch/_inductor/pattern_matcher.py
{ "start": 28007, "end": 28159 }
class ____(_TargetArgsExpr): """ Matches a call_module node in the FX graphs: `module(*args, **kwargs)` """ op = "call_module"
CallModule
python
huggingface__transformers
examples/modular-transformers/configuration_new_model.py
{ "start": 678, "end": 7923 }
class ____(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`NewModelModel`]. It is used to instantiate an NewModel model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the NewModel-7B. e.g. [google/new_model-7b](https://huggingface.co/google/new_model-7b) Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 256000): Vocabulary size of the NewModel model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`NewModelModel`] hidden_size (`int`, *optional*, defaults to 3072): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 24576): Dimension of the MLP representations. num_hidden_layers (`int`, *optional*, defaults to 28): Number of hidden layers in the Transformer decoder. num_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer decoder. num_key_value_heads (`int`, *optional*, defaults to 16): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details, check out [this paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `num_attention_heads`. head_dim (`int`, *optional*, defaults to 256): The attention head dimension. hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`): The legacy activation function. It is overwritten by the `hidden_activation`. max_position_embeddings (`int`, *optional*, defaults to 8192): The maximum sequence length that this model might ever be used with. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. rms_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the rms normalization layers. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. pad_token_id (`int`, *optional*, defaults to 0): Padding token id. eos_token_id (`int`, *optional*, defaults to 1): End of stream token id. bos_token_id (`int`, *optional*, defaults to 2): Beginning of stream token id. tie_word_embeddings (`bool`, *optional*, defaults to `True`): Whether to tie weight embeddings rope_theta (`float`, *optional*, defaults to 10000.0): The base period of the RoPE embeddings. attention_bias (`bool`, defaults to `False`, *optional*, defaults to `False`): Whether to use a bias in the query, key, value and output projection layers during self-attention. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. layer_types (`list`, *optional*): Attention pattern for each layer. 
use_bidirectional_attention (`bool`, *optional*): If True, the model will attend to all text tokens instead of using a causal mask. ```python >>> from transformers import NewModelModel, NewModelConfig >>> # Initializing a NewModel new_model-7b style configuration >>> configuration = NewModelConfig() >>> # Initializing a model from the new_model-7b style configuration >>> model = NewModelModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "new_model" keys_to_ignore_at_inference = ["past_key_values"] base_model_tp_plan = { "layers.*.self_attn.q_proj": "colwise", "layers.*.self_attn.k_proj": "colwise", "layers.*.self_attn.v_proj": "colwise", "layers.*.self_attn.o_proj": "rowwise", "layers.*.mlp.gate_proj": "colwise", "layers.*.mlp.up_proj": "colwise", "layers.*.mlp.down_proj": "rowwise", } base_model_pp_plan = { "embed_tokens": (["input_ids"], ["inputs_embeds"]), "layers": (["hidden_states", "attention_mask"], ["hidden_states"]), "norm": (["hidden_states"], ["hidden_states"]), } def __init__( self, vocab_size=256030, hidden_size=64, intermediate_size=90, num_hidden_layers=28, num_attention_heads=16, num_key_value_heads=16, head_dim=256, hidden_act="gelu_pytorch_tanh", hidden_activation=None, max_position_embeddings=1500, initializer_range=0.02, rms_norm_eps=1e-6, use_cache=True, pad_token_id=0, eos_token_id=1, bos_token_id=2, tie_word_embeddings=True, rope_theta=10000.0, attention_bias=False, attention_dropout=0.0, use_bidirectional_attention=False, layer_types=None, **kwargs, ): super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, ) self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.head_dim = head_dim self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache self.rope_theta = rope_theta self.attention_bias = attention_bias self.attention_dropout = attention_dropout self.use_bidirectional_attention = use_bidirectional_attention self.layer_types = layer_types if self.layer_types is None: self.layer_types = ["full_attention" for _ in range(self.num_hidden_layers)] layer_type_validation(self.layer_types, self.num_hidden_layers) @property def num_heads(self): return self.num_attention_heads
NewModelConfig
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/typeVarDefault3.py
{ "start": 398, "end": 491 }
class ____(Generic[T2, T1]): ... # This should generate an error because T1 is after T2.
ClassA
python
huggingface__transformers
src/transformers/models/data2vec/modeling_data2vec_text.py
{ "start": 47580, "end": 50586 }
class ____(Data2VecTextPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.data2vec_text = Data2VecTextModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, QuestionAnsweringModelOutput]: outputs = self.data2vec_text( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, return_dict=True, **kwargs, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) __all__ = [ "Data2VecTextForCausalLM", "Data2VecTextForMaskedLM", "Data2VecTextForMultipleChoice", "Data2VecTextForQuestionAnswering", "Data2VecTextForSequenceClassification", "Data2VecTextForTokenClassification", "Data2VecTextModel", "Data2VecTextPreTrainedModel", ]
Data2VecTextForQuestionAnswering
python
tensorflow__tensorflow
tensorflow/python/debug/lib/debug_grappler_test.py
{ "start": 1974, "end": 5080 }
class ____(test_util.TensorFlowTestCase): def setUp(self): super(SessionDebugGrapplerInteractionTest, self).setUp() self._dump_root = tempfile.mkdtemp() self._debug_url = "file://%s" % self._dump_root def tearDown(self): ops.reset_default_graph() if os.path.isdir(self._dump_root): file_io.delete_recursively(self._dump_root) super(SessionDebugGrapplerInteractionTest, self).tearDown() def testArithmeticOptimizationActive(self): """Tests that tfdbg can dump the tensor from nodes created by Grappler.""" with session.Session(config=_grappler_enabled_session_config()) as sess: u = variable_v1.VariableV1([[1, 2], [3, 4]], name="u", dtype=dtypes.float32) # The next two ops should be optimized by Grappler into a single op: # either an AddN op or a Mul op. x = math_ops.add(u, u) x = math_ops.add(x, u) y = math_ops.multiply(x, u) sess.run(variables.global_variables_initializer()) run_options = config_pb2.RunOptions(output_partition_graphs=True) debug_utils.watch_graph( run_options, sess.graph, debug_ops=["DebugIdentity"], debug_urls=[self._debug_url]) run_metadata = config_pb2.RunMetadata() run_result = sess.run(y, options=run_options, run_metadata=run_metadata) self.assertAllClose(run_result, [[3, 12], [27, 48]]) dump_data = debug_data.DebugDumpDir( self._dump_root, partition_graphs=run_metadata.partition_graphs, validate=True) original_node_names = set(op.name for op in sess.graph.get_operations()) dumped_node_names = set(dump_data.nodes()) grappler_created_node_names = dumped_node_names - original_node_names grappler_removed_node_names = original_node_names - dumped_node_names # Assert that Grappler should have replaced some of the nodes from the # original graph with new nodes. self.assertTrue(grappler_created_node_names) self.assertTrue(grappler_removed_node_names) # Iterate through the nodes created by Grappler. One of them should be # be the result of replacing the original add ops with an AddN op or a # Mul op. found_optimized_node = False for grappler_node_name in grappler_created_node_names: node_op_type = dump_data.node_op_type(grappler_node_name) # Look for the node created by Grappler's arithmetic optimization. if ((test_util.IsMklEnabled() and node_op_type in ("_MklAddN", "Mul")) or (node_op_type in ("AddN", "Mul"))): datum = dump_data.get_tensors(grappler_node_name, 0, "DebugIdentity") self.assertEqual(1, len(datum)) self.assertAllClose(datum[0], [[3, 6], [9, 12]]) found_optimized_node = True break self.assertTrue( found_optimized_node, "Failed to find optimized node created by Grappler's arithmetic " "optimization.") if __name__ == "__main__": googletest.main()
SessionDebugGrapplerInteractionTest
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 920096, "end": 920688 }
class ____(sgqlc.types.Type): """Autogenerated return type of RegenerateEnterpriseIdentityProviderRecoveryCodes """ __schema__ = github_schema __field_names__ = ("client_mutation_id", "identity_provider") client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId") """A unique identifier for the client performing the mutation.""" identity_provider = sgqlc.types.Field("EnterpriseIdentityProvider", graphql_name="identityProvider") """The identity provider for the enterprise."""
RegenerateEnterpriseIdentityProviderRecoveryCodesPayload
python
weaviate__weaviate-python-client
weaviate/rbac/models.py
{ "start": 10333, "end": 10395 }
class ____(_ClusterPermission): pass
ClusterPermissionOutput
python
django__django
django/core/management/commands/testserver.py
{ "start": 135, "end": 2221 }
class ____(BaseCommand): help = "Runs a development server with data from the given fixture(s)." requires_system_checks = [] def add_arguments(self, parser): parser.add_argument( "args", metavar="fixture", nargs="*", help="Path(s) to fixtures to load before running the server.", ) parser.add_argument( "--noinput", "--no-input", action="store_false", dest="interactive", help="Tells Django to NOT prompt the user for input of any kind.", ) parser.add_argument( "--addrport", default="", help="Port number or ipaddr:port to run the server on.", ) parser.add_argument( "--ipv6", "-6", action="store_true", dest="use_ipv6", help="Tells Django to use an IPv6 address.", ) def handle(self, *fixture_labels, **options): verbosity = options["verbosity"] interactive = options["interactive"] # Create a test database. db_name = connection.creation.create_test_db( verbosity=verbosity, autoclobber=not interactive ) # Import the fixture data into the test database. call_command("loaddata", *fixture_labels, verbosity=verbosity) # Run the development server. Turn off auto-reloading because it causes # a strange error -- it causes this handle() method to be called # multiple times. shutdown_message = ( "\nServer stopped.\nNote that the test database, %r, has not been " "deleted. You can explore it on your own." % db_name ) use_threading = connection.features.test_db_allows_multiple_connections call_command( "runserver", addrport=options["addrport"], shutdown_message=shutdown_message, use_reloader=False, use_ipv6=options["use_ipv6"], use_threading=use_threading, )
Command
python
django__django
tests/composite_pk/models/tenant.py
{ "start": 418, "end": 705 }
class ____(models.Model): pk = models.CompositePrimaryKey("tenant_id", "id") tenant = models.ForeignKey(Tenant, on_delete=models.CASCADE) email = models.EmailField(unique=True) id = models.SmallIntegerField(unique=True) class Meta: abstract = True
AbstractUser
python
dagster-io__dagster
python_modules/dagster/dagster/_core/storage/file_manager.py
{ "start": 1965, "end": 8453 }
class ____(ABC): """Base class for all file managers in dagster. The file manager is an interface that can be implemented by resources to provide abstract access to a file system such as local disk, S3, or other cloud storage. For examples of usage, see the documentation of the concrete file manager implementations. """ @public @abstractmethod def copy_handle_to_local_temp(self, file_handle: FileHandle) -> str: """Copy a file represented by a file handle to a temp file. In an implementation built around an object store such as S3, this method would be expected to download the file from S3 to local filesystem in a location assigned by the standard library's :py:mod:`python:tempfile` module. Temp files returned by this method are *not* guaranteed to be reusable across solid boundaries. For files that must be available across solid boundaries, use the :py:meth:`~dagster._core.storage.file_manager.FileManager.read`, :py:meth:`~dagster._core.storage.file_manager.FileManager.read_data`, :py:meth:`~dagster._core.storage.file_manager.FileManager.write`, and :py:meth:`~dagster._core.storage.file_manager.FileManager.write_data` methods. Args: file_handle (FileHandle): The handle to the file to make available as a local temp file. Returns: str: Path to the local temp file. """ raise NotImplementedError() @public @abstractmethod def delete_local_temp(self) -> None: """Delete all local temporary files created by previous calls to :py:meth:`~dagster._core.storage.file_manager.FileManager.copy_handle_to_local_temp`. Should typically only be called by framework implementors. """ raise NotImplementedError() @public @abstractmethod def read(self, file_handle: FileHandle, mode: str = "rb") -> ContextManager[IOStream]: """Return a file-like stream for the file handle. This may incur an expensive network call for file managers backed by object stores such as S3. Args: file_handle (FileHandle): The file handle to make available as a stream. mode (str): The mode in which to open the file. Default: ``"rb"``. Returns: Union[TextIO, BinaryIO]: A file-like stream. """ raise NotImplementedError() @public @abstractmethod def read_data(self, file_handle: FileHandle) -> bytes: """Return the bytes for a given file handle. This may incur an expensive network call for file managers backed by object stores such as s3. Args: file_handle (FileHandle): The file handle for which to return bytes. Returns: bytes: Bytes for a given file handle. """ raise NotImplementedError() @public @abstractmethod def write(self, file_obj: IOStream, mode: str = "wb", ext: Optional[str] = None) -> FileHandle: """Write the bytes contained within the given file object into the file manager. Args: file_obj (Union[TextIO, StringIO]): A file-like object. mode (Optional[str]): The mode in which to write the file into the file manager. Default: ``"wb"``. ext (Optional[str]): For file managers that support file extensions, the extension with which to write the file. Default: ``None``. Returns: FileHandle: A handle to the newly created file. """ raise NotImplementedError() @public @abstractmethod def write_data(self, data: bytes, ext: Optional[str] = None) -> FileHandle: """Write raw bytes into the file manager. Args: data (bytes): The bytes to write into the file manager. ext (Optional[str]): For file managers that support file extensions, the extension with which to write the file. Default: ``None``. Returns: FileHandle: A handle to the newly created file. 
""" raise NotImplementedError() @dagster_maintained_resource @resource(config_schema={"base_dir": Field(StringSource, is_required=False)}) def local_file_manager(init_context: InitResourceContext) -> "LocalFileManager": """FileManager that provides abstract access to a local filesystem. By default, files will be stored in `<local_artifact_storage>/storage/file_manager` where `<local_artifact_storage>` can be configured the ``dagster.yaml`` file in ``$DAGSTER_HOME``. Implements the :py:class:`~dagster._core.storage.file_manager.FileManager` API. Examples: .. code-block:: python import tempfile from dagster import job, local_file_manager, op @op(required_resource_keys={"file_manager"}) def write_files(context): fh_1 = context.resources.file_manager.write_data(b"foo") with tempfile.NamedTemporaryFile("w+") as fd: fd.write("bar") fd.seek(0) fh_2 = context.resources.file_manager.write(fd, mode="w", ext=".txt") return (fh_1, fh_2) @op(required_resource_keys={"file_manager"}) def read_files(context, file_handles): fh_1, fh_2 = file_handles assert context.resources.file_manager.read_data(fh_2) == b"bar" fd = context.resources.file_manager.read(fh_2, mode="r") assert fd.read() == "foo" fd.close() @job(resource_defs={"file_manager": local_file_manager}) def files_pipeline(): read_files(write_files()) Or to specify the file directory: .. code-block:: python @job( resource_defs={ "file_manager": local_file_manager.configured({"base_dir": "/my/base/dir"}) } ) def files_pipeline(): read_files(write_files()) """ return LocalFileManager( base_dir=init_context.resource_config.get( "base_dir", os.path.join(init_context.instance.storage_directory(), "file_manager"), # type: ignore # (possible none) ) ) def check_file_like_obj(obj: object) -> None: check.invariant(obj and hasattr(obj, "read") and hasattr(obj, "write"))
FileManager
python
Lightning-AI__lightning
tests/tests_pytorch/strategies/test_ddp_integration.py
{ "start": 9412, "end": 10037 }
class ____(BoringModel): def on_train_start(self) -> None: # make sure that the model is on CPU when training assert self.device == torch.device("cpu") @RunIf(skip_windows=True) def test_ddp_cpu(): """Tests if device is set correctly when training for DDPStrategy.""" trainer = Trainer(devices=2, strategy="ddp_spawn", accelerator="cpu", fast_dev_run=True) # assert strategy attributes for device setting assert isinstance(trainer.strategy, DDPStrategy) assert trainer.strategy.root_device == torch.device("cpu") model = BoringModelDDPCPU() trainer.fit(model)
BoringModelDDPCPU
python
apache__airflow
airflow-core/src/airflow/models/taskinstance.py
{ "start": 13799, "end": 95649 }
class ____(Base, LoggingMixin): """ Task instances store the state of a task instance. This table is the authority and single source of truth around what tasks have run and the state they are in. The SqlAlchemy model doesn't have a SqlAlchemy foreign key to the task or dag model deliberately to have more control over transactions. Database transactions on this table should insure double triggers and any confusion around what task instances are or aren't ready to run even while multiple schedulers may be firing task instances. A value of -1 in map_index represents any of: a TI without mapped tasks; a TI with mapped tasks that has yet to be expanded (state=pending); a TI with mapped tasks that expanded to an empty list (state=skipped). """ __tablename__ = "task_instance" id: Mapped[str] = mapped_column( String(36).with_variant(postgresql.UUID(as_uuid=False), "postgresql"), primary_key=True, default=uuid7, nullable=False, ) task_id: Mapped[str] = mapped_column(StringID(), nullable=False) dag_id: Mapped[str] = mapped_column(StringID(), nullable=False) run_id: Mapped[str] = mapped_column(StringID(), nullable=False) map_index: Mapped[int] = mapped_column(Integer, nullable=False, server_default=text("-1")) start_date: Mapped[datetime | None] = mapped_column(UtcDateTime, nullable=True) end_date: Mapped[datetime | None] = mapped_column(UtcDateTime, nullable=True) duration: Mapped[float | None] = mapped_column(Float, nullable=True) state: Mapped[str | None] = mapped_column(String(20), nullable=True) try_number: Mapped[int] = mapped_column(Integer, default=0) max_tries: Mapped[int] = mapped_column(Integer, server_default=text("-1")) hostname: Mapped[str] = mapped_column(String(1000)) unixname: Mapped[str] = mapped_column(String(1000)) pool: Mapped[str] = mapped_column(String(256), nullable=False) pool_slots: Mapped[int] = mapped_column(Integer, default=1, nullable=False) queue: Mapped[str] = mapped_column(String(256)) priority_weight: Mapped[int] = mapped_column(Integer) operator: Mapped[str | None] = mapped_column(String(1000), nullable=True) custom_operator_name: Mapped[str] = mapped_column(String(1000)) queued_dttm: Mapped[datetime | None] = mapped_column(UtcDateTime, nullable=True) scheduled_dttm: Mapped[datetime | None] = mapped_column(UtcDateTime, nullable=True) queued_by_job_id: Mapped[int | None] = mapped_column(Integer, nullable=True) last_heartbeat_at: Mapped[datetime | None] = mapped_column(UtcDateTime, nullable=True) pid: Mapped[int | None] = mapped_column(Integer, nullable=True) executor: Mapped[str | None] = mapped_column(String(1000), nullable=True) executor_config: Mapped[dict] = mapped_column(ExecutorConfigType(pickler=dill)) updated_at: Mapped[datetime | None] = mapped_column( UtcDateTime, default=timezone.utcnow, onupdate=timezone.utcnow, nullable=True ) _rendered_map_index: Mapped[str | None] = mapped_column("rendered_map_index", String(250), nullable=True) context_carrier: Mapped[dict | None] = mapped_column(MutableDict.as_mutable(ExtendedJSON), nullable=True) span_status: Mapped[str] = mapped_column( String(250), server_default=SpanStatus.NOT_STARTED, nullable=False ) external_executor_id: Mapped[str | None] = mapped_column(StringID(), nullable=True) # The trigger to resume on if we are in state DEFERRED trigger_id: Mapped[int | None] = mapped_column(Integer, nullable=True) # Optional timeout utcdatetime for the trigger (past this, we'll fail) trigger_timeout: Mapped[datetime | None] = mapped_column(UtcDateTime, nullable=True) # The method to call next, and any extra arguments 
to pass to it. # Usually used when resuming from DEFERRED. next_method: Mapped[str | None] = mapped_column(String(1000), nullable=True) next_kwargs: Mapped[dict | str | None] = mapped_column( MutableDict.as_mutable(ExtendedJSON), nullable=True ) _task_display_property_value: Mapped[str | None] = mapped_column( "task_display_name", String(2000), nullable=True ) dag_version_id: Mapped[str | uuid.UUID | None] = mapped_column( UUIDType(binary=False), ForeignKey("dag_version.id", ondelete="RESTRICT"), nullable=True, ) dag_version = relationship("DagVersion", back_populates="task_instances") __table_args__ = ( Index("ti_dag_state", dag_id, state), Index("ti_dag_run", dag_id, run_id), Index("ti_state", state), Index("ti_state_lkp", dag_id, task_id, run_id, state), Index("ti_pool", pool, state, priority_weight), Index("ti_trigger_id", trigger_id), Index("ti_heartbeat", last_heartbeat_at), PrimaryKeyConstraint("id", name="task_instance_pkey"), UniqueConstraint("dag_id", "task_id", "run_id", "map_index", name="task_instance_composite_key"), ForeignKeyConstraint( [trigger_id], ["trigger.id"], name="task_instance_trigger_id_fkey", ondelete="CASCADE", ), ForeignKeyConstraint( [dag_id, run_id], ["dag_run.dag_id", "dag_run.run_id"], name="task_instance_dag_run_fkey", ondelete="CASCADE", ), ) dag_model: Mapped[DagModel] = relationship( "DagModel", primaryjoin="TaskInstance.dag_id == DagModel.dag_id", foreign_keys=dag_id, uselist=False, innerjoin=True, viewonly=True, ) trigger = relationship("Trigger", uselist=False, back_populates="task_instance") triggerer_job = association_proxy("trigger", "triggerer_job") dag_run = relationship("DagRun", back_populates="task_instances", lazy="joined", innerjoin=True) rendered_task_instance_fields = relationship("RenderedTaskInstanceFields", lazy="noload", uselist=False) hitl_detail = relationship("HITLDetail", lazy="noload", uselist=False) run_after = association_proxy("dag_run", "run_after") logical_date = association_proxy("dag_run", "logical_date") task_instance_note = relationship( "TaskInstanceNote", back_populates="task_instance", uselist=False, cascade="all, delete, delete-orphan", ) note = association_proxy("task_instance_note", "content", creator=_creator_note) task: Operator | None = None test_mode: bool = False is_trigger_log_context: bool = False run_as_user: str | None = None raw: bool | None = None """Indicate to FileTaskHandler that logging context should be set up for trigger logging. :meta private: """ _logger_name = "airflow.task" def __init__( self, task: Operator, dag_version_id: UUIDType | uuid.UUID, run_id: str | None = None, state: str | None = None, map_index: int = -1, ): super().__init__() self.dag_id = task.dag_id self.task_id = task.task_id self.map_index = map_index self.refresh_from_task(task) if TYPE_CHECKING: assert self.task # init_on_load will config the log self.init_on_load() if run_id is not None: self.run_id = run_id self.try_number = 0 self.max_tries = self.task.retries if not self.id: self.id = uuid7() self.unixname = getuser() if state: self.state = state self.hostname = "" # Is this TaskInstance being currently running within `airflow tasks run --raw`. 
# Not persisted to the database so only valid for the current process self.raw = False # can be changed when calling 'run' self.test_mode = False self.dag_version_id = dag_version_id def __hash__(self): return hash((self.task_id, self.dag_id, self.run_id, self.map_index)) @property def stats_tags(self) -> dict[str, str]: """Returns task instance tags.""" return prune_dict({"dag_id": self.dag_id, "task_id": self.task_id}) @staticmethod def insert_mapping( run_id: str, task: Operator, map_index: int, dag_version_id: UUIDType ) -> dict[str, Any]: """ Insert mapping. :meta private: """ priority_weight = task.weight_rule.get_weight( TaskInstance(task=task, run_id=run_id, map_index=map_index, dag_version_id=dag_version_id) ) return { "dag_id": task.dag_id, "task_id": task.task_id, "run_id": run_id, "try_number": 0, "hostname": "", "unixname": getuser(), "queue": task.queue, "pool": task.pool, "pool_slots": task.pool_slots, "priority_weight": priority_weight, "run_as_user": task.run_as_user, "max_tries": task.retries, "executor": task.executor, "executor_config": task.executor_config, "operator": task.task_type, "custom_operator_name": getattr(task, "operator_name", None), "map_index": map_index, "_task_display_property_value": task.task_display_name, "dag_version_id": dag_version_id, } @reconstructor def init_on_load(self) -> None: """Initialize the attributes that aren't stored in the DB.""" self.test_mode = False # can be changed when calling 'run' @property def operator_name(self) -> str | None: """@property: use a more friendly display name for the operator, if set.""" return self.custom_operator_name or self.operator @hybrid_property def task_display_name(self) -> str: return self._task_display_property_value or self.task_id @hybrid_property def rendered_map_index(self) -> str | None: if self._rendered_map_index is not None: return self._rendered_map_index if self.map_index >= 0: return str(self.map_index) return None def to_runtime_ti(self, context_from_server) -> RuntimeTaskInstanceProtocol: from airflow.sdk.execution_time.task_runner import RuntimeTaskInstance runtime_ti = RuntimeTaskInstance.model_construct( id=self.id, task_id=self.task_id, dag_id=self.dag_id, run_id=self.run_id, try_numer=self.try_number, map_index=self.map_index, task=self.task, max_tries=self.max_tries, hostname=self.hostname, _ti_context_from_server=context_from_server, start_date=self.start_date, dag_version_id=self.dag_version_id, ) return runtime_ti @property def log_url(self) -> str: """Log URL for TaskInstance.""" run_id = quote(self.run_id) base_url = conf.get("api", "base_url", fallback="http://localhost:8080/") map_index = f"/mapped/{self.map_index}" if self.map_index >= 0 else "" try_number = f"?try_number={self.try_number}" if self.try_number > 0 else "" _log_uri = f"{base_url.rstrip('/')}/dags/{self.dag_id}/runs/{run_id}/tasks/{self.task_id}{map_index}{try_number}" return _log_uri @property def mark_success_url(self) -> str: """URL to mark TI success.""" return self.log_url @provide_session def error(self, session: Session = NEW_SESSION) -> None: """ Force the task instance's state to FAILED in the database. 
:param session: SQLAlchemy ORM Session """ self.log.error("Recording the task instance as FAILED") self.state = TaskInstanceState.FAILED session.merge(self) session.commit() @classmethod @provide_session def get_task_instance( cls, dag_id: str, run_id: str, task_id: str, map_index: int, lock_for_update: bool = False, session: Session = NEW_SESSION, ) -> TaskInstance | None: query = ( select(TaskInstance) .options(lazyload(TaskInstance.dag_run)) # lazy load dag run to avoid locking it .filter_by( run_id=run_id, task_id=task_id, map_index=map_index, ) ) if lock_for_update: for attempt in run_with_db_retries(logger=cls.logger()): with attempt: return session.execute(query.with_for_update()).scalar_one_or_none() else: return session.execute(query).scalar_one_or_none() return None @provide_session def refresh_from_db( self, session: Session = NEW_SESSION, lock_for_update: bool = False, keep_local_changes: bool = False ) -> None: """ Refresh the task instance from the database based on the primary key. :param session: SQLAlchemy ORM Session :param lock_for_update: if True, indicates that the database should lock the TaskInstance (issuing a FOR UPDATE clause) until the session is committed. :param keep_local_changes: Force all attributes to the values from the database if False (the default), or if True don't overwrite locally set attributes """ query = select( # Select the columns, not the ORM object, to bypass any session/ORM caching layer *TaskInstance.__table__.columns ).filter_by( dag_id=self.dag_id, run_id=self.run_id, task_id=self.task_id, map_index=self.map_index, ) if lock_for_update: query = query.with_for_update() source = session.execute(query).mappings().one_or_none() if source: target_state: Any = inspect(self) if target_state is None: raise RuntimeError(f"Unable to inspect SQLAlchemy state of {type(self)}: {self}") # To deal with `@hybrid_property` we need to get the names from `mapper.columns` for attr_name, col in target_state.mapper.columns.items(): if keep_local_changes and target_state.attrs[attr_name].history.has_changes(): continue set_committed_value(self, attr_name, source[col.name]) # ID may have changed, update SQLAs state and object tracking newkey = session.identity_key(type(self), (self.id,)) # Delete anything under the new key if newkey != target_state.key: old = session.identity_map.get(newkey) if old is not self and old is not None: session.expunge(old) target_state.key = newkey if target_state.attrs.dag_run.loaded_value is not NO_VALUE: dr_key = session.identity_key(type(self.dag_run), (self.dag_run.id,)) if (dr := session.identity_map.get(dr_key)) is not None: set_committed_value(self, "dag_run", dr) else: self.state = None def refresh_from_task(self, task: Operator, pool_override: str | None = None) -> None: """ Copy common attributes from the given task. :param task: The task object to copy from :param pool_override: Use the pool_override instead of task's pool """ self.task = task self.queue = task.queue self.pool = pool_override or task.pool self.pool_slots = task.pool_slots with contextlib.suppress(Exception): # This method is called from the different places, and sometimes the TI is not fully initialized self.priority_weight = self.task.weight_rule.get_weight(self) self.run_as_user = task.run_as_user # Do not set max_tries to task.retries here because max_tries is a cumulative # value that needs to be stored in the db. 
self.executor = task.executor self.executor_config = task.executor_config self.operator = task.task_type op_name = getattr(task, "operator_name", None) self.custom_operator_name = op_name if isinstance(op_name, str) else "" # Re-apply cluster policy here so that task default do not overload previous data task_instance_mutation_hook(self) @property def key(self) -> TaskInstanceKey: """Returns a tuple that identifies the task instance uniquely.""" return TaskInstanceKey(self.dag_id, self.task_id, self.run_id, self.try_number, self.map_index) @provide_session def set_state(self, state: str | None, session: Session = NEW_SESSION) -> bool: """ Set TaskInstance state. :param state: State to set for the TI :param session: SQLAlchemy ORM Session :return: Was the state changed """ if self.state == state: return False current_time = timezone.utcnow() self.log.debug("Setting task state for %s to %s", self, state) if self not in session: self.refresh_from_db(session) self.state = state self.start_date = self.start_date or current_time if self.state in State.finished or self.state == TaskInstanceState.UP_FOR_RETRY: self.end_date = self.end_date or current_time self.duration = (self.end_date - self.start_date).total_seconds() session.merge(self) session.flush() return True @property def is_premature(self) -> bool: """Returns whether a task is in UP_FOR_RETRY state and its retry interval has elapsed.""" # is the task still in the retry waiting period? return self.state == TaskInstanceState.UP_FOR_RETRY and not self.ready_for_retry() def prepare_db_for_next_try(self, session: Session): """Update the metadata with all the records needed to put this TI in queued for the next try.""" from airflow.models.taskinstancehistory import TaskInstanceHistory TaskInstanceHistory.record_ti(self, session=session) session.execute(delete(TaskReschedule).filter_by(ti_id=self.id)) self.id = uuid7() @provide_session def are_dependents_done(self, session: Session = NEW_SESSION) -> bool: """ Check whether the immediate dependents of this task instance have succeeded or have been skipped. This is meant to be used by wait_for_downstream. This is useful when you do not want to start processing the next schedule of a task until the dependents are done. For instance, if the task DROPs and recreates a table. :param session: SQLAlchemy ORM Session """ task = self.task if TYPE_CHECKING: assert task if not task.downstream_task_ids: return True ti = select(func.count(TaskInstance.task_id)).where( TaskInstance.dag_id == self.dag_id, TaskInstance.task_id.in_(task.downstream_task_ids), TaskInstance.run_id == self.run_id, TaskInstance.state.in_((TaskInstanceState.SKIPPED, TaskInstanceState.SUCCESS)), ) count = session.scalar(ti) return count == len(task.downstream_task_ids) @provide_session def get_previous_dagrun( self, state: DagRunState | None = None, session: Session | None = None, ) -> DagRun | None: """ Return the DagRun that ran before this task instance's DagRun. :param state: If passed, it only take into account instances of a specific state. :param session: SQLAlchemy ORM Session. """ if TYPE_CHECKING: assert self.task assert session is not None dag = self.task.dag if dag is None: return None dr = self.get_dagrun(session=session) dr.dag = dag from airflow.models.dagrun import DagRun # Avoid circular import # We always ignore schedule in dagrun lookup when `state` is given # or the DAG is never scheduled. For legacy reasons, when # `catchup=True`, we use `get_previous_scheduled_dagrun` unless # `ignore_schedule` is `True`. 
ignore_schedule = state is not None or not dag.timetable.can_be_scheduled if dag.catchup is True and not ignore_schedule: last_dagrun = DagRun.get_previous_scheduled_dagrun(dr.id, session=session) else: last_dagrun = DagRun.get_previous_dagrun(dag_run=dr, session=session, state=state) if last_dagrun: return last_dagrun return None @provide_session def get_previous_ti( self, state: DagRunState | None = None, session: Session = NEW_SESSION, ) -> TaskInstance | None: """ Return the task instance for the task that ran before this task instance. :param session: SQLAlchemy ORM Session :param state: If passed, it only take into account instances of a specific state. """ dagrun = self.get_previous_dagrun(state, session=session) if dagrun is None: return None return dagrun.get_task_instance(self.task_id, session=session) @provide_session def are_dependencies_met( self, dep_context: DepContext | None = None, session: Session = NEW_SESSION, verbose: bool = False ) -> bool: """ Are all conditions met for this task instance to be run given the context for the dependencies. (e.g. a task instance being force run from the UI will ignore some dependencies). :param dep_context: The execution context that determines the dependencies that should be evaluated. :param session: database session :param verbose: whether log details on failed dependencies on info or debug log level """ dep_context = dep_context or DepContext() failed = False verbose_aware_logger = self.log.info if verbose else self.log.debug for dep_status in self.get_failed_dep_statuses(dep_context=dep_context, session=session): failed = True verbose_aware_logger( "Dependencies not met for %s, dependency '%s' FAILED: %s", self, dep_status.dep_name, dep_status.reason, ) if failed: return False verbose_aware_logger("Dependencies all met for dep_context=%s ti=%s", dep_context.description, self) return True @provide_session def get_failed_dep_statuses(self, dep_context: DepContext | None = None, session: Session = NEW_SESSION): """Get failed Dependencies.""" if TYPE_CHECKING: assert self.task is not None dep_context = dep_context or DepContext() for dep in dep_context.deps | self.task.deps: for dep_status in dep.get_dep_statuses(self, session, dep_context): self.log.debug( "%s dependency '%s' PASSED: %s, %s", self, dep_status.dep_name, dep_status.passed, dep_status.reason, ) if not dep_status.passed: yield dep_status def __repr__(self) -> str: prefix = f"<TaskInstance: {self.dag_id}.{self.task_id} {self.run_id} " if self.map_index != -1: prefix += f"map_index={self.map_index} " return prefix + f"[{self.state}]>" def next_retry_datetime(self): """ Get datetime of the next retry if the task instance fails. For exponential backoff, retry_delay is used as base and will be converted to seconds. """ from airflow.sdk.definitions._internal.abstractoperator import MAX_RETRY_DELAY delay = self.task.retry_delay multiplier = self.task.retry_exponential_backoff if self.task.retry_exponential_backoff != 0 else 1.0 if multiplier != 1.0 and multiplier > 0: try: # If the min_backoff calculation is below 1, it will be converted to 0 via int. Thus, # we must round up prior to converting to an int, otherwise a divide by zero error # will occur in the modded_hash calculation. 
# this probably gives unexpected results if a task instance has previously been cleared, # because try_number can increase without bound min_backoff = math.ceil(delay.total_seconds() * (multiplier ** (self.try_number - 1))) except OverflowError: min_backoff = MAX_RETRY_DELAY self.log.warning( "OverflowError occurred while calculating min_backoff, using MAX_RETRY_DELAY for min_backoff." ) # In the case when delay.total_seconds() is 0, min_backoff will not be rounded up to 1. # To address this, we impose a lower bound of 1 on min_backoff. This effectively makes # the ceiling function unnecessary, but the ceiling function was retained to avoid # introducing a breaking change. if min_backoff < 1: min_backoff = 1 # deterministic per task instance ti_hash = int( hashlib.sha1( f"{self.dag_id}#{self.task_id}#{self.logical_date}#{self.try_number}".encode(), usedforsecurity=False, ).hexdigest(), 16, ) # between 1 and 1.0 * delay * (multiplier^retry_number) modded_hash = min_backoff + ti_hash % min_backoff # timedelta has a maximum representable value. The exponentiation # here means this value can be exceeded after a certain number # of tries (around 50 if the initial delay is 1s, even fewer if # the delay is larger). Cap the value here before creating a # timedelta object so the operation doesn't fail with "OverflowError". delay_backoff_in_seconds = min(modded_hash, MAX_RETRY_DELAY) delay = timedelta(seconds=delay_backoff_in_seconds) if self.task.max_retry_delay: delay = min(self.task.max_retry_delay, delay) return self.end_date + delay def ready_for_retry(self) -> bool: """Check on whether the task instance is in the right state and timeframe to be retried.""" return self.state == TaskInstanceState.UP_FOR_RETRY and self.next_retry_datetime() < timezone.utcnow() @staticmethod def _get_dagrun(dag_id, run_id, session) -> DagRun: from airflow.models.dagrun import DagRun # Avoid circular import dr = session.execute( select(DagRun).where(DagRun.dag_id == dag_id, DagRun.run_id == run_id) ).scalar_one() return dr @provide_session def get_dagrun(self, session: Session = NEW_SESSION) -> DagRun: """ Return the DagRun for this TaskInstance. :param session: SQLAlchemy ORM Session :return: DagRun """ info: Any = inspect(self) if info.attrs.dag_run.loaded_value is not NO_VALUE: if getattr(self, "task", None) is not None: if TYPE_CHECKING: assert self.task self.dag_run.dag = self.task.dag return self.dag_run dr = self._get_dagrun(self.dag_id, self.run_id, session) if getattr(self, "task", None) is not None: if TYPE_CHECKING: assert self.task dr.dag = self.task.dag # Record it in the instance for next time. This means that `self.logical_date` will work correctly set_committed_value(self, "dag_run", dr) return dr @classmethod @provide_session def _check_and_change_state_before_execution( cls, task_instance: TaskInstance, verbose: bool = True, ignore_all_deps: bool = False, ignore_depends_on_past: bool = False, wait_for_past_depends_before_skipping: bool = False, ignore_task_deps: bool = False, ignore_ti_state: bool = False, mark_success: bool = False, test_mode: bool = False, hostname: str = "", pool: str | None = None, external_executor_id: str | None = None, session: Session = NEW_SESSION, ) -> bool: """ Check dependencies and then sets state to RUNNING if they are met. Returns True if and only if state is set to RUNNING, which implies that task should be executed, in preparation for _run_raw_task. 
:param verbose: whether to turn on more verbose logging :param ignore_all_deps: Ignore all of the non-critical dependencies, just runs :param ignore_depends_on_past: Ignore depends_on_past DAG attribute :param wait_for_past_depends_before_skipping: Wait for past depends before mark the ti as skipped :param ignore_task_deps: Don't check the dependencies of this TaskInstance's task :param ignore_ti_state: Disregards previous task instance state :param mark_success: Don't run the task, mark its state as success :param test_mode: Doesn't record success or failure in the DB :param hostname: The hostname of the worker running the task instance. :param pool: specifies the pool to use to run the task instance :param external_executor_id: The identifier of the celery executor :param session: SQLAlchemy ORM Session :return: whether the state was changed to running or not """ if TYPE_CHECKING: assert task_instance.task ti: TaskInstance = task_instance task = task_instance.task ti.refresh_from_task(task, pool_override=pool) ti.test_mode = test_mode ti.refresh_from_db(session=session, lock_for_update=True) ti.hostname = hostname ti.pid = None if not ignore_all_deps and not ignore_ti_state and ti.state == TaskInstanceState.SUCCESS: Stats.incr("previously_succeeded", tags=ti.stats_tags) if not mark_success: # Firstly find non-runnable and non-requeueable tis. # Since mark_success is not set, we do nothing. non_requeueable_dep_context = DepContext( deps=RUNNING_DEPS - REQUEUEABLE_DEPS, ignore_all_deps=ignore_all_deps, ignore_ti_state=ignore_ti_state, ignore_depends_on_past=ignore_depends_on_past, wait_for_past_depends_before_skipping=wait_for_past_depends_before_skipping, ignore_task_deps=ignore_task_deps, description="non-requeueable deps", ) if not ti.are_dependencies_met( dep_context=non_requeueable_dep_context, session=session, verbose=True ): session.commit() return False # For reporting purposes, we report based on 1-indexed, # not 0-indexed lists (i.e. Attempt 1 instead of # Attempt 0 for the first attempt). # Set the task start date. In case it was re-scheduled use the initial # start date that is recorded in task_reschedule table # If the task continues after being deferred (next_method is set), use the original start_date ti.start_date = ti.start_date if ti.next_method else timezone.utcnow() if ti.state == TaskInstanceState.UP_FOR_RESCHEDULE: tr_start_date = session.scalar( TR.stmt_for_task_instance(ti, descending=False).with_only_columns(TR.start_date).limit(1) ) if tr_start_date: ti.start_date = tr_start_date # Secondly we find non-runnable but requeueable tis. We reset its state. # This is because we might have hit concurrency limits, # e.g. because of backfilling. dep_context = DepContext( deps=REQUEUEABLE_DEPS, ignore_all_deps=ignore_all_deps, ignore_depends_on_past=ignore_depends_on_past, wait_for_past_depends_before_skipping=wait_for_past_depends_before_skipping, ignore_task_deps=ignore_task_deps, ignore_ti_state=ignore_ti_state, description="requeueable deps", ) if not ti.are_dependencies_met(dep_context=dep_context, session=session, verbose=True): ti.state = None cls.logger().warning( "Rescheduling due to concurrency limits reached " "at task runtime. Attempt %s of " "%s. 
State set to NONE.", ti.try_number, ti.max_tries + 1, ) ti.queued_dttm = timezone.utcnow() session.merge(ti) session.commit() return False if ti.next_kwargs is not None: cls.logger().info("Resuming after deferral") else: cls.logger().info("Starting attempt %s of %s", ti.try_number, ti.max_tries + 1) if not test_mode: session.add(Log(TaskInstanceState.RUNNING.value, ti)) ti.state = TaskInstanceState.RUNNING ti.emit_state_change_metric(TaskInstanceState.RUNNING) if external_executor_id: ti.external_executor_id = external_executor_id ti.end_date = None if not test_mode: session.merge(ti).task = task session.commit() # Closing all pooled connections to prevent # "max number of connections reached" if settings.engine is not None: settings.engine.dispose() if verbose: if mark_success: cls.logger().info("Marking success for %s on %s", ti.task, ti.logical_date) else: cls.logger().info("Executing %s on %s", ti.task, ti.logical_date) return True @provide_session def check_and_change_state_before_execution( self, verbose: bool = True, ignore_all_deps: bool = False, ignore_depends_on_past: bool = False, wait_for_past_depends_before_skipping: bool = False, ignore_task_deps: bool = False, ignore_ti_state: bool = False, mark_success: bool = False, test_mode: bool = False, pool: str | None = None, external_executor_id: str | None = None, session: Session = NEW_SESSION, ) -> bool: return TaskInstance._check_and_change_state_before_execution( task_instance=self, verbose=verbose, ignore_all_deps=ignore_all_deps, ignore_depends_on_past=ignore_depends_on_past, wait_for_past_depends_before_skipping=wait_for_past_depends_before_skipping, ignore_task_deps=ignore_task_deps, ignore_ti_state=ignore_ti_state, mark_success=mark_success, test_mode=test_mode, hostname=get_hostname(), pool=pool, external_executor_id=external_executor_id, session=session, ) def emit_state_change_metric(self, new_state: TaskInstanceState) -> None: """ Send a time metric representing how much time a given state transition took. The previous state and metric name is deduced from the state the task was put in. :param new_state: The state that has just been set for this task. We do not use `self.state`, because sometimes the state is updated directly in the DB and not in the local TaskInstance object. Supported states: QUEUED and RUNNING """ if self.end_date: # if the task has an end date, it means that this is not its first round. # we send the state transition time metric only on the first try, otherwise it gets more complex. 
return # switch on state and deduce which metric to send if new_state == TaskInstanceState.RUNNING: metric_name = "queued_duration" if self.queued_dttm is None: # this should not really happen except in tests or rare cases, # but we don't want to create errors just for a metric, so we just skip it self.log.warning( "cannot record %s for task %s because previous state change time has not been saved", metric_name, self.task_id, ) return timing = timezone.utcnow() - self.queued_dttm elif new_state == TaskInstanceState.QUEUED: metric_name = "scheduled_duration" if self.scheduled_dttm is None: self.log.warning( "cannot record %s for task %s because previous state change time has not been saved", metric_name, self.task_id, ) return timing = timezone.utcnow() - self.scheduled_dttm else: raise NotImplementedError("no metric emission setup for state %s", new_state) # send metric twice, once (legacy) with tags in the name and once with tags as tags Stats.timing(f"dag.{self.dag_id}.{self.task_id}.{metric_name}", timing) Stats.timing( f"task.{metric_name}", timing, tags={"task_id": self.task_id, "dag_id": self.dag_id, "queue": self.queue}, ) def clear_next_method_args(self) -> None: """Ensure we unset next_method and next_kwargs to ensure that any retries don't reuse them.""" log.debug("Clearing next_method and next_kwargs.") self.next_method = None self.next_kwargs = None @provide_session def _run_raw_task( self, mark_success: bool = False, session: Session = NEW_SESSION, **kwargs: Any, ) -> None: """Only kept for tests.""" from airflow.sdk.definitions.dag import _run_task if mark_success: self.set_state(TaskInstanceState.SUCCESS) log.info("[DAG TEST] Marking success for %s ", self.task_id) return None # TODO (TaskSDK): This is the old ti execution path. The only usage is # in TI.run(...), someone needs to analyse if it's still actually used # somewhere and fix it, likely by rewriting TI.run(...) to use the same # mechanism as Operator.test(). 
taskrun_result = _run_task(ti=self, task=self.task) # type: ignore[arg-type] if taskrun_result is None: return None if taskrun_result.error: raise taskrun_result.error self.task = taskrun_result.ti.task # type: ignore[assignment] return None @staticmethod @provide_session def register_asset_changes_in_db( ti: TaskInstance, task_outlets: list[AssetProfile], outlet_events: list[dict[str, Any]], session: Session = NEW_SESSION, ) -> None: from airflow.sdk.definitions.asset import Asset, AssetAlias, AssetNameRef, AssetUniqueKey, AssetUriRef asset_keys = { AssetUniqueKey(o.name, o.uri) for o in task_outlets if o.type == Asset.__name__ and o.name and o.uri } asset_name_refs = { Asset.ref(name=o.name) for o in task_outlets if o.type == AssetNameRef.__name__ and o.name } asset_uri_refs = { Asset.ref(uri=o.uri) for o in task_outlets if o.type == AssetUriRef.__name__ and o.uri } asset_models: dict[AssetUniqueKey, AssetModel] = { AssetUniqueKey.from_asset(am): am for am in session.scalars( select(AssetModel).where( AssetModel.active.has(), or_( tuple_(AssetModel.name, AssetModel.uri).in_(attrs.astuple(k) for k in asset_keys), AssetModel.name.in_(r.name for r in asset_name_refs), AssetModel.uri.in_(r.uri for r in asset_uri_refs), ), ) ) } asset_event_extras: dict[AssetUniqueKey, dict] = { AssetUniqueKey(**event["dest_asset_key"]): event["extra"] for event in outlet_events if "source_alias_name" not in event } for key in asset_keys: try: am = asset_models[key] except KeyError: ti.log.warning( 'Task has inactive assets "Asset(name=%s, uri=%s)" in inlets or outlets', key.name, key.uri, ) continue ti.log.debug("register event for asset %s", am) asset_manager.register_asset_change( task_instance=ti, asset=am, extra=asset_event_extras.get(key), session=session, ) if asset_name_refs: asset_models_by_name = {key.name: am for key, am in asset_models.items()} asset_event_extras_by_name = {key.name: extra for key, extra in asset_event_extras.items()} for nref in asset_name_refs: try: am = asset_models_by_name[nref.name] except KeyError: ti.log.warning( 'Task has inactive assets "Asset.ref(name=%s)" in inlets or outlets', nref.name ) continue ti.log.debug("register event for asset name ref %s", am) asset_manager.register_asset_change( task_instance=ti, asset=am, extra=asset_event_extras_by_name.get(nref.name), session=session, ) if asset_uri_refs: asset_models_by_uri = {key.uri: am for key, am in asset_models.items()} asset_event_extras_by_uri = {key.uri: extra for key, extra in asset_event_extras.items()} for uref in asset_uri_refs: try: am = asset_models_by_uri[uref.uri] except KeyError: ti.log.warning( 'Task has inactive assets "Asset.ref(uri=%s)" in inlets or outlets', uref.uri ) continue ti.log.debug("register event for asset uri ref %s", am) asset_manager.register_asset_change( task_instance=ti, asset=am, extra=asset_event_extras_by_uri.get(uref.uri), session=session, ) def _asset_event_extras_from_aliases() -> dict[tuple[AssetUniqueKey, str, str], set[str]]: d = defaultdict(set) for event in outlet_events: try: alias_name = event["source_alias_name"] except KeyError: continue if alias_name not in outlet_alias_names: continue asset_key = AssetUniqueKey(**event["dest_asset_key"]) # fallback for backward compatibility asset_extra_json = json.dumps(event.get("dest_asset_extra", {}), sort_keys=True) asset_event_extra_json = json.dumps(event["extra"], sort_keys=True) d[asset_key, asset_extra_json, asset_event_extra_json].add(alias_name) return d outlet_alias_names = {o.name for o in task_outlets if o.type == 
AssetAlias.__name__ and o.name} if outlet_alias_names and (event_extras_from_aliases := _asset_event_extras_from_aliases()): for ( asset_key, asset_extra_json, asset_event_extras_json, ), event_aliase_names in event_extras_from_aliases.items(): asset_event_extra = json.loads(asset_event_extras_json) asset = Asset(name=asset_key.name, uri=asset_key.uri, extra=json.loads(asset_extra_json)) ti.log.debug("register event for asset %s with aliases %s", asset_key, event_aliase_names) event = asset_manager.register_asset_change( task_instance=ti, asset=asset, source_alias_names=event_aliase_names, extra=asset_event_extra, session=session, ) if event is None: ti.log.info("Dynamically creating AssetModel %s", asset_key) session.add(AssetModel.from_public(asset)) session.flush() # So event can set up its asset fk. asset_manager.register_asset_change( task_instance=ti, asset=asset, source_alias_names=event_aliase_names, extra=asset_event_extra, session=session, ) @provide_session def update_rtif(self, rendered_fields, session: Session = NEW_SESSION): from airflow.models.renderedtifields import RenderedTaskInstanceFields rtif = RenderedTaskInstanceFields(ti=self, render_templates=False, rendered_fields=rendered_fields) rtif.write(session=session) session.flush() RenderedTaskInstanceFields.delete_old_records(self.task_id, self.dag_id, session=session) def update_heartbeat(self): with create_session() as session: session.execute( update(TaskInstance) .where(TaskInstance.id == self.id) .values(last_heartbeat_at=timezone.utcnow()) ) @provide_session def run( self, verbose: bool = True, ignore_all_deps: bool = False, ignore_depends_on_past: bool = False, wait_for_past_depends_before_skipping: bool = False, ignore_task_deps: bool = False, ignore_ti_state: bool = False, mark_success: bool = False, test_mode: bool = False, pool: str | None = None, session: Session = NEW_SESSION, raise_on_defer: bool = False, ) -> None: """Run TaskInstance (only kept for tests).""" # This method is only used in ti.run and dag.test and task.test. # So doing the s10n/de-s10n dance to operator on Serialized task for the scheduler dep check part. from airflow.serialization.serialized_objects import SerializedDAG original_task = self.task if TYPE_CHECKING: assert original_task is not None assert original_task.dag is not None # We don't set up all tests well... if not isinstance(original_task.dag, SerializedDAG): serialized_dag = SerializedDAG.deserialize_dag(SerializedDAG.serialize_dag(original_task.dag)) self.task = serialized_dag.get_task(original_task.task_id) res = self.check_and_change_state_before_execution( verbose=verbose, ignore_all_deps=ignore_all_deps, ignore_depends_on_past=ignore_depends_on_past, wait_for_past_depends_before_skipping=wait_for_past_depends_before_skipping, ignore_task_deps=ignore_task_deps, ignore_ti_state=ignore_ti_state, mark_success=mark_success, test_mode=test_mode, pool=pool, session=session, ) self.task = original_task if not res: return self._run_raw_task(mark_success=mark_success) @classmethod def fetch_handle_failure_context( cls, ti: TaskInstance, error: None | str, test_mode: bool | None = None, *, session: Session, fail_fast: bool = False, ): """ Fetch the context needed to handle a failure. 
:param ti: TaskInstance :param error: if specified, log the specific exception if thrown :param test_mode: doesn't record success or failure in the DB if True :param session: SQLAlchemy ORM Session :param fail_fast: if True, fail all downstream tasks """ if error: cls.logger().error("%s", error) if not test_mode: ti.refresh_from_db(session) ti.end_date = timezone.utcnow() ti.set_duration() Stats.incr(f"operator_failures_{ti.operator}", tags=ti.stats_tags) # Same metric with tagging Stats.incr("operator_failures", tags={**ti.stats_tags, "operator": ti.operator}) Stats.incr("ti_failures", tags=ti.stats_tags) if not test_mode: session.add(Log(TaskInstanceState.FAILED.value, ti)) ti.clear_next_method_args() # Set state correctly and figure out how to log it and decide whether # to email # Since this function is called only when the TaskInstance state is running, # try_number contains the current try_number (not the next). We # only mark task instance as FAILED if the next task instance # try_number exceeds the max_tries ... or if force_fail is truthy # Actual callbacks are handled by the DAG processor, not the scheduler task = getattr(ti, "task", None) if not ti.is_eligible_to_retry(): ti.state = TaskInstanceState.FAILED if task and fail_fast: _stop_remaining_tasks(task_instance=ti, session=session) else: if ti.state == TaskInstanceState.RUNNING: # If the task instance is in the running state, it means it raised an exception and # about to retry so we record the task instance history. For other states, the task # instance was cleared and already recorded in the task instance history. ti.prepare_db_for_next_try(session) ti.state = State.UP_FOR_RETRY try: get_listener_manager().hook.on_task_instance_failed( previous_state=TaskInstanceState.RUNNING, task_instance=ti, error=error ) except Exception: log.exception("error calling listener") return ti @staticmethod @provide_session def save_to_db(ti: TaskInstance, session: Session = NEW_SESSION): ti.updated_at = timezone.utcnow() session.merge(ti) session.flush() session.commit() @provide_session def handle_failure( self, error: None | str, test_mode: bool | None = None, session: Session = NEW_SESSION, ) -> None: """ Handle Failure for a task instance. :param error: if specified, log the specific exception if thrown :param test_mode: doesn't record success or failure in the DB if True :param session: SQLAlchemy ORM Session """ if TYPE_CHECKING: assert self.task assert self.task.dag try: fail_fast = self.task.dag.fail_fast except Exception: fail_fast = False if test_mode is None: test_mode = self.test_mode ti = TaskInstance.fetch_handle_failure_context( ti=self, error=error, test_mode=test_mode, session=session, fail_fast=fail_fast, ) _log_state(task_instance=self) if not test_mode: TaskInstance.save_to_db(ti, session) def is_eligible_to_retry(self) -> bool: """Is task instance is eligible for retry.""" if self.state == TaskInstanceState.RESTARTING: # If a task is cleared when running, it goes into RESTARTING state and is always # eligible for retry return True if not getattr(self, "task", None): # Couldn't load the task, don't know number of retries, guess: return self.try_number <= self.max_tries if TYPE_CHECKING: assert self.task assert self.task.retries return bool(self.task.retries and self.try_number <= self.max_tries) # TODO (GH-52141): We should remove this entire function (only makes sense at runtime). def get_template_context( self, session: Session | None = None, ignore_param_exceptions: bool = True, ) -> Context: """ Return TI Context. 
:param session: SQLAlchemy ORM Session :param ignore_param_exceptions: flag to suppress value exceptions while initializing the ParamsDict """ # Do not use provide_session here -- it expunges everything on exit! if not session: session = settings.get_session()() from airflow.exceptions import NotMapped from airflow.models.mappedoperator import get_mapped_ti_count from airflow.sdk.api.datamodels._generated import ( DagRun as DagRunSDK, PrevSuccessfulDagRunResponse, TIRunContext, ) from airflow.sdk.definitions.param import process_params from airflow.sdk.execution_time.context import InletEventsAccessors from airflow.utils.context import ( ConnectionAccessor, OutletEventAccessors, VariableAccessor, ) if TYPE_CHECKING: assert session def _get_dagrun(session: Session) -> DagRun: dag_run = self.get_dagrun(session) if dag_run in session: return dag_run # The dag_run may not be attached to the session anymore since the # code base is over-zealous with use of session.expunge_all(). # Re-attach it if the relation is not loaded so we can load it when needed. info: Any = inspect(dag_run) if info.attrs.consumed_asset_events.loaded_value is not NO_VALUE: return dag_run # If dag_run is not flushed to db at all (e.g. CLI commands using # in-memory objects for ad-hoc operations), just set the value manually. if not info.has_identity: dag_run.consumed_asset_events = [] return dag_run return session.merge(dag_run, load=False) task: Any = self.task dag = task.dag dag_run = _get_dagrun(session) validated_params = process_params(dag, task, dag_run.conf, suppress_exception=ignore_param_exceptions) ti_context_from_server = TIRunContext( dag_run=DagRunSDK.model_validate(dag_run, from_attributes=True), max_tries=self.max_tries, should_retry=self.is_eligible_to_retry(), ) runtime_ti = self.to_runtime_ti(context_from_server=ti_context_from_server) context: Context = runtime_ti.get_template_context() @cache # Prevent multiple database access. 
def _get_previous_dagrun_success() -> PrevSuccessfulDagRunResponse: dr_from_db = self.get_previous_dagrun(state=DagRunState.SUCCESS, session=session) if dr_from_db: return PrevSuccessfulDagRunResponse.model_validate(dr_from_db, from_attributes=True) return PrevSuccessfulDagRunResponse() def get_prev_data_interval_start_success() -> pendulum.DateTime | None: return timezone.coerce_datetime(_get_previous_dagrun_success().data_interval_start) def get_prev_data_interval_end_success() -> pendulum.DateTime | None: return timezone.coerce_datetime(_get_previous_dagrun_success().data_interval_end) def get_prev_start_date_success() -> pendulum.DateTime | None: return timezone.coerce_datetime(_get_previous_dagrun_success().start_date) def get_prev_end_date_success() -> pendulum.DateTime | None: return timezone.coerce_datetime(_get_previous_dagrun_success().end_date) def get_triggering_events() -> dict[str, list[AssetEvent]]: asset_events = dag_run.consumed_asset_events triggering_events: dict[str, list[AssetEvent]] = defaultdict(list) for event in asset_events: if event.asset: triggering_events[event.asset.uri].append(event) return triggering_events # NOTE: If you add to this dict, make sure to also update the following: # * Context in task-sdk/src/airflow/sdk/definitions/context.py # * KNOWN_CONTEXT_KEYS in airflow/utils/context.py # * Table in docs/apache-airflow/templates-ref.rst context.update( { "outlet_events": OutletEventAccessors(), "inlet_events": InletEventsAccessors(task.inlets), "params": validated_params, "prev_data_interval_start_success": get_prev_data_interval_start_success(), "prev_data_interval_end_success": get_prev_data_interval_end_success(), "prev_start_date_success": get_prev_start_date_success(), "prev_end_date_success": get_prev_end_date_success(), "test_mode": self.test_mode, # ti/task_instance are added here for ti.xcom_{push,pull} "task_instance": self, "ti": self, "triggering_asset_events": lazy_object_proxy.Proxy(get_triggering_events), "var": { "json": VariableAccessor(deserialize_json=True), "value": VariableAccessor(deserialize_json=False), }, "conn": ConnectionAccessor(), } ) try: expanded_ti_count: int | None = get_mapped_ti_count(task, self.run_id, session=session) context["expanded_ti_count"] = expanded_ti_count if expanded_ti_count: setattr( self, "_upstream_map_indexes", { upstream.task_id: self.get_relevant_upstream_map_indexes( upstream, expanded_ti_count, session=session, ) for upstream in task.upstream_list }, ) except NotMapped: pass return context # TODO (GH-52141): We should remove this entire function (only makes sense at runtime). # This is intentionally left untyped so Mypy complains less about this dead code. def render_templates(self, context=None, jinja_env=None): """ Render templates in the operator fields. If the task was originally mapped, this may replace ``self.task`` with the unmapped, fully rendered BaseOperator. The original ``self.task`` before replacement is returned. """ from airflow.sdk.definitions.mappedoperator import MappedOperator if not context: context = self.get_template_context() original_task = self.task # If self.task is mapped, this call replaces self.task to point to the # unmapped BaseOperator created by this function! This is because the # MappedOperator is useless for template rendering, and we need to be # able to access the unmapped task instead. 
original_task.render_template_fields(context, jinja_env) if isinstance(self.task, MappedOperator): self.task = context["ti"].task return original_task def set_duration(self) -> None: """Set task instance duration.""" if self.end_date and self.start_date: self.duration = (self.end_date - self.start_date).total_seconds() else: self.duration = None log.debug("Task Duration set to %s", self.duration) @provide_session def xcom_push( self, key: str, value: Any, session: Session = NEW_SESSION, ) -> None: """ Make an XCom available for tasks to pull. :param key: Key to store the value under. :param value: Value to store. Only be JSON-serializable may be used otherwise. """ XComModel.set( key=key, value=value, task_id=self.task_id, dag_id=self.dag_id, run_id=self.run_id, map_index=self.map_index, session=session, ) @provide_session def xcom_pull( self, task_ids: str | Iterable[str] | None = None, dag_id: str | None = None, key: str = XCOM_RETURN_KEY, include_prior_dates: bool = False, session: Session = NEW_SESSION, *, map_indexes: int | Iterable[int] | None = None, default: Any = None, run_id: str | None = None, ) -> Any: """:meta private:""" # noqa: D400 # This is only kept for compatibility in tests for now while AIP-72 is in progress. if dag_id is None: dag_id = self.dag_id if run_id is None: run_id = self.run_id query = XComModel.get_many( key=key, run_id=run_id, dag_ids=dag_id, task_ids=task_ids, map_indexes=map_indexes, include_prior_dates=include_prior_dates, ) # NOTE: Since we're only fetching the value field and not the whole # class, the @recreate annotation does not kick in. Therefore we need to # call XCom.deserialize_value() manually. # We are only pulling one single task. if (task_ids is None or isinstance(task_ids, str)) and not isinstance(map_indexes, Iterable): first = session.execute( query.with_only_columns( XComModel.run_id, XComModel.task_id, XComModel.dag_id, XComModel.map_index, XComModel.value, ) ).first() if first is None: # No matching XCom at all. return default if map_indexes is not None or first.map_index < 0: return XComModel.deserialize_value(first) # raise RuntimeError("Nothing should hit this anymore") # TODO: TaskSDK: We should remove this, but many tests still currently call `ti.run()`. See #45549 # At this point either task_ids or map_indexes is explicitly multi-value. # Order return values to match task_ids and map_indexes ordering. 
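        # Illustrative example (not part of the upstream source): with
        # task_ids=["transform", "extract"], the CASE expression built below orders
        # the pulled values so that "transform" rows come before "extract" rows,
        # matching the caller-supplied ordering.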
ordering: list[Any] = [] if task_ids is None or isinstance(task_ids, str): ordering.append(XComModel.task_id) elif task_id_whens := {tid: i for i, tid in enumerate(task_ids)}: ordering.append(case(task_id_whens, value=XComModel.task_id)) else: ordering.append(XComModel.task_id) if map_indexes is None or isinstance(map_indexes, int): ordering.append(XComModel.map_index) elif isinstance(map_indexes, range): if map_indexes.step < 0: ordering.append(XComModel.map_index.desc()) else: ordering.append(XComModel.map_index) elif map_index_whens := {map_index: i for i, map_index in enumerate(map_indexes)}: ordering.append(case(map_index_whens, value=XComModel.map_index)) else: ordering.append(XComModel.map_index) return LazyXComSelectSequence.from_select( query.with_only_columns(XComModel.value).order_by(None), order_by=ordering, session=session, ) @provide_session def get_num_running_task_instances(self, session: Session, same_dagrun: bool = False) -> int: """Return Number of running TIs from the DB.""" # .count() is inefficient num_running_task_instances_query = ( select(func.count()) .select_from(TaskInstance) .where( TaskInstance.dag_id == self.dag_id, TaskInstance.task_id == self.task_id, TaskInstance.state == TaskInstanceState.RUNNING, ) ) if same_dagrun: num_running_task_instances_query = num_running_task_instances_query.where( TaskInstance.run_id == self.run_id ) return session.scalar(num_running_task_instances_query) or 0 @staticmethod def filter_for_tis(tis: Iterable[TaskInstance | TaskInstanceKey]) -> ColumnElement[bool] | None: """Return SQLAlchemy filter to query selected task instances.""" # DictKeys type, (what we often pass here from the scheduler) is not directly indexable :( # Or it might be a generator, but we need to be able to iterate over it more than once tis = list(tis) if not tis: return None first = tis[0] dag_id = first.dag_id run_id = first.run_id map_index = first.map_index first_task_id = first.task_id # pre-compute the set of dag_id, run_id, map_indices and task_ids dag_ids, run_ids, map_indices, task_ids = set(), set(), set(), set() for t in tis: dag_ids.add(t.dag_id) run_ids.add(t.run_id) map_indices.add(t.map_index) task_ids.add(t.task_id) # Common path optimisations: when all TIs are for the same dag_id and run_id, or same dag_id # and task_id -- this can be over 150x faster for huge numbers of TIs (20k+) if dag_ids == {dag_id} and run_ids == {run_id} and map_indices == {map_index}: return and_( TaskInstance.dag_id == dag_id, TaskInstance.run_id == run_id, TaskInstance.map_index == map_index, TaskInstance.task_id.in_(task_ids), ) if dag_ids == {dag_id} and task_ids == {first_task_id} and map_indices == {map_index}: return and_( TaskInstance.dag_id == dag_id, TaskInstance.run_id.in_(run_ids), TaskInstance.map_index == map_index, TaskInstance.task_id == first_task_id, ) if dag_ids == {dag_id} and run_ids == {run_id} and task_ids == {first_task_id}: return and_( TaskInstance.dag_id == dag_id, TaskInstance.run_id == run_id, TaskInstance.map_index.in_(map_indices), TaskInstance.task_id == first_task_id, ) filter_condition = [] # create 2 nested groups, both primarily grouped by dag_id and run_id, # and in the nested group 1 grouped by task_id the other by map_index. 
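        # Illustrative example (not part of the upstream source): for TIs
        # ("d", "r", "a", 0), ("d", "r", "a", 1) and ("d", "r", "b", 0), the task_id
        # grouping is no larger than the map_index grouping, so two conditions are
        # emitted: (task_id == "a" AND map_index IN (0, 1)) OR
        # (task_id == "b" AND map_index IN (0)).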
task_id_groups: dict[tuple, dict[Any, list[Any]]] = defaultdict(lambda: defaultdict(list)) map_index_groups: dict[tuple, dict[Any, list[Any]]] = defaultdict(lambda: defaultdict(list)) for t in tis: task_id_groups[(t.dag_id, t.run_id)][t.task_id].append(t.map_index) map_index_groups[(t.dag_id, t.run_id)][t.map_index].append(t.task_id) # this assumes that most dags have dag_id as the largest grouping, followed by run_id. even # if its not, this is still a significant optimization over querying for every single tuple key for cur_dag_id, cur_run_id in itertools.product(dag_ids, run_ids): # we compare the group size between task_id and map_index and use the smaller group dag_task_id_groups = task_id_groups[(cur_dag_id, cur_run_id)] dag_map_index_groups = map_index_groups[(cur_dag_id, cur_run_id)] if len(dag_task_id_groups) <= len(dag_map_index_groups): for cur_task_id, cur_map_indices in dag_task_id_groups.items(): filter_condition.append( and_( TaskInstance.dag_id == cur_dag_id, TaskInstance.run_id == cur_run_id, TaskInstance.task_id == cur_task_id, TaskInstance.map_index.in_(cur_map_indices), ) ) else: for cur_map_index, cur_task_ids in dag_map_index_groups.items(): filter_condition.append( and_( TaskInstance.dag_id == cur_dag_id, TaskInstance.run_id == cur_run_id, TaskInstance.task_id.in_(cur_task_ids), TaskInstance.map_index == cur_map_index, ) ) return or_(*filter_condition) @classmethod def ti_selector_condition(cls, vals: Collection[str | tuple[str, int]]) -> ColumnElement[bool]: """ Build an SQLAlchemy filter for a list of task_ids or tuples of (task_id,map_index). :meta private: """ # Compute a filter for TI.task_id and TI.map_index based on input values # For each item, it will either be a task_id, or (task_id, map_index) task_id_only = [v for v in vals if isinstance(v, str)] with_map_index = [v for v in vals if not isinstance(v, str)] filters: list[Any] = [] if task_id_only: filters.append(cls.task_id.in_(task_id_only)) if with_map_index: filters.append(tuple_(cls.task_id, cls.map_index).in_(with_map_index)) if not filters: return false() if len(filters) == 1: return filters[0] return or_(*filters) def get_relevant_upstream_map_indexes( self, upstream: Operator, ti_count: int | None, *, session: Session, ) -> int | range | None: if TYPE_CHECKING: assert self.task return _get_relevant_map_indexes( run_id=self.run_id, map_index=self.map_index, ti_count=ti_count, task=self.task, relative=upstream, session=session, ) def clear_db_references(self, session: Session): """ Clear db tables that have a reference to this instance. 
:param session: ORM Session :meta private: """ from airflow.models.renderedtifields import RenderedTaskInstanceFields tables: list[type[TaskInstanceDependencies]] = [ XComModel, RenderedTaskInstanceFields, TaskMap, ] tables_by_id: list[type[Base]] = [TaskInstanceNote, TaskReschedule] for table in tables: session.execute( delete(table).where( table.dag_id == self.dag_id, table.task_id == self.task_id, table.run_id == self.run_id, table.map_index == self.map_index, ) ) for table in tables_by_id: session.execute(delete(table).where(table.ti_id == self.id)) @classmethod def duration_expression_update( cls, end_date: datetime, query: Update, bind: Engine | SAConnection ) -> Update: """Return a SQL expression for calculating the duration of this TI, based on the start and end date columns.""" # TODO: Compare it with self._set_duration method if bind.dialect.name == "sqlite": return query.values( { "end_date": end_date, "duration": ( (func.strftime("%s", end_date) - func.strftime("%s", cls.start_date)) + func.round((func.strftime("%f", end_date) - func.strftime("%f", cls.start_date)), 3) ), } ) if bind.dialect.name == "postgresql": return query.values( { "end_date": end_date, "duration": extract("EPOCH", end_date - cls.start_date), } ) return query.values( { "end_date": end_date, "duration": ( func.timestampdiff(text("MICROSECOND"), cls.start_date, end_date) # Turn microseconds into floating point seconds. / 1_000_000 ), } ) @property def is_schedulable(self): """Determine if the task_instance should be scheduled or short-circuited to ``success``.""" return self.is_task_schedulable(self.task) @staticmethod def is_task_schedulable(task: Operator) -> bool: """ Determine if the task should be scheduled instead of being short-circuited to ``success``. A task requires scheduling if it is not a trivial EmptyOperator, i.e. one of the following conditions holds: * it does **not** inherit from ``EmptyOperator`` * it defines an ``on_execute_callback`` * it defines an ``on_success_callback`` * it declares any ``outlets`` * it declares any ``inlets`` If none of these are true, the task is considered empty and is immediately marked successful without being scheduled. Note: keeping this check as a separate public method is important so it can also be used by listeners (when a task is not scheduled, listeners are never called). For example, the OpenLineage listener checks all tasks at DAG start, and using this method lets it consistently determine whether the listener will run for each task. 
""" return bool( not task.inherits_from_empty_operator or task.has_on_execute_callback or task.has_on_success_callback or task.outlets or task.inlets ) def _find_common_ancestor_mapped_group(node1: Operator, node2: Operator) -> SerializedTaskGroup | None: """Given two operators, find their innermost common mapped task group.""" if node1.dag is None or node2.dag is None or node1.dag_id != node2.dag_id: return None parent_group_ids = {g.group_id for g in node1.iter_mapped_task_groups()} common_groups = (g for g in node2.iter_mapped_task_groups() if g.group_id in parent_group_ids) return next(common_groups, None) def _is_further_mapped_inside(operator: Operator, container: SerializedTaskGroup) -> bool: """Whether given operator is *further* mapped inside a task group.""" from airflow.models.mappedoperator import is_mapped if is_mapped(operator): return True task_group = operator.task_group while task_group is not None and task_group.group_id != container.group_id: if is_mapped(task_group): return True task_group = task_group.parent_group return False def _get_relevant_map_indexes( *, task: Operator, run_id: str, map_index: int, relative: Operator, ti_count: int | None, session: Session, ) -> int | range | None: """ Infer the map indexes of a relative that's "relevant" to this ti. The bulk of the logic mainly exists to solve the problem described by the following example, where 'val' must resolve to different values, depending on where the reference is being used:: @task def this_task(v): # This is self.task. return v * 2 @task_group def tg1(inp): val = upstream(inp) # This is the upstream task. this_task(val) # When inp is 1, val here should resolve to 2. return val # This val is the same object returned by tg1. val = tg1.expand(inp=[1, 2, 3]) @task_group def tg2(inp): another_task(inp, val) # val here should resolve to [2, 4, 6]. tg2.expand(inp=["a", "b"]) The surrounding mapped task groups of ``upstream`` and ``task`` are inspected to find a common "ancestor". If such an ancestor is found, we need to return specific map indexes to pull a partial value from upstream XCom. The same logic apply for finding downstream tasks. :param task: Current task being inspected. :param run_id: Current run ID. :param map_index: Map index of the current task instance. :param relative: The relative task to find relevant map indexes for. :param ti_count: The total count of task instance this task was expanded by the scheduler, i.e. ``expanded_ti_count`` in the template context. :return: Specific map index or map indexes to pull, or ``None`` if we want to "whole" return value (i.e. no mapped task groups involved). """ from airflow.models.mappedoperator import get_mapped_ti_count # This value should never be None since we already know the current task # is in a mapped task group, and should have been expanded, despite that, # we need to check that it is not None to satisfy Mypy. # But this value can be 0 when we expand an empty list, for that it is # necessary to check that ti_count is not 0 to avoid dividing by 0. if not ti_count: return None # Find the innermost common mapped task group between the current task # If the current task and the referenced task does not have a common # mapped task group, the two are in different task mapping contexts # (like another_task above), and we should use the "whole" value. if (common_ancestor := _find_common_ancestor_mapped_group(task, relative)) is None: return None # At this point we know the two tasks share a mapped task group, and we # should use a "partial" value. 
Let's break down the mapped ti count # between the ancestor and further expansion happened inside it. ancestor_ti_count = get_mapped_ti_count(common_ancestor, run_id, session=session) ancestor_map_index = map_index * ancestor_ti_count // ti_count # If the task is NOT further expanded inside the common ancestor, we # only want to reference one single ti. We must walk the actual DAG, # and "ti_count == ancestor_ti_count" does not work, since the further # expansion may be of length 1. if not _is_further_mapped_inside(relative, common_ancestor): return ancestor_map_index # Otherwise we need a partial aggregation for values from selected task # instances in the ancestor's expansion context. further_count = ti_count // ancestor_ti_count map_index_start = ancestor_map_index * further_count return range(map_index_start, map_index_start + further_count) def find_relevant_relatives( normal_tasks: Iterable[str], mapped_tasks: Iterable[tuple[str, int]], *, direction: Literal["upstream", "downstream"], dag: SerializedDAG, run_id: str, session: Session, ) -> Collection[str | tuple[str, int]]: from airflow.models.mappedoperator import get_mapped_ti_count visited: set[str | tuple[str, int]] = set() def _visit_relevant_relatives_for_normal(task_ids: Iterable[str]) -> None: partial_dag = dag.partial_subset( task_ids=task_ids, include_downstream=direction == "downstream", include_upstream=direction == "upstream", exclude_original=True, ) visited.update(partial_dag.task_dict) def _visit_relevant_relatives_for_mapped(mapped_tasks: Iterable[tuple[str, int]]) -> None: for task_id, map_index in mapped_tasks: task = dag.get_task(task_id) ti_count = get_mapped_ti_count(task, run_id, session=session) # TODO (GH-52141): This should return scheduler operator types, but # currently get_flat_relatives is inherited from SDK DAGNode. relatives = cast("Iterable[Operator]", task.get_flat_relatives(upstream=direction == "upstream")) for relative in relatives: if relative.task_id in visited: continue relative_map_indexes = _get_relevant_map_indexes( task=task, relative=relative, # type: ignore[arg-type] run_id=run_id, map_index=map_index, ti_count=ti_count, session=session, ) visiting_mapped: set[tuple[str, int]] = set() visiting_normal: set[str] = set() match relative_map_indexes: case int(): if (item := (relative.task_id, relative_map_indexes)) not in visited: visiting_mapped.add(item) case range(): visiting_mapped.update((relative.task_id, i) for i in relative_map_indexes) case None: if (task_id := relative.task_id) not in visited: visiting_normal.add(task_id) _visit_relevant_relatives_for_normal(visiting_normal) _visit_relevant_relatives_for_mapped(visiting_mapped) visited.update(visiting_mapped, visiting_normal) _visit_relevant_relatives_for_normal(normal_tasks) _visit_relevant_relatives_for_mapped(mapped_tasks) return visited
TaskInstance
python
PrefectHQ__prefect
src/prefect/task_engine.py
{ "start": 10518, "end": 33811 }
class ____(BaseTaskRunEngine[P, R]): task_run: Optional[TaskRun] = None _client: Optional[SyncPrefectClient] = None @property def client(self) -> SyncPrefectClient: if not self._is_started or self._client is None: raise RuntimeError("Engine has not started.") return self._client def can_retry(self, exc_or_state: Exception | State[R]) -> bool: retry_condition: Optional[ Callable[["Task[P, Coroutine[Any, Any, R]]", TaskRun, State[R]], bool] ] = self.task.retry_condition_fn failure_type = "exception" if isinstance(exc_or_state, Exception) else "state" if not self.task_run: raise ValueError("Task run is not set") try: self.logger.debug( f"Running `retry_condition_fn` check {retry_condition!r} for task" f" {self.task.name!r}" ) state = Failed( data=exc_or_state, message=f"Task run encountered unexpected {failure_type}: {repr(exc_or_state)}", ) if inspect.iscoroutinefunction(retry_condition): should_retry = run_coro_as_sync( retry_condition(self.task, self.task_run, state) ) elif inspect.isfunction(retry_condition): should_retry = retry_condition(self.task, self.task_run, state) else: should_retry = not retry_condition return should_retry except Exception: self.logger.error( ( "An error was encountered while running `retry_condition_fn` check" f" '{retry_condition!r}' for task {self.task.name!r}" ), exc_info=True, ) return False def call_hooks(self, state: Optional[State] = None) -> None: if state is None: state = self.state task = self.task task_run = self.task_run if not task_run: raise ValueError("Task run is not set") if state.is_failed() and task.on_failure_hooks: hooks = task.on_failure_hooks elif state.is_completed() and task.on_completion_hooks: hooks = task.on_completion_hooks elif state.is_running() and task.on_running_hooks: hooks = task.on_running_hooks else: hooks = None for hook in hooks or []: hook_name = get_hook_name(hook) try: self.logger.info( f"Running hook {hook_name!r} in response to entering state" f" {state.name!r}" ) result = hook(task, task_run, state) if asyncio.iscoroutine(result): run_coro_as_sync(result) except Exception: self.logger.error( f"An error was encountered while running hook {hook_name!r}", exc_info=True, ) else: self.logger.info(f"Hook {hook_name!r} finished running successfully") def begin_run(self) -> None: new_state = Running() assert self.task_run is not None, "Task run is not set" self.task_run.start_time = new_state.timestamp flow_run_context = FlowRunContext.get() if flow_run_context and flow_run_context.flow_run: # Carry forward any task run information from the flow run flow_run = flow_run_context.flow_run self.task_run.flow_run_run_count = flow_run.run_count state = self.set_state(new_state) # TODO: this is temporary until the API stops rejecting state transitions # and the client / transaction store becomes the source of truth # this is a bandaid caused by the API storing a Completed state with a bad # result reference that no longer exists if state.is_completed(): try: state.result(retry_result_failure=False, _sync=True) # type: ignore[reportCallIssue] except Exception: state = self.set_state(new_state, force=True) backoff_count = 0 # TODO: Could this listen for state change events instead of polling? 
while state.is_pending() or state.is_paused(): if backoff_count < BACKOFF_MAX: backoff_count += 1 interval = clamped_poisson_interval( average_interval=backoff_count, clamping_factor=0.3 ) time.sleep(interval) state = self.set_state(new_state) # Call on_running hooks after the task has entered the Running state if state.is_running(): self.call_hooks(state) def set_state(self, state: State[R], force: bool = False) -> State[R]: last_state = self.state if not self.task_run: raise ValueError("Task run is not set") self.task_run.state = new_state = state if last_state.timestamp == new_state.timestamp: # Ensure that the state timestamp is unique, or at least not equal to the last state. # This might occur especially on Windows where the timestamp resolution is limited. new_state.timestamp += timedelta(microseconds=1) # Ensure that the state_details are populated with the current run IDs new_state.state_details.task_run_id = self.task_run.id new_state.state_details.flow_run_id = self.task_run.flow_run_id # Predictively update the de-normalized task_run.state_* attributes self.task_run.state_id = new_state.id self.task_run.state_type = new_state.type self.task_run.state_name = new_state.name if last_state.is_running(): self.task_run.total_run_time += new_state.timestamp - last_state.timestamp if new_state.is_running(): self.task_run.run_count += 1 if new_state.is_final(): if ( self.task_run and self.task_run.start_time and not self.task_run.end_time ): self.task_run.end_time = new_state.timestamp if isinstance(state.data, ResultRecord): result = state.data.result else: result = state.data link_state_to_task_run_result(new_state, result) # emit a state change event self._last_event = emit_task_run_state_change_event( task_run=self.task_run, initial_state=last_state, validated_state=self.task_run.state, follows=self._last_event, ) self._telemetry.update_state(new_state) return new_state def result(self, raise_on_failure: bool = True) -> "Union[R, State, None]": if self._return_value is not NotSet: if isinstance(self._return_value, ResultRecord): return self._return_value.result # otherwise, return the value as is return self._return_value if self._raised is not NotSet: # if the task raised an exception, raise it if raise_on_failure: raise self._raised # otherwise, return the exception return self._raised def handle_success( self, result: R, transaction: Transaction ) -> Union[ResultRecord[R], None, Coroutine[Any, Any, R], R]: # Handle the case where the task explicitly returns a failed state, in # which case we should retry the task if it has retries left. if isinstance(result, State) and result.is_failed(): if self.handle_retry(result): return None if self.task.cache_expiration is not None: expiration = prefect.types._datetime.now("UTC") + self.task.cache_expiration else: expiration = None terminal_state = run_coro_as_sync( return_value_to_state( result, result_store=get_result_store(), key=transaction.key, expiration=expiration, ) ) # Avoid logging when running this rollback hook since it is not user-defined handle_rollback = partial(self.handle_rollback) handle_rollback.log_on_run = False transaction.stage( terminal_state.data, on_rollback_hooks=[handle_rollback] + self.task.on_rollback_hooks, on_commit_hooks=self.task.on_commit_hooks, ) if transaction.is_committed(): terminal_state.name = "Cached" self.set_state(terminal_state) self._return_value = result self._telemetry.end_span_on_success() def handle_retry(self, exc_or_state: Exception | State[R]) -> bool: """Handle any task run retries. 
- If the task has retries left, and the retry condition is met, set the task to retrying and return True. - If the task has a retry delay, place in AwaitingRetry state with a delayed scheduled time. - If the task has no retries left, or the retry condition is not met, return False. """ failure_type = "exception" if isinstance(exc_or_state, Exception) else "state" if self.retries < self.task.retries and self.can_retry(exc_or_state): if self.task.retry_delay_seconds: delay = ( self.task.retry_delay_seconds[ min(self.retries, len(self.task.retry_delay_seconds) - 1) ] # repeat final delay value if attempts exceed specified delays if isinstance(self.task.retry_delay_seconds, Sequence) else self.task.retry_delay_seconds ) new_state = AwaitingRetry( scheduled_time=prefect.types._datetime.now("UTC") + timedelta(seconds=delay) ) else: delay = None new_state = Retrying() self.logger.info( "Task run failed with %s: %r - Retry %s/%s will start %s", failure_type, exc_or_state, self.retries + 1, self.task.retries, str(delay) + " second(s) from now" if delay else "immediately", ) self.set_state(new_state, force=True) # Call on_running hooks if we transitioned to a Running state (immediate retry) if new_state.is_running(): self.call_hooks(new_state) self.retries: int = self.retries + 1 return True elif self.retries >= self.task.retries: if self.task.retries > 0: self.logger.error( f"Task run failed with {failure_type}: {exc_or_state!r} - Retries are exhausted", exc_info=True, ) else: self.logger.error( f"Task run failed with {failure_type}: {exc_or_state!r}", exc_info=True, ) return False return False def handle_exception(self, exc: Exception) -> None: # If the task fails, and we have retries left, set the task to retrying. self._telemetry.record_exception(exc) if not self.handle_retry(exc): # If the task has no retries left, or the retry condition is not met, set the task to failed. state = run_coro_as_sync( exception_to_failed_state( exc, message="Task run encountered an exception", result_store=get_result_store(), write_result=True, ) ) self.set_state(state) self._raised = exc self._telemetry.end_span_on_failure(state.message if state else None) def handle_timeout(self, exc: TimeoutError) -> None: if not self.handle_retry(exc): if isinstance(exc, TaskRunTimeoutError): message = f"Task run exceeded timeout of {self.task.timeout_seconds} second(s)" else: message = f"Task run failed due to timeout: {exc!r}" self.logger.error(message) state = Failed( data=exc, message=message, name="TimedOut", ) self.set_state(state) self._raised = exc def handle_crash(self, exc: BaseException) -> None: state = run_coro_as_sync(exception_to_crashed_state(exc)) self.logger.error(f"Crash detected! 
{state.message}") self.logger.debug("Crash details:", exc_info=exc) self.set_state(state, force=True) self._raised = exc self._telemetry.record_exception(exc) self._telemetry.end_span_on_failure(state.message if state else None) @contextmanager def setup_run_context(self, client: Optional[SyncPrefectClient] = None): from prefect.utilities.engine import ( should_log_prints, ) settings = get_current_settings() if client is None: client = self.client if not self.task_run: raise ValueError("Task run is not set") with ExitStack() as stack: if log_prints := should_log_prints(self.task): stack.enter_context(patch_print()) if self.task.persist_result is not None: persist_result = self.task.persist_result elif settings.tasks.default_persist_result is not None: persist_result = settings.tasks.default_persist_result else: persist_result = should_persist_result() stack.enter_context( TaskRunContext( task=self.task, log_prints=log_prints, task_run=self.task_run, parameters=self.parameters, result_store=get_result_store().update_for_task( self.task, _sync=True ), client=client, persist_result=persist_result, ) ) stack.enter_context(ConcurrencyContext()) self.logger: "logging.Logger" = task_run_logger( task_run=self.task_run, task=self.task ) # type: ignore yield @contextmanager def asset_context(self): parent_asset_ctx = AssetContext.get() if parent_asset_ctx and parent_asset_ctx.copy_to_child_ctx: asset_ctx = parent_asset_ctx.model_copy() asset_ctx.copy_to_child_ctx = False else: asset_ctx = AssetContext.from_task_and_inputs( self.task, self.task_run.id, self.task_run.task_inputs ) with asset_ctx as ctx: try: yield finally: ctx.emit_events(self.state) @contextmanager def initialize_run( self, task_run_id: Optional[UUID] = None, dependencies: Optional[dict[str, set[RunInput]]] = None, ) -> Generator[Self, Any, Any]: """ Enters a client context and creates a task run if needed. """ with hydrated_context(self.context): with SyncClientContext.get_or_create() as client_ctx: self._client = client_ctx.client self._is_started = True parent_flow_run_context = FlowRunContext.get() parent_task_run_context = TaskRunContext.get() try: if not self.task_run: self.task_run = run_coro_as_sync( self.task.create_local_run( id=task_run_id, parameters=self.parameters, flow_run_context=parent_flow_run_context, parent_task_run_context=parent_task_run_context, wait_for=self.wait_for, extra_task_inputs=dependencies, ) ) # Emit an event to capture that the task run was in the `PENDING` state. 
self._last_event = emit_task_run_state_change_event( task_run=self.task_run, initial_state=None, validated_state=self.task_run.state, ) with self.setup_run_context(): # setup_run_context might update the task run name, so log creation here self.logger.debug( f"Created task run {self.task_run.name!r} for task {self.task.name!r}" ) self._telemetry.start_span( run=self.task_run, client=self.client, parameters=self.parameters, ) yield self except TerminationSignal as exc: # TerminationSignals are caught and handled as crashes self.handle_crash(exc) raise exc except Exception: # regular exceptions are caught and re-raised to the user raise except (Pause, Abort) as exc: # Do not capture internal signals as crashes if isinstance(exc, Abort): self.logger.error("Task run was aborted: %s", exc) raise except GeneratorExit: # Do not capture generator exits as crashes raise except BaseException as exc: # BaseExceptions are caught and handled as crashes self.handle_crash(exc) raise finally: self.log_finished_message() self._is_started = False self._client = None async def wait_until_ready(self) -> None: """Waits until the scheduled time (if its the future), then enters Running.""" if scheduled_time := self.state.state_details.scheduled_time: sleep_time = ( scheduled_time - prefect.types._datetime.now("UTC") ).total_seconds() await anyio.sleep(sleep_time if sleep_time > 0 else 0) new_state = Retrying() if self.state.name == "AwaitingRetry" else Running() self.set_state( new_state, force=True, ) # Call on_running hooks if we transitioned to a Running state if self.state.is_running(): self.call_hooks() # -------------------------- # # The following methods compose the main task run loop # # -------------------------- @contextmanager def start( self, task_run_id: Optional[UUID] = None, dependencies: Optional[dict[str, set[RunInput]]] = None, ) -> Generator[None, None, None]: with self.initialize_run(task_run_id=task_run_id, dependencies=dependencies): with ( trace.use_span(self._telemetry.span) if self._telemetry.span else nullcontext() ): try: self._resolve_parameters() self._set_custom_task_run_name() self._wait_for_dependencies() except UpstreamTaskError as upstream_exc: self.set_state( Pending( name="NotReady", message=str(upstream_exc), ), # if orchestrating a run already in a pending state, force orchestration to # update the state name force=self.state.is_pending(), ) yield self.call_hooks() return with _concurrency( names=[f"tag:{tag}" for tag in self.task_run.tags], occupy=1, holder=ConcurrencyLeaseHolder(type="task_run", id=self.task_run.id), lease_duration=60, suppress_warnings=True, ): self.begin_run() try: yield finally: self.call_hooks() @contextmanager def transaction_context(self) -> Generator[Transaction, None, None]: # refresh cache setting is now repurposes as overwrite transaction record overwrite = ( self.task.refresh_cache if self.task.refresh_cache is not None else PREFECT_TASKS_REFRESH_CACHE.value() ) isolation_level = ( IsolationLevel(self.task.cache_policy.isolation_level) if self.task.cache_policy and self.task.cache_policy is not NotSet and self.task.cache_policy.isolation_level is not None else None ) with transaction( key=self.compute_transaction_key(), store=get_result_store(), overwrite=overwrite, logger=self.logger, write_on_commit=should_persist_result(), isolation_level=isolation_level, ) as txn: yield txn @contextmanager def run_context(self): # reenter the run context to ensure it is up to date for every run with self.setup_run_context(): try: with timeout( 
seconds=self.task.timeout_seconds, timeout_exc_type=TaskRunTimeoutError, ): self.logger.debug( f"Executing task {self.task.name!r} for task run {self.task_run.name!r}..." ) if self.is_cancelled(): raise CancelledError("Task run cancelled by the task runner") yield self except TimeoutError as exc: self.handle_timeout(exc) except Exception as exc: self.handle_exception(exc) def call_task_fn( self, transaction: Transaction ) -> Union[ResultRecord[Any], None, Coroutine[Any, Any, R], R]: """ Convenience method to call the task function. Returns a coroutine if the task is async. """ parameters = self.parameters or {} if transaction.is_committed(): result = transaction.read() else: result = call_with_parameters(self.task.fn, parameters) self.handle_success(result, transaction=transaction) return result @dataclass
SyncTaskRunEngine
python
dagster-io__dagster
python_modules/dagster/dagster/_core/workspace/permissions.py
{ "start": 116, "end": 3343 }
class ____(str, Enum): LAUNCH_PIPELINE_EXECUTION = "launch_pipeline_execution" LAUNCH_PIPELINE_REEXECUTION = "launch_pipeline_reexecution" START_SCHEDULE = "start_schedule" STOP_RUNNING_SCHEDULE = "stop_running_schedule" EDIT_SENSOR = "edit_sensor" UPDATE_SENSOR_CURSOR = "update_sensor_cursor" TERMINATE_PIPELINE_EXECUTION = "terminate_pipeline_execution" DELETE_PIPELINE_RUN = "delete_pipeline_run" RELOAD_REPOSITORY_LOCATION = "reload_repository_location" RELOAD_WORKSPACE = "reload_workspace" WIPE_ASSETS = "wipe_assets" REPORT_RUNLESS_ASSET_EVENTS = "report_runless_asset_events" LAUNCH_PARTITION_BACKFILL = "launch_partition_backfill" CANCEL_PARTITION_BACKFILL = "cancel_partition_backfill" EDIT_DYNAMIC_PARTITIONS = "edit_dynamic_partitions" TOGGLE_AUTO_MATERIALIZE = "toggle_auto_materialize" EDIT_CONCURRENCY_LIMIT = "edit_concurrency_limit" def __str__(self) -> str: return str.__str__(self) VIEWER_PERMISSIONS: dict[str, bool] = { Permissions.LAUNCH_PIPELINE_EXECUTION: False, Permissions.LAUNCH_PIPELINE_REEXECUTION: False, Permissions.START_SCHEDULE: False, Permissions.STOP_RUNNING_SCHEDULE: False, Permissions.EDIT_SENSOR: False, Permissions.UPDATE_SENSOR_CURSOR: False, Permissions.TERMINATE_PIPELINE_EXECUTION: False, Permissions.DELETE_PIPELINE_RUN: False, Permissions.RELOAD_REPOSITORY_LOCATION: False, Permissions.RELOAD_WORKSPACE: False, Permissions.WIPE_ASSETS: False, Permissions.REPORT_RUNLESS_ASSET_EVENTS: False, Permissions.LAUNCH_PARTITION_BACKFILL: False, Permissions.CANCEL_PARTITION_BACKFILL: False, Permissions.EDIT_DYNAMIC_PARTITIONS: False, Permissions.TOGGLE_AUTO_MATERIALIZE: False, Permissions.EDIT_CONCURRENCY_LIMIT: False, } EDITOR_PERMISSIONS: dict[str, bool] = { Permissions.LAUNCH_PIPELINE_EXECUTION: True, Permissions.LAUNCH_PIPELINE_REEXECUTION: True, Permissions.START_SCHEDULE: True, Permissions.STOP_RUNNING_SCHEDULE: True, Permissions.EDIT_SENSOR: True, Permissions.UPDATE_SENSOR_CURSOR: True, Permissions.TERMINATE_PIPELINE_EXECUTION: True, Permissions.DELETE_PIPELINE_RUN: True, Permissions.RELOAD_REPOSITORY_LOCATION: True, Permissions.RELOAD_WORKSPACE: True, Permissions.WIPE_ASSETS: True, Permissions.REPORT_RUNLESS_ASSET_EVENTS: True, Permissions.LAUNCH_PARTITION_BACKFILL: True, Permissions.CANCEL_PARTITION_BACKFILL: True, Permissions.EDIT_DYNAMIC_PARTITIONS: True, Permissions.TOGGLE_AUTO_MATERIALIZE: True, Permissions.EDIT_CONCURRENCY_LIMIT: True, } LOCATION_SCOPED_PERMISSIONS = { Permissions.LAUNCH_PIPELINE_EXECUTION, Permissions.LAUNCH_PIPELINE_REEXECUTION, Permissions.START_SCHEDULE, Permissions.STOP_RUNNING_SCHEDULE, Permissions.EDIT_SENSOR, Permissions.UPDATE_SENSOR_CURSOR, Permissions.TERMINATE_PIPELINE_EXECUTION, Permissions.DELETE_PIPELINE_RUN, Permissions.RELOAD_REPOSITORY_LOCATION, Permissions.LAUNCH_PARTITION_BACKFILL, Permissions.CANCEL_PARTITION_BACKFILL, Permissions.EDIT_DYNAMIC_PARTITIONS, Permissions.REPORT_RUNLESS_ASSET_EVENTS, Permissions.WIPE_ASSETS, }
Permissions
python
ansible__ansible
test/units/cli/test_galaxy.py
{ "start": 14600, "end": 15123 }
class ____(unittest.TestCase, ValidRoleTests): @classmethod def setUpClass(cls): cls.setUpRole(role_name='delete_me') @classmethod def tearDownClass(cls): cls.tearDownRole() def test_metadata_contents(self): with open(os.path.join(self.role_dir, 'meta', 'main.yml'), 'r') as mf: metadata = yaml.safe_load(mf) self.assertEqual(metadata.get('galaxy_info', dict()).get('author'), 'your name', msg='author was not set properly in metadata')
TestGalaxyInitDefault
python
django__django
tests/m2o_recursive/models.py
{ "start": 609, "end": 969 }
class ____(models.Model): full_name = models.CharField(max_length=20) mother = models.ForeignKey( "self", models.SET_NULL, null=True, related_name="mothers_child_set" ) father = models.ForeignKey( "self", models.SET_NULL, null=True, related_name="fathers_child_set" ) def __str__(self): return self.full_name
Person
python
run-llama__llama_index
llama-index-integrations/llms/llama-index-llms-dashscope/tests/test_dashscope.py
{ "start": 1685, "end": 9984 }
class ____:
    metadata = DummyMetadata()


@patch("llama_index.llms.dashscope.base.call_with_messages")
def test_dashscope_complete(
    mock_call_with_messages, dashscope_llm, dashscope_api_response, prompt
):
    mock_call_with_messages.return_value = dashscope_api_response
    response = dashscope_llm.complete(prompt)
    assert isinstance(response, CompletionResponse)
    assert response.text == "hi, there!"


@patch("llama_index.llms.dashscope.base.call_with_messages")
def test_dashscope_chat(
    mock_call_with_messages, dashscope_llm, dashscope_api_response, prompt
):
    mock_call_with_messages.return_value = dashscope_api_response
    response = dashscope_llm.chat(messages=[ChatMessage.from_str(prompt)])
    assert isinstance(response, ChatResponse)
    assert response.message.content == "hi, there!"


@pytest.mark.asyncio
@patch("llama_index.llms.dashscope.base.astream_call_with_messages")
async def test_dashscope_astream_complete(
    mock_astream_call_with_messages, dashscope_llm, dashscope_api_response, prompt
):
    async def async_response_generator():
        yield FakeDashscopeResponse(dashscope_api_response)

    mock_astream_call_with_messages.return_value = async_response_generator()
    responses = []
    gen = await dashscope_llm.astream_complete(prompt)  # await first to obtain the async generator
    async for partial_resp in gen:
        responses.append(partial_resp)

    assert len(responses) == 1
    assert isinstance(responses[0], CompletionResponse)
    assert responses[0].text == "hi, there!"
    assert responses[0].delta == "hi, there!"


@pytest.mark.asyncio
@patch("llama_index.llms.dashscope.base.astream_call_with_messages")
async def test_dashscope_astream_chat(
    mock_astream_call_with_messages, dashscope_llm, dashscope_api_response, prompt
):
    async def async_response_generator():
        yield FakeDashscopeResponse(dashscope_api_response)

    mock_astream_call_with_messages.return_value = async_response_generator()
    responses = []
    gen = await dashscope_llm.astream_chat(messages=[ChatMessage.from_str(prompt)])
    async for partial_chat_resp in gen:
        responses.append(partial_chat_resp)

    assert len(responses) == 1
    assert isinstance(responses[0], ChatResponse)
    assert responses[0].message.content == "hi, there!"
    assert responses[0].delta == "hi, there!"
assert responses[0].message.role == "assistant" def test_convert_tool_to_dashscope_format(dashscope_llm): """Test _convert_tool_to_dashscope_format correctly converts a tool to the DashScope format.""" result = dashscope_llm._convert_tool_to_dashscope_format(DummyTool()) expected = { "type": "function", "function": { "name": "dummy_tool", "description": "A dummy tool for testing.", "parameters": { "type": "object", "properties": { "param1": {"type": "string", "description": "A test parameter."} }, }, "required": ["param1"], }, } assert result == expected, f"Expected {expected}, but got {result}" def test_get_tool_calls_from_response_actual_data(dashscope_llm): """Test get_tool_calls_from_response correctly extracts tool calls from a ChatResponse.""" additional_kwargs = { "tool_calls": [ { "index": 0, "id": "call_function_id", "type": "function", "function": { "name": "get_current_weather", "arguments": '{"location":"location"}', }, } ] } chat_message = ChatMessage( role=MessageRole.ASSISTANT.value, content="", additional_kwargs=additional_kwargs, ) chat_response = ChatResponse(message=chat_message, delta="", raw=None) tool_selections = dashscope_llm.get_tool_calls_from_response(chat_response) assert len(tool_selections) == 1 selection = tool_selections[0] assert selection.tool_id == "call_function_id" assert selection.tool_name == "get_current_weather" assert selection.tool_kwargs == {"location": "location"} def test_prepare_chat_with_tools(dashscope_llm): """Test _prepare_chat_with_tools correctly prepares chat with tools.""" tools: Sequence[DummyTool] = [DummyTool()] chat_history: List[ChatMessage] = [ ChatMessage(role="assistant", content="Previous message") ] user_msg: str = "User's question" extra_kwargs = {"extra_param": 123} result = dashscope_llm._prepare_chat_with_tools( tools=tools, user_msg=user_msg, chat_history=chat_history, verbose=True, allow_parallel_tool_calls=False, **extra_kwargs, ) assert "messages" in result assert "tools" in result assert "stream" in result assert result["extra_param"] == 123 messages: List[ChatMessage] = result["messages"] assert len(messages) == 2 assert messages[0].role == "assistant" assert messages[0].content == "Previous message" assert messages[1].role == "user" assert messages[1].content == "User's question" tools_spec = result["tools"] expected_tool_spec = { "type": "function", "function": { "name": "dummy_tool", "description": "A dummy tool for testing.", "parameters": { "type": "object", "properties": { "param1": {"type": "string", "description": "A test parameter."} }, }, "required": ["param1"], }, } assert len(tools_spec) == 1 assert tools_spec[0] == expected_tool_spec assert result["stream"] is True @pytest.mark.asyncio async def test_astream_chat_with_tools(monkeypatch, dashscope_llm): """ Test astream_chat method: when the tools parameter is passed, astream_call_with_messages should receive this parameter, and the additional_kwargs of the returned ChatResponse should contain the correct tool_calls. 
""" async def fake_async_responses(*args, **kwargs) -> AsyncGenerator: expected_tools = [{"dummy": "tool_spec"}] assert kwargs.get("tools") == expected_tools, ( "tools parameter is not passed correctly" ) class FakeOutput: def __init__(self) -> None: self.choices = [ { "message": { "role": "assistant", "content": "Hello, this is a test.", "tool_calls": [ { "index": 0, "id": "dummy_tool_call_id", "type": "function", "function": { "name": "dummy_tool", "arguments": '{"param": "value"}', }, } ], } } ] class FakeResponse: def __init__(self) -> None: self.status_code = HTTPStatus.OK self.output = FakeOutput() yield FakeResponse() monkeypatch.setattr( "llama_index.llms.dashscope.base.astream_call_with_messages", fake_async_responses, ) messages = [ChatMessage(role="user", content="Test message")] dummy_tools = [{"dummy": "tool_spec"}] gen = await dashscope_llm.astream_chat(messages, tools=dummy_tools) responses = [] async for response in gen: responses.append(response) assert len(responses) == 1 response = responses[0] tool_calls = response.message.additional_kwargs.get("tool_calls", []) assert len(tool_calls) == 1, "tool_calls number is not correct" expected_tool_call = { "index": 0, "id": "dummy_tool_call_id", "type": "function", "function": {"name": "dummy_tool", "arguments": '{"param": "value"}'}, } assert tool_calls[0] == expected_tool_call, "tool_calls is not as expected"
DummyTool
python
walkccc__LeetCode
solutions/3207. Maximum Points After Enemy Battles/3207.py
{ "start": 0, "end": 254 }
class ____: def maximumPoints(self, enemyEnergies: list[int], currentEnergy: int) -> int: minEnergy = min(enemyEnergies) return (0 if currentEnergy < minEnergy else (currentEnergy + sum(enemyEnergies) - minEnergy) // minEnergy)
Solution
python
Pylons__pyramid
tests/test_csrf.py
{ "start": 10366, "end": 15672 }
class ____(unittest.TestCase): def _callFUT(self, *args, **kwargs): from pyramid.csrf import check_csrf_origin return check_csrf_origin(*args, **kwargs) def test_success_with_http(self): request = testing.DummyRequest() request.scheme = "http" self.assertTrue(self._callFUT(request)) def test_success_with_https_and_referrer(self): request = testing.DummyRequest() request.scheme = "https" request.host = "example.com" request.host_port = "443" request.referrer = "https://example.com/login/" request.registry.settings = {} self.assertTrue(self._callFUT(request)) def test_success_with_https_and_origin(self): request = testing.DummyRequest() request.scheme = "https" request.host = "example.com" request.host_port = "443" request.headers = {"Origin": "https://example.com/"} request.referrer = "https://not-example.com/" request.registry.settings = {} self.assertTrue(self._callFUT(request)) def test_success_with_additional_trusted_host(self): request = testing.DummyRequest() request.scheme = "https" request.host = "example.com" request.host_port = "443" request.referrer = "https://not-example.com/login/" request.registry.settings = { "pyramid.csrf_trusted_origins": ["not-example.com"] } self.assertTrue(self._callFUT(request)) def test_success_with_nonstandard_port(self): request = testing.DummyRequest() request.scheme = "https" request.host = "example.com:8080" request.host_port = "8080" request.referrer = "https://example.com:8080/login/" request.registry.settings = {} self.assertTrue(self._callFUT(request)) def test_success_with_allow_no_origin(self): request = testing.DummyRequest() request.scheme = "https" request.referrer = None self.assertTrue(self._callFUT(request, allow_no_origin=True)) def test_fails_with_wrong_host(self): from pyramid.exceptions import BadCSRFOrigin request = testing.DummyRequest() request.scheme = "https" request.host = "example.com" request.host_port = "443" request.referrer = "https://not-example.com/login/" request.registry.settings = {} self.assertRaises(BadCSRFOrigin, self._callFUT, request) self.assertFalse(self._callFUT(request, raises=False)) def test_fails_with_no_origin(self): from pyramid.exceptions import BadCSRFOrigin request = testing.DummyRequest() request.scheme = "https" request.referrer = None self.assertRaises( BadCSRFOrigin, self._callFUT, request, allow_no_origin=False ) self.assertFalse( self._callFUT(request, raises=False, allow_no_origin=False) ) def test_fail_with_null_origin(self): from pyramid.exceptions import BadCSRFOrigin request = testing.DummyRequest() request.scheme = "https" request.host = "example.com" request.host_port = "443" request.referrer = None request.headers = {'Origin': 'null'} request.registry.settings = {} self.assertFalse(self._callFUT(request, raises=False)) self.assertRaises(BadCSRFOrigin, self._callFUT, request) def test_success_with_null_origin_and_setting(self): request = testing.DummyRequest() request.scheme = "https" request.host = "example.com" request.host_port = "443" request.referrer = None request.headers = {'Origin': 'null'} request.registry.settings = {"pyramid.csrf_trusted_origins": ["null"]} self.assertTrue(self._callFUT(request, raises=False)) def test_success_with_multiple_origins(self): request = testing.DummyRequest() request.scheme = "https" request.host = "example.com" request.host_port = "443" request.headers = { 'Origin': 'https://google.com https://not-example.com' } request.registry.settings = { "pyramid.csrf_trusted_origins": ["not-example.com"] } self.assertTrue(self._callFUT(request, 
raises=False)) def test_fails_when_http_to_https(self): from pyramid.exceptions import BadCSRFOrigin request = testing.DummyRequest() request.scheme = "https" request.host = "example.com" request.host_port = "443" request.referrer = "http://example.com/evil/" request.registry.settings = {} self.assertRaises(BadCSRFOrigin, self._callFUT, request) self.assertFalse(self._callFUT(request, raises=False)) def test_fails_with_nonstandard_port(self): from pyramid.exceptions import BadCSRFOrigin request = testing.DummyRequest() request.scheme = "https" request.host = "example.com:8080" request.host_port = "8080" request.referrer = "https://example.com/login/" request.registry.settings = {} self.assertRaises(BadCSRFOrigin, self._callFUT, request) self.assertFalse(self._callFUT(request, raises=False))
Test_check_csrf_origin
python
pennersr__django-allauth
tests/projects/common/idp/rest_framework/views.py
{ "start": 256, "end": 532 }
class ____(APIView): authentication_classes = [TokenAuthentication] permission_classes = [TokenPermission.has_scope(["view-resource"])] def get(self, request, *args, **kwargs): return Response({"resource": "ok", "user_email": request.user.email})
ResourceView
python
anthropics__anthropic-sdk-python
src/anthropic/types/raw_message_delta_event.py
{ "start": 432, "end": 1275 }
class ____(BaseModel): delta: Delta type: Literal["message_delta"] usage: MessageDeltaUsage """Billing and rate-limit usage. Anthropic's API bills and rate-limits by token counts, as tokens represent the underlying cost to our systems. Under the hood, the API transforms requests into a format suitable for the model. The model's output then goes through a parsing stage before becoming an API response. As a result, the token counts in `usage` will not match one-to-one with the exact visible content of an API request or response. For example, `output_tokens` will be non-zero, even for an empty string response from Claude. Total input tokens in a request is the summation of `input_tokens`, `cache_creation_input_tokens`, and `cache_read_input_tokens`. """
RawMessageDeltaEvent
python
airbytehq__airbyte
airbyte-integrations/connectors/source-iterable/source_iterable/streams.py
{ "start": 16271, "end": 16366 }
class ____(IterableExportStreamAdjustableRange): data_field = "emailSubscribe"
EmailSubscribe
python
mwaskom__seaborn
seaborn/_core/scales.py
{ "start": 16928, "end": 25210 }
class ____(ContinuousBase): """ A numeric scale supporting norms and functional transforms. """ values: tuple | str | None = None trans: str | TransFuncs | None = None # TODO Add this to deal with outliers? # outside: Literal["keep", "drop", "clip"] = "keep" _priority: ClassVar[int] = 1 def tick( self, locator: Locator | None = None, *, at: Sequence[float] | None = None, upto: int | None = None, count: int | None = None, every: float | None = None, between: tuple[float, float] | None = None, minor: int | None = None, ) -> Continuous: """ Configure the selection of ticks for the scale's axis or legend. Parameters ---------- locator : :class:`matplotlib.ticker.Locator` subclass Pre-configured matplotlib locator; other parameters will not be used. at : sequence of floats Place ticks at these specific locations (in data units). upto : int Choose "nice" locations for ticks, but do not exceed this number. count : int Choose exactly this number of ticks, bounded by `between` or axis limits. every : float Choose locations at this interval of separation (in data units). between : pair of floats Bound upper / lower ticks when using `every` or `count`. minor : int Number of unlabeled ticks to draw between labeled "major" ticks. Returns ------- scale Copy of self with new tick configuration. """ # Input checks if locator is not None and not isinstance(locator, Locator): raise TypeError( f"Tick locator must be an instance of {Locator!r}, " f"not {type(locator)!r}." ) log_base, symlog_thresh = self._parse_for_log_params(self.trans) if log_base or symlog_thresh: if count is not None and between is None: raise RuntimeError("`count` requires `between` with log transform.") if every is not None: raise RuntimeError("`every` not supported with log transform.") new = copy(self) new._tick_params = { "locator": locator, "at": at, "upto": upto, "count": count, "every": every, "between": between, "minor": minor, } return new def label( self, formatter: Formatter | None = None, *, like: str | Callable | None = None, base: int | None | Default = default, unit: str | None = None, ) -> Continuous: """ Configure the appearance of tick labels for the scale's axis or legend. Parameters ---------- formatter : :class:`matplotlib.ticker.Formatter` subclass Pre-configured formatter to use; other parameters will be ignored. like : str or callable Either a format pattern (e.g., `".2f"`), a format string with fields named `x` and/or `pos` (e.g., `"${x:.2f}"`), or a callable with a signature like `f(x: float, pos: int) -> str`. In the latter variants, `x` is passed as the tick value and `pos` is passed as the tick index. base : number Use log formatter (with scientific notation) having this value as the base. Set to `None` to override the default formatter with a log transform. unit : str or (str, str) tuple Use SI prefixes with these units (e.g., with `unit="g"`, a tick value of 5000 will appear as `5 kg`). When a tuple, the first element gives the separator between the number and unit. Returns ------- scale Copy of self with new label configuration. """ # Input checks if formatter is not None and not isinstance(formatter, Formatter): raise TypeError( f"Label formatter must be an instance of {Formatter!r}, " f"not {type(formatter)!r}" ) if like is not None and not (isinstance(like, str) or callable(like)): msg = f"`like` must be a string or callable, not {type(like).__name__}." 
raise TypeError(msg) new = copy(self) new._label_params = { "formatter": formatter, "like": like, "base": base, "unit": unit, } return new def _parse_for_log_params( self, trans: str | TransFuncs | None ) -> tuple[float | None, float | None]: log_base = symlog_thresh = None if isinstance(trans, str): m = re.match(r"^log(\d*)", trans) if m is not None: log_base = float(m[1] or 10) m = re.match(r"symlog(\d*)", trans) if m is not None: symlog_thresh = float(m[1] or 1) return log_base, symlog_thresh def _get_locators(self, locator, at, upto, count, every, between, minor): log_base, symlog_thresh = self._parse_for_log_params(self.trans) if locator is not None: major_locator = locator elif upto is not None: if log_base: major_locator = LogLocator(base=log_base, numticks=upto) else: major_locator = MaxNLocator(upto, steps=[1, 1.5, 2, 2.5, 3, 5, 10]) elif count is not None: if between is None: # This is rarely useful (unless you are setting limits) major_locator = LinearLocator(count) else: if log_base or symlog_thresh: forward, inverse = self._get_transform() lo, hi = forward(between) ticks = inverse(np.linspace(lo, hi, num=count)) else: ticks = np.linspace(*between, num=count) major_locator = FixedLocator(ticks) elif every is not None: if between is None: major_locator = MultipleLocator(every) else: lo, hi = between ticks = np.arange(lo, hi + every, every) major_locator = FixedLocator(ticks) elif at is not None: major_locator = FixedLocator(at) else: if log_base: major_locator = LogLocator(log_base) elif symlog_thresh: major_locator = SymmetricalLogLocator(linthresh=symlog_thresh, base=10) else: major_locator = AutoLocator() if minor is None: minor_locator = LogLocator(log_base, subs=None) if log_base else None else: if log_base: subs = np.linspace(0, log_base, minor + 2)[1:-1] minor_locator = LogLocator(log_base, subs=subs) else: minor_locator = AutoMinorLocator(minor + 1) return major_locator, minor_locator def _get_formatter(self, locator, formatter, like, base, unit): log_base, symlog_thresh = self._parse_for_log_params(self.trans) if base is default: if symlog_thresh: log_base = 10 base = log_base if formatter is not None: return formatter if like is not None: if isinstance(like, str): if "{x" in like or "{pos" in like: fmt = like else: fmt = f"{{x:{like}}}" formatter = StrMethodFormatter(fmt) else: formatter = FuncFormatter(like) elif base is not None: # We could add other log options if necessary formatter = LogFormatterSciNotation(base) elif unit is not None: if isinstance(unit, tuple): sep, unit = unit elif not unit: sep = "" else: sep = " " formatter = EngFormatter(unit, sep=sep) else: formatter = ScalarFormatter() return formatter @dataclass
Continuous
python
run-llama__llama_index
llama-index-packs/llama-index-packs-evaluator-benchmarker/llama_index/packs/evaluator_benchmarker/base.py
{ "start": 404, "end": 6374 }
class ____(BaseLlamaPack): """ A pack for benchmarking/evaluating your own evaluator. Args: evaluator (BaseEvaluator): The evaluator to evaluate/benchmark. eval_dataset (LabelledEvaluatorDataset | LabelledPairwiseEvaluatorDataset): The labelled evaluation dataset to run benchmarks against. """ def __init__( self, evaluator: BaseEvaluator, eval_dataset: Union[LabelledEvaluatorDataset, LabelledPairwiseEvaluatorDataset], show_progress: bool = True, ): self.evaluator = evaluator self.eval_dataset = eval_dataset self._num_examples = len(self.eval_dataset.examples) self.show_progress = show_progress self.prediction_dataset = None async def _amake_predictions( self, batch_size: int = 20, sleep_time_in_seconds: int = 1, ): """Async make predictions with evaluator.""" self.prediction_dataset: Union[ EvaluatorPredictionDataset, PairwiseEvaluatorPredictionDataset ] = await self.eval_dataset.amake_predictions_with( predictor=self.evaluator, show_progress=self.show_progress, batch_size=batch_size, sleep_time_in_seconds=sleep_time_in_seconds, ) def make_predictions(self, batch_size: int = 20, sleep_time_in_seconds: int = 1): """Sync make predictions with evaluator.""" self.prediction_dataset: Union[ EvaluatorPredictionDataset, PairwiseEvaluatorPredictionDataset ] = self.eval_dataset.make_predictions_with( predictor=self.evaluator, show_progress=self.show_progress, batch_size=batch_size, sleep_time_in_seconds=sleep_time_in_seconds, ) def _prepare_and_save_benchmark_results_pairwise_grading(self) -> pd.DataFrame: """Compute benchmark metrics for pairwise evaluation.""" inconclusive_counts = 0 agreements_with_ties = 0 agreements_without_ties = 0 ties = 0 invalid_counts = 0 for example, prediction in zip( self.eval_dataset[:], self.prediction_dataset[:] ): if prediction.invalid_prediction: invalid_counts += 1 continue # don't count inconclusive results if prediction.evaluation_source == "neither": inconclusive_counts += 1 continue if prediction.score == 0.5 or example.reference_score == 0.5: ties += 1 else: agreements_without_ties += int( example.reference_score == prediction.score ) agreements_with_ties += int(example.reference_score == prediction.score) agreement_rate_with_ties = agreements_with_ties / ( len(self.prediction_dataset[:]) - inconclusive_counts - invalid_counts ) agreement_rate_without_ties = agreements_without_ties / ( len(self.prediction_dataset[:]) - inconclusive_counts - ties - invalid_counts ) df_data = { "number_examples": [len(self.prediction_dataset[:])], "invalid_predictions": [invalid_counts], "inconclusives": [inconclusive_counts], "ties": [ties], "agreement_rate_with_ties": [agreement_rate_with_ties], "agreement_rate_without_ties": [agreement_rate_without_ties], } benchmark_df = pd.DataFrame(df_data) benchmark_df.to_csv("benchmark.csv") return benchmark_df def _prepare_and_save_benchmark_results_single_grading(self) -> pd.DataFrame: """Compute benchmark metrics for single grading evaluation.""" invalid_counts = sum([p.invalid_prediction for p in self.prediction_dataset[:]]) np_preds = np.array([p.score for p in self.prediction_dataset[:]]) np_refs = np.array([e.reference_score for e in self.eval_dataset[:]]) invalid_mask = ~np.array( [p.invalid_prediction for p in self.prediction_dataset[:]] ) # metrics mae = np.mean(np.abs(np_preds[invalid_mask] - np_refs[invalid_mask])) corr = np.corrcoef( np_preds[invalid_mask].astype(float), np_refs[invalid_mask].astype(float) )[0, 1] hamming = np.sum(np_preds[invalid_mask] == np_refs[invalid_mask]) df_data = { "number_examples": 
[len(self.prediction_dataset[:])], "invalid_predictions": [invalid_counts], "correlation": [corr], "mae": [mae], "hamming": [hamming], } benchmark_df = pd.DataFrame(df_data) benchmark_df.to_csv("benchmark.csv") return benchmark_df def _make_evaluations(self) -> pd.DataFrame: """Returns benchmark_df.""" if isinstance(self.eval_dataset, LabelledPairwiseEvaluatorDataset): return self._prepare_and_save_benchmark_results_pairwise_grading() else: return self._prepare_and_save_benchmark_results_single_grading() async def arun(self, batch_size: int = 10, sleep_time_in_seconds: int = 1): if batch_size > 10: warnings.warn( "You've set a large batch_size (>10). If using OpenAI GPT-4 as " " `judge_llm` (which is the default judge_llm)," " you may experience a RateLimitError. Previous successful eval " " responses are cached per batch. So hitting a RateLimitError" " would mean you'd lose all of the current batches successful " " GPT-4 calls." ) # make predictions if self.prediction_dataset is None: await self._amake_predictions(batch_size, sleep_time_in_seconds) # produce metrics return self._make_evaluations()
EvaluatorBenchmarkerPack
python
charliermarsh__ruff
crates/ruff_python_formatter/resources/test/fixtures/ruff/statement/class_definition.py
{ "start": 1721, "end": 1858 }
class ____( 1 # comment ): pass @dataclass # Copied from transformers.models.clip.modeling_clip.CLIPOutput with CLIP->AltCLIP
C
python
walkccc__LeetCode
solutions/346. Moving Average from Data Stream/346.py
{ "start": 0, "end": 308 }
class ____: def __init__(self, size: int): self.size = size self.sum = 0 self.q = collections.deque() def next(self, val: int) -> float: if len(self.q) == self.size: self.sum -= self.q.popleft() self.sum += val self.q.append(val) return self.sum / len(self.q)
MovingAverage
python
sphinx-doc__sphinx
sphinx/pygments_styles.py
{ "start": 317, "end": 380 }
class ____(Style): """Style without any styling."""
NoneStyle
python
airbytehq__airbyte
airbyte-integrations/connectors/source-hubspot/unit_tests/integrations/test_owners_archived.py
{ "start": 452, "end": 2614 }
class ____(HubspotTestCase): """ The test case contains a single test - this is just a sanity check, as the tested stream is identical to the `Owners` stream (which is covered by acceptance tests), except for a single url param. """ SCOPES = ["crm.objects.owners.read"] CURSOR_FIELD = "updatedAt" STREAM_NAME = "owners_archived" def request(self): return OwnersArchivedStreamRequestBuilder() @property def response_builder(self): return HubspotStreamResponseBuilder.for_stream(self.STREAM_NAME) def response(self, with_pagination: bool = False): record = ( self.record_builder(self.STREAM_NAME, FieldPath(self.CURSOR_FIELD)) .with_field(FieldPath(self.CURSOR_FIELD), self.dt_str(self.updated_at())) .with_field(FieldPath("id"), self.OBJECT_ID) ) response = self.response_builder.with_record(record) if with_pagination: response = response.with_pagination() return response @HttpMocker() def test_given_one_page_when_read_stream_oauth_then_return_records(self, http_mocker: HttpMocker): self.mock_oauth(http_mocker, self.ACCESS_TOKEN) self.mock_custom_objects(http_mocker) self.mock_response(http_mocker, self.request().build(), self.response().build()) output = self.read_from_stream(self.oauth_config(), self.STREAM_NAME, SyncMode.full_refresh) assert len(output.records) == 1 @HttpMocker() def test_given_two_pages_when_read_stream_private_token_then_return_records(self, http_mocker: HttpMocker): self.mock_custom_objects(http_mocker) self.mock_response(http_mocker, self.request().build(), self.response(with_pagination=True).build()) self.mock_response( http_mocker, self.request().with_page_token(self.response_builder.pagination_strategy.NEXT_PAGE_TOKEN).build(), self.response().build(), ) output = self.read_from_stream(self.private_token_config(self.ACCESS_TOKEN), self.STREAM_NAME, SyncMode.full_refresh) assert len(output.records) == 2
TestOwnersArchivedStream
python
pyqtgraph__pyqtgraph
pyqtgraph/ThreadsafeTimer.py
{ "start": 55, "end": 1611 }
class ____(QtCore.QObject): """ Thread-safe replacement for QTimer. """ timeout = QtCore.Signal() sigTimerStopRequested = QtCore.Signal() sigTimerStartRequested = QtCore.Signal(object) def __init__(self): QtCore.QObject.__init__(self) self.timer = QtCore.QTimer() self.timer.timeout.connect(self.timerFinished) self.timer.moveToThread(QtCore.QCoreApplication.instance().thread()) self.moveToThread(QtCore.QCoreApplication.instance().thread()) self.sigTimerStopRequested.connect(self.stop, QtCore.Qt.ConnectionType.QueuedConnection) self.sigTimerStartRequested.connect(self.start, QtCore.Qt.ConnectionType.QueuedConnection) def start(self, timeout): isGuiThread = QtCore.QThread.currentThread() == QtCore.QCoreApplication.instance().thread() if isGuiThread: #print "start timer", self, "from gui thread" self.timer.start(int(timeout)) else: #print "start timer", self, "from remote thread" self.sigTimerStartRequested.emit(timeout) def stop(self): isGuiThread = QtCore.QThread.currentThread() == QtCore.QCoreApplication.instance().thread() if isGuiThread: #print "stop timer", self, "from gui thread" self.timer.stop() else: #print "stop timer", self, "from remote thread" self.sigTimerStopRequested.emit() def timerFinished(self): self.timeout.emit()
ThreadsafeTimer
python
airbytehq__airbyte
airbyte-integrations/connectors/source-mixpanel/source_mixpanel/components.py
{ "start": 4565, "end": 5040 }
class ____(SubstreamPartitionRouter): def get_request_body_json( self, stream_state: Optional[StreamState] = None, stream_slice: Optional[StreamSlice] = None, next_page_token: Optional[Mapping[str, Any]] = None, ) -> Mapping[str, Any]: # https://developer.mixpanel.com/reference/engage-query cohort_id = stream_slice["id"] return {"filter_by_cohort": f'{{"id":{cohort_id}}}'}
CohortMembersSubstreamPartitionRouter
python
scipy__scipy
scipy/stats/_distn_infrastructure.py
{ "start": 116136, "end": 147046 }
class ____(rv_generic): """A generic discrete random variable class meant for subclassing. `rv_discrete` is a base class to construct specific distribution classes and instances for discrete random variables. It can also be used to construct an arbitrary distribution defined by a list of support points and corresponding probabilities. Parameters ---------- a : float, optional Lower bound of the support of the distribution, default: 0 b : float, optional Upper bound of the support of the distribution, default: plus infinity moment_tol : float, optional The tolerance for the generic calculation of moments. values : tuple of two array_like, optional ``(xk, pk)`` where ``xk`` are integers and ``pk`` are the non-zero probabilities between 0 and 1 with ``sum(pk) = 1``. ``xk`` and ``pk`` must have the same shape, and ``xk`` must be unique. inc : integer, optional Increment for the support of the distribution. Default is 1. (other values have not been tested) badvalue : float, optional The value in a result arrays that indicates a value that for which some argument restriction is violated, default is np.nan. name : str, optional The name of the instance. This string is used to construct the default example for distributions. longname : str, optional This string is used as part of the first line of the docstring returned when a subclass has no docstring of its own. Note: `longname` exists for backwards compatibility, do not use for new subclasses. shapes : str, optional The shape of the distribution. For example "m, n" for a distribution that takes two integers as the two shape arguments for all its methods If not provided, shape parameters will be inferred from the signatures of the private methods, ``_pmf`` and ``_cdf`` of the instance. seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional If `seed` is None (or `np.random`), the `numpy.random.RandomState` singleton is used. If `seed` is an int, a new ``RandomState`` instance is used, seeded with `seed`. If `seed` is already a ``Generator`` or ``RandomState`` instance then that instance is used. Attributes ---------- a, b : float, optional Lower/upper bound of the support of the unshifted/unscaled distribution. This value is unaffected by the `loc` and `scale` parameters. To calculate the support of the shifted/scaled distribution, use the `support` method. Methods ------- rvs pmf logpmf cdf logcdf sf logsf ppf isf moment stats entropy expect median mean std var interval __call__ support Notes ----- This class is similar to `rv_continuous`. Whether a shape parameter is valid is decided by an ``_argcheck`` method (which defaults to checking that its arguments are strictly positive.) The main differences are as follows. - The support of the distribution is a set of integers. - Instead of the probability density function, ``pdf`` (and the corresponding private ``_pdf``), this class defines the *probability mass function*, `pmf` (and the corresponding private ``_pmf``.) - There is no ``scale`` parameter. - The default implementations of methods (e.g. ``_cdf``) are not designed for distributions with support that is unbounded below (i.e. ``a=-np.inf``), so they must be overridden. To create a new discrete distribution, we would do the following: >>> from scipy.stats import rv_discrete >>> class poisson_gen(rv_discrete): ... "Poisson distribution" ... def _pmf(self, k, mu): ... 
return exp(-mu) * mu**k / factorial(k) and create an instance:: >>> poisson = poisson_gen(name="poisson") Note that above we defined the Poisson distribution in the standard form. Shifting the distribution can be done by providing the ``loc`` parameter to the methods of the instance. For example, ``poisson.pmf(x, mu, loc)`` delegates the work to ``poisson._pmf(x-loc, mu)``. **Discrete distributions from a list of probabilities** Alternatively, you can construct an arbitrary discrete rv defined on a finite set of values ``xk`` with ``Prob{X=xk} = pk`` by using the ``values`` keyword argument to the `rv_discrete` constructor. **Deepcopying / Pickling** If a distribution or frozen distribution is deepcopied (pickled/unpickled, etc.), any underlying random number generator is deepcopied with it. An implication is that if a distribution relies on the singleton RandomState before copying, it will rely on a copy of that random state after copying, and ``np.random.seed`` will no longer control the state. Examples -------- Custom made discrete distribution: >>> import numpy as np >>> from scipy import stats >>> xk = np.arange(7) >>> pk = (0.1, 0.2, 0.3, 0.1, 0.1, 0.0, 0.2) >>> custm = stats.rv_discrete(name='custm', values=(xk, pk)) >>> >>> import matplotlib.pyplot as plt >>> fig, ax = plt.subplots(1, 1) >>> ax.plot(xk, custm.pmf(xk), 'ro', ms=12, mec='r') >>> ax.vlines(xk, 0, custm.pmf(xk), colors='r', lw=4) >>> plt.show() Random number generation: >>> R = custm.rvs(size=100) """ def __new__(cls, a=0, b=inf, name=None, badvalue=None, moment_tol=1e-8, values=None, inc=1, longname=None, shapes=None, seed=None): if values is not None: # dispatch to a subclass return super().__new__(rv_sample) else: # business as usual return super().__new__(cls) def __init__(self, a=0, b=inf, name=None, badvalue=None, moment_tol=1e-8, values=None, inc=1, longname=None, shapes=None, seed=None): super().__init__(seed) # cf generic freeze self._ctor_param = dict( a=a, b=b, name=name, badvalue=badvalue, moment_tol=moment_tol, values=values, inc=inc, longname=longname, shapes=shapes, seed=seed) if badvalue is None: badvalue = nan self.badvalue = badvalue self.a = a self.b = b self.moment_tol = moment_tol self.inc = inc self.shapes = shapes if values is not None: raise ValueError("rv_discrete.__init__(..., values != None, ...)") self._construct_argparser(meths_to_inspect=[self._pmf, self._cdf], locscale_in='loc=0', # scale=1 for discrete RVs locscale_out='loc, 1') self._attach_methods() self._construct_docstrings(name, longname) def __getstate__(self): dct = self.__dict__.copy() # these methods will be remade in __setstate__ attrs = ["_parse_args", "_parse_args_stats", "_parse_args_rvs", "_cdfvec", "_ppfvec", "generic_moment"] [dct.pop(attr, None) for attr in attrs] return dct def _attach_methods(self): """Attaches dynamically created methods to the rv_discrete instance.""" self._cdfvec = vectorize(self._cdf_single, otypes='d') self.vecentropy = vectorize(self._entropy) # _attach_methods is responsible for calling _attach_argparser_methods self._attach_argparser_methods() # nin correction needs to be after we know numargs # correct nin for generic moment vectorization _vec_generic_moment = vectorize(_drv2_moment, otypes='d') _vec_generic_moment.nin = self.numargs + 2 self.generic_moment = types.MethodType(_vec_generic_moment, self) # correct nin for ppf vectorization _vppf = vectorize(_drv2_ppfsingle, otypes='d') _vppf.nin = self.numargs + 2 self._ppfvec = types.MethodType(_vppf, self) # now that self.numargs is defined, 
we can adjust nin self._cdfvec.nin = self.numargs + 1 def _construct_docstrings(self, name, longname): if name is None: name = 'Distribution' self.name = name # generate docstring for subclass instances if longname is None: if name[0] in ['aeiouAEIOU']: hstr = "An " else: hstr = "A " longname = hstr + name if sys.flags.optimize < 2: # Skip adding docstrings if interpreter is run with -OO if self.__doc__ is None: self._construct_default_doc(longname=longname, docdict=docdict_discrete, discrete='discrete') else: dct = dict(distdiscrete) self._construct_doc(docdict_discrete, dct.get(self.name)) # discrete RV do not have the scale parameter, remove it self.__doc__ = self.__doc__.replace( '\n scale : array_like, ' 'optional\n scale parameter (default=1)', '') def _updated_ctor_param(self): """Return the current version of _ctor_param, possibly updated by user. Used by freezing. Keep this in sync with the signature of __init__. """ dct = self._ctor_param.copy() dct['a'] = self.a dct['b'] = self.b dct['badvalue'] = self.badvalue dct['moment_tol'] = self.moment_tol dct['inc'] = self.inc dct['name'] = self.name dct['shapes'] = self.shapes return dct def _nonzero(self, k, *args): return floor(k) == k def _pmf(self, k, *args): return self._cdf(k, *args) - self._cdf(k-1, *args) def _logpmf(self, k, *args): with np.errstate(divide='ignore'): return log(self._pmf(k, *args)) def _logpxf(self, k, *args): # continuous distributions have PDF, discrete have PMF, but sometimes # the distinction doesn't matter. This lets us use `_logpxf` for both # discrete and continuous distributions. return self._logpmf(k, *args) def _unpack_loc_scale(self, theta): try: loc = theta[-1] scale = 1 args = tuple(theta[:-1]) except IndexError as e: raise ValueError("Not enough input arguments.") from e return loc, scale, args def _cdf_single(self, k, *args): _a, _b = self._get_support(*args) m = arange(int(_a), k+1) return np.sum(self._pmf(m, *args), axis=0) def _cdf(self, x, *args): k = floor(x).astype(np.float64) return self._cdfvec(k, *args) # generic _logcdf, _sf, _logsf, _ppf, _isf, _rvs defined in rv_generic def rvs(self, *args, **kwargs): """Random variates of given type. Parameters ---------- arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional Location parameter (default=0). size : int or tuple of ints, optional Defining number of random variates (Default is 1). Note that `size` has to be given as keyword, not as positional argument. random_state : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional If `random_state` is None (or `np.random`), the `numpy.random.RandomState` singleton is used. If `random_state` is an int, a new ``RandomState`` instance is used, seeded with `random_state`. If `random_state` is already a ``Generator`` or ``RandomState`` instance, that instance is used. Returns ------- rvs : ndarray or scalar Random variates of given `size`. """ kwargs['discrete'] = True return super().rvs(*args, **kwargs) def pmf(self, k, *args, **kwds): """Probability mass function at k of the given RV. Parameters ---------- k : array_like Quantiles. arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional Location parameter (default=0). 
Returns ------- pmf : array_like Probability mass function evaluated at k """ args, loc, _ = self._parse_args(*args, **kwds) k, loc = map(asarray, (k, loc)) args = tuple(map(asarray, args)) _a, _b = self._get_support(*args) k = asarray(k-loc) cond0 = self._argcheck(*args) cond1 = (k >= _a) & (k <= _b) if not isinstance(self, rv_sample): cond1 = cond1 & self._nonzero(k, *args) cond = cond0 & cond1 output = zeros(shape(cond), 'd') place(output, (1-cond0) + np.isnan(k), self.badvalue) if np.any(cond): goodargs = argsreduce(cond, *((k,)+args)) place(output, cond, np.clip(self._pmf(*goodargs), 0, 1)) if output.ndim == 0: return output[()] return output def logpmf(self, k, *args, **kwds): """Log of the probability mass function at k of the given RV. Parameters ---------- k : array_like Quantiles. arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional Location parameter. Default is 0. Returns ------- logpmf : array_like Log of the probability mass function evaluated at k. """ args, loc, _ = self._parse_args(*args, **kwds) k, loc = map(asarray, (k, loc)) args = tuple(map(asarray, args)) _a, _b = self._get_support(*args) k = asarray(k-loc) cond0 = self._argcheck(*args) cond1 = (k >= _a) & (k <= _b) if not isinstance(self, rv_sample): cond1 = cond1 & self._nonzero(k, *args) cond = cond0 & cond1 output = empty(shape(cond), 'd') output.fill(-inf) place(output, (1-cond0) + np.isnan(k), self.badvalue) if np.any(cond): goodargs = argsreduce(cond, *((k,)+args)) place(output, cond, self._logpmf(*goodargs)) if output.ndim == 0: return output[()] return output def cdf(self, k, *args, **kwds): """Cumulative distribution function of the given RV. Parameters ---------- k : array_like, int Quantiles. arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional Location parameter (default=0). Returns ------- cdf : ndarray Cumulative distribution function evaluated at `k`. """ args, loc, _ = self._parse_args(*args, **kwds) k, loc = map(asarray, (k, loc)) args = tuple(map(asarray, args)) _a, _b = self._get_support(*args) k = asarray(k-loc) cond0 = self._argcheck(*args) cond1 = (k >= _a) & (k < _b) cond2 = (k >= _b) cond3 = np.isneginf(k) cond = cond0 & cond1 & np.isfinite(k) output = zeros(shape(cond), 'd') place(output, cond2*(cond0 == cond0), 1.0) place(output, cond3*(cond0 == cond0), 0.0) place(output, (1-cond0) + np.isnan(k), self.badvalue) if np.any(cond): goodargs = argsreduce(cond, *((k,)+args)) place(output, cond, np.clip(self._cdf(*goodargs), 0, 1)) if output.ndim == 0: return output[()] return output def logcdf(self, k, *args, **kwds): """Log of the cumulative distribution function at k of the given RV. Parameters ---------- k : array_like, int Quantiles. arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional Location parameter (default=0). Returns ------- logcdf : array_like Log of the cumulative distribution function evaluated at k. 
""" args, loc, _ = self._parse_args(*args, **kwds) k, loc = map(asarray, (k, loc)) args = tuple(map(asarray, args)) _a, _b = self._get_support(*args) k = asarray(k-loc) cond0 = self._argcheck(*args) cond1 = (k >= _a) & (k < _b) cond2 = (k >= _b) cond = cond0 & cond1 output = empty(shape(cond), 'd') output.fill(-inf) place(output, (1-cond0) + np.isnan(k), self.badvalue) place(output, cond2*(cond0 == cond0), 0.0) if np.any(cond): goodargs = argsreduce(cond, *((k,)+args)) place(output, cond, self._logcdf(*goodargs)) if output.ndim == 0: return output[()] return output def sf(self, k, *args, **kwds): """Survival function (1 - `cdf`) at k of the given RV. Parameters ---------- k : array_like Quantiles. arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional Location parameter (default=0). Returns ------- sf : array_like Survival function evaluated at k. """ args, loc, _ = self._parse_args(*args, **kwds) k, loc = map(asarray, (k, loc)) args = tuple(map(asarray, args)) _a, _b = self._get_support(*args) k = asarray(k-loc) cond0 = self._argcheck(*args) cond1 = (k >= _a) & (k < _b) cond2 = ((k < _a) | np.isneginf(k)) & cond0 cond = cond0 & cond1 & np.isfinite(k) output = zeros(shape(cond), 'd') place(output, (1-cond0) + np.isnan(k), self.badvalue) place(output, cond2, 1.0) if np.any(cond): goodargs = argsreduce(cond, *((k,)+args)) place(output, cond, np.clip(self._sf(*goodargs), 0, 1)) if output.ndim == 0: return output[()] return output def logsf(self, k, *args, **kwds): """Log of the survival function of the given RV. Returns the log of the "survival function," defined as 1 - `cdf`, evaluated at `k`. Parameters ---------- k : array_like Quantiles. arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional Location parameter (default=0). Returns ------- logsf : ndarray Log of the survival function evaluated at `k`. """ args, loc, _ = self._parse_args(*args, **kwds) k, loc = map(asarray, (k, loc)) args = tuple(map(asarray, args)) _a, _b = self._get_support(*args) k = asarray(k-loc) cond0 = self._argcheck(*args) cond1 = (k >= _a) & (k < _b) cond2 = (k < _a) & cond0 cond = cond0 & cond1 output = empty(shape(cond), 'd') output.fill(-inf) place(output, (1-cond0) + np.isnan(k), self.badvalue) place(output, cond2, 0.0) if np.any(cond): goodargs = argsreduce(cond, *((k,)+args)) place(output, cond, self._logsf(*goodargs)) if output.ndim == 0: return output[()] return output def ppf(self, q, *args, **kwds): """Percent point function (inverse of `cdf`) at q of the given RV. Parameters ---------- q : array_like Lower tail probability. arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional Location parameter (default=0). Returns ------- k : array_like Quantile corresponding to the lower tail probability, q. Notes ----- For discrete distributions, the `cdf` is not strictly invertible. By convention, this method returns the minimum value `k` for which the `cdf` at `k` is at least `q`. There is one exception: the `ppf` of ``0`` is ``a-1``, where ``a`` is the left endpoint of the support. 
""" args, loc, _ = self._parse_args(*args, **kwds) q, loc = map(asarray, (q, loc)) args = tuple(map(asarray, args)) _a, _b = self._get_support(*args) cond0 = self._argcheck(*args) & (loc == loc) cond1 = (q > 0) & (q < 1) cond2 = (q == 0) & cond0 cond3 = (q == 1) & cond0 cond = cond0 & cond1 output = np.full(shape(cond), fill_value=self.badvalue, dtype='d') # output type 'd' to handle nin and inf place(output, cond2, argsreduce(cond2, _a-1 + loc)[0]) place(output, cond3, argsreduce(cond3, _b + loc)[0]) if np.any(cond): goodargs = argsreduce(cond, *((q,)+args+(loc,))) loc, goodargs = goodargs[-1], goodargs[:-1] place(output, cond, self._ppf(*goodargs) + loc) if output.ndim == 0: return output[()] return output def isf(self, q, *args, **kwds): """Inverse survival function (inverse of `sf`) at q of the given RV. Parameters ---------- q : array_like Upper tail probability. arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional Location parameter (default=0). Returns ------- k : ndarray or scalar Quantile corresponding to the upper tail probability, q. Notes ----- For discrete distributions, the `sf` is not strictly invertible. By convention, this method returns the minimum value `k` for which the `sf` at `k` is no greater than `q`. There is one exception: the `isf` of ``1`` is ``a-1``, where ``a`` is the left endpoint of the support. """ args, loc, _ = self._parse_args(*args, **kwds) q, loc = map(asarray, (q, loc)) args = tuple(map(asarray, args)) _a, _b = self._get_support(*args) cond0 = self._argcheck(*args) & (loc == loc) cond1 = (q > 0) & (q < 1) cond2 = (q == 1) & cond0 cond3 = (q == 0) & cond0 cond = cond0 & cond1 # same problem as with ppf; copied from ppf and changed output = np.full(shape(cond), fill_value=self.badvalue, dtype='d') # output type 'd' to handle nin and inf lower_bound = _a - 1 + loc upper_bound = _b + loc place(output, cond2, argsreduce(cond2, lower_bound)[0]) place(output, cond3, argsreduce(cond3, upper_bound)[0]) # call place only if at least 1 valid argument if np.any(cond): goodargs = argsreduce(cond, *((q,)+args+(loc,))) loc, goodargs = goodargs[-1], goodargs[:-1] # PB same as ticket 766 place(output, cond, self._isf(*goodargs) + loc) if output.ndim == 0: return output[()] return output def _entropy(self, *args): if hasattr(self, 'pk'): return stats.entropy(self.pk) else: _a, _b = self._get_support(*args) return _expect(lambda x: entr(self._pmf(x, *args)), _a, _b, self._ppf(0.5, *args), self.inc) def expect(self, func=None, args=(), loc=0, lb=None, ub=None, conditional=False, maxcount=1000, tolerance=1e-10, chunksize=32): """ Calculate expected value of a function with respect to the distribution for discrete distribution by numerical summation. Parameters ---------- func : callable, optional Function for which the expectation value is calculated. Takes only one argument. The default is the identity mapping f(k) = k. args : tuple, optional Shape parameters of the distribution. loc : float, optional Location parameter. Default is 0. lb, ub : int, optional Lower and upper bound for the summation, default is set to the support of the distribution, inclusive (``lb <= k <= ub``). conditional : bool, optional If true then the expectation is corrected by the conditional probability of the summation interval. The return value is the expectation of the function, `func`, conditional on being in the given interval (k such that ``lb <= k <= ub``). Default is False. 
maxcount : int, optional Maximal number of terms to evaluate (to avoid an endless loop for an infinite sum). Default is 1000. tolerance : float, optional Absolute tolerance for the summation. Default is 1e-10. chunksize : int, optional Iterate over the support of a distributions in chunks of this size. Default is 32. Returns ------- expect : float Expected value. Notes ----- For heavy-tailed distributions, the expected value may or may not exist, depending on the function, `func`. If it does exist, but the sum converges slowly, the accuracy of the result may be rather low. For instance, for ``zipf(4)``, accuracy for mean, variance in example is only 1e-5. increasing `maxcount` and/or `chunksize` may improve the result, but may also make zipf very slow. The function is not vectorized. """ # Although `args` is just the shape parameters, `poisson_binom` needs this # to split the vector-valued shape into a tuple of separate shapes args, _, _ = self._parse_args(*args) if func is None: def fun(x): # loc and args from outer scope return (x+loc)*self._pmf(x, *args) else: def fun(x): # loc and args from outer scope return func(x+loc)*self._pmf(x, *args) # used pmf because _pmf does not check support in randint and there # might be problems(?) with correct self.a, self.b at this stage maybe # not anymore, seems to work now with _pmf _a, _b = self._get_support(*args) if lb is None: lb = _a else: lb = lb - loc # convert bound for standardized distribution if ub is None: ub = _b else: ub = ub - loc # convert bound for standardized distribution if conditional: invfac = self.sf(lb-1, *args) - self.sf(ub, *args) else: invfac = 1.0 if isinstance(self, rv_sample): res = self._expect(fun, lb, ub) return res / invfac # iterate over the support, starting from the median x0 = self._ppf(0.5, *args) res = _expect(fun, lb, ub, x0, self.inc, maxcount, tolerance, chunksize) return res / invfac def _param_info(self): shape_info = self._shape_info() loc_info = _ShapeInfo("loc", True, (-np.inf, np.inf), (False, False)) param_info = shape_info + [loc_info] return param_info def _expect(fun, lb, ub, x0, inc, maxcount=1000, tolerance=1e-10, chunksize=32): """Helper for computing the expectation value of `fun`.""" # short-circuit if the support size is small enough if (ub - lb) <= chunksize: supp = np.arange(lb, ub+1, inc) vals = fun(supp) return np.sum(vals) # otherwise, iterate starting from x0 if x0 < lb: x0 = lb if x0 > ub: x0 = ub count, tot = 0, 0. # iterate over [x0, ub] inclusive for x in _iter_chunked(x0, ub+1, chunksize=chunksize, inc=inc): count += x.size delta = np.sum(fun(x)) tot += delta if abs(delta) < tolerance * x.size: break if count > maxcount: warnings.warn('expect(): sum did not converge', RuntimeWarning, stacklevel=3) return tot # iterate over [lb, x0) for x in _iter_chunked(x0-1, lb-1, chunksize=chunksize, inc=-inc): count += x.size delta = np.sum(fun(x)) tot += delta if abs(delta) < tolerance * x.size: break if count > maxcount: warnings.warn('expect(): sum did not converge', RuntimeWarning, stacklevel=3) break return tot def _iter_chunked(x0, x1, chunksize=4, inc=1): """Iterate from x0 to x1 in chunks of chunksize and steps inc. x0 must be finite, x1 need not be. In the latter case, the iterator is infinite. Handles both x0 < x1 and x0 > x1. In the latter case, iterates downwards (make sure to set inc < 0.) 
>>> from scipy.stats._distn_infrastructure import _iter_chunked >>> [x for x in _iter_chunked(2, 5, inc=2)] [array([2, 4])] >>> [x for x in _iter_chunked(2, 11, inc=2)] [array([2, 4, 6, 8]), array([10])] >>> [x for x in _iter_chunked(2, -5, inc=-2)] [array([ 2, 0, -2, -4])] >>> [x for x in _iter_chunked(2, -9, inc=-2)] [array([ 2, 0, -2, -4]), array([-6, -8])] """ if inc == 0: raise ValueError('Cannot increment by zero.') if chunksize <= 0: raise ValueError(f'Chunk size must be positive; got {chunksize}.') s = 1 if inc > 0 else -1 stepsize = abs(chunksize * inc) x = np.copy(x0) while (x - x1) * inc < 0: delta = min(stepsize, abs(x - x1)) step = delta * s supp = np.arange(x, x + step, inc) x += step yield supp
rv_discrete
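The `expect` implementation above evaluates E[f(X)] by summing f(k + loc) * pmf(k) over the support in chunks, starting from the median. A minimal usage sketch through the public `scipy.stats` API (assuming a standard SciPy install); the Poisson distribution is used only because its mean is known in closed form.

import numpy as np
from scipy import stats

mu = 3.0
# Default func is the identity, so this numerically sums k * pmf(k) and should
# reproduce the Poisson mean to within the summation tolerance.
assert np.isclose(stats.poisson.expect(args=(mu,)), mu)

# Explicit function, and a conditional expectation restricted to k <= 5.
second_moment = stats.poisson.expect(lambda k: k**2, args=(mu,))
tail_mean = stats.poisson.expect(args=(mu,), ub=5, conditional=True)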
python
chroma-core__chroma
chromadb/test/api/test_schema.py
{ "start": 1192, "end": 2123 }
class ____(EmbeddingFunction[List[str]]): """Mock embedding function for testing.""" def __init__(self, model_name: str = "mock_model"): self._model_name = model_name def __call__(self, input: List[str]) -> Embeddings: import numpy as np # Return mock embeddings (3-dimensional) return [np.array([1.0, 2.0, 3.0], dtype=np.float32) for _ in input] @staticmethod def name() -> str: return "mock_embedding" def get_config(self) -> Dict[str, Any]: return {"model_name": self._model_name} @staticmethod def build_from_config(config: Dict[str, Any]) -> "MockEmbeddingFunction": return MockEmbeddingFunction(config.get("model_name", "mock_model")) def default_space(self) -> str: # type: ignore return "cosine" def supported_spaces(self) -> List[str]: # type: ignore return ["cosine", "l2", "ip"]
MockEmbeddingFunction
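The mock embedding function above exercises the configurable-embedding-function protocol: `get_config` / `build_from_config` must round-trip, and `__call__` must return one vector per input. A small sketch of that round-trip, using the class name given by the target field:

ef = MockEmbeddingFunction(model_name="test_model")

# Persist-and-rebuild round-trip used when a collection is reloaded.
rebuilt = MockEmbeddingFunction.build_from_config(ef.get_config())
assert rebuilt.get_config() == {"model_name": "test_model"}

# One 3-dimensional vector per input string.
embeddings = rebuilt(["hello", "world"])
assert len(embeddings) == 2 and embeddings[0].shape == (3,)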
python
astropy__astropy
astropy/io/fits/util.py
{ "start": 641, "end": 29352 }
class ____: """ Mixin class that provides services by which objects can register listeners to changes on that object. All methods provided by this class are underscored, since this is intended for internal use to communicate between classes in a generic way, and is not machinery that should be exposed to users of the classes involved. Use the ``_add_listener`` method to register a listener on an instance of the notifier. This registers the listener with a weak reference, so if no other references to the listener exist it is automatically dropped from the list and does not need to be manually removed. Call the ``_notify`` method on the notifier to update all listeners upon changes. ``_notify('change_type', *args, **kwargs)`` results in calling ``listener._update_change_type(*args, **kwargs)`` on all listeners subscribed to that notifier. If a particular listener does not have the appropriate update method it is ignored. Examples -------- >>> class Widget(NotifierMixin): ... state = 1 ... def __init__(self, name): ... self.name = name ... def update_state(self): ... self.state += 1 ... self._notify('widget_state_changed', self) ... >>> class WidgetListener: ... def _update_widget_state_changed(self, widget): ... print('Widget {0} changed state to {1}'.format( ... widget.name, widget.state)) ... >>> widget = Widget('fred') >>> listener = WidgetListener() >>> widget._add_listener(listener) >>> widget.update_state() Widget fred changed state to 2 """ _listeners = None def _add_listener(self, listener): """ Add an object to the list of listeners to notify of changes to this object. This adds a weakref to the list of listeners that is removed from the listeners list when the listener has no other references to it. """ if self._listeners is None: self._listeners = weakref.WeakValueDictionary() self._listeners[id(listener)] = listener def _remove_listener(self, listener): """ Removes the specified listener from the listeners list. This relies on object identity (i.e. the ``is`` operator). """ if self._listeners is None: return with suppress(KeyError): del self._listeners[id(listener)] def _notify(self, notification, *args, **kwargs): """ Notify all listeners of some particular state change by calling their ``_update_<notification>`` method with the given ``*args`` and ``**kwargs``. The notification does not by default include the object that actually changed (``self``), but it certainly may if required. """ if self._listeners is None: return method_name = f"_update_{notification}" for listener in self._listeners.valuerefs(): # Use valuerefs instead of itervaluerefs; see # https://github.com/astropy/astropy/issues/4015 listener = listener() # dereference weakref if listener is None: continue if hasattr(listener, method_name): method = getattr(listener, method_name) if callable(method): method(*args, **kwargs) def __getstate__(self): """ Exclude listeners when saving the listener's state, since they may be ephemeral. """ # TODO: This hasn't come up often, but if anyone needs to pickle HDU # objects it will be necessary when HDU objects' states are restored to # re-register themselves as listeners on their new column instances. try: state = super().__getstate__() except AttributeError: # Chances are the super object doesn't have a getstate state = self.__dict__.copy() state["_listeners"] = None return state def first(iterable): """ Returns the first item returned by iterating over an iterable object. 
Examples -------- >>> a = [1, 2, 3] >>> first(a) 1 """ return next(iter(iterable)) def itersubclasses(cls, _seen=None): """ Generator over all subclasses of a given class, in depth first order. >>> class A: pass >>> class B(A): pass >>> class C(A): pass >>> class D(B,C): pass >>> class E(D): pass >>> >>> for cls in itersubclasses(A): ... print(cls.__name__) B D E C >>> # get ALL classes currently defined >>> [cls.__name__ for cls in itersubclasses(object)] [...'tuple', ...'type', ...] From http://code.activestate.com/recipes/576949/ """ if _seen is None: _seen = set() try: subs = cls.__subclasses__() except TypeError: # fails only when cls is type subs = cls.__subclasses__(cls) for sub in sorted(subs, key=operator.attrgetter("__name__")): if sub not in _seen: _seen.add(sub) yield sub yield from itersubclasses(sub, _seen) def ignore_sigint(func): """ This decorator registers a custom SIGINT handler to catch and ignore SIGINT until the wrapped function is completed. """ @wraps(func) def wrapped(*args, **kwargs): # Get the name of the current thread and determine if this is a single # threaded application curr_thread = threading.current_thread() single_thread = ( threading.active_count() == 1 and curr_thread.name == "MainThread" ) class SigintHandler: def __init__(self): self.sigint_received = False def __call__(self, signum, frame): warnings.warn( f"KeyboardInterrupt ignored until {func.__name__} is complete!", AstropyUserWarning, ) self.sigint_received = True sigint_handler = SigintHandler() # Define new signal interput handler if single_thread: # Install new handler old_handler = signal.signal(signal.SIGINT, sigint_handler) try: func(*args, **kwargs) finally: if single_thread: if old_handler is not None: signal.signal(signal.SIGINT, old_handler) else: signal.signal(signal.SIGINT, signal.SIG_DFL) if sigint_handler.sigint_received: raise KeyboardInterrupt return wrapped def encode_ascii(s): if isinstance(s, str): return s.encode("ascii") elif isinstance(s, np.ndarray) and issubclass(s.dtype.type, np.str_): ns = np.char.encode(s, "ascii").view(type(s)) if ns.dtype.itemsize != s.dtype.itemsize / 4: ns = ns.astype((np.bytes_, s.dtype.itemsize / 4)) return ns elif isinstance(s, np.ndarray) and not issubclass(s.dtype.type, np.bytes_): raise TypeError("string operation on non-string array") return s def decode_ascii(s): if isinstance(s, bytes): try: return s.decode("ascii") except UnicodeDecodeError: warnings.warn( "non-ASCII characters are present in the FITS " 'file header and have been replaced by "?" 
characters', AstropyUserWarning, ) s = s.decode("ascii", errors="replace") return s.replace("\ufffd", "?") elif isinstance(s, np.ndarray) and issubclass(s.dtype.type, np.bytes_): # np.char.encode/decode annoyingly don't preserve the type of the # array, hence the view() call # It also doesn't necessarily preserve widths of the strings, # hence the astype() if s.size == 0: # Numpy apparently also has a bug that if a string array is # empty calling np.char.decode on it returns an empty float64 # array : https://github.com/numpy/numpy/issues/13156 dt = s.dtype.str.replace("S", "U") ns = np.array([], dtype=dt).view(type(s)) else: ns = np.char.decode(s, "ascii").view(type(s)) if ns.dtype.itemsize / 4 != s.dtype.itemsize: ns = ns.astype((np.str_, s.dtype.itemsize)) return ns elif isinstance(s, np.ndarray) and not issubclass(s.dtype.type, np.str_): # Don't silently pass through on non-string arrays; we don't want # to hide errors where things that are not stringy are attempting # to be decoded raise TypeError("string operation on non-string array") return s def isreadable(f): """ Returns True if the file-like object can be read from. This is a common- sense approximation of io.IOBase.readable. """ if hasattr(f, "readable"): return f.readable() if hasattr(f, "closed") and f.closed: # This mimics the behavior of io.IOBase.readable raise ValueError("I/O operation on closed file") if not hasattr(f, "read"): return False # Not closed, has a 'read()' method, and either has no known mode or a # readable mode--should be good enough to assume 'readable' return (not hasattr(f, "mode")) or any(c in f.mode for c in "r+") def iswritable(f): """ Returns True if the file-like object can be written to. This is a common- sense approximation of io.IOBase.writable. """ if hasattr(f, "writable"): return f.writable() if hasattr(f, "closed") and f.closed: # This mimics the behavior of io.IOBase.writable raise ValueError("I/O operation on closed file") if not hasattr(f, "write"): return False # Note closed, has a 'write()' method, and either has no known mode or a # mode that supports writing--should be good enough to assume 'writable' return (not hasattr(f, "mode")) or any(c in f.mode for c in "wa+") def isfile(f): """ Returns True if the given object represents an OS-level file (that is, ``isinstance(f, file)``). This also returns True if the given object is higher level wrapper on top of a FileIO object, such as a TextIOWrapper. """ if isinstance(f, io.FileIO): return True elif hasattr(f, "buffer"): return isfile(f.buffer) elif hasattr(f, "raw"): return isfile(f.raw) return False def fileobj_name(f): """ Returns the 'name' of file-like object *f*, if it has anything that could be called its name. Otherwise f's class or type is returned. If f is a string f itself is returned. 
""" if isinstance(f, (str, bytes)): return f elif isinstance(f, gzip.GzipFile): # The .name attribute on GzipFiles does not always represent the name # of the file being read/written--it can also represent the original # name of the file being compressed # See the documentation at # https://docs.python.org/3/library/gzip.html#gzip.GzipFile # As such, for gzip files only return the name of the underlying # fileobj, if it exists return fileobj_name(f.fileobj) elif hasattr(f, "name"): return f.name elif hasattr(f, "filename"): return f.filename elif hasattr(f, "__class__"): return str(f.__class__) else: return str(type(f)) def fileobj_closed(f): """ Returns True if the given file-like object is closed or if *f* is a string (and assumed to be a pathname). Returns False for all other types of objects, under the assumption that they are file-like objects with no sense of a 'closed' state. """ if isinstance(f, path_like): return True if hasattr(f, "closed"): return f.closed elif hasattr(f, "fileobj") and hasattr(f.fileobj, "closed"): return f.fileobj.closed elif hasattr(f, "fp") and hasattr(f.fp, "closed"): return f.fp.closed else: return False def fileobj_mode(f): """ Returns the 'mode' string of a file-like object if such a thing exists. Otherwise returns None. """ # Go from most to least specific--for example gzip objects have a 'mode' # attribute, but it's not analogous to the file.mode attribute # gzip.GzipFile -like if hasattr(f, "fileobj") and hasattr(f.fileobj, "mode"): fileobj = f.fileobj # astropy.io.fits._File -like, doesn't need additional checks because it's # already validated elif hasattr(f, "fileobj_mode"): return f.fileobj_mode # PIL-Image -like investigate the fp (filebuffer) elif hasattr(f, "fp") and hasattr(f.fp, "mode"): fileobj = f.fp # FILEIO -like (normal open(...)), keep as is. elif hasattr(f, "mode"): fileobj = f # Doesn't look like a file-like object, for example strings, urls or paths. else: return None return _fileobj_normalize_mode(fileobj) def _fileobj_normalize_mode(f): """Takes care of some corner cases in Python where the mode string is either oddly formatted or does not truly represent the file mode. """ mode = f.mode # Special case: Gzip modes: if isinstance(f, gzip.GzipFile): # GzipFiles can be either readonly or writeonly if mode == gzip.READ: return "rb" elif mode == gzip.WRITE: return "wb" else: return None # This shouldn't happen? # Sometimes Python can produce modes like 'r+b' which will be normalized # here to 'rb+' if "+" in mode: mode = mode.replace("+", "") mode += "+" return mode def fileobj_is_binary(f): """ Returns True if the give file or file-like object has a file open in binary mode. When in doubt, returns True by default. """ # This is kind of a hack for this to work correctly with _File objects, # which, for the time being, are *always* binary if hasattr(f, "binary"): return f.binary if isinstance(f, io.TextIOBase): return False mode = fileobj_mode(f) if mode: return "b" in mode else: return True def translate(s, table, deletechars): if deletechars: table = table.copy() for c in deletechars: table[ord(c)] = None return s.translate(table) def fill(text, width, **kwargs): """ Like :func:`textwrap.wrap` but preserves existing paragraphs which :func:`textwrap.wrap` does not otherwise handle well. Also handles section headers. 
""" paragraphs = text.split("\n\n") def maybe_fill(t): if all(len(line) < width for line in t.splitlines()): return t else: return textwrap.fill(t, width, **kwargs) return "\n\n".join(maybe_fill(p) for p in paragraphs) # On MacOS X 10.8 and earlier, there is a bug that causes numpy.fromfile to # fail when reading over 2Gb of data. If we detect these versions of MacOS X, # we can instead read the data in chunks. To avoid performance penalties at # import time, we defer the setting of this global variable until the first # time it is needed. CHUNKED_FROMFILE = None def _array_from_file(infile, dtype, count): """Create a numpy array from a file or a file-like object.""" if isfile(infile): global CHUNKED_FROMFILE if CHUNKED_FROMFILE is None: if sys.platform == "darwin" and Version(platform.mac_ver()[0]) < Version( "10.9" ): CHUNKED_FROMFILE = True else: CHUNKED_FROMFILE = False if CHUNKED_FROMFILE: chunk_size = int(1024**3 / dtype.itemsize) # 1Gb to be safe if count < chunk_size: return np.fromfile(infile, dtype=dtype, count=count) else: array = np.empty(count, dtype=dtype) for beg in range(0, count, chunk_size): end = min(count, beg + chunk_size) array[beg:end] = np.fromfile(infile, dtype=dtype, count=end - beg) return array else: return np.fromfile(infile, dtype=dtype, count=count) else: # treat as file-like object with "read" method; this includes gzip file # objects, because numpy.fromfile just reads the compressed bytes from # their underlying file object, instead of the decompressed bytes read_size = np.dtype(dtype).itemsize * count s = infile.read(read_size) array = np.ndarray(buffer=s, dtype=dtype, shape=(count,)) # copy is needed because np.frombuffer returns a read-only view of the # underlying buffer return array.copy() _OSX_WRITE_LIMIT = (2**32) - 1 _WIN_WRITE_LIMIT = (2**31) - 1 def _array_to_file(arr, outfile): """ Write a numpy array to a file or a file-like object. Parameters ---------- arr : ndarray The Numpy array to write. outfile : file-like A file-like object such as a Python file object, an `io.BytesIO`, or anything else with a ``write`` method. The file object must support the buffer interface in its ``write``. If writing directly to an on-disk file this delegates directly to `ndarray.tofile`. Otherwise a slower Python implementation is used. """ try: seekable = outfile.seekable() except AttributeError: seekable = False if isfile(outfile) and seekable: write = lambda a, f: a.tofile(f) else: write = _array_to_file_like # Implements a workaround for a bug deep in OSX's stdlib file writing # functions; on 64-bit OSX it is not possible to correctly write a number # of bytes greater than 2 ** 32 and divisible by 4096 (or possibly 8192-- # whatever the default blocksize for the filesystem is). # This issue should have a workaround in Numpy too, but hasn't been # implemented there yet: https://github.com/astropy/astropy/issues/839 # # Apparently Windows has its own fwrite bug: # https://github.com/numpy/numpy/issues/2256 if ( sys.platform == "darwin" and arr.nbytes >= _OSX_WRITE_LIMIT + 1 and arr.nbytes % 4096 == 0 ): # chunksize is a count of elements in the array, not bytes chunksize = _OSX_WRITE_LIMIT // arr.itemsize elif sys.platform.startswith("win"): chunksize = _WIN_WRITE_LIMIT // arr.itemsize else: # Just pass the whole array to the write routine return write(arr, outfile) # Write one chunk at a time for systems whose fwrite chokes on large # writes. 
idx = 0 arr = arr.view(np.ndarray).flatten() while idx < arr.nbytes: write(arr[idx : idx + chunksize], outfile) idx += chunksize def _array_to_file_like(arr, fileobj): """ Write a `~numpy.ndarray` to a file-like object (which is not supported by `numpy.ndarray.tofile`). """ # If the array is empty, we can simply take a shortcut and return since # there is nothing to write. if len(arr) == 0: return if arr.flags.contiguous: # It suffices to just pass the underlying buffer directly to the # fileobj's write (assuming it supports the buffer interface). If # it does not have the buffer interface, a TypeError should be returned # in which case we can fall back to the other methods. try: fileobj.write(arr.data) except TypeError: pass else: return if hasattr(np, "nditer"): # nditer version for non-contiguous arrays for item in np.nditer(arr, order="C"): fileobj.write(item.tobytes()) else: # Slower version for Numpy versions without nditer; # The problem with flatiter is it doesn't preserve the original # byteorder byteorder = arr.dtype.byteorder if (sys.byteorder == "little" and byteorder == ">") or ( sys.byteorder == "big" and byteorder == "<" ): for item in arr.flat: fileobj.write(item.byteswap().tobytes()) else: for item in arr.flat: fileobj.write(item.tobytes()) def _write_string(f, s): """ Write a string to a file, encoding to ASCII if the file is open in binary mode, or decoding if the file is open in text mode. """ # Assume if the file object doesn't have a specific mode, that the mode is # binary binmode = fileobj_is_binary(f) if binmode and isinstance(s, str): s = encode_ascii(s) elif not binmode and not isinstance(f, str): s = decode_ascii(s) f.write(s) def _convert_array(array, dtype): """ Converts an array to a new dtype--if the itemsize of the new dtype is the same as the old dtype and both types are not numeric, a view is returned. Otherwise a new array must be created. """ if array.dtype == dtype: return array elif array.dtype.itemsize == dtype.itemsize and not ( np.issubdtype(array.dtype, np.number) and np.issubdtype(dtype, np.number) ): # Includes a special case when both dtypes are at least numeric to # account for old Trac ticket 218 (now inaccessible). return array.view(dtype) else: return array.astype(dtype) def _pseudo_zero(dtype): """ Given a numpy dtype, finds its "zero" point, which is exactly in the middle of its range. """ # special case for int8 if dtype.kind == "i" and dtype.itemsize == 1: return -128 assert dtype.kind == "u" return 1 << (dtype.itemsize * 8 - 1) def _is_pseudo_integer(dtype): return (dtype.kind == "u" and dtype.itemsize >= 2) or ( dtype.kind == "i" and dtype.itemsize == 1 ) def _is_int(val): return isinstance(val, all_integer_types) def _str_to_num(val): """Converts a given string to either an int or a float if necessary.""" try: num = int(val) except ValueError: # If this fails then an exception should be raised anyways num = float(val) return num def _words_group(s, width, first_width=None): """ Split a long string into parts where each part is no longer than ``width`` and no word is cut into two pieces. But if there are any single words which are longer than ``width``, then they will be split in the middle of the word. If the width of the first part should be smaller, e.g., because of a long HIERARCH header key, one can pass in ``first_width``. 
""" words = [] slen = len(s) # appending one blank at the end always ensures that the "last" blank # is beyond the end of the string arr = np.frombuffer(s.encode("utf8") + b" ", dtype="S1") # locations of the blanks blank_loc = np.nonzero(arr == b" ")[0] current_width = width if first_width is None else first_width offset = 0 xoffset = 0 while True: try: loc = np.nonzero(blank_loc >= current_width + offset)[0][0] except IndexError: loc = len(blank_loc) if loc > 0: offset = blank_loc[loc - 1] + 1 else: offset = -1 # check for one word longer than strlen, break in the middle if offset <= xoffset: offset = min(xoffset + current_width, slen) # collect the pieces in a list words.append(s[xoffset:offset]) if offset >= slen: break xoffset = offset current_width = width return words def _tmp_name(input): """ Create a temporary file name which should not already exist. Use the directory of the input file as the base name of the mkstemp() output. """ if input is not None: input = os.path.dirname(input) f, fn = tempfile.mkstemp(dir=input) os.close(f) return fn def _get_array_mmap(array): """ If the array has an mmap.mmap at base of its base chain, return the mmap object; otherwise return None. """ if isinstance(array, mmap.mmap): return array base = array while hasattr(base, "base") and base.base is not None: if isinstance(base.base, mmap.mmap): return base.base base = base.base @contextmanager def _free_space_check(hdulist, dirname=None): try: yield except OSError as exc: error_message = "" if not isinstance(hdulist, list): hdulist = [hdulist] if dirname is None: dirname = os.path.dirname(hdulist._file.name) if os.path.isdir(dirname): free_space = data.get_free_space_in_dir(dirname) hdulist_size = sum(hdu.size for hdu in hdulist) if free_space < hdulist_size: error_message = ( f"Not enough space on disk: requested {hdulist_size}, " f"available {free_space}. " ) for hdu in hdulist: hdu._close() raise OSError(error_message + str(exc)) def _extract_number(value, default): """ Attempts to extract an integer number from the given value. If the extraction fails, the value of the 'default' argument is returned. """ try: # The _str_to_num method converts the value to string/float # so we need to perform one additional conversion to int on top return int(_str_to_num(value)) except (TypeError, ValueError): return default def get_testdata_filepath(filename): """ Return a string representing the path to the file requested from the io.fits test data set. .. versionadded:: 2.0.3 Parameters ---------- filename : str The filename of the test data file. Returns ------- filepath : str The path to the requested file. """ return data.get_pkg_data_filename(f"io/fits/tests/data/{filename}", "astropy") def _rstrip_inplace(array): """ Performs an in-place rstrip operation on string arrays. This is necessary since the built-in `np.char.rstrip` in Numpy does not perform an in-place calculation. """ # The following implementation convert the string to unsigned integers of # the right length. Trailing spaces (which are represented as 32) are then # converted to null characters (represented as zeros). To avoid creating # large temporary mask arrays, we loop over chunks (attempting to do that # on a 1-D version of the array; large memory may still be needed in the # unlikely case that a string array has small first dimension and cannot # be represented as a contiguous 1-D array in memory). 
dt = array.dtype if dt.kind not in "SU": raise TypeError("This function can only be used on string arrays") # View the array as appropriate integers. The last dimension will # equal the number of characters in each string. bpc = 1 if dt.kind == "S" else 4 dt_int = f"({dt.itemsize // bpc},){dt.byteorder}u{bpc}" b = array.view(dt_int, np.ndarray) # For optimal speed, work in chunks of the internal ufunc buffer size. bufsize = np.getbufsize() # Attempt to have the strings as a 1-D array to give the chunk known size. # Note: the code will work if this fails; the chunks will just be larger. if b.ndim > 2: try: b.shape = -1, b.shape[-1] except AttributeError: # can occur for non-contiguous arrays pass for j in range(0, b.shape[0], bufsize): c = b[j : j + bufsize] # Mask which will tell whether we're in a sequence of trailing spaces. mask = np.ones(c.shape[:-1], dtype=bool) # Loop over the characters in the strings, in reverse order. We process # the i-th character of all strings in the chunk at the same time. If # the character is 32, this corresponds to a space, and we then change # this to 0. We then construct a new mask to find rows where the # i-th character is 0 (null) and the i-1-th is 32 (space) and repeat. for i in range(-1, -c.shape[-1], -1): mask &= c[..., i] == 32 c[..., i][mask] = 0 mask = c[..., i] == 0 return array def _is_dask_array(data): """Check whether data is a dask array.""" if not HAS_DASK or not hasattr(data, "compute"): return False from dask.array import Array return isinstance(data, Array)
NotifierMixin
python
django__django
tests/delete/models.py
{ "start": 6732, "end": 6863 }
class ____(models.Model): b1 = models.ForeignKey(B1, models.RESTRICT) b2 = models.ForeignKey(B2, models.CASCADE)
DeleteBottom
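The two foreign keys above behave very differently on delete: RESTRICT blocks deletion of the referenced B1 while a DeleteBottom row still points at it, whereas CASCADE removes the DeleteBottom row together with its B2. A rough sketch of that difference, assuming B1 and B2 are plain models defined elsewhere in the same test module (their fields are not shown here):

from django.db.models import RestrictedError

b1, b2 = B1.objects.create(), B2.objects.create()
bottom = DeleteBottom.objects.create(b1=b1, b2=b2)

try:
    b1.delete()  # RESTRICT: refused while `bottom` still references b1
except RestrictedError:
    pass

b2.delete()  # CASCADE: also deletes `bottom`
assert not DeleteBottom.objects.filter(pk=bottom.pk).exists()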
python
walkccc__LeetCode
solutions/1259. Handshakes That Don't Cross/1259.py
{ "start": 0, "end": 387 }
class ____: def numberOfWays(self, numPeople: int) -> int: MOD = 1_000_000_007 # dp[i] := the number of ways i handshakes could occur s.t. none of the # handshakes cross dp = [1] + [0] * (numPeople // 2) for i in range(1, numPeople // 2 + 1): for j in range(i): dp[i] += dp[j] * dp[i - 1 - j] dp[i] %= MOD return dp[numPeople // 2]
Solution
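The recurrence dp[i] = sum(dp[j] * dp[i - 1 - j]) is the Catalan recurrence, so the answer for numPeople people is the (numPeople / 2)-th Catalan number taken modulo 1_000_000_007. A quick cross-check against the closed form (illustrative only, instantiating the class under the name given by the target field):

from math import comb

def catalan(n: int) -> int:
    # nth Catalan number: C(2n, n) / (n + 1)
    return comb(2 * n, n) // (n + 1)

sol = Solution()
for people in (2, 4, 6, 8):
    assert sol.numberOfWays(people) == catalan(people // 2) % 1_000_000_007
# e.g. 6 people -> catalan(3) = 5 non-crossing pairings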
python
pandas-dev__pandas
pandas/tests/arrays/sparse/test_constructors.py
{ "start": 227, "end": 10573 }
class ____: def test_constructor_dtype(self): arr = SparseArray([np.nan, 1, 2, np.nan]) assert arr.dtype == SparseDtype(np.float64, np.nan) assert arr.dtype.subtype == np.float64 assert np.isnan(arr.fill_value) arr = SparseArray([np.nan, 1, 2, np.nan], fill_value=0) assert arr.dtype == SparseDtype(np.float64, 0) assert arr.fill_value == 0 arr = SparseArray([0, 1, 2, 4], dtype=np.float64) assert arr.dtype == SparseDtype(np.float64, np.nan) assert np.isnan(arr.fill_value) arr = SparseArray([0, 1, 2, 4], dtype=np.int64) assert arr.dtype == SparseDtype(np.int64, 0) assert arr.fill_value == 0 arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=np.int64) assert arr.dtype == SparseDtype(np.int64, 0) assert arr.fill_value == 0 arr = SparseArray([0, 1, 2, 4], dtype=None) assert arr.dtype == SparseDtype(np.int64, 0) assert arr.fill_value == 0 arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=None) assert arr.dtype == SparseDtype(np.int64, 0) assert arr.fill_value == 0 def test_constructor_dtype_str(self): result = SparseArray([1, 2, 3], dtype="int") expected = SparseArray([1, 2, 3], dtype=int) tm.assert_sp_array_equal(result, expected) def test_constructor_sparse_dtype(self): result = SparseArray([1, 0, 0, 1], dtype=SparseDtype("int64", -1)) expected = SparseArray([1, 0, 0, 1], fill_value=-1, dtype=np.int64) tm.assert_sp_array_equal(result, expected) assert result.sp_values.dtype == np.dtype("int64") def test_constructor_sparse_dtype_str(self): result = SparseArray([1, 0, 0, 1], dtype="Sparse[int32]") expected = SparseArray([1, 0, 0, 1], dtype=np.int32) tm.assert_sp_array_equal(result, expected) assert result.sp_values.dtype == np.dtype("int32") def test_constructor_object_dtype(self): # GH#11856 arr = SparseArray(["A", "A", np.nan, "B"], dtype=object) assert arr.dtype == SparseDtype(object) assert np.isnan(arr.fill_value) arr = SparseArray(["A", "A", np.nan, "B"], dtype=object, fill_value="A") assert arr.dtype == SparseDtype(object, "A") assert arr.fill_value == "A" def test_constructor_object_dtype_bool_fill(self): # GH#17574 data = [False, 0, 100.0, 0.0] arr = SparseArray(data, dtype=object, fill_value=False) assert arr.dtype == SparseDtype(object, False) assert arr.fill_value is False arr_expected = np.array(data, dtype=object) it = ( type(x) == type(y) and x == y for x, y in zip(arr, arr_expected, strict=True) ) assert np.fromiter(it, dtype=np.bool_).all() @pytest.mark.parametrize("dtype", [SparseDtype(int, 0), int]) def test_constructor_na_dtype(self, dtype): with pytest.raises(ValueError, match="Cannot convert"): SparseArray([0, 1, np.nan], dtype=dtype) def test_constructor_warns_when_losing_timezone(self): # GH#32501 warn when losing timezone information dti = pd.date_range("2016-01-01", periods=3, tz="US/Pacific") expected = SparseArray(np.asarray(dti, dtype="datetime64[ns]")) msg = "loses timezone information" with tm.assert_produces_warning(UserWarning, match=msg): result = SparseArray(dti) tm.assert_sp_array_equal(result, expected) with tm.assert_produces_warning(UserWarning, match=msg): result = SparseArray(pd.Series(dti)) tm.assert_sp_array_equal(result, expected) def test_constructor_spindex_dtype(self): arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2])) # TODO: actionable? 
# XXX: Behavior change: specifying SparseIndex no longer changes the # fill_value expected = SparseArray([0, 1, 2, 0], kind="integer") tm.assert_sp_array_equal(arr, expected) assert arr.dtype == SparseDtype(np.int64) assert arr.fill_value == 0 arr = SparseArray( data=[1, 2, 3], sparse_index=IntIndex(4, [1, 2, 3]), dtype=np.int64, fill_value=0, ) exp = SparseArray([0, 1, 2, 3], dtype=np.int64, fill_value=0) tm.assert_sp_array_equal(arr, exp) assert arr.dtype == SparseDtype(np.int64) assert arr.fill_value == 0 arr = SparseArray( data=[1, 2], sparse_index=IntIndex(4, [1, 2]), fill_value=0, dtype=np.int64 ) exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=np.int64) tm.assert_sp_array_equal(arr, exp) assert arr.dtype == SparseDtype(np.int64) assert arr.fill_value == 0 arr = SparseArray( data=[1, 2, 3], sparse_index=IntIndex(4, [1, 2, 3]), dtype=None, fill_value=0, ) exp = SparseArray([0, 1, 2, 3], dtype=None) tm.assert_sp_array_equal(arr, exp) assert arr.dtype == SparseDtype(np.int64) assert arr.fill_value == 0 @pytest.mark.parametrize("sparse_index", [None, IntIndex(1, [0])]) def test_constructor_spindex_dtype_scalar(self, sparse_index): # scalar input msg = "Cannot construct SparseArray from scalar data. Pass a sequence instead" with pytest.raises(TypeError, match=msg): SparseArray(data=1, sparse_index=sparse_index, dtype=None) with pytest.raises(TypeError, match=msg): SparseArray(data=1, sparse_index=IntIndex(1, [0]), dtype=None) def test_constructor_spindex_dtype_scalar_broadcasts(self): arr = SparseArray( data=[1, 2], sparse_index=IntIndex(4, [1, 2]), fill_value=0, dtype=None ) exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=None) tm.assert_sp_array_equal(arr, exp) assert arr.dtype == SparseDtype(np.int64) assert arr.fill_value == 0 @pytest.mark.parametrize( "data, fill_value", [ (np.array([1, 2]), 0), (np.array([1.0, 2.0]), np.nan), ([True, False], False), ([pd.Timestamp("2017-01-01")], pd.NaT), ], ) def test_constructor_inferred_fill_value(self, data, fill_value): result = SparseArray(data).fill_value if isna(fill_value): assert isna(result) else: assert result == fill_value @pytest.mark.parametrize("format", ["coo", "csc", "csr"]) @pytest.mark.parametrize("size", [0, 10]) def test_from_spmatrix(self, size, format): sp_sparse = pytest.importorskip("scipy.sparse") mat = sp_sparse.random(size, 1, density=0.5, format=format) result = SparseArray.from_spmatrix(mat) result = np.asarray(result) expected = mat.toarray().ravel() tm.assert_numpy_array_equal(result, expected) @pytest.mark.parametrize("format", ["coo", "csc", "csr"]) def test_from_spmatrix_including_explicit_zero(self, format): sp_sparse = pytest.importorskip("scipy.sparse") mat = sp_sparse.random(10, 1, density=0.5, format=format) mat.data[0] = 0 result = SparseArray.from_spmatrix(mat) result = np.asarray(result) expected = mat.toarray().ravel() tm.assert_numpy_array_equal(result, expected) def test_from_spmatrix_raises(self): sp_sparse = pytest.importorskip("scipy.sparse") mat = sp_sparse.eye(5, 4, format="csc") with pytest.raises(ValueError, match="not '4'"): SparseArray.from_spmatrix(mat) def test_constructor_from_too_large_array(self): with pytest.raises(TypeError, match="expected dimension <= 1 data"): SparseArray(np.arange(10).reshape((2, 5))) def test_constructor_from_sparse(self): zarr = SparseArray([0, 0, 1, 2, 3, 0, 4, 5, 0, 6], fill_value=0) res = SparseArray(zarr) assert res.fill_value == 0 tm.assert_almost_equal(res.sp_values, zarr.sp_values) def test_constructor_copy(self): arr_data = np.array([np.nan, np.nan, 
1, 2, 3, np.nan, 4, 5, np.nan, 6]) arr = SparseArray(arr_data) cp = SparseArray(arr, copy=True) cp.sp_values[:3] = 0 assert not (arr.sp_values[:3] == 0).any() not_copy = SparseArray(arr) not_copy.sp_values[:3] = 0 assert (arr.sp_values[:3] == 0).all() def test_constructor_bool(self): # GH#10648 data = np.array([False, False, True, True, False, False]) arr = SparseArray(data, fill_value=False, dtype=bool) assert arr.dtype == SparseDtype(bool) tm.assert_numpy_array_equal(arr.sp_values, np.array([True, True])) # Behavior change: np.asarray densifies. # tm.assert_numpy_array_equal(arr.sp_values, np.asarray(arr)) tm.assert_numpy_array_equal(arr.sp_index.indices, np.array([2, 3], np.int32)) dense = arr.to_dense() assert dense.dtype == bool tm.assert_numpy_array_equal(dense, data) def test_constructor_bool_fill_value(self): arr = SparseArray([True, False, True], dtype=None) assert arr.dtype == SparseDtype(np.bool_) assert not arr.fill_value arr = SparseArray([True, False, True], dtype=np.bool_) assert arr.dtype == SparseDtype(np.bool_) assert not arr.fill_value arr = SparseArray([True, False, True], dtype=np.bool_, fill_value=True) assert arr.dtype == SparseDtype(np.bool_, True) assert arr.fill_value def test_constructor_float32(self): # GH#10648 data = np.array([1.0, np.nan, 3], dtype=np.float32) arr = SparseArray(data, dtype=np.float32) assert arr.dtype == SparseDtype(np.float32) tm.assert_numpy_array_equal(arr.sp_values, np.array([1, 3], dtype=np.float32)) # Behavior change: np.asarray densifies. # tm.assert_numpy_array_equal(arr.sp_values, np.asarray(arr)) tm.assert_numpy_array_equal( arr.sp_index.indices, np.array([0, 2], dtype=np.int32) ) dense = arr.to_dense() assert dense.dtype == np.float32 tm.assert_numpy_array_equal(dense, data)
TestConstructors
python
pdm-project__pdm
src/pdm/installers/uv.py
{ "start": 3721, "end": 3965 }
class ____(UvSynchronizer): def _get_sync_command(self) -> list[str | HiddenText]: cmd = super()._get_sync_command() if "--verbose" in cmd: cmd.remove("--verbose") return [*cmd, "--quiet"]
QuietUvSynchronizer
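The subclass above only rewrites the command its parent builds: drop `--verbose` if present, then append `--quiet`. A minimal sketch of that flag-swapping override with a stub base class (UvSynchronizer itself is not shown here, so the stub and its command are placeholders):

class _StubSynchronizer:
    def _get_sync_command(self) -> list[str]:
        return ["uv", "pip", "sync", "--verbose", "requirements.txt"]

class _StubQuietSynchronizer(_StubSynchronizer):
    def _get_sync_command(self) -> list[str]:
        cmd = super()._get_sync_command()
        if "--verbose" in cmd:
            cmd.remove("--verbose")  # strip the noisy flag inherited from the parent
        return [*cmd, "--quiet"]     # and ask for quiet output instead

assert _StubQuietSynchronizer()._get_sync_command() == [
    "uv", "pip", "sync", "requirements.txt", "--quiet"
]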
python
doocs__leetcode
solution/1900-1999/1914.Cyclically Rotating a Grid/Solution.py
{ "start": 0, "end": 1189 }
class ____: def rotateGrid(self, grid: List[List[int]], k: int) -> List[List[int]]: def rotate(p: int, k: int): nums = [] for j in range(p, n - p - 1): nums.append(grid[p][j]) for i in range(p, m - p - 1): nums.append(grid[i][n - p - 1]) for j in range(n - p - 1, p, -1): nums.append(grid[m - p - 1][j]) for i in range(m - p - 1, p, -1): nums.append(grid[i][p]) k %= len(nums) if k == 0: return nums = nums[k:] + nums[:k] k = 0 for j in range(p, n - p - 1): grid[p][j] = nums[k] k += 1 for i in range(p, m - p - 1): grid[i][n - p - 1] = nums[k] k += 1 for j in range(n - p - 1, p, -1): grid[m - p - 1][j] = nums[k] k += 1 for i in range(m - p - 1, p, -1): grid[i][p] = nums[k] k += 1 m, n = len(grid), len(grid[0]) for p in range(min(m, n) >> 1): rotate(p, k) return grid
Solution
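Each layer p is unrolled counter-clockwise into a one-dimensional list, the list is rotated by k modulo its length, and the values are written back along the same path. A small worked check on the standard 4x4 example (again using the class name given by the target field):

grid = [
    [1, 2, 3, 4],
    [5, 6, 7, 8],
    [9, 10, 11, 12],
    [13, 14, 15, 16],
]
# k = 1 shifts the outer ring and the inner 2x2 ring one step counter-clockwise each.
assert Solution().rotateGrid(grid, 1) == [
    [2, 3, 4, 8],
    [1, 7, 11, 12],
    [5, 6, 10, 16],
    [9, 13, 14, 15],
]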
python
ray-project__ray
python/ray/autoscaler/_private/event_system.py
{ "start": 151, "end": 1623 }
class ____(Enum): """Events to track in ray.autoscaler.sdk.create_or_update_cluster. Attributes: up_started : Invoked at the beginning of create_or_update_cluster. ssh_keypair_downloaded : Invoked when the ssh keypair is downloaded. cluster_booting_started : Invoked when the cluster booting starts. acquiring_new_head_node : Invoked before the head node is acquired. head_node_acquired : Invoked after the head node is acquired. ssh_control_acquired : Invoked when the node is being updated. run_initialization_cmd : Invoked before all initialization commands are called and again before each initialization command. run_setup_cmd : Invoked before all setup commands are called and again before each setup command. start_ray_runtime : Invoked before ray start commands are run. start_ray_runtime_completed : Invoked after ray start commands are run. cluster_booting_completed : Invoked after cluster booting is completed. """ up_started = auto() ssh_keypair_downloaded = auto() cluster_booting_started = auto() acquiring_new_head_node = auto() head_node_acquired = auto() ssh_control_acquired = auto() run_initialization_cmd = auto() run_setup_cmd = auto() start_ray_runtime = auto() start_ray_runtime_completed = auto() cluster_booting_completed = auto()
CreateClusterEvent
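The enum gives every stage of cluster bring-up a stable identifier that callbacks can be keyed on. A rough sketch of such an event-to-callback registry; the dispatcher below is generic and illustrative, since the rest of ray's event_system module (and its real registration API) is not shown here:

from collections import defaultdict
from enum import Enum
from typing import Callable

class _EventDispatcher:
    """Minimal event registry keyed by an Enum member (illustrative only)."""

    def __init__(self) -> None:
        self._callbacks: dict[Enum, list[Callable[..., None]]] = defaultdict(list)

    def add_callback_handler(self, event: Enum, callback: Callable[..., None]) -> None:
        self._callbacks[event].append(callback)

    def execute_callback(self, event: Enum, **event_data) -> None:
        for callback in self._callbacks[event]:
            callback(**event_data)

dispatcher = _EventDispatcher()
dispatcher.add_callback_handler(
    CreateClusterEvent.cluster_booting_completed,
    lambda **kw: print("cluster is up", kw),
)
dispatcher.execute_callback(CreateClusterEvent.cluster_booting_completed, head_node_id="head-0")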
python
coleifer__peewee
tests/regressions.py
{ "start": 32261, "end": 32641 }
class ____(ModelTestCase): requires = [TC] def test_type_coercion(self): t = TC.create(ifield='10', ffield='20.5', cfield=30, tfield=40) t_db = TC.get(TC.id == t.id) self.assertEqual(t_db.ifield, 10) self.assertEqual(t_db.ffield, 20.5) self.assertEqual(t_db.cfield, '30') self.assertEqual(t_db.tfield, '40')
TestTypeCoercion
python
huggingface__transformers
src/transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py
{ "start": 2368, "end": 28786 }
class ____(ProcessorMixin): r""" Constructs a Wav2Vec2 processor which wraps a Wav2Vec2 feature extractor, a Wav2Vec2 CTC tokenizer and a decoder with language model support into a single processor for language model boosted speech recognition decoding. Args: feature_extractor ([`Wav2Vec2FeatureExtractor`] or [`SeamlessM4TFeatureExtractor`]): An instance of [`Wav2Vec2FeatureExtractor`] or [`SeamlessM4TFeatureExtractor`]. The feature extractor is a required input. tokenizer ([`Wav2Vec2CTCTokenizer`]): An instance of [`Wav2Vec2CTCTokenizer`]. The tokenizer is a required input. decoder (`pyctcdecode.BeamSearchDecoderCTC`): An instance of [`pyctcdecode.BeamSearchDecoderCTC`]. The decoder is a required input. """ def __init__( self, feature_extractor: "FeatureExtractionMixin", tokenizer: "PreTrainedTokenizerBase", decoder: "BeamSearchDecoderCTC", ): from pyctcdecode import BeamSearchDecoderCTC super().__init__(feature_extractor, tokenizer) if not isinstance(decoder, BeamSearchDecoderCTC): raise TypeError(f"`decoder` has to be of type {BeamSearchDecoderCTC.__class__}, but is {type(decoder)}") if feature_extractor.__class__.__name__ not in ["Wav2Vec2FeatureExtractor", "SeamlessM4TFeatureExtractor"]: raise ValueError( f"`feature_extractor` has to be of type `Wav2Vec2FeatureExtractor` or `SeamlessM4TFeatureExtractor`, but is {type(feature_extractor)}" ) # make sure that decoder's alphabet and tokenizer's vocab match in content missing_decoder_tokens = self.get_missing_alphabet_tokens(decoder, tokenizer) if len(missing_decoder_tokens) > 0: raise ValueError( f"The tokens {missing_decoder_tokens} are defined in the tokenizer's " "vocabulary, but not in the decoder's alphabet. " f"Make sure to include {missing_decoder_tokens} in the decoder's alphabet." ) self.decoder = decoder def save_pretrained(self, save_directory): super().save_pretrained(save_directory) self.decoder.save_to_dir(save_directory) @classmethod def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): r""" Instantiate a [`Wav2Vec2ProcessorWithLM`] from a pretrained Wav2Vec2 processor. <Tip> This class method is simply calling the feature extractor's [`~feature_extraction_utils.FeatureExtractionMixin.from_pretrained`], Wav2Vec2CTCTokenizer's [`~tokenization_utils_base.PreTrainedTokenizerBase.from_pretrained`], and [`pyctcdecode.BeamSearchDecoderCTC.load_from_hf_hub`]. Please refer to the docstrings of the methods above for more information. </Tip> Args: pretrained_model_name_or_path (`str` or `os.PathLike`): This can be either: - a string, the *model id* of a pretrained feature_extractor hosted inside a model repo on huggingface.co. - a path to a *directory* containing a feature extractor file saved using the [`~SequenceFeatureExtractor.save_pretrained`] method, e.g., `./my_model_directory/`. - a path or url to a saved feature extractor JSON *file*, e.g., `./my_model_directory/preprocessor_config.json`. 
**kwargs Additional keyword arguments passed along to both [`SequenceFeatureExtractor`] and [`PreTrainedTokenizer`] """ requires_backends(cls, "pyctcdecode") from pyctcdecode import BeamSearchDecoderCTC feature_extractor, tokenizer = super()._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs) if os.path.isdir(pretrained_model_name_or_path) or os.path.isfile(pretrained_model_name_or_path): unigram_encoding = kwargs.get("unigram_encoding", "utf-8") decoder = BeamSearchDecoderCTC.load_from_dir(pretrained_model_name_or_path, unigram_encoding) else: # BeamSearchDecoderCTC has no auto class kwargs.pop("_from_auto", None) # snapshot_download has no `trust_remote_code` flag kwargs.pop("trust_remote_code", None) # make sure that only relevant filenames are downloaded language_model_filenames = os.path.join(BeamSearchDecoderCTC._LANGUAGE_MODEL_SERIALIZED_DIRECTORY, "*") alphabet_filename = BeamSearchDecoderCTC._ALPHABET_SERIALIZED_FILENAME allow_patterns = [language_model_filenames, alphabet_filename] decoder = BeamSearchDecoderCTC.load_from_hf_hub( pretrained_model_name_or_path, allow_patterns=allow_patterns, **kwargs ) # set language model attributes for attribute in ["alpha", "beta", "unk_score_offset", "score_boundary"]: value = kwargs.pop(attribute, None) if value is not None: cls._set_language_model_attribute(decoder, attribute, value) # make sure that decoder's alphabet and tokenizer's vocab match in content missing_decoder_tokens = cls.get_missing_alphabet_tokens(decoder, tokenizer) if len(missing_decoder_tokens) > 0: raise ValueError( f"The tokens {missing_decoder_tokens} are defined in the tokenizer's " "vocabulary, but not in the decoder's alphabet. " f"Make sure to include {missing_decoder_tokens} in the decoder's alphabet." ) return cls(feature_extractor=feature_extractor, tokenizer=tokenizer, decoder=decoder) @staticmethod def _set_language_model_attribute(decoder: "BeamSearchDecoderCTC", attribute: str, value: float): setattr(decoder.model_container[decoder._model_key], attribute, value) @property def language_model(self): return self.decoder.model_container[self.decoder._model_key] @staticmethod def get_missing_alphabet_tokens(decoder, tokenizer): from pyctcdecode.alphabet import BLANK_TOKEN_PTN, UNK_TOKEN, UNK_TOKEN_PTN # we need to make sure that all of the tokenizer's except the special tokens # are present in the decoder's alphabet. Retrieve missing alphabet token # from decoder tokenizer_vocab_list = list(tokenizer.get_vocab().keys()) # replace special tokens for i, token in enumerate(tokenizer_vocab_list): if BLANK_TOKEN_PTN.match(token): tokenizer_vocab_list[i] = "" if token == tokenizer.word_delimiter_token: tokenizer_vocab_list[i] = " " if UNK_TOKEN_PTN.match(token): tokenizer_vocab_list[i] = UNK_TOKEN # are any of the extra tokens no special tokenizer tokens? missing_tokens = set(tokenizer_vocab_list) - set(decoder._alphabet.labels) return missing_tokens def __call__(self, *args, **kwargs): """ When used in normal mode, this method forwards all its arguments to the feature extractor's [`~FeatureExtractionMixin.__call__`] and returns its output. If used in the context [`~Wav2Vec2ProcessorWithLM.as_target_processor`] this method forwards all its arguments to Wav2Vec2CTCTokenizer's [`~Wav2Vec2CTCTokenizer.__call__`]. Please refer to the docstring of the above two methods for more information. """ if "raw_speech" in kwargs: warnings.warn("Using `raw_speech` as a keyword argument is deprecated. 
Use `audio` instead.") audio = kwargs.pop("raw_speech") else: audio = kwargs.pop("audio", None) sampling_rate = kwargs.pop("sampling_rate", None) text = kwargs.pop("text", None) if len(args) > 0: audio = args[0] args = args[1:] if audio is None and text is None: raise ValueError("You need to specify either an `audio` or `text` input to process.") if audio is not None: inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs) if text is not None: encodings = self.tokenizer(text, **kwargs) if text is None: return inputs elif audio is None: return encodings else: inputs["labels"] = encodings["input_ids"] return inputs def pad(self, *args, **kwargs): """ When used in normal mode, this method forwards all its arguments to the feature extractor's [`~FeatureExtractionMixin.pad`] and returns its output. If used in the context [`~Wav2Vec2ProcessorWithLM.as_target_processor`] this method forwards all its arguments to Wav2Vec2CTCTokenizer's [`~Wav2Vec2CTCTokenizer.pad`]. Please refer to the docstring of the above two methods for more information. """ input_features = kwargs.pop("input_features", None) labels = kwargs.pop("labels", None) if len(args) > 0: input_features = args[0] args = args[1:] if input_features is not None: input_features = self.feature_extractor.pad(input_features, *args, **kwargs) if labels is not None: labels = self.tokenizer.pad(labels, **kwargs) if labels is None: return input_features elif input_features is None: return labels else: input_features["labels"] = labels["input_ids"] return input_features def batch_decode( self, logits: np.ndarray, pool: Optional[Pool] = None, num_processes: Optional[int] = None, beam_width: Optional[int] = None, beam_prune_logp: Optional[float] = None, token_min_logp: Optional[float] = None, hotwords: Optional[Iterable[str]] = None, hotword_weight: Optional[float] = None, alpha: Optional[float] = None, beta: Optional[float] = None, unk_score_offset: Optional[float] = None, lm_score_boundary: Optional[bool] = None, output_word_offsets: bool = False, n_best: int = 1, ): """ Batch decode output logits to audio transcription with language model support. <Tip> This function makes use of Python's multiprocessing. Currently, multiprocessing is available only on Unix systems (see this [issue](https://github.com/kensho-technologies/pyctcdecode/issues/65)). If you are decoding multiple batches, consider creating a `Pool` and passing it to `batch_decode`. Otherwise, `batch_decode` will be very slow since it will create a fresh `Pool` for each call. See usage example below. </Tip> Args: logits (`np.ndarray`): The logits output vector of the model representing the log probabilities for each token. pool (`multiprocessing.Pool`, *optional*): An optional user-managed pool. If not set, one will be automatically created and closed. The pool should be instantiated *after* `Wav2Vec2ProcessorWithLM`. Otherwise, the LM won't be available to the pool's sub-processes. <Tip> Currently, only pools created with a 'fork' context can be used. If a 'spawn' pool is passed, it will be ignored and sequential decoding will be used instead. </Tip> num_processes (`int`, *optional*): If `pool` is not set, number of processes on which the function should be parallelized over. Defaults to the number of available CPUs. beam_width (`int`, *optional*): Maximum number of beams at each step in decoding. Defaults to pyctcdecode's DEFAULT_BEAM_WIDTH. 
beam_prune_logp (`int`, *optional*): Beams that are much worse than best beam will be pruned Defaults to pyctcdecode's DEFAULT_PRUNE_LOGP. token_min_logp (`int`, *optional*): Tokens below this logp are skipped unless they are argmax of frame Defaults to pyctcdecode's DEFAULT_MIN_TOKEN_LOGP. hotwords (`list[str]`, *optional*): List of words with extra importance, can be OOV for LM hotword_weight (`int`, *optional*): Weight factor for hotword importance Defaults to pyctcdecode's DEFAULT_HOTWORD_WEIGHT. alpha (`float`, *optional*): Weight for language model during shallow fusion beta (`float`, *optional*): Weight for length score adjustment of during scoring unk_score_offset (`float`, *optional*): Amount of log score offset for unknown tokens lm_score_boundary (`bool`, *optional*): Whether to have kenlm respect boundaries when scoring output_word_offsets (`bool`, *optional*, defaults to `False`): Whether or not to output word offsets. Word offsets can be used in combination with the sampling rate and model downsampling rate to compute the time-stamps of transcribed words. n_best (`int`, *optional*, defaults to `1`): Number of best hypotheses to return. If `n_best` is greater than 1, the returned `text` will be a list of lists of strings, `logit_score` will be a list of lists of floats, and `lm_score` will be a list of lists of floats, where the length of the outer list will correspond to the batch size and the length of the inner list will correspond to the number of returned hypotheses . The value should be >= 1. <Tip> Please take a look at the Example of [`~Wav2Vec2ProcessorWithLM.decode`] to better understand how to make use of `output_word_offsets`. [`~Wav2Vec2ProcessorWithLM.batch_decode`] works the same way with batched output. </Tip> Returns: [`~models.wav2vec2.Wav2Vec2DecoderWithLMOutput`]. Example: See [Decoding multiple audios](#decoding-multiple-audios). """ from pyctcdecode.constants import ( DEFAULT_BEAM_WIDTH, DEFAULT_HOTWORD_WEIGHT, DEFAULT_MIN_TOKEN_LOGP, DEFAULT_PRUNE_LOGP, ) # set defaults beam_width = beam_width if beam_width is not None else DEFAULT_BEAM_WIDTH beam_prune_logp = beam_prune_logp if beam_prune_logp is not None else DEFAULT_PRUNE_LOGP token_min_logp = token_min_logp if token_min_logp is not None else DEFAULT_MIN_TOKEN_LOGP hotword_weight = hotword_weight if hotword_weight is not None else DEFAULT_HOTWORD_WEIGHT # reset params at every forward call. It's just a `set` method in pyctcdecode self.decoder.reset_params( alpha=alpha, beta=beta, unk_score_offset=unk_score_offset, lm_score_boundary=lm_score_boundary ) # create multiprocessing pool and list numpy arrays # filter out logits padding logits_list = [array[(array != -100.0).all(axis=-1)] for array in logits] # create a pool if necessary while also using it as a context manager to close itself if pool is None: # fork is safe to use only on Unix, see "Contexts and start methods" section on # multiprocessing's docs (https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods) default_context = get_start_method() if default_context == "fork": cm = pool = get_context().Pool(num_processes) else: logger.warning( "Parallel batch decoding is not currently supported in this platform. " "Falling back to sequential decoding." ) cm = nullcontext() else: # pool is managed by the user, so we don't need to close it cm = nullcontext() if num_processes is not None: logger.warning( "Parameter `num_process` was passed, but it will be ignored since `pool` was also specified." 
) # pyctcdecode with cm: decoded_beams = self.decoder.decode_beams_batch( pool=pool, logits_list=logits_list, beam_width=beam_width, beam_prune_logp=beam_prune_logp, token_min_logp=token_min_logp, hotwords=hotwords, hotword_weight=hotword_weight, ) # extract text and scores batch_texts, logit_scores, lm_scores, word_offsets = [], [], [], [] for d in decoded_beams: batch_texts.append([beam[0] for beam in d]) logit_scores.append([beam[-2] for beam in d]) lm_scores.append([beam[-1] for beam in d]) # word_offsets.append([{"word": t[0], "start_offset": t[1][0], "end_offset": t[1][1]} for t in d[0][1]]) word_offsets.append( [ [ {"word": word, "start_offset": start_offset, "end_offset": end_offset} for word, (start_offset, end_offset) in beam[1] ] for beam in d ] ) word_offsets = word_offsets if output_word_offsets else None if n_best == 1: return Wav2Vec2DecoderWithLMOutput( text=[hyps[0] for hyps in batch_texts], logit_score=[hyps[0] for hyps in logit_scores], lm_score=[hyps[0] for hyps in lm_scores], word_offsets=[hyps[0] for hyps in word_offsets] if word_offsets is not None else None, ) else: return Wav2Vec2DecoderWithLMOutput( text=[hyps[:n_best] for hyps in batch_texts], logit_score=[hyps[:n_best] for hyps in logit_scores], lm_score=[hyps[:n_best] for hyps in lm_scores], word_offsets=[hyps[:n_best] for hyps in word_offsets] if word_offsets is not None else None, ) def decode( self, logits: np.ndarray, beam_width: Optional[int] = None, beam_prune_logp: Optional[float] = None, token_min_logp: Optional[float] = None, hotwords: Optional[Iterable[str]] = None, hotword_weight: Optional[float] = None, alpha: Optional[float] = None, beta: Optional[float] = None, unk_score_offset: Optional[float] = None, lm_score_boundary: Optional[bool] = None, output_word_offsets: bool = False, n_best: int = 1, ): """ Decode output logits to audio transcription with language model support. Args: logits (`np.ndarray`): The logits output vector of the model representing the log probabilities for each token. beam_width (`int`, *optional*): Maximum number of beams at each step in decoding. Defaults to pyctcdecode's DEFAULT_BEAM_WIDTH. beam_prune_logp (`int`, *optional*): A threshold to prune beams with log-probs less than best_beam_logp + beam_prune_logp. The value should be <= 0. Defaults to pyctcdecode's DEFAULT_PRUNE_LOGP. token_min_logp (`int`, *optional*): Tokens with log-probs below token_min_logp are skipped unless they are have the maximum log-prob for an utterance. Defaults to pyctcdecode's DEFAULT_MIN_TOKEN_LOGP. hotwords (`list[str]`, *optional*): List of words with extra importance which can be missing from the LM's vocabulary, e.g. ["huggingface"] hotword_weight (`int`, *optional*): Weight multiplier that boosts hotword scores. Defaults to pyctcdecode's DEFAULT_HOTWORD_WEIGHT. alpha (`float`, *optional*): Weight for language model during shallow fusion beta (`float`, *optional*): Weight for length score adjustment of during scoring unk_score_offset (`float`, *optional*): Amount of log score offset for unknown tokens lm_score_boundary (`bool`, *optional*): Whether to have kenlm respect boundaries when scoring output_word_offsets (`bool`, *optional*, defaults to `False`): Whether or not to output word offsets. Word offsets can be used in combination with the sampling rate and model downsampling rate to compute the time-stamps of transcribed words. n_best (`int`, *optional*, defaults to `1`): Number of best hypotheses to return. 
If `n_best` is greater than 1, the returned `text` will be a list of strings, `logit_score` will be a list of floats, and `lm_score` will be a list of floats, where the length of these lists will correspond to the number of returned hypotheses. The value should be >= 1. <Tip> Please take a look at the example below to better understand how to make use of `output_word_offsets`. </Tip> Returns: [`~models.wav2vec2.Wav2Vec2DecoderWithLMOutput`]. Example: ```python >>> # Let's see how to retrieve time steps for a model >>> from transformers import AutoTokenizer, AutoProcessor, AutoModelForCTC >>> from datasets import load_dataset >>> import datasets >>> import torch >>> # import model, feature extractor, tokenizer >>> model = AutoModelForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm") >>> processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm") >>> # load first sample of English common_voice >>> dataset = load_dataset("mozilla-foundation/common_voice_11_0", "en", split="train", streaming=True) >>> dataset = dataset.cast_column("audio", datasets.Audio(sampling_rate=16_000)) >>> dataset_iter = iter(dataset) >>> sample = next(dataset_iter) >>> # forward sample through model to get greedily predicted transcription ids >>> input_values = processor(sample["audio"]["array"], return_tensors="pt").input_values >>> with torch.no_grad(): ... logits = model(input_values).logits[0].cpu().numpy() >>> # retrieve word stamps (analogous commands for `output_char_offsets`) >>> outputs = processor.decode(logits, output_word_offsets=True) >>> # compute `time_offset` in seconds as product of downsampling ratio and sampling_rate >>> time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate >>> word_offsets = [ ... { ... "word": d["word"], ... "start_time": round(d["start_offset"] * time_offset, 2), ... "end_time": round(d["end_offset"] * time_offset, 2), ... } ... for d in outputs.word_offsets ... ] >>> # compare word offsets with audio `en_train_0/common_voice_en_19121553.mp3` online on the dataset viewer: >>> # https://huggingface.co/datasets/mozilla-foundation/common_voice_11_0/viewer/en >>> word_offsets[:4] [{'word': 'THE', 'start_time': 0.68, 'end_time': 0.78}, {'word': 'TRACK', 'start_time': 0.88, 'end_time': 1.1}, {'word': 'APPEARS', 'start_time': 1.18, 'end_time': 1.66}, {'word': 'ON', 'start_time': 1.86, 'end_time': 1.92}] ```""" from pyctcdecode.constants import ( DEFAULT_BEAM_WIDTH, DEFAULT_HOTWORD_WEIGHT, DEFAULT_MIN_TOKEN_LOGP, DEFAULT_PRUNE_LOGP, ) # set defaults beam_width = beam_width if beam_width is not None else DEFAULT_BEAM_WIDTH beam_prune_logp = beam_prune_logp if beam_prune_logp is not None else DEFAULT_PRUNE_LOGP token_min_logp = token_min_logp if token_min_logp is not None else DEFAULT_MIN_TOKEN_LOGP hotword_weight = hotword_weight if hotword_weight is not None else DEFAULT_HOTWORD_WEIGHT # reset params at every forward call. 
It's just a `set` method in pyctcdecode self.decoder.reset_params( alpha=alpha, beta=beta, unk_score_offset=unk_score_offset, lm_score_boundary=lm_score_boundary ) # pyctcdecode decoded_beams = self.decoder.decode_beams( logits, beam_width=beam_width, beam_prune_logp=beam_prune_logp, token_min_logp=token_min_logp, hotwords=hotwords, hotword_weight=hotword_weight, ) word_offsets = None if output_word_offsets: word_offsets = [ [ {"word": word, "start_offset": start_offset, "end_offset": end_offset} for word, (start_offset, end_offset) in beam[2] ] for beam in decoded_beams ] logit_scores = [beam[-2] for beam in decoded_beams] lm_scores = [beam[-1] for beam in decoded_beams] hypotheses = [beam[0] for beam in decoded_beams] if n_best > len(decoded_beams): logger.info( "N-best size is larger than the number of generated hypotheses, all hypotheses will be returned." ) if n_best == 1: return Wav2Vec2DecoderWithLMOutput( text=hypotheses[0], logit_score=logit_scores[0], lm_score=lm_scores[0], word_offsets=word_offsets[0] if word_offsets is not None else None, ) else: return Wav2Vec2DecoderWithLMOutput( text=hypotheses[:n_best], logit_score=logit_scores[:n_best], lm_score=lm_scores[:n_best], word_offsets=word_offsets[:n_best] if word_offsets is not None else None, ) __all__ = ["Wav2Vec2ProcessorWithLM"]
Wav2Vec2ProcessorWithLM
python
huggingface__transformers
src/transformers/models/glm4/modular_glm4.py
{ "start": 4872, "end": 4950 }
class ____(GlmForSequenceClassification):
    pass
Glm4ForSequenceClassification
python
huggingface__transformers
src/transformers/models/chameleon/modeling_chameleon.py
{ "start": 24180, "end": 24666 }
class ____(nn.Module):
    def __init__(self, in_channels):
        super().__init__()
        self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=2, padding=0)

    def forward(self, hidden_states):
        # no asymmetric padding in torch conv, must do it ourselves
        hidden_states = F.pad(hidden_states, pad=(0, 1, 0, 1), mode="constant", value=0)
        hidden_states = self.conv(hidden_states)
        return hidden_states
ChameleonVQVAEEncoderConvDownsample
python
pytorch__pytorch
torch/_export/db/case.py
{ "start": 682, "end": 1769 }
class ____(Enum):
    """
    Indicates at what stage the feature used in the example is handled in export.
    """

    SUPPORTED = 1
    NOT_SUPPORTED_YET = 0


ArgsType = tuple[Any, ...]


def check_inputs_type(args, kwargs):
    if not isinstance(args, tuple):
        raise ValueError(f"Expecting args type to be a tuple, got: {type(args)}")
    if not isinstance(kwargs, dict):
        raise ValueError(f"Expecting kwargs type to be a dict, got: {type(kwargs)}")
    for key in kwargs:
        if not isinstance(key, str):
            raise ValueError(f"Expecting kwargs keys to be a string, got: {type(key)}")


def _validate_tag(tag: str):
    parts = tag.split(".")
    t = _TAGS
    for part in parts:
        assert set(part) <= set(
            string.ascii_lowercase + "-"
        ), f"Tag contains invalid characters: {part}"
        if part in t:
            t = t[part]
        else:
            raise ValueError(f"Tag {tag} is not found in registered tags.")


@dataclass(frozen=True)
SupportLevel
python
networkx__networkx
networkx/algorithms/tree/tests/test_mst.py
{ "start": 8826, "end": 9876 }
class ____(MinimumSpanningTreeTestBase):
    """Unit tests for computing a minimum (or maximum) spanning tree
    using Borůvka's algorithm.
    """

    algorithm = "boruvka"

    def test_unicode_name(self):
        """Tests that using a Unicode string can correctly indicate
        Borůvka's algorithm.
        """
        edges = nx.minimum_spanning_edges(self.G, algorithm="borůvka")
        # Edges from the spanning edges functions don't come in sorted
        # orientation, so we need to sort each edge individually.
        actual = sorted((min(u, v), max(u, v), d) for u, v, d in edges)
        assert edges_equal(actual, self.minimum_spanning_edgelist)

    def test_minimum_spanning_edges_multigraph_raises(self):
        MG = nx.MultiGraph()
        MG.add_edge(0, 1, weight=1)
        with pytest.raises(nx.NetworkXNotImplemented):
            list(nx.minimum_spanning_edges(MG, algorithm=self.algo))
        with pytest.raises(nx.NetworkXNotImplemented):
            list(nx.maximum_spanning_edges(MG, algorithm=self.algo))
TestBoruvka
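The test class above exercises NetworkX's Borůvka implementation through the public API; a minimal sketch of calling it directly (the tiny triangle graph is made up for illustration):

```python
import networkx as nx

# Hypothetical 3-node graph; edge (0, 2) is the heaviest and should be dropped.
G = nx.Graph()
G.add_weighted_edges_from([(0, 1, 1.0), (1, 2, 2.0), (0, 2, 3.0)])

mst_edges = nx.minimum_spanning_edges(G, algorithm="boruvka", data=False)
print(sorted(tuple(sorted(e)) for e in mst_edges))  # [(0, 1), (1, 2)]
```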
python
tornadoweb__tornado
tornado/simple_httpclient.py
{ "start": 1934, "end": 8700 }
class ____(AsyncHTTPClient): """Non-blocking HTTP client with no external dependencies. This class implements an HTTP 1.1 client on top of Tornado's IOStreams. Some features found in the curl-based AsyncHTTPClient are not yet supported. In particular, proxies are not supported, connections are not reused, and callers cannot select the network interface to be used. This implementation supports the following arguments, which can be passed to ``configure()`` to control the global singleton, or to the constructor when ``force_instance=True``. ``max_clients`` is the number of concurrent requests that can be in progress; when this limit is reached additional requests will be queued. Note that time spent waiting in this queue still counts against the ``request_timeout``. ``defaults`` is a dict of parameters that will be used as defaults on all `.HTTPRequest` objects submitted to this client. ``hostname_mapping`` is a dictionary mapping hostnames to IP addresses. It can be used to make local DNS changes when modifying system-wide settings like ``/etc/hosts`` is not possible or desirable (e.g. in unittests). ``resolver`` is similar, but using the `.Resolver` interface instead of a simple mapping. ``max_buffer_size`` (default 100MB) is the number of bytes that can be read into memory at once. ``max_body_size`` (defaults to ``max_buffer_size``) is the largest response body that the client will accept. Without a ``streaming_callback``, the smaller of these two limits applies; with a ``streaming_callback`` only ``max_body_size`` does. .. versionchanged:: 4.2 Added the ``max_body_size`` argument. """ def initialize( # type: ignore self, max_clients: int = 10, hostname_mapping: Optional[Dict[str, str]] = None, max_buffer_size: int = 104857600, resolver: Optional[Resolver] = None, defaults: Optional[Dict[str, Any]] = None, max_header_size: Optional[int] = None, max_body_size: Optional[int] = None, ) -> None: super().initialize(defaults=defaults) self.max_clients = max_clients self.queue = ( collections.deque() ) # type: Deque[Tuple[object, HTTPRequest, Callable[[HTTPResponse], None]]] self.active = ( {} ) # type: Dict[object, Tuple[HTTPRequest, Callable[[HTTPResponse], None]]] self.waiting = ( {} ) # type: Dict[object, Tuple[HTTPRequest, Callable[[HTTPResponse], None], object]] self.max_buffer_size = max_buffer_size self.max_header_size = max_header_size self.max_body_size = max_body_size # TCPClient could create a Resolver for us, but we have to do it # ourselves to support hostname_mapping. 
if resolver: self.resolver = resolver self.own_resolver = False else: self.resolver = Resolver() self.own_resolver = True if hostname_mapping is not None: self.resolver = OverrideResolver( resolver=self.resolver, mapping=hostname_mapping ) self.tcp_client = TCPClient(resolver=self.resolver) def close(self) -> None: super().close() if self.own_resolver: self.resolver.close() self.tcp_client.close() def fetch_impl( self, request: HTTPRequest, callback: Callable[[HTTPResponse], None] ) -> None: key = object() self.queue.append((key, request, callback)) assert request.connect_timeout is not None assert request.request_timeout is not None timeout_handle = None if len(self.active) >= self.max_clients: timeout = ( min(request.connect_timeout, request.request_timeout) or request.connect_timeout or request.request_timeout ) # min but skip zero if timeout: timeout_handle = self.io_loop.add_timeout( self.io_loop.time() + timeout, functools.partial(self._on_timeout, key, "in request queue"), ) self.waiting[key] = (request, callback, timeout_handle) self._process_queue() if self.queue: gen_log.debug( "max_clients limit reached, request queued. " "%d active, %d queued requests." % (len(self.active), len(self.queue)) ) def _process_queue(self) -> None: while self.queue and len(self.active) < self.max_clients: key, request, callback = self.queue.popleft() if key not in self.waiting: continue self._remove_timeout(key) self.active[key] = (request, callback) release_callback = functools.partial(self._release_fetch, key) self._handle_request(request, release_callback, callback) def _connection_class(self) -> type: return _HTTPConnection def _handle_request( self, request: HTTPRequest, release_callback: Callable[[], None], final_callback: Callable[[HTTPResponse], None], ) -> None: self._connection_class()( self, request, release_callback, final_callback, self.max_buffer_size, self.tcp_client, self.max_header_size, self.max_body_size, ) def _release_fetch(self, key: object) -> None: del self.active[key] self._process_queue() def _remove_timeout(self, key: object) -> None: if key in self.waiting: request, callback, timeout_handle = self.waiting[key] if timeout_handle is not None: self.io_loop.remove_timeout(timeout_handle) del self.waiting[key] def _on_timeout(self, key: object, info: Optional[str] = None) -> None: """Timeout callback of request. Construct a timeout HTTPResponse when a timeout occurs. :arg object key: A simple object to mark the request. :info string key: More detailed timeout information. """ request, callback, timeout_handle = self.waiting[key] self.queue.remove((key, request, callback)) error_message = f"Timeout {info}" if info else "Timeout" timeout_response = HTTPResponse( request, 599, error=HTTPTimeoutError(error_message), request_time=self.io_loop.time() - request.start_time, ) self.io_loop.add_callback(callback, timeout_response) del self.waiting[key]
SimpleAsyncHTTPClient
python
Textualize__textual
src/textual/pilot.py
{ "start": 1284, "end": 1397 }
class ____(Exception):
    """Raised when the pilot mouse target is outside of the (visible) screen."""
OutOfBounds
python
getsentry__sentry
src/sentry/issues/endpoints/organization_group_suspect_tags.py
{ "start": 560, "end": 2191 }
class ____(GroupEndpoint): publish_status = {"GET": ApiPublishStatus.PRIVATE} def get(self, request: Request, group: Group) -> Response: """Stats bucketed by time.""" if not features.has( "organizations:issues-suspect-tags", group.organization, actor=request.user, ): return Response(status=404) environments = [e.name for e in get_environments(request, group.organization)] group_id = group.id organization_id = group.organization.id project_id = group.project.id start, end = get_date_range_from_params(request.GET) # Clamp the range to be within the issue's first and last seen timestamps. start, end = max(start, group.first_seen), min(end, group.last_seen) # To increase our cache hit-rate we round the dates down to the nearest 5 minute interval. if end - start > timedelta(minutes=5): start = start.replace(minute=(start.minute // 5) * 5, second=0, microsecond=0) end = end.replace(minute=(end.minute // 5) * 5, second=0, microsecond=0) return Response( { "data": [ {"tag": tag, "score": score} for tag, score in get_suspect_tag_scores( organization_id, project_id, start, end, environments, group_id, ) ] }, status=200, )
OrganizationGroupSuspectTagsEndpoint
python
astropy__astropy
astropy/coordinates/tests/test_representation_arithmetic.py
{ "start": 45850, "end": 54325 }
class ____: def setup_method(self): self.s = SphericalRepresentation( lon=[0.0, 6.0, 21.0] * u.hourangle, lat=[0.0, -30.0, 85.0] * u.deg, distance=[1, 2, 3] * u.kpc, ) @pytest.mark.parametrize( "sd_cls", [SphericalDifferential, SphericalCosLatDifferential] ) def test_represent_as_own_class(self, sd_cls): so = sd_cls(1.0 * u.deg, 2.0 * u.deg, 0.1 * u.kpc) so2 = so.represent_as(sd_cls) assert so2 is so def test_represent_other_coslat(self): s = self.s coslat = np.cos(s.lat) so = SphericalDifferential(1.0 * u.deg, 2.0 * u.deg, 0.1 * u.kpc) so_coslat = so.represent_as(SphericalCosLatDifferential, base=s) assert_quantity_allclose(so.d_lon * coslat, so_coslat.d_lon_coslat) so2 = so_coslat.represent_as(SphericalDifferential, base=s) assert np.all(representation_equal(so2, so)) so3 = SphericalDifferential.from_representation(so_coslat, base=s) assert np.all(representation_equal(so3, so)) so_coslat2 = SphericalCosLatDifferential.from_representation(so, base=s) assert np.all(representation_equal(so_coslat2, so_coslat)) # Also test UnitSpherical us = s.represent_as(UnitSphericalRepresentation) uo = so.represent_as(UnitSphericalDifferential) uo_coslat = so.represent_as(UnitSphericalCosLatDifferential, base=s) assert_quantity_allclose(uo.d_lon * coslat, uo_coslat.d_lon_coslat) uo2 = uo_coslat.represent_as(UnitSphericalDifferential, base=us) assert np.all(representation_equal(uo2, uo)) uo3 = UnitSphericalDifferential.from_representation(uo_coslat, base=us) assert np.all(representation_equal(uo3, uo)) uo_coslat2 = UnitSphericalCosLatDifferential.from_representation(uo, base=us) assert np.all(representation_equal(uo_coslat2, uo_coslat)) uo_coslat3 = uo.represent_as(UnitSphericalCosLatDifferential, base=us) assert np.all(representation_equal(uo_coslat3, uo_coslat)) @pytest.mark.parametrize( "sd_cls", [SphericalDifferential, SphericalCosLatDifferential] ) @pytest.mark.parametrize( "r_cls", ( SphericalRepresentation, UnitSphericalRepresentation, PhysicsSphericalRepresentation, CylindricalRepresentation, ), ) def test_represent_regular_class(self, sd_cls, r_cls): so = sd_cls(1.0 * u.deg, 2.0 * u.deg, 0.1 * u.kpc) r = so.represent_as(r_cls, base=self.s) c = so.to_cartesian(self.s) r_check = c.represent_as(r_cls) assert np.all(representation_equal(r, r_check)) so2 = sd_cls.from_representation(r, base=self.s) so3 = sd_cls.from_cartesian(r.to_cartesian(), self.s) assert np.all(representation_equal(so2, so3)) @pytest.mark.parametrize( "sd_cls", [SphericalDifferential, SphericalCosLatDifferential] ) def test_convert_physics(self, sd_cls): # Conversion needs no base for SphericalDifferential, but does # need one (to get the latitude) for SphericalCosLatDifferential. 
if sd_cls is SphericalDifferential: usd_cls = UnitSphericalDifferential base_s = base_u = base_p = None else: usd_cls = UnitSphericalCosLatDifferential base_s = self.s[1] base_u = base_s.represent_as(UnitSphericalRepresentation) base_p = base_s.represent_as(PhysicsSphericalRepresentation) so = sd_cls(1.0 * u.deg, 2.0 * u.deg, 0.1 * u.kpc) po = so.represent_as(PhysicsSphericalDifferential, base=base_s) so2 = sd_cls.from_representation(po, base=base_s) assert_differential_allclose(so, so2) po2 = PhysicsSphericalDifferential.from_representation(so, base=base_p) assert_differential_allclose(po, po2) so3 = po.represent_as(sd_cls, base=base_p) assert_differential_allclose(so, so3) s = self.s p = s.represent_as(PhysicsSphericalRepresentation) cso = so.to_cartesian(s[1]) cpo = po.to_cartesian(p[1]) assert_representation_allclose(cso, cpo) assert_representation_allclose(s[1] + so, p[1] + po) po2 = so.represent_as( PhysicsSphericalDifferential, base=None if base_s is None else s ) assert_representation_allclose(s + so, p + po2) suo = usd_cls.from_representation(so) puo = usd_cls.from_representation(po, base=base_u) assert_differential_allclose(suo, puo) suo2 = so.represent_as(usd_cls) puo2 = po.represent_as(usd_cls, base=base_p) assert_differential_allclose(suo2, puo2) assert_differential_allclose(puo, puo2) sro = RadialDifferential.from_representation(so) pro = RadialDifferential.from_representation(po) assert representation_equal(sro, pro) sro2 = so.represent_as(RadialDifferential) pro2 = po.represent_as(RadialDifferential) assert representation_equal(sro2, pro2) assert representation_equal(pro, pro2) @pytest.mark.parametrize( ("sd_cls", "usd_cls"), [ (SphericalDifferential, UnitSphericalDifferential), (SphericalCosLatDifferential, UnitSphericalCosLatDifferential), ], ) def test_convert_unit_spherical_radial(self, sd_cls, usd_cls): s = self.s us = s.represent_as(UnitSphericalRepresentation) rs = s.represent_as(RadialRepresentation) assert_representation_allclose(rs * us, s) uo = usd_cls(2.0 * u.deg, 1.0 * u.deg) so = uo.represent_as(sd_cls, base=s) assert_quantity_allclose(so.d_distance, 0.0 * u.kpc, atol=1.0 * u.npc) uo2 = so.represent_as(usd_cls) assert_representation_allclose(uo.to_cartesian(us), uo2.to_cartesian(us)) so1 = sd_cls(2.0 * u.deg, 1.0 * u.deg, 5.0 * u.pc) uo_r = so1.represent_as(usd_cls) ro_r = so1.represent_as(RadialDifferential) assert np.all(representation_equal(uo_r, uo)) assert np.all(representation_equal(ro_r, RadialDifferential(5.0 * u.pc))) @pytest.mark.parametrize( "sd_cls", [SphericalDifferential, SphericalCosLatDifferential] ) def test_convert_cylindrial(self, sd_cls): s = self.s so = sd_cls(1.0 * u.deg, 2.0 * u.deg, 0.1 * u.kpc) cyo = so.represent_as(CylindricalDifferential, base=s) cy = s.represent_as(CylindricalRepresentation) so1 = cyo.represent_as(sd_cls, base=cy) assert_representation_allclose(so.to_cartesian(s), so1.to_cartesian(s)) cyo2 = CylindricalDifferential.from_representation(so, base=cy) assert_representation_allclose( cyo2.to_cartesian(base=cy), cyo.to_cartesian(base=cy) ) so2 = sd_cls.from_representation(cyo2, base=s) assert_representation_allclose(so.to_cartesian(s), so2.to_cartesian(s)) @pytest.mark.parametrize( "sd_cls", [SphericalDifferential, SphericalCosLatDifferential] ) def test_combinations(self, sd_cls): if sd_cls is SphericalDifferential: uo = UnitSphericalDifferential(2.0 * u.deg, 1.0 * u.deg) uo_d_lon = uo.d_lon else: uo = UnitSphericalCosLatDifferential(2.0 * u.deg, 1.0 * u.deg) uo_d_lon = uo.d_lon_coslat ro = RadialDifferential(1.0 * 
u.mpc) so1 = uo + ro so1c = sd_cls(uo_d_lon, uo.d_lat, ro.d_distance) assert np.all(representation_equal(so1, so1c)) so2 = uo - ro so2c = sd_cls(uo_d_lon, uo.d_lat, -ro.d_distance) assert np.all(representation_equal(so2, so2c)) so3 = so2 + ro so3c = sd_cls(uo_d_lon, uo.d_lat, 0.0 * u.kpc) assert np.all(representation_equal(so3, so3c)) so4 = so1 + ro so4c = sd_cls(uo_d_lon, uo.d_lat, 2 * ro.d_distance) assert np.all(representation_equal(so4, so4c)) so5 = so1 - uo so5c = sd_cls(0 * u.deg, 0.0 * u.deg, ro.d_distance) assert np.all(representation_equal(so5, so5c)) assert_representation_allclose(self.s + (uo + ro), self.s + so1) @pytest.mark.parametrize( "op,args", [ (operator.neg, ()), (operator.pos, ()), (operator.mul, (-8.0,)), (operator.truediv, ([4.0, 8.0] * u.s,)), ], scope="class", )
TestDifferentialConversion
python
lepture__authlib
authlib/integrations/django_oauth2/resource_protector.py
{ "start": 1958, "end": 2597 }
class ____(_BearerTokenValidator):
    def __init__(self, token_model, realm=None, **extra_attributes):
        self.token_model = token_model
        super().__init__(realm, **extra_attributes)

    def authenticate_token(self, token_string):
        try:
            return self.token_model.objects.get(access_token=token_string)
        except self.token_model.DoesNotExist:
            return None


def return_error_response(error):
    body = dict(error.get_body())
    resp = JsonResponse(body, status=error.status_code)
    headers = error.get_headers()
    for k, v in headers:
        resp[k] = v
    return resp
BearerTokenValidator
python
pytorch__pytorch
torch/fx/interpreter.py
{ "start": 19436, "end": 25278 }
class ____(Interpreter): """ ``Transformer`` is a special type of interpreter that produces a new ``Module``. It exposes a ``transform()`` method that returns the transformed ``Module``. ``Transformer`` does not require arguments to run, as ``Interpreter`` does. ``Transformer`` works entirely symbolically. Example: Suppose we want to swap all instances of ``torch.neg`` with ``torch.sigmoid`` and vice versa (including their ``Tensor`` method equivalents). We could subclass ``Transformer`` like so:: class NegSigmSwapXformer(Transformer): def call_function( self, target: "Target", args: Tuple[Argument, ...], kwargs: Dict[str, Any], ) -> Any: if target is torch.sigmoid: return torch.neg(*args, **kwargs) return super().call_function(target, args, kwargs) def call_method( self, target: "Target", args: Tuple[Argument, ...], kwargs: Dict[str, Any], ) -> Any: if target == "neg": call_self, *args_tail = args return call_self.sigmoid(*args_tail, **kwargs) return super().call_method(target, args, kwargs) def fn(x): return torch.sigmoid(x).neg() gm = torch.fx.symbolic_trace(fn) transformed: torch.nn.Module = NegSigmSwapXformer(gm).transform() input = torch.randn(3, 4) torch.testing.assert_close(transformed(input), torch.neg(input).sigmoid()) Args: module (GraphModule): The ``Module`` to be transformed. """ @compatibility(is_backward_compatible=True) def __init__(self, module): super().__init__(module) self.new_graph = Graph() self.new_graph.set_codegen(module.graph._codegen) class TransformerTracer(Tracer): def __init__(self, graph: Graph): super().__init__() self.graph = graph self.tensor_attrs: dict[torch.Tensor, str] = {} # type: ignore[assignment] def is_leaf_module(self, _, __) -> bool: return True self.tracer = TransformerTracer(self.new_graph) self.tracer.root = module @compatibility(is_backward_compatible=True) def placeholder( self, target: "Target", args: tuple[Argument, ...], kwargs: dict[str, Any] ) -> Proxy: """ Execute a ``placeholder`` node. In ``Transformer``, this is overridden to insert a new ``placeholder`` into the output graph. Args: target (Target): The call target for this node. See `Node <https://pytorch.org/docs/main/fx.html#torch.fx.Node>`__ for details on semantics args (Tuple): Tuple of positional args for this invocation kwargs (Dict): Dict of keyword arguments for this invocation """ assert isinstance(target, str) default_value = next(iter(args)) if args else inspect.Signature.empty return Proxy( self.new_graph.placeholder(target, default_value=default_value), self.tracer ) @compatibility(is_backward_compatible=True) def get_attr( self, target: "Target", args: tuple[Argument, ...], kwargs: dict[str, Any] ) -> Proxy: """ Execute a ``get_attr`` node. In ``Transformer``, this is overridden to insert a new ``get_attr`` node into the output graph. Args: target (Target): The call target for this node. See `Node <https://pytorch.org/docs/main/fx.html#torch.fx.Node>`__ for details on semantics args (Tuple): Tuple of positional args for this invocation kwargs (Dict): Dict of keyword arguments for this invocation """ assert isinstance(target, str) return self.tracer.create_proxy("get_attr", target, args, kwargs) @compatibility(is_backward_compatible=True) def call_module( self, target: "Target", args: tuple[Argument, ...], kwargs: dict[str, Any] ) -> Any: # Override so that the leaf module policy from `self.tracer` is respected. 
assert isinstance(target, str) submod = self.fetch_attr(target) return self.tracer.call_module(submod, submod.forward, args, kwargs) @compatibility(is_backward_compatible=True) def call_function( self, target: "Target", args: tuple[Argument, ...], kwargs: dict[str, Any] ) -> Any: # Override so that functions that were wrapped are still wrapped. return self.tracer.create_proxy("call_function", target, args, kwargs) @compatibility(is_backward_compatible=True) def transform(self) -> GraphModule: """ Transform ``self.module`` and return the transformed ``GraphModule``. """ with fx_traceback.preserve_node_meta(): result = super().run(enable_io_processing=False) if result is not None: def strip_proxy(a: Union[Argument, Proxy]) -> Any: return a.node if isinstance(a, Proxy) else a new_output_node = self.new_graph.output(map_aggregate(result, strip_proxy)) # also preserve the metadata from the old output node, if it exists old_output_node = list(self.graph.nodes)[-1] assert old_output_node.op == "output" for k, v in old_output_node.meta.items(): new_output_node.meta[k] = v return _make_graph_module(self.module, self.new_graph)
Transformer
python
HypothesisWorks__hypothesis
hypothesis-python/tests/attrs/test_pretty.py
{ "start": 853, "end": 1177 }
class ____:
    def _repr_pretty_(self, p, cycle):
        """Exercise the IPython callback interface."""
        p.text("I am a banana")


def test_custom_pretty_print_method_overrides_field_printing():
    assert pretty.pretty(SomeAttrsClassWithCustomPretty()) == "I am a banana"


@attrs.define
SomeAttrsClassWithCustomPretty
python
ansible__ansible
test/lib/ansible_test/_internal/config.py
{ "start": 11854, "end": 12366 }
class ____(TestConfig):
    """Configuration for the units command."""

    def __init__(self, args: t.Any) -> None:
        super().__init__(args, 'units')

        self.collect_only: bool = args.collect_only
        self.num_workers: int = args.num_workers

        self.requirements_mode: str = getattr(args, 'requirements_mode', '')

        if self.requirements_mode == 'only':
            self.requirements = True
        elif self.requirements_mode == 'skip':
            self.requirements = False
UnitsConfig
python
kamyu104__LeetCode-Solutions
Python/truncate-sentence.py
{ "start": 29, "end": 337 }
class ____(object):
    def truncateSentence(self, s, k):
        """
        :type s: str
        :type k: int
        :rtype: str
        """
        for i in xrange(len(s)):
            if s[i] == ' ':
                k -= 1
                if not k:
                    return s[:i]
        return s
Solution
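The snippet above targets Python 2 (`xrange`); a minimal Python 3 sketch of the same word-counting idea, with a made-up input sentence:

```python
def truncate_sentence(s: str, k: int) -> str:
    # Stop at the k-th space; if the sentence has exactly k words, keep it whole.
    for i in range(len(s)):
        if s[i] == ' ':
            k -= 1
            if not k:
                return s[:i]
    return s


print(truncate_sentence("Hello how are you Contestant", 4))  # Hello how are you
```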
python
run-llama__llama_index
llama-index-core/llama_index/core/instrumentation/events/agent.py
{ "start": 1342, "end": 2738 }
class ____(BaseEvent): """ AgentChatWithStepEndEvent. Args: response (Optional[AGENT_CHAT_RESPONSE_TYPE]): Agent chat response. """ response: Optional[AGENT_CHAT_RESPONSE_TYPE] @model_validator(mode="before") @classmethod def validate_response(cls: Any, values: Any) -> Any: """Validate response.""" response = values.get("response") if response is None: pass elif not isinstance(response, AgentChatResponse) and not isinstance( response, StreamingAgentChatResponse ): raise ValueError( "response must be of type AgentChatResponse or StreamingAgentChatResponse" ) return values @field_validator("response", mode="before") @classmethod def validate_response_type(cls: Any, response: Any) -> Any: """Validate response type.""" if response is None: return response if not isinstance(response, AgentChatResponse) and not isinstance( response, StreamingAgentChatResponse ): raise ValueError( "response must be of type AgentChatResponse or StreamingAgentChatResponse" ) return response @classmethod def class_name(cls) -> str: """Class name.""" return "AgentChatWithStepEndEvent"
AgentChatWithStepEndEvent
python
ansible__ansible
lib/ansible/modules/group.py
{ "start": 9009, "end": 9661 }
class ____(Group):
    """
    This is a Linux Group manipulation class. This is to apply the '-f'
    parameter to the groupdel command

    This overrides the following methods from the generic class:-
      - group_del()
    """

    platform = 'Linux'
    distribution = None

    def group_del(self):
        if self.local:
            command_name = 'lgroupdel'
        else:
            command_name = 'groupdel'
        cmd = [self.module.get_bin_path(command_name, True)]
        if self.force:
            cmd.append('-f')
        cmd.append(self.name)
        return self.execute_command(cmd)


# ===========================================
Linux
python
Pylons__pyramid
src/pyramid/interfaces.py
{ "start": 31637, "end": 32610 }
class ____(Interface):
    def __call__(request, elements, kw):
        """A pregenerator is a function associated by a developer with a
        :term:`route`. The pregenerator for a route is called by
        :meth:`pyramid.request.Request.route_url` in order to adjust the
        set of arguments passed to it by the user for special purposes,
        such as Pylons 'subdomain' support. It will influence the URL
        returned by ``route_url``.

        A pregenerator should return a two-tuple of ``(elements, kw)``
        after examining the originals passed to this function, which are
        the arguments ``(request, elements, kw)``.

        The simplest pregenerator is::

            def pregenerator(request, elements, kw):
                return elements, kw

        You can employ a pregenerator by passing a ``pregenerator``
        argument to the :meth:`pyramid.config.Configurator.add_route`
        function.
        """
IRoutePregenerator
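A small, hedged sketch of wiring a pregenerator into a Pyramid route, as the interface docstring above describes; the route name, pattern, and the forced `_host` value are illustrative assumptions, not part of the interface:

```python
from pyramid.config import Configurator


def force_subdomain(request, elements, kw):
    # Illustrative pregenerator: pin generated URLs to a hypothetical subdomain.
    kw.setdefault("_host", "docs.example.com")
    return elements, kw


config = Configurator()
config.add_route("home", "/", pregenerator=force_subdomain)
```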
python
jazzband__django-formtools
tests/wizard/wizardtests/tests.py
{ "start": 18409, "end": 19183 }
class ____(TestCase):
    def setUp(self):
        self.rf = RequestFactory()
        self.poet = Poet.objects.create(name='test')
        self.poem = self.poet.poem_set.create(name='test poem')

    def test_set_instance(self):
        # Regression test for #21259
        poet = self.poet

        class InlineFormSetWizard(CookieWizardView):
            instance = None

            def get_form_instance(self, step):
                if self.instance is None:
                    self.instance = poet
                return self.instance

        view = InlineFormSetWizard.as_view([PoemFormSet])
        response = view(self.rf.get('/'))
        formset = response.context_data['wizard']['form']
        self.assertEqual(formset.instance, self.poet)
WizardInlineFormSetTests
python
tensorflow__tensorflow
tensorflow/python/util/deprecation_test.py
{ "start": 40975, "end": 41516 }
class ____(test.TestCase):

    @test.mock.patch.object(logging, "warning", autospec=True)
    def testCallDeprecatedModule(self, mock_warning):
        from tensorflow.python.util import deprecated_module  # pylint: disable=g-import-not-at-top

        self.assertEqual(0, mock_warning.call_count)
        result = deprecated_module.a()
        self.assertEqual(1, mock_warning.call_count)
        self.assertEqual(1, result)

        deprecated_module.a()
        self.assertEqual(1, mock_warning.call_count)


if __name__ == "__main__":
    test.main()
DeprecateMovedModuleTest
python
Textualize__textual
src/textual/widgets/_select.py
{ "start": 5607, "end": 8236 }
class ____(Horizontal): """Displays the currently selected option.""" DEFAULT_CSS = """ SelectCurrent { border: tall $border-blurred; color: $foreground; background: $surface; width: 1fr; height: auto; padding: 0 2; &.-textual-compact { border: none !important; } &:ansi { border: tall ansi_blue; color: ansi_default; background: ansi_default; } Static#label { width: 1fr; height: auto; color: $foreground 50%; background: transparent; } &.-has-value Static#label { color: $foreground; } .arrow { box-sizing: content-box; width: 1; height: 1; padding: 0 0 0 1; color: $foreground 50%; background: transparent; } } """ has_value: var[bool] = var(False) """True if there is a current value, or False if it is None.""" class Toggle(Message): """Request toggle overlay.""" def __init__(self, placeholder: str) -> None: """Initialize the SelectCurrent. Args: placeholder: A string to display when there is nothing selected. """ super().__init__() self.placeholder = placeholder self.label: RenderableType | NoSelection = Select.BLANK def update(self, label: RenderableType | NoSelection) -> None: """Update the content in the widget. Args: label: A renderable to display, or `None` for the placeholder. """ self.label = label self.has_value = label is not Select.BLANK self.query_one("#label", Static).update( self.placeholder if isinstance(label, NoSelection) else label ) def compose(self) -> ComposeResult: """Compose label and down arrow.""" yield Static(self.placeholder, id="label") yield Static("▼", classes="arrow down-arrow") yield Static("▲", classes="arrow up-arrow") def _watch_has_value(self, has_value: bool) -> None: """Toggle the class.""" self.set_class(has_value, "-has-value") def _on_click(self, event: events.Click) -> None: """Inform ancestor we want to toggle.""" event.stop() self.post_message(self.Toggle()) SelectType = TypeVar("SelectType", bound=Hashable) """The type used for data in the Select.""" SelectOption: TypeAlias = "tuple[str, SelectType]" """The type used for options in the Select."""
SelectCurrent
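`SelectCurrent` is an internal piece of Textual's `Select` widget; a minimal, hedged sketch of the public `Select` API that drives it (the option labels, values, and prompt text are made up):

```python
from textual.app import App, ComposeResult
from textual.widgets import Select


class ColourApp(App):
    def compose(self) -> ComposeResult:
        # (label, value) pairs; the prompt is shown until a value is picked.
        yield Select([("Red", 1), ("Green", 2), ("Blue", 3)], prompt="Pick a colour")


if __name__ == "__main__":
    ColourApp().run()
```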
python
wandb__wandb
wandb/sdk/data_types/helper_types/bounding_boxes_2d.py
{ "start": 709, "end": 13794 }
class ____(JSONMetadata): """Format images with 2D bounding box overlays for logging to W&B. Args: val: (dictionary) A dictionary of the following form: box_data: (list of dictionaries) One dictionary for each bounding box, containing: position: (dictionary) the position and size of the bounding box, in one of two formats Note that boxes need not all use the same format. {"minX", "minY", "maxX", "maxY"}: (dictionary) A set of coordinates defining the upper and lower bounds of the box (the bottom left and top right corners) {"middle", "width", "height"}: (dictionary) A set of coordinates defining the center and dimensions of the box, with "middle" as a list [x, y] for the center point and "width" and "height" as numbers domain: (string) One of two options for the bounding box coordinate domain null: By default, or if no argument is passed, the coordinate domain is assumed to be relative to the original image, expressing this box as a fraction or percentage of the original image. This means all coordinates and dimensions passed into the "position" argument are floating point numbers between 0 and 1. "pixel": (string literal) The coordinate domain is set to the pixel space. This means all coordinates and dimensions passed into "position" are integers within the bounds of the image dimensions. class_id: (integer) The class label id for this box scores: (dictionary of string to number, optional) A mapping of named fields to numerical values (float or int), can be used for filtering boxes in the UI based on a range of values for the corresponding field box_caption: (string, optional) A string to be displayed as the label text above this box in the UI, often composed of the class label, class name, and/or scores class_labels: (dictionary, optional) A map of integer class labels to their readable class names key: (string) The readable name or id for this set of bounding boxes (e.g. 
predictions, ground_truth) Examples: ### Log bounding boxes for a single image ```python import numpy as np import wandb run = wandb.init() image = np.random.randint(low=0, high=256, size=(200, 300, 3)) class_labels = {0: "person", 1: "car", 2: "road", 3: "building"} img = wandb.Image( image, boxes={ "predictions": { "box_data": [ { # one box expressed in the default relative/fractional domain "position": { "minX": 0.1, "maxX": 0.2, "minY": 0.3, "maxY": 0.4, }, "class_id": 1, "box_caption": class_labels[1], "scores": {"acc": 0.2, "loss": 1.2}, }, { # another box expressed in the pixel domain "position": { "middle": [150, 20], "width": 68, "height": 112, }, "domain": "pixel", "class_id": 3, "box_caption": "a building", "scores": {"acc": 0.5, "loss": 0.7}, }, # Log as many boxes an as needed ], "class_labels": class_labels, } }, ) run.log({"driving_scene": img}) ``` ### Log a bounding box overlay to a Table ```python import numpy as np import wandb run = wandb.init() image = np.random.randint(low=0, high=256, size=(200, 300, 3)) class_labels = {0: "person", 1: "car", 2: "road", 3: "building"} class_set = wandb.Classes( [ {"name": "person", "id": 0}, {"name": "car", "id": 1}, {"name": "road", "id": 2}, {"name": "building", "id": 3}, ] ) img = wandb.Image( image, boxes={ "predictions": { "box_data": [ { # one box expressed in the default relative/fractional domain "position": { "minX": 0.1, "maxX": 0.2, "minY": 0.3, "maxY": 0.4, }, "class_id": 1, "box_caption": class_labels[1], "scores": {"acc": 0.2, "loss": 1.2}, }, { # another box expressed in the pixel domain "position": { "middle": [150, 20], "width": 68, "height": 112, }, "domain": "pixel", "class_id": 3, "box_caption": "a building", "scores": {"acc": 0.5, "loss": 0.7}, }, # Log as many boxes an as needed ], "class_labels": class_labels, } }, classes=class_set, ) table = wandb.Table(columns=["image"]) table.add_data(img) run.log({"driving_scene": table}) ``` """ _log_type = "bounding-boxes" # TODO: when the change is made to have this produce a dict with a _type, define # it here as _log_type, associate it in to_json def __init__(self, val: dict, key: str) -> None: """Initialize a BoundingBoxes object. The input dictionary `val` should contain the keys: box_data: a list of dictionaries, each of which describes a bounding box. class_labels: (optional) A map of integer class labels to their readable class names. Each bounding box dictionary should contain the following keys: position: (dictionary) the position and size of the bounding box. domain: (string) One of two options for the bounding box coordinate domain. class_id: (integer) The class label id for this box. scores: (dictionary of string to number, optional) A mapping of named fields to numerical values (float or int). box_caption: (optional) The label text, often composed of the class label, class name, and/or scores. The position dictionary should be in one of two formats: {"minX", "minY", "maxX", "maxY"}: (dictionary) A set of coordinates defining the upper and lower bounds of the box (the bottom left and top right corners). {"middle", "width", "height"}: (dictionary) A set of coordinates defining the center and dimensions of the box, with "middle" as a list [x, y] for the center point and "width" and "height" as numbers. Note that boxes need not all use the same format. Args: val: (dictionary) A dictionary containing the bounding box data. key: (string) The readable name or id for this set of bounding boxes (e.g. 
predictions, ground_truth) """ # Pytorch tensors are not serializable to json, # so we convert them to lists to avoid errors later on. _convert_pytorch_tensor_to_list(val.get("box_data", [])) super().__init__(val) self._val = val["box_data"] self._key = key # Add default class mapping if "class_labels" not in val: np = util.get_module( "numpy", required="Bounding box support requires numpy" ) classes = ( np.unique(list(box["class_id"] for box in val["box_data"])) .astype(np.int32) .tolist() ) class_labels = {c: "class_" + str(c) for c in classes} self._class_labels = class_labels else: self._class_labels = val["class_labels"] def bind_to_run( self, run: "LocalRun", key: Union[int, str], step: Union[int, str], id_: Optional[Union[int, str]] = None, ignore_copy_err: Optional[bool] = None, ) -> None: # bind_to_run key argument is the Image parent key # the self._key value is the mask's sub key super().bind_to_run(run, key, step, id_=id_, ignore_copy_err=ignore_copy_err) run._add_singleton( "bounding_box/class_labels", str(key) + "_wandb_delimeter_" + self._key, self._class_labels, ) @classmethod def type_name(cls) -> str: return "boxes2D" def validate(self, val: dict) -> bool: # Optional argument if "class_labels" in val: for k, v in list(val["class_labels"].items()): if (not isinstance(k, numbers.Number)) or (not isinstance(v, str)): raise TypeError( "Class labels must be a dictionary of numbers to string" ) boxes = val["box_data"] if not isinstance(boxes, list): raise TypeError("Boxes must be a list") for box in boxes: # Required arguments error_str = ( "Each box must contain a position with: middle, width, and height or \ \nminX, maxX, minY, maxY." ) if "position" not in box: raise TypeError(error_str) else: valid = False if ( "middle" in box["position"] and len(box["position"]["middle"]) == 2 and has_num(box["position"], "width") and has_num(box["position"], "height") ): valid = True elif ( has_num(box["position"], "minX") and has_num(box["position"], "maxX") and has_num(box["position"], "minY") and has_num(box["position"], "maxY") ): valid = True if not valid: raise TypeError(error_str) # Optional arguments if ("scores" in box) and not isinstance(box["scores"], dict): raise TypeError("Box scores must be a dictionary") elif "scores" in box: for k, v in list(box["scores"].items()): if not isinstance(k, str): raise TypeError("A score key must be a string") if not isinstance(v, numbers.Number): raise TypeError("A score value must be a number") if ("class_id" in box) and not isinstance(box["class_id"], int): raise TypeError("A box's class_id must be an integer") # Optional if ("box_caption" in box) and not isinstance(box["box_caption"], str): raise TypeError("A box's caption must be a string") return True def to_json(self, run_or_artifact: Union["LocalRun", "Artifact"]) -> dict: if isinstance(run_or_artifact, wandb.Run): return super().to_json(run_or_artifact) elif isinstance(run_or_artifact, wandb.Artifact): # TODO (tim): I would like to log out a proper dictionary representing this object, but don't # want to mess with the visualizations that are currently available in the UI. This really should output # an object with a _type key. Will need to push this change to the UI first to ensure backwards compat return self._val else: raise TypeError("to_json accepts wandb_run.Run or wandb.Artifact") @classmethod def from_json( cls: Type["BoundingBoxes2D"], json_obj: dict, source_artifact: "Artifact" ) -> "BoundingBoxes2D": return cls({"box_data": json_obj}, "")
BoundingBoxes2D
python
pytorch__pytorch
torch/_dynamo/device_interface.py
{ "start": 19464, "end": 22332 }
class ____(DeviceInterface): @staticmethod def is_bf16_supported(including_emulation: bool = False) -> bool: return torch.backends.mps.is_macos_or_newer(14, 0) @classmethod def is_dtype_supported( cls, dtype: torch.dtype, including_emulation: bool = False ) -> bool: if dtype in [torch.float64, torch.complex128]: return False return dtype != torch.bfloat16 or cls.is_bf16_supported(including_emulation) @staticmethod def is_available() -> bool: return torch.backends.mps.is_available() @staticmethod def current_device() -> int: return 0 @staticmethod def get_compute_capability(device: torch.types.Device = None) -> str: return "" @staticmethod def synchronize(device: torch.types.Device = None) -> None: torch.mps.synchronize() # pyrefly: ignore [bad-override] class Worker: @staticmethod def get_device_properties(device: torch.types.Device = None) -> Any: return namedtuple("MPSProperties", ["multi_processor_count"])( torch.backends.mps.get_core_count() # type: ignore[arg-type] ) @staticmethod def current_device() -> int: return 0 device_interfaces: dict[str, type[DeviceInterface]] = {} _device_initialized = False def register_interface_for_device( device: Union[str, torch.device], device_interface: type[DeviceInterface] ) -> None: if isinstance(device, torch.device): device = device.type device_interfaces[device] = device_interface def get_interface_for_device(device: Union[str, torch.device]) -> type[DeviceInterface]: if isinstance(device, torch.device): device = device.type if not _device_initialized: init_device_reg() if device in device_interfaces: return device_interfaces[device] raise NotImplementedError(f"No interface for device {device}") def get_registered_device_interfaces() -> Iterable[tuple[str, type[DeviceInterface]]]: if not _device_initialized: init_device_reg() return device_interfaces.items() def init_device_reg() -> None: global _device_initialized register_interface_for_device("cuda", CudaInterface) for i in range(torch.cuda.device_count()): register_interface_for_device(f"cuda:{i}", CudaInterface) register_interface_for_device("xpu", XpuInterface) for i in range(torch.xpu.device_count()): register_interface_for_device(f"xpu:{i}", XpuInterface) register_interface_for_device("mtia", MtiaInterface) for i in range(torch.mtia.device_count()): register_interface_for_device(f"mtia:{i}", MtiaInterface) register_interface_for_device("cpu", CpuInterface) register_interface_for_device("mps", MpsInterface) _device_initialized = True
MpsInterface
python
pytorch__pytorch
torch/testing/_internal/common_quantization.py
{ "start": 83412, "end": 83802 }
class ____(torch.nn.Module):
    def __init__(self) -> None:
        super().__init__()
        self.conv1 = FunctionalConv2d()
        self.conv2 = FunctionalConv2d()

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        return x

    def get_example_inputs(self) -> tuple[Any, ...]:
        return self.conv1.get_example_inputs()
TwoLayerFunctionalConvModel
python
scrapy__scrapy
tests/test_feedexport.py
{ "start": 4940, "end": 7880 }
class ____: def get_test_spider(self, settings=None): class TestSpider(scrapy.Spider): name = "test_spider" crawler = get_crawler(settings_dict=settings) return TestSpider.from_crawler(crawler) async def _store(self, uri, content, feed_options=None, settings=None): crawler = get_crawler(settings_dict=settings or {}) storage = FTPFeedStorage.from_crawler( crawler, uri, feed_options=feed_options, ) verifyObject(IFeedStorage, storage) spider = self.get_test_spider() file = storage.open(spider) file.write(content) await maybe_deferred_to_future(storage.store(file)) def _assert_stored(self, path: Path, content): assert path.exists() try: assert path.read_bytes() == content finally: path.unlink() @deferred_f_from_coro_f async def test_append(self): with MockFTPServer() as ftp_server: filename = "file" url = ftp_server.url(filename) feed_options = {"overwrite": False} await self._store(url, b"foo", feed_options=feed_options) await self._store(url, b"bar", feed_options=feed_options) self._assert_stored(ftp_server.path / filename, b"foobar") @deferred_f_from_coro_f async def test_overwrite(self): with MockFTPServer() as ftp_server: filename = "file" url = ftp_server.url(filename) await self._store(url, b"foo") await self._store(url, b"bar") self._assert_stored(ftp_server.path / filename, b"bar") @deferred_f_from_coro_f async def test_append_active_mode(self): with MockFTPServer() as ftp_server: settings = {"FEED_STORAGE_FTP_ACTIVE": True} filename = "file" url = ftp_server.url(filename) feed_options = {"overwrite": False} await self._store(url, b"foo", feed_options=feed_options, settings=settings) await self._store(url, b"bar", feed_options=feed_options, settings=settings) self._assert_stored(ftp_server.path / filename, b"foobar") @deferred_f_from_coro_f async def test_overwrite_active_mode(self): with MockFTPServer() as ftp_server: settings = {"FEED_STORAGE_FTP_ACTIVE": True} filename = "file" url = ftp_server.url(filename) await self._store(url, b"foo", settings=settings) await self._store(url, b"bar", settings=settings) self._assert_stored(ftp_server.path / filename, b"bar") def test_uri_auth_quote(self): # RFC3986: 3.2.1. User Information pw_quoted = quote(string.punctuation, safe="") st = FTPFeedStorage(f"ftp://foo:{pw_quoted}@example.com/some_path", {}) assert st.password == string.punctuation
TestFTPFeedStorage
python
dagster-io__dagster
python_modules/dagster/dagster/components/resolved/scopes.py
{ "start": 2711, "end": 3462 }
class ____(WrappedObjectScope):
    """Provides access to Dagster definitions and utilities within templates.

    Available via `{{ dg.* }}` in component YAML files.

    Examples:
        {{ dg.AutomationCondition.eager() }}
        {{ dg.DailyPartitionsDefinition(start_date="2024-01-01") }}
    """

    def __init__(self):
        import dagster as dg

        accessible_attributes = {
            "AutomationCondition",
            "DailyPartitionsDefinition",
            "WeeklyPartitionsDefinition",
            "MonthlyPartitionsDefinition",
            "HourlyPartitionsDefinition",
            "StaticPartitionsDefinition",
            "TimeWindowPartitionsDefinition",
        }

        super().__init__(dg, accessible_attributes)
DgScope
python
getsentry__sentry
tests/sentry/seer/fetch_issues/test_by_function_name.py
{ "start": 16124, "end": 21188 }
class ____(IntegrationTestCase, CreateEventTestCase): provider = GitHubIntegrationProvider def setUp(self): super().setUp() self.gh_repo: Repository = self.create_repo( name="getsentry/sentry", provider="integrations:github", integration_id=self.integration.id, project=self.project, url="https://github.com/getsentry/sentry", external_id="123456", ) self.code_mapping = self.create_code_mapping( project=self.project, repo=self.gh_repo, ) @patch("sentry.seer.fetch_issues.by_function_name._get_projects_and_filenames_from_source_file") @patch("sentry.seer.fetch_issues.by_function_name._get_issues_for_file") def test_no_projects_found_fallback(self, mock_get_issues, mock_get_projects): # Mock no projects found initially mock_get_projects.return_value = (set(), {"test.py"}) mock_get_issues.return_value = [] assert self.gh_repo.external_id is not None repo_projects = RepoProjects( organization_id=self.organization.id, provider="integrations:github", external_id=self.gh_repo.external_id, repo=self.gh_repo, repo_configs=[], projects=[self.project], ) _fetch_issues_from_repo_projects( repo_projects=repo_projects, filename="test.py", function_name="target_function", ) # Should fall back to using all repo projects mock_get_issues.assert_called_once() call_args = mock_get_issues.call_args[0] assert self.project in call_args[0] # Should use fallback projects @patch("sentry.seer.fetch_issues.by_function_name._get_projects_and_filenames_from_source_file") @patch("sentry.seer.fetch_issues.by_function_name._get_issues_for_file") def test_projects_found_no_fallback(self, mock_get_issues, mock_get_projects): # Mock projects found mock_get_projects.return_value = ({self.project}, {"test.py"}) mock_get_issues.return_value = [] assert self.gh_repo.external_id is not None repo_projects = RepoProjects( organization_id=self.organization.id, provider="integrations:github", external_id=self.gh_repo.external_id, repo=self.gh_repo, repo_configs=[], projects=[self.project], ) _fetch_issues_from_repo_projects( repo_projects=repo_projects, filename="test.py", function_name="target_function", ) # Should use the found projects, not fallback mock_get_issues.assert_called_once() call_args = mock_get_issues.call_args[0] assert call_args[0] == [self.project] def test_fetch_issues_from_repo_projects_returns_groups(self): """Test that _fetch_issues_from_repo_projects returns a list of Group objects.""" # Create a group that should match event = self._create_event( filenames=["test.py", "other.py"], function_names=["target_function", "other_func"], user_id="1", ) expected_group = event.group # Get repo projects assert self.gh_repo.external_id is not None repo_projects = get_repo_and_projects( organization_id=self.organization.id, provider="integrations:github", external_id=self.gh_repo.external_id, ) # Test the internal function directly with real search behavior # Based on existing tests, we know _get_issues_for_file works in test environment results = _fetch_issues_from_repo_projects( repo_projects=repo_projects, filename="test.py", function_name="target_function" ) # Verify it returns a list of Group objects assert isinstance(results, list) assert len(results) > 0, "Expected to find at least one matching group" for result in results: assert isinstance(result, Group) assert expected_group.id in [result.id for result in results] def test_fetch_issues_from_repo_projects_empty_result(self): """Test that _fetch_issues_from_repo_projects returns empty list when no matches.""" # Get repo projects but don't create any matching events 
assert self.gh_repo.external_id is not None repo_projects = get_repo_and_projects( organization_id=self.organization.id, provider="integrations:github", external_id=self.gh_repo.external_id, ) # Test the internal function with non-matching criteria results = _fetch_issues_from_repo_projects( repo_projects=repo_projects, filename="nonexistent.py", function_name="nonexistent_function", ) # Verify it returns an empty list assert isinstance(results, list) assert len(results) == 0
TestFetchIssuesFromRepoProjects
python
walkccc__LeetCode
solutions/500. Keyboard Row/500.py
{ "start": 0, "end": 297 }
class ____:
    def findWords(self, words: list[str]) -> list[str]:
        ans = []
        rows = [set('qwertyuiop'), set('asdfghjkl'), set('zxcvbnm')]

        for word in words:
            lowerWord = set(word.lower())
            if any(lowerWord <= row for row in rows):
                ans.append(word)

        return ans
Solution
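The same set-containment check as a standalone sketch (the sample words are made up):

```python
rows = [set("qwertyuiop"), set("asdfghjkl"), set("zxcvbnm")]
words = ["Hello", "Alaska", "Dad", "Peace"]

# A word qualifies when its letters are a subset of a single keyboard row.
one_row_words = [w for w in words if any(set(w.lower()) <= row for row in rows)]
print(one_row_words)  # ['Alaska', 'Dad']
```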
python
sympy__sympy
sympy/integrals/manualintegrate.py
{ "start": 4310, "end": 4587 }
class ____(AtomicRule):
    """integrate(x**a, x)"""
    base: Expr
    exp: Expr

    def eval(self) -> Expr:
        return Piecewise(
            ((self.base**(self.exp + 1))/(self.exp + 1), Ne(self.exp, -1)),
            (log(self.base), True),
        )


@dataclass
PowerRule
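The `Piecewise` above encodes the two textbook antiderivatives of `x**a`; a quick check against SymPy's public `integrate` (the exponents are arbitrary examples):

```python
from sympy import integrate, symbols

x = symbols('x', positive=True)

print(integrate(x**3, x))   # x**4/4   (exp != -1 branch)
print(integrate(x**-1, x))  # log(x)   (exp == -1 branch)
```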
python
jazzband__django-model-utils
tests/models.py
{ "start": 9370, "end": 9555 }
class ____(models.Model):
    name = models.CharField(max_length=20)
    number = models.IntegerField()
    mutable = MutableField(default=None)

    tracker = ModelTracker()
ModelTracked
python
PyCQA__pylint
tests/functional/i/invalid/invalid_exceptions/invalid_exceptions_caught.py
{ "start": 2270, "end": 2358 }
class ____(HasErrorInMRO):
    pass


try:
    raise Second
except Second:
    pass
Second
python
google__pytype
pytype/pyc/opcodes.py
{ "start": 8786, "end": 8886 }
class ____(Opcode):
    _FLAGS = HAS_JUNKNOWN  # might call __exit__
    __slots__ = ()
WITH_CLEANUP_START
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/sql/ddl.py
{ "start": 14941, "end": 15093 }
class ____(_CreateBase["Table"]):
    def to_metadata(self, metadata: MetaData, table: Table) -> Self:
        raise NotImplementedError()
TableCreateDDL
python
numba__numba
numba/core/utils.py
{ "start": 11305, "end": 11832 }
class ____(MutableMapping[Tk, Tv], _tp.Generic[Tk, Tv]):
    def __init__(self, dct=None):
        if dct is None:
            dct = {}
        self._dct: dict[Tk, Tv] = dct

    def __getitem__(self, k: Tk) -> Tv:
        return self._dct[k]

    def __setitem__(self, k: Tk, v: Tv):
        self._dct[k] = v

    def __delitem__(self, k: Tk):
        del self._dct[k]

    def __len__(self) -> int:
        return len(self._dct)

    def __iter__(self):
        # Yield the keys in sorted order.
        return iter(k for k in sorted(self._dct))
MutableSortedMap
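A small usage sketch, assuming the class above is importable from `numba.core.utils` (the path shown for this record); keys come back in sorted order thanks to `__iter__`:

```python
from numba.core.utils import MutableSortedMap

m = MutableSortedMap({"b": 2, "c": 3})
m["a"] = 1

print(list(m))          # ['a', 'b', 'c']
print(list(m.items()))  # [('a', 1), ('b', 2), ('c', 3)]
```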
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/flake8_pie/PIE796.py
{ "start": 653, "end": 719 }
class ____(enum.Enum):
    A = "A"
    B = "B"
    C = "C"
FakeEnum9
python
catalyst-team__catalyst
catalyst/contrib/losses/regression.py
{ "start": 228, "end": 1258 }
class ____(nn.Module):
    """@TODO: Docs. Contribution is welcome."""

    def __init__(self, clip_delta=1.0, reduction="mean"):
        """@TODO: Docs. Contribution is welcome."""
        super().__init__()
        self.clip_delta = clip_delta
        self.reduction = reduction or "none"

    def forward(
        self, output: torch.Tensor, target: torch.Tensor, weights=None
    ) -> torch.Tensor:
        """@TODO: Docs. Contribution is welcome."""
        diff = target - output
        diff_abs = torch.abs(diff)
        quadratic_part = torch.clamp(diff_abs, max=self.clip_delta)
        linear_part = diff_abs - quadratic_part
        loss = 0.5 * quadratic_part ** 2 + self.clip_delta * linear_part

        if weights is not None:
            loss = torch.mean(loss * weights, dim=1)
        else:
            loss = torch.mean(loss, dim=1)

        if self.reduction == "mean":
            loss = torch.mean(loss)
        elif self.reduction == "sum":
            loss = torch.sum(loss)
        return loss
HuberLossV0
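A short numeric sanity check of the loss above, assuming it can be imported as shown (the import path and the tensors are assumptions); with |diff| = 2 and clip_delta = 1, each element contributes 0.5·1² + 1·1 = 1.5:

```python
import torch

from catalyst.contrib.losses import HuberLossV0  # assumed import path

criterion = HuberLossV0(clip_delta=1.0, reduction="mean")
output = torch.zeros(4, 3)
target = torch.full((4, 3), 2.0)

print(criterion(output, target))  # tensor(1.5000)
```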
python
pypa__warehouse
warehouse/packaging/search.py
{ "start": 402, "end": 1879 }
class ____(Document): name = Text() normalized_name = Text(analyzer=NameAnalyzer) summary = Text(analyzer="snowball") description = Text(analyzer="snowball") author = Text() author_email = Text(analyzer=EmailAnalyzer) maintainer = Text() maintainer_email = Text(analyzer=EmailAnalyzer) license = Text() home_page = Keyword() download_url = Keyword() keywords = Text(analyzer="snowball") platform = Keyword() created = Date() classifiers = Keyword(multi=True) @classmethod def from_db(cls, release): obj = cls(meta={"id": release.normalized_name}) obj["name"] = release.name obj["normalized_name"] = release.normalized_name obj["summary"] = release.summary obj["description"] = release.description[:5_000_000] obj["author"] = release.author obj["author_email"] = release.author_email obj["maintainer"] = release.maintainer obj["maintainer_email"] = release.maintainer_email obj["home_page"] = release.home_page obj["download_url"] = release.download_url obj["keywords"] = release.keywords obj["platform"] = release.platform obj["created"] = release.created obj["classifiers"] = release.classifiers return obj class Index: # make sure this class can match any index so it will always be used to # deserialize data coming from opensearch. name = "*"
Project
python
geekcomputers__Python
flappyBird_pygame/flappy_bird.py
{ "start": 361, "end": 1937 }
class ____(pygame.sprite.Sprite): WIDTH = 32 # bird image width HEIGHT = 32 # bird image height DOWN_SPEED = 0.18 # pix per ms -y UP_SPEED = 0.3 # pix per ms +y UP_DURATION = 150 # time for which bird go up def __init__(self, x, y, ms_to_up, images): super(Bird, self).__init__() self.x, self.y = x, y self.ms_to_up = ms_to_up self._img_wingup, self._img_wingdown = images self._mask_wingup = pygame.mask.from_surface(self._img_wingup) self._mask_wingdown = pygame.mask.from_surface(self._img_wingdown) def update(self, delta_frames=1): if self.ms_to_up > 0: frac_climb_done = 1 - self.ms_to_up / Bird.UP_DURATION self.y -= ( Bird.UP_SPEED * frames_to_msec(delta_frames) * (1 - math.cos(frac_climb_done * math.pi)) ) self.ms_to_up -= frames_to_msec(delta_frames) else: self.y += Bird.DOWN_SPEED * frames_to_msec(delta_frames) @property def image(self): # to animate bird if pygame.time.get_ticks() % 500 >= 250: return self._img_wingup else: return self._img_wingdown @property def mask(self): # collision detection if pygame.time.get_ticks() % 500 >= 250: return self._mask_wingup else: return self._mask_wingdown @property def rect(self): # return birds params return Rect(self.x, self.y, Bird.WIDTH, Bird.HEIGHT)
Bird
python
pennersr__django-allauth
tests/apps/socialaccount/providers/xing/tests.py
{ "start": 235, "end": 2127 }
class ____(OAuthTestsMixin, TestCase): provider_id = XingProvider.id def get_mocked_response(self): return [ MockedResponse( HTTPStatus.OK, """ {"users":[{"id":"20493333_1cd028","active_email":"raymond.penners@example.com", "badges":[],"birth_date":{"year":null,"month":null,"day":null}, "business_address":{"street":null,"zip_code":null,"city":null,"province":null, "country":"NL","email":null,"fax":null,"phone":null,"mobile_phone":null}, "display_name":"Raymond Penners","educational_background": {"primary_school_id":null,"schools":[],"qualifications":[]}, "employment_status":"EMPLOYEE","first_name":"Raymond","gender":"m", "haves":null,"instant_messaging_accounts":{},"interests":null,"languages": {"nl":null},"last_name":"Penners","organisation_member":null, "page_name":"Raymond_Penners", "permalink":"https://www.xing.com/profile/Raymond_Penners", "photo_urls":{"thumb":"https://www.xing.com/img/n/nobody_m.30x40.jpg", "large":"https://www.xing.com/img/n/nobody_m.140x185.jpg","mini_thumb": "https://www.xing.com/img/n/nobody_m.18x24.jpg","maxi_thumb": "https://www.xing.com/img/n/nobody_m.70x93.jpg","medium_thumb": "https://www.xing.com/img/n/nobody_m.57x75.jpg"},"premium_services":[], "private_address":{"street":null,"zip_code":null,"city":null,"province":null, "country":null,"email":"raymond.penners@example.com","fax":null, "phone":null,"mobile_phone":null},"professional_experience": {"primary_company":{"name":null,"url":null,"tag":null,"title":null, "begin_date":null,"end_date":null,"description":null,"industry":"OTHERS", "company_size":null,"career_level":null},"non_primary_companies":[], "awards":[]},"time_zone":{"utc_offset":2.0,"name":"Europe/Berlin"}, "wants":null,"web_profiles":{}}]} """, ) ] def get_expected_to_str(self): return "raymond.penners@example.com"
XingTests
python
plotly__plotly.py
plotly/graph_objs/layout/map/layer/_circle.py
{ "start": 235, "end": 2380 }
class ____(_BaseLayoutHierarchyType): _parent_path_str = "layout.map.layer" _path_str = "layout.map.layer.circle" _valid_props = {"radius"} @property def radius(self): """ Sets the circle radius (map.layer.paint.circle-radius). Has an effect only when `type` is set to "circle". The 'radius' property is a number and may be specified as: - An int or float Returns ------- int|float """ return self["radius"] @radius.setter def radius(self, val): self["radius"] = val @property def _prop_descriptions(self): return """\ radius Sets the circle radius (map.layer.paint.circle-radius). Has an effect only when `type` is set to "circle". """ def __init__(self, arg=None, radius=None, **kwargs): """ Construct a new Circle object Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.layout.map.layer.Circle` radius Sets the circle radius (map.layer.paint.circle-radius). Has an effect only when `type` is set to "circle". Returns ------- Circle """ super().__init__("circle") if "_parent" in kwargs: self._parent = kwargs["_parent"] return if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError("""\ The first argument to the plotly.graph_objs.layout.map.layer.Circle constructor must be a dict or an instance of :class:`plotly.graph_objs.layout.map.layer.Circle`""") self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) self._set_property("radius", arg, radius) self._process_kwargs(**dict(arg, **kwargs)) self._skip_invalid = False
Circle
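The validator class above is normally reached through `fig.update_layout`, but it can also be constructed directly; a minimal sketch, with the public import path inferred from the `_path_str` shown above.

from plotly.graph_objs.layout.map.layer import Circle  # inferred import path

c = Circle(radius=12)
print(c.radius)             # 12
print(c.to_plotly_json())   # {'radius': 12}

# The same setting is more commonly passed as a plain dict, e.g. (assumed usage):
# fig.update_layout(map_layers=[{"type": "circle", "circle": {"radius": 12}}])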
python
astropy__astropy
astropy/utils/metadata/tests/test_metadata.py
{ "start": 318, "end": 369 }
class ____(OrderedDict): pass
OrderedDictSubclass
python
coleifer__peewee
peewee.py
{ "start": 13673, "end": 13731 }
class ____(object): pass # SQL Generation.
ModelDescriptor
python
numba__numba
numba/core/typing/templates.py
{ "start": 8018, "end": 11182 }
class ____(ABC):
    # Set to true to disable unsafe cast.
    # subclass override-able
    unsafe_casting = True
    # Set to true to require exact match without casting.
    # subclass override-able
    exact_match_required = False
    # Set to true to prefer literal arguments.
    # Useful for definitions that specialize on literal but also support
    # non-literals.
    # subclass override-able
    prefer_literal = False
    # metadata
    metadata = {}

    def __init__(self, context):
        self.context = context

    def _select(self, cases, args, kws):
        options = {
            'unsafe_casting': self.unsafe_casting,
            'exact_match_required': self.exact_match_required,
        }
        selected = self.context.resolve_overload(self.key, cases, args, kws,
                                                 **options)
        return selected

    def get_impl_key(self, sig):
        """
        Return the key for looking up the implementation for the given
        signature on the target context.
        """
        # Lookup the key on the class, to avoid binding it with `self`.
        key = type(self).key
        # On Python 2, we must also take care about unbound methods
        if isinstance(key, MethodType):
            assert key.im_self is None
            key = key.im_func
        return key

    @classmethod
    def get_source_code_info(cls, impl):
        """
        Gets the source information about function impl.
        Returns:

        code - str: source code as a string
        firstlineno - int: the first line number of the function impl
        path - str: the path to file containing impl

        if any of the above are not available something generic is returned
        """
        try:
            code, firstlineno = inspect.getsourcelines(impl)
        except OSError:
            # missing source, probably a string
            code = "None available (built from string?)"
            firstlineno = 0
        path = inspect.getsourcefile(impl)
        if path is None:
            path = "<unknown> (built from string?)"
        return code, firstlineno, path

    @abstractmethod
    def get_template_info(self):
        """
        Returns a dictionary with information specific to the template that
        will govern how error messages are displayed to users. The dictionary
        must be of the form:

        info = {
            'kind': "unknown",      # str: The kind of template, e.g. "Overload"
            'name': "unknown",      # str: The name of the source function
            'sig': "unknown",       # str: The signature(s) of the source function
            'filename': "unknown",  # str: The filename of the source function
            'lines': ("start", "end"),  # tuple(int, int): The start and end
                                        # line of the source function.
            'docstring': "unknown"  # str: The docstring of the source function
        }
        """
        pass

    def __str__(self):
        info = self.get_template_info()
        srcinfo = f"{info['filename']}:{info['lines'][0]}"
        return f"<{self.__class__.__name__} {srcinfo}>"

    __repr__ = __str__
FunctionTemplate
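`get_source_code_info` above is a plain classmethod built on `inspect`, so it can be tried without constructing a typing context; a minimal sketch with an ordinary function (no Numba compilation involved).

def double(x):
    return x * 2

code, firstlineno, path = FunctionTemplate.get_source_code_info(double)
print(firstlineno, path)
# `code` is the list of source lines on success, or a fallback string when
# the source cannot be located (e.g. for functions defined in a REPL).
print("".join(code) if isinstance(code, list) else code)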
python
scipy__scipy
scipy/special/tests/test_legendre.py
{ "start": 21309, "end": 28518 }
class ____: @pytest.mark.parametrize("shape", [(10,), (4, 9), (3, 5, 7)]) def test_specific(self, shape): rng = np.random.default_rng(1234) theta = rng.uniform(-np.pi, np.pi, shape) p, p_jac = sph_legendre_p_all(4, 4, theta, diff_n=1) np.testing.assert_allclose(p[0, 0], sph_legendre_p_0_0(theta)) np.testing.assert_allclose(p[0, 1], 0) np.testing.assert_allclose(p[0, 2], 0) np.testing.assert_allclose(p[0, 3], 0) np.testing.assert_allclose(p[0, 4], 0) np.testing.assert_allclose(p[0, -3], 0) np.testing.assert_allclose(p[0, -2], 0) np.testing.assert_allclose(p[0, -1], 0) np.testing.assert_allclose(p[1, 0], sph_legendre_p_1_0(theta)) np.testing.assert_allclose(p[1, 1], sph_legendre_p_1_1(theta)) np.testing.assert_allclose(p[1, 2], 0) np.testing.assert_allclose(p[1, 3], 0) np.testing.assert_allclose(p[1, 4], 0) np.testing.assert_allclose(p[1, -4], 0) np.testing.assert_allclose(p[1, -3], 0) np.testing.assert_allclose(p[1, -2], 0) np.testing.assert_allclose(p[1, -1], sph_legendre_p_1_m1(theta)) np.testing.assert_allclose(p[2, 0], sph_legendre_p_2_0(theta)) np.testing.assert_allclose(p[2, 1], sph_legendre_p_2_1(theta)) np.testing.assert_allclose(p[2, 2], sph_legendre_p_2_2(theta)) np.testing.assert_allclose(p[2, 3], 0) np.testing.assert_allclose(p[2, 4], 0) np.testing.assert_allclose(p[2, -4], 0) np.testing.assert_allclose(p[2, -3], 0) np.testing.assert_allclose(p[2, -2], sph_legendre_p_2_m2(theta)) np.testing.assert_allclose(p[2, -1], sph_legendre_p_2_m1(theta)) np.testing.assert_allclose(p[3, 0], sph_legendre_p_3_0(theta)) np.testing.assert_allclose(p[3, 1], sph_legendre_p_3_1(theta)) np.testing.assert_allclose(p[3, 2], sph_legendre_p_3_2(theta)) np.testing.assert_allclose(p[3, 3], sph_legendre_p_3_3(theta)) np.testing.assert_allclose(p[3, 4], 0) np.testing.assert_allclose(p[3, -4], 0) np.testing.assert_allclose(p[3, -3], sph_legendre_p_3_m3(theta)) np.testing.assert_allclose(p[3, -2], sph_legendre_p_3_m2(theta)) np.testing.assert_allclose(p[3, -1], sph_legendre_p_3_m1(theta)) np.testing.assert_allclose(p[4, 0], sph_legendre_p_4_0(theta)) np.testing.assert_allclose(p[4, 1], sph_legendre_p_4_1(theta)) np.testing.assert_allclose(p[4, 2], sph_legendre_p_4_2(theta)) np.testing.assert_allclose(p[4, 3], sph_legendre_p_4_3(theta)) np.testing.assert_allclose(p[4, 4], sph_legendre_p_4_4(theta)) np.testing.assert_allclose(p[4, -4], sph_legendre_p_4_m4(theta)) np.testing.assert_allclose(p[4, -3], sph_legendre_p_4_m3(theta)) np.testing.assert_allclose(p[4, -2], sph_legendre_p_4_m2(theta)) np.testing.assert_allclose(p[4, -1], sph_legendre_p_4_m1(theta)) np.testing.assert_allclose(p_jac[0, 0], sph_legendre_p_0_0_jac(theta)) np.testing.assert_allclose(p_jac[0, 1], 0) np.testing.assert_allclose(p_jac[0, 2], 0) np.testing.assert_allclose(p_jac[0, 3], 0) np.testing.assert_allclose(p_jac[0, 4], 0) np.testing.assert_allclose(p_jac[0, -3], 0) np.testing.assert_allclose(p_jac[0, -2], 0) np.testing.assert_allclose(p_jac[0, -1], 0) np.testing.assert_allclose(p_jac[1, 0], sph_legendre_p_1_0_jac(theta)) np.testing.assert_allclose(p_jac[1, 1], sph_legendre_p_1_1_jac(theta)) np.testing.assert_allclose(p_jac[1, 2], 0) np.testing.assert_allclose(p_jac[1, 3], 0) np.testing.assert_allclose(p_jac[1, 4], 0) np.testing.assert_allclose(p_jac[1, -4], 0) np.testing.assert_allclose(p_jac[1, -3], 0) np.testing.assert_allclose(p_jac[1, -2], 0) np.testing.assert_allclose(p_jac[1, -1], sph_legendre_p_1_m1_jac(theta)) np.testing.assert_allclose(p_jac[2, 0], sph_legendre_p_2_0_jac(theta)) np.testing.assert_allclose(p_jac[2, 1], 
sph_legendre_p_2_1_jac(theta)) np.testing.assert_allclose(p_jac[2, 2], sph_legendre_p_2_2_jac(theta)) np.testing.assert_allclose(p_jac[2, 3], 0) np.testing.assert_allclose(p_jac[2, 4], 0) np.testing.assert_allclose(p_jac[2, -4], 0) np.testing.assert_allclose(p_jac[2, -3], 0) np.testing.assert_allclose(p_jac[2, -2], sph_legendre_p_2_m2_jac(theta)) np.testing.assert_allclose(p_jac[2, -1], sph_legendre_p_2_m1_jac(theta)) np.testing.assert_allclose(p_jac[3, 0], sph_legendre_p_3_0_jac(theta)) np.testing.assert_allclose(p_jac[3, 1], sph_legendre_p_3_1_jac(theta)) np.testing.assert_allclose(p_jac[3, 2], sph_legendre_p_3_2_jac(theta)) np.testing.assert_allclose(p_jac[3, 3], sph_legendre_p_3_3_jac(theta)) np.testing.assert_allclose(p_jac[3, 4], 0) np.testing.assert_allclose(p_jac[3, -4], 0) np.testing.assert_allclose(p_jac[3, -3], sph_legendre_p_3_m3_jac(theta)) np.testing.assert_allclose(p_jac[3, -2], sph_legendre_p_3_m2_jac(theta)) np.testing.assert_allclose(p_jac[3, -1], sph_legendre_p_3_m1_jac(theta)) np.testing.assert_allclose(p_jac[4, 0], sph_legendre_p_4_0_jac(theta)) np.testing.assert_allclose(p_jac[4, 1], sph_legendre_p_4_1_jac(theta)) np.testing.assert_allclose(p_jac[4, 2], sph_legendre_p_4_2_jac(theta)) np.testing.assert_allclose(p_jac[4, 3], sph_legendre_p_4_3_jac(theta)) np.testing.assert_allclose(p_jac[4, 4], sph_legendre_p_4_4_jac(theta)) np.testing.assert_allclose(p_jac[4, -4], sph_legendre_p_4_m4_jac(theta)) np.testing.assert_allclose(p_jac[4, -3], sph_legendre_p_4_m3_jac(theta)) np.testing.assert_allclose(p_jac[4, -2], sph_legendre_p_4_m2_jac(theta)) np.testing.assert_allclose(p_jac[4, -1], sph_legendre_p_4_m1_jac(theta)) @pytest.mark.parametrize("shape", [(10,), (4, 9), (3, 5, 7, 10)]) def test_ode(self, shape): rng = np.random.default_rng(1234) n = rng.integers(0, 10, shape) m = rng.integers(-10, 10, shape) theta = rng.uniform(-np.pi, np.pi, shape) p, p_jac, p_hess = sph_legendre_p(n, m, theta, diff_n=2) assert p.shape == shape assert p_jac.shape == p.shape assert p_hess.shape == p_jac.shape np.testing.assert_allclose(np.sin(theta) * p_hess, -np.cos(theta) * p_jac - (n * (n + 1) * np.sin(theta) - m * m / np.sin(theta)) * p, rtol=1e-05, atol=1e-08)
TestSphLegendreP
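A small sketch of the call pattern the tests above exercise; the import location is an assumption (these Legendre helpers ship with recent SciPy), and `diff_n=1` requests the value together with its first derivative with respect to theta.

import numpy as np
from scipy.special import sph_legendre_p  # assumed import path

theta = np.linspace(0.1, np.pi - 0.1, 5)
p, p_jac = sph_legendre_p(2, 1, theta, diff_n=1)
print(p.shape, p_jac.shape)   # both match theta.shape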
python
matplotlib__matplotlib
lib/matplotlib/backend_bases.py
{ "start": 52580, "end": 60907 }
class ____(LocationEvent): """ A key event (key press, key release). A KeyEvent has a number of special attributes in addition to those defined by the parent `Event` and `LocationEvent` classes. Attributes ---------- key : None or str The key(s) pressed. Could be *None*, a single case sensitive Unicode character ("g", "G", "#", etc.), a special key ("control", "shift", "f1", "up", etc.) or a combination of the above (e.g., "ctrl+alt+g", "ctrl+alt+G"). Notes ----- Modifier keys will be prefixed to the pressed key and will be in the order "ctrl", "alt", "super". The exception to this rule is when the pressed key is itself a modifier key, therefore "ctrl+alt" and "alt+control" can both be valid key values. Examples -------- :: def on_key(event): print('you pressed', event.key, event.xdata, event.ydata) cid = fig.canvas.mpl_connect('key_press_event', on_key) """ def __init__(self, name, canvas, key, x=0, y=0, guiEvent=None): super().__init__(name, canvas, x, y, guiEvent=guiEvent) self.key = key @classmethod def _from_ax_coords(cls, name, ax, xy, key, *args, **kwargs): """ Generate a synthetic event at a given axes coordinate. This method is intended for creating events during testing. The event can be emitted by calling its ``_process()`` method. """ # Separate from MouseEvent._from_ax_coords instead of being defined in the base # class, due to different parameter order in the constructor signature. x, y = ax.transData.transform(xy) event = cls(name, ax.figure.canvas, key, x, y, *args, **kwargs) event.inaxes = ax event.xdata, event.ydata = xy # Force exact xy to avoid fp roundtrip issues. return event # Default callback for key events. def _key_handler(event): # Dead reckoning of key. if event.name == "key_press_event": event.canvas._key = event.key elif event.name == "key_release_event": event.canvas._key = None # Default callback for mouse events. def _mouse_handler(event): # Dead-reckoning of button and key. if event.name == "button_press_event": event.canvas._button = event.button elif event.name == "button_release_event": event.canvas._button = None elif event.name == "motion_notify_event" and event.button is None: event.button = event.canvas._button if event.key is None: event.key = event.canvas._key # Emit axes_enter/axes_leave. if event.name == "motion_notify_event": last_ref = LocationEvent._last_axes_ref last_axes = last_ref() if last_ref else None if last_axes != event.inaxes: if last_axes is not None: # Create a synthetic LocationEvent for the axes_leave_event. # Its inaxes attribute needs to be manually set (because the # cursor is actually *out* of that Axes at that point); this is # done with the internal _set_inaxes method which ensures that # the xdata and ydata attributes are also correct. try: canvas = last_axes.get_figure(root=True).canvas leave_event = LocationEvent( "axes_leave_event", canvas, event.x, event.y, event.guiEvent, modifiers=event.modifiers) leave_event._set_inaxes(last_axes) canvas.callbacks.process("axes_leave_event", leave_event) except Exception: pass # The last canvas may already have been torn down. if event.inaxes is not None: event.canvas.callbacks.process("axes_enter_event", event) LocationEvent._last_axes_ref = ( weakref.ref(event.inaxes) if event.inaxes else None) def _get_renderer(figure, print_method=None): """ Get the renderer that would be used to save a `.Figure`. If you need a renderer without any active draw methods use renderer._draw_disabled to temporary patch them out at your call site. 
""" # This is implemented by triggering a draw, then immediately jumping out of # Figure.draw() by raising an exception. class Done(Exception): pass def _draw(renderer): raise Done(renderer) with cbook._setattr_cm(figure, draw=_draw), ExitStack() as stack: if print_method is None: fmt = figure.canvas.get_default_filetype() # Even for a canvas' default output type, a canvas switch may be # needed, e.g. for FigureCanvasBase. print_method = stack.enter_context( figure.canvas._switch_canvas_and_return_print_method(fmt)) try: print_method(io.BytesIO()) except Done as exc: renderer, = exc.args return renderer else: raise RuntimeError(f"{print_method} did not call Figure.draw, so " f"no renderer is available") def _no_output_draw(figure): # _no_output_draw was promoted to the figure level, but # keep this here in case someone was calling it... figure.draw_without_rendering() def _is_non_interactive_terminal_ipython(ip): """ Return whether we are in a terminal IPython, but non interactive. When in _terminal_ IPython, ip.parent will have and `interact` attribute, if this attribute is False we do not setup eventloop integration as the user will _not_ interact with IPython. In all other case (ZMQKernel, or is interactive), we do. """ return (hasattr(ip, 'parent') and (ip.parent is not None) and getattr(ip.parent, 'interact', None) is False) @contextmanager def _allow_interrupt(prepare_notifier, handle_sigint): """ A context manager that allows terminating a plot by sending a SIGINT. It is necessary because the running backend prevents the Python interpreter from running and processing signals (i.e., to raise a KeyboardInterrupt). To solve this, one needs to somehow wake up the interpreter and make it close the plot window. We do this by using the signal.set_wakeup_fd() function which organizes a write of the signal number into a socketpair. A backend-specific function, *prepare_notifier*, arranges to listen to the pair's read socket while the event loop is running. (If it returns a notifier object, that object is kept alive while the context manager runs.) If SIGINT was indeed caught, after exiting the on_signal() function the interpreter reacts to the signal according to the handler function which had been set up by a signal.signal() call; here, we arrange to call the backend-specific *handle_sigint* function, passing the notifier object as returned by prepare_notifier(). Finally, we call the old SIGINT handler with the same arguments that were given to our custom handler. We do this only if the old handler for SIGINT was not None, which means that a non-python handler was installed, i.e. in Julia, and not SIG_IGN which means we should ignore the interrupts. Parameters ---------- prepare_notifier : Callable[[socket.socket], object] handle_sigint : Callable[[object], object] """ old_sigint_handler = signal.getsignal(signal.SIGINT) if old_sigint_handler in (None, signal.SIG_IGN, signal.SIG_DFL): yield return handler_args = None wsock, rsock = socket.socketpair() wsock.setblocking(False) rsock.setblocking(False) old_wakeup_fd = signal.set_wakeup_fd(wsock.fileno()) notifier = prepare_notifier(rsock) def save_args_and_handle_sigint(*args): nonlocal handler_args, notifier handler_args = args handle_sigint(notifier) notifier = None signal.signal(signal.SIGINT, save_args_and_handle_sigint) try: yield finally: wsock.close() rsock.close() signal.set_wakeup_fd(old_wakeup_fd) signal.signal(signal.SIGINT, old_sigint_handler) if handler_args is not None: old_sigint_handler(*handler_args)
KeyEvent
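Beyond the callback example in the docstring, the `_from_ax_coords` helper above is intended for tests; a minimal sketch of pushing a synthetic key press through the callback registry, using the Agg backend so no GUI is needed (the handler and key are hypothetical, and `_process()` is the private emit method the helper's docstring refers to).

import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from matplotlib.backend_bases import KeyEvent

fig, ax = plt.subplots()
pressed = []
fig.canvas.mpl_connect("key_press_event", lambda ev: pressed.append(ev.key))

event = KeyEvent._from_ax_coords("key_press_event", ax, (0.5, 0.5), "ctrl+g")
event._process()
print(pressed)   # ['ctrl+g']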
python
davidhalter__jedi
jedi/inference/arguments.py
{ "start": 10289, "end": 12218 }
class ____(_AbstractArgumentsMixin): def __init__(self, arguments): self._wrapped_arguments = arguments @property def context(self): return self._wrapped_arguments.context @property def argument_node(self): return self._wrapped_arguments.argument_node @property def trailer(self): return self._wrapped_arguments.trailer def unpack(self, func=None): raise NotImplementedError def get_calling_nodes(self): return self._wrapped_arguments.get_calling_nodes() def __repr__(self): return '<%s: %s>' % (self.__class__.__name__, self._wrapped_arguments) def _iterate_star_args(context, array, input_node, funcdef=None): if not array.py__getattribute__('__iter__'): if funcdef is not None: # TODO this funcdef should not be needed. m = "TypeError: %s() argument after * must be a sequence, not %s" \ % (funcdef.name.value, array) analysis.add(context, 'type-error-star', input_node, message=m) try: iter_ = array.py__iter__ except AttributeError: pass else: yield from iter_() def _star_star_dict(context, array, input_node, funcdef): from jedi.inference.value.instance import CompiledInstance if isinstance(array, CompiledInstance) and array.name.string_name == 'dict': # For now ignore this case. In the future add proper iterators and just # make one call without crazy isinstance checks. return {} elif isinstance(array, iterable.Sequence) and array.array_type == 'dict': return array.exact_key_items() else: if funcdef is not None: m = "TypeError: %s argument after ** must be a mapping, not %s" \ % (funcdef.name.value, array) analysis.add(context, 'type-error-star-star', input_node, message=m) return {}
TreeArgumentsWrapper
python
sympy__sympy
sympy/geometry/polygon.py
{ "start": 968, "end": 45281 }
class ____(GeometrySet): """A two-dimensional polygon. A simple polygon in space. Can be constructed from a sequence of points or from a center, radius, number of sides and rotation angle. Parameters ========== vertices A sequence of points. n : int, optional If $> 0$, an n-sided RegularPolygon is created. Default value is $0$. Attributes ========== area angles perimeter vertices centroid sides Raises ====== GeometryError If all parameters are not Points. See Also ======== sympy.geometry.point.Point, sympy.geometry.line.Segment, Triangle Notes ===== Polygons are treated as closed paths rather than 2D areas so some calculations can be be negative or positive (e.g., area) based on the orientation of the points. Any consecutive identical points are reduced to a single point and any points collinear and between two points will be removed unless they are needed to define an explicit intersection (see examples). A Triangle, Segment or Point will be returned when there are 3 or fewer points provided. Examples ======== >>> from sympy import Polygon, pi >>> p1, p2, p3, p4, p5 = [(0, 0), (1, 0), (5, 1), (0, 1), (3, 0)] >>> Polygon(p1, p2, p3, p4) Polygon(Point2D(0, 0), Point2D(1, 0), Point2D(5, 1), Point2D(0, 1)) >>> Polygon(p1, p2) Segment2D(Point2D(0, 0), Point2D(1, 0)) >>> Polygon(p1, p2, p5) Segment2D(Point2D(0, 0), Point2D(3, 0)) The area of a polygon is calculated as positive when vertices are traversed in a ccw direction. When the sides of a polygon cross the area will have positive and negative contributions. The following defines a Z shape where the bottom right connects back to the top left. >>> Polygon((0, 2), (2, 2), (0, 0), (2, 0)).area 0 When the keyword `n` is used to define the number of sides of the Polygon then a RegularPolygon is created and the other arguments are interpreted as center, radius and rotation. The unrotated RegularPolygon will always have a vertex at Point(r, 0) where `r` is the radius of the circle that circumscribes the RegularPolygon. Its method `spin` can be used to increment that angle. >>> p = Polygon((0,0), 1, n=3) >>> p RegularPolygon(Point2D(0, 0), 1, 3, 0) >>> p.vertices[0] Point2D(1, 0) >>> p.args[0] Point2D(0, 0) >>> p.spin(pi/2) >>> p.vertices[0] Point2D(0, 1) """ __slots__ = () def __new__(cls, *args, n = 0, **kwargs): if n: args = list(args) # return a virtual polygon with n sides if len(args) == 2: # center, radius args.append(n) elif len(args) == 3: # center, radius, rotation args.insert(2, n) return RegularPolygon(*args, **kwargs) vertices = [Point(a, dim=2, **kwargs) for a in args] # remove consecutive duplicates nodup = [] for p in vertices: if nodup and p == nodup[-1]: continue nodup.append(p) if len(nodup) > 1 and nodup[-1] == nodup[0]: nodup.pop() # last point was same as first # remove collinear points i = -3 while i < len(nodup) - 3 and len(nodup) > 2: a, b, c = nodup[i], nodup[i + 1], nodup[i + 2] if Point.is_collinear(a, b, c): nodup.pop(i + 1) if a == c: nodup.pop(i) else: i += 1 vertices = list(nodup) if len(vertices) > 3: return GeometryEntity.__new__(cls, *vertices, **kwargs) elif len(vertices) == 3: return Triangle(*vertices, **kwargs) elif len(vertices) == 2: return Segment(*vertices, **kwargs) else: return Point(*vertices, **kwargs) @property def area(self): """ The area of the polygon. Notes ===== The area calculation can be positive or negative based on the orientation of the points. If any side of the polygon crosses any other side, there will be areas having opposite signs. 
See Also ======== sympy.geometry.ellipse.Ellipse.area Examples ======== >>> from sympy import Point, Polygon >>> p1, p2, p3, p4 = map(Point, [(0, 0), (1, 0), (5, 1), (0, 1)]) >>> poly = Polygon(p1, p2, p3, p4) >>> poly.area 3 In the Z shaped polygon (with the lower right connecting back to the upper left) the areas cancel out: >>> Z = Polygon((0, 1), (1, 1), (0, 0), (1, 0)) >>> Z.area 0 In the M shaped polygon, areas do not cancel because no side crosses any other (though there is a point of contact). >>> M = Polygon((0, 0), (0, 1), (2, 0), (3, 1), (3, 0)) >>> M.area -3/2 """ area = 0 args = self.args for i in range(len(args)): x1, y1 = args[i - 1].args x2, y2 = args[i].args area += x1*y2 - x2*y1 return simplify(area) / 2 @staticmethod def _is_clockwise(a, b, c): """Return True/False for cw/ccw orientation. Examples ======== >>> from sympy import Point, Polygon >>> a, b, c = [Point(i) for i in [(0, 0), (1, 1), (1, 0)]] >>> Polygon._is_clockwise(a, b, c) True >>> Polygon._is_clockwise(a, c, b) False """ ba = b - a ca = c - a t_area = simplify(ba.x*ca.y - ca.x*ba.y) res = t_area.is_nonpositive if res is None: raise ValueError("Can't determine orientation") return res @property def angles(self): """The internal angle at each vertex. Returns ======= angles : dict A dictionary where each key is a vertex and each value is the internal angle at that vertex. The vertices are represented as Points. See Also ======== sympy.geometry.point.Point, sympy.geometry.line.LinearEntity.angle_between Examples ======== >>> from sympy import Point, Polygon >>> p1, p2, p3, p4 = map(Point, [(0, 0), (1, 0), (5, 1), (0, 1)]) >>> poly = Polygon(p1, p2, p3, p4) >>> poly.angles[p1] pi/2 >>> poly.angles[p2] acos(-4*sqrt(17)/17) """ args = self.vertices n = len(args) ret = {} for i in range(n): a, b, c = args[i - 2], args[i - 1], args[i] reflex_ang = Ray(b, a).angle_between(Ray(b, c)) if self._is_clockwise(a, b, c): ret[b] = 2*S.Pi - reflex_ang else: ret[b] = reflex_ang # internal sum should be pi*(n - 2), not pi*(n+2) # so if ratio is (n+2)/(n-2) > 1 it is wrong wrong = ((sum(ret.values())/S.Pi-1)/(n - 2) - 1).is_positive if wrong: two_pi = 2*S.Pi for b in ret: ret[b] = two_pi - ret[b] elif wrong is None: raise ValueError("could not determine Polygon orientation.") return ret @property def ambient_dimension(self): return self.vertices[0].ambient_dimension @property def perimeter(self): """The perimeter of the polygon. Returns ======= perimeter : number or Basic instance See Also ======== sympy.geometry.line.Segment.length Examples ======== >>> from sympy import Point, Polygon >>> p1, p2, p3, p4 = map(Point, [(0, 0), (1, 0), (5, 1), (0, 1)]) >>> poly = Polygon(p1, p2, p3, p4) >>> poly.perimeter sqrt(17) + 7 """ p = 0 args = self.vertices for i in range(len(args)): p += args[i - 1].distance(args[i]) return simplify(p) @property def vertices(self): """The vertices of the polygon. Returns ======= vertices : list of Points Notes ===== When iterating over the vertices, it is more efficient to index self rather than to request the vertices and index them. Only use the vertices when you want to process all of them at once. This is even more important with RegularPolygons that calculate each vertex. 
See Also ======== sympy.geometry.point.Point Examples ======== >>> from sympy import Point, Polygon >>> p1, p2, p3, p4 = map(Point, [(0, 0), (1, 0), (5, 1), (0, 1)]) >>> poly = Polygon(p1, p2, p3, p4) >>> poly.vertices [Point2D(0, 0), Point2D(1, 0), Point2D(5, 1), Point2D(0, 1)] >>> poly.vertices[0] Point2D(0, 0) """ return list(self.args) @property def centroid(self): """The centroid of the polygon. Returns ======= centroid : Point See Also ======== sympy.geometry.point.Point, sympy.geometry.util.centroid Examples ======== >>> from sympy import Point, Polygon >>> p1, p2, p3, p4 = map(Point, [(0, 0), (1, 0), (5, 1), (0, 1)]) >>> poly = Polygon(p1, p2, p3, p4) >>> poly.centroid Point2D(31/18, 11/18) """ A = 1/(6*self.area) cx, cy = 0, 0 args = self.args for i in range(len(args)): x1, y1 = args[i - 1].args x2, y2 = args[i].args v = x1*y2 - x2*y1 cx += v*(x1 + x2) cy += v*(y1 + y2) return Point(simplify(A*cx), simplify(A*cy)) def second_moment_of_area(self, point=None): """Returns the second moment and product moment of area of a two dimensional polygon. Parameters ========== point : Point, two-tuple of sympifyable objects, or None(default=None) point is the point about which second moment of area is to be found. If "point=None" it will be calculated about the axis passing through the centroid of the polygon. Returns ======= I_xx, I_yy, I_xy : number or SymPy expression I_xx, I_yy are second moment of area of a two dimensional polygon. I_xy is product moment of area of a two dimensional polygon. Examples ======== >>> from sympy import Polygon, symbols >>> a, b = symbols('a, b') >>> p1, p2, p3, p4, p5 = [(0, 0), (a, 0), (a, b), (0, b), (a/3, b/3)] >>> rectangle = Polygon(p1, p2, p3, p4) >>> rectangle.second_moment_of_area() (a*b**3/12, a**3*b/12, 0) >>> rectangle.second_moment_of_area(p5) (a*b**3/9, a**3*b/9, a**2*b**2/36) References ========== .. [1] https://en.wikipedia.org/wiki/Second_moment_of_area """ I_xx, I_yy, I_xy = 0, 0, 0 args = self.vertices for i in range(len(args)): x1, y1 = args[i-1].args x2, y2 = args[i].args v = x1*y2 - x2*y1 I_xx += (y1**2 + y1*y2 + y2**2)*v I_yy += (x1**2 + x1*x2 + x2**2)*v I_xy += (x1*y2 + 2*x1*y1 + 2*x2*y2 + x2*y1)*v A = self.area c_x = self.centroid[0] c_y = self.centroid[1] # parallel axis theorem I_xx_c = (I_xx/12) - (A*(c_y**2)) I_yy_c = (I_yy/12) - (A*(c_x**2)) I_xy_c = (I_xy/24) - (A*(c_x*c_y)) if point is None: return I_xx_c, I_yy_c, I_xy_c I_xx = (I_xx_c + A*((point[1]-c_y)**2)) I_yy = (I_yy_c + A*((point[0]-c_x)**2)) I_xy = (I_xy_c + A*((point[0]-c_x)*(point[1]-c_y))) return I_xx, I_yy, I_xy def first_moment_of_area(self, point=None): """ Returns the first moment of area of a two-dimensional polygon with respect to a certain point of interest. First moment of area is a measure of the distribution of the area of a polygon in relation to an axis. The first moment of area of the entire polygon about its own centroid is always zero. Therefore, here it is calculated for an area, above or below a certain point of interest, that makes up a smaller portion of the polygon. This area is bounded by the point of interest and the extreme end (top or bottom) of the polygon. The first moment for this area is is then determined about the centroidal axis of the initial polygon. References ========== .. [1] https://skyciv.com/docs/tutorials/section-tutorials/calculating-the-statical-or-first-moment-of-area-of-beam-sections/?cc=BMD .. 
[2] https://mechanicalc.com/reference/cross-sections Parameters ========== point: Point, two-tuple of sympifyable objects, or None (default=None) point is the point above or below which the area of interest lies If ``point=None`` then the centroid acts as the point of interest. Returns ======= Q_x, Q_y: number or SymPy expressions Q_x is the first moment of area about the x-axis Q_y is the first moment of area about the y-axis A negative sign indicates that the section modulus is determined for a section below (or left of) the centroidal axis Examples ======== >>> from sympy import Point, Polygon >>> a, b = 50, 10 >>> p1, p2, p3, p4 = [(0, b), (0, 0), (a, 0), (a, b)] >>> p = Polygon(p1, p2, p3, p4) >>> p.first_moment_of_area() (625, 3125) >>> p.first_moment_of_area(point=Point(30, 7)) (525, 3000) """ if point: xc, yc = self.centroid else: point = self.centroid xc, yc = point h_line = Line(point, slope=0) v_line = Line(point, slope=S.Infinity) h_poly = self.cut_section(h_line) v_poly = self.cut_section(v_line) poly_1 = h_poly[0] if h_poly[0].area <= h_poly[1].area else h_poly[1] poly_2 = v_poly[0] if v_poly[0].area <= v_poly[1].area else v_poly[1] Q_x = (poly_1.centroid.y - yc)*poly_1.area Q_y = (poly_2.centroid.x - xc)*poly_2.area return Q_x, Q_y def polar_second_moment_of_area(self): """Returns the polar modulus of a two-dimensional polygon It is a constituent of the second moment of area, linked through the perpendicular axis theorem. While the planar second moment of area describes an object's resistance to deflection (bending) when subjected to a force applied to a plane parallel to the central axis, the polar second moment of area describes an object's resistance to deflection when subjected to a moment applied in a plane perpendicular to the object's central axis (i.e. parallel to the cross-section) Examples ======== >>> from sympy import Polygon, symbols >>> a, b = symbols('a, b') >>> rectangle = Polygon((0, 0), (a, 0), (a, b), (0, b)) >>> rectangle.polar_second_moment_of_area() a**3*b/12 + a*b**3/12 References ========== .. [1] https://en.wikipedia.org/wiki/Polar_moment_of_inertia """ second_moment = self.second_moment_of_area() return second_moment[0] + second_moment[1] def section_modulus(self, point=None): """Returns a tuple with the section modulus of a two-dimensional polygon. Section modulus is a geometric property of a polygon defined as the ratio of second moment of area to the distance of the extreme end of the polygon from the centroidal axis. Parameters ========== point : Point, two-tuple of sympifyable objects, or None(default=None) point is the point at which section modulus is to be found. If "point=None" it will be calculated for the point farthest from the centroidal axis of the polygon. Returns ======= S_x, S_y: numbers or SymPy expressions S_x is the section modulus with respect to the x-axis S_y is the section modulus with respect to the y-axis A negative sign indicates that the section modulus is determined for a point below the centroidal axis Examples ======== >>> from sympy import symbols, Polygon, Point >>> a, b = symbols('a, b', positive=True) >>> rectangle = Polygon((0, 0), (a, 0), (a, b), (0, b)) >>> rectangle.section_modulus() (a*b**2/6, a**2*b/6) >>> rectangle.section_modulus(Point(a/4, b/4)) (-a*b**2/3, -a**2*b/3) References ========== .. 
[1] https://en.wikipedia.org/wiki/Section_modulus """ x_c, y_c = self.centroid if point is None: # taking x and y as maximum distances from centroid x_min, y_min, x_max, y_max = self.bounds y = max(y_c - y_min, y_max - y_c) x = max(x_c - x_min, x_max - x_c) else: # taking x and y as distances of the given point from the centroid y = point.y - y_c x = point.x - x_c second_moment= self.second_moment_of_area() S_x = second_moment[0]/y S_y = second_moment[1]/x return S_x, S_y @property def sides(self): """The directed line segments that form the sides of the polygon. Returns ======= sides : list of sides Each side is a directed Segment. See Also ======== sympy.geometry.point.Point, sympy.geometry.line.Segment Examples ======== >>> from sympy import Point, Polygon >>> p1, p2, p3, p4 = map(Point, [(0, 0), (1, 0), (5, 1), (0, 1)]) >>> poly = Polygon(p1, p2, p3, p4) >>> poly.sides [Segment2D(Point2D(0, 0), Point2D(1, 0)), Segment2D(Point2D(1, 0), Point2D(5, 1)), Segment2D(Point2D(5, 1), Point2D(0, 1)), Segment2D(Point2D(0, 1), Point2D(0, 0))] """ res = [] args = self.vertices for i in range(-len(args), 0): res.append(Segment(args[i], args[i + 1])) return res @property def bounds(self): """Return a tuple (xmin, ymin, xmax, ymax) representing the bounding rectangle for the geometric figure. """ verts = self.vertices xs = [p.x for p in verts] ys = [p.y for p in verts] return (min(xs), min(ys), max(xs), max(ys)) def is_convex(self): """Is the polygon convex? A polygon is convex if all its interior angles are less than 180 degrees and there are no intersections between sides. Returns ======= is_convex : boolean True if this polygon is convex, False otherwise. See Also ======== sympy.geometry.util.convex_hull Examples ======== >>> from sympy import Point, Polygon >>> p1, p2, p3, p4 = map(Point, [(0, 0), (1, 0), (5, 1), (0, 1)]) >>> poly = Polygon(p1, p2, p3, p4) >>> poly.is_convex() True """ # Determine orientation of points args = self.vertices cw = self._is_clockwise(args[-2], args[-1], args[0]) for i in range(1, len(args)): if cw ^ self._is_clockwise(args[i - 2], args[i - 1], args[i]): return False # check for intersecting sides sides = self.sides for i, si in enumerate(sides): pts = si.args # exclude the sides connected to si for j in range(1 if i == len(sides) - 1 else 0, i - 1): sj = sides[j] if sj.p1 not in pts and sj.p2 not in pts: hit = si.intersection(sj) if hit: return False return True def encloses_point(self, p): """ Return True if p is enclosed by (is inside of) self. Notes ===== Being on the border of self is considered False. Parameters ========== p : Point Returns ======= encloses_point : True, False or None See Also ======== sympy.geometry.point.Point, sympy.geometry.ellipse.Ellipse.encloses_point Examples ======== >>> from sympy import Polygon, Point >>> p = Polygon((0, 0), (4, 0), (4, 4)) >>> p.encloses_point(Point(2, 1)) True >>> p.encloses_point(Point(2, 2)) False >>> p.encloses_point(Point(5, 5)) False References ========== .. [1] https://paulbourke.net/geometry/polygonmesh/#insidepoly """ p = Point(p, dim=2) if p in self.vertices or any(p in s for s in self.sides): return False # move to p, checking that the result is numeric lit = [] for v in self.vertices: lit.append(v - p) # the difference is simplified if lit[-1].free_symbols: return None poly = Polygon(*lit) # polygon closure is assumed in the following test but Polygon removes duplicate pts so # the last point has to be added so all sides are computed. Using Polygon.sides is # not good since Segments are unordered. 
args = poly.args indices = list(range(-len(args), 1)) if poly.is_convex(): orientation = None for i in indices: a = args[i] b = args[i + 1] test = ((-a.y)*(b.x - a.x) - (-a.x)*(b.y - a.y)).is_negative if orientation is None: orientation = test elif test is not orientation: return False return True hit_odd = False p1x, p1y = args[0].args for i in indices[1:]: p2x, p2y = args[i].args if 0 > min(p1y, p2y): if 0 <= max(p1y, p2y): if 0 <= max(p1x, p2x): if p1y != p2y: xinters = (-p1y)*(p2x - p1x)/(p2y - p1y) + p1x if p1x == p2x or 0 <= xinters: hit_odd = not hit_odd p1x, p1y = p2x, p2y return hit_odd def arbitrary_point(self, parameter='t'): """A parameterized point on the polygon. The parameter, varying from 0 to 1, assigns points to the position on the perimeter that is that fraction of the total perimeter. So the point evaluated at t=1/2 would return the point from the first vertex that is 1/2 way around the polygon. Parameters ========== parameter : str, optional Default value is 't'. Returns ======= arbitrary_point : Point Raises ====== ValueError When `parameter` already appears in the Polygon's definition. See Also ======== sympy.geometry.point.Point Examples ======== >>> from sympy import Polygon, Symbol >>> t = Symbol('t', real=True) >>> tri = Polygon((0, 0), (1, 0), (1, 1)) >>> p = tri.arbitrary_point('t') >>> perimeter = tri.perimeter >>> s1, s2 = [s.length for s in tri.sides[:2]] >>> p.subs(t, (s1 + s2/2)/perimeter) Point2D(1, 1/2) """ t = _symbol(parameter, real=True) if t.name in (f.name for f in self.free_symbols): raise ValueError('Symbol %s already appears in object and cannot be used as a parameter.' % t.name) sides = [] perimeter = self.perimeter perim_fraction_start = 0 for s in self.sides: side_perim_fraction = s.length/perimeter perim_fraction_end = perim_fraction_start + side_perim_fraction pt = s.arbitrary_point(parameter).subs( t, (t - perim_fraction_start)/side_perim_fraction) sides.append( (pt, (And(perim_fraction_start <= t, t < perim_fraction_end)))) perim_fraction_start = perim_fraction_end return Piecewise(*sides) def parameter_value(self, other, t): if not isinstance(other,GeometryEntity): other = Point(other, dim=self.ambient_dimension) if not isinstance(other,Point): raise ValueError("other must be a point") if other.free_symbols: raise NotImplementedError('non-numeric coordinates') unknown = False p = self.arbitrary_point(T) for pt, cond in p.args: sol = solve(pt - other, T, dict=True) if not sol: continue value = sol[0][T] if simplify(cond.subs(T, value)) == True: return {t: value} unknown = True if unknown: raise ValueError("Given point may not be on %s" % func_name(self)) raise ValueError("Given point is not on %s" % func_name(self)) def plot_interval(self, parameter='t'): """The plot interval for the default geometric plot of the polygon. Parameters ========== parameter : str, optional Default value is 't'. Returns ======= plot_interval : list (plot interval) [parameter, lower_bound, upper_bound] Examples ======== >>> from sympy import Polygon >>> p = Polygon((0, 0), (1, 0), (1, 1)) >>> p.plot_interval() [t, 0, 1] """ t = Symbol(parameter, real=True) return [t, 0, 1] def intersection(self, o): """The intersection of polygon and geometry entity. The intersection may be empty and can contain individual Points and complete Line Segments. 
Parameters ========== other: GeometryEntity Returns ======= intersection : list The list of Segments and Points See Also ======== sympy.geometry.point.Point, sympy.geometry.line.Segment Examples ======== >>> from sympy import Point, Polygon, Line >>> p1, p2, p3, p4 = map(Point, [(0, 0), (1, 0), (5, 1), (0, 1)]) >>> poly1 = Polygon(p1, p2, p3, p4) >>> p5, p6, p7 = map(Point, [(3, 2), (1, -1), (0, 2)]) >>> poly2 = Polygon(p5, p6, p7) >>> poly1.intersection(poly2) [Point2D(1/3, 1), Point2D(2/3, 0), Point2D(9/5, 1/5), Point2D(7/3, 1)] >>> poly1.intersection(Line(p1, p2)) [Segment2D(Point2D(0, 0), Point2D(1, 0))] >>> poly1.intersection(p1) [Point2D(0, 0)] """ intersection_result = [] k = o.sides if isinstance(o, Polygon) else [o] for side in self.sides: for side1 in k: intersection_result.extend(side.intersection(side1)) intersection_result = list(uniq(intersection_result)) points = [entity for entity in intersection_result if isinstance(entity, Point)] segments = [entity for entity in intersection_result if isinstance(entity, Segment)] if points and segments: points_in_segments = list(uniq([point for point in points for segment in segments if point in segment])) if points_in_segments: for i in points_in_segments: points.remove(i) return list(ordered(segments + points)) else: return list(ordered(intersection_result)) def cut_section(self, line): """ Returns a tuple of two polygon segments that lie above and below the intersecting line respectively. Parameters ========== line: Line object of geometry module line which cuts the Polygon. The part of the Polygon that lies above and below this line is returned. Returns ======= upper_polygon, lower_polygon: Polygon objects or None upper_polygon is the polygon that lies above the given line. lower_polygon is the polygon that lies below the given line. upper_polygon and lower polygon are ``None`` when no polygon exists above the line or below the line. Raises ====== ValueError: When the line does not intersect the polygon Examples ======== >>> from sympy import Polygon, Line >>> a, b = 20, 10 >>> p1, p2, p3, p4 = [(0, b), (0, 0), (a, 0), (a, b)] >>> rectangle = Polygon(p1, p2, p3, p4) >>> t = rectangle.cut_section(Line((0, 5), slope=0)) >>> t (Polygon(Point2D(0, 10), Point2D(0, 5), Point2D(20, 5), Point2D(20, 10)), Polygon(Point2D(0, 5), Point2D(0, 0), Point2D(20, 0), Point2D(20, 5))) >>> upper_segment, lower_segment = t >>> upper_segment.area 100 >>> upper_segment.centroid Point2D(10, 15/2) >>> lower_segment.centroid Point2D(10, 5/2) References ========== .. 
[1] https://github.com/sympy/sympy/wiki/A-method-to-return-a-cut-section-of-any-polygon-geometry """ intersection_points = self.intersection(line) if not intersection_points: raise ValueError("This line does not intersect the polygon") points = list(self.vertices) points.append(points[0]) eq = line.equation(x, y) # considering equation of line to be `ax +by + c` a = eq.coeff(x) b = eq.coeff(y) upper_vertices = [] lower_vertices = [] # prev is true when previous point is above the line prev = True prev_point = None for point in points: # when coefficient of y is 0, right side of the line is # considered compare = eq.subs({x: point.x, y: point.y})/b if b \ else eq.subs(x, point.x)/a # if point lies above line if compare > 0: if not prev: # if previous point lies below the line, the intersection # point of the polygon edge and the line has to be included edge = Line(point, prev_point) new_point = edge.intersection(line) upper_vertices.append(new_point[0]) lower_vertices.append(new_point[0]) upper_vertices.append(point) prev = True else: if prev and prev_point: edge = Line(point, prev_point) new_point = edge.intersection(line) upper_vertices.append(new_point[0]) lower_vertices.append(new_point[0]) lower_vertices.append(point) prev = False prev_point = point upper_polygon, lower_polygon = None, None if upper_vertices and isinstance(Polygon(*upper_vertices), Polygon): upper_polygon = Polygon(*upper_vertices) if lower_vertices and isinstance(Polygon(*lower_vertices), Polygon): lower_polygon = Polygon(*lower_vertices) return upper_polygon, lower_polygon def distance(self, o): """ Returns the shortest distance between self and o. If o is a point, then self does not need to be convex. If o is another polygon self and o must be convex. Examples ======== >>> from sympy import Point, Polygon, RegularPolygon >>> p1, p2 = map(Point, [(0, 0), (7, 5)]) >>> poly = Polygon(*RegularPolygon(p1, 1, 3).vertices) >>> poly.distance(p2) sqrt(61) """ if isinstance(o, Point): dist = oo for side in self.sides: current = side.distance(o) if current == 0: return S.Zero elif current < dist: dist = current return dist elif isinstance(o, Polygon) and self.is_convex() and o.is_convex(): return self._do_poly_distance(o) raise NotImplementedError() def _do_poly_distance(self, e2): """ Calculates the least distance between the exteriors of two convex polygons e1 and e2. Does not check for the convexity of the polygons as this is checked by Polygon.distance. Notes ===== - Prints a warning if the two polygons possibly intersect as the return value will not be valid in such a case. For a more through test of intersection use intersection(). 
See Also ======== sympy.geometry.point.Point.distance Examples ======== >>> from sympy import Point, Polygon >>> square = Polygon(Point(0, 0), Point(0, 1), Point(1, 1), Point(1, 0)) >>> triangle = Polygon(Point(1, 2), Point(2, 2), Point(2, 1)) >>> square._do_poly_distance(triangle) sqrt(2)/2 Description of method used ========================== Method: [1] https://web.archive.org/web/20150509035744/http://cgm.cs.mcgill.ca/~orm/mind2p.html Uses rotating calipers: [2] https://en.wikipedia.org/wiki/Rotating_calipers and antipodal points: [3] https://en.wikipedia.org/wiki/Antipodal_point """ e1 = self '''Tests for a possible intersection between the polygons and outputs a warning''' e1_center = e1.centroid e2_center = e2.centroid e1_max_radius = S.Zero e2_max_radius = S.Zero for vertex in e1.vertices: r = Point.distance(e1_center, vertex) if e1_max_radius < r: e1_max_radius = r for vertex in e2.vertices: r = Point.distance(e2_center, vertex) if e2_max_radius < r: e2_max_radius = r center_dist = Point.distance(e1_center, e2_center) if center_dist <= e1_max_radius + e2_max_radius: warnings.warn("Polygons may intersect producing erroneous output", stacklevel=3) ''' Find the upper rightmost vertex of e1 and the lowest leftmost vertex of e2 ''' e1_ymax = Point(0, -oo) e2_ymin = Point(0, oo) for vertex in e1.vertices: if vertex.y > e1_ymax.y or (vertex.y == e1_ymax.y and vertex.x > e1_ymax.x): e1_ymax = vertex for vertex in e2.vertices: if vertex.y < e2_ymin.y or (vertex.y == e2_ymin.y and vertex.x < e2_ymin.x): e2_ymin = vertex min_dist = Point.distance(e1_ymax, e2_ymin) ''' Produce a dictionary with vertices of e1 as the keys and, for each vertex, the points to which the vertex is connected as its value. The same is then done for e2. ''' e1_connections = {} e2_connections = {} for side in e1.sides: if side.p1 in e1_connections: e1_connections[side.p1].append(side.p2) else: e1_connections[side.p1] = [side.p2] if side.p2 in e1_connections: e1_connections[side.p2].append(side.p1) else: e1_connections[side.p2] = [side.p1] for side in e2.sides: if side.p1 in e2_connections: e2_connections[side.p1].append(side.p2) else: e2_connections[side.p1] = [side.p2] if side.p2 in e2_connections: e2_connections[side.p2].append(side.p1) else: e2_connections[side.p2] = [side.p1] e1_current = e1_ymax e2_current = e2_ymin support_line = Line(Point(S.Zero, S.Zero), Point(S.One, S.Zero)) ''' Determine which point in e1 and e2 will be selected after e2_ymin and e1_ymax, this information combined with the above produced dictionaries determines the path that will be taken around the polygons ''' point1 = e1_connections[e1_ymax][0] point2 = e1_connections[e1_ymax][1] angle1 = support_line.angle_between(Line(e1_ymax, point1)) angle2 = support_line.angle_between(Line(e1_ymax, point2)) if angle1 < angle2: e1_next = point1 elif angle2 < angle1: e1_next = point2 elif Point.distance(e1_ymax, point1) > Point.distance(e1_ymax, point2): e1_next = point2 else: e1_next = point1 point1 = e2_connections[e2_ymin][0] point2 = e2_connections[e2_ymin][1] angle1 = support_line.angle_between(Line(e2_ymin, point1)) angle2 = support_line.angle_between(Line(e2_ymin, point2)) if angle1 > angle2: e2_next = point1 elif angle2 > angle1: e2_next = point2 elif Point.distance(e2_ymin, point1) > Point.distance(e2_ymin, point2): e2_next = point2 else: e2_next = point1 ''' Loop which determines the distance between anti-podal pairs and updates the minimum distance accordingly. It repeats until it reaches the starting position. 
''' while True: e1_angle = support_line.angle_between(Line(e1_current, e1_next)) e2_angle = pi - support_line.angle_between(Line( e2_current, e2_next)) if (e1_angle < e2_angle) is True: support_line = Line(e1_current, e1_next) e1_segment = Segment(e1_current, e1_next) min_dist_current = e1_segment.distance(e2_current) if min_dist_current.evalf() < min_dist.evalf(): min_dist = min_dist_current if e1_connections[e1_next][0] != e1_current: e1_current = e1_next e1_next = e1_connections[e1_next][0] else: e1_current = e1_next e1_next = e1_connections[e1_next][1] elif (e1_angle > e2_angle) is True: support_line = Line(e2_next, e2_current) e2_segment = Segment(e2_current, e2_next) min_dist_current = e2_segment.distance(e1_current) if min_dist_current.evalf() < min_dist.evalf(): min_dist = min_dist_current if e2_connections[e2_next][0] != e2_current: e2_current = e2_next e2_next = e2_connections[e2_next][0] else: e2_current = e2_next e2_next = e2_connections[e2_next][1] else: support_line = Line(e1_current, e1_next) e1_segment = Segment(e1_current, e1_next) e2_segment = Segment(e2_current, e2_next) min1 = e1_segment.distance(e2_next) min2 = e2_segment.distance(e1_next) min_dist_current = min(min1, min2) if min_dist_current.evalf() < min_dist.evalf(): min_dist = min_dist_current if e1_connections[e1_next][0] != e1_current: e1_current = e1_next e1_next = e1_connections[e1_next][0] else: e1_current = e1_next e1_next = e1_connections[e1_next][1] if e2_connections[e2_next][0] != e2_current: e2_current = e2_next e2_next = e2_connections[e2_next][0] else: e2_current = e2_next e2_next = e2_connections[e2_next][1] if e1_current == e1_ymax and e2_current == e2_ymin: break return min_dist def _svg(self, scale_factor=1., fill_color="#66cc99"): """Returns SVG path element for the Polygon. Parameters ========== scale_factor : float Multiplication factor for the SVG stroke-width. Default is 1. fill_color : str, optional Hex string for fill color. Default is "#66cc99". """ verts = map(N, self.vertices) coords = ["{},{}".format(p.x, p.y) for p in verts] path = "M {} L {} z".format(coords[0], " L ".join(coords[1:])) return ( '<path fill-rule="evenodd" fill="{2}" stroke="#555555" ' 'stroke-width="{0}" opacity="0.6" d="{1}" />' ).format(2. * scale_factor, path, fill_color) def _hashable_content(self): D = {} def ref_list(point_list): kee = {} for i, p in enumerate(ordered(set(point_list))): kee[p] = i D[i] = p return [kee[p] for p in point_list] S1 = ref_list(self.args) r_nor = rotate_left(S1, least_rotation(S1)) S2 = ref_list(list(reversed(self.args))) r_rev = rotate_left(S2, least_rotation(S2)) if r_nor < r_rev: r = r_nor else: r = r_rev canonical_args = [ D[order] for order in r ] return tuple(canonical_args) def __contains__(self, o): """ Return True if o is contained within the boundary lines of self.altitudes Parameters ========== other : GeometryEntity Returns ======= contained in : bool The points (and sides, if applicable) are contained in self. 
See Also ======== sympy.geometry.entity.GeometryEntity.encloses Examples ======== >>> from sympy import Line, Segment, Point >>> p = Point(0, 0) >>> q = Point(1, 1) >>> s = Segment(p, q*2) >>> l = Line(p, q) >>> p in q False >>> p in s True >>> q*3 in s False >>> s in l True """ if isinstance(o, Polygon): return self == o elif isinstance(o, Segment): return any(o in s for s in self.sides) elif isinstance(o, Point): if o in self.vertices: return True for side in self.sides: if o in side: return True return False def bisectors(p, prec=None): """Returns angle bisectors of a polygon. If prec is given then approximate the point defining the ray to that precision. The distance between the points defining the bisector ray is 1. Examples ======== >>> from sympy import Polygon, Point >>> p = Polygon(Point(0, 0), Point(2, 0), Point(1, 1), Point(0, 3)) >>> p.bisectors(2) {Point2D(0, 0): Ray2D(Point2D(0, 0), Point2D(0.71, 0.71)), Point2D(0, 3): Ray2D(Point2D(0, 3), Point2D(0.23, 2.0)), Point2D(1, 1): Ray2D(Point2D(1, 1), Point2D(0.19, 0.42)), Point2D(2, 0): Ray2D(Point2D(2, 0), Point2D(1.1, 0.38))} """ b = {} pts = list(p.args) pts.append(pts[0]) # close it cw = Polygon._is_clockwise(*pts[:3]) if cw: pts = list(reversed(pts)) for v, a in p.angles.items(): i = pts.index(v) p1, p2 = Point._normalize_dimension(pts[i], pts[i + 1]) ray = Ray(p1, p2).rotate(a/2, v) dir = ray.direction ray = Ray(ray.p1, ray.p1 + dir/dir.distance((0, 0))) if prec is not None: ray = Ray(ray.p1, ray.p2.n(prec)) b[v] = ray return b
Polygon
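The `area` property above is the signed shoelace formula; a plain-Python sketch of the same computation, handy for checking the sign convention without SymPy.

def shoelace_area(points):
    # Signed area: positive for counter-clockwise vertex order, negative for
    # clockwise, and zero when self-crossing lobes cancel (the docstring's Z shape).
    total = 0
    for i in range(len(points)):
        x1, y1 = points[i - 1]
        x2, y2 = points[i]
        total += x1 * y2 - x2 * y1
    return total / 2

print(shoelace_area([(0, 0), (4, 0), (4, 4), (0, 4)]))   # 16.0 (counter-clockwise)
print(shoelace_area([(0, 2), (2, 2), (0, 0), (2, 0)]))   # 0.0, matching the Z-shape example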
python
pypa__pip
src/pip/_vendor/distro/distro.py
{ "start": 21227, "end": 49430 }
class ____: """ Provides information about a OS distribution. This package creates a private module-global instance of this class with default initialization arguments, that is used by the `consolidated accessor functions`_ and `single source accessor functions`_. By using default initialization arguments, that module-global instance returns data about the current OS distribution (i.e. the distro this package runs on). Normally, it is not necessary to create additional instances of this class. However, in situations where control is needed over the exact data sources that are used, instances of this class can be created with a specific distro release file, or a specific os-release file, or without invoking the lsb_release command. """ def __init__( self, include_lsb: Optional[bool] = None, os_release_file: str = "", distro_release_file: str = "", include_uname: Optional[bool] = None, root_dir: Optional[str] = None, include_oslevel: Optional[bool] = None, ) -> None: """ The initialization method of this class gathers information from the available data sources, and stores that in private instance attributes. Subsequent access to the information items uses these private instance attributes, so that the data sources are read only once. Parameters: * ``include_lsb`` (bool): Controls whether the `lsb_release command output`_ is included as a data source. If the lsb_release command is not available in the program execution path, the data source for the lsb_release command will be empty. * ``os_release_file`` (string): The path name of the `os-release file`_ that is to be used as a data source. An empty string (the default) will cause the default path name to be used (see `os-release file`_ for details). If the specified or defaulted os-release file does not exist, the data source for the os-release file will be empty. * ``distro_release_file`` (string): The path name of the `distro release file`_ that is to be used as a data source. An empty string (the default) will cause a default search algorithm to be used (see `distro release file`_ for details). If the specified distro release file does not exist, or if no default distro release file can be found, the data source for the distro release file will be empty. * ``include_uname`` (bool): Controls whether uname command output is included as a data source. If the uname command is not available in the program execution path the data source for the uname command will be empty. * ``root_dir`` (string): The absolute path to the root directory to use to find distro-related information files. Note that ``include_*`` parameters must not be enabled in combination with ``root_dir``. * ``include_oslevel`` (bool): Controls whether (AIX) oslevel command output is included as a data source. If the oslevel command is not available in the program execution path the data source will be empty. Public instance attributes: * ``os_release_file`` (string): The path name of the `os-release file`_ that is actually used as a data source. The empty string if no distro release file is used as a data source. * ``distro_release_file`` (string): The path name of the `distro release file`_ that is actually used as a data source. The empty string if no distro release file is used as a data source. * ``include_lsb`` (bool): The result of the ``include_lsb`` parameter. This controls whether the lsb information will be loaded. * ``include_uname`` (bool): The result of the ``include_uname`` parameter. This controls whether the uname information will be loaded. 
* ``include_oslevel`` (bool): The result of the ``include_oslevel`` parameter. This controls whether (AIX) oslevel information will be loaded. * ``root_dir`` (string): The result of the ``root_dir`` parameter. The absolute path to the root directory to use to find distro-related information files. Raises: * :py:exc:`ValueError`: Initialization parameters combination is not supported. * :py:exc:`OSError`: Some I/O issue with an os-release file or distro release file. * :py:exc:`UnicodeError`: A data source has unexpected characters or uses an unexpected encoding. """ self.root_dir = root_dir self.etc_dir = os.path.join(root_dir, "etc") if root_dir else _UNIXCONFDIR self.usr_lib_dir = ( os.path.join(root_dir, "usr/lib") if root_dir else _UNIXUSRLIBDIR ) if os_release_file: self.os_release_file = os_release_file else: etc_dir_os_release_file = os.path.join(self.etc_dir, _OS_RELEASE_BASENAME) usr_lib_os_release_file = os.path.join( self.usr_lib_dir, _OS_RELEASE_BASENAME ) # NOTE: The idea is to respect order **and** have it set # at all times for API backwards compatibility. if os.path.isfile(etc_dir_os_release_file) or not os.path.isfile( usr_lib_os_release_file ): self.os_release_file = etc_dir_os_release_file else: self.os_release_file = usr_lib_os_release_file self.distro_release_file = distro_release_file or "" # updated later is_root_dir_defined = root_dir is not None if is_root_dir_defined and (include_lsb or include_uname or include_oslevel): raise ValueError( "Including subprocess data sources from specific root_dir is disallowed" " to prevent false information" ) self.include_lsb = ( include_lsb if include_lsb is not None else not is_root_dir_defined ) self.include_uname = ( include_uname if include_uname is not None else not is_root_dir_defined ) self.include_oslevel = ( include_oslevel if include_oslevel is not None else not is_root_dir_defined ) def __repr__(self) -> str: """Return repr of all info""" return ( "LinuxDistribution(" "os_release_file={self.os_release_file!r}, " "distro_release_file={self.distro_release_file!r}, " "include_lsb={self.include_lsb!r}, " "include_uname={self.include_uname!r}, " "include_oslevel={self.include_oslevel!r}, " "root_dir={self.root_dir!r}, " "_os_release_info={self._os_release_info!r}, " "_lsb_release_info={self._lsb_release_info!r}, " "_distro_release_info={self._distro_release_info!r}, " "_uname_info={self._uname_info!r}, " "_oslevel_info={self._oslevel_info!r})".format(self=self) ) def linux_distribution( self, full_distribution_name: bool = True ) -> Tuple[str, str, str]: """ Return information about the OS distribution that is compatible with Python's :func:`platform.linux_distribution`, supporting a subset of its parameters. For details, see :func:`distro.linux_distribution`. """ return ( self.name() if full_distribution_name else self.id(), self.version(), self._os_release_info.get("release_codename") or self.codename(), ) def id(self) -> str: """Return the distro ID of the OS distribution, as a string. For details, see :func:`distro.id`. 
""" def normalize(distro_id: str, table: Dict[str, str]) -> str: distro_id = distro_id.lower().replace(" ", "_") return table.get(distro_id, distro_id) distro_id = self.os_release_attr("id") if distro_id: return normalize(distro_id, NORMALIZED_OS_ID) distro_id = self.lsb_release_attr("distributor_id") if distro_id: return normalize(distro_id, NORMALIZED_LSB_ID) distro_id = self.distro_release_attr("id") if distro_id: return normalize(distro_id, NORMALIZED_DISTRO_ID) distro_id = self.uname_attr("id") if distro_id: return normalize(distro_id, NORMALIZED_DISTRO_ID) return "" def name(self, pretty: bool = False) -> str: """ Return the name of the OS distribution, as a string. For details, see :func:`distro.name`. """ name = ( self.os_release_attr("name") or self.lsb_release_attr("distributor_id") or self.distro_release_attr("name") or self.uname_attr("name") ) if pretty: name = self.os_release_attr("pretty_name") or self.lsb_release_attr( "description" ) if not name: name = self.distro_release_attr("name") or self.uname_attr("name") version = self.version(pretty=True) if version: name = f"{name} {version}" return name or "" def version(self, pretty: bool = False, best: bool = False) -> str: """ Return the version of the OS distribution, as a string. For details, see :func:`distro.version`. """ versions = [ self.os_release_attr("version_id"), self.lsb_release_attr("release"), self.distro_release_attr("version_id"), self._parse_distro_release_content(self.os_release_attr("pretty_name")).get( "version_id", "" ), self._parse_distro_release_content( self.lsb_release_attr("description") ).get("version_id", ""), self.uname_attr("release"), ] if self.uname_attr("id").startswith("aix"): # On AIX platforms, prefer oslevel command output. versions.insert(0, self.oslevel_info()) elif self.id() == "debian" or "debian" in self.like().split(): # On Debian-like, add debian_version file content to candidates list. versions.append(self._debian_version) version = "" if best: # This algorithm uses the last version in priority order that has # the best precision. If the versions are not in conflict, that # does not matter; otherwise, using the last one instead of the # first one might be considered a surprise. for v in versions: if v.count(".") > version.count(".") or version == "": version = v else: for v in versions: if v != "": version = v break if pretty and version and self.codename(): version = f"{version} ({self.codename()})" return version def version_parts(self, best: bool = False) -> Tuple[str, str, str]: """ Return the version of the OS distribution, as a tuple of version numbers. For details, see :func:`distro.version_parts`. """ version_str = self.version(best=best) if version_str: version_regex = re.compile(r"(\d+)\.?(\d+)?\.?(\d+)?") matches = version_regex.match(version_str) if matches: major, minor, build_number = matches.groups() return major, minor or "", build_number or "" return "", "", "" def major_version(self, best: bool = False) -> str: """ Return the major version number of the current distribution. For details, see :func:`distro.major_version`. """ return self.version_parts(best)[0] def minor_version(self, best: bool = False) -> str: """ Return the minor version number of the current distribution. For details, see :func:`distro.minor_version`. """ return self.version_parts(best)[1] def build_number(self, best: bool = False) -> str: """ Return the build number of the current distribution. For details, see :func:`distro.build_number`. 
""" return self.version_parts(best)[2] def like(self) -> str: """ Return the IDs of distributions that are like the OS distribution. For details, see :func:`distro.like`. """ return self.os_release_attr("id_like") or "" def codename(self) -> str: """ Return the codename of the OS distribution. For details, see :func:`distro.codename`. """ try: # Handle os_release specially since distros might purposefully set # this to empty string to have no codename return self._os_release_info["codename"] except KeyError: return ( self.lsb_release_attr("codename") or self.distro_release_attr("codename") or "" ) def info(self, pretty: bool = False, best: bool = False) -> InfoDict: """ Return certain machine-readable information about the OS distribution. For details, see :func:`distro.info`. """ return InfoDict( id=self.id(), version=self.version(pretty, best), version_parts=VersionDict( major=self.major_version(best), minor=self.minor_version(best), build_number=self.build_number(best), ), like=self.like(), codename=self.codename(), ) def os_release_info(self) -> Dict[str, str]: """ Return a dictionary containing key-value pairs for the information items from the os-release file data source of the OS distribution. For details, see :func:`distro.os_release_info`. """ return self._os_release_info def lsb_release_info(self) -> Dict[str, str]: """ Return a dictionary containing key-value pairs for the information items from the lsb_release command data source of the OS distribution. For details, see :func:`distro.lsb_release_info`. """ return self._lsb_release_info def distro_release_info(self) -> Dict[str, str]: """ Return a dictionary containing key-value pairs for the information items from the distro release file data source of the OS distribution. For details, see :func:`distro.distro_release_info`. """ return self._distro_release_info def uname_info(self) -> Dict[str, str]: """ Return a dictionary containing key-value pairs for the information items from the uname command data source of the OS distribution. For details, see :func:`distro.uname_info`. """ return self._uname_info def oslevel_info(self) -> str: """ Return AIX' oslevel command output. """ return self._oslevel_info def os_release_attr(self, attribute: str) -> str: """ Return a single named information item from the os-release file data source of the OS distribution. For details, see :func:`distro.os_release_attr`. """ return self._os_release_info.get(attribute, "") def lsb_release_attr(self, attribute: str) -> str: """ Return a single named information item from the lsb_release command output data source of the OS distribution. For details, see :func:`distro.lsb_release_attr`. """ return self._lsb_release_info.get(attribute, "") def distro_release_attr(self, attribute: str) -> str: """ Return a single named information item from the distro release file data source of the OS distribution. For details, see :func:`distro.distro_release_attr`. """ return self._distro_release_info.get(attribute, "") def uname_attr(self, attribute: str) -> str: """ Return a single named information item from the uname command output data source of the OS distribution. For details, see :func:`distro.uname_attr`. """ return self._uname_info.get(attribute, "") @cached_property def _os_release_info(self) -> Dict[str, str]: """ Get the information items from the specified os-release file. Returns: A dictionary containing all information items. 
""" if os.path.isfile(self.os_release_file): with open(self.os_release_file, encoding="utf-8") as release_file: return self._parse_os_release_content(release_file) return {} @staticmethod def _parse_os_release_content(lines: TextIO) -> Dict[str, str]: """ Parse the lines of an os-release file. Parameters: * lines: Iterable through the lines in the os-release file. Each line must be a unicode string or a UTF-8 encoded byte string. Returns: A dictionary containing all information items. """ props = {} lexer = shlex.shlex(lines, posix=True) lexer.whitespace_split = True tokens = list(lexer) for token in tokens: # At this point, all shell-like parsing has been done (i.e. # comments processed, quotes and backslash escape sequences # processed, multi-line values assembled, trailing newlines # stripped, etc.), so the tokens are now either: # * variable assignments: var=value # * commands or their arguments (not allowed in os-release) # Ignore any tokens that are not variable assignments if "=" in token: k, v = token.split("=", 1) props[k.lower()] = v if "version" in props: # extract release codename (if any) from version attribute match = re.search(r"\((\D+)\)|,\s*(\D+)", props["version"]) if match: release_codename = match.group(1) or match.group(2) props["codename"] = props["release_codename"] = release_codename if "version_codename" in props: # os-release added a version_codename field. Use that in # preference to anything else Note that some distros purposefully # do not have code names. They should be setting # version_codename="" props["codename"] = props["version_codename"] elif "ubuntu_codename" in props: # Same as above but a non-standard field name used on older Ubuntus props["codename"] = props["ubuntu_codename"] return props @cached_property def _lsb_release_info(self) -> Dict[str, str]: """ Get the information items from the lsb_release command output. Returns: A dictionary containing all information items. """ if not self.include_lsb: return {} try: cmd = ("lsb_release", "-a") stdout = subprocess.check_output(cmd, stderr=subprocess.DEVNULL) # Command not found or lsb_release returned error except (OSError, subprocess.CalledProcessError): return {} content = self._to_str(stdout).splitlines() return self._parse_lsb_release_content(content) @staticmethod def _parse_lsb_release_content(lines: Iterable[str]) -> Dict[str, str]: """ Parse the output of the lsb_release command. Parameters: * lines: Iterable through the lines of the lsb_release output. Each line must be a unicode string or a UTF-8 encoded byte string. Returns: A dictionary containing all information items. """ props = {} for line in lines: kv = line.strip("\n").split(":", 1) if len(kv) != 2: # Ignore lines without colon. 
continue k, v = kv props.update({k.replace(" ", "_").lower(): v.strip()}) return props @cached_property def _uname_info(self) -> Dict[str, str]: if not self.include_uname: return {} try: cmd = ("uname", "-rs") stdout = subprocess.check_output(cmd, stderr=subprocess.DEVNULL) except OSError: return {} content = self._to_str(stdout).splitlines() return self._parse_uname_content(content) @cached_property def _oslevel_info(self) -> str: if not self.include_oslevel: return "" try: stdout = subprocess.check_output("oslevel", stderr=subprocess.DEVNULL) except (OSError, subprocess.CalledProcessError): return "" return self._to_str(stdout).strip() @cached_property def _debian_version(self) -> str: try: with open( os.path.join(self.etc_dir, "debian_version"), encoding="ascii" ) as fp: return fp.readline().rstrip() except FileNotFoundError: return "" @staticmethod def _parse_uname_content(lines: Sequence[str]) -> Dict[str, str]: if not lines: return {} props = {} match = re.search(r"^([^\s]+)\s+([\d\.]+)", lines[0].strip()) if match: name, version = match.groups() # This is to prevent the Linux kernel version from # appearing as the 'best' version on otherwise # identifiable distributions. if name == "Linux": return {} props["id"] = name.lower() props["name"] = name props["release"] = version return props @staticmethod def _to_str(bytestring: bytes) -> str: encoding = sys.getfilesystemencoding() return bytestring.decode(encoding) @cached_property def _distro_release_info(self) -> Dict[str, str]: """ Get the information items from the specified distro release file. Returns: A dictionary containing all information items. """ if self.distro_release_file: # If it was specified, we use it and parse what we can, even if # its file name or content does not match the expected pattern. distro_info = self._parse_distro_release_file(self.distro_release_file) basename = os.path.basename(self.distro_release_file) # The file name pattern for user-specified distro release files # is somewhat more tolerant (compared to when searching for the # file), because we want to use what was specified as best as # possible. match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename) else: try: basenames = [ basename for basename in os.listdir(self.etc_dir) if basename not in _DISTRO_RELEASE_IGNORE_BASENAMES and os.path.isfile(os.path.join(self.etc_dir, basename)) ] # We sort for repeatability in cases where there are multiple # distro specific files; e.g. CentOS, Oracle, Enterprise all # containing `redhat-release` on top of their own. basenames.sort() except OSError: # This may occur when /etc is not readable but we can't be # sure about the *-release files. Check common entries of # /etc for information. If they turn out to not be there the # error is handled in `_parse_distro_release_file()`. basenames = _DISTRO_RELEASE_BASENAMES for basename in basenames: match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename) if match is None: continue filepath = os.path.join(self.etc_dir, basename) distro_info = self._parse_distro_release_file(filepath) # The name is always present if the pattern matches. if "name" not in distro_info: continue self.distro_release_file = filepath break else: # the loop didn't "break": no candidate. return {} if match is not None: distro_info["id"] = match.group(1) # CloudLinux < 7: manually enrich info with proper id. 
if "cloudlinux" in distro_info.get("name", "").lower(): distro_info["id"] = "cloudlinux" return distro_info def _parse_distro_release_file(self, filepath: str) -> Dict[str, str]: """ Parse a distro release file. Parameters: * filepath: Path name of the distro release file. Returns: A dictionary containing all information items. """ try: with open(filepath, encoding="utf-8") as fp: # Only parse the first line. For instance, on SLES there # are multiple lines. We don't want them... return self._parse_distro_release_content(fp.readline()) except OSError: # Ignore not being able to read a specific, seemingly version # related file. # See https://github.com/python-distro/distro/issues/162 return {} @staticmethod def _parse_distro_release_content(line: str) -> Dict[str, str]: """ Parse a line from a distro release file. Parameters: * line: Line from the distro release file. Must be a unicode string or a UTF-8 encoded byte string. Returns: A dictionary containing all information items. """ matches = _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN.match(line.strip()[::-1]) distro_info = {} if matches: # regexp ensures non-None distro_info["name"] = matches.group(3)[::-1] if matches.group(2): distro_info["version_id"] = matches.group(2)[::-1] if matches.group(1): distro_info["codename"] = matches.group(1)[::-1] elif line: distro_info["name"] = line.strip() return distro_info _distro = LinuxDistribution() def main() -> None: logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) logger.addHandler(logging.StreamHandler(sys.stdout)) parser = argparse.ArgumentParser(description="OS distro info tool") parser.add_argument( "--json", "-j", help="Output in machine readable format", action="store_true" ) parser.add_argument( "--root-dir", "-r", type=str, dest="root_dir", help="Path to the root filesystem directory (defaults to /)", ) args = parser.parse_args() if args.root_dir: dist = LinuxDistribution( include_lsb=False, include_uname=False, include_oslevel=False, root_dir=args.root_dir, ) else: dist = _distro if args.json: logger.info(json.dumps(dist.info(), indent=4, sort_keys=True)) else: logger.info("Name: %s", dist.name(pretty=True)) distribution_version = dist.version(pretty=True) logger.info("Version: %s", distribution_version) distribution_codename = dist.codename() logger.info("Codename: %s", distribution_codename) if __name__ == "__main__": main()
LinuxDistribution
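The class above resolves information lazily from the os-release file, the lsb_release command, uname, and (on AIX) oslevel. A minimal usage sketch follows; it assumes the class is importable from the upstream "distro" package, and the chroot path is a hypothetical placeholder.

# Usage sketch for the class above; the import path is assumed and
# "/mnt/image" is a hypothetical offline root filesystem.
from distro import LinuxDistribution

dist = LinuxDistribution(include_lsb=False, include_uname=False)
print(dist.id())                # normalized, lower-cased distro id
print(dist.version(best=True))  # most precise version string found
print(dist.name(pretty=True))   # name plus version (and codename, if any)

# Reading from an offline filesystem image instead of the running host;
# subprocess-based sources are disabled automatically when root_dir is set.
offline = LinuxDistribution(root_dir="/mnt/image")
print(offline.os_release_info())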
python
kamyu104__LeetCode-Solutions
Python/binary-tree-inorder-traversal.py
{ "start": 956, "end": 1510 }
class ____(object): def inorderTraversal(self, root): """ :type root: TreeNode :rtype: List[int] """ result, stack = [], [(root, False)] while stack: root, is_visited = stack.pop() if root is None: continue if is_visited: result.append(root.val) else: stack.append((root.right, False)) stack.append((root, True)) stack.append((root.left, False)) return result
Solution2
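The traversal above stays iterative by pushing (node, visited) pairs: a node is emitted only the second time it is popped, after its left subtree has already been emitted. A small driver sketch, assuming the usual LeetCode TreeNode definition (which is not part of the record):

# Driver sketch; the TreeNode definition below is an assumption, it is
# not included in the record itself.
class TreeNode(object):
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

# Tree:  2
#       / \
#      1   3
root = TreeNode(2, TreeNode(1), TreeNode(3))
print(Solution2().inorderTraversal(root))  # expected: [1, 2, 3]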
python
airbytehq__airbyte
airbyte-integrations/bases/base-normalization/normalization/transform_catalog/table_name_registry.py
{ "start": 3632, "end": 18360 }
class ____: """ A registry object that records table names being used during the run This registry helps detecting naming conflicts/collisions and how to resolve them. First, we collect all schema/stream_name/json_path listed in the catalog to detect any collisions, whether it is from: - table naming: truncated stream name could conflict with each other within the same destination schema - file naming: dbt use a global registry of file names without considering schema, so two tables with the same name in different schema is valid but dbt would fail to distinguish them. Thus, the file needs should be unique within a dbt project (for example, by adding the schema name to the file name when such collision occurs?) To do so, we build list of "simple" names without dealing with any collisions. Next, we check if/when we encounter such naming conflicts. They usually happen when destination require a certain naming convention with a limited number of characters, thus, we have to end up truncating names and creating collisions. In those cases, we resolve collisions using a more complex naming scheme using a suffix generated from hash of full names to make them short and unique (but hard to remember/use). """ def __init__(self, destination_type: DestinationType): """ @param destination_type is the destination type of warehouse """ self.destination_type: DestinationType = destination_type self.name_transformer: DestinationNameTransformer = DestinationNameTransformer(destination_type) # Simple XXX registry are collecting "simple" XXX names (with potential collisions) self.simple_file_registry: NormalizedFilesRegistry = NormalizedFilesRegistry() self.simple_table_registry: NormalizedTablesRegistry = NormalizedTablesRegistry(self.name_transformer) # Registry is the collision free (resolved) mapping of schema json_path of the stream to the names that should be used self.registry: Dict[str, ResolvedNameMetadata] = {} def register_table(self, intermediate_schema: str, schema: str, stream_name: str, json_path: List[str]): """ Record usages of simple table and file names used by each stream (top level and nested) in both intermediate_schema and schema. After going through all streams and sub-streams, we'll be able to find if any collisions are present within this catalog. """ intermediate_schema = self.name_transformer.normalize_schema_name(intermediate_schema, False, False) schema = self.name_transformer.normalize_schema_name(schema, False, False) table_name = self.get_simple_table_name(json_path) self.simple_table_registry.add(intermediate_schema, schema, json_path, stream_name, table_name) def get_simple_table_name(self, json_path: List[str]) -> str: """ Generates a simple table name, possibly in collisions within this catalog because of truncation """ return self.name_transformer.normalize_table_name("_".join(json_path)) def resolve_names(self) -> List[ConflictedNameMetadata]: conflicts = self.resolve_table_names() self.resolve_file_names() return conflicts def resolve_table_names(self) -> List[ConflictedNameMetadata]: """ Build a collision free registry from all schema/stream_name/json_path collected so far. """ resolved_keys = [] # deal with table name collisions within the same schema first. 
# file name should be equal to table name here table_count = 0 for key in self.simple_table_registry: for value in self.simple_table_registry[key]: table_count += 1 if self.simple_table_registry.has_collisions(key): # handle collisions with unique hashed names table_name = self.get_hashed_table_name(value.schema, value.json_path, value.stream_name, value.table_name) resolved_keys.append(ConflictedNameMetadata(value.schema, value.json_path, value.table_name, table_name)) else: table_name = value.table_name self.registry[self.get_registry_key(value.intermediate_schema, value.json_path, value.stream_name)] = ResolvedNameMetadata( value.intermediate_schema, table_name, # use table_name as file_name for now table_name, ) self.registry[self.get_registry_key(value.schema, value.json_path, value.stream_name)] = ResolvedNameMetadata( value.schema, table_name, # use table_name as file_name for now table_name, ) self.simple_file_registry.add(value.intermediate_schema, value.schema, value.json_path, value.stream_name, table_name) registry_size = len(self.registry) # Oracle doesnt support namespace and this break this logic. if self.destination_type != DestinationType.ORACLE: assert (table_count * 2) == registry_size, f"Mismatched number of tables {table_count * 2} vs {registry_size} being resolved" return resolved_keys def resolve_file_names(self): # deal with file name collisions across schemas and update the file name to use in the registry when necessary file_count = 0 for key in self.simple_file_registry: for value in self.simple_file_registry[key]: file_count += 1 if self.simple_file_registry.has_collisions(key): # handle collisions with unique hashed names including schema self.registry[ self.get_registry_key(value.intermediate_schema, value.json_path, value.stream_name) ] = ResolvedNameMetadata( value.intermediate_schema, value.table_name, self.resolve_file_name(value.intermediate_schema, value.table_name) ) self.registry[self.get_registry_key(value.schema, value.json_path, value.stream_name)] = ResolvedNameMetadata( value.schema, value.table_name, self.resolve_file_name(value.schema, value.table_name) ) registry_size = len(self.registry) # Oracle doesnt support namespace and this break this logic. if self.destination_type != DestinationType.ORACLE: assert (file_count * 2) == registry_size, f"Mismatched number of tables {file_count * 2} vs {registry_size} being resolved" def get_hashed_table_name(self, schema: str, json_path: List[str], stream_name: str, table_name: str) -> str: """ Generates a unique table name to avoid collisions within this catalog. This is using a hash of full names but it is hard to use and remember, so this should be done rarely... We'd prefer to use "simple" names instead as much as possible. """ if len(json_path) == 1: # collisions on a top level stream name, add a hash of schema + stream name to the (truncated?) 
table name to make it unique result = self.name_transformer.normalize_table_name(f"{stream_name}_{hash_json_path([schema] + json_path)}") else: # collisions on a nested sub-stream result = self.name_transformer.normalize_table_name( get_nested_hashed_table_name(self.name_transformer, schema, json_path, stream_name), False, False ) return result @staticmethod def get_registry_key(schema: str, json_path: List[str], stream_name: str) -> str: """ Build the key string used to index the registry """ return ".".join([schema, "_".join(json_path), stream_name]).lower() def resolve_file_name(self, schema: str, table_name: str) -> str: """ We prefer to use file_name = table_name when possible... When a catalog has ambiguity, we have to fallback and use schema in the file name too (which might increase a risk of truncate operation and thus collisions that we solve by adding a hash of the full names) """ if len(self.simple_file_registry[table_name]) == 1: # no collisions on file naming return table_name else: max_length = self.name_transformer.get_name_max_length() # if schema . table fits into the destination, we use this naming convention if len(schema) + len(table_name) + 1 < max_length: return f"{schema}_{table_name}" else: # we have to make sure our filename is unique, use hash of full name return self.name_transformer.normalize_table_name(f"{schema}_{table_name}_{hash_name(schema + table_name)}") def get_schema_name(self, schema: str, json_path: List[str], stream_name: str): """ Return the schema name from the registry that should be used for this combination of schema/json_path_to_substream """ key = self.get_registry_key(schema, json_path, stream_name) if key in self.registry: return self.name_transformer.normalize_schema_name(self.registry[key].schema, False, False) else: raise KeyError(f"Registry does not contain an entry for {schema} {json_path} {stream_name}") def get_table_name(self, schema: str, json_path: List[str], stream_name: str, suffix: str, truncate: bool = False): """ Return the table name from the registry that should be used for this combination of schema/json_path_to_substream """ key = self.get_registry_key(schema, json_path, stream_name) if key in self.registry: table_name = self.registry[key].table_name else: raise KeyError(f"Registry does not contain an entry for {schema} {json_path} {stream_name}") if suffix: norm_suffix = suffix if not suffix or suffix.startswith("_") else f"_{suffix}" else: norm_suffix = "" conflict = False conflict_solver = 0 if stream_name in json_path: conflict = True conflict_solver = len(json_path) return self.name_transformer.normalize_table_name(f"{table_name}{norm_suffix}", False, truncate, conflict, conflict_solver) def get_file_name(self, schema: str, json_path: List[str], stream_name: str, suffix: str, truncate: bool = False): """ Return the file name from the registry that should be used for this combination of schema/json_path_to_substream """ key = self.get_registry_key(schema, json_path, stream_name) if key in self.registry: file_name = self.registry[key].file_name else: raise KeyError(f"Registry does not contain an entry for {schema} {json_path} {stream_name}") if suffix: norm_suffix = suffix if not suffix or suffix.startswith("_") else f"_{suffix}" else: norm_suffix = "" conflict = False conflict_solver = 0 if stream_name in json_path: conflict = True conflict_solver = len(json_path) return self.name_transformer.normalize_table_name(f"{file_name}{norm_suffix}", False, truncate, conflict, conflict_solver) def to_dict(self, 
apply_function=(lambda x: x)) -> Dict: """ Converts to a pure dict to serialize as json """ result = {} for key in self.registry: value = self.registry[key] result[apply_function(key)] = { apply_function("schema"): apply_function(value.schema), apply_function("table"): apply_function(value.table_name), apply_function("file"): apply_function(value.file_name), } return result def hash_json_path(json_path: List[str]) -> str: return hash_name("&airbyte&".join(json_path)) def hash_name(input: str) -> str: h = hashlib.sha1() h.update(input.encode("utf-8").lower()) return h.hexdigest()[:3] def get_nested_hashed_table_name(name_transformer: DestinationNameTransformer, schema: str, json_path: List[str], child: str) -> str: """ In normalization code base, we often have to deal with naming for tables, combining informations from: - parent table: to denote where a table is extracted from (in case of nesting) - child table: in case of nesting, the field name or the original stream name - extra suffix: normalization is done in multiple transformation steps, each may need to generate separate tables, so we can add a suffix to distinguish the different transformation steps of a pipeline. - json path: in terms of parent and nested field names in order to reach the table currently being built All these informations should be included (if possible) in the table naming for the user to (somehow) identify and recognize what data is available there. """ parent = "_".join(json_path[:-1]) max_length = name_transformer.get_name_max_length() json_path_hash = hash_json_path([schema] + json_path) norm_parent = parent if not parent else name_transformer.normalize_table_name(parent, False, False) norm_child = name_transformer.normalize_table_name(child, False, False) min_parent_length = min(MINIMUM_PARENT_LENGTH, len(norm_parent)) # no parent if not parent: raise RuntimeError("There is no nested table names without parents") # if everything fits without truncation, don't truncate anything elif (len(norm_parent) + len(json_path_hash) + len(norm_child) + 2) < max_length: return f"{norm_parent}_{json_path_hash}_{norm_child}" # if everything fits except for the parent, just truncate the parent (still guarantees parent is of length min_parent_length) elif (min_parent_length + len(json_path_hash) + len(norm_child) + 2) < max_length: max_parent_length = max_length - len(json_path_hash) - len(norm_child) - 2 return f"{norm_parent[:max_parent_length]}_{json_path_hash}_{norm_child}" # otherwise first truncate parent to the minimum length and middle truncate the child too else: norm_child_max_length = max_length - len(json_path_hash) - 2 - min_parent_length trunc_norm_child = name_transformer.truncate_identifier_name(norm_child, norm_child_max_length) return f"{norm_parent[:min_parent_length]}_{json_path_hash}_{trunc_norm_child}"
TableNameRegistry
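The fallback naming in the registry above rests on a short sha1 suffix (hash_name and hash_json_path at the end of the record). A standalone sketch of that suffix scheme, using hypothetical schema and stream names:

# Standalone sketch of the 3-character hash suffix used above to break
# naming collisions; the schema/stream names are hypothetical.
import hashlib

def hash_name(name):
    h = hashlib.sha1()
    h.update(name.encode("utf-8").lower())
    return h.hexdigest()[:3]

def hash_json_path(json_path):
    # Path components are joined with a sentinel before hashing, as above.
    return hash_name("&airbyte&".join(json_path))

# Two nested streams whose truncated table names could collide each get a
# short suffix derived from their full json path.
print(hash_json_path(["public", "orders", "line_items"]))
print(hash_json_path(["public", "orders", "line_item_notes"]))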
python
huggingface__transformers
src/transformers/models/electra/modeling_electra.py
{ "start": 21407, "end": 22067 }
class ____(nn.Module): """Prediction module for the generator, made up of two dense layers.""" def __init__(self, config): super().__init__() self.activation = get_activation("gelu") self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps) self.dense = nn.Linear(config.hidden_size, config.embedding_size) def forward(self, generator_hidden_states): hidden_states = self.dense(generator_hidden_states) hidden_states = self.activation(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states @auto_docstring
ElectraGeneratorPredictions
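The prediction head above is just dense → GELU → LayerNorm, projecting from hidden_size down to embedding_size. A self-contained shape-check sketch; the concrete sizes and the use of nn.GELU in place of get_activation("gelu") are assumptions:

# Self-contained sketch mirroring the head above; sizes are arbitrary and
# nn.GELU stands in for get_activation("gelu").
import torch
from torch import nn

class GeneratorPredictionsSketch(nn.Module):
    def __init__(self, hidden_size, embedding_size, layer_norm_eps=1e-12):
        super().__init__()
        self.activation = nn.GELU()
        self.LayerNorm = nn.LayerNorm(embedding_size, eps=layer_norm_eps)
        self.dense = nn.Linear(hidden_size, embedding_size)

    def forward(self, hidden_states):
        return self.LayerNorm(self.activation(self.dense(hidden_states)))

head = GeneratorPredictionsSketch(hidden_size=256, embedding_size=128)
out = head(torch.randn(2, 8, 256))  # (batch, sequence, hidden_size)
print(out.shape)                    # torch.Size([2, 8, 128])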
python
getsentry__sentry
tests/sentry/integrations/jira_server/test_search.py
{ "start": 336, "end": 4572 }
class ____(APITestCase): @cached_property def integration(self): return get_integration(self.organization, self.user) @responses.activate def test_get_success_text_search(self) -> None: org = self.organization integration = self.integration responses.add( responses.GET, 'https://jira.example.org/rest/api/2/search/?jql=text ~ "test"', body=EXAMPLE_ISSUE_SEARCH, content_type="json", ) self.login_as(self.user) path = reverse("sentry-extensions-jiraserver-search", args=[org.slug, integration.id]) resp = self.client.get(f"{path}?field=externalIssue&query=test") assert resp.status_code == 200 assert resp.data == [{"label": "(HSP-1) this is a test issue summary", "value": "HSP-1"}] @responses.activate def test_get_success_id_search(self) -> None: org = self.organization integration = self.integration responses.add( responses.GET, 'https://jira.example.org/rest/api/2/search/?jql=id="HSP-1"', body=EXAMPLE_ISSUE_SEARCH, content_type="json", ) self.login_as(self.user) path = reverse("sentry-extensions-jiraserver-search", args=[org.slug, integration.id]) resp = self.client.get(f"{path}?field=externalIssue&query=HSP-1") assert resp.status_code == 200 assert resp.data == [{"label": "(HSP-1) this is a test issue summary", "value": "HSP-1"}] @responses.activate def test_get_network_error(self) -> None: org = self.organization integration = self.integration responses.add( responses.GET, 'https://jira.example.org/rest/api/2/search/?jql=id="HSP-1"', status=502, body="<p>We are down</p>", ) self.login_as(self.user) path = reverse("sentry-extensions-jiraserver-search", args=[org.slug, integration.id]) resp = self.client.get(f"{path}?field=externalIssue&query=HSP-1") assert resp.status_code == 400 def test_get_missing_integration(self) -> None: self.login_as(self.user) org = self.organization path = reverse("sentry-extensions-jiraserver-search", args=[org.slug, 99]) resp = self.client.get(f"{path}?field=externalIssue&query=HSP-1") assert resp.status_code == 404 @responses.activate def test_assignee_search(self) -> None: responses.add( responses.GET, "https://jira.example.org/rest/api/2/project", json=[{"key": "HSP", "id": "10000"}], ) def responder(request): query = parse_qs(urlparse(request.url).query) assert "HSP" == query["project"][0] assert "bob" == query["username"][0] return (200, {}, EXAMPLE_USER_SEARCH_RESPONSE) responses.add_callback( responses.GET, "https://jira.example.org/rest/api/2/user/assignable/search", callback=responder, content_type="json", ) org = self.organization self.login_as(self.user) path = reverse("sentry-extensions-jiraserver-search", args=[org.slug, self.integration.id]) resp = self.client.get(f"{path}?project=10000&field=assignee&query=bob") assert resp.status_code == 200 assert resp.data == [{"value": "bob", "label": "Bobby - bob@example.org (bob)"}] @responses.activate def test_assignee_search_error(self) -> None: responses.add( responses.GET, "https://jira.example.org/rest/api/2/project", json=[{"key": "HSP", "id": "10000"}], ) responses.add( responses.GET, "https://jira.example.org/rest/api/2/user/assignable/search", status=500, body="Bad things", ) org = self.organization self.login_as(self.user) path = reverse("sentry-extensions-jiraserver-search", args=[org.slug, self.integration.id]) resp = self.client.get(f"{path}?project=10000&field=assignee&query=bob") assert resp.status_code == 400
JiraServerSearchEndpointTest
python
getsentry__sentry
src/sentry/integrations/cursor/webhooks/handler.py
{ "start": 1167, "end": 9385 }
class ____(Endpoint): owner = ApiOwner.ML_AI publish_status = { "POST": ApiPublishStatus.PRIVATE, } authentication_classes = () permission_classes = () @method_decorator(csrf_exempt) def dispatch(self, request: HttpRequest, *args, **kwargs) -> HttpResponse: if request.method != "POST": raise MethodNotAllowed(request.method or "unknown") return super().dispatch(request, *args, **kwargs) def post(self, request: Request, organization_id: int) -> Response: organization = Organization.objects.get(id=organization_id) if not features.has("organizations:seer-coding-agent-integrations", organization): raise NotFound("Coding agent feature not enabled for this organization") try: payload = orjson.loads(request.body) except orjson.JSONDecodeError: logger.warning("cursor_webhook.invalid_json") raise ParseError("Invalid JSON") event_type = payload.get("event", payload.get("event_type", "unknown")) if not self._validate_signature(request, request.body, organization_id): logger.warning("cursor_webhook.invalid_signature") raise PermissionDenied("Invalid signature") self._process_webhook(payload) logger.info("cursor_webhook.success", extra={"event_type": event_type}) return self.respond(status=204) def _get_cursor_integration_secret(self, organization_id: int) -> str | None: """Get webhook secret from Cursor integration.""" integrations = integration_service.get_integrations( organization_id=organization_id, providers=["cursor"] ) if not integrations: logger.error( "cursor_webhook.no_integrations", extra={"organization_id": organization_id} ) return None if len(integrations) > 1: logger.error( "cursor_webhook.multiple_integrations", extra={ "organization_id": organization_id, "integration_ids": [integration.id for integration in integrations], }, ) return None installation = integrations[0].get_installation(organization_id) if not isinstance(installation, CursorAgentIntegration): logger.error( "cursor_webhook.unexpected_installation_type", extra={ "integration_id": integrations[0].id, "organization_id": organization_id, "type": type(installation).__name__, }, ) return None return installation.webhook_secret def _validate_signature(self, request: Request, raw_body: bytes, organization_id: int) -> bool: """Validate webhook signature.""" signature = request.headers.get("X-Webhook-Signature") # Get webhook secret from integration secret = self._get_cursor_integration_secret(organization_id) if not signature: logger.warning("cursor_webhook.no_signature_provided") raise PermissionDenied("No signature provided") if not secret: logger.warning("cursor_webhook.no_webhook_secret") raise PermissionDenied("No webhook secret set") # Remove "sha256=" prefix if present if signature.startswith("sha256="): signature = signature[7:] expected_signature = hmac.new(secret.encode("utf-8"), raw_body, hashlib.sha256).hexdigest() is_valid = constant_time_compare(expected_signature, signature) if not is_valid: logger.warning("cursor_webhook.signature_mismatch") return is_valid def _process_webhook(self, payload: dict[str, Any]) -> None: """Process webhook payload based on event type.""" event_type = payload.get("event", "unknown") handlers = { "unknown": self._handle_unknown_event, "statusChange": self._handle_status_change, } handler = handlers.get(event_type, self._handle_unknown_event) handler(payload) def _handle_unknown_event(self, payload: dict[str, Any]) -> None: """Handle unknown event types.""" logger.error("cursor_webhook.unknown_event", extra=payload) def _handle_status_change(self, payload: dict[str, Any]) -> None: 
"""Handle status change events.""" agent_id = payload.get("id") cursor_status = payload.get("status") source = payload.get("source", {}) target = payload.get("target", {}) pr_url = target.get("prUrl") agent_url = target.get("url") summary = payload.get("summary") if not agent_id or not cursor_status: logger.error( "cursor_webhook.status_change_missing_data", extra={"agent_id": agent_id, "status": cursor_status}, ) return status = CodingAgentStatus.from_cursor_status(cursor_status) if not status: logger.error( "cursor_webhook.unknown_status", extra={"cursor_status": cursor_status, "agent_id": agent_id}, ) status = CodingAgentStatus.FAILED logger.info( "cursor_webhook.status_change", extra={ "agent_id": agent_id, "cursor_status": cursor_status, "status": status.value, "pr_url": pr_url, "summary": summary, }, ) repo_url = source.get("repository", None) if not repo_url: logger.error( "cursor_webhook.repo_not_found", extra={"agent_id": agent_id, "source": source}, ) return # Ensure the repo URL has a protocol, on their docs it says it should but we found it doesn't? if not repo_url.startswith("https://"): repo_url = f"https://{repo_url}" parsed = urlparse(repo_url) if parsed.netloc != "github.com": logger.error( "cursor_webhook.not_github_repo", extra={"agent_id": agent_id, "repo": repo_url}, ) return repo_provider = "github" repo_full_name = parsed.path.lstrip("/") # If the repo isn't in the owner/repo format we can't work with it # Allow dots in the repository name segment (owner.repo is common) if not re.match(r"^[a-zA-Z0-9_-]+/[a-zA-Z0-9_.-]+$", repo_full_name): logger.error( "cursor_webhook.repo_format_invalid", extra={"agent_id": agent_id, "source": source}, ) return result = CodingAgentResult( repo_full_name=repo_full_name, repo_provider=repo_provider, description=summary or f"Agent {status.lower()}", pr_url=pr_url if status == CodingAgentStatus.COMPLETED else None, ) self._update_coding_agent_status( agent_id=agent_id, status=status, agent_url=agent_url, result=result, ) def _update_coding_agent_status( self, agent_id: str, status: CodingAgentStatus, agent_url: str | None = None, result: CodingAgentResult | None = None, ): try: update_coding_agent_state( agent_id=agent_id, status=status, agent_url=agent_url, result=result, ) logger.info( "cursor_webhook.status_updated_to_seer", extra={ "agent_id": agent_id, "status": status.value, "has_result": result is not None, }, ) except SeerApiError: logger.exception( "cursor_webhook.seer_update_error", extra={ "agent_id": agent_id, "status": status.value, }, )
CursorWebhookEndpoint
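The signature check in the endpoint above expects an HMAC-SHA256 of the raw request body, hex encoded, optionally prefixed with "sha256=". A sketch of how a sender would produce a header that the handler accepts; the secret and payload values are hypothetical:

# Sketch of producing the X-Webhook-Signature header validated above;
# the secret and payload are hypothetical values.
import hashlib
import hmac
import json

secret = "example-webhook-secret"
body = json.dumps(
    {"event": "statusChange", "id": "agent-123", "status": "FINISHED"}
).encode("utf-8")

digest = hmac.new(secret.encode("utf-8"), body, hashlib.sha256).hexdigest()
headers = {"X-Webhook-Signature": f"sha256={digest}"}
# POST `body` with `headers`; the handler strips the "sha256=" prefix and
# compares the digests in constant time before processing the event.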
python
zarr-developers__zarr-python
src/zarr/storage/_wrapper.py
{ "start": 458, "end": 4505 }
class ____(Store, Generic[T_Store]): """ Store that wraps an existing Store. By default all of the store methods are delegated to the wrapped store instance, which is accessible via the ``._store`` attribute of this class. Use this class to modify or extend the behavior of the other store classes. """ _store: T_Store def __init__(self, store: T_Store) -> None: self._store = store @classmethod async def open(cls: type[Self], store_cls: type[T_Store], *args: Any, **kwargs: Any) -> Self: store = store_cls(*args, **kwargs) await store._open() return cls(store=store) def __enter__(self) -> Self: return type(self)(self._store.__enter__()) def __exit__( self, exc_type: type[BaseException] | None, exc_value: BaseException | None, traceback: TracebackType | None, ) -> None: return self._store.__exit__(exc_type, exc_value, traceback) async def _open(self) -> None: await self._store._open() async def _ensure_open(self) -> None: await self._store._ensure_open() async def is_empty(self, prefix: str) -> bool: return await self._store.is_empty(prefix) @property def _is_open(self) -> bool: return self._store._is_open @_is_open.setter def _is_open(self, value: bool) -> None: raise NotImplementedError("WrapperStore must be opened via the `_open` method") async def clear(self) -> None: return await self._store.clear() @property def read_only(self) -> bool: return self._store.read_only def _check_writable(self) -> None: return self._store._check_writable() def __eq__(self, value: object) -> bool: return type(self) is type(value) and self._store.__eq__(value._store) # type: ignore[attr-defined] def __str__(self) -> str: return f"wrapping-{self._store}" def __repr__(self) -> str: return f"WrapperStore({self._store.__class__.__name__}, '{self._store}')" async def get( self, key: str, prototype: BufferPrototype, byte_range: ByteRequest | None = None ) -> Buffer | None: return await self._store.get(key, prototype, byte_range) async def get_partial_values( self, prototype: BufferPrototype, key_ranges: Iterable[tuple[str, ByteRequest | None]], ) -> list[Buffer | None]: return await self._store.get_partial_values(prototype, key_ranges) async def exists(self, key: str) -> bool: return await self._store.exists(key) async def set(self, key: str, value: Buffer) -> None: await self._store.set(key, value) async def set_if_not_exists(self, key: str, value: Buffer) -> None: return await self._store.set_if_not_exists(key, value) async def _set_many(self, values: Iterable[tuple[str, Buffer]]) -> None: await self._store._set_many(values) @property def supports_writes(self) -> bool: return self._store.supports_writes @property def supports_deletes(self) -> bool: return self._store.supports_deletes async def delete(self, key: str) -> None: await self._store.delete(key) @property def supports_listing(self) -> bool: return self._store.supports_listing def list(self) -> AsyncIterator[str]: return self._store.list() def list_prefix(self, prefix: str) -> AsyncIterator[str]: return self._store.list_prefix(prefix) def list_dir(self, prefix: str) -> AsyncIterator[str]: return self._store.list_dir(prefix) async def delete_dir(self, prefix: str) -> None: return await self._store.delete_dir(prefix) def close(self) -> None: self._store.close() async def _get_many( self, requests: Iterable[tuple[str, BufferPrototype, ByteRequest | None]] ) -> AsyncGenerator[tuple[str, Buffer | None], None]: async for req in self._store._get_many(requests): yield req
WrapperStore
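Because every method above delegates to self._store, extending a store means overriding only the calls of interest. A sketch of a logging wrapper built on the class above; the import path is inferred from the record's path field and should be treated as an assumption:

# Sketch of a subclass that logs reads and writes while delegating the
# actual work to the wrapped store; the import path is an assumption.
from zarr.storage import WrapperStore

class LoggingStore(WrapperStore):
    async def get(self, key, prototype, byte_range=None):
        print(f"get {key!r} (byte_range={byte_range!r})")
        return await self._store.get(key, prototype, byte_range)

    async def set(self, key, value):
        print(f"set {key!r}")
        await self._store.set(key, value)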
python
redis__redis-py
redis/http/http_client.py
{ "start": 1693, "end": 15177 }
class ____: """ A lightweight HTTP client for REST API calls. """ def __init__( self, base_url: str = "", headers: Optional[Mapping[str, str]] = None, timeout: float = DEFAULT_TIMEOUT, retry: Retry = Retry( backoff=ExponentialWithJitterBackoff(base=1, cap=10), retries=3 ), verify_tls: bool = True, # TLS verification (server) options ca_file: Optional[str] = None, ca_path: Optional[str] = None, ca_data: Optional[Union[str, bytes]] = None, # Mutual TLS (client cert) options client_cert_file: Optional[str] = None, client_key_file: Optional[str] = None, client_key_password: Optional[str] = None, auth_basic: Optional[Tuple[str, str]] = None, # (username, password) user_agent: str = DEFAULT_USER_AGENT, ) -> None: """ Initialize a new HTTP client instance. Args: base_url: Base URL for all requests. Will be prefixed to all paths. headers: Default headers to include in all requests. timeout: Default timeout in seconds for requests. retry: Retry configuration for failed requests. verify_tls: Whether to verify TLS certificates. ca_file: Path to CA certificate file for TLS verification. ca_path: Path to a directory containing CA certificates. ca_data: CA certificate data as string or bytes. client_cert_file: Path to client certificate for mutual TLS. client_key_file: Path to a client private key for mutual TLS. client_key_password: Password for an encrypted client private key. auth_basic: Tuple of (username, password) for HTTP basic auth. user_agent: User-Agent header value for requests. The client supports both regular HTTPS with server verification and mutual TLS authentication. For server verification, provide CA certificate information via ca_file, ca_path or ca_data. For mutual TLS, additionally provide a client certificate and key via client_cert_file and client_key_file. 
""" self.base_url = ( base_url.rstrip() + "/" if base_url and not base_url.endswith("/") else base_url ) self._default_headers = {k.lower(): v for k, v in (headers or {}).items()} self.timeout = timeout self.retry = retry self.retry.update_supported_errors((HTTPError, URLError, ssl.SSLError)) self.verify_tls = verify_tls # TLS settings self.ca_file = ca_file self.ca_path = ca_path self.ca_data = ca_data self.client_cert_file = client_cert_file self.client_key_file = client_key_file self.client_key_password = client_key_password self.auth_basic = auth_basic self.user_agent = user_agent # Public JSON-centric helpers def get( self, path: str, params: Optional[ Mapping[str, Union[None, str, int, float, bool, list, tuple]] ] = None, headers: Optional[Mapping[str, str]] = None, timeout: Optional[float] = None, expect_json: bool = True, ) -> Union[HttpResponse, Any]: return self._json_call( "GET", path, params=params, headers=headers, timeout=timeout, body=None, expect_json=expect_json, ) def delete( self, path: str, params: Optional[ Mapping[str, Union[None, str, int, float, bool, list, tuple]] ] = None, headers: Optional[Mapping[str, str]] = None, timeout: Optional[float] = None, expect_json: bool = True, ) -> Union[HttpResponse, Any]: return self._json_call( "DELETE", path, params=params, headers=headers, timeout=timeout, body=None, expect_json=expect_json, ) def post( self, path: str, json_body: Optional[Any] = None, data: Optional[Union[bytes, str]] = None, params: Optional[ Mapping[str, Union[None, str, int, float, bool, list, tuple]] ] = None, headers: Optional[Mapping[str, str]] = None, timeout: Optional[float] = None, expect_json: bool = True, ) -> Union[HttpResponse, Any]: return self._json_call( "POST", path, params=params, headers=headers, timeout=timeout, body=self._prepare_body(json_body=json_body, data=data), expect_json=expect_json, ) def put( self, path: str, json_body: Optional[Any] = None, data: Optional[Union[bytes, str]] = None, params: Optional[ Mapping[str, Union[None, str, int, float, bool, list, tuple]] ] = None, headers: Optional[Mapping[str, str]] = None, timeout: Optional[float] = None, expect_json: bool = True, ) -> Union[HttpResponse, Any]: return self._json_call( "PUT", path, params=params, headers=headers, timeout=timeout, body=self._prepare_body(json_body=json_body, data=data), expect_json=expect_json, ) def patch( self, path: str, json_body: Optional[Any] = None, data: Optional[Union[bytes, str]] = None, params: Optional[ Mapping[str, Union[None, str, int, float, bool, list, tuple]] ] = None, headers: Optional[Mapping[str, str]] = None, timeout: Optional[float] = None, expect_json: bool = True, ) -> Union[HttpResponse, Any]: return self._json_call( "PATCH", path, params=params, headers=headers, timeout=timeout, body=self._prepare_body(json_body=json_body, data=data), expect_json=expect_json, ) # Low-level request def request( self, method: str, path: str, params: Optional[ Mapping[str, Union[None, str, int, float, bool, list, tuple]] ] = None, headers: Optional[Mapping[str, str]] = None, body: Optional[Union[bytes, str]] = None, timeout: Optional[float] = None, ) -> HttpResponse: url = self._build_url(path, params) all_headers = self._prepare_headers(headers, body) data = body.encode("utf-8") if isinstance(body, str) else body req = Request(url=url, method=method.upper(), data=data, headers=all_headers) context: Optional[ssl.SSLContext] = None if url.lower().startswith("https"): if self.verify_tls: # Use provided CA material if any; fall back to system defaults 
context = ssl.create_default_context( cafile=self.ca_file, capath=self.ca_path, cadata=self.ca_data, ) # Load client certificate for mTLS if configured if self.client_cert_file: context.load_cert_chain( certfile=self.client_cert_file, keyfile=self.client_key_file, password=self.client_key_password, ) else: # Verification disabled context = ssl.create_default_context() context.check_hostname = False context.verify_mode = ssl.CERT_NONE try: return self.retry.call_with_retry( lambda: self._make_request(req, context=context, timeout=timeout), lambda _: dummy_fail(), lambda error: self._is_retryable_http_error(error), ) except HTTPError as e: # Read error body, build response, and decide on retry err_body = b"" try: err_body = e.read() except Exception: pass headers_map = {k.lower(): v for k, v in (e.headers or {}).items()} err_body = self._maybe_decompress(err_body, headers_map) status = getattr(e, "code", 0) or 0 response = HttpResponse( status=status, headers=headers_map, url=url, content=err_body, ) return response def _make_request( self, request: Request, context: Optional[ssl.SSLContext] = None, timeout: Optional[float] = None, ): with urlopen(request, timeout=timeout or self.timeout, context=context) as resp: raw = resp.read() headers_map = {k.lower(): v for k, v in resp.headers.items()} raw = self._maybe_decompress(raw, headers_map) return HttpResponse( status=resp.status, headers=headers_map, url=resp.geturl(), content=raw, ) def _is_retryable_http_error(self, error: Exception) -> bool: if isinstance(error, HTTPError): return self._should_retry_status(error.code) return False # Internal utilities def _json_call( self, method: str, path: str, params: Optional[ Mapping[str, Union[None, str, int, float, bool, list, tuple]] ] = None, headers: Optional[Mapping[str, str]] = None, timeout: Optional[float] = None, body: Optional[Union[bytes, str]] = None, expect_json: bool = True, ) -> Union[HttpResponse, Any]: resp = self.request( method=method, path=path, params=params, headers=headers, body=body, timeout=timeout, ) if not (200 <= resp.status < 400): raise HttpError(resp.status, resp.url, resp.text()) if expect_json: return resp.json() return resp def _prepare_body( self, json_body: Optional[Any] = None, data: Optional[Union[bytes, str]] = None ) -> Optional[Union[bytes, str]]: if json_body is not None and data is not None: raise ValueError("Provide either json_body or data, not both.") if json_body is not None: return json.dumps(json_body, ensure_ascii=False, separators=(",", ":")) return data def _build_url( self, path: str, params: Optional[ Mapping[str, Union[None, str, int, float, bool, list, tuple]] ] = None, ) -> str: url = urljoin(self.base_url or "", path) if params: # urlencode with doseq=True supports list/tuple values query = urlencode( {k: v for k, v in params.items() if v is not None}, doseq=True ) separator = "&" if ("?" in url) else "?" 
url = f"{url}{separator}{query}" if query else url return url def _prepare_headers( self, headers: Optional[Mapping[str, str]], body: Optional[Union[bytes, str]] ) -> Dict[str, str]: # Start with defaults prepared: Dict[str, str] = {} prepared.update(self._default_headers) # Standard defaults for JSON REST usage prepared.setdefault("accept", "application/json") prepared.setdefault("user-agent", self.user_agent) # We will send gzip accept-encoding; handle decompression manually prepared.setdefault("accept-encoding", "gzip, deflate") # If we have a string body and content-type not specified, assume JSON if body is not None and isinstance(body, str): prepared.setdefault("content-type", "application/json; charset=utf-8") # Basic authentication if provided and not overridden if self.auth_basic and "authorization" not in prepared: user, pwd = self.auth_basic token = base64.b64encode(f"{user}:{pwd}".encode("utf-8")).decode("ascii") prepared["authorization"] = f"Basic {token}" # Merge per-call headers (case-insensitive) if headers: for k, v in headers.items(): prepared[k.lower()] = v # urllib expects header keys in canonical capitalization sometimes; but it’s tolerant. # We'll return as provided; urllib will handle it. return prepared def _should_retry_status(self, status: int) -> bool: return status in RETRY_STATUS_CODES def _maybe_decompress(self, content: bytes, headers: Mapping[str, str]) -> bytes: if not content: return content encoding = (headers.get("content-encoding") or "").lower() try: if "gzip" in encoding: return gzip.decompress(content) if "deflate" in encoding: # Try raw deflate, then zlib-wrapped try: return zlib.decompress(content, -zlib.MAX_WBITS) except zlib.error: return zlib.decompress(content) except Exception: # If decompression fails, return original bytes return content return content
HttpClient
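A usage sketch of the client above; the base URL, credentials, endpoint paths, and payload are hypothetical placeholders, and only constructor arguments and methods defined in the record are used:

# Usage sketch for the client above; every concrete value here is a
# hypothetical placeholder.
client = HttpClient(
    base_url="https://cluster.example.com:9443/v1/",
    auth_basic=("admin", "secret"),
    timeout=10.0,
    ca_file="/etc/ssl/certs/cluster-ca.pem",
)

# GET with query parameters; expect_json=True (the default) returns the
# parsed body, and a non-2xx/3xx status raises HttpError.
databases = client.get("bdbs", params={"fields": ["uid", "name"]})

# POST a JSON body (serialized by the client itself).
created = client.post("bdbs", json_body={"name": "cache", "memory_size": 1024})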
python
readthedocs__readthedocs.org
readthedocs/redirects/exceptions.py
{ "start": 38, "end": 140 }
class ____(Exception): """Exception raised when a redirect loops forever."""
InfiniteRedirectException