language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
streamlit__streamlit
lib/tests/streamlit/runtime/runtime_util_test.py
{ "start": 984, "end": 2163 }
class ____(unittest.TestCase): def test_should_limit_msg_size(self): max_message_size_mb = 50 runtime_util._max_message_size_bytes = None # Reset cached value with patch_config_options({"server.maxMessageSize": max_message_size_mb}): # Set up a larger than limit ForwardMsg string large_msg = create_dataframe_msg([1, 2, 3]) large_msg.delta.new_element.markdown.body = ( "X" * (max_message_size_mb + 10) * 1000 * 1000 ) # Create a copy, since serialize_forward_msg modifies the original proto large_msg_copy = ForwardMsg() large_msg_copy.CopyFrom(large_msg) deserialized_msg = ForwardMsg() deserialized_msg.ParseFromString(serialize_forward_msg(large_msg_copy)) # The metadata should be the same, but contents should be replaced assert deserialized_msg.metadata == large_msg.metadata assert deserialized_msg != large_msg assert ( "exceeds the message size limit" in deserialized_msg.delta.new_element.exception.message )
RuntimeUtilTest
python
docker__docker-py
tests/unit/dockertypes_test.py
{ "start": 12722, "end": 14370 }
class ____(unittest.TestCase): def test_replicated_simple(self): mode = ServiceMode('replicated') assert mode == {'replicated': {}} assert mode.mode == 'replicated' assert mode.replicas is None def test_global_simple(self): mode = ServiceMode('global') assert mode == {'global': {}} assert mode.mode == 'global' assert mode.replicas is None def test_replicated_job_simple(self): mode = ServiceMode('replicated-job') assert mode == {'ReplicatedJob': {}} assert mode.mode == 'ReplicatedJob' assert mode.replicas is None def test_global_job_simple(self): mode = ServiceMode('global-job') assert mode == {'GlobalJob': {}} assert mode.mode == 'GlobalJob' assert mode.replicas is None def test_global_replicas_error(self): with pytest.raises(InvalidArgument): ServiceMode('global', 21) def test_global_job_replicas_simple(self): with pytest.raises(InvalidArgument): ServiceMode('global-job', 21) def test_replicated_replicas(self): mode = ServiceMode('replicated', 21) assert mode == {'replicated': {'Replicas': 21}} assert mode.mode == 'replicated' assert mode.replicas == 21 def test_replicated_replicas_0(self): mode = ServiceMode('replicated', 0) assert mode == {'replicated': {'Replicas': 0}} assert mode.mode == 'replicated' assert mode.replicas == 0 def test_invalid_mode(self): with pytest.raises(InvalidArgument): ServiceMode('foobar')
ServiceModeTest
python
numba__numba
numba/tests/test_listobject.py
{ "start": 26128, "end": 26884 }
class ____(MemoryLeakMixin, TestCase): """Test list extend. """ def test_list_extend_empty(self): @njit def foo(items): l = listobject.new_list(int32) l.extend(items) return len(l) self.assertEqual(foo((1,)), 1) self.assertEqual(foo((1,2)), 2) self.assertEqual(foo((1,2,3)), 3) def test_list_extend_typing_error_non_iterable(self): self.disable_leak_check() @njit def foo(): l = listobject.new_list(int32) l.extend(1) with self.assertRaises(TypingError) as raises: foo() self.assertIn( "extend argument must be iterable", str(raises.exception), )
TestExtend
python
mlflow__mlflow
mlflow/types/responses_helpers.py
{ "start": 2272, "end": 2748 }
class ____(BaseModel): model_config = ConfigDict(extra="allow") type: str @model_validator(mode="after") def check_type(self) -> "Content": if self.type == "output_text": ResponseOutputText(**self.model_dump()) elif self.type == "refusal": ResponseOutputRefusal(**self.model_dump()) else: raise ValueError(f"Invalid content type: {self.type} for {self.__class__.__name__}") return self
Content
python
walkccc__LeetCode
solutions/2194. Cells in a Range on an Excel Sheet/2194.py
{ "start": 0, "end": 281 }
class ____: def cellsInRange(self, s: str) -> list[str]: ans = [] startCol, startRow, _, endCol, endRow = s for j in range(ord(startCol), ord(endCol) + 1): for i in range(int(startRow), int(endRow) + 1): ans.append(chr(j) + str(i)) return ans
Solution
python
run-llama__llama_index
llama-index-integrations/embeddings/llama-index-embeddings-clip/llama_index/embeddings/clip/base.py
{ "start": 452, "end": 4282 }
class ____(MultiModalEmbedding): """ CLIP embedding models for encoding text and image for Multi-Modal purpose. This class provides an interface to generate embeddings using a model deployed in OpenAI CLIP. At the initialization it requires a model name of CLIP. Note: Requires `clip` package to be available in the PYTHONPATH. It can be installed with `pip install git+https://github.com/openai/CLIP.git`. """ embed_batch_size: int = Field(default=DEFAULT_EMBED_BATCH_SIZE, gt=0) _clip: Any = PrivateAttr() _model: Any = PrivateAttr() _preprocess: Any = PrivateAttr() _device: Any = PrivateAttr() @classmethod def class_name(cls) -> str: return "ClipEmbedding" def __init__( self, *, embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE, model_name: str = DEFAULT_CLIP_MODEL, **kwargs: Any, ): """ Initializes the ClipEmbedding class. During the initialization the `clip` package is imported. Args: embed_batch_size (int, optional): The batch size for embedding generation. Defaults to 10, must be > 0 and <= 100. model_name (str): The model name of Clip model. Raises: ImportError: If the `clip` package is not available in the PYTHONPATH. ValueError: If the model cannot be fetched from Open AI. or if the embed_batch_size is not in the range (0, 100]. """ if embed_batch_size <= 0: raise ValueError(f"Embed batch size {embed_batch_size} must be > 0.") try: import clip import torch except ImportError: raise ImportError( "ClipEmbedding requires `pip install git+https://github.com/openai/CLIP.git` and torch." 
) super().__init__( embed_batch_size=embed_batch_size, model_name=model_name, **kwargs ) try: self._device = "cuda" if torch.cuda.is_available() else "cpu" self._model, self._preprocess = clip.load( self.model_name, device=self._device ) except Exception as e: logger.error("Error while loading clip model.") raise ValueError("Unable to fetch the requested embeddings model") from e # TEXT EMBEDDINGS async def _aget_query_embedding(self, query: str) -> Embedding: return self._get_query_embedding(query) def _get_text_embedding(self, text: str) -> Embedding: return self._get_text_embeddings([text])[0] def _get_text_embeddings(self, texts: List[str]) -> List[Embedding]: results = [] for text in texts: try: import clip except ImportError: raise ImportError( "ClipEmbedding requires `pip install git+https://github.com/openai/CLIP.git` and torch." ) text_embedding = self._model.encode_text( clip.tokenize(text).to(self._device) ) results.append(text_embedding.tolist()[0]) return results def _get_query_embedding(self, query: str) -> Embedding: return self._get_text_embedding(query) # IMAGE EMBEDDINGS async def _aget_image_embedding(self, img_file_path: ImageType) -> Embedding: return self._get_image_embedding(img_file_path) def _get_image_embedding(self, img_file_path: ImageType) -> Embedding: import torch with torch.no_grad(): image = ( self._preprocess(Image.open(img_file_path)) .unsqueeze(0) .to(self._device) ) return self._model.encode_image(image).tolist()[0]
ClipEmbedding
python
Pylons__pyramid
docs/tutorials/wiki2/src/authorization/tutorial/security.py
{ "start": 260, "end": 1898 }
class ____: def __init__(self, secret): self.authtkt = AuthTktCookieHelper(secret) self.identity_cache = RequestLocalCache(self.load_identity) self.acl = ACLHelper() def load_identity(self, request): identity = self.authtkt.identify(request) if identity is None: return None userid = identity['userid'] user = request.dbsession.get(models.User, userid) return user def identity(self, request): return self.identity_cache.get_or_create(request) def authenticated_userid(self, request): user = self.identity(request) if user is not None: return user.id def remember(self, request, userid, **kw): return self.authtkt.remember(request, userid, **kw) def forget(self, request, **kw): return self.authtkt.forget(request, **kw) def permits(self, request, context, permission): principals = self.effective_principals(request) return self.acl.permits(context, principals, permission) def effective_principals(self, request): principals = [Everyone] user = self.identity(request) if user is not None: principals.append(Authenticated) principals.append('u:' + str(user.id)) principals.append('role:' + user.role) return principals def includeme(config): settings = config.get_settings() config.set_csrf_storage_policy(CookieCSRFStoragePolicy()) config.set_default_csrf_options(require_csrf=True) config.set_security_policy(MySecurityPolicy(settings['auth.secret']))
MySecurityPolicy
python
urllib3__urllib3
test/with_dummyserver/test_socketlevel.py
{ "start": 68575, "end": 69292 }
class ____(SocketDummyServerTestCase): def test_bad_statusline(self) -> None: self.start_response_handler( b"HTTP/1.1 Omg What Is This?\r\n" b"Content-Length: 0\r\n" b"\r\n" ) with HTTPConnectionPool(self.host, self.port, retries=False) as pool: with pytest.raises(ProtocolError): pool.request("GET", "/") def test_unknown_protocol(self) -> None: self.start_response_handler( b"HTTP/1000 200 OK\r\n" b"Content-Length: 0\r\n" b"\r\n" ) with HTTPConnectionPool(self.host, self.port, retries=False) as pool: with pytest.raises(ProtocolError): pool.request("GET", "/")
TestErrorWrapping
python
doocs__leetcode
solution/0700-0799/0798.Smallest Rotation with Highest Score/Solution.py
{ "start": 0, "end": 423 }
class ____: def bestRotation(self, nums: List[int]) -> int: n = len(nums) mx, ans = -1, n d = [0] * n for i, v in enumerate(nums): l, r = (i + 1) % n, (n + i + 1 - v) % n d[l] += 1 d[r] -= 1 s = 0 for k, t in enumerate(d): s += t if s > mx: mx = s ans = k return ans
Solution
python
huggingface__transformers
src/transformers/models/deberta_v2/tokenization_deberta_v2.py
{ "start": 1144, "end": 7081 }
class ____(TokenizersBackend): """ Construct a DeBERTa-v2 tokenizer (backed by HuggingFace's *tokenizers* library). Based on Unigram tokenization. This tokenizer inherits from [`TokenizersBackend`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`, *optional*): Path to the vocabulary file (SentencePiece model file). Not used directly but kept for compatibility. vocab (`list`, *optional*): List of tuples (piece, score) for the vocabulary. precompiled_charsmap (`bytes`, *optional*): Precompiled character map for normalization. do_lower_case (`bool`, *optional*, defaults to `False`): Whether or not to lowercase the input when tokenizing. split_by_punct (`bool`, *optional*, defaults to `False`): Whether to split by punctuation. bos_token (`str`, *optional*, defaults to `"[CLS]"`): The beginning of sequence token. eos_token (`str`, *optional*, defaults to `"[SEP]"`): The end of sequence token. unk_token (`str`, *optional*, defaults to `"[UNK]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"[PAD]"`): The token used for padding, for example when batching sequences of different lengths. cls_token (`str`, *optional*, defaults to `"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. 
This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. add_prefix_space (`bool`, *optional*, defaults to `True`): Whether or not to add an initial space to the input. This allows to treat the leading word just as any other word. unk_id (`int`, *optional*, defaults to index of `unk_token` in vocab): The ID of the unknown token in the vocabulary. """ vocab_files_names = VOCAB_FILES_NAMES model_input_names = ["input_ids", "attention_mask", "token_type_ids"] def __init__( self, vocab_file=None, vocab=None, do_lower_case=False, split_by_punct=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", add_prefix_space=True, unk_id=3, **kwargs, ): self.vocab_file = vocab_file self.do_lower_case = do_lower_case self.split_by_punct = split_by_punct self.add_prefix_space = add_prefix_space if vocab is None: self._vocab = [ (str(pad_token), 0.0), (str(unk_token), 0.0), (str(bos_token), 0.0), (str(eos_token), 0.0), (str(sep_token), 0.0), (str(cls_token), 0.0), (str(mask_token), 0.0), ] else: self._vocab = [tuple(item) if not isinstance(item, tuple) else item for item in vocab] computed_unk_id = {piece: i for i, (piece, _score) in enumerate(self._vocab)} unk_id = computed_unk_id.get(str(unk_token)) self._tokenizer = Tokenizer( Unigram( self._vocab, unk_id=unk_id, byte_fallback=False, ) ) list_normalizers = [] if do_lower_case: list_normalizers.append(normalizers.Lowercase()) list_normalizers.extend( [ normalizers.Replace("\n", " "), normalizers.Replace("\r", " "), normalizers.Replace("\t", " "), normalizers.Replace(Regex(r" {2,}"), " "), normalizers.NFC(), normalizers.Strip(left=False, right=True), ] ) self._tokenizer.normalizer = normalizers.Sequence(list_normalizers) list_pretokenizers = [] if split_by_punct: list_pretokenizers.append(pre_tokenizers.Punctuation(behavior="isolated")) prepend_scheme = 
_get_prepend_scheme(add_prefix_space) list_pretokenizers.append(pre_tokenizers.Metaspace(replacement="▁", prepend_scheme=prepend_scheme)) self._tokenizer.pre_tokenizer = pre_tokenizers.Sequence(list_pretokenizers) self._tokenizer.decoder = decoders.Metaspace(replacement="▁", prepend_scheme=prepend_scheme) tokenizer_object = self._tokenizer super().__init__( tokenizer_object=tokenizer_object, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, unk_id=unk_id, do_lower_case=do_lower_case, split_by_punct=split_by_punct, add_prefix_space=add_prefix_space, **kwargs, ) __all__ = ["DebertaV2Tokenizer"]
DebertaV2Tokenizer
python
ray-project__ray
rllib/models/torch/torch_action_dist.py
{ "start": 17725, "end": 22634 }
class ____(TorchDistributionWrapper): """Action distribution that operates on multiple, possibly nested actions.""" def __init__(self, inputs, model, *, child_distributions, input_lens, action_space): """Initializes a TorchMultiActionDistribution object. Args: inputs (torch.Tensor): A single tensor of shape [BATCH, size]. model (TorchModelV2): The TorchModelV2 object used to produce inputs for this distribution. child_distributions (any[torch.Tensor]): Any struct that contains the child distribution classes to use to instantiate the child distributions from `inputs`. This could be an already flattened list or a struct according to `action_space`. input_lens (any[int]): A flat list or a nested struct of input split lengths used to split `inputs`. action_space (Union[gym.spaces.Dict,gym.spaces.Tuple]): The complex and possibly nested action space. """ if not isinstance(inputs, torch.Tensor): inputs = torch.from_numpy(inputs) if isinstance(model, TorchModelV2): inputs = inputs.to(next(model.parameters()).device) super().__init__(inputs, model) self.action_space_struct = get_base_struct_from_space(action_space) self.input_lens = tree.flatten(input_lens) flat_child_distributions = tree.flatten(child_distributions) split_inputs = torch.split(inputs, self.input_lens, dim=1) self.flat_child_distributions = tree.map_structure( lambda dist, input_: dist(input_, model), flat_child_distributions, list(split_inputs), ) @override(ActionDistribution) def logp(self, x): if isinstance(x, np.ndarray): x = torch.Tensor(x) # Single tensor input (all merged). if isinstance(x, torch.Tensor): split_indices = [] for dist in self.flat_child_distributions: if isinstance(dist, TorchCategorical): split_indices.append(1) elif ( isinstance(dist, TorchMultiCategorical) and dist.action_space is not None ): split_indices.append(int(np.prod(dist.action_space.shape))) else: sample = dist.sample() # Cover Box(shape=()) case. 
if len(sample.shape) == 1: split_indices.append(1) else: split_indices.append(sample.size()[1]) split_x = list(torch.split(x, split_indices, dim=1)) # Structured or flattened (by single action component) input. else: split_x = tree.flatten(x) def map_(val, dist): # Remove extra categorical dimension. if isinstance(dist, TorchCategorical): val = (torch.squeeze(val, dim=-1) if len(val.shape) > 1 else val).int() return dist.logp(val) # Remove extra categorical dimension and take the logp of each # component. flat_logps = tree.map_structure(map_, split_x, self.flat_child_distributions) return functools.reduce(lambda a, b: a + b, flat_logps) @override(ActionDistribution) def kl(self, other): kl_list = [ d.kl(o) for d, o in zip( self.flat_child_distributions, other.flat_child_distributions ) ] return functools.reduce(lambda a, b: a + b, kl_list) @override(ActionDistribution) def entropy(self): entropy_list = [d.entropy() for d in self.flat_child_distributions] return functools.reduce(lambda a, b: a + b, entropy_list) @override(ActionDistribution) def sample(self): child_distributions = tree.unflatten_as( self.action_space_struct, self.flat_child_distributions ) return tree.map_structure(lambda s: s.sample(), child_distributions) @override(ActionDistribution) def deterministic_sample(self): child_distributions = tree.unflatten_as( self.action_space_struct, self.flat_child_distributions ) return tree.map_structure( lambda s: s.deterministic_sample(), child_distributions ) @override(TorchDistributionWrapper) def sampled_action_logp(self): p = self.flat_child_distributions[0].sampled_action_logp() for c in self.flat_child_distributions[1:]: p += c.sampled_action_logp() return p @override(ActionDistribution) def required_model_output_shape(self, action_space, model_config): return np.sum(self.input_lens, dtype=np.int32) @OldAPIStack
TorchMultiActionDistribution
python
pytorch__pytorch
torch/ao/quantization/fake_quantize.py
{ "start": 14231, "end": 23563 }
class ____(FakeQuantize): r"""Define a fused module to observe the tensor. Fused module that is used to observe the input tensor (compute min/max), compute scale/zero_point and fake_quantize the tensor. This module uses calculation similar MovingAverageMinMaxObserver for the inputs, to compute the min/max values in order to compute the scale/zero_point. The qscheme input in the observer is used to differentiate between symmetric/affine quantization scheme. The output of this module is given by x_out = (clamp(round(x/scale + zero_point), quant_min, quant_max)-zero_point)*scale Similar to :class:`~torch.ao.quantization.FakeQuantize`, and accepts the same attributes as the base class. """ def __init__( self, observer: Any = MovingAverageMinMaxObserver, quant_min: int = 0, quant_max: int = 255, **observer_kwargs: Any, ) -> None: super().__init__(observer, quant_min, quant_max, **observer_kwargs) if not isinstance( self.activation_post_process, (MovingAverageMinMaxObserver, MovingAveragePerChannelMinMaxObserver), ): raise AssertionError( "Fused observer+fake_quant module only works with MovingAverageMinMaxObserver" ) self.register_buffer("fake_quant_enabled", torch.tensor([1], dtype=torch.long)) self.register_buffer("observer_enabled", torch.tensor([1], dtype=torch.long)) self.is_symmetric_quant = _is_symmetric_quant( self.activation_post_process.qscheme ) @torch.jit.export def calculate_qparams(self) -> tuple[torch.Tensor, torch.Tensor]: # type: ignore[override] return self.activation_post_process.calculate_qparams() @torch.jit.export def extra_repr(self) -> str: return ( f"fake_quant_enabled={self.fake_quant_enabled}, observer_enabled={self.observer_enabled}, " f"scale={self.scale}, zero_point={self.zero_point}, dtype={self.dtype}, " f"quant_min={self.activation_post_process.quant_min}, quant_max={self.activation_post_process.quant_max}, " f"qscheme={self.qscheme}, reduce_range={self.activation_post_process.reduce_range}" ) def forward(self, X: torch.Tensor) -> 
torch.Tensor: return torch.fused_moving_avg_obs_fake_quant( X, self.observer_enabled, self.fake_quant_enabled, self.activation_post_process.min_val, self.activation_post_process.max_val, self.scale, self.zero_point, self.activation_post_process.averaging_constant, self.activation_post_process.quant_min, self.activation_post_process.quant_max, self.ch_axis, self.is_per_channel, self.is_symmetric_quant, ) default_fake_quant = FakeQuantize.with_args( observer=MovingAverageMinMaxObserver, quant_min=0, quant_max=255, dtype=torch.quint8, qscheme=torch.per_tensor_affine, reduce_range=True, ) """ Default fake_quant for activations. """ default_weight_fake_quant = FakeQuantize.with_args( observer=MovingAverageMinMaxObserver, quant_min=-128, quant_max=127, dtype=torch.qint8, qscheme=torch.per_tensor_symmetric, reduce_range=False, ) """ Default fake_quant for weights. Observer is memoryless since averaging_constant is 1. """ default_dynamic_fake_quant = FakeQuantize.with_args( observer=MovingAverageMinMaxObserver, quant_min=0, quant_max=255, is_dynamic=True, dtype=torch.quint8, averaging_constant=1, ) """ Default dynamic fake_quant for activations. """ default_fixed_qparams_range_neg1to1_fake_quant = FixedQParamsFakeQuantize.with_args( observer=default_fixed_qparams_range_neg1to1_observer ) default_fixed_qparams_range_0to1_fake_quant = FixedQParamsFakeQuantize.with_args( observer=default_fixed_qparams_range_0to1_observer ) # TODO: the following 2 variables are kept for backwards compatibility; remove after a few releases default_symmetric_fixed_qparams_fake_quant = ( default_fixed_qparams_range_neg1to1_fake_quant ) default_affine_fixed_qparams_fake_quant = default_fixed_qparams_range_0to1_fake_quant default_per_channel_weight_fake_quant = FakeQuantize.with_args( observer=MovingAveragePerChannelMinMaxObserver, quant_min=-128, quant_max=127, dtype=torch.qint8, qscheme=torch.per_channel_symmetric, reduce_range=False, ch_axis=0, ) """ Default fake_quant for per-channel weights. 
Observer is memoryless since averaging_constant is 1. """ default_embedding_fake_quant = FakeQuantize.with_args( observer=MovingAveragePerChannelMinMaxObserver, qscheme=torch.per_channel_affine_float_qparams, dtype=torch.quint8, quant_min=0, quant_max=255, ch_axis=0, averaging_constant=1, ) """ Default fake_quant for embeddings. Observer is memoryless since averaging_constant is 1. """ default_embedding_fake_quant_4bit = FakeQuantize.with_args( observer=MovingAveragePerChannelMinMaxObserver, qscheme=torch.per_channel_affine_float_qparams, ch_axis=0, dtype=torch.quint4x2, averaging_constant=1, ) default_histogram_fake_quant = FakeQuantize.with_args( observer=HistogramObserver, quant_min=0, quant_max=255, dtype=torch.quint8, qscheme=torch.per_tensor_affine, reduce_range=True, ) """ Fake_quant for activations using a histogram.. """ default_fused_act_fake_quant = FusedMovingAvgObsFakeQuantize.with_args( observer=MovingAverageMinMaxObserver, quant_min=0, quant_max=255, dtype=torch.quint8, ) """ Fused version of `default_fake_quant`, with improved performance. """ default_fused_wt_fake_quant = FusedMovingAvgObsFakeQuantize.with_args( observer=MovingAverageMinMaxObserver, quant_min=-128, quant_max=127, dtype=torch.qint8, qscheme=torch.per_tensor_symmetric, ) """ Fused version of `default_weight_fake_quant`, with improved performance. """ default_fused_per_channel_wt_fake_quant = FusedMovingAvgObsFakeQuantize.with_args( observer=MovingAveragePerChannelMinMaxObserver, quant_min=-128, quant_max=127, dtype=torch.qint8, qscheme=torch.per_channel_symmetric, ) """ Fused version of `default_per_channel_weight_fake_quant`, with improved performance. 
""" fused_wt_fake_quant_range_neg_127_to_127 = FusedMovingAvgObsFakeQuantize.with_args( observer=MovingAverageMinMaxObserver, quant_min=-127, quant_max=127, dtype=torch.qint8, qscheme=torch.per_tensor_symmetric, eps=2**-12, ) """ Fused version of `default_weight_fake_quant`, with the 8-bit values restricted to [-127, +127], excluding -128. """ fused_per_channel_wt_fake_quant_range_neg_127_to_127 = ( FusedMovingAvgObsFakeQuantize.with_args( observer=MovingAveragePerChannelMinMaxObserver, quant_min=-127, quant_max=127, dtype=torch.qint8, qscheme=torch.per_channel_symmetric, eps=2**-12, ) ) """ Fused version of `default_per_channel_weight_fake_quant`, with the 8-bit values restricted to [-127, +127], excluding -128. """ def _is_fake_quant_script_module(mod): """Return true if given mod is an instance of FakeQuantize script module.""" if isinstance(mod, torch.jit.RecursiveScriptModule): # qualified name looks like '__torch__.torch.ao.quantization.fake_quantize.___torch_mangle_2.FakeQuantize' suffix = mod._c.qualified_name.split(".", 1)[1] name = re.sub(r"\.___torch_mangle_\d+", "", suffix) return ( name == "torch.ao.quantization.fake_quantize.FakeQuantize" or name == "torch.ao.quantization.fake_quantize.FusedMovingAvgObsFakeQuantize" ) return False def disable_fake_quant(mod): """Disable fake quantization for the module. Disable fake quantization for this module, if applicable. Example usage:: # model is any PyTorch model model.apply(torch.ao.quantization.disable_fake_quant) """ if isinstance(mod, FakeQuantizeBase) or _is_fake_quant_script_module(mod): mod.disable_fake_quant() def enable_fake_quant(mod): """Enable fake quantization for the module. Enable fake quantization for this module, if applicable. Example usage:: # model is any PyTorch model model.apply(torch.ao.quantization.enable_fake_quant) """ if isinstance(mod, FakeQuantizeBase) or _is_fake_quant_script_module(mod): mod.enable_fake_quant() def disable_observer(mod): """Disable observation for this module. 
Disable observation for this module, if applicable. Example usage:: # model is any PyTorch model model.apply(torch.ao.quantization.disable_observer) """ if isinstance(mod, FakeQuantizeBase) or _is_fake_quant_script_module(mod): mod.disable_observer() def enable_observer(mod): """Enable observation for this module. Enable observation for this module, if applicable. Example usage:: # model is any PyTorch model model.apply(torch.ao.quantization.enable_observer) """ if isinstance(mod, FakeQuantizeBase) or _is_fake_quant_script_module(mod): mod.enable_observer()
FusedMovingAvgObsFakeQuantize
python
PyCQA__pylint
doc/data/messages/i/invalid-class-object/good.py
{ "start": 24, "end": 86 }
class ____: pass Apple.__class__ = RedDelicious
RedDelicious
python
coleifer__peewee
tests/sqlite.py
{ "start": 3226, "end": 3397 }
class ____(FTS5Model): title = SearchField() data = SearchField() misc = SearchField(unindexed=True) class Meta: legacy_table_names = False
FTS5Test
python
oauthlib__oauthlib
oauthlib/oauth2/rfc6749/errors.py
{ "start": 4127, "end": 4278 }
class ____(OAuth2Error): error = 'mismatching_state' description = 'CSRF Warning! State not equal in request and response.'
MismatchingStateError
python
ray-project__ray
doc/source/serve/doc_code/application_level_autoscaling.py
{ "start": 501, "end": 964 }
class ____: def __init__(self, preprocessor, model): self._preprocessor = preprocessor self._model = model async def __call__(self, input_data: str) -> str: # Coordinate preprocessing and model inference preprocessed = await self._preprocessor.remote(input_data) result = await self._model.remote(preprocessed) return result app = Driver.bind(Preprocessor.bind(), Model.bind()) # __serve_example_end__
Driver
python
ray-project__ray
python/ray/tests/test_unavailable_actors.py
{ "start": 336, "end": 6363 }
class ____: def __init__( self, *, caller_pid: Optional[int] = None, init_signal: Optional[ray.actor.ActorHandle] = None, ): if init_signal is not None: ray.get(init_signal.wait.remote()) self._count = 0 self._caller_pid = caller_pid def getpid(self): return os.getpid() def get(self) -> int: return self._count def inc(self, *, disconnect: bool = False) -> int: if disconnect: assert self._caller_pid is not None, "Must provide caller PID." _close_common_connections(self._caller_pid) self._count += 1 return self._count def call_from(f, source): if source == "driver": return f() elif source == "actor": @ray.remote class Wrapper: def invoke(self): f() a = Wrapper.remote() return ray.get(a.invoke.remote()) elif source == "task": @ray.remote def wrapper(): return f() return ray.get(wrapper.remote()) else: raise ValueError(f"unknown {source}") def sigkill_actor(actor, timeout=5): """Sends SIGKILL to an actor's process. The actor must be on the same node, and it must has a `getpid` method.""" pid = ray.get(actor.getpid.remote()) print(f"killing actor {actor}'s process {pid}") try: proc = psutil.Process(pid) os.kill(pid, signal.SIGKILL) # Wait for the process to terminate (with timeout) try: proc.wait(timeout=timeout) print(f"Process {pid} terminated.") except psutil.TimeoutExpired: print(f"Process {pid} did not terminate within {timeout} seconds.") except psutil.NoSuchProcess: print(f"Process {pid} does not exist — it may have already exited.") def _close_common_connections(pid: int): """Closes ipv2 connections between the current process and the target process.""" current_process = psutil.Process() current_connections = current_process.connections(kind="inet") try: other_process = psutil.Process(pid) other_connections = other_process.connections(kind="inet") except psutil.NoSuchProcess: print(f"No process with PID {pid} found.") return # Finding common connections based on matching addresses and ports. 
common_connections = [] for conn1 in current_connections: for conn2 in other_connections: if conn1.laddr == conn2.raddr and conn1.raddr == conn2.laddr: common_connections.append((conn1.fd, conn1.laddr, conn1.raddr)) # Closing the FDs. for fd, laddr, raddr in common_connections: if fd != -1: # FD is -1 if it's not accessible or if it's a pseudo FD. os.close(fd) print(f"Closed FD: {fd}, laddr: {laddr}, raddr: {raddr}") @pytest.mark.parametrize("caller", ["actor", "task", "driver"]) @pytest.mark.skipif(sys.platform == "win32", reason="does not work on windows") def test_actor_unavailable_conn_broken(ray_start_regular, caller): def _run_test(): a = Counter.remote(caller_pid=os.getpid()) counter_pid = ray.get(a.getpid.remote()) # Server (counter actor) unexpectedly disconnects the connection once the method # has started executing. The task should raise `ActorUnavailableError` but its # side effects should be observable (count was incremented). obj_ref = a.inc.remote(disconnect=True) with pytest.raises(ActorUnavailableError): ray.get(obj_ref) assert ray.get(a.get.remote()) == 1 # Client (driver) unexpectedly disconnects the connection prior to submitting the # task. The task should raise `ActorUnavailableError` and should never have # executed, therefore the count should not have been incremented. 
_close_common_connections(counter_pid) with pytest.raises(ActorUnavailableError): ray.get(a.inc.remote(disconnect=False)) assert ray.get(a.get.remote()) == 1 call_from(_run_test, caller) @pytest.mark.parametrize("caller", ["actor", "task", "driver"]) @pytest.mark.skipif(sys.platform == "win32", reason="does not work on windows") def test_actor_unavailable_restarting(ray_start_regular, caller): def _run_test(): init_signal = SignalActor.remote() a = Counter.options(max_restarts=1).remote(init_signal=init_signal) wait_for_condition(lambda: ray.get(init_signal.cur_num_waiters.remote()) == 1) ray.get(init_signal.send.remote(clear=True)) assert ray.get(a.inc.remote()) == 1 # Kill the actor process and expect `ActorUnavailableError` as it restarts. sigkill_actor(a) with pytest.raises(ActorUnavailableError): ray.get(a.inc.remote()) with pytest.raises(ActorUnavailableError, match="The actor is restarting"): ray.get(a.inc.remote()) ray.get(init_signal.send.remote()) # After the actor starts, the next calls are OK. However the previous actor # instance's state is lost. wait_for_condition(lambda: ray.get(a.get.remote()) == 0) # Kill the actor again. This time it's not going to restart so ActorDiedError. sigkill_actor(a) with pytest.raises(ActorDiedError): print(ray.get(a.inc.remote())) call_from(_run_test, caller) @pytest.mark.parametrize("caller", ["actor", "task", "driver"]) @pytest.mark.skipif(sys.platform == "win32", reason="does not work on windows") def test_actor_unavailable_norestart(ray_start_regular, caller): def _run_test(): a = Counter.remote() assert ray.get(a.get.remote()) == 0 # Kill the actor process. The actor died permanently so ActorDiedError. sigkill_actor(a) with pytest.raises(ActorDiedError): print(ray.get(a.get.remote())) call_from(_run_test, caller) @ray.remote(max_restarts=-1, max_task_retries=0)
Counter
python
dagster-io__dagster
python_modules/dagster-pipes/dagster_pipes/__init__.py
{ "start": 22950, "end": 23661 }
class ____(PipesBlobStoreMessageWriterChannel): """Message writer channel that periodically writes message chunks to an endpoint mounted on the filesystem. Args: interval (float): interval in seconds between chunk uploads """ def __init__(self, path: str, *, interval: float = 10): super().__init__(interval=interval) self._path = path def upload_messages_chunk(self, payload: IO, index: int) -> None: message_path = os.path.join(self._path, f"{index}.json") with open(message_path, "w") as f: f.write(payload.read()) # ######################## # ##### IO - DEFAULT # ########################
PipesBufferedFilesystemMessageWriterChannel
python
scipy__scipy
scipy/linalg/tests/test_decomp.py
{ "start": 78541, "end": 84082 }
class ____: def check_schur(self, a, t, u, rtol, atol): # Check that the Schur decomposition is correct. assert_allclose(u @ t @ u.conj().T, a, rtol=rtol, atol=atol, err_msg="Schur decomposition does not match 'a'") # The expected value of u @ u.H - I is all zeros, so test # with absolute tolerance only. assert_allclose(u @ u.conj().T - np.eye(len(u)), 0, rtol=0, atol=atol, err_msg="u is not unitary") def test_simple(self): a = [[8, 12, 3], [2, 9, 3], [10, 3, 6]] t, z = schur(a) self.check_schur(a, t, z, rtol=1e-14, atol=5e-15) tc, zc = schur(a, 'complex') assert_(np.any(ravel(iscomplex(zc))) and np.any(ravel(iscomplex(tc)))) self.check_schur(a, tc, zc, rtol=1e-14, atol=5e-15) tc2, zc2 = rsf2csf(tc, zc) self.check_schur(a, tc2, zc2, rtol=1e-14, atol=5e-15) @pytest.mark.parametrize( 'sort, expected_diag', [('lhp', [-np.sqrt(2), -0.5, np.sqrt(2), 0.5]), ('rhp', [np.sqrt(2), 0.5, -np.sqrt(2), -0.5]), ('iuc', [-0.5, 0.5, np.sqrt(2), -np.sqrt(2)]), ('ouc', [np.sqrt(2), -np.sqrt(2), -0.5, 0.5]), (lambda x: x >= 0.0, [np.sqrt(2), 0.5, -np.sqrt(2), -0.5])] ) def test_sort(self, sort, expected_diag): # The exact eigenvalues of this matrix are # -sqrt(2), sqrt(2), -1/2, 1/2. 
a = [[4., 3., 1., -1.], [-4.5, -3.5, -1., 1.], [9., 6., -4., 4.5], [6., 4., -3., 3.5]] t, u, sdim = schur(a, sort=sort) self.check_schur(a, t, u, rtol=1e-14, atol=5e-15) assert_allclose(np.diag(t), expected_diag, rtol=1e-12) assert_equal(2, sdim) def test_sort_errors(self): a = [[4., 3., 1., -1.], [-4.5, -3.5, -1., 1.], [9., 6., -4., 4.5], [6., 4., -3., 3.5]] assert_raises(ValueError, schur, a, sort='unsupported') assert_raises(ValueError, schur, a, sort=1) def test_check_finite(self): a = [[8, 12, 3], [2, 9, 3], [10, 3, 6]] t, z = schur(a, check_finite=False) assert_array_almost_equal(z @ t @ z.conj().T, a) @pytest.mark.parametrize('dt', [int, float, np.float32, complex, np.complex64]) def test_empty(self, dt): a = np.empty((0, 0), dtype=dt) t, z = schur(a) t0, z0 = schur(np.eye(2, dtype=dt)) assert_allclose(t, np.empty((0, 0))) assert_allclose(z, np.empty((0, 0))) assert t.dtype == t0.dtype assert z.dtype == z0.dtype t, z, sdim = schur(a, sort='lhp') assert_allclose(t, np.empty((0, 0))) assert_allclose(z, np.empty((0, 0))) assert_equal(sdim, 0) assert t.dtype == t0.dtype assert z.dtype == z0.dtype @pytest.mark.parametrize('sort', ['iuc', 'ouc']) @pytest.mark.parametrize('output', ['real', 'complex']) @pytest.mark.parametrize('dtype', [np.float32, np.float64, np.complex64, np.complex128]) def test_gh_13137_sort_str(self, sort, output, dtype): # gh-13137 reported that sort values 'iuc' and 'ouc' were not # correct because the callables assumed that the eigenvalues would # always be expressed as a single complex number. # In fact, when `output='real'` and the dtype is real, the # eigenvalues are passed as separate real and imaginary components # (yet no error is raised if the callable accepts only one argument). # # This tests these sort values by counting the number of eigenvalues # `schur` reports as being inside/outside the unit circle. 
# Real matrix with eigenvalues 0.1 +- 2j A = np.asarray([[0.1, -2], [2, 0.1]]) # Previously, this would fail for `output='real'` with real dtypes sdim = schur(A.astype(dtype), sort=sort, output=output)[-1] assert sdim == 0 if sort == 'iuc' else sdim == 2 @pytest.mark.parametrize('output', ['real', 'complex']) @pytest.mark.parametrize('dtype', [np.float32, np.float64, np.complex64, np.complex128]) def test_gh_13137_sort_custom(self, output, dtype): # This simply tests our understanding of how eigenvalues are # passed to a sort callable. If `output='real'` and the dtype is real, # real and imaginary parts are passed as separate real arguments; # otherwise, they are passed a single complex argument. # Also, if `output='real'` and the dtype is real, when either # eigenvalue in a complex conjugate pair satisfies the sort condition, # `sdim` is incremented by TWO. # Real matrix with eigenvalues 0.1 +- 2j A = np.asarray([[0.1, -2], [2, 0.1]]) all_real = output=='real' and dtype in {np.float32, np.float64} def sort(x, y=None): if all_real: assert not np.iscomplexobj(x) assert y is not None and np.isreal(y) z = x + y*1j else: assert np.iscomplexobj(x) assert y is None z = x return z.imag > 1e-15 # Only one complex eigenvalue satisfies the condition, but when # `all_real` applies, both eigenvalues in the complex conjugate pair # are counted. sdim = schur(A.astype(dtype), sort=sort, output=output)[-1] assert sdim == 2 if all_real else sdim == 1
TestSchur
python
joblib__joblib
joblib/backports.py
{ "start": 1280, "end": 5450 }
class ____(Version): """Backport from deprecated distutils We maintain this backport to avoid introducing a new dependency on `packaging`. We might rexplore this choice in the future if all major Python projects introduce a dependency on packaging anyway. """ component_re = re.compile(r"(\d+ | [a-z]+ | \.)", re.VERBOSE) def __init__(self, vstring=None): if vstring: self.parse(vstring) def parse(self, vstring): # I've given up on thinking I can reconstruct the version string # from the parsed tuple -- so I just store the string here for # use by __str__ self.vstring = vstring components = [x for x in self.component_re.split(vstring) if x and x != "."] for i, obj in enumerate(components): try: components[i] = int(obj) except ValueError: pass self.version = components def __str__(self): return self.vstring def __repr__(self): return "LooseVersion ('%s')" % str(self) def _cmp(self, other): if isinstance(other, str): other = LooseVersion(other) elif not isinstance(other, LooseVersion): return NotImplemented if self.version == other.version: return 0 if self.version < other.version: return -1 if self.version > other.version: return 1 try: import numpy as np def make_memmap( filename, dtype="uint8", mode="r+", offset=0, shape=None, order="C", unlink_on_gc_collect=False, ): """Custom memmap constructor compatible with numpy.memmap. This function: - is a backport the numpy memmap offset fix (See https://github.com/numpy/numpy/pull/8443 for more details. The numpy fix is available starting numpy 1.13) - adds ``unlink_on_gc_collect``, which specifies explicitly whether the process re-constructing the memmap owns a reference to the underlying file. If set to True, it adds a finalizer to the newly-created memmap that sends a maybe_unlink request for the memmaped file to resource_tracker. 
""" util.debug( "[MEMMAP READ] creating a memmap (shape {}, filename {}, pid {})".format( shape, basename(filename), os.getpid() ) ) mm = np.memmap( filename, dtype=dtype, mode=mode, offset=offset, shape=shape, order=order ) if LooseVersion(np.__version__) < "1.13": mm.offset = offset if unlink_on_gc_collect: from ._memmapping_reducer import add_maybe_unlink_finalizer add_maybe_unlink_finalizer(mm) return mm except ImportError: def make_memmap( filename, dtype="uint8", mode="r+", offset=0, shape=None, order="C", unlink_on_gc_collect=False, ): raise NotImplementedError( "'joblib.backports.make_memmap' should not be used " "if numpy is not installed." ) if os.name == "nt": # https://github.com/joblib/joblib/issues/540 access_denied_errors = (5, 13) from os import replace def concurrency_safe_rename(src, dst): """Renames ``src`` into ``dst`` overwriting ``dst`` if it exists. On Windows os.replace can yield permission errors if executed by two different processes. """ max_sleep_time = 1 total_sleep_time = 0 sleep_time = 0.001 while total_sleep_time < max_sleep_time: try: replace(src, dst) break except Exception as exc: if getattr(exc, "winerror", None) in access_denied_errors: time.sleep(sleep_time) total_sleep_time += sleep_time sleep_time *= 2 else: raise else: raise else: from os import replace as concurrency_safe_rename # noqa
LooseVersion
python
PyCQA__pylint
tests/functional/m/method_hidden.py
{ "start": 314, "end": 429 }
class ____(Abcd): """dummy""" def abcd(self): # [method-hidden] """test""" print(self)
Cdef
python
numpy__numpy
numpy/_core/tests/test_scalar_ctors.py
{ "start": 162, "end": 1317 }
class ____: def test_floating(self): # Ticket #640, floats from string fsingle = np.single('1.234') fdouble = np.double('1.234') flongdouble = np.longdouble('1.234') assert_almost_equal(fsingle, 1.234) assert_almost_equal(fdouble, 1.234) assert_almost_equal(flongdouble, 1.234) def test_floating_overflow(self): """ Strings containing an unrepresentable float overflow """ fhalf = np.half('1e10000') assert_equal(fhalf, np.inf) fsingle = np.single('1e10000') assert_equal(fsingle, np.inf) fdouble = np.double('1e10000') assert_equal(fdouble, np.inf) flongdouble = pytest.warns(RuntimeWarning, np.longdouble, '1e10000') assert_equal(flongdouble, np.inf) fhalf = np.half('-1e10000') assert_equal(fhalf, -np.inf) fsingle = np.single('-1e10000') assert_equal(fsingle, -np.inf) fdouble = np.double('-1e10000') assert_equal(fdouble, -np.inf) flongdouble = pytest.warns(RuntimeWarning, np.longdouble, '-1e10000') assert_equal(flongdouble, -np.inf)
TestFromString
python
getsentry__sentry
fixtures/safe_migrations_apps/good_flow_delete_pending_with_fk_constraints_app/migrations/0001_initial.py
{ "start": 180, "end": 1303 }
class ____(CheckedMigration): initial = True dependencies = [] operations = [ migrations.CreateModel( name="FkTable", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID" ), ), ], ), migrations.CreateModel( name="TestTable", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID" ), ), ( "fk_table", sentry.db.models.fields.foreignkey.FlexibleForeignKey( on_delete=django.db.models.deletion.CASCADE, to="good_flow_delete_pending_with_fk_constraints_app.fktable", db_index=False, ), ), ], ), ]
Migration
python
dagster-io__dagster
python_modules/libraries/dagster-mysql/dagster_mysql/utils.py
{ "start": 869, "end": 5876 }
class ____(Exception): pass def get_conn(conn_string: str) -> MySQLConnectionUnion: parsed = urlparse(conn_string) conn = cast( "MySQLConnectionUnion", mysql.connect( user=parsed.username, passwd=parsed.password, host=parsed.hostname, database=parsed.path[1:], # Skip first char, URL parser retains leading "/" port=parsed.port, ), ) # https://github.com/dagster-io/dagster/issues/3735 return conn def mysql_url_from_config(config_value: MySqlStorageConfig) -> str: if config_value.get("mysql_url"): return config_value["mysql_url"] return get_conn_string(**config_value["mysql_db"]) def get_conn_string( username: str, password: str, hostname: str, db_name: str, port: Union[int, str] = "3306" ) -> str: return f"mysql+mysqlconnector://{username}:{urlquote(password)}@{hostname}:{port}/{db_name}" def parse_mysql_version(version: str) -> tuple[int, ...]: """Parse MySQL version into a tuple of ints. Args: version (str): MySQL version string. Returns: tuple: Tuple of ints representing the MySQL version. 
""" parsed = [] for part in re.split(r"\D+", version): if len(part) == 0: continue try: parsed.append(int(part)) except ValueError: continue return tuple(parsed) def retry_mysql_creation_fn( fn: Callable[[], T], retry_limit: int = 5, retry_wait: float = 0.2 ) -> T: # Retry logic to recover from the case where two processes are creating # tables at the same time using sqlalchemy check.callable_param(fn, "fn") check.int_param(retry_limit, "retry_limit") check.numeric_param(retry_wait, "retry_wait") while True: try: return fn() except ( mysql.ProgrammingError, mysql.IntegrityError, db_exc.ProgrammingError, db_exc.IntegrityError, ) as exc: if ( isinstance(exc, db_exc.ProgrammingError) and exc.orig and exc.orig.errno == mysql_errorcode.ER_TABLE_EXISTS_ERROR ) or ( isinstance(exc, mysql.ProgrammingError) and exc.errno == mysql_errorcode.ER_TABLE_EXISTS_ERROR ): raise logging.warning("Retrying failed database creation") if retry_limit == 0: raise DagsterMySQLException("too many retries for DB creation") from exc time.sleep(retry_wait) retry_limit -= 1 def retry_mysql_connection_fn( fn: Callable[[], T], retry_limit: int = 5, retry_wait: float = 0.2, ) -> T: """Reusable retry logic for any MySQL connection functions that may fail. Intended to be used anywhere we connect to MySQL, to gracefully handle transient connection issues. 
""" check.callable_param(fn, "fn") check.int_param(retry_limit, "retry_limit") check.numeric_param(retry_wait, "retry_wait") while True: try: return fn() except ( mysql.DatabaseError, mysql.OperationalError, db_exc.DatabaseError, db_exc.OperationalError, mysql.errors.InterfaceError, ) as exc: logging.warning("Retrying failed database connection") if retry_limit == 0: raise DagsterMySQLException("too many retries for DB connection") from exc time.sleep(retry_wait) retry_limit -= 1 def wait_for_connection(conn_string: str, retry_limit: int = 5, retry_wait: float = 0.2) -> bool: parsed = urlparse(conn_string) retry_mysql_connection_fn( lambda: cast( "Union[mysql.MySQLConnection, PooledMySQLConnection]", mysql.connect( user=parsed.username, passwd=parsed.password, host=parsed.hostname, database=parsed.path[1:], # Skip first char, URL parser retains leading "/" port=parsed.port, ), ), retry_limit=retry_limit, retry_wait=retry_wait, ) return True def mysql_alembic_config(dunder_file: str) -> Config: return get_alembic_config(dunder_file, config_path="../alembic/alembic.ini") def mysql_isolation_level(): return "REPEATABLE READ" @contextmanager def create_mysql_connection( engine: db.engine.Engine, dunder_file: str, storage_type_desc: Optional[str] = None ) -> Iterator[Connection]: check.inst_param(engine, "engine", db.engine.Engine) check.str_param(dunder_file, "dunder_file") check.opt_str_param(storage_type_desc, "storage_type_desc", "") if storage_type_desc: storage_type_desc += " " else: storage_type_desc = "" conn_cm = retry_mysql_connection_fn(engine.connect) with conn_cm as conn: with conn.begin(): yield conn
DagsterMySQLException
python
apache__airflow
providers/cncf/kubernetes/src/airflow/providers/cncf/kubernetes/operators/pod.py
{ "start": 4181, "end": 4305 }
class ____(AirflowException): """When during reconnect more than one matching pod was found."""
FoundMoreThanOnePodFailure
python
numba__numba
numba/cuda/vector_types.py
{ "start": 830, "end": 6750 }
class ____(types.Type): def __init__(self, name, base_type, attr_names, user_facing_object): self._base_type = base_type self._attr_names = attr_names self._user_facing_object = user_facing_object super().__init__(name=name) @property def base_type(self): return self._base_type @property def attr_names(self): return self._attr_names @property def num_elements(self): return len(self._attr_names) @property def user_facing_object(self): return self._user_facing_object def make_vector_type( name: str, base_type: types.Type, attr_names: Tuple[str, ...], user_facing_object ) -> types.Type: """Create a vector type. Parameters ---------- name: str The name of the type. base_type: numba.types.Type The primitive type for each element in the vector. attr_names: tuple of str Name for each attribute. user_facing_object: object The handle to be used in cuda kernel. """ class _VectorType(VectorType): """Internal instantiation of VectorType.""" pass class VectorTypeModel(models.StructModel): def __init__(self, dmm, fe_type): members = [(attr_name, base_type) for attr_name in attr_names] super().__init__(dmm, fe_type, members) vector_type = _VectorType(name, base_type, attr_names, user_facing_object) register_model(_VectorType)(VectorTypeModel) for attr_name in attr_names: make_attribute_wrapper(_VectorType, attr_name, attr_name) return vector_type def enable_vector_type_ctor( vector_type: VectorType, overloads: List[List[types.Type]] ): """Create typing and lowering for vector type constructor. Parameters ---------- vector_type: VectorType The type whose constructor to type and lower. overloads: List of argument types A list containing different overloads of the constructor. Each base type in the argument list should either be primitive type or VectorType. 
""" ctor = vector_type.user_facing_object @register class CtorTemplate(ConcreteTemplate): key = ctor cases = [signature(vector_type, *arglist) for arglist in overloads] register_global(ctor, types.Function(CtorTemplate)) # Lowering def make_lowering(fml_arg_list): """Meta function to create a lowering for the constructor. Flattens the arguments by converting vector_type into load instructions for each of its attributes. Such as float2 -> float2.x, float2.y. """ def lowering(context, builder, sig, actual_args): # A list of elements to assign from source_list = [] # Convert the list of argument types to a list of load IRs. for argidx, fml_arg in enumerate(fml_arg_list): if isinstance(fml_arg, VectorType): pxy = cgutils.create_struct_proxy(fml_arg)( context, builder, actual_args[argidx] ) source_list += [ getattr(pxy, attr) for attr in fml_arg.attr_names ] else: # assumed primitive type source_list.append(actual_args[argidx]) if len(source_list) != vector_type.num_elements: raise CudaLoweringError( f"Unmatched number of source elements ({len(source_list)}) " "and target elements ({vector_type.num_elements})." ) out = cgutils.create_struct_proxy(vector_type)(context, builder) for attr_name, source in zip(vector_type.attr_names, source_list): setattr(out, attr_name, source) return out._getvalue() return lowering for arglist in overloads: lowering = make_lowering(arglist) lower(ctor, *arglist)(lowering) vector_types : Dict[str, VectorType] = {} def build_constructor_overloads(base_type, vty_name, num_elements, arglists, l): """ For a given vector type, build a list of overloads for its constructor. """ # TODO: speed up with memoization if num_elements == 0: arglists.append(l[:]) for i in range(1, num_elements + 1): if i == 1: # For 1-element component, it can construct with either a # primitive type or other 1-element component. 
l.append(base_type) build_constructor_overloads( base_type, vty_name, num_elements - i, arglists, l ) l.pop(-1) l.append(vector_types[f"{vty_name[:-1]}1"]) build_constructor_overloads( base_type, vty_name, num_elements - i, arglists, l ) l.pop(-1) else: l.append(vector_types[f"{vty_name[:-1]}{i}"]) build_constructor_overloads( base_type, vty_name, num_elements - i, arglists, l ) l.pop(-1) def _initialize(): """ Construct the vector types, populate `vector_types` dictionary, and enable the constructors. """ vector_type_attribute_names = ("x", "y", "z", "w") for stub in stubs._vector_type_stubs: type_name = stub.__name__ base_type = getattr(types, type_name[:-2]) num_elements = int(type_name[-1]) attributes = vector_type_attribute_names[:num_elements] vector_type = make_vector_type(type_name, base_type, attributes, stub) vector_types[type_name] = vector_type for vty in vector_types.values(): arglists, l = [], [] build_constructor_overloads( vty.base_type, vty.name, vty.num_elements, arglists, l ) enable_vector_type_ctor(vty, arglists) _initialize()
VectorType
python
prabhupant__python-ds
data_structures/binary_trees/sum_of_all_left_leaves.py
{ "start": 53, "end": 825 }
class ____: def __init__(self, val): self.val = val self.left = None self.right = None def is_leaf(root): if root is None: return False if root.left is None and root.right is None: return True return False def sum_left(root): s = 0 stack = [] while True: if root: stack.append(root) root = root.left else: if not stack: break root = stack.pop() if is_leaf(root.left): s += root.left.val root = root.right return s root = Node(9) root.left = Node(8) root.right = Node(6) root.right.left = Node(1) root.left.left = Node(5) root.left.right = Node(2) print(sum_left(root))
Node
python
numba__numba
numba/cuda/tests/cudapy/test_blackscholes.py
{ "start": 1127, "end": 4023 }
class ____(CUDATestCase): def test_blackscholes(self): OPT_N = 400 iterations = 2 stockPrice = randfloat(np.random.random(OPT_N), 5.0, 30.0) optionStrike = randfloat(np.random.random(OPT_N), 1.0, 100.0) optionYears = randfloat(np.random.random(OPT_N), 0.25, 10.0) callResultNumpy = np.zeros(OPT_N) putResultNumpy = -np.ones(OPT_N) callResultNumba = np.zeros(OPT_N) putResultNumba = -np.ones(OPT_N) # numpy for i in range(iterations): black_scholes(callResultNumpy, putResultNumpy, stockPrice, optionStrike, optionYears, RISKFREE, VOLATILITY) @cuda.jit(double(double), device=True, inline=True) def cnd_cuda(d): K = 1.0 / (1.0 + 0.2316419 * math.fabs(d)) ret_val = (RSQRT2PI * math.exp(-0.5 * d * d) * (K * (A1 + K * (A2 + K * (A3 + K * (A4 + K * A5)))))) if d > 0: ret_val = 1.0 - ret_val return ret_val @cuda.jit(void(double[:], double[:], double[:], double[:], double[:], double, double)) def black_scholes_cuda(callResult, putResult, S, X, T, R, V): i = cuda.threadIdx.x + cuda.blockIdx.x * cuda.blockDim.x if i >= S.shape[0]: return sqrtT = math.sqrt(T[i]) d1 = ((math.log(S[i] / X[i]) + (R + 0.5 * V * V) * T[i]) / (V * sqrtT)) d2 = d1 - V * sqrtT cndd1 = cnd_cuda(d1) cndd2 = cnd_cuda(d2) expRT = math.exp((-1. 
* R) * T[i]) callResult[i] = (S[i] * cndd1 - X[i] * expRT * cndd2) putResult[i] = (X[i] * expRT * (1.0 - cndd2) - S[i] * (1.0 - cndd1)) # numba blockdim = 512, 1 griddim = int(math.ceil(float(OPT_N) / blockdim[0])), 1 stream = cuda.stream() d_callResult = cuda.to_device(callResultNumba, stream) d_putResult = cuda.to_device(putResultNumba, stream) d_stockPrice = cuda.to_device(stockPrice, stream) d_optionStrike = cuda.to_device(optionStrike, stream) d_optionYears = cuda.to_device(optionYears, stream) for i in range(iterations): black_scholes_cuda[griddim, blockdim, stream]( d_callResult, d_putResult, d_stockPrice, d_optionStrike, d_optionYears, RISKFREE, VOLATILITY) d_callResult.copy_to_host(callResultNumba, stream) d_putResult.copy_to_host(putResultNumba, stream) stream.synchronize() delta = np.abs(callResultNumpy - callResultNumba) L1norm = delta.sum() / np.abs(callResultNumpy).sum() max_abs_err = delta.max() self.assertTrue(L1norm < 1e-13) self.assertTrue(max_abs_err < 1e-13) if __name__ == '__main__': unittest.main()
TestBlackScholes
python
getsentry__sentry
src/sentry/middleware/customer_domain.py
{ "start": 2528, "end": 4371 }
class ____: """ Set active organization from request.domain. """ def __init__(self, get_response: Callable[[HttpRequest], HttpResponseBase]) -> None: self.get_response = get_response def __call__(self, request: HttpRequest) -> HttpResponseBase: if ( request.method != "GET" or not features.has("system:multi-region") or not hasattr(request, "subdomain") ): return self.get_response(request) subdomain = request.subdomain if subdomain is None or subdomain_is_region(request): return self.get_response(request) if ( settings.DISALLOWED_CUSTOMER_DOMAINS and request.subdomain in settings.DISALLOWED_CUSTOMER_DOMAINS ): # DISALLOWED_CUSTOMER_DOMAINS is a list of org slugs that are explicitly not allowed to use customer domains. # We kick any request to the logout view. logout(request) redirect_url = absolute_uri(reverse("sentry-logout")) logger.info("customer_domain.redirect.logout", extra={"location": redirect_url}) return HttpResponseRedirect(redirect_url) activeorg = _resolve_activeorg(request) if not activeorg: session = getattr(request, "session", None) if session and "activeorg" in session: del session["activeorg"] return self.get_response(request) auth.set_active_org(request, activeorg) redirect_url = _resolve_redirect_url(request, activeorg) if redirect_url is not None and len(redirect_url) > 0: logger.info("customer_domain.redirect", extra={"location": redirect_url}) return HttpResponseRedirect(redirect_url) return self.get_response(request)
CustomerDomainMiddleware
python
huggingface__transformers
src/transformers/models/llava_onevision/configuration_llava_onevision.py
{ "start": 802, "end": 8061 }
class ____(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`LlavaOnevisionForConditionalGeneration`]. It is used to instantiate an Llava-NeXT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the [llava-hf/llava-onevision-qwen2-7b-ov-hf](https://huggingface.co/llava-hf/llava-onevision-qwen2-7b-ov-hf) model. Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: vision_config (`Union[AutoConfig, dict]`, *optional*, defaults to `SiglipVisionConfig`): The config object or dictionary of the vision backbone. text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `Qwen2Config`): The config object or dictionary of the text backbone. image_token_index (`int`, *optional*, defaults to 151646): The image token index to encode the image prompt. video_token_index (`int`, *optional*, defaults to 151647): The video token index to encode the video prompt. projector_hidden_act (`str`, *optional*, defaults to `"gelu"`): The activation function used by the multimodal projector. vision_feature_select_strategy (`str`, *optional*, defaults to `"full"`): The feature selection strategy used to select the vision feature from the vision backbone. Can be one of `"default"` or `"full"`. If `"default"`, the CLS token is removed from the vision features. If `"full"`, the full vision features are used. vision_feature_layer (`Union[int, list[int]]`, *optional*, defaults to -1): The index of the layer to select the vision feature. If multiple indices are provided, the vision feature of the corresponding indices will be concatenated to form the vision features. vision_aspect_ratio (`str`, *optional*, defaults to `"anyres_max_9"`): Aspect ratio used when processong image features. 
The default value is "anyres_max_9". image_grid_pinpoints (`List`, *optional*): A list of possible resolutions to use for processing high resolution images. Each item in the list should be a tuple or list of the form `(height, width)`. tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether the model's input and output word embeddings should be tied. multimodal_projector_bias (`bool`, *optional*, defaults to `True`): Whether to use bias in the multimodal projector. Example: ```python >>> from transformers import LlavaOnevisionForConditionalGeneration, LlavaOnevisionConfig, SiglipVisionConfig, Qwen2Config >>> # Initializing a CLIP-vision config >>> vision_config = SiglipVisionConfig() >>> # Initializing a Llama config >>> text_config = Qwen2Config() >>> # Initializing a Llava-Next llava-hf/llava-onevision-qwen2-7b-ov-hf style configuration >>> configuration = LlavaOnevisionConfig(vision_config, text_config) >>> # Initializing a model from the llava-hf/llava-onevision-qwen2-7b-ov-hf style configuration >>> model = LlavaOnevisionForConditionalGeneration(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "llava_onevision" attribute_map = { "image_token_id": "image_token_index", "video_token_id": "video_token_index", } sub_configs = {"text_config": AutoConfig, "vision_config": AutoConfig} def __init__( self, vision_config=None, text_config=None, image_token_index=151646, video_token_index=151647, projector_hidden_act="gelu", vision_feature_select_strategy="full", vision_feature_layer=-1, vision_aspect_ratio="anyres_max_9", image_grid_pinpoints=None, tie_word_embeddings=False, multimodal_projector_bias=True, **kwargs, ): self.image_token_index = image_token_index self.video_token_index = video_token_index self.projector_hidden_act = projector_hidden_act self.multimodal_projector_bias = multimodal_projector_bias if vision_feature_select_strategy not in ["default", "full"]: raise ValueError( 
"vision_feature_select_strategy should be one of 'default', 'full'." f"Got: {vision_feature_select_strategy}" ) self.vision_feature_select_strategy = vision_feature_select_strategy self.vision_feature_layer = vision_feature_layer self.vision_aspect_ratio = vision_aspect_ratio image_grid_pinpoints = ( image_grid_pinpoints if image_grid_pinpoints is not None else [ [384, 384], [384, 768], [384, 1152], [384, 1536], [384, 1920], [384, 2304], [768, 384], [768, 768], [768, 1152], [768, 1536], [768, 1920], [768, 2304], [1152, 384], [1152, 768], [1152, 1152], [1152, 1536], [1152, 1920], [1152, 2304], [1536, 384], [1536, 768], [1536, 1152], [1536, 1536], [1536, 1920], [1536, 2304], [1920, 384], [1920, 768], [1920, 1152], [1920, 1536], [1920, 1920], [1920, 2304], [2304, 384], [2304, 768], [2304, 1152], [2304, 1536], [2304, 1920], [2304, 2304], ] ) self.image_grid_pinpoints = image_grid_pinpoints if isinstance(vision_config, dict): vision_config["model_type"] = vision_config.get("model_type", "siglip_vision_model") vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config) elif vision_config is None: vision_config = CONFIG_MAPPING["siglip_vision_model"]( hidden_size=1152, intermediate_size=4304, patch_size=14, image_size=384, num_hidden_layers=26, num_attention_heads=16, vision_use_head=False, ) self.vision_config = vision_config if isinstance(text_config, dict): text_config["model_type"] = text_config.get("model_type", "qwen2") text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config) elif text_config is None: text_config = CONFIG_MAPPING["qwen2"]() self.text_config = text_config super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs) __all__ = ["LlavaOnevisionConfig"]
LlavaOnevisionConfig
python
allegroai__clearml
clearml/backend_api/services/v2_23/events.py
{ "start": 165491, "end": 171629 }
class ____(Request): """ Get plot events for the requested amount of iterations per each task :param metrics: List of metrics and variants :type metrics: Sequence[TaskMetricVariants] :param iters: Max number of latest iterations for which to return plots :type iters: int :param navigate_earlier: If set then events are retrieved from latest iterations to earliest ones. Otherwise from earliest iterations to the latest. The default is True :type navigate_earlier: bool :param refresh: If set then scroll will be moved to the latest iterations. The default is False :type refresh: bool :param scroll_id: Scroll ID of previous call (used for getting more results) :type scroll_id: str :param model_events: If set then the retrieving model plots. Otherwise task plots :type model_events: bool """ _service = "events" _action = "plots" _version = "2.23" _schema = { "definitions": { "task_metric_variants": { "properties": { "metric": {"description": "Metric name", "type": "string"}, "task": {"description": "Task ID", "type": "string"}, "variants": { "description": "Metric variant names", "items": {"type": "string"}, "type": "array", }, }, "required": ["task"], "type": "object", } }, "properties": { "iters": { "description": "Max number of latest iterations for which to return plots", "type": "integer", }, "metrics": { "description": "List of metrics and variants", "items": {"$ref": "#/definitions/task_metric_variants"}, "type": "array", }, "model_events": { "default": False, "description": "If set then the retrieving model plots. Otherwise task plots", "type": "boolean", }, "navigate_earlier": { "description": "If set then events are retrieved from latest iterations to earliest ones. Otherwise from earliest iterations to the latest. The default is True", "type": "boolean", }, "refresh": { "description": "If set then scroll will be moved to the latest iterations. 
The default is False", "type": "boolean", }, "scroll_id": { "description": "Scroll ID of previous call (used for getting more results)", "type": "string", }, }, "required": ["metrics"], "type": "object", } def __init__( self, metrics: List[Any], iters: Optional[int] = None, navigate_earlier: Optional[bool] = None, refresh: Optional[bool] = None, scroll_id: Optional[str] = None, model_events: Optional[bool] = False, **kwargs: Any ) -> None: super(PlotsRequest, self).__init__(**kwargs) self.metrics = metrics self.iters = iters self.navigate_earlier = navigate_earlier self.refresh = refresh self.scroll_id = scroll_id self.model_events = model_events @schema_property("metrics") def metrics(self) -> List[Any]: return self._property_metrics @metrics.setter def metrics(self, value: List[Any]) -> None: if value is None: self._property_metrics = None return self.assert_isinstance(value, "metrics", (list, tuple)) if any((isinstance(v, dict) for v in value)): value = [TaskMetricVariants.from_dict(v) if isinstance(v, dict) else v for v in value] else: self.assert_isinstance(value, "metrics", TaskMetricVariants, is_array=True) self._property_metrics = value @schema_property("iters") def iters(self) -> Optional[int]: return self._property_iters @iters.setter def iters(self, value: Optional[int]) -> None: if value is None: self._property_iters = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "iters", six.integer_types) self._property_iters = value @schema_property("navigate_earlier") def navigate_earlier(self) -> Optional[bool]: return self._property_navigate_earlier @navigate_earlier.setter def navigate_earlier(self, value: Optional[bool]) -> None: if value is None: self._property_navigate_earlier = None return self.assert_isinstance(value, "navigate_earlier", (bool,)) self._property_navigate_earlier = value @schema_property("refresh") def refresh(self) -> Optional[bool]: return self._property_refresh 
@refresh.setter def refresh(self, value: Optional[bool]) -> None: if value is None: self._property_refresh = None return self.assert_isinstance(value, "refresh", (bool,)) self._property_refresh = value @schema_property("scroll_id") def scroll_id(self) -> Optional[str]: return self._property_scroll_id @scroll_id.setter def scroll_id(self, value: Optional[str]) -> None: if value is None: self._property_scroll_id = None return self.assert_isinstance(value, "scroll_id", six.string_types) self._property_scroll_id = value @schema_property("model_events") def model_events(self) -> Optional[bool]: return self._property_model_events @model_events.setter def model_events(self, value: Optional[bool]) -> None: if value is None: self._property_model_events = None return self.assert_isinstance(value, "model_events", (bool,)) self._property_model_events = value
PlotsRequest
python
facebook__pyre-check
client/log/log.py
{ "start": 1501, "end": 1542 }
class ____: LAMBDA: str = "ƛ"
Character
python
tensorflow__tensorflow
tensorflow/lite/python/util_test.py
{ "start": 12959, "end": 16289 }
class ____(test_util.TensorFlowTestCase, parameterized.TestCase): @classmethod def setUpClass(cls): super(UtilModifyIntegerQuantizedModelIOTypeTest, cls).setUpClass() cls.post_train_int8_model = _generate_integer_tflite_model() cls.post_train_int16_model = _generate_integer_tflite_model( quantization_type=dtypes.int16) @parameterized.named_parameters(_test_param_modify_integer_model_io_type()) def test(self, is_post_train, quantization_type, in_tftype, out_tftype): """Modify the float input/output type of an integer quantized model.""" def _run_tflite_inference(model, in_tftype, out_tftype): """Run inference on a model with a specific input/output type.""" # Load TFLite model and allocate tensors. interpreter = lite.Interpreter(model_content=model) interpreter.allocate_tensors() input_details = interpreter.get_input_details()[0] output_details = interpreter.get_output_details()[0] # Validate TFLite model input and output types self.assertEqual(input_details["dtype"], in_tftype.as_numpy_dtype) self.assertEqual(output_details["dtype"], out_tftype.as_numpy_dtype) # Define Input np.random.seed(0) input_data = np.random.uniform(low=0, high=1, size=(1, 28, 28)) input_data = input_data.astype(np.float32) if input_details["dtype"] != np.float32: # quantize float to int scale, zero_point = input_details["quantization"] input_data = input_data / scale + zero_point input_data = input_data.astype(input_details["dtype"]) # Run Inference interpreter.set_tensor(input_details["index"], input_data) interpreter.invoke() # Get output output_data = interpreter.get_tensor(output_details["index"])[0] if output_details["dtype"] != np.float32: # dequantize int to float scale, zero_point = output_details["quantization"] output_data = output_data.astype(np.float32) output_data = (output_data - zero_point) * scale return output_data if is_post_train and quantization_type == tf.int8: model = self.__class__.post_train_int8_model elif is_post_train and quantization_type == tf.int16: model = 
self.__class__.post_train_int16_model else: model = None # Run model inference with float input output type output_data = _run_tflite_inference(model, tf.float32, tf.float32) # Modify the model io types to the target input/output types. model_io = util.modify_model_io_type(model, in_tftype, out_tftype) # Run model inference with modified integer input output type output_io_data = _run_tflite_inference(model_io, in_tftype, out_tftype) # Validate that both the outputs are the same self.assertAllClose(output_data, output_io_data, atol=1.0) # Modify the model with the target input/output types should be a no op. model_io = util.modify_model_io_type(model_io, in_tftype, out_tftype) # Run model inference with modified integer input output type output_io_data = _run_tflite_inference(model_io, in_tftype, out_tftype) # Validate that both the outputs are the same self.assertAllClose(output_data, output_io_data, atol=1.0)
UtilModifyIntegerQuantizedModelIOTypeTest
python
getsentry__sentry
src/sentry/templatetags/sentry_features.py
{ "start": 695, "end": 1345 }
class ____(template.Node): def __init__(self, nodelist_true, nodelist_false, name, params): self.nodelist_true = nodelist_true self.nodelist_false = nodelist_false self.name = name self.params = [template.Variable(i) for i in params] def render(self, context): params = [i.resolve(context) for i in self.params] if "request" in context: user = context["request"].user else: user = None if not features.has(self.name, actor=user, *params): return self.nodelist_false.render(context) return self.nodelist_true.render(context)
FeatureNode
python
dagster-io__dagster
python_modules/libraries/dagster-dg-cli/dagster_dg_cli/api_layer/api/secret.py
{ "start": 441, "end": 2270 }
class ____: """Secret API operations.""" client: IGraphQLClient def list_secrets( self, location_name: Optional[str] = None, scope: Optional[str] = None, limit: Optional[int] = None, ) -> "DgApiSecretList": """List secrets with optional filtering. Args: location_name: Optional filter by code location name scope: Optional scope filter ("deployment" or "organization") limit: Optional limit on number of results Returns: DgApiSecretList: List of secrets (values are never included for security) Note: Secret values are never exposed in list operations for security. Use get_secret with include_value=True to retrieve specific values. """ return list_secrets_via_graphql( self.client, location_name=location_name, scope=scope, include_values=False, # Never expose values in list operations limit=limit, ) def get_secret( self, secret_name: str, location_name: Optional[str] = None, include_value: bool = False, ) -> "DgApiSecret": """Get a specific secret. Args: secret_name: Name of the secret to retrieve location_name: Optional filter by code location name include_value: Whether to include the secret value (default: False for security) Returns: DgApiSecret: Single secret with optional value Raises: ValueError: If secret not found """ return get_secret_via_graphql( self.client, secret_name=secret_name, location_name=location_name, include_value=include_value, )
DgApiSecretApi
python
openai__openai-python
src/openai/types/conversations/conversation_item.py
{ "start": 4908, "end": 5345 }
class ____(BaseModel): id: str """The unique ID of the approval response""" approval_request_id: str """The ID of the approval request being answered.""" approve: bool """Whether the request was approved.""" type: Literal["mcp_approval_response"] """The type of the item. Always `mcp_approval_response`.""" reason: Optional[str] = None """Optional reason for the decision."""
McpApprovalResponse
python
pytest-dev__pytest
testing/test_terminal.py
{ "start": 49737, "end": 67825 }
class ____: """Test class which can be subclassed with a different option provider to run e.g. distributed tests.""" def test_collect_fail(self, pytester: Pytester, option) -> None: pytester.makepyfile("import xyz\n") result = pytester.runpytest(*option.args) result.stdout.fnmatch_lines( ["ImportError while importing*", "*No module named *xyz*", "*1 error*"] ) def test_maxfailures(self, pytester: Pytester, option) -> None: pytester.makepyfile( """ def test_1(): assert 0 def test_2(): assert 0 def test_3(): assert 0 """ ) result = pytester.runpytest("--maxfail=2", *option.args) result.stdout.fnmatch_lines( [ "*def test_1():*", "*def test_2():*", "*! stopping after 2 failures !*", "*2 failed*", ] ) def test_maxfailures_with_interrupted(self, pytester: Pytester) -> None: pytester.makepyfile( """ def test(request): request.session.shouldstop = "session_interrupted" assert 0 """ ) result = pytester.runpytest("--maxfail=1", "-ra") result.stdout.fnmatch_lines( [ "*= short test summary info =*", "FAILED *", "*! stopping after 1 failures !*", "*! session_interrupted !*", "*= 1 failed in*", ] ) def test_tb_option(self, pytester: Pytester, option) -> None: pytester.makepyfile( """ import pytest def g(): raise IndexError def test_func(): print(6*7) g() # --calling-- """ ) for tbopt in ["long", "short", "no"]: print(f"testing --tb={tbopt}...") result = pytester.runpytest("-rN", f"--tb={tbopt}") s = result.stdout.str() if tbopt == "long": assert "print(6*7)" in s else: assert "print(6*7)" not in s if tbopt != "no": assert "--calling--" in s assert "IndexError" in s else: assert "FAILURES" not in s assert "--calling--" not in s assert "IndexError" not in s def test_tb_line_show_capture(self, pytester: Pytester, option) -> None: output_to_capture = "help! let me out!" 
pytester.makepyfile( f""" import pytest def test_fail(): print('{output_to_capture}') assert False """ ) result = pytester.runpytest("--tb=line") result.stdout.fnmatch_lines(["*- Captured stdout call -*", output_to_capture]) def test_tb_crashline(self, pytester: Pytester, option) -> None: p = pytester.makepyfile( """ import pytest def g(): raise IndexError def test_func1(): print(6*7) g() # --calling-- def test_func2(): assert 0, "hello" """ ) result = pytester.runpytest("--tb=line") bn = p.name result.stdout.fnmatch_lines( [f"*{bn}:3: IndexError*", f"*{bn}:8: AssertionError: hello*"] ) s = result.stdout.str() assert "def test_func2" not in s def test_tb_crashline_pytrace_false(self, pytester: Pytester, option) -> None: p = pytester.makepyfile( """ import pytest def test_func1(): pytest.fail('test_func1', pytrace=False) """ ) result = pytester.runpytest("--tb=line") result.stdout.str() bn = p.name result.stdout.fnmatch_lines([f"*{bn}:3: Failed: test_func1"]) def test_pytest_report_header(self, pytester: Pytester, option) -> None: pytester.makeconftest( """ def pytest_sessionstart(session): session.config._somevalue = 42 def pytest_report_header(config): return "hello: %s" % config._somevalue """ ) pytester.mkdir("a").joinpath("conftest.py").write_text( """ def pytest_report_header(config, start_path): return ["line1", str(start_path)] """, encoding="utf-8", ) result = pytester.runpytest("a") result.stdout.fnmatch_lines(["*hello: 42*", "line1", str(pytester.path)]) def test_show_capture(self, pytester: Pytester) -> None: pytester.makepyfile( """ import sys import logging def test_one(): sys.stdout.write('!This is stdout!') sys.stderr.write('!This is stderr!') logging.warning('!This is a warning log msg!') assert False, 'Something failed' """ ) result = pytester.runpytest("--tb=short") result.stdout.fnmatch_lines( [ "!This is stdout!", "!This is stderr!", "*WARNING*!This is a warning log msg!", ] ) result = pytester.runpytest("--show-capture=all", "--tb=short") 
result.stdout.fnmatch_lines( [ "!This is stdout!", "!This is stderr!", "*WARNING*!This is a warning log msg!", ] ) stdout = pytester.runpytest("--show-capture=stdout", "--tb=short").stdout.str() assert "!This is stderr!" not in stdout assert "!This is stdout!" in stdout assert "!This is a warning log msg!" not in stdout stdout = pytester.runpytest("--show-capture=stderr", "--tb=short").stdout.str() assert "!This is stdout!" not in stdout assert "!This is stderr!" in stdout assert "!This is a warning log msg!" not in stdout stdout = pytester.runpytest("--show-capture=log", "--tb=short").stdout.str() assert "!This is stdout!" not in stdout assert "!This is stderr!" not in stdout assert "!This is a warning log msg!" in stdout stdout = pytester.runpytest("--show-capture=no", "--tb=short").stdout.str() assert "!This is stdout!" not in stdout assert "!This is stderr!" not in stdout assert "!This is a warning log msg!" not in stdout def test_show_capture_with_teardown_logs(self, pytester: Pytester) -> None: """Ensure that the capturing of teardown logs honor --show-capture setting""" pytester.makepyfile( """ import logging import sys import pytest @pytest.fixture(scope="function", autouse="True") def hook_each_test(request): yield sys.stdout.write("!stdout!") sys.stderr.write("!stderr!") logging.warning("!log!") def test_func(): assert False """ ) result = pytester.runpytest("--show-capture=stdout", "--tb=short").stdout.str() assert "!stdout!" in result assert "!stderr!" not in result assert "!log!" not in result result = pytester.runpytest("--show-capture=stderr", "--tb=short").stdout.str() assert "!stdout!" not in result assert "!stderr!" in result assert "!log!" not in result result = pytester.runpytest("--show-capture=log", "--tb=short").stdout.str() assert "!stdout!" not in result assert "!stderr!" not in result assert "!log!" in result result = pytester.runpytest("--show-capture=no", "--tb=short").stdout.str() assert "!stdout!" not in result assert "!stderr!" 
not in result assert "!log!" not in result @pytest.mark.xfail("not hasattr(os, 'dup')") def test_fdopen_kept_alive_issue124(pytester: Pytester) -> None: pytester.makepyfile( """ import os, sys k = [] def test_open_file_and_keep_alive(capfd): stdout = os.fdopen(1, 'w', buffering=1, encoding='utf-8') k.append(stdout) def test_close_kept_alive_file(): stdout = k.pop() stdout.close() """ ) result = pytester.runpytest() result.stdout.fnmatch_lines(["*2 passed*"]) def test_tbstyle_native_setup_error(pytester: Pytester) -> None: pytester.makepyfile( """ import pytest @pytest.fixture def setup_error_fixture(): raise Exception("error in exception") def test_error_fixture(setup_error_fixture): pass """ ) result = pytester.runpytest("--tb=native") result.stdout.fnmatch_lines( ['*File *test_tbstyle_native_setup_error.py", line *, in setup_error_fixture*'] ) def test_terminal_summary(pytester: Pytester) -> None: pytester.makeconftest( """ def pytest_terminal_summary(terminalreporter, exitstatus): w = terminalreporter w.section("hello") w.line("world") w.line("exitstatus: {0}".format(exitstatus)) """ ) result = pytester.runpytest() result.stdout.fnmatch_lines( """ *==== hello ====* world exitstatus: 5 """ ) @pytest.mark.filterwarnings("default::UserWarning") def test_terminal_summary_warnings_are_displayed(pytester: Pytester) -> None: """Test that warnings emitted during pytest_terminal_summary are displayed. (#1305). 
""" pytester.makeconftest( """ import warnings def pytest_terminal_summary(terminalreporter): warnings.warn(UserWarning('internal warning')) """ ) pytester.makepyfile( """ def test_failure(): import warnings warnings.warn("warning_from_" + "test") assert 0 """ ) result = pytester.runpytest("-ra") result.stdout.fnmatch_lines( [ "*= warnings summary =*", "*warning_from_test*", "*= short test summary info =*", "*= warnings summary (final) =*", "*conftest.py:3:*internal warning", "*== 1 failed, 2 warnings in *", ] ) result.stdout.no_fnmatch_line("*None*") stdout = result.stdout.str() assert stdout.count("warning_from_test") == 1 assert stdout.count("=== warnings summary ") == 2 @pytest.mark.filterwarnings("default::UserWarning") def test_terminal_summary_warnings_header_once(pytester: Pytester) -> None: pytester.makepyfile( """ def test_failure(): import warnings warnings.warn("warning_from_" + "test") assert 0 """ ) result = pytester.runpytest("-ra") result.stdout.fnmatch_lines( [ "*= warnings summary =*", "*warning_from_test*", "*= short test summary info =*", "*== 1 failed, 1 warning in *", ] ) result.stdout.no_fnmatch_line("*None*") stdout = result.stdout.str() assert stdout.count("warning_from_test") == 1 assert stdout.count("=== warnings summary ") == 1 @pytest.mark.filterwarnings("default") def test_terminal_no_summary_warnings_header_once(pytester: Pytester) -> None: pytester.makepyfile( """ def test_failure(): import warnings warnings.warn("warning_from_" + "test") assert 0 """ ) result = pytester.runpytest("--no-summary") result.stdout.no_fnmatch_line("*= warnings summary =*") result.stdout.no_fnmatch_line("*= short test summary info =*") @pytest.fixture(scope="session") def tr() -> TerminalReporter: config = _pytest.config._prepareconfig([]) return TerminalReporter(config) @pytest.mark.parametrize( "exp_color, exp_line, stats_arg", [ # The method under test only cares about the length of each # dict value, not the actual contents, so tuples of anything # 
suffice # Important statuses -- the highest priority of these always wins ("red", [("1 failed", {"bold": True, "red": True})], {"failed": [1]}), ( "red", [ ("1 failed", {"bold": True, "red": True}), ("1 passed", {"bold": False, "green": True}), ], {"failed": [1], "passed": [1]}, ), ("red", [("1 error", {"bold": True, "red": True})], {"error": [1]}), ("red", [("2 errors", {"bold": True, "red": True})], {"error": [1, 2]}), ( "red", [ ("1 passed", {"bold": False, "green": True}), ("1 error", {"bold": True, "red": True}), ], {"error": [1], "passed": [1]}, ), # (a status that's not known to the code) ("yellow", [("1 weird", {"bold": True, "yellow": True})], {"weird": [1]}), ( "yellow", [ ("1 passed", {"bold": False, "green": True}), ("1 weird", {"bold": True, "yellow": True}), ], {"weird": [1], "passed": [1]}, ), ("yellow", [("1 warning", {"bold": True, "yellow": True})], {"warnings": [1]}), ( "yellow", [ ("1 passed", {"bold": False, "green": True}), ("1 warning", {"bold": True, "yellow": True}), ], {"warnings": [1], "passed": [1]}, ), ( "green", [("5 passed", {"bold": True, "green": True})], {"passed": [1, 2, 3, 4, 5]}, ), # "Boring" statuses. These have no effect on the color of the summary # line. Thus, if *every* test has a boring status, the summary line stays # at its default color, i.e. 
yellow, to warn the user that the test run # produced no useful information ("yellow", [("1 skipped", {"bold": True, "yellow": True})], {"skipped": [1]}), ( "green", [ ("1 passed", {"bold": True, "green": True}), ("1 skipped", {"bold": False, "yellow": True}), ], {"skipped": [1], "passed": [1]}, ), ( "yellow", [("1 deselected", {"bold": True, "yellow": True})], {"deselected": [1]}, ), ( "green", [ ("1 passed", {"bold": True, "green": True}), ("1 deselected", {"bold": False, "yellow": True}), ], {"deselected": [1], "passed": [1]}, ), ("yellow", [("1 xfailed", {"bold": True, "yellow": True})], {"xfailed": [1]}), ( "green", [ ("1 passed", {"bold": True, "green": True}), ("1 xfailed", {"bold": False, "yellow": True}), ], {"xfailed": [1], "passed": [1]}, ), ("yellow", [("1 xpassed", {"bold": True, "yellow": True})], {"xpassed": [1]}), ( "yellow", [ ("1 passed", {"bold": False, "green": True}), ("1 xpassed", {"bold": True, "yellow": True}), ], {"xpassed": [1], "passed": [1]}, ), # Likewise if no tests were found at all ("yellow", [("no tests ran", {"yellow": True})], {}), # Test the empty-key special case ("yellow", [("no tests ran", {"yellow": True})], {"": [1]}), ( "green", [("1 passed", {"bold": True, "green": True})], {"": [1], "passed": [1]}, ), # A couple more complex combinations ( "red", [ ("1 failed", {"bold": True, "red": True}), ("2 passed", {"bold": False, "green": True}), ("3 xfailed", {"bold": False, "yellow": True}), ], {"passed": [1, 2], "failed": [1], "xfailed": [1, 2, 3]}, ), ( "green", [ ("1 passed", {"bold": True, "green": True}), ("2 skipped", {"bold": False, "yellow": True}), ("3 deselected", {"bold": False, "yellow": True}), ("2 xfailed", {"bold": False, "yellow": True}), ], { "passed": [1], "skipped": [1, 2], "deselected": [1, 2, 3], "xfailed": [1, 2], }, ), ], ) def test_summary_stats( tr: TerminalReporter, exp_line: list[tuple[str, dict[str, bool]]], exp_color: str, stats_arg: dict[str, list[object]], ) -> None: tr.stats = stats_arg # Fake 
"_is_last_item" to be True. class fake_session: testscollected = 0 tr._session = fake_session # type: ignore[assignment] assert tr._is_last_item # Reset cache. tr._main_color = None print(f"Based on stats: {stats_arg}") print(f'Expect summary: "{exp_line}"; with color "{exp_color}"') (line, color) = tr.build_summary_stats_line() print(f'Actually got: "{line}"; with color "{color}"') assert line == exp_line assert color == exp_color def test_skip_counting_towards_summary(tr): class DummyReport(BaseReport): count_towards_summary = True r1 = DummyReport() r2 = DummyReport() tr.stats = {"failed": (r1, r2)} tr._main_color = None res = tr.build_summary_stats_line() assert res == ([("2 failed", {"bold": True, "red": True})], "red") r1.count_towards_summary = False tr.stats = {"failed": (r1, r2)} tr._main_color = None res = tr.build_summary_stats_line() assert res == ([("1 failed", {"bold": True, "red": True})], "red")
TestGenericReporting
python
ray-project__ray
python/ray/runtime_context.py
{ "start": 450, "end": 19883 }
class ____(object): """A class used for getting runtime context.""" def __init__(self, worker): assert worker is not None self.worker = worker @Deprecated( message="Use get_xxx_id() methods to get relevant ids instead", warning=True ) def get(self) -> Dict[str, Any]: """Get a dictionary of the current context. Returns: dict: Dictionary of the current context. """ context = { "job_id": self.job_id, "node_id": self.node_id, "namespace": self.namespace, } if self.worker.mode == ray._private.worker.WORKER_MODE: if self.task_id is not None: context["task_id"] = self.task_id if self.actor_id is not None: context["actor_id"] = self.actor_id return context @property @Deprecated(message="Use get_job_id() instead", warning=True) def job_id(self): """Get current job ID for this worker or driver. Job ID is the id of your Ray drivers that create tasks or actors. Returns: If called by a driver, this returns the job ID. If called in a task, return the job ID of the associated driver. """ job_id = self.worker.current_job_id assert not job_id.is_nil() return job_id def get_job_id(self) -> str: """Get current job ID for this worker or driver. Job ID is the id of your Ray drivers that create tasks or actors. Returns: If called by a driver, this returns the job ID. If called in a task, return the job ID of the associated driver. The job ID will be hex format. Raises: AssertionError: If not called in a driver or worker. Generally, this means that ray.init() was not called. """ assert ( ray.is_initialized() ), "Job ID is not available because Ray has not been initialized." job_id = self.worker.current_job_id return job_id.hex() @property @Deprecated(message="Use get_node_id() instead", warning=True) def node_id(self): """Get the ID for the node that this process is running on. This can be called from within a driver, task, or actor. When called from a driver that is connected to a remote Ray cluster using Ray Client, this returns the ID of the head node. 
Returns: A node id for this worker or driver. """ node_id = self.worker.current_node_id assert not node_id.is_nil() return node_id def get_node_id(self) -> str: """Get the ID for the node that this process is running on. This can be called from within a driver, task, or actor. When called from a driver that is connected to a remote Ray cluster using Ray Client, this returns the ID of the head node. Returns: A node id in hex format for this worker or driver. Raises: AssertionError: If not called in a driver or worker. Generally, this means that ray.init() was not called. """ assert ( ray.is_initialized() ), "Node ID is not available because Ray has not been initialized." node_id = self.worker.current_node_id return node_id.hex() def get_worker_id(self) -> str: """Get current worker ID for this worker or driver process. Returns: A worker id in hex format for this worker or driver process. """ assert ( ray.is_initialized() ), "Worker ID is not available because Ray has not been initialized." return self.worker.worker_id.hex() @property @Deprecated(message="Use get_task_id() instead", warning=True) def task_id(self): """Get current task ID for this worker. Task ID is the id of a Ray task. This shouldn't be used in a driver process. Example: .. testcode:: import ray @ray.remote class Actor: def ready(self): return True @ray.remote def f(): return True # All the below code generates different task ids. # Task ids are available for actor creation. a = Actor.remote() # Task ids are available for actor tasks. a.ready.remote() # Task ids are available for normal tasks. f.remote() Returns: The current worker's task id. None if there's no task id. """ # only worker mode has task_id assert ( self.worker.mode == ray._private.worker.WORKER_MODE ), f"This method is only available when the process is a\ worker. 
Current mode: {self.worker.mode}" task_id = self._get_current_task_id() return task_id if not task_id.is_nil() else None def get_task_id(self) -> Optional[str]: """Get current task ID for this worker. Task ID is the id of a Ray task. The ID will be in hex format. This shouldn't be used in a driver process. Example: .. testcode:: import ray @ray.remote class Actor: def get_task_id(self): return ray.get_runtime_context().get_task_id() @ray.remote def get_task_id(): return ray.get_runtime_context().get_task_id() # All the below code generates different task ids. a = Actor.remote() # Task ids are available for actor tasks. print(ray.get(a.get_task_id.remote())) # Task ids are available for normal tasks. print(ray.get(get_task_id.remote())) .. testoutput:: :options: +MOCK 16310a0f0a45af5c2746a0e6efb235c0962896a201000000 c2668a65bda616c1ffffffffffffffffffffffff01000000 Returns: The current worker's task id in hex. None if there's no task id. """ # only worker mode has task_id if self.worker.mode != ray._private.worker.WORKER_MODE: logger.warning( "This method is only available when the process is a " f"worker. Current mode: {self.worker.mode}" ) return None task_id = self._get_current_task_id() return task_id.hex() if not task_id.is_nil() else None def _get_current_task_id(self) -> TaskID: return self.worker.current_task_id def get_task_name(self) -> Optional[str]: """Get current task name for this worker. Task name by default is the task's funciton call string. It can also be specified in options when triggering a task. Example: .. testcode:: import ray @ray.remote class Actor: def get_task_name(self): return ray.get_runtime_context().get_task_name() @ray.remote class AsyncActor: async def get_task_name(self): return ray.get_runtime_context().get_task_name() @ray.remote def get_task_name(): return ray.get_runtime_context().get_task_name() a = Actor.remote() b = AsyncActor.remote() # Task names are available for actor tasks. 
print(ray.get(a.get_task_name.remote())) # Task names are avaiable for async actor tasks. print(ray.get(b.get_task_name.remote())) # Task names are available for normal tasks. # Get default task name print(ray.get(get_task_name.remote())) # Get specified task name print(ray.get(get_task_name.options(name="task_name").remote())) .. testoutput:: :options: +MOCK Actor.get_task_name AsyncActor.get_task_name get_task_name task_nams Returns: The current worker's task name """ # only worker mode has task_name if self.worker.mode != ray._private.worker.WORKER_MODE: logger.warning( "This method is only available when the process is a " f"worker. Current mode: {self.worker.mode}" ) return None return self.worker.current_task_name def get_task_function_name(self) -> Optional[str]: """Get current task function name string for this worker. Example: .. testcode:: import ray @ray.remote class Actor: def get_task_function_name(self): return ray.get_runtime_context().get_task_function_name() @ray.remote class AsyncActor: async def get_task_function_name(self): return ray.get_runtime_context().get_task_function_name() @ray.remote def get_task_function_name(): return ray.get_runtime_context().get_task_function_name() a = Actor.remote() b = AsyncActor.remote() # Task functions are available for actor tasks. print(ray.get(a.get_task_function_name.remote())) # Task functions are available for async actor tasks. print(ray.get(b.get_task_function_name.remote())) # Task functions are available for normal tasks. print(ray.get(get_task_function_name.remote())) .. testoutput:: :options: +MOCK [python modual name].Actor.get_task_function_name [python modual name].AsyncActor.get_task_function_name [python modual name].get_task_function_name Returns: The current worker's task function call string """ # only worker mode has task_function_name if self.worker.mode != ray._private.worker.WORKER_MODE: logger.warning( "This method is only available when the process is a " f"worker. 
Current mode: {self.worker.mode}" ) return None return self.worker.current_task_function_name @property @Deprecated(message="Use get_actor_id() instead", warning=True) def actor_id(self): """Get the current actor ID in this worker. ID of the actor of the current process. This shouldn't be used in a driver process. Returns: The current actor id in this worker. None if there's no actor id. """ # only worker mode has actor_id assert ( self.worker.mode == ray._private.worker.WORKER_MODE ), f"This method is only available when the process is a\ worker. Current mode: {self.worker.mode}" actor_id = self.worker.actor_id return actor_id if not actor_id.is_nil() else None def get_actor_id(self) -> Optional[str]: """Get the current actor ID in this worker. ID of the actor of the current process. This shouldn't be used in a driver process. The ID will be in hex format. Returns: The current actor id in hex format in this worker. None if there's no actor id. """ # only worker mode has actor_id if self.worker.mode != ray._private.worker.WORKER_MODE: logger.debug( "This method is only available when the process is a " f"worker. Current mode: {self.worker.mode}" ) return None actor_id = self.worker.actor_id return actor_id.hex() if not actor_id.is_nil() else None def get_actor_name(self) -> Optional[str]: """Get the current actor name of this worker. This shouldn't be used in a driver process. The name is in string format. Returns: The current actor name of this worker. If a current worker is an actor, and if actor name doesn't exist, it returns an empty string. If a current worker is not an actor, it returns None. """ # only worker mode has actor_id if self.worker.mode != ray._private.worker.WORKER_MODE: logger.warning( "This method is only available when the process is a " f"worker. 
Current mode: {self.worker.mode}" ) return None actor_id = self.worker.actor_id return self.worker.actor_name if not actor_id.is_nil() else None @property def namespace(self): """Get the current namespace of this worker. Returns: The current namespace of this worker. """ return self.worker.namespace @property def was_current_actor_reconstructed(self): """Check whether this actor has been restarted. Returns: Whether this actor has been ever restarted. """ assert ( not self.actor_id.is_nil() ), "This method should't be called inside Ray tasks." actor_info = actors(actor_id=self.actor_id.hex()) return actor_info and actor_info["NumRestarts"] != 0 @property @Deprecated(message="Use get_placement_group_id() instead", warning=True) def current_placement_group_id(self): """Get the current Placement group ID of this worker. Returns: The current placement group id of this worker. """ return self.worker.placement_group_id def get_placement_group_id(self) -> Optional[str]: """Get the current Placement group ID of this worker. Returns: The current placement group id in hex format of this worker. """ pg_id = self.worker.placement_group_id return pg_id.hex() if not pg_id.is_nil() else None @property def should_capture_child_tasks_in_placement_group(self): """Get if the current task should capture parent's placement group. This returns True if it is called inside a driver. Returns: Return True if the current task should implicitly capture the parent placement group. """ return self.worker.should_capture_child_tasks_in_placement_group def get_assigned_resources(self): """Get the assigned resources to this worker. By default for tasks, this will return {"CPU": 1}. By default for actors, this will return {}. This is because actors do not have CPUs assigned to them by default. Returns: A dictionary mapping the name of a resource to a float, where the float represents the amount of that resource reserved for this worker. 
""" assert ( self.worker.mode == ray._private.worker.WORKER_MODE ), f"This method is only available when the process is a\ worker. Current mode: {self.worker.mode}" self.worker.check_connected() resource_id_map = self.worker.core_worker.resource_ids() resource_map = { res: sum(amt for _, amt in mapping) for res, mapping in resource_id_map.items() } result = parse_pg_formatted_resources_to_original(resource_map) return result def get_runtime_env_string(self): """Get the runtime env string used for the current driver or worker. Returns: The runtime env string currently using by this worker. """ return self.worker.runtime_env @property def runtime_env(self): """Get the runtime env used for the current driver or worker. Returns: The runtime env currently using by this worker. The type of return value is ray.runtime_env.RuntimeEnv. """ return RuntimeEnv.deserialize(self.get_runtime_env_string()) @property def current_actor(self): """Get the current actor handle of this actor itself. Returns: The handle of current actor. """ worker = self.worker worker.check_connected() actor_id = worker.actor_id if actor_id.is_nil(): raise RuntimeError("This method is only available in an actor.") return worker.core_worker.get_actor_handle(actor_id) @property def gcs_address(self): """Get the GCS address of the ray cluster. Returns: The GCS address of the cluster. """ self.worker.check_connected() return self.worker.gcs_client.address @Deprecated(message="Use get_accelerator_ids() instead", warning=True) def get_resource_ids(self) -> Dict[str, List[str]]: return self.get_accelerator_ids() def get_accelerator_ids(self) -> Dict[str, List[str]]: """ Get the current worker's visible accelerator ids. Returns: A dictionary keyed by the accelerator resource name. The values are a list of ids `{'GPU': ['0', '1'], 'neuron_cores': ['0', '1'], 'TPU': ['0', '1']}`. 
""" worker = self.worker worker.check_connected() ids_dict: Dict[str, List[str]] = {} for ( accelerator_resource_name ) in ray._private.accelerators.get_all_accelerator_resource_names(): accelerator_ids = worker.get_accelerator_ids_for_accelerator_resource( accelerator_resource_name, f"^{accelerator_resource_name}_group_[0-9A-Za-z]+$", ) ids_dict[accelerator_resource_name] = [str(id) for id in accelerator_ids] return ids_dict def get_node_labels(self) -> Dict[str, List[str]]: """ Get the node labels of the current worker. Returns: A dictionary of label key-value pairs. """ worker = self.worker worker.check_connected() return worker.current_node_labels _runtime_context = None _runtime_context_lock = threading.Lock() @PublicAPI @client_mode_hook def get_runtime_context() -> RuntimeContext: """Get the runtime context of the current driver/worker. The obtained runtime context can be used to get the metadata of the current driver, task, or actor. Example: .. testcode:: import ray # Get the job id. ray.get_runtime_context().get_job_id() # Get the actor id. ray.get_runtime_context().get_actor_id() # Get the task id. ray.get_runtime_context().get_task_id() """ with _runtime_context_lock: global _runtime_context if _runtime_context is None: _runtime_context = RuntimeContext(ray._private.worker.global_worker) return _runtime_context
RuntimeContext
python
Textualize__textual
src/textual/messages.py
{ "start": 2748, "end": 3834 }
class ____(Message): """Reports if the in-band window resize protocol is supported. https://gist.github.com/rockorager/e695fb2924d36b2bcf1fff4a3704bd83""" def __init__(self, supported: bool, enabled: bool) -> None: """Initialize message. Args: supported: Is the protocol supported? enabled: Is the protocol enabled. """ self.supported = supported self.enabled = enabled super().__init__() def __rich_repr__(self) -> rich.repr.Result: yield "supported", self.supported yield "enabled", self.enabled @classmethod def from_setting_parameter(cls, setting_parameter: int) -> InBandWindowResize: """Construct the message from the setting parameter. Args: setting_parameter: Setting parameter from stdin. Returns: New InBandWindowResize instance. """ supported = setting_parameter not in (0, 4) enabled = setting_parameter in (1, 3) return InBandWindowResize(supported, enabled)
InBandWindowResize
python
tensorflow__tensorflow
tensorflow/python/ops/math_grad_test.py
{ "start": 25993, "end": 26620 }
class ____(test.TestCase): def test_zero_grad_tf_gradients(self): if context.executing_eagerly(): self.skipTest("tf.gradients not supported in eager.") x = constant_op.constant([-1., 0., 1.]) g = self.evaluate(gradients.gradients(math_ops.pow(x, 2), x)[0]) self.assertAllClose([-2., 0., 2.], g) def test_zero_grad_tape(self): x = constant_op.constant([-1, 0., 1.]) with backprop.GradientTape() as tape: tape.watch(x) g = tape.gradient(math_ops.pow(x, 2), x) g = self.evaluate(g) self.assertAllClose([-2., 0., 2.], g) @test_util.run_all_in_graph_and_eager_modes
PowGradTest
python
huggingface__transformers
src/transformers/models/chinese_clip/modeling_chinese_clip.py
{ "start": 7040, "end": 11854 }
class ____(nn.Module): def __init__(self, config: ChineseCLIPVisionConfig): super().__init__() self.config = config self.embed_dim = config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.class_embedding = nn.Parameter(torch.randn(self.embed_dim)) self.patch_embedding = nn.Conv2d( in_channels=config.num_channels, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, bias=False, ) self.num_patches = (self.image_size // self.patch_size) ** 2 self.num_positions = self.num_patches + 1 self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim) self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False) def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: """ This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution images. This method is also adapted to support torch.jit tracing. 
Adapted from: - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211 """ num_patches = embeddings.shape[1] - 1 position_embedding = self.position_embedding.weight.unsqueeze(0) num_positions = position_embedding.shape[1] - 1 # always interpolate when tracing to ensure the exported model works for dynamic input shapes if not torch.jit.is_tracing() and num_patches == num_positions and height == width: return self.position_embedding(self.position_ids) class_pos_embed = position_embedding[:, :1] patch_pos_embed = position_embedding[:, 1:] dim = embeddings.shape[-1] new_height = height // self.patch_size new_width = width // self.patch_size sqrt_num_positions = torch_int(num_positions**0.5) patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim) patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) patch_pos_embed = nn.functional.interpolate( patch_pos_embed, size=(new_height, new_width), mode="bicubic", align_corners=False, ) patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) return torch.cat((class_pos_embed, patch_pos_embed), dim=1) def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding=False) -> torch.Tensor: batch_size, _, height, width = pixel_values.shape if not interpolate_pos_encoding and (height != self.image_size or width != self.image_size): raise ValueError( f"Input image size ({height}*{width}) doesn't match model ({self.image_size}*{self.image_size})." 
) target_dtype = self.patch_embedding.weight.dtype patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid] patch_embeds = patch_embeds.flatten(2).transpose(1, 2) class_embeds = self.class_embedding.expand(batch_size, 1, -1) embeddings = torch.cat([class_embeds, patch_embeds], dim=1) if interpolate_pos_encoding: embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) else: embeddings = embeddings + self.position_embedding(self.position_ids) return embeddings # Copied from transformers.models.align.modeling_align.eager_attention_forward def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: float, dropout: float = 0.0, **kwargs, ): attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling if attention_mask is not None: causal_mask = attention_mask[:, :, :, : key.shape[-2]] attn_weights = attn_weights + causal_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights # Copied from transformers.models.align.modeling_align.AlignTextSelfAttention with Align->ChineseCLIP
ChineseCLIPVisionEmbeddings
python
pytorch__pytorch
test/test_overrides.py
{ "start": 50183, "end": 50790 }
class ____(TestCase): # Regression test for gh-64687 def test_parameter_does_not_prevent_dispatch(self): class MyTensor: @classmethod def __torch_function__(cls, func, types, args=(), kwargs=None): return "called" t1 = MyTensor() t2 = torch.nn.Parameter(torch.rand(2, 2)) self.assertEqual(torch.add(t2, t1), "called") inp = torch.rand(10, 10) self.assertEqual(torch.nn.functional.linear(inp, t1, t2), "called") self.assertEqual(torch.nn.functional.linear(inp, t2, t1), "called")
TestDisabledTorchFunction
python
huggingface__transformers
tests/models/vivit/test_modeling_vivit.py
{ "start": 1514, "end": 5736 }
class ____: def __init__( self, parent, batch_size=2, is_training=True, use_labels=True, num_labels=10, image_size=10, num_frames=8, # decreased, because default 32 takes too much RAM at inference tubelet_size=[2, 4, 4], num_channels=3, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu_fast", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, qkv_bias=True, scope=None, attn_implementation="eager", mask_ratio=0.5, ): self.parent = parent self.batch_size = batch_size self.is_training = is_training self.use_labels = use_labels self.num_labels = num_labels self.image_size = image_size self.num_frames = num_frames self.tubelet_size = tubelet_size self.num_channels = num_channels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.qkv_bias = qkv_bias self.scope = scope self.attn_implementation = attn_implementation self.seq_length = ( (self.image_size // self.tubelet_size[2]) * (self.image_size // self.tubelet_size[1]) * (self.num_frames // self.tubelet_size[0]) ) + 1 # CLS token self.mask_ratio = mask_ratio self.num_masks = int(mask_ratio * self.seq_length) def prepare_config_and_inputs(self): pixel_values = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): config = VivitConfig( num_frames=self.num_frames, image_size=self.image_size, tubelet_size=self.tubelet_size, num_channels=self.num_channels, 
hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, layer_norm_eps=self.layer_norm_eps, qkv_bias=self.qkv_bias, attn_implementation=self.attn_implementation, ) config.num_labels = self.num_labels return config def create_and_check_model(self, config, pixel_values, labels): model = VivitModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_video_classification(self, config, pixel_values, labels): model = VivitForVideoClassification(config) model.to(torch_device) model.eval() result = model(pixel_values) # verify the logits shape expected_shape = torch.Size((self.batch_size, self.num_labels)) self.parent.assertEqual(result.logits.shape, expected_shape) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch
VivitModelTester
python
readthedocs__readthedocs.org
readthedocs/api/v3/serializers.py
{ "start": 31396, "end": 32058 }
class ____(BaseLinksSerializer): _self = serializers.SerializerMethodField() project = serializers.SerializerMethodField() def get__self(self, obj): path = reverse( "projects-redirects-detail", kwargs={ "parent_lookup_project__slug": obj.project.slug, "redirect_pk": obj.pk, }, ) return self._absolute_url(path) def get_project(self, obj): path = reverse( "projects-detail", kwargs={ "project_slug": obj.project.slug, }, ) return self._absolute_url(path)
RedirectLinksSerializer
python
mozilla__bleach
bleach/_vendor/html5lib/treebuilders/base.py
{ "start": 4821, "end": 14565 }
class ____(object): """Base treebuilder implementation * documentClass - the class to use for the bottommost node of a document * elementClass - the class to use for HTML Elements * commentClass - the class to use for comments * doctypeClass - the class to use for doctypes """ # pylint:disable=not-callable # Document class documentClass = None # The class to use for creating a node elementClass = None # The class to use for creating comments commentClass = None # The class to use for creating doctypes doctypeClass = None # Fragment class fragmentClass = None def __init__(self, namespaceHTMLElements): """Create a TreeBuilder :arg namespaceHTMLElements: whether or not to namespace HTML elements """ if namespaceHTMLElements: self.defaultNamespace = "http://www.w3.org/1999/xhtml" else: self.defaultNamespace = None self.reset() def reset(self): self.openElements = [] self.activeFormattingElements = ActiveFormattingElements() # XXX - rename these to headElement, formElement self.headPointer = None self.formPointer = None self.insertFromTable = False self.document = self.documentClass() def elementInScope(self, target, variant=None): # If we pass a node in we match that. if we pass a string # match any node with that name exactNode = hasattr(target, "nameTuple") if not exactNode: if isinstance(target, text_type): target = (namespaces["html"], target) assert isinstance(target, tuple) listElements, invert = listElementsMap[variant] for node in reversed(self.openElements): if exactNode and node == target: return True elif not exactNode and node.nameTuple == target: return True elif (invert ^ (node.nameTuple in listElements)): return False assert False # We should never reach this point def reconstructActiveFormattingElements(self): # Within this algorithm the order of steps described in the # specification is not quite the same as the order of steps in the # code. It should still do the same though. # Step 1: stop the algorithm when there's nothing to do. 
if not self.activeFormattingElements: return # Step 2 and step 3: we start with the last element. So i is -1. i = len(self.activeFormattingElements) - 1 entry = self.activeFormattingElements[i] if entry == Marker or entry in self.openElements: return # Step 6 while entry != Marker and entry not in self.openElements: if i == 0: # This will be reset to 0 below i = -1 break i -= 1 # Step 5: let entry be one earlier in the list. entry = self.activeFormattingElements[i] while True: # Step 7 i += 1 # Step 8 entry = self.activeFormattingElements[i] clone = entry.cloneNode() # Mainly to get a new copy of the attributes # Step 9 element = self.insertElement({"type": "StartTag", "name": clone.name, "namespace": clone.namespace, "data": clone.attributes}) # Step 10 self.activeFormattingElements[i] = element # Step 11 if element == self.activeFormattingElements[-1]: break def clearActiveFormattingElements(self): entry = self.activeFormattingElements.pop() while self.activeFormattingElements and entry != Marker: entry = self.activeFormattingElements.pop() def elementInActiveFormattingElements(self, name): """Check if an element exists between the end of the active formatting elements and the last marker. If it does, return it, else return false""" for item in self.activeFormattingElements[::-1]: # Check for Marker first because if it's a Marker it doesn't have a # name attribute. 
if item == Marker: break elif item.name == name: return item return False def insertRoot(self, token): element = self.createElement(token) self.openElements.append(element) self.document.appendChild(element) def insertDoctype(self, token): name = token["name"] publicId = token["publicId"] systemId = token["systemId"] doctype = self.doctypeClass(name, publicId, systemId) self.document.appendChild(doctype) def insertComment(self, token, parent=None): if parent is None: parent = self.openElements[-1] parent.appendChild(self.commentClass(token["data"])) def createElement(self, token): """Create an element but don't insert it anywhere""" name = token["name"] namespace = token.get("namespace", self.defaultNamespace) element = self.elementClass(name, namespace) element.attributes = token["data"] return element def _getInsertFromTable(self): return self._insertFromTable def _setInsertFromTable(self, value): """Switch the function used to insert an element from the normal one to the misnested table one and back again""" self._insertFromTable = value if value: self.insertElement = self.insertElementTable else: self.insertElement = self.insertElementNormal insertFromTable = property(_getInsertFromTable, _setInsertFromTable) def insertElementNormal(self, token): name = token["name"] assert isinstance(name, text_type), "Element %s not unicode" % name namespace = token.get("namespace", self.defaultNamespace) element = self.elementClass(name, namespace) element.attributes = token["data"] self.openElements[-1].appendChild(element) self.openElements.append(element) return element def insertElementTable(self, token): """Create an element and insert it into the tree""" element = self.createElement(token) if self.openElements[-1].name not in tableInsertModeElements: return self.insertElementNormal(token) else: # We should be in the InTable mode. 
This means we want to do # special magic element rearranging parent, insertBefore = self.getTableMisnestedNodePosition() if insertBefore is None: parent.appendChild(element) else: parent.insertBefore(element, insertBefore) self.openElements.append(element) return element def insertText(self, data, parent=None): """Insert text data.""" if parent is None: parent = self.openElements[-1] if (not self.insertFromTable or (self.insertFromTable and self.openElements[-1].name not in tableInsertModeElements)): parent.insertText(data) else: # We should be in the InTable mode. This means we want to do # special magic element rearranging parent, insertBefore = self.getTableMisnestedNodePosition() parent.insertText(data, insertBefore) def getTableMisnestedNodePosition(self): """Get the foster parent element, and sibling to insert before (or None) when inserting a misnested table node""" # The foster parent element is the one which comes before the most # recently opened table element # XXX - this is really inelegant lastTable = None fosterParent = None insertBefore = None for elm in self.openElements[::-1]: if elm.name == "table": lastTable = elm break if lastTable: # XXX - we should really check that this parent is actually a # node here if lastTable.parent: fosterParent = lastTable.parent insertBefore = lastTable else: fosterParent = self.openElements[ self.openElements.index(lastTable) - 1] else: fosterParent = self.openElements[0] return fosterParent, insertBefore def generateImpliedEndTags(self, exclude=None): name = self.openElements[-1].name # XXX td, th and tr are not actually needed if (name in frozenset(("dd", "dt", "li", "option", "optgroup", "p", "rp", "rt")) and name != exclude): self.openElements.pop() # XXX This is not entirely what the specification says. We should # investigate it more closely. 
self.generateImpliedEndTags(exclude) def getDocument(self): """Return the final tree""" return self.document def getFragment(self): """Return the final fragment""" # assert self.innerHTML fragment = self.fragmentClass() self.openElements[0].reparentChildren(fragment) return fragment def testSerializer(self, node): """Serialize the subtree of node in the format required by unit tests :arg node: the node from which to start serializing """ raise NotImplementedError
TreeBuilder
python
getsentry__sentry
tests/sentry/notifications/notifications/organization_request/test_integration_request.py
{ "start": 233, "end": 2452 }
class ____(TestCase): def test_get_context(self) -> None: owner = self.create_user("owner@example.com") org = self.create_organization(owner=owner) requester = self.create_user() self.create_member(user=requester, organization=org) message = "hello" notification = IntegrationRequestNotification( org, requester, provider_type="first_party", provider_slug="slack", provider_name="Slack", message=message, ) context = notification.get_context() assert context["requester_name"] == requester.get_display_name() assert context["organization_name"] == org.name assert context["message"] == message def test_determine_recipients(self) -> None: owner = self.create_user("owner@example.com") org = self.create_organization(owner=owner) requester = self.create_user() self.create_member(user=requester, organization=org) message = "hello" notification = IntegrationRequestNotification( org, requester, provider_type="first_party", provider_slug="slack", provider_name="Slack", message=message, ) recipients = notification.determine_recipients() assert len(recipients) == 1 assert recipients[0].id == owner.id @with_feature("system:multi-region") def test_get_context_customer_domain(self) -> None: owner = self.create_user("owner@example.com") org = self.create_organization(owner=owner) requester = self.create_user() self.create_member(user=requester, organization=org) message = "hello" notification = IntegrationRequestNotification( org, requester, provider_type="first_party", provider_slug="slack", provider_name="Slack", message=message, ) context = notification.get_context() assert ( org.absolute_url(f"/settings/{org.slug}/integrations/slack/") in context["integration_link"] )
TestIntegrationRequestNotification
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/testing/suite/test_dialect.py
{ "start": 4532, "end": 8672 }
class ____(fixtures.TestBase): __backend__ = True __requires__ = ("isolation_level",) def _get_non_default_isolation_level(self): levels = requirements.get_isolation_levels(config) default = levels["default"] supported = levels["supported"] s = set(supported).difference(["AUTOCOMMIT", default]) if s: return s.pop() else: config.skip_test("no non-default isolation level available") def test_default_isolation_level(self): eq_( config.db.dialect.default_isolation_level, requirements.get_isolation_levels(config)["default"], ) def test_non_default_isolation_level(self): non_default = self._get_non_default_isolation_level() with config.db.connect() as conn: existing = conn.get_isolation_level() ne_(existing, non_default) conn.execution_options(isolation_level=non_default) eq_(conn.get_isolation_level(), non_default) conn.dialect.reset_isolation_level( conn.connection.dbapi_connection ) eq_(conn.get_isolation_level(), existing) def test_all_levels(self): levels = requirements.get_isolation_levels(config) all_levels = levels["supported"] for level in set(all_levels).difference(["AUTOCOMMIT"]): with config.db.connect() as conn: conn.execution_options(isolation_level=level) eq_(conn.get_isolation_level(), level) trans = conn.begin() trans.rollback() eq_(conn.get_isolation_level(), level) with config.db.connect() as conn: eq_( conn.get_isolation_level(), levels["default"], ) @testing.requires.get_isolation_level_values def test_invalid_level_execution_option(self, connection_no_trans): """test for the new get_isolation_level_values() method""" connection = connection_no_trans with expect_raises_message( exc.ArgumentError, "Invalid value '%s' for isolation_level. 
" "Valid isolation levels for '%s' are %s" % ( "FOO", connection.dialect.name, ", ".join( requirements.get_isolation_levels(config)["supported"] ), ), ): connection.execution_options(isolation_level="FOO") @testing.requires.get_isolation_level_values @testing.requires.dialect_level_isolation_level_param def test_invalid_level_engine_param(self, testing_engine): """test for the new get_isolation_level_values() method and support for the dialect-level 'isolation_level' parameter. """ eng = testing_engine(options=dict(isolation_level="FOO")) with expect_raises_message( exc.ArgumentError, "Invalid value '%s' for isolation_level. " "Valid isolation levels for '%s' are %s" % ( "FOO", eng.dialect.name, ", ".join( requirements.get_isolation_levels(config)["supported"] ), ), ): eng.connect() @testing.requires.independent_readonly_connections def test_dialect_user_setting_is_restored(self, testing_engine): levels = requirements.get_isolation_levels(config) default = levels["default"] supported = ( sorted( set(levels["supported"]).difference([default, "AUTOCOMMIT"]) ) )[0] e = testing_engine(options={"isolation_level": supported}) with e.connect() as conn: eq_(conn.get_isolation_level(), supported) with e.connect() as conn: conn.execution_options(isolation_level=default) eq_(conn.get_isolation_level(), default) with e.connect() as conn: eq_(conn.get_isolation_level(), supported)
IsolationLevelTest
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/flake8_type_checking/runtime_evaluated_decorators_2.py
{ "start": 246, "end": 289 }
class ____: x: pandas.DataFrame @frozen
B
python
pytorch__pytorch
torch/_dynamo/variables/user_defined.py
{ "start": 37007, "end": 37685 }
class ____: pass def call_random_fn(tx, fn, args, kwargs): from .builder import VariableBuilder args = [x.as_python_constant() for x in args] kwargs = {k: v.as_python_constant() for k, v in kwargs.items()} random_call_index = len(tx.output.random_calls) example_value = fn(*args, **kwargs) source = RandomValueSource(random_call_index) tx.output.random_calls.append((fn, args, kwargs)) # TODO: arguably, this should route to wrap_symint/wrap_symfloat # (currently hypothetical), but I'm not going to poke my hand in # this nest for now return VariableBuilder(tx, source).wrap_unspecialized_primitive(example_value)
NO_SUCH_SUBOBJ
python
walkccc__LeetCode
solutions/3412. Find Mirror Score of a String/3412.py
{ "start": 0, "end": 347 }
class ____: def calculateScore(self, s: str) -> int: ans = 0 indices = [[] for _ in range(26)] for i, c in enumerate(s): index = ord(c) - ord('a') oppositeIndex = 25 - index if indices[oppositeIndex]: ans += i - indices[oppositeIndex].pop() else: indices[index].append(i) return ans
Solution
python
huggingface__transformers
src/transformers/models/xlm_roberta/modeling_xlm_roberta.py
{ "start": 47495, "end": 50356 }
class ____(XLMRobertaPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.roberta = XLMRobertaModel(config, add_pooling_layer=False) # Initialize weights and apply final processing self.post_init() @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple[torch.Tensor], TokenClassifierOutput]: r""" token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. This parameter can only be used when the model is initialized with `type_vocab_size` parameter with value >= 2. All the value in this tensor should be always < type_vocab_size. [What are token type IDs?](../glossary#token-type-ids) labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. 
""" outputs = self.roberta( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, return_dict=True, **kwargs, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: # move labels to correct device labels = labels.to(logits.device) loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring
XLMRobertaForTokenClassification
python
urllib3__urllib3
src/urllib3/poolmanager.py
{ "start": 18453, "end": 23811 }
class ____(PoolManager): """ Behaves just like :class:`PoolManager`, but sends all requests through the defined proxy, using the CONNECT method for HTTPS URLs. :param proxy_url: The URL of the proxy to be used. :param proxy_headers: A dictionary containing headers that will be sent to the proxy. In case of HTTP they are being sent with each request, while in the HTTPS/CONNECT case they are sent only once. Could be used for proxy authentication. :param proxy_ssl_context: The proxy SSL context is used to establish the TLS connection to the proxy when using HTTPS proxies. :param use_forwarding_for_https: (Defaults to False) If set to True will forward requests to the HTTPS proxy to be made on behalf of the client instead of creating a TLS tunnel via the CONNECT method. **Enabling this flag means that request and response headers and content will be visible from the HTTPS proxy** whereas tunneling keeps request and response headers and content private. IP address, target hostname, SNI, and port are always visible to an HTTPS proxy even when this flag is disabled. :param proxy_assert_hostname: The hostname of the certificate to verify against. :param proxy_assert_fingerprint: The fingerprint of the certificate to verify against. Example: .. 
code-block:: python import urllib3 proxy = urllib3.ProxyManager("https://localhost:3128/") resp1 = proxy.request("GET", "https://google.com/") resp2 = proxy.request("GET", "https://httpbin.org/") print(len(proxy.pools)) # 1 resp3 = proxy.request("GET", "https://httpbin.org/") resp4 = proxy.request("GET", "https://twitter.com/") print(len(proxy.pools)) # 3 """ def __init__( self, proxy_url: str, num_pools: int = 10, headers: typing.Mapping[str, str] | None = None, proxy_headers: typing.Mapping[str, str] | None = None, proxy_ssl_context: ssl.SSLContext | None = None, use_forwarding_for_https: bool = False, proxy_assert_hostname: None | str | typing.Literal[False] = None, proxy_assert_fingerprint: str | None = None, **connection_pool_kw: typing.Any, ) -> None: if isinstance(proxy_url, HTTPConnectionPool): str_proxy_url = f"{proxy_url.scheme}://{proxy_url.host}:{proxy_url.port}" else: str_proxy_url = proxy_url proxy = parse_url(str_proxy_url) if proxy.scheme not in ("http", "https"): raise ProxySchemeUnknown(proxy.scheme) if not proxy.port: port = port_by_scheme.get(proxy.scheme, 80) proxy = proxy._replace(port=port) self.proxy = proxy self.proxy_headers = proxy_headers or {} self.proxy_ssl_context = proxy_ssl_context self.proxy_config = ProxyConfig( proxy_ssl_context, use_forwarding_for_https, proxy_assert_hostname, proxy_assert_fingerprint, ) connection_pool_kw["_proxy"] = self.proxy connection_pool_kw["_proxy_headers"] = self.proxy_headers connection_pool_kw["_proxy_config"] = self.proxy_config super().__init__(num_pools, headers, **connection_pool_kw) def connection_from_host( self, host: str | None, port: int | None = None, scheme: str | None = "http", pool_kwargs: dict[str, typing.Any] | None = None, ) -> HTTPConnectionPool: if scheme == "https": return super().connection_from_host( host, port, scheme, pool_kwargs=pool_kwargs ) return super().connection_from_host( self.proxy.host, self.proxy.port, self.proxy.scheme, pool_kwargs=pool_kwargs # type: 
ignore[union-attr] ) def _set_proxy_headers( self, url: str, headers: typing.Mapping[str, str] | None = None ) -> typing.Mapping[str, str]: """ Sets headers needed by proxies: specifically, the Accept and Host headers. Only sets headers not provided by the user. """ headers_ = {"Accept": "*/*"} netloc = parse_url(url).netloc if netloc: headers_["Host"] = netloc if headers: headers_.update(headers) return headers_ def urlopen( # type: ignore[override] self, method: str, url: str, redirect: bool = True, **kw: typing.Any ) -> BaseHTTPResponse: "Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute." u = parse_url(url) if not connection_requires_http_tunnel(self.proxy, self.proxy_config, u.scheme): # For connections using HTTP CONNECT, httplib sets the necessary # headers on the CONNECT to the proxy. If we're not using CONNECT, # we'll definitely need to set 'Host' at the very least. headers = kw.get("headers", self.headers) kw["headers"] = self._set_proxy_headers(url, headers) return super().urlopen(method, url, redirect=redirect, **kw) def proxy_from_url(url: str, **kw: typing.Any) -> ProxyManager: return ProxyManager(proxy_url=url, **kw)
ProxyManager
python
pytorch__pytorch
test/distributed/fsdp/test_fsdp_freezing_weights.py
{ "start": 3324, "end": 3426 }
class ____(str, Enum): GradToNone = "grad_to_none" RequiresGrad = "requires_grad"
FreezingMethod
python
rq__rq
tests/test_worker.py
{ "start": 62437, "end": 63838 }
class ____(RQTestCase): def setUp(self): super().setUp() db_num = self.connection.connection_pool.connection_kwargs['db'] self.redis_url = 'redis://127.0.0.1:6379/%d' % db_num def test_run_empty_queue(self): """Run the worker in its own process with an empty queue""" subprocess.check_call(['rqworker', '-u', self.redis_url, '-b']) def test_run_access_self(self): """Schedule a job, then run the worker as subprocess""" q = Queue(connection=self.connection) job = q.enqueue(access_self) subprocess.check_call(['rqworker', '-u', self.redis_url, '-b']) registry = FinishedJobRegistry(queue=q) self.assertIn(job, registry) assert q.count == 0 @skipIf('pypy' in sys.version.lower(), 'often times out with pypy') def test_run_scheduled_access_self(self): """Schedule a job that schedules a job, then run the worker as subprocess""" q = Queue(connection=self.connection) job = q.enqueue(schedule_access_self) subprocess.check_call(['rqworker', '-u', self.redis_url, '-b']) registry = FinishedJobRegistry(queue=q) self.assertIn(job, registry) assert q.count == 0 @pytest.mark.skipif(sys.platform == 'darwin', reason='requires Linux signals') @skipIf('pypy' in sys.version.lower(), 'these tests often fail on pypy')
TestWorkerSubprocess
python
doocs__leetcode
solution/2300-2399/2370.Longest Ideal Subsequence/Solution.py
{ "start": 0, "end": 441 }
class ____: def longestIdealString(self, s: str, k: int) -> int: n = len(s) ans = 1 dp = [1] * n d = {s[0]: 0} for i in range(1, n): a = ord(s[i]) for b in ascii_lowercase: if abs(a - ord(b)) > k: continue if b in d: dp[i] = max(dp[i], dp[d[b]] + 1) d[s[i]] = i return max(dp)
Solution
python
django__django
django/core/exceptions.py
{ "start": 1184, "end": 1361 }
class ____(SuspiciousOperation): """ The number of fields in a GET or POST request exceeded settings.DATA_UPLOAD_MAX_NUMBER_FIELDS. """ pass
TooManyFieldsSent
python
Textualize__textual
docs/examples/guide/workers/weather01.py
{ "start": 174, "end": 1262 }
class ____(App): """App to display the current weather.""" CSS_PATH = "weather.tcss" def compose(self) -> ComposeResult: yield Input(placeholder="Enter a City") with VerticalScroll(id="weather-container"): yield Static(id="weather") async def on_input_changed(self, message: Input.Changed) -> None: """Called when the input changes""" await self.update_weather(message.value) async def update_weather(self, city: str) -> None: """Update the weather for the given city.""" weather_widget = self.query_one("#weather", Static) if city: # Query the network API url = f"https://wttr.in/{city}" async with httpx.AsyncClient() as client: response = await client.get(url) weather = Text.from_ansi(response.text) weather_widget.update(weather) else: # No city, so just blank out the weather weather_widget.update("") if __name__ == "__main__": app = WeatherApp() app.run()
WeatherApp
python
django__django
tests/postgres_tests/__init__.py
{ "start": 1391, "end": 1470 }
class ____(WidgetTest, PostgreSQLSimpleTestCase): pass
PostgreSQLWidgetTestCase
python
huggingface__transformers
src/transformers/models/t5gemma/configuration_t5gemma.py
{ "start": 1336, "end": 10208 }
class ____(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`T5GemmaModuleModel`]. It is used to instantiate an T5GemmaModule model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the T5GemmaModule-7B. e.g. [google/t5_gemma_module-7b](https://huggingface.co/google/t5_gemma_module-7b) Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 256000): Vocabulary size of the T5GemmaModule model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`T5GemmaModuleModel`] hidden_size (`int`, *optional*, defaults to 2304): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 9216): Dimension of the MLP representations. num_hidden_layers (`int`, *optional*, defaults to 26): Number of hidden layers in the Transformer decoder. num_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer decoder. num_key_value_heads (`int`, *optional*, defaults to 4): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details, check out [this paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `num_attention_heads`. 
head_dim (`int`, *optional*, defaults to 256): The attention head dimension. hidden_activation (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`): The non-linear activation function (function or string) in the decoder. Will default to `"gelu_pytorch_tanh"` if not specified. `"gelu_pytorch_tanh"` uses an approximation of the `"gelu"` activation function. max_position_embeddings (`int`, *optional*, defaults to 8192): The maximum sequence length that this model might ever be used with. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. rms_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the rms normalization layers. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. pad_token_id (`int`, *optional*, defaults to 0): Padding token id. eos_token_id (`int`, *optional*, defaults to 1): End of stream token id. bos_token_id (`int`, *optional*, defaults to 2): Beginning of stream token id. tie_word_embeddings (`bool`, *optional*, defaults to `True`): Whether to tie weight embeddings rope_parameters (`RopeParameters`, *optional*): Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE with longer `max_position_embeddings`. attention_bias (`bool`, defaults to `False`, *optional*, defaults to `False`): Whether to use a bias in the query, key, value and output projection layers during self-attention. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. 
query_pre_attn_scalar (`float`, *optional*, defaults to 256): scaling factor used on the attention scores sliding_window (`int`, *optional*, defaults to 4096): in T5GemmaModule, every other layer uses sliding window attention. This is the size of the sliding window. layer_types (`list`, *optional*): Attention pattern for each layer. final_logit_softcapping (`float`, *optional*, defaults to 30.0): scaling factor when applying tanh softcapping on the logits. attn_logit_softcapping (`float`, *optional*, defaults to 50.0): scaling factor when applying tanh softcapping on the attention scores. ```python >>> from transformers import T5GemmaModuleModel, T5GemmaModuleConfig >>> # Initializing a T5GemmaModule t5_gemma_module-7b style configuration >>> configuration = T5GemmaModuleConfig() >>> # Initializing a model from the t5_gemma_module-7b style configuration >>> model = T5GemmaModuleModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "t5_gemma_module" keys_to_ignore_at_inference = ["past_key_values"] base_model_tp_plan = { "layers.*.self_attn.q_proj": "colwise", "layers.*.self_attn.k_proj": "colwise", "layers.*.self_attn.v_proj": "colwise", "layers.*.self_attn.o_proj": "rowwise", "layers.*.mlp.gate_proj": "colwise", "layers.*.mlp.up_proj": "colwise", "layers.*.mlp.down_proj": "rowwise", } base_model_pp_plan = { "embed_tokens": (["input_ids"], ["inputs_embeds"]), "layers": (["hidden_states", "attention_mask"], ["hidden_states"]), "norm": (["hidden_states"], ["hidden_states"]), } def __init__( self, vocab_size: Optional[int] = 256000, hidden_size: Optional[int] = 2304, intermediate_size: Optional[int] = 9216, num_hidden_layers: Optional[int] = 26, num_attention_heads: Optional[int] = 8, num_key_value_heads: Optional[int] = 4, head_dim: Optional[int] = 256, hidden_activation: Optional[str] = "gelu_pytorch_tanh", max_position_embeddings: Optional[int] = 8192, initializer_range: Optional[float] = 0.02, 
rms_norm_eps: Optional[int] = 1e-6, use_cache: Optional[bool] = True, pad_token_id: Optional[int] = 0, eos_token_id: Optional[int] = 1, bos_token_id: Optional[int] = 2, tie_word_embeddings: Optional[bool] = True, rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None, attention_bias: Optional[bool] = False, attention_dropout: Optional[float] = 0.0, query_pre_attn_scalar: Optional[int] = 256, sliding_window: Optional[int] = 4096, layer_types: Optional[list[str]] = None, final_logit_softcapping: Optional[float] = 30.0, attn_logit_softcapping: Optional[float] = 50.0, **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.head_dim = head_dim self.num_key_value_heads = num_key_value_heads self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache self.attention_bias = attention_bias self.attention_dropout = attention_dropout self.hidden_activation = hidden_activation self.query_pre_attn_scalar = query_pre_attn_scalar self.sliding_window = sliding_window self.final_logit_softcapping = final_logit_softcapping self.attn_logit_softcapping = attn_logit_softcapping self.layer_types = layer_types if self.layer_types is None: self.layer_types = [ "sliding_attention" if bool((i + 1) % 2) else "full_attention" for i in range(self.num_hidden_layers) ] layer_type_validation(self.layer_types, self.num_hidden_layers) self.rope_parameters = rope_parameters super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, )
T5GemmaModuleConfig
python
django__django
tests/model_fields/test_durationfield.py
{ "start": 2704, "end": 2945 }
class ____(SimpleTestCase): # Tests for forms.DurationField are in the forms_tests app. def test_formfield(self): field = models.DurationField() self.assertIsInstance(field.formfield(), forms.DurationField)
TestFormField
python
tensorflow__tensorflow
tensorflow/python/distribute/distribute_coordinator.py
{ "start": 3336, "end": 34550 }
class ____(object): """The worker context class. This context object provides configuration information for each task. One context manager with a worker context object will be created per invocation to the `worker_fn` where `get_current_worker_context` can be called to access the worker context object. """ def __init__(self, strategy, cluster_spec, task_type, task_id, session_config=None, rpc_layer="grpc", worker_barrier=None): """Initialize the worker context object. Args: strategy: a `DistributionStrategy` object. cluster_spec: a ClusterSpec object. It can be empty or None in the local training case. task_type: a string indicating the role of the corresponding task, such as "worker" or "ps". It can be None if it is local training or in-graph replicated training. task_id: an integer indicating id of the corresponding task. It can be None if it is local training or in-graph replicated training. session_config: an optional `tf.compat.v1.ConfigProto` object. rpc_layer: optional string specifying the RPC protocol for communication with worker masters. If None or empty, hosts in the `cluster_spec` will be used directly. worker_barrier: optional, the barrier object for worker synchronization. 
""" self._strategy = strategy self._cluster_spec = cluster_spec self._task_type = task_type self._task_id = task_id self._session_config = session_config self._worker_barrier = worker_barrier self._rpc_layer = rpc_layer self._master_target = self._get_master_target() self._num_workers = _get_num_workers(cluster_spec) self._is_chief_node = self._is_chief() def _debug_message(self): if self._cluster_spec: return "[cluster_spec: %r, task_type: %r, task_id: %r]" % ( self._cluster_spec, self.task_type, self.task_id) else: return "[local]" def __enter__(self): old_context = distribute_coordinator_context.get_current_worker_context() if old_context: raise ValueError( "You cannot run distribute coordinator in a `worker_fn`.\t" + self._debug_message()) # pylint: disable=protected-access distribute_coordinator_context._worker_context.current = self def __exit__(self, unused_exception_type, unused_exception_value, unused_traceback): # pylint: disable=protected-access distribute_coordinator_context._worker_context.current = None def _get_master_target(self): """Return the master target for a task.""" # If cluster_spec is None or empty, we use local master. if not self._cluster_spec or self._task_type == _TaskType.EVALUATOR: return "" # If task_type is None, then it is in-graph replicated training. In this # case we use the chief or first worker's master target. 
if not self._task_type: if _TaskType.CHIEF in self._cluster_spec.jobs: task_type = _TaskType.CHIEF task_id = 0 else: assert _TaskType.WORKER in self._cluster_spec.jobs task_type = _TaskType.WORKER task_id = 0 else: task_type = self._task_type task_id = self._task_id prefix = "" if self._rpc_layer: prefix = self._rpc_layer + "://" return prefix + self._cluster_spec.job_tasks(task_type)[task_id or 0] def _is_chief(self): """Return whether the task is the chief worker.""" if (not self._cluster_spec or self._task_type in [_TaskType.CHIEF, _TaskType.EVALUATOR, None]): return True # If not local and chief not in the cluster_spec, use the first worker as # chief. if (_TaskType.CHIEF not in self._cluster_spec.jobs and self._task_type == _TaskType.WORKER and self._task_id == 0): return True return False def wait_for_other_workers(self): """Waits for other workers to reach the same call to this method. Raises: ValueError: if `worker_barrier` is not passed to the __init__ method. """ if not self._worker_barrier: # TODO(yuefengz): we should throw an error in independent worker mode. return self._worker_barrier.wait() def session_creator(self, scaffold=None, config=None, checkpoint_dir=None, checkpoint_filename_with_path=None, max_wait_secs=7200): """Returns a session creator. The returned session creator will be configured with the correct master target and session configs. It will also run either init ops or ready ops by querying the `strategy` object when `create_session` is called on it. Args: scaffold: A `Scaffold` used for gathering or building supportive ops. If not specified a default one is created. It's used to finalize the graph. config: `ConfigProto` proto used to configure the session. checkpoint_dir: A string. Optional path to a directory where to restore variables. checkpoint_filename_with_path: Full file name path to the checkpoint file. Only one of `checkpoint_dir` or `checkpoint_filename_with_path` can be specified. 
max_wait_secs: Maximum time to wait for the session to become available. Returns: a descendant of SessionCreator. """ if config: session_config = copy.deepcopy(config) session_config.MergeFrom(self._session_config) else: session_config = self._session_config if not self._strategy or self._strategy.extended.experimental_should_init: logging.info("Creating chief session creator with config: %r", config) return monitored_session.ChiefSessionCreator( scaffold, master=self.master_target, config=session_config, checkpoint_dir=checkpoint_dir, checkpoint_filename_with_path=checkpoint_filename_with_path) else: logging.info("Creating worker session creator with config: %r", config) return monitored_session.WorkerSessionCreator( scaffold, master=self.master_target, config=session_config, max_wait_secs=max_wait_secs) @property def session_config(self): return copy.deepcopy(self._session_config) @property def has_barrier(self): """Whether the barrier is set or not.""" return self._worker_barrier is not None @property def distributed_mode(self): """Whether it is distributed training or not.""" return bool(self._cluster_spec) and self._task_type != _TaskType.EVALUATOR @property def cluster_spec(self): """Returns a copy of the cluster_spec object.""" return copy.deepcopy(self._cluster_spec) @property def task_type(self): """Returns the role of the corresponding task.""" return self._task_type @property def task_id(self): """Returns the id or index of the corresponding task.""" return self._task_id @property def master_target(self): """Returns the session master for the corresponding task to connect to.""" return self._master_target @property def is_chief(self): """Returns whether the task is a chief node.""" return self._is_chief_node @property def num_workers(self): """Returns number of workers in the cluster, including chief.""" return self._num_workers @property def experimental_should_init(self): """Whether to run init ops.""" return 
self._strategy.extended.experimental_should_init @property def should_checkpoint(self): """Whether to save checkpoint.""" return self._strategy.extended.should_checkpoint @property def should_save_summary(self): """Whether to save summaries.""" return self._strategy.extended.should_save_summary def _run_single_worker(worker_fn, strategy, cluster_spec, task_type, task_id, session_config, rpc_layer="", worker_barrier=None, coord=None): """Runs a single worker by calling `worker_fn` under context.""" session_config = copy.deepcopy(session_config) strategy = copy.deepcopy(strategy) # If there is an EVALUATOR task, we run single-machine eval on that task. if task_type == _TaskType.EVALUATOR: # It is possible to not have a strategy object for EVALUATOR task. if strategy: strategy.configure(session_config) else: assert strategy strategy.configure(session_config, cluster_spec, task_type, task_id) context = _WorkerContext( strategy, cluster_spec, task_type, task_id, session_config=session_config, rpc_layer=rpc_layer, worker_barrier=worker_barrier) with context: if coord: with coord.stop_on_exception(): return worker_fn(strategy) else: return worker_fn(strategy) def _split_cluster_for_evaluator(cluster_spec, task_type): """Split the cluster for evaluator since it needn't talk to other tasks.""" # Splitting the cluster is important to prevent the evaluator from talking to # other tasks in the cluster. Since we allow evaluator not to use # distribution strategies and as a result ops in the evaluator task may have # unspecified devices. Those ops may end up on other tasks if we don't split # the cluster. # Note: if you bypass distribute coordinator and bring the cluster yourself, # you can equivalently set device filters to split clusters. This is already # done by distribution strategy's `update_config_proto` method. 
new_cluster_spec = multi_worker_util.normalize_cluster_spec( cluster_spec).as_dict() if task_type == _TaskType.EVALUATOR: assert _TaskType.EVALUATOR in new_cluster_spec new_cluster_spec = { _TaskType.EVALUATOR: new_cluster_spec[_TaskType.EVALUATOR] } else: new_cluster_spec.pop(_TaskType.EVALUATOR, None) return multi_worker_util.normalize_cluster_spec(new_cluster_spec) def _run_std_server(cluster_spec=None, task_type=None, task_id=None, session_config=None, rpc_layer=None, environment=None): """Runs a standard server.""" # Check if the Server is already running. If so, assert that no configuration # options have changed, and return the existing Server. This allows us to # call `run_distribute_coordinator` multiple times. if getattr(_thread_local, "server", None) is not None: assert _thread_local.cluster_spec == cluster_spec assert _thread_local.task_type == task_type assert _thread_local.task_id == task_id assert _thread_local.session_config_str == repr(session_config) assert _thread_local.rpc_layer == rpc_layer assert _thread_local.environment == environment return _thread_local.server else: # This method is not thread-safe. _thread_local.server_started = True _thread_local.cluster_spec = cluster_spec _thread_local.task_type = task_type _thread_local.task_id = task_id _thread_local.session_config_str = repr(session_config) _thread_local.rpc_layer = rpc_layer _thread_local.environment = environment assert cluster_spec target = cluster_spec.task_address(task_type, task_id) if rpc_layer: target = rpc_layer + "://" + target class _FakeServer(object): """A fake server that runs a master session.""" def start(self): # A tensorflow server starts when a remote session is created. 
logging.info( "Creating a remote session to start a TensorFlow server, " "target = %r, session_config=%r", target, session_config) session.Session(target=target, config=session_config) def join(self): while True: time.sleep(5) if environment == "google": server = _FakeServer() else: if session_config: logging.info( "Starting standard TensorFlow server, target = %r, session_config= " "%r", target, session_config) else: logging.info("Starting standard TensorFlow server, target = %r", target) cluster_spec = _split_cluster_for_evaluator(cluster_spec, task_type) server = server_lib.Server( cluster_spec, job_name=task_type, task_index=task_id, config=session_config, protocol=rpc_layer) server.start() _thread_local.server = server return server def _run_between_graph_client(worker_fn, strategy, eval_fn, eval_strategy, cluster_spec, session_config, rpc_layer): """Runs a standalone client for between-graph replication.""" coord = coordinator.Coordinator() eval_thread = None if _TaskType.EVALUATOR in cluster_spec.jobs: eval_thread = threading.Thread( target=_run_single_worker, args=(eval_fn, eval_strategy, cluster_spec, _TaskType.EVALUATOR, 0, session_config), kwargs={ "rpc_layer": rpc_layer, "coord": coord, }) eval_thread.start() threads = [] worker_barrier = _Barrier(_get_num_workers(cluster_spec)) for task_type in [_TaskType.CHIEF, _TaskType.WORKER]: for task_id in range(len(cluster_spec.as_dict().get(task_type, []))): t = threading.Thread( target=_run_single_worker, args=(worker_fn, strategy, cluster_spec, task_type, task_id, session_config), kwargs={ "rpc_layer": rpc_layer, "worker_barrier": worker_barrier, "coord": coord, }) t.start() threads.append(t) if eval_thread: # TODO(yuefengz): is it necessary to join eval thread? threads_to_join = threads + [eval_thread] else: threads_to_join = threads coord.join(threads_to_join) # TODO(yuefengz): we probably want to return results from all workers? 
return None def _run_in_graph_client(worker_fn, strategy, eval_fn, eval_strategy, cluster_spec, session_config, rpc_layer): """Runs a standalone client for in-graph replication.""" coord = coordinator.Coordinator() eval_thread = None if _TaskType.EVALUATOR in cluster_spec.jobs: eval_thread = threading.Thread( target=_run_single_worker, args=(eval_fn, eval_strategy, cluster_spec, _TaskType.EVALUATOR, 0, session_config), kwargs={ "rpc_layer": rpc_layer, "coord": coord, }) eval_thread.start() worker_result = _run_single_worker( worker_fn, strategy, cluster_spec, None, None, session_config, rpc_layer=rpc_layer, coord=coord) if eval_thread: coord.join([eval_thread]) return worker_result def _configure_session_config_for_std_servers( strategy, eval_strategy, session_config, cluster_spec, task_type, task_id): # pylint: disable=g-doc-args """Call strategy's `configure` to mutate the session_config. The session_config is currently needed as default config for a TensorFlow server. In the future, we should be able to remove this method and only pass the session config to a client session. """ if task_type == _TaskType.EVALUATOR: if eval_strategy: eval_strategy.configure(session_config=session_config) else: # The strategy may be shared in standalone client mode. strategy = copy.deepcopy(strategy) strategy.configure( session_config=session_config, cluster_spec=cluster_spec, task_type=task_type, task_id=task_id) # Remove the device filters specific to the strategy, so that the # TensorFlow server brought up with one strategy can be used by other # strategies. The device filters can be set in the client side as well. del session_config.device_filters[:] def run_standard_tensorflow_server(session_config=None): """Starts a standard TensorFlow server. This method parses configurations from "TF_CONFIG" environment variable and starts a TensorFlow server. The "TF_CONFIG" is typically a json string and must have information of the cluster and the role of the server in the cluster. 
One example is: TF_CONFIG='{ "cluster": { "worker": ["host1:2222", "host2:2222", "host3:2222"], "ps": ["host4:2222", "host5:2222"] }, "task": {"type": "worker", "index": 1} }' This "TF_CONFIG" specifies there are 3 workers and 2 ps tasks in the cluster and the current role is worker 1. Valid task types are "chief", "worker", "ps" and "evaluator" and you can have at most one "chief" and at most one "evaluator". An optional key-value can be specified is "rpc_layer". The default value is "grpc". Args: session_config: an optional `tf.compat.v1.ConfigProto` object. Users can pass in the session config object to configure server-local devices. Returns: a `tf.distribute.Server` object which has already been started. Raises: ValueError: if the "TF_CONFIG" environment is not complete. """ tf_config = json.loads(os.environ.get("TF_CONFIG", "{}")) if "cluster" not in tf_config: raise ValueError("\"cluster\" is not found in TF_CONFIG.") cluster_spec = multi_worker_util.normalize_cluster_spec(tf_config["cluster"]) if "task" not in tf_config: raise ValueError("\"task\" is not found in TF_CONFIG.") task_env = tf_config["task"] if "type" not in task_env: raise ValueError( "\"task_type\" is not found in the `task` part of TF_CONFIG.") task_type = task_env["type"] task_id = int(task_env.get("index", 0)) rpc_layer = tf_config.get("rpc_layer", "grpc") session_config = session_config or config_pb2.ConfigProto() # Set the collective group leader for collective ops to initialize collective # ops when server starts. 
if "chief" in cluster_spec.jobs: session_config.experimental.collective_group_leader = ( "/job:chief/replica:0/task:0") else: if "worker" not in cluster_spec.jobs: raise ValueError( "You must have `chief` or `worker` jobs in the `cluster_spec`.") session_config.experimental.collective_group_leader = ( "/job:worker/replica:0/task:0") server = _run_std_server( cluster_spec=cluster_spec, task_type=task_type, task_id=task_id, session_config=session_config, rpc_layer=rpc_layer) server.start() return server # TODO(yuefengz): propagate cluster_spec in the STANDALONE_CLIENT mode. # TODO(yuefengz): we may need a smart way to figure out whether the current task # is the special task when we support cluster_spec propagation. def run_distribute_coordinator(worker_fn, strategy, eval_fn=None, eval_strategy=None, mode=CoordinatorMode.STANDALONE_CLIENT, cluster_spec=None, task_type=None, task_id=None, session_config=None, rpc_layer="grpc"): """Runs the coordinator for distributed TensorFlow. This function runs a split coordinator for distributed TensorFlow in its default mode, i.e the STANDALONE_CLIENT mode. Given a `cluster_spec` specifying server addresses and their roles in a cluster, this coordinator will figure out how to set them up, give the underlying function the right targets for master sessions via a scope object and coordinate their training. The cluster consisting of standard servers needs to be brought up either with the standard server binary or with a binary running distribute coordinator with `task_type` set to non-client type which will then turn into standard servers. In addition to be the distribute coordinator, this is also the source of configurations for each job in the distributed training. As there are multiple ways to configure a distributed TensorFlow cluster, its context object provides these configurations so that users or higher-level APIs don't have to figure out the configuration for each job by themselves. 
In the between-graph replicated training, this coordinator will create multiple threads and each calls the `worker_fn` which is supposed to create its own graph and connect to one worker master given by its context object. In the in-graph replicated training, it has only one thread calling this `worker_fn`. Another mode is the INDEPENDENT_WORKER mode where each server runs a distribute coordinator which will start a standard server and optionally runs `worker_fn` depending whether it is between-graph training or in-graph replicated training. The `strategy` object is expected to be a DistributionStrategy object which has implemented methods needed by distributed coordinator such as `configure(session_config, cluster_spec, task_type, task_id)` which configures the strategy object for a specific task and `experimental_should_init` property which instructs the distribute coordinator whether to run init ops for a task. The distribute coordinator will make a copy of the `strategy` object, call its `configure` method and pass it to `worker_fn` as an argument. The `worker_fn` defines the training logic and is called under its own worker context which can be accessed to via `get_current_worker_context`. A worker context provides access to configurations for each task, e.g. the task_type, task_id, master target and so on. Since `worker_fn` will be called in a thread and possibly multiple times, caller should be careful when it accesses global data. For example, it is unsafe to define flags in a `worker_fn` or to define different environment variables for different `worker_fn`s. The `worker_fn` for the between-graph replication is defined as if there is only one worker corresponding to the `worker_fn` and possibly ps jobs. For example, when training with parameter servers, it assigns variables to parameter servers and all other operations to that worker. In the in-graph replication case, the `worker_fn` has to define operations for all worker jobs. 
Using a distribution strategy can simplify the `worker_fn` by not having to worry about the replication and device assignment of variables and operations. This method is intended to be invoked by high-level APIs so that users don't have to explicitly call it to run this coordinator. For those who don't use high-level APIs, to change a program to use this coordinator, wrap everything in a the program after global data definitions such as commandline flag definition into the `worker_fn` and get task-specific configurations from the worker context. The `cluster_spec` can be either passed by the argument or parsed from the "TF_CONFIG" environment variable. Example of a TF_CONFIG: ``` cluster = {'chief': ['host0:2222'], 'ps': ['host1:2222', 'host2:2222'], 'worker': ['host3:2222', 'host4:2222', 'host5:2222']} os.environ['TF_CONFIG'] = json.dumps({'cluster': cluster}) ``` If `cluster_spec` is not given in any format, it becomes local training and this coordinator will connect to a local session. For evaluation, if "evaluator" exists in the cluster_spec, a separate thread will be created to call `eval_fn` with its `task_type` set to "evaluator". If `eval_fn` is not defined, fall back to `worker_fn`. This implies that evaluation will be done on a single machine if there is an "evaluator" task. If "evaluator" doesn't exist in the cluster_spec, it entirely depends on the `worker_fn` for how to do evaluation. Args: worker_fn: the function to be called. The function should accept a `strategy` object and will be given access to a context object via a context manager scope. strategy: a DistributionStrategy object specifying whether it should run between-graph replicated training or not, whether to run init ops, etc. This object will also be configured given `session_config`, `cluster_spec`, `task_type` and `task_id`. eval_fn: optional function for "evaluator" task. 
If `eval_fn` is not passed in but a "evaluator" task is found in the `cluster_spec`, the `worker_fn` will be used for this task. eval_strategy: optional DistributionStrategy object for "evaluator" task. mode: in which mode this distribute coordinator runs. cluster_spec: a dict, ClusterDef or ClusterSpec specifying servers and roles in a cluster. If not set or empty, fall back to local training. task_type: the current task type, optional if this is a client. task_id: the current task id, optional if this is a client. session_config: an optional `tf.compat.v1.ConfigProto` object which will be passed to `strategy`'s `configure` method and used to create a session. rpc_layer: optional string, the protocol for RPC, e.g. "grpc". Raises: ValueError: if `cluster_spec` is supplied but not a dict or a ClusterDef or a ClusterSpec. Returns: In the client job, return the value returned by `worker_fn` if it is in-graph replication or INDEPENDENT_WORKER mode; return None otherwise. """ tf_config = json.loads(os.environ.get("TF_CONFIG", "{}")) rpc_layer = tf_config.get("rpc_layer", rpc_layer) environment = tf_config.get("environment", None) if not cluster_spec: cluster_spec = tf_config.get("cluster", {}) task_env = tf_config.get("task", {}) if task_env: task_type = task_env.get("type", task_type) task_id = int(task_env.get("index", task_id)) if cluster_spec: # TODO(yuefengz): validate cluster_spec. cluster_spec = multi_worker_util.normalize_cluster_spec(cluster_spec) elif hasattr(strategy.extended, "_cluster_resolver"): cluster_resolver = strategy.extended._cluster_resolver # pylint: disable=protected-access task_type = cluster_resolver.task_type task_id = cluster_resolver.task_id rpc_layer = cluster_resolver.rpc_layer or rpc_layer environment = cluster_resolver.environment cluster_spec = cluster_resolver.cluster_spec() # Setting the session config is necessary for some strategies such as # CollectiveAllReduceStrategy. 
session_config = session_config or config_pb2.ConfigProto( allow_soft_placement=True) if cluster_spec: logging.info( "Running Distribute Coordinator with mode = %r, cluster_spec = %r, " "task_type = %r, task_id = %r, environment = %r, rpc_layer = %r", mode, cluster_spec.as_dict(), task_type, task_id, environment, rpc_layer) if not cluster_spec: # `mode` is ignored in the local case. logging.info("Running local Distribute Coordinator.") _run_single_worker(worker_fn, strategy, None, None, None, session_config, rpc_layer) if eval_fn: _run_single_worker(eval_fn, eval_strategy, None, None, None, session_config, rpc_layer) else: logging.warning("Skipped evaluation since `eval_fn` is not passed in.") elif mode == CoordinatorMode.STANDALONE_CLIENT: if not eval_fn: logging.warning("`eval_fn` is not passed in. The `worker_fn` will be " "used if an \"evaluator\" task exists in the cluster.") eval_fn = eval_fn or worker_fn if not eval_strategy: logging.warning("`eval_strategy` is not passed in. No distribution " "strategy will be used for evaluation.") # The client must know the cluster but servers in the cluster don't have to # know the client. if task_type in [_TaskType.CLIENT, None]: if strategy.extended.experimental_between_graph: return _run_between_graph_client(worker_fn, strategy, eval_fn, eval_strategy, cluster_spec, session_config, rpc_layer) else: return _run_in_graph_client(worker_fn, strategy, eval_fn, eval_strategy, cluster_spec, session_config, rpc_layer) else: # If not a client job, run the standard server. 
_configure_session_config_for_std_servers(strategy, eval_strategy, session_config, cluster_spec, task_type, task_id) server = _run_std_server( cluster_spec=cluster_spec, task_type=task_type, task_id=task_id, session_config=session_config, rpc_layer=rpc_layer, environment=environment) server.join() else: if mode != CoordinatorMode.INDEPENDENT_WORKER: raise ValueError("Unexpected coordinator mode: %r" % mode) if not eval_fn: logging.warning("`eval_fn` is not passed in. The `worker_fn` will be " "used if an \"evaluator\" task exists in the cluster.") eval_fn = eval_fn or worker_fn if not eval_strategy: logging.warning("`eval_strategy` is not passed in. No distribution " "strategy will be used for evaluation.") # Every one starts a standard server, get session config from `configure` # method. _configure_session_config_for_std_servers(strategy, eval_strategy, session_config, cluster_spec, task_type, task_id) if (task_type != _TaskType.EVALUATOR and not getattr(strategy.extended, "_std_server_started", False)): # Right now, with eager mode, context is configured with a std server at # the very beginning while with graph mode the std server is started when # distribute coordinator is called. We should consolidate these two paths. server = _run_std_server( cluster_spec=cluster_spec, task_type=task_type, task_id=task_id, session_config=session_config, rpc_layer=rpc_layer, environment=environment) if task_type in [_TaskType.CHIEF, _TaskType.WORKER]: if strategy.extended.experimental_between_graph: # All jobs run `worker_fn` if between-graph. return _run_single_worker(worker_fn, strategy, cluster_spec, task_type, task_id, session_config, rpc_layer) else: # Only one node runs `worker_fn` if in-graph. 
context = _WorkerContext(strategy, cluster_spec, task_type, task_id) if context.is_chief: return _run_single_worker(worker_fn, strategy, cluster_spec, None, None, session_config, rpc_layer) else: server.join() elif task_type == _TaskType.EVALUATOR: return _run_single_worker(eval_fn, eval_strategy, cluster_spec, task_type, task_id, session_config, rpc_layer) else: if task_type != _TaskType.PS: raise ValueError("Unexpected task_type: %r" % task_type) server.join()
_WorkerContext
python
conda__conda
tests/shell/__init__.py
{ "start": 1493, "end": 2772 }
class ____: name: str | tuple[str, ...] # shell name path: str | None = None # $PATH style path to search for shell exe: str | None = None # shell executable path def __post_init__(self) -> None: if isinstance(self.name, str): pass elif isinstance(self.name, tuple) and all( isinstance(name, str) for name in self.name ): pass else: raise TypeError( f"shell name must be str or tuple of str, not {self.name!r}" ) @classmethod def resolve(cls, value: str | tuple[str, ...] | Shell) -> Shell | None: shell = value if isinstance(value, Shell) else cls(value) # if shell.exe is already set, use it if shell.exe: return shell # find shell executable names = [shell.name] if isinstance(shell.name, str) else list(shell.name) for name in names: if exe := which(name, path=shell.path): return Shell(name=name, exe=exe) raise FileNotFoundError(f"{shell} not found") @contextmanager def interactive(self, *args, **kwargs) -> InteractiveShell: with InteractiveShell(self, *args, **kwargs) as interactive: yield interactive
Shell
python
getsentry__sentry
tests/sentry/backup/test_imports.py
{ "start": 43388, "end": 49090 }
class ____(ImportTestCase): """ Ensures that decryption actually works. We only test one model for each scope, because it's extremely unlikely that a failed decryption will leave only part of the data unmangled. """ @staticmethod def encrypt_json_fixture(tmp_dir) -> tuple[Path, Path]: good_file_path = get_fixture_path("backup", "fresh-install.json") (priv_key_pem, pub_key_pem) = generate_rsa_key_pair() tmp_priv_key_path = Path(tmp_dir).joinpath("key") with open(tmp_priv_key_path, "wb") as f: f.write(priv_key_pem) tmp_pub_key_path = Path(tmp_dir).joinpath("key.pub") with open(tmp_pub_key_path, "wb") as f: f.write(pub_key_pem) with open(good_file_path, "rb") as f: json_data = orjson.loads(f.read()) tmp_tarball_path = Path(tmp_dir).joinpath("input.tar") with open(tmp_tarball_path, "wb") as i, open(tmp_pub_key_path, "rb") as p: pem = p.read() data_encryption_key = Fernet.generate_key() backup_encryptor = Fernet(data_encryption_key) encrypted_json_export = backup_encryptor.encrypt(orjson.dumps(json_data)) dek_encryption_key = serialization.load_pem_public_key(pem, default_backend()) sha256 = hashes.SHA256() mgf = padding.MGF1(algorithm=sha256) oaep_padding = padding.OAEP(mgf=mgf, algorithm=sha256, label=None) encrypted_dek = dek_encryption_key.encrypt(data_encryption_key, oaep_padding) # type: ignore[union-attr] tar_buffer = io.BytesIO() with tarfile.open(fileobj=tar_buffer, mode="w") as tar: json_info = tarfile.TarInfo("export.json") json_info.size = len(encrypted_json_export) tar.addfile(json_info, fileobj=io.BytesIO(encrypted_json_export)) key_info = tarfile.TarInfo("data.key") key_info.size = len(encrypted_dek) tar.addfile(key_info, fileobj=io.BytesIO(encrypted_dek)) pub_info = tarfile.TarInfo("key.pub") pub_info.size = len(pem) tar.addfile(pub_info, fileobj=io.BytesIO(pem)) i.write(tar_buffer.getvalue()) return (tmp_tarball_path, tmp_priv_key_path) def test_user_import_decryption(self) -> None: with tempfile.TemporaryDirectory() as tmp_dir: (tmp_tarball_path, 
tmp_priv_key_path) = self.encrypt_json_fixture(tmp_dir) with assume_test_silo_mode(SiloMode.CONTROL): assert User.objects.count() == 0 with ( open(tmp_tarball_path, "rb") as tmp_tarball_file, open(tmp_priv_key_path, "rb") as tmp_priv_key_file, ): import_in_user_scope( tmp_tarball_file, decryptor=LocalFileDecryptor(tmp_priv_key_file), printer=NOOP_PRINTER, ) with assume_test_silo_mode(SiloMode.CONTROL): assert User.objects.count() > 0 def test_organization_import_decryption(self) -> None: with tempfile.TemporaryDirectory() as tmp_dir: (tmp_tarball_path, tmp_priv_key_path) = self.encrypt_json_fixture(tmp_dir) assert Organization.objects.count() == 0 with ( open(tmp_tarball_path, "rb") as tmp_tarball_file, open(tmp_priv_key_path, "rb") as tmp_priv_key_file, ): import_in_organization_scope( tmp_tarball_file, decryptor=LocalFileDecryptor(tmp_priv_key_file), printer=NOOP_PRINTER, ) assert Organization.objects.count() > 0 def test_config_import_decryption(self) -> None: with tempfile.TemporaryDirectory() as tmp_dir: (tmp_tarball_path, tmp_priv_key_path) = self.encrypt_json_fixture(tmp_dir) with assume_test_silo_mode(SiloMode.CONTROL): assert UserRole.objects.count() == 0 with ( open(tmp_tarball_path, "rb") as tmp_tarball_file, open(tmp_priv_key_path, "rb") as tmp_priv_key_file, ): import_in_config_scope( tmp_tarball_file, decryptor=LocalFileDecryptor(tmp_priv_key_file), printer=NOOP_PRINTER, ) with assume_test_silo_mode(SiloMode.CONTROL): assert UserRole.objects.count() > 0 def test_global_import_decryption(self) -> None: with tempfile.TemporaryDirectory() as tmp_dir: (tmp_tarball_path, tmp_priv_key_path) = self.encrypt_json_fixture(tmp_dir) assert Organization.objects.count() == 0 with assume_test_silo_mode(SiloMode.CONTROL): assert User.objects.count() == 0 assert UserRole.objects.count() == 0 with ( open(tmp_tarball_path, "rb") as tmp_tarball_file, open(tmp_priv_key_path, "rb") as tmp_priv_key_file, ): import_in_global_scope( tmp_tarball_file, 
decryptor=LocalFileDecryptor(tmp_priv_key_file), printer=NOOP_PRINTER, ) assert Organization.objects.count() > 0 with assume_test_silo_mode(SiloMode.CONTROL): assert User.objects.count() > 0 assert UserRole.objects.count() > 0 # Filters should work identically in both silo and monolith modes, so no need to repeat the tests # here.
DecryptionTests
python
spyder-ide__spyder
spyder/plugins/editor/extensions/manager.py
{ "start": 851, "end": 3186 }
class ____(Manager): """Manages the list of editor extensions of the CodeEdit widget.""" def __init__(self, editor): """Initialize and add a reference to the editor.""" super().__init__(editor) self._extensions = {} def add(self, extension): """ Add a extension to the editor. :param extension: The extension instance to add. """ logger.debug('adding extension {}'.format(extension.name)) self._extensions[extension.name] = extension extension.on_install(self.editor) return extension def remove(self, name_or_klass): """ Remove a extension from the editor. :param name_or_klass: The name (or class) of the extension to remove. :returns: The removed extension. """ logger.debug('removing extension {}'.format(name_or_klass)) extension = self.get(name_or_klass) extension.on_uninstall() self._extensions.pop(extension.name) return extension def clear(self): """ Remove all extensions from the editor. All extensions are removed fromlist and deleted. """ while len(self._extensions): key = sorted(list(self._extensions.keys()))[0] self.remove(key) def get(self, name_or_klass): """ Get a extension by name (or class). :param name_or_klass: The name or the class of the extension to get :type name_or_klass: str or type :rtype: spyder.api.mode.EditorExtension """ if not isinstance(name_or_klass, str): name_or_klass = name_or_klass.__name__ return self._extensions[name_or_klass] def keys(self): """ Return the list of the names of the installed extensions. """ return self._extensions.keys() def values(self): """ Return the list of installed extensions. """ return self._extensions.values() def __len__(self): """Return the amount of installed extensions.""" return len(list(self._extensions.values())) def __iter__(self): """ Return the list of extensions. :return: """ return iter([v for k, v in sorted(self._extensions.items())])
EditorExtensionsManager
python
pytorch__pytorch
test/dynamo/test_modules.py
{ "start": 20943, "end": 21418 }
class ____(torch.nn.ModuleDict): def __init__( self, num_layers: int = 3, ) -> None: super().__init__() for i in range(num_layers): self.add_module(f"denselayer{i + 1:d}", _Block()) def forward(self, init_features): features = [init_features] for layer in self.values(): new_features = layer(features) features.append(new_features) return torch.cat(features, 1)
EnumValues
python
getsentry__sentry
src/sentry/services/eventstore/models.py
{ "start": 28707, "end": 28797 }
class ____(string.Template): idpattern = r"(tag:)?[_a-z][_a-z0-9]*"
EventSubjectTemplate
python
keras-team__keras
integration_tests/dataset_tests/boston_housing_test.py
{ "start": 78, "end": 887 }
class ____(testing.TestCase): def test_load_data(self): (x_train, y_train), (x_test, y_test) = boston_housing.load_data() self.assertEqual(x_train.shape[1], 13) self.assertEqual(x_train.shape[0] + x_test.shape[0], 506) def test_seed_reproducibility(self): seed = 123 first_load = boston_housing.load_data(seed=seed) second_load = boston_housing.load_data(seed=seed) self.assertAllClose(first_load[0][0], second_load[0][0]) self.assertAllClose(first_load[1][0], second_load[1][0]) def test_invalid_test_split(self): with self.assertRaises(AssertionError): boston_housing.load_data(test_split=-0.1) with self.assertRaises(AssertionError): boston_housing.load_data(test_split=1.0)
BostonHousingTest
python
doocs__leetcode
solution/3000-3099/3077.Maximum Strength of K Disjoint Subarrays/Solution.py
{ "start": 0, "end": 657 }
class ____: def maximumStrength(self, nums: List[int], k: int) -> int: n = len(nums) f = [[[-inf, -inf] for _ in range(k + 1)] for _ in range(n + 1)] f[0][0][0] = 0 for i, x in enumerate(nums, 1): for j in range(k + 1): sign = 1 if j & 1 else -1 f[i][j][0] = max(f[i - 1][j][0], f[i - 1][j][1]) f[i][j][1] = max(f[i][j][1], f[i - 1][j][1] + sign * x * (k - j + 1)) if j: f[i][j][1] = max( f[i][j][1], max(f[i - 1][j - 1]) + sign * x * (k - j + 1) ) return max(f[n][k])
Solution
python
dagster-io__dagster
examples/docs_projects/project_ml/src/project_ml/defs/types.py
{ "start": 50, "end": 191 }
class ____(TypedDict): model: torch.nn.Module config: dict accuracy: float timestamp: str model_architecture: str
ModelData
python
facelessuser__pymdown-extensions
tests/test_extensions/test_blocks/test_definition.py
{ "start": 61, "end": 3002 }
class ____(util.MdCase): """Test Blocks admonitions cases.""" extension = ['pymdownx.blocks.definition', 'pymdownx.blocks.html'] def test_def(self): """Test definition.""" self.check_markdown( R''' /// define Apple - Pomaceous fruit of plants of the genus Malus in the family Rosaceae. /// ''', R''' <dl> <dt>Apple</dt> <dd>Pomaceous fruit of plants of the genus Malus in the family Rosaceae.</dd> </dl> ''', True ) def test_multi_def(self): """Test multiple definitions.""" self.check_markdown( R''' /// define Apple - Pomaceous fruit of plants of the genus Malus in the family Rosaceae. Orange - The fruit of an evergreen tree of the genus Citrus. /// ''', R''' <dl> <dt>Apple</dt> <dd>Pomaceous fruit of plants of the genus Malus in the family Rosaceae.</dd> <dt>Orange</dt> <dd>The fruit of an evergreen tree of the genus Citrus.</dd> </dl> ''', True ) def test_multi_term(self): """Test definitions with multiple terms.""" self.check_markdown( R''' /// define Term 1 Term 2 - Definition a Term 3 - Definition b /// ''', r''' <dl> <dt>Term 1</dt> <dt>Term 2</dt> <dd>Definition a</dd> <dt>Term 3</dt> <dd>Definition b</dd> </dl> ''', True ) def test_handling_of_dt_dd(self): """Test that we ignore `dt` and `dd` tags.""" self.check_markdown( R''' /// define //// html | dt term //// //// html | dd Some description. //// /// ''', R''' <dl> <dt>term</dt> <dd>Some description.</dd> </dl> ''', True ) def test_non_paragraph_block(self): """Test that we sanely handle a non-paragraph term.""" self.check_markdown( R''' /// define > A non non-paragraph term - description /// ''', R''' <dl> <dt> <blockquote> <p>A non non-paragraph term</p> </blockquote> </dt> <dd>description</dd> </dl> ''', True )
TestBlocksDefinition
python
pandas-dev__pandas
pandas/core/accessor.py
{ "start": 1339, "end": 5690 }
class ____: """ Abstract base class for delegating methods/properties. """ def _delegate_property_get(self, name: str, *args, **kwargs): raise TypeError(f"You cannot access the property {name}") def _delegate_property_set(self, name: str, value, *args, **kwargs) -> None: raise TypeError(f"The property {name} cannot be set") def _delegate_method(self, name: str, *args, **kwargs): raise TypeError(f"You cannot call method {name}") @classmethod def _add_delegate_accessors( cls, delegate, accessors: list[str], typ: str, overwrite: bool = False, accessor_mapping: Callable[[str], str] = lambda x: x, raise_on_missing: bool = True, ) -> None: """ Add accessors to cls from the delegate class. Parameters ---------- cls Class to add the methods/properties to. delegate Class to get methods/properties and docstrings. accessors : list of str List of accessors to add. typ : {'property', 'method'} overwrite : bool, default False Overwrite the method/property in the target class if it exists. accessor_mapping: Callable, default lambda x: x Callable to map the delegate's function to the cls' function. raise_on_missing: bool, default True Raise if an accessor does not exist on delegate. False skips the missing accessor. 
""" def _create_delegator_property(name: str): def _getter(self): return self._delegate_property_get(name) def _setter(self, new_values): return self._delegate_property_set(name, new_values) _getter.__name__ = name _setter.__name__ = name return property( fget=_getter, fset=_setter, doc=getattr(delegate, accessor_mapping(name)).__doc__, ) def _create_delegator_method(name: str): method = getattr(delegate, accessor_mapping(name)) @functools.wraps(method) def f(self, *args, **kwargs): return self._delegate_method(name, *args, **kwargs) return f for name in accessors: if ( not raise_on_missing and getattr(delegate, accessor_mapping(name), None) is None ): continue if typ == "property": f = _create_delegator_property(name) else: f = _create_delegator_method(name) # don't overwrite existing methods/properties if overwrite or not hasattr(cls, name): setattr(cls, name, f) def delegate_names( delegate, accessors: list[str], typ: str, overwrite: bool = False, accessor_mapping: Callable[[str], str] = lambda x: x, raise_on_missing: bool = True, ): """ Add delegated names to a class using a class decorator. This provides an alternative usage to directly calling `_add_delegate_accessors` below a class definition. Parameters ---------- delegate : object The class to get methods/properties & docstrings. accessors : Sequence[str] List of accessor to add. typ : {'property', 'method'} overwrite : bool, default False Overwrite the method/property in the target class if it exists. accessor_mapping: Callable, default lambda x: x Callable to map the delegate's function to the cls' function. raise_on_missing: bool, default True Raise if an accessor does not exist on delegate. False skips the missing accessor. Returns ------- callable A class decorator. Examples -------- @delegate_names(Categorical, ["categories", "ordered"], "property") class CategoricalAccessor(PandasDelegate): [...] 
""" def add_delegate_accessors(cls): cls._add_delegate_accessors( delegate, accessors, typ, overwrite=overwrite, accessor_mapping=accessor_mapping, raise_on_missing=raise_on_missing, ) return cls return add_delegate_accessors
PandasDelegate
python
kamyu104__LeetCode-Solutions
Python/maximize-spanning-tree-stability-with-upgrades.py
{ "start": 808, "end": 1657 }
class ____(object): def maxStability(self, n, edges, k): """ :type n: int :type edges: List[List[int]] :type k: int :rtype: int """ uf = UnionFind(n) cnt = 0 result = float("inf") for u, v, s, m in edges: if not m: continue if not uf.union_set(u, v): return -1 cnt += 1 result = min(result, s) edges.sort(key=lambda x: -x[2]) for u, v, s, m in edges: if m: continue if not uf.union_set(u, v): continue cnt += 1 if cnt == (n-1)-k: result = min(result, s) elif cnt == n-1: result = min(result, 2*s) return result if cnt == n-1 else -1
Solution
python
dask__dask
dask/diagnostics/profile.py
{ "start": 6428, "end": 8507 }
class ____(Process): """Background process for tracking resource usage""" def __init__(self, dt=1): super().__init__() self.daemon = True self.dt = dt self.parent_pid = current_process().pid self.parent_conn, self.child_conn = Pipe() def shutdown(self): if not self.parent_conn.closed: self.parent_conn.send("shutdown") self.parent_conn.close() self.join() def _update_pids(self, pid): return [self.parent] + [ p for p in self.parent.children() if p.pid != pid and p.status() != "zombie" ] def run(self): psutil = import_required( "psutil", "Tracking resource usage requires `psutil` to be installed" ) self.parent = psutil.Process(self.parent_pid) pid = current_process() data = [] while True: try: msg = self.child_conn.recv() except KeyboardInterrupt: continue if msg == "shutdown": break elif msg == "collect": ps = self._update_pids(pid) while not data or not self.child_conn.poll(): tic = default_timer() mem = cpu = 0 for p in ps: try: mem2 = p.memory_info().rss cpu2 = p.cpu_percent() except Exception: # could be a few different exceptions pass else: # Only increment if both were successful mem += mem2 cpu += cpu2 data.append((tic, mem / 1e6, cpu)) sleep(self.dt) elif msg == "send_data": self.child_conn.send(data) data = [] self.child_conn.close() CacheData = namedtuple( "CacheData", ("key", "task", "metric", "cache_time", "free_time") )
_Tracker
python
huggingface__transformers
src/transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py
{ "start": 54747, "end": 58611 }
class ____(RobertaPreLayerNormPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.roberta_prelayernorm = RobertaPreLayerNormModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @can_return_tuple @auto_docstring # Copied from transformers.models.roberta.modeling_roberta.RobertaForQuestionAnswering.forward with roberta->roberta_prelayernorm def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple[torch.Tensor], QuestionAnsweringModelOutput]: r""" token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. This parameter can only be used when the model is initialized with `type_vocab_size` parameter with value >= 2. All the value in this tensor should be always < type_vocab_size. 
[What are token type IDs?](../glossary#token-type-ids) """ outputs = self.roberta_prelayernorm( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, return_dict=True, **kwargs, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) __all__ = [ "RobertaPreLayerNormForCausalLM", "RobertaPreLayerNormForMaskedLM", "RobertaPreLayerNormForMultipleChoice", "RobertaPreLayerNormForQuestionAnswering", "RobertaPreLayerNormForSequenceClassification", "RobertaPreLayerNormForTokenClassification", "RobertaPreLayerNormModel", "RobertaPreLayerNormPreTrainedModel", ]
RobertaPreLayerNormForQuestionAnswering
python
huggingface__transformers
src/transformers/modeling_gguf_pytorch_utils.py
{ "start": 8486, "end": 8866 }
class ____(TensorProcessor): def __init__(self, config=None): super().__init__(config=config) # ref : https://github.com/ggerganov/llama.cpp/blob/master/convert_hf_to_gguf.py#L4666 def process(self, weights, name, **kwargs): if "norm.weight" in name: weights = weights - 1 return GGUFTensor(weights, name, {})
NemotronTensorProcessor
python
keon__algorithms
tests/test_map.py
{ "start": 4327, "end": 4668 }
class ____(unittest.TestCase): def test_word_pattern(self): self.assertTrue(word_pattern("abba", "dog cat cat dog")) self.assertFalse(word_pattern("abba", "dog cat cat fish")) self.assertFalse(word_pattern("abba", "dog dog dog dog")) self.assertFalse(word_pattern("aaaa", "dog cat cat dog"))
TestWordPattern
python
mlflow__mlflow
mlflow/store/artifact/databricks_artifact_repo_resources.py
{ "start": 832, "end": 892 }
class ____: name: str value: str @dataclass
HttpHeader
python
dagster-io__dagster
python_modules/dagster-graphql/dagster_graphql/schema/logs/events.py
{ "start": 2806, "end": 2973 }
class ____(graphene.ObjectType): class Meta: interfaces = (GrapheneMessageEvent, GrapheneRunEvent) name = "RunEnqueuedEvent"
GrapheneRunEnqueuedEvent
python
realpython__materials
python-script-structure/iris_summary.py
{ "start": 920, "end": 3889 }
class ____: data: pd.Series mean: float = field(init=False) median: float = field(init=False) mm_diff: float = field(init=False) def __post_init__(self): if not isinstance(self.data, pd.Series): raise TypeError( f"data must be a pandas Series, not {type(self.data)}" ) self.mean = self.data.mean() self.median = self.data.median() self.mm_diff = self.mean - self.median def __str__(self): return pformat(self) @click.command() @click.option( "--operation", default=Operation.SUMMARY, type=click.Choice(Operation), help="Operation to perform: variable summary or dataset metadata", ) @click.option( "--variable", type=click.Choice(IrisVariable), help="Variable to summarize.", required=False, ) def main(operation, variable): """Fetch the Iris dataset from UCI.""" iris = fetch_iris() if operation is Operation.SUMMARY: if variable: table = generate_table(iris, variable) logging.info(format_rich_for_log(table)) logging.info(f"{IrisVariable(variable)} summary:") logging.info( DescriptiveStatistics( iris.data.features[IrisVariable(variable).value] ) ) else: logging.info("All variables:") logging.info(pformat(iris.variables)) elif operation is Operation.METADATA: logging.info("Metadata summary:") logging.info(pformat(iris.metadata)) def fetch_iris(): """Return the Iris dataset from the UCI ML Repository.""" logging.info("Fetching Iris dataset...") try: iris_data = fetch_ucirepo(id=UCIDataset.IRIS.value) assert "data" in iris_data.keys(), ( "Object does not have expected structure" ) except Exception as e: logging.critical(f"Failed to correctly fetch Iris dataset: {e}") sys.exit(1) else: logging.info("Iris dataset fetched successfully") return iris_data def generate_table(dataset, variable): """Generate a formatted table of descriptive statistics for a variable.""" column = IrisVariable(variable).value stats = DescriptiveStatistics(dataset.data.features[column]) table = Table(title=f"{column} summary") table.add_column("Metric", style="cyan", justify="right") 
table.add_column("Value", style="magenta") table.add_row("Mean", f"{stats.mean:.2f}") table.add_row("Median", f"{stats.median:.2f}") table.add_row("Mean-Median Diff", f"{stats.mm_diff:.2f}") return table def format_rich_for_log(renderable, width=100): """Render a rich object to a plain text string suitable for logging.""" console = Console(width=width) with console.capture() as capture: console.print(renderable) return Text.from_ansi(capture.get()) if __name__ == "__main__": main()
DescriptiveStatistics
python
getsentry__sentry
src/sentry/api/endpoints/relay/register_response.py
{ "start": 845, "end": 976 }
class ____(RelayIdSerializer): token = serializers.CharField(required=True) @region_silo_endpoint
RelayRegisterResponseSerializer
python
dagster-io__dagster
python_modules/dagster/dagster_tests/components_tests/unit_tests/test_component_decl.py
{ "start": 433, "end": 1045 }
class ____(ComponentTree): def set_root_decl(self, root_decl: ComponentDecl): setattr(self, "_root_decl", root_decl) def find_root_decl(self): if hasattr(self, "_root_decl"): return getattr(self, "_root_decl") return super().find_root_decl() @pytest.fixture def component_tree() -> MockComponentTree: # add file parent to sys path to make it a package sys.path.append(str(Path(__file__).parent)) return MockComponentTree( defs_module=importlib.import_module(Path(__file__).stem), project_root=Path(__file__).parent, )
MockComponentTree
python
airbytehq__airbyte
airbyte-integrations/connectors/source-twilio/unit_tests/test_streams.py
{ "start": 1267, "end": 3938 }
class ____: def test_next_page_token(self, requests_mock): accounts_page_1_json = { "accounts": [ { "sid": "AC123", "date_created": "2022-01-01T00:00:00Z", "subresource_uris": {"addresses": "/2010-04-01/Accounts/AC123/Addresses.json"}, } ], "next_page_uri": "/2010-04-01/Accounts.json?PageSize=1000&Page=2&PageToken=PAAD42931b949c0dedce94b2f93847fdcf95", } requests_mock.get(f"{BASE}/Accounts.json", json=accounts_page_1_json, status_code=200) accounts_page_2_json = { "accounts": [ { "sid": "AC124", "date_created": "2022-01-01T00:00:00Z", "subresource_uris": {"addresses": "/2010-04-01/Accounts/AC123/Addresses.json"}, } ] } requests_mock.get( f"{BASE}/Accounts.json?PageSize=1000&Page=2&PageToken=PAAD42931b949c0dedce94b2f93847fdcf95", json=accounts_page_2_json, status_code=200, ) records = read_from_stream(TEST_CONFIG, "accounts", SyncMode.full_refresh).records assert len(records) == 2 def test_backoff_time(self, requests_mock, mocker): sleep_mock = mocker.patch("time.sleep") requests_mock.register_uri( "GET", f"{BASE}/Accounts.json", [ {"status_code": 429, "json": {}, "headers": {"retry-after": "5.5"}}, {"status_code": 200, "json": ACCOUNTS_JSON}, ], ) records = read_from_stream(TEST_CONFIG, "accounts", SyncMode.full_refresh).records assert len(records) == 1 assert sleep_mock.called sleep_mock.assert_any_call(pytest.approx(6.5)) def test_transform_function(self, requests_mock): accounts_json = { "accounts": [ { "sid": "AC123", "date_created": "2022-01-01T00:00:00Z", "date_updated": "Fri, 11 Dec 2020 04:28:40 +0000", "subresource_uris": {"addresses": "/2010-04-01/Accounts/AC123/Addresses.json"}, } ] } requests_mock.get(f"{BASE}/Accounts.json", json=accounts_json, status_code=200) records = read_from_stream(TEST_CONFIG, "accounts", SyncMode.full_refresh).records assert len(records) == 1 assert records[0].record.data["date_created"] == "2022-01-01T00:00:00Z" assert records[0].record.data["date_updated"] == "2020-12-11T04:28:40Z"
TestTwilioStream
python
django__django
tests/view_tests/tests/test_debug.py
{ "start": 19529, "end": 21030 }
class ____(SimpleTestCase): def test_400(self): # When DEBUG=True, technical_500_template() is called. with self.assertLogs("django.security", "WARNING"): response = self.client.get("/raises400/") self.assertContains(response, '<div class="context" id="', status_code=400) def test_400_bad_request(self): # When DEBUG=True, technical_500_template() is called. with self.assertLogs("django.request", "WARNING") as cm: response = self.client.get("/raises400_bad_request/") self.assertContains(response, '<div class="context" id="', status_code=400) self.assertEqual( cm.records[0].getMessage(), "Malformed request syntax: /raises400_bad_request/", ) def test_403(self): response = self.client.get("/raises403/") self.assertContains(response, "<h1>403 Forbidden</h1>", status_code=403) def test_404(self): response = self.client.get("/raises404/") self.assertEqual(response.status_code, 404) def test_template_not_found_error(self): # Raises a TemplateDoesNotExist exception and shows the debug view. url = reverse( "raises_template_does_not_exist", kwargs={"path": "notfound.html"} ) with self.assertLogs("django.request", "ERROR"): response = self.client.get(url) self.assertContains(response, '<div class="context" id="', status_code=500)
NonDjangoTemplatesDebugViewTests
python
spyder-ide__spyder
external-deps/qtconsole/qtconsole/tests/test_completion_widget.py
{ "start": 272, "end": 852 }
class ____(object): """ Context manager for tempfile.mkdtemp(). This class is available in python +v3.2. See: https://gist.github.com/cpelley/10e2eeaf60dacc7956bb """ def __enter__(self): self.dir_name = tempfile.mkdtemp() return self.dir_name def __exit__(self, exc_type, exc_value, traceback): shutil.rmtree(self.dir_name) TemporaryDirectory = getattr(tempfile, 'TemporaryDirectory', TemporaryDirectory) @pytest.mark.skipif(no_display, reason="Doesn't work without a display")
TemporaryDirectory
python
huggingface__transformers
src/transformers/models/pop2piano/feature_extraction_pop2piano.py
{ "start": 1388, "end": 19974 }
class ____(SequenceFeatureExtractor): r""" Constructs a Pop2Piano feature extractor. This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. This class extracts rhythm and preprocesses the audio before it is passed to the model. First the audio is passed to `RhythmExtractor2013` algorithm which extracts the beat_times, beat positions and estimates their confidence as well as tempo in bpm, then beat_times is interpolated and to get beatsteps. Later we calculate extrapolated_beatsteps from it to be used in tokenizer. On the other hand audio is resampled to self.sampling_rate and preprocessed and then log mel spectogram is computed from that to be used in our transformer model. Args: sampling_rate (`int`, *optional*, defaults to 22050): Target Sampling rate of audio signal. It's the sampling rate that we forward to the model. padding_value (`int`, *optional*, defaults to 0): Padding value used to pad the audio. Should correspond to silences. window_size (`int`, *optional*, defaults to 4096): Length of the window in samples to which the Fourier transform is applied. hop_length (`int`, *optional*, defaults to 1024): Step size between each window of the waveform, in samples. min_frequency (`float`, *optional*, defaults to 10.0): Lowest frequency that will be used in the log-mel spectrogram. feature_size (`int`, *optional*, defaults to 512): The feature dimension of the extracted features. num_bars (`int`, *optional*, defaults to 2): Determines interval between each sequence. 
""" model_input_names = ["input_features", "beatsteps", "extrapolated_beatstep"] def __init__( self, sampling_rate: int = 22050, padding_value: int = 0, window_size: int = 4096, hop_length: int = 1024, min_frequency: float = 10.0, feature_size: int = 512, num_bars: int = 2, **kwargs, ): super().__init__( feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs, ) self.sampling_rate = sampling_rate self.padding_value = padding_value self.window_size = window_size self.hop_length = hop_length self.min_frequency = min_frequency self.feature_size = feature_size self.num_bars = num_bars self.mel_filters = mel_filter_bank( num_frequency_bins=(self.window_size // 2) + 1, num_mel_filters=self.feature_size, min_frequency=self.min_frequency, max_frequency=float(self.sampling_rate // 2), sampling_rate=self.sampling_rate, norm=None, mel_scale="htk", ) def mel_spectrogram(self, sequence: np.ndarray): """ Generates MelSpectrogram. Args: sequence (`numpy.ndarray`): The sequence of which the mel-spectrogram will be computed. """ mel_specs = [] for seq in sequence: window = np.hanning(self.window_size + 1)[:-1] mel_specs.append( spectrogram( waveform=seq, window=window, frame_length=self.window_size, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters, ) ) mel_specs = np.array(mel_specs) return mel_specs def extract_rhythm(self, audio: np.ndarray): """ This algorithm(`RhythmExtractor2013`) extracts the beat positions and estimates their confidence as well as tempo in bpm for an audio signal. For more information please visit https://essentia.upf.edu/reference/std_RhythmExtractor2013.html . Args: audio(`numpy.ndarray`): raw audio waveform which is passed to the Rhythm Extractor. 
""" requires_backends(self, ["essentia"]) essentia_tracker = essentia.standard.RhythmExtractor2013(method="multifeature") bpm, beat_times, confidence, estimates, essentia_beat_intervals = essentia_tracker(audio) return bpm, beat_times, confidence, estimates, essentia_beat_intervals def interpolate_beat_times( self, beat_times: numpy.ndarray, steps_per_beat: numpy.ndarray, n_extend: numpy.ndarray ): """ This method takes beat_times and then interpolates that using `scipy.interpolate.interp1d` and the output is then used to convert raw audio to log-mel-spectrogram. Args: beat_times (`numpy.ndarray`): beat_times is passed into `scipy.interpolate.interp1d` for processing. steps_per_beat (`int`): used as an parameter to control the interpolation. n_extend (`int`): used as an parameter to control the interpolation. """ requires_backends(self, ["scipy"]) beat_times_function = scipy.interpolate.interp1d( np.arange(beat_times.size), beat_times, bounds_error=False, fill_value="extrapolate", ) ext_beats = beat_times_function( np.linspace(0, beat_times.size + n_extend - 1, beat_times.size * steps_per_beat + n_extend) ) return ext_beats def preprocess_mel(self, audio: np.ndarray, beatstep: np.ndarray): """ Preprocessing for log-mel-spectrogram Args: audio (`numpy.ndarray` of shape `(audio_length, )` ): Raw audio waveform to be processed. beatstep (`numpy.ndarray`): Interpolated values of the raw audio. If beatstep[0] is greater than 0.0, then it will be shifted by the value at beatstep[0]. """ if audio is not None and len(audio.shape) != 1: raise ValueError( f"Expected `audio` to be a single channel audio input of shape `(n, )` but found shape {audio.shape}." 
) if beatstep[0] > 0.0: beatstep = beatstep - beatstep[0] num_steps = self.num_bars * 4 num_target_steps = len(beatstep) extrapolated_beatstep = self.interpolate_beat_times( beat_times=beatstep, steps_per_beat=1, n_extend=(self.num_bars + 1) * 4 + 1 ) sample_indices = [] max_feature_length = 0 for i in range(0, num_target_steps, num_steps): start_idx = i end_idx = min(i + num_steps, num_target_steps) start_sample = int(extrapolated_beatstep[start_idx] * self.sampling_rate) end_sample = int(extrapolated_beatstep[end_idx] * self.sampling_rate) sample_indices.append((start_sample, end_sample)) max_feature_length = max(max_feature_length, end_sample - start_sample) padded_batch = [] for start_sample, end_sample in sample_indices: feature = audio[start_sample:end_sample] padded_feature = np.pad( feature, ((0, max_feature_length - feature.shape[0]),), "constant", constant_values=0, ) padded_batch.append(padded_feature) padded_batch = np.asarray(padded_batch) return padded_batch, extrapolated_beatstep def _pad(self, features: np.ndarray, add_zero_line=True): features_shapes = [each_feature.shape for each_feature in features] attention_masks, padded_features = [], [] for i, each_feature in enumerate(features): # To pad "input_features". if len(each_feature.shape) == 3: features_pad_value = max([*zip(*features_shapes)][1]) - features_shapes[i][1] attention_mask = np.ones(features_shapes[i][:2], dtype=np.int64) feature_padding = ((0, 0), (0, features_pad_value), (0, 0)) attention_mask_padding = (feature_padding[0], feature_padding[1]) # To pad "beatsteps" and "extrapolated_beatstep". 
else: each_feature = each_feature.reshape(1, -1) features_pad_value = max([*zip(*features_shapes)][0]) - features_shapes[i][0] attention_mask = np.ones(features_shapes[i], dtype=np.int64).reshape(1, -1) feature_padding = attention_mask_padding = ((0, 0), (0, features_pad_value)) each_padded_feature = np.pad(each_feature, feature_padding, "constant", constant_values=self.padding_value) attention_mask = np.pad( attention_mask, attention_mask_padding, "constant", constant_values=self.padding_value ) if add_zero_line: # if it is batched then we separate each examples using zero array zero_array_len = max([*zip(*features_shapes)][1]) # we concatenate the zero array line here each_padded_feature = np.concatenate( [each_padded_feature, np.zeros([1, zero_array_len, self.feature_size])], axis=0 ) attention_mask = np.concatenate( [attention_mask, np.zeros([1, zero_array_len], dtype=attention_mask.dtype)], axis=0 ) padded_features.append(each_padded_feature) attention_masks.append(attention_mask) padded_features = np.concatenate(padded_features, axis=0).astype(np.float32) attention_masks = np.concatenate(attention_masks, axis=0).astype(np.int64) return padded_features, attention_masks def pad( self, inputs: BatchFeature, is_batched: bool, return_attention_mask: bool, return_tensors: Optional[Union[str, TensorType]] = None, ): """ Pads the inputs to same length and returns attention_mask. Args: inputs (`BatchFeature`): Processed audio features. is_batched (`bool`): Whether inputs are batched or not. return_attention_mask (`bool`): Whether to return attention mask or not. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. If nothing is specified, it will return list of `np.ndarray` arrays. 
Return: `BatchFeature` with attention_mask, attention_mask_beatsteps and attention_mask_extrapolated_beatstep added to it: - **attention_mask** numpy.ndarray of shape `(batch_size, max_input_features_seq_length)` -- Example : 1, 1, 1, 0, 0 (audio 1, also here it is padded to max length of 5 that's why there are 2 zeros at the end indicating they are padded) 0, 0, 0, 0, 0 (zero pad to separate audio 1 and 2) 1, 1, 1, 1, 1 (audio 2) 0, 0, 0, 0, 0 (zero pad to separate audio 2 and 3) 1, 1, 1, 1, 1 (audio 3) - **attention_mask_beatsteps** numpy.ndarray of shape `(batch_size, max_beatsteps_seq_length)` - **attention_mask_extrapolated_beatstep** numpy.ndarray of shape `(batch_size, max_extrapolated_beatstep_seq_length)` """ processed_features_dict = {} for feature_name, feature_value in inputs.items(): if feature_name == "input_features": padded_feature_values, attention_mask = self._pad(feature_value, add_zero_line=True) processed_features_dict[feature_name] = padded_feature_values if return_attention_mask: processed_features_dict["attention_mask"] = attention_mask else: padded_feature_values, attention_mask = self._pad(feature_value, add_zero_line=False) processed_features_dict[feature_name] = padded_feature_values if return_attention_mask: processed_features_dict[f"attention_mask_{feature_name}"] = attention_mask # If we are processing only one example, we should remove the zero array line since we don't need it to # separate examples from each other. if not is_batched and not return_attention_mask: processed_features_dict["input_features"] = processed_features_dict["input_features"][:-1, ...] 
outputs = BatchFeature(processed_features_dict, tensor_type=return_tensors) return outputs def __call__( self, audio: Union[np.ndarray, list[float], list[np.ndarray], list[list[float]]], sampling_rate: Union[int, list[int]], steps_per_beat: int = 2, resample: Optional[bool] = True, return_attention_mask: Optional[bool] = False, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchFeature: """ Main method to featurize and prepare for the model. Args: audio (`np.ndarray`, `List`): The audio or batch of audio to be processed. Each audio can be a numpy array, a list of float values, a list of numpy arrays or a list of list of float values. sampling_rate (`int`): The sampling rate at which the `audio` input was sampled. It is strongly recommended to pass `sampling_rate` at the forward call to prevent silent errors. steps_per_beat (`int`, *optional*, defaults to 2): This is used in interpolating `beat_times`. resample (`bool`, *optional*, defaults to `True`): Determines whether to resample the audio to `sampling_rate` or not before processing. Must be True during inference. return_attention_mask (`bool` *optional*, defaults to `False`): Denotes if attention_mask for input_features, beatsteps and extrapolated_beatstep will be given as output or not. Automatically set to True for batched inputs. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. If nothing is specified, it will return list of `np.ndarray` arrays. 
""" requires_backends(self, ["librosa"]) is_batched = isinstance(audio, (list, tuple)) and isinstance(audio[0], (np.ndarray, tuple, list)) if is_batched: # This enables the user to process files of different sampling_rate at same time if not isinstance(sampling_rate, list): raise ValueError( "Please give sampling_rate of each audio separately when you are passing multiple raw_audios at the same time. " f"Received {sampling_rate}, expected [audio_1_sr, ..., audio_n_sr]." ) return_attention_mask = True if return_attention_mask is None else return_attention_mask else: audio = [audio] sampling_rate = [sampling_rate] return_attention_mask = False if return_attention_mask is None else return_attention_mask batch_input_features, batch_beatsteps, batch_ext_beatstep = [], [], [] for single_raw_audio, single_sampling_rate in zip(audio, sampling_rate): bpm, beat_times, confidence, estimates, essentia_beat_intervals = self.extract_rhythm( audio=single_raw_audio ) beatsteps = self.interpolate_beat_times(beat_times=beat_times, steps_per_beat=steps_per_beat, n_extend=1) if self.sampling_rate != single_sampling_rate and self.sampling_rate is not None: if resample: # Change sampling_rate to self.sampling_rate single_raw_audio = librosa.core.resample( single_raw_audio, orig_sr=single_sampling_rate, target_sr=self.sampling_rate, res_type="kaiser_best", ) else: warnings.warn( f"The sampling_rate of the provided audio is different from the target sampling_rate " f"of the Feature Extractor, {self.sampling_rate} vs {single_sampling_rate}. " f"In these cases it is recommended to use `resample=True` in the `__call__` method to " f"get the optimal behaviour." 
) single_sampling_rate = self.sampling_rate start_sample = int(beatsteps[0] * single_sampling_rate) end_sample = int(beatsteps[-1] * single_sampling_rate) input_features, extrapolated_beatstep = self.preprocess_mel( single_raw_audio[start_sample:end_sample], beatsteps - beatsteps[0] ) mel_specs = self.mel_spectrogram(input_features.astype(np.float32)) # apply np.log to get log mel-spectrograms log_mel_specs = np.log(np.clip(mel_specs, a_min=1e-6, a_max=None)) input_features = np.transpose(log_mel_specs, (0, -1, -2)) batch_input_features.append(input_features) batch_beatsteps.append(beatsteps) batch_ext_beatstep.append(extrapolated_beatstep) output = BatchFeature( { "input_features": batch_input_features, "beatsteps": batch_beatsteps, "extrapolated_beatstep": batch_ext_beatstep, } ) output = self.pad( output, is_batched=is_batched, return_attention_mask=return_attention_mask, return_tensors=return_tensors, ) return output __all__ = ["Pop2PianoFeatureExtractor"]
Pop2PianoFeatureExtractor
python
dagster-io__dagster
python_modules/dagster/dagster_tests/components_tests/test_component_scaffolding.py
{ "start": 564, "end": 662 }
class ____(BaseModel): name: str age: int is_active: bool
TestParamsModelWithoutDefaults
python
openai__openai-python
src/openai/types/upload_create_params.py
{ "start": 273, "end": 1134 }
class ____(TypedDict, total=False): bytes: Required[int] """The number of bytes in the file you are uploading.""" filename: Required[str] """The name of the file to upload.""" mime_type: Required[str] """The MIME type of the file. This must fall within the supported MIME types for your file purpose. See the supported MIME types for assistants and vision. """ purpose: Required[FilePurpose] """The intended purpose of the uploaded file. See the [documentation on File purposes](https://platform.openai.com/docs/api-reference/files/create#files-create-purpose). """ expires_after: ExpiresAfter """The expiration policy for a file. By default, files with `purpose=batch` expire after 30 days and all other files are persisted until they are manually deleted. """
UploadCreateParams
python
keras-team__keras
keras/src/ops/nn.py
{ "start": 72637, "end": 76348 }
class ____(Operation): def __init__( self, strategy="greedy", beam_width=100, top_paths=1, merge_repeated=True, mask_index=0, *, name=None, ): super().__init__(name=name) self.strategy = strategy self.beam_width = beam_width self.top_paths = top_paths self.merge_repeated = merge_repeated self.mask_index = mask_index def call(self, inputs, sequence_lengths): return backend.nn.ctc_decode( inputs, sequence_lengths, strategy=self.strategy, beam_width=self.beam_width, top_paths=self.top_paths, merge_repeated=self.merge_repeated, mask_index=self.mask_index, ) def compute_output_spec(self, inputs, sequence_lengths): inputs_shape = inputs.shape if self.strategy == "greedy": top_paths = 1 else: top_paths = self.top_paths dtype = backend.result_type(inputs.dtype, "float32") return ( KerasTensor( (top_paths, inputs_shape[0], inputs_shape[1]), dtype="int32" ), KerasTensor((inputs_shape[0], top_paths), dtype=dtype), ) @keras_export( [ "keras.ops.ctc_decode", "keras.ops.nn.ctc_decode", ] ) def ctc_decode( inputs, sequence_lengths, strategy="greedy", beam_width=100, top_paths=1, merge_repeated=True, mask_index=0, ): """Decodes the output of a CTC model. Args: inputs: A tensor of shape `(batch_size, max_length, num_classes)` containing the logits (the output of the model). They should *not* be normalized via softmax. sequence_lengths: A tensor of shape `(batch_size,)` containing the sequence lengths for the batch. strategy: A string for the decoding strategy. Supported values are `"greedy"` and `"beam_search"`. beam_width: An integer scalar beam width used in beam search. Defaults to 100. top_paths: An integer scalar, the number of top paths to return. Defaults to 1. merge_repeated: A boolean scalar, whether to merge repeated labels in the output. Defaults to `True`. mask_index: An integer scalar, the index of the mask character in the vocabulary. Defaults to `0`. Returns: A tuple containing: - The tensor representing the list of decoded sequences. 
If `strategy="greedy"`, the shape is `(1, batch_size, max_length)`. If `strategy="beam_search"`, the shape is `(top_paths, batch_size, max_length)`. Note that: `-1` indicates the blank label. - If `strategy="greedy"`, a tensor of shape `(batch_size, 1)` representing the negative of the sum of the probability logits for each sequence. If `strategy="beam_seatch"`, a tensor of shape `(batch_size, top_paths)` representing the log probability for each sequence. """ if any_symbolic_tensors((inputs, sequence_lengths)): return CTCDecode( strategy=strategy, beam_width=beam_width, top_paths=top_paths, merge_repeated=merge_repeated, mask_index=mask_index, ).symbolic_call(inputs, sequence_lengths) return backend.nn.ctc_decode( inputs=inputs, sequence_lengths=sequence_lengths, strategy=strategy, beam_width=beam_width, top_paths=top_paths, merge_repeated=merge_repeated, mask_index=mask_index, )
CTCDecode
python
ansible__ansible
test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/action/plugin_lookup.py
{ "start": 119, "end": 1262 }
class ____(ActionBase): TRANSFERS_FILES = False _VALID_ARGS = frozenset(('type', 'name')) def run(self, tmp=None, task_vars=None): if task_vars is None: task_vars = dict() result = super(ActionModule, self).run(None, task_vars) plugin_type = self._task.args.get('type') name = self._task.args.get('name') result = dict(changed=False, collection_list=self._task.collections) if all([plugin_type, name]): attr_name = '{0}_loader'.format(plugin_type) typed_loader = getattr(loader, attr_name, None) if not typed_loader: return (dict(failed=True, msg='invalid plugin type {0}'.format(plugin_type))) context = typed_loader.find_plugin_with_context(name, collection_list=self._task.collections) if not context.resolved: result['plugin_path'] = None result['redirect_list'] = [] else: result['plugin_path'] = context.plugin_resolved_path result['redirect_list'] = context.redirect_list return result
ActionModule
python
GoogleCloudPlatform__python-docs-samples
monitoring/snippets/v3/uptime-check-client/snippets_test.py
{ "start": 857, "end": 3277 }
class ____: """A test fixture that creates uptime check config.""" def __init__(self): self.project_id = snippets.project_id() self.project_name = snippets.project_name() def __enter__(self): # Create an uptime check config (GET request). self.config_get = snippets.create_uptime_check_config_get( self.project_name, display_name=random_name(10) ) # Create an uptime check config (POST request). self.config_post = snippets.create_uptime_check_config_post( self.project_name, display_name=random_name(10) ) return self def __exit__(self, type, value, traceback): # Delete the config. snippets.delete_uptime_check_config(self.config_get.name) snippets.delete_uptime_check_config(self.config_post.name) @pytest.fixture(scope="session") def uptime(): with UptimeFixture() as uptime: yield uptime def test_create_and_delete() -> None: # create and delete happen in uptime fixture. with UptimeFixture(): pass def test_update_uptime_config() -> None: # create and delete happen in uptime fixture. new_display_name = random_name(10) new_uptime_check_path = "/" + random_name(10) with UptimeFixture() as fixture: # We sometimes see the permission error saying the resource # may not exist. Weirdly DeadlineExceeded instance is raised # in this case. 
@backoff.on_exception(backoff.expo, DeadlineExceeded, max_time=120) def call_sample(): return snippets.update_uptime_check_config( fixture.config_get.name, new_display_name, new_uptime_check_path ) result = call_sample() assert new_display_name == result.display_name assert new_uptime_check_path == result.http_check.path def test_get_uptime_check_config(uptime) -> None: config = snippets.get_uptime_check_config(uptime.config_get.name) assert uptime.config_get.display_name == config.display_name def test_list_uptime_check_configs(uptime) -> None: result = snippets.list_uptime_check_configs(uptime.project_name) assert any(item.display_name == uptime.config_get.display_name for item in result) def test_list_uptime_check_ips() -> None: result = snippets.list_uptime_check_ips() assert any(item.location == "Singapore" for item in result)
UptimeFixture
python
great-expectations__great_expectations
tests/expectations/fixtures/expect_column_values_to_equal_three.py
{ "start": 1346, "end": 3412 }
class ____(ExpectColumnValuesToEqualThree): """Expect values in this column to equal the number three.""" examples = [ { "dataset_name": "mostly_threes_second_iteration", "data": { "mostly_threes": [3, 3, 3, 3, 3, 3, 2, -1, None, None], }, "tests": [ { "title": "positive_test_with_mostly", "exact_match_out": False, "in": {"column": "mostly_threes", "mostly": 0.6}, "include_in_gallery": True, "out": { "success": True, "unexpected_index_list": [6, 7], "unexpected_list": [2, -1], }, }, { "title": "negative_test_with_mostly", "exact_match_out": False, "in": {"column": "mostly_threes", "mostly": 0.9}, "include_in_gallery": False, "out": { "success": False, "unexpected_index_list": [6, 7], "unexpected_list": [2, -1], }, }, { "title": "other_negative_test_with_mostly", "exact_match_out": False, "in": {"column": "mostly_threes", "mostly": 0.9}, # "include_in_gallery": False, #This key is omitted, so the example shouldn't show up in the gallery # noqa: E501 # FIXME CoP "out": { "success": False, "unexpected_index_list": [6, 7], "unexpected_list": [2, -1], }, }, ], } ] library_metadata = { "maturity": "EXPERIMENTAL", "tags": ["tag", "other_tag"], "contributors": [ "@abegong", ], }
ExpectColumnValuesToEqualThree__SecondIteration
python
scipy__scipy
benchmarks/benchmarks/go_benchmark_functions/go_funcs_T.py
{ "start": 11344, "end": 12659 }
class ____(Benchmark): r""" Tripod objective function. This class defines the Tripod [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\text{Tripod}}(x) = p(x_2) \left[1 + p(x_1) \right] + \lvert x_1 + 50p(x_2) \left[1 - 2p(x_1) \right] \rvert + \lvert x_2 + 50\left[1 - 2p(x_2)\right] \rvert with :math:`x_i \in [-100, 100]` for :math:`i = 1, 2`. *Global optimum*: :math:`f(x) = 0` for :math:`x = [0, -50]` .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. """ def __init__(self, dimensions=2): Benchmark.__init__(self, dimensions) self._bounds = list(zip([-100.0] * self.N, [100.0] * self.N)) self.global_optimum = [[0.0, -50.0]] self.fglob = 0.0 def fun(self, x, *args): self.nfev += 1 p1 = float(x[0] >= 0) p2 = float(x[1] >= 0) return (p2 * (1.0 + p1) + abs(x[0] + 50.0 * p2 * (1.0 - 2.0 * p1)) + abs(x[1] + 50.0 * (1.0 - 2.0 * p2)))
Tripod
python
scipy__scipy
benchmarks/benchmarks/go_benchmark_functions/go_funcs_C.py
{ "start": 238, "end": 1652 }
class ____(Benchmark): r""" CarromTable objective function. The CarromTable [1]_ global optimization problem is a multimodal minimization problem defined as follows: .. math:: f_{\text{CarromTable}}(x) = - \frac{1}{30}\left(\cos(x_1) cos(x_2) e^{\left|1 - \frac{\sqrt{x_1^2 + x_2^2}}{\pi}\right|}\right)^2 with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`. *Global optimum*: :math:`f(x) = -24.15681551650653` for :math:`x_i = \pm 9.646157266348881` for :math:`i = 1, 2` .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. """ def __init__(self, dimensions=2): Benchmark.__init__(self, dimensions) self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N)) self.global_optimum = [(9.646157266348881, 9.646134286497169), (-9.646157266348881, 9.646134286497169), (9.646157266348881, -9.646134286497169), (-9.646157266348881, -9.646134286497169)] self.fglob = -24.15681551650653 def fun(self, x, *args): self.nfev += 1 u = cos(x[0]) * cos(x[1]) v = sqrt(x[0] ** 2 + x[1] ** 2) return -((u * exp(abs(1 - v / pi))) ** 2) / 30.
CarromTable
python
ray-project__ray
release/ray_release/exception.py
{ "start": 1910, "end": 2014 }
class ____(ClusterManagerError): exit_code = ExitCode.CLUSTER_ENV_BUILD_TIMEOUT
ClusterEnvBuildTimeout
python
python-pillow__Pillow
src/PIL/DdsImagePlugin.py
{ "start": 990, "end": 1258 }
class ____(IntFlag): CUBEMAP = 0x200 CUBEMAP_POSITIVEX = 0x400 CUBEMAP_NEGATIVEX = 0x800 CUBEMAP_POSITIVEY = 0x1000 CUBEMAP_NEGATIVEY = 0x2000 CUBEMAP_POSITIVEZ = 0x4000 CUBEMAP_NEGATIVEZ = 0x8000 VOLUME = 0x200000 # Pixel Format
DDSCAPS2
python
python-poetry__poetry
src/poetry/repositories/link_sources/html.py
{ "start": 460, "end": 2409 }
class ____(LinkSource): def __init__(self, url: str, content: str) -> None: super().__init__(url=url) parser = HTMLPageParser() parser.feed(content) self._parsed = parser.anchors self._base_url: str | None = parser.base_url @cached_property def _link_cache(self) -> LinkCache: links: LinkCache = defaultdict(lambda: defaultdict(list)) for anchor in self._parsed: if href := anchor.get("href"): url = self.clean_link( urllib.parse.urljoin(self._base_url or self._url, href) ) pyrequire = anchor.get("data-requires-python") pyrequire = unescape(pyrequire) if pyrequire else None yanked_value = anchor.get("data-yanked") yanked: str | bool if yanked_value: yanked = unescape(yanked_value) else: yanked = "data-yanked" in anchor # see https://peps.python.org/pep-0714/#clients # and https://peps.python.org/pep-0658/#specification metadata: str | bool for metadata_key in ("data-core-metadata", "data-dist-info-metadata"): metadata_value = anchor.get(metadata_key) if metadata_value: metadata = unescape(metadata_value) else: metadata = metadata_key in anchor if metadata: break link = Link( url, requires_python=pyrequire, yanked=yanked, metadata=metadata ) if link.ext not in self.SUPPORTED_FORMATS: continue pkg = self.link_package_data(link) if pkg: links[pkg.name][pkg.version].append(link) return links
HTMLPage
python
doocs__leetcode
solution/2200-2299/2244.Minimum Rounds to Complete All Tasks/Solution.py
{ "start": 0, "end": 255 }
class ____: def minimumRounds(self, tasks: List[int]) -> int: cnt = Counter(tasks) ans = 0 for v in cnt.values(): if v == 1: return -1 ans += v // 3 + (v % 3 != 0) return ans
Solution
python
altair-viz__altair
altair/vegalite/v6/api.py
{ "start": 24159, "end": 24813 }
class ____(TypedDict, t.Generic[_C], total=False): """ A dictionary representation of a conditional encoding or property. Parameters ---------- condition One or more (predicate, statement) pairs which each form a condition. value An optional default value, used when no predicates were met. """ condition: Required[_C] value: Any IntoCondition: TypeAlias = Union[ConditionLike, _Conditional[Any]] """ Anything that can be converted into a conditional encoding or property. Notes ----- Represents all outputs from `when-then-otherwise` conditions, which are not ``SchemaBase`` types. """
_Conditional
python
dagster-io__dagster
python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py
{ "start": 89177, "end": 92979 }
class ____(GeneratedAirbyteSource):
    @public
    def __init__(
        self,
        name: str,
        lwa_app_id: str,
        lwa_client_secret: str,
        refresh_token: str,
        aws_access_key: str,
        aws_secret_key: str,
        role_arn: str,
        replication_start_date: str,
        aws_environment: str,
        region: str,
        app_id: Optional[str] = None,
        auth_type: Optional[str] = None,
        replication_end_date: Optional[str] = None,
        period_in_days: Optional[int] = None,
        report_options: Optional[str] = None,
        max_wait_seconds: Optional[int] = None,
    ):
        """Airbyte Source for Amazon Seller Partner.

        Documentation can be found at https://docs.airbyte.com/integrations/sources/amazon-seller-partner

        Args:
            name (str): The name of the destination.
            app_id (Optional[str]): Your Amazon App ID
            auth_type (Optional[str]): NOTE(review): undocumented upstream; presumably
                selects the authentication flow — confirm against the connector spec.
            lwa_app_id (str): Your Login with Amazon Client ID.
            lwa_client_secret (str): Your Login with Amazon Client Secret.
            refresh_token (str): The Refresh Token obtained via OAuth flow authorization.
            aws_access_key (str): Specifies the AWS access key used as part of the
                credentials to authenticate the user.
            aws_secret_key (str): Specifies the AWS secret key used as part of the
                credentials to authenticate the user.
            role_arn (str): Specifies the Amazon Resource Name (ARN) of an IAM role
                that you want to use to perform operations requested using this
                profile. (Needs permission to 'Assume Role' STS).
            replication_start_date (str): UTC date and time in the format
                2017-01-25T00:00:00Z. Any data before this date will not be replicated.
            replication_end_date (Optional[str]): UTC date and time in the format
                2017-01-25T00:00:00Z. Any data after this date will not be replicated.
            period_in_days (Optional[int]): Will be used for stream slicing for
                initial full_refresh sync when no updated state is present for
                reports that support sliced incremental sync.
            report_options (Optional[str]): Additional information passed to reports.
                This varies by report type. Must be a valid json string.
            max_wait_seconds (Optional[int]): Sometimes a report can take up to 30
                minutes to generate. This sets the limit for how long to wait for a
                successful report.
            aws_environment (str): An enumeration.
            region (str): An enumeration.
        """
        self.app_id = check.opt_str_param(app_id, "app_id")
        self.auth_type = check.opt_str_param(auth_type, "auth_type")
        self.lwa_app_id = check.str_param(lwa_app_id, "lwa_app_id")
        self.lwa_client_secret = check.str_param(lwa_client_secret, "lwa_client_secret")
        self.refresh_token = check.str_param(refresh_token, "refresh_token")
        self.aws_access_key = check.str_param(aws_access_key, "aws_access_key")
        self.aws_secret_key = check.str_param(aws_secret_key, "aws_secret_key")
        self.role_arn = check.str_param(role_arn, "role_arn")
        self.replication_start_date = check.str_param(
            replication_start_date, "replication_start_date"
        )
        self.replication_end_date = check.opt_str_param(
            replication_end_date, "replication_end_date"
        )
        self.period_in_days = check.opt_int_param(period_in_days, "period_in_days")
        self.report_options = check.opt_str_param(report_options, "report_options")
        self.max_wait_seconds = check.opt_int_param(max_wait_seconds, "max_wait_seconds")
        self.aws_environment = check.str_param(aws_environment, "aws_environment")
        self.region = check.str_param(region, "region")
        super().__init__("Amazon Seller Partner", name)
AmazonSellerPartnerSource
python
encode__django-rest-framework
tests/test_reverse.py
{ "start": 374, "end": 708 }
class ____(BaseVersioning): def __init__(self, raise_error=False): self.raise_error = raise_error def reverse(self, *args, **kwargs): if self.raise_error: raise NoReverseMatch() return 'http://scheme-reversed/view' @override_settings(ROOT_URLCONF='tests.test_reverse')
MockVersioningScheme